// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_ISOLATE_H_
#define V8_ISOLATE_H_

#include <cstddef>
#include <memory>
#include <queue>
#include <unordered_map>
#include <vector>

#include "include/v8-inspector.h"
#include "include/v8.h"
#include "src/allocation.h"
#include "src/base/atomicops.h"
#include "src/base/macros.h"
#include "src/builtins/builtins.h"
#include "src/contexts.h"
#include "src/date.h"
#include "src/debug/debug-interface.h"
#include "src/execution.h"
#include "src/futex-emulation.h"
#include "src/globals.h"
#include "src/handles.h"
#include "src/heap/factory.h"
#include "src/heap/heap.h"
#include "src/messages.h"
#include "src/objects/code.h"
#include "src/objects/debug-objects.h"
#include "src/runtime/runtime.h"
#include "src/unicode.h"

#ifdef V8_INTL_SUPPORT
#include "unicode/uversion.h"  // Define U_ICU_NAMESPACE.
// 'icu' does not work. Use U_ICU_NAMESPACE.
namespace U_ICU_NAMESPACE {

class RegexMatcher;

}  // namespace U_ICU_NAMESPACE
#endif  // V8_INTL_SUPPORT

namespace v8 {

namespace base {
class RandomNumberGenerator;
}

namespace debug {
class ConsoleDelegate;
}

namespace internal {

namespace heap {
class HeapTester;
}  // namespace heap

class AccessCompilerData;
class AddressToIndexHashMap;
class AstStringConstants;
class Bootstrapper;
class BuiltinsConstantsTableBuilder;
class CancelableTaskManager;
class CodeEventDispatcher;
class ExternalCodeEventListener;
class CodeGenerator;
class CodeRange;
class CodeStubDescriptor;
class CodeTracer;
class CompilationCache;
class CompilationStatistics;
class CompilerDispatcher;
class ContextSlotCache;
class Counters;
class CpuFeatures;
class Debug;
class DeoptimizerData;
class DescriptorLookupCache;
class EmptyStatement;
class EternalHandles;
class ExternalCallbackScope;
class HandleScopeImplementer;
class HeapObjectToIndexHashMap;
class HeapProfiler;
class InlineRuntimeFunctionsTable;
class InnerPointerToCodeCache;
class InstructionStream;
class Logger;
class MaterializedObjectStore;
class Microtask;
class OptimizingCompileDispatcher;
class PromiseOnStack;
class Redirection;
class RegExpStack;
class RootVisitor;
class RuntimeProfiler;
class SaveContext;
class SetupIsolateDelegate;
class Simulator;
class StartupDeserializer;
class StandardFrame;
class StatsTable;
class StringTracker;
class StubCache;
class SweeperThread;
class ThreadManager;
class ThreadState;
class ThreadVisitor;  // Defined in v8threads.h
class TracingCpuProfilerImpl;
class UnicodeCache;
struct ManagedPtrDestructor;

template <StateTag Tag> class VMState;

namespace interpreter {
class Interpreter;
}

namespace wasm {
class WasmEngine;
}

#define RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate) \
  do {                                                 \
    Isolate* __isolate__ = (isolate);                  \
    DCHECK(!__isolate__->has_pending_exception());     \
    if (__isolate__->has_scheduled_exception()) {      \
      return __isolate__->PromoteScheduledException(); \
    }                                                  \
  } while (false)
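
// Illustrative usage sketch (not part of the original header); the names
// Runtime_HypotheticalFunc and DoApiCallback are made up. A runtime function
// that re-enters API code typically guards its return path like this:
//
//   RUNTIME_FUNCTION(Runtime_HypotheticalFunc) {
//     HandleScope scope(isolate);
//     DoApiCallback(isolate);  // May schedule an exception.
//     RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
//     return ReadOnlyRoots(isolate).undefined_value();
//   }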

// Macros for MaybeHandle.

#define RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, value) \
  do {                                                      \
    Isolate* __isolate__ = (isolate);                       \
    DCHECK(!__isolate__->has_pending_exception());          \
    if (__isolate__->has_scheduled_exception()) {           \
      __isolate__->PromoteScheduledException();             \
      return value;                                         \
    }                                                       \
  } while (false)

#define RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, T) \
  RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, MaybeHandle<T>())

#define ASSIGN_RETURN_ON_SCHEDULED_EXCEPTION_VALUE(isolate, dst, call, value) \
  do {                                                                        \
    Isolate* __isolate__ = (isolate);                                         \
    if (!(call).ToLocal(&dst)) {                                              \
      DCHECK(__isolate__->has_scheduled_exception());                         \
      __isolate__->PromoteScheduledException();                               \
      return value;                                                           \
    }                                                                         \
  } while (false)

#define RETURN_ON_SCHEDULED_EXCEPTION_VALUE(isolate, call, value) \
  do {                                                            \
    Isolate* __isolate__ = (isolate);                             \
    if ((call).IsNothing()) {                                     \
      DCHECK(__isolate__->has_scheduled_exception());             \
      __isolate__->PromoteScheduledException();                   \
      return value;                                               \
    }                                                             \
  } while (false)

/**
 * RETURN_RESULT_OR_FAILURE is used in functions with return type Object*
 * (such as "RUNTIME_FUNCTION(...) {...}" or "BUILTIN(...) {...}") to return
 * either the contents of a MaybeHandle<X>, or the "exception" sentinel value.
 * Example usage:
 *
 * RUNTIME_FUNCTION(Runtime_Func) {
 *   ...
 *   RETURN_RESULT_OR_FAILURE(
 *       isolate,
 *       FunctionWithReturnTypeMaybeHandleX(...));
 * }
 *
 * If inside a function with return type MaybeHandle<X>, use
 * RETURN_ON_EXCEPTION instead.
 * If inside a function with return type Handle<X> or Maybe<X>, use
 * RETURN_ON_EXCEPTION_VALUE instead.
 */
#define RETURN_RESULT_OR_FAILURE(isolate, call)      \
  do {                                               \
    Handle<Object> __result__;                       \
    Isolate* __isolate__ = (isolate);                \
    if (!(call).ToHandle(&__result__)) {             \
      DCHECK(__isolate__->has_pending_exception());  \
      return ReadOnlyRoots(__isolate__).exception(); \
    }                                                \
    DCHECK(!__isolate__->has_pending_exception());   \
    return *__result__;                              \
  } while (false)

#define ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, value)  \
  do {                                                               \
    if (!(call).ToHandle(&dst)) {                                    \
      DCHECK((isolate)->has_pending_exception());                    \
      return value;                                                  \
    }                                                                \
  } while (false)

#define ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, dst, call)                \
  do {                                                                        \
    Isolate* __isolate__ = (isolate);                                         \
    ASSIGN_RETURN_ON_EXCEPTION_VALUE(__isolate__, dst, call,                  \
                                     ReadOnlyRoots(__isolate__).exception()); \
  } while (false)

#define ASSIGN_RETURN_ON_EXCEPTION(isolate, dst, call, T)  \
  ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, MaybeHandle<T>())
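
// Illustrative usage sketch (not part of the original header); ToName here is
// a made-up callee. Inside a function returning MaybeHandle<String>, an
// intermediate MaybeHandle can be unwrapped with automatic propagation of a
// pending exception:
//
//   MaybeHandle<String> Func(Isolate* isolate, Handle<Object> input) {
//     Handle<Name> name;
//     ASSIGN_RETURN_ON_EXCEPTION(isolate, name, ToName(isolate, input),
//                                String);
//     // From here on, no exception is pending and *name* is valid.
//     ...
//   }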

#define THROW_NEW_ERROR(isolate, call, T)                       \
  do {                                                          \
    Isolate* __isolate__ = (isolate);                           \
    return __isolate__->Throw<T>(__isolate__->factory()->call); \
  } while (false)

#define THROW_NEW_ERROR_RETURN_FAILURE(isolate, call)         \
  do {                                                        \
    Isolate* __isolate__ = (isolate);                         \
    return __isolate__->Throw(*__isolate__->factory()->call); \
  } while (false)

#define THROW_NEW_ERROR_RETURN_VALUE(isolate, call, value) \
  do {                                                     \
    Isolate* __isolate__ = (isolate);                      \
    __isolate__->Throw(*__isolate__->factory()->call);     \
    return value;                                          \
  } while (false)
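
// Illustrative sketch (not part of the original header): the THROW_NEW_ERROR*
// macros splice "call" onto the isolate's Factory, so the argument is a
// factory error-constructor call. Assuming a message template name for the
// example:
//
//   THROW_NEW_ERROR(isolate,
//                   NewTypeError(MessageTemplate::kInvalidArgument), Object);
//
// expands roughly to
//
//   return __isolate__->Throw<Object>(__isolate__->factory()->NewTypeError(
//       MessageTemplate::kInvalidArgument));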

/**
 * RETURN_ON_EXCEPTION_VALUE conditionally returns the given value when the
 * given MaybeHandle is empty. It is typically used in functions with return
 * type Maybe<X> or Handle<X>. Example usage:
 *
 * Handle<X> Func() {
 *   ...
 *   RETURN_ON_EXCEPTION_VALUE(
 *       isolate,
 *       FunctionWithReturnTypeMaybeHandleX(...),
 *       Handle<X>());
 *   // code to handle the non-exception case
 *   ...
 * }
 *
 * Maybe<bool> Func() {
 *   ...
 *   RETURN_ON_EXCEPTION_VALUE(
 *       isolate,
 *       FunctionWithReturnTypeMaybeHandleX(...),
 *       Nothing<bool>());
 *   // code to handle the non-exception case
 *   return Just(true);
 * }
 *
 * If inside a function with return type MaybeHandle<X>, use
 * RETURN_ON_EXCEPTION instead.
 * If inside a function with return type Object*, use
 * RETURN_FAILURE_ON_EXCEPTION instead.
 */
#define RETURN_ON_EXCEPTION_VALUE(isolate, call, value)            \
  do {                                                             \
    if ((call).is_null()) {                                        \
      DCHECK((isolate)->has_pending_exception());                  \
      return value;                                                \
    }                                                              \
  } while (false)

/**
 * RETURN_FAILURE_ON_EXCEPTION conditionally returns the "exception" sentinel
 * if the given MaybeHandle is empty; it can therefore only be used in
 * functions with return type Object*, such as RUNTIME_FUNCTION(...) {...} or
 * BUILTIN(...) {...}. Example usage:
 *
 * RUNTIME_FUNCTION(Runtime_Func) {
 *   ...
 *   RETURN_FAILURE_ON_EXCEPTION(
 *       isolate,
 *       FunctionWithReturnTypeMaybeHandleX(...));
 *   // code to handle the non-exception case
 *   ...
 * }
 *
 * If inside a function with return type MaybeHandle<X>, use
 * RETURN_ON_EXCEPTION instead.
 * If inside a function with return type Maybe<X> or Handle<X>, use
 * RETURN_ON_EXCEPTION_VALUE instead.
 */
#define RETURN_FAILURE_ON_EXCEPTION(isolate, call)                     \
  do {                                                                 \
    Isolate* __isolate__ = (isolate);                                  \
    RETURN_ON_EXCEPTION_VALUE(__isolate__, call,                       \
                              ReadOnlyRoots(__isolate__).exception()); \
  } while (false)

/**
 * RETURN_ON_EXCEPTION conditionally returns an empty MaybeHandle<T> if the
 * given MaybeHandle is empty. Use it to return immediately from a function
 * with return type MaybeHandle when an exception was thrown. Example usage:
 *
 * MaybeHandle<X> Func() {
 *   ...
 *   RETURN_ON_EXCEPTION(
 *       isolate,
 *       FunctionWithReturnTypeMaybeHandleY(...),
 *       X);
 *   // code to handle the non-exception case
 *   ...
 * }
 *
 * If inside a function with return type Object*, use
 * RETURN_FAILURE_ON_EXCEPTION instead.
 * If inside a function with return type Maybe<X> or Handle<X>, use
 * RETURN_ON_EXCEPTION_VALUE instead.
 */
#define RETURN_ON_EXCEPTION(isolate, call, T)  \
  RETURN_ON_EXCEPTION_VALUE(isolate, call, MaybeHandle<T>())


#define FOR_WITH_HANDLE_SCOPE(isolate, loop_var_type, init, loop_var,      \
                              limit_check, increment, body)                \
  do {                                                                     \
    loop_var_type init;                                                    \
    loop_var_type for_with_handle_limit = loop_var;                        \
    Isolate* for_with_handle_isolate = isolate;                            \
    while (limit_check) {                                                  \
      for_with_handle_limit += 1024;                                       \
      HandleScope loop_scope(for_with_handle_isolate);                     \
      for (; limit_check && loop_var < for_with_handle_limit; increment) { \
        body                                                               \
      }                                                                    \
    }                                                                      \
  } while (false)
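
// Illustrative usage sketch (not part of the original header); array, length
// and Process are made-up names. The macro re-creates the HandleScope every
// 1024 iterations, so handles allocated in the body do not pile up across a
// long loop:
//
//   FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < length, i++, {
//     Handle<Object> element(array->get(i), isolate);
//     Process(element);
//   });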

// Platform-independent, reliable thread identifier.
class ThreadId {
 public:
  // Creates an invalid ThreadId.
  ThreadId() { base::Relaxed_Store(&id_, kInvalidId); }

  ThreadId& operator=(const ThreadId& other) {
    base::Relaxed_Store(&id_, base::Relaxed_Load(&other.id_));
    return *this;
  }

  bool operator==(const ThreadId& other) const { return Equals(other); }

  // Returns ThreadId for current thread.
  static ThreadId Current() { return ThreadId(GetCurrentThreadId()); }

  // Returns invalid ThreadId (guaranteed not to be equal to any thread).
  static ThreadId Invalid() { return ThreadId(kInvalidId); }

  // Compares ThreadIds for equality.
  V8_INLINE bool Equals(const ThreadId& other) const {
    return base::Relaxed_Load(&id_) == base::Relaxed_Load(&other.id_);
  }

  // Checks whether this ThreadId refers to any thread.
  V8_INLINE bool IsValid() const {
    return base::Relaxed_Load(&id_) != kInvalidId;
  }

  // Converts ThreadId to an integer representation
  // (required for public API: V8::V8::GetCurrentThreadId).
  int ToInteger() const { return static_cast<int>(base::Relaxed_Load(&id_)); }

  // Converts ThreadId to an integer representation
  // (required for public API: V8::V8::TerminateExecution).
  static ThreadId FromInteger(int id) { return ThreadId(id); }

 private:
  static const int kInvalidId = -1;

  explicit ThreadId(int id) { base::Relaxed_Store(&id_, id); }

  static int AllocateThreadId();

  V8_EXPORT_PRIVATE static int GetCurrentThreadId();

  base::Atomic32 id_;

  static base::Atomic32 highest_thread_id_;

  friend class Isolate;
};
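
// Illustrative sketch (not part of the original header): ThreadId is commonly
// used to check whether the running thread is the one currently bound to an
// isolate:
//
//   if (ThreadId::Current().Equals(isolate->thread_id())) {
//     // Safe to touch isolate-local state from this thread.
//   }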

#define FIELD_ACCESSOR(type, name)                 \
  inline void set_##name(type v) { name##_ = v; }  \
  inline type name() const { return name##_; }
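
// Illustrative sketch (not part of the original header): FIELD_ACCESSOR
// generates a trivial setter/getter pair over a trailing-underscore member;
// FIELD_ACCESSOR(int, foo) expands to
//
//   inline void set_foo(int v) { foo_ = v; }
//   inline int foo() const { return foo_; }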

class ThreadLocalTop BASE_EMBEDDED {
 public:
  // Does early low-level initialization that does not depend on the
  // isolate being present.
  ThreadLocalTop() = default;

  // Initialize the thread data.
  void Initialize(Isolate*);

  // Get the top C++ try catch handler or nullptr if none are registered.
  //
  // This method is not guaranteed to return an address that can be
  // used for comparison with addresses into the JS stack.  If such an
  // address is needed, use try_catch_handler_address.
  FIELD_ACCESSOR(v8::TryCatch*, try_catch_handler)

  // Get the address of the top C++ try catch handler or nullptr if
  // none are registered.
  //
  // This method always returns an address that can be compared to
  // pointers into the JavaScript stack.  When running on actual
  // hardware, try_catch_handler_address and TryCatchHandler return
  // the same pointer.  When running on a simulator with a separate JS
  // stack, try_catch_handler_address returns a JS stack address that
  // corresponds to the place on the JS stack where the C++ handler
  // would have been if the stack were not separate.
  Address try_catch_handler_address() {
    return reinterpret_cast<Address>(
        v8::TryCatch::JSStackComparableAddress(try_catch_handler()));
  }

  void Free();

  Isolate* isolate_ = nullptr;
  // The context where the current execution method is created and for variable
  // lookups.
  Context* context_ = nullptr;
  ThreadId thread_id_ = ThreadId::Invalid();
  Object* pending_exception_ = nullptr;
  // TODO(kschimpf): Change this to a stack of caught exceptions (rather than
  // just innermost catching try block).
  Object* wasm_caught_exception_ = nullptr;

  // Communication channel between Isolate::FindHandler and the CEntry.
  Context* pending_handler_context_ = nullptr;
  Address pending_handler_entrypoint_ = kNullAddress;
  Address pending_handler_constant_pool_ = kNullAddress;
  Address pending_handler_fp_ = kNullAddress;
  Address pending_handler_sp_ = kNullAddress;

  // Communication channel between Isolate::Throw and message consumers.
  bool rethrowing_message_ = false;
  Object* pending_message_obj_ = nullptr;

  // Use a separate value for scheduled exceptions to preserve the
  // invariants that hold about pending_exception.  We may want to
  // unify them later.
  Object* scheduled_exception_ = nullptr;
  bool external_caught_exception_ = false;
  SaveContext* save_context_ = nullptr;

  // Stack.
  // The frame pointer of the top c entry frame.
  Address c_entry_fp_ = kNullAddress;
  // Try-blocks are chained through the stack.
  Address handler_ = kNullAddress;
  // C function that was called at c entry.
  Address c_function_ = kNullAddress;

  // Throwing an exception may cause a Promise rejection.  For this purpose
  // we keep track of a stack of nested promises and the corresponding
  // try-catch handlers.
  PromiseOnStack* promise_on_stack_ = nullptr;

#ifdef USE_SIMULATOR
  Simulator* simulator_ = nullptr;
#endif

  // The stack pointer of the bottom JS entry frame.
  Address js_entry_sp_ = kNullAddress;
  // The external callback we're currently in.
  ExternalCallbackScope* external_callback_scope_ = nullptr;
  StateTag current_vm_state_ = EXTERNAL;

  // Callback function to report unsafe JS accesses.
  v8::FailedAccessCheckCallback failed_access_check_callback_ = nullptr;

  // Address of the thread-local "thread in wasm" flag.
  Address thread_in_wasm_flag_address_ = kNullAddress;

 private:
  v8::TryCatch* try_catch_handler_ = nullptr;
};

#ifdef DEBUG

#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)               \
  V(CommentStatistic, paged_space_comments_statistics, \
    CommentStatistic::kMaxComments + 1)                \
  V(int, code_kind_statistics, AbstractCode::NUMBER_OF_KINDS)
#else

#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)

#endif

#define ISOLATE_INIT_ARRAY_LIST(V)                                             \
  /* SerializerDeserializer state. */                                          \
  V(int32_t, jsregexp_static_offsets_vector, kJSRegexpStaticOffsetsVectorSize) \
  V(int, bad_char_shift_table, kUC16AlphabetSize)                              \
  V(int, good_suffix_shift_table, (kBMMaxShift + 1))                           \
  V(int, suffix_table, (kBMMaxShift + 1))                                      \
  ISOLATE_INIT_DEBUG_ARRAY_LIST(V)

typedef std::vector<HeapObject*> DebugObjectCache;

#define ISOLATE_INIT_LIST(V)                                                  \
  /* Assembler state. */                                                      \
  V(FatalErrorCallback, exception_behavior, nullptr)                          \
  V(OOMErrorCallback, oom_behavior, nullptr)                                  \
  V(LogEventCallback, event_logger, nullptr)                                  \
  V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, nullptr) \
  V(AllowWasmCodeGenerationCallback, allow_wasm_code_gen_callback, nullptr)   \
  V(ExtensionCallback, wasm_module_callback, &NoExtension)                    \
  V(ExtensionCallback, wasm_instance_callback, &NoExtension)                  \
  V(ApiImplementationCallback, wasm_compile_streaming_callback, nullptr)      \
  V(WasmStreamingCallback, wasm_streaming_callback, nullptr)                  \
  V(WasmThreadsEnabledCallback, wasm_threads_enabled_callback, nullptr)       \
  /* State for Relocatable. */                                                \
  V(Relocatable*, relocatable_top, nullptr)                                   \
  V(DebugObjectCache*, string_stream_debug_object_cache, nullptr)             \
  V(Object*, string_stream_current_security_token, nullptr)                   \
  V(const intptr_t*, api_external_references, nullptr)                        \
  V(AddressToIndexHashMap*, external_reference_map, nullptr)                  \
  V(HeapObjectToIndexHashMap*, root_index_map, nullptr)                       \
  V(int, pending_microtask_count, 0)                                          \
  V(CompilationStatistics*, turbo_statistics, nullptr)                        \
  V(CodeTracer*, code_tracer, nullptr)                                        \
  V(uint32_t, per_isolate_assert_data, 0xFFFFFFFFu)                           \
  V(PromiseRejectCallback, promise_reject_callback, nullptr)                  \
  V(const v8::StartupData*, snapshot_blob, nullptr)                           \
  V(int, code_and_metadata_size, 0)                                           \
  V(int, bytecode_and_metadata_size, 0)                                       \
  V(int, external_script_source_size, 0)                                      \
  /* true if being profiled. Causes collection of extra compile info. */      \
  V(bool, is_profiling, false)                                                \
  /* true if a trace is being formatted through Error.prepareStackTrace. */   \
  V(bool, formatting_stack_trace, false)                                      \
  /* Perform side effect checks on function call and API callbacks. */        \
  V(DebugInfo::ExecutionMode, debug_execution_mode, DebugInfo::kBreakpoints)  \
  /* Current code coverage mode */                                            \
  V(debug::Coverage::Mode, code_coverage_mode, debug::Coverage::kBestEffort)  \
  V(debug::TypeProfile::Mode, type_profile_mode, debug::TypeProfile::kNone)   \
  V(int, last_stack_frame_info_id, 0)                                         \
  V(int, last_console_context_id, 0)                                         \
  V(v8_inspector::V8Inspector*, inspector, nullptr)                           \
  V(bool, next_v8_call_is_safe_for_termination, false)                        \
  V(bool, only_terminate_in_safe_scope, false)

#define THREAD_LOCAL_TOP_ACCESSOR(type, name)                        \
  inline void set_##name(type v) { thread_local_top_.name##_ = v; }  \
  inline type name() const { return thread_local_top_.name##_; }

#define THREAD_LOCAL_TOP_ADDRESS(type, name) \
  type* name##_address() { return &thread_local_top_.name##_; }
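
// Illustrative sketch (not part of the original header): both macros generate
// Isolate members that forward to thread_local_top_. For example,
// THREAD_LOCAL_TOP_ACCESSOR(ThreadId, thread_id) expands to
//
//   inline void set_thread_id(ThreadId v) { thread_local_top_.thread_id_ = v; }
//   inline ThreadId thread_id() const { return thread_local_top_.thread_id_; }
//
// and THREAD_LOCAL_TOP_ADDRESS(Object*, pending_exception) expands to
//
//   Object** pending_exception_address() {
//     return &thread_local_top_.pending_exception_;
//   }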

// HiddenFactory exists so Isolate can privately inherit from it without making
// Factory's members available to Isolate directly.
class V8_EXPORT_PRIVATE HiddenFactory : private Factory {};

class Isolate : private HiddenFactory {
  // These forward declarations are required to make the friend declarations in
  // PerIsolateThreadData work on some older versions of gcc.
  class ThreadDataTable;
  class EntryStackItem;
 public:
  ~Isolate();

  // A thread has a PerIsolateThreadData instance for each isolate that it has
  // entered. That instance is allocated when the isolate is initially entered
  // and reused on subsequent entries.
  class PerIsolateThreadData {
   public:
    PerIsolateThreadData(Isolate* isolate, ThreadId thread_id)
        : isolate_(isolate),
          thread_id_(thread_id),
          stack_limit_(0),
          thread_state_(nullptr),
#if USE_SIMULATOR
          simulator_(nullptr),
#endif
          next_(nullptr),
          prev_(nullptr) {
    }
    ~PerIsolateThreadData();
    Isolate* isolate() const { return isolate_; }
    ThreadId thread_id() const { return thread_id_; }

    FIELD_ACCESSOR(uintptr_t, stack_limit)
    FIELD_ACCESSOR(ThreadState*, thread_state)

#if USE_SIMULATOR
    FIELD_ACCESSOR(Simulator*, simulator)
#endif

    bool Matches(Isolate* isolate, ThreadId thread_id) const {
      return isolate_ == isolate && thread_id_.Equals(thread_id);
    }

   private:
    Isolate* isolate_;
    ThreadId thread_id_;
    uintptr_t stack_limit_;
    ThreadState* thread_state_;

#if USE_SIMULATOR
    Simulator* simulator_;
#endif

    PerIsolateThreadData* next_;
    PerIsolateThreadData* prev_;

    friend class Isolate;
    friend class ThreadDataTable;
    friend class EntryStackItem;

    DISALLOW_COPY_AND_ASSIGN(PerIsolateThreadData);
  };

  static void InitializeOncePerProcess();

  // Returns the PerIsolateThreadData for the current thread (or nullptr if one
  // is not currently set).
  static PerIsolateThreadData* CurrentPerIsolateThreadData() {
    return reinterpret_cast<PerIsolateThreadData*>(
        base::Thread::GetThreadLocal(per_isolate_thread_data_key_));
  }

  // Returns the isolate inside which the current thread is running.
  V8_INLINE static Isolate* Current() {
    DCHECK_EQ(base::Relaxed_Load(&isolate_key_created_), 1);
    Isolate* isolate = reinterpret_cast<Isolate*>(
        base::Thread::GetExistingThreadLocal(isolate_key_));
    DCHECK_NOT_NULL(isolate);
    return isolate;
  }

  // Get the isolate that the given HeapObject lives in, returning true on
  // success. If the object is not writable (i.e. lives in read-only space),
  // return false.
  inline static bool FromWritableHeapObject(HeapObject* obj, Isolate** isolate);

  // Usually called by Init(), but can be called early e.g. to allow
  // testing components that require logging but not the whole
  // isolate.
  //
  // Safe to call more than once.
  void InitializeLoggingAndCounters();
  bool InitializeCounters();  // Returns false if already initialized.

  bool Init(StartupDeserializer* des);

  // True if at least one thread Enter'ed this isolate.
  bool IsInUse() { return entry_stack_ != nullptr; }

  // Destroys the non-default isolates.
  // Sets the default isolate into a "has_been_disposed" state rather than
  // destroying it, for legacy API reasons.
  void TearDown();

  void ReleaseSharedPtrs();

  void ClearSerializerData();

  // Finds the PerThread for this particular (isolate, thread) combination.
  // If one does not yet exist, returns null.
  PerIsolateThreadData* FindPerThreadDataForThisThread();

  // Finds the PerThread for the given (isolate, thread) combination.
  // If one does not yet exist, returns null.
  PerIsolateThreadData* FindPerThreadDataForThread(ThreadId thread_id);

  // Discards the PerThread for this particular (isolate, thread) combination.
  // If one does not exist, this is a no-op.
  void DiscardPerThreadDataForThisThread();

  // Returns the key used to store the pointer to the current isolate.
  // Used internally for V8 threads that do not execute JavaScript but still
  // are part of the domain of an isolate (like the context switcher).
  static base::Thread::LocalStorageKey isolate_key() {
    return isolate_key_;
  }

  // Returns the key used to store process-wide thread IDs.
  static base::Thread::LocalStorageKey thread_id_key() {
    return thread_id_key_;
  }

  static base::Thread::LocalStorageKey per_isolate_thread_data_key();

  // Mutex for serializing access to break control structures.
  base::RecursiveMutex* break_access() { return &break_access_; }

  Address get_address_from_id(IsolateAddressId id);

  // Access to top context (where the current function object was created).
  Context* context() { return thread_local_top_.context_; }
  inline void set_context(Context* context);
  Context** context_address() { return &thread_local_top_.context_; }

  THREAD_LOCAL_TOP_ACCESSOR(SaveContext*, save_context)

  // Access to current thread id.
  THREAD_LOCAL_TOP_ACCESSOR(ThreadId, thread_id)

  // Interface to pending exception.
  inline Object* pending_exception();
  inline void set_pending_exception(Object* exception_obj);
  inline void clear_pending_exception();

  // Interface to wasm caught exception.
  inline Object* get_wasm_caught_exception();
  inline void set_wasm_caught_exception(Object* exception);
  inline void clear_wasm_caught_exception();

  bool AreWasmThreadsEnabled(Handle<Context> context);

  THREAD_LOCAL_TOP_ADDRESS(Object*, pending_exception)

  inline bool has_pending_exception();

  THREAD_LOCAL_TOP_ADDRESS(Context*, pending_handler_context)
  THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_entrypoint)
  THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_constant_pool)
  THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_fp)
  THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_sp)

  THREAD_LOCAL_TOP_ACCESSOR(bool, external_caught_exception)

  v8::TryCatch* try_catch_handler() {
    return thread_local_top_.try_catch_handler();
  }
  bool* external_caught_exception_address() {
    return &thread_local_top_.external_caught_exception_;
  }

  THREAD_LOCAL_TOP_ADDRESS(Object*, scheduled_exception)

  inline void clear_pending_message();
  Address pending_message_obj_address() {
    return reinterpret_cast<Address>(&thread_local_top_.pending_message_obj_);
  }

  inline Object* scheduled_exception();
  inline bool has_scheduled_exception();
  inline void clear_scheduled_exception();

  bool IsJavaScriptHandlerOnTop(Object* exception);
  bool IsExternalHandlerOnTop(Object* exception);

  inline bool is_catchable_by_javascript(Object* exception);
  bool is_catchable_by_wasm(Object* exception);

  // JS execution stack (see frames.h).
  static Address c_entry_fp(ThreadLocalTop* thread) {
    return thread->c_entry_fp_;
  }
  static Address handler(ThreadLocalTop* thread) { return thread->handler_; }
  Address c_function() { return thread_local_top_.c_function_; }

  inline Address* c_entry_fp_address() {
    return &thread_local_top_.c_entry_fp_;
  }
  inline Address* handler_address() { return &thread_local_top_.handler_; }
  inline Address* c_function_address() {
    return &thread_local_top_.c_function_;
  }

  // Bottom JS entry.
  Address js_entry_sp() {
    return thread_local_top_.js_entry_sp_;
  }
  inline Address* js_entry_sp_address() {
    return &thread_local_top_.js_entry_sp_;
  }

  // Returns the global object of the current context. It could be
  // a builtin object, or a JS global object.
  inline Handle<JSGlobalObject> global_object();

  // Returns the global proxy object of the current context.
  inline Handle<JSObject> global_proxy();

  static int ArchiveSpacePerThread() { return sizeof(ThreadLocalTop); }
  void FreeThreadResources() { thread_local_top_.Free(); }

  // This method is called by the api after operations that may throw
  // exceptions.  If an exception was thrown and not handled by an external
  // handler the exception is scheduled to be rethrown when we return to running
  // JavaScript code.  If an exception is scheduled true is returned.
  V8_EXPORT_PRIVATE bool OptionalRescheduleException(bool is_bottom_call);

  // Push and pop a promise and the current try-catch handler.
  void PushPromise(Handle<JSObject> promise);
  void PopPromise();

  // Returns the relevant Promise that a throw/rejection pertains to, based
  // on the contents of the Promise stack.
  Handle<Object> GetPromiseOnStackOnThrow();

  // Heuristically guesses whether a Promise is handled by a user catch
  // handler.
  bool PromiseHasUserDefinedRejectHandler(Handle<Object> promise);

  class ExceptionScope {
   public:
    // Scope currently can only be used for regular exceptions,
    // not termination exception.
    inline explicit ExceptionScope(Isolate* isolate);
    inline ~ExceptionScope();

   private:
    Isolate* isolate_;
    Handle<Object> pending_exception_;
  };

  void SetCaptureStackTraceForUncaughtExceptions(
      bool capture,
      int frame_limit,
      StackTrace::StackTraceOptions options);

  void SetAbortOnUncaughtExceptionCallback(
      v8::Isolate::AbortOnUncaughtExceptionCallback callback);

  enum PrintStackMode { kPrintStackConcise, kPrintStackVerbose };
  void PrintCurrentStackTrace(FILE* out);
  void PrintStack(StringStream* accumulator,
                  PrintStackMode mode = kPrintStackVerbose);
  V8_EXPORT_PRIVATE void PrintStack(FILE* out,
                                    PrintStackMode mode = kPrintStackVerbose);
  Handle<String> StackTraceString();
  // Stores a stack trace in a stack-allocated temporary buffer which will
  // end up in the minidump for debugging purposes.
  V8_NOINLINE void PushStackTraceAndDie(void* ptr1 = nullptr,
                                        void* ptr2 = nullptr,
                                        void* ptr3 = nullptr,
                                        void* ptr4 = nullptr);
  Handle<FixedArray> CaptureCurrentStackTrace(
      int frame_limit, StackTrace::StackTraceOptions options);
  Handle<Object> CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
                                         FrameSkipMode mode,
                                         Handle<Object> caller);
  MaybeHandle<JSReceiver> CaptureAndSetDetailedStackTrace(
      Handle<JSReceiver> error_object);
  MaybeHandle<JSReceiver> CaptureAndSetSimpleStackTrace(
      Handle<JSReceiver> error_object, FrameSkipMode mode,
      Handle<Object> caller);
  Handle<FixedArray> GetDetailedStackTrace(Handle<JSObject> error_object);

  Address GetAbstractPC(int* line, int* column);

  // Returns whether the given context may access the given global object. If
  // the result is false, the pending exception is guaranteed to be
  // set.
  bool MayAccess(Handle<Context> accessing_context, Handle<JSObject> receiver);

  void SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback);
  void ReportFailedAccessCheck(Handle<JSObject> receiver);

  // Exception throwing support. The caller should use the result
  // of Throw() as its return value.
  Object* Throw(Object* exception, MessageLocation* location = nullptr);
  Object* ThrowIllegalOperation();

  template <typename T>
  V8_WARN_UNUSED_RESULT MaybeHandle<T> Throw(
      Handle<Object> exception, MessageLocation* location = nullptr) {
    Throw(*exception, location);
    return MaybeHandle<T>();
  }

  void set_console_delegate(debug::ConsoleDelegate* delegate) {
    console_delegate_ = delegate;
  }
  debug::ConsoleDelegate* console_delegate() { return console_delegate_; }

  void set_async_event_delegate(debug::AsyncEventDelegate* delegate) {
    async_event_delegate_ = delegate;
    PromiseHookStateUpdated();
  }
  void OnAsyncFunctionStateChanged(Handle<JSPromise> promise,
                                   debug::DebugAsyncActionType);

  // Re-throw an exception.  This involves no error reporting since error
  // reporting was handled when the exception was thrown originally.
  Object* ReThrow(Object* exception);

  // Find the correct handler for the current pending exception. This also
  // clears and returns the current pending exception.
  Object* UnwindAndFindHandler();

  // Tries to predict whether an exception will be caught. Note that this can
  // only produce an estimate, because it is undecidable whether a finally
  // clause will consume or re-throw an exception.
  enum CatchType {
    NOT_CAUGHT,
    CAUGHT_BY_JAVASCRIPT,
    CAUGHT_BY_EXTERNAL,
    CAUGHT_BY_DESUGARING,
    CAUGHT_BY_PROMISE,
    CAUGHT_BY_ASYNC_AWAIT
  };
  CatchType PredictExceptionCatcher();

  V8_EXPORT_PRIVATE void ScheduleThrow(Object* exception);
  // Re-set pending message, script and positions reported to the TryCatch
  // back to the TLS for re-use when rethrowing.
  void RestorePendingMessageFromTryCatch(v8::TryCatch* handler);
  // Un-schedule an exception that was caught by a TryCatch handler.
  void CancelScheduledExceptionFromTryCatch(v8::TryCatch* handler);
  void ReportPendingMessages();
  void ReportPendingMessagesFromJavaScript();

  // Implements code shared between the two methods above.
  void ReportPendingMessagesImpl(bool report_externally);

  // Returns the pending message location if one is set, or an unfilled
  // structure otherwise.
  MessageLocation GetMessageLocation();

  // Promote a scheduled exception to pending. Asserts has_scheduled_exception.
  Object* PromoteScheduledException();

  // Attempts to compute the current source location, storing the
  // result in the target out parameter. The source location is attached to a
  // Message object as the location which should be shown to the user. It's
  // typically the top-most meaningful location on the stack.
  bool ComputeLocation(MessageLocation* target);
  bool ComputeLocationFromException(MessageLocation* target,
                                    Handle<Object> exception);
  bool ComputeLocationFromStackTrace(MessageLocation* target,
                                     Handle<Object> exception);

  Handle<JSMessageObject> CreateMessage(Handle<Object> exception,
                                        MessageLocation* location);

  // Out of resource exception helpers.
  Object* StackOverflow();
  Object* TerminateExecution();
  void CancelTerminateExecution();

  void RequestInterrupt(InterruptCallback callback, void* data);
  void InvokeApiInterruptCallbacks();

  // Administration
  void Iterate(RootVisitor* v);
  void Iterate(RootVisitor* v, ThreadLocalTop* t);
  char* Iterate(RootVisitor* v, char* t);
  void IterateThread(ThreadVisitor* v, char* t);

  // Returns the current native context.
  inline Handle<NativeContext> native_context();
  inline NativeContext* raw_native_context();

  Handle<Context> GetIncumbentContext();

  void RegisterTryCatchHandler(v8::TryCatch* that);
  void UnregisterTryCatchHandler(v8::TryCatch* that);

  char* ArchiveThread(char* to);
  char* RestoreThread(char* from);

  static const int kUC16AlphabetSize = 256;  // See StringSearchBase.
  static const int kBMMaxShift = 250;        // See StringSearchBase.

  // Accessors.
#define GLOBAL_ACCESSOR(type, name, initialvalue)                       \
  inline type name() const {                                            \
    DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_);        \
    return name##_;                                                     \
  }                                                                     \
  inline void set_##name(type value) {                                  \
    DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_);        \
    name##_ = value;                                                    \
  }
  ISOLATE_INIT_LIST(GLOBAL_ACCESSOR)
#undef GLOBAL_ACCESSOR

#define GLOBAL_ARRAY_ACCESSOR(type, name, length)                       \
  inline type* name() {                                                 \
    DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_);        \
    return &(name##_)[0];                                               \
  }
  ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_ACCESSOR)
#undef GLOBAL_ARRAY_ACCESSOR

#define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name) \
  inline Handle<type> name();                            \
  inline bool is_##name(type* value);
  NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR)
#undef NATIVE_CONTEXT_FIELD_ACCESSOR

  Bootstrapper* bootstrapper() { return bootstrapper_; }
  // Use for updating counters on a foreground thread.
  Counters* counters() { return async_counters().get(); }
  // Use for updating counters on a background thread.
  const std::shared_ptr<Counters>& async_counters() {
    // Make sure InitializeCounters() has been called.
    DCHECK_NOT_NULL(async_counters_.get());
    return async_counters_;
  }
  RuntimeProfiler* runtime_profiler() { return runtime_profiler_; }
  CompilationCache* compilation_cache() { return compilation_cache_; }
  Logger* logger() {
    // Call InitializeLoggingAndCounters() if logging is needed before
    // the isolate is fully initialized.
    DCHECK_NOT_NULL(logger_);
    return logger_;
  }
  StackGuard* stack_guard() { return &stack_guard_; }
  Heap* heap() { return &heap_; }
  StubCache* load_stub_cache() { return load_stub_cache_; }
  StubCache* store_stub_cache() { return store_stub_cache_; }
  DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; }
  bool deoptimizer_lazy_throw() const { return deoptimizer_lazy_throw_; }
  void set_deoptimizer_lazy_throw(bool value) {
    deoptimizer_lazy_throw_ = value;
  }
  ThreadLocalTop* thread_local_top() { return &thread_local_top_; }
  MaterializedObjectStore* materialized_object_store() {
    return materialized_object_store_;
  }

  ContextSlotCache* context_slot_cache() {
    return context_slot_cache_;
  }

  DescriptorLookupCache* descriptor_lookup_cache() {
    return descriptor_lookup_cache_;
  }

  HandleScopeData* handle_scope_data() { return &handle_scope_data_; }

  HandleScopeImplementer* handle_scope_implementer() {
    DCHECK(handle_scope_implementer_);
    return handle_scope_implementer_;
  }

  UnicodeCache* unicode_cache() {
    return unicode_cache_;
  }

  InnerPointerToCodeCache* inner_pointer_to_code_cache() {
    return inner_pointer_to_code_cache_;
  }

  GlobalHandles* global_handles() { return global_handles_; }

  EternalHandles* eternal_handles() { return eternal_handles_; }

  ThreadManager* thread_manager() { return thread_manager_; }

  unibrow::Mapping<unibrow::Ecma262UnCanonicalize>* jsregexp_uncanonicalize() {
    return &jsregexp_uncanonicalize_;
  }

  unibrow::Mapping<unibrow::CanonicalizationRange>* jsregexp_canonrange() {
    return &jsregexp_canonrange_;
  }

  RuntimeState* runtime_state() { return &runtime_state_; }

  Builtins* builtins() { return &builtins_; }

  unibrow::Mapping<unibrow::Ecma262Canonicalize>*
      regexp_macro_assembler_canonicalize() {
    return &regexp_macro_assembler_canonicalize_;
  }

  RegExpStack* regexp_stack() { return regexp_stack_; }

  size_t total_regexp_code_generated() { return total_regexp_code_generated_; }
  void IncreaseTotalRegexpCodeGenerated(int size) {
    total_regexp_code_generated_ += size;
  }

  std::vector<int>* regexp_indices() { return &regexp_indices_; }

  unibrow::Mapping<unibrow::Ecma262Canonicalize>*
      interp_canonicalize_mapping() {
    return &regexp_macro_assembler_canonicalize_;
  }

  Debug* debug() { return debug_; }

  bool* is_profiling_address() { return &is_profiling_; }
  CodeEventDispatcher* code_event_dispatcher() const {
    return code_event_dispatcher_.get();
  }
  HeapProfiler* heap_profiler() const { return heap_profiler_; }

#ifdef DEBUG
  static size_t non_disposed_isolates() { return non_disposed_isolates_; }
#endif

  v8::internal::Factory* factory() {
    // Upcast to the privately inherited base-class using c-style casts to avoid
    // undefined behavior (as static_cast cannot cast across private bases).
    return (v8::internal::Factory*)this;  // NOLINT(readability/casting)
  }

  static const int kJSRegexpStaticOffsetsVectorSize = 128;

  THREAD_LOCAL_TOP_ACCESSOR(ExternalCallbackScope*, external_callback_scope)

  THREAD_LOCAL_TOP_ACCESSOR(StateTag, current_vm_state)

  void SetData(uint32_t slot, void* data) {
    DCHECK_LT(slot, Internals::kNumIsolateDataSlots);
    embedder_data_[slot] = data;
  }
  void* GetData(uint32_t slot) {
    DCHECK_LT(slot, Internals::kNumIsolateDataSlots);
    return embedder_data_[slot];
  }
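
  // Illustrative usage sketch (not part of the original header); MyState is a
  // made-up embedder type. Embedders can stash per-isolate state in the data
  // slots and retrieve it later:
  //
  //   isolate->SetData(0, new MyState());
  //   auto* state = static_cast<MyState*>(isolate->GetData(0));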

  bool serializer_enabled() const { return serializer_enabled_; }

  void enable_serializer() { serializer_enabled_ = true; }

  bool snapshot_available() const {
    return snapshot_blob_ != nullptr && snapshot_blob_->raw_size != 0;
  }

  bool IsDead() { return has_fatal_error_; }
  void SignalFatalError() { has_fatal_error_ = true; }

  bool use_optimizer();

  bool initialized_from_snapshot() { return initialized_from_snapshot_; }

  bool NeedsSourcePositionsForProfiling() const;

  bool NeedsDetailedOptimizedCodeLineInfo() const;

  bool is_best_effort_code_coverage() const {
    return code_coverage_mode() == debug::Coverage::kBestEffort;
  }

  bool is_precise_count_code_coverage() const {
    return code_coverage_mode() == debug::Coverage::kPreciseCount;
  }

  bool is_precise_binary_code_coverage() const {
    return code_coverage_mode() == debug::Coverage::kPreciseBinary;
  }

  bool is_block_count_code_coverage() const {
    return code_coverage_mode() == debug::Coverage::kBlockCount;
  }

  bool is_block_binary_code_coverage() const {
    return code_coverage_mode() == debug::Coverage::kBlockBinary;
  }

  bool is_block_code_coverage() const {
    return is_block_count_code_coverage() || is_block_binary_code_coverage();
  }

  bool is_collecting_type_profile() const {
    return type_profile_mode() == debug::TypeProfile::kCollect;
  }

  // Collect feedback vectors with data for code coverage or type profile.
  // Reset the list when neither code coverage nor type profile are needed
  // anymore. This keeps many feedback vectors alive, but code coverage and
  // type profile are used for debugging only, so an increase in memory usage
  // is expected.
  void SetFeedbackVectorsForProfilingTools(Object* value);

  void MaybeInitializeVectorListFromHeap();

  double time_millis_since_init() {
    return heap_.MonotonicallyIncreasingTimeInMs() - time_millis_at_init_;
  }

  DateCache* date_cache() {
    return date_cache_;
  }

  void set_date_cache(DateCache* date_cache) {
    if (date_cache != date_cache_) {
      delete date_cache_;
    }
    date_cache_ = date_cache;
  }

#ifdef V8_INTL_SUPPORT
  icu::RegexMatcher* language_singleton_regexp_matcher() {
    return language_singleton_regexp_matcher_;
  }

  icu::RegexMatcher* language_tag_regexp_matcher() {
    return language_tag_regexp_matcher_;
  }

  icu::RegexMatcher* language_variant_regexp_matcher() {
    return language_variant_regexp_matcher_;
  }

  const std::string& default_locale() { return default_locale_; }

  void set_default_locale(const std::string& locale) {
    DCHECK_EQ(default_locale_.length(), 0);
    default_locale_ = locale;
  }

  void set_language_tag_regexp_matchers(
      icu::RegexMatcher* language_singleton_regexp_matcher,
      icu::RegexMatcher* language_tag_regexp_matcher,
      icu::RegexMatcher* language_variant_regexp_matcher) {
    DCHECK_NULL(language_singleton_regexp_matcher_);
    DCHECK_NULL(language_tag_regexp_matcher_);
    DCHECK_NULL(language_variant_regexp_matcher_);
    language_singleton_regexp_matcher_ = language_singleton_regexp_matcher;
    language_tag_regexp_matcher_ = language_tag_regexp_matcher;
    language_variant_regexp_matcher_ = language_variant_regexp_matcher;
  }
#endif  // V8_INTL_SUPPORT

  static const int kProtectorValid = 1;
  static const int kProtectorInvalid = 0;

  inline bool IsArrayConstructorIntact();

  // The version with an explicit context parameter can be used when
  // Isolate::context is not set up, e.g. when calling directly into C++ from
  // CSA.
  bool IsNoElementsProtectorIntact(Context* context);
  bool IsNoElementsProtectorIntact();

  inline bool IsArraySpeciesLookupChainIntact();
  inline bool IsTypedArraySpeciesLookupChainIntact();
  inline bool IsPromiseSpeciesLookupChainIntact();
  bool IsIsConcatSpreadableLookupChainIntact();
  bool IsIsConcatSpreadableLookupChainIntact(JSReceiver* receiver);
  inline bool IsStringLengthOverflowIntact();
  inline bool IsArrayIteratorLookupChainIntact();

  // Make sure we do check for neutered array buffers.
  inline bool IsArrayBufferNeuteringIntact();

  // Disable promise optimizations if promise (debug) hooks have ever been
  // active.
  bool IsPromiseHookProtectorIntact();

   1253   // Make sure a lookup of "resolve" on the %Promise% intrinsic object
   1254   // yields the initial Promise.resolve method.
   1255   bool IsPromiseResolveLookupChainIntact();
   1256 
   1257   // Make sure a lookup of "then" on any JSPromise whose [[Prototype]] is the
   1258   // initial %PromisePrototype% yields the initial method. In addition this
   1259   // protector also guards the negative lookup of "then" on the intrinsic
   1260   // %ObjectPrototype%, meaning that such lookups are guaranteed to yield
   1261   // undefined without triggering any side-effects.
   1262   bool IsPromiseThenLookupChainIntact();
   1263   bool IsPromiseThenLookupChainIntact(Handle<JSReceiver> receiver);
   1264 
   1265   // On intent to set an element in an object, make sure that appropriate
   1266   // notifications occur if the set is on the elements of the Array or
   1267   // Object prototype. Also ensure that changes to the prototype chain
   1268   // between Array and Object fire notifications.
   1269   void UpdateNoElementsProtectorOnSetElement(Handle<JSObject> object);
   1270   void UpdateNoElementsProtectorOnSetLength(Handle<JSObject> object) {
   1271     UpdateNoElementsProtectorOnSetElement(object);
   1272   }
   1273   void UpdateNoElementsProtectorOnSetPrototype(Handle<JSObject> object) {
   1274     UpdateNoElementsProtectorOnSetElement(object);
   1275   }
   1276   void UpdateNoElementsProtectorOnNormalizeElements(Handle<JSObject> object) {
   1277     UpdateNoElementsProtectorOnSetElement(object);
   1278   }
   1279   void InvalidateArrayConstructorProtector();
   1280   void InvalidateArraySpeciesProtector();
   1281   void InvalidateTypedArraySpeciesProtector();
   1282   void InvalidatePromiseSpeciesProtector();
   1283   void InvalidateIsConcatSpreadableProtector();
   1284   void InvalidateStringLengthOverflowProtector();
   1285   void InvalidateArrayIteratorProtector();
   1286   void InvalidateArrayBufferNeuteringProtector();
   1287   V8_EXPORT_PRIVATE void InvalidatePromiseHookProtector();
   1288   void InvalidatePromiseResolveProtector();
   1289   void InvalidatePromiseThenProtector();
   1290 
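          // Illustrative sketch (not part of this interface): callers typically
          // consult a protector before committing to a fast path, e.g.
          //
          //   if (isolate->IsArraySpeciesLookupChainIntact()) {
          //     // Fast path: assume @@species has not been tampered with.
          //   } else {
          //     // Slow path: perform the full species lookup.
          //   }
          //
          // The matching Invalidate* method flips the protector cell from
          // kProtectorValid to kProtectorInvalid once; the fast path then stays
          // disabled for the lifetime of the isolate.
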
   1291   // Returns true if array is the initial array prototype in any native context.
   1292   bool IsAnyInitialArrayPrototype(Handle<JSArray> array);
   1293 
   1294   void IterateDeferredHandles(RootVisitor* visitor);
   1295   void LinkDeferredHandles(DeferredHandles* deferred_handles);
   1296   void UnlinkDeferredHandles(DeferredHandles* deferred_handles);
   1297 
   1298 #ifdef DEBUG
   1299   bool IsDeferredHandle(Object** location);
   1300 #endif  // DEBUG
   1301 
   1302   bool concurrent_recompilation_enabled() {
   1303     // Thread is only available with flag enabled.
   1304     DCHECK(optimizing_compile_dispatcher_ == nullptr ||
   1305            FLAG_concurrent_recompilation);
   1306     return optimizing_compile_dispatcher_ != nullptr;
   1307   }
   1308 
   1309   OptimizingCompileDispatcher* optimizing_compile_dispatcher() {
   1310     return optimizing_compile_dispatcher_;
   1311   }
   1312   // Flushes all pending concurrent optimization jobs from the optimizing
   1313   // compile dispatcher's queue.
   1314   void AbortConcurrentOptimization(BlockingBehavior blocking_behavior);
   1315 
   1316   int id() const { return static_cast<int>(id_); }
   1317 
   1318   CompilationStatistics* GetTurboStatistics();
   1319   CodeTracer* GetCodeTracer();
   1320 
   1321   void DumpAndResetStats();
   1322 
   1323   FunctionEntryHook function_entry_hook() { return function_entry_hook_; }
   1324   void set_function_entry_hook(FunctionEntryHook function_entry_hook) {
   1325     function_entry_hook_ = function_entry_hook;
   1326   }
   1327 
   1328   void* stress_deopt_count_address() { return &stress_deopt_count_; }
   1329 
   1330   void set_force_slow_path(bool v) { force_slow_path_ = v; }
   1331   bool force_slow_path() const { return force_slow_path_; }
   1332   bool* force_slow_path_address() { return &force_slow_path_; }
   1333 
   1334   V8_EXPORT_PRIVATE base::RandomNumberGenerator* random_number_generator();
   1335 
   1336   V8_EXPORT_PRIVATE base::RandomNumberGenerator* fuzzer_rng();
   1337 
   1338   // Generates a random number that is non-zero when masked
   1339   // with the provided mask.
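          // For example, with a hypothetical mask of 0x3fffffff, the returned hash
          // h satisfies (h & 0x3fffffff) != 0, so the masked value can be used
          // directly as a non-zero identity hash.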
   1340   int GenerateIdentityHash(uint32_t mask);
   1341 
   1342   // Given an address occupied by a live code object, return that object.
   1343   Code* FindCodeObject(Address a);
   1344 
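          // Returns a fresh optimization id; ids stay Smi-representable, wrapping
          // back to zero once the next id would leave Smi range.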
   1345   int NextOptimizationId() {
   1346     int id = next_optimization_id_++;
   1347     if (!Smi::IsValid(next_optimization_id_)) {
   1348       next_optimization_id_ = 0;
   1349     }
   1350     return id;
   1351   }
   1352 
   1353   void AddNearHeapLimitCallback(v8::NearHeapLimitCallback, void* data);
   1354   void RemoveNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
   1355                                    size_t heap_limit);
   1356   void AddCallCompletedCallback(CallCompletedCallback callback);
   1357   void RemoveCallCompletedCallback(CallCompletedCallback callback);
   1358   void FireCallCompletedCallback();
   1359 
   1360   void AddBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
   1361   void RemoveBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
   1362   inline void FireBeforeCallEnteredCallback();
   1363 
   1364   void AddMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
   1365   void RemoveMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
   1366   inline void FireMicrotasksCompletedCallback();
   1367 
   1368   void SetPromiseRejectCallback(PromiseRejectCallback callback);
   1369   void ReportPromiseReject(Handle<JSPromise> promise, Handle<Object> value,
   1370                            v8::PromiseRejectEvent event);
   1371 
   1372   void EnqueueMicrotask(Handle<Microtask> microtask);
   1373   void RunMicrotasks();
   1374   bool IsRunningMicrotasks() const { return is_running_microtasks_; }
   1375 
   1376   Handle<Symbol> SymbolFor(Heap::RootListIndex dictionary_index,
   1377                            Handle<String> name, bool private_symbol);
   1378 
   1379   void SetUseCounterCallback(v8::Isolate::UseCounterCallback callback);
   1380   void CountUsage(v8::Isolate::UseCounterFeature feature);
   1381 
   1382   std::string GetTurboCfgFileName();
   1383 
   1384 #if V8_SFI_HAS_UNIQUE_ID
   1385   int GetNextUniqueSharedFunctionInfoId() { return next_unique_sfi_id_++; }
   1386 #endif
   1387 
   1388   Address promise_hook_address() {
   1389     return reinterpret_cast<Address>(&promise_hook_);
   1390   }
   1391 
   1392   Address async_event_delegate_address() {
   1393     return reinterpret_cast<Address>(&async_event_delegate_);
   1394   }
   1395 
   1396   Address promise_hook_or_async_event_delegate_address() {
   1397     return reinterpret_cast<Address>(&promise_hook_or_async_event_delegate_);
   1398   }
   1399 
   1400   Address pending_microtask_count_address() {
   1401     return reinterpret_cast<Address>(&pending_microtask_count_);
   1402   }
   1403 
   1404   Address handle_scope_implementer_address() {
   1405     return reinterpret_cast<Address>(&handle_scope_implementer_);
   1406   }
   1407 
   1408   void SetAtomicsWaitCallback(v8::Isolate::AtomicsWaitCallback callback,
   1409                               void* data);
   1410   void RunAtomicsWaitCallback(v8::Isolate::AtomicsWaitEvent event,
   1411                               Handle<JSArrayBuffer> array_buffer,
   1412                               size_t offset_in_bytes, int32_t value,
   1413                               double timeout_in_ms,
   1414                               AtomicsWaitWakeHandle* stop_handle);
   1415 
   1416   void SetPromiseHook(PromiseHook hook);
   1417   void RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise,
   1418                       Handle<Object> parent);
   1419 
   1420   void AddDetachedContext(Handle<Context> context);
   1421   void CheckDetachedContextsAfterGC();
   1422 
   1423   std::vector<Object*>* partial_snapshot_cache() {
   1424     return &partial_snapshot_cache_;
   1425   }
   1426 
   1427   // Off-heap builtins cannot embed constants within the code object itself,
   1428   // and thus need to load them from the root list.
   1429   bool ShouldLoadConstantsFromRootList() const {
   1430     if (FLAG_embedded_builtins) {
   1431       return (serializer_enabled() &&
   1432               builtins_constants_table_builder() != nullptr);
   1433     } else {
   1434       return false;
   1435     }
   1436   }
   1437 
   1438   // Called only prior to serialization.
   1439   // This function copies off-heap-safe builtins off the heap, creates off-heap
   1440   // trampolines, and sets up this isolate's embedded blob.
   1441   void PrepareEmbeddedBlobForSerialization();
   1442 
   1443   BuiltinsConstantsTableBuilder* builtins_constants_table_builder() const {
   1444     return builtins_constants_table_builder_;
   1445   }
   1446 
   1447   static const uint8_t* CurrentEmbeddedBlob();
   1448   static uint32_t CurrentEmbeddedBlobSize();
   1449 
   1450   // These always return the same result as the static methods above, but
   1451   // don't access the global atomic variable (so *might be* slightly faster).
   1452   const uint8_t* embedded_blob() const;
   1453   uint32_t embedded_blob_size() const;
   1454 
   1455   void set_array_buffer_allocator(v8::ArrayBuffer::Allocator* allocator) {
   1456     array_buffer_allocator_ = allocator;
   1457   }
   1458   v8::ArrayBuffer::Allocator* array_buffer_allocator() const {
   1459     return array_buffer_allocator_;
   1460   }
   1461 
   1462   FutexWaitListNode* futex_wait_list_node() { return &futex_wait_list_node_; }
   1463 
   1464   CancelableTaskManager* cancelable_task_manager() {
   1465     return cancelable_task_manager_;
   1466   }
   1467 
   1468   const AstStringConstants* ast_string_constants() const {
   1469     return ast_string_constants_;
   1470   }
   1471 
   1472   interpreter::Interpreter* interpreter() const { return interpreter_; }
   1473 
   1474   AccountingAllocator* allocator() { return allocator_; }
   1475 
   1476   CompilerDispatcher* compiler_dispatcher() const {
   1477     return compiler_dispatcher_;
   1478   }
   1479 
   1480   bool IsInAnyContext(Object* object, uint32_t index);
   1481 
   1482   void SetHostImportModuleDynamicallyCallback(
   1483       HostImportModuleDynamicallyCallback callback);
   1484   MaybeHandle<JSPromise> RunHostImportModuleDynamicallyCallback(
   1485       Handle<Script> referrer, Handle<Object> specifier);
   1486 
   1487   void SetHostInitializeImportMetaObjectCallback(
   1488       HostInitializeImportMetaObjectCallback callback);
   1489   Handle<JSObject> RunHostInitializeImportMetaObjectCallback(
   1490       Handle<Module> module);
   1491 
   1492   void SetRAILMode(RAILMode rail_mode);
   1493 
   1494   RAILMode rail_mode() { return rail_mode_.Value(); }
   1495 
   1496   double LoadStartTimeMs();
   1497 
   1498   void IsolateInForegroundNotification();
   1499 
   1500   void IsolateInBackgroundNotification();
   1501 
   1502   bool IsIsolateInBackground() { return is_isolate_in_background_; }
   1503 
   1504   void EnableMemorySavingsMode() { memory_savings_mode_active_ = true; }
   1505 
   1506   void DisableMemorySavingsMode() { memory_savings_mode_active_ = false; }
   1507 
   1508   bool IsMemorySavingsModeActive() { return memory_savings_mode_active_; }
   1509 
   1510   PRINTF_FORMAT(2, 3) void PrintWithTimestamp(const char* format, ...);
   1511 
   1512   void set_allow_atomics_wait(bool set) { allow_atomics_wait_ = set; }
   1513   bool allow_atomics_wait() { return allow_atomics_wait_; }
   1514 
   1515   // Register a finalizer to be called at isolate teardown.
   1516   void RegisterManagedPtrDestructor(ManagedPtrDestructor* finalizer);
   1517 
   1518   // Removes a previously-registered shared object finalizer.
   1519   void UnregisterManagedPtrDestructor(ManagedPtrDestructor* finalizer);
   1520 
   1521   size_t elements_deletion_counter() { return elements_deletion_counter_; }
   1522   void set_elements_deletion_counter(size_t value) {
   1523     elements_deletion_counter_ = value;
   1524   }
   1525 
   1526   wasm::WasmEngine* wasm_engine() const { return wasm_engine_.get(); }
   1527   void set_wasm_engine(std::shared_ptr<wasm::WasmEngine> engine) {
   1528     DCHECK_NULL(wasm_engine_);  // Only call once before {Init}.
   1529     wasm_engine_ = std::move(engine);
   1530   }
   1531 
   1532   const v8::Context::BackupIncumbentScope* top_backup_incumbent_scope() const {
   1533     return top_backup_incumbent_scope_;
   1534   }
   1535   void set_top_backup_incumbent_scope(
   1536       const v8::Context::BackupIncumbentScope* top_backup_incumbent_scope) {
   1537     top_backup_incumbent_scope_ = top_backup_incumbent_scope;
   1538   }
   1539 
   1540   void SetIdle(bool is_idle);
   1541 
   1542  protected:
   1543   Isolate();
   1544   bool IsArrayOrObjectOrStringPrototype(Object* object);
   1545 
   1546  private:
   1547   friend struct GlobalState;
   1548   friend struct InitializeGlobalState;
   1549 
   1550   // These fields are accessed through the API; their offsets must be kept in
   1551   // sync with the v8::internal::Internals constants (in include/v8.h). This is
   1552   // also verified in Isolate::Init() using runtime checks.
   1553   void* embedder_data_[Internals::kNumIsolateDataSlots];
   1554   Heap heap_;
   1555 
   1556   class ThreadDataTable {
   1557    public:
   1558     ThreadDataTable();
   1559     ~ThreadDataTable();
   1560 
   1561     PerIsolateThreadData* Lookup(ThreadId thread_id);
   1562     void Insert(PerIsolateThreadData* data);
   1563     void Remove(PerIsolateThreadData* data);
   1564     void RemoveAllThreads();
   1565 
   1566    private:
   1567     struct Hasher {
   1568       std::size_t operator()(const ThreadId& t) const {
   1569         return std::hash<int>()(t.ToInteger());
   1570       }
   1571     };
   1572 
   1573     std::unordered_map<ThreadId, PerIsolateThreadData*, Hasher> table_;
   1574   };
   1575 
   1576   // These items form a stack that is kept in sync with threads Enter'ing and
   1577   // Exit'ing the Isolate. The top of the stack points to a thread which is
   1578   // currently running the Isolate. When the stack is empty, the Isolate is
   1579   // considered not entered by any thread and can be Disposed.
   1580   // If the same thread enters the Isolate more than once, entry_count_ is
   1581   // incremented rather than a new item being pushed to the stack.
   1582   class EntryStackItem {
   1583    public:
   1584     EntryStackItem(PerIsolateThreadData* previous_thread_data,
   1585                    Isolate* previous_isolate,
   1586                    EntryStackItem* previous_item)
   1587         : entry_count(1),
   1588           previous_thread_data(previous_thread_data),
   1589           previous_isolate(previous_isolate),
   1590           previous_item(previous_item) { }
   1591 
   1592     int entry_count;
   1593     PerIsolateThreadData* previous_thread_data;
   1594     Isolate* previous_isolate;
   1595     EntryStackItem* previous_item;
   1596 
   1597    private:
   1598     DISALLOW_COPY_AND_ASSIGN(EntryStackItem);
   1599   };
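
          // Illustrative sketch of the balancing contract (see Enter/Exit below):
          // re-entering from the same thread bumps entry_count instead of pushing
          // a new item, so every Enter must be paired with an Exit:
          //
          //   isolate->Enter();  // pushes an EntryStackItem, entry_count == 1
          //   isolate->Enter();  // same thread: entry_count == 2
          //   isolate->Exit();   // entry_count == 1
          //   isolate->Exit();   // stack empty; the Isolate may now be Disposed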
   1600 
   1601   static base::Thread::LocalStorageKey per_isolate_thread_data_key_;
   1602   static base::Thread::LocalStorageKey isolate_key_;
   1603   static base::Thread::LocalStorageKey thread_id_key_;
   1604 
   1605   // A global counter for all generated Isolates; it might overflow.
   1606   static base::Atomic32 isolate_counter_;
   1607 
   1608 #if DEBUG
   1609   static base::Atomic32 isolate_key_created_;
   1610 #endif
   1611 
   1612   void Deinit();
   1613 
   1614   static void SetIsolateThreadLocals(Isolate* isolate,
   1615                                      PerIsolateThreadData* data);
   1616 
   1617   // Find the PerIsolateThreadData for this particular (isolate, thread)
   1618   // combination. If one does not yet exist, allocate a new one.
   1619   PerIsolateThreadData* FindOrAllocatePerThreadDataForThisThread();
   1620 
   1621   // Initializes the current thread to run this Isolate.
   1622   // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
   1623   // at the same time; this should be prevented using external locking.
   1624   void Enter();
   1625 
   1626   // Exits the current thread. The previously entered Isolate is restored
   1627   // for the thread.
   1628   // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
   1629   // at the same time; this should be prevented using external locking.
   1630   void Exit();
   1631 
   1632   void InitializeThreadLocal();
   1633 
   1634   void MarkCompactPrologue(bool is_compacting,
   1635                            ThreadLocalTop* archived_thread_data);
   1636   void MarkCompactEpilogue(bool is_compacting,
   1637                            ThreadLocalTop* archived_thread_data);
   1638 
   1639   void FillCache();
   1640 
   1641   // Propagates the pending exception message to the v8::TryCatch.
   1642   // Returns true if there is no external try-catch or if the message was
   1643   // successfully propagated.
   1644   bool PropagatePendingExceptionToExternalTryCatch();
   1645 
   1646   void SetTerminationOnExternalTryCatch();
   1647 
   1648   void PromiseHookStateUpdated();
   1649   void RunPromiseHookForAsyncEventDelegate(PromiseHookType type,
   1650                                            Handle<JSPromise> promise);
   1651 
   1652   const char* RAILModeName(RAILMode rail_mode) const {
   1653     switch (rail_mode) {
   1654       case PERFORMANCE_RESPONSE:
   1655         return "RESPONSE";
   1656       case PERFORMANCE_ANIMATION:
   1657         return "ANIMATION";
   1658       case PERFORMANCE_IDLE:
   1659         return "IDLE";
   1660       case PERFORMANCE_LOAD:
   1661         return "LOAD";
   1662     }
   1663     return "";
   1664   }
   1665 
   1666   base::Atomic32 id_;
   1667   EntryStackItem* entry_stack_;
   1668   int stack_trace_nesting_level_;
   1669   StringStream* incomplete_message_;
   1670   Address isolate_addresses_[kIsolateAddressCount + 1];  // NOLINT
   1671   Bootstrapper* bootstrapper_;
   1672   RuntimeProfiler* runtime_profiler_;
   1673   CompilationCache* compilation_cache_;
   1674   std::shared_ptr<Counters> async_counters_;
   1675   base::RecursiveMutex break_access_;
   1676   Logger* logger_;
   1677   StackGuard stack_guard_;
   1678   StubCache* load_stub_cache_;
   1679   StubCache* store_stub_cache_;
   1680   DeoptimizerData* deoptimizer_data_;
   1681   bool deoptimizer_lazy_throw_;
   1682   MaterializedObjectStore* materialized_object_store_;
   1683   ThreadLocalTop thread_local_top_;
   1684   bool capture_stack_trace_for_uncaught_exceptions_;
   1685   int stack_trace_for_uncaught_exceptions_frame_limit_;
   1686   StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options_;
   1687   ContextSlotCache* context_slot_cache_;
   1688   DescriptorLookupCache* descriptor_lookup_cache_;
   1689   HandleScopeData handle_scope_data_;
   1690   HandleScopeImplementer* handle_scope_implementer_;
   1691   UnicodeCache* unicode_cache_;
   1692   AccountingAllocator* allocator_;
   1693   InnerPointerToCodeCache* inner_pointer_to_code_cache_;
   1694   GlobalHandles* global_handles_;
   1695   EternalHandles* eternal_handles_;
   1696   ThreadManager* thread_manager_;
   1697   RuntimeState runtime_state_;
   1698   Builtins builtins_;
   1699   SetupIsolateDelegate* setup_delegate_;
   1700   unibrow::Mapping<unibrow::Ecma262UnCanonicalize> jsregexp_uncanonicalize_;
   1701   unibrow::Mapping<unibrow::CanonicalizationRange> jsregexp_canonrange_;
   1702   unibrow::Mapping<unibrow::Ecma262Canonicalize>
   1703       regexp_macro_assembler_canonicalize_;
   1704   RegExpStack* regexp_stack_;
   1705   std::vector<int> regexp_indices_;
   1706   DateCache* date_cache_;
   1707   base::RandomNumberGenerator* random_number_generator_;
   1708   base::RandomNumberGenerator* fuzzer_rng_;
   1709   base::AtomicValue<RAILMode> rail_mode_;
   1710   v8::Isolate::AtomicsWaitCallback atomics_wait_callback_;
   1711   void* atomics_wait_callback_data_;
   1712   PromiseHook promise_hook_;
   1713   HostImportModuleDynamicallyCallback host_import_module_dynamically_callback_;
   1714   HostInitializeImportMetaObjectCallback
   1715       host_initialize_import_meta_object_callback_;
   1716   base::Mutex rail_mutex_;
   1717   double load_start_time_ms_;
   1718 
   1719 #ifdef V8_INTL_SUPPORT
   1720   icu::RegexMatcher* language_singleton_regexp_matcher_;
   1721   icu::RegexMatcher* language_tag_regexp_matcher_;
   1722   icu::RegexMatcher* language_variant_regexp_matcher_;
   1723   std::string default_locale_;
   1724 #endif  // V8_INTL_SUPPORT
   1725 
   1726   // Whether the isolate has been created for snapshotting.
   1727   bool serializer_enabled_;
   1728 
   1729   // True if fatal error has been signaled for this isolate.
   1730   bool has_fatal_error_;
   1731 
   1732   // True if this isolate was initialized from a snapshot.
   1733   bool initialized_from_snapshot_;
   1734 
   1735   // True if ES2015 tail call elimination feature is enabled.
   1736   bool is_tail_call_elimination_enabled_;
   1737 
   1738   // True if the isolate is in background. This flag is used
   1739   // to prioritize between memory usage and latency.
   1740   bool is_isolate_in_background_;
   1741 
   1742   // True if the isolate is in memory savings mode. This flag is used to
   1743   // favor memory over runtime performance.
   1744   bool memory_savings_mode_active_;
   1745 
   1746   // Time stamp at initialization.
   1747   double time_millis_at_init_;
   1748 
   1749 #ifdef DEBUG
   1750   static std::atomic<size_t> non_disposed_isolates_;
   1751 
   1752   JSObject::SpillInformation js_spill_information_;
   1753 #endif
   1754 
   1755   Debug* debug_;
   1756   HeapProfiler* heap_profiler_;
   1757   std::unique_ptr<CodeEventDispatcher> code_event_dispatcher_;
   1758   FunctionEntryHook function_entry_hook_;
   1759 
   1760   const AstStringConstants* ast_string_constants_;
   1761 
   1762   interpreter::Interpreter* interpreter_;
   1763 
   1764   CompilerDispatcher* compiler_dispatcher_;
   1765 
   1766   typedef std::pair<InterruptCallback, void*> InterruptEntry;
   1767   std::queue<InterruptEntry> api_interrupts_queue_;
   1768 
   1769 #define GLOBAL_BACKING_STORE(type, name, initialvalue)                         \
   1770   type name##_;
   1771   ISOLATE_INIT_LIST(GLOBAL_BACKING_STORE)
   1772 #undef GLOBAL_BACKING_STORE
   1773 
   1774 #define GLOBAL_ARRAY_BACKING_STORE(type, name, length)                         \
   1775   type name##_[length];
   1776   ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_BACKING_STORE)
   1777 #undef GLOBAL_ARRAY_BACKING_STORE
   1778 
   1779 #ifdef DEBUG
   1780   // This class is huge and has a number of fields controlled by
   1781   // preprocessor defines. Make sure the offsets of these fields agree
   1782   // between compilation units.
   1783 #define ISOLATE_FIELD_OFFSET(type, name, ignored)                              \
   1784   static const intptr_t name##_debug_offset_;
   1785   ISOLATE_INIT_LIST(ISOLATE_FIELD_OFFSET)
   1786   ISOLATE_INIT_ARRAY_LIST(ISOLATE_FIELD_OFFSET)
   1787 #undef ISOLATE_FIELD_OFFSET
   1788 #endif
   1789 
   1790   DeferredHandles* deferred_handles_head_;
   1791   OptimizingCompileDispatcher* optimizing_compile_dispatcher_;
   1792 
   1793   // Counts deopt points if deopt_every_n_times is enabled.
   1794   unsigned int stress_deopt_count_;
   1795 
   1796   bool force_slow_path_;
   1797 
   1798   int next_optimization_id_;
   1799 
   1800 #if V8_SFI_HAS_UNIQUE_ID
   1801   int next_unique_sfi_id_;
   1802 #endif
   1803 
   1804   // Vector of callbacks before a Call starts execution.
   1805   std::vector<BeforeCallEnteredCallback> before_call_entered_callbacks_;
   1806 
   1807   // Vector of callbacks when a Call completes.
   1808   std::vector<CallCompletedCallback> call_completed_callbacks_;
   1809 
   1810   // Vector of callbacks after microtasks were run.
   1811   std::vector<MicrotasksCompletedCallback> microtasks_completed_callbacks_;
   1812   bool is_running_microtasks_;
   1813 
   1814   v8::Isolate::UseCounterCallback use_counter_callback_;
   1815 
   1816   std::vector<Object*> partial_snapshot_cache_;
   1817 
   1818   // Used during builtins compilation to build the builtins constants table,
   1819   // which is stored on the root list prior to serialization.
   1820   BuiltinsConstantsTableBuilder* builtins_constants_table_builder_ = nullptr;
   1821 
   1822   void SetEmbeddedBlob(const uint8_t* blob, uint32_t blob_size);
   1823 
   1824   const uint8_t* embedded_blob_ = nullptr;
   1825   uint32_t embedded_blob_size_ = 0;
   1826 
   1827   v8::ArrayBuffer::Allocator* array_buffer_allocator_;
   1828 
   1829   FutexWaitListNode futex_wait_list_node_;
   1830 
   1831   CancelableTaskManager* cancelable_task_manager_;
   1832 
   1833   debug::ConsoleDelegate* console_delegate_ = nullptr;
   1834 
   1835   debug::AsyncEventDelegate* async_event_delegate_ = nullptr;
   1836   bool promise_hook_or_async_event_delegate_ = false;
   1837   int async_task_count_ = 0;
   1838 
   1839   v8::Isolate::AbortOnUncaughtExceptionCallback
   1840       abort_on_uncaught_exception_callback_;
   1841 
   1842   bool allow_atomics_wait_;
   1843 
   1844   ManagedPtrDestructor* managed_ptr_destructors_head_ = nullptr;
   1845 
   1846   size_t total_regexp_code_generated_;
   1847 
   1848   size_t elements_deletion_counter_ = 0;
   1849 
   1850   std::shared_ptr<wasm::WasmEngine> wasm_engine_;
   1851 
   1852   std::unique_ptr<TracingCpuProfilerImpl> tracing_cpu_profiler_;
   1853 
   1854   // The top entry of the v8::Context::BackupIncumbentScope stack.
   1855   const v8::Context::BackupIncumbentScope* top_backup_incumbent_scope_ =
   1856       nullptr;
   1857 
   1858   // TODO(kenton@cloudflare.com): This mutex can be removed if
   1859   // thread_data_table_ is always accessed under the isolate lock. I do not
   1860   // know if this is the case, so I'm preserving it for now.
   1861   base::Mutex thread_data_table_mutex_;
   1862   ThreadDataTable thread_data_table_;
   1863 
   1864   friend class ExecutionAccess;
   1865   friend class HandleScopeImplementer;
   1866   friend class heap::HeapTester;
   1867   friend class OptimizingCompileDispatcher;
   1868   friend class Simulator;
   1869   friend class StackGuard;
   1870   friend class SweeperThread;
   1871   friend class TestIsolate;
   1872   friend class ThreadId;
   1873   friend class ThreadManager;
   1874   friend class v8::Isolate;
   1875   friend class v8::Locker;
   1876   friend class v8::SnapshotCreator;
   1877   friend class v8::Unlocker;
   1878 
   1879   DISALLOW_COPY_AND_ASSIGN(Isolate);
   1880 };
   1881 
   1882 
   1883 #undef FIELD_ACCESSOR
   1884 #undef THREAD_LOCAL_TOP_ACCESSOR
   1885 
   1886 
   1887 class PromiseOnStack {
   1888  public:
   1889   PromiseOnStack(Handle<JSObject> promise, PromiseOnStack* prev)
   1890       : promise_(promise), prev_(prev) {}
   1891   Handle<JSObject> promise() { return promise_; }
   1892   PromiseOnStack* prev() { return prev_; }
   1893 
   1894  private:
   1895   Handle<JSObject> promise_;
   1896   PromiseOnStack* prev_;
   1897 };
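
        // Illustrative sketch (hypothetical debugger-side helpers): PromiseOnStack
        // forms an intrusive singly-linked stack, pushed when the debugger enters a
        // promise reaction and popped on exit:
        //
        //   top = new PromiseOnStack(promise, top);  // push
        //   ...
        //   PromiseOnStack* prev = top->prev();      // pop
        //   delete top;
        //   top = prev;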
   1898 
   1899 
   1900 // If the GCC version is 4.1.x or 4.2.x an additional field is added to the
   1901 // class as a workaround for a bug in the generated code found with these
   1902 // versions of GCC. See V8 issue 122 for details.
   1903 class SaveContext BASE_EMBEDDED {
   1904  public:
   1905   explicit SaveContext(Isolate* isolate);
   1906   ~SaveContext();
   1907 
   1908   Handle<Context> context() { return context_; }
   1909   SaveContext* prev() { return prev_; }
   1910 
   1911   // Returns true if this save context is below a given JavaScript frame.
   1912   bool IsBelowFrame(StandardFrame* frame);
   1913 
   1914  private:
   1915   Isolate* const isolate_;
   1916   Handle<Context> context_;
   1917   SaveContext* const prev_;
   1918   Address c_entry_fp_;
   1919 };
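
        // Illustrative sketch (hypothetical caller): SaveContext is a stack-allocated
        // RAII guard; the constructor records the current context and the destructor
        // restores it, so temporary context switches cannot leak:
        //
        //   {
        //     SaveContext save(isolate);
        //     isolate->set_context(*other_context);
        //     // ... run code against other_context ...
        //   }  // ~SaveContext restores the previous context.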
   1920 
   1921 
   1922 class AssertNoContextChange BASE_EMBEDDED {
   1923 #ifdef DEBUG
   1924  public:
   1925   explicit AssertNoContextChange(Isolate* isolate);
   1926   ~AssertNoContextChange() {
   1927     DCHECK(isolate_->context() == *context_);
   1928   }
   1929 
   1930  private:
   1931   Isolate* isolate_;
   1932   Handle<Context> context_;
   1933 #else
   1934  public:
   1935   explicit AssertNoContextChange(Isolate* isolate) { }
   1936 #endif
   1937 };
   1938 
   1939 
   1940 class ExecutionAccess BASE_EMBEDDED {
   1941  public:
   1942   explicit ExecutionAccess(Isolate* isolate) : isolate_(isolate) {
   1943     Lock(isolate);
   1944   }
   1945   ~ExecutionAccess() { Unlock(isolate_); }
   1946 
   1947   static void Lock(Isolate* isolate) { isolate->break_access()->Lock(); }
   1948   static void Unlock(Isolate* isolate) { isolate->break_access()->Unlock(); }
   1949 
   1950   static bool TryLock(Isolate* isolate) {
   1951     return isolate->break_access()->TryLock();
   1952   }
   1953 
   1954  private:
   1955   Isolate* isolate_;
   1956 };
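
        // Illustrative sketch: ExecutionAccess is the RAII form of the break_access()
        // lock, with TryLock as the non-blocking variant:
        //
        //   {
        //     ExecutionAccess access(isolate);  // locks break_access()
        //     // ... touch state guarded by the break lock ...
        //   }                                   // unlocked on scope exit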
   1957 
   1958 
   1959 // Support for checking for stack-overflows.
   1960 class StackLimitCheck BASE_EMBEDDED {
   1961  public:
   1962   explicit StackLimitCheck(Isolate* isolate) : isolate_(isolate) { }
   1963 
   1964   // Use this to check for stack-overflows in C++ code.
   1965   bool HasOverflowed() const {
   1966     StackGuard* stack_guard = isolate_->stack_guard();
   1967     return GetCurrentStackPosition() < stack_guard->real_climit();
   1968   }
   1969 
   1970   // Use this to check for interrupt request in C++ code.
   1971   bool InterruptRequested() {
   1972     StackGuard* stack_guard = isolate_->stack_guard();
   1973     return GetCurrentStackPosition() < stack_guard->climit();
   1974   }
   1975 
   1976   // Use this to check for stack-overflow when entering runtime from JS code.
   1977   bool JsHasOverflowed(uintptr_t gap = 0) const;
   1978 
   1979  private:
   1980   Isolate* isolate_;
   1981 };
   1982 
   1983 #define STACK_CHECK(isolate, result_value) \
   1984   do {                                     \
   1985     StackLimitCheck stack_check(isolate);  \
   1986     if (stack_check.HasOverflowed()) {     \
   1987       isolate->StackOverflow();            \
   1988       return result_value;                 \
   1989     }                                      \
   1990   } while (false)
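
        // Illustrative sketch (hypothetical runtime helper): STACK_CHECK reports the
        // overflow on the isolate and bails out with result_value before recursing
        // any deeper:
        //
        //   Object* DeepWalk(Isolate* isolate, Object* obj) {
        //     STACK_CHECK(isolate, isolate->heap()->exception());
        //     // ... recurse into obj ...
        //   }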
   1991 
   1992 // A scope intercepts only those interrupts that are part of its
   1993 // intercept_mask and does not affect other interrupts.
   1994 class InterruptsScope {
   1995  public:
   1996   enum Mode { kPostponeInterrupts, kRunInterrupts, kNoop };
   1997 
   1998   virtual ~InterruptsScope() {
   1999     if (mode_ != kNoop) stack_guard_->PopInterruptsScope();
   2000   }
   2001 
   2002   // Find the scope that intercepts this interrupt.
   2003   // It may be the outermost PostponeInterruptsScope or the innermost
   2004   // SafeForInterruptsScope, if any.
   2005   // Returns whether the interrupt has been intercepted.
   2006   bool Intercept(StackGuard::InterruptFlag flag);
   2007 
   2008   InterruptsScope(Isolate* isolate, int intercept_mask, Mode mode)
   2009       : stack_guard_(isolate->stack_guard()),
   2010         intercept_mask_(intercept_mask),
   2011         intercepted_flags_(0),
   2012         mode_(mode) {
   2013     if (mode_ != kNoop) stack_guard_->PushInterruptsScope(this);
   2014   }
   2015 
   2016  private:
   2017   StackGuard* stack_guard_;
   2018   int intercept_mask_;
   2019   int intercepted_flags_;
   2020   Mode mode_;
   2021   InterruptsScope* prev_;
   2022 
   2023   friend class StackGuard;
   2024 };
   2025 
   2026 // Support for temporarily postponing interrupts. When the outermost
   2027 // postpone scope is left, the interrupts will be re-enabled and any
   2028 // interrupts that occurred while in the scope will be taken into
   2029 // account.
   2030 class PostponeInterruptsScope : public InterruptsScope {
   2031  public:
   2032   PostponeInterruptsScope(Isolate* isolate,
   2033                           int intercept_mask = StackGuard::ALL_INTERRUPTS)
   2034       : InterruptsScope(isolate, intercept_mask,
   2035                         InterruptsScope::kPostponeInterrupts) {}
   2036   virtual ~PostponeInterruptsScope() = default;
   2037 };
   2038 
   2039 // Support for overriding PostponeInterruptsScope. An interrupt is not
   2040 // ignored if the innermost scope is a SafeForInterruptsScope, regardless of
   2041 // any outer PostponeInterruptsScopes.
   2042 class SafeForInterruptsScope : public InterruptsScope {
   2043  public:
   2044   SafeForInterruptsScope(Isolate* isolate,
   2045                          int intercept_mask = StackGuard::ALL_INTERRUPTS)
   2046       : InterruptsScope(isolate, intercept_mask,
   2047                         InterruptsScope::kRunInterrupts) {}
   2048   virtual ~SafeForInterruptsScope() = default;
   2049 };
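
        // Illustrative sketch: interrupts raised inside a PostponeInterruptsScope are
        // intercepted and re-requested when the scope unwinds, while a nested
        // SafeForInterruptsScope re-enables delivery for its own extent:
        //
        //   {
        //     PostponeInterruptsScope postpone(isolate);
        //     // Matching interrupts are intercepted here ...
        //     {
        //       SafeForInterruptsScope safe(isolate);
        //       // ... but delivered normally inside this inner scope.
        //     }
        //   }  // Intercepted interrupts take effect after the outermost scope.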
   2050 
   2051 class StackTraceFailureMessage {
   2052  public:
   2053   explicit StackTraceFailureMessage(Isolate* isolate, void* ptr1 = nullptr,
   2054                                     void* ptr2 = nullptr, void* ptr3 = nullptr,
   2055                                     void* ptr4 = nullptr);
   2056 
   2057   V8_NOINLINE void Print() volatile;
   2058 
   2059   static const uintptr_t kStartMarker = 0xdecade30;
   2060   static const uintptr_t kEndMarker = 0xdecade31;
   2061   static const int kStacktraceBufferSize = 32 * KB;
   2062 
   2063   uintptr_t start_marker_ = kStartMarker;
   2064   void* isolate_;
   2065   void* ptr1_;
   2066   void* ptr2_;
   2067   void* ptr3_;
   2068   void* ptr4_;
   2069   void* code_objects_[4];
   2070   char js_stack_trace_[kStacktraceBufferSize];
   2071   uintptr_t end_marker_ = kEndMarker;
   2072 };
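
        // Illustrative sketch (based on the fields above): fatal-error paths build
        // the message on the stack so the start/end markers make it easy to locate
        // in crash dumps:
        //
        //   StackTraceFailureMessage message(isolate, ptr1, ptr2);
        //   message.Print();  // js_stack_trace_ also ends up in the minidump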
   2073 
   2074 }  // namespace internal
   2075 }  // namespace v8
   2076 
   2077 #endif  // V8_ISOLATE_H_
   2078