// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_CONCURRENT_MARKING_H_
#define V8_HEAP_CONCURRENT_MARKING_H_

#include <atomic>
#include <unordered_map>

#include "include/v8-platform.h"
#include "src/allocation.h"
#include "src/base/atomic-utils.h"
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
#include "src/cancelable-task.h"
#include "src/heap/spaces.h"
#include "src/heap/worklist.h"
#include "src/utils.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

class Heap;
class Isolate;
class MajorNonAtomicMarkingState;
struct WeakObjects;

using LiveBytesMap =
    std::unordered_map<MemoryChunk*, intptr_t, MemoryChunk::Hasher>;

class ConcurrentMarking {
 public:
  // When the scope is entered, the concurrent marking tasks are preempted and
  // stop looking at heap objects; concurrent marking is resumed when the
  // scope is exited.
  class PauseScope {
   public:
    explicit PauseScope(ConcurrentMarking* concurrent_marking);
    ~PauseScope();

   private:
    ConcurrentMarking* const concurrent_marking_;
    const bool resume_on_exit_;
  };
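
  // A minimal usage sketch (an assumption for illustration: the caller, e.g.
  // the GC on the main thread, holds a ConcurrentMarking* named
  // concurrent_marking). Entering the scope pauses the concurrent markers so
  // heap objects can be moved safely; leaving it resumes marking if it was
  // running before:
  //
  //   {
  //     ConcurrentMarking::PauseScope pause_scope(concurrent_marking);
  //     // ... move or mutate heap objects safely here ...
  //   }  // Marking resumes in ~PauseScope() if it was running on entry.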

  enum class StopRequest {
    // Preempt ongoing tasks ASAP (and cancel unstarted tasks).
    PREEMPT_TASKS,
    // Wait for ongoing tasks to complete (and cancel unstarted tasks).
    COMPLETE_ONGOING_TASKS,
    // Wait for all scheduled tasks to complete (only use this in tests that
    // control the full stack -- otherwise tasks cancelled by the platform can
    // make this call hang).
    COMPLETE_TASKS_FOR_TESTING,
  };

  // TODO(gab): The only thing that prevents this being above 7 is
  // Worklist::kMaxNumTasks being maxed at 8 (concurrent marking doesn't use
  // task 0, reserved for the main thread).
  static constexpr int kMaxTasks = 7;
  using MarkingWorklist = Worklist<HeapObject*, 64 /* segment size */>;

  ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
                    MarkingWorklist* bailout, MarkingWorklist* on_hold,
                    WeakObjects* weak_objects);

  // Schedules asynchronous tasks to perform concurrent marking. Objects in the
  // heap should not be moved while these tasks are active (they can be stopped
  // safely via Stop() or PauseScope).
  void ScheduleTasks();

  // Stops concurrent marking per |stop_request|'s semantics. Returns true
  // if concurrent marking was in progress, false otherwise.
  bool Stop(StopRequest stop_request);
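
  // A minimal sketch of the task lifecycle (assumption for illustration:
  // concurrent_marking is a ConcurrentMarking* owned by the caller, e.g. the
  // GC on the main thread):
  //
  //   concurrent_marking->ScheduleTasks();
  //   // ... mutator and concurrent marking tasks run ...
  //   if (concurrent_marking->Stop(
  //           ConcurrentMarking::StopRequest::PREEMPT_TASKS)) {
  //     // Concurrent marking was in progress and is now stopped.
  //   }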

  void RescheduleTasksIfNeeded();
  // Flushes the local live bytes into the given marking state.
  void FlushLiveBytes(MajorNonAtomicMarkingState* marking_state);
  // This function is called for a new space page that was cleared after a
  // scavenge and is going to be reused.
  void ClearLiveness(MemoryChunk* chunk);
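
  // A minimal sketch (assumptions for illustration: concurrent_marking and
  // marking_state are a ConcurrentMarking* and the main thread's
  // MajorNonAtomicMarkingState*, both owned by the caller); the task-local
  // live bytes are flushed once the concurrent markers have been stopped:
  //
  //   concurrent_marking->Stop(
  //       ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
  //   concurrent_marking->FlushLiveBytes(marking_state);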

  int TaskCount() { return task_count_; }

  // Checks if all threads are stopped.
  bool IsStopped();

  size_t TotalMarkedBytes();

  void set_ephemeron_marked(bool ephemeron_marked) {
    ephemeron_marked_.store(ephemeron_marked);
  }
  bool ephemeron_marked() { return ephemeron_marked_.load(); }

 private:
  struct TaskState {
    // The main thread sets this flag to true when it wants the concurrent
    // marker to give up the worker thread.
    std::atomic<bool> preemption_request;

    LiveBytesMap live_bytes;
    size_t marked_bytes = 0;
    // Padding to reduce false sharing between the TaskState entries of
    // neighboring tasks.
    char cache_line_padding[64];
  };
  class Task;
  void Run(int task_id, TaskState* task_state);
  Heap* const heap_;
  MarkingWorklist* const shared_;
  MarkingWorklist* const bailout_;
  MarkingWorklist* const on_hold_;
  WeakObjects* const weak_objects_;
  TaskState task_state_[kMaxTasks + 1];
  std::atomic<size_t> total_marked_bytes_{0};
  std::atomic<bool> ephemeron_marked_{false};
  base::Mutex pending_lock_;
  base::ConditionVariable pending_condition_;
  int pending_task_count_ = 0;
  bool is_pending_[kMaxTasks + 1] = {};
  CancelableTaskManager::Id cancelable_id_[kMaxTasks + 1] = {};
  int task_count_ = 0;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_CONCURRENT_MARKING_H_