// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler-dispatcher/compiler-dispatcher.h"

#include "include/v8-platform.h"
#include "include/v8.h"
#include "src/base/platform/time.h"
#include "src/cancelable-task.h"
#include "src/compilation-info.h"
#include "src/compiler-dispatcher/compiler-dispatcher-job.h"
#include "src/compiler-dispatcher/compiler-dispatcher-tracer.h"
#include "src/flags.h"
#include "src/objects-inl.h"

namespace v8 {
namespace internal {

namespace {

enum class ExceptionHandling { kSwallow, kThrow };

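// Advances |job| by one step of the CompileJobStatus state machine on the
// main thread, dispatching on the job's current status (see the switch
// below). Returns false if the job ended up in kFailed; a pending exception
// is either swallowed or left for the caller, depending on
// |exception_handling|.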
bool DoNextStepOnMainThread(Isolate* isolate, CompilerDispatcherJob* job,
                            ExceptionHandling exception_handling) {
  DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
               "V8.CompilerDispatcherForegroundStep");

  // Ensure we are in the correct context for the job.
  SaveContext save(isolate);
  isolate->set_context(job->context());

  switch (job->status()) {
    case CompileJobStatus::kInitial:
      job->PrepareToParseOnMainThread();
      break;

    case CompileJobStatus::kReadyToParse:
      job->Parse();
      break;

    case CompileJobStatus::kParsed:
      job->FinalizeParsingOnMainThread();
      break;

    case CompileJobStatus::kReadyToAnalyze:
      job->AnalyzeOnMainThread();
      break;

    case CompileJobStatus::kAnalyzed:
      job->PrepareToCompileOnMainThread();
      break;

    case CompileJobStatus::kReadyToCompile:
      job->Compile();
      break;

    case CompileJobStatus::kCompiled:
      job->FinalizeCompilingOnMainThread();
      break;

    case CompileJobStatus::kFailed:
    case CompileJobStatus::kDone:
      break;
  }

  DCHECK_EQ(job->status() == CompileJobStatus::kFailed,
            isolate->has_pending_exception());
  if (job->status() == CompileJobStatus::kFailed &&
      exception_handling == ExceptionHandling::kSwallow) {
    isolate->clear_pending_exception();
  }
  return job->status() != CompileJobStatus::kFailed;
}

bool IsFinished(CompilerDispatcherJob* job) {
  return job->status() == CompileJobStatus::kDone ||
         job->status() == CompileJobStatus::kFailed;
}

bool CanRunOnAnyThread(CompilerDispatcherJob* job) {
  return job->status() == CompileJobStatus::kReadyToParse ||
         job->status() == CompileJobStatus::kReadyToCompile;
}

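// Runs the parse or compile step of |job| on a background thread; only the
// kReadyToParse and kReadyToCompile states are legal here (see
// CanRunOnAnyThread above).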
void DoNextStepOnBackgroundThread(CompilerDispatcherJob* job) {
  DCHECK(CanRunOnAnyThread(job));
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
               "V8.CompilerDispatcherBackgroundStep");

  switch (job->status()) {
    case CompileJobStatus::kReadyToParse:
      job->Parse();
      break;

    case CompileJobStatus::kReadyToCompile:
      job->Compile();
      break;

    default:
      UNREACHABLE();
  }
}

// Theoretically we get at most 50ms of idle time, but it's unlikely that
// we'll get all of it, so try to be conservative.
const double kMaxIdleTimeToExpectInMs = 40;

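// Main-thread task that aborts all jobs, without blocking, in response to a
// memory pressure notification (see MemoryPressureNotification below).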
class MemoryPressureTask : public CancelableTask {
 public:
  MemoryPressureTask(Isolate* isolate, CancelableTaskManager* task_manager,
                     CompilerDispatcher* dispatcher);
  ~MemoryPressureTask() override;

  // CancelableTask implementation.
  void RunInternal() override;

 private:
  CompilerDispatcher* dispatcher_;

  DISALLOW_COPY_AND_ASSIGN(MemoryPressureTask);
};

MemoryPressureTask::MemoryPressureTask(Isolate* isolate,
                                       CancelableTaskManager* task_manager,
                                       CompilerDispatcher* dispatcher)
    : CancelableTask(isolate, task_manager), dispatcher_(dispatcher) {}

MemoryPressureTask::~MemoryPressureTask() {}

void MemoryPressureTask::RunInternal() {
  dispatcher_->AbortAll(CompilerDispatcher::BlockingBehavior::kDontBlock);
}

}  // namespace

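// Main-thread task that aborts all jobs that are not currently running on a
// background thread (used for the non-blocking abort path).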
class CompilerDispatcher::AbortTask : public CancelableTask {
 public:
  AbortTask(Isolate* isolate, CancelableTaskManager* task_manager,
            CompilerDispatcher* dispatcher);
  ~AbortTask() override;

  // CancelableTask implementation.
  void RunInternal() override;

 private:
  CompilerDispatcher* dispatcher_;

  DISALLOW_COPY_AND_ASSIGN(AbortTask);
};

CompilerDispatcher::AbortTask::AbortTask(Isolate* isolate,
                                         CancelableTaskManager* task_manager,
                                         CompilerDispatcher* dispatcher)
    : CancelableTask(isolate, task_manager), dispatcher_(dispatcher) {}

CompilerDispatcher::AbortTask::~AbortTask() {}

void CompilerDispatcher::AbortTask::RunInternal() {
  dispatcher_->AbortInactiveJobs();
}

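// Background-thread task that performs one unit of off-thread work (a parse
// or compile step) via DoBackgroundWork.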
class CompilerDispatcher::BackgroundTask : public CancelableTask {
 public:
  BackgroundTask(Isolate* isolate, CancelableTaskManager* task_manager,
                 CompilerDispatcher* dispatcher);
  ~BackgroundTask() override;

  // CancelableTask implementation.
  void RunInternal() override;

 private:
  CompilerDispatcher* dispatcher_;

  DISALLOW_COPY_AND_ASSIGN(BackgroundTask);
};

CompilerDispatcher::BackgroundTask::BackgroundTask(
    Isolate* isolate, CancelableTaskManager* task_manager,
    CompilerDispatcher* dispatcher)
    : CancelableTask(isolate, task_manager), dispatcher_(dispatcher) {}

CompilerDispatcher::BackgroundTask::~BackgroundTask() {}

void CompilerDispatcher::BackgroundTask::RunInternal() {
  dispatcher_->DoBackgroundWork();
}

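// Idle task that advances jobs on the main thread within the idle deadline
// handed in by the embedder (see DoIdleWork).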
class CompilerDispatcher::IdleTask : public CancelableIdleTask {
 public:
  IdleTask(Isolate* isolate, CancelableTaskManager* task_manager,
           CompilerDispatcher* dispatcher);
  ~IdleTask() override;

  // CancelableIdleTask implementation.
  void RunInternal(double deadline_in_seconds) override;

 private:
  CompilerDispatcher* dispatcher_;

  DISALLOW_COPY_AND_ASSIGN(IdleTask);
};

CompilerDispatcher::IdleTask::IdleTask(Isolate* isolate,
                                       CancelableTaskManager* task_manager,
                                       CompilerDispatcher* dispatcher)
    : CancelableIdleTask(isolate, task_manager), dispatcher_(dispatcher) {}

CompilerDispatcher::IdleTask::~IdleTask() {}

void CompilerDispatcher::IdleTask::RunInternal(double deadline_in_seconds) {
  dispatcher_->DoIdleWork(deadline_in_seconds);
}

CompilerDispatcher::CompilerDispatcher(Isolate* isolate, Platform* platform,
                                       size_t max_stack_size)
    : isolate_(isolate),
      platform_(platform),
      max_stack_size_(max_stack_size),
      trace_compiler_dispatcher_(FLAG_trace_compiler_dispatcher),
      tracer_(new CompilerDispatcherTracer(isolate_)),
      task_manager_(new CancelableTaskManager()),
      memory_pressure_level_(MemoryPressureLevel::kNone),
      abort_(false),
      idle_task_scheduled_(false),
      num_scheduled_background_tasks_(0),
      main_thread_blocking_on_job_(nullptr),
      block_for_testing_(false),
      semaphore_for_testing_(0) {
  if (trace_compiler_dispatcher_ && !IsEnabled()) {
    PrintF("CompilerDispatcher: dispatcher is disabled\n");
  }
}

CompilerDispatcher::~CompilerDispatcher() {
  // Abort all jobs to avoid crashing in unit tests due to unfinished jobs.
  AbortAll(BlockingBehavior::kBlock);
  task_manager_->CancelAndWait();
}

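// Returns whether |function| is eligible for the dispatcher at all: the
// dispatcher must be enabled, there must be no memory pressure or pending
// abort, and only non-toplevel, non-asm, non-native functions attached to a
// script are handled.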
bool CompilerDispatcher::CanEnqueue(Handle<SharedFunctionInfo> function) {
  if (!IsEnabled()) return false;

  DCHECK(FLAG_ignition);

  if (memory_pressure_level_.Value() != MemoryPressureLevel::kNone) {
    return false;
  }

  {
    base::LockGuard<base::Mutex> lock(&mutex_);
    if (abort_) return false;
  }

  // We only handle functions (no eval / top-level code / wasm) that are
  // attached to a script.
  if (!function->script()->IsScript() || function->is_toplevel() ||
      function->asm_function() || function->native()) {
    return false;
  }

  return true;
}

bool CompilerDispatcher::Enqueue(Handle<SharedFunctionInfo> function) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
               "V8.CompilerDispatcherEnqueue");
  if (!CanEnqueue(function)) return false;
  if (IsEnqueued(function)) return true;

  if (trace_compiler_dispatcher_) {
    PrintF("CompilerDispatcher: enqueuing ");
    function->ShortPrint();
    PrintF(" for parse and compile\n");
  }

  std::unique_ptr<CompilerDispatcherJob> job(new CompilerDispatcherJob(
      isolate_, tracer_.get(), function, max_stack_size_));
  std::pair<int, int> key(Script::cast(function->script())->id(),
                          function->function_literal_id());
  jobs_.insert(std::make_pair(key, std::move(job)));
  ScheduleIdleTaskIfNeeded();
  return true;
}

bool CompilerDispatcher::EnqueueAndStep(Handle<SharedFunctionInfo> function) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
               "V8.CompilerDispatcherEnqueueAndStep");
  if (IsEnqueued(function)) return true;
  if (!Enqueue(function)) return false;

  if (trace_compiler_dispatcher_) {
    PrintF("CompilerDispatcher: stepping ");
    function->ShortPrint();
    PrintF("\n");
  }
  JobMap::const_iterator job = GetJobFor(function);
  DoNextStepOnMainThread(isolate_, job->second.get(),
                         ExceptionHandling::kSwallow);
  ConsiderJobForBackgroundProcessing(job->second.get());
  return true;
}

bool CompilerDispatcher::Enqueue(
    Handle<Script> script, Handle<SharedFunctionInfo> function,
    FunctionLiteral* literal, std::shared_ptr<Zone> parse_zone,
    std::shared_ptr<DeferredHandles> parse_handles,
    std::shared_ptr<DeferredHandles> compile_handles) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
               "V8.CompilerDispatcherEnqueue");
  if (!CanEnqueue(function)) return false;
  if (IsEnqueued(function)) return true;

  if (trace_compiler_dispatcher_) {
    PrintF("CompilerDispatcher: enqueuing ");
    function->ShortPrint();
    PrintF(" for compile\n");
  }

  std::unique_ptr<CompilerDispatcherJob> job(new CompilerDispatcherJob(
      isolate_, tracer_.get(), script, function, literal, parse_zone,
      parse_handles, compile_handles, max_stack_size_));
  std::pair<int, int> key(Script::cast(function->script())->id(),
                          function->function_literal_id());
  jobs_.insert(std::make_pair(key, std::move(job)));
  ScheduleIdleTaskIfNeeded();
  return true;
}

bool CompilerDispatcher::EnqueueAndStep(
    Handle<Script> script, Handle<SharedFunctionInfo> function,
    FunctionLiteral* literal, std::shared_ptr<Zone> parse_zone,
    std::shared_ptr<DeferredHandles> parse_handles,
    std::shared_ptr<DeferredHandles> compile_handles) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
               "V8.CompilerDispatcherEnqueueAndStep");
  if (IsEnqueued(function)) return true;
  if (!Enqueue(script, function, literal, parse_zone, parse_handles,
               compile_handles)) {
    return false;
  }

  if (trace_compiler_dispatcher_) {
    PrintF("CompilerDispatcher: stepping ");
    function->ShortPrint();
    PrintF("\n");
  }
  JobMap::const_iterator job = GetJobFor(function);
  DoNextStepOnMainThread(isolate_, job->second.get(),
                         ExceptionHandling::kSwallow);
  ConsiderJobForBackgroundProcessing(job->second.get());
  return true;
}

bool CompilerDispatcher::IsEnabled() const { return FLAG_compiler_dispatcher; }

bool CompilerDispatcher::IsEnqueued(Handle<SharedFunctionInfo> function) const {
  if (jobs_.empty()) return false;
  return GetJobFor(function) != jobs_.end();
}

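// If |job| is currently being processed on a background thread, blocks the
// main thread until that background step has finished; otherwise just
// removes the job from the pending set.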
void CompilerDispatcher::WaitForJobIfRunningOnBackground(
    CompilerDispatcherJob* job) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
               "V8.CompilerDispatcherWaitForBackgroundJob");
  RuntimeCallTimerScope runtimeTimer(
      isolate_, &RuntimeCallStats::CompileWaitForDispatcher);

  base::LockGuard<base::Mutex> lock(&mutex_);
  if (running_background_jobs_.find(job) == running_background_jobs_.end()) {
    pending_background_jobs_.erase(job);
    return;
  }
  DCHECK_NULL(main_thread_blocking_on_job_);
  main_thread_blocking_on_job_ = job;
  while (main_thread_blocking_on_job_ != nullptr) {
    main_thread_blocking_signal_.Wait(&mutex_);
  }
  DCHECK(pending_background_jobs_.find(job) == pending_background_jobs_.end());
  DCHECK(running_background_jobs_.find(job) == running_background_jobs_.end());
}

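// Synchronously finishes the job for |function| on the main thread: waits
// for any in-flight background step, runs the remaining steps, removes the
// job, and returns whether compilation succeeded.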
bool CompilerDispatcher::FinishNow(Handle<SharedFunctionInfo> function) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
               "V8.CompilerDispatcherFinishNow");
  JobMap::const_iterator job = GetJobFor(function);
  CHECK(job != jobs_.end());

  if (trace_compiler_dispatcher_) {
    PrintF("CompilerDispatcher: finishing ");
    function->ShortPrint();
    PrintF(" now\n");
  }

  WaitForJobIfRunningOnBackground(job->second.get());
  while (!IsFinished(job->second.get())) {
    DoNextStepOnMainThread(isolate_, job->second.get(),
                           ExceptionHandling::kThrow);
  }
  bool result = job->second->status() != CompileJobStatus::kFailed;

  if (trace_compiler_dispatcher_) {
    PrintF("CompilerDispatcher: finished working on ");
    function->ShortPrint();
    PrintF(": %s\n", result ? "success" : "failure");
    tracer_->DumpStatistics();
  }

  job->second->ResetOnMainThread();
  jobs_.erase(job);
  if (jobs_.empty()) {
    base::LockGuard<base::Mutex> lock(&mutex_);
    abort_ = false;
  }
  return result;
}

void CompilerDispatcher::AbortAll(BlockingBehavior blocking) {
  bool background_tasks_running =
      task_manager_->TryAbortAll() == CancelableTaskManager::kTaskRunning;
  if (!background_tasks_running || blocking == BlockingBehavior::kBlock) {
    for (auto& it : jobs_) {
      WaitForJobIfRunningOnBackground(it.second.get());
      if (trace_compiler_dispatcher_) {
        PrintF("CompilerDispatcher: aborted ");
        it.second->ShortPrint();
        PrintF("\n");
      }
      it.second->ResetOnMainThread();
    }
    jobs_.clear();
    {
      base::LockGuard<base::Mutex> lock(&mutex_);
      DCHECK(pending_background_jobs_.empty());
      DCHECK(running_background_jobs_.empty());
      abort_ = false;
    }
    return;
  }

  {
    base::LockGuard<base::Mutex> lock(&mutex_);
    abort_ = true;
    pending_background_jobs_.clear();
  }
  AbortInactiveJobs();

  // All running background jobs might already have scheduled idle tasks
  // instead of abort tasks. Schedule a single abort task here to make sure
  // they get processed as soon as possible (and not only once we have idle
  // time).
  ScheduleAbortTask();
}

void CompilerDispatcher::AbortInactiveJobs() {
  {
    base::LockGuard<base::Mutex> lock(&mutex_);
    // Since we schedule two abort tasks per async abort, we might end up
    // here with nothing left to do.
    if (!abort_) return;
  }
  for (auto it = jobs_.begin(); it != jobs_.end();) {
    auto job = it;
    ++it;
    {
      base::LockGuard<base::Mutex> lock(&mutex_);
      if (running_background_jobs_.find(job->second.get()) !=
          running_background_jobs_.end()) {
        continue;
      }
    }
    if (trace_compiler_dispatcher_) {
      PrintF("CompilerDispatcher: aborted ");
      job->second->ShortPrint();
      PrintF("\n");
    }
    job->second->ResetOnMainThread();
    jobs_.erase(job);
  }
  if (jobs_.empty()) {
    base::LockGuard<base::Mutex> lock(&mutex_);
    abort_ = false;
  }
}

void CompilerDispatcher::MemoryPressureNotification(
    v8::MemoryPressureLevel level, bool is_isolate_locked) {
  MemoryPressureLevel previous = memory_pressure_level_.Value();
  memory_pressure_level_.SetValue(level);
  // If we're already under pressure, we haven't accepted new tasks meanwhile
  // and can just return. If we're no longer under pressure, we're also done.
  if (previous != MemoryPressureLevel::kNone ||
      level == MemoryPressureLevel::kNone) {
    return;
  }
  if (trace_compiler_dispatcher_) {
    PrintF("CompilerDispatcher: received memory pressure notification\n");
  }
  if (is_isolate_locked) {
    AbortAll(BlockingBehavior::kDontBlock);
  } else {
    {
      base::LockGuard<base::Mutex> lock(&mutex_);
      if (abort_) return;
      // By going into abort mode here, and clearing the
      // pending_background_jobs_, we at least keep existing background jobs
      // from picking up more work before the MemoryPressureTask gets
      // executed.
      abort_ = true;
      pending_background_jobs_.clear();
    }
    platform_->CallOnForegroundThread(
        reinterpret_cast<v8::Isolate*>(isolate_),
        new MemoryPressureTask(isolate_, task_manager_.get(), this));
  }
}

CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::GetJobFor(
    Handle<SharedFunctionInfo> shared) const {
  if (!shared->script()->IsScript()) return jobs_.end();
  std::pair<int, int> key(Script::cast(shared->script())->id(),
                          shared->function_literal_id());
  auto range = jobs_.equal_range(key);
  for (auto job = range.first; job != range.second; ++job) {
    if (job->second->IsAssociatedWith(shared)) return job;
  }
  return jobs_.end();
}

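// Requests an idle task on the foreground thread unless one is already
// scheduled; safe to call from any thread.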
void CompilerDispatcher::ScheduleIdleTaskFromAnyThread() {
  v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
  DCHECK(platform_->IdleTasksEnabled(v8_isolate));
  {
    base::LockGuard<base::Mutex> lock(&mutex_);
    if (idle_task_scheduled_) return;
    idle_task_scheduled_ = true;
  }
  platform_->CallIdleOnForegroundThread(
      v8_isolate, new IdleTask(isolate_, task_manager_.get(), this));
}

void CompilerDispatcher::ScheduleIdleTaskIfNeeded() {
  if (jobs_.empty()) return;
  ScheduleIdleTaskFromAnyThread();
}

void CompilerDispatcher::ScheduleAbortTask() {
  v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
  platform_->CallOnForegroundThread(
      v8_isolate, new AbortTask(isolate_, task_manager_.get(), this));
}

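// If |job| can run off the main thread, adds it to the pending set and makes
// sure enough background tasks are scheduled to pick it up.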
void CompilerDispatcher::ConsiderJobForBackgroundProcessing(
    CompilerDispatcherJob* job) {
  if (!CanRunOnAnyThread(job)) return;
  {
    base::LockGuard<base::Mutex> lock(&mutex_);
    pending_background_jobs_.insert(job);
  }
  ScheduleMoreBackgroundTasksIfNeeded();
}

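// Schedules one more background task if there is pending background work and
// the platform still has background threads available; no-op in
// single-threaded mode.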
void CompilerDispatcher::ScheduleMoreBackgroundTasksIfNeeded() {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
               "V8.CompilerDispatcherScheduleMoreBackgroundTasksIfNeeded");
  if (FLAG_single_threaded) return;
  {
    base::LockGuard<base::Mutex> lock(&mutex_);
    if (pending_background_jobs_.empty()) return;
    if (platform_->NumberOfAvailableBackgroundThreads() <=
        num_scheduled_background_tasks_) {
      return;
    }
    ++num_scheduled_background_tasks_;
  }
  platform_->CallOnBackgroundThread(
      new BackgroundTask(isolate_, task_manager_.get(), this),
      v8::Platform::kShortRunningTask);
}

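// Runs on a background thread: picks one pending job, performs its
// background step, then reschedules background and idle tasks and wakes up
// the main thread if it is blocking on this job.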
void CompilerDispatcher::DoBackgroundWork() {
  CompilerDispatcherJob* job = nullptr;
  {
    base::LockGuard<base::Mutex> lock(&mutex_);
    --num_scheduled_background_tasks_;
    if (!pending_background_jobs_.empty()) {
      auto it = pending_background_jobs_.begin();
      job = *it;
      pending_background_jobs_.erase(it);
      running_background_jobs_.insert(job);
    }
  }
  if (job == nullptr) return;

  if (V8_UNLIKELY(block_for_testing_.Value())) {
    block_for_testing_.SetValue(false);
    semaphore_for_testing_.Wait();
  }

  if (trace_compiler_dispatcher_) {
    PrintF("CompilerDispatcher: doing background work\n");
  }

  DoNextStepOnBackgroundThread(job);

  ScheduleMoreBackgroundTasksIfNeeded();
  // Unconditionally schedule an idle task, as all background steps have to be
  // followed by a main thread step.
  ScheduleIdleTaskFromAnyThread();

  {
    base::LockGuard<base::Mutex> lock(&mutex_);
    running_background_jobs_.erase(job);

    if (running_background_jobs_.empty() && abort_) {
      // This is the last background job that finished. The abort task
      // scheduled by AbortAll might already have run, so schedule another
      // one to be on the safe side.
      ScheduleAbortTask();
    }

    if (main_thread_blocking_on_job_ == job) {
      main_thread_blocking_on_job_ = nullptr;
      main_thread_blocking_signal_.NotifyOne();
    }
  }
  // Don't touch |this| anymore after this point, as it might have been
  // deleted.
}

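// Runs on the main thread during idle time: aborts inactive jobs if an abort
// is pending, otherwise walks the job list until the deadline is reached,
// skipping jobs whose next step is estimated to take too long, erasing
// finished jobs, and stepping the rest.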
void CompilerDispatcher::DoIdleWork(double deadline_in_seconds) {
  bool aborted = false;
  {
    base::LockGuard<base::Mutex> lock(&mutex_);
    idle_task_scheduled_ = false;
    aborted = abort_;
  }

  if (aborted) {
    AbortInactiveJobs();
    return;
  }

  // Number of jobs that are unlikely to make progress during any idle
  // callback due to their estimated duration.
  size_t too_long_jobs = 0;

  // Iterate over all available jobs & remaining time. For each job, decide
  // whether to 1) skip it (if it would take too long), 2) erase it (if it's
  // finished), or 3) make progress on it.
  double idle_time_in_seconds =
      deadline_in_seconds - platform_->MonotonicallyIncreasingTime();

  if (trace_compiler_dispatcher_) {
    PrintF("CompilerDispatcher: received %0.1lfms of idle time\n",
           idle_time_in_seconds *
               static_cast<double>(base::Time::kMillisecondsPerSecond));
  }
  for (auto job = jobs_.begin();
       job != jobs_.end() && idle_time_in_seconds > 0.0;
       idle_time_in_seconds =
           deadline_in_seconds - platform_->MonotonicallyIncreasingTime()) {
    // Don't work on jobs that are being worked on by background tasks.
    // Similarly, remove jobs we work on from the set of available background
    // jobs.
    std::unique_ptr<base::LockGuard<base::Mutex>> lock(
        new base::LockGuard<base::Mutex>(&mutex_));
    if (running_background_jobs_.find(job->second.get()) !=
        running_background_jobs_.end()) {
      ++job;
      continue;
    }
    auto it = pending_background_jobs_.find(job->second.get());
    double estimate_in_ms = job->second->EstimateRuntimeOfNextStepInMs();
    if (idle_time_in_seconds <
        (estimate_in_ms /
         static_cast<double>(base::Time::kMillisecondsPerSecond))) {
      // If there's not enough time left, try to estimate whether we would
      // have managed to finish the job in a large idle task to assess
      // whether we should ask for another idle callback.
      if (estimate_in_ms > kMaxIdleTimeToExpectInMs) ++too_long_jobs;
      if (it == pending_background_jobs_.end()) {
        lock.reset();
        ConsiderJobForBackgroundProcessing(job->second.get());
      }
      ++job;
    } else if (IsFinished(job->second.get())) {
      DCHECK(it == pending_background_jobs_.end());
      if (trace_compiler_dispatcher_) {
        PrintF("CompilerDispatcher: finished working on ");
        job->second->ShortPrint();
        PrintF(": %s\n", job->second->status() == CompileJobStatus::kDone
                             ? "success"
                             : "failure");
        tracer_->DumpStatistics();
      }
      job->second->ResetOnMainThread();
      job = jobs_.erase(job);
      continue;
    } else {
      // Do one step, and keep processing the job (as we don't advance the
      // iterator).
      if (it != pending_background_jobs_.end()) {
        pending_background_jobs_.erase(it);
      }
      lock.reset();
      DoNextStepOnMainThread(isolate_, job->second.get(),
                             ExceptionHandling::kSwallow);
    }
  }
  if (jobs_.size() > too_long_jobs) ScheduleIdleTaskIfNeeded();
}

}  // namespace internal
}  // namespace v8