// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/optimizing-compile-dispatcher.h"

#include "src/base/atomicops.h"
#include "src/full-codegen/full-codegen.h"
#include "src/isolate.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

namespace {

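// Disposes a recompile job, optionally restoring the function's unoptimized
// code. For OSR jobs that never reached the waiting-for-install stage, the
// stack check that was patched into the unoptimized code to trigger OSR
// entry is removed again.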
void DisposeOptimizedCompileJob(OptimizedCompileJob* job,
                                bool restore_function_code) {
  // The recompile job is allocated in the CompilationInfo's zone.
  CompilationInfo* info = job->info();
  if (restore_function_code) {
    if (info->is_osr()) {
      if (!job->IsWaitingForInstall()) {
        // Remove stack check that guards OSR entry on original code.
        Handle<Code> code = info->unoptimized_code();
        uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
        BackEdgeTable::RemoveStackCheck(code, offset);
      }
    } else {
      Handle<JSFunction> function = info->closure();
      function->ReplaceCode(function->shared()->code());
    }
  }
  delete info;
}

}  // namespace

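// A background task that takes one job off the dispatcher's input queue,
// compiles it, and queues the result for installation on the main thread.
// ref_count_ tracks the number of outstanding tasks so that Flush() and
// Stop() can block until all in-flight tasks have finished.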
class OptimizingCompileDispatcher::CompileTask : public v8::Task {
 public:
  explicit CompileTask(Isolate* isolate) : isolate_(isolate) {
    OptimizingCompileDispatcher* dispatcher =
        isolate_->optimizing_compile_dispatcher();
    base::LockGuard<base::Mutex> lock_guard(&dispatcher->ref_count_mutex_);
    ++dispatcher->ref_count_;
  }

  virtual ~CompileTask() {}

 private:
  // v8::Task overrides.
  void Run() override {
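    // Graph optimization runs off the main thread, so it must not allocate
    // on the V8 heap, create new handles, or dereference handles.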
    DisallowHeapAllocation no_allocation;
    DisallowHandleAllocation no_handles;
    DisallowHandleDereference no_deref;

    OptimizingCompileDispatcher* dispatcher =
        isolate_->optimizing_compile_dispatcher();
    {
      TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);

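      // An artificial delay (presumably configured by a testing flag) that
      // makes it easier to exercise races between the main thread and
      // concurrent recompilation.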
      if (dispatcher->recompilation_delay_ != 0) {
        base::OS::Sleep(base::TimeDelta::FromMilliseconds(
            dispatcher->recompilation_delay_));
      }

      dispatcher->CompileNext(dispatcher->NextInput(true));
    }
    {
      base::LockGuard<base::Mutex> lock_guard(&dispatcher->ref_count_mutex_);
      if (--dispatcher->ref_count_ == 0) {
        dispatcher->ref_count_zero_.NotifyOne();
      }
    }
  }

  Isolate* isolate_;

  DISALLOW_COPY_AND_ASSIGN(CompileTask);
};

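// The dispatcher must be idle by the time it is destroyed: no background
// task may still be running (ref_count_ == 0) and the input queue must have
// been drained, e.g. by a prior call to Stop().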
OptimizingCompileDispatcher::~OptimizingCompileDispatcher() {
#ifdef DEBUG
  {
    base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
    DCHECK_EQ(0, ref_count_);
  }
#endif
  DCHECK_EQ(0, input_queue_length_);
  DeleteArray(input_queue_);
  if (FLAG_concurrent_osr) {
#ifdef DEBUG
    for (int i = 0; i < osr_buffer_capacity_; i++) {
      CHECK_NULL(osr_buffer_[i]);
    }
#endif
    DeleteArray(osr_buffer_);
  }
}

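// Takes the next job off the front of the circular input queue. When the
// dispatcher is flushing, the job is dropped instead: non-OSR jobs are
// disposed immediately, while OSR jobs are left for FlushOsrBuffer(), which
// still reaches them through osr_buffer_.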
OptimizedCompileJob* OptimizingCompileDispatcher::NextInput(
    bool check_if_flushing) {
  base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
  if (input_queue_length_ == 0) return NULL;
  OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)];
  DCHECK_NOT_NULL(job);
  input_queue_shift_ = InputQueueIndex(1);
  input_queue_length_--;
  if (check_if_flushing) {
    if (static_cast<ModeFlag>(base::Acquire_Load(&mode_)) == FLUSH) {
      if (!job->info()->is_osr()) {
        AllowHandleDereference allow_handle_dereference;
        DisposeOptimizedCompileJob(job, true);
      }
      return NULL;
    }
  }
  return job;
}


void OptimizingCompileDispatcher::CompileNext(OptimizedCompileJob* job) {
  if (!job) return;

  // The function may have already been optimized by OSR.  Simply continue.
  OptimizedCompileJob::Status status = job->OptimizeGraph();
  USE(status);  // Prevent an unused-variable error in release mode.
  DCHECK(status != OptimizedCompileJob::FAILED);

  // Use a mutex to make sure that functions marked for install
  // are always also queued.
  base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
  output_queue_.push(job);
  isolate_->stack_guard()->RequestInstallCode();
}


void OptimizingCompileDispatcher::FlushOutputQueue(bool restore_function_code) {
  for (;;) {
    OptimizedCompileJob* job = NULL;
    {
      base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
      if (output_queue_.empty()) return;
      job = output_queue_.front();
      output_queue_.pop();
    }

    // OSR jobs are dealt with separately.
    if (!job->info()->is_osr()) {
      DisposeOptimizedCompileJob(job, restore_function_code);
    }
  }
}


void OptimizingCompileDispatcher::FlushOsrBuffer(bool restore_function_code) {
  for (int i = 0; i < osr_buffer_capacity_; i++) {
    if (osr_buffer_[i] != NULL) {
      DisposeOptimizedCompileJob(osr_buffer_[i], restore_function_code);
      osr_buffer_[i] = NULL;
    }
  }
}

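// Switches the dispatcher into FLUSH mode so that background tasks drop the
// jobs they dequeue, waits for all in-flight tasks to finish, and then
// disposes whatever remains in the queues, restoring each affected
// function's unoptimized code.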
void OptimizingCompileDispatcher::Flush() {
  base::Release_Store(&mode_, static_cast<base::AtomicWord>(FLUSH));
  if (FLAG_block_concurrent_recompilation) Unblock();
  {
    base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
    while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
    base::Release_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
  }
  FlushOutputQueue(true);
  if (FLAG_concurrent_osr) FlushOsrBuffer(true);
  if (FLAG_trace_concurrent_recompilation) {
    PrintF("  ** Flushed concurrent recompilation queues.\n");
  }
}


void OptimizingCompileDispatcher::Stop() {
  base::Release_Store(&mode_, static_cast<base::AtomicWord>(FLUSH));
  if (FLAG_block_concurrent_recompilation) Unblock();
  {
    base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
    while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
    base::Release_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
  }

  if (recompilation_delay_ != 0) {
    // At this point no background task is running anymore, so there is no
    // need for a mutex when reading input_queue_length_.
    while (input_queue_length_ > 0) CompileNext(NextInput());
    InstallOptimizedFunctions();
  } else {
    FlushOutputQueue(false);
  }

  if (FLAG_concurrent_osr) FlushOsrBuffer(false);

  if ((FLAG_trace_osr || FLAG_trace_concurrent_recompilation) &&
      FLAG_concurrent_osr) {
    PrintF("[COSR hit rate %d / %d]\n", osr_hits_, osr_attempts_);
  }
}

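// Runs on the main thread, typically in response to the install-code
// interrupt that CompileNext() requests via the stack guard, and installs
// every finished job from the output queue.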
void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
  HandleScope handle_scope(isolate_);

  for (;;) {
    OptimizedCompileJob* job = NULL;
    {
      base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
      if (output_queue_.empty()) return;
      job = output_queue_.front();
      output_queue_.pop();
    }
    CompilationInfo* info = job->info();
    Handle<JSFunction> function(*info->closure());
    if (info->is_osr()) {
      if (FLAG_trace_osr) {
        PrintF("[COSR - ");
        function->ShortPrint();
        PrintF(" is ready for install and entry at AST id %d]\n",
               info->osr_ast_id().ToInt());
      }
      job->WaitForInstall();
      // Remove stack check that guards OSR entry on original code.
      Handle<Code> code = info->unoptimized_code();
      uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
      BackEdgeTable::RemoveStackCheck(code, offset);
    } else {
      if (function->IsOptimized()) {
        if (FLAG_trace_concurrent_recompilation) {
          PrintF("  ** Aborting compilation for ");
          function->ShortPrint();
          PrintF(" as it has already been optimized.\n");
        }
        DisposeOptimizedCompileJob(job, false);
      } else {
        Handle<Code> code = Compiler::GetConcurrentlyOptimizedCode(job);
        function->ReplaceCode(code.is_null() ? function->shared()->code()
                                             : *code);
      }
    }
  }
}

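// Adds a job to the input queue. OSR jobs go to the front of the queue,
// since the unoptimized function is running right now and compile latency
// matters; regular jobs go to the back.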
void OptimizingCompileDispatcher::QueueForOptimization(
    OptimizedCompileJob* job) {
  DCHECK(IsQueueAvailable());
  CompilationInfo* info = job->info();
  if (info->is_osr()) {
    osr_attempts_++;
    AddToOsrBuffer(job);
    // Add job to the front of the input queue.
    base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
    DCHECK_LT(input_queue_length_, input_queue_capacity_);
    // Move input_queue_shift_ back by one.
    input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1);
    input_queue_[InputQueueIndex(0)] = job;
    input_queue_length_++;
  } else {
    // Add job to the back of the input queue.
    base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
    DCHECK_LT(input_queue_length_, input_queue_capacity_);
    input_queue_[InputQueueIndex(input_queue_length_)] = job;
    input_queue_length_++;
  }
  if (FLAG_block_concurrent_recompilation) {
    blocked_jobs_++;
  } else {
    V8::GetCurrentPlatform()->CallOnBackgroundThread(
        new CompileTask(isolate_), v8::Platform::kShortRunningTask);
  }
}

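// Counterpart to FLAG_block_concurrent_recompilation: jobs queued while the
// dispatcher is blocked are only handed to background tasks once Unblock()
// is called.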
void OptimizingCompileDispatcher::Unblock() {
  while (blocked_jobs_ > 0) {
    V8::GetCurrentPlatform()->CallOnBackgroundThread(
        new CompileTask(isolate_), v8::Platform::kShortRunningTask);
    blocked_jobs_--;
  }
}

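// Scans the OSR buffer for a finished job matching |function| at
// |osr_ast_id|. On a hit, the job is removed from the buffer and handed to
// the caller for installation.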
OptimizedCompileJob* OptimizingCompileDispatcher::FindReadyOSRCandidate(
    Handle<JSFunction> function, BailoutId osr_ast_id) {
  for (int i = 0; i < osr_buffer_capacity_; i++) {
    OptimizedCompileJob* current = osr_buffer_[i];
    if (current != NULL && current->IsWaitingForInstall() &&
        current->info()->HasSameOsrEntry(function, osr_ast_id)) {
      osr_hits_++;
      osr_buffer_[i] = NULL;
      return current;
    }
  }
  return NULL;
}


bool OptimizingCompileDispatcher::IsQueuedForOSR(Handle<JSFunction> function,
                                                 BailoutId osr_ast_id) {
  for (int i = 0; i < osr_buffer_capacity_; i++) {
    OptimizedCompileJob* current = osr_buffer_[i];
    if (current != NULL &&
        current->info()->HasSameOsrEntry(function, osr_ast_id)) {
      return !current->IsWaitingForInstall();
    }
  }
  return false;
}


bool OptimizingCompileDispatcher::IsQueuedForOSR(JSFunction* function) {
  for (int i = 0; i < osr_buffer_capacity_; i++) {
    OptimizedCompileJob* current = osr_buffer_[i];
    if (current != NULL && *current->info()->closure() == function) {
      return !current->IsWaitingForInstall();
    }
  }
  return false;
}

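// Inserts a job into the fixed-size OSR ring buffer, evicting and disposing
// a stale job if the slot under the cursor is occupied. Only jobs that are
// already waiting for install are considered stale; slots whose jobs are
// still in flight are skipped.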
void OptimizingCompileDispatcher::AddToOsrBuffer(OptimizedCompileJob* job) {
  // Find the next slot that is empty or has a stale job.
  OptimizedCompileJob* stale = NULL;
  while (true) {
    stale = osr_buffer_[osr_buffer_cursor_];
    if (stale == NULL || stale->IsWaitingForInstall()) break;
    osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
  }

  // Add to the found slot and dispose of the evicted job.
  if (stale != NULL) {
    DCHECK(stale->IsWaitingForInstall());
    CompilationInfo* info = stale->info();
    if (FLAG_trace_osr) {
      PrintF("[COSR - Discarded ");
      info->closure()->PrintName();
      PrintF(", AST id %d]\n", info->osr_ast_id().ToInt());
    }
    DisposeOptimizedCompileJob(stale, false);
  }
  osr_buffer_[osr_buffer_cursor_] = job;
  osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
}

}  // namespace internal
}  // namespace v8