// Home | History | Annotate | Download | only in src
      1 // Copyright 2012 the V8 project authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "src/optimizing-compiler-thread.h"
      6 
      7 #include "src/v8.h"
      8 
      9 #include "src/base/atomicops.h"
     10 #include "src/full-codegen.h"
     11 #include "src/hydrogen.h"
     12 #include "src/isolate.h"
     13 #include "src/v8threads.h"
     14 
     15 namespace v8 {
     16 namespace internal {
     17 
OptimizingCompilerThread::~OptimizingCompilerThread() {
  // By the time this object is destroyed the compile thread must have been
  // stopped, which drains the input queue completely.
  ASSERT_EQ(0, input_queue_length_);
  DeleteArray(input_queue_);
  if (FLAG_concurrent_osr) {
#ifdef DEBUG
    // Every OSR slot must already have been vacated (installed or disposed).
    for (int i = 0; i < osr_buffer_capacity_; i++) {
      CHECK_EQ(NULL, osr_buffer_[i]);
    }
#endif
    DeleteArray(osr_buffer_);
  }
}
     30 
     31 
void OptimizingCompilerThread::Run() {
#ifdef DEBUG
  // Record this thread's id so IsOptimizerThread() can identify it later.
  { LockGuard<Mutex> lock_guard(&thread_id_mutex_);
    thread_id_ = ThreadId::Current().ToInteger();
  }
#endif
  Isolate::SetIsolateThreadLocals(isolate_, NULL);
  // The compile thread must not allocate on the heap or create/dereference
  // handles; the scopers below enforce that for the whole loop.
  DisallowHeapAllocation no_allocation;
  DisallowHandleAllocation no_handles;
  DisallowHandleDereference no_deref;

  ElapsedTimer total_timer;
  if (FLAG_trace_concurrent_recompilation) total_timer.Start();

  // Consumer loop: each signal on the input queue semaphore corresponds to
  // one queued job or to a STOP/FLUSH control request from the main thread.
  while (true) {
    input_queue_semaphore_.Wait();
    Logger::TimerEventScope timer(
        isolate_, Logger::TimerEventScope::v8_recompile_concurrent);

    // Artificial delay used by tests to provoke particular interleavings.
    if (FLAG_concurrent_recompilation_delay != 0) {
      OS::Sleep(FLAG_concurrent_recompilation_delay);
    }

    switch (static_cast<StopFlag>(base::Acquire_Load(&stop_thread_))) {
      case CONTINUE:
        break;
      case STOP:
        if (FLAG_trace_concurrent_recompilation) {
          time_spent_total_ = total_timer.Elapsed();
        }
        // Wake up the main thread blocked in Stop(), then exit the thread.
        stop_semaphore_.Signal();
        return;
      case FLUSH:
        // The main thread is blocked, waiting for the stop semaphore.
        { AllowHandleDereference allow_handle_dereference;
          FlushInputQueue(true);
        }
        // Acknowledge the flush and resume normal operation.
        base::Release_Store(&stop_thread_,
                            static_cast<base::AtomicWord>(CONTINUE));
        stop_semaphore_.Signal();
        // Return to start of consumer loop.
        continue;
    }

    ElapsedTimer compiling_timer;
    if (FLAG_trace_concurrent_recompilation) compiling_timer.Start();

    CompileNext();

    if (FLAG_trace_concurrent_recompilation) {
      time_spent_compiling_ += compiling_timer.Elapsed();
    }
  }
}
     86 
     87 
     88 OptimizedCompileJob* OptimizingCompilerThread::NextInput() {
     89   LockGuard<Mutex> access_input_queue_(&input_queue_mutex_);
     90   if (input_queue_length_ == 0) return NULL;
     91   OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)];
     92   ASSERT_NE(NULL, job);
     93   input_queue_shift_ = InputQueueIndex(1);
     94   input_queue_length_--;
     95   return job;
     96 }
     97 
     98 
void OptimizingCompilerThread::CompileNext() {
  OptimizedCompileJob* job = NextInput();
  ASSERT_NE(NULL, job);

  // Run the optimization phase on this thread.  In release mode a failed
  // status is tolerated here; the job is still enqueued for the main thread.
  OptimizedCompileJob::Status status = job->OptimizeGraph();
  USE(status);   // Prevent an unused-variable error in release mode.
  ASSERT(status != OptimizedCompileJob::FAILED);

  // Hand the finished job to the main thread: enqueue it on the output
  // queue and request an install-code interrupt via the stack guard.
  output_queue_.Enqueue(job);
  isolate_->stack_guard()->RequestInstallCode();
}
    114 
    115 
    116 static void DisposeOptimizedCompileJob(OptimizedCompileJob* job,
    117                                        bool restore_function_code) {
    118   // The recompile job is allocated in the CompilationInfo's zone.
    119   CompilationInfo* info = job->info();
    120   if (restore_function_code) {
    121     if (info->is_osr()) {
    122       if (!job->IsWaitingForInstall()) {
    123         // Remove stack check that guards OSR entry on original code.
    124         Handle<Code> code = info->unoptimized_code();
    125         uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
    126         BackEdgeTable::RemoveStackCheck(code, offset);
    127       }
    128     } else {
    129       Handle<JSFunction> function = info->closure();
    130       function->ReplaceCode(function->shared()->code());
    131     }
    132   }
    133   delete info;
    134 }
    135 
    136 
    137 void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
    138   OptimizedCompileJob* job;
    139   while ((job = NextInput())) {
    140     // This should not block, since we have one signal on the input queue
    141     // semaphore corresponding to each element in the input queue.
    142     input_queue_semaphore_.Wait();
    143     // OSR jobs are dealt with separately.
    144     if (!job->info()->is_osr()) {
    145       DisposeOptimizedCompileJob(job, restore_function_code);
    146     }
    147   }
    148 }
    149 
    150 
    151 void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
    152   OptimizedCompileJob* job;
    153   while (output_queue_.Dequeue(&job)) {
    154     // OSR jobs are dealt with separately.
    155     if (!job->info()->is_osr()) {
    156       DisposeOptimizedCompileJob(job, restore_function_code);
    157     }
    158   }
    159 }
    160 
    161 
    162 void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) {
    163   for (int i = 0; i < osr_buffer_capacity_; i++) {
    164     if (osr_buffer_[i] != NULL) {
    165       DisposeOptimizedCompileJob(osr_buffer_[i], restore_function_code);
    166       osr_buffer_[i] = NULL;
    167     }
    168   }
    169 }
    170 
    171 
void OptimizingCompilerThread::Flush() {
  ASSERT(!IsOptimizerThread());
  // Request that the compile thread discard all pending input jobs.
  base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH));
  // Jobs held back by FLAG_block_concurrent_recompilation must be released
  // so the compile thread wakes up and observes the FLUSH request.
  if (FLAG_block_concurrent_recompilation) Unblock();
  // One extra signal makes the Wait() in Run() return even when the input
  // queue is empty.
  input_queue_semaphore_.Signal();
  // Block until the compile thread has acknowledged the flush.
  stop_semaphore_.Wait();
  // Restore original code for any jobs that had already finished compiling
  // (restore_function_code == true).
  FlushOutputQueue(true);
  if (FLAG_concurrent_osr) FlushOsrBuffer(true);
  if (FLAG_trace_concurrent_recompilation) {
    PrintF("  ** Flushed concurrent recompilation queues.\n");
  }
}
    184 
    185 
void OptimizingCompilerThread::Stop() {
  ASSERT(!IsOptimizerThread());
  // Request thread termination and wake the compile thread: the extra
  // signal makes the Wait() in Run() return even on an empty queue.
  base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(STOP));
  if (FLAG_block_concurrent_recompilation) Unblock();
  input_queue_semaphore_.Signal();
  stop_semaphore_.Wait();

  if (FLAG_concurrent_recompilation_delay != 0) {
    // At this point the optimizing compiler thread's event loop has stopped.
    // There is no need for a mutex when reading input_queue_length_.
    // Finish any outstanding jobs on this thread so tests that use the
    // delay flag still observe their results.
    while (input_queue_length_ > 0) CompileNext();
    InstallOptimizedFunctions();
  } else {
    // Normal shutdown: discard everything without restoring code.
    FlushInputQueue(false);
    FlushOutputQueue(false);
  }

  if (FLAG_concurrent_osr) FlushOsrBuffer(false);

  if (FLAG_trace_concurrent_recompilation) {
    double percentage = time_spent_compiling_.PercentOf(time_spent_total_);
    PrintF("  ** Compiler thread did %.2f%% useful work\n", percentage);
  }

  if ((FLAG_trace_osr || FLAG_trace_concurrent_recompilation) &&
      FLAG_concurrent_osr) {
    PrintF("[COSR hit rate %d / %d]\n", osr_hits_, osr_attempts_);
  }

  // Wait for the background thread to actually terminate.
  Join();
}
    217 
    218 
void OptimizingCompilerThread::InstallOptimizedFunctions() {
  ASSERT(!IsOptimizerThread());
  HandleScope handle_scope(isolate_);

  OptimizedCompileJob* job;
  while (output_queue_.Dequeue(&job)) {
    CompilationInfo* info = job->info();
    Handle<JSFunction> function(*info->closure());
    if (info->is_osr()) {
      if (FLAG_trace_osr) {
        PrintF("[COSR - ");
        info->closure()->PrintName();
        PrintF(" is ready for install and entry at AST id %d]\n",
               info->osr_ast_id().ToInt());
      }
      // Flag the job as waiting-for-install so FindReadyOSRCandidate can
      // hand it out when execution reaches the OSR entry point.
      job->WaitForInstall();
      // Remove stack check that guards OSR entry on original code.
      Handle<Code> code = info->unoptimized_code();
      uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
      BackEdgeTable::RemoveStackCheck(code, offset);
    } else {
      if (function->IsOptimized()) {
        // The function was optimized in the meantime; this job's result is
        // no longer needed.
        DisposeOptimizedCompileJob(job, false);
      } else {
        // Install the optimized code, or fall back to the shared
        // unoptimized code if finalizing the job failed.
        Handle<Code> code = Compiler::GetConcurrentlyOptimizedCode(job);
        function->ReplaceCode(
            code.is_null() ? function->shared()->code() : *code);
      }
    }
  }
}
    250 
    251 
void OptimizingCompilerThread::QueueForOptimization(OptimizedCompileJob* job) {
  ASSERT(IsQueueAvailable());
  ASSERT(!IsOptimizerThread());
  CompilationInfo* info = job->info();
  if (info->is_osr()) {
    osr_attempts_++;
    // Register the job in the OSR buffer so it can be found by AST id, and
    // add it to the front of the input queue: OSR jobs jump the queue.
    AddToOsrBuffer(job);
    LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
    ASSERT_LT(input_queue_length_, input_queue_capacity_);
    // Move shift_ back by one so index 0 points at a fresh slot.
    input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1);
    input_queue_[InputQueueIndex(0)] = job;
    input_queue_length_++;
  } else {
    // Add job to the back of the input queue.
    LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
    ASSERT_LT(input_queue_length_, input_queue_capacity_);
    input_queue_[InputQueueIndex(input_queue_length_)] = job;
    input_queue_length_++;
  }
  if (FLAG_block_concurrent_recompilation) {
    // Testing mode: hold the semaphore signal back until Unblock() is
    // called, keeping the compile thread idle.
    blocked_jobs_++;
  } else {
    input_queue_semaphore_.Signal();
  }
}
    279 
    280 
    281 void OptimizingCompilerThread::Unblock() {
    282   ASSERT(!IsOptimizerThread());
    283   while (blocked_jobs_ > 0) {
    284     input_queue_semaphore_.Signal();
    285     blocked_jobs_--;
    286   }
    287 }
    288 
    289 
    290 OptimizedCompileJob* OptimizingCompilerThread::FindReadyOSRCandidate(
    291     Handle<JSFunction> function, BailoutId osr_ast_id) {
    292   ASSERT(!IsOptimizerThread());
    293   for (int i = 0; i < osr_buffer_capacity_; i++) {
    294     OptimizedCompileJob* current = osr_buffer_[i];
    295     if (current != NULL &&
    296         current->IsWaitingForInstall() &&
    297         current->info()->HasSameOsrEntry(function, osr_ast_id)) {
    298       osr_hits_++;
    299       osr_buffer_[i] = NULL;
    300       return current;
    301     }
    302   }
    303   return NULL;
    304 }
    305 
    306 
    307 bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function,
    308                                               BailoutId osr_ast_id) {
    309   ASSERT(!IsOptimizerThread());
    310   for (int i = 0; i < osr_buffer_capacity_; i++) {
    311     OptimizedCompileJob* current = osr_buffer_[i];
    312     if (current != NULL &&
    313         current->info()->HasSameOsrEntry(function, osr_ast_id)) {
    314       return !current->IsWaitingForInstall();
    315     }
    316   }
    317   return false;
    318 }
    319 
    320 
    321 bool OptimizingCompilerThread::IsQueuedForOSR(JSFunction* function) {
    322   ASSERT(!IsOptimizerThread());
    323   for (int i = 0; i < osr_buffer_capacity_; i++) {
    324     OptimizedCompileJob* current = osr_buffer_[i];
    325     if (current != NULL && *current->info()->closure() == function) {
    326       return !current->IsWaitingForInstall();
    327     }
    328   }
    329   return false;
    330 }
    331 
    332 
void OptimizingCompilerThread::AddToOsrBuffer(OptimizedCompileJob* job) {
  ASSERT(!IsOptimizerThread());
  // Find the next slot that is empty or has a stale job.  Slots whose jobs
  // are still compiling are skipped: the compile thread still owns them.
  // NOTE(review): this loop assumes at least one slot is empty or ready;
  // verify that callers bound the number of in-flight OSR jobs.
  OptimizedCompileJob* stale = NULL;
  while (true) {
    stale = osr_buffer_[osr_buffer_cursor_];
    if (stale == NULL || stale->IsWaitingForInstall()) break;
    osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
  }

  // Add to found slot and dispose the evicted job.
  if (stale != NULL) {
    ASSERT(stale->IsWaitingForInstall());
    CompilationInfo* info = stale->info();
    if (FLAG_trace_osr) {
      PrintF("[COSR - Discarded ");
      info->closure()->PrintName();
      PrintF(", AST id %d]\n", info->osr_ast_id().ToInt());
    }
    DisposeOptimizedCompileJob(stale, false);
  }
  osr_buffer_[osr_buffer_cursor_] = job;
  osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
}
    357 
    358 
#ifdef DEBUG
// Debug-only helpers backing the ASSERT(!IsOptimizerThread()) checks in
// this file.
bool OptimizingCompilerThread::IsOptimizerThread(Isolate* isolate) {
  return isolate->concurrent_recompilation_enabled() &&
         isolate->optimizing_compiler_thread()->IsOptimizerThread();
}


bool OptimizingCompilerThread::IsOptimizerThread() {
  // thread_id_ is written by Run(); the mutex guards against reading it
  // before the compile thread has recorded its id.
  LockGuard<Mutex> lock_guard(&thread_id_mutex_);
  return ThreadId::Current().ToInteger() == thread_id_;
}
#endif
    371 
    372 
    373 } }  // namespace v8::internal
    374