// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#include "src/runtime-profiler.h"

#include "src/assembler.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/execution.h"
#include "src/full-codegen.h"
#include "src/global-handles.h"
#include "src/isolate-inl.h"
#include "src/mark-compact.h"
#include "src/platform.h"
#include "src/scopeinfo.h"

namespace v8 {
namespace internal {


// Number of times a function has to be seen on the stack before it is
// optimized.
static const int kProfilerTicksBeforeOptimization = 2;
// If optimization of a function was disabled due to a high deoptimization
// count, but the function is hot and has been seen on the stack this number
// of times, we try to reenable optimization for it.
static const int kProfilerTicksBeforeReenablingOptimization = 250;
// If a function does not have enough type info (according to
// FLAG_type_info_threshold), but has seen a huge number of ticks,
// optimize it as it is.
static const int kTicksWhenNotEnoughTypeInfo = 100;
// We only have one byte to store the number of ticks.
STATIC_ASSERT(kProfilerTicksBeforeOptimization < 256);
STATIC_ASSERT(kProfilerTicksBeforeReenablingOptimization < 256);
STATIC_ASSERT(kTicksWhenNotEnoughTypeInfo < 256);

// Maximum size in bytes of generated code for a function to allow OSR.
static const int kOSRCodeSizeAllowanceBase =
    100 * FullCodeGenerator::kCodeSizeMultiplier;

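// Additional OSR code size allowance granted per profiler tick, so that
// functions that stay hot for longer become eligible for OSR at larger code
// sizes (see the allowance computation in OptimizeNow below).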
static const int kOSRCodeSizeAllowancePerTick =
    4 * FullCodeGenerator::kCodeSizeMultiplier;

// Maximum size in bytes of generated code for a function to be optimized
// the very first time it is seen on the stack.
static const int kMaxSizeEarlyOpt =
    5 * FullCodeGenerator::kCodeSizeMultiplier;


RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
    : isolate_(isolate),
      any_ic_changed_(false) {
}


static void GetICCounts(Code* shared_code,
                        int* ic_with_type_info_count,
                        int* ic_total_count,
                        int* percentage) {
  *ic_total_count = 0;
  *ic_with_type_info_count = 0;
  Object* raw_info = shared_code->type_feedback_info();
  if (raw_info->IsTypeFeedbackInfo()) {
    TypeFeedbackInfo* info = TypeFeedbackInfo::cast(raw_info);
    *ic_with_type_info_count = info->ic_with_type_info_count();
    *ic_total_count = info->ic_total_count();
  }
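  // For example, 30 ICs with type info out of 40 total gives
  // 100 * 30 / 40 = 75; code that has no ICs at all counts as fully
  // typed (100).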
  *percentage = *ic_total_count > 0
      ? 100 * *ic_with_type_info_count / *ic_total_count
      : 100;
}

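// Marks the given function for optimization. Marking alone does not compile
// anything; the actual recompilation is triggered by the function's next
// invocation, either lazily or via the optimizing compiler thread.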
void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
  ASSERT(function->IsOptimizable());

  if (FLAG_trace_opt && function->PassesFilter(FLAG_hydrogen_filter)) {
    PrintF("[marking ");
    function->ShortPrint();
    PrintF(" for recompilation, reason: %s", reason);
    if (FLAG_type_info_threshold > 0) {
      int typeinfo, total, percentage;
      GetICCounts(function->shared()->code(), &typeinfo, &total, &percentage);
      PrintF(", ICs with typeinfo: %d/%d (%d%%)", typeinfo, total, percentage);
    }
    PrintF("]\n");
  }

  if (isolate_->concurrent_recompilation_enabled() &&
      !isolate_->bootstrapper()->IsActive()) {
    if (isolate_->concurrent_osr_enabled() &&
        isolate_->optimizing_compiler_thread()->IsQueuedForOSR(function)) {
      // Do not attempt regular recompilation if we already queued this for OSR.
      // TODO(yangguo): This is necessary so that we don't install optimized
      // code on a function that is already optimized, since OSR and regular
      // recompilation race.  This goes away as soon as OSR becomes one-shot.
      return;
    }
    ASSERT(!function->IsInOptimizationQueue());
    function->MarkForConcurrentOptimization();
  } else {
    // The next call to the function will trigger optimization.
    function->MarkForOptimization();
  }
}


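// Arms the unoptimized code of the given function for on-stack replacement,
// unless OSR is disabled, the debugger is active, the function is a builtin,
// its code is not optimizable, or it uses an allocated arguments object.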
void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
  // See AlwaysFullCompiler (in compiler.cc) comment on why we need
  // Debug::has_break_points().
  if (!FLAG_use_osr ||
      isolate_->DebuggerHasBreakPoints() ||
      function->IsBuiltin()) {
    return;
  }

  SharedFunctionInfo* shared = function->shared();
  // If the code is not optimizable, don't try OSR.
  if (!shared->code()->optimizable()) return;

  // We are not prepared to do OSR for a function that already has an
  // allocated arguments object.  The optimized code would bypass it for
  // arguments accesses, which is unsound.  Don't try OSR.
  if (shared->uses_arguments()) return;

  // We're using on-stack replacement: patch the unoptimized code so that
  // any back edge in any unoptimized frame will trigger on-stack
  // replacement for that frame.
  if (FLAG_trace_osr) {
    PrintF("[OSR - patching back edges in ");
    function->PrintName();
    PrintF("]\n");
  }

  BackEdgeTable::Patch(isolate_, shared->code());
}


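// Walks at most FLAG_frame_count JavaScript frames, bumping profiler ticks
// and deciding, per function, between regular optimization and OSR. (This is
// presumably driven by the profiler tick via the stack guard interrupt; see
// execution.cc.)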
void RuntimeProfiler::OptimizeNow() {
  HandleScope scope(isolate_);

  if (isolate_->DebuggerHasBreakPoints()) return;

  DisallowHeapAllocation no_gc;

  // Run through the JavaScript frames and sample the functions on them. If
  // we have already seen a function often enough on the stack, we mark it
  // for optimization (eagerly or lazily).
  int frame_count = 0;
  int frame_count_limit = FLAG_frame_count;
  for (JavaScriptFrameIterator it(isolate_);
       frame_count++ < frame_count_limit && !it.done();
       it.Advance()) {
    JavaScriptFrame* frame = it.frame();
    JSFunction* function = frame->function();

    SharedFunctionInfo* shared = function->shared();
    Code* shared_code = shared->code();

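    // Bump the tick count of every function on this frame; for an optimized
    // frame, GetFunctions also returns the functions inlined into it. Counts
    // saturate at Smi::kMaxValue.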
    List<JSFunction*> functions(4);
    frame->GetFunctions(&functions);
    for (int i = functions.length(); --i >= 0; ) {
      SharedFunctionInfo* shared_function_info = functions[i]->shared();
      int ticks = shared_function_info->profiler_ticks();
      if (ticks < Smi::kMaxValue) {
        shared_function_info->set_profiler_ticks(ticks + 1);
      }
    }

    if (shared_code->kind() != Code::FUNCTION) continue;
    if (function->IsInOptimizationQueue()) continue;

    if (FLAG_always_osr &&
        shared_code->allow_osr_at_loop_nesting_level() == 0) {
      // Testing mode: always try an OSR compile for every function.
      for (int i = 0; i < Code::kMaxLoopNestingMarker; i++) {
        // TODO(titzer): fix AttemptOnStackReplacement to avoid this dumb loop.
        shared_code->set_allow_osr_at_loop_nesting_level(i);
        AttemptOnStackReplacement(function);
      }
      // Fall through and do a normal optimized compile as well.
    } else if (!frame->is_optimized() &&
        (function->IsMarkedForOptimization() ||
         function->IsMarkedForConcurrentOptimization() ||
         function->IsOptimized())) {
      // Attempt OSR if we are still running unoptimized code even though the
      // function has long been marked or even already been optimized.
      int ticks = shared_code->profiler_ticks();
      int allowance = kOSRCodeSizeAllowanceBase +
                      ticks * kOSRCodeSizeAllowancePerTick;
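      // For example, after 25 ticks the allowance is
      // (100 + 25 * 4) * FullCodeGenerator::kCodeSizeMultiplier bytes.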
      if (shared_code->CodeSize() > allowance) {
        if (ticks < 255) shared_code->set_profiler_ticks(ticks + 1);
      } else {
        int nesting = shared_code->allow_osr_at_loop_nesting_level();
        if (nesting < Code::kMaxLoopNestingMarker) {
          int new_nesting = nesting + 1;
          shared_code->set_allow_osr_at_loop_nesting_level(new_nesting);
          AttemptOnStackReplacement(function);
        }
      }
      continue;
    }

    // Only record top-level code on top of the execution stack and
    // avoid optimizing excessively large scripts since top-level code
    // will be executed only once.
    const int kMaxToplevelSourceSize = 10 * 1024;
    if (shared->is_toplevel() &&
        (frame_count > 1 || shared->SourceSize() > kMaxToplevelSourceSize)) {
      continue;
    }

    // Do not record non-optimizable functions.
    if (shared->optimization_disabled()) {
      if (shared->deopt_count() >= FLAG_max_opt_count) {
        // If optimization was disabled due to many deoptimizations,
        // then check if the function is hot and try to reenable optimization.
        int ticks = shared_code->profiler_ticks();
        if (ticks >= kProfilerTicksBeforeReenablingOptimization) {
          shared_code->set_profiler_ticks(0);
          shared->TryReenableOptimization();
        } else {
          shared_code->set_profiler_ticks(ticks + 1);
        }
      }
      continue;
    }
    if (!function->IsOptimizable()) continue;

    int ticks = shared_code->profiler_ticks();

    if (ticks >= kProfilerTicksBeforeOptimization) {
      int typeinfo, total, percentage;
      GetICCounts(shared_code, &typeinfo, &total, &percentage);
      if (percentage >= FLAG_type_info_threshold) {
        // Enough of this function's ICs have type information: the function
        // is hot and stable, so optimize it now.
        Optimize(function, "hot and stable");
      } else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
        Optimize(function, "not much type info but very hot");
      } else {
        shared_code->set_profiler_ticks(ticks + 1);
        if (FLAG_trace_opt_verbose) {
          PrintF("[not yet optimizing ");
          function->PrintName();
          PrintF(", not enough type info: %d/%d (%d%%)]\n",
                 typeinfo, total, percentage);
        }
      }
    } else if (!any_ic_changed_ &&
               shared_code->instruction_size() < kMaxSizeEarlyOpt) {
      // If no IC was patched since the last tick and this function is very
      // small, optimistically optimize it now.
      Optimize(function, "small function");
    } else {
      shared_code->set_profiler_ticks(ticks + 1);
    }
  }
  any_ic_changed_ = false;
}


} }  // namespace v8::internal