// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/profiler/cpu-profiler.h"

#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
#include "src/locked-queue-inl.h"
#include "src/log-inl.h"
#include "src/profiler/cpu-profiler-inl.h"
#include "src/vm-state-inl.h"

#include "include/v8-profiler.h"

namespace v8 {
namespace internal {

static const int kProfilerStackSize = 64 * KB;


ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator,
                                                 sampler::Sampler* sampler,
                                                 base::TimeDelta period)
    : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
      generator_(generator),
      sampler_(sampler),
      running_(1),
      period_(period),
      last_code_event_id_(0),
      last_processed_code_event_id_(0) {}


ProfilerEventsProcessor::~ProfilerEventsProcessor() {}

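// Each code event is stamped with a strictly increasing sequence number;
// tick samples record the id of the last code event issued before they were
// taken. ProcessOneSample() uses this ordering to apply a sample only once
// the code map reflects every event the sample might reference.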
void ProfilerEventsProcessor::Enqueue(const CodeEventsContainer& event) {
  event.generic.order = last_code_event_id_.Increment(1);
  events_buffer_.Enqueue(event);
}


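// Called on the VM thread during deoptimization, where no signal-delivered
// register state exists. The state is reconstructed instead: the saved
// C entry frame pointer gives fp, fp_to_sp_delta recovers sp at the deopt
// site, and `from` is the pc of the code that deoptimized.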
void ProfilerEventsProcessor::AddDeoptStack(Isolate* isolate, Address from,
                                            int fp_to_sp_delta) {
  TickSampleEventRecord record(last_code_event_id_.Value());
  RegisterState regs;
  Address fp = isolate->c_entry_fp(isolate->thread_local_top());
  regs.sp = fp - fp_to_sp_delta;
  regs.fp = fp;
  regs.pc = from;
  record.sample.Init(isolate, regs, TickSample::kSkipCEntryFrame, false);
  ticks_from_vm_buffer_.Enqueue(record);
}

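// Records the stack of the VM thread synchronously. Samples produced this
// way go into the locked ticks_from_vm_buffer_ queue rather than the
// signal-fed ticks_buffer_, keeping them separate from asynchronous samples.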
void ProfilerEventsProcessor::AddCurrentStack(Isolate* isolate,
                                              bool update_stats) {
  TickSampleEventRecord record(last_code_event_id_.Value());
  RegisterState regs;
  StackFrameIterator it(isolate);
  if (!it.done()) {
    StackFrame* frame = it.frame();
    regs.sp = frame->sp();
    regs.fp = frame->fp();
    regs.pc = frame->pc();
  }
  record.sample.Init(isolate, regs, TickSample::kSkipCEntryFrame, update_stats);
  ticks_from_vm_buffer_.Enqueue(record);
}


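// The atomic exchange guarantees the processor thread is joined at most
// once: a second caller sees running_ already cleared and returns early.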
void ProfilerEventsProcessor::StopSynchronously() {
  if (!base::NoBarrier_AtomicExchange(&running_, 0)) return;
  Join();
}


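// Dequeues one code event, if available, and applies it to the code map via
// the UpdateCodeMap() method of the concrete record type. Returns false only
// when the queue is empty.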
bool ProfilerEventsProcessor::ProcessCodeEvent() {
  CodeEventsContainer record;
  if (events_buffer_.Dequeue(&record)) {
    switch (record.generic.type) {
#define PROFILER_TYPE_CASE(type, clss)                          \
      case CodeEventRecord::type:                               \
        record.clss##_.UpdateCodeMap(generator_->code_map());   \
        break;

      CODE_EVENTS_TYPE_LIST(PROFILER_TYPE_CASE)

#undef PROFILER_TYPE_CASE
      default: return true;  // Skip record.
    }
    last_processed_code_event_id_ = record.generic.order;
    return true;
  }
  return false;
}

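// Processes at most one tick sample, preferring VM-thread samples that match
// the id of the last applied code event. A sample carrying a newer id means
// more code events must be applied before it can be resolved, which is
// reported as FoundSampleForNextCodeEvent.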
ProfilerEventsProcessor::SampleProcessingResult
    ProfilerEventsProcessor::ProcessOneSample() {
  TickSampleEventRecord record1;
  if (ticks_from_vm_buffer_.Peek(&record1) &&
      (record1.order == last_processed_code_event_id_)) {
    TickSampleEventRecord record;
    ticks_from_vm_buffer_.Dequeue(&record);
    generator_->RecordTickSample(record.sample);
    return OneSampleProcessed;
  }

  const TickSampleEventRecord* record = ticks_buffer_.Peek();
  if (record == nullptr) {
    if (ticks_from_vm_buffer_.IsEmpty()) return NoSamplesInQueue;
    return FoundSampleForNextCodeEvent;
  }
  if (record->order != last_processed_code_event_id_) {
    return FoundSampleForNextCodeEvent;
  }
  generator_->RecordTickSample(record->sample);
  ticks_buffer_.Remove();
  return OneSampleProcessed;
}


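// Main loop of the processor thread. Between samples it drains pending tick
// records and code events; it then triggers the next sample itself via
// sampler_->DoSample(), so the sampling period is controlled here rather
// than by an OS timer.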
void ProfilerEventsProcessor::Run() {
  while (!!base::NoBarrier_Load(&running_)) {
    base::TimeTicks next_sample_time =
        base::TimeTicks::HighResolutionNow() + period_;
    base::TimeTicks now;
    SampleProcessingResult result;
    // Keep processing existing events until we need to take the next sample
    // or the ticks buffer is empty.
    do {
      result = ProcessOneSample();
      if (result == FoundSampleForNextCodeEvent) {
        // All ticks of the current last_processed_code_event_id_ have been
        // processed; proceed to the next code event.
        ProcessCodeEvent();
      }
      now = base::TimeTicks::HighResolutionNow();
    } while (result != NoSamplesInQueue && now < next_sample_time);

    if (next_sample_time > now) {
#if V8_OS_WIN
      // Do not use Sleep on Windows as it is very imprecise: the jitter can
      // be as much as 16ms, which is unacceptable here. Busy-wait instead.
      while (base::TimeTicks::HighResolutionNow() < next_sample_time) {
      }
#else
      base::OS::Sleep(next_sample_time - now);
#endif
    }

    // Schedule the next sample. sampler_ is nullptr in tests.
    if (sampler_) sampler_->DoSample();
  }

  // Process remaining tick events.
  do {
    SampleProcessingResult result;
    do {
      result = ProcessOneSample();
    } while (result == OneSampleProcessed);
  } while (ProcessCodeEvent());
}


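// The processor embeds a sampling circular queue whose alignment requirement
// can exceed what the default operator new guarantees, so allocation is
// routed through AlignedAlloc with the alignment of the whole class.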
void* ProfilerEventsProcessor::operator new(size_t size) {
  return AlignedAlloc(size, V8_ALIGNOF(ProfilerEventsProcessor));
}


void ProfilerEventsProcessor::operator delete(void* ptr) {
  AlignedFree(ptr);
}


int CpuProfiler::GetProfilesCount() {
  // The count of profiles doesn't depend on a security token.
  return profiles_->profiles()->length();
}


CpuProfile* CpuProfiler::GetProfile(int index) {
  return profiles_->profiles()->at(index);
}


void CpuProfiler::DeleteAllProfiles() {
  if (is_profiling_) StopProcessor();
  ResetProfiles();
}


void CpuProfiler::DeleteProfile(CpuProfile* profile) {
  profiles_->RemoveProfile(profile);
  delete profile;
  if (profiles_->profiles()->is_empty() && !is_profiling_) {
    // If this was the last profile, clean up all accessory data as well.
    ResetProfiles();
  }
}

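// Receives code events observed on the VM thread and forwards them to the
// processor thread. Deopt events additionally enqueue a synthetic stack
// sample so the deoptimized frame is attributed correctly.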
void CpuProfiler::CodeEventHandler(const CodeEventsContainer& evt_rec) {
  switch (evt_rec.generic.type) {
    case CodeEventRecord::CODE_CREATION:
    case CodeEventRecord::CODE_MOVE:
    case CodeEventRecord::CODE_DISABLE_OPT:
      processor_->Enqueue(evt_rec);
      break;
    case CodeEventRecord::CODE_DEOPT: {
      const CodeDeoptEventRecord* rec = &evt_rec.CodeDeoptEventRecord_;
      Address pc = reinterpret_cast<Address>(rec->pc);
      int fp_to_sp_delta = rec->fp_to_sp_delta;
      processor_->Enqueue(evt_rec);
      processor_->AddDeoptStack(isolate_, pc, fp_to_sp_delta);
      break;
    }
    default:
      UNREACHABLE();
  }
}

CpuProfiler::CpuProfiler(Isolate* isolate)
    : isolate_(isolate),
      sampling_interval_(base::TimeDelta::FromMicroseconds(
          FLAG_cpu_profiler_sampling_interval)),
      profiles_(new CpuProfilesCollection(isolate)),
      is_profiling_(false) {
  profiles_->set_cpu_profiler(this);
}

CpuProfiler::CpuProfiler(Isolate* isolate, CpuProfilesCollection* test_profiles,
                         ProfileGenerator* test_generator,
                         ProfilerEventsProcessor* test_processor)
    : isolate_(isolate),
      sampling_interval_(base::TimeDelta::FromMicroseconds(
          FLAG_cpu_profiler_sampling_interval)),
      profiles_(test_profiles),
      generator_(test_generator),
      processor_(test_processor),
      is_profiling_(false) {
  profiles_->set_cpu_profiler(this);
}

CpuProfiler::~CpuProfiler() {
  DCHECK(!is_profiling_);
}

void CpuProfiler::set_sampling_interval(base::TimeDelta value) {
  DCHECK(!is_profiling_);
  sampling_interval_ = value;
}

void CpuProfiler::ResetProfiles() {
  profiles_.reset(new CpuProfilesCollection(isolate_));
  profiles_->set_cpu_profiler(this);
}

void CpuProfiler::CollectSample() {
  if (processor_) {
    processor_->AddCurrentStack(isolate_);
  }
}

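// A rough sketch of how an embedder typically drives this class through the
// public v8::CpuProfiler wrapper. Exact signatures vary across V8 versions,
// and v8_str() is a hypothetical string-building helper, so treat this as
// illustrative rather than authoritative:
//
//   v8::CpuProfiler* profiler = isolate->GetCpuProfiler();
//   v8::Local<v8::String> title = v8_str("my-profile");
//   profiler->StartProfiling(title, /* record_samples */ true);
//   // ... run the code to be profiled ...
//   v8::CpuProfile* profile = profiler->StopProfiling(title);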
void CpuProfiler::StartProfiling(const char* title, bool record_samples) {
  if (profiles_->StartProfiling(title, record_samples)) {
    StartProcessorIfNotStarted();
  }
}


void CpuProfiler::StartProfiling(String* title, bool record_samples) {
  StartProfiling(profiles_->GetName(title), record_samples);
  isolate_->debug()->feature_tracker()->Track(DebugFeatureTracker::kProfiler);
}


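// Brings up the profiling machinery on first use: creates the generator and
// processor, attaches this profiler as an observer of code events, seeds the
// code map with code that already exists in the heap, enables the sampler,
// and finally starts the processor thread.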
void CpuProfiler::StartProcessorIfNotStarted() {
  if (processor_) {
    processor_->AddCurrentStack(isolate_);
    return;
  }
  Logger* logger = isolate_->logger();
  // Suppress regular logging while the profiler is active; the flag is
  // restored in StopProcessor().
  saved_is_logging_ = logger->is_logging_;
  logger->is_logging_ = false;
  sampler::Sampler* sampler = logger->sampler();
  generator_.reset(new ProfileGenerator(profiles_.get()));
  processor_.reset(new ProfilerEventsProcessor(generator_.get(), sampler,
                                               sampling_interval_));
  logger->SetUpProfilerListener();
  ProfilerListener* profiler_listener = logger->profiler_listener();
  profiler_listener->AddObserver(this);
  is_profiling_ = true;
  isolate_->set_is_profiling(true);
  // Enumerate the code objects that already exist in the heap.
  DCHECK(isolate_->heap()->HasBeenSetUp());
  if (!FLAG_prof_browser_mode) {
    logger->LogCodeObjects();
  }
  logger->LogCompiledFunctions();
  logger->LogAccessorCallbacks();
  LogBuiltins();
  // Enable stack sampling.
  sampler->SetHasProcessingThread(true);
  sampler->IncreaseProfilingDepth();
  processor_->AddCurrentStack(isolate_);
  processor_->StartSynchronously();
}


CpuProfile* CpuProfiler::StopProfiling(const char* title) {
  if (!is_profiling_) return nullptr;
  StopProcessorIfLastProfile(title);
  CpuProfile* result = profiles_->StopProfiling(title);
  if (result) {
    result->Print();
  }
  return result;
}


CpuProfile* CpuProfiler::StopProfiling(String* title) {
  if (!is_profiling_) return nullptr;
  const char* profile_title = profiles_->GetName(title);
  StopProcessorIfLastProfile(profile_title);
  return profiles_->StopProfiling(profile_title);
}


void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
  if (profiles_->IsLastProfile(title)) {
    StopProcessor();
  }
}


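// Tears profiling down roughly in the reverse order of
// StartProcessorIfNotStarted(): detach the observer, stop the processor
// thread, take down the listener, release the processor and generator, and
// restore sampler and logger state.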
void CpuProfiler::StopProcessor() {
  Logger* logger = isolate_->logger();
  sampler::Sampler* sampler =
      reinterpret_cast<sampler::Sampler*>(logger->ticker_);
  is_profiling_ = false;
  isolate_->set_is_profiling(false);
  ProfilerListener* profiler_listener = logger->profiler_listener();
  profiler_listener->RemoveObserver(this);
  processor_->StopSynchronously();
  logger->TearDownProfilerListener();
  processor_.reset();
  generator_.reset();
  sampler->SetHasProcessingThread(false);
  sampler->DecreaseProfilingDepth();
  logger->is_logging_ = saved_is_logging_;
}


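// Reports every builtin to the processor so that samples landing in builtin
// code can be resolved by address in the code map.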
void CpuProfiler::LogBuiltins() {
  Builtins* builtins = isolate_->builtins();
  DCHECK(builtins->is_initialized());
  for (int i = 0; i < Builtins::builtin_count; i++) {
    CodeEventsContainer evt_rec(CodeEventRecord::REPORT_BUILTIN);
    ReportBuiltinEventRecord* rec = &evt_rec.ReportBuiltinEventRecord_;
    Builtins::Name id = static_cast<Builtins::Name>(i);
    rec->start = builtins->builtin(id)->address();
    rec->builtin_id = id;
    processor_->Enqueue(evt_rec);
  }
}

}  // namespace internal
}  // namespace v8