Home | History | Annotate | Download | only in profiler
      1 // Copyright 2012 the V8 project authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "src/profiler/profile-generator.h"
      6 
      7 #include "src/ast/scopeinfo.h"
      8 #include "src/debug/debug.h"
      9 #include "src/deoptimizer.h"
     10 #include "src/global-handles.h"
     11 #include "src/profiler/profile-generator-inl.h"
     12 #include "src/profiler/sampler.h"
     13 #include "src/splay-tree-inl.h"
     14 #include "src/unicode.h"
     15 
     16 namespace v8 {
     17 namespace internal {
     18 
     19 
// Starts out empty; pc-offset -> line mappings are added via SetPosition().
JITLineInfoTable::JITLineInfoTable() {}
     21 
     22 
// The map member cleans itself up; nothing else is owned.
JITLineInfoTable::~JITLineInfoTable() {}
     24 
     25 
     26 void JITLineInfoTable::SetPosition(int pc_offset, int line) {
     27   DCHECK(pc_offset >= 0);
     28   DCHECK(line > 0);  // The 1-based number of the source line.
     29   if (GetSourceLineNumber(pc_offset) != line) {
     30     pc_offset_map_.insert(std::make_pair(pc_offset, line));
     31   }
     32 }
     33 
     34 
     35 int JITLineInfoTable::GetSourceLineNumber(int pc_offset) const {
     36   PcOffsetMap::const_iterator it = pc_offset_map_.lower_bound(pc_offset);
     37   if (it == pc_offset_map_.end()) {
     38     if (pc_offset_map_.empty()) return v8::CpuProfileNode::kNoLineNumberInfo;
     39     return (--pc_offset_map_.end())->second;
     40   }
     41   return it->second;
     42 }
     43 
     44 
// Shared sentinel strings used when a code entry has no prefix, resource
// name, bailout reason or deopt reason. Compared by pointer identity
// elsewhere (e.g. ProfileNode::Print), so these single definitions matter.
const char* const CodeEntry::kEmptyNamePrefix = "";
const char* const CodeEntry::kEmptyResourceName = "";
const char* const CodeEntry::kEmptyBailoutReason = "";
const char* const CodeEntry::kNoDeoptReason = "";
     50 
// The entry owns its optional JIT line info table.
CodeEntry::~CodeEntry() {
  delete line_info_;
}
     54 
     55 
// Hash used to de-duplicate code entries referring to the same function.
// Must agree with IsSameFunctionAs(): equal entries must hash equally.
uint32_t CodeEntry::GetHash() const {
  uint32_t hash = ComputeIntegerHash(tag(), v8::internal::kZeroHashSeed);
  if (script_id_ != v8::UnboundScript::kNoScriptId) {
    // Entries backed by a real script are identified by script id and
    // start position alone.
    hash ^= ComputeIntegerHash(static_cast<uint32_t>(script_id_),
                               v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(static_cast<uint32_t>(position_),
                               v8::internal::kZeroHashSeed);
  } else {
    // Otherwise hash the identity (pointer value) of the name strings —
    // IsSameFunctionAs compares the same pointers, so this presumably
    // relies on the strings being uniquified. NOTE(review): confirm the
    // strings come from an interning storage.
    hash ^= ComputeIntegerHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_prefix_)),
        v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_)),
        v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_)),
        v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(line_number_, v8::internal::kZeroHashSeed);
  }
  return hash;
}
     77 
     78 
     79 bool CodeEntry::IsSameFunctionAs(CodeEntry* entry) const {
     80   if (this == entry) return true;
     81   if (script_id_ != v8::UnboundScript::kNoScriptId) {
     82     return script_id_ == entry->script_id_ && position_ == entry->position_;
     83   }
     84   return name_prefix_ == entry->name_prefix_ && name_ == entry->name_ &&
     85          resource_name_ == entry->resource_name_ &&
     86          line_number_ == entry->line_number_;
     87 }
     88 
     89 
// Re-tags the entry as a builtin and records which builtin it is; both
// values live in the packed bit_field_.
void CodeEntry::SetBuiltinId(Builtins::Name id) {
  bit_field_ = TagField::update(bit_field_, Logger::BUILTIN_TAG);
  bit_field_ = BuiltinIdField::update(bit_field_, id);
}
     94 
     95 
     96 int CodeEntry::GetSourceLine(int pc_offset) const {
     97   if (line_info_ && !line_info_->empty()) {
     98     return line_info_->GetSourceLineNumber(pc_offset);
     99   }
    100   return v8::CpuProfileNode::kNoLineNumberInfo;
    101 }
    102 
    103 
// Copies script id, start position and bailout reason from the shared
// function info. No-op when the function has no real Script attached.
void CodeEntry::FillFunctionInfo(SharedFunctionInfo* shared) {
  if (!shared->script()->IsScript()) return;
  Script* script = Script::cast(shared->script());
  set_script_id(script->id());
  set_position(shared->start_position());
  set_bailout_reason(GetBailoutReason(shared->disable_optimization_reason()));
}
    111 
    112 
// Builds the deopt stack for the deopt previously recorded on this entry
// (pc_offset_, deopt_position_, deopt_reason_). Must only be called while
// has_deopt_info() is true.
CpuProfileDeoptInfo CodeEntry::GetDeoptInfo() {
  DCHECK(has_deopt_info());

  CpuProfileDeoptInfo info;
  info.deopt_reason = deopt_reason_;
  if (inlined_function_infos_.empty()) {
    // No inlining: a single frame in this function suffices.
    info.stack.push_back(CpuProfileDeoptFrame(
        {script_id_, position_ + deopt_position_.position()}));
    return info;
  }
  // Copy the only branch from the inlining tree where the deopt happened.
  SourcePosition position = deopt_position_;
  int inlining_id = InlinedFunctionInfo::kNoParentId;
  // Find which inlined function's deopt pc list contains our pc offset;
  // deopt_pc_offsets is searched with binary_search, so it is expected to
  // be sorted.
  for (size_t i = 0; i < inlined_function_infos_.size(); ++i) {
    InlinedFunctionInfo& current_info = inlined_function_infos_.at(i);
    if (std::binary_search(current_info.deopt_pc_offsets.begin(),
                           current_info.deopt_pc_offsets.end(), pc_offset_)) {
      inlining_id = static_cast<int>(i);
      break;
    }
  }
  // Walk up the parent chain, emitting one frame per inlining level. The
  // position carried across iterations is the inline call site in the
  // enclosing function.
  while (inlining_id != InlinedFunctionInfo::kNoParentId) {
    InlinedFunctionInfo& inlined_info = inlined_function_infos_.at(inlining_id);
    info.stack.push_back(
        CpuProfileDeoptFrame({inlined_info.script_id,
                              inlined_info.start_position + position.raw()}));
    position = inlined_info.inline_position;
    inlining_id = inlined_info.parent_id;
  }
  return info;
}
    144 
    145 
// Moves the deopt info recorded on |entry| onto this node, clearing it on
// the entry so the same deopt is not reported twice.
void ProfileNode::CollectDeoptInfo(CodeEntry* entry) {
  deopt_infos_.push_back(entry->GetDeoptInfo());
  entry->clear_deopt_info();
}
    150 
    151 
    152 ProfileNode* ProfileNode::FindChild(CodeEntry* entry) {
    153   HashMap::Entry* map_entry = children_.Lookup(entry, CodeEntryHash(entry));
    154   return map_entry != NULL ?
    155       reinterpret_cast<ProfileNode*>(map_entry->value) : NULL;
    156 }
    157 
    158 
    159 ProfileNode* ProfileNode::FindOrAddChild(CodeEntry* entry) {
    160   HashMap::Entry* map_entry =
    161       children_.LookupOrInsert(entry, CodeEntryHash(entry));
    162   ProfileNode* node = reinterpret_cast<ProfileNode*>(map_entry->value);
    163   if (node == NULL) {
    164     // New node added.
    165     node = new ProfileNode(tree_, entry);
    166     map_entry->value = node;
    167     children_list_.Add(node);
    168   }
    169   return node;
    170 }
    171 
    172 
// Bumps the tick counter for |src_line| on this node. Lines without line
// info are ignored.
void ProfileNode::IncrementLineTicks(int src_line) {
  if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) return;
  // Increment a hit counter of a certain source line.
  // Add a new source line if not found.
  // The counter is stored directly in the HashMap's void* value slot
  // (punned via uintptr_t); a fresh slot starts at NULL, i.e. zero.
  HashMap::Entry* e =
      line_ticks_.LookupOrInsert(reinterpret_cast<void*>(src_line), src_line);
  DCHECK(e);
  e->value = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(e->value) + 1);
}
    182 
    183 
// Copies this node's per-line tick counters into the caller-provided
// |entries| buffer of capacity |length|. Returns false when the buffer is
// missing or too small; returns true (writing nothing) when there is no
// line info at all.
bool ProfileNode::GetLineTicks(v8::CpuProfileNode::LineTick* entries,
                               unsigned int length) const {
  if (entries == NULL || length == 0) return false;

  unsigned line_count = line_ticks_.occupancy();

  if (line_count == 0) return true;
  if (length < line_count) return false;

  v8::CpuProfileNode::LineTick* entry = entries;

  // Both the line number and its hit count are stored punned inside the
  // HashMap's void* key/value slots (see IncrementLineTicks).
  for (HashMap::Entry* p = line_ticks_.Start(); p != NULL;
       p = line_ticks_.Next(p), entry++) {
    entry->line =
        static_cast<unsigned int>(reinterpret_cast<uintptr_t>(p->key));
    entry->hit_count =
        static_cast<unsigned int>(reinterpret_cast<uintptr_t>(p->value));
  }

  return true;
}
    205 
    206 
// Debug-prints this node — self ticks, name, script id, node id, optional
// resource location, recorded deopts and bailout reason — then recurses
// into all children with increased indentation.
void ProfileNode::Print(int indent) {
  base::OS::Print("%5u %*s %s%s %d #%d", self_ticks_, indent, "",
                  entry_->name_prefix(), entry_->name(), entry_->script_id(),
                  id());
  if (entry_->resource_name()[0] != '\0')
    base::OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number());
  base::OS::Print("\n");
  for (size_t i = 0; i < deopt_infos_.size(); ++i) {
    CpuProfileDeoptInfo& info = deopt_infos_[i];
    // stack[0] is the function where the deopt happened; the remaining
    // frames are the inline points leading to it (see GetDeoptInfo).
    base::OS::Print(
        "%*s;;; deopted at script_id: %d position: %d with reason '%s'.\n",
        indent + 10, "", info.stack[0].script_id, info.stack[0].position,
        info.deopt_reason);
    for (size_t index = 1; index < info.stack.size(); ++index) {
      base::OS::Print("%*s;;;     Inline point: script_id %d position: %d.\n",
                      indent + 10, "", info.stack[index].script_id,
                      info.stack[index].position);
    }
  }
  const char* bailout_reason = entry_->bailout_reason();
  // Bailout reasons are compared by pointer against the shared sentinel
  // strings, not by content.
  if (bailout_reason != GetBailoutReason(BailoutReason::kNoReason) &&
      bailout_reason != CodeEntry::kEmptyBailoutReason) {
    base::OS::Print("%*s bailed out due to '%s'\n", indent + 10, "",
                    bailout_reason);
  }
  for (HashMap::Entry* p = children_.Start();
       p != NULL;
       p = children_.Next(p)) {
    reinterpret_cast<ProfileNode*>(p->value)->Print(indent + 2);
  }
}
    238 
    239 
    240 class DeleteNodesCallback {
    241  public:
    242   void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }
    243 
    244   void AfterAllChildrenTraversed(ProfileNode* node) {
    245     delete node;
    246   }
    247 
    248   void AfterChildTraversed(ProfileNode*, ProfileNode*) { }
    249 };
    250 
    251 
// Creates a tree whose root is a synthetic "(root)" function entry; node
// and function ids start at 1 (0 is reserved as "no id").
ProfileTree::ProfileTree(Isolate* isolate)
    : root_entry_(Logger::FUNCTION_TAG, "(root)"),
      next_node_id_(1),
      root_(new ProfileNode(this, &root_entry_)),
      isolate_(isolate),
      next_function_id_(1),
      function_ids_(ProfileNode::CodeEntriesMatch) {}
    259 
    260 
// Deletes all nodes bottom-up via a post-order traversal.
ProfileTree::~ProfileTree() {
  DeleteNodesCallback cb;
  TraverseDepthFirst(&cb);
}
    265 
    266 
    267 unsigned ProfileTree::GetFunctionId(const ProfileNode* node) {
    268   CodeEntry* code_entry = node->entry();
    269   HashMap::Entry* entry =
    270       function_ids_.LookupOrInsert(code_entry, code_entry->GetHash());
    271   if (!entry->value) {
    272     entry->value = reinterpret_cast<void*>(next_function_id_++);
    273   }
    274   return static_cast<unsigned>(reinterpret_cast<uintptr_t>(entry->value));
    275 }
    276 
    277 
    278 ProfileNode* ProfileTree::AddPathFromEnd(const Vector<CodeEntry*>& path,
    279                                          int src_line) {
    280   ProfileNode* node = root_;
    281   CodeEntry* last_entry = NULL;
    282   for (CodeEntry** entry = path.start() + path.length() - 1;
    283        entry != path.start() - 1;
    284        --entry) {
    285     if (*entry != NULL) {
    286       node = node->FindOrAddChild(*entry);
    287       last_entry = *entry;
    288     }
    289   }
    290   if (last_entry && last_entry->has_deopt_info()) {
    291     node->CollectDeoptInfo(last_entry);
    292   }
    293   node->IncrementSelfTicks();
    294   if (src_line != v8::CpuProfileNode::kNoLineNumberInfo) {
    295     node->IncrementLineTicks(src_line);
    296   }
    297   return node;
    298 }
    299 
    300 
// A (source node, destination node) pair used by tree-walking helpers.
struct NodesPair {
  NodesPair(ProfileNode* src, ProfileNode* dst)
      : src(src), dst(dst) { }
  ProfileNode* src;
  ProfileNode* dst;
};
    307 
    308 
// Cursor over one node's children, used as a stack frame by the iterative
// depth-first traversal below.
class Position {
 public:
  explicit Position(ProfileNode* node)
      : node(node), child_idx_(0) { }
  // Child currently being visited; only valid while has_current_child().
  INLINE(ProfileNode* current_child()) {
    return node->children()->at(child_idx_);
  }
  INLINE(bool has_current_child()) {
    return child_idx_ < node->children()->length();
  }
  INLINE(void next_child()) { ++child_idx_; }

  ProfileNode* node;
 private:
  int child_idx_;
};
    325 
    326 
    327 // Non-recursive implementation of a depth-first post-order tree traversal.
// Non-recursive implementation of a depth-first post-order tree traversal.
// The callback receives BeforeTraversingChild before each descent,
// AfterAllChildrenTraversed once a node's children are exhausted, and
// AfterChildTraversed on the way back up.
template <typename Callback>
void ProfileTree::TraverseDepthFirst(Callback* callback) {
  List<Position> stack(10);
  stack.Add(Position(root_));
  while (stack.length() > 0) {
    // NOTE: |current| may be invalidated by stack.Add below; it is not
    // used again after the Add in that branch.
    Position& current = stack.last();
    if (current.has_current_child()) {
      callback->BeforeTraversingChild(current.node, current.current_child());
      stack.Add(Position(current.current_child()));
    } else {
      callback->AfterAllChildrenTraversed(current.node);
      if (stack.length() > 1) {
        Position& parent = stack[stack.length() - 2];
        callback->AfterChildTraversed(parent.node, current.node);
        parent.next_child();
      }
      // Remove child from the stack.
      stack.RemoveLast();
    }
  }
}
    349 
    350 
// A profile records its start time immediately; individual samples are
// kept only when |record_samples| is set (see AddPath).
CpuProfile::CpuProfile(Isolate* isolate, const char* title, bool record_samples)
    : title_(title),
      record_samples_(record_samples),
      start_time_(base::TimeTicks::HighResolutionNow()),
      top_down_(isolate) {}
    356 
    357 
    358 void CpuProfile::AddPath(base::TimeTicks timestamp,
    359                          const Vector<CodeEntry*>& path, int src_line) {
    360   ProfileNode* top_frame_node = top_down_.AddPathFromEnd(path, src_line);
    361   if (record_samples_) {
    362     timestamps_.Add(timestamp);
    363     samples_.Add(top_frame_node);
    364   }
    365 }
    366 
    367 
// Finalizes the profile by stamping its end time. (Despite the name, only
// the end time is computed here in this version of the code.)
void CpuProfile::CalculateTotalTicksAndSamplingRate() {
  end_time_ = base::TimeTicks::HighResolutionNow();
}
    371 
    372 
// Debug-prints the whole top-down call tree.
void CpuProfile::Print() {
  base::OS::Print("[Top down]:\n");
  top_down_.Print();
}
    377 
    378 
// The splay tree stores non-owning CodeEntry pointers; nothing to free.
CodeMap::~CodeMap() {}
    380 
    381 
// Sentinel "no key" value for the address-keyed splay tree.
const CodeMap::CodeTreeConfig::Key CodeMap::CodeTreeConfig::kNoKey = NULL;
    383 
    384 
// Registers a code object at [addr, addr + size). Any previously known
// code overlapping that range is removed first so ranges never overlap.
void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) {
  DeleteAllCoveredCode(addr, addr + size);
  CodeTree::Locator locator;
  tree_.Insert(addr, &locator);
  locator.set_value(CodeEntryInfo(entry, size));
}
    391 
    392 
// Removes every tree entry whose code range intersects [start, end).
// Walks backwards from just below |end| using greatest-less-than lookups,
// collecting start addresses first and removing them afterwards so the
// tree is not mutated while being searched.
void CodeMap::DeleteAllCoveredCode(Address start, Address end) {
  List<Address> to_delete;
  Address addr = end - 1;
  while (addr >= start) {
    CodeTree::Locator locator;
    if (!tree_.FindGreatestLessThan(addr, &locator)) break;
    Address start2 = locator.key(), end2 = start2 + locator.value().size;
    // Intersection test for [start2, end2) vs [start, end).
    if (start2 < end && start < end2) to_delete.Add(start2);
    addr = start2 - 1;
  }
  for (int i = 0; i < to_delete.length(); ++i) tree_.Remove(to_delete[i]);
}
    405 
    406 
    407 CodeEntry* CodeMap::FindEntry(Address addr) {
    408   CodeTree::Locator locator;
    409   if (tree_.FindGreatestLessThan(addr, &locator)) {
    410     // locator.key() <= addr. Need to check that addr is within entry.
    411     const CodeEntryInfo& entry = locator.value();
    412     if (addr < (locator.key() + entry.size)) {
    413       return entry.entry;
    414     }
    415   }
    416   return NULL;
    417 }
    418 
    419 
    420 void CodeMap::MoveCode(Address from, Address to) {
    421   if (from == to) return;
    422   CodeTree::Locator locator;
    423   if (!tree_.Find(from, &locator)) return;
    424   CodeEntryInfo entry = locator.value();
    425   tree_.Remove(from);
    426   AddCode(to, entry.entry, entry.size);
    427 }
    428 
    429 
// Per-entry visitor for CodeMap::Print: address, size, entry name.
void CodeMap::CodeTreePrinter::Call(
    const Address& key, const CodeMap::CodeEntryInfo& value) {
  base::OS::Print("%p %5d %s\n", key, value.size, value.entry->name());
}
    434 
    435 
// Debug-prints every registered code object, one line each.
void CodeMap::Print() {
  CodeTreePrinter printer;
  tree_.ForEach(&printer);
}
    440 
    441 
// The semaphore (initialized to 1, i.e. used as a mutex) guards
// current_profiles_ against concurrent access from the sampling thread.
CpuProfilesCollection::CpuProfilesCollection(Heap* heap)
    : function_and_resource_names_(heap),
      isolate_(heap->isolate()),
      current_profiles_semaphore_(1) {}
    446 
    447 
// List::Iterate helper: frees one owned CodeEntry.
static void DeleteCodeEntry(CodeEntry** entry_ptr) {
  delete *entry_ptr;
}
    451 
    452 
// List::Iterate helper: frees one owned CpuProfile.
static void DeleteCpuProfile(CpuProfile** profile_ptr) {
  delete *profile_ptr;
}
    456 
    457 
// The collection owns all profiles (finished and in-progress) and all
// code entries it handed out via NewCodeEntry.
CpuProfilesCollection::~CpuProfilesCollection() {
  finished_profiles_.Iterate(DeleteCpuProfile);
  current_profiles_.Iterate(DeleteCpuProfile);
  code_entries_.Iterate(DeleteCodeEntry);
}
    463 
    464 
    465 bool CpuProfilesCollection::StartProfiling(const char* title,
    466                                            bool record_samples) {
    467   current_profiles_semaphore_.Wait();
    468   if (current_profiles_.length() >= kMaxSimultaneousProfiles) {
    469     current_profiles_semaphore_.Signal();
    470     return false;
    471   }
    472   for (int i = 0; i < current_profiles_.length(); ++i) {
    473     if (strcmp(current_profiles_[i]->title(), title) == 0) {
    474       // Ignore attempts to start profile with the same title...
    475       current_profiles_semaphore_.Signal();
    476       // ... though return true to force it collect a sample.
    477       return true;
    478     }
    479   }
    480   current_profiles_.Add(new CpuProfile(isolate_, title, record_samples));
    481   current_profiles_semaphore_.Signal();
    482   return true;
    483 }
    484 
    485 
// Stops and returns the most recently started profile matching |title|
// (an empty title matches any profile). Returns NULL when nothing
// matches. The finished profile is finalized and moved to
// finished_profiles_; only list manipulation happens under the lock.
CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
  const int title_len = StrLength(title);
  CpuProfile* profile = NULL;
  current_profiles_semaphore_.Wait();
  for (int i = current_profiles_.length() - 1; i >= 0; --i) {
    if (title_len == 0 || strcmp(current_profiles_[i]->title(), title) == 0) {
      profile = current_profiles_.Remove(i);
      break;
    }
  }
  current_profiles_semaphore_.Signal();

  if (profile == NULL) return NULL;
  profile->CalculateTotalTicksAndSamplingRate();
  finished_profiles_.Add(profile);
  return profile;
}
    503 
    504 
    505 bool CpuProfilesCollection::IsLastProfile(const char* title) {
    506   // Called from VM thread, and only it can mutate the list,
    507   // so no locking is needed here.
    508   if (current_profiles_.length() != 1) return false;
    509   return StrLength(title) == 0
    510       || strcmp(current_profiles_[0]->title(), title) == 0;
    511 }
    512 
    513 
    514 void CpuProfilesCollection::RemoveProfile(CpuProfile* profile) {
    515   // Called from VM thread for a completed profile.
    516   for (int i = 0; i < finished_profiles_.length(); i++) {
    517     if (profile == finished_profiles_[i]) {
    518       finished_profiles_.Remove(i);
    519       return;
    520     }
    521   }
    522   UNREACHABLE();
    523 }
    524 
    525 
// Delivers one sampled stack to every in-progress profile. Called on the
// profiler thread; the semaphore keeps the list stable while iterating.
void CpuProfilesCollection::AddPathToCurrentProfiles(
    base::TimeTicks timestamp, const Vector<CodeEntry*>& path, int src_line) {
  // As starting / stopping profiles is rare relatively to this
  // method, we don't bother minimizing the duration of lock holding,
  // e.g. copying contents of the list to a local vector.
  current_profiles_semaphore_.Wait();
  for (int i = 0; i < current_profiles_.length(); ++i) {
    current_profiles_[i]->AddPath(timestamp, path, src_line);
  }
  current_profiles_semaphore_.Signal();
}
    537 
    538 
// Creates a CodeEntry and registers it in code_entries_, which owns it
// for the lifetime of the collection (freed in the destructor).
CodeEntry* CpuProfilesCollection::NewCodeEntry(
    Logger::LogEventsAndTags tag, const char* name, const char* name_prefix,
    const char* resource_name, int line_number, int column_number,
    JITLineInfoTable* line_info, Address instruction_start) {
  CodeEntry* code_entry =
      new CodeEntry(tag, name, name_prefix, resource_name, line_number,
                    column_number, line_info, instruction_start);
  code_entries_.Add(code_entry);
  return code_entry;
}
    549 
    550 
// Names of the synthetic entries used for ticks that cannot be attributed
// to a concrete JS function (see EntryForVMState / RecordTickSample).
const char* const ProfileGenerator::kProgramEntryName =
    "(program)";
const char* const ProfileGenerator::kIdleEntryName =
    "(idle)";
const char* const ProfileGenerator::kGarbageCollectorEntryName =
    "(garbage collector)";
const char* const ProfileGenerator::kUnresolvedFunctionName =
    "(unresolved function)";
    559 
    560 
// Pre-creates the synthetic code entries (program / idle / GC /
// unresolved); they are owned by |profiles| via NewCodeEntry.
ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles)
    : profiles_(profiles),
      program_entry_(
          profiles->NewCodeEntry(Logger::FUNCTION_TAG, kProgramEntryName)),
      idle_entry_(
          profiles->NewCodeEntry(Logger::FUNCTION_TAG, kIdleEntryName)),
      gc_entry_(
          profiles->NewCodeEntry(Logger::BUILTIN_TAG,
                                 kGarbageCollectorEntryName)),
      unresolved_entry_(
          profiles->NewCodeEntry(Logger::FUNCTION_TAG,
                                 kUnresolvedFunctionName)) {
}
    574 
    575 
// Symbolizes one tick sample — resolving pc, top-of-stack and every stack
// address to CodeEntry pointers and picking a source line — and feeds the
// resulting path into all active profiles. Unresolvable frames stay NULL
// and are skipped later by AddPathFromEnd.
void ProfileGenerator::RecordTickSample(const TickSample& sample) {
  // Allocate space for stack frames + pc + function + vm-state.
  ScopedVector<CodeEntry*> entries(sample.frames_count + 3);
  // As actual number of decoded code entries may vary, initialize
  // entries vector with NULL values.
  CodeEntry** entry = entries.start();
  memset(entry, 0, entries.length() * sizeof(*entry));

  // The ProfileNode knows nothing about all versions of generated code for
  // the same JS function. The line number information associated with
  // the latest version of generated code is used to find a source line number
  // for a JS function. Then, the detected source line is passed to
  // ProfileNode to increase the tick count for this source line.
  int src_line = v8::CpuProfileNode::kNoLineNumberInfo;
  bool src_line_not_found = true;

  if (sample.pc != NULL) {
    if (sample.has_external_callback && sample.state == EXTERNAL &&
        sample.top_frame_type == StackFrame::EXIT) {
      // Don't use PC when in external callback code, as it can point
      // inside callback's code, and we will erroneously report
      // that a callback calls itself.
      *entry++ = code_map_.FindEntry(sample.external_callback);
    } else {
      CodeEntry* pc_entry = code_map_.FindEntry(sample.pc);
      // If there is no pc_entry we're likely in native code.
      // Find out, if top of stack was pointing inside a JS function
      // meaning that we have encountered a frameless invocation.
      if (!pc_entry && (sample.top_frame_type == StackFrame::JAVA_SCRIPT ||
                        sample.top_frame_type == StackFrame::OPTIMIZED)) {
        pc_entry = code_map_.FindEntry(sample.tos);
      }
      // If pc is in the function code before it set up stack frame or after the
      // frame was destroyed SafeStackFrameIterator incorrectly thinks that
      // ebp contains return address of the current function and skips caller's
      // frame. Check for this case and just skip such samples.
      if (pc_entry) {
        int pc_offset =
            static_cast<int>(sample.pc - pc_entry->instruction_start());
        src_line = pc_entry->GetSourceLine(pc_offset);
        if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
          // Fall back to the function's own line when the pc offset has
          // no line table entry.
          src_line = pc_entry->line_number();
        }
        src_line_not_found = false;
        *entry++ = pc_entry;

        if (pc_entry->builtin_id() == Builtins::kFunctionPrototypeApply ||
            pc_entry->builtin_id() == Builtins::kFunctionPrototypeCall) {
          // When current function is either the Function.prototype.apply or the
          // Function.prototype.call builtin the top frame is either frame of
          // the calling JS function or internal frame.
          // In the latter case we know the caller for sure but in the
          // former case we don't so we simply replace the frame with
          // 'unresolved' entry.
          if (sample.top_frame_type == StackFrame::JAVA_SCRIPT) {
            *entry++ = unresolved_entry_;
          }
        }
      }
    }

    // Symbolize the remaining stack addresses in order.
    for (const Address* stack_pos = sample.stack,
           *stack_end = stack_pos + sample.frames_count;
         stack_pos != stack_end;
         ++stack_pos) {
      *entry = code_map_.FindEntry(*stack_pos);

      // Skip unresolved frames (e.g. internal frame) and get source line of
      // the first JS caller.
      if (src_line_not_found && *entry) {
        int pc_offset =
            static_cast<int>(*stack_pos - (*entry)->instruction_start());
        src_line = (*entry)->GetSourceLine(pc_offset);
        if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
          src_line = (*entry)->line_number();
        }
        src_line_not_found = false;
      }

      entry++;
    }
  }

  if (FLAG_prof_browser_mode) {
    bool no_symbolized_entries = true;
    for (CodeEntry** e = entries.start(); e != entry; ++e) {
      if (*e != NULL) {
        no_symbolized_entries = false;
        break;
      }
    }
    // If no frames were symbolized, put the VM state entry in.
    if (no_symbolized_entries) {
      *entry++ = EntryForVMState(sample.state);
    }
  }

  profiles_->AddPathToCurrentProfiles(sample.timestamp, entries, src_line);
}
    675 
    676 
    677 CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
    678   switch (tag) {
    679     case GC:
    680       return gc_entry_;
    681     case JS:
    682     case COMPILER:
    683     // DOM events handlers are reported as OTHER / EXTERNAL entries.
    684     // To avoid confusing people, let's put all these entries into
    685     // one bucket.
    686     case OTHER:
    687     case EXTERNAL:
    688       return program_entry_;
    689     case IDLE:
    690       return idle_entry_;
    691     default: return NULL;
    692   }
    693 }
    694 
    695 }  // namespace internal
    696 }  // namespace v8
    697