// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/profiler/profile-generator.h"

#include "src/ast/scopeinfo.h"
#include "src/base/adapters.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/global-handles.h"
#include "src/profiler/cpu-profiler.h"
#include "src/profiler/profile-generator-inl.h"
#include "src/profiler/tick-sample.h"
#include "src/unicode.h"

namespace v8 {
namespace internal {


JITLineInfoTable::JITLineInfoTable() {}


JITLineInfoTable::~JITLineInfoTable() {}


void JITLineInfoTable::SetPosition(int pc_offset, int line) {
  DCHECK(pc_offset >= 0);
  DCHECK(line > 0);  // The 1-based number of the source line.
  if (GetSourceLineNumber(pc_offset) != line) {
    pc_offset_map_.insert(std::make_pair(pc_offset, line));
  }
}

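// Returns the line recorded for the lowest pc offset that is >= |pc_offset|.
// If |pc_offset| lies beyond the last recorded offset, the line of the last
// entry is returned; an empty table yields kNoLineNumberInfo.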
int JITLineInfoTable::GetSourceLineNumber(int pc_offset) const {
  PcOffsetMap::const_iterator it = pc_offset_map_.lower_bound(pc_offset);
  if (it == pc_offset_map_.end()) {
    if (pc_offset_map_.empty()) return v8::CpuProfileNode::kNoLineNumberInfo;
    return (--pc_offset_map_.end())->second;
  }
  return it->second;
}


const char* const CodeEntry::kEmptyNamePrefix = "";
const char* const CodeEntry::kEmptyResourceName = "";
const char* const CodeEntry::kEmptyBailoutReason = "";
const char* const CodeEntry::kNoDeoptReason = "";

const char* const CodeEntry::kProgramEntryName = "(program)";
const char* const CodeEntry::kIdleEntryName = "(idle)";
const char* const CodeEntry::kGarbageCollectorEntryName = "(garbage collector)";
const char* const CodeEntry::kUnresolvedFunctionName = "(unresolved function)";

base::LazyDynamicInstance<CodeEntry, CodeEntry::ProgramEntryCreateTrait>::type
    CodeEntry::kProgramEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;

base::LazyDynamicInstance<CodeEntry, CodeEntry::IdleEntryCreateTrait>::type
    CodeEntry::kIdleEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;

base::LazyDynamicInstance<CodeEntry, CodeEntry::GCEntryCreateTrait>::type
    CodeEntry::kGCEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;

base::LazyDynamicInstance<CodeEntry,
                          CodeEntry::UnresolvedEntryCreateTrait>::type
    CodeEntry::kUnresolvedEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;

CodeEntry* CodeEntry::ProgramEntryCreateTrait::Create() {
  return new CodeEntry(Logger::FUNCTION_TAG, CodeEntry::kProgramEntryName);
}

CodeEntry* CodeEntry::IdleEntryCreateTrait::Create() {
  return new CodeEntry(Logger::FUNCTION_TAG, CodeEntry::kIdleEntryName);
}

CodeEntry* CodeEntry::GCEntryCreateTrait::Create() {
  return new CodeEntry(Logger::BUILTIN_TAG,
                       CodeEntry::kGarbageCollectorEntryName);
}

CodeEntry* CodeEntry::UnresolvedEntryCreateTrait::Create() {
  return new CodeEntry(Logger::FUNCTION_TAG,
                       CodeEntry::kUnresolvedFunctionName);
}

CodeEntry::~CodeEntry() {
  delete line_info_;
  for (auto location : inline_locations_) {
    for (auto entry : location.second) {
      delete entry;
    }
  }
}

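// Mixes the code tag with the same fields IsSameFunctionAs() compares:
// (script id, position) when a script id is known, otherwise the name,
// prefix and resource-name pointers plus the line number.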
uint32_t CodeEntry::GetHash() const {
  uint32_t hash = ComputeIntegerHash(tag(), v8::internal::kZeroHashSeed);
  if (script_id_ != v8::UnboundScript::kNoScriptId) {
    hash ^= ComputeIntegerHash(static_cast<uint32_t>(script_id_),
                               v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(static_cast<uint32_t>(position_),
                               v8::internal::kZeroHashSeed);
  } else {
    hash ^= ComputeIntegerHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_prefix_)),
        v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_)),
        v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_)),
        v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(line_number_, v8::internal::kZeroHashSeed);
  }
  return hash;
}


bool CodeEntry::IsSameFunctionAs(CodeEntry* entry) const {
  if (this == entry) return true;
  if (script_id_ != v8::UnboundScript::kNoScriptId) {
    return script_id_ == entry->script_id_ && position_ == entry->position_;
  }
  return name_prefix_ == entry->name_prefix_ && name_ == entry->name_ &&
         resource_name_ == entry->resource_name_ &&
         line_number_ == entry->line_number_;
}


void CodeEntry::SetBuiltinId(Builtins::Name id) {
  bit_field_ = TagField::update(bit_field_, CodeEventListener::BUILTIN_TAG);
  bit_field_ = BuiltinIdField::update(bit_field_, id);
}


int CodeEntry::GetSourceLine(int pc_offset) const {
  if (line_info_ && !line_info_->empty()) {
    return line_info_->GetSourceLineNumber(pc_offset);
  }
  return v8::CpuProfileNode::kNoLineNumberInfo;
}

void CodeEntry::AddInlineStack(int pc_offset,
                               std::vector<CodeEntry*>& inline_stack) {
  // It's better to use std::move to place the vector into the map,
  // but it's not supported by the current libstdc++ on macOS.
  inline_locations_.insert(std::make_pair(pc_offset, std::vector<CodeEntry*>()))
      .first->second.swap(inline_stack);
}

const std::vector<CodeEntry*>* CodeEntry::GetInlineStack(int pc_offset) const {
  auto it = inline_locations_.find(pc_offset);
  return it != inline_locations_.end() ? &it->second : NULL;
}

void CodeEntry::AddDeoptInlinedFrames(
    int deopt_id, std::vector<DeoptInlinedFrame>& inlined_frames) {
  // It's better to use std::move to place the vector into the map,
  // but it's not supported by the current libstdc++ on macOS.
  deopt_inlined_frames_
      .insert(std::make_pair(deopt_id, std::vector<DeoptInlinedFrame>()))
      .first->second.swap(inlined_frames);
}

bool CodeEntry::HasDeoptInlinedFramesFor(int deopt_id) const {
  return deopt_inlined_frames_.find(deopt_id) != deopt_inlined_frames_.end();
}

void CodeEntry::FillFunctionInfo(SharedFunctionInfo* shared) {
  if (!shared->script()->IsScript()) return;
  Script* script = Script::cast(shared->script());
  set_script_id(script->id());
  set_position(shared->start_position());
  set_bailout_reason(GetBailoutReason(shared->disable_optimization_reason()));
}

CpuProfileDeoptInfo CodeEntry::GetDeoptInfo() {
  DCHECK(has_deopt_info());

  CpuProfileDeoptInfo info;
  info.deopt_reason = deopt_reason_;
  DCHECK_NE(Deoptimizer::DeoptInfo::kNoDeoptId, deopt_id_);
  if (deopt_inlined_frames_.find(deopt_id_) == deopt_inlined_frames_.end()) {
    info.stack.push_back(CpuProfileDeoptFrame(
        {script_id_, position_ + deopt_position_.position()}));
  } else {
    size_t deopt_position = deopt_position_.raw();
    // Copy stack of inlined frames where the deopt happened.
    std::vector<DeoptInlinedFrame>& frames = deopt_inlined_frames_[deopt_id_];
    for (DeoptInlinedFrame& inlined_frame : base::Reversed(frames)) {
      info.stack.push_back(CpuProfileDeoptFrame(
          {inlined_frame.script_id, deopt_position + inlined_frame.position}));
      deopt_position = 0;  // Done with innermost frame.
    }
  }
  return info;
}


void ProfileNode::CollectDeoptInfo(CodeEntry* entry) {
  deopt_infos_.push_back(entry->GetDeoptInfo());
  entry->clear_deopt_info();
}


ProfileNode* ProfileNode::FindChild(CodeEntry* entry) {
  base::HashMap::Entry* map_entry =
      children_.Lookup(entry, CodeEntryHash(entry));
  return map_entry != NULL ?
      reinterpret_cast<ProfileNode*>(map_entry->value) : NULL;
}


ProfileNode* ProfileNode::FindOrAddChild(CodeEntry* entry) {
  base::HashMap::Entry* map_entry =
      children_.LookupOrInsert(entry, CodeEntryHash(entry));
  ProfileNode* node = reinterpret_cast<ProfileNode*>(map_entry->value);
  if (node == NULL) {
    // New node added.
    node = new ProfileNode(tree_, entry);
    map_entry->value = node;
    children_list_.Add(node);
  }
  return node;
}


void ProfileNode::IncrementLineTicks(int src_line) {
  if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) return;
  // Increment a hit counter of a certain source line.
  // Add a new source line if not found.
  base::HashMap::Entry* e =
      line_ticks_.LookupOrInsert(reinterpret_cast<void*>(src_line), src_line);
  DCHECK(e);
  e->value = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(e->value) + 1);
}


bool ProfileNode::GetLineTicks(v8::CpuProfileNode::LineTick* entries,
                               unsigned int length) const {
  if (entries == NULL || length == 0) return false;

  unsigned line_count = line_ticks_.occupancy();

  if (line_count == 0) return true;
  if (length < line_count) return false;

  v8::CpuProfileNode::LineTick* entry = entries;

  for (base::HashMap::Entry *p = line_ticks_.Start(); p != NULL;
       p = line_ticks_.Next(p), entry++) {
    entry->line =
        static_cast<unsigned int>(reinterpret_cast<uintptr_t>(p->key));
    entry->hit_count =
        static_cast<unsigned int>(reinterpret_cast<uintptr_t>(p->value));
  }

  return true;
}


void ProfileNode::Print(int indent) {
  base::OS::Print("%5u %*s %s%s %d #%d", self_ticks_, indent, "",
                  entry_->name_prefix(), entry_->name(), entry_->script_id(),
                  id());
  if (entry_->resource_name()[0] != '\0')
    base::OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number());
  base::OS::Print("\n");
  for (size_t i = 0; i < deopt_infos_.size(); ++i) {
    CpuProfileDeoptInfo& info = deopt_infos_[i];
    base::OS::Print("%*s;;; deopted at script_id: %d position: %" PRIuS
                    " with reason '%s'.\n",
                    indent + 10, "", info.stack[0].script_id,
                    info.stack[0].position, info.deopt_reason);
    for (size_t index = 1; index < info.stack.size(); ++index) {
      base::OS::Print("%*s;;;     Inline point: script_id %d position: %" PRIuS
                      ".\n",
                      indent + 10, "", info.stack[index].script_id,
                      info.stack[index].position);
    }
  }
  const char* bailout_reason = entry_->bailout_reason();
  if (bailout_reason != GetBailoutReason(BailoutReason::kNoReason) &&
      bailout_reason != CodeEntry::kEmptyBailoutReason) {
    base::OS::Print("%*s bailed out due to '%s'\n", indent + 10, "",
                    bailout_reason);
  }
  for (base::HashMap::Entry* p = children_.Start(); p != NULL;
       p = children_.Next(p)) {
    reinterpret_cast<ProfileNode*>(p->value)->Print(indent + 2);
  }
}


class DeleteNodesCallback {
 public:
  void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }

  void AfterAllChildrenTraversed(ProfileNode* node) {
    delete node;
  }

  void AfterChildTraversed(ProfileNode*, ProfileNode*) { }
};

ProfileTree::ProfileTree(Isolate* isolate)
    : root_entry_(CodeEventListener::FUNCTION_TAG, "(root)"),
      next_node_id_(1),
      root_(new ProfileNode(this, &root_entry_)),
      isolate_(isolate),
      next_function_id_(1),
      function_ids_(ProfileNode::CodeEntriesMatch) {}

ProfileTree::~ProfileTree() {
  DeleteNodesCallback cb;
  TraverseDepthFirst(&cb);
}


unsigned ProfileTree::GetFunctionId(const ProfileNode* node) {
  CodeEntry* code_entry = node->entry();
  base::HashMap::Entry* entry =
      function_ids_.LookupOrInsert(code_entry, code_entry->GetHash());
  if (!entry->value) {
    entry->value = reinterpret_cast<void*>(next_function_id_++);
  }
  return static_cast<unsigned>(reinterpret_cast<uintptr_t>(entry->value));
}

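// Adds one sampled stack to the tree. |path| is ordered from the topmost
// frame to the outermost caller, so it is walked in reverse to descend from
// the root; deopt info attached to the topmost entry is collected into the
// resulting leaf node.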
ProfileNode* ProfileTree::AddPathFromEnd(const std::vector<CodeEntry*>& path,
                                         int src_line, bool update_stats) {
  ProfileNode* node = root_;
  CodeEntry* last_entry = NULL;
  for (auto it = path.rbegin(); it != path.rend(); ++it) {
    if (*it == NULL) continue;
    last_entry = *it;
    node = node->FindOrAddChild(*it);
  }
  if (last_entry && last_entry->has_deopt_info()) {
    node->CollectDeoptInfo(last_entry);
  }
  if (update_stats) {
    node->IncrementSelfTicks();
    if (src_line != v8::CpuProfileNode::kNoLineNumberInfo) {
      node->IncrementLineTicks(src_line);
    }
  }
  return node;
}


struct NodesPair {
  NodesPair(ProfileNode* src, ProfileNode* dst)
      : src(src), dst(dst) { }
  ProfileNode* src;
  ProfileNode* dst;
};


class Position {
 public:
  explicit Position(ProfileNode* node)
      : node(node), child_idx_(0) { }
  INLINE(ProfileNode* current_child()) {
    return node->children()->at(child_idx_);
  }
  INLINE(bool has_current_child()) {
    return child_idx_ < node->children()->length();
  }
  INLINE(void next_child()) { ++child_idx_; }

  ProfileNode* node;
 private:
  int child_idx_;
};


// Non-recursive implementation of a depth-first post-order tree traversal.
template <typename Callback>
void ProfileTree::TraverseDepthFirst(Callback* callback) {
  List<Position> stack(10);
  stack.Add(Position(root_));
  while (stack.length() > 0) {
    Position& current = stack.last();
    if (current.has_current_child()) {
      callback->BeforeTraversingChild(current.node, current.current_child());
      stack.Add(Position(current.current_child()));
    } else {
      callback->AfterAllChildrenTraversed(current.node);
      if (stack.length() > 1) {
        Position& parent = stack[stack.length() - 2];
        callback->AfterChildTraversed(parent.node, current.node);
        parent.next_child();
      }
      // Remove child from the stack.
      stack.RemoveLast();
    }
  }
}

CpuProfile::CpuProfile(CpuProfiler* profiler, const char* title,
                       bool record_samples)
    : title_(title),
      record_samples_(record_samples),
      start_time_(base::TimeTicks::HighResolutionNow()),
      top_down_(profiler->isolate()),
      profiler_(profiler) {}

void CpuProfile::AddPath(base::TimeTicks timestamp,
                         const std::vector<CodeEntry*>& path, int src_line,
                         bool update_stats) {
  ProfileNode* top_frame_node =
      top_down_.AddPathFromEnd(path, src_line, update_stats);
  if (record_samples_ && !timestamp.IsNull()) {
    timestamps_.Add(timestamp);
    samples_.Add(top_frame_node);
  }
}

void CpuProfile::CalculateTotalTicksAndSamplingRate() {
  end_time_ = base::TimeTicks::HighResolutionNow();
}

void CpuProfile::Print() {
  base::OS::Print("[Top down]:\n");
  top_down_.Print();
}

void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) {
  DeleteAllCoveredCode(addr, addr + size);
  code_map_.insert({addr, CodeEntryInfo(entry, size)});
}

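// Removes all entries whose code ranges overlap the half-open range
// [start, end), so a newly registered code object never aliases stale
// entries.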
void CodeMap::DeleteAllCoveredCode(Address start, Address end) {
  auto left = code_map_.upper_bound(start);
  if (left != code_map_.begin()) {
    --left;
    if (left->first + left->second.size <= start) ++left;
  }
  auto right = left;
  while (right != code_map_.end() && right->first < end) ++right;
  code_map_.erase(left, right);
}

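// upper_bound() yields the first entry starting strictly after |addr|, so the
// candidate is the entry just before it; it matches only if |addr| falls
// within that entry's [start, start + size) range.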
CodeEntry* CodeMap::FindEntry(Address addr) {
  auto it = code_map_.upper_bound(addr);
  if (it == code_map_.begin()) return nullptr;
  --it;
  Address end_address = it->first + it->second.size;
  return addr < end_address ? it->second.entry : nullptr;
}

void CodeMap::MoveCode(Address from, Address to) {
  if (from == to) return;
  auto it = code_map_.find(from);
  if (it == code_map_.end()) return;
  CodeEntryInfo info = it->second;
  code_map_.erase(it);
  AddCode(to, info.entry, info.size);
}

void CodeMap::Print() {
  for (auto it = code_map_.begin(); it != code_map_.end(); ++it) {
    base::OS::Print("%p %5d %s\n", static_cast<void*>(it->first),
                    it->second.size, it->second.entry->name());
  }
}

CpuProfilesCollection::CpuProfilesCollection(Isolate* isolate)
    : resource_names_(isolate->heap()),
      profiler_(nullptr),
      current_profiles_semaphore_(1) {}

static void DeleteCpuProfile(CpuProfile** profile_ptr) {
  delete *profile_ptr;
}


CpuProfilesCollection::~CpuProfilesCollection() {
  finished_profiles_.Iterate(DeleteCpuProfile);
  current_profiles_.Iterate(DeleteCpuProfile);
}


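// current_profiles_semaphore_ is created with count 1 and is used as a
// binary lock: StartProfiling, StopProfiling and AddPathToCurrentProfiles
// all acquire it before touching current_profiles_.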
bool CpuProfilesCollection::StartProfiling(const char* title,
                                           bool record_samples) {
  current_profiles_semaphore_.Wait();
  if (current_profiles_.length() >= kMaxSimultaneousProfiles) {
    current_profiles_semaphore_.Signal();
    return false;
  }
  for (int i = 0; i < current_profiles_.length(); ++i) {
    if (strcmp(current_profiles_[i]->title(), title) == 0) {
      // Ignore attempts to start a profile with the same title...
      current_profiles_semaphore_.Signal();
      // ... though return true to force it to collect a sample.
      return true;
    }
  }
  current_profiles_.Add(new CpuProfile(profiler_, title, record_samples));
  current_profiles_semaphore_.Signal();
  return true;
}


CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
  const int title_len = StrLength(title);
  CpuProfile* profile = NULL;
  current_profiles_semaphore_.Wait();
  for (int i = current_profiles_.length() - 1; i >= 0; --i) {
    if (title_len == 0 || strcmp(current_profiles_[i]->title(), title) == 0) {
      profile = current_profiles_.Remove(i);
      break;
    }
  }
  current_profiles_semaphore_.Signal();

  if (profile == NULL) return NULL;
  profile->CalculateTotalTicksAndSamplingRate();
  finished_profiles_.Add(profile);
  return profile;
}


bool CpuProfilesCollection::IsLastProfile(const char* title) {
  // Called from VM thread, and only it can mutate the list,
  // so no locking is needed here.
  if (current_profiles_.length() != 1) return false;
  return StrLength(title) == 0
      || strcmp(current_profiles_[0]->title(), title) == 0;
}


void CpuProfilesCollection::RemoveProfile(CpuProfile* profile) {
  // Called from VM thread for a completed profile.
  for (int i = 0; i < finished_profiles_.length(); i++) {
    if (profile == finished_profiles_[i]) {
      finished_profiles_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}

void CpuProfilesCollection::AddPathToCurrentProfiles(
    base::TimeTicks timestamp, const std::vector<CodeEntry*>& path,
    int src_line, bool update_stats) {
  // As starting / stopping profiles is rare relative to this method,
  // we don't bother minimizing the duration of lock holding, e.g. by
  // copying the contents of the list to a local vector.
  current_profiles_semaphore_.Wait();
  for (int i = 0; i < current_profiles_.length(); ++i) {
    current_profiles_[i]->AddPath(timestamp, path, src_line, update_stats);
  }
  current_profiles_semaphore_.Signal();
}

ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles)
    : profiles_(profiles) {}

void ProfileGenerator::RecordTickSample(const TickSample& sample) {
  std::vector<CodeEntry*> entries;
  // Conservatively reserve space for stack frames + pc + function + vm-state.
  // There could in fact be more of them because of inlined entries.
  entries.reserve(sample.frames_count + 3);

  // The ProfileNode knows nothing about all versions of generated code for
  // the same JS function. The line number information associated with
  // the latest version of generated code is used to find a source line number
  // for a JS function. Then, the detected source line is passed to
  // ProfileNode to increase the tick count for this source line.
  int src_line = v8::CpuProfileNode::kNoLineNumberInfo;
  bool src_line_not_found = true;

  if (sample.pc != nullptr) {
    if (sample.has_external_callback && sample.state == EXTERNAL) {
      // Don't use PC when in external callback code, as it can point
      // inside callback's code, and we will erroneously report
      // that a callback calls itself.
      entries.push_back(code_map_.FindEntry(sample.external_callback_entry));
    } else {
      CodeEntry* pc_entry = code_map_.FindEntry(sample.pc);
      // If there is no pc_entry, we are likely in native code.
      // Find out if the top of stack was pointing inside a JS function,
      // meaning that we have encountered a frameless invocation.
      if (!pc_entry && !sample.has_external_callback) {
        pc_entry = code_map_.FindEntry(sample.tos);
      }
      // If the pc is in the function code before it has set up the stack frame
      // or after the frame was destroyed, SafeStackFrameIterator incorrectly
      // thinks that ebp contains the return address of the current function
      // and skips the caller's frame. Check for this case and just skip such
      // samples.
      if (pc_entry) {
        int pc_offset =
            static_cast<int>(sample.pc - pc_entry->instruction_start());
        src_line = pc_entry->GetSourceLine(pc_offset);
        if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
          src_line = pc_entry->line_number();
        }
        src_line_not_found = false;
        entries.push_back(pc_entry);

        if (pc_entry->builtin_id() == Builtins::kFunctionPrototypeApply ||
            pc_entry->builtin_id() == Builtins::kFunctionPrototypeCall) {
          // When the current function is either the Function.prototype.apply
          // or the Function.prototype.call builtin, the top frame is either
          // the frame of the calling JS function or an internal frame.
          // In the latter case we know the caller for sure, but in the former
          // case we don't, so we simply replace the frame with an
          // 'unresolved' entry.
          if (!sample.has_external_callback) {
            entries.push_back(CodeEntry::unresolved_entry());
          }
        }
      }
    }

    for (const Address *stack_pos = sample.stack,
                       *stack_end = stack_pos + sample.frames_count;
         stack_pos != stack_end; ++stack_pos) {
      CodeEntry* entry = code_map_.FindEntry(*stack_pos);

      if (entry) {
        // Find out if the entry has an inlining stack associated.
        int pc_offset =
            static_cast<int>(*stack_pos - entry->instruction_start());
        const std::vector<CodeEntry*>* inline_stack =
            entry->GetInlineStack(pc_offset);
        if (inline_stack) {
          entries.insert(entries.end(), inline_stack->rbegin(),
                         inline_stack->rend());
        }
        // Skip unresolved frames (e.g. internal frames) and get the source
        // line of the first JS caller.
        if (src_line_not_found) {
          src_line = entry->GetSourceLine(pc_offset);
          if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
            src_line = entry->line_number();
          }
          src_line_not_found = false;
        }
      }
      entries.push_back(entry);
    }
  }

  if (FLAG_prof_browser_mode) {
    bool no_symbolized_entries = true;
    for (auto e : entries) {
      if (e != NULL) {
        no_symbolized_entries = false;
        break;
      }
    }
    // If no frames were symbolized, put the VM state entry in.
    if (no_symbolized_entries) {
      entries.push_back(EntryForVMState(sample.state));
    }
  }

  profiles_->AddPathToCurrentProfiles(sample.timestamp, entries, src_line,
                                      sample.update_stats);
}


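// Maps the sampled VM state to one of the synthetic '(garbage collector)',
// '(program)' or '(idle)' entries; used above when a browser-mode sample has
// no symbolized frames.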
CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
  switch (tag) {
    case GC:
      return CodeEntry::gc_entry();
    case JS:
    case COMPILER:
    // DOM event handlers are reported as OTHER / EXTERNAL entries.
    // To avoid confusing people, let's put all these entries into
    // one bucket.
    case OTHER:
    case EXTERNAL:
      return CodeEntry::program_entry();
    case IDLE:
      return CodeEntry::idle_entry();
    default: return NULL;
  }
}

}  // namespace internal
}  // namespace v8