// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_PROFILER_PROFILE_GENERATOR_H_
#define V8_PROFILER_PROFILE_GENERATOR_H_

#include <atomic>
#include <deque>
#include <limits>
#include <map>
#include <memory>
#include <unordered_map>
#include <utility>
#include <vector>

#include "include/v8-profiler.h"
#include "src/allocation.h"
#include "src/log.h"
#include "src/profiler/strings-storage.h"
#include "src/source-position.h"

namespace v8 {
namespace internal {

struct TickSample;

// Provides a mapping from offsets within generated code or a bytecode array
// to source line numbers.
class SourcePositionTable : public Malloced {
 public:
  SourcePositionTable() = default;

  void SetPosition(int pc_offset, int line);
  int GetSourceLineNumber(int pc_offset) const;

 private:
  struct PCOffsetAndLineNumber {
    bool operator<(const PCOffsetAndLineNumber& other) const {
      return pc_offset < other.pc_offset;
    }
    int pc_offset;
    int line_number;
  };
  // This is logically a map, but we store it as a vector of pairs sorted by
  // pc offset, so that we save space and can look up entries with a binary
  // search.
  std::vector<PCOffsetAndLineNumber> pc_offsets_to_lines_;
  DISALLOW_COPY_AND_ASSIGN(SourcePositionTable);
};
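
// A minimal usage sketch for SourcePositionTable (illustrative only; the
// offsets and line numbers are arbitrary and the exact lookup semantics are
// defined in profile-generator.cc). Positions are recorded per pc offset
// while code is generated and resolved back to a line when a tick lands
// inside the code object:
//
//   SourcePositionTable table;
//   table.SetPosition(0, 10);   // code starting at offset 0 comes from line 10
//   table.SetPosition(64, 12);  // code starting at offset 64 comes from line 12
//   // A lookup presumably resolves to the closest recorded offset at or
//   // before the queried one, so offset 80 would map to line 12 here.
//   int line = table.GetSourceLineNumber(80);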

class CodeEntry {
 public:
  // CodeEntry doesn't own name strings, just references them.
  inline CodeEntry(CodeEventListener::LogEventsAndTags tag, const char* name,
                   const char* resource_name = CodeEntry::kEmptyResourceName,
                   int line_number = v8::CpuProfileNode::kNoLineNumberInfo,
                   int column_number = v8::CpuProfileNode::kNoColumnNumberInfo,
                   std::unique_ptr<SourcePositionTable> line_info = nullptr,
                   Address instruction_start = kNullAddress);

  const char* name() const { return name_; }
  const char* resource_name() const { return resource_name_; }
  int line_number() const { return line_number_; }
  int column_number() const { return column_number_; }
  const SourcePositionTable* line_info() const { return line_info_.get(); }
  int script_id() const { return script_id_; }
  void set_script_id(int script_id) { script_id_ = script_id; }
  int position() const { return position_; }
  void set_position(int position) { position_ = position; }
  void set_bailout_reason(const char* bailout_reason) {
    EnsureRareData()->bailout_reason_ = bailout_reason;
  }
  const char* bailout_reason() const {
    return rare_data_ ? rare_data_->bailout_reason_ : kEmptyBailoutReason;
  }

  void set_deopt_info(const char* deopt_reason, int deopt_id,
                      std::vector<CpuProfileDeoptFrame> inlined_frames);

  CpuProfileDeoptInfo GetDeoptInfo();
  bool has_deopt_info() const {
    return rare_data_ && rare_data_->deopt_id_ != kNoDeoptimizationId;
  }
  void clear_deopt_info() {
    if (!rare_data_) return;
    // TODO(alph): Clear rare_data_ if that was the only field in use.
    rare_data_->deopt_reason_ = kNoDeoptReason;
    rare_data_->deopt_id_ = kNoDeoptimizationId;
  }
  void mark_used() { bit_field_ = UsedField::update(bit_field_, true); }
  bool used() const { return UsedField::decode(bit_field_); }

  void FillFunctionInfo(SharedFunctionInfo* shared);

  void SetBuiltinId(Builtins::Name id);
  Builtins::Name builtin_id() const {
    return BuiltinIdField::decode(bit_field_);
  }

  uint32_t GetHash() const;
  bool IsSameFunctionAs(const CodeEntry* entry) const;

  int GetSourceLine(int pc_offset) const;

  void AddInlineStack(int pc_offset,
                      std::vector<std::unique_ptr<CodeEntry>> inline_stack);
  const std::vector<std::unique_ptr<CodeEntry>>* GetInlineStack(
      int pc_offset) const;

  void set_instruction_start(Address start) { instruction_start_ = start; }
  Address instruction_start() const { return instruction_start_; }

  CodeEventListener::LogEventsAndTags tag() const {
    return TagField::decode(bit_field_);
  }

  static const char* const kWasmResourceNamePrefix;
  static const char* const kEmptyResourceName;
  static const char* const kEmptyBailoutReason;
  static const char* const kNoDeoptReason;

  static const char* const kProgramEntryName;
  static const char* const kIdleEntryName;
  static const char* const kGarbageCollectorEntryName;
  // Used to represent frames for which we have no reliable way to
  // detect the associated function.
  static const char* const kUnresolvedFunctionName;

  V8_INLINE static CodeEntry* program_entry() {
    return kProgramEntry.Pointer();
  }
  V8_INLINE static CodeEntry* idle_entry() { return kIdleEntry.Pointer(); }
  V8_INLINE static CodeEntry* gc_entry() { return kGCEntry.Pointer(); }
  V8_INLINE static CodeEntry* unresolved_entry() {
    return kUnresolvedEntry.Pointer();
  }

 private:
  struct RareData {
    const char* deopt_reason_ = kNoDeoptReason;
    const char* bailout_reason_ = kEmptyBailoutReason;
    int deopt_id_ = kNoDeoptimizationId;
    std::unordered_map<int, std::vector<std::unique_ptr<CodeEntry>>>
        inline_locations_;
    std::vector<CpuProfileDeoptFrame> deopt_inlined_frames_;
  };

  RareData* EnsureRareData();

  struct ProgramEntryCreateTrait {
    static CodeEntry* Create();
  };
  struct IdleEntryCreateTrait {
    static CodeEntry* Create();
  };
  struct GCEntryCreateTrait {
    static CodeEntry* Create();
  };
  struct UnresolvedEntryCreateTrait {
    static CodeEntry* Create();
  };

  static base::LazyDynamicInstance<CodeEntry, ProgramEntryCreateTrait>::type
      kProgramEntry;
  static base::LazyDynamicInstance<CodeEntry, IdleEntryCreateTrait>::type
      kIdleEntry;
  static base::LazyDynamicInstance<CodeEntry, GCEntryCreateTrait>::type
      kGCEntry;
  static base::LazyDynamicInstance<CodeEntry, UnresolvedEntryCreateTrait>::type
      kUnresolvedEntry;

  using TagField = BitField<Logger::LogEventsAndTags, 0, 8>;
  using BuiltinIdField = BitField<Builtins::Name, 8, 23>;
  using UsedField = BitField<bool, 31, 1>;

  uint32_t bit_field_;
  const char* name_;
  const char* resource_name_;
  int line_number_;
  int column_number_;
  int script_id_;
  int position_;
  std::unique_ptr<SourcePositionTable> line_info_;
  Address instruction_start_;
  std::unique_ptr<RareData> rare_data_;

  DISALLOW_COPY_AND_ASSIGN(CodeEntry);
};
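
// A minimal sketch of constructing a CodeEntry for a JS function (real
// entries are created by the profiler's code-event handlers; the names and
// numbers below are arbitrary placeholders):
//
//   auto entry = std::make_unique<CodeEntry>(
//       CodeEventListener::FUNCTION_TAG, "foo" /* name */,
//       "test.js" /* resource_name */, 12 /* line */, 4 /* column */);
//   entry->set_script_id(1);
//
// The tag, builtin id and "used" flag share the single bit_field_ word via
// the BitField aliases above, while deopt/bailout/inline-stack data is only
// allocated on demand through RareData, keeping the common CodeEntry small.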

struct CodeEntryAndLineNumber {
  CodeEntry* code_entry;
  int line_number;
};

typedef std::vector<CodeEntryAndLineNumber> ProfileStackTrace;

class ProfileTree;

class ProfileNode {
 public:
  inline ProfileNode(ProfileTree* tree, CodeEntry* entry, ProfileNode* parent,
                     int line_number = 0);

  ProfileNode* FindChild(
      CodeEntry* entry,
      int line_number = v8::CpuProfileNode::kNoLineNumberInfo);
  ProfileNode* FindOrAddChild(CodeEntry* entry, int line_number = 0);
  void IncrementSelfTicks() { ++self_ticks_; }
  void IncreaseSelfTicks(unsigned amount) { self_ticks_ += amount; }
  void IncrementLineTicks(int src_line);

  CodeEntry* entry() const { return entry_; }
  unsigned self_ticks() const { return self_ticks_; }
  const std::vector<ProfileNode*>* children() const { return &children_list_; }
  unsigned id() const { return id_; }
  unsigned function_id() const;
  ProfileNode* parent() const { return parent_; }
  int line_number() const {
    return line_number_ != 0 ? line_number_ : entry_->line_number();
  }

  unsigned int GetHitLineCount() const {
    return static_cast<unsigned int>(line_ticks_.size());
  }
  bool GetLineTicks(v8::CpuProfileNode::LineTick* entries,
                    unsigned int length) const;
  void CollectDeoptInfo(CodeEntry* entry);
  const std::vector<CpuProfileDeoptInfo>& deopt_infos() const {
    return deopt_infos_;
  }
  Isolate* isolate() const;

  void Print(int indent);

 private:
  struct Equals {
    bool operator()(CodeEntryAndLineNumber lhs,
                    CodeEntryAndLineNumber rhs) const {
      return lhs.code_entry->IsSameFunctionAs(rhs.code_entry) &&
             lhs.line_number == rhs.line_number;
    }
  };
  struct Hasher {
    std::size_t operator()(CodeEntryAndLineNumber pair) const {
      return pair.code_entry->GetHash() ^ ComputeIntegerHash(pair.line_number);
    }
  };

  ProfileTree* tree_;
  CodeEntry* entry_;
  unsigned self_ticks_;
  std::unordered_map<CodeEntryAndLineNumber, ProfileNode*, Hasher, Equals>
      children_;
  int line_number_;
  std::vector<ProfileNode*> children_list_;
  ProfileNode* parent_;
  unsigned id_;
  // Maps source line number -> number of ticks attributed to that line.
  std::unordered_map<int, int> line_ticks_;

  std::vector<CpuProfileDeoptInfo> deopt_infos_;

  DISALLOW_COPY_AND_ASSIGN(ProfileNode);
};
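
// Note on ProfileNode: children are keyed by (CodeEntry, line number), so in
// a line-level profiling mode the same callee can appear as several distinct
// children of one node, one per call line, while children_list_ keeps them in
// insertion order for iteration via children().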

class ProfileTree {
 public:
  explicit ProfileTree(Isolate* isolate);
  ~ProfileTree();

  typedef v8::CpuProfilingMode ProfilingMode;

  ProfileNode* AddPathFromEnd(
      const std::vector<CodeEntry*>& path,
      int src_line = v8::CpuProfileNode::kNoLineNumberInfo,
      bool update_stats = true);
  ProfileNode* AddPathFromEnd(
      const ProfileStackTrace& path,
      int src_line = v8::CpuProfileNode::kNoLineNumberInfo,
      bool update_stats = true,
      ProfilingMode mode = ProfilingMode::kLeafNodeLineNumbers);
  ProfileNode* root() const { return root_; }
  unsigned next_node_id() { return next_node_id_++; }
  unsigned GetFunctionId(const ProfileNode* node);

  void Print() { root_->Print(0); }

  Isolate* isolate() const { return isolate_; }

  void EnqueueNode(const ProfileNode* node) { pending_nodes_.push_back(node); }
  size_t pending_nodes_count() const { return pending_nodes_.size(); }
  std::vector<const ProfileNode*> TakePendingNodes() {
    return std::move(pending_nodes_);
  }

 private:
  template <typename Callback>
  void TraverseDepthFirst(Callback* callback);

  std::vector<const ProfileNode*> pending_nodes_;

  CodeEntry root_entry_;
  unsigned next_node_id_;
  ProfileNode* root_;
  Isolate* isolate_;

  unsigned next_function_id_;
  std::unordered_map<CodeEntry*, unsigned> function_ids_;

  DISALLOW_COPY_AND_ASSIGN(ProfileTree);
};
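
// Illustrative sketch of how a sampled stack becomes tree nodes. The path is
// ordered from the innermost frame outwards and, as the name AddPathFromEnd
// suggests, is walked from the end, so entries attach under the root in
// caller-to-callee order (entries, lines and the tree pointer below are
// arbitrary placeholders):
//
//   ProfileStackTrace path = {{bar_entry, 7}, {foo_entry, 3}, {main_entry, 1}};
//   // Expected result: root -> main -> foo -> bar, with the leaf ("bar")
//   // receiving the self tick when update_stats is true.
//   tree->AddPathFromEnd(path, /* src_line */ 7, /* update_stats */ true);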

class CpuProfile {
 public:
  typedef v8::CpuProfilingMode ProfilingMode;

  CpuProfile(CpuProfiler* profiler, const char* title, bool record_samples,
             ProfilingMode mode);

  // Add pc -> ... -> main() call path to the profile.
  void AddPath(base::TimeTicks timestamp, const ProfileStackTrace& path,
               int src_line, bool update_stats);
  void FinishProfile();

  const char* title() const { return title_; }
  const ProfileTree* top_down() const { return &top_down_; }

  int samples_count() const { return static_cast<int>(samples_.size()); }
  ProfileNode* sample(int index) const { return samples_.at(index); }
  base::TimeTicks sample_timestamp(int index) const {
    return timestamps_.at(index);
  }

  base::TimeTicks start_time() const { return start_time_; }
  base::TimeTicks end_time() const { return end_time_; }
  CpuProfiler* cpu_profiler() const { return profiler_; }

  void UpdateTicksScale();

  void Print();

 private:
  void StreamPendingTraceEvents();

  const char* title_;
  bool record_samples_;
  ProfilingMode mode_;
  base::TimeTicks start_time_;
  base::TimeTicks end_time_;
  std::vector<ProfileNode*> samples_;
  std::vector<base::TimeTicks> timestamps_;
  ProfileTree top_down_;
  CpuProfiler* const profiler_;
  size_t streaming_next_sample_;
  uint32_t id_;

  static std::atomic<uint32_t> last_id_;

  DISALLOW_COPY_AND_ASSIGN(CpuProfile);
};
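
// Note on CpuProfile: samples_ and timestamps_ are parallel vectors, so
// sample(i) is the profile node the i-th tick was attributed to and
// sample_timestamp(i) is when that tick was taken; individual samples are
// presumably only retained when record_samples_ is set.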

class CodeMap {
 public:
  CodeMap();
  ~CodeMap();

  void AddCode(Address addr, CodeEntry* entry, unsigned size);
  void MoveCode(Address from, Address to);
  CodeEntry* FindEntry(Address addr);
  void Print();

 private:
  struct CodeEntryMapInfo {
    unsigned index;
    unsigned size;
  };

  union CodeEntrySlotInfo {
    CodeEntry* entry;
    unsigned next_free_slot;
  };

  static constexpr unsigned kNoFreeSlot = std::numeric_limits<unsigned>::max();

  void ClearCodesInRange(Address start, Address end);
  unsigned AddCodeEntry(Address start, CodeEntry*);
  void DeleteCodeEntry(unsigned index);

  CodeEntry* entry(unsigned index) { return code_entries_[index].entry; }

  std::deque<CodeEntrySlotInfo> code_entries_;
  std::map<Address, CodeEntryMapInfo> code_map_;
  unsigned free_list_head_ = kNoFreeSlot;

  DISALLOW_COPY_AND_ASSIGN(CodeMap);
};
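
// Minimal usage sketch for CodeMap (foo_entry, the addresses and the sizes
// below are arbitrary placeholders):
//
//   CodeMap code_map;
//   code_map.AddCode(0x1000, foo_entry, 0x80);    // covers [0x1000, 0x1080)
//   CodeEntry* hit = code_map.FindEntry(0x1040);  // falls inside foo's range
//   code_map.MoveCode(0x1000, 0x2000);            // e.g. the code object moved
//
// Internally, slots in code_entries_ are recycled through a free list: each
// CodeEntrySlotInfo holds either a live CodeEntry* or the index of the next
// free slot, with free_list_head_ pointing at the first reusable slot.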

class CpuProfilesCollection {
 public:
  explicit CpuProfilesCollection(Isolate* isolate);

  typedef v8::CpuProfilingMode ProfilingMode;

  void set_cpu_profiler(CpuProfiler* profiler) { profiler_ = profiler; }
  bool StartProfiling(const char* title, bool record_samples,
                      ProfilingMode mode = ProfilingMode::kLeafNodeLineNumbers);
  CpuProfile* StopProfiling(const char* title);
  std::vector<std::unique_ptr<CpuProfile>>* profiles() {
    return &finished_profiles_;
  }
  const char* GetName(Name* name) { return resource_names_.GetName(name); }
  bool IsLastProfile(const char* title);
  void RemoveProfile(CpuProfile* profile);

  // Called from the profile generator thread.
  void AddPathToCurrentProfiles(base::TimeTicks timestamp,
                                const ProfileStackTrace& path, int src_line,
                                bool update_stats);

  // Limits the number of profiles that can be simultaneously collected.
  static const int kMaxSimultaneousProfiles = 100;

 private:
  StringsStorage resource_names_;
  std::vector<std::unique_ptr<CpuProfile>> finished_profiles_;
  CpuProfiler* profiler_;

  // Accessed by the VM thread and the profile generator thread.
  std::vector<std::unique_ptr<CpuProfile>> current_profiles_;
  base::Semaphore current_profiles_semaphore_;

  DISALLOW_COPY_AND_ASSIGN(CpuProfilesCollection);
};
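
// Rough lifecycle sketch for CpuProfilesCollection (isolate, profiler and the
// title are placeholders; Start/StopProfiling run on the VM thread while
// samples arrive on the profile generator thread, with current_profiles_
// guarded by current_profiles_semaphore_):
//
//   CpuProfilesCollection profiles(isolate);
//   profiles.set_cpu_profiler(profiler);
//   if (profiles.StartProfiling("my-profile", /* record_samples */ true)) {
//     // ... ticks flow in via AddPathToCurrentProfiles() ...
//     CpuProfile* profile = profiles.StopProfiling("my-profile");
//   }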

class ProfileGenerator {
 public:
  explicit ProfileGenerator(CpuProfilesCollection* profiles);

  void RecordTickSample(const TickSample& sample);

  CodeMap* code_map() { return &code_map_; }

 private:
  CodeEntry* FindEntry(Address address);
  CodeEntry* EntryForVMState(StateTag tag);

  CpuProfilesCollection* profiles_;
  CodeMap code_map_;

  DISALLOW_COPY_AND_ASSIGN(ProfileGenerator);
};

}  // namespace internal
}  // namespace v8

#endif  // V8_PROFILER_PROFILE_GENERATOR_H_