#ifndef __PERF_EVSEL_H
#define __PERF_EVSEL_H 1

/* ANDROID_CHANGE_BEGIN */
#if 0
#include <linux/list.h>
#include <stdbool.h>
#include "../../../include/linux/perf_event.h"
#else
#include "include/linux/list.h"
#include <stdbool.h>
#include "include/linux/added/perf_event.h"
#endif
/* ANDROID_CHANGE_END */
#include "types.h"
#include "xyarray.h"
#include "cgroup.h"
#include "hist.h"

struct perf_counts_values {
	union {
		struct {
			u64 val;	/* raw counter value */
			u64 ena;	/* time the event was enabled */
			u64 run;	/* time the event was actually running */
		};
		u64 values[3];
	};
};

struct perf_counts {
	s8			  scaled;	/* 1: scaled, 0: not scaled, -1: failed to scale */
	struct perf_counts_values aggr;		/* aggregated over all CPUs/threads */
	struct perf_counts_values cpu[];	/* one entry per CPU */
};

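/*
 * Usage sketch (illustrative, not part of the original header): when a
 * counter was multiplexed, run < ena and the raw value can be scaled the
 * same way the *_scaled read helpers below do it, i.e. val * ena / run.
 *
 *	struct perf_counts_values *v = &evsel->counts->aggr;
 *	u64 estimate = v->run ? (u64)((double)v->val * v->ena / v->run)
 *			      : v->val;
 */
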
struct perf_evsel;

/*
 * Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there is
 * more than one entry in the evlist.
 */
struct perf_sample_id {
	struct hlist_node	node;
	u64			id;
	struct perf_evsel	*evsel;
};

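/*
 * Lookup sketch (illustrative; assumes the matching evlist API in this
 * tree, e.g. perf_evlist__id2evsel()): map a PERF_SAMPLE_ID value read
 * from the mmap buffer back to the evsel that generated the sample.
 *
 *	struct perf_evsel *evsel = perf_evlist__id2evsel(evlist, sample.id);
 *	if (evsel == NULL)
 *		return -1;
 */
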
/** struct perf_evsel - event selector
 *
 * @name - Can be set to retain the original event name passed by the user,
 *         so that when showing results in tools such as 'perf stat', we
 *         show the name used, not some alias.
 */
struct perf_evsel {
	struct list_head	node;
	struct perf_event_attr	attr;
	char			*filter;
	struct xyarray		*fd;		/* file descriptors, indexed by (cpu, thread) */
	struct xyarray		*sample_id;
	u64			*id;
	struct perf_counts	*counts;	/* last values read for this evsel */
	int			idx;		/* position in the owning evlist */
	int			ids;
	struct hists		hists;
	char			*name;
	union {
		void		*priv;
		off_t		id_offset;
	};
	struct cgroup_sel	*cgrp;		/* cgroup being monitored, if any */
};

struct cpu_map;
struct thread_map;
struct perf_evlist;

struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx);
void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx);
void perf_evsel__exit(struct perf_evsel *evsel);
void perf_evsel__delete(struct perf_evsel *evsel);

int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
void perf_evsel__free_fd(struct perf_evsel *evsel);
void perf_evsel__free_id(struct perf_evsel *evsel);
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus, bool group);
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads, bool group);
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads, bool group);

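/*
 * Lifecycle sketch (illustrative; cpu_map__new() comes from cpu-map.h in
 * this tree, and the attr settings are just an example):
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *	};
 *	struct cpu_map *cpus = cpu_map__new(NULL);	// all online CPUs
 *	struct perf_evsel *evsel = perf_evsel__new(&attr, 0);
 *
 *	if (evsel && cpus) {
 *		if (perf_evsel__open_per_cpu(evsel, cpus, false) == 0) {
 *			// ... read counts via the helpers declared below ...
 *			perf_evsel__close_fd(evsel, cpus->nr, 1);
 *		}
 *		perf_evsel__delete(evsel);
 *	}
 */
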
#define perf_evsel__match(evsel, t, c)		\
	(evsel->attr.type == PERF_TYPE_##t &&	\
	 evsel->attr.config == PERF_COUNT_##c)

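/*
 * Example use of the match macro (illustrative): the t/c arguments are
 * pasted onto the PERF_TYPE_/PERF_COUNT_ prefixes, so a check for the
 * software task-clock event reads:
 *
 *	if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
 *		// handle task-clock specially
 */
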
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale);

/**
 * perf_evsel__read_on_cpu - Read out the results on a CPU and thread
 *
 * @evsel - event selector to read value
 * @cpu - CPU of interest
 * @thread - thread of interest
 */
static inline int perf_evsel__read_on_cpu(struct perf_evsel *evsel,
					  int cpu, int thread)
{
	return __perf_evsel__read_on_cpu(evsel, cpu, thread, false);
}

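/*
 * Usage sketch (illustrative; assumes the evsel was opened per-CPU and
 * per-CPU counts are available in evsel->counts):
 *
 *	if (perf_evsel__read_on_cpu(evsel, cpu, 0) == 0)
 *		printf("cpu%d: %" PRIu64 "\n", cpu, evsel->counts->cpu[cpu].val);
 */
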
/**
 * perf_evsel__read_on_cpu_scaled - Read out the results on a CPU and thread, scaled
 *
 * @evsel - event selector to read value
 * @cpu - CPU of interest
 * @thread - thread of interest
 */
static inline int perf_evsel__read_on_cpu_scaled(struct perf_evsel *evsel,
						 int cpu, int thread)
{
	return __perf_evsel__read_on_cpu(evsel, cpu, thread, true);
}

int __perf_evsel__read(struct perf_evsel *evsel, int ncpus, int nthreads,
		       bool scale);

/**
 * perf_evsel__read - Read the aggregate results on all CPUs
 *
 * @evsel - event selector to read value
 * @ncpus - Number of cpus affected, from zero
 * @nthreads - Number of threads affected, from zero
 */
static inline int perf_evsel__read(struct perf_evsel *evsel,
				   int ncpus, int nthreads)
{
	return __perf_evsel__read(evsel, ncpus, nthreads, false);
}

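/*
 * Usage sketch (illustrative): aggregate the counter over all opened
 * CPU/thread pairs and inspect the sum in evsel->counts->aggr.
 *
 *	if (perf_evsel__read(evsel, ncpus, nthreads) == 0) {
 *		struct perf_counts_values *aggr = &evsel->counts->aggr;
 *		printf("total: %" PRIu64 "\n", aggr->val);
 *	}
 */
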
/**
 * perf_evsel__read_scaled - Read the aggregate results on all CPUs, scaled
 *
 * @evsel - event selector to read value
 * @ncpus - Number of cpus affected, from zero
 * @nthreads - Number of threads affected, from zero
 */
static inline int perf_evsel__read_scaled(struct perf_evsel *evsel,
					  int ncpus, int nthreads)
{
	return __perf_evsel__read(evsel, ncpus, nthreads, true);
}

int __perf_evsel__sample_size(u64 sample_type);

static inline int perf_evsel__sample_size(struct perf_evsel *evsel)
{
	return __perf_evsel__sample_size(evsel->attr.sample_type);
}

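/*
 * Worked example (illustrative): each fixed-size field selected in
 * attr.sample_type contributes sizeof(u64) to the non-variable part of a
 * sample, so three selected fields are expected to yield 3 * sizeof(u64):
 *
 *	int sz = __perf_evsel__sample_size(PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 *					   PERF_SAMPLE_TIME);	// expected to be 24
 */
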
#endif /* __PERF_EVSEL_H */