
Lines matching refs:evsel (the leading number on each line below is that line's position in the source file; only matching lines are shown, so the listing is non-contiguous)

10 #include "evsel.h"
34 void perf_evsel__init(struct perf_evsel *evsel,
37 evsel->idx = idx;
38 evsel->attr = *attr;
39 INIT_LIST_HEAD(&evsel->node);
44 struct perf_evsel *evsel = zalloc(sizeof(*evsel));
46 if (evsel != NULL)
47 perf_evsel__init(evsel, attr, idx);
49 return evsel;
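
A minimal construction sketch, assuming the perf_evsel__new(attr, idx) signature implied by the fragment above; the helper name cycles_evsel and the attribute values are illustrative and not part of the matched listing.

#include <linux/perf_event.h>
#include "evsel.h"

/* Build an evsel counting CPU cycles; perf_evsel__new() returns NULL
 * when its zalloc() fails, so callers must check the result. */
static struct perf_evsel *cycles_evsel(void)
{
        struct perf_event_attr attr = {
                .type   = PERF_TYPE_HARDWARE,
                .config = PERF_COUNT_HW_CPU_CYCLES,
        };

        return perf_evsel__new(&attr, 0);
}
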
52 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
55 evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
57 if (evsel->fd) {
60 FD(evsel, cpu, thread) = -1;
65 return evsel->fd != NULL ? 0 : -ENOMEM;
68 int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
70 evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
71 if (evsel->sample_id == NULL)
74 evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
75 if (evsel->id == NULL) {
76 xyarray__delete(evsel->sample_id);
77 evsel->sample_id = NULL;
84 int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
86 evsel->counts = zalloc((sizeof(*evsel->counts) +
88 return evsel->counts != NULL ? 0 : -ENOMEM;
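
The three allocators above size their tables per (cpu, thread) and return 0 on success or a negative errno. A sketch of calling them together before opening; the helper name evsel_alloc_storage is hypothetical, and ncpus/nthreads are assumed to come from the caller's cpu_map/thread_map.

#include "evsel.h"

/* Reserve fd, sample-id and counts storage up front. On failure the
 * partially allocated tables can be released with perf_evsel__free_fd()
 * and perf_evsel__free_id(), shown further down the listing. */
static int evsel_alloc_storage(struct perf_evsel *evsel, int ncpus, int nthreads)
{
        int err;

        err = perf_evsel__alloc_fd(evsel, ncpus, nthreads);
        if (err < 0)
                return err;

        err = perf_evsel__alloc_id(evsel, ncpus, nthreads);
        if (err < 0)
                return err;

        return perf_evsel__alloc_counts(evsel, ncpus);
}
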
91 void perf_evsel__free_fd(struct perf_evsel *evsel)
93 xyarray__delete(evsel->fd);
94 evsel->fd = NULL;
97 void perf_evsel__free_id(struct perf_evsel *evsel)
99 xyarray__delete(evsel->sample_id);
100 evsel->sample_id = NULL;
101 free(evsel->id);
102 evsel->id = NULL;
105 void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
111 close(FD(evsel, cpu, thread));
112 FD(evsel, cpu, thread) = -1;
116 void perf_evsel__exit(struct perf_evsel *evsel)
118 assert(list_empty(&evsel->node));
119 xyarray__delete(evsel->fd);
120 xyarray__delete(evsel->sample_id);
121 free(evsel->id);
124 void perf_evsel__delete(struct perf_evsel *evsel)
126 perf_evsel__exit(evsel);
127 close_cgroup(evsel->cgrp);
128 free(evsel->name);
129 free(evsel);
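
perf_evsel__exit() asserts the evsel has been unlinked from its list and frees the fd/id tables; perf_evsel__delete() additionally drops the cgroup reference and the name before freeing the evsel itself. A teardown sketch under those assumptions; the helper name evsel_destroy is hypothetical, and ncpus/nthreads are assumed to match what the evsel was opened with.

#include "evsel.h"

/* Close the per-(cpu, thread) perf fds, then free the evsel. Assumes
 * the evsel was successfully opened and is no longer on an evlist,
 * since perf_evsel__exit() asserts list_empty(&evsel->node). */
static void evsel_destroy(struct perf_evsel *evsel, int ncpus, int nthreads)
{
        perf_evsel__close_fd(evsel, ncpus, nthreads);
        perf_evsel__delete(evsel);
}
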
132 int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
138 if (FD(evsel, cpu, thread) < 0)
141 if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
144 if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
155 evsel->counts->cpu[cpu] = count;
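
A single-fd read sketch, assuming a __perf_evsel__read_on_cpu(evsel, cpu, thread, scale) parameter list; only the first argument is visible in the matched lines, and the helper name read_cpu0 is hypothetical.

#include "evsel.h"

/* Read the unscaled value counted on CPU 0, thread index 0. The counts
 * buffer is allocated lazily by the function above when it is NULL. */
static u64 read_cpu0(struct perf_evsel *evsel)
{
        if (__perf_evsel__read_on_cpu(evsel, 0, 0, false) < 0)
                return 0;

        return evsel->counts->cpu[0].val;
}
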
159 int __perf_evsel__read(struct perf_evsel *evsel,
164 struct perf_counts_values *aggr = &evsel->counts->aggr, count;
170 if (FD(evsel, cpu, thread) < 0)
173 if (readn(FD(evsel, cpu, thread),
185 evsel->counts->scaled = 0;
188 evsel->counts->scaled = -1;
194 evsel->counts->scaled = 1;
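
For the aggregating reader, a sketch assuming a __perf_evsel__read(evsel, ncpus, nthreads, scale) parameter list; only "evsel" is visible above. The fragment shows counts->scaled being set to 0, -1 and 1; the usual meanings (unscaled, never counted, multiplex-scaled) are assumed here, since the surrounding conditions are not part of the matched lines. The helper name print_total is hypothetical.

#include <stdio.h>
#include "evsel.h"

/* Aggregate the counter across all (cpu, thread) fds and print the
 * (possibly multiplex-scaled) total kept in counts->aggr. */
static void print_total(struct perf_evsel *evsel, int ncpus, int nthreads)
{
        struct perf_counts_values *aggr = &evsel->counts->aggr;

        if (__perf_evsel__read(evsel, ncpus, nthreads, true) < 0)
                return;

        if (evsel->counts->scaled == -1)
                printf("<not counted>\n");
        else
                printf("%llu\n", (unsigned long long)aggr->val);
}
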
203 static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
212 if (evsel->fd == NULL &&
213 perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
216 if (evsel->cgrp) {
218 pid = evsel->cgrp->fd;
226 if (!evsel->cgrp)
229 FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
233 if (FD(evsel, cpu, thread) < 0)
237 group_fd = FD(evsel, cpu, thread);
246 close(FD(evsel, cpu, thread));
247 FD(evsel, cpu, thread) = -1;
274 int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
285 return __perf_evsel__open(evsel, cpus, threads, group);
288 int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
291 return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group);
294 int perf_evsel__open_per_thread(struct perf_evsel *evsel,
297 return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group);
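
Finally, a system-wide open sketch. It assumes cpu_map__new(NULL) yields a map of all online CPUs and that perf_evsel__open_per_cpu() takes (evsel, cpus, group), as the pass-through to __perf_evsel__open() above suggests; both assumptions, the "false" group flag and the helper name open_system_wide go beyond the matched lines.

#include <errno.h>
#include "cpumap.h"
#include "evsel.h"

/* Open one evsel on every online CPU, outside any event group. The
 * cpu_map is kept alive so the caller can later read and close the
 * evsel with the same cpus->nr it was opened with. */
static int open_system_wide(struct perf_evsel *evsel)
{
        struct cpu_map *cpus = cpu_map__new(NULL); /* NULL: all online CPUs (assumption) */

        if (cpus == NULL)
                return -ENOMEM;

        return perf_evsel__open_per_cpu(evsel, cpus, false);
}
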