
Lines Matching refs:evlist

15 #include "evlist.h"
30 void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
36 INIT_HLIST_HEAD(&evlist->heads[i]);
37 INIT_LIST_HEAD(&evlist->entries);
38 perf_evlist__set_maps(evlist, cpus, threads);
39 evlist->workload.pid = -1;
44 struct perf_evlist *evlist = zalloc(sizeof(*evlist));
46 if (evlist != NULL)
47 perf_evlist__init(evlist, NULL, NULL);
49 return evlist;
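A minimal lifecycle sketch, assuming the perf tool's in-tree headers (util/evlist.h) and this era of the API: perf_evlist__new() pairs zalloc() with perf_evlist__init(), and perf_evlist__delete() further down undoes both steps.

    #include <errno.h>
    #include "evlist.h"

    static int evlist_lifecycle(void)
    {
        struct perf_evlist *evlist = perf_evlist__new();

        if (evlist == NULL)
            return -ENOMEM;

        /* ... add evsels, open, mmap, read ... */

        perf_evlist__delete(evlist); /* purges entries, frees mmap/pollfd */
        return 0;
    }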
54 * @evlist: selected event list
57 * and is_pos. For convenience, put a copy on evlist.
59 void perf_evlist__set_id_pos(struct perf_evlist *evlist)
61 struct perf_evsel *first = perf_evlist__first(evlist);
63 evlist->id_pos = first->id_pos;
64 evlist->is_pos = first->is_pos;
67 static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
71 list_for_each_entry(evsel, &evlist->entries, node)
74 perf_evlist__set_id_pos(evlist);
77 static void perf_evlist__purge(struct perf_evlist *evlist)
81 list_for_each_entry_safe(pos, n, &evlist->entries, node) {
86 evlist->nr_entries = 0;
89 void perf_evlist__exit(struct perf_evlist *evlist)
91 free(evlist->mmap);
92 free(evlist->pollfd);
93 evlist->mmap = NULL;
94 evlist->pollfd = NULL;
97 void perf_evlist__delete(struct perf_evlist *evlist)
99 perf_evlist__purge(evlist);
100 perf_evlist__exit(evlist);
101 free(evlist);
104 void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
106 list_add_tail(&entry->node, &evlist->entries);
107 if (!evlist->nr_entries++)
108 perf_evlist__set_id_pos(evlist);
111 void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
115 bool set_id_pos = !evlist->nr_entries;
117 list_splice_tail(list, &evlist->entries);
118 evlist->nr_entries += nr_entries;
120 perf_evlist__set_id_pos(evlist);
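The splice variant lets batch helpers such as perf_evlist__add_attrs() below build a private list first and attach it in one step. A hedged sketch of that pattern (error unwinding of 'head' elided; assumes util/evsel.h):

    static int add_batch(struct perf_evlist *evlist,
                         struct perf_event_attr *attrs, size_t nr_attrs)
    {
        struct perf_evsel *evsel;
        size_t i;
        LIST_HEAD(head);

        for (i = 0; i < nr_attrs; i++) {
            evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
            if (evsel == NULL)
                return -ENOMEM; /* real code also frees what sits on 'head' */
            list_add_tail(&evsel->node, &head);
        }

        perf_evlist__splice_list_tail(evlist, &head, nr_attrs);
        return 0;
    }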
137 void perf_evlist__set_leader(struct perf_evlist *evlist)
139 if (evlist->nr_entries) {
140 evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
141 __perf_evlist__set_leader(&evlist->entries);
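With more than one entry the whole list becomes a single group; otherwise nr_groups stays 0. Tools call this once after event parsing when grouping was requested, roughly:

    /* hedged: 'group' mirrors the --group option in builtin-record/stat */
    if (group)
        perf_evlist__set_leader(evlist);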
145 int perf_evlist__add_default(struct perf_evlist *evlist)
164 perf_evlist__add(evlist, evsel);
172 static int perf_evlist__add_attrs(struct perf_evlist *evlist,
180 evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
186 perf_evlist__splice_list_tail(evlist, &head, nr_attrs);
196 int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
204 return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
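perf_evlist__add_default() supplies the canonical cycles event; callers typically fall back to it only when the user named no events. A sketch of that guard (pr_err() from util/debug.h):

    if (evlist->nr_entries == 0 && perf_evlist__add_default(evlist) < 0) {
        pr_err("Not enough memory for event selector list\n");
        return -ENOMEM;
    }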
208 perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
212 list_for_each_entry(evsel, &evlist->entries, node) {
222 perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
227 list_for_each_entry(evsel, &evlist->entries, node) {
236 int perf_evlist__add_newtp(struct perf_evlist *evlist,
241 evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
246 perf_evlist__add(evlist, evsel);
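A hedged sketch of creating a tracepoint evsel and finding it again; tracepoint evsels are named "sys:name", which is what the by-name lookup above compares against:

    /* the handler argument is an opaque callback pointer in this era */
    if (perf_evlist__add_newtp(evlist, "sched", "sched_switch", NULL) < 0)
        return -1;

    evsel = perf_evlist__find_tracepoint_by_name(evlist, "sched:sched_switch");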
250 void perf_evlist__disable(struct perf_evlist *evlist)
254 int nr_cpus = cpu_map__nr(evlist->cpus);
255 int nr_threads = thread_map__nr(evlist->threads);
258 list_for_each_entry(pos, &evlist->entries, node) {
268 void perf_evlist__enable(struct perf_evlist *evlist)
272 int nr_cpus = cpu_map__nr(evlist->cpus);
273 int nr_threads = thread_map__nr(evlist->threads);
276 list_for_each_entry(pos, &evlist->entries, node) {
286 int perf_evlist__disable_event(struct perf_evlist *evlist,
294 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
295 for (thread = 0; thread < evlist->threads->nr; thread++) {
305 int perf_evlist__enable_event(struct perf_evlist *evlist,
313 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
314 for (thread = 0; thread < evlist->threads->nr; thread++) {
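Both walkers issue PERF_EVENT_IOC_DISABLE/ENABLE ioctls on every cpu x thread fd; the single-event variants do the same for one evsel. The usual bracket around a measurement phase:

    perf_evlist__disable(evlist);   /* stop all counters */
    /* ... reconfigure, or let the target settle ... */
    perf_evlist__enable(evlist);    /* resume counting */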
324 static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
326 int nr_cpus = cpu_map__nr(evlist->cpus);
327 int nr_threads = thread_map__nr(evlist->threads);
328 int nfds = nr_cpus * nr_threads * evlist->nr_entries;
329 evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
330 return evlist->pollfd != NULL ? 0 : -ENOMEM;
333 void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
336 evlist->pollfd[evlist->nr_fds].fd = fd;
337 evlist->pollfd[evlist->nr_fds].events = POLLIN;
338 evlist->nr_fds++;
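The pollfd array is sized for the worst case (every entry open on every cpu and thread); the mmap path registers one fd per ring buffer. Consumers then block on it, as builtin-record does:

    #include <poll.h>

    /* sleep until at least one ring buffer has data */
    poll(evlist->pollfd, evlist->nr_fds, -1);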
341 static void perf_evlist__id_hash(struct perf_evlist *evlist,
351 hlist_add_head(&sid->node, &evlist->heads[hash]);
354 void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
357 perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
361 static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
383 if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
398 perf_evlist__id_add(evlist, evsel, cpu, thread, id);
402 struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
409 head = &evlist->heads[hash];
418 struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
422 if (evlist->nr_entries == 1)
423 return perf_evlist__first(evlist);
425 sid = perf_evlist__id2sid(evlist, id);
429 if (!perf_evlist__sample_id_all(evlist))
430 return perf_evlist__first(evlist);
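The id hash lets the demux path map a sample id back to its evsel without walking the list. A small sketch, assuming the sample was parsed with PERF_SAMPLE_ID (or sample_id_all) in effect so that sample.id is valid:

    struct perf_evsel *evsel = perf_evlist__id2evsel(evlist, sample.id);

    if (evsel == NULL)
        return -1;      /* id belongs to no known event */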
435 static int perf_evlist__event2id(struct perf_evlist *evlist,
444 if (evlist->id_pos >= n)
446 *id = array[evlist->id_pos];
448 if (evlist->is_pos > n)
450 n -= evlist->is_pos;
456 static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
459 struct perf_evsel *first = perf_evlist__first(evlist);
465 if (evlist->nr_entries == 1)
472 if (perf_evlist__event2id(evlist, event, &id))
480 head = &evlist->heads[hash];
489 union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
491 struct perf_mmap *md = &evlist->mmap[idx];
497 if (evlist->overwrite) {
551 void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
553 if (!evlist->overwrite) {
554 struct perf_mmap *md = &evlist->mmap[idx];
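perf_evlist__mmap_read() returns the next event in ring idx, or NULL once it is drained; perf_evlist__mmap_consume() then advances the tail so the kernel can reuse the space (a no-op in overwrite mode). The canonical drain loop:

    union perf_event *event;

    while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
        /* ... deliver the event ... */
        perf_evlist__mmap_consume(evlist, idx);
    }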
561 static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
563 if (evlist->mmap[idx].base != NULL) {
564 munmap(evlist->mmap[idx].base, evlist->mmap_len);
565 evlist->mmap[idx].base = NULL;
569 void perf_evlist__munmap(struct perf_evlist *evlist)
573 for (i = 0; i < evlist->nr_mmaps; i++)
574 __perf_evlist__munmap(evlist, i);
576 free(evlist->mmap);
577 evlist->mmap = NULL;
580 static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
582 evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
583 if (cpu_map__empty(evlist->cpus))
584 evlist->nr_mmaps = thread_map__nr(evlist->threads);
585 evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
586 return evlist->mmap != NULL ? 0 : -ENOMEM;
589 static int __perf_evlist__mmap(struct perf_evlist *evlist,
592 evlist->mmap[idx].prev = 0;
593 evlist->mmap[idx].mask = mask;
594 evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
596 if (evlist->mmap[idx].base == MAP_FAILED) {
597 evlist->mmap[idx].base = NULL;
601 perf_evlist__add_pollfd(evlist, fd);
605 static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
609 int nr_cpus = cpu_map__nr(evlist->cpus);
610 int nr_threads = thread_map__nr(evlist->threads);
617 list_for_each_entry(evsel, &evlist->entries, node) {
622 if (__perf_evlist__mmap(evlist, cpu,
631 perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
641 __perf_evlist__munmap(evlist, cpu);
645 static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
649 int nr_threads = thread_map__nr(evlist->threads);
655 list_for_each_entry(evsel, &evlist->entries, node) {
660 if (__perf_evlist__mmap(evlist, thread,
669 perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
678 __perf_evlist__munmap(evlist, thread);
684 * @evlist - list of events
690 * struct perf_mmap *m = &evlist->mmap[cpu];
697 evlist, unsigned int pages,
701 const struct cpu_map *cpus = evlist->cpus;
702 const struct thread_map *threads = evlist->threads;
713 if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
716 if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
719 evlist->overwrite = overwrite;
720 evlist->mmap_len = (pages + 1) * page_size;
722 list_for_each_entry(evsel, &evlist->entries, node) {
730 return perf_evlist__mmap_per_thread(evlist, prot, mask);
732 return perf_evlist__mmap_per_cpu(evlist, prot, mask);
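Callers hand perf_evlist__mmap() a power-of-two count of data pages; mmap_len above adds one page for the control header, and the ring mask is derived from the data size. A minimal sketch:

    /* 128 data pages per ring; false = not an overwrite-mode mapping */
    if (perf_evlist__mmap(evlist, 128, false) < 0)
        return -errno;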
735 int perf_evlist__create_maps(struct perf_evlist *evlist,
738 evlist->threads = thread_map__new_str(target->pid, target->tid,
741 if (evlist->threads == NULL)
745 evlist->cpus = cpu_map__dummy_new();
747 evlist->cpus = cpu_map__dummy_new();
749 evlist->cpus = cpu_map__new(target->cpu_list);
751 if (evlist->cpus == NULL)
757 thread_map__delete(evlist->threads);
761 void perf_evlist__delete_maps(struct perf_evlist *evlist)
763 cpu_map__delete(evlist->cpus);
764 thread_map__delete(evlist->threads);
765 evlist->cpus = NULL;
766 evlist->threads = NULL;
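The maps come from the user's target description; per-task targets get a dummy cpu map, as seen above. A hedged sketch (the target struct is named perf_target in this era and is normally filled from command-line options; the field values here are hypothetical):

    struct perf_target target = {
        .uid = UINT_MAX,        /* "no uid filter" sentinel */
        .pid = "1234",          /* hypothetical pid list string */
    };

    if (perf_evlist__create_maps(evlist, &target) < 0)
        return -1;
    /* ... and on teardown, the symmetric call */
    perf_evlist__delete_maps(evlist);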
769 int perf_evlist__apply_filters(struct perf_evlist *evlist)
773 const int ncpus = cpu_map__nr(evlist->cpus),
774 nthreads = thread_map__nr(evlist->threads);
776 list_for_each_entry(evsel, &evlist->entries, node) {
788 int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
792 const int ncpus = cpu_map__nr(evlist->cpus),
793 nthreads = thread_map__nr(evlist->threads);
795 list_for_each_entry(evsel, &evlist->entries, node) {
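perf_evlist__apply_filters() pushes each evsel's own filter string after open, while perf_evlist__set_filter() attaches one string to every evsel. The string uses the kernel's tracepoint filter syntax:

    if (perf_evlist__set_filter(evlist, "common_pid != 0") < 0)
        return -1;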
804 bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
808 if (evlist->nr_entries == 1)
811 if (evlist->id_pos < 0 || evlist->is_pos < 0)
814 list_for_each_entry(pos, &evlist->entries, node) {
815 if (pos->id_pos != evlist->id_pos ||
816 pos->is_pos != evlist->is_pos)
823 u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
827 if (evlist->combined_sample_type)
828 return evlist->combined_sample_type;
830 list_for_each_entry(evsel, &evlist->entries, node)
831 evlist->combined_sample_type |= evsel->attr.sample_type;
833 return evlist->combined_sample_type;
836 u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
838 evlist->combined_sample_type = 0;
839 return __perf_evlist__combined_sample_type(evlist);
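The combined type is the OR of every evsel's attr.sample_type, cached on the evlist; the public wrapper clears the cache first. Typical use is feature probing, e.g.:

    #include <linux/perf_event.h>

    bool has_callchains = perf_evlist__combined_sample_type(evlist) &
                          PERF_SAMPLE_CALLCHAIN;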
842 bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
844 struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
848 list_for_each_entry_continue(pos, &evlist->entries, node) {
862 u64 perf_evlist__read_format(struct perf_evlist *evlist)
864 struct perf_evsel *first = perf_evlist__first(evlist);
868 u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
870 struct perf_evsel *first = perf_evlist__first(evlist);
901 bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
903 struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
905 list_for_each_entry_continue(pos, &evlist->entries, node) {
913 bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
915 struct perf_evsel *first = perf_evlist__first(evlist);
919 void perf_evlist__set_selected(struct perf_evlist *evlist,
922 evlist->selected = evsel;
925 void perf_evlist__close(struct perf_evlist *evlist)
928 int ncpus = cpu_map__nr(evlist->cpus);
929 int nthreads = thread_map__nr(evlist->threads);
931 list_for_each_entry_reverse(evsel, &evlist->entries, node)
935 int perf_evlist__open(struct perf_evlist *evlist)
940 perf_evlist__update_id_pos(evlist);
942 list_for_each_entry(evsel, &evlist->entries, node) {
943 err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
950 perf_evlist__close(evlist);
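perf_evlist__open() opens every entry across the full cpu x thread matrix and, on failure, closes whatever was already opened (in reverse) before returning a negative errno. Hence the usual call site:

    int err = perf_evlist__open(evlist);

    if (err < 0) {
        pr_err("Couldn't open events: %s\n", strerror(-err));
        return err;
    }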
955 int perf_evlist__prepare_workload(struct perf_evlist *evlist,
973 evlist->workload.pid = fork();
974 if (evlist->workload.pid < 0) {
979 if (!evlist->workload.pid) {
1009 evlist->threads->map[0] = evlist->workload.pid;
1022 evlist->workload.cork_fd = go_pipe[1];
1035 int perf_evlist__start_workload(struct perf_evlist *evlist)
1037 if (evlist->workload.cork_fd > 0) {
1043 ret = write(evlist->workload.cork_fd, &bf, 1);
1047 close(evlist->workload.cork_fd);
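prepare_workload forks early so the child's pid can be dropped into threads->map[0], then parks the child on a read() of the "go" pipe; start_workload writes one byte to cork_fd to release it into exec once counters are open and mmapped. A hedged sketch (the two trailing bools are pipe_output and want_signal in this era; other versions differ):

    if (perf_evlist__prepare_workload(evlist, &target, argv, false, true) < 0)
        return -1;

    /* ... perf_evlist__open(), perf_evlist__mmap() ... */

    perf_evlist__start_workload(evlist);    /* uncork the child */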
1054 int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
1057 struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
1064 size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
1069 list_for_each_entry(evsel, &evlist->entries, node) {
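parse_sample resolves the event to its evsel via the id lookup above, then decodes using that evsel's layout. A closing sketch:

    struct perf_sample sample;

    if (perf_evlist__parse_sample(evlist, event, &sample) < 0)
        return -1;
    /* fields such as sample.ip / sample.pid / sample.time are now valid,
       per the event's sample_type */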