/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <byteswap.h>
#include <linux/bitops.h>
#include <lk/debugfs.h>
#include <traceevent/event-parse.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <sys/resource.h>
#include "asm/bug.h"
#include "evsel.h"
#include "evlist.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "perf_regs.h"
#include "debug.h"

static struct {
	bool sample_id_all;
	bool exclude_guest;
	bool mmap2;
} perf_missing_features;

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))

int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}

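/*
 * Worked example (illustrative only): for sample_type = PERF_SAMPLE_IP |
 * PERF_SAMPLE_TID | PERF_SAMPLE_TIME, three bits of PERF_SAMPLE_MASK are
 * set, so the fixed part of each sample is 3 * sizeof(u64) = 24 bytes.
 * Variable-sized fields such as PERF_SAMPLE_RAW or PERF_SAMPLE_CALLCHAIN
 * are not covered by PERF_SAMPLE_MASK and are therefore not counted here.
 */
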
/**
 * __perf_evsel__calc_id_pos - calculate id_pos.
 * @sample_type: sample type
 *
 * This function returns the position of the event id (PERF_SAMPLE_ID or
 * PERF_SAMPLE_IDENTIFIER) in a sample event, i.e. in the array of struct
 * sample_event.
 */
static int __perf_evsel__calc_id_pos(u64 sample_type)
{
	int idx = 0;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 0;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_IP)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TID)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TIME)
		idx += 1;

	if (sample_type & PERF_SAMPLE_ADDR)
		idx += 1;

	return idx;
}

/**
 * __perf_evsel__calc_is_pos - calculate is_pos.
 * @sample_type: sample type
 *
 * This function returns the position (counting backwards) of the event id
 * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event, i.e. in
 * the id sample that is appended to non-sample events when sample_id_all is
 * used.
 */
static int __perf_evsel__calc_is_pos(u64 sample_type)
{
	int idx = 1;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 1;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_CPU)
		idx += 1;

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		idx += 1;

	return idx;
}

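/*
 * Worked example (illustrative only): for sample_type = PERF_SAMPLE_IP |
 * PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_ID | PERF_SAMPLE_CPU,
 * id_pos is 3 (in a sample the id follows the ip, tid and time words),
 * while is_pos is 2 (reading the appended id sample backwards, the id is
 * the second u64 from the end, just before the trailing cpu word).
 */
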
void perf_evsel__calc_id_pos(struct perf_evsel *evsel)
{
	evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type);
	evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type);
}

void hists__init(struct hists *hists)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
}

void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
				  enum perf_event_sample_format bit)
{
	if (!(evsel->attr.sample_type & bit)) {
		evsel->attr.sample_type |= bit;
		evsel->sample_size += sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}

void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
				    enum perf_event_sample_format bit)
{
	if (evsel->attr.sample_type & bit) {
		evsel->attr.sample_type &= ~bit;
		evsel->sample_size -= sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}

void perf_evsel__set_sample_id(struct perf_evsel *evsel,
			       bool can_sample_identifier)
{
	if (can_sample_identifier) {
		perf_evsel__reset_sample_bit(evsel, ID);
		perf_evsel__set_sample_bit(evsel, IDENTIFIER);
	} else {
		perf_evsel__set_sample_bit(evsel, ID);
	}
	evsel->attr.read_format |= PERF_FORMAT_ID;
}

void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx	   = idx;
	evsel->attr	   = *attr;
	evsel->leader	   = evsel;
	INIT_LIST_HEAD(&evsel->node);
	hists__init(&evsel->hists);
	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
	perf_evsel__calc_id_pos(evsel);
}

struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	return evsel;
}

struct event_format *event_format__new(const char *sys, const char *name)
{
	int fd, n;
	char *filename;
	void *bf = NULL, *nbf;
	size_t size = 0, alloc_size = 0;
	struct event_format *format = NULL;

	if (asprintf(&filename, "%s/%s/%s/format", tracing_events_path, sys, name) < 0)
		goto out;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out_free_filename;

	do {
		if (size == alloc_size) {
			alloc_size += BUFSIZ;
			nbf = realloc(bf, alloc_size);
			if (nbf == NULL)
				goto out_free_bf;
			bf = nbf;
		}

		n = read(fd, bf + size, alloc_size - size);
		if (n < 0)
			goto out_free_bf;
		size += n;
	} while (n > 0);

	pevent_parse_format(&format, bf, size, sys);

out_free_bf:
	free(bf);
	close(fd);
out_free_filename:
	free(filename);
out:
	return format;
}

struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL) {
		struct perf_event_attr attr = {
			.type	       = PERF_TYPE_TRACEPOINT,
			.sample_type   = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					  PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		};

		if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
			goto out_free;

		evsel->tp_format = event_format__new(sys, name);
		if (evsel->tp_format == NULL)
			goto out_free;

		event_attr_init(&attr);
		attr.config = evsel->tp_format->id;
		attr.sample_period = 1;
		perf_evsel__init(evsel, &attr, idx);
	}

	return evsel;

out_free:
	free(evsel->name);
	free(evsel);
	return NULL;
}

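/*
 * Minimal usage sketch (illustrative only, error handling elided): create
 * an evsel for the sched:sched_switch tracepoint and release it again. The
 * tracepoint name is just an example; any sys/name pair with a format file
 * under tracing_events_path works the same way.
 *
 *	struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch", 0);
 *
 *	if (evsel != NULL) {
 *		...
 *		perf_evsel__delete(evsel);
 *	}
 */
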
const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};

static const char *__perf_evsel__hw_name(u64 config)
{
	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
		return perf_evsel__hw_names[config];

	return "unknown-hardware";
}

static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->attr;
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod)	do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\
		} } while(0)

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon - 1] = ':';
	return r;
}

static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"cpu-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
	"dummy",
};

static const char *__perf_evsel__sw_name(u64 config)
{
	if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
		return perf_evsel__sw_names[config];
	return "unknown-software";
}

static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
	int r;

	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

	if (type & HW_BREAKPOINT_R)
		r += scnprintf(bf + r, size - r, "r");

	if (type & HW_BREAKPOINT_W)
		r += scnprintf(bf + r, size - r, "w");

	if (type & HW_BREAKPOINT_X)
		r += scnprintf(bf + r, size - r, "x");

	return r;
}

static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	struct perf_event_attr *attr = &evsel->attr;
	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				[PERF_EVSEL__MAX_ALIASES] = {
 { "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
 { "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
 { "LLC",	"L2",							},
 { "dTLB",	"d-tlb",	"Data-TLB",				},
 { "iTLB",	"i-tlb",	"Instruction-TLB",			},
 { "branch",	"branches",	"bpu",		"btb",		"bpc",	},
 { "node",								},
};

const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_EVSEL__MAX_ALIASES] = {
 { "load",	"loads",	"read",					},
 { "store",	"stores",	"write",				},
 { "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
};

const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				       [PERF_EVSEL__MAX_ALIASES] = {
 { "refs",	"Reference",	"ops",		"access",		},
 { "misses",	"miss",							},
};

#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
 [C(L1D)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(L1I)]	= (CACHE_READ | CACHE_PREFETCH),
 [C(LL)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(DTLB)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(ITLB)]	= (CACHE_READ),
 [C(BPU)]	= (CACHE_READ),
 [C(NODE)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};

bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
{
	if (perf_evsel__hw_cache_stat[type] & COP(op))
		return true;	/* valid */
	else
		return false;	/* invalid */
}

int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
				 perf_evsel__hw_cache_op[op][0],
				 perf_evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
			 perf_evsel__hw_cache_op[op][1]);
}

static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
	u8 op, result, type = (config >>  0) & 0xff;
	const char *err = "unknown-ext-hardware-cache-type";

	if (type >= PERF_COUNT_HW_CACHE_MAX)
		goto out_err;

	op = (config >>  8) & 0xff;
	err = "unknown-ext-hardware-cache-op";
	if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
		goto out_err;

	result = (config >> 16) & 0xff;
	err = "unknown-ext-hardware-cache-result";
	if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		goto out_err;

	err = "invalid-cache";
	if (!perf_evsel__is_cache_op_valid(type, op))
		goto out_err;

	return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
	return scnprintf(bf, size, "%s", err);
}

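/*
 * Worked example (illustrative only): the perf ABI packs HW cache events as
 * config = type | (op << 8) | (result << 16), so L1d load misses are
 * PERF_COUNT_HW_CACHE_L1D | (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 * (PERF_COUNT_HW_CACHE_RESULT_MISS << 16), which the function above renders
 * as "L1-dcache-load-misses". With result == 0 (ACCESS) the second alias of
 * the op is used instead, giving "L1-dcache-loads".
 */
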
static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

const char *perf_evsel__name(struct perf_evsel *evsel)
{
	char bf[128];

	if (evsel->name)
		return evsel->name;

	switch (evsel->attr.type) {
	case PERF_TYPE_RAW:
		perf_evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		perf_evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		perf_evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		perf_evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
			  evsel->attr.type);
		break;
	}

	evsel->name = strdup(bf);

	return evsel->name ?: "unknown";
}

const char *perf_evsel__group_name(struct perf_evsel *evsel)
{
	return evsel->group_name ?: "anon group";
}

int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
{
	int ret;
	struct perf_evsel *pos;
	const char *group_name = perf_evsel__group_name(evsel);

	ret = scnprintf(buf, size, "%s", group_name);

	ret += scnprintf(buf + ret, size - ret, " { %s",
			 perf_evsel__name(evsel));

	for_each_group_member(pos, evsel)
		ret += scnprintf(buf + ret, size - ret, ", %s",
				 perf_evsel__name(pos));

	ret += scnprintf(buf + ret, size - ret, " }");

	return ret;
}

/*
 * The enable_on_exec/disabled value strategy:
 *
 *  1) For any type of traced program:
 *    - all independent events and group leaders are disabled
 *    - all group members are enabled
 *
 *     Group members are ruled by group leaders. They need to
 *     be enabled, because the group scheduling relies on that.
 *
 *  2) For traced programs executed by perf:
 *     - all independent events and group leaders have
 *       enable_on_exec set
 *     - we don't specifically enable or disable any event during
 *       the record command
 *
 *     Independent events and group leaders are initially disabled
 *     and get enabled by exec. Group members are ruled by group
 *     leaders as stated in 1).
 *
 *  3) For traced programs attached by perf (pid/tid):
 *     - we specifically enable or disable all events during
 *       the record command
 *
 *     When attaching events to an already running traced program we
 *     enable/disable events specifically, as there's no
 *     initial traced exec call.
 */
void perf_evsel__config(struct perf_evsel *evsel,
			struct perf_record_opts *opts)
{
	struct perf_evsel *leader = evsel->leader;
	struct perf_event_attr *attr = &evsel->attr;
	int track = !evsel->idx; /* only the first counter needs these */

	attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;

	perf_evsel__set_sample_bit(evsel, IP);
	perf_evsel__set_sample_bit(evsel, TID);

	if (evsel->sample_read) {
		perf_evsel__set_sample_bit(evsel, READ);

		/*
		 * We need ID even in the case of a single event, because
		 * PERF_SAMPLE_READ processes ID specific data.
		 */
		perf_evsel__set_sample_id(evsel, false);

		/*
		 * Apply the group format only if we belong to a group
		 * with more than one member.
		 */
		if (leader->nr_members > 1) {
			attr->read_format |= PERF_FORMAT_GROUP;
			attr->inherit = 0;
		}
	}

	/*
	 * We default some events to a default period of 1. But keep
	 * it a weak assumption overridable by the user.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			perf_evsel__set_sample_bit(evsel, PERIOD);
			attr->freq		= 1;
			attr->sample_freq	= opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	/*
	 * Disable sampling for all group members other than the
	 * leader in case the leader 'leads' the sampling.
	 */
	if ((leader != evsel) && leader->sample_read) {
		attr->sample_freq   = 0;
		attr->sample_period = 0;
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat)
		attr->inherit_stat = 1;

	if (opts->sample_address) {
		perf_evsel__set_sample_bit(evsel, ADDR);
		attr->mmap_data = track;
	}

	if (opts->call_graph) {
		perf_evsel__set_sample_bit(evsel, CALLCHAIN);

		if (opts->call_graph == CALLCHAIN_DWARF) {
			perf_evsel__set_sample_bit(evsel, REGS_USER);
			perf_evsel__set_sample_bit(evsel, STACK_USER);
			attr->sample_regs_user = PERF_REGS_MASK;
			attr->sample_stack_user = opts->stack_dump_size;
			attr->exclude_callchain_user = 1;
		}
	}

	if (perf_target__has_cpu(&opts->target))
		perf_evsel__set_sample_bit(evsel, CPU);

	if (opts->period)
		perf_evsel__set_sample_bit(evsel, PERIOD);

	if (!perf_missing_features.sample_id_all &&
	    (opts->sample_time || !opts->no_inherit ||
	     perf_target__has_cpu(&opts->target)))
		perf_evsel__set_sample_bit(evsel, TIME);

	if (opts->raw_samples) {
		perf_evsel__set_sample_bit(evsel, TIME);
		perf_evsel__set_sample_bit(evsel, RAW);
		perf_evsel__set_sample_bit(evsel, CPU);
	}

	if (opts->sample_address)
		attr->sample_type	|= PERF_SAMPLE_DATA_SRC;

	if (opts->no_delay) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}
	if (opts->branch_stack) {
		perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type = opts->branch_stack;
	}

	if (opts->sample_weight)
		attr->sample_type	|= PERF_SAMPLE_WEIGHT;

	attr->mmap  = track;
	attr->comm  = track;

	/*
	 * XXX see the function comment above
	 *
	 * Disabling only independent events or group leaders,
	 * keeping group members enabled.
	 */
	if (perf_evsel__is_group_leader(evsel))
		attr->disabled = 1;

	/*
	 * Setting enable_on_exec for independent events and
	 * group leaders for programs traced (executed) by perf.
	 */
	if (perf_target__none(&opts->target) && perf_evsel__is_group_leader(evsel))
		attr->enable_on_exec = 1;
}

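/*
 * Worked example (illustrative only) of the strategy above: for
 * `perf record ./workload` (case 2, an empty target) a group leader ends up
 * with attr->disabled = 1 and attr->enable_on_exec = 1, so counting starts
 * at the workload's exec. When attaching with pid/tid (case 3) the leader
 * is still created disabled but enable_on_exec stays 0, and the record
 * command enables the events explicitly, e.g. via PERF_EVENT_IOC_ENABLE
 * (see perf_evsel__enable() below). Group members are never disabled here;
 * their leaders gate them.
 */
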
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}

static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthreads,
			  int ioc,  void *arg)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			int fd = FD(evsel, cpu, thread),
			    err = ioctl(fd, ioc, arg);

			if (err)
				return err;
		}
	}

	return 0;
}

int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
			   const char *filter)
{
	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
				     PERF_EVENT_IOC_SET_FILTER,
				     (void *)filter);
}

int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
				     PERF_EVENT_IOC_ENABLE,
				     0);
}

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}

void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus)
{
	memset(evsel->counts, 0, (sizeof(*evsel->counts) +
				 (ncpus * sizeof(struct perf_counts_values))));
}

int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}

void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	free(evsel->id);
	evsel->id = NULL;
}

void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}

void perf_evsel__free_counts(struct perf_evsel *evsel)
{
	free(evsel->counts);
}

void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	perf_evsel__free_fd(evsel);
	perf_evsel__free_id(evsel);
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	close_cgroup(evsel->cgrp);
	free(evsel->group_name);
	if (evsel->tp_format)
		pevent_free_format(evsel->tp_format);
	free(evsel->name);
	free(evsel);
}

static inline void compute_deltas(struct perf_evsel *evsel,
				  int cpu,
				  struct perf_counts_values *count)
{
	struct perf_counts_values tmp;

	if (!evsel->prev_raw_counts)
		return;

	if (cpu == -1) {
		tmp = evsel->prev_raw_counts->aggr;
		evsel->prev_raw_counts->aggr = *count;
	} else {
		tmp = evsel->prev_raw_counts->cpu[cpu];
		evsel->prev_raw_counts->cpu[cpu] = *count;
	}

	count->val = count->val - tmp.val;
	count->ena = count->ena - tmp.ena;
	count->run = count->run - tmp.run;
}

int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	compute_deltas(evsel, cpu, &count);

	if (scale) {
		if (count.run == 0)
			count.val = 0;
		else if (count.run < count.ena)
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
	return 0;
}

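/*
 * Worked example (illustrative only) of the multiplexing scale-up above: if
 * an event counted val = 1000 while scheduled on the PMU for run = 50ms out
 * of ena = 100ms enabled, the estimated total is 1000 * 100 / 50 + 0.5 =
 * 2000. run == 0 means the event never got on the PMU, so the value is
 * zeroed rather than extrapolated.
 */
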
int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	compute_deltas(evsel, -1, aggr);

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}

static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
{
	struct perf_evsel *leader = evsel->leader;
	int fd;

	if (perf_evsel__is_group_leader(evsel))
		return -1;

	/*
	 * The leader must already be processed/opened;
	 * if not, it's a bug.
	 */
	BUG_ON(!leader->fd);

	fd = FD(leader, cpu, thread);
	BUG_ON(fd == -1);

	return fd;
}

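/*
 * Minimal sketch (illustrative only) of the group_fd contract with
 * perf_event_open(2): a group leader is opened with group_fd = -1 and each
 * member passes the leader's fd for the same cpu/thread, which is exactly
 * what __perf_evsel__open() below does with the value returned here.
 *
 *	leader_fd = sys_perf_event_open(&leader->attr, pid, cpu, -1, flags);
 *	member_fd = sys_perf_event_open(&member->attr, pid, cpu, leader_fd, flags);
 */
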
#define __PRINT_ATTR(fmt, cast, field)  \
	fprintf(fp, "  %-19s "fmt"\n", #field, cast attr->field)

#define PRINT_ATTR_U32(field)  __PRINT_ATTR("%u" , , field)
#define PRINT_ATTR_X32(field)  __PRINT_ATTR("%#x", , field)
#define PRINT_ATTR_U64(field)  __PRINT_ATTR("%" PRIu64, (uint64_t), field)
#define PRINT_ATTR_X64(field)  __PRINT_ATTR("%#"PRIx64, (uint64_t), field)

#define PRINT_ATTR2N(name1, field1, name2, field2)	\
	fprintf(fp, "  %-19s %u    %-19s %u\n",		\
	name1, attr->field1, name2, attr->field2)

#define PRINT_ATTR2(field1, field2) \
	PRINT_ATTR2N(#field1, field1, #field2, field2)

static size_t perf_event_attr__fprintf(struct perf_event_attr *attr, FILE *fp)
{
	size_t ret = 0;

	ret += fprintf(fp, "%.60s\n", graph_dotted_line);
	ret += fprintf(fp, "perf_event_attr:\n");

	ret += PRINT_ATTR_U32(type);
	ret += PRINT_ATTR_U32(size);
	ret += PRINT_ATTR_X64(config);
	ret += PRINT_ATTR_U64(sample_period);
	ret += PRINT_ATTR_U64(sample_freq);
	ret += PRINT_ATTR_X64(sample_type);
	ret += PRINT_ATTR_X64(read_format);

	ret += PRINT_ATTR2(disabled, inherit);
	ret += PRINT_ATTR2(pinned, exclusive);
	ret += PRINT_ATTR2(exclude_user, exclude_kernel);
	ret += PRINT_ATTR2(exclude_hv, exclude_idle);
	ret += PRINT_ATTR2(mmap, comm);
	ret += PRINT_ATTR2(freq, inherit_stat);
	ret += PRINT_ATTR2(enable_on_exec, task);
	ret += PRINT_ATTR2(watermark, precise_ip);
	ret += PRINT_ATTR2(mmap_data, sample_id_all);
	ret += PRINT_ATTR2(exclude_host, exclude_guest);
	ret += PRINT_ATTR2N("excl.callchain_kern", exclude_callchain_kernel,
			    "excl.callchain_user", exclude_callchain_user);

	ret += PRINT_ATTR_U32(wakeup_events);
	ret += PRINT_ATTR_U32(wakeup_watermark);
	ret += PRINT_ATTR_X32(bp_type);
	ret += PRINT_ATTR_X64(bp_addr);
	ret += PRINT_ATTR_X64(config1);
	ret += PRINT_ATTR_U64(bp_len);
	ret += PRINT_ATTR_X64(config2);
	ret += PRINT_ATTR_X64(branch_sample_type);
	ret += PRINT_ATTR_X64(sample_regs_user);
	ret += PRINT_ATTR_U32(sample_stack_user);

	ret += fprintf(fp, "%.60s\n", graph_dotted_line);

	return ret;
}

static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads)
{
	int cpu, thread;
	unsigned long flags = 0;
	int pid = -1, err;
	enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -ENOMEM;

	if (evsel->cgrp) {
		flags = PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

fallback_missing_features:
	if (perf_missing_features.mmap2)
		evsel->attr.mmap2 = 0;
	if (perf_missing_features.exclude_guest)
		evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
retry_sample_id:
	if (perf_missing_features.sample_id_all)
		evsel->attr.sample_id_all = 0;

	if (verbose >= 2)
		perf_event_attr__fprintf(&evsel->attr, stderr);

	for (cpu = 0; cpu < cpus->nr; cpu++) {

		for (thread = 0; thread < threads->nr; thread++) {
			int group_fd;

			if (!evsel->cgrp)
				pid = threads->map[thread];

			group_fd = get_group_fd(evsel, cpu, thread);
retry_open:
			pr_debug2("perf_event_open: pid %d  cpu %d  group_fd %d  flags %#lx\n",
				  pid, cpus->map[cpu], group_fd, flags);

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0) {
				err = -errno;
				goto try_fallback;
			}
			set_rlimit = NO_CHANGE;
		}
	}

	return 0;

try_fallback:
	/*
	 * perf stat needs between 5 and 22 fds per CPU. When we run out
	 * of them try to increase the limits.
	 */
	if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
		struct rlimit l;
		int old_errno = errno;

		if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
			if (set_rlimit == NO_CHANGE)
				l.rlim_cur = l.rlim_max;
			else {
				l.rlim_cur = l.rlim_max + 1000;
				l.rlim_max = l.rlim_cur;
			}
			if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
				set_rlimit++;
				errno = old_errno;
				goto retry_open;
			}
		}
		errno = old_errno;
	}

	if (err != -EINVAL || cpu > 0 || thread > 0)
		goto out_close;

	if (!perf_missing_features.mmap2 && evsel->attr.mmap2) {
		perf_missing_features.mmap2 = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.exclude_guest &&
		   (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
		perf_missing_features.exclude_guest = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.sample_id_all) {
		perf_missing_features.sample_id_all = true;
		goto retry_sample_id;
	}

out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return err;
}

void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel, ncpus, nthreads);
	perf_evsel__free_fd(evsel);
	evsel->fd = NULL;
}

static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr	= 1,
	.cpus	= { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};

int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads);
}

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
}

static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
				       const union perf_event *event,
				       struct perf_sample *sample)
{
	u64 type = evsel->attr.sample_type;
	const u64 *array = event->sample.array;
	bool swapped = evsel->needs_swap;
	union u64_swap u;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		sample->cpu = u.val32[0];
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		sample->pid = u.val32[0];
		sample->tid = u.val32[1];
	}

	return 0;
}

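/*
 * Illustrative layout (not taken from the kernel headers) of the id sample
 * this function walks backwards through, for sample_id_all with
 * sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_ID |
 * PERF_SAMPLE_CPU:
 *
 *	{ u32 pid, tid } { u64 time } { u64 id } { u32 cpu, res } <end of event>
 *
 * Starting at the last u64 and decrementing mirrors how the kernel appends
 * these fields in the forward order tid, time, id, stream_id, cpu,
 * identifier.
 */
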
   1221 static inline bool overflow(const void *endp, u16 max_size, const void *offset,
   1222 			    u64 size)
   1223 {
   1224 	return size > max_size || offset + size > endp;
   1225 }
   1226 
   1227 #define OVERFLOW_CHECK(offset, size, max_size)				\
   1228 	do {								\
   1229 		if (overflow(endp, (max_size), (offset), (size)))	\
   1230 			return -EFAULT;					\
   1231 	} while (0)
   1232 
   1233 #define OVERFLOW_CHECK_u64(offset) \
   1234 	OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
   1235 
   1236 int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
   1237 			     struct perf_sample *data)
   1238 {
   1239 	u64 type = evsel->attr.sample_type;
   1240 	bool swapped = evsel->needs_swap;
   1241 	const u64 *array;
   1242 	u16 max_size = event->header.size;
   1243 	const void *endp = (void *)event + max_size;
   1244 	u64 sz;
   1245 
   1246 	/*
   1247 	 * used for cross-endian analysis. See git commit 65014ab3
   1248 	 * for why this goofiness is needed.
   1249 	 */
   1250 	union u64_swap u;
   1251 
   1252 	memset(data, 0, sizeof(*data));
   1253 	data->cpu = data->pid = data->tid = -1;
   1254 	data->stream_id = data->id = data->time = -1ULL;
   1255 	data->period = 1;
   1256 	data->weight = 0;
   1257 
   1258 	if (event->header.type != PERF_RECORD_SAMPLE) {
   1259 		if (!evsel->attr.sample_id_all)
   1260 			return 0;
   1261 		return perf_evsel__parse_id_sample(evsel, event, data);
   1262 	}
   1263 
   1264 	array = event->sample.array;
   1265 
   1266 	/*
   1267 	 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
   1268 	 * up to PERF_SAMPLE_PERIOD.  After that overflow() must be used to
   1269 	 * check the format does not go past the end of the event.
   1270 	 */
   1271 	if (evsel->sample_size + sizeof(event->header) > event->header.size)
   1272 		return -EFAULT;
   1273 
   1274 	data->id = -1ULL;
   1275 	if (type & PERF_SAMPLE_IDENTIFIER) {
   1276 		data->id = *array;
   1277 		array++;
   1278 	}
   1279 
   1280 	if (type & PERF_SAMPLE_IP) {
   1281 		data->ip = *array;
   1282 		array++;
   1283 	}
   1284 
   1285 	if (type & PERF_SAMPLE_TID) {
   1286 		u.val64 = *array;
   1287 		if (swapped) {
   1288 			/* undo swap of u64, then swap on individual u32s */
   1289 			u.val64 = bswap_64(u.val64);
   1290 			u.val32[0] = bswap_32(u.val32[0]);
   1291 			u.val32[1] = bswap_32(u.val32[1]);
   1292 		}
   1293 
   1294 		data->pid = u.val32[0];
   1295 		data->tid = u.val32[1];
   1296 		array++;
   1297 	}
   1298 
   1299 	if (type & PERF_SAMPLE_TIME) {
   1300 		data->time = *array;
   1301 		array++;
   1302 	}
   1303 
   1304 	data->addr = 0;
   1305 	if (type & PERF_SAMPLE_ADDR) {
   1306 		data->addr = *array;
   1307 		array++;
   1308 	}
   1309 
   1310 	if (type & PERF_SAMPLE_ID) {
   1311 		data->id = *array;
   1312 		array++;
   1313 	}
   1314 
   1315 	if (type & PERF_SAMPLE_STREAM_ID) {
   1316 		data->stream_id = *array;
   1317 		array++;
   1318 	}
   1319 
   1320 	if (type & PERF_SAMPLE_CPU) {
   1321 
   1322 		u.val64 = *array;
   1323 		if (swapped) {
   1324 			/* undo swap of u64, then swap on individual u32s */
   1325 			u.val64 = bswap_64(u.val64);
   1326 			u.val32[0] = bswap_32(u.val32[0]);
   1327 		}
   1328 
   1329 		data->cpu = u.val32[0];
   1330 		array++;
   1331 	}
   1332 
   1333 	if (type & PERF_SAMPLE_PERIOD) {
   1334 		data->period = *array;
   1335 		array++;
   1336 	}
   1337 
   1338 	if (type & PERF_SAMPLE_READ) {
   1339 		u64 read_format = evsel->attr.read_format;
   1340 
   1341 		OVERFLOW_CHECK_u64(array);
   1342 		if (read_format & PERF_FORMAT_GROUP)
   1343 			data->read.group.nr = *array;
   1344 		else
   1345 			data->read.one.value = *array;
   1346 
   1347 		array++;
   1348 
   1349 		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
   1350 			OVERFLOW_CHECK_u64(array);
   1351 			data->read.time_enabled = *array;
   1352 			array++;
   1353 		}
   1354 
   1355 		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
   1356 			OVERFLOW_CHECK_u64(array);
   1357 			data->read.time_running = *array;
   1358 			array++;
   1359 		}
   1360 
   1361 		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
   1362 		if (read_format & PERF_FORMAT_GROUP) {
   1363 			const u64 max_group_nr = UINT64_MAX /
   1364 					sizeof(struct sample_read_value);
   1365 
   1366 			if (data->read.group.nr > max_group_nr)
   1367 				return -EFAULT;
   1368 			sz = data->read.group.nr *
   1369 			     sizeof(struct sample_read_value);
   1370 			OVERFLOW_CHECK(array, sz, max_size);
   1371 			data->read.group.values =
   1372 					(struct sample_read_value *)array;
   1373 			array = (void *)array + sz;
   1374 		} else {
   1375 			OVERFLOW_CHECK_u64(array);
   1376 			data->read.one.id = *array;
   1377 			array++;
   1378 		}
   1379 	}
   1380 
   1381 	if (type & PERF_SAMPLE_CALLCHAIN) {
   1382 		const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);
   1383 
   1384 		OVERFLOW_CHECK_u64(array);
   1385 		data->callchain = (struct ip_callchain *)array++;
   1386 		if (data->callchain->nr > max_callchain_nr)
   1387 			return -EFAULT;
   1388 		sz = data->callchain->nr * sizeof(u64);
   1389 		OVERFLOW_CHECK(array, sz, max_size);
   1390 		array = (void *)array + sz;
   1391 	}
   1392 
   1393 	if (type & PERF_SAMPLE_RAW) {
   1394 		OVERFLOW_CHECK_u64(array);
   1395 		u.val64 = *array;
   1396 		if (WARN_ONCE(swapped,
   1397 			      "Endianness of raw data not corrected!\n")) {
   1398 			/* undo swap of u64, then swap on individual u32s */
   1399 			u.val64 = bswap_64(u.val64);
   1400 			u.val32[0] = bswap_32(u.val32[0]);
   1401 			u.val32[1] = bswap_32(u.val32[1]);
   1402 		}
   1403 		data->raw_size = u.val32[0];
   1404 		array = (void *)array + sizeof(u32);
   1405 
   1406 		OVERFLOW_CHECK(array, data->raw_size, max_size);
   1407 		data->raw_data = (void *)array;
   1408 		array = (void *)array + data->raw_size;
   1409 	}
   1410 
   1411 	if (type & PERF_SAMPLE_BRANCH_STACK) {
   1412 		const u64 max_branch_nr = UINT64_MAX /
   1413 					  sizeof(struct branch_entry);
   1414 
   1415 		OVERFLOW_CHECK_u64(array);
   1416 		data->branch_stack = (struct branch_stack *)array++;
   1417 
   1418 		if (data->branch_stack->nr > max_branch_nr)
   1419 			return -EFAULT;
   1420 		sz = data->branch_stack->nr * sizeof(struct branch_entry);
   1421 		OVERFLOW_CHECK(array, sz, max_size);
   1422 		array = (void *)array + sz;
   1423 	}
   1424 
   1425 	if (type & PERF_SAMPLE_REGS_USER) {
   1426 		OVERFLOW_CHECK_u64(array);
   1427 		data->user_regs.abi = *array;
   1428 		array++;
   1429 
   1430 		if (data->user_regs.abi) {
   1431 			u64 regs_user = evsel->attr.sample_regs_user;
   1432 
   1433 			sz = hweight_long(regs_user) * sizeof(u64);
   1434 			OVERFLOW_CHECK(array, sz, max_size);
   1435 			data->user_regs.regs = (u64 *)array;
   1436 			array = (void *)array + sz;
   1437 		}
   1438 	}
   1439 
   1440 	if (type & PERF_SAMPLE_STACK_USER) {
   1441 		OVERFLOW_CHECK_u64(array);
   1442 		sz = *array++;
   1443 
   1444 		data->user_stack.offset = ((char *)(array - 1)
   1445 					  - (char *) event);
   1446 
   1447 		if (!sz) {
   1448 			data->user_stack.size = 0;
   1449 		} else {
   1450 			OVERFLOW_CHECK(array, sz, max_size);
   1451 			data->user_stack.data = (char *)array;
   1452 			array = (void *)array + sz;
   1453 			OVERFLOW_CHECK_u64(array);
   1454 			data->user_stack.size = *array++;
   1455 		}
   1456 	}
   1457 
   1458 	data->weight = 0;
   1459 	if (type & PERF_SAMPLE_WEIGHT) {
   1460 		OVERFLOW_CHECK_u64(array);
   1461 		data->weight = *array;
   1462 		array++;
   1463 	}
   1464 
   1465 	data->data_src = PERF_MEM_DATA_SRC_NONE;
   1466 	if (type & PERF_SAMPLE_DATA_SRC) {
   1467 		OVERFLOW_CHECK_u64(array);
   1468 		data->data_src = *array;
   1469 		array++;
   1470 	}
   1471 
   1472 	return 0;
   1473 }
   1474 
   1475 size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
   1476 				     u64 sample_regs_user, u64 read_format)
   1477 {
   1478 	size_t sz, result = sizeof(struct sample_event);
   1479 
   1480 	if (type & PERF_SAMPLE_IDENTIFIER)
   1481 		result += sizeof(u64);
   1482 
   1483 	if (type & PERF_SAMPLE_IP)
   1484 		result += sizeof(u64);
   1485 
   1486 	if (type & PERF_SAMPLE_TID)
   1487 		result += sizeof(u64);
   1488 
   1489 	if (type & PERF_SAMPLE_TIME)
   1490 		result += sizeof(u64);
   1491 
   1492 	if (type & PERF_SAMPLE_ADDR)
   1493 		result += sizeof(u64);
   1494 
   1495 	if (type & PERF_SAMPLE_ID)
   1496 		result += sizeof(u64);
   1497 
   1498 	if (type & PERF_SAMPLE_STREAM_ID)
   1499 		result += sizeof(u64);
   1500 
   1501 	if (type & PERF_SAMPLE_CPU)
   1502 		result += sizeof(u64);
   1503 
   1504 	if (type & PERF_SAMPLE_PERIOD)
   1505 		result += sizeof(u64);
   1506 
   1507 	if (type & PERF_SAMPLE_READ) {
   1508 		result += sizeof(u64);
   1509 		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
   1510 			result += sizeof(u64);
   1511 		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
   1512 			result += sizeof(u64);
   1513 		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
   1514 		if (read_format & PERF_FORMAT_GROUP) {
   1515 			sz = sample->read.group.nr *
   1516 			     sizeof(struct sample_read_value);
   1517 			result += sz;
   1518 		} else {
   1519 			result += sizeof(u64);
   1520 		}
   1521 	}
   1522 
   1523 	if (type & PERF_SAMPLE_CALLCHAIN) {
   1524 		sz = (sample->callchain->nr + 1) * sizeof(u64);
   1525 		result += sz;
   1526 	}
   1527 
   1528 	if (type & PERF_SAMPLE_RAW) {
   1529 		result += sizeof(u32);
   1530 		result += sample->raw_size;
   1531 	}
   1532 
   1533 	if (type & PERF_SAMPLE_BRANCH_STACK) {
   1534 		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
   1535 		sz += sizeof(u64);
   1536 		result += sz;
   1537 	}
   1538 
   1539 	if (type & PERF_SAMPLE_REGS_USER) {
   1540 		if (sample->user_regs.abi) {
   1541 			result += sizeof(u64);
   1542 			sz = hweight_long(sample_regs_user) * sizeof(u64);
   1543 			result += sz;
   1544 		} else {
   1545 			result += sizeof(u64);
   1546 		}
   1547 	}
   1548 
   1549 	if (type & PERF_SAMPLE_STACK_USER) {
   1550 		sz = sample->user_stack.size;
   1551 		result += sizeof(u64);
   1552 		if (sz) {
   1553 			result += sz;
   1554 			result += sizeof(u64);
   1555 		}
   1556 	}
   1557 
   1558 	if (type & PERF_SAMPLE_WEIGHT)
   1559 		result += sizeof(u64);
   1560 
   1561 	if (type & PERF_SAMPLE_DATA_SRC)
   1562 		result += sizeof(u64);
   1563 
   1564 	return result;
   1565 }
   1566 
   1567 int perf_event__synthesize_sample(union perf_event *event, u64 type,
   1568 				  u64 sample_regs_user, u64 read_format,
   1569 				  const struct perf_sample *sample,
   1570 				  bool swapped)
   1571 {
   1572 	u64 *array;
   1573 	size_t sz;
   1574 	/*
   1575 	 * used for cross-endian analysis. See git commit 65014ab3
   1576 	 * for why this goofiness is needed.
   1577 	 */
   1578 	union u64_swap u;
   1579 
   1580 	array = event->sample.array;
   1581 
   1582 	if (type & PERF_SAMPLE_IDENTIFIER) {
   1583 		*array = sample->id;
   1584 		array++;
   1585 	}
   1586 
   1587 	if (type & PERF_SAMPLE_IP) {
   1588 		*array = sample->ip;
   1589 		array++;
   1590 	}
   1591 
   1592 	if (type & PERF_SAMPLE_TID) {
   1593 		u.val32[0] = sample->pid;
   1594 		u.val32[1] = sample->tid;
   1595 		if (swapped) {
   1596 			/*
   1597 			 * Inverse of what is done in perf_evsel__parse_sample
   1598 			 */
   1599 			u.val32[0] = bswap_32(u.val32[0]);
   1600 			u.val32[1] = bswap_32(u.val32[1]);
   1601 			u.val64 = bswap_64(u.val64);
   1602 		}
   1603 
   1604 		*array = u.val64;
   1605 		array++;
   1606 	}
   1607 
   1608 	if (type & PERF_SAMPLE_TIME) {
   1609 		*array = sample->time;
   1610 		array++;
   1611 	}
   1612 
   1613 	if (type & PERF_SAMPLE_ADDR) {
   1614 		*array = sample->addr;
   1615 		array++;
   1616 	}
   1617 
   1618 	if (type & PERF_SAMPLE_ID) {
   1619 		*array = sample->id;
   1620 		array++;
   1621 	}
   1622 
   1623 	if (type & PERF_SAMPLE_STREAM_ID) {
   1624 		*array = sample->stream_id;
   1625 		array++;
   1626 	}
   1627 
   1628 	if (type & PERF_SAMPLE_CPU) {
   1629 		u.val32[0] = sample->cpu;
   1630 		if (swapped) {
   1631 			/*
   1632 			 * Inverse of what is done in perf_evsel__parse_sample
   1633 			 */
   1634 			u.val32[0] = bswap_32(u.val32[0]);
   1635 			u.val64 = bswap_64(u.val64);
   1636 		}
   1637 		*array = u.val64;
   1638 		array++;
   1639 	}
   1640 
   1641 	if (type & PERF_SAMPLE_PERIOD) {
   1642 		*array = sample->period;
   1643 		array++;
   1644 	}
   1645 
   1646 	if (type & PERF_SAMPLE_READ) {
   1647 		if (read_format & PERF_FORMAT_GROUP)
   1648 			*array = sample->read.group.nr;
   1649 		else
   1650 			*array = sample->read.one.value;
   1651 		array++;
   1652 
   1653 		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
   1654 			*array = sample->read.time_enabled;
   1655 			array++;
   1656 		}
   1657 
   1658 		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
   1659 			*array = sample->read.time_running;
   1660 			array++;
   1661 		}
   1662 
   1663 		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
   1664 		if (read_format & PERF_FORMAT_GROUP) {
   1665 			sz = sample->read.group.nr *
   1666 			     sizeof(struct sample_read_value);
   1667 			memcpy(array, sample->read.group.values, sz);
   1668 			array = (void *)array + sz;
   1669 		} else {
   1670 			*array = sample->read.one.id;
   1671 			array++;
   1672 		}
   1673 	}
   1674 
   1675 	if (type & PERF_SAMPLE_CALLCHAIN) {
   1676 		sz = (sample->callchain->nr + 1) * sizeof(u64);
   1677 		memcpy(array, sample->callchain, sz);
   1678 		array = (void *)array + sz;
   1679 	}
   1680 
   1681 	if (type & PERF_SAMPLE_RAW) {
   1682 		u.val32[0] = sample->raw_size;
   1683 		if (WARN_ONCE(swapped,
   1684 			      "Endianness of raw data not corrected!\n")) {
   1685 			/*
   1686 			 * Inverse of what is done in perf_evsel__parse_sample
   1687 			 */
   1688 			u.val32[0] = bswap_32(u.val32[0]);
   1689 			u.val32[1] = bswap_32(u.val32[1]);
   1690 			u.val64 = bswap_64(u.val64);
   1691 		}
   1692 		*array = u.val64;
   1693 		array = (void *)array + sizeof(u32);
   1694 
   1695 		memcpy(array, sample->raw_data, sample->raw_size);
   1696 		array = (void *)array + sample->raw_size;
   1697 	}
   1698 
   1699 	if (type & PERF_SAMPLE_BRANCH_STACK) {
   1700 		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
   1701 		sz += sizeof(u64);
   1702 		memcpy(array, sample->branch_stack, sz);
   1703 		array = (void *)array + sz;
   1704 	}
   1705 
   1706 	if (type & PERF_SAMPLE_REGS_USER) {
   1707 		if (sample->user_regs.abi) {
   1708 			*array++ = sample->user_regs.abi;
   1709 			sz = hweight_long(sample_regs_user) * sizeof(u64);
   1710 			memcpy(array, sample->user_regs.regs, sz);
   1711 			array = (void *)array + sz;
   1712 		} else {
   1713 			*array++ = 0;
   1714 		}
   1715 	}
   1716 
   1717 	if (type & PERF_SAMPLE_STACK_USER) {
   1718 		sz = sample->user_stack.size;
   1719 		*array++ = sz;
   1720 		if (sz) {
   1721 			memcpy(array, sample->user_stack.data, sz);
   1722 			array = (void *)array + sz;
   1723 			*array++ = sz;
   1724 		}
   1725 	}
   1726 
   1727 	if (type & PERF_SAMPLE_WEIGHT) {
   1728 		*array = sample->weight;
   1729 		array++;
   1730 	}
   1731 
   1732 	if (type & PERF_SAMPLE_DATA_SRC) {
   1733 		*array = sample->data_src;
   1734 		array++;
   1735 	}
   1736 
   1737 	return 0;
   1738 }
   1739 
   1740 struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
   1741 {
   1742 	return pevent_find_field(evsel->tp_format, name);
   1743 }
   1744 
   1745 void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
   1746 			 const char *name)
   1747 {
   1748 	struct format_field *field = perf_evsel__field(evsel, name);
   1749 	int offset;
   1750 
   1751 	if (!field)
   1752 		return NULL;
   1753 
   1754 	offset = field->offset;
   1755 
   1756 	if (field->flags & FIELD_IS_DYNAMIC) {
   1757 		offset = *(int *)(sample->raw_data + field->offset);
   1758 		offset &= 0xffff;
   1759 	}
   1760 
   1761 	return sample->raw_data + offset;
   1762 }
   1763 
   1764 u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
   1765 		       const char *name)
   1766 {
   1767 	struct format_field *field = perf_evsel__field(evsel, name);
   1768 	void *ptr;
   1769 	u64 value;
   1770 
   1771 	if (!field)
   1772 		return 0;
   1773 
   1774 	ptr = sample->raw_data + field->offset;
   1775 
   1776 	switch (field->size) {
   1777 	case 1:
   1778 		return *(u8 *)ptr;
   1779 	case 2:
   1780 		value = *(u16 *)ptr;
   1781 		break;
   1782 	case 4:
   1783 		value = *(u32 *)ptr;
   1784 		break;
   1785 	case 8:
   1786 		value = *(u64 *)ptr;
   1787 		break;
   1788 	default:
   1789 		return 0;
   1790 	}
   1791 
   1792 	if (!evsel->needs_swap)
   1793 		return value;
   1794 
   1795 	switch (field->size) {
   1796 	case 2:
   1797 		return bswap_16(value);
   1798 	case 4:
   1799 		return bswap_32(value);
   1800 	case 8:
   1801 		return bswap_64(value);
   1802 	default:
   1803 		return 0;
   1804 	}
   1807 }
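
/*
 * Illustrative sketch, not part of the original file: how a tracepoint
 * handler might use the two accessors above.  The field names assumed
 * here ("prev_comm", "prev_pid") are those of sched:sched_switch.
 */
#if 0
static void example_sched_switch(struct perf_evsel *evsel,
				 struct perf_sample *sample)
{
	const char *prev_comm = perf_evsel__rawptr(evsel, sample, "prev_comm");
	u64 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid");

	if (prev_comm)
		printf("prev: %s (%" PRIu64 ")\n", prev_comm, prev_pid);
}
#endif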
   1808 
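/*
 * Print ':' before the first detail field and ',' before each
 * subsequent one, so the output reads "name: a, b, c".
 */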
   1809 static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
   1810 {
   1811 	va_list args;
   1812 	int ret = 0;
   1813 
   1814 	if (!*first) {
   1815 		ret += fprintf(fp, ",");
   1816 	} else {
   1817 		ret += fprintf(fp, ":");
   1818 		*first = false;
   1819 	}
   1820 
   1821 	va_start(args, fmt);
   1822 	ret += vfprintf(fp, fmt, args);
   1823 	va_end(args);
   1824 	return ret;
   1825 }
   1826 
   1827 static int __if_fprintf(FILE *fp, bool *first, const char *field, u64 value)
   1828 {
   1829 	if (value == 0)
   1830 		return 0;
   1831 
   1832 	return comma_fprintf(fp, first, " %s: %" PRIu64, field, value);
   1833 }
   1834 
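/*
 * Print attr.<field> only when it is non-zero; relies on fp, first,
 * printed and evsel being in scope at the call site.
 */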
   1835 #define if_print(field) printed += __if_fprintf(fp, &first, #field, evsel->attr.field)
   1836 
   1837 struct bit_names {
   1838 	int bit;
   1839 	const char *name;
   1840 };
   1841 
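/*
 * Print the names of the bits set in value as a '|'-separated list,
 * e.g. "sample_type: IP|TID|TIME|PERIOD".
 */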
   1842 static int bits__fprintf(FILE *fp, const char *field, u64 value,
   1843 			 struct bit_names *bits, bool *first)
   1844 {
   1845 	int i = 0, printed = comma_fprintf(fp, first, " %s: ", field);
   1846 	bool first_bit = true;
   1847 
   1848 	do {
   1849 		if (value & bits[i].bit) {
   1850 			printed += fprintf(fp, "%s%s", first_bit ? "" : "|", bits[i].name);
   1851 			first_bit = false;
   1852 		}
   1853 	} while (bits[++i].name != NULL);
   1854 
   1855 	return printed;
   1856 }
   1857 
   1858 static int sample_type__fprintf(FILE *fp, bool *first, u64 value)
   1859 {
   1860 #define bit_name(n) { PERF_SAMPLE_##n, #n }
   1861 	struct bit_names bits[] = {
   1862 		bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
   1863 		bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
   1864 		bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
   1865 		bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
   1866 		bit_name(IDENTIFIER),
   1867 		{ .name = NULL, }
   1868 	};
   1869 #undef bit_name
   1870 	return bits__fprintf(fp, "sample_type", value, bits, first);
   1871 }
   1872 
   1873 static int read_format__fprintf(FILE *fp, bool *first, u64 value)
   1874 {
   1875 #define bit_name(n) { PERF_FORMAT_##n, #n }
   1876 	struct bit_names bits[] = {
   1877 		bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
   1878 		bit_name(ID), bit_name(GROUP),
   1879 		{ .name = NULL, }
   1880 	};
   1881 #undef bit_name
   1882 	return bits__fprintf(fp, "read_format", value, bits, first);
   1883 }
   1884 
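/**
 * perf_evsel__fprintf - print an evsel according to the requested details.
 * @evsel: the event
 * @details: which attribute details to print
 * @fp: the output stream
 *
 * With ->event_group set, only group leaders are printed, together with
 * their members.  Returns the number of characters printed, including
 * the trailing newline.
 */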
   1885 int perf_evsel__fprintf(struct perf_evsel *evsel,
   1886 			struct perf_attr_details *details, FILE *fp)
   1887 {
   1888 	bool first = true;
   1889 	int printed = 0;
   1890 
   1891 	if (details->event_group) {
   1892 		struct perf_evsel *pos;
   1893 
   1894 		if (!perf_evsel__is_group_leader(evsel))
   1895 			return 0;
   1896 
   1897 		if (evsel->nr_members > 1)
   1898 			printed += fprintf(fp, "%s{", evsel->group_name ?: "");
   1899 
   1900 		printed += fprintf(fp, "%s", perf_evsel__name(evsel));
   1901 		for_each_group_member(pos, evsel)
   1902 			printed += fprintf(fp, ",%s", perf_evsel__name(pos));
   1903 
   1904 		if (evsel->nr_members > 1)
   1905 			printed += fprintf(fp, "}");
   1906 		goto out;
   1907 	}
   1908 
   1909 	printed += fprintf(fp, "%s", perf_evsel__name(evsel));
   1910 
   1911 	if (details->verbose || details->freq) {
   1912 		printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64,
   1913 					 (u64)evsel->attr.sample_freq);
   1914 	}
   1915 
   1916 	if (details->verbose) {
   1917 		if_print(type);
   1918 		if_print(config);
   1919 		if_print(config1);
   1920 		if_print(config2);
   1921 		if_print(size);
   1922 		printed += sample_type__fprintf(fp, &first, evsel->attr.sample_type);
   1923 		if (evsel->attr.read_format)
   1924 			printed += read_format__fprintf(fp, &first, evsel->attr.read_format);
   1925 		if_print(disabled);
   1926 		if_print(inherit);
   1927 		if_print(pinned);
   1928 		if_print(exclusive);
   1929 		if_print(exclude_user);
   1930 		if_print(exclude_kernel);
   1931 		if_print(exclude_hv);
   1932 		if_print(exclude_idle);
   1933 		if_print(mmap);
   1934 		if_print(mmap2);
   1935 		if_print(comm);
   1936 		if_print(freq);
   1937 		if_print(inherit_stat);
   1938 		if_print(enable_on_exec);
   1939 		if_print(task);
   1940 		if_print(watermark);
   1941 		if_print(precise_ip);
   1942 		if_print(mmap_data);
   1943 		if_print(sample_id_all);
   1944 		if_print(exclude_host);
   1945 		if_print(exclude_guest);
   1946 		if_print(__reserved_1);
   1947 		if_print(wakeup_events);
   1948 		if_print(bp_type);
   1949 		if_print(branch_sample_type);
   1950 	}
   1951 out:
   1952 	fputc('\n', fp);
   1953 	return ++printed;
   1954 }
   1955 
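/**
 * perf_evsel__fallback - try a software fallback for an event that failed to open.
 * @evsel: the event that failed to open
 * @err: the errno returned by sys_perf_event_open()
 * @msg: buffer for a message describing the fallback
 * @msgsize: size of @msg
 *
 * Returns true if evsel was rewritten and should be reopened.
 */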
   1956 bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
   1957 			  char *msg, size_t msgsize)
   1958 {
   1959 	if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
   1960 	    evsel->attr.type   == PERF_TYPE_HARDWARE &&
   1961 	    evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
   1962 		/*
    1963 		 * If it's cycles then fall back to the hrtimer-based
    1964 		 * cpu-clock-tick sw counter, which is always available even
    1965 		 * when there is no PMU support.
   1966 		 *
   1967 		 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
   1968 		 * b0a873e).
   1969 		 */
   1970 		scnprintf(msg, msgsize, "%s",
   1971 "The cycles event is not supported, trying to fall back to cpu-clock-ticks");
   1972 
   1973 		evsel->attr.type   = PERF_TYPE_SOFTWARE;
   1974 		evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;
   1975 
   1976 		free(evsel->name);
   1977 		evsel->name = NULL;
   1978 		return true;
   1979 	}
   1980 
   1981 	return false;
   1982 }
   1983 
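/**
 * perf_evsel__open_strerror - format a message for an open failure.
 * @evsel: the event that failed to open
 * @target: what was being targeted (cpus, threads, ...)
 * @err: the errno returned by sys_perf_event_open()
 * @msg: buffer for the message
 * @size: size of @msg
 *
 * Returns the number of characters written to @msg.
 */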
   1984 int perf_evsel__open_strerror(struct perf_evsel *evsel,
   1985 			      struct perf_target *target,
   1986 			      int err, char *msg, size_t size)
   1987 {
   1988 	switch (err) {
   1989 	case EPERM:
   1990 	case EACCES:
   1991 		return scnprintf(msg, size,
   1992 		 "You may not have permission to collect %sstats.\n"
   1993 		 "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
   1994 		 " -1 - Not paranoid at all\n"
   1995 		 "  0 - Disallow raw tracepoint access for unpriv\n"
   1996 		 "  1 - Disallow cpu events for unpriv\n"
   1997 		 "  2 - Disallow kernel profiling for unpriv",
   1998 				 target->system_wide ? "system-wide " : "");
   1999 	case ENOENT:
   2000 		return scnprintf(msg, size, "The %s event is not supported.",
   2001 				 perf_evsel__name(evsel));
   2002 	case EMFILE:
   2003 		return scnprintf(msg, size, "%s",
    2004 			 "Too many events are open.\n"
   2005 			 "Try again after reducing the number of events.");
   2006 	case ENODEV:
   2007 		if (target->cpu_list)
   2008 			return scnprintf(msg, size, "%s",
   2009 	 "No such device - did you specify an out-of-range profile CPU?\n");
   2010 		break;
   2011 	case EOPNOTSUPP:
   2012 		if (evsel->attr.precise_ip)
   2013 			return scnprintf(msg, size, "%s",
    2014 	"'precise' request may not be supported. Try removing the 'p' modifier.");
   2015 #if defined(__i386__) || defined(__x86_64__)
   2016 		if (evsel->attr.type == PERF_TYPE_HARDWARE)
   2017 			return scnprintf(msg, size, "%s",
   2018 	"No hardware sampling interrupt available.\n"
   2019 	"No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.");
   2020 #endif
   2021 		break;
   2022 	default:
   2023 		break;
   2024 	}
   2025 
   2026 	return scnprintf(msg, size,
    2027 	"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
   2028 	"/bin/dmesg may provide additional information.\n"
   2029 	"No CONFIG_PERF_EVENTS=y kernel support configured?\n",
   2030 			 err, strerror(err), perf_evsel__name(evsel));
   2031 }
   2032
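/*
 * Illustrative sketch, not part of the original file: the typical caller
 * pattern for the two helpers above, modelled on the error paths of the
 * record/stat/top tools.  example_open() and the pr_* logging choices
 * are assumptions for the example.
 */
#if 0
static int example_open(struct perf_evsel *evsel, struct perf_target *target,
			struct cpu_map *cpus, struct thread_map *threads)
{
	char msg[512];

retry:
	if (perf_evsel__open(evsel, cpus, threads) < 0) {
		if (perf_evsel__fallback(evsel, errno, msg, sizeof(msg))) {
			pr_warning("%s\n", msg);
			goto retry;	/* reopen as cpu-clock */
		}
		perf_evsel__open_strerror(evsel, target, errno,
					  msg, sizeof(msg));
		pr_err("%s\n", msg);
		return -1;
	}
	return 0;
}
#endif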