      1 #define _FILE_OFFSET_BITS 64
      2 
      3 #include <sys/types.h>
      4 #include <byteswap.h>
      5 #include <unistd.h>
      6 #include <stdio.h>
      7 #include <stdlib.h>
      8 /* ANDROID_CHANGE_BEGIN */
      9 #if 0
     10 #include <linux/list.h>
     11 #include <linux/kernel.h>
     12 #else
     13 #include "include/linux/list.h"
     14 #include "include/linux/kernel.h"
     15 #endif
     16 /* ANDROID_CHANGE_END */
     17 
     18 #include "evlist.h"
     19 #include "evsel.h"
     20 #include "util.h"
     21 #include "header.h"
     22 #include "../perf.h"
     23 #include "trace-event.h"
     24 #include "session.h"
     25 #include "symbol.h"
     26 #include "debug.h"
     27 
     28 static bool no_buildid_cache = false;
     29 
     30 static int event_count;
     31 static struct perf_trace_event_type *events;
     32 
     33 int perf_header__push_event(u64 id, const char *name)
     34 {
      35 	if (strlen(name) >= MAX_EVENT_NAME)
     36 		pr_warning("Event %s will be truncated\n", name);
     37 
     38 	if (!events) {
     39 		events = malloc(sizeof(struct perf_trace_event_type));
     40 		if (events == NULL)
     41 			return -ENOMEM;
     42 	} else {
     43 		struct perf_trace_event_type *nevents;
     44 
     45 		nevents = realloc(events, (event_count + 1) * sizeof(*events));
     46 		if (nevents == NULL)
     47 			return -ENOMEM;
     48 		events = nevents;
     49 	}
     50 	memset(&events[event_count], 0, sizeof(struct perf_trace_event_type));
     51 	events[event_count].event_id = id;
     52 	strncpy(events[event_count].name, name, MAX_EVENT_NAME - 1);
     53 	event_count++;
     54 	return 0;
     55 }
     56 
     57 char *perf_header__find_event(u64 id)
     58 {
     59 	int i;
     60 	for (i = 0 ; i < event_count; i++) {
     61 		if (events[i].event_id == id)
     62 			return events[i].name;
     63 	}
     64 	return NULL;
     65 }
     66 
     67 /* ANDROID_CHANGE_BEGIN */
     68 /*
     69  * The string in the literal pool might not be aligned properly. ARM doesn't
     70  * support unaligned loads with NEON registers
     71  */
     72 #if 0
     73 static const char *__perf_magic = "PERFFILE";
     74 
     75 #else
      76 static const char __perf_magic[9] __attribute__ ((aligned (8))) =
     77   "PERFFILE";
     78 #endif
     79 /* ANDROID_CHANGE_END */
     80 
     81 #define PERF_MAGIC	(*(u64 *)__perf_magic)
     82 
     83 
     84 struct perf_file_attr {
     85 	struct perf_event_attr	attr;
     86 	struct perf_file_section	ids;
     87 };
     88 
     89 void perf_header__set_feat(struct perf_header *header, int feat)
     90 {
     91 	set_bit(feat, header->adds_features);
     92 }
     93 
     94 void perf_header__clear_feat(struct perf_header *header, int feat)
     95 {
     96 	clear_bit(feat, header->adds_features);
     97 }
     98 
     99 bool perf_header__has_feat(const struct perf_header *header, int feat)
    100 {
    101 	return test_bit(feat, header->adds_features);
    102 }
    103 
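         /*
          * do_write() loops until every byte of @buf has been handed to write(2),
          * so callers never see a short write; the first failing write aborts
          * with -errno.
          */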
    104 static int do_write(int fd, const void *buf, size_t size)
    105 {
    106 	while (size) {
    107 		int ret = write(fd, buf, size);
    108 
    109 		if (ret < 0)
    110 			return -errno;
    111 
    112 		size -= ret;
    113 		buf += ret;
    114 	}
    115 
    116 	return 0;
    117 }
    118 
    119 #define NAME_ALIGN 64
    120 
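         /*
          * write_padded() emits @count bytes from @bf and then zero-fills up to
          * @count_aligned so the next record starts on a NAME_ALIGN boundary.
          * Callers never ask for more than NAME_ALIGN bytes of padding, which is
          * all the static zero_buf can supply in one do_write().
          */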
    121 static int write_padded(int fd, const void *bf, size_t count,
    122 			size_t count_aligned)
    123 {
    124 	static const char zero_buf[NAME_ALIGN];
    125 	int err = do_write(fd, bf, count);
    126 
    127 	if (!err)
    128 		err = do_write(fd, zero_buf, count_aligned - count);
    129 
    130 	return err;
    131 }
    132 
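         /*
          * Iterate over a DSO list, silently skipping entries without a build-id.
          * The odd if/else shape keeps the caller's loop body attached to the
          * macro, e.g.:
          *
          *	dsos__for_each_with_build_id(pos, head) {
          *		... only DSOs with pos->has_build_id set get here ...
          *	}
          */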
    133 #define dsos__for_each_with_build_id(pos, head)	\
    134 	list_for_each_entry(pos, head, node)	\
    135 		if (!pos->has_build_id)		\
    136 			continue;		\
    137 		else
    138 
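         /*
          * Emit one build_id_event per DSO that both carries a build-id and was
          * hit during the session: a fixed header (misc, pid, build-id) followed
          * by the DSO's long name, zero-padded to NAME_ALIGN.
          */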
    139 static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
    140 				u16 misc, int fd)
    141 {
    142 	struct dso *pos;
    143 
    144 	dsos__for_each_with_build_id(pos, head) {
    145 		int err;
    146 		struct build_id_event b;
    147 		size_t len;
    148 
    149 		if (!pos->hit)
    150 			continue;
    151 		len = pos->long_name_len + 1;
    152                 /* ANDROID_CHANGE_BEGIN */
    153 #if defined(__BIONIC__) || defined(__APPLE__)
    154 		len = KERNEL_ALIGN(len, NAME_ALIGN);
    155 #else
    156 		len = ALIGN(len, NAME_ALIGN);
    157 #endif
     158                 /* ANDROID_CHANGE_END */
    159 		memset(&b, 0, sizeof(b));
    160 		memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id));
    161 		b.pid = pid;
    162 		b.header.misc = misc;
    163 		b.header.size = sizeof(b) + len;
    164 		err = do_write(fd, &b, sizeof(b));
    165 		if (err < 0)
    166 			return err;
    167 		err = write_padded(fd, pos->long_name,
    168 				   pos->long_name_len + 1, len);
    169 		if (err < 0)
    170 			return err;
    171 	}
    172 
    173 	return 0;
    174 }
    175 
    176 static int machine__write_buildid_table(struct machine *machine, int fd)
    177 {
    178 	int err;
    179 	u16 kmisc = PERF_RECORD_MISC_KERNEL,
    180 	    umisc = PERF_RECORD_MISC_USER;
    181 
    182 	if (!machine__is_host(machine)) {
    183 		kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
    184 		umisc = PERF_RECORD_MISC_GUEST_USER;
    185 	}
    186 
    187 	err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid,
    188 					  kmisc, fd);
    189 	if (err == 0)
    190 		err = __dsos__write_buildid_table(&machine->user_dsos,
    191 						  machine->pid, umisc, fd);
    192 	return err;
    193 }
    194 
    195 static int dsos__write_buildid_table(struct perf_header *header, int fd)
    196 {
    197 	struct perf_session *session = container_of(header,
    198 			struct perf_session, header);
    199 	struct rb_node *nd;
    200 	int err = machine__write_buildid_table(&session->host_machine, fd);
    201 
    202 	if (err)
    203 		return err;
    204 
    205 	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
    206 		struct machine *pos = rb_entry(nd, struct machine, rb_node);
    207 		err = machine__write_buildid_table(pos, fd);
    208 		if (err)
    209 			break;
    210 	}
    211 	return err;
    212 }
    213 
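         /*
          * Populate the build-id cache for one object: kallsyms is copied from
          * /proc/kallsyms, other files are hard-linked (falling back to a copy
          * when link(2) fails) to <debugdir>/<name>/<sbuild_id>, and a relative
          * symlink is planted at <debugdir>/.build-id/<first 2 hex chars>/<rest>
          * pointing back at it, e.g.
          *
          *	.build-id/ab/cdef... -> ../../bin/ls/abcdef...
          *
          * Returns 0 on success, -1 on any failure.
          */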
    214 int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
    215 			  const char *name, bool is_kallsyms)
    216 {
    217 	const size_t size = PATH_MAX;
    218 	char *realname, *filename = malloc(size),
    219 	     *linkname = malloc(size), *targetname;
    220 	int len, err = -1;
    221 
    222 	if (is_kallsyms) {
    223 		if (symbol_conf.kptr_restrict) {
    224 			pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");
    225 			return 0;
    226 		}
    227 		realname = (char *)name;
    228 	} else
    229 		realname = realpath(name, NULL);
    230 
    231 	if (realname == NULL || filename == NULL || linkname == NULL)
    232 		goto out_free;
    233 
    234 	len = snprintf(filename, size, "%s%s%s",
    235 		       debugdir, is_kallsyms ? "/" : "", realname);
    236 	if (mkdir_p(filename, 0755))
    237 		goto out_free;
    238 
     239 	snprintf(filename + len, size - len, "/%s", sbuild_id);
    240 
    241 	if (access(filename, F_OK)) {
    242 		if (is_kallsyms) {
    243 			 if (copyfile("/proc/kallsyms", filename))
    244 				goto out_free;
    245 		} else if (link(realname, filename) && copyfile(name, filename))
    246 			goto out_free;
    247 	}
    248 
    249 	len = snprintf(linkname, size, "%s/.build-id/%.2s",
    250 		       debugdir, sbuild_id);
    251 
    252 	if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
    253 		goto out_free;
    254 
    255 	snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);
    256 	targetname = filename + strlen(debugdir) - 5;
    257 	memcpy(targetname, "../..", 5);
    258 
    259 	if (symlink(targetname, linkname) == 0)
    260 		err = 0;
    261 out_free:
    262 	if (!is_kallsyms)
    263 		free(realname);
    264 	free(filename);
    265 	free(linkname);
    266 	return err;
    267 }
    268 
    269 static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
    270 				 const char *name, const char *debugdir,
    271 				 bool is_kallsyms)
    272 {
    273 	char sbuild_id[BUILD_ID_SIZE * 2 + 1];
    274 
    275 	build_id__sprintf(build_id, build_id_size, sbuild_id);
    276 
    277 	return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms);
    278 }
    279 
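         /*
          * Undo build_id_cache__add_s(): remove the .build-id/<xx>/<rest> symlink
          * and then the cached file it pointed to. The stored link target is
          * relative, so it is re-rooted under <debugdir>/.build-id/<xx>/ before
          * the second unlink.
          */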
    280 int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
    281 {
    282 	const size_t size = PATH_MAX;
     283 	char *filename = calloc(1, size),	/* zeroed: see readlink() below */
     284 	     *linkname = calloc(1, size);
    285 	int err = -1;
    286 
    287 	if (filename == NULL || linkname == NULL)
    288 		goto out_free;
    289 
    290 	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
    291 		 debugdir, sbuild_id, sbuild_id + 2);
    292 
    293 	if (access(linkname, F_OK))
    294 		goto out_free;
    295 
         	/* the zeroed buffer keeps the readlink() result NUL-terminated */
     296 	if (readlink(linkname, filename, size - 1) < 0)
    297 		goto out_free;
    298 
    299 	if (unlink(linkname))
    300 		goto out_free;
    301 
    302 	/*
    303 	 * Since the link is relative, we must make it absolute:
    304 	 */
    305 	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
    306 		 debugdir, sbuild_id, filename);
    307 
    308 	if (unlink(linkname))
    309 		goto out_free;
    310 
    311 	err = 0;
    312 out_free:
    313 	free(filename);
    314 	free(linkname);
    315 	return err;
    316 }
    317 
    318 static int dso__cache_build_id(struct dso *dso, const char *debugdir)
    319 {
    320 	bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';
    321 
    322 	return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id),
    323 				     dso->long_name, debugdir, is_kallsyms);
    324 }
    325 
    326 static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
    327 {
    328 	struct dso *pos;
    329 	int err = 0;
    330 
    331 	dsos__for_each_with_build_id(pos, head)
    332 		if (dso__cache_build_id(pos, debugdir))
    333 			err = -1;
    334 
    335 	return err;
    336 }
    337 
    338 static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
    339 {
    340 	int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir);
    341 	ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir);
    342 	return ret;
    343 }
    344 
    345 static int perf_session__cache_build_ids(struct perf_session *session)
    346 {
    347 	struct rb_node *nd;
    348 	int ret;
    349 	char debugdir[PATH_MAX];
    350 
    351 	snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);
    352 
    353 	if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
    354 		return -1;
    355 
    356 	ret = machine__cache_build_ids(&session->host_machine, debugdir);
    357 
    358 	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
    359 		struct machine *pos = rb_entry(nd, struct machine, rb_node);
    360 		ret |= machine__cache_build_ids(pos, debugdir);
    361 	}
    362 	return ret ? -1 : 0;
    363 }
    364 
    365 static bool machine__read_build_ids(struct machine *machine, bool with_hits)
    366 {
    367 	bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits);
    368 	ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits);
    369 	return ret;
    370 }
    371 
    372 static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
    373 {
    374 	struct rb_node *nd;
    375 	bool ret = machine__read_build_ids(&session->host_machine, with_hits);
    376 
    377 	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
    378 		struct machine *pos = rb_entry(nd, struct machine, rb_node);
    379 		ret |= machine__read_build_ids(pos, with_hits);
    380 	}
    381 
    382 	return ret;
    383 }
    384 
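         /*
          * Write the optional feature sections (trace info, build-id table) after
          * the data section, then seek back and write the perf_file_section table
          * describing them at sec_start. HEADER_BUILD_ID is dropped when no
          * build-ids were collected, and the build-id cache is fed unless it has
          * been disabled.
          */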
    385 static int perf_header__adds_write(struct perf_header *header,
    386 				   struct perf_evlist *evlist, int fd)
    387 {
    388 	int nr_sections;
    389 	struct perf_session *session;
    390 	struct perf_file_section *feat_sec;
    391 	int sec_size;
    392 	u64 sec_start;
    393 	int idx = 0, err;
    394 
    395 	session = container_of(header, struct perf_session, header);
    396 
     397 	if (perf_header__has_feat(header, HEADER_BUILD_ID) &&
     398 	    !perf_session__read_build_ids(session, true))
    399 		perf_header__clear_feat(header, HEADER_BUILD_ID);
    400 
    401 	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
    402 	if (!nr_sections)
    403 		return 0;
    404 
     405 	feat_sec = calloc(nr_sections, sizeof(*feat_sec));
    406 	if (feat_sec == NULL)
    407 		return -ENOMEM;
    408 
    409 	sec_size = sizeof(*feat_sec) * nr_sections;
    410 
    411 	sec_start = header->data_offset + header->data_size;
    412 	lseek(fd, sec_start + sec_size, SEEK_SET);
    413 
    414 	if (perf_header__has_feat(header, HEADER_TRACE_INFO)) {
    415 		struct perf_file_section *trace_sec;
    416 
    417 		trace_sec = &feat_sec[idx++];
    418 
    419 		/* Write trace info */
    420 		trace_sec->offset = lseek(fd, 0, SEEK_CUR);
    421 		read_tracing_data(fd, &evlist->entries);
    422 		trace_sec->size = lseek(fd, 0, SEEK_CUR) - trace_sec->offset;
    423 	}
    424 
    425 	if (perf_header__has_feat(header, HEADER_BUILD_ID)) {
    426 		struct perf_file_section *buildid_sec;
    427 
    428 		buildid_sec = &feat_sec[idx++];
    429 
    430 		/* Write build-ids */
    431 		buildid_sec->offset = lseek(fd, 0, SEEK_CUR);
    432 		err = dsos__write_buildid_table(header, fd);
    433 		if (err < 0) {
    434 			pr_debug("failed to write buildid table\n");
    435 			goto out_free;
    436 		}
    437 		buildid_sec->size = lseek(fd, 0, SEEK_CUR) -
    438 					  buildid_sec->offset;
    439 		if (!no_buildid_cache)
    440 			perf_session__cache_build_ids(session);
    441 	}
    442 
    443 	lseek(fd, sec_start, SEEK_SET);
    444 	err = do_write(fd, feat_sec, sec_size);
    445 	if (err < 0)
    446 		pr_debug("failed to write feature section\n");
    447 out_free:
    448 	free(feat_sec);
    449 	return err;
    450 }
    451 
    452 int perf_header__write_pipe(int fd)
    453 {
    454 	struct perf_pipe_file_header f_header;
    455 	int err;
    456 
    457 	f_header = (struct perf_pipe_file_header){
    458 		.magic	   = PERF_MAGIC,
    459 		.size	   = sizeof(f_header),
    460 	};
    461 
    462 	err = do_write(fd, &f_header, sizeof(f_header));
    463 	if (err < 0) {
    464 		pr_debug("failed to write perf pipe header\n");
    465 		return err;
    466 	}
    467 
    468 	return 0;
    469 }
    470 
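         /*
          * Lay out a perf.data header: the per-evsel id arrays first (recording
          * each attr->id_offset), then the perf_file_attr table, the trace event
          * type table and the sample data. When called at exit the feature
          * sections are appended, and a perf_file_header describing it all is
          * finally written back at offset 0.
          */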
    471 int perf_session__write_header(struct perf_session *session,
    472 			       struct perf_evlist *evlist,
    473 			       int fd, bool at_exit)
    474 {
    475 	struct perf_file_header f_header;
    476 	struct perf_file_attr   f_attr;
    477 	struct perf_header *header = &session->header;
    478 	struct perf_evsel *attr, *pair = NULL;
    479 	int err;
    480 
    481 	lseek(fd, sizeof(f_header), SEEK_SET);
    482 
    483 	if (session->evlist != evlist)
    484 		pair = list_entry(session->evlist->entries.next, struct perf_evsel, node);
    485 
    486 	list_for_each_entry(attr, &evlist->entries, node) {
    487 		attr->id_offset = lseek(fd, 0, SEEK_CUR);
    488 		err = do_write(fd, attr->id, attr->ids * sizeof(u64));
    489 		if (err < 0) {
    490 out_err_write:
    491 			pr_debug("failed to write perf header\n");
    492 			return err;
    493 		}
    494 		if (session->evlist != evlist) {
    495 			err = do_write(fd, pair->id, pair->ids * sizeof(u64));
    496 			if (err < 0)
    497 				goto out_err_write;
    498 			attr->ids += pair->ids;
    499 			pair = list_entry(pair->node.next, struct perf_evsel, node);
    500 		}
    501 	}
    502 
    503 	header->attr_offset = lseek(fd, 0, SEEK_CUR);
    504 
    505 	list_for_each_entry(attr, &evlist->entries, node) {
    506 		f_attr = (struct perf_file_attr){
    507 			.attr = attr->attr,
    508 			.ids  = {
    509 				.offset = attr->id_offset,
    510 				.size   = attr->ids * sizeof(u64),
    511 			}
    512 		};
    513 		err = do_write(fd, &f_attr, sizeof(f_attr));
    514 		if (err < 0) {
    515 			pr_debug("failed to write perf header attribute\n");
    516 			return err;
    517 		}
    518 	}
    519 
    520 	header->event_offset = lseek(fd, 0, SEEK_CUR);
    521 	header->event_size = event_count * sizeof(struct perf_trace_event_type);
    522 	if (events) {
    523 		err = do_write(fd, events, header->event_size);
    524 		if (err < 0) {
    525 			pr_debug("failed to write perf header events\n");
    526 			return err;
    527 		}
    528 	}
    529 
    530 	header->data_offset = lseek(fd, 0, SEEK_CUR);
    531 
    532 	if (at_exit) {
    533 		err = perf_header__adds_write(header, evlist, fd);
    534 		if (err < 0)
    535 			return err;
    536 	}
    537 
    538 	f_header = (struct perf_file_header){
    539 		.magic	   = PERF_MAGIC,
    540 		.size	   = sizeof(f_header),
    541 		.attr_size = sizeof(f_attr),
    542 		.attrs = {
    543 			.offset = header->attr_offset,
    544 			.size   = evlist->nr_entries * sizeof(f_attr),
    545 		},
    546 		.data = {
    547 			.offset = header->data_offset,
    548 			.size	= header->data_size,
    549 		},
    550 		.event_types = {
    551 			.offset = header->event_offset,
    552 			.size	= header->event_size,
    553 		},
    554 	};
    555 
    556 	memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
    557 
    558 	lseek(fd, 0, SEEK_SET);
    559 	err = do_write(fd, &f_header, sizeof(f_header));
    560 	if (err < 0) {
    561 		pr_debug("failed to write perf header\n");
    562 		return err;
    563 	}
    564 	lseek(fd, header->data_offset + header->data_size, SEEK_SET);
    565 
    566 	header->frozen = 1;
    567 	return 0;
    568 }
    569 
    570 static int perf_header__getbuffer64(struct perf_header *header,
    571 				    int fd, void *buf, size_t size)
    572 {
    573 	if (readn(fd, buf, size) <= 0)
    574 		return -1;
    575 
    576 	if (header->needs_swap)
    577 		mem_bswap_64(buf, size);
    578 
    579 	return 0;
    580 }
    581 
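         /*
          * Walk the feature-section table stored after the data section and call
          * @process once per feature bit that is set, stopping on the first
          * error. Feature numbering starts at 1, so bit 0 is never consulted.
          */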
    582 int perf_header__process_sections(struct perf_header *header, int fd,
    583 				  int (*process)(struct perf_file_section *section,
    584 						 struct perf_header *ph,
    585 						 int feat, int fd))
    586 {
    587 	struct perf_file_section *feat_sec;
    588 	int nr_sections;
    589 	int sec_size;
    590 	int idx = 0;
    591 	int err = -1, feat = 1;
    592 
    593 	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
    594 	if (!nr_sections)
    595 		return 0;
    596 
     597 	feat_sec = calloc(nr_sections, sizeof(*feat_sec));
    598 	if (!feat_sec)
    599 		return -1;
    600 
    601 	sec_size = sizeof(*feat_sec) * nr_sections;
    602 
    603 	lseek(fd, header->data_offset + header->data_size, SEEK_SET);
    604 
    605 	if (perf_header__getbuffer64(header, fd, feat_sec, sec_size))
    606 		goto out_free;
    607 
    608 	err = 0;
    609 	while (idx < nr_sections && feat < HEADER_LAST_FEATURE) {
    610 		if (perf_header__has_feat(header, feat)) {
    611 			struct perf_file_section *sec = &feat_sec[idx++];
    612 
    613 			err = process(sec, header, feat, fd);
    614 			if (err < 0)
    615 				break;
    616 		}
    617 		++feat;
    618 	}
    619 out_free:
    620 	free(feat_sec);
    621 	return err;
    622 }
    623 
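         /*
          * Read and sanity-check the on-disk perf_file_header: verify the
          * "PERFFILE" magic, detect a cross-endian file via attr_size and mark
          * ph->needs_swap, accept the older header layout that ended before
          * adds_features, and copy the section offsets/sizes into the in-memory
          * perf_header.
          */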
    624 int perf_file_header__read(struct perf_file_header *header,
    625 			   struct perf_header *ph, int fd)
    626 {
    627 	lseek(fd, 0, SEEK_SET);
    628 
    629 	if (readn(fd, header, sizeof(*header)) <= 0 ||
    630 	    memcmp(&header->magic, __perf_magic, sizeof(header->magic)))
    631 		return -1;
    632 
    633 	if (header->attr_size != sizeof(struct perf_file_attr)) {
    634 		u64 attr_size = bswap_64(header->attr_size);
    635 
    636 		if (attr_size != sizeof(struct perf_file_attr))
    637 			return -1;
    638 
    639 		mem_bswap_64(header, offsetof(struct perf_file_header,
    640 					    adds_features));
    641 		ph->needs_swap = true;
    642 	}
    643 
    644 	if (header->size != sizeof(*header)) {
    645 		/* Support the previous format */
    646 		if (header->size == offsetof(typeof(*header), adds_features))
    647 			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
    648 		else
    649 			return -1;
    650 	}
    651 
    652 	memcpy(&ph->adds_features, &header->adds_features,
    653 	       sizeof(ph->adds_features));
    654 	/*
    655 	 * FIXME: hack that assumes that if we need swap the perf.data file
    656 	 * may be coming from an arch with a different word-size, ergo different
     657 	 * DECLARE_BITMAP format; investigate more later, but for now it's mostly
    658 	 * safe to assume that we have a build-id section. Trace files probably
    659 	 * have several other issues in this realm anyway...
    660 	 */
    661 	if (ph->needs_swap) {
    662 		memset(&ph->adds_features, 0, sizeof(ph->adds_features));
    663 		perf_header__set_feat(ph, HEADER_BUILD_ID);
    664 	}
    665 
    666 	ph->event_offset = header->event_types.offset;
    667 	ph->event_size   = header->event_types.size;
    668 	ph->data_offset  = header->data.offset;
    669 	ph->data_size	 = header->data.size;
    670 	return 0;
    671 }
    672 
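         /*
          * Attach a build-id carried in the header to the matching DSO: pick the
          * machine from bev->pid, choose the kernel or user DSO list from the
          * misc cpumode bits, then find-or-create the DSO by filename and stamp
          * its build-id.
          */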
    673 static int __event_process_build_id(struct build_id_event *bev,
    674 				    char *filename,
    675 				    struct perf_session *session)
    676 {
    677 	int err = -1;
    678 	struct list_head *head;
    679 	struct machine *machine;
    680 	u16 misc;
    681 	struct dso *dso;
    682 	enum dso_kernel_type dso_type;
    683 
    684 	machine = perf_session__findnew_machine(session, bev->pid);
    685 	if (!machine)
    686 		goto out;
    687 
    688 	misc = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
    689 
    690 	switch (misc) {
    691 	case PERF_RECORD_MISC_KERNEL:
    692 		dso_type = DSO_TYPE_KERNEL;
    693 		head = &machine->kernel_dsos;
    694 		break;
    695 	case PERF_RECORD_MISC_GUEST_KERNEL:
    696 		dso_type = DSO_TYPE_GUEST_KERNEL;
    697 		head = &machine->kernel_dsos;
    698 		break;
    699 	case PERF_RECORD_MISC_USER:
    700 	case PERF_RECORD_MISC_GUEST_USER:
    701 		dso_type = DSO_TYPE_USER;
    702 		head = &machine->user_dsos;
    703 		break;
    704 	default:
    705 		goto out;
    706 	}
    707 
    708 	dso = __dsos__findnew(head, filename);
    709 	if (dso != NULL) {
    710 		char sbuild_id[BUILD_ID_SIZE * 2 + 1];
    711 
    712 		dso__set_build_id(dso, &bev->build_id);
    713 
    714 		if (filename[0] == '[')
    715 			dso->kernel = dso_type;
    716 
    717 		build_id__sprintf(dso->build_id, sizeof(dso->build_id),
    718 				  sbuild_id);
    719 		pr_debug("build id event received for %s: %s\n",
    720 			 dso->long_name, sbuild_id);
    721 	}
    722 
    723 	err = 0;
    724 out:
    725 	return err;
    726 }
    727 
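         /*
          * Parse a build-id table written before struct build_id_event grew its
          * pid field: read the old, smaller layout and convert each record to the
          * current struct (with pid forced to 0) before handing it to
          * __event_process_build_id().
          */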
    728 static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
    729 						 int input, u64 offset, u64 size)
    730 {
    731 	struct perf_session *session = container_of(header, struct perf_session, header);
    732 	struct {
    733 		struct perf_event_header   header;
    734                 /* ANDROID_CHANGE_BEGIN */
    735 #if defined(__BIONIC__) || defined(__APPLE__)
    736 		u8			   build_id[KERNEL_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
    737 #else
    738 		u8			   build_id[ALIGN(BUILD_ID_SIZE, sizeof(u64))];
    739 #endif
    740                 /* ANDROID_CHANGE_END */
    741 		char			   filename[0];
    742 	} old_bev;
    743 	struct build_id_event bev;
    744 	char filename[PATH_MAX];
    745 	u64 limit = offset + size;
    746 
    747 	while (offset < limit) {
    748 		ssize_t len;
    749 
    750 		if (read(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
    751 			return -1;
    752 
    753 		if (header->needs_swap)
    754 			perf_event_header__bswap(&old_bev.header);
    755 
    756 		len = old_bev.header.size - sizeof(old_bev);
    757 		if (read(input, filename, len) != len)
    758 			return -1;
    759 
    760 		bev.header = old_bev.header;
    761 		bev.pid	   = 0;
    762 		memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
    763 		__event_process_build_id(&bev, filename, session);
    764 
    765 		offset += bev.header.size;
    766 	}
    767 
    768 	return 0;
    769 }
    770 
    771 static int perf_header__read_build_ids(struct perf_header *header,
    772 				       int input, u64 offset, u64 size)
    773 {
    774 	struct perf_session *session = container_of(header, struct perf_session, header);
    775 	struct build_id_event bev;
    776 	char filename[PATH_MAX];
    777 	u64 limit = offset + size, orig_offset = offset;
    778 	int err = -1;
    779 
    780 	while (offset < limit) {
    781 		ssize_t len;
    782 
    783 		if (read(input, &bev, sizeof(bev)) != sizeof(bev))
    784 			goto out;
    785 
    786 		if (header->needs_swap)
    787 			perf_event_header__bswap(&bev.header);
    788 
    789 		len = bev.header.size - sizeof(bev);
    790 		if (read(input, filename, len) != len)
    791 			goto out;
    792 		/*
    793 		 * The a1645ce1 changeset:
    794 		 *
    795 		 * "perf: 'perf kvm' tool for monitoring guest performance from host"
    796 		 *
    797 		 * Added a field to struct build_id_event that broke the file
    798 		 * format.
    799 		 *
    800 		 * Since the kernel build-id is the first entry, process the
    801 		 * table using the old format if the well known
    802 		 * '[kernel.kallsyms]' string for the kernel build-id has the
    803 		 * first 4 characters chopped off (where the pid_t sits).
    804 		 */
    805 		if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
    806 			if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
    807 				return -1;
    808 			return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
    809 		}
    810 
    811 		__event_process_build_id(&bev, filename, session);
    812 
    813 		offset += bev.header.size;
    814 	}
    815 	err = 0;
    816 out:
    817 	return err;
    818 }
    819 
    820 static int perf_file_section__process(struct perf_file_section *section,
    821 				      struct perf_header *ph,
    822 				      int feat, int fd)
    823 {
    824 	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
    825 		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
    826 			  "%d, continuing...\n", section->offset, feat);
    827 		return 0;
    828 	}
    829 
    830 	switch (feat) {
    831 	case HEADER_TRACE_INFO:
    832 		trace_report(fd, false);
    833 		break;
    834 
    835 	case HEADER_BUILD_ID:
    836 		if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
    837 			pr_debug("Failed to read buildids, continuing...\n");
    838 		break;
    839 	default:
    840 		pr_debug("unknown feature %d, continuing...\n", feat);
    841 	}
    842 
    843 	return 0;
    844 }
    845 
    846 static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
    847 				       struct perf_header *ph, int fd,
    848 				       bool repipe)
    849 {
    850 	if (readn(fd, header, sizeof(*header)) <= 0 ||
    851 	    memcmp(&header->magic, __perf_magic, sizeof(header->magic)))
    852 		return -1;
    853 
    854 	if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
    855 		return -1;
    856 
    857 	if (header->size != sizeof(*header)) {
    858 		u64 size = bswap_64(header->size);
    859 
    860 		if (size != sizeof(*header))
    861 			return -1;
    862 
    863 		ph->needs_swap = true;
    864 	}
    865 
    866 	return 0;
    867 }
    868 
    869 static int perf_header__read_pipe(struct perf_session *session, int fd)
    870 {
    871 	struct perf_header *header = &session->header;
    872 	struct perf_pipe_file_header f_header;
    873 
    874 	if (perf_file_header__read_pipe(&f_header, header, fd,
    875 					session->repipe) < 0) {
    876 		pr_debug("incompatible file format\n");
    877 		return -EINVAL;
    878 	}
    879 
    880 	session->fd = fd;
    881 
    882 	return 0;
    883 }
    884 
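         /*
          * Read a complete perf.data header from fd: allocate the session evlist,
          * create one evsel per perf_file_attr (faking a single CPU for the
          * sample id table), load the trace event type table, and process the
          * optional feature sections before seeking to the start of the sample
          * data.
          */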
    885 int perf_session__read_header(struct perf_session *session, int fd)
    886 {
    887 	struct perf_header *header = &session->header;
    888 	struct perf_file_header	f_header;
    889 	struct perf_file_attr	f_attr;
    890 	u64			f_id;
    891 	int nr_attrs, nr_ids, i, j;
    892 
    893 	session->evlist = perf_evlist__new(NULL, NULL);
    894 	if (session->evlist == NULL)
    895 		return -ENOMEM;
    896 
    897 	if (session->fd_pipe)
    898 		return perf_header__read_pipe(session, fd);
    899 
    900 	if (perf_file_header__read(&f_header, header, fd) < 0) {
    901 		pr_debug("incompatible file format\n");
    902 		return -EINVAL;
    903 	}
    904 
    905 	nr_attrs = f_header.attrs.size / sizeof(f_attr);
    906 	lseek(fd, f_header.attrs.offset, SEEK_SET);
    907 
    908 	for (i = 0; i < nr_attrs; i++) {
    909 		struct perf_evsel *evsel;
    910 		off_t tmp;
    911 
    912 		if (readn(fd, &f_attr, sizeof(f_attr)) <= 0)
    913 			goto out_errno;
    914 
    915 		if (header->needs_swap)
    916 			perf_event__attr_swap(&f_attr.attr);
    917 
    918 		tmp = lseek(fd, 0, SEEK_CUR);
    919 		evsel = perf_evsel__new(&f_attr.attr, i);
    920 
    921 		if (evsel == NULL)
    922 			goto out_delete_evlist;
    923 		/*
    924 		 * Do it before so that if perf_evsel__alloc_id fails, this
    925 		 * entry gets purged too at perf_evlist__delete().
    926 		 */
    927 		perf_evlist__add(session->evlist, evsel);
    928 
    929 		nr_ids = f_attr.ids.size / sizeof(u64);
    930 		/*
    931 		 * We don't have the cpu and thread maps on the header, so
    932 		 * for allocating the perf_sample_id table we fake 1 cpu and
    933 		 * hattr->ids threads.
    934 		 */
    935 		if (perf_evsel__alloc_id(evsel, 1, nr_ids))
    936 			goto out_delete_evlist;
    937 
    938 		lseek(fd, f_attr.ids.offset, SEEK_SET);
    939 
    940 		for (j = 0; j < nr_ids; j++) {
    941 			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
    942 				goto out_errno;
    943 
    944 			perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
    945 		}
    946 
    947 		lseek(fd, tmp, SEEK_SET);
    948 	}
    949 
    950 	if (f_header.event_types.size) {
    951 		lseek(fd, f_header.event_types.offset, SEEK_SET);
    952 		events = malloc(f_header.event_types.size);
    953 		if (events == NULL)
    954 			return -ENOMEM;
    955 		if (perf_header__getbuffer64(header, fd, events,
    956 					     f_header.event_types.size))
    957 			goto out_errno;
     958 		event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type);
    959 	}
    960 
    961 	perf_header__process_sections(header, fd, perf_file_section__process);
    962 
    963 	lseek(fd, header->data_offset, SEEK_SET);
    964 
    965 	header->frozen = 1;
    966 	return 0;
    967 out_errno:
    968 	return -errno;
    969 
    970 out_delete_evlist:
    971 	perf_evlist__delete(session->evlist);
    972 	session->evlist = NULL;
    973 	return -ENOMEM;
    974 }
    975 
    976 int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
    977 				perf_event__handler_t process,
    978 				struct perf_session *session)
    979 {
    980 	union perf_event *ev;
    981 	size_t size;
    982 	int err;
    983 
    984 	size = sizeof(struct perf_event_attr);
    985         /* ANDROID_CHANGE_BEGIN */
    986 #if defined(__BIONIC__) || defined(__APPLE__)
    987 	size = KERNEL_ALIGN(size, sizeof(u64));
    988 #else
    989 	size = ALIGN(size, sizeof(u64));
    990 #endif
    991         /* ANDROID_CHANGE_END */
    992 	size += sizeof(struct perf_event_header);
    993 	size += ids * sizeof(u64);
    994 
    995 	ev = malloc(size);
    996 
    997 	if (ev == NULL)
    998 		return -ENOMEM;
    999 
   1000 	ev->attr.attr = *attr;
   1001 	memcpy(ev->attr.id, id, ids * sizeof(u64));
   1002 
   1003 	ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
   1004 	ev->attr.header.size = size;
   1005 
   1006 	err = process(ev, NULL, session);
   1007 
   1008 	free(ev);
   1009 
   1010 	return err;
   1011 }
   1012 
   1013 int perf_session__synthesize_attrs(struct perf_session *session,
   1014 				   perf_event__handler_t process)
   1015 {
   1016 	struct perf_evsel *attr;
   1017 	int err = 0;
   1018 
   1019 	list_for_each_entry(attr, &session->evlist->entries, node) {
   1020 		err = perf_event__synthesize_attr(&attr->attr, attr->ids,
   1021 						  attr->id, process, session);
   1022 		if (err) {
   1023 			pr_debug("failed to create perf header attribute\n");
   1024 			return err;
   1025 		}
   1026 	}
   1027 
   1028 	return err;
   1029 }
   1030 
   1031 int perf_event__process_attr(union perf_event *event,
   1032 			     struct perf_session *session)
   1033 {
   1034 	unsigned int i, ids, n_ids;
   1035 	struct perf_evsel *evsel;
   1036 
   1037 	if (session->evlist == NULL) {
   1038 		session->evlist = perf_evlist__new(NULL, NULL);
   1039 		if (session->evlist == NULL)
   1040 			return -ENOMEM;
   1041 	}
   1042 
   1043 	evsel = perf_evsel__new(&event->attr.attr,
   1044 				session->evlist->nr_entries);
   1045 	if (evsel == NULL)
   1046 		return -ENOMEM;
   1047 
   1048 	perf_evlist__add(session->evlist, evsel);
   1049 
   1050 	ids = event->header.size;
   1051 	ids -= (void *)&event->attr.id - (void *)event;
   1052 	n_ids = ids / sizeof(u64);
   1053 	/*
   1054 	 * We don't have the cpu and thread maps on the header, so
   1055 	 * for allocating the perf_sample_id table we fake 1 cpu and
   1056 	 * hattr->ids threads.
   1057 	 */
   1058 	if (perf_evsel__alloc_id(evsel, 1, n_ids))
   1059 		return -ENOMEM;
   1060 
   1061 	for (i = 0; i < n_ids; i++) {
   1062 		perf_evlist__id_add(session->evlist, evsel, 0, i,
   1063 				    event->attr.id[i]);
   1064 	}
   1065 
   1066 	perf_session__update_sample_type(session);
   1067 
   1068 	return 0;
   1069 }
   1070 
   1071 int perf_event__synthesize_event_type(u64 event_id, char *name,
   1072 				      perf_event__handler_t process,
   1073 				      struct perf_session *session)
   1074 {
   1075 	union perf_event ev;
   1076 	size_t size = 0;
   1077 	int err = 0;
   1078 
   1079 	memset(&ev, 0, sizeof(ev));
   1080 
   1081 	ev.event_type.event_type.event_id = event_id;
   1082 	memset(ev.event_type.event_type.name, 0, MAX_EVENT_NAME);
   1083 	strncpy(ev.event_type.event_type.name, name, MAX_EVENT_NAME - 1);
   1084 
   1085 	ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE;
   1086 	size = strlen(name);
   1087         /* ANDROID_CHANGE_BEGIN */
   1088 #if defined(__BIONIC__) || defined(__APPLE__)
   1089 	size = KERNEL_ALIGN(size, sizeof(u64));
   1090 #else
   1091 	size = ALIGN(size, sizeof(u64));
   1092 #endif
   1093         /* ANDROID_CHANGE_END */
   1094 	ev.event_type.header.size = sizeof(ev.event_type) -
   1095 		(sizeof(ev.event_type.event_type.name) - size);
   1096 
   1097 	err = process(&ev, NULL, session);
   1098 
   1099 	return err;
   1100 }
   1101 
   1102 int perf_event__synthesize_event_types(perf_event__handler_t process,
   1103 				       struct perf_session *session)
   1104 {
   1105 	struct perf_trace_event_type *type;
   1106 	int i, err = 0;
   1107 
   1108 	for (i = 0; i < event_count; i++) {
   1109 		type = &events[i];
   1110 
   1111 		err = perf_event__synthesize_event_type(type->event_id,
   1112 							type->name, process,
   1113 							session);
   1114 		if (err) {
   1115 			pr_debug("failed to create perf header event type\n");
   1116 			return err;
   1117 		}
   1118 	}
   1119 
   1120 	return err;
   1121 }
   1122 
   1123 int perf_event__process_event_type(union perf_event *event,
   1124 				   struct perf_session *session __unused)
   1125 {
   1126 	if (perf_header__push_event(event->event_type.event_type.event_id,
   1127 				    event->event_type.event_type.name) < 0)
   1128 		return -ENOMEM;
   1129 
   1130 	return 0;
   1131 }
   1132 
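         /*
          * Emit a PERF_RECORD_HEADER_TRACING_DATA event announcing how much
          * tracing data follows (rounded up to a u64 boundary), then stream the
          * tracing data itself to @fd and pad it to that size. Returns the padded
          * size, or the non-positive result of read_tracing_data_size() when
          * there is nothing to write.
          */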
   1133 int perf_event__synthesize_tracing_data(int fd, struct perf_evlist *evlist,
   1134 					 perf_event__handler_t process,
   1135 				   struct perf_session *session __unused)
   1136 {
   1137 	union perf_event ev;
   1138 	ssize_t size = 0, aligned_size = 0, padding;
   1139 	int err __used = 0;
   1140 
   1141 	memset(&ev, 0, sizeof(ev));
   1142 
   1143 	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
   1144 	size = read_tracing_data_size(fd, &evlist->entries);
   1145 	if (size <= 0)
   1146 		return size;
   1147         /* ANDROID_CHANGE_BEGIN */
   1148 #if defined(__BIONIC__) || defined(__APPLE__)
   1149 	aligned_size = KERNEL_ALIGN(size, sizeof(u64));
   1150 #else
   1151 	aligned_size = ALIGN(size, sizeof(u64));
   1152 #endif
   1153         /* ANDROID_CHANGE_END */
   1154 	padding = aligned_size - size;
   1155 	ev.tracing_data.header.size = sizeof(ev.tracing_data);
   1156 	ev.tracing_data.size = aligned_size;
   1157 
   1158 	process(&ev, NULL, session);
   1159 
   1160 	err = read_tracing_data(fd, &evlist->entries);
   1161 	write_padded(fd, NULL, 0, padding);
   1162 
   1163 	return aligned_size;
   1164 }
   1165 
   1166 int perf_event__process_tracing_data(union perf_event *event,
   1167 				     struct perf_session *session)
   1168 {
   1169 	ssize_t size_read, padding, size = event->tracing_data.size;
   1170 	off_t offset = lseek(session->fd, 0, SEEK_CUR);
   1171 	char buf[BUFSIZ];
   1172 
   1173 	/* setup for reading amidst mmap */
   1174 	lseek(session->fd, offset + sizeof(struct tracing_data_event),
   1175 	      SEEK_SET);
   1176 
   1177 	size_read = trace_report(session->fd, session->repipe);
   1178 
   1179         /* ANDROID_CHANGE_BEGIN */
   1180 #if defined(__BIONIC__) || defined(__APPLE__)
   1181 	padding = KERNEL_ALIGN(size_read, sizeof(u64)) - size_read;
   1182 #else
   1183 	padding = ALIGN(size_read, sizeof(u64)) - size_read;
   1184 #endif
   1185         /* ANDROID_CHANGE_END */
   1186 
   1187 	if (read(session->fd, buf, padding) < 0)
   1188 		die("reading input file");
   1189 	if (session->repipe) {
   1190 		int retw = write(STDOUT_FILENO, buf, padding);
   1191 		if (retw <= 0 || retw != padding)
   1192 			die("repiping tracing data padding");
   1193 	}
   1194 
   1195 	if (size_read + padding != size)
   1196 		die("tracing data size mismatch");
   1197 
   1198 	return size_read + padding;
   1199 }
   1200 
   1201 int perf_event__synthesize_build_id(struct dso *pos, u16 misc,
   1202 				    perf_event__handler_t process,
   1203 				    struct machine *machine,
   1204 				    struct perf_session *session)
   1205 {
   1206 	union perf_event ev;
   1207 	size_t len;
   1208 	int err = 0;
   1209 
   1210 	if (!pos->hit)
   1211 		return err;
   1212 
   1213 	memset(&ev, 0, sizeof(ev));
   1214 
   1215 	len = pos->long_name_len + 1;
   1216         /* ANDROID_CHANGE_BEGIN */
   1217 #if defined(__BIONIC__) || defined(__APPLE__)
   1218 	len = KERNEL_ALIGN(len, NAME_ALIGN);
   1219 #else
   1220 	len = ALIGN(len, NAME_ALIGN);
   1221 #endif
   1222         /* ANDROID_CHANGE_END */
   1223 	memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
   1224 	ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
   1225 	ev.build_id.header.misc = misc;
   1226 	ev.build_id.pid = machine->pid;
   1227 	ev.build_id.header.size = sizeof(ev.build_id) + len;
   1228 	memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
   1229 
   1230 	err = process(&ev, NULL, session);
   1231 
   1232 	return err;
   1233 }
   1234 
   1235 int perf_event__process_build_id(union perf_event *event,
   1236 				 struct perf_session *session)
   1237 {
   1238 	__event_process_build_id(&event->build_id,
   1239 				 event->build_id.filename,
   1240 				 session);
   1241 	return 0;
   1242 }
   1243 
   1244 void disable_buildid_cache(void)
   1245 {
   1246 	no_buildid_cache = true;
   1247 }
   1248