/*
 * builtin-timechart.c - make an svg timechart of system activity
 *
 * (C) Copyright 2009 Intel Corporation
 *
 * Authors:
 *     Arjan van de Ven <arjan (at) linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <traceevent/event-parse.h>

#include "builtin.h"

#include "util/util.h"

#include "util/color.h"
#include <linux/list.h>
#include "util/cache.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include <linux/rbtree.h>
#include "util/symbol.h"
#include "util/callchain.h"
#include "util/strlist.h"

#include "perf.h"
#include "util/header.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/event.h"
#include "util/session.h"
#include "util/svghelper.h"
#include "util/tool.h"

#define SUPPORT_OLD_POWER_EVENTS 1
#define PWR_EVENT_EXIT -1


static unsigned int	numcpus;
static u64		min_freq;	/* Lowest CPU frequency seen */
static u64		max_freq;	/* Highest CPU frequency seen */
static u64		turbo_frequency;

static u64		first_time, last_time;

static bool		power_only;


struct per_pid;
struct per_pidcomm;

struct cpu_sample;
struct power_event;
struct wake_event;

struct sample_wrapper;

/*
 * Datastructure layout:
 * We keep a list of "pid"s, matching the kernel's notion of a task struct.
 * Each "pid" entry has a list of "comm"s.
 *	This is because we want to track different programs differently, while
 *	exec will reuse the original pid (by design).
 * Each comm has a list of samples that will be used to draw
 * the final graph.
 */

struct per_pid {
	struct per_pid *next;

	int		pid;
	int		ppid;

	u64		start_time;
	u64		end_time;
	u64		total_time;
	int		display;

	struct per_pidcomm *all;
	struct per_pidcomm *current;
};


struct per_pidcomm {
	struct per_pidcomm *next;

	u64		start_time;
	u64		end_time;
	u64		total_time;

	int		Y;
	int		display;

	long		state;
	u64		state_since;

	char		*comm;

	struct cpu_sample *samples;
};

struct sample_wrapper {
	struct sample_wrapper *next;

	u64		timestamp;
	unsigned char	data[0];
};

#define TYPE_NONE	0
#define TYPE_RUNNING	1
#define TYPE_WAITING	2
#define TYPE_BLOCKED	3

struct cpu_sample {
	struct cpu_sample *next;

	u64 start_time;
	u64 end_time;
	int type;
	int cpu;
};

static struct per_pid *all_data;

#define CSTATE 1
#define PSTATE 2

struct power_event {
	struct power_event *next;
	int type;
	int state;
	u64 start_time;
	u64 end_time;
	int cpu;
};

struct wake_event {
	struct wake_event *next;
	int waker;
	int wakee;
	u64 time;
};

static struct power_event *power_events;
static struct wake_event *wake_events;

struct process_filter;
struct process_filter {
	char			*name;
	int			pid;
	struct process_filter	*next;
};

static struct process_filter *process_filter;


static struct per_pid *find_create_pid(int pid)
{
	struct per_pid *cursor = all_data;

	while (cursor) {
		if (cursor->pid == pid)
			return cursor;
		cursor = cursor->next;
	}
	cursor = zalloc(sizeof(*cursor));
	assert(cursor != NULL);
	cursor->pid = pid;
	cursor->next = all_data;
	all_data = cursor;
	return cursor;
}

static void pid_set_comm(int pid, char *comm)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	p = find_create_pid(pid);
	c = p->all;
	while (c) {
		if (c->comm && strcmp(c->comm, comm) == 0) {
			p->current = c;
			return;
		}
		if (!c->comm) {
			c->comm = strdup(comm);
			p->current = c;
			return;
		}
		c = c->next;
	}
	c = zalloc(sizeof(*c));
	assert(c != NULL);
	c->comm = strdup(comm);
	p->current = c;
	c->next = p->all;
	p->all = c;
}

static void pid_fork(int pid, int ppid, u64 timestamp)
{
	struct per_pid *p, *pp;
	p = find_create_pid(pid);
	pp = find_create_pid(ppid);
	p->ppid = ppid;
	if (pp->current && pp->current->comm && !p->current)
		pid_set_comm(pid, pp->current->comm);

	p->start_time = timestamp;
	if (p->current) {
		p->current->start_time = timestamp;
		p->current->state_since = timestamp;
	}
}

static void pid_exit(int pid, u64 timestamp)
{
	struct per_pid *p;
	p = find_create_pid(pid);
	p->end_time = timestamp;
	if (p->current)
		p->current->end_time = timestamp;
}

static void
pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;

	p = find_create_pid(pid);
	c = p->current;
	if (!c) {
		c = zalloc(sizeof(*c));
		assert(c != NULL);
		p->current = c;
		c->next = p->all;
		p->all = c;
	}

	sample = zalloc(sizeof(*sample));
	assert(sample != NULL);
	sample->start_time = start;
	sample->end_time = end;
	sample->type = type;
	sample->next = c->samples;
	sample->cpu = cpu;
	c->samples = sample;

	if (sample->type == TYPE_RUNNING && end > start && start > 0) {
		c->total_time += (end-start);
		p->total_time += (end-start);
	}

	if (c->start_time == 0 || c->start_time > start)
		c->start_time = start;
	if (p->start_time == 0 || p->start_time > start)
		p->start_time = start;
}

#define MAX_CPUS 4096
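
/*
 * Per-CPU bookkeeping for C-state and P-state transitions: the start
 * timestamp and state of whatever C/P state each CPU is currently in,
 * filled in when a transition starts and consumed when it ends.
 */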
static u64 cpus_cstate_start_times[MAX_CPUS];
static int cpus_cstate_state[MAX_CPUS];
static u64 cpus_pstate_start_times[MAX_CPUS];
static u64 cpus_pstate_state[MAX_CPUS];

static int process_comm_event(struct perf_tool *tool __maybe_unused,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	pid_set_comm(event->comm.tid, event->comm.comm);
	return 0;
}

static int process_fork_event(struct perf_tool *tool __maybe_unused,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	pid_fork(event->fork.pid, event->fork.ppid, event->fork.time);
	return 0;
}

static int process_exit_event(struct perf_tool *tool __maybe_unused,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	pid_exit(event->fork.pid, event->fork.time);
	return 0;
}
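
/*
 * In-memory mirrors of the raw tracepoint record layouts: samples arrive
 * as raw bytes in sample->raw_data and are cast to these structures by
 * the per-tracepoint handlers below.
 */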
struct trace_entry {
	unsigned short		type;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
	int			lock_depth;
};

#ifdef SUPPORT_OLD_POWER_EVENTS
static int use_old_power_events;
struct power_entry_old {
	struct trace_entry te;
	u64	type;
	u64	value;
	u64	cpu_id;
};
#endif

struct power_processor_entry {
	struct trace_entry te;
	u32	state;
	u32	cpu_id;
};

#define TASK_COMM_LEN 16
struct wakeup_entry {
	struct trace_entry te;
	char comm[TASK_COMM_LEN];
	int   pid;
	int   prio;
	int   success;
};

struct sched_switch {
	struct trace_entry te;
	char prev_comm[TASK_COMM_LEN];
	int  prev_pid;
	int  prev_prio;
	long prev_state; /* Arjan weeps. */
	char next_comm[TASK_COMM_LEN];
	int  next_pid;
	int  next_prio;
};

static void c_state_start(int cpu, u64 timestamp, int state)
{
	cpus_cstate_start_times[cpu] = timestamp;
	cpus_cstate_state[cpu] = state;
}

static void c_state_end(int cpu, u64 timestamp)
{
	struct power_event *pwr = zalloc(sizeof(*pwr));

	if (!pwr)
		return;

	pwr->state = cpus_cstate_state[cpu];
	pwr->start_time = cpus_cstate_start_times[cpu];
	pwr->end_time = timestamp;
	pwr->cpu = cpu;
	pwr->type = CSTATE;
	pwr->next = power_events;

	power_events = pwr;
}

static void p_state_change(int cpu, u64 timestamp, u64 new_freq)
{
	struct power_event *pwr;

	if (new_freq > 8000000) /* detect invalid data */
		return;

	pwr = zalloc(sizeof(*pwr));
	if (!pwr)
		return;

	pwr->state = cpus_pstate_state[cpu];
	pwr->start_time = cpus_pstate_start_times[cpu];
	pwr->end_time = timestamp;
	pwr->cpu = cpu;
	pwr->type = PSTATE;
	pwr->next = power_events;

	if (!pwr->start_time)
		pwr->start_time = first_time;

	power_events = pwr;

	cpus_pstate_state[cpu] = new_freq;
	cpus_pstate_start_times[cpu] = timestamp;

	if ((u64)new_freq > max_freq)
		max_freq = new_freq;

	if (new_freq < min_freq || min_freq == 0)
		min_freq = new_freq;

	if (new_freq == max_freq - 1000)
		turbo_frequency = max_freq;
}

static void
sched_wakeup(int cpu, u64 timestamp, int pid, struct trace_entry *te)
{
	struct per_pid *p;
	struct wakeup_entry *wake = (void *)te;
	struct wake_event *we = zalloc(sizeof(*we));

	if (!we)
		return;

	we->time = timestamp;
	we->waker = pid;

	if ((te->flags & TRACE_FLAG_HARDIRQ) || (te->flags & TRACE_FLAG_SOFTIRQ))
		we->waker = -1;

	we->wakee = wake->pid;
	we->next = wake_events;
	wake_events = we;
	p = find_create_pid(we->wakee);

	if (p && p->current && p->current->state == TYPE_NONE) {
		p->current->state_since = timestamp;
		p->current->state = TYPE_WAITING;
	}
	if (p && p->current && p->current->state == TYPE_BLOCKED) {
		pid_put_sample(p->pid, p->current->state, cpu, p->current->state_since, timestamp);
		p->current->state_since = timestamp;
		p->current->state = TYPE_WAITING;
	}
}

static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te)
{
	struct per_pid *p = NULL, *prev_p;
	struct sched_switch *sw = (void *)te;

	prev_p = find_create_pid(sw->prev_pid);

	p = find_create_pid(sw->next_pid);

	if (prev_p->current && prev_p->current->state != TYPE_NONE)
		pid_put_sample(sw->prev_pid, TYPE_RUNNING, cpu, prev_p->current->state_since, timestamp);
	if (p && p->current) {
		if (p->current->state != TYPE_NONE)
			pid_put_sample(sw->next_pid, p->current->state, cpu, p->current->state_since, timestamp);

		p->current->state_since = timestamp;
		p->current->state = TYPE_RUNNING;
	}

	if (prev_p->current) {
		prev_p->current->state = TYPE_NONE;
		prev_p->current->state_since = timestamp;
		if (sw->prev_state & 2)
			prev_p->current->state = TYPE_BLOCKED;
		if (sw->prev_state == 0)
			prev_p->current->state = TYPE_WAITING;
	}
}
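
/*
 * Tracepoint samples are dispatched through evsel->handler.func, which
 * __cmd_timechart() points at one of the process_sample_* callbacks below.
 */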
typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
				  struct perf_sample *sample);

static int process_sample_event(struct perf_tool *tool __maybe_unused,
				union perf_event *event __maybe_unused,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine __maybe_unused)
{
	if (evsel->attr.sample_type & PERF_SAMPLE_TIME) {
		if (!first_time || first_time > sample->time)
			first_time = sample->time;
		if (last_time < sample->time)
			last_time = sample->time;
	}

	if (sample->cpu > numcpus)
		numcpus = sample->cpu;

	if (evsel->handler.func != NULL) {
		tracepoint_handler f = evsel->handler.func;
		return f(evsel, sample);
	}

	return 0;
}

static int
process_sample_cpu_idle(struct perf_evsel *evsel __maybe_unused,
			struct perf_sample *sample)
{
	struct power_processor_entry *ppe = sample->raw_data;

	if (ppe->state == (u32) PWR_EVENT_EXIT)
		c_state_end(ppe->cpu_id, sample->time);
	else
		c_state_start(ppe->cpu_id, sample->time, ppe->state);
	return 0;
}

static int
process_sample_cpu_frequency(struct perf_evsel *evsel __maybe_unused,
			     struct perf_sample *sample)
{
	struct power_processor_entry *ppe = sample->raw_data;

	p_state_change(ppe->cpu_id, sample->time, ppe->state);
	return 0;
}

static int
process_sample_sched_wakeup(struct perf_evsel *evsel __maybe_unused,
			    struct perf_sample *sample)
{
	struct trace_entry *te = sample->raw_data;

	sched_wakeup(sample->cpu, sample->time, sample->pid, te);
	return 0;
}

static int
process_sample_sched_switch(struct perf_evsel *evsel __maybe_unused,
			    struct perf_sample *sample)
{
	struct trace_entry *te = sample->raw_data;

	sched_switch(sample->cpu, sample->time, te);
	return 0;
}

#ifdef SUPPORT_OLD_POWER_EVENTS
static int
process_sample_power_start(struct perf_evsel *evsel __maybe_unused,
			   struct perf_sample *sample)
{
	struct power_entry_old *peo = sample->raw_data;

	c_state_start(peo->cpu_id, sample->time, peo->value);
	return 0;
}

static int
process_sample_power_end(struct perf_evsel *evsel __maybe_unused,
			 struct perf_sample *sample)
{
	c_state_end(sample->cpu, sample->time);
	return 0;
}

static int
process_sample_power_frequency(struct perf_evsel *evsel __maybe_unused,
			       struct perf_sample *sample)
{
	struct power_entry_old *peo = sample->raw_data;

	p_state_change(peo->cpu_id, sample->time, peo->value);
	return 0;
}
#endif /* SUPPORT_OLD_POWER_EVENTS */

/*
 * After the last sample we need to wrap up the current C/P state
 * and close out each CPU for these.
 */
static void end_sample_processing(void)
{
	u64 cpu;
	struct power_event *pwr;

	for (cpu = 0; cpu <= numcpus; cpu++) {
		/* C state */
#if 0
		pwr = zalloc(sizeof(*pwr));
		if (!pwr)
			return;

		pwr->state = cpus_cstate_state[cpu];
		pwr->start_time = cpus_cstate_start_times[cpu];
		pwr->end_time = last_time;
		pwr->cpu = cpu;
		pwr->type = CSTATE;
		pwr->next = power_events;

		power_events = pwr;
#endif
		/* P state */

		pwr = zalloc(sizeof(*pwr));
		if (!pwr)
			return;

		pwr->state = cpus_pstate_state[cpu];
		pwr->start_time = cpus_pstate_start_times[cpu];
		pwr->end_time = last_time;
		pwr->cpu = cpu;
		pwr->type = PSTATE;
		pwr->next = power_events;

		if (!pwr->start_time)
			pwr->start_time = first_time;
		if (!pwr->state)
			pwr->state = min_freq;
		power_events = pwr;
	}
}

/*
 * Sort the pid datastructure
 */
static void sort_pids(void)
{
	struct per_pid *new_list, *p, *cursor, *prev;
	/* sort by ppid first, then by pid, lowest to highest */

	new_list = NULL;

	while (all_data) {
		p = all_data;
		all_data = p->next;
		p->next = NULL;

		if (new_list == NULL) {
			new_list = p;
			p->next = NULL;
			continue;
		}
		prev = NULL;
		cursor = new_list;
		while (cursor) {
			if (cursor->ppid > p->ppid ||
				(cursor->ppid == p->ppid && cursor->pid > p->pid)) {
				/* must insert before */
				if (prev) {
					p->next = prev->next;
					prev->next = p;
					cursor = NULL;
					continue;
				} else {
					p->next = new_list;
					new_list = p;
					cursor = NULL;
					continue;
				}
			}

			prev = cursor;
			cursor = cursor->next;
			if (!cursor)
				prev->next = p;
		}
	}
	all_data = new_list;
}


static void draw_c_p_states(void)
{
	struct power_event *pwr;
	pwr = power_events;

	/*
	 * two pass drawing so that the P state bars are on top of the C state blocks
	 */
	while (pwr) {
		if (pwr->type == CSTATE)
			svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
		pwr = pwr->next;
	}

	pwr = power_events;
	while (pwr) {
		if (pwr->type == PSTATE) {
			if (!pwr->state)
				pwr->state = min_freq;
			svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
		}
		pwr = pwr->next;
	}
}
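
/*
 * Draw an arrow for every wakeup: look up the svg rows (Y) of the waker
 * and the wakee; wakeups raised from hard/soft irq context (waker == -1)
 * are drawn as interrupts instead.
 */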
static void draw_wakeups(void)
{
	struct wake_event *we;
	struct per_pid *p;
	struct per_pidcomm *c;

	we = wake_events;
	while (we) {
		int from = 0, to = 0;
		char *task_from = NULL, *task_to = NULL;

		/* locate the column of the waker and wakee */
		p = all_data;
		while (p) {
			if (p->pid == we->waker || p->pid == we->wakee) {
				c = p->all;
				while (c) {
					if (c->Y && c->start_time <= we->time && c->end_time >= we->time) {
						if (p->pid == we->waker && !from) {
							from = c->Y;
							task_from = strdup(c->comm);
						}
						if (p->pid == we->wakee && !to) {
							to = c->Y;
							task_to = strdup(c->comm);
						}
					}
					c = c->next;
				}
				c = p->all;
				while (c) {
					if (p->pid == we->waker && !from) {
						from = c->Y;
						task_from = strdup(c->comm);
					}
					if (p->pid == we->wakee && !to) {
						to = c->Y;
						task_to = strdup(c->comm);
					}
					c = c->next;
				}
			}
			p = p->next;
		}

		if (!task_from) {
			task_from = malloc(40);
			sprintf(task_from, "[%i]", we->waker);
		}
		if (!task_to) {
			task_to = malloc(40);
			sprintf(task_to, "[%i]", we->wakee);
		}

		if (we->waker == -1)
			svg_interrupt(we->time, to);
		else if (from && to && abs(from - to) == 1)
			svg_wakeline(we->time, from, to);
		else
			svg_partial_wakeline(we->time, from, task_from, to, task_to);
		we = we->next;

		free(task_from);
		free(task_to);
	}
}

static void draw_cpu_usage(void)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;
	p = all_data;
	while (p) {
		c = p->all;
		while (c) {
			sample = c->samples;
			while (sample) {
				if (sample->type == TYPE_RUNNING)
					svg_process(sample->cpu, sample->start_time, sample->end_time, "sample", c->comm);

				sample = sample->next;
			}
			c = c->next;
		}
		p = p->next;
	}
}

static void draw_process_bars(void)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;
	int Y = 0;

	Y = 2 * numcpus + 2;

	p = all_data;
	while (p) {
		c = p->all;
		while (c) {
			if (!c->display) {
				c->Y = 0;
				c = c->next;
				continue;
			}

			svg_box(Y, c->start_time, c->end_time, "process");
			sample = c->samples;
			while (sample) {
				if (sample->type == TYPE_RUNNING)
					svg_sample(Y, sample->cpu, sample->start_time, sample->end_time);
				if (sample->type == TYPE_BLOCKED)
					svg_box(Y, sample->start_time, sample->end_time, "blocked");
				if (sample->type == TYPE_WAITING)
					svg_waiting(Y, sample->start_time, sample->end_time);
				sample = sample->next;
			}

			if (c->comm) {
				char comm[256];
				if (c->total_time > 5000000000) /* 5 seconds */
					sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / 1000000000.0);
				else
					sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / 1000000.0);

				svg_text(Y, c->start_time, comm);
			}
			c->Y = Y;
			Y++;
			c = c->next;
		}
		p = p->next;
	}
}

static void add_process_filter(const char *string)
{
	int pid = strtoull(string, NULL, 10);
	struct process_filter *filt = malloc(sizeof(*filt));

	if (!filt)
		return;

	filt->name = strdup(string);
	filt->pid = pid;
	filt->next = process_filter;

	process_filter = filt;
}

static int passes_filter(struct per_pid *p, struct per_pidcomm *c)
{
	struct process_filter *filt;
	if (!process_filter)
		return 1;

	filt = process_filter;
	while (filt) {
		if (filt->pid && p->pid == filt->pid)
			return 1;
		if (strcmp(filt->name, c->comm) == 0)
			return 1;
		filt = filt->next;
	}
	return 0;
}

static int determine_display_tasks_filtered(void)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	int count = 0;

	p = all_data;
	while (p) {
		p->display = 0;
		if (p->start_time == 1)
			p->start_time = first_time;

		/* no exit marker, task kept running to the end */
		if (p->end_time == 0)
			p->end_time = last_time;

		c = p->all;

		while (c) {
			c->display = 0;

			if (c->start_time == 1)
				c->start_time = first_time;

			if (passes_filter(p, c)) {
				c->display = 1;
				p->display = 1;
				count++;
			}

			if (c->end_time == 0)
				c->end_time = last_time;

			c = c->next;
		}
		p = p->next;
	}
	return count;
}
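
/*
 * Pick the tasks worth a row in the chart: a pid/comm is displayed if it
 * accumulated at least 'threshold' ns of run time; with --power-only no
 * tasks are displayed, and an explicit --process filter takes the
 * filtered path above instead.
 */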
static int determine_display_tasks(u64 threshold)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	int count = 0;

	if (process_filter)
		return determine_display_tasks_filtered();

	p = all_data;
	while (p) {
		p->display = 0;
		if (p->start_time == 1)
			p->start_time = first_time;

		/* no exit marker, task kept running to the end */
		if (p->end_time == 0)
			p->end_time = last_time;
		if (p->total_time >= threshold && !power_only)
			p->display = 1;

		c = p->all;

		while (c) {
			c->display = 0;

			if (c->start_time == 1)
				c->start_time = first_time;

			if (c->total_time >= threshold && !power_only) {
				c->display = 1;
				count++;
			}

			if (c->end_time == 0)
				c->end_time = last_time;

			c = c->next;
		}
		p = p->next;
	}
	return count;
}


#define TIME_THRESH 10000000

static void write_svg_file(const char *filename)
{
	u64 i;
	int count;

	numcpus++;

	count = determine_display_tasks(TIME_THRESH);

	/* We'd like to show at least 15 tasks; be less picky if we have fewer */
	if (count < 15)
		count = determine_display_tasks(TIME_THRESH / 10);

	open_svg(filename, numcpus, count, first_time, last_time);

	svg_time_grid();
	svg_legenda();

	for (i = 0; i < numcpus; i++)
		svg_cpu_box(i, max_freq, turbo_frequency);

	draw_cpu_usage();
	draw_process_bars();
	draw_c_p_states();
	draw_wakeups();

	svg_close();
}

static int __cmd_timechart(const char *output_name)
{
	struct perf_tool perf_timechart = {
		.comm		 = process_comm_event,
		.fork		 = process_fork_event,
		.exit		 = process_exit_event,
		.sample		 = process_sample_event,
		.ordered_samples = true,
	};
	const struct perf_evsel_str_handler power_tracepoints[] = {
		{ "power:cpu_idle",		process_sample_cpu_idle },
		{ "power:cpu_frequency",	process_sample_cpu_frequency },
		{ "sched:sched_wakeup",		process_sample_sched_wakeup },
		{ "sched:sched_switch",		process_sample_sched_switch },
#ifdef SUPPORT_OLD_POWER_EVENTS
		{ "power:power_start",		process_sample_power_start },
		{ "power:power_end",		process_sample_power_end },
		{ "power:power_frequency",	process_sample_power_frequency },
#endif
	};
	struct perf_session *session = perf_session__new(input_name, O_RDONLY,
							 0, false, &perf_timechart);
	int ret = -EINVAL;

	if (session == NULL)
		return -ENOMEM;

	if (!perf_session__has_traces(session, "timechart record"))
		goto out_delete;

	if (perf_session__set_tracepoints_handlers(session,
						   power_tracepoints)) {
		pr_err("Initializing session tracepoint handlers failed\n");
		goto out_delete;
	}

	ret = perf_session__process_events(session, &perf_timechart);
	if (ret)
		goto out_delete;

	end_sample_processing();

	sort_pids();

	write_svg_file(output_name);

	pr_info("Written %2.1f seconds of trace to %s.\n",
		(last_time - first_time) / 1000000000.0, output_name);
out_delete:
	perf_session__delete(session);
	return ret;
}

static int __cmd_record(int argc, const char **argv)
{
#ifdef SUPPORT_OLD_POWER_EVENTS
	const char * const record_old_args[] = {
		"record", "-a", "-R", "-c", "1",
		"-e", "power:power_start",
		"-e", "power:power_end",
		"-e", "power:power_frequency",
		"-e", "sched:sched_wakeup",
		"-e", "sched:sched_switch",
	};
#endif
	const char * const record_new_args[] = {
		"record", "-a", "-R", "-c", "1",
		"-e", "power:cpu_frequency",
		"-e", "power:cpu_idle",
		"-e", "sched:sched_wakeup",
		"-e", "sched:sched_switch",
	};
	unsigned int rec_argc, i, j;
	const char **rec_argv;
	const char * const *record_args = record_new_args;
	unsigned int record_elems = ARRAY_SIZE(record_new_args);

#ifdef SUPPORT_OLD_POWER_EVENTS
	if (!is_valid_tracepoint("power:cpu_idle") &&
	    is_valid_tracepoint("power:power_start")) {
		use_old_power_events = 1;
		record_args = record_old_args;
		record_elems = ARRAY_SIZE(record_old_args);
	}
#endif

	rec_argc = record_elems + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < record_elems; i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	return cmd_record(i, rec_argv, NULL);
}

static int
parse_process(const struct option *opt __maybe_unused, const char *arg,
	      int __maybe_unused unset)
{
	if (arg)
		add_process_filter(arg);
	return 0;
}

int cmd_timechart(int argc, const char **argv,
		  const char *prefix __maybe_unused)
{
	const char *output_name = "output.svg";
	const struct option options[] = {
	OPT_STRING('i', "input", &input_name, "file", "input file name"),
	OPT_STRING('o', "output", &output_name, "file", "output file name"),
	OPT_INTEGER('w', "width", &svg_page_width, "page width"),
	OPT_BOOLEAN('P', "power-only", &power_only, "output power data only"),
	OPT_CALLBACK('p', "process", NULL, "process",
		     "process selector. Pass a pid or process name.",
		     parse_process),
	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
		   "Look for files with symbols relative to this directory"),
	OPT_END()
	};
	const char * const timechart_usage[] = {
		"perf timechart [<options>] {record}",
		NULL
	};

	argc = parse_options(argc, argv, options, timechart_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);

	symbol__init();

	if (argc && !strncmp(argv[0], "rec", 3))
		return __cmd_record(argc, argv);
	else if (argc)
		usage_with_options(timechart_usage, options);

	setup_pager();

	return __cmd_timechart(output_name);
}