/*
 * builtin-timechart.c - make an svg timechart of system activity
 *
 * (C) Copyright 2009 Intel Corporation
 *
 * Authors:
 *     Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include "builtin.h"

#include "util/util.h"

#include "util/color.h"
#include <linux/list.h>
#include "util/cache.h"
#include <linux/rbtree.h>
#include "util/symbol.h"
#include "util/string.h"
#include "util/callchain.h"
#include "util/strlist.h"

#include "perf.h"
#include "util/header.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/event.h"
#include "util/data_map.h"
#include "util/svghelper.h"

static char const *input_name = "perf.data";
static char const *output_name = "output.svg";


static u64 sample_type;

static unsigned int numcpus;
static u64 min_freq;	/* Lowest CPU frequency seen */
static u64 max_freq;	/* Highest CPU frequency seen */
static u64 turbo_frequency;

static u64 first_time, last_time;

static int power_only;


struct per_pid;
struct per_pidcomm;

struct cpu_sample;
struct power_event;
struct wake_event;

struct sample_wrapper;

/*
 * Datastructure layout:
 * We keep a list of "pid"s, matching the kernel's notion of a task struct.
 * Each "pid" entry has a list of "comm"s.
 *	This is because we want to track different programs differently, while
 *	exec will reuse the original pid (by design).
 * Each comm has a list of samples that will be used to draw
 * the final graph.
 */
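/*
 * Illustrative sketch of the lists built below (the pids and comm names
 * are made-up examples, not real data):
 *
 *	all_data -> per_pid(1234) -> per_pid(1) -> ...
 *	               |
 *	               +-> per_pidcomm("bash") -> per_pidcomm("ls") -> ...
 *	                      |
 *	                      +-> cpu_sample -> cpu_sample -> ...
 */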
struct per_pid {
	struct per_pid *next;

	int		pid;
	int		ppid;

	u64		start_time;
	u64		end_time;
	u64		total_time;
	int		display;

	struct per_pidcomm *all;
	struct per_pidcomm *current;

	int painted;
};


struct per_pidcomm {
	struct per_pidcomm *next;

	u64		start_time;
	u64		end_time;
	u64		total_time;

	int		Y;
	int		display;

	long		state;
	u64		state_since;

	char		*comm;

	struct cpu_sample *samples;
};

struct sample_wrapper {
	struct sample_wrapper *next;

	u64		timestamp;
	unsigned char	data[0];
};

#define TYPE_NONE	0
#define TYPE_RUNNING	1
#define TYPE_WAITING	2
#define TYPE_BLOCKED	3

struct cpu_sample {
	struct cpu_sample *next;

	u64 start_time;
	u64 end_time;
	int type;
	int cpu;
};

static struct per_pid *all_data;

#define CSTATE 1
#define PSTATE 2

struct power_event {
	struct power_event *next;
	int type;
	int state;
	u64 start_time;
	u64 end_time;
	int cpu;
};

struct wake_event {
	struct wake_event *next;
	int waker;
	int wakee;
	u64 time;
};

static struct power_event *power_events;
static struct wake_event *wake_events;

struct sample_wrapper *all_samples;


struct process_filter;
struct process_filter {
	char			*name;
	int			pid;
	struct process_filter	*next;
};

static struct process_filter *process_filter;


static struct per_pid *find_create_pid(int pid)
{
	struct per_pid *cursor = all_data;

	while (cursor) {
		if (cursor->pid == pid)
			return cursor;
		cursor = cursor->next;
	}
	cursor = malloc(sizeof(struct per_pid));
	assert(cursor != NULL);
	memset(cursor, 0, sizeof(struct per_pid));
	cursor->pid = pid;
	cursor->next = all_data;
	all_data = cursor;
	return cursor;
}

static void pid_set_comm(int pid, char *comm)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	p = find_create_pid(pid);
	c = p->all;
	while (c) {
		if (c->comm && strcmp(c->comm, comm) == 0) {
			p->current = c;
			return;
		}
		if (!c->comm) {
			c->comm = strdup(comm);
			p->current = c;
			return;
		}
		c = c->next;
	}
	c = malloc(sizeof(struct per_pidcomm));
	assert(c != NULL);
	memset(c, 0, sizeof(struct per_pidcomm));
	c->comm = strdup(comm);
	p->current = c;
	c->next = p->all;
	p->all = c;
}

static void pid_fork(int pid, int ppid, u64 timestamp)
{
	struct per_pid *p, *pp;
	p = find_create_pid(pid);
	pp = find_create_pid(ppid);
	p->ppid = ppid;
	if (pp->current && pp->current->comm && !p->current)
		pid_set_comm(pid, pp->current->comm);

	p->start_time = timestamp;
	if (p->current) {
		p->current->start_time = timestamp;
		p->current->state_since = timestamp;
	}
}

static void pid_exit(int pid, u64 timestamp)
{
	struct per_pid *p;
	p = find_create_pid(pid);
	p->end_time = timestamp;
	if (p->current)
		p->current->end_time = timestamp;
}
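/*
 * Record one time slice of a given type (running/waiting/blocked) against
 * the pid's current comm, creating an anonymous comm entry if no comm name
 * has been seen for this pid yet.
 */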
static void
pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;

	p = find_create_pid(pid);
	c = p->current;
	if (!c) {
		c = malloc(sizeof(struct per_pidcomm));
		assert(c != NULL);
		memset(c, 0, sizeof(struct per_pidcomm));
		p->current = c;
		c->next = p->all;
		p->all = c;
	}

	sample = malloc(sizeof(struct cpu_sample));
	assert(sample != NULL);
	memset(sample, 0, sizeof(struct cpu_sample));
	sample->start_time = start;
	sample->end_time = end;
	sample->type = type;
	sample->next = c->samples;
	sample->cpu = cpu;
	c->samples = sample;

	if (sample->type == TYPE_RUNNING && end > start && start > 0) {
		c->total_time += (end-start);
		p->total_time += (end-start);
	}

	if (c->start_time == 0 || c->start_time > start)
		c->start_time = start;
	if (p->start_time == 0 || p->start_time > start)
		p->start_time = start;

	if (cpu > numcpus)
		numcpus = cpu;
}

#define MAX_CPUS 4096

static u64 cpus_cstate_start_times[MAX_CPUS];
static int cpus_cstate_state[MAX_CPUS];
static u64 cpus_pstate_start_times[MAX_CPUS];
static u64 cpus_pstate_state[MAX_CPUS];

static int
process_comm_event(event_t *event)
{
	pid_set_comm(event->comm.pid, event->comm.comm);
	return 0;
}

static int
process_fork_event(event_t *event)
{
	pid_fork(event->fork.pid, event->fork.ppid, event->fork.time);
	return 0;
}

static int
process_exit_event(event_t *event)
{
	pid_exit(event->fork.pid, event->fork.time);
	return 0;
}

struct trace_entry {
	unsigned short		type;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
	int			lock_depth;
};

struct power_entry {
	struct trace_entry te;
	s64	type;
	s64	value;
};

#define TASK_COMM_LEN 16
struct wakeup_entry {
	struct trace_entry te;
	char comm[TASK_COMM_LEN];
	int   pid;
	int   prio;
	int   success;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF	- interrupts were disabled
 *  IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
 *  NEED_RESCHED - reschedule is requested
 *  HARDIRQ	- inside an interrupt handler
 *  SOFTIRQ	- inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
};



struct sched_switch {
	struct trace_entry te;
	char prev_comm[TASK_COMM_LEN];
	int  prev_pid;
	int  prev_prio;
	long prev_state; /* Arjan weeps. */
	char next_comm[TASK_COMM_LEN];
	int  next_pid;
	int  next_prio;
};
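/*
 * C-state bookkeeping: a "power:power_start" tracepoint opens an idle
 * period for a CPU, and the matching "power:power_end" closes it into a
 * power_event. P-state (frequency) changes are handled analogously in
 * p_state_change() below.
 */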
static void c_state_start(int cpu, u64 timestamp, int state)
{
	cpus_cstate_start_times[cpu] = timestamp;
	cpus_cstate_state[cpu] = state;
}

static void c_state_end(int cpu, u64 timestamp)
{
	struct power_event *pwr;
	pwr = malloc(sizeof(struct power_event));
	if (!pwr)
		return;
	memset(pwr, 0, sizeof(struct power_event));

	pwr->state = cpus_cstate_state[cpu];
	pwr->start_time = cpus_cstate_start_times[cpu];
	pwr->end_time = timestamp;
	pwr->cpu = cpu;
	pwr->type = CSTATE;
	pwr->next = power_events;

	power_events = pwr;
}

static void p_state_change(int cpu, u64 timestamp, u64 new_freq)
{
	struct power_event *pwr;

	if (new_freq > 8000000) /* detect invalid data */
		return;

	pwr = malloc(sizeof(struct power_event));
	if (!pwr)
		return;
	memset(pwr, 0, sizeof(struct power_event));

	pwr->state = cpus_pstate_state[cpu];
	pwr->start_time = cpus_pstate_start_times[cpu];
	pwr->end_time = timestamp;
	pwr->cpu = cpu;
	pwr->type = PSTATE;
	pwr->next = power_events;

	if (!pwr->start_time)
		pwr->start_time = first_time;

	power_events = pwr;

	cpus_pstate_state[cpu] = new_freq;
	cpus_pstate_start_times[cpu] = timestamp;

	if ((u64)new_freq > max_freq)
		max_freq = new_freq;

	if (new_freq < min_freq || min_freq == 0)
		min_freq = new_freq;

	if (new_freq == max_freq - 1000)
		turbo_frequency = max_freq;
}

static void
sched_wakeup(int cpu, u64 timestamp, int pid, struct trace_entry *te)
{
	struct wake_event *we;
	struct per_pid *p;
	struct wakeup_entry *wake = (void *)te;

	we = malloc(sizeof(struct wake_event));
	if (!we)
		return;

	memset(we, 0, sizeof(struct wake_event));
	we->time = timestamp;
	we->waker = pid;

	if ((te->flags & TRACE_FLAG_HARDIRQ) || (te->flags & TRACE_FLAG_SOFTIRQ))
		we->waker = -1;

	we->wakee = wake->pid;
	we->next = wake_events;
	wake_events = we;
	p = find_create_pid(we->wakee);

	if (p && p->current && p->current->state == TYPE_NONE) {
		p->current->state_since = timestamp;
		p->current->state = TYPE_WAITING;
	}
	if (p && p->current && p->current->state == TYPE_BLOCKED) {
		pid_put_sample(p->pid, p->current->state, cpu, p->current->state_since, timestamp);
		p->current->state_since = timestamp;
		p->current->state = TYPE_WAITING;
	}
}

static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te)
{
	struct per_pid *p = NULL, *prev_p;
	struct sched_switch *sw = (void *)te;


	prev_p = find_create_pid(sw->prev_pid);

	p = find_create_pid(sw->next_pid);

	if (prev_p->current && prev_p->current->state != TYPE_NONE)
		pid_put_sample(sw->prev_pid, TYPE_RUNNING, cpu, prev_p->current->state_since, timestamp);
	if (p && p->current) {
		if (p->current->state != TYPE_NONE)
			pid_put_sample(sw->next_pid, p->current->state, cpu, p->current->state_since, timestamp);

		p->current->state_since = timestamp;
		p->current->state = TYPE_RUNNING;
	}

	if (prev_p->current) {
		prev_p->current->state = TYPE_NONE;
		prev_p->current->state_since = timestamp;
		if (sw->prev_state & 2)
			prev_p->current->state = TYPE_BLOCKED;
		if (sw->prev_state == 0)
			prev_p->current->state = TYPE_WAITING;
	}
}
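/*
 * Dispatch a raw sample on its tracepoint name: the power events update
 * the per-CPU C/P-state tracking above, the scheduler events update the
 * per-pid state machine and the wakeup list.
 */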
static int
process_sample_event(event_t *event)
{
	struct sample_data data;
	struct trace_entry *te;

	memset(&data, 0, sizeof(data));

	event__parse_sample(event, sample_type, &data);

	if (sample_type & PERF_SAMPLE_TIME) {
		if (!first_time || first_time > data.time)
			first_time = data.time;
		if (last_time < data.time)
			last_time = data.time;
	}

	te = (void *)data.raw_data;
	if (sample_type & PERF_SAMPLE_RAW && data.raw_size > 0) {
		char *event_str;
		struct power_entry *pe;

		pe = (void *)te;

		event_str = perf_header__find_event(te->type);

		if (!event_str)
			return 0;

		if (strcmp(event_str, "power:power_start") == 0)
			c_state_start(data.cpu, data.time, pe->value);

		if (strcmp(event_str, "power:power_end") == 0)
			c_state_end(data.cpu, data.time);

		if (strcmp(event_str, "power:power_frequency") == 0)
			p_state_change(data.cpu, data.time, pe->value);

		if (strcmp(event_str, "sched:sched_wakeup") == 0)
			sched_wakeup(data.cpu, data.time, data.pid, te);

		if (strcmp(event_str, "sched:sched_switch") == 0)
			sched_switch(data.cpu, data.time, te);
	}
	return 0;
}

/*
 * After the last sample we need to wrap up the current C/P state
 * and close it out for each CPU.
 */
static void end_sample_processing(void)
{
	u64 cpu;
	struct power_event *pwr;

	for (cpu = 0; cpu <= numcpus; cpu++) {
		/* C state */
#if 0
		pwr = malloc(sizeof(struct power_event));
		if (!pwr)
			return;
		memset(pwr, 0, sizeof(struct power_event));

		pwr->state = cpus_cstate_state[cpu];
		pwr->start_time = cpus_cstate_start_times[cpu];
		pwr->end_time = last_time;
		pwr->cpu = cpu;
		pwr->type = CSTATE;
		pwr->next = power_events;

		power_events = pwr;
#endif
		/* P state */

		pwr = malloc(sizeof(struct power_event));
		if (!pwr)
			return;
		memset(pwr, 0, sizeof(struct power_event));

		pwr->state = cpus_pstate_state[cpu];
		pwr->start_time = cpus_pstate_start_times[cpu];
		pwr->end_time = last_time;
		pwr->cpu = cpu;
		pwr->type = PSTATE;
		pwr->next = power_events;

		if (!pwr->start_time)
			pwr->start_time = first_time;
		if (!pwr->state)
			pwr->state = min_freq;
		power_events = pwr;
	}
}
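/*
 * Pull just the timestamp out of a raw sample record by walking the
 * sample array in the field order the kernel writes it (IP, then TID,
 * then TIME), without doing a full parse.
 */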
static u64 sample_time(event_t *event)
{
	int cursor;

	cursor = 0;
	if (sample_type & PERF_SAMPLE_IP)
		cursor++;
	if (sample_type & PERF_SAMPLE_TID)
		cursor++;
	if (sample_type & PERF_SAMPLE_TIME)
		return event->sample.array[cursor];
	return 0;
}


/*
 * We first queue all events, keeping the queue sorted backwards
 * (newest first) as we insert. The order gets flipped later, in
 * sort_queued_samples().
 */
static int
queue_sample_event(event_t *event)
{
	struct sample_wrapper *copy, *prev;
	int size;

	size = event->sample.header.size + sizeof(struct sample_wrapper) + 8;

	copy = malloc(size);
	if (!copy)
		return 1;

	memset(copy, 0, size);

	copy->next = NULL;
	copy->timestamp = sample_time(event);

	memcpy(&copy->data, event, event->sample.header.size);

	/* insert in the right place in the list */

	if (!all_samples) {
		/* first sample ever */
		all_samples = copy;
		return 0;
	}

	if (all_samples->timestamp < copy->timestamp) {
		/* insert at the head of the list */
		copy->next = all_samples;
		all_samples = copy;
		return 0;
	}

	prev = all_samples;
	while (prev->next) {
		if (prev->next->timestamp < copy->timestamp) {
			copy->next = prev->next;
			prev->next = copy;
			return 0;
		}
		prev = prev->next;
	}
	/* insert at the end of the list */
	prev->next = copy;

	return 0;
}

static void sort_queued_samples(void)
{
	struct sample_wrapper *cursor, *next;

	cursor = all_samples;
	all_samples = NULL;

	while (cursor) {
		next = cursor->next;
		cursor->next = all_samples;
		all_samples = cursor;
		cursor = next;
	}
}

/*
 * Sort the pid datastructure
 */
static void sort_pids(void)
{
	struct per_pid *new_list, *p, *cursor, *prev;
	/* sort by ppid first, then by pid, lowest to highest */

	new_list = NULL;

	while (all_data) {
		p = all_data;
		all_data = p->next;
		p->next = NULL;

		if (new_list == NULL) {
			new_list = p;
			p->next = NULL;
			continue;
		}
		prev = NULL;
		cursor = new_list;
		while (cursor) {
			if (cursor->ppid > p->ppid ||
				(cursor->ppid == p->ppid && cursor->pid > p->pid)) {
				/* must insert before */
				if (prev) {
					p->next = prev->next;
					prev->next = p;
					cursor = NULL;
					continue;
				} else {
					p->next = new_list;
					new_list = p;
					cursor = NULL;
					continue;
				}
			}

			prev = cursor;
			cursor = cursor->next;
			if (!cursor)
				prev->next = p;
		}
	}
	all_data = new_list;
}


static void draw_c_p_states(void)
{
	struct power_event *pwr;
	pwr = power_events;

	/*
	 * two pass drawing so that the P state bars are on top of the
	 * C state blocks
	 */
	while (pwr) {
		if (pwr->type == CSTATE)
			svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
		pwr = pwr->next;
	}

	pwr = power_events;
	while (pwr) {
		if (pwr->type == PSTATE) {
			if (!pwr->state)
				pwr->state = min_freq;
			svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
		}
		pwr = pwr->next;
	}
}
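/*
 * Two lookup passes per wakeup event: the first prefers a comm whose row
 * was active at the time of the event; if either end is still unresolved,
 * the fallback pass takes any comm of the matching pid.
 */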
static void draw_wakeups(void)
{
	struct wake_event *we;
	struct per_pid *p;
	struct per_pidcomm *c;

	we = wake_events;
	while (we) {
		int from = 0, to = 0;
		char *task_from = NULL, *task_to = NULL;

		/* locate the column of the waker and wakee */
		p = all_data;
		while (p) {
			if (p->pid == we->waker || p->pid == we->wakee) {
				c = p->all;
				while (c) {
					if (c->Y && c->start_time <= we->time && c->end_time >= we->time) {
						if (p->pid == we->waker && !from) {
							from = c->Y;
							task_from = strdup(c->comm);
						}
						if (p->pid == we->wakee && !to) {
							to = c->Y;
							task_to = strdup(c->comm);
						}
					}
					c = c->next;
				}
				c = p->all;
				while (c) {
					if (p->pid == we->waker && !from) {
						from = c->Y;
						task_from = strdup(c->comm);
					}
					if (p->pid == we->wakee && !to) {
						to = c->Y;
						task_to = strdup(c->comm);
					}
					c = c->next;
				}
			}
			p = p->next;
		}

		if (!task_from) {
			task_from = malloc(40);
			sprintf(task_from, "[%i]", we->waker);
		}
		if (!task_to) {
			task_to = malloc(40);
			sprintf(task_to, "[%i]", we->wakee);
		}

		if (we->waker == -1)
			svg_interrupt(we->time, to);
		else if (from && to && abs(from - to) == 1)
			svg_wakeline(we->time, from, to);
		else
			svg_partial_wakeline(we->time, from, task_from, to, task_to);
		we = we->next;

		free(task_from);
		free(task_to);
	}
}

static void draw_cpu_usage(void)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;
	p = all_data;
	while (p) {
		c = p->all;
		while (c) {
			sample = c->samples;
			while (sample) {
				if (sample->type == TYPE_RUNNING)
					svg_process(sample->cpu, sample->start_time, sample->end_time, "sample", c->comm);

				sample = sample->next;
			}
			c = c->next;
		}
		p = p->next;
	}
}

static void draw_process_bars(void)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;
	int Y = 0;

	Y = 2 * numcpus + 2;

	p = all_data;
	while (p) {
		c = p->all;
		while (c) {
			if (!c->display) {
				c->Y = 0;
				c = c->next;
				continue;
			}

			svg_box(Y, c->start_time, c->end_time, "process");
			sample = c->samples;
			while (sample) {
				if (sample->type == TYPE_RUNNING)
					svg_sample(Y, sample->cpu, sample->start_time, sample->end_time);
				if (sample->type == TYPE_BLOCKED)
					svg_box(Y, sample->start_time, sample->end_time, "blocked");
				if (sample->type == TYPE_WAITING)
					svg_waiting(Y, sample->start_time, sample->end_time);
				sample = sample->next;
			}

			if (c->comm) {
				char comm[256];
				if (c->total_time > 5000000000) /* 5 seconds */
					sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / 1000000000.0);
				else
					sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / 1000000.0);

				svg_text(Y, c->start_time, comm);
			}
			c->Y = Y;
			Y++;
			c = c->next;
		}
		p = p->next;
	}
}

static void add_process_filter(const char *string)
{
	struct process_filter *filt;
	int pid;

	pid = strtoull(string, NULL, 10);
	filt = malloc(sizeof(struct process_filter));
	if (!filt)
		return;

	filt->name = strdup(string);
	filt->pid  = pid;
	filt->next = process_filter;

	process_filter = filt;
}

static int passes_filter(struct per_pid *p, struct per_pidcomm *c)
{
	struct process_filter *filt;
	if (!process_filter)
		return 1;

	filt = process_filter;
	while (filt) {
		if (filt->pid && p->pid == filt->pid)
			return 1;
		if (strcmp(filt->name, c->comm) == 0)
			return 1;
		filt = filt->next;
	}
	return 0;
}
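/*
 * Decide which pids/comms get a row in the chart: with a -p filter,
 * anything that matches; otherwise (see determine_display_tasks) anything
 * that accumulated at least `threshold' ns of run time.
 */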
static int determine_display_tasks_filtered(void)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	int count = 0;

	p = all_data;
	while (p) {
		p->display = 0;
		if (p->start_time == 1)
			p->start_time = first_time;

		/* no exit marker, task kept running to the end */
		if (p->end_time == 0)
			p->end_time = last_time;

		c = p->all;

		while (c) {
			c->display = 0;

			if (c->start_time == 1)
				c->start_time = first_time;

			if (passes_filter(p, c)) {
				c->display = 1;
				p->display = 1;
				count++;
			}

			if (c->end_time == 0)
				c->end_time = last_time;

			c = c->next;
		}
		p = p->next;
	}
	return count;
}

static int determine_display_tasks(u64 threshold)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	int count = 0;

	if (process_filter)
		return determine_display_tasks_filtered();

	p = all_data;
	while (p) {
		p->display = 0;
		if (p->start_time == 1)
			p->start_time = first_time;

		/* no exit marker, task kept running to the end */
		if (p->end_time == 0)
			p->end_time = last_time;
		if (p->total_time >= threshold && !power_only)
			p->display = 1;

		c = p->all;

		while (c) {
			c->display = 0;

			if (c->start_time == 1)
				c->start_time = first_time;

			if (c->total_time >= threshold && !power_only) {
				c->display = 1;
				count++;
			}

			if (c->end_time == 0)
				c->end_time = last_time;

			c = c->next;
		}
		p = p->next;
	}
	return count;
}



#define TIME_THRESH 10000000

static void write_svg_file(const char *filename)
{
	u64 i;
	int count;

	numcpus++;


	count = determine_display_tasks(TIME_THRESH);

	/* We'd like to show at least 15 tasks; be less picky if we have fewer */
	if (count < 15)
		count = determine_display_tasks(TIME_THRESH / 10);

	open_svg(filename, numcpus, count, first_time, last_time);

	svg_time_grid();
	svg_legenda();

	for (i = 0; i < numcpus; i++)
		svg_cpu_box(i, max_freq, turbo_frequency);

	draw_cpu_usage();
	draw_process_bars();
	draw_c_p_states();
	draw_wakeups();

	svg_close();
}

static void process_samples(void)
{
	struct sample_wrapper *cursor;
	event_t *event;

	sort_queued_samples();

	cursor = all_samples;
	while (cursor) {
		event = (void *)&cursor->data;
		cursor = cursor->next;
		process_sample_event(event);
	}
}

static int sample_type_check(u64 type)
{
	sample_type = type;

	if (!(sample_type & PERF_SAMPLE_RAW)) {
		fprintf(stderr, "No trace samples found in the file.\n"
				"Have you used 'perf timechart record' to record it?\n");
		return -1;
	}

	return 0;
}

static struct perf_file_handler file_handler = {
	.process_comm_event	= process_comm_event,
	.process_fork_event	= process_fork_event,
	.process_exit_event	= process_exit_event,
	.process_sample_event	= queue_sample_event,
	.sample_type_check	= sample_type_check,
};

static int __cmd_timechart(void)
{
	struct perf_header *header;
	int ret;

	register_perf_file_handler(&file_handler);

	ret = mmap_dispatch_perf_file(&header, input_name, 0, 0,
				      &event__cwdlen, &event__cwd);
	if (ret)
		return EXIT_FAILURE;

	process_samples();

	end_sample_processing();

	sort_pids();

	write_svg_file(output_name);

	pr_info("Written %2.1f seconds of trace to %s.\n",
		(last_time - first_time) / 1000000000.0, output_name);

	return EXIT_SUCCESS;
}

static const char * const timechart_usage[] = {
	"perf timechart [<options>] {record}",
	NULL
};
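/*
 * Arguments passed straight through to 'perf record': sample system-wide
 * (-a), keep raw trace data (-R), capture every event (-c 1), and enable
 * the power and scheduler tracepoints the chart is drawn from.
 */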
static const char *record_args[] = {
	"record",
	"-a",
	"-R",
	"-M",
	"-f",
	"-c", "1",
	"-e", "power:power_start",
	"-e", "power:power_end",
	"-e", "power:power_frequency",
	"-e", "sched:sched_wakeup",
	"-e", "sched:sched_switch",
};

static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	return cmd_record(i, rec_argv, NULL);
}

static int
parse_process(const struct option *opt __used, const char *arg, int __used unset)
{
	if (arg)
		add_process_filter(arg);
	return 0;
}

static const struct option options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		    "input file name"),
	OPT_STRING('o', "output", &output_name, "file",
		    "output file name"),
	OPT_INTEGER('w', "width", &svg_page_width,
		    "page width"),
	OPT_BOOLEAN('P', "power-only", &power_only,
		    "output power data only"),
	OPT_CALLBACK('p', "process", NULL, "process",
		      "process selector. Pass a pid or process name.",
		       parse_process),
	OPT_END()
};


int cmd_timechart(int argc, const char **argv, const char *prefix __used)
{
	symbol__init(0);

	argc = parse_options(argc, argv, options, timechart_usage,
			PARSE_OPT_STOP_AT_NON_OPTION);

	if (argc && !strncmp(argv[0], "rec", 3))
		return __cmd_record(argc, argv);
	else if (argc)
		usage_with_options(timechart_usage, options);

	setup_pager();

	return __cmd_timechart();
}
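/*
 * Typical workflow (illustrative):
 *
 *	perf timechart record	# trace system-wide into perf.data
 *	perf timechart		# render perf.data to output.svg
 */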