/*
 * builtin-timechart.c - make an svg timechart of system activity
 *
 * (C) Copyright 2009 Intel Corporation
 *
 * Authors:
 *     Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include "builtin.h"

#include "util/util.h"

#include "util/color.h"
#include <linux/list.h>
#include "util/cache.h"
#include <linux/rbtree.h>
#include "util/symbol.h"
#include "util/string.h"
#include "util/callchain.h"
#include "util/strlist.h"

#include "perf.h"
#include "util/header.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/svghelper.h"

static char const *input_name = "perf.data";
static char const *output_name = "output.svg";


static unsigned long page_size;
static unsigned long mmap_window = 32;
static u64 sample_type;

static unsigned int numcpus;
static u64 min_freq;	/* Lowest CPU frequency seen */
static u64 max_freq;	/* Highest CPU frequency seen */
static u64 turbo_frequency;

static u64 first_time, last_time;

static int power_only;


static struct perf_header *header;

struct per_pid;
struct per_pidcomm;

struct cpu_sample;
struct power_event;
struct wake_event;

struct sample_wrapper;

/*
 * Data structure layout:
 * We keep a list of "pid"s, matching the kernel's notion of a task struct.
 * Each "pid" entry has a list of "comm"s.
 * This is because we want to track different programs separately, while
 * exec will reuse the original pid (by design).
 * Each comm has a list of samples that will be used to draw the
 * final graph.
 */
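/*
 * A hypothetical trace, to illustrate that layout (pids, comms and
 * samples are all newest-first singly linked lists):
 *
 *   all_data -> per_pid(42) -> per_pid(1) -> NULL
 *                  |
 *                  all -> per_pidcomm("firefox") -> per_pidcomm("sh") -> NULL
 *                                |
 *                                samples -> cpu_sample -> cpu_sample -> NULL
 */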
struct per_pid {
	struct per_pid *next;

	int pid;
	int ppid;

	u64 start_time;
	u64 end_time;
	u64 total_time;
	int display;

	struct per_pidcomm *all;
	struct per_pidcomm *current;

	int painted;
};


struct per_pidcomm {
	struct per_pidcomm *next;

	u64 start_time;
	u64 end_time;
	u64 total_time;

	int Y;
	int display;

	long state;
	u64 state_since;

	char *comm;

	struct cpu_sample *samples;
};

struct sample_wrapper {
	struct sample_wrapper *next;

	u64 timestamp;
	unsigned char data[0];
};

#define TYPE_NONE	0
#define TYPE_RUNNING	1
#define TYPE_WAITING	2
#define TYPE_BLOCKED	3

struct cpu_sample {
	struct cpu_sample *next;

	u64 start_time;
	u64 end_time;
	int type;
	int cpu;
};

static struct per_pid *all_data;

#define CSTATE 1
#define PSTATE 2

struct power_event {
	struct power_event *next;
	int type;
	int state;
	u64 start_time;
	u64 end_time;
	int cpu;
};

struct wake_event {
	struct wake_event *next;
	int waker;
	int wakee;
	u64 time;
};

static struct power_event *power_events;
static struct wake_event *wake_events;

struct sample_wrapper *all_samples;

static struct per_pid *find_create_pid(int pid)
{
	struct per_pid *cursor = all_data;

	while (cursor) {
		if (cursor->pid == pid)
			return cursor;
		cursor = cursor->next;
	}
	cursor = malloc(sizeof(struct per_pid));
	assert(cursor != NULL);
	memset(cursor, 0, sizeof(struct per_pid));
	cursor->pid = pid;
	cursor->next = all_data;
	all_data = cursor;
	return cursor;
}

static void pid_set_comm(int pid, char *comm)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	p = find_create_pid(pid);
	c = p->all;
	while (c) {
		if (c->comm && strcmp(c->comm, comm) == 0) {
			p->current = c;
			return;
		}
		if (!c->comm) {
			c->comm = strdup(comm);
			p->current = c;
			return;
		}
		c = c->next;
	}
	c = malloc(sizeof(struct per_pidcomm));
	assert(c != NULL);
	memset(c, 0, sizeof(struct per_pidcomm));
	c->comm = strdup(comm);
	p->current = c;
	c->next = p->all;
	p->all = c;
}

static void pid_fork(int pid, int ppid, u64 timestamp)
{
	struct per_pid *p, *pp;
	p = find_create_pid(pid);
	pp = find_create_pid(ppid);
	p->ppid = ppid;
	if (pp->current && pp->current->comm && !p->current)
		pid_set_comm(pid, pp->current->comm);

	p->start_time = timestamp;
	if (p->current) {
		p->current->start_time = timestamp;
		p->current->state_since = timestamp;
	}
}

static void pid_exit(int pid, u64 timestamp)
{
	struct per_pid *p;
	p = find_create_pid(pid);
	p->end_time = timestamp;
	if (p->current)
		p->current->end_time = timestamp;
}
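/*
 * Record one scheduler interval ([start, end] of TYPE_RUNNING, _WAITING
 * or _BLOCKED) for a pid on the given cpu. Running time is accumulated
 * in total_time so that determine_display_tasks() can later filter out
 * short-lived tasks.
 */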
static void
pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;

	p = find_create_pid(pid);
	c = p->current;
	if (!c) {
		c = malloc(sizeof(struct per_pidcomm));
		assert(c != NULL);
		memset(c, 0, sizeof(struct per_pidcomm));
		p->current = c;
		c->next = p->all;
		p->all = c;
	}

	sample = malloc(sizeof(struct cpu_sample));
	assert(sample != NULL);
	memset(sample, 0, sizeof(struct cpu_sample));
	sample->start_time = start;
	sample->end_time = end;
	sample->type = type;
	sample->next = c->samples;
	sample->cpu = cpu;
	c->samples = sample;

	if (sample->type == TYPE_RUNNING && end > start && start > 0) {
		c->total_time += (end-start);
		p->total_time += (end-start);
	}

	if (c->start_time == 0 || c->start_time > start)
		c->start_time = start;
	if (p->start_time == 0 || p->start_time > start)
		p->start_time = start;

	if (cpu > numcpus)
		numcpus = cpu;
}

#define MAX_CPUS 4096

static u64 cpus_cstate_start_times[MAX_CPUS];
static int cpus_cstate_state[MAX_CPUS];
static u64 cpus_pstate_start_times[MAX_CPUS];
static u64 cpus_pstate_state[MAX_CPUS];

static int
process_comm_event(event_t *event)
{
	pid_set_comm(event->comm.pid, event->comm.comm);
	return 0;
}

static int
process_fork_event(event_t *event)
{
	pid_fork(event->fork.pid, event->fork.ppid, event->fork.time);
	return 0;
}

static int
process_exit_event(event_t *event)
{
	pid_exit(event->fork.pid, event->fork.time);
	return 0;
}

struct trace_entry {
	u32 size;
	unsigned short type;
	unsigned char flags;
	unsigned char preempt_count;
	int pid;
	int tgid;
};

struct power_entry {
	struct trace_entry te;
	s64 type;
	s64 value;
};

#define TASK_COMM_LEN 16
struct wakeup_entry {
	struct trace_entry te;
	char comm[TASK_COMM_LEN];
	int pid;
	int prio;
	int success;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF	- interrupts were disabled
 *  IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ	- inside an interrupt handler
 *  SOFTIRQ	- inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
};

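/*
 * Mirrors the raw sched_switch tracepoint record; the field order and
 * sizes here have to stay in sync with what the kernel writes into the
 * RAW sample area.
 */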
struct sched_switch {
	struct trace_entry te;
	char prev_comm[TASK_COMM_LEN];
	int prev_pid;
	int prev_prio;
	long prev_state; /* Arjan weeps. */
	char next_comm[TASK_COMM_LEN];
	int next_pid;
	int next_prio;
};

static void c_state_start(int cpu, u64 timestamp, int state)
{
	cpus_cstate_start_times[cpu] = timestamp;
	cpus_cstate_state[cpu] = state;
}

static void c_state_end(int cpu, u64 timestamp)
{
	struct power_event *pwr;
	pwr = malloc(sizeof(struct power_event));
	if (!pwr)
		return;
	memset(pwr, 0, sizeof(struct power_event));

	pwr->state = cpus_cstate_state[cpu];
	pwr->start_time = cpus_cstate_start_times[cpu];
	pwr->end_time = timestamp;
	pwr->cpu = cpu;
	pwr->type = CSTATE;
	pwr->next = power_events;

	power_events = pwr;
}

static void p_state_change(int cpu, u64 timestamp, u64 new_freq)
{
	struct power_event *pwr;

	if (new_freq > 8000000) /* detect invalid data */
		return;

	pwr = malloc(sizeof(struct power_event));
	if (!pwr)
		return;
	memset(pwr, 0, sizeof(struct power_event));

	pwr->state = cpus_pstate_state[cpu];
	pwr->start_time = cpus_pstate_start_times[cpu];
	pwr->end_time = timestamp;
	pwr->cpu = cpu;
	pwr->type = PSTATE;
	pwr->next = power_events;

	if (!pwr->start_time)
		pwr->start_time = first_time;

	power_events = pwr;

	cpus_pstate_state[cpu] = new_freq;
	cpus_pstate_start_times[cpu] = timestamp;

	if ((u64)new_freq > max_freq)
		max_freq = new_freq;

	if (new_freq < min_freq || min_freq == 0)
		min_freq = new_freq;

	if (new_freq == max_freq - 1000)
		turbo_frequency = max_freq;
}

static void
sched_wakeup(int cpu, u64 timestamp, int pid, struct trace_entry *te)
{
	struct wake_event *we;
	struct per_pid *p;
	struct wakeup_entry *wake = (void *)te;

	we = malloc(sizeof(struct wake_event));
	if (!we)
		return;

	memset(we, 0, sizeof(struct wake_event));
	we->time = timestamp;
	we->waker = pid;

	if ((te->flags & TRACE_FLAG_HARDIRQ) || (te->flags & TRACE_FLAG_SOFTIRQ))
		we->waker = -1;

	we->wakee = wake->pid;
	we->next = wake_events;
	wake_events = we;
	p = find_create_pid(we->wakee);

	if (p && p->current && p->current->state == TYPE_NONE) {
		p->current->state_since = timestamp;
		p->current->state = TYPE_WAITING;
	}
	if (p && p->current && p->current->state == TYPE_BLOCKED) {
		pid_put_sample(p->pid, p->current->state, cpu, p->current->state_since, timestamp);
		p->current->state_since = timestamp;
		p->current->state = TYPE_WAITING;
	}
}

static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te)
{
	struct per_pid *p = NULL, *prev_p;
	struct sched_switch *sw = (void *)te;

	prev_p = find_create_pid(sw->prev_pid);

	p = find_create_pid(sw->next_pid);

	if (prev_p->current && prev_p->current->state != TYPE_NONE)
		pid_put_sample(sw->prev_pid, TYPE_RUNNING, cpu, prev_p->current->state_since, timestamp);
	if (p && p->current) {
		if (p->current->state != TYPE_NONE)
			pid_put_sample(sw->next_pid, p->current->state, cpu, p->current->state_since, timestamp);

		p->current->state_since = timestamp;
		p->current->state = TYPE_RUNNING;
	}

	if (prev_p->current) {
		prev_p->current->state = TYPE_NONE;
		prev_p->current->state_since = timestamp;
		if (sw->prev_state & 2)
			prev_p->current->state = TYPE_BLOCKED;
		if (sw->prev_state == 0)
			prev_p->current->state = TYPE_WAITING;
	}
}

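/*
 * Task state transitions driven by the two handlers above (inferred
 * from the TYPE_* updates; not an exhaustive scheduler model):
 *
 *   NONE    -> WAITING   on wakeup (runnable, waiting for a CPU)
 *   BLOCKED -> WAITING   on wakeup (the BLOCKED interval is emitted)
 *   WAITING -> RUNNING   when switched in
 *
 *   on switch-out: RUNNING -> BLOCKED  (prev_state & 2, uninterruptible)
 *                  RUNNING -> WAITING  (prev_state == 0, i.e. preempted)
 *                  RUNNING -> NONE     otherwise
 */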
static int
process_sample_event(event_t *event)
{
	int cursor = 0;
	u64 addr = 0;
	u64 stamp = 0;
	u32 cpu = 0;
	u32 pid = 0;
	struct trace_entry *te;

	if (sample_type & PERF_SAMPLE_IP)
		cursor++;

	if (sample_type & PERF_SAMPLE_TID) {
		pid = event->sample.array[cursor]>>32;
		cursor++;
	}
	if (sample_type & PERF_SAMPLE_TIME) {
		stamp = event->sample.array[cursor++];

		if (!first_time || first_time > stamp)
			first_time = stamp;
		if (last_time < stamp)
			last_time = stamp;

	}
	if (sample_type & PERF_SAMPLE_ADDR)
		addr = event->sample.array[cursor++];
	if (sample_type & PERF_SAMPLE_ID)
		cursor++;
	if (sample_type & PERF_SAMPLE_STREAM_ID)
		cursor++;
	if (sample_type & PERF_SAMPLE_CPU)
		cpu = event->sample.array[cursor++] & 0xFFFFFFFF;
	if (sample_type & PERF_SAMPLE_PERIOD)
		cursor++;

	te = (void *)&event->sample.array[cursor];

	if (sample_type & PERF_SAMPLE_RAW && te->size > 0) {
		char *event_str;
		struct power_entry *pe;

		pe = (void *)te;

		event_str = perf_header__find_event(te->type);

		if (!event_str)
			return 0;

		if (strcmp(event_str, "power:power_start") == 0)
			c_state_start(cpu, stamp, pe->value);

		if (strcmp(event_str, "power:power_end") == 0)
			c_state_end(cpu, stamp);

		if (strcmp(event_str, "power:power_frequency") == 0)
			p_state_change(cpu, stamp, pe->value);

		if (strcmp(event_str, "sched:sched_wakeup") == 0)
			sched_wakeup(cpu, stamp, pid, te);

		if (strcmp(event_str, "sched:sched_switch") == 0)
			sched_switch(cpu, stamp, te);
	}
	return 0;
}

/*
 * After the last sample we need to wrap up the current C/P state
 * and close out each CPU for these.
 */
static void end_sample_processing(void)
{
	u64 cpu;
	struct power_event *pwr;

	for (cpu = 0; cpu <= numcpus; cpu++) {
		pwr = malloc(sizeof(struct power_event));
		if (!pwr)
			return;
		memset(pwr, 0, sizeof(struct power_event));

		/* C state */
#if 0
		pwr->state = cpus_cstate_state[cpu];
		pwr->start_time = cpus_cstate_start_times[cpu];
		pwr->end_time = last_time;
		pwr->cpu = cpu;
		pwr->type = CSTATE;
		pwr->next = power_events;

		power_events = pwr;
#endif
		/* P state */

		pwr = malloc(sizeof(struct power_event));
		if (!pwr)
			return;
		memset(pwr, 0, sizeof(struct power_event));

		pwr->state = cpus_pstate_state[cpu];
		pwr->start_time = cpus_pstate_start_times[cpu];
		pwr->end_time = last_time;
		pwr->cpu = cpu;
		pwr->type = PSTATE;
		pwr->next = power_events;

		if (!pwr->start_time)
			pwr->start_time = first_time;
		if (!pwr->state)
			pwr->state = min_freq;
		power_events = pwr;
	}
}

static u64 sample_time(event_t *event)
{
	int cursor;

	cursor = 0;
	if (sample_type & PERF_SAMPLE_IP)
		cursor++;
	if (sample_type & PERF_SAMPLE_TID)
		cursor++;
	if (sample_type & PERF_SAMPLE_TIME)
		return event->sample.array[cursor];
	return 0;
}

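/*
 * Samples in perf.data come from per-cpu buffers and are not guaranteed
 * to be globally ordered by time, so they are buffered and sorted by
 * timestamp before being replayed through process_sample_event().
 */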
/*
 * We first queue all events, sorted newest-first by insertion.
 * The order will get flipped later, in sort_queued_samples().
 */
static int
queue_sample_event(event_t *event)
{
	struct sample_wrapper *copy, *prev;
	int size;

	size = event->sample.header.size + sizeof(struct sample_wrapper) + 8;

	copy = malloc(size);
	if (!copy)
		return 1;

	memset(copy, 0, size);

	copy->next = NULL;
	copy->timestamp = sample_time(event);

	memcpy(&copy->data, event, event->sample.header.size);

	/* insert in the right place in the list */

	if (!all_samples) {
		/* first sample ever */
		all_samples = copy;
		return 0;
	}

	if (all_samples->timestamp < copy->timestamp) {
		/* insert at the head of the list */
		copy->next = all_samples;
		all_samples = copy;
		return 0;
	}

	prev = all_samples;
	while (prev->next) {
		if (prev->next->timestamp < copy->timestamp) {
			copy->next = prev->next;
			prev->next = copy;
			return 0;
		}
		prev = prev->next;
	}
	/* insert at the end of the list */
	prev->next = copy;

	return 0;
}

static void sort_queued_samples(void)
{
	struct sample_wrapper *cursor, *next;

	cursor = all_samples;
	all_samples = NULL;

	while (cursor) {
		next = cursor->next;
		cursor->next = all_samples;
		all_samples = cursor;
		cursor = next;
	}
}

/*
 * Sort the pid datastructure
 */
static void sort_pids(void)
{
	struct per_pid *new_list, *p, *cursor, *prev;
	/* sort by ppid first, then by pid, lowest to highest */

	new_list = NULL;

	while (all_data) {
		p = all_data;
		all_data = p->next;
		p->next = NULL;

		if (new_list == NULL) {
			new_list = p;
			p->next = NULL;
			continue;
		}
		prev = NULL;
		cursor = new_list;
		while (cursor) {
			if (cursor->ppid > p->ppid ||
				(cursor->ppid == p->ppid && cursor->pid > p->pid)) {
				/* must insert before */
				if (prev) {
					p->next = prev->next;
					prev->next = p;
					cursor = NULL;
					continue;
				} else {
					p->next = new_list;
					new_list = p;
					cursor = NULL;
					continue;
				}
			}

			prev = cursor;
			cursor = cursor->next;
			if (!cursor)
				prev->next = p;
		}
	}
	all_data = new_list;
}


static void draw_c_p_states(void)
{
	struct power_event *pwr;
	pwr = power_events;

	/*
	 * two pass drawing so that the P state bars are on top of the C state blocks
	 */
	while (pwr) {
		if (pwr->type == CSTATE)
			svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
		pwr = pwr->next;
	}

	pwr = power_events;
	while (pwr) {
		if (pwr->type == PSTATE) {
			if (!pwr->state)
				pwr->state = min_freq;
			svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
		}
		pwr = pwr->next;
	}
}

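/*
 * Draw one arrow per wakeup: scan all pids/comms for the rows (Y) that
 * were visible at we->time, then connect waker to wakee. A waker of -1
 * means the wakeup came from (hard or soft) irq context.
 */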
static void draw_wakeups(void)
{
	struct wake_event *we;
	struct per_pid *p;
	struct per_pidcomm *c;

	we = wake_events;
	while (we) {
		int from = 0, to = 0;
		char *task_from = NULL, *task_to = NULL;

		/* locate the column of the waker and wakee */
		p = all_data;
		while (p) {
			if (p->pid == we->waker || p->pid == we->wakee) {
				c = p->all;
				while (c) {
					if (c->Y && c->start_time <= we->time && c->end_time >= we->time) {
						if (p->pid == we->waker) {
							from = c->Y;
							task_from = c->comm;
						}
						if (p->pid == we->wakee) {
							to = c->Y;
							task_to = c->comm;
						}
					}
					c = c->next;
				}
			}
			p = p->next;
		}

		if (we->waker == -1)
			svg_interrupt(we->time, to);
		else if (from && to && abs(from - to) == 1)
			svg_wakeline(we->time, from, to);
		else
			svg_partial_wakeline(we->time, from, task_from, to, task_to);
		we = we->next;
	}
}

static void draw_cpu_usage(void)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;
	p = all_data;
	while (p) {
		c = p->all;
		while (c) {
			sample = c->samples;
			while (sample) {
				if (sample->type == TYPE_RUNNING)
					svg_process(sample->cpu, sample->start_time, sample->end_time, "sample", c->comm);

				sample = sample->next;
			}
			c = c->next;
		}
		p = p->next;
	}
}

static void draw_process_bars(void)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;
	int Y = 0;

	Y = 2 * numcpus + 2;

	p = all_data;
	while (p) {
		c = p->all;
		while (c) {
			if (!c->display) {
				c->Y = 0;
				c = c->next;
				continue;
			}

			svg_box(Y, c->start_time, c->end_time, "process");
			sample = c->samples;
			while (sample) {
				if (sample->type == TYPE_RUNNING)
					svg_sample(Y, sample->cpu, sample->start_time, sample->end_time);
				if (sample->type == TYPE_BLOCKED)
					svg_box(Y, sample->start_time, sample->end_time, "blocked");
				if (sample->type == TYPE_WAITING)
					svg_waiting(Y, sample->start_time, sample->end_time);
				sample = sample->next;
			}

			if (c->comm) {
				char comm[256];
				if (c->total_time > 5000000000) /* 5 seconds */
					sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / 1000000000.0);
				else
					sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / 1000000.0);

				svg_text(Y, c->start_time, comm);
			}
			c->Y = Y;
			Y++;
			c = c->next;
		}
		p = p->next;
	}
}

static int determine_display_tasks(u64 threshold)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	int count = 0;

	p = all_data;
	while (p) {
		p->display = 0;
		if (p->start_time == 1)
			p->start_time = first_time;

		/* no exit marker, task kept running to the end */
		if (p->end_time == 0)
			p->end_time = last_time;
		if (p->total_time >= threshold && !power_only)
			p->display = 1;

		c = p->all;

		while (c) {
			c->display = 0;

			if (c->start_time == 1)
				c->start_time = first_time;

			if (c->total_time >= threshold && !power_only) {
				c->display = 1;
				count++;
			}

			if (c->end_time == 0)
				c->end_time = last_time;

			c = c->next;
		}
		p = p->next;
	}
	return count;
}



#define TIME_THRESH 10000000

static void write_svg_file(const char *filename)
{
	u64 i;
	int count;

	numcpus++;


	count = determine_display_tasks(TIME_THRESH);

	/* We'd like to show at least 15 tasks; be less picky if we have fewer */
	if (count < 15)
		count = determine_display_tasks(TIME_THRESH / 10);

	open_svg(filename, numcpus, count, first_time, last_time);

	svg_time_grid();
	svg_legenda();

	for (i = 0; i < numcpus; i++)
		svg_cpu_box(i, max_freq, turbo_frequency);

	draw_cpu_usage();
	draw_process_bars();
	draw_c_p_states();
	draw_wakeups();

	svg_close();
}

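/*
 * First pass over the file: comm/fork/exit events update the pid
 * bookkeeping immediately, while samples are only queued here and get
 * replayed in timestamp order by process_samples() afterwards.
 */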
static int
process_event(event_t *event)
{
	switch (event->header.type) {

	case PERF_RECORD_COMM:
		return process_comm_event(event);
	case PERF_RECORD_FORK:
		return process_fork_event(event);
	case PERF_RECORD_EXIT:
		return process_exit_event(event);
	case PERF_RECORD_SAMPLE:
		return queue_sample_event(event);

	/*
	 * We don't process them right now but they are fine:
	 */
	case PERF_RECORD_MMAP:
	case PERF_RECORD_THROTTLE:
	case PERF_RECORD_UNTHROTTLE:
		return 0;

	default:
		return -1;
	}

	return 0;
}

static void process_samples(void)
{
	struct sample_wrapper *cursor;
	event_t *event;

	sort_queued_samples();

	cursor = all_samples;
	while (cursor) {
		event = (void *)&cursor->data;
		cursor = cursor->next;
		process_sample_event(event);
	}
}

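/*
 * Read perf.data through a sliding window of mmap_window (32) pages:
 * when the next event would cross the end of the window, the buffer is
 * unmapped and remapped at a page-aligned offset closer to the current
 * read position.
 */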
static int __cmd_timechart(void)
{
	int ret, rc = EXIT_FAILURE;
	unsigned long offset = 0;
	unsigned long head, shift;
	struct stat statbuf;
	event_t *event;
	uint32_t size;
	char *buf;
	int input;

	input = open(input_name, O_RDONLY);
	if (input < 0) {
		fprintf(stderr, " failed to open file: %s", input_name);
		if (!strcmp(input_name, "perf.data"))
			fprintf(stderr, " (try 'perf record' first)");
		fprintf(stderr, "\n");
		exit(-1);
	}

	ret = fstat(input, &statbuf);
	if (ret < 0) {
		perror("failed to stat file");
		exit(-1);
	}

	if (!statbuf.st_size) {
		fprintf(stderr, "zero-sized file, nothing to do!\n");
		exit(0);
	}

	header = perf_header__read(input);
	head = header->data_offset;

	sample_type = perf_header__sample_type(header);

	shift = page_size * (head / page_size);
	offset += shift;
	head -= shift;

remap:
	buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
			   MAP_SHARED, input, offset);
	if (buf == MAP_FAILED) {
		perror("failed to mmap file");
		exit(-1);
	}

more:
	event = (event_t *)(buf + head);

	size = event->header.size;
	if (!size)
		size = 8;

	if (head + event->header.size >= page_size * mmap_window) {
		int ret2;

		shift = page_size * (head / page_size);

		ret2 = munmap(buf, page_size * mmap_window);
		assert(ret2 == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	if (!size || process_event(event) < 0) {

		printf("%p [%p]: skipping unknown header type: %d\n",
			(void *)(offset + head),
			(void *)(long)(event->header.size),
			event->header.type);

		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */

		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head >= header->data_offset + header->data_size)
		goto done;

	if (offset + head < (unsigned long)statbuf.st_size)
		goto more;

done:
	rc = EXIT_SUCCESS;
	close(input);


	process_samples();

	end_sample_processing();

	sort_pids();

	write_svg_file(output_name);

	printf("Written %2.1f seconds of trace to %s.\n", (last_time - first_time) / 1000000000.0, output_name);

	return rc;
}

static const char * const timechart_usage[] = {
	"perf timechart [<options>] {record}",
	NULL
};

static const char *record_args[] = {
	"record",
	"-a",
	"-R",
	"-M",
	"-f",
	"-c", "1",
	"-e", "power:power_start",
	"-e", "power:power_end",
	"-e", "power:power_frequency",
	"-e", "sched:sched_wakeup",
	"-e", "sched:sched_switch",
};

static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	return cmd_record(i, rec_argv, NULL);
}

static const struct option options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		    "input file name"),
	OPT_STRING('o', "output", &output_name, "file",
		    "output file name"),
	OPT_INTEGER('w', "width", &svg_page_width,
		    "page width"),
	OPT_BOOLEAN('p', "power-only", &power_only,
		    "output power data only"),
	OPT_END()
};


int cmd_timechart(int argc, const char **argv, const char *prefix __used)
{
	symbol__init();

	page_size = getpagesize();

	argc = parse_options(argc, argv, options, timechart_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);

	if (argc && !strncmp(argv[0], "rec", 3))
		return __cmd_record(argc, argv);
	else if (argc)
		usage_with_options(timechart_usage, options);

	setup_pager();

	return __cmd_timechart();
}
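/*
 * Typical workflow, per timechart_usage and record_args above:
 *
 *	perf timechart record		# system-wide trace into perf.data
 *	perf timechart			# render perf.data as output.svg
 *	perf timechart -o out.svg	# pick another output file name
 */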