/*
 * builtin-timechart.c - make an svg timechart of system activity
 *
 * (C) Copyright 2009 Intel Corporation
 *
 * Authors:
 *     Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include "builtin.h"

#include "util/util.h"

#include "util/color.h"
#include <linux/list.h>
#include "util/cache.h"
#include <linux/rbtree.h>
#include "util/symbol.h"
#include "util/callchain.h"
#include "util/strlist.h"

#include "perf.h"
#include "util/header.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/event.h"
#include "util/session.h"
#include "util/svghelper.h"

static char const *input_name = "perf.data";
static char const *output_name = "output.svg";

static unsigned int	numcpus;
static u64		min_freq;	/* Lowest CPU frequency seen */
static u64		max_freq;	/* Highest CPU frequency seen */
static u64		turbo_frequency;

static u64		first_time, last_time;

static bool		power_only;


struct per_pid;
struct per_pidcomm;

struct cpu_sample;
struct power_event;
struct wake_event;

struct sample_wrapper;

/*
 * Datastructure layout:
 * We keep a list of "pid"s, matching the kernel's notion of a task struct.
 * Each "pid" entry has a list of "comm"s.
 * This is because we want to track different programs separately, while
 * exec will reuse the original pid (by design).
 * Each comm has a list of samples that will be used to draw the
 * final graph.
 */

struct per_pid {
	struct per_pid *next;

	int		pid;
	int		ppid;

	u64		start_time;
	u64		end_time;
	u64		total_time;
	int		display;

	struct per_pidcomm *all;
	struct per_pidcomm *current;
};


struct per_pidcomm {
	struct per_pidcomm *next;

	u64		start_time;
	u64		end_time;
	u64		total_time;

	int		Y;
	int		display;

	long		state;
	u64		state_since;

	char		*comm;

	struct cpu_sample *samples;
};

struct sample_wrapper {
	struct sample_wrapper *next;

	u64		timestamp;
	unsigned char	data[0];
};

#define TYPE_NONE	0
#define TYPE_RUNNING	1
#define TYPE_WAITING	2
#define TYPE_BLOCKED	3

struct cpu_sample {
	struct cpu_sample *next;

	u64 start_time;
	u64 end_time;
	int type;
	int cpu;
};

static struct per_pid *all_data;

#define CSTATE 1
#define PSTATE 2

struct power_event {
	struct power_event *next;
	int type;
	int state;
	u64 start_time;
	u64 end_time;
	int cpu;
};

struct wake_event {
	struct wake_event *next;
	int waker;
	int wakee;
	u64 time;
};

static struct power_event *power_events;
static struct wake_event *wake_events;

struct process_filter;
struct process_filter {
	char			*name;
	int			pid;
	struct process_filter	*next;
};

static struct process_filter *process_filter;
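
/*
 * Rough sketch of how the lists above nest once a trace has been
 * processed (illustrative only, pids and comm names are made up):
 *
 *   all_data -> per_pid (pid 1)  -> all -> per_pidcomm ("init") -> samples -> ...
 *            -> per_pid (pid 42) -> all -> per_pidcomm ("sshd") -> samples -> ...
 *                                       -> per_pidcomm ("bash") -> ...
 *
 * power_events and wake_events are flat singly linked lists, built in
 * reverse arrival order as events are parsed.
 */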

static struct per_pid *find_create_pid(int pid)
{
	struct per_pid *cursor = all_data;

	while (cursor) {
		if (cursor->pid == pid)
			return cursor;
		cursor = cursor->next;
	}
	cursor = malloc(sizeof(struct per_pid));
	assert(cursor != NULL);
	memset(cursor, 0, sizeof(struct per_pid));
	cursor->pid = pid;
	cursor->next = all_data;
	all_data = cursor;
	return cursor;
}

static void pid_set_comm(int pid, char *comm)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	p = find_create_pid(pid);
	c = p->all;
	while (c) {
		if (c->comm && strcmp(c->comm, comm) == 0) {
			p->current = c;
			return;
		}
		if (!c->comm) {
			c->comm = strdup(comm);
			p->current = c;
			return;
		}
		c = c->next;
	}
	c = malloc(sizeof(struct per_pidcomm));
	assert(c != NULL);
	memset(c, 0, sizeof(struct per_pidcomm));
	c->comm = strdup(comm);
	p->current = c;
	c->next = p->all;
	p->all = c;
}

static void pid_fork(int pid, int ppid, u64 timestamp)
{
	struct per_pid *p, *pp;
	p = find_create_pid(pid);
	pp = find_create_pid(ppid);
	p->ppid = ppid;
	if (pp->current && pp->current->comm && !p->current)
		pid_set_comm(pid, pp->current->comm);

	p->start_time = timestamp;
	if (p->current) {
		p->current->start_time = timestamp;
		p->current->state_since = timestamp;
	}
}

static void pid_exit(int pid, u64 timestamp)
{
	struct per_pid *p;
	p = find_create_pid(pid);
	p->end_time = timestamp;
	if (p->current)
		p->current->end_time = timestamp;
}

static void
pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;

	p = find_create_pid(pid);
	c = p->current;
	if (!c) {
		c = malloc(sizeof(struct per_pidcomm));
		assert(c != NULL);
		memset(c, 0, sizeof(struct per_pidcomm));
		p->current = c;
		c->next = p->all;
		p->all = c;
	}

	sample = malloc(sizeof(struct cpu_sample));
	assert(sample != NULL);
	memset(sample, 0, sizeof(struct cpu_sample));
	sample->start_time = start;
	sample->end_time = end;
	sample->type = type;
	sample->next = c->samples;
	sample->cpu = cpu;
	c->samples = sample;

	if (sample->type == TYPE_RUNNING && end > start && start > 0) {
		c->total_time += (end-start);
		p->total_time += (end-start);
	}

	if (c->start_time == 0 || c->start_time > start)
		c->start_time = start;
	if (p->start_time == 0 || p->start_time > start)
		p->start_time = start;

	if (cpu > numcpus)
		numcpus = cpu;
}

#define MAX_CPUS 4096

static u64 cpus_cstate_start_times[MAX_CPUS];
static int cpus_cstate_state[MAX_CPUS];
static u64 cpus_pstate_start_times[MAX_CPUS];
static u64 cpus_pstate_state[MAX_CPUS];

static int process_comm_event(event_t *event, struct perf_session *session __used)
{
	pid_set_comm(event->comm.tid, event->comm.comm);
	return 0;
}

static int process_fork_event(event_t *event, struct perf_session *session __used)
{
	pid_fork(event->fork.pid, event->fork.ppid, event->fork.time);
	return 0;
}

static int process_exit_event(event_t *event, struct perf_session *session __used)
{
	pid_exit(event->fork.pid, event->fork.time);
	return 0;
}

struct trace_entry {
	unsigned short		type;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
	int			lock_depth;
};

struct power_entry {
	struct trace_entry te;
	u64	type;
	u64	value;
	u64	cpu_id;
};

#define TASK_COMM_LEN 16
struct wakeup_entry {
	struct trace_entry te;
	char comm[TASK_COMM_LEN];
	int   pid;
	int   prio;
	int   success;
};
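
/*
 * Note (added for clarity; an assumption based on how these structs are
 * used below): trace_entry, power_entry, wakeup_entry and sched_switch
 * (below) are hand-written mirrors of the raw tracepoint record layouts,
 * so the raw sample payload can simply be cast to them rather than being
 * decoded field by field.
 */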

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF	   - interrupts were disabled
 *  IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
 *  NEED_RESCHED   - reschedule is requested
 *  HARDIRQ	   - inside an interrupt handler
 *  SOFTIRQ	   - inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
};



struct sched_switch {
	struct trace_entry te;
	char prev_comm[TASK_COMM_LEN];
	int  prev_pid;
	int  prev_prio;
	long prev_state; /* Arjan weeps. */
	char next_comm[TASK_COMM_LEN];
	int  next_pid;
	int  next_prio;
};

static void c_state_start(int cpu, u64 timestamp, int state)
{
	cpus_cstate_start_times[cpu] = timestamp;
	cpus_cstate_state[cpu] = state;
}

static void c_state_end(int cpu, u64 timestamp)
{
	struct power_event *pwr;
	pwr = malloc(sizeof(struct power_event));
	if (!pwr)
		return;
	memset(pwr, 0, sizeof(struct power_event));

	pwr->state = cpus_cstate_state[cpu];
	pwr->start_time = cpus_cstate_start_times[cpu];
	pwr->end_time = timestamp;
	pwr->cpu = cpu;
	pwr->type = CSTATE;
	pwr->next = power_events;

	power_events = pwr;
}

static void p_state_change(int cpu, u64 timestamp, u64 new_freq)
{
	struct power_event *pwr;

	if (new_freq > 8000000) /* detect invalid data */
		return;

	pwr = malloc(sizeof(struct power_event));
	if (!pwr)
		return;
	memset(pwr, 0, sizeof(struct power_event));

	pwr->state = cpus_pstate_state[cpu];
	pwr->start_time = cpus_pstate_start_times[cpu];
	pwr->end_time = timestamp;
	pwr->cpu = cpu;
	pwr->type = PSTATE;
	pwr->next = power_events;

	if (!pwr->start_time)
		pwr->start_time = first_time;

	power_events = pwr;

	cpus_pstate_state[cpu] = new_freq;
	cpus_pstate_start_times[cpu] = timestamp;

	if ((u64)new_freq > max_freq)
		max_freq = new_freq;

	if (new_freq < min_freq || min_freq == 0)
		min_freq = new_freq;

	if (new_freq == max_freq - 1000)
		turbo_frequency = max_freq;
}
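
/*
 * Per-task state tracking (summary added for readability): each
 * per_pidcomm carries a small state machine driven by the two sched
 * handlers below.  Roughly: a wakeup moves a task from TYPE_NONE or
 * TYPE_BLOCKED to TYPE_WAITING, a switch-in moves it to TYPE_RUNNING,
 * and a switch-out moves it to TYPE_BLOCKED (uninterruptible sleep) or
 * TYPE_WAITING (still runnable).  Most transitions emit a cpu_sample
 * covering the time spent in the previous state via pid_put_sample().
 */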

static void
sched_wakeup(int cpu, u64 timestamp, int pid, struct trace_entry *te)
{
	struct wake_event *we;
	struct per_pid *p;
	struct wakeup_entry *wake = (void *)te;

	we = malloc(sizeof(struct wake_event));
	if (!we)
		return;

	memset(we, 0, sizeof(struct wake_event));
	we->time = timestamp;
	we->waker = pid;

	if ((te->flags & TRACE_FLAG_HARDIRQ) || (te->flags & TRACE_FLAG_SOFTIRQ))
		we->waker = -1;

	we->wakee = wake->pid;
	we->next = wake_events;
	wake_events = we;
	p = find_create_pid(we->wakee);

	if (p && p->current && p->current->state == TYPE_NONE) {
		p->current->state_since = timestamp;
		p->current->state = TYPE_WAITING;
	}
	if (p && p->current && p->current->state == TYPE_BLOCKED) {
		pid_put_sample(p->pid, p->current->state, cpu, p->current->state_since, timestamp);
		p->current->state_since = timestamp;
		p->current->state = TYPE_WAITING;
	}
}

static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te)
{
	struct per_pid *p = NULL, *prev_p;
	struct sched_switch *sw = (void *)te;


	prev_p = find_create_pid(sw->prev_pid);

	p = find_create_pid(sw->next_pid);

	if (prev_p->current && prev_p->current->state != TYPE_NONE)
		pid_put_sample(sw->prev_pid, TYPE_RUNNING, cpu, prev_p->current->state_since, timestamp);
	if (p && p->current) {
		if (p->current->state != TYPE_NONE)
			pid_put_sample(sw->next_pid, p->current->state, cpu, p->current->state_since, timestamp);

		p->current->state_since = timestamp;
		p->current->state = TYPE_RUNNING;
	}

	if (prev_p->current) {
		prev_p->current->state = TYPE_NONE;
		prev_p->current->state_since = timestamp;
		if (sw->prev_state & 2)		/* TASK_UNINTERRUPTIBLE */
			prev_p->current->state = TYPE_BLOCKED;
		if (sw->prev_state == 0)	/* TASK_RUNNING: preempted, still runnable */
			prev_p->current->state = TYPE_WAITING;
	}
}


static int process_sample_event(event_t *event, struct perf_session *session)
{
	struct sample_data data;
	struct trace_entry *te;

	memset(&data, 0, sizeof(data));

	event__parse_sample(event, session->sample_type, &data);

	if (session->sample_type & PERF_SAMPLE_TIME) {
		if (!first_time || first_time > data.time)
			first_time = data.time;
		if (last_time < data.time)
			last_time = data.time;
	}

	te = (void *)data.raw_data;
	if (session->sample_type & PERF_SAMPLE_RAW && data.raw_size > 0) {
		char *event_str;
		struct power_entry *pe;

		pe = (void *)te;

		event_str = perf_header__find_event(te->type);

		if (!event_str)
			return 0;

		if (strcmp(event_str, "power:power_start") == 0)
			c_state_start(pe->cpu_id, data.time, pe->value);

		if (strcmp(event_str, "power:power_end") == 0)
			c_state_end(pe->cpu_id, data.time);

		if (strcmp(event_str, "power:power_frequency") == 0)
			p_state_change(pe->cpu_id, data.time, pe->value);

		if (strcmp(event_str, "sched:sched_wakeup") == 0)
			sched_wakeup(data.cpu, data.time, data.pid, te);

		if (strcmp(event_str, "sched:sched_switch") == 0)
			sched_switch(data.cpu, data.time, te);
	}
	return 0;
}

/*
 * After the last sample we need to wrap up the current C/P state
 * and close it out for each CPU.
 */
static void end_sample_processing(void)
{
	u64 cpu;
	struct power_event *pwr;

	for (cpu = 0; cpu <= numcpus; cpu++) {
		pwr = malloc(sizeof(struct power_event));
		if (!pwr)
			return;
		memset(pwr, 0, sizeof(struct power_event));

		/* C state */
#if 0
		pwr->state = cpus_cstate_state[cpu];
		pwr->start_time = cpus_cstate_start_times[cpu];
		pwr->end_time = last_time;
		pwr->cpu = cpu;
		pwr->type = CSTATE;
		pwr->next = power_events;

		power_events = pwr;
#endif
		/* P state */

		pwr = malloc(sizeof(struct power_event));
		if (!pwr)
			return;
		memset(pwr, 0, sizeof(struct power_event));

		pwr->state = cpus_pstate_state[cpu];
		pwr->start_time = cpus_pstate_start_times[cpu];
		pwr->end_time = last_time;
		pwr->cpu = cpu;
		pwr->type = PSTATE;
		pwr->next = power_events;

		if (!pwr->start_time)
			pwr->start_time = first_time;
		if (!pwr->state)
			pwr->state = min_freq;
		power_events = pwr;
	}
}

/*
 * Sort the pid datastructure
 */
static void sort_pids(void)
{
	struct per_pid *new_list, *p, *cursor, *prev;
	/* sort by ppid first, then by pid, lowest to highest */

	new_list = NULL;

	while (all_data) {
		p = all_data;
		all_data = p->next;
		p->next = NULL;

		if (new_list == NULL) {
			new_list = p;
			p->next = NULL;
			continue;
		}
		prev = NULL;
		cursor = new_list;
		while (cursor) {
			if (cursor->ppid > p->ppid ||
				(cursor->ppid == p->ppid && cursor->pid > p->pid)) {
				/* must insert before */
				if (prev) {
					p->next = prev->next;
					prev->next = p;
					cursor = NULL;
					continue;
				} else {
					p->next = new_list;
					new_list = p;
					cursor = NULL;
					continue;
				}
			}

			prev = cursor;
			cursor = cursor->next;
			if (!cursor)
				prev->next = p;
		}
	}
	all_data = new_list;
}
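
/*
 * Layout sketch for the drawing code below (added for orientation; the
 * exact geometry lives in util/svghelper.c): the top of the chart holds
 * one box per CPU, filled in by svg_cpu_box()/svg_cstate()/svg_pstate().
 * Process bars start at row Y = 2 * numcpus + 2; each displayed
 * per_pidcomm gets one row, remembered in c->Y so that draw_wakeups()
 * can connect the waker row to the wakee row.
 */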

static void draw_c_p_states(void)
{
	struct power_event *pwr;
	pwr = power_events;

	/*
	 * two pass drawing so that the P state bars are on top of the C state blocks
	 */
	while (pwr) {
		if (pwr->type == CSTATE)
			svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
		pwr = pwr->next;
	}

	pwr = power_events;
	while (pwr) {
		if (pwr->type == PSTATE) {
			if (!pwr->state)
				pwr->state = min_freq;
			svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
		}
		pwr = pwr->next;
	}
}

static void draw_wakeups(void)
{
	struct wake_event *we;
	struct per_pid *p;
	struct per_pidcomm *c;

	we = wake_events;
	while (we) {
		int from = 0, to = 0;
		char *task_from = NULL, *task_to = NULL;

		/* locate the column of the waker and wakee */
		p = all_data;
		while (p) {
			if (p->pid == we->waker || p->pid == we->wakee) {
				c = p->all;
				while (c) {
					if (c->Y && c->start_time <= we->time && c->end_time >= we->time) {
						if (p->pid == we->waker && !from) {
							from = c->Y;
							task_from = strdup(c->comm);
						}
						if (p->pid == we->wakee && !to) {
							to = c->Y;
							task_to = strdup(c->comm);
						}
					}
					c = c->next;
				}
				c = p->all;
				while (c) {
					if (p->pid == we->waker && !from) {
						from = c->Y;
						task_from = strdup(c->comm);
					}
					if (p->pid == we->wakee && !to) {
						to = c->Y;
						task_to = strdup(c->comm);
					}
					c = c->next;
				}
			}
			p = p->next;
		}

		if (!task_from) {
			task_from = malloc(40);
			sprintf(task_from, "[%i]", we->waker);
		}
		if (!task_to) {
			task_to = malloc(40);
			sprintf(task_to, "[%i]", we->wakee);
		}

		if (we->waker == -1)
			svg_interrupt(we->time, to);
		else if (from && to && abs(from - to) == 1)
			svg_wakeline(we->time, from, to);
		else
			svg_partial_wakeline(we->time, from, task_from, to, task_to);
		we = we->next;

		free(task_from);
		free(task_to);
	}
}

static void draw_cpu_usage(void)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;
	p = all_data;
	while (p) {
		c = p->all;
		while (c) {
			sample = c->samples;
			while (sample) {
				if (sample->type == TYPE_RUNNING)
					svg_process(sample->cpu, sample->start_time, sample->end_time, "sample", c->comm);

				sample = sample->next;
			}
			c = c->next;
		}
		p = p->next;
	}
}

static void draw_process_bars(void)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;
	int Y = 0;

	Y = 2 * numcpus + 2;

	p = all_data;
	while (p) {
		c = p->all;
		while (c) {
			if (!c->display) {
				c->Y = 0;
				c = c->next;
				continue;
			}

			svg_box(Y, c->start_time, c->end_time, "process");
			sample = c->samples;
			while (sample) {
				if (sample->type == TYPE_RUNNING)
					svg_sample(Y, sample->cpu, sample->start_time, sample->end_time);
				if (sample->type == TYPE_BLOCKED)
					svg_box(Y, sample->start_time, sample->end_time, "blocked");
				if (sample->type == TYPE_WAITING)
					svg_waiting(Y, sample->start_time, sample->end_time);
				sample = sample->next;
			}

			if (c->comm) {
				char comm[256];
				if (c->total_time > 5000000000) /* 5 seconds */
					sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / 1000000000.0);
				else
					sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / 1000000.0);

				svg_text(Y, c->start_time, comm);
			}
			c->Y = Y;
			Y++;
			c = c->next;
		}
		p = p->next;
	}
}

static void add_process_filter(const char *string)
{
	struct process_filter *filt;
	int pid;

	pid = strtoull(string, NULL, 10);
	filt = malloc(sizeof(struct process_filter));
	if (!filt)
		return;

	filt->name = strdup(string);
	filt->pid = pid;
	filt->next = process_filter;

	process_filter = filt;
}

static int passes_filter(struct per_pid *p, struct per_pidcomm *c)
{
	struct process_filter *filt;
	if (!process_filter)
		return 1;

	filt = process_filter;
	while (filt) {
		if (filt->pid && p->pid == filt->pid)
			return 1;
		if (strcmp(filt->name, c->comm) == 0)
			return 1;
		filt = filt->next;
	}
	return 0;
}

static int determine_display_tasks_filtered(void)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	int count = 0;

	p = all_data;
	while (p) {
		p->display = 0;
		if (p->start_time == 1)
			p->start_time = first_time;

		/* no exit marker, task kept running to the end */
		if (p->end_time == 0)
			p->end_time = last_time;

		c = p->all;

		while (c) {
			c->display = 0;

			if (c->start_time == 1)
				c->start_time = first_time;

			if (passes_filter(p, c)) {
				c->display = 1;
				p->display = 1;
				count++;
			}

			if (c->end_time == 0)
				c->end_time = last_time;

			c = c->next;
		}
		p = p->next;
	}
	return count;
}

static int determine_display_tasks(u64 threshold)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	int count = 0;

	if (process_filter)
		return determine_display_tasks_filtered();

	p = all_data;
	while (p) {
		p->display = 0;
		if (p->start_time == 1)
			p->start_time = first_time;

		/* no exit marker, task kept running to the end */
		if (p->end_time == 0)
			p->end_time = last_time;
		if (p->total_time >= threshold && !power_only)
			p->display = 1;

		c = p->all;

		while (c) {
			c->display = 0;

			if (c->start_time == 1)
				c->start_time = first_time;

			if (c->total_time >= threshold && !power_only) {
				c->display = 1;
				count++;
			}

			if (c->end_time == 0)
				c->end_time = last_time;

			c = c->next;
		}
		p = p->next;
	}
	return count;
}



#define TIME_THRESH 10000000

static void write_svg_file(const char *filename)
{
	u64 i;
	int count;

	numcpus++;


	count = determine_display_tasks(TIME_THRESH);

	/* We'd like to show at least 15 tasks; be less picky if we have fewer */
	if (count < 15)
		count = determine_display_tasks(TIME_THRESH / 10);

	open_svg(filename, numcpus, count, first_time, last_time);

	svg_time_grid();
	svg_legenda();

	for (i = 0; i < numcpus; i++)
		svg_cpu_box(i, max_freq, turbo_frequency);

	draw_cpu_usage();
	draw_process_bars();
	draw_c_p_states();
	draw_wakeups();

	svg_close();
}

static struct perf_event_ops event_ops = {
	.comm			= process_comm_event,
	.fork			= process_fork_event,
	.exit			= process_exit_event,
	.sample			= process_sample_event,
	.ordered_samples	= true,
};

static int __cmd_timechart(void)
{
	struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0, false);
	int ret = -EINVAL;

	if (session == NULL)
		return -ENOMEM;

	if (!perf_session__has_traces(session, "timechart record"))
		goto out_delete;

	ret = perf_session__process_events(session, &event_ops);
	if (ret)
		goto out_delete;

	end_sample_processing();

	sort_pids();

	write_svg_file(output_name);

	pr_info("Written %2.1f seconds of trace to %s.\n",
		(last_time - first_time) / 1000000000.0, output_name);
out_delete:
	perf_session__delete(session);
	return ret;
}

static const char * const timechart_usage[] = {
	"perf timechart [<options>] {record}",
	NULL
};

static const char *record_args[] = {
	"record",
	"-a",
	"-R",
	"-f",
	"-c", "1",
	"-e", "power:power_start",
	"-e", "power:power_end",
	"-e", "power:power_frequency",
	"-e", "sched:sched_wakeup",
	"-e", "sched:sched_switch",
};
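
/*
 * For reference (comment added here; the flags are taken straight from
 * record_args above): __cmd_record() below effectively runs
 *
 *   perf record -a -R -f -c 1 \
 *       -e power:power_start -e power:power_end -e power:power_frequency \
 *       -e sched:sched_wakeup -e sched:sched_switch <extra user args>
 *
 * i.e. a system-wide, raw-sample trace of the power and scheduler
 * tracepoints, with one sample taken per event occurrence.
 */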

static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	return cmd_record(i, rec_argv, NULL);
}

static int
parse_process(const struct option *opt __used, const char *arg, int __used unset)
{
	if (arg)
		add_process_filter(arg);
	return 0;
}

static const struct option options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		    "input file name"),
	OPT_STRING('o', "output", &output_name, "file",
		    "output file name"),
	OPT_INTEGER('w', "width", &svg_page_width,
		    "page width"),
	OPT_BOOLEAN('P', "power-only", &power_only,
		    "output power data only"),
	OPT_CALLBACK('p', "process", NULL, "process",
		      "process selector. Pass a pid or process name.",
		       parse_process),
	OPT_END()
};


int cmd_timechart(int argc, const char **argv, const char *prefix __used)
{
	argc = parse_options(argc, argv, options, timechart_usage,
			PARSE_OPT_STOP_AT_NON_OPTION);

	symbol__init();

	if (argc && !strncmp(argv[0], "rec", 3))
		return __cmd_record(argc, argv);
	else if (argc)
		usage_with_options(timechart_usage, options);

	setup_pager();

	return __cmd_timechart();
}