1 // SPDX-License-Identifier: GPL-2.0 2 #include "builtin.h" 3 #include "perf.h" 4 #include "perf-sys.h" 5 6 #include "util/cpumap.h" 7 #include "util/evlist.h" 8 #include "util/evsel.h" 9 #include "util/evsel_fprintf.h" 10 #include "util/mutex.h" 11 #include "util/symbol.h" 12 #include "util/thread.h" 13 #include "util/header.h" 14 #include "util/session.h" 15 #include "util/tool.h" 16 #include "util/cloexec.h" 17 #include "util/thread_map.h" 18 #include "util/color.h" 19 #include "util/stat.h" 20 #include "util/string2.h" 21 #include "util/callchain.h" 22 #include "util/time-utils.h" 23 24 #include <subcmd/pager.h> 25 #include <subcmd/parse-options.h> 26 #include "util/trace-event.h" 27 28 #include "util/debug.h" 29 #include "util/event.h" 30 #include "util/util.h" 31 32 #include <linux/kernel.h> 33 #include <linux/log2.h> 34 #include <linux/zalloc.h> 35 #include <sys/prctl.h> 36 #include <sys/resource.h> 37 #include <inttypes.h> 38 39 #include <errno.h> 40 #include <semaphore.h> 41 #include <pthread.h> 42 #include <math.h> 43 #include <api/fs/fs.h> 44 #include <perf/cpumap.h> 45 #include <linux/time64.h> 46 #include <linux/err.h> 47 48 #include <linux/ctype.h> 49 50 #define PR_SET_NAME 15 /* Set process name */ 51 #define MAX_CPUS 4096 52 #define COMM_LEN 20 53 #define SYM_LEN 129 54 #define MAX_PID 1024000 55 #define MAX_PRIO 140 56 57 static const char *cpu_list; 58 static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); 59 60 struct sched_atom; 61 62 struct task_desc { 63 unsigned long nr; 64 unsigned long pid; 65 char comm[COMM_LEN]; 66 67 unsigned long nr_events; 68 unsigned long curr_event; 69 struct sched_atom **atoms; 70 71 pthread_t thread; 72 73 sem_t ready_for_work; 74 sem_t work_done_sem; 75 76 u64 cpu_usage; 77 }; 78 79 enum sched_event_type { 80 SCHED_EVENT_RUN, 81 SCHED_EVENT_SLEEP, 82 SCHED_EVENT_WAKEUP, 83 }; 84 85 struct sched_atom { 86 enum sched_event_type type; 87 u64 timestamp; 88 u64 duration; 89 unsigned long nr; 90 sem_t *wait_sem; 91 struct task_desc *wakee; 92 }; 93 94 enum thread_state { 95 THREAD_SLEEPING = 0, 96 THREAD_WAIT_CPU, 97 THREAD_SCHED_IN, 98 THREAD_IGNORE 99 }; 100 101 struct work_atom { 102 struct list_head list; 103 enum thread_state state; 104 u64 sched_out_time; 105 u64 wake_up_time; 106 u64 sched_in_time; 107 u64 runtime; 108 }; 109 110 struct work_atoms { 111 struct list_head work_list; 112 struct thread *thread; 113 struct rb_node node; 114 u64 max_lat; 115 u64 max_lat_start; 116 u64 max_lat_end; 117 u64 total_lat; 118 u64 nb_atoms; 119 u64 total_runtime; 120 int num_merged; 121 }; 122 123 typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *); 124 125 struct perf_sched; 126 127 struct trace_sched_handler { 128 int (*switch_event)(struct perf_sched *sched, struct evsel *evsel, 129 struct perf_sample *sample, struct machine *machine); 130 131 int (*runtime_event)(struct perf_sched *sched, struct evsel *evsel, 132 struct perf_sample *sample, struct machine *machine); 133 134 int (*wakeup_event)(struct perf_sched *sched, struct evsel *evsel, 135 struct perf_sample *sample, struct machine *machine); 136 137 /* PERF_RECORD_FORK event, not sched_process_fork tracepoint */ 138 int (*fork_event)(struct perf_sched *sched, union perf_event *event, 139 struct machine *machine); 140 141 int (*migrate_task_event)(struct perf_sched *sched, 142 struct evsel *evsel, 143 struct perf_sample *sample, 144 struct machine *machine); 145 }; 146 147 #define COLOR_PIDS PERF_COLOR_BLUE 148 #define COLOR_CPUS PERF_COLOR_BG_RED 149 150 struct perf_sched_map { 151 DECLARE_BITMAP(comp_cpus_mask, MAX_CPUS); 152 struct perf_cpu *comp_cpus; 153 bool comp; 154 struct perf_thread_map *color_pids; 155 const char *color_pids_str; 156 struct perf_cpu_map *color_cpus; 157 const char *color_cpus_str; 158 const char *task_name; 159 struct strlist *task_names; 160 bool fuzzy; 161 struct perf_cpu_map *cpus; 162 const char *cpus_str; 163 }; 164 165 struct perf_sched { 166 struct perf_tool tool; 167 const char *sort_order; 168 unsigned long nr_tasks; 169 struct task_desc **pid_to_task; 170 struct task_desc **tasks; 171 const struct trace_sched_handler *tp_handler; 172 struct mutex start_work_mutex; 173 struct mutex work_done_wait_mutex; 174 int profile_cpu; 175 /* 176 * Track the current task - that way we can know whether there's any 177 * weird events, such as a task being switched away that is not current. 178 */ 179 struct perf_cpu max_cpu; 180 u32 *curr_pid; 181 struct thread **curr_thread; 182 struct thread **curr_out_thread; 183 char next_shortname1; 184 char next_shortname2; 185 unsigned int replay_repeat; 186 unsigned long nr_run_events; 187 unsigned long nr_sleep_events; 188 unsigned long nr_wakeup_events; 189 unsigned long nr_sleep_corrections; 190 unsigned long nr_run_events_optimized; 191 unsigned long targetless_wakeups; 192 unsigned long multitarget_wakeups; 193 unsigned long nr_runs; 194 unsigned long nr_timestamps; 195 unsigned long nr_unordered_timestamps; 196 unsigned long nr_context_switch_bugs; 197 unsigned long nr_events; 198 unsigned long nr_lost_chunks; 199 unsigned long nr_lost_events; 200 u64 run_measurement_overhead; 201 u64 sleep_measurement_overhead; 202 u64 start_time; 203 u64 cpu_usage; 204 u64 runavg_cpu_usage; 205 u64 parent_cpu_usage; 206 u64 runavg_parent_cpu_usage; 207 u64 sum_runtime; 208 u64 sum_fluct; 209 u64 run_avg; 210 u64 all_runtime; 211 u64 all_count; 212 u64 *cpu_last_switched; 213 struct rb_root_cached atom_root, sorted_atom_root, merged_atom_root; 214 struct list_head sort_list, cmp_pid; 215 bool force; 216 bool skip_merge; 217 struct perf_sched_map map; 218 219 /* options for timehist command */ 220 bool summary; 221 bool summary_only; 222 bool idle_hist; 223 bool show_callchain; 224 unsigned int max_stack; 225 bool show_cpu_visual; 226 bool show_wakeups; 227 bool show_next; 228 bool show_migrations; 229 bool pre_migrations; 230 bool show_state; 231 bool show_prio; 232 u64 skipped_samples; 233 const char *time_str; 234 struct perf_time_interval ptime; 235 struct perf_time_interval hist_time; 236 volatile bool thread_funcs_exit; 237 const char *prio_str; 238 DECLARE_BITMAP(prio_bitmap, MAX_PRIO); 239 }; 240 241 /* per thread run time data */ 242 struct thread_runtime { 243 u64 last_time; /* time of previous sched in/out event */ 244 u64 dt_run; /* run time */ 245 u64 dt_sleep; /* time between CPU access by sleep (off cpu) */ 246 u64 dt_iowait; /* time between CPU access by iowait (off cpu) */ 247 u64 dt_preempt; /* time between CPU access by preempt (off cpu) */ 248 u64 dt_delay; /* time between wakeup and sched-in */ 249 u64 dt_pre_mig; /* time between migration and wakeup */ 250 u64 ready_to_run; /* time of wakeup */ 251 u64 migrated; /* time when a thread is migrated */ 252 253 struct stats run_stats; 254 u64 total_run_time; 255 u64 total_sleep_time; 256 u64 total_iowait_time; 257 u64 total_preempt_time; 258 u64 total_delay_time; 259 u64 total_pre_mig_time; 260 261 char last_state; 262 263 char shortname[3]; 264 bool comm_changed; 265 266 u64 migrations; 267 268 int prio; 269 }; 270 271 /* per event run time data */ 272 struct evsel_runtime { 273 u64 *last_time; /* time this event was last seen per cpu */ 274 u32 ncpu; /* highest cpu slot allocated */ 275 }; 276 277 /* per cpu idle time data */ 278 struct idle_thread_runtime { 279 struct thread_runtime tr; 280 struct thread *last_thread; 281 struct rb_root_cached sorted_root; 282 struct callchain_root callchain; 283 struct callchain_cursor cursor; 284 }; 285 286 /* track idle times per cpu */ 287 static struct thread **idle_threads; 288 static int idle_max_cpu; 289 static char idle_comm[] = "<idle>"; 290 291 static u64 get_nsecs(void) 292 { 293 struct timespec ts; 294 295 clock_gettime(CLOCK_MONOTONIC, &ts); 296 297 return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec; 298 } 299 300 static void burn_nsecs(struct perf_sched *sched, u64 nsecs) 301 { 302 u64 T0 = get_nsecs(), T1; 303 304 do { 305 T1 = get_nsecs(); 306 } while (T1 + sched->run_measurement_overhead < T0 + nsecs); 307 } 308 309 static void sleep_nsecs(u64 nsecs) 310 { 311 struct timespec ts; 312 313 ts.tv_nsec = nsecs % 999999999; 314 ts.tv_sec = nsecs / 999999999; 315 316 nanosleep(&ts, NULL); 317 } 318 319 static void calibrate_run_measurement_overhead(struct perf_sched *sched) 320 { 321 u64 T0, T1, delta, min_delta = NSEC_PER_SEC; 322 int i; 323 324 for (i = 0; i < 10; i++) { 325 T0 = get_nsecs(); 326 burn_nsecs(sched, 0); 327 T1 = get_nsecs(); 328 delta = T1-T0; 329 min_delta = min(min_delta, delta); 330 } 331 sched->run_measurement_overhead = min_delta; 332 333 printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta); 334 } 335 336 static void calibrate_sleep_measurement_overhead(struct perf_sched *sched) 337 { 338 u64 T0, T1, delta, min_delta = NSEC_PER_SEC; 339 int i; 340 341 for (i = 0; i < 10; i++) { 342 T0 = get_nsecs(); 343 sleep_nsecs(10000); 344 T1 = get_nsecs(); 345 delta = T1-T0; 346 min_delta = min(min_delta, delta); 347 } 348 min_delta -= 10000; 349 sched->sleep_measurement_overhead = min_delta; 350 351 printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta); 352 } 353 354 static struct sched_atom * 355 get_new_event(struct task_desc *task, u64 timestamp) 356 { 357 struct sched_atom *event = zalloc(sizeof(*event)); 358 unsigned long idx = task->nr_events; 359 size_t size; 360 361 event->timestamp = timestamp; 362 event->nr = idx; 363 364 task->nr_events++; 365 size = sizeof(struct sched_atom *) * task->nr_events; 366 task->atoms = realloc(task->atoms, size); 367 BUG_ON(!task->atoms); 368 369 task->atoms[idx] = event; 370 371 return event; 372 } 373 374 static struct sched_atom *last_event(struct task_desc *task) 375 { 376 if (!task->nr_events) 377 return NULL; 378 379 return task->atoms[task->nr_events - 1]; 380 } 381 382 static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task, 383 u64 timestamp, u64 duration) 384 { 385 struct sched_atom *event, *curr_event = last_event(task); 386 387 /* 388 * optimize an existing RUN event by merging this one 389 * to it: 390 */ 391 if (curr_event && curr_event->type == SCHED_EVENT_RUN) { 392 sched->nr_run_events_optimized++; 393 curr_event->duration += duration; 394 return; 395 } 396 397 event = get_new_event(task, timestamp); 398 399 event->type = SCHED_EVENT_RUN; 400 event->duration = duration; 401 402 sched->nr_run_events++; 403 } 404 405 static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task, 406 u64 timestamp, struct task_desc *wakee) 407 { 408 struct sched_atom *event, *wakee_event; 409 410 event = get_new_event(task, timestamp); 411 event->type = SCHED_EVENT_WAKEUP; 412 event->wakee = wakee; 413 414 wakee_event = last_event(wakee); 415 if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) { 416 sched->targetless_wakeups++; 417 return; 418 } 419 if (wakee_event->wait_sem) { 420 sched->multitarget_wakeups++; 421 return; 422 } 423 424 wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem)); 425 sem_init(wakee_event->wait_sem, 0, 0); 426 event->wait_sem = wakee_event->wait_sem; 427 428 sched->nr_wakeup_events++; 429 } 430 431 static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task, 432 u64 timestamp) 433 { 434 struct sched_atom *event = get_new_event(task, timestamp); 435 436 event->type = SCHED_EVENT_SLEEP; 437 438 sched->nr_sleep_events++; 439 } 440 441 static struct task_desc *register_pid(struct perf_sched *sched, 442 unsigned long pid, const char *comm) 443 { 444 struct task_desc *task; 445 static int pid_max; 446 447 if (sched->pid_to_task == NULL) { 448 if (sysctl__read_int("kernel/pid_max", &pid_max) < 0) 449 pid_max = MAX_PID; 450 BUG_ON((sched->pid_to_task = calloc(pid_max, sizeof(struct task_desc *))) == NULL); 451 } 452 if (pid >= (unsigned long)pid_max) { 453 BUG_ON((sched->pid_to_task = realloc(sched->pid_to_task, (pid + 1) * 454 sizeof(struct task_desc *))) == NULL); 455 while (pid >= (unsigned long)pid_max) 456 sched->pid_to_task[pid_max++] = NULL; 457 } 458 459 task = sched->pid_to_task[pid]; 460 461 if (task) 462 return task; 463 464 task = zalloc(sizeof(*task)); 465 task->pid = pid; 466 task->nr = sched->nr_tasks; 467 strcpy(task->comm, comm); 468 /* 469 * every task starts in sleeping state - this gets ignored 470 * if there's no wakeup pointing to this sleep state: 471 */ 472 add_sched_event_sleep(sched, task, 0); 473 474 sched->pid_to_task[pid] = task; 475 sched->nr_tasks++; 476 sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *)); 477 BUG_ON(!sched->tasks); 478 sched->tasks[task->nr] = task; 479 480 if (verbose > 0) 481 printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm); 482 483 return task; 484 } 485 486 487 static void print_task_traces(struct perf_sched *sched) 488 { 489 struct task_desc *task; 490 unsigned long i; 491 492 for (i = 0; i < sched->nr_tasks; i++) { 493 task = sched->tasks[i]; 494 printf("task %6ld (%20s:%10ld), nr_events: %ld\n", 495 task->nr, task->comm, task->pid, task->nr_events); 496 } 497 } 498 499 static void add_cross_task_wakeups(struct perf_sched *sched) 500 { 501 struct task_desc *task1, *task2; 502 unsigned long i, j; 503 504 for (i = 0; i < sched->nr_tasks; i++) { 505 task1 = sched->tasks[i]; 506 j = i + 1; 507 if (j == sched->nr_tasks) 508 j = 0; 509 task2 = sched->tasks[j]; 510 add_sched_event_wakeup(sched, task1, 0, task2); 511 } 512 } 513 514 static void perf_sched__process_event(struct perf_sched *sched, 515 struct sched_atom *atom) 516 { 517 int ret = 0; 518 519 switch (atom->type) { 520 case SCHED_EVENT_RUN: 521 burn_nsecs(sched, atom->duration); 522 break; 523 case SCHED_EVENT_SLEEP: 524 if (atom->wait_sem) 525 ret = sem_wait(atom->wait_sem); 526 BUG_ON(ret); 527 break; 528 case SCHED_EVENT_WAKEUP: 529 if (atom->wait_sem) 530 ret = sem_post(atom->wait_sem); 531 BUG_ON(ret); 532 break; 533 default: 534 BUG_ON(1); 535 } 536 } 537 538 static u64 get_cpu_usage_nsec_parent(void) 539 { 540 struct rusage ru; 541 u64 sum; 542 int err; 543 544 err = getrusage(RUSAGE_SELF, &ru); 545 BUG_ON(err); 546 547 sum = ru.ru_utime.tv_sec * NSEC_PER_SEC + ru.ru_utime.tv_usec * NSEC_PER_USEC; 548 sum += ru.ru_stime.tv_sec * NSEC_PER_SEC + ru.ru_stime.tv_usec * NSEC_PER_USEC; 549 550 return sum; 551 } 552 553 static int self_open_counters(struct perf_sched *sched, unsigned long cur_task) 554 { 555 struct perf_event_attr attr; 556 char sbuf[STRERR_BUFSIZE], info[STRERR_BUFSIZE]; 557 int fd; 558 struct rlimit limit; 559 bool need_privilege = false; 560 561 memset(&attr, 0, sizeof(attr)); 562 563 attr.type = PERF_TYPE_SOFTWARE; 564 attr.config = PERF_COUNT_SW_TASK_CLOCK; 565 566 force_again: 567 fd = sys_perf_event_open(&attr, 0, -1, -1, 568 perf_event_open_cloexec_flag()); 569 570 if (fd < 0) { 571 if (errno == EMFILE) { 572 if (sched->force) { 573 BUG_ON(getrlimit(RLIMIT_NOFILE, &limit) == -1); 574 limit.rlim_cur += sched->nr_tasks - cur_task; 575 if (limit.rlim_cur > limit.rlim_max) { 576 limit.rlim_max = limit.rlim_cur; 577 need_privilege = true; 578 } 579 if (setrlimit(RLIMIT_NOFILE, &limit) == -1) { 580 if (need_privilege && errno == EPERM) 581 strcpy(info, "Need privilege\n"); 582 } else 583 goto force_again; 584 } else 585 strcpy(info, "Have a try with -f option\n"); 586 } 587 pr_err("Error: sys_perf_event_open() syscall returned " 588 "with %d (%s)\n%s", fd, 589 str_error_r(errno, sbuf, sizeof(sbuf)), info); 590 exit(EXIT_FAILURE); 591 } 592 return fd; 593 } 594 595 static u64 get_cpu_usage_nsec_self(int fd) 596 { 597 u64 runtime; 598 int ret; 599 600 ret = read(fd, &runtime, sizeof(runtime)); 601 BUG_ON(ret != sizeof(runtime)); 602 603 return runtime; 604 } 605 606 struct sched_thread_parms { 607 struct task_desc *task; 608 struct perf_sched *sched; 609 int fd; 610 }; 611 612 static void *thread_func(void *ctx) 613 { 614 struct sched_thread_parms *parms = ctx; 615 struct task_desc *this_task = parms->task; 616 struct perf_sched *sched = parms->sched; 617 u64 cpu_usage_0, cpu_usage_1; 618 unsigned long i, ret; 619 char comm2[22]; 620 int fd = parms->fd; 621 622 zfree(&parms); 623 624 sprintf(comm2, ":%s", this_task->comm); 625 prctl(PR_SET_NAME, comm2); 626 if (fd < 0) 627 return NULL; 628 629 while (!sched->thread_funcs_exit) { 630 ret = sem_post(&this_task->ready_for_work); 631 BUG_ON(ret); 632 mutex_lock(&sched->start_work_mutex); 633 mutex_unlock(&sched->start_work_mutex); 634 635 cpu_usage_0 = get_cpu_usage_nsec_self(fd); 636 637 for (i = 0; i < this_task->nr_events; i++) { 638 this_task->curr_event = i; 639 perf_sched__process_event(sched, this_task->atoms[i]); 640 } 641 642 cpu_usage_1 = get_cpu_usage_nsec_self(fd); 643 this_task->cpu_usage = cpu_usage_1 - cpu_usage_0; 644 ret = sem_post(&this_task->work_done_sem); 645 BUG_ON(ret); 646 647 mutex_lock(&sched->work_done_wait_mutex); 648 mutex_unlock(&sched->work_done_wait_mutex); 649 } 650 return NULL; 651 } 652 653 static void create_tasks(struct perf_sched *sched) 654 EXCLUSIVE_LOCK_FUNCTION(sched->start_work_mutex) 655 EXCLUSIVE_LOCK_FUNCTION(sched->work_done_wait_mutex) 656 { 657 struct task_desc *task; 658 pthread_attr_t attr; 659 unsigned long i; 660 int err; 661 662 err = pthread_attr_init(&attr); 663 BUG_ON(err); 664 err = pthread_attr_setstacksize(&attr, 665 (size_t) max(16 * 1024, (int)PTHREAD_STACK_MIN)); 666 BUG_ON(err); 667 mutex_lock(&sched->start_work_mutex); 668 mutex_lock(&sched->work_done_wait_mutex); 669 for (i = 0; i < sched->nr_tasks; i++) { 670 struct sched_thread_parms *parms = malloc(sizeof(*parms)); 671 BUG_ON(parms == NULL); 672 parms->task = task = sched->tasks[i]; 673 parms->sched = sched; 674 parms->fd = self_open_counters(sched, i); 675 sem_init(&task->ready_for_work, 0, 0); 676 sem_init(&task->work_done_sem, 0, 0); 677 task->curr_event = 0; 678 err = pthread_create(&task->thread, &attr, thread_func, parms); 679 BUG_ON(err); 680 } 681 } 682 683 static void destroy_tasks(struct perf_sched *sched) 684 UNLOCK_FUNCTION(sched->start_work_mutex) 685 UNLOCK_FUNCTION(sched->work_done_wait_mutex) 686 { 687 struct task_desc *task; 688 unsigned long i; 689 int err; 690 691 mutex_unlock(&sched->start_work_mutex); 692 mutex_unlock(&sched->work_done_wait_mutex); 693 /* Get rid of threads so they won't be upset by mutex destrunction */ 694 for (i = 0; i < sched->nr_tasks; i++) { 695 task = sched->tasks[i]; 696 err = pthread_join(task->thread, NULL); 697 BUG_ON(err); 698 sem_destroy(&task->ready_for_work); 699 sem_destroy(&task->work_done_sem); 700 } 701 } 702 703 static void wait_for_tasks(struct perf_sched *sched) 704 EXCLUSIVE_LOCKS_REQUIRED(sched->work_done_wait_mutex) 705 EXCLUSIVE_LOCKS_REQUIRED(sched->start_work_mutex) 706 { 707 u64 cpu_usage_0, cpu_usage_1; 708 struct task_desc *task; 709 unsigned long i, ret; 710 711 sched->start_time = get_nsecs(); 712 sched->cpu_usage = 0; 713 mutex_unlock(&sched->work_done_wait_mutex); 714 715 for (i = 0; i < sched->nr_tasks; i++) { 716 task = sched->tasks[i]; 717 ret = sem_wait(&task->ready_for_work); 718 BUG_ON(ret); 719 sem_init(&task->ready_for_work, 0, 0); 720 } 721 mutex_lock(&sched->work_done_wait_mutex); 722 723 cpu_usage_0 = get_cpu_usage_nsec_parent(); 724 725 mutex_unlock(&sched->start_work_mutex); 726 727 for (i = 0; i < sched->nr_tasks; i++) { 728 task = sched->tasks[i]; 729 ret = sem_wait(&task->work_done_sem); 730 BUG_ON(ret); 731 sem_init(&task->work_done_sem, 0, 0); 732 sched->cpu_usage += task->cpu_usage; 733 task->cpu_usage = 0; 734 } 735 736 cpu_usage_1 = get_cpu_usage_nsec_parent(); 737 if (!sched->runavg_cpu_usage) 738 sched->runavg_cpu_usage = sched->cpu_usage; 739 sched->runavg_cpu_usage = (sched->runavg_cpu_usage * (sched->replay_repeat - 1) + sched->cpu_usage) / sched->replay_repeat; 740 741 sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0; 742 if (!sched->runavg_parent_cpu_usage) 743 sched->runavg_parent_cpu_usage = sched->parent_cpu_usage; 744 sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) + 745 sched->parent_cpu_usage)/sched->replay_repeat; 746 747 mutex_lock(&sched->start_work_mutex); 748 749 for (i = 0; i < sched->nr_tasks; i++) { 750 task = sched->tasks[i]; 751 task->curr_event = 0; 752 } 753 } 754 755 static void run_one_test(struct perf_sched *sched) 756 EXCLUSIVE_LOCKS_REQUIRED(sched->work_done_wait_mutex) 757 EXCLUSIVE_LOCKS_REQUIRED(sched->start_work_mutex) 758 { 759 u64 T0, T1, delta, avg_delta, fluct; 760 761 T0 = get_nsecs(); 762 wait_for_tasks(sched); 763 T1 = get_nsecs(); 764 765 delta = T1 - T0; 766 sched->sum_runtime += delta; 767 sched->nr_runs++; 768 769 avg_delta = sched->sum_runtime / sched->nr_runs; 770 if (delta < avg_delta) 771 fluct = avg_delta - delta; 772 else 773 fluct = delta - avg_delta; 774 sched->sum_fluct += fluct; 775 if (!sched->run_avg) 776 sched->run_avg = delta; 777 sched->run_avg = (sched->run_avg * (sched->replay_repeat - 1) + delta) / sched->replay_repeat; 778 779 printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / NSEC_PER_MSEC); 780 781 printf("ravg: %0.2f, ", (double)sched->run_avg / NSEC_PER_MSEC); 782 783 printf("cpu: %0.2f / %0.2f", 784 (double)sched->cpu_usage / NSEC_PER_MSEC, (double)sched->runavg_cpu_usage / NSEC_PER_MSEC); 785 786 #if 0 787 /* 788 * rusage statistics done by the parent, these are less 789 * accurate than the sched->sum_exec_runtime based statistics: 790 */ 791 printf(" [%0.2f / %0.2f]", 792 (double)sched->parent_cpu_usage / NSEC_PER_MSEC, 793 (double)sched->runavg_parent_cpu_usage / NSEC_PER_MSEC); 794 #endif 795 796 printf("\n"); 797 798 if (sched->nr_sleep_corrections) 799 printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections); 800 sched->nr_sleep_corrections = 0; 801 } 802 803 static void test_calibrations(struct perf_sched *sched) 804 { 805 u64 T0, T1; 806 807 T0 = get_nsecs(); 808 burn_nsecs(sched, NSEC_PER_MSEC); 809 T1 = get_nsecs(); 810 811 printf("the run test took %" PRIu64 " nsecs\n", T1 - T0); 812 813 T0 = get_nsecs(); 814 sleep_nsecs(NSEC_PER_MSEC); 815 T1 = get_nsecs(); 816 817 printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0); 818 } 819 820 static int 821 replay_wakeup_event(struct perf_sched *sched, 822 struct evsel *evsel, struct perf_sample *sample, 823 struct machine *machine __maybe_unused) 824 { 825 const char *comm = evsel__strval(evsel, sample, "comm"); 826 const u32 pid = evsel__intval(evsel, sample, "pid"); 827 struct task_desc *waker, *wakee; 828 829 if (verbose > 0) { 830 printf("sched_wakeup event %p\n", evsel); 831 832 printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid); 833 } 834 835 waker = register_pid(sched, sample->tid, "<unknown>"); 836 wakee = register_pid(sched, pid, comm); 837 838 add_sched_event_wakeup(sched, waker, sample->time, wakee); 839 return 0; 840 } 841 842 static int replay_switch_event(struct perf_sched *sched, 843 struct evsel *evsel, 844 struct perf_sample *sample, 845 struct machine *machine __maybe_unused) 846 { 847 const char *prev_comm = evsel__strval(evsel, sample, "prev_comm"), 848 *next_comm = evsel__strval(evsel, sample, "next_comm"); 849 const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"), 850 next_pid = evsel__intval(evsel, sample, "next_pid"); 851 struct task_desc *prev, __maybe_unused *next; 852 u64 timestamp0, timestamp = sample->time; 853 int cpu = sample->cpu; 854 s64 delta; 855 856 if (verbose > 0) 857 printf("sched_switch event %p\n", evsel); 858 859 if (cpu >= MAX_CPUS || cpu < 0) 860 return 0; 861 862 timestamp0 = sched->cpu_last_switched[cpu]; 863 if (timestamp0) 864 delta = timestamp - timestamp0; 865 else 866 delta = 0; 867 868 if (delta < 0) { 869 pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta); 870 return -1; 871 } 872 873 pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n", 874 prev_comm, prev_pid, next_comm, next_pid, delta); 875 876 prev = register_pid(sched, prev_pid, prev_comm); 877 next = register_pid(sched, next_pid, next_comm); 878 879 sched->cpu_last_switched[cpu] = timestamp; 880 881 add_sched_event_run(sched, prev, timestamp, delta); 882 add_sched_event_sleep(sched, prev, timestamp); 883 884 return 0; 885 } 886 887 static int replay_fork_event(struct perf_sched *sched, 888 union perf_event *event, 889 struct machine *machine) 890 { 891 struct thread *child, *parent; 892 893 child = machine__findnew_thread(machine, event->fork.pid, 894 event->fork.tid); 895 parent = machine__findnew_thread(machine, event->fork.ppid, 896 event->fork.ptid); 897 898 if (child == NULL || parent == NULL) { 899 pr_debug("thread does not exist on fork event: child %p, parent %p\n", 900 child, parent); 901 goto out_put; 902 } 903 904 if (verbose > 0) { 905 printf("fork event\n"); 906 printf("... parent: %s/%d\n", thread__comm_str(parent), thread__tid(parent)); 907 printf("... child: %s/%d\n", thread__comm_str(child), thread__tid(child)); 908 } 909 910 register_pid(sched, thread__tid(parent), thread__comm_str(parent)); 911 register_pid(sched, thread__tid(child), thread__comm_str(child)); 912 out_put: 913 thread__put(child); 914 thread__put(parent); 915 return 0; 916 } 917 918 struct sort_dimension { 919 const char *name; 920 sort_fn_t cmp; 921 struct list_head list; 922 }; 923 924 static inline void init_prio(struct thread_runtime *r) 925 { 926 r->prio = -1; 927 } 928 929 /* 930 * handle runtime stats saved per thread 931 */ 932 static struct thread_runtime *thread__init_runtime(struct thread *thread) 933 { 934 struct thread_runtime *r; 935 936 r = zalloc(sizeof(struct thread_runtime)); 937 if (!r) 938 return NULL; 939 940 init_stats(&r->run_stats); 941 init_prio(r); 942 thread__set_priv(thread, r); 943 944 return r; 945 } 946 947 static struct thread_runtime *thread__get_runtime(struct thread *thread) 948 { 949 struct thread_runtime *tr; 950 951 tr = thread__priv(thread); 952 if (tr == NULL) { 953 tr = thread__init_runtime(thread); 954 if (tr == NULL) 955 pr_debug("Failed to malloc memory for runtime data.\n"); 956 } 957 958 return tr; 959 } 960 961 static int 962 thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r) 963 { 964 struct sort_dimension *sort; 965 int ret = 0; 966 967 BUG_ON(list_empty(list)); 968 969 list_for_each_entry(sort, list, list) { 970 ret = sort->cmp(l, r); 971 if (ret) 972 return ret; 973 } 974 975 return ret; 976 } 977 978 static struct work_atoms * 979 thread_atoms_search(struct rb_root_cached *root, struct thread *thread, 980 struct list_head *sort_list) 981 { 982 struct rb_node *node = root->rb_root.rb_node; 983 struct work_atoms key = { .thread = thread }; 984 985 while (node) { 986 struct work_atoms *atoms; 987 int cmp; 988 989 atoms = container_of(node, struct work_atoms, node); 990 991 cmp = thread_lat_cmp(sort_list, &key, atoms); 992 if (cmp > 0) 993 node = node->rb_left; 994 else if (cmp < 0) 995 node = node->rb_right; 996 else { 997 BUG_ON(thread != atoms->thread); 998 return atoms; 999 } 1000 } 1001 return NULL; 1002 } 1003 1004 static void 1005 __thread_latency_insert(struct rb_root_cached *root, struct work_atoms *data, 1006 struct list_head *sort_list) 1007 { 1008 struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL; 1009 bool leftmost = true; 1010 1011 while (*new) { 1012 struct work_atoms *this; 1013 int cmp; 1014 1015 this = container_of(*new, struct work_atoms, node); 1016 parent = *new; 1017 1018 cmp = thread_lat_cmp(sort_list, data, this); 1019 1020 if (cmp > 0) 1021 new = &((*new)->rb_left); 1022 else { 1023 new = &((*new)->rb_right); 1024 leftmost = false; 1025 } 1026 } 1027 1028 rb_link_node(&data->node, parent, new); 1029 rb_insert_color_cached(&data->node, root, leftmost); 1030 } 1031 1032 static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread) 1033 { 1034 struct work_atoms *atoms = zalloc(sizeof(*atoms)); 1035 if (!atoms) { 1036 pr_err("No memory at %s\n", __func__); 1037 return -1; 1038 } 1039 1040 atoms->thread = thread__get(thread); 1041 INIT_LIST_HEAD(&atoms->work_list); 1042 __thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid); 1043 return 0; 1044 } 1045 1046 static int 1047 add_sched_out_event(struct work_atoms *atoms, 1048 char run_state, 1049 u64 timestamp) 1050 { 1051 struct work_atom *atom = zalloc(sizeof(*atom)); 1052 if (!atom) { 1053 pr_err("Non memory at %s", __func__); 1054 return -1; 1055 } 1056 1057 atom->sched_out_time = timestamp; 1058 1059 if (run_state == 'R') { 1060 atom->state = THREAD_WAIT_CPU; 1061 atom->wake_up_time = atom->sched_out_time; 1062 } 1063 1064 list_add_tail(&atom->list, &atoms->work_list); 1065 return 0; 1066 } 1067 1068 static void 1069 add_runtime_event(struct work_atoms *atoms, u64 delta, 1070 u64 timestamp __maybe_unused) 1071 { 1072 struct work_atom *atom; 1073 1074 BUG_ON(list_empty(&atoms->work_list)); 1075 1076 atom = list_entry(atoms->work_list.prev, struct work_atom, list); 1077 1078 atom->runtime += delta; 1079 atoms->total_runtime += delta; 1080 } 1081 1082 static void 1083 add_sched_in_event(struct work_atoms *atoms, u64 timestamp) 1084 { 1085 struct work_atom *atom; 1086 u64 delta; 1087 1088 if (list_empty(&atoms->work_list)) 1089 return; 1090 1091 atom = list_entry(atoms->work_list.prev, struct work_atom, list); 1092 1093 if (atom->state != THREAD_WAIT_CPU) 1094 return; 1095 1096 if (timestamp < atom->wake_up_time) { 1097 atom->state = THREAD_IGNORE; 1098 return; 1099 } 1100 1101 atom->state = THREAD_SCHED_IN; 1102 atom->sched_in_time = timestamp; 1103 1104 delta = atom->sched_in_time - atom->wake_up_time; 1105 atoms->total_lat += delta; 1106 if (delta > atoms->max_lat) { 1107 atoms->max_lat = delta; 1108 atoms->max_lat_start = atom->wake_up_time; 1109 atoms->max_lat_end = timestamp; 1110 } 1111 atoms->nb_atoms++; 1112 } 1113 1114 static int latency_switch_event(struct perf_sched *sched, 1115 struct evsel *evsel, 1116 struct perf_sample *sample, 1117 struct machine *machine) 1118 { 1119 const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"), 1120 next_pid = evsel__intval(evsel, sample, "next_pid"); 1121 const char prev_state = evsel__taskstate(evsel, sample, "prev_state"); 1122 struct work_atoms *out_events, *in_events; 1123 struct thread *sched_out, *sched_in; 1124 u64 timestamp0, timestamp = sample->time; 1125 int cpu = sample->cpu, err = -1; 1126 s64 delta; 1127 1128 BUG_ON(cpu >= MAX_CPUS || cpu < 0); 1129 1130 timestamp0 = sched->cpu_last_switched[cpu]; 1131 sched->cpu_last_switched[cpu] = timestamp; 1132 if (timestamp0) 1133 delta = timestamp - timestamp0; 1134 else 1135 delta = 0; 1136 1137 if (delta < 0) { 1138 pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta); 1139 return -1; 1140 } 1141 1142 sched_out = machine__findnew_thread(machine, -1, prev_pid); 1143 sched_in = machine__findnew_thread(machine, -1, next_pid); 1144 if (sched_out == NULL || sched_in == NULL) 1145 goto out_put; 1146 1147 out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid); 1148 if (!out_events) { 1149 if (thread_atoms_insert(sched, sched_out)) 1150 goto out_put; 1151 out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid); 1152 if (!out_events) { 1153 pr_err("out-event: Internal tree error"); 1154 goto out_put; 1155 } 1156 } 1157 if (add_sched_out_event(out_events, prev_state, timestamp)) 1158 return -1; 1159 1160 in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid); 1161 if (!in_events) { 1162 if (thread_atoms_insert(sched, sched_in)) 1163 goto out_put; 1164 in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid); 1165 if (!in_events) { 1166 pr_err("in-event: Internal tree error"); 1167 goto out_put; 1168 } 1169 /* 1170 * Take came in we have not heard about yet, 1171 * add in an initial atom in runnable state: 1172 */ 1173 if (add_sched_out_event(in_events, 'R', timestamp)) 1174 goto out_put; 1175 } 1176 add_sched_in_event(in_events, timestamp); 1177 err = 0; 1178 out_put: 1179 thread__put(sched_out); 1180 thread__put(sched_in); 1181 return err; 1182 } 1183 1184 static int latency_runtime_event(struct perf_sched *sched, 1185 struct evsel *evsel, 1186 struct perf_sample *sample, 1187 struct machine *machine) 1188 { 1189 const u32 pid = evsel__intval(evsel, sample, "pid"); 1190 const u64 runtime = evsel__intval(evsel, sample, "runtime"); 1191 struct thread *thread = machine__findnew_thread(machine, -1, pid); 1192 struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid); 1193 u64 timestamp = sample->time; 1194 int cpu = sample->cpu, err = -1; 1195 1196 if (thread == NULL) 1197 return -1; 1198 1199 BUG_ON(cpu >= MAX_CPUS || cpu < 0); 1200 if (!atoms) { 1201 if (thread_atoms_insert(sched, thread)) 1202 goto out_put; 1203 atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid); 1204 if (!atoms) { 1205 pr_err("in-event: Internal tree error"); 1206 goto out_put; 1207 } 1208 if (add_sched_out_event(atoms, 'R', timestamp)) 1209 goto out_put; 1210 } 1211 1212 add_runtime_event(atoms, runtime, timestamp); 1213 err = 0; 1214 out_put: 1215 thread__put(thread); 1216 return err; 1217 } 1218 1219 static int latency_wakeup_event(struct perf_sched *sched, 1220 struct evsel *evsel, 1221 struct perf_sample *sample, 1222 struct machine *machine) 1223 { 1224 const u32 pid = evsel__intval(evsel, sample, "pid"); 1225 struct work_atoms *atoms; 1226 struct work_atom *atom; 1227 struct thread *wakee; 1228 u64 timestamp = sample->time; 1229 int err = -1; 1230 1231 wakee = machine__findnew_thread(machine, -1, pid); 1232 if (wakee == NULL) 1233 return -1; 1234 atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid); 1235 if (!atoms) { 1236 if (thread_atoms_insert(sched, wakee)) 1237 goto out_put; 1238 atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid); 1239 if (!atoms) { 1240 pr_err("wakeup-event: Internal tree error"); 1241 goto out_put; 1242 } 1243 if (add_sched_out_event(atoms, 'S', timestamp)) 1244 goto out_put; 1245 } 1246 1247 BUG_ON(list_empty(&atoms->work_list)); 1248 1249 atom = list_entry(atoms->work_list.prev, struct work_atom, list); 1250 1251 /* 1252 * As we do not guarantee the wakeup event happens when 1253 * task is out of run queue, also may happen when task is 1254 * on run queue and wakeup only change ->state to TASK_RUNNING, 1255 * then we should not set the ->wake_up_time when wake up a 1256 * task which is on run queue. 1257 * 1258 * You WILL be missing events if you've recorded only 1259 * one CPU, or are only looking at only one, so don't 1260 * skip in this case. 1261 */ 1262 if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING) 1263 goto out_ok; 1264 1265 sched->nr_timestamps++; 1266 if (atom->sched_out_time > timestamp) { 1267 sched->nr_unordered_timestamps++; 1268 goto out_ok; 1269 } 1270 1271 atom->state = THREAD_WAIT_CPU; 1272 atom->wake_up_time = timestamp; 1273 out_ok: 1274 err = 0; 1275 out_put: 1276 thread__put(wakee); 1277 return err; 1278 } 1279 1280 static int latency_migrate_task_event(struct perf_sched *sched, 1281 struct evsel *evsel, 1282 struct perf_sample *sample, 1283 struct machine *machine) 1284 { 1285 const u32 pid = evsel__intval(evsel, sample, "pid"); 1286 u64 timestamp = sample->time; 1287 struct work_atoms *atoms; 1288 struct work_atom *atom; 1289 struct thread *migrant; 1290 int err = -1; 1291 1292 /* 1293 * Only need to worry about migration when profiling one CPU. 1294 */ 1295 if (sched->profile_cpu == -1) 1296 return 0; 1297 1298 migrant = machine__findnew_thread(machine, -1, pid); 1299 if (migrant == NULL) 1300 return -1; 1301 atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid); 1302 if (!atoms) { 1303 if (thread_atoms_insert(sched, migrant)) 1304 goto out_put; 1305 register_pid(sched, thread__tid(migrant), thread__comm_str(migrant)); 1306 atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid); 1307 if (!atoms) { 1308 pr_err("migration-event: Internal tree error"); 1309 goto out_put; 1310 } 1311 if (add_sched_out_event(atoms, 'R', timestamp)) 1312 goto out_put; 1313 } 1314 1315 BUG_ON(list_empty(&atoms->work_list)); 1316 1317 atom = list_entry(atoms->work_list.prev, struct work_atom, list); 1318 atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp; 1319 1320 sched->nr_timestamps++; 1321 1322 if (atom->sched_out_time > timestamp) 1323 sched->nr_unordered_timestamps++; 1324 err = 0; 1325 out_put: 1326 thread__put(migrant); 1327 return err; 1328 } 1329 1330 static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list) 1331 { 1332 int i; 1333 int ret; 1334 u64 avg; 1335 char max_lat_start[32], max_lat_end[32]; 1336 1337 if (!work_list->nb_atoms) 1338 return; 1339 /* 1340 * Ignore idle threads: 1341 */ 1342 if (!strcmp(thread__comm_str(work_list->thread), "swapper")) 1343 return; 1344 1345 sched->all_runtime += work_list->total_runtime; 1346 sched->all_count += work_list->nb_atoms; 1347 1348 if (work_list->num_merged > 1) { 1349 ret = printf(" %s:(%d) ", thread__comm_str(work_list->thread), 1350 work_list->num_merged); 1351 } else { 1352 ret = printf(" %s:%d ", thread__comm_str(work_list->thread), 1353 thread__tid(work_list->thread)); 1354 } 1355 1356 for (i = 0; i < 24 - ret; i++) 1357 printf(" "); 1358 1359 avg = work_list->total_lat / work_list->nb_atoms; 1360 timestamp__scnprintf_usec(work_list->max_lat_start, max_lat_start, sizeof(max_lat_start)); 1361 timestamp__scnprintf_usec(work_list->max_lat_end, max_lat_end, sizeof(max_lat_end)); 1362 1363 printf("|%11.3f ms |%9" PRIu64 " | avg:%8.3f ms | max:%8.3f ms | max start: %12s s | max end: %12s s\n", 1364 (double)work_list->total_runtime / NSEC_PER_MSEC, 1365 work_list->nb_atoms, (double)avg / NSEC_PER_MSEC, 1366 (double)work_list->max_lat / NSEC_PER_MSEC, 1367 max_lat_start, max_lat_end); 1368 } 1369 1370 static int pid_cmp(struct work_atoms *l, struct work_atoms *r) 1371 { 1372 pid_t l_tid, r_tid; 1373 1374 if (RC_CHK_EQUAL(l->thread, r->thread)) 1375 return 0; 1376 l_tid = thread__tid(l->thread); 1377 r_tid = thread__tid(r->thread); 1378 if (l_tid < r_tid) 1379 return -1; 1380 if (l_tid > r_tid) 1381 return 1; 1382 return (int)(RC_CHK_ACCESS(l->thread) - RC_CHK_ACCESS(r->thread)); 1383 } 1384 1385 static int avg_cmp(struct work_atoms *l, struct work_atoms *r) 1386 { 1387 u64 avgl, avgr; 1388 1389 if (!l->nb_atoms) 1390 return -1; 1391 1392 if (!r->nb_atoms) 1393 return 1; 1394 1395 avgl = l->total_lat / l->nb_atoms; 1396 avgr = r->total_lat / r->nb_atoms; 1397 1398 if (avgl < avgr) 1399 return -1; 1400 if (avgl > avgr) 1401 return 1; 1402 1403 return 0; 1404 } 1405 1406 static int max_cmp(struct work_atoms *l, struct work_atoms *r) 1407 { 1408 if (l->max_lat < r->max_lat) 1409 return -1; 1410 if (l->max_lat > r->max_lat) 1411 return 1; 1412 1413 return 0; 1414 } 1415 1416 static int switch_cmp(struct work_atoms *l, struct work_atoms *r) 1417 { 1418 if (l->nb_atoms < r->nb_atoms) 1419 return -1; 1420 if (l->nb_atoms > r->nb_atoms) 1421 return 1; 1422 1423 return 0; 1424 } 1425 1426 static int runtime_cmp(struct work_atoms *l, struct work_atoms *r) 1427 { 1428 if (l->total_runtime < r->total_runtime) 1429 return -1; 1430 if (l->total_runtime > r->total_runtime) 1431 return 1; 1432 1433 return 0; 1434 } 1435 1436 static int sort_dimension__add(const char *tok, struct list_head *list) 1437 { 1438 size_t i; 1439 static struct sort_dimension avg_sort_dimension = { 1440 .name = "avg", 1441 .cmp = avg_cmp, 1442 }; 1443 static struct sort_dimension max_sort_dimension = { 1444 .name = "max", 1445 .cmp = max_cmp, 1446 }; 1447 static struct sort_dimension pid_sort_dimension = { 1448 .name = "pid", 1449 .cmp = pid_cmp, 1450 }; 1451 static struct sort_dimension runtime_sort_dimension = { 1452 .name = "runtime", 1453 .cmp = runtime_cmp, 1454 }; 1455 static struct sort_dimension switch_sort_dimension = { 1456 .name = "switch", 1457 .cmp = switch_cmp, 1458 }; 1459 struct sort_dimension *available_sorts[] = { 1460 &pid_sort_dimension, 1461 &avg_sort_dimension, 1462 &max_sort_dimension, 1463 &switch_sort_dimension, 1464 &runtime_sort_dimension, 1465 }; 1466 1467 for (i = 0; i < ARRAY_SIZE(available_sorts); i++) { 1468 if (!strcmp(available_sorts[i]->name, tok)) { 1469 list_add_tail(&available_sorts[i]->list, list); 1470 1471 return 0; 1472 } 1473 } 1474 1475 return -1; 1476 } 1477 1478 static void perf_sched__sort_lat(struct perf_sched *sched) 1479 { 1480 struct rb_node *node; 1481 struct rb_root_cached *root = &sched->atom_root; 1482 again: 1483 for (;;) { 1484 struct work_atoms *data; 1485 node = rb_first_cached(root); 1486 if (!node) 1487 break; 1488 1489 rb_erase_cached(node, root); 1490 data = rb_entry(node, struct work_atoms, node); 1491 __thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list); 1492 } 1493 if (root == &sched->atom_root) { 1494 root = &sched->merged_atom_root; 1495 goto again; 1496 } 1497 } 1498 1499 static int process_sched_wakeup_event(const struct perf_tool *tool, 1500 struct evsel *evsel, 1501 struct perf_sample *sample, 1502 struct machine *machine) 1503 { 1504 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); 1505 1506 if (sched->tp_handler->wakeup_event) 1507 return sched->tp_handler->wakeup_event(sched, evsel, sample, machine); 1508 1509 return 0; 1510 } 1511 1512 static int process_sched_wakeup_ignore(const struct perf_tool *tool __maybe_unused, 1513 struct evsel *evsel __maybe_unused, 1514 struct perf_sample *sample __maybe_unused, 1515 struct machine *machine __maybe_unused) 1516 { 1517 return 0; 1518 } 1519 1520 union map_priv { 1521 void *ptr; 1522 bool color; 1523 }; 1524 1525 static bool thread__has_color(struct thread *thread) 1526 { 1527 union map_priv priv = { 1528 .ptr = thread__priv(thread), 1529 }; 1530 1531 return priv.color; 1532 } 1533 1534 static struct thread* 1535 map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid, pid_t tid) 1536 { 1537 struct thread *thread = machine__findnew_thread(machine, pid, tid); 1538 union map_priv priv = { 1539 .color = false, 1540 }; 1541 1542 if (!sched->map.color_pids || !thread || thread__priv(thread)) 1543 return thread; 1544 1545 if (thread_map__has(sched->map.color_pids, tid)) 1546 priv.color = true; 1547 1548 thread__set_priv(thread, priv.ptr); 1549 return thread; 1550 } 1551 1552 static bool sched_match_task(struct perf_sched *sched, const char *comm_str) 1553 { 1554 bool fuzzy_match = sched->map.fuzzy; 1555 struct strlist *task_names = sched->map.task_names; 1556 struct str_node *node; 1557 1558 strlist__for_each_entry(node, task_names) { 1559 bool match_found = fuzzy_match ? !!strstr(comm_str, node->s) : 1560 !strcmp(comm_str, node->s); 1561 if (match_found) 1562 return true; 1563 } 1564 1565 return false; 1566 } 1567 1568 static void print_sched_map(struct perf_sched *sched, struct perf_cpu this_cpu, int cpus_nr, 1569 const char *color, bool sched_out) 1570 { 1571 for (int i = 0; i < cpus_nr; i++) { 1572 struct perf_cpu cpu = { 1573 .cpu = sched->map.comp ? sched->map.comp_cpus[i].cpu : i, 1574 }; 1575 struct thread *curr_thread = sched->curr_thread[cpu.cpu]; 1576 struct thread *curr_out_thread = sched->curr_out_thread[cpu.cpu]; 1577 struct thread_runtime *curr_tr; 1578 const char *pid_color = color; 1579 const char *cpu_color = color; 1580 char symbol = ' '; 1581 struct thread *thread_to_check = sched_out ? curr_out_thread : curr_thread; 1582 1583 if (thread_to_check && thread__has_color(thread_to_check)) 1584 pid_color = COLOR_PIDS; 1585 1586 if (sched->map.color_cpus && perf_cpu_map__has(sched->map.color_cpus, cpu)) 1587 cpu_color = COLOR_CPUS; 1588 1589 if (cpu.cpu == this_cpu.cpu) 1590 symbol = '*'; 1591 1592 color_fprintf(stdout, cpu.cpu != this_cpu.cpu ? color : cpu_color, "%c", symbol); 1593 1594 thread_to_check = sched_out ? sched->curr_out_thread[cpu.cpu] : 1595 sched->curr_thread[cpu.cpu]; 1596 1597 if (thread_to_check) { 1598 curr_tr = thread__get_runtime(thread_to_check); 1599 if (curr_tr == NULL) 1600 return; 1601 1602 if (sched_out) { 1603 if (cpu.cpu == this_cpu.cpu) 1604 color_fprintf(stdout, color, "- "); 1605 else { 1606 curr_tr = thread__get_runtime(sched->curr_thread[cpu.cpu]); 1607 if (curr_tr != NULL) 1608 color_fprintf(stdout, pid_color, "%2s ", 1609 curr_tr->shortname); 1610 } 1611 } else 1612 color_fprintf(stdout, pid_color, "%2s ", curr_tr->shortname); 1613 } else 1614 color_fprintf(stdout, color, " "); 1615 } 1616 } 1617 1618 static int map_switch_event(struct perf_sched *sched, struct evsel *evsel, 1619 struct perf_sample *sample, struct machine *machine) 1620 { 1621 const u32 next_pid = evsel__intval(evsel, sample, "next_pid"); 1622 const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"); 1623 struct thread *sched_in, *sched_out; 1624 struct thread_runtime *tr; 1625 int new_shortname; 1626 u64 timestamp0, timestamp = sample->time; 1627 s64 delta; 1628 struct perf_cpu this_cpu = { 1629 .cpu = sample->cpu, 1630 }; 1631 int cpus_nr; 1632 int proceed; 1633 bool new_cpu = false; 1634 const char *color = PERF_COLOR_NORMAL; 1635 char stimestamp[32]; 1636 const char *str; 1637 1638 BUG_ON(this_cpu.cpu >= MAX_CPUS || this_cpu.cpu < 0); 1639 1640 if (this_cpu.cpu > sched->max_cpu.cpu) 1641 sched->max_cpu = this_cpu; 1642 1643 if (sched->map.comp) { 1644 cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS); 1645 if (!__test_and_set_bit(this_cpu.cpu, sched->map.comp_cpus_mask)) { 1646 sched->map.comp_cpus[cpus_nr++] = this_cpu; 1647 new_cpu = true; 1648 } 1649 } else 1650 cpus_nr = sched->max_cpu.cpu; 1651 1652 timestamp0 = sched->cpu_last_switched[this_cpu.cpu]; 1653 sched->cpu_last_switched[this_cpu.cpu] = timestamp; 1654 if (timestamp0) 1655 delta = timestamp - timestamp0; 1656 else 1657 delta = 0; 1658 1659 if (delta < 0) { 1660 pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta); 1661 return -1; 1662 } 1663 1664 sched_in = map__findnew_thread(sched, machine, -1, next_pid); 1665 sched_out = map__findnew_thread(sched, machine, -1, prev_pid); 1666 if (sched_in == NULL || sched_out == NULL) 1667 return -1; 1668 1669 tr = thread__get_runtime(sched_in); 1670 if (tr == NULL) { 1671 thread__put(sched_in); 1672 return -1; 1673 } 1674 1675 sched->curr_thread[this_cpu.cpu] = thread__get(sched_in); 1676 sched->curr_out_thread[this_cpu.cpu] = thread__get(sched_out); 1677 1678 str = thread__comm_str(sched_in); 1679 new_shortname = 0; 1680 if (!tr->shortname[0]) { 1681 if (!strcmp(thread__comm_str(sched_in), "swapper")) { 1682 /* 1683 * Don't allocate a letter-number for swapper:0 1684 * as a shortname. Instead, we use '.' for it. 1685 */ 1686 tr->shortname[0] = '.'; 1687 tr->shortname[1] = ' '; 1688 } else if (!sched->map.task_name || sched_match_task(sched, str)) { 1689 tr->shortname[0] = sched->next_shortname1; 1690 tr->shortname[1] = sched->next_shortname2; 1691 1692 if (sched->next_shortname1 < 'Z') { 1693 sched->next_shortname1++; 1694 } else { 1695 sched->next_shortname1 = 'A'; 1696 if (sched->next_shortname2 < '9') 1697 sched->next_shortname2++; 1698 else 1699 sched->next_shortname2 = '0'; 1700 } 1701 } else { 1702 tr->shortname[0] = '-'; 1703 tr->shortname[1] = ' '; 1704 } 1705 new_shortname = 1; 1706 } 1707 1708 if (sched->map.cpus && !perf_cpu_map__has(sched->map.cpus, this_cpu)) 1709 goto out; 1710 1711 proceed = 0; 1712 str = thread__comm_str(sched_in); 1713 /* 1714 * Check which of sched_in and sched_out matches the passed --task-name 1715 * arguments and call the corresponding print_sched_map. 1716 */ 1717 if (sched->map.task_name && !sched_match_task(sched, str)) { 1718 if (!sched_match_task(sched, thread__comm_str(sched_out))) 1719 goto out; 1720 else 1721 goto sched_out; 1722 1723 } else { 1724 str = thread__comm_str(sched_out); 1725 if (!(sched->map.task_name && !sched_match_task(sched, str))) 1726 proceed = 1; 1727 } 1728 1729 printf(" "); 1730 1731 print_sched_map(sched, this_cpu, cpus_nr, color, false); 1732 1733 timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp)); 1734 color_fprintf(stdout, color, " %12s secs ", stimestamp); 1735 if (new_shortname || tr->comm_changed || (verbose > 0 && thread__tid(sched_in))) { 1736 const char *pid_color = color; 1737 1738 if (thread__has_color(sched_in)) 1739 pid_color = COLOR_PIDS; 1740 1741 color_fprintf(stdout, pid_color, "%s => %s:%d", 1742 tr->shortname, thread__comm_str(sched_in), thread__tid(sched_in)); 1743 tr->comm_changed = false; 1744 } 1745 1746 if (sched->map.comp && new_cpu) 1747 color_fprintf(stdout, color, " (CPU %d)", this_cpu.cpu); 1748 1749 if (proceed != 1) { 1750 color_fprintf(stdout, color, "\n"); 1751 goto out; 1752 } 1753 1754 sched_out: 1755 if (sched->map.task_name) { 1756 tr = thread__get_runtime(sched->curr_out_thread[this_cpu.cpu]); 1757 if (strcmp(tr->shortname, "") == 0) 1758 goto out; 1759 1760 if (proceed == 1) 1761 color_fprintf(stdout, color, "\n"); 1762 1763 printf(" "); 1764 print_sched_map(sched, this_cpu, cpus_nr, color, true); 1765 timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp)); 1766 color_fprintf(stdout, color, " %12s secs ", stimestamp); 1767 } 1768 1769 color_fprintf(stdout, color, "\n"); 1770 1771 out: 1772 if (sched->map.task_name) 1773 thread__put(sched_out); 1774 1775 thread__put(sched_in); 1776 1777 return 0; 1778 } 1779 1780 static int process_sched_switch_event(const struct perf_tool *tool, 1781 struct evsel *evsel, 1782 struct perf_sample *sample, 1783 struct machine *machine) 1784 { 1785 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); 1786 int this_cpu = sample->cpu, err = 0; 1787 u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"), 1788 next_pid = evsel__intval(evsel, sample, "next_pid"); 1789 1790 if (sched->curr_pid[this_cpu] != (u32)-1) { 1791 /* 1792 * Are we trying to switch away a PID that is 1793 * not current? 1794 */ 1795 if (sched->curr_pid[this_cpu] != prev_pid) 1796 sched->nr_context_switch_bugs++; 1797 } 1798 1799 if (sched->tp_handler->switch_event) 1800 err = sched->tp_handler->switch_event(sched, evsel, sample, machine); 1801 1802 sched->curr_pid[this_cpu] = next_pid; 1803 return err; 1804 } 1805 1806 static int process_sched_runtime_event(const struct perf_tool *tool, 1807 struct evsel *evsel, 1808 struct perf_sample *sample, 1809 struct machine *machine) 1810 { 1811 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); 1812 1813 if (sched->tp_handler->runtime_event) 1814 return sched->tp_handler->runtime_event(sched, evsel, sample, machine); 1815 1816 return 0; 1817 } 1818 1819 static int perf_sched__process_fork_event(const struct perf_tool *tool, 1820 union perf_event *event, 1821 struct perf_sample *sample, 1822 struct machine *machine) 1823 { 1824 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); 1825 1826 /* run the fork event through the perf machinery */ 1827 perf_event__process_fork(tool, event, sample, machine); 1828 1829 /* and then run additional processing needed for this command */ 1830 if (sched->tp_handler->fork_event) 1831 return sched->tp_handler->fork_event(sched, event, machine); 1832 1833 return 0; 1834 } 1835 1836 static int process_sched_migrate_task_event(const struct perf_tool *tool, 1837 struct evsel *evsel, 1838 struct perf_sample *sample, 1839 struct machine *machine) 1840 { 1841 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); 1842 1843 if (sched->tp_handler->migrate_task_event) 1844 return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine); 1845 1846 return 0; 1847 } 1848 1849 typedef int (*tracepoint_handler)(const struct perf_tool *tool, 1850 struct evsel *evsel, 1851 struct perf_sample *sample, 1852 struct machine *machine); 1853 1854 static int perf_sched__process_tracepoint_sample(const struct perf_tool *tool __maybe_unused, 1855 union perf_event *event __maybe_unused, 1856 struct perf_sample *sample, 1857 struct evsel *evsel, 1858 struct machine *machine) 1859 { 1860 int err = 0; 1861 1862 if (evsel->handler != NULL) { 1863 tracepoint_handler f = evsel->handler; 1864 err = f(tool, evsel, sample, machine); 1865 } 1866 1867 return err; 1868 } 1869 1870 static int perf_sched__process_comm(const struct perf_tool *tool __maybe_unused, 1871 union perf_event *event, 1872 struct perf_sample *sample, 1873 struct machine *machine) 1874 { 1875 struct thread *thread; 1876 struct thread_runtime *tr; 1877 int err; 1878 1879 err = perf_event__process_comm(tool, event, sample, machine); 1880 if (err) 1881 return err; 1882 1883 thread = machine__find_thread(machine, sample->pid, sample->tid); 1884 if (!thread) { 1885 pr_err("Internal error: can't find thread\n"); 1886 return -1; 1887 } 1888 1889 tr = thread__get_runtime(thread); 1890 if (tr == NULL) { 1891 thread__put(thread); 1892 return -1; 1893 } 1894 1895 tr->comm_changed = true; 1896 thread__put(thread); 1897 1898 return 0; 1899 } 1900 1901 static int perf_sched__read_events(struct perf_sched *sched) 1902 { 1903 struct evsel_str_handler handlers[] = { 1904 { "sched:sched_switch", process_sched_switch_event, }, 1905 { "sched:sched_stat_runtime", process_sched_runtime_event, }, 1906 { "sched:sched_wakeup", process_sched_wakeup_event, }, 1907 { "sched:sched_waking", process_sched_wakeup_event, }, 1908 { "sched:sched_wakeup_new", process_sched_wakeup_event, }, 1909 { "sched:sched_migrate_task", process_sched_migrate_task_event, }, 1910 }; 1911 struct perf_session *session; 1912 struct perf_data data = { 1913 .path = input_name, 1914 .mode = PERF_DATA_MODE_READ, 1915 .force = sched->force, 1916 }; 1917 int rc = -1; 1918 1919 session = perf_session__new(&data, &sched->tool); 1920 if (IS_ERR(session)) { 1921 pr_debug("Error creating perf session"); 1922 return PTR_ERR(session); 1923 } 1924 1925 symbol__init(&session->header.env); 1926 1927 /* prefer sched_waking if it is captured */ 1928 if (evlist__find_tracepoint_by_name(session->evlist, "sched:sched_waking")) 1929 handlers[2].handler = process_sched_wakeup_ignore; 1930 1931 if (perf_session__set_tracepoints_handlers(session, handlers)) 1932 goto out_delete; 1933 1934 if (perf_session__has_traces(session, "record -R")) { 1935 int err = perf_session__process_events(session); 1936 if (err) { 1937 pr_err("Failed to process events, error %d", err); 1938 goto out_delete; 1939 } 1940 1941 sched->nr_events = session->evlist->stats.nr_events[0]; 1942 sched->nr_lost_events = session->evlist->stats.total_lost; 1943 sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST]; 1944 } 1945 1946 rc = 0; 1947 out_delete: 1948 perf_session__delete(session); 1949 return rc; 1950 } 1951 1952 /* 1953 * scheduling times are printed as msec.usec 1954 */ 1955 static inline void print_sched_time(unsigned long long nsecs, int width) 1956 { 1957 unsigned long msecs; 1958 unsigned long usecs; 1959 1960 msecs = nsecs / NSEC_PER_MSEC; 1961 nsecs -= msecs * NSEC_PER_MSEC; 1962 usecs = nsecs / NSEC_PER_USEC; 1963 printf("%*lu.%03lu ", width, msecs, usecs); 1964 } 1965 1966 /* 1967 * returns runtime data for event, allocating memory for it the 1968 * first time it is used. 1969 */ 1970 static struct evsel_runtime *evsel__get_runtime(struct evsel *evsel) 1971 { 1972 struct evsel_runtime *r = evsel->priv; 1973 1974 if (r == NULL) { 1975 r = zalloc(sizeof(struct evsel_runtime)); 1976 evsel->priv = r; 1977 } 1978 1979 return r; 1980 } 1981 1982 /* 1983 * save last time event was seen per cpu 1984 */ 1985 static void evsel__save_time(struct evsel *evsel, u64 timestamp, u32 cpu) 1986 { 1987 struct evsel_runtime *r = evsel__get_runtime(evsel); 1988 1989 if (r == NULL) 1990 return; 1991 1992 if ((cpu >= r->ncpu) || (r->last_time == NULL)) { 1993 int i, n = __roundup_pow_of_two(cpu+1); 1994 void *p = r->last_time; 1995 1996 p = realloc(r->last_time, n * sizeof(u64)); 1997 if (!p) 1998 return; 1999 2000 r->last_time = p; 2001 for (i = r->ncpu; i < n; ++i) 2002 r->last_time[i] = (u64) 0; 2003 2004 r->ncpu = n; 2005 } 2006 2007 r->last_time[cpu] = timestamp; 2008 } 2009 2010 /* returns last time this event was seen on the given cpu */ 2011 static u64 evsel__get_time(struct evsel *evsel, u32 cpu) 2012 { 2013 struct evsel_runtime *r = evsel__get_runtime(evsel); 2014 2015 if ((r == NULL) || (r->last_time == NULL) || (cpu >= r->ncpu)) 2016 return 0; 2017 2018 return r->last_time[cpu]; 2019 } 2020 2021 static int comm_width = 30; 2022 2023 static char *timehist_get_commstr(struct thread *thread) 2024 { 2025 static char str[32]; 2026 const char *comm = thread__comm_str(thread); 2027 pid_t tid = thread__tid(thread); 2028 pid_t pid = thread__pid(thread); 2029 int n; 2030 2031 if (pid == 0) 2032 n = scnprintf(str, sizeof(str), "%s", comm); 2033 2034 else if (tid != pid) 2035 n = scnprintf(str, sizeof(str), "%s[%d/%d]", comm, tid, pid); 2036 2037 else 2038 n = scnprintf(str, sizeof(str), "%s[%d]", comm, tid); 2039 2040 if (n > comm_width) 2041 comm_width = n; 2042 2043 return str; 2044 } 2045 2046 /* prio field format: xxx or xxx->yyy */ 2047 #define MAX_PRIO_STR_LEN 8 2048 static char *timehist_get_priostr(struct evsel *evsel, 2049 struct thread *thread, 2050 struct perf_sample *sample) 2051 { 2052 static char prio_str[16]; 2053 int prev_prio = (int)evsel__intval(evsel, sample, "prev_prio"); 2054 struct thread_runtime *tr = thread__priv(thread); 2055 2056 if (tr->prio != prev_prio && tr->prio != -1) 2057 scnprintf(prio_str, sizeof(prio_str), "%d->%d", tr->prio, prev_prio); 2058 else 2059 scnprintf(prio_str, sizeof(prio_str), "%d", prev_prio); 2060 2061 return prio_str; 2062 } 2063 2064 static void timehist_header(struct perf_sched *sched) 2065 { 2066 u32 ncpus = sched->max_cpu.cpu + 1; 2067 u32 i, j; 2068 2069 printf("%15s %6s ", "time", "cpu"); 2070 2071 if (sched->show_cpu_visual) { 2072 printf(" "); 2073 for (i = 0, j = 0; i < ncpus; ++i) { 2074 printf("%x", j++); 2075 if (j > 15) 2076 j = 0; 2077 } 2078 printf(" "); 2079 } 2080 2081 printf(" %-*s", comm_width, "task name"); 2082 2083 if (sched->show_prio) 2084 printf(" %-*s", MAX_PRIO_STR_LEN, "prio"); 2085 2086 printf(" %9s %9s %9s", "wait time", "sch delay", "run time"); 2087 2088 if (sched->pre_migrations) 2089 printf(" %9s", "pre-mig time"); 2090 2091 if (sched->show_state) 2092 printf(" %s", "state"); 2093 2094 printf("\n"); 2095 2096 /* 2097 * units row 2098 */ 2099 printf("%15s %-6s ", "", ""); 2100 2101 if (sched->show_cpu_visual) 2102 printf(" %*s ", ncpus, ""); 2103 2104 printf(" %-*s", comm_width, "[tid/pid]"); 2105 2106 if (sched->show_prio) 2107 printf(" %-*s", MAX_PRIO_STR_LEN, ""); 2108 2109 printf(" %9s %9s %9s", "(msec)", "(msec)", "(msec)"); 2110 2111 if (sched->pre_migrations) 2112 printf(" %9s", "(msec)"); 2113 2114 printf("\n"); 2115 2116 /* 2117 * separator 2118 */ 2119 printf("%.15s %.6s ", graph_dotted_line, graph_dotted_line); 2120 2121 if (sched->show_cpu_visual) 2122 printf(" %.*s ", ncpus, graph_dotted_line); 2123 2124 printf(" %.*s", comm_width, graph_dotted_line); 2125 2126 if (sched->show_prio) 2127 printf(" %.*s", MAX_PRIO_STR_LEN, graph_dotted_line); 2128 2129 printf(" %.9s %.9s %.9s", graph_dotted_line, graph_dotted_line, graph_dotted_line); 2130 2131 if (sched->pre_migrations) 2132 printf(" %.9s", graph_dotted_line); 2133 2134 if (sched->show_state) 2135 printf(" %.5s", graph_dotted_line); 2136 2137 printf("\n"); 2138 } 2139 2140 static void timehist_print_sample(struct perf_sched *sched, 2141 struct evsel *evsel, 2142 struct perf_sample *sample, 2143 struct addr_location *al, 2144 struct thread *thread, 2145 u64 t, const char state) 2146 { 2147 struct thread_runtime *tr = thread__priv(thread); 2148 const char *next_comm = evsel__strval(evsel, sample, "next_comm"); 2149 const u32 next_pid = evsel__intval(evsel, sample, "next_pid"); 2150 u32 max_cpus = sched->max_cpu.cpu + 1; 2151 char tstr[64]; 2152 char nstr[30]; 2153 u64 wait_time; 2154 2155 if (cpu_list && !test_bit(sample->cpu, cpu_bitmap)) 2156 return; 2157 2158 timestamp__scnprintf_usec(t, tstr, sizeof(tstr)); 2159 printf("%15s [%04d] ", tstr, sample->cpu); 2160 2161 if (sched->show_cpu_visual) { 2162 u32 i; 2163 char c; 2164 2165 printf(" "); 2166 for (i = 0; i < max_cpus; ++i) { 2167 /* flag idle times with 'i'; others are sched events */ 2168 if (i == sample->cpu) 2169 c = (thread__tid(thread) == 0) ? 'i' : 's'; 2170 else 2171 c = ' '; 2172 printf("%c", c); 2173 } 2174 printf(" "); 2175 } 2176 2177 printf(" %-*s ", comm_width, timehist_get_commstr(thread)); 2178 2179 if (sched->show_prio) 2180 printf(" %-*s ", MAX_PRIO_STR_LEN, timehist_get_priostr(evsel, thread, sample)); 2181 2182 wait_time = tr->dt_sleep + tr->dt_iowait + tr->dt_preempt; 2183 print_sched_time(wait_time, 6); 2184 2185 print_sched_time(tr->dt_delay, 6); 2186 print_sched_time(tr->dt_run, 6); 2187 if (sched->pre_migrations) 2188 print_sched_time(tr->dt_pre_mig, 6); 2189 2190 if (sched->show_state) 2191 printf(" %5c ", thread__tid(thread) == 0 ? 'I' : state); 2192 2193 if (sched->show_next) { 2194 snprintf(nstr, sizeof(nstr), "next: %s[%d]", next_comm, next_pid); 2195 printf(" %-*s", comm_width, nstr); 2196 } 2197 2198 if (sched->show_wakeups && !sched->show_next) 2199 printf(" %-*s", comm_width, ""); 2200 2201 if (thread__tid(thread) == 0) 2202 goto out; 2203 2204 if (sched->show_callchain) 2205 printf(" "); 2206 2207 sample__fprintf_sym(sample, al, 0, 2208 EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE | 2209 EVSEL__PRINT_CALLCHAIN_ARROW | 2210 EVSEL__PRINT_SKIP_IGNORED, 2211 get_tls_callchain_cursor(), symbol_conf.bt_stop_list, stdout); 2212 2213 out: 2214 printf("\n"); 2215 } 2216 2217 /* 2218 * Explanation of delta-time stats: 2219 * 2220 * t = time of current schedule out event 2221 * tprev = time of previous sched out event 2222 * also time of schedule-in event for current task 2223 * last_time = time of last sched change event for current task 2224 * (i.e, time process was last scheduled out) 2225 * ready_to_run = time of wakeup for current task 2226 * migrated = time of task migration to another CPU 2227 * 2228 * -----|-------------|-------------|-------------|-------------|----- 2229 * last ready migrated tprev t 2230 * time to run 2231 * 2232 * |---------------- dt_wait ----------------| 2233 * |--------- dt_delay ---------|-- dt_run --| 2234 * |- dt_pre_mig -| 2235 * 2236 * dt_run = run time of current task 2237 * dt_wait = time between last schedule out event for task and tprev 2238 * represents time spent off the cpu 2239 * dt_delay = time between wakeup and schedule-in of task 2240 * dt_pre_mig = time between wakeup and migration to another CPU 2241 */ 2242 2243 static void timehist_update_runtime_stats(struct thread_runtime *r, 2244 u64 t, u64 tprev) 2245 { 2246 r->dt_delay = 0; 2247 r->dt_sleep = 0; 2248 r->dt_iowait = 0; 2249 r->dt_preempt = 0; 2250 r->dt_run = 0; 2251 r->dt_pre_mig = 0; 2252 2253 if (tprev) { 2254 r->dt_run = t - tprev; 2255 if (r->ready_to_run) { 2256 if (r->ready_to_run > tprev) 2257 pr_debug("time travel: wakeup time for task > previous sched_switch event\n"); 2258 else 2259 r->dt_delay = tprev - r->ready_to_run; 2260 2261 if ((r->migrated > r->ready_to_run) && (r->migrated < tprev)) 2262 r->dt_pre_mig = r->migrated - r->ready_to_run; 2263 } 2264 2265 if (r->last_time > tprev) 2266 pr_debug("time travel: last sched out time for task > previous sched_switch event\n"); 2267 else if (r->last_time) { 2268 u64 dt_wait = tprev - r->last_time; 2269 2270 if (r->last_state == 'R') 2271 r->dt_preempt = dt_wait; 2272 else if (r->last_state == 'D') 2273 r->dt_iowait = dt_wait; 2274 else 2275 r->dt_sleep = dt_wait; 2276 } 2277 } 2278 2279 update_stats(&r->run_stats, r->dt_run); 2280 2281 r->total_run_time += r->dt_run; 2282 r->total_delay_time += r->dt_delay; 2283 r->total_sleep_time += r->dt_sleep; 2284 r->total_iowait_time += r->dt_iowait; 2285 r->total_preempt_time += r->dt_preempt; 2286 r->total_pre_mig_time += r->dt_pre_mig; 2287 } 2288 2289 static bool is_idle_sample(struct perf_sample *sample, 2290 struct evsel *evsel) 2291 { 2292 /* pid 0 == swapper == idle task */ 2293 if (evsel__name_is(evsel, "sched:sched_switch")) 2294 return evsel__intval(evsel, sample, "prev_pid") == 0; 2295 2296 return sample->pid == 0; 2297 } 2298 2299 static void save_task_callchain(struct perf_sched *sched, 2300 struct perf_sample *sample, 2301 struct evsel *evsel, 2302 struct machine *machine) 2303 { 2304 struct callchain_cursor *cursor; 2305 struct thread *thread; 2306 2307 /* want main thread for process - has maps */ 2308 thread = machine__findnew_thread(machine, sample->pid, sample->pid); 2309 if (thread == NULL) { 2310 pr_debug("Failed to get thread for pid %d.\n", sample->pid); 2311 return; 2312 } 2313 2314 if (!sched->show_callchain || sample->callchain == NULL) 2315 return; 2316 2317 cursor = get_tls_callchain_cursor(); 2318 2319 if (thread__resolve_callchain(thread, cursor, evsel, sample, 2320 NULL, NULL, sched->max_stack + 2) != 0) { 2321 if (verbose > 0) 2322 pr_err("Failed to resolve callchain. Skipping\n"); 2323 2324 return; 2325 } 2326 2327 callchain_cursor_commit(cursor); 2328 2329 while (true) { 2330 struct callchain_cursor_node *node; 2331 struct symbol *sym; 2332 2333 node = callchain_cursor_current(cursor); 2334 if (node == NULL) 2335 break; 2336 2337 sym = node->ms.sym; 2338 if (sym) { 2339 if (!strcmp(sym->name, "schedule") || 2340 !strcmp(sym->name, "__schedule") || 2341 !strcmp(sym->name, "preempt_schedule")) 2342 sym->ignore = 1; 2343 } 2344 2345 callchain_cursor_advance(cursor); 2346 } 2347 } 2348 2349 static int init_idle_thread(struct thread *thread) 2350 { 2351 struct idle_thread_runtime *itr; 2352 2353 thread__set_comm(thread, idle_comm, 0); 2354 2355 itr = zalloc(sizeof(*itr)); 2356 if (itr == NULL) 2357 return -ENOMEM; 2358 2359 init_prio(&itr->tr); 2360 init_stats(&itr->tr.run_stats); 2361 callchain_init(&itr->callchain); 2362 callchain_cursor_reset(&itr->cursor); 2363 thread__set_priv(thread, itr); 2364 2365 return 0; 2366 } 2367 2368 /* 2369 * Track idle stats per cpu by maintaining a local thread 2370 * struct for the idle task on each cpu. 2371 */ 2372 static int init_idle_threads(int ncpu) 2373 { 2374 int i, ret; 2375 2376 idle_threads = zalloc(ncpu * sizeof(struct thread *)); 2377 if (!idle_threads) 2378 return -ENOMEM; 2379 2380 idle_max_cpu = ncpu; 2381 2382 /* allocate the actual thread struct if needed */ 2383 for (i = 0; i < ncpu; ++i) { 2384 idle_threads[i] = thread__new(0, 0); 2385 if (idle_threads[i] == NULL) 2386 return -ENOMEM; 2387 2388 ret = init_idle_thread(idle_threads[i]); 2389 if (ret < 0) 2390 return ret; 2391 } 2392 2393 return 0; 2394 } 2395 2396 static void free_idle_threads(void) 2397 { 2398 int i; 2399 2400 if (idle_threads == NULL) 2401 return; 2402 2403 for (i = 0; i < idle_max_cpu; ++i) { 2404 if ((idle_threads[i])) 2405 thread__delete(idle_threads[i]); 2406 } 2407 2408 free(idle_threads); 2409 } 2410 2411 static struct thread *get_idle_thread(int cpu) 2412 { 2413 /* 2414 * expand/allocate array of pointers to local thread 2415 * structs if needed 2416 */ 2417 if ((cpu >= idle_max_cpu) || (idle_threads == NULL)) { 2418 int i, j = __roundup_pow_of_two(cpu+1); 2419 void *p; 2420 2421 p = realloc(idle_threads, j * sizeof(struct thread *)); 2422 if (!p) 2423 return NULL; 2424 2425 idle_threads = (struct thread **) p; 2426 for (i = idle_max_cpu; i < j; ++i) 2427 idle_threads[i] = NULL; 2428 2429 idle_max_cpu = j; 2430 } 2431 2432 /* allocate a new thread struct if needed */ 2433 if (idle_threads[cpu] == NULL) { 2434 idle_threads[cpu] = thread__new(0, 0); 2435 2436 if (idle_threads[cpu]) { 2437 if (init_idle_thread(idle_threads[cpu]) < 0) 2438 return NULL; 2439 } 2440 } 2441 2442 return idle_threads[cpu]; 2443 } 2444 2445 static void save_idle_callchain(struct perf_sched *sched, 2446 struct idle_thread_runtime *itr, 2447 struct perf_sample *sample) 2448 { 2449 struct callchain_cursor *cursor; 2450 2451 if (!sched->show_callchain || sample->callchain == NULL) 2452 return; 2453 2454 cursor = get_tls_callchain_cursor(); 2455 if (cursor == NULL) 2456 return; 2457 2458 callchain_cursor__copy(&itr->cursor, cursor); 2459 } 2460 2461 static struct thread *timehist_get_thread(struct perf_sched *sched, 2462 struct perf_sample *sample, 2463 struct machine *machine, 2464 struct evsel *evsel) 2465 { 2466 struct thread *thread; 2467 2468 if (is_idle_sample(sample, evsel)) { 2469 thread = get_idle_thread(sample->cpu); 2470 if (thread == NULL) 2471 pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu); 2472 2473 } else { 2474 /* there were samples with tid 0 but non-zero pid */ 2475 thread = machine__findnew_thread(machine, sample->pid, 2476 sample->tid ?: sample->pid); 2477 if (thread == NULL) { 2478 pr_debug("Failed to get thread for tid %d. skipping sample.\n", 2479 sample->tid); 2480 } 2481 2482 save_task_callchain(sched, sample, evsel, machine); 2483 if (sched->idle_hist) { 2484 struct thread *idle; 2485 struct idle_thread_runtime *itr; 2486 2487 idle = get_idle_thread(sample->cpu); 2488 if (idle == NULL) { 2489 pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu); 2490 return NULL; 2491 } 2492 2493 itr = thread__priv(idle); 2494 if (itr == NULL) 2495 return NULL; 2496 2497 itr->last_thread = thread; 2498 2499 /* copy task callchain when entering to idle */ 2500 if (evsel__intval(evsel, sample, "next_pid") == 0) 2501 save_idle_callchain(sched, itr, sample); 2502 } 2503 } 2504 2505 return thread; 2506 } 2507 2508 static bool timehist_skip_sample(struct perf_sched *sched, 2509 struct thread *thread, 2510 struct evsel *evsel, 2511 struct perf_sample *sample) 2512 { 2513 bool rc = false; 2514 int prio = -1; 2515 struct thread_runtime *tr = NULL; 2516 2517 if (thread__is_filtered(thread)) { 2518 rc = true; 2519 sched->skipped_samples++; 2520 } 2521 2522 if (sched->prio_str) { 2523 /* 2524 * Because priority may be changed during task execution, 2525 * first read priority from prev sched_in event for current task. 2526 * If prev sched_in event is not saved, then read priority from 2527 * current task sched_out event. 2528 */ 2529 tr = thread__get_runtime(thread); 2530 if (tr && tr->prio != -1) 2531 prio = tr->prio; 2532 else if (evsel__name_is(evsel, "sched:sched_switch")) 2533 prio = evsel__intval(evsel, sample, "prev_prio"); 2534 2535 if (prio != -1 && !test_bit(prio, sched->prio_bitmap)) { 2536 rc = true; 2537 sched->skipped_samples++; 2538 } 2539 } 2540 2541 if (sched->idle_hist) { 2542 if (!evsel__name_is(evsel, "sched:sched_switch")) 2543 rc = true; 2544 else if (evsel__intval(evsel, sample, "prev_pid") != 0 && 2545 evsel__intval(evsel, sample, "next_pid") != 0) 2546 rc = true; 2547 } 2548 2549 return rc; 2550 } 2551 2552 static void timehist_print_wakeup_event(struct perf_sched *sched, 2553 struct evsel *evsel, 2554 struct perf_sample *sample, 2555 struct machine *machine, 2556 struct thread *awakened) 2557 { 2558 struct thread *thread; 2559 char tstr[64]; 2560 2561 thread = machine__findnew_thread(machine, sample->pid, sample->tid); 2562 if (thread == NULL) 2563 return; 2564 2565 /* show wakeup unless both awakee and awaker are filtered */ 2566 if (timehist_skip_sample(sched, thread, evsel, sample) && 2567 timehist_skip_sample(sched, awakened, evsel, sample)) { 2568 return; 2569 } 2570 2571 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr)); 2572 printf("%15s [%04d] ", tstr, sample->cpu); 2573 if (sched->show_cpu_visual) 2574 printf(" %*s ", sched->max_cpu.cpu + 1, ""); 2575 2576 printf(" %-*s ", comm_width, timehist_get_commstr(thread)); 2577 2578 /* dt spacer */ 2579 printf(" %9s %9s %9s ", "", "", ""); 2580 2581 printf("awakened: %s", timehist_get_commstr(awakened)); 2582 2583 printf("\n"); 2584 } 2585 2586 static int timehist_sched_wakeup_ignore(const struct perf_tool *tool __maybe_unused, 2587 union perf_event *event __maybe_unused, 2588 struct evsel *evsel __maybe_unused, 2589 struct perf_sample *sample __maybe_unused, 2590 struct machine *machine __maybe_unused) 2591 { 2592 return 0; 2593 } 2594 2595 static int timehist_sched_wakeup_event(const struct perf_tool *tool, 2596 union perf_event *event __maybe_unused, 2597 struct evsel *evsel, 2598 struct perf_sample *sample, 2599 struct machine *machine) 2600 { 2601 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); 2602 struct thread *thread; 2603 struct thread_runtime *tr = NULL; 2604 /* want pid of awakened task not pid in sample */ 2605 const u32 pid = evsel__intval(evsel, sample, "pid"); 2606 2607 thread = machine__findnew_thread(machine, 0, pid); 2608 if (thread == NULL) 2609 return -1; 2610 2611 tr = thread__get_runtime(thread); 2612 if (tr == NULL) 2613 return -1; 2614 2615 if (tr->ready_to_run == 0) 2616 tr->ready_to_run = sample->time; 2617 2618 /* show wakeups if requested */ 2619 if (sched->show_wakeups && 2620 !perf_time__skip_sample(&sched->ptime, sample->time)) 2621 timehist_print_wakeup_event(sched, evsel, sample, machine, thread); 2622 2623 return 0; 2624 } 2625 2626 static void timehist_print_migration_event(struct perf_sched *sched, 2627 struct evsel *evsel, 2628 struct perf_sample *sample, 2629 struct machine *machine, 2630 struct thread *migrated) 2631 { 2632 struct thread *thread; 2633 char tstr[64]; 2634 u32 max_cpus; 2635 u32 ocpu, dcpu; 2636 2637 if (sched->summary_only) 2638 return; 2639 2640 max_cpus = sched->max_cpu.cpu + 1; 2641 ocpu = evsel__intval(evsel, sample, "orig_cpu"); 2642 dcpu = evsel__intval(evsel, sample, "dest_cpu"); 2643 2644 thread = machine__findnew_thread(machine, sample->pid, sample->tid); 2645 if (thread == NULL) 2646 return; 2647 2648 if (timehist_skip_sample(sched, thread, evsel, sample) && 2649 timehist_skip_sample(sched, migrated, evsel, sample)) { 2650 return; 2651 } 2652 2653 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr)); 2654 printf("%15s [%04d] ", tstr, sample->cpu); 2655 2656 if (sched->show_cpu_visual) { 2657 u32 i; 2658 char c; 2659 2660 printf(" "); 2661 for (i = 0; i < max_cpus; ++i) { 2662 c = (i == sample->cpu) ? 'm' : ' '; 2663 printf("%c", c); 2664 } 2665 printf(" "); 2666 } 2667 2668 printf(" %-*s ", comm_width, timehist_get_commstr(thread)); 2669 2670 /* dt spacer */ 2671 printf(" %9s %9s %9s ", "", "", ""); 2672 2673 printf("migrated: %s", timehist_get_commstr(migrated)); 2674 printf(" cpu %d => %d", ocpu, dcpu); 2675 2676 printf("\n"); 2677 } 2678 2679 static int timehist_migrate_task_event(const struct perf_tool *tool, 2680 union perf_event *event __maybe_unused, 2681 struct evsel *evsel, 2682 struct perf_sample *sample, 2683 struct machine *machine) 2684 { 2685 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); 2686 struct thread *thread; 2687 struct thread_runtime *tr = NULL; 2688 /* want pid of migrated task not pid in sample */ 2689 const u32 pid = evsel__intval(evsel, sample, "pid"); 2690 2691 thread = machine__findnew_thread(machine, 0, pid); 2692 if (thread == NULL) 2693 return -1; 2694 2695 tr = thread__get_runtime(thread); 2696 if (tr == NULL) 2697 return -1; 2698 2699 tr->migrations++; 2700 tr->migrated = sample->time; 2701 2702 /* show migrations if requested */ 2703 if (sched->show_migrations) { 2704 timehist_print_migration_event(sched, evsel, sample, 2705 machine, thread); 2706 } 2707 2708 return 0; 2709 } 2710 2711 static void timehist_update_task_prio(struct evsel *evsel, 2712 struct perf_sample *sample, 2713 struct machine *machine) 2714 { 2715 struct thread *thread; 2716 struct thread_runtime *tr = NULL; 2717 const u32 next_pid = evsel__intval(evsel, sample, "next_pid"); 2718 const u32 next_prio = evsel__intval(evsel, sample, "next_prio"); 2719 2720 if (next_pid == 0) 2721 thread = get_idle_thread(sample->cpu); 2722 else 2723 thread = machine__findnew_thread(machine, -1, next_pid); 2724 2725 if (thread == NULL) 2726 return; 2727 2728 tr = thread__get_runtime(thread); 2729 if (tr == NULL) 2730 return; 2731 2732 tr->prio = next_prio; 2733 } 2734 2735 static int timehist_sched_change_event(const struct perf_tool *tool, 2736 union perf_event *event, 2737 struct evsel *evsel, 2738 struct perf_sample *sample, 2739 struct machine *machine) 2740 { 2741 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); 2742 struct perf_time_interval *ptime = &sched->ptime; 2743 struct addr_location al; 2744 struct thread *thread; 2745 struct thread_runtime *tr = NULL; 2746 u64 tprev, t = sample->time; 2747 int rc = 0; 2748 const char state = evsel__taskstate(evsel, sample, "prev_state"); 2749 2750 addr_location__init(&al); 2751 if (machine__resolve(machine, &al, sample) < 0) { 2752 pr_err("problem processing %d event. skipping it\n", 2753 event->header.type); 2754 rc = -1; 2755 goto out; 2756 } 2757 2758 if (sched->show_prio || sched->prio_str) 2759 timehist_update_task_prio(evsel, sample, machine); 2760 2761 thread = timehist_get_thread(sched, sample, machine, evsel); 2762 if (thread == NULL) { 2763 rc = -1; 2764 goto out; 2765 } 2766 2767 if (timehist_skip_sample(sched, thread, evsel, sample)) 2768 goto out; 2769 2770 tr = thread__get_runtime(thread); 2771 if (tr == NULL) { 2772 rc = -1; 2773 goto out; 2774 } 2775 2776 tprev = evsel__get_time(evsel, sample->cpu); 2777 2778 /* 2779 * If start time given: 2780 * - sample time is under window user cares about - skip sample 2781 * - tprev is under window user cares about - reset to start of window 2782 */ 2783 if (ptime->start && ptime->start > t) 2784 goto out; 2785 2786 if (tprev && ptime->start > tprev) 2787 tprev = ptime->start; 2788 2789 /* 2790 * If end time given: 2791 * - previous sched event is out of window - we are done 2792 * - sample time is beyond window user cares about - reset it 2793 * to close out stats for time window interest 2794 * - If tprev is 0, that is, sched_in event for current task is 2795 * not recorded, cannot determine whether sched_in event is 2796 * within time window interest - ignore it 2797 */ 2798 if (ptime->end) { 2799 if (!tprev || tprev > ptime->end) 2800 goto out; 2801 2802 if (t > ptime->end) 2803 t = ptime->end; 2804 } 2805 2806 if (!sched->idle_hist || thread__tid(thread) == 0) { 2807 if (!cpu_list || test_bit(sample->cpu, cpu_bitmap)) 2808 timehist_update_runtime_stats(tr, t, tprev); 2809 2810 if (sched->idle_hist) { 2811 struct idle_thread_runtime *itr = (void *)tr; 2812 struct thread_runtime *last_tr; 2813 2814 if (itr->last_thread == NULL) 2815 goto out; 2816 2817 /* add current idle time as last thread's runtime */ 2818 last_tr = thread__get_runtime(itr->last_thread); 2819 if (last_tr == NULL) 2820 goto out; 2821 2822 timehist_update_runtime_stats(last_tr, t, tprev); 2823 /* 2824 * remove delta time of last thread as it's not updated 2825 * and otherwise it will show an invalid value next 2826 * time. we only care total run time and run stat. 2827 */ 2828 last_tr->dt_run = 0; 2829 last_tr->dt_delay = 0; 2830 last_tr->dt_sleep = 0; 2831 last_tr->dt_iowait = 0; 2832 last_tr->dt_preempt = 0; 2833 2834 if (itr->cursor.nr) 2835 callchain_append(&itr->callchain, &itr->cursor, t - tprev); 2836 2837 itr->last_thread = NULL; 2838 } 2839 2840 if (!sched->summary_only) 2841 timehist_print_sample(sched, evsel, sample, &al, thread, t, state); 2842 } 2843 2844 out: 2845 if (sched->hist_time.start == 0 && t >= ptime->start) 2846 sched->hist_time.start = t; 2847 if (ptime->end == 0 || t <= ptime->end) 2848 sched->hist_time.end = t; 2849 2850 if (tr) { 2851 /* time of this sched_switch event becomes last time task seen */ 2852 tr->last_time = sample->time; 2853 2854 /* last state is used to determine where to account wait time */ 2855 tr->last_state = state; 2856 2857 /* sched out event for task so reset ready to run time and migrated time */ 2858 if (state == 'R') 2859 tr->ready_to_run = t; 2860 else 2861 tr->ready_to_run = 0; 2862 2863 tr->migrated = 0; 2864 } 2865 2866 evsel__save_time(evsel, sample->time, sample->cpu); 2867 2868 addr_location__exit(&al); 2869 return rc; 2870 } 2871 2872 static int timehist_sched_switch_event(const struct perf_tool *tool, 2873 union perf_event *event, 2874 struct evsel *evsel, 2875 struct perf_sample *sample, 2876 struct machine *machine __maybe_unused) 2877 { 2878 return timehist_sched_change_event(tool, event, evsel, sample, machine); 2879 } 2880 2881 static int process_lost(const struct perf_tool *tool __maybe_unused, 2882 union perf_event *event, 2883 struct perf_sample *sample, 2884 struct machine *machine __maybe_unused) 2885 { 2886 char tstr[64]; 2887 2888 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr)); 2889 printf("%15s ", tstr); 2890 printf("lost %" PRI_lu64 " events on cpu %d\n", event->lost.lost, sample->cpu); 2891 2892 return 0; 2893 } 2894 2895 2896 static void print_thread_runtime(struct thread *t, 2897 struct thread_runtime *r) 2898 { 2899 double mean = avg_stats(&r->run_stats); 2900 float stddev; 2901 2902 printf("%*s %5d %9" PRIu64 " ", 2903 comm_width, timehist_get_commstr(t), thread__ppid(t), 2904 (u64) r->run_stats.n); 2905 2906 print_sched_time(r->total_run_time, 8); 2907 stddev = rel_stddev_stats(stddev_stats(&r->run_stats), mean); 2908 print_sched_time(r->run_stats.min, 6); 2909 printf(" "); 2910 print_sched_time((u64) mean, 6); 2911 printf(" "); 2912 print_sched_time(r->run_stats.max, 6); 2913 printf(" "); 2914 printf("%5.2f", stddev); 2915 printf(" %5" PRIu64, r->migrations); 2916 printf("\n"); 2917 } 2918 2919 static void print_thread_waittime(struct thread *t, 2920 struct thread_runtime *r) 2921 { 2922 printf("%*s %5d %9" PRIu64 " ", 2923 comm_width, timehist_get_commstr(t), thread__ppid(t), 2924 (u64) r->run_stats.n); 2925 2926 print_sched_time(r->total_run_time, 8); 2927 print_sched_time(r->total_sleep_time, 6); 2928 printf(" "); 2929 print_sched_time(r->total_iowait_time, 6); 2930 printf(" "); 2931 print_sched_time(r->total_preempt_time, 6); 2932 printf(" "); 2933 print_sched_time(r->total_delay_time, 6); 2934 printf("\n"); 2935 } 2936 2937 struct total_run_stats { 2938 struct perf_sched *sched; 2939 u64 sched_count; 2940 u64 task_count; 2941 u64 total_run_time; 2942 }; 2943 2944 static int show_thread_runtime(struct thread *t, void *priv) 2945 { 2946 struct total_run_stats *stats = priv; 2947 struct thread_runtime *r; 2948 2949 if (thread__is_filtered(t)) 2950 return 0; 2951 2952 r = thread__priv(t); 2953 if (r && r->run_stats.n) { 2954 stats->task_count++; 2955 stats->sched_count += r->run_stats.n; 2956 stats->total_run_time += r->total_run_time; 2957 2958 if (stats->sched->show_state) 2959 print_thread_waittime(t, r); 2960 else 2961 print_thread_runtime(t, r); 2962 } 2963 2964 return 0; 2965 } 2966 2967 static size_t callchain__fprintf_folded(FILE *fp, struct callchain_node *node) 2968 { 2969 const char *sep = " <- "; 2970 struct callchain_list *chain; 2971 size_t ret = 0; 2972 char bf[1024]; 2973 bool first; 2974 2975 if (node == NULL) 2976 return 0; 2977 2978 ret = callchain__fprintf_folded(fp, node->parent); 2979 first = (ret == 0); 2980 2981 list_for_each_entry(chain, &node->val, list) { 2982 if (chain->ip >= PERF_CONTEXT_MAX) 2983 continue; 2984 if (chain->ms.sym && chain->ms.sym->ignore) 2985 continue; 2986 ret += fprintf(fp, "%s%s", first ? "" : sep, 2987 callchain_list__sym_name(chain, bf, sizeof(bf), 2988 false)); 2989 first = false; 2990 } 2991 2992 return ret; 2993 } 2994 2995 static size_t timehist_print_idlehist_callchain(struct rb_root_cached *root) 2996 { 2997 size_t ret = 0; 2998 FILE *fp = stdout; 2999 struct callchain_node *chain; 3000 struct rb_node *rb_node = rb_first_cached(root); 3001 3002 printf(" %16s %8s %s\n", "Idle time (msec)", "Count", "Callchains"); 3003 printf(" %.16s %.8s %.50s\n", graph_dotted_line, graph_dotted_line, 3004 graph_dotted_line); 3005 3006 while (rb_node) { 3007 chain = rb_entry(rb_node, struct callchain_node, rb_node); 3008 rb_node = rb_next(rb_node); 3009 3010 ret += fprintf(fp, " "); 3011 print_sched_time(chain->hit, 12); 3012 ret += 16; /* print_sched_time returns 2nd arg + 4 */ 3013 ret += fprintf(fp, " %8d ", chain->count); 3014 ret += callchain__fprintf_folded(fp, chain); 3015 ret += fprintf(fp, "\n"); 3016 } 3017 3018 return ret; 3019 } 3020 3021 static void timehist_print_summary(struct perf_sched *sched, 3022 struct perf_session *session) 3023 { 3024 struct machine *m = &session->machines.host; 3025 struct total_run_stats totals; 3026 u64 task_count; 3027 struct thread *t; 3028 struct thread_runtime *r; 3029 int i; 3030 u64 hist_time = sched->hist_time.end - sched->hist_time.start; 3031 3032 memset(&totals, 0, sizeof(totals)); 3033 totals.sched = sched; 3034 3035 if (sched->idle_hist) { 3036 printf("\nIdle-time summary\n"); 3037 printf("%*s parent sched-out ", comm_width, "comm"); 3038 printf(" idle-time min-idle avg-idle max-idle stddev migrations\n"); 3039 } else if (sched->show_state) { 3040 printf("\nWait-time summary\n"); 3041 printf("%*s parent sched-in ", comm_width, "comm"); 3042 printf(" run-time sleep iowait preempt delay\n"); 3043 } else { 3044 printf("\nRuntime summary\n"); 3045 printf("%*s parent sched-in ", comm_width, "comm"); 3046 printf(" run-time min-run avg-run max-run stddev migrations\n"); 3047 } 3048 printf("%*s (count) ", comm_width, ""); 3049 printf(" (msec) (msec) (msec) (msec) %s\n", 3050 sched->show_state ? "(msec)" : "%"); 3051 printf("%.117s\n", graph_dotted_line); 3052 3053 machine__for_each_thread(m, show_thread_runtime, &totals); 3054 task_count = totals.task_count; 3055 if (!task_count) 3056 printf("<no still running tasks>\n"); 3057 3058 /* CPU idle stats not tracked when samples were skipped */ 3059 if (sched->skipped_samples && !sched->idle_hist) 3060 return; 3061 3062 printf("\nIdle stats:\n"); 3063 for (i = 0; i < idle_max_cpu; ++i) { 3064 if (cpu_list && !test_bit(i, cpu_bitmap)) 3065 continue; 3066 3067 t = idle_threads[i]; 3068 if (!t) 3069 continue; 3070 3071 r = thread__priv(t); 3072 if (r && r->run_stats.n) { 3073 totals.sched_count += r->run_stats.n; 3074 printf(" CPU %2d idle for ", i); 3075 print_sched_time(r->total_run_time, 6); 3076 printf(" msec (%6.2f%%)\n", 100.0 * r->total_run_time / hist_time); 3077 } else 3078 printf(" CPU %2d idle entire time window\n", i); 3079 } 3080 3081 if (sched->idle_hist && sched->show_callchain) { 3082 callchain_param.mode = CHAIN_FOLDED; 3083 callchain_param.value = CCVAL_PERIOD; 3084 3085 callchain_register_param(&callchain_param); 3086 3087 printf("\nIdle stats by callchain:\n"); 3088 for (i = 0; i < idle_max_cpu; ++i) { 3089 struct idle_thread_runtime *itr; 3090 3091 t = idle_threads[i]; 3092 if (!t) 3093 continue; 3094 3095 itr = thread__priv(t); 3096 if (itr == NULL) 3097 continue; 3098 3099 callchain_param.sort(&itr->sorted_root.rb_root, &itr->callchain, 3100 0, &callchain_param); 3101 3102 printf(" CPU %2d:", i); 3103 print_sched_time(itr->tr.total_run_time, 6); 3104 printf(" msec\n"); 3105 timehist_print_idlehist_callchain(&itr->sorted_root); 3106 printf("\n"); 3107 } 3108 } 3109 3110 printf("\n" 3111 " Total number of unique tasks: %" PRIu64 "\n" 3112 "Total number of context switches: %" PRIu64 "\n", 3113 totals.task_count, totals.sched_count); 3114 3115 printf(" Total run time (msec): "); 3116 print_sched_time(totals.total_run_time, 2); 3117 printf("\n"); 3118 3119 printf(" Total scheduling time (msec): "); 3120 print_sched_time(hist_time, 2); 3121 printf(" (x %d)\n", sched->max_cpu.cpu); 3122 } 3123 3124 typedef int (*sched_handler)(const struct perf_tool *tool, 3125 union perf_event *event, 3126 struct evsel *evsel, 3127 struct perf_sample *sample, 3128 struct machine *machine); 3129 3130 static int perf_timehist__process_sample(const struct perf_tool *tool, 3131 union perf_event *event, 3132 struct perf_sample *sample, 3133 struct evsel *evsel, 3134 struct machine *machine) 3135 { 3136 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); 3137 int err = 0; 3138 struct perf_cpu this_cpu = { 3139 .cpu = sample->cpu, 3140 }; 3141 3142 if (this_cpu.cpu > sched->max_cpu.cpu) 3143 sched->max_cpu = this_cpu; 3144 3145 if (evsel->handler != NULL) { 3146 sched_handler f = evsel->handler; 3147 3148 err = f(tool, event, evsel, sample, machine); 3149 } 3150 3151 return err; 3152 } 3153 3154 static int timehist_check_attr(struct perf_sched *sched, 3155 struct evlist *evlist) 3156 { 3157 struct evsel *evsel; 3158 struct evsel_runtime *er; 3159 3160 list_for_each_entry(evsel, &evlist->core.entries, core.node) { 3161 er = evsel__get_runtime(evsel); 3162 if (er == NULL) { 3163 pr_err("Failed to allocate memory for evsel runtime data\n"); 3164 return -1; 3165 } 3166 3167 /* only need to save callchain related to sched_switch event */ 3168 if (sched->show_callchain && 3169 evsel__name_is(evsel, "sched:sched_switch") && 3170 !evsel__has_callchain(evsel)) { 3171 pr_info("Samples of sched_switch event do not have callchains.\n"); 3172 sched->show_callchain = 0; 3173 symbol_conf.use_callchain = 0; 3174 } 3175 } 3176 3177 return 0; 3178 } 3179 3180 static int timehist_parse_prio_str(struct perf_sched *sched) 3181 { 3182 char *p; 3183 unsigned long start_prio, end_prio; 3184 const char *str = sched->prio_str; 3185 3186 if (!str) 3187 return 0; 3188 3189 while (isdigit(*str)) { 3190 p = NULL; 3191 start_prio = strtoul(str, &p, 0); 3192 if (start_prio >= MAX_PRIO || (*p != '\0' && *p != ',' && *p != '-')) 3193 return -1; 3194 3195 if (*p == '-') { 3196 str = ++p; 3197 p = NULL; 3198 end_prio = strtoul(str, &p, 0); 3199 3200 if (end_prio >= MAX_PRIO || (*p != '\0' && *p != ',')) 3201 return -1; 3202 3203 if (end_prio < start_prio) 3204 return -1; 3205 } else { 3206 end_prio = start_prio; 3207 } 3208 3209 for (; start_prio <= end_prio; start_prio++) 3210 __set_bit(start_prio, sched->prio_bitmap); 3211 3212 if (*p) 3213 ++p; 3214 3215 str = p; 3216 } 3217 3218 return 0; 3219 } 3220 3221 static int perf_sched__timehist(struct perf_sched *sched) 3222 { 3223 struct evsel_str_handler handlers[] = { 3224 { "sched:sched_switch", timehist_sched_switch_event, }, 3225 { "sched:sched_wakeup", timehist_sched_wakeup_event, }, 3226 { "sched:sched_waking", timehist_sched_wakeup_event, }, 3227 { "sched:sched_wakeup_new", timehist_sched_wakeup_event, }, 3228 }; 3229 const struct evsel_str_handler migrate_handlers[] = { 3230 { "sched:sched_migrate_task", timehist_migrate_task_event, }, 3231 }; 3232 struct perf_data data = { 3233 .path = input_name, 3234 .mode = PERF_DATA_MODE_READ, 3235 .force = sched->force, 3236 }; 3237 3238 struct perf_session *session; 3239 struct evlist *evlist; 3240 int err = -1; 3241 3242 /* 3243 * event handlers for timehist option 3244 */ 3245 sched->tool.sample = perf_timehist__process_sample; 3246 sched->tool.mmap = perf_event__process_mmap; 3247 sched->tool.comm = perf_event__process_comm; 3248 sched->tool.exit = perf_event__process_exit; 3249 sched->tool.fork = perf_event__process_fork; 3250 sched->tool.lost = process_lost; 3251 sched->tool.attr = perf_event__process_attr; 3252 sched->tool.tracing_data = perf_event__process_tracing_data; 3253 sched->tool.build_id = perf_event__process_build_id; 3254 3255 sched->tool.ordering_requires_timestamps = true; 3256 3257 symbol_conf.use_callchain = sched->show_callchain; 3258 3259 session = perf_session__new(&data, &sched->tool); 3260 if (IS_ERR(session)) 3261 return PTR_ERR(session); 3262 3263 if (cpu_list) { 3264 err = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap); 3265 if (err < 0) 3266 goto out; 3267 } 3268 3269 evlist = session->evlist; 3270 3271 symbol__init(&session->header.env); 3272 3273 if (perf_time__parse_str(&sched->ptime, sched->time_str) != 0) { 3274 pr_err("Invalid time string\n"); 3275 err = -EINVAL; 3276 goto out; 3277 } 3278 3279 if (timehist_check_attr(sched, evlist) != 0) 3280 goto out; 3281 3282 if (timehist_parse_prio_str(sched) != 0) { 3283 pr_err("Invalid prio string\n"); 3284 goto out; 3285 } 3286 3287 setup_pager(); 3288 3289 /* prefer sched_waking if it is captured */ 3290 if (evlist__find_tracepoint_by_name(session->evlist, "sched:sched_waking")) 3291 handlers[1].handler = timehist_sched_wakeup_ignore; 3292 3293 /* setup per-evsel handlers */ 3294 if (perf_session__set_tracepoints_handlers(session, handlers)) 3295 goto out; 3296 3297 /* sched_switch event at a minimum needs to exist */ 3298 if (!evlist__find_tracepoint_by_name(session->evlist, "sched:sched_switch")) { 3299 pr_err("No sched_switch events found. Have you run 'perf sched record'?\n"); 3300 goto out; 3301 } 3302 3303 if ((sched->show_migrations || sched->pre_migrations) && 3304 perf_session__set_tracepoints_handlers(session, migrate_handlers)) 3305 goto out; 3306 3307 /* pre-allocate struct for per-CPU idle stats */ 3308 sched->max_cpu.cpu = session->header.env.nr_cpus_online; 3309 if (sched->max_cpu.cpu == 0) 3310 sched->max_cpu.cpu = 4; 3311 if (init_idle_threads(sched->max_cpu.cpu)) 3312 goto out; 3313 3314 /* summary_only implies summary option, but don't overwrite summary if set */ 3315 if (sched->summary_only) 3316 sched->summary = sched->summary_only; 3317 3318 if (!sched->summary_only) 3319 timehist_header(sched); 3320 3321 err = perf_session__process_events(session); 3322 if (err) { 3323 pr_err("Failed to process events, error %d", err); 3324 goto out; 3325 } 3326 3327 sched->nr_events = evlist->stats.nr_events[0]; 3328 sched->nr_lost_events = evlist->stats.total_lost; 3329 sched->nr_lost_chunks = evlist->stats.nr_events[PERF_RECORD_LOST]; 3330 3331 if (sched->summary) 3332 timehist_print_summary(sched, session); 3333 3334 out: 3335 free_idle_threads(); 3336 perf_session__delete(session); 3337 3338 return err; 3339 } 3340 3341 3342 static void print_bad_events(struct perf_sched *sched) 3343 { 3344 if (sched->nr_unordered_timestamps && sched->nr_timestamps) { 3345 printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n", 3346 (double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0, 3347 sched->nr_unordered_timestamps, sched->nr_timestamps); 3348 } 3349 if (sched->nr_lost_events && sched->nr_events) { 3350 printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n", 3351 (double)sched->nr_lost_events/(double)sched->nr_events * 100.0, 3352 sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks); 3353 } 3354 if (sched->nr_context_switch_bugs && sched->nr_timestamps) { 3355 printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)", 3356 (double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0, 3357 sched->nr_context_switch_bugs, sched->nr_timestamps); 3358 if (sched->nr_lost_events) 3359 printf(" (due to lost events?)"); 3360 printf("\n"); 3361 } 3362 } 3363 3364 static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *data) 3365 { 3366 struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL; 3367 struct work_atoms *this; 3368 const char *comm = thread__comm_str(data->thread), *this_comm; 3369 bool leftmost = true; 3370 3371 while (*new) { 3372 int cmp; 3373 3374 this = container_of(*new, struct work_atoms, node); 3375 parent = *new; 3376 3377 this_comm = thread__comm_str(this->thread); 3378 cmp = strcmp(comm, this_comm); 3379 if (cmp > 0) { 3380 new = &((*new)->rb_left); 3381 } else if (cmp < 0) { 3382 new = &((*new)->rb_right); 3383 leftmost = false; 3384 } else { 3385 this->num_merged++; 3386 this->total_runtime += data->total_runtime; 3387 this->nb_atoms += data->nb_atoms; 3388 this->total_lat += data->total_lat; 3389 list_splice(&data->work_list, &this->work_list); 3390 if (this->max_lat < data->max_lat) { 3391 this->max_lat = data->max_lat; 3392 this->max_lat_start = data->max_lat_start; 3393 this->max_lat_end = data->max_lat_end; 3394 } 3395 zfree(&data); 3396 return; 3397 } 3398 } 3399 3400 data->num_merged++; 3401 rb_link_node(&data->node, parent, new); 3402 rb_insert_color_cached(&data->node, root, leftmost); 3403 } 3404 3405 static void perf_sched__merge_lat(struct perf_sched *sched) 3406 { 3407 struct work_atoms *data; 3408 struct rb_node *node; 3409 3410 if (sched->skip_merge) 3411 return; 3412 3413 while ((node = rb_first_cached(&sched->atom_root))) { 3414 rb_erase_cached(node, &sched->atom_root); 3415 data = rb_entry(node, struct work_atoms, node); 3416 __merge_work_atoms(&sched->merged_atom_root, data); 3417 } 3418 } 3419 3420 static int setup_cpus_switch_event(struct perf_sched *sched) 3421 { 3422 unsigned int i; 3423 3424 sched->cpu_last_switched = calloc(MAX_CPUS, sizeof(*(sched->cpu_last_switched))); 3425 if (!sched->cpu_last_switched) 3426 return -1; 3427 3428 sched->curr_pid = malloc(MAX_CPUS * sizeof(*(sched->curr_pid))); 3429 if (!sched->curr_pid) { 3430 zfree(&sched->cpu_last_switched); 3431 return -1; 3432 } 3433 3434 for (i = 0; i < MAX_CPUS; i++) 3435 sched->curr_pid[i] = -1; 3436 3437 return 0; 3438 } 3439 3440 static void free_cpus_switch_event(struct perf_sched *sched) 3441 { 3442 zfree(&sched->curr_pid); 3443 zfree(&sched->cpu_last_switched); 3444 } 3445 3446 static int perf_sched__lat(struct perf_sched *sched) 3447 { 3448 int rc = -1; 3449 struct rb_node *next; 3450 3451 setup_pager(); 3452 3453 if (setup_cpus_switch_event(sched)) 3454 return rc; 3455 3456 if (perf_sched__read_events(sched)) 3457 goto out_free_cpus_switch_event; 3458 3459 perf_sched__merge_lat(sched); 3460 perf_sched__sort_lat(sched); 3461 3462 printf("\n -------------------------------------------------------------------------------------------------------------------------------------------\n"); 3463 printf(" Task | Runtime ms | Count | Avg delay ms | Max delay ms | Max delay start | Max delay end |\n"); 3464 printf(" -------------------------------------------------------------------------------------------------------------------------------------------\n"); 3465 3466 next = rb_first_cached(&sched->sorted_atom_root); 3467 3468 while (next) { 3469 struct work_atoms *work_list; 3470 3471 work_list = rb_entry(next, struct work_atoms, node); 3472 output_lat_thread(sched, work_list); 3473 next = rb_next(next); 3474 thread__zput(work_list->thread); 3475 } 3476 3477 printf(" -----------------------------------------------------------------------------------------------------------------\n"); 3478 printf(" TOTAL: |%11.3f ms |%9" PRIu64 " |\n", 3479 (double)sched->all_runtime / NSEC_PER_MSEC, sched->all_count); 3480 3481 printf(" ---------------------------------------------------\n"); 3482 3483 print_bad_events(sched); 3484 printf("\n"); 3485 3486 rc = 0; 3487 3488 out_free_cpus_switch_event: 3489 free_cpus_switch_event(sched); 3490 return rc; 3491 } 3492 3493 static int setup_map_cpus(struct perf_sched *sched) 3494 { 3495 sched->max_cpu.cpu = sysconf(_SC_NPROCESSORS_CONF); 3496 3497 if (sched->map.comp) { 3498 sched->map.comp_cpus = zalloc(sched->max_cpu.cpu * sizeof(int)); 3499 if (!sched->map.comp_cpus) 3500 return -1; 3501 } 3502 3503 if (sched->map.cpus_str) { 3504 sched->map.cpus = perf_cpu_map__new(sched->map.cpus_str); 3505 if (!sched->map.cpus) { 3506 pr_err("failed to get cpus map from %s\n", sched->map.cpus_str); 3507 zfree(&sched->map.comp_cpus); 3508 return -1; 3509 } 3510 } 3511 3512 return 0; 3513 } 3514 3515 static int setup_color_pids(struct perf_sched *sched) 3516 { 3517 struct perf_thread_map *map; 3518 3519 if (!sched->map.color_pids_str) 3520 return 0; 3521 3522 map = thread_map__new_by_tid_str(sched->map.color_pids_str); 3523 if (!map) { 3524 pr_err("failed to get thread map from %s\n", sched->map.color_pids_str); 3525 return -1; 3526 } 3527 3528 sched->map.color_pids = map; 3529 return 0; 3530 } 3531 3532 static int setup_color_cpus(struct perf_sched *sched) 3533 { 3534 struct perf_cpu_map *map; 3535 3536 if (!sched->map.color_cpus_str) 3537 return 0; 3538 3539 map = perf_cpu_map__new(sched->map.color_cpus_str); 3540 if (!map) { 3541 pr_err("failed to get thread map from %s\n", sched->map.color_cpus_str); 3542 return -1; 3543 } 3544 3545 sched->map.color_cpus = map; 3546 return 0; 3547 } 3548 3549 static int perf_sched__map(struct perf_sched *sched) 3550 { 3551 int rc = -1; 3552 3553 sched->curr_thread = calloc(MAX_CPUS, sizeof(*(sched->curr_thread))); 3554 if (!sched->curr_thread) 3555 return rc; 3556 3557 sched->curr_out_thread = calloc(MAX_CPUS, sizeof(*(sched->curr_out_thread))); 3558 if (!sched->curr_out_thread) 3559 return rc; 3560 3561 if (setup_cpus_switch_event(sched)) 3562 goto out_free_curr_thread; 3563 3564 if (setup_map_cpus(sched)) 3565 goto out_free_cpus_switch_event; 3566 3567 if (setup_color_pids(sched)) 3568 goto out_put_map_cpus; 3569 3570 if (setup_color_cpus(sched)) 3571 goto out_put_color_pids; 3572 3573 setup_pager(); 3574 if (perf_sched__read_events(sched)) 3575 goto out_put_color_cpus; 3576 3577 rc = 0; 3578 print_bad_events(sched); 3579 3580 out_put_color_cpus: 3581 perf_cpu_map__put(sched->map.color_cpus); 3582 3583 out_put_color_pids: 3584 perf_thread_map__put(sched->map.color_pids); 3585 3586 out_put_map_cpus: 3587 zfree(&sched->map.comp_cpus); 3588 perf_cpu_map__put(sched->map.cpus); 3589 3590 out_free_cpus_switch_event: 3591 free_cpus_switch_event(sched); 3592 3593 out_free_curr_thread: 3594 zfree(&sched->curr_thread); 3595 return rc; 3596 } 3597 3598 static int perf_sched__replay(struct perf_sched *sched) 3599 { 3600 int ret; 3601 unsigned long i; 3602 3603 mutex_init(&sched->start_work_mutex); 3604 mutex_init(&sched->work_done_wait_mutex); 3605 3606 ret = setup_cpus_switch_event(sched); 3607 if (ret) 3608 goto out_mutex_destroy; 3609 3610 calibrate_run_measurement_overhead(sched); 3611 calibrate_sleep_measurement_overhead(sched); 3612 3613 test_calibrations(sched); 3614 3615 ret = perf_sched__read_events(sched); 3616 if (ret) 3617 goto out_free_cpus_switch_event; 3618 3619 printf("nr_run_events: %ld\n", sched->nr_run_events); 3620 printf("nr_sleep_events: %ld\n", sched->nr_sleep_events); 3621 printf("nr_wakeup_events: %ld\n", sched->nr_wakeup_events); 3622 3623 if (sched->targetless_wakeups) 3624 printf("target-less wakeups: %ld\n", sched->targetless_wakeups); 3625 if (sched->multitarget_wakeups) 3626 printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups); 3627 if (sched->nr_run_events_optimized) 3628 printf("run atoms optimized: %ld\n", 3629 sched->nr_run_events_optimized); 3630 3631 print_task_traces(sched); 3632 add_cross_task_wakeups(sched); 3633 3634 sched->thread_funcs_exit = false; 3635 create_tasks(sched); 3636 printf("------------------------------------------------------------\n"); 3637 if (sched->replay_repeat == 0) 3638 sched->replay_repeat = UINT_MAX; 3639 3640 for (i = 0; i < sched->replay_repeat; i++) 3641 run_one_test(sched); 3642 3643 sched->thread_funcs_exit = true; 3644 destroy_tasks(sched); 3645 3646 out_free_cpus_switch_event: 3647 free_cpus_switch_event(sched); 3648 3649 out_mutex_destroy: 3650 mutex_destroy(&sched->start_work_mutex); 3651 mutex_destroy(&sched->work_done_wait_mutex); 3652 return ret; 3653 } 3654 3655 static void setup_sorting(struct perf_sched *sched, const struct option *options, 3656 const char * const usage_msg[]) 3657 { 3658 char *tmp, *tok, *str = strdup(sched->sort_order); 3659 3660 for (tok = strtok_r(str, ", ", &tmp); 3661 tok; tok = strtok_r(NULL, ", ", &tmp)) { 3662 if (sort_dimension__add(tok, &sched->sort_list) < 0) { 3663 usage_with_options_msg(usage_msg, options, 3664 "Unknown --sort key: `%s'", tok); 3665 } 3666 } 3667 3668 free(str); 3669 3670 sort_dimension__add("pid", &sched->cmp_pid); 3671 } 3672 3673 static bool schedstat_events_exposed(void) 3674 { 3675 /* 3676 * Select "sched:sched_stat_wait" event to check 3677 * whether schedstat tracepoints are exposed. 3678 */ 3679 return IS_ERR(trace_event__tp_format("sched", "sched_stat_wait")) ? 3680 false : true; 3681 } 3682 3683 static int __cmd_record(int argc, const char **argv) 3684 { 3685 unsigned int rec_argc, i, j; 3686 char **rec_argv; 3687 const char **rec_argv_copy; 3688 const char * const record_args[] = { 3689 "record", 3690 "-a", 3691 "-R", 3692 "-m", "1024", 3693 "-c", "1", 3694 "-e", "sched:sched_switch", 3695 "-e", "sched:sched_stat_runtime", 3696 "-e", "sched:sched_process_fork", 3697 "-e", "sched:sched_wakeup_new", 3698 "-e", "sched:sched_migrate_task", 3699 }; 3700 3701 /* 3702 * The tracepoints trace_sched_stat_{wait, sleep, iowait} 3703 * are not exposed to user if CONFIG_SCHEDSTATS is not set, 3704 * to prevent "perf sched record" execution failure, determine 3705 * whether to record schedstat events according to actual situation. 3706 */ 3707 const char * const schedstat_args[] = { 3708 "-e", "sched:sched_stat_wait", 3709 "-e", "sched:sched_stat_sleep", 3710 "-e", "sched:sched_stat_iowait", 3711 }; 3712 unsigned int schedstat_argc = schedstat_events_exposed() ? 3713 ARRAY_SIZE(schedstat_args) : 0; 3714 3715 struct tep_event *waking_event; 3716 int ret; 3717 3718 /* 3719 * +2 for either "-e", "sched:sched_wakeup" or 3720 * "-e", "sched:sched_waking" 3721 */ 3722 rec_argc = ARRAY_SIZE(record_args) + 2 + schedstat_argc + argc - 1; 3723 rec_argv = calloc(rec_argc + 1, sizeof(char *)); 3724 if (rec_argv == NULL) 3725 return -ENOMEM; 3726 rec_argv_copy = calloc(rec_argc + 1, sizeof(char *)); 3727 if (rec_argv_copy == NULL) { 3728 free(rec_argv); 3729 return -ENOMEM; 3730 } 3731 3732 for (i = 0; i < ARRAY_SIZE(record_args); i++) 3733 rec_argv[i] = strdup(record_args[i]); 3734 3735 rec_argv[i++] = strdup("-e"); 3736 waking_event = trace_event__tp_format("sched", "sched_waking"); 3737 if (!IS_ERR(waking_event)) 3738 rec_argv[i++] = strdup("sched:sched_waking"); 3739 else 3740 rec_argv[i++] = strdup("sched:sched_wakeup"); 3741 3742 for (j = 0; j < schedstat_argc; j++) 3743 rec_argv[i++] = strdup(schedstat_args[j]); 3744 3745 for (j = 1; j < (unsigned int)argc; j++, i++) 3746 rec_argv[i] = strdup(argv[j]); 3747 3748 BUG_ON(i != rec_argc); 3749 3750 memcpy(rec_argv_copy, rec_argv, sizeof(char *) * rec_argc); 3751 ret = cmd_record(rec_argc, rec_argv_copy); 3752 3753 for (i = 0; i < rec_argc; i++) 3754 free(rec_argv[i]); 3755 free(rec_argv); 3756 free(rec_argv_copy); 3757 3758 return ret; 3759 } 3760 3761 int cmd_sched(int argc, const char **argv) 3762 { 3763 static const char default_sort_order[] = "avg, max, switch, runtime"; 3764 struct perf_sched sched = { 3765 .cmp_pid = LIST_HEAD_INIT(sched.cmp_pid), 3766 .sort_list = LIST_HEAD_INIT(sched.sort_list), 3767 .sort_order = default_sort_order, 3768 .replay_repeat = 10, 3769 .profile_cpu = -1, 3770 .next_shortname1 = 'A', 3771 .next_shortname2 = '0', 3772 .skip_merge = 0, 3773 .show_callchain = 1, 3774 .max_stack = 5, 3775 }; 3776 const struct option sched_options[] = { 3777 OPT_STRING('i', "input", &input_name, "file", 3778 "input file name"), 3779 OPT_INCR('v', "verbose", &verbose, 3780 "be more verbose (show symbol address, etc)"), 3781 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, 3782 "dump raw trace in ASCII"), 3783 OPT_BOOLEAN('f', "force", &sched.force, "don't complain, do it"), 3784 OPT_END() 3785 }; 3786 const struct option latency_options[] = { 3787 OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]", 3788 "sort by key(s): runtime, switch, avg, max"), 3789 OPT_INTEGER('C', "CPU", &sched.profile_cpu, 3790 "CPU to profile on"), 3791 OPT_BOOLEAN('p', "pids", &sched.skip_merge, 3792 "latency stats per pid instead of per comm"), 3793 OPT_PARENT(sched_options) 3794 }; 3795 const struct option replay_options[] = { 3796 OPT_UINTEGER('r', "repeat", &sched.replay_repeat, 3797 "repeat the workload replay N times (0: infinite)"), 3798 OPT_PARENT(sched_options) 3799 }; 3800 const struct option map_options[] = { 3801 OPT_BOOLEAN(0, "compact", &sched.map.comp, 3802 "map output in compact mode"), 3803 OPT_STRING(0, "color-pids", &sched.map.color_pids_str, "pids", 3804 "highlight given pids in map"), 3805 OPT_STRING(0, "color-cpus", &sched.map.color_cpus_str, "cpus", 3806 "highlight given CPUs in map"), 3807 OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus", 3808 "display given CPUs in map"), 3809 OPT_STRING(0, "task-name", &sched.map.task_name, "task", 3810 "map output only for the given task name(s)."), 3811 OPT_BOOLEAN(0, "fuzzy-name", &sched.map.fuzzy, 3812 "given command name can be partially matched (fuzzy matching)"), 3813 OPT_PARENT(sched_options) 3814 }; 3815 const struct option timehist_options[] = { 3816 OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, 3817 "file", "vmlinux pathname"), 3818 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, 3819 "file", "kallsyms pathname"), 3820 OPT_BOOLEAN('g', "call-graph", &sched.show_callchain, 3821 "Display call chains if present (default on)"), 3822 OPT_UINTEGER(0, "max-stack", &sched.max_stack, 3823 "Maximum number of functions to display backtrace."), 3824 OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", 3825 "Look for files with symbols relative to this directory"), 3826 OPT_BOOLEAN('s', "summary", &sched.summary_only, 3827 "Show only syscall summary with statistics"), 3828 OPT_BOOLEAN('S', "with-summary", &sched.summary, 3829 "Show all syscalls and summary with statistics"), 3830 OPT_BOOLEAN('w', "wakeups", &sched.show_wakeups, "Show wakeup events"), 3831 OPT_BOOLEAN('n', "next", &sched.show_next, "Show next task"), 3832 OPT_BOOLEAN('M', "migrations", &sched.show_migrations, "Show migration events"), 3833 OPT_BOOLEAN('V', "cpu-visual", &sched.show_cpu_visual, "Add CPU visual"), 3834 OPT_BOOLEAN('I', "idle-hist", &sched.idle_hist, "Show idle events only"), 3835 OPT_STRING(0, "time", &sched.time_str, "str", 3836 "Time span for analysis (start,stop)"), 3837 OPT_BOOLEAN(0, "state", &sched.show_state, "Show task state when sched-out"), 3838 OPT_STRING('p', "pid", &symbol_conf.pid_list_str, "pid[,pid...]", 3839 "analyze events only for given process id(s)"), 3840 OPT_STRING('t', "tid", &symbol_conf.tid_list_str, "tid[,tid...]", 3841 "analyze events only for given thread id(s)"), 3842 OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"), 3843 OPT_BOOLEAN(0, "show-prio", &sched.show_prio, "Show task priority"), 3844 OPT_STRING(0, "prio", &sched.prio_str, "prio", 3845 "analyze events only for given task priority(ies)"), 3846 OPT_BOOLEAN('P', "pre-migrations", &sched.pre_migrations, "Show pre-migration wait time"), 3847 OPT_PARENT(sched_options) 3848 }; 3849 3850 const char * const latency_usage[] = { 3851 "perf sched latency [<options>]", 3852 NULL 3853 }; 3854 const char * const replay_usage[] = { 3855 "perf sched replay [<options>]", 3856 NULL 3857 }; 3858 const char * const map_usage[] = { 3859 "perf sched map [<options>]", 3860 NULL 3861 }; 3862 const char * const timehist_usage[] = { 3863 "perf sched timehist [<options>]", 3864 NULL 3865 }; 3866 const char *const sched_subcommands[] = { "record", "latency", "map", 3867 "replay", "script", 3868 "timehist", NULL }; 3869 const char *sched_usage[] = { 3870 NULL, 3871 NULL 3872 }; 3873 struct trace_sched_handler lat_ops = { 3874 .wakeup_event = latency_wakeup_event, 3875 .switch_event = latency_switch_event, 3876 .runtime_event = latency_runtime_event, 3877 .migrate_task_event = latency_migrate_task_event, 3878 }; 3879 struct trace_sched_handler map_ops = { 3880 .switch_event = map_switch_event, 3881 }; 3882 struct trace_sched_handler replay_ops = { 3883 .wakeup_event = replay_wakeup_event, 3884 .switch_event = replay_switch_event, 3885 .fork_event = replay_fork_event, 3886 }; 3887 int ret; 3888 3889 perf_tool__init(&sched.tool, /*ordered_events=*/true); 3890 sched.tool.sample = perf_sched__process_tracepoint_sample; 3891 sched.tool.comm = perf_sched__process_comm; 3892 sched.tool.namespaces = perf_event__process_namespaces; 3893 sched.tool.lost = perf_event__process_lost; 3894 sched.tool.fork = perf_sched__process_fork_event; 3895 3896 argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands, 3897 sched_usage, PARSE_OPT_STOP_AT_NON_OPTION); 3898 if (!argc) 3899 usage_with_options(sched_usage, sched_options); 3900 3901 /* 3902 * Aliased to 'perf script' for now: 3903 */ 3904 if (!strcmp(argv[0], "script")) { 3905 return cmd_script(argc, argv); 3906 } else if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) { 3907 return __cmd_record(argc, argv); 3908 } else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) { 3909 sched.tp_handler = &lat_ops; 3910 if (argc > 1) { 3911 argc = parse_options(argc, argv, latency_options, latency_usage, 0); 3912 if (argc) 3913 usage_with_options(latency_usage, latency_options); 3914 } 3915 setup_sorting(&sched, latency_options, latency_usage); 3916 return perf_sched__lat(&sched); 3917 } else if (!strcmp(argv[0], "map")) { 3918 if (argc) { 3919 argc = parse_options(argc, argv, map_options, map_usage, 0); 3920 if (argc) 3921 usage_with_options(map_usage, map_options); 3922 3923 if (sched.map.task_name) { 3924 sched.map.task_names = strlist__new(sched.map.task_name, NULL); 3925 if (sched.map.task_names == NULL) { 3926 fprintf(stderr, "Failed to parse task names\n"); 3927 return -1; 3928 } 3929 } 3930 } 3931 sched.tp_handler = &map_ops; 3932 setup_sorting(&sched, latency_options, latency_usage); 3933 return perf_sched__map(&sched); 3934 } else if (strlen(argv[0]) > 2 && strstarts("replay", argv[0])) { 3935 sched.tp_handler = &replay_ops; 3936 if (argc) { 3937 argc = parse_options(argc, argv, replay_options, replay_usage, 0); 3938 if (argc) 3939 usage_with_options(replay_usage, replay_options); 3940 } 3941 return perf_sched__replay(&sched); 3942 } else if (!strcmp(argv[0], "timehist")) { 3943 if (argc) { 3944 argc = parse_options(argc, argv, timehist_options, 3945 timehist_usage, 0); 3946 if (argc) 3947 usage_with_options(timehist_usage, timehist_options); 3948 } 3949 if ((sched.show_wakeups || sched.show_next) && 3950 sched.summary_only) { 3951 pr_err(" Error: -s and -[n|w] are mutually exclusive.\n"); 3952 parse_options_usage(timehist_usage, timehist_options, "s", true); 3953 if (sched.show_wakeups) 3954 parse_options_usage(NULL, timehist_options, "w", true); 3955 if (sched.show_next) 3956 parse_options_usage(NULL, timehist_options, "n", true); 3957 return -EINVAL; 3958 } 3959 ret = symbol__validate_sym_arguments(); 3960 if (ret) 3961 return ret; 3962 3963 return perf_sched__timehist(&sched); 3964 } else { 3965 usage_with_options(sched_usage, sched_options); 3966 } 3967 3968 /* free usage string allocated by parse_options_subcommand */ 3969 free((void *)sched_usage[0]); 3970 3971 return 0; 3972 } 3973