1 // SPDX-License-Identifier: GPL-2.0 2 #include "builtin.h" 3 #include "perf.h" 4 5 #include "util/util.h" 6 #include "util/evlist.h" 7 #include "util/cache.h" 8 #include "util/evsel.h" 9 #include "util/symbol.h" 10 #include "util/thread.h" 11 #include "util/header.h" 12 #include "util/session.h" 13 #include "util/tool.h" 14 #include "util/cloexec.h" 15 #include "util/thread_map.h" 16 #include "util/color.h" 17 #include "util/stat.h" 18 #include "util/callchain.h" 19 #include "util/time-utils.h" 20 21 #include <subcmd/parse-options.h> 22 #include "util/trace-event.h" 23 24 #include "util/debug.h" 25 26 #include <linux/kernel.h> 27 #include <linux/log2.h> 28 #include <sys/prctl.h> 29 #include <sys/resource.h> 30 #include <inttypes.h> 31 32 #include <errno.h> 33 #include <semaphore.h> 34 #include <pthread.h> 35 #include <math.h> 36 #include <api/fs/fs.h> 37 #include <linux/time64.h> 38 39 #include "sane_ctype.h" 40 41 #define PR_SET_NAME 15 /* Set process name */ 42 #define MAX_CPUS 4096 43 #define COMM_LEN 20 44 #define SYM_LEN 129 45 #define MAX_PID 1024000 46 47 struct sched_atom; 48 49 struct task_desc { 50 unsigned long nr; 51 unsigned long pid; 52 char comm[COMM_LEN]; 53 54 unsigned long nr_events; 55 unsigned long curr_event; 56 struct sched_atom **atoms; 57 58 pthread_t thread; 59 sem_t sleep_sem; 60 61 sem_t ready_for_work; 62 sem_t work_done_sem; 63 64 u64 cpu_usage; 65 }; 66 67 enum sched_event_type { 68 SCHED_EVENT_RUN, 69 SCHED_EVENT_SLEEP, 70 SCHED_EVENT_WAKEUP, 71 SCHED_EVENT_MIGRATION, 72 }; 73 74 struct sched_atom { 75 enum sched_event_type type; 76 int specific_wait; 77 u64 timestamp; 78 u64 duration; 79 unsigned long nr; 80 sem_t *wait_sem; 81 struct task_desc *wakee; 82 }; 83 84 #define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP" 85 86 /* task state bitmask, copied from include/linux/sched.h */ 87 #define TASK_RUNNING 0 88 #define TASK_INTERRUPTIBLE 1 89 #define TASK_UNINTERRUPTIBLE 2 90 #define __TASK_STOPPED 4 91 #define __TASK_TRACED 8 92 /* in tsk->exit_state */ 93 #define EXIT_DEAD 16 94 #define EXIT_ZOMBIE 32 95 #define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD) 96 /* in tsk->state again */ 97 #define TASK_DEAD 64 98 #define TASK_WAKEKILL 128 99 #define TASK_WAKING 256 100 #define TASK_PARKED 512 101 102 enum thread_state { 103 THREAD_SLEEPING = 0, 104 THREAD_WAIT_CPU, 105 THREAD_SCHED_IN, 106 THREAD_IGNORE 107 }; 108 109 struct work_atom { 110 struct list_head list; 111 enum thread_state state; 112 u64 sched_out_time; 113 u64 wake_up_time; 114 u64 sched_in_time; 115 u64 runtime; 116 }; 117 118 struct work_atoms { 119 struct list_head work_list; 120 struct thread *thread; 121 struct rb_node node; 122 u64 max_lat; 123 u64 max_lat_at; 124 u64 total_lat; 125 u64 nb_atoms; 126 u64 total_runtime; 127 int num_merged; 128 }; 129 130 typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *); 131 132 struct perf_sched; 133 134 struct trace_sched_handler { 135 int (*switch_event)(struct perf_sched *sched, struct perf_evsel *evsel, 136 struct perf_sample *sample, struct machine *machine); 137 138 int (*runtime_event)(struct perf_sched *sched, struct perf_evsel *evsel, 139 struct perf_sample *sample, struct machine *machine); 140 141 int (*wakeup_event)(struct perf_sched *sched, struct perf_evsel *evsel, 142 struct perf_sample *sample, struct machine *machine); 143 144 /* PERF_RECORD_FORK event, not sched_process_fork tracepoint */ 145 int (*fork_event)(struct perf_sched *sched, union perf_event *event, 146 struct machine *machine); 147 148 int (*migrate_task_event)(struct perf_sched *sched, 149 struct perf_evsel *evsel, 150 struct perf_sample *sample, 151 struct machine *machine); 152 }; 153 154 #define COLOR_PIDS PERF_COLOR_BLUE 155 #define COLOR_CPUS PERF_COLOR_BG_RED 156 157 struct perf_sched_map { 158 DECLARE_BITMAP(comp_cpus_mask, MAX_CPUS); 159 int *comp_cpus; 160 bool comp; 161 struct thread_map *color_pids; 162 const char *color_pids_str; 163 struct cpu_map *color_cpus; 164 const char *color_cpus_str; 165 struct cpu_map *cpus; 166 const char *cpus_str; 167 }; 168 169 struct perf_sched { 170 struct perf_tool tool; 171 const char *sort_order; 172 unsigned long nr_tasks; 173 struct task_desc **pid_to_task; 174 struct task_desc **tasks; 175 const struct trace_sched_handler *tp_handler; 176 pthread_mutex_t start_work_mutex; 177 pthread_mutex_t work_done_wait_mutex; 178 int profile_cpu; 179 /* 180 * Track the current task - that way we can know whether there's any 181 * weird events, such as a task being switched away that is not current. 182 */ 183 int max_cpu; 184 u32 curr_pid[MAX_CPUS]; 185 struct thread *curr_thread[MAX_CPUS]; 186 char next_shortname1; 187 char next_shortname2; 188 unsigned int replay_repeat; 189 unsigned long nr_run_events; 190 unsigned long nr_sleep_events; 191 unsigned long nr_wakeup_events; 192 unsigned long nr_sleep_corrections; 193 unsigned long nr_run_events_optimized; 194 unsigned long targetless_wakeups; 195 unsigned long multitarget_wakeups; 196 unsigned long nr_runs; 197 unsigned long nr_timestamps; 198 unsigned long nr_unordered_timestamps; 199 unsigned long nr_context_switch_bugs; 200 unsigned long nr_events; 201 unsigned long nr_lost_chunks; 202 unsigned long nr_lost_events; 203 u64 run_measurement_overhead; 204 u64 sleep_measurement_overhead; 205 u64 start_time; 206 u64 cpu_usage; 207 u64 runavg_cpu_usage; 208 u64 parent_cpu_usage; 209 u64 runavg_parent_cpu_usage; 210 u64 sum_runtime; 211 u64 sum_fluct; 212 u64 run_avg; 213 u64 all_runtime; 214 u64 all_count; 215 u64 cpu_last_switched[MAX_CPUS]; 216 struct rb_root_cached atom_root, sorted_atom_root, merged_atom_root; 217 struct list_head sort_list, cmp_pid; 218 bool force; 219 bool skip_merge; 220 struct perf_sched_map map; 221 222 /* options for timehist command */ 223 bool summary; 224 bool summary_only; 225 bool idle_hist; 226 bool show_callchain; 227 unsigned int max_stack; 228 bool show_cpu_visual; 229 bool show_wakeups; 230 bool show_next; 231 bool show_migrations; 232 bool show_state; 233 u64 skipped_samples; 234 const char *time_str; 235 struct perf_time_interval ptime; 236 struct perf_time_interval hist_time; 237 }; 238 239 /* per thread run time data */ 240 struct thread_runtime { 241 u64 last_time; /* time of previous sched in/out event */ 242 u64 dt_run; /* run time */ 243 u64 dt_sleep; /* time between CPU access by sleep (off cpu) */ 244 u64 dt_iowait; /* time between CPU access by iowait (off cpu) */ 245 u64 dt_preempt; /* time between CPU access by preempt (off cpu) */ 246 u64 dt_delay; /* time between wakeup and sched-in */ 247 u64 ready_to_run; /* time of wakeup */ 248 249 struct stats run_stats; 250 u64 total_run_time; 251 u64 total_sleep_time; 252 u64 total_iowait_time; 253 u64 total_preempt_time; 254 u64 total_delay_time; 255 256 int last_state; 257 258 char shortname[3]; 259 bool comm_changed; 260 261 u64 migrations; 262 }; 263 264 /* per event run time data */ 265 struct evsel_runtime { 266 u64 *last_time; /* time this event was last seen per cpu */ 267 u32 ncpu; /* highest cpu slot allocated */ 268 }; 269 270 /* per cpu idle time data */ 271 struct idle_thread_runtime { 272 struct thread_runtime tr; 273 struct thread *last_thread; 274 struct rb_root_cached sorted_root; 275 struct callchain_root callchain; 276 struct callchain_cursor cursor; 277 }; 278 279 /* track idle times per cpu */ 280 static struct thread **idle_threads; 281 static int idle_max_cpu; 282 static char idle_comm[] = "<idle>"; 283 284 static u64 get_nsecs(void) 285 { 286 struct timespec ts; 287 288 clock_gettime(CLOCK_MONOTONIC, &ts); 289 290 return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec; 291 } 292 293 static void burn_nsecs(struct perf_sched *sched, u64 nsecs) 294 { 295 u64 T0 = get_nsecs(), T1; 296 297 do { 298 T1 = get_nsecs(); 299 } while (T1 + sched->run_measurement_overhead < T0 + nsecs); 300 } 301 302 static void sleep_nsecs(u64 nsecs) 303 { 304 struct timespec ts; 305 306 ts.tv_nsec = nsecs % 999999999; 307 ts.tv_sec = nsecs / 999999999; 308 309 nanosleep(&ts, NULL); 310 } 311 312 static void calibrate_run_measurement_overhead(struct perf_sched *sched) 313 { 314 u64 T0, T1, delta, min_delta = NSEC_PER_SEC; 315 int i; 316 317 for (i = 0; i < 10; i++) { 318 T0 = get_nsecs(); 319 burn_nsecs(sched, 0); 320 T1 = get_nsecs(); 321 delta = T1-T0; 322 min_delta = min(min_delta, delta); 323 } 324 sched->run_measurement_overhead = min_delta; 325 326 printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta); 327 } 328 329 static void calibrate_sleep_measurement_overhead(struct perf_sched *sched) 330 { 331 u64 T0, T1, delta, min_delta = NSEC_PER_SEC; 332 int i; 333 334 for (i = 0; i < 10; i++) { 335 T0 = get_nsecs(); 336 sleep_nsecs(10000); 337 T1 = get_nsecs(); 338 delta = T1-T0; 339 min_delta = min(min_delta, delta); 340 } 341 min_delta -= 10000; 342 sched->sleep_measurement_overhead = min_delta; 343 344 printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta); 345 } 346 347 static struct sched_atom * 348 get_new_event(struct task_desc *task, u64 timestamp) 349 { 350 struct sched_atom *event = zalloc(sizeof(*event)); 351 unsigned long idx = task->nr_events; 352 size_t size; 353 354 event->timestamp = timestamp; 355 event->nr = idx; 356 357 task->nr_events++; 358 size = sizeof(struct sched_atom *) * task->nr_events; 359 task->atoms = realloc(task->atoms, size); 360 BUG_ON(!task->atoms); 361 362 task->atoms[idx] = event; 363 364 return event; 365 } 366 367 static struct sched_atom *last_event(struct task_desc *task) 368 { 369 if (!task->nr_events) 370 return NULL; 371 372 return task->atoms[task->nr_events - 1]; 373 } 374 375 static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task, 376 u64 timestamp, u64 duration) 377 { 378 struct sched_atom *event, *curr_event = last_event(task); 379 380 /* 381 * optimize an existing RUN event by merging this one 382 * to it: 383 */ 384 if (curr_event && curr_event->type == SCHED_EVENT_RUN) { 385 sched->nr_run_events_optimized++; 386 curr_event->duration += duration; 387 return; 388 } 389 390 event = get_new_event(task, timestamp); 391 392 event->type = SCHED_EVENT_RUN; 393 event->duration = duration; 394 395 sched->nr_run_events++; 396 } 397 398 static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task, 399 u64 timestamp, struct task_desc *wakee) 400 { 401 struct sched_atom *event, *wakee_event; 402 403 event = get_new_event(task, timestamp); 404 event->type = SCHED_EVENT_WAKEUP; 405 event->wakee = wakee; 406 407 wakee_event = last_event(wakee); 408 if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) { 409 sched->targetless_wakeups++; 410 return; 411 } 412 if (wakee_event->wait_sem) { 413 sched->multitarget_wakeups++; 414 return; 415 } 416 417 wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem)); 418 sem_init(wakee_event->wait_sem, 0, 0); 419 wakee_event->specific_wait = 1; 420 event->wait_sem = wakee_event->wait_sem; 421 422 sched->nr_wakeup_events++; 423 } 424 425 static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task, 426 u64 timestamp, u64 task_state __maybe_unused) 427 { 428 struct sched_atom *event = get_new_event(task, timestamp); 429 430 event->type = SCHED_EVENT_SLEEP; 431 432 sched->nr_sleep_events++; 433 } 434 435 static struct task_desc *register_pid(struct perf_sched *sched, 436 unsigned long pid, const char *comm) 437 { 438 struct task_desc *task; 439 static int pid_max; 440 441 if (sched->pid_to_task == NULL) { 442 if (sysctl__read_int("kernel/pid_max", &pid_max) < 0) 443 pid_max = MAX_PID; 444 BUG_ON((sched->pid_to_task = calloc(pid_max, sizeof(struct task_desc *))) == NULL); 445 } 446 if (pid >= (unsigned long)pid_max) { 447 BUG_ON((sched->pid_to_task = realloc(sched->pid_to_task, (pid + 1) * 448 sizeof(struct task_desc *))) == NULL); 449 while (pid >= (unsigned long)pid_max) 450 sched->pid_to_task[pid_max++] = NULL; 451 } 452 453 task = sched->pid_to_task[pid]; 454 455 if (task) 456 return task; 457 458 task = zalloc(sizeof(*task)); 459 task->pid = pid; 460 task->nr = sched->nr_tasks; 461 strcpy(task->comm, comm); 462 /* 463 * every task starts in sleeping state - this gets ignored 464 * if there's no wakeup pointing to this sleep state: 465 */ 466 add_sched_event_sleep(sched, task, 0, 0); 467 468 sched->pid_to_task[pid] = task; 469 sched->nr_tasks++; 470 sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *)); 471 BUG_ON(!sched->tasks); 472 sched->tasks[task->nr] = task; 473 474 if (verbose > 0) 475 printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm); 476 477 return task; 478 } 479 480 481 static void print_task_traces(struct perf_sched *sched) 482 { 483 struct task_desc *task; 484 unsigned long i; 485 486 for (i = 0; i < sched->nr_tasks; i++) { 487 task = sched->tasks[i]; 488 printf("task %6ld (%20s:%10ld), nr_events: %ld\n", 489 task->nr, task->comm, task->pid, task->nr_events); 490 } 491 } 492 493 static void add_cross_task_wakeups(struct perf_sched *sched) 494 { 495 struct task_desc *task1, *task2; 496 unsigned long i, j; 497 498 for (i = 0; i < sched->nr_tasks; i++) { 499 task1 = sched->tasks[i]; 500 j = i + 1; 501 if (j == sched->nr_tasks) 502 j = 0; 503 task2 = sched->tasks[j]; 504 add_sched_event_wakeup(sched, task1, 0, task2); 505 } 506 } 507 508 static void perf_sched__process_event(struct perf_sched *sched, 509 struct sched_atom *atom) 510 { 511 int ret = 0; 512 513 switch (atom->type) { 514 case SCHED_EVENT_RUN: 515 burn_nsecs(sched, atom->duration); 516 break; 517 case SCHED_EVENT_SLEEP: 518 if (atom->wait_sem) 519 ret = sem_wait(atom->wait_sem); 520 BUG_ON(ret); 521 break; 522 case SCHED_EVENT_WAKEUP: 523 if (atom->wait_sem) 524 ret = sem_post(atom->wait_sem); 525 BUG_ON(ret); 526 break; 527 case SCHED_EVENT_MIGRATION: 528 break; 529 default: 530 BUG_ON(1); 531 } 532 } 533 534 static u64 get_cpu_usage_nsec_parent(void) 535 { 536 struct rusage ru; 537 u64 sum; 538 int err; 539 540 err = getrusage(RUSAGE_SELF, &ru); 541 BUG_ON(err); 542 543 sum = ru.ru_utime.tv_sec * NSEC_PER_SEC + ru.ru_utime.tv_usec * NSEC_PER_USEC; 544 sum += ru.ru_stime.tv_sec * NSEC_PER_SEC + ru.ru_stime.tv_usec * NSEC_PER_USEC; 545 546 return sum; 547 } 548 549 static int self_open_counters(struct perf_sched *sched, unsigned long cur_task) 550 { 551 struct perf_event_attr attr; 552 char sbuf[STRERR_BUFSIZE], info[STRERR_BUFSIZE]; 553 int fd; 554 struct rlimit limit; 555 bool need_privilege = false; 556 557 memset(&attr, 0, sizeof(attr)); 558 559 attr.type = PERF_TYPE_SOFTWARE; 560 attr.config = PERF_COUNT_SW_TASK_CLOCK; 561 562 force_again: 563 fd = sys_perf_event_open(&attr, 0, -1, -1, 564 perf_event_open_cloexec_flag()); 565 566 if (fd < 0) { 567 if (errno == EMFILE) { 568 if (sched->force) { 569 BUG_ON(getrlimit(RLIMIT_NOFILE, &limit) == -1); 570 limit.rlim_cur += sched->nr_tasks - cur_task; 571 if (limit.rlim_cur > limit.rlim_max) { 572 limit.rlim_max = limit.rlim_cur; 573 need_privilege = true; 574 } 575 if (setrlimit(RLIMIT_NOFILE, &limit) == -1) { 576 if (need_privilege && errno == EPERM) 577 strcpy(info, "Need privilege\n"); 578 } else 579 goto force_again; 580 } else 581 strcpy(info, "Have a try with -f option\n"); 582 } 583 pr_err("Error: sys_perf_event_open() syscall returned " 584 "with %d (%s)\n%s", fd, 585 str_error_r(errno, sbuf, sizeof(sbuf)), info); 586 exit(EXIT_FAILURE); 587 } 588 return fd; 589 } 590 591 static u64 get_cpu_usage_nsec_self(int fd) 592 { 593 u64 runtime; 594 int ret; 595 596 ret = read(fd, &runtime, sizeof(runtime)); 597 BUG_ON(ret != sizeof(runtime)); 598 599 return runtime; 600 } 601 602 struct sched_thread_parms { 603 struct task_desc *task; 604 struct perf_sched *sched; 605 int fd; 606 }; 607 608 static void *thread_func(void *ctx) 609 { 610 struct sched_thread_parms *parms = ctx; 611 struct task_desc *this_task = parms->task; 612 struct perf_sched *sched = parms->sched; 613 u64 cpu_usage_0, cpu_usage_1; 614 unsigned long i, ret; 615 char comm2[22]; 616 int fd = parms->fd; 617 618 zfree(&parms); 619 620 sprintf(comm2, ":%s", this_task->comm); 621 prctl(PR_SET_NAME, comm2); 622 if (fd < 0) 623 return NULL; 624 again: 625 ret = sem_post(&this_task->ready_for_work); 626 BUG_ON(ret); 627 ret = pthread_mutex_lock(&sched->start_work_mutex); 628 BUG_ON(ret); 629 ret = pthread_mutex_unlock(&sched->start_work_mutex); 630 BUG_ON(ret); 631 632 cpu_usage_0 = get_cpu_usage_nsec_self(fd); 633 634 for (i = 0; i < this_task->nr_events; i++) { 635 this_task->curr_event = i; 636 perf_sched__process_event(sched, this_task->atoms[i]); 637 } 638 639 cpu_usage_1 = get_cpu_usage_nsec_self(fd); 640 this_task->cpu_usage = cpu_usage_1 - cpu_usage_0; 641 ret = sem_post(&this_task->work_done_sem); 642 BUG_ON(ret); 643 644 ret = pthread_mutex_lock(&sched->work_done_wait_mutex); 645 BUG_ON(ret); 646 ret = pthread_mutex_unlock(&sched->work_done_wait_mutex); 647 BUG_ON(ret); 648 649 goto again; 650 } 651 652 static void create_tasks(struct perf_sched *sched) 653 { 654 struct task_desc *task; 655 pthread_attr_t attr; 656 unsigned long i; 657 int err; 658 659 err = pthread_attr_init(&attr); 660 BUG_ON(err); 661 err = pthread_attr_setstacksize(&attr, 662 (size_t) max(16 * 1024, PTHREAD_STACK_MIN)); 663 BUG_ON(err); 664 err = pthread_mutex_lock(&sched->start_work_mutex); 665 BUG_ON(err); 666 err = pthread_mutex_lock(&sched->work_done_wait_mutex); 667 BUG_ON(err); 668 for (i = 0; i < sched->nr_tasks; i++) { 669 struct sched_thread_parms *parms = malloc(sizeof(*parms)); 670 BUG_ON(parms == NULL); 671 parms->task = task = sched->tasks[i]; 672 parms->sched = sched; 673 parms->fd = self_open_counters(sched, i); 674 sem_init(&task->sleep_sem, 0, 0); 675 sem_init(&task->ready_for_work, 0, 0); 676 sem_init(&task->work_done_sem, 0, 0); 677 task->curr_event = 0; 678 err = pthread_create(&task->thread, &attr, thread_func, parms); 679 BUG_ON(err); 680 } 681 } 682 683 static void wait_for_tasks(struct perf_sched *sched) 684 { 685 u64 cpu_usage_0, cpu_usage_1; 686 struct task_desc *task; 687 unsigned long i, ret; 688 689 sched->start_time = get_nsecs(); 690 sched->cpu_usage = 0; 691 pthread_mutex_unlock(&sched->work_done_wait_mutex); 692 693 for (i = 0; i < sched->nr_tasks; i++) { 694 task = sched->tasks[i]; 695 ret = sem_wait(&task->ready_for_work); 696 BUG_ON(ret); 697 sem_init(&task->ready_for_work, 0, 0); 698 } 699 ret = pthread_mutex_lock(&sched->work_done_wait_mutex); 700 BUG_ON(ret); 701 702 cpu_usage_0 = get_cpu_usage_nsec_parent(); 703 704 pthread_mutex_unlock(&sched->start_work_mutex); 705 706 for (i = 0; i < sched->nr_tasks; i++) { 707 task = sched->tasks[i]; 708 ret = sem_wait(&task->work_done_sem); 709 BUG_ON(ret); 710 sem_init(&task->work_done_sem, 0, 0); 711 sched->cpu_usage += task->cpu_usage; 712 task->cpu_usage = 0; 713 } 714 715 cpu_usage_1 = get_cpu_usage_nsec_parent(); 716 if (!sched->runavg_cpu_usage) 717 sched->runavg_cpu_usage = sched->cpu_usage; 718 sched->runavg_cpu_usage = (sched->runavg_cpu_usage * (sched->replay_repeat - 1) + sched->cpu_usage) / sched->replay_repeat; 719 720 sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0; 721 if (!sched->runavg_parent_cpu_usage) 722 sched->runavg_parent_cpu_usage = sched->parent_cpu_usage; 723 sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) + 724 sched->parent_cpu_usage)/sched->replay_repeat; 725 726 ret = pthread_mutex_lock(&sched->start_work_mutex); 727 BUG_ON(ret); 728 729 for (i = 0; i < sched->nr_tasks; i++) { 730 task = sched->tasks[i]; 731 sem_init(&task->sleep_sem, 0, 0); 732 task->curr_event = 0; 733 } 734 } 735 736 static void run_one_test(struct perf_sched *sched) 737 { 738 u64 T0, T1, delta, avg_delta, fluct; 739 740 T0 = get_nsecs(); 741 wait_for_tasks(sched); 742 T1 = get_nsecs(); 743 744 delta = T1 - T0; 745 sched->sum_runtime += delta; 746 sched->nr_runs++; 747 748 avg_delta = sched->sum_runtime / sched->nr_runs; 749 if (delta < avg_delta) 750 fluct = avg_delta - delta; 751 else 752 fluct = delta - avg_delta; 753 sched->sum_fluct += fluct; 754 if (!sched->run_avg) 755 sched->run_avg = delta; 756 sched->run_avg = (sched->run_avg * (sched->replay_repeat - 1) + delta) / sched->replay_repeat; 757 758 printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / NSEC_PER_MSEC); 759 760 printf("ravg: %0.2f, ", (double)sched->run_avg / NSEC_PER_MSEC); 761 762 printf("cpu: %0.2f / %0.2f", 763 (double)sched->cpu_usage / NSEC_PER_MSEC, (double)sched->runavg_cpu_usage / NSEC_PER_MSEC); 764 765 #if 0 766 /* 767 * rusage statistics done by the parent, these are less 768 * accurate than the sched->sum_exec_runtime based statistics: 769 */ 770 printf(" [%0.2f / %0.2f]", 771 (double)sched->parent_cpu_usage / NSEC_PER_MSEC, 772 (double)sched->runavg_parent_cpu_usage / NSEC_PER_MSEC); 773 #endif 774 775 printf("\n"); 776 777 if (sched->nr_sleep_corrections) 778 printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections); 779 sched->nr_sleep_corrections = 0; 780 } 781 782 static void test_calibrations(struct perf_sched *sched) 783 { 784 u64 T0, T1; 785 786 T0 = get_nsecs(); 787 burn_nsecs(sched, NSEC_PER_MSEC); 788 T1 = get_nsecs(); 789 790 printf("the run test took %" PRIu64 " nsecs\n", T1 - T0); 791 792 T0 = get_nsecs(); 793 sleep_nsecs(NSEC_PER_MSEC); 794 T1 = get_nsecs(); 795 796 printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0); 797 } 798 799 static int 800 replay_wakeup_event(struct perf_sched *sched, 801 struct perf_evsel *evsel, struct perf_sample *sample, 802 struct machine *machine __maybe_unused) 803 { 804 const char *comm = perf_evsel__strval(evsel, sample, "comm"); 805 const u32 pid = perf_evsel__intval(evsel, sample, "pid"); 806 struct task_desc *waker, *wakee; 807 808 if (verbose > 0) { 809 printf("sched_wakeup event %p\n", evsel); 810 811 printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid); 812 } 813 814 waker = register_pid(sched, sample->tid, "<unknown>"); 815 wakee = register_pid(sched, pid, comm); 816 817 add_sched_event_wakeup(sched, waker, sample->time, wakee); 818 return 0; 819 } 820 821 static int replay_switch_event(struct perf_sched *sched, 822 struct perf_evsel *evsel, 823 struct perf_sample *sample, 824 struct machine *machine __maybe_unused) 825 { 826 const char *prev_comm = perf_evsel__strval(evsel, sample, "prev_comm"), 827 *next_comm = perf_evsel__strval(evsel, sample, "next_comm"); 828 const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"), 829 next_pid = perf_evsel__intval(evsel, sample, "next_pid"); 830 const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state"); 831 struct task_desc *prev, __maybe_unused *next; 832 u64 timestamp0, timestamp = sample->time; 833 int cpu = sample->cpu; 834 s64 delta; 835 836 if (verbose > 0) 837 printf("sched_switch event %p\n", evsel); 838 839 if (cpu >= MAX_CPUS || cpu < 0) 840 return 0; 841 842 timestamp0 = sched->cpu_last_switched[cpu]; 843 if (timestamp0) 844 delta = timestamp - timestamp0; 845 else 846 delta = 0; 847 848 if (delta < 0) { 849 pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta); 850 return -1; 851 } 852 853 pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n", 854 prev_comm, prev_pid, next_comm, next_pid, delta); 855 856 prev = register_pid(sched, prev_pid, prev_comm); 857 next = register_pid(sched, next_pid, next_comm); 858 859 sched->cpu_last_switched[cpu] = timestamp; 860 861 add_sched_event_run(sched, prev, timestamp, delta); 862 add_sched_event_sleep(sched, prev, timestamp, prev_state); 863 864 return 0; 865 } 866 867 static int replay_fork_event(struct perf_sched *sched, 868 union perf_event *event, 869 struct machine *machine) 870 { 871 struct thread *child, *parent; 872 873 child = machine__findnew_thread(machine, event->fork.pid, 874 event->fork.tid); 875 parent = machine__findnew_thread(machine, event->fork.ppid, 876 event->fork.ptid); 877 878 if (child == NULL || parent == NULL) { 879 pr_debug("thread does not exist on fork event: child %p, parent %p\n", 880 child, parent); 881 goto out_put; 882 } 883 884 if (verbose > 0) { 885 printf("fork event\n"); 886 printf("... parent: %s/%d\n", thread__comm_str(parent), parent->tid); 887 printf("... child: %s/%d\n", thread__comm_str(child), child->tid); 888 } 889 890 register_pid(sched, parent->tid, thread__comm_str(parent)); 891 register_pid(sched, child->tid, thread__comm_str(child)); 892 out_put: 893 thread__put(child); 894 thread__put(parent); 895 return 0; 896 } 897 898 struct sort_dimension { 899 const char *name; 900 sort_fn_t cmp; 901 struct list_head list; 902 }; 903 904 /* 905 * handle runtime stats saved per thread 906 */ 907 static struct thread_runtime *thread__init_runtime(struct thread *thread) 908 { 909 struct thread_runtime *r; 910 911 r = zalloc(sizeof(struct thread_runtime)); 912 if (!r) 913 return NULL; 914 915 init_stats(&r->run_stats); 916 thread__set_priv(thread, r); 917 918 return r; 919 } 920 921 static struct thread_runtime *thread__get_runtime(struct thread *thread) 922 { 923 struct thread_runtime *tr; 924 925 tr = thread__priv(thread); 926 if (tr == NULL) { 927 tr = thread__init_runtime(thread); 928 if (tr == NULL) 929 pr_debug("Failed to malloc memory for runtime data.\n"); 930 } 931 932 return tr; 933 } 934 935 static int 936 thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r) 937 { 938 struct sort_dimension *sort; 939 int ret = 0; 940 941 BUG_ON(list_empty(list)); 942 943 list_for_each_entry(sort, list, list) { 944 ret = sort->cmp(l, r); 945 if (ret) 946 return ret; 947 } 948 949 return ret; 950 } 951 952 static struct work_atoms * 953 thread_atoms_search(struct rb_root_cached *root, struct thread *thread, 954 struct list_head *sort_list) 955 { 956 struct rb_node *node = root->rb_root.rb_node; 957 struct work_atoms key = { .thread = thread }; 958 959 while (node) { 960 struct work_atoms *atoms; 961 int cmp; 962 963 atoms = container_of(node, struct work_atoms, node); 964 965 cmp = thread_lat_cmp(sort_list, &key, atoms); 966 if (cmp > 0) 967 node = node->rb_left; 968 else if (cmp < 0) 969 node = node->rb_right; 970 else { 971 BUG_ON(thread != atoms->thread); 972 return atoms; 973 } 974 } 975 return NULL; 976 } 977 978 static void 979 __thread_latency_insert(struct rb_root_cached *root, struct work_atoms *data, 980 struct list_head *sort_list) 981 { 982 struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL; 983 bool leftmost = true; 984 985 while (*new) { 986 struct work_atoms *this; 987 int cmp; 988 989 this = container_of(*new, struct work_atoms, node); 990 parent = *new; 991 992 cmp = thread_lat_cmp(sort_list, data, this); 993 994 if (cmp > 0) 995 new = &((*new)->rb_left); 996 else { 997 new = &((*new)->rb_right); 998 leftmost = false; 999 } 1000 } 1001 1002 rb_link_node(&data->node, parent, new); 1003 rb_insert_color_cached(&data->node, root, leftmost); 1004 } 1005 1006 static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread) 1007 { 1008 struct work_atoms *atoms = zalloc(sizeof(*atoms)); 1009 if (!atoms) { 1010 pr_err("No memory at %s\n", __func__); 1011 return -1; 1012 } 1013 1014 atoms->thread = thread__get(thread); 1015 INIT_LIST_HEAD(&atoms->work_list); 1016 __thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid); 1017 return 0; 1018 } 1019 1020 static char sched_out_state(u64 prev_state) 1021 { 1022 const char *str = TASK_STATE_TO_CHAR_STR; 1023 1024 return str[prev_state]; 1025 } 1026 1027 static int 1028 add_sched_out_event(struct work_atoms *atoms, 1029 char run_state, 1030 u64 timestamp) 1031 { 1032 struct work_atom *atom = zalloc(sizeof(*atom)); 1033 if (!atom) { 1034 pr_err("Non memory at %s", __func__); 1035 return -1; 1036 } 1037 1038 atom->sched_out_time = timestamp; 1039 1040 if (run_state == 'R') { 1041 atom->state = THREAD_WAIT_CPU; 1042 atom->wake_up_time = atom->sched_out_time; 1043 } 1044 1045 list_add_tail(&atom->list, &atoms->work_list); 1046 return 0; 1047 } 1048 1049 static void 1050 add_runtime_event(struct work_atoms *atoms, u64 delta, 1051 u64 timestamp __maybe_unused) 1052 { 1053 struct work_atom *atom; 1054 1055 BUG_ON(list_empty(&atoms->work_list)); 1056 1057 atom = list_entry(atoms->work_list.prev, struct work_atom, list); 1058 1059 atom->runtime += delta; 1060 atoms->total_runtime += delta; 1061 } 1062 1063 static void 1064 add_sched_in_event(struct work_atoms *atoms, u64 timestamp) 1065 { 1066 struct work_atom *atom; 1067 u64 delta; 1068 1069 if (list_empty(&atoms->work_list)) 1070 return; 1071 1072 atom = list_entry(atoms->work_list.prev, struct work_atom, list); 1073 1074 if (atom->state != THREAD_WAIT_CPU) 1075 return; 1076 1077 if (timestamp < atom->wake_up_time) { 1078 atom->state = THREAD_IGNORE; 1079 return; 1080 } 1081 1082 atom->state = THREAD_SCHED_IN; 1083 atom->sched_in_time = timestamp; 1084 1085 delta = atom->sched_in_time - atom->wake_up_time; 1086 atoms->total_lat += delta; 1087 if (delta > atoms->max_lat) { 1088 atoms->max_lat = delta; 1089 atoms->max_lat_at = timestamp; 1090 } 1091 atoms->nb_atoms++; 1092 } 1093 1094 static int latency_switch_event(struct perf_sched *sched, 1095 struct perf_evsel *evsel, 1096 struct perf_sample *sample, 1097 struct machine *machine) 1098 { 1099 const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"), 1100 next_pid = perf_evsel__intval(evsel, sample, "next_pid"); 1101 const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state"); 1102 struct work_atoms *out_events, *in_events; 1103 struct thread *sched_out, *sched_in; 1104 u64 timestamp0, timestamp = sample->time; 1105 int cpu = sample->cpu, err = -1; 1106 s64 delta; 1107 1108 BUG_ON(cpu >= MAX_CPUS || cpu < 0); 1109 1110 timestamp0 = sched->cpu_last_switched[cpu]; 1111 sched->cpu_last_switched[cpu] = timestamp; 1112 if (timestamp0) 1113 delta = timestamp - timestamp0; 1114 else 1115 delta = 0; 1116 1117 if (delta < 0) { 1118 pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta); 1119 return -1; 1120 } 1121 1122 sched_out = machine__findnew_thread(machine, -1, prev_pid); 1123 sched_in = machine__findnew_thread(machine, -1, next_pid); 1124 if (sched_out == NULL || sched_in == NULL) 1125 goto out_put; 1126 1127 out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid); 1128 if (!out_events) { 1129 if (thread_atoms_insert(sched, sched_out)) 1130 goto out_put; 1131 out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid); 1132 if (!out_events) { 1133 pr_err("out-event: Internal tree error"); 1134 goto out_put; 1135 } 1136 } 1137 if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp)) 1138 return -1; 1139 1140 in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid); 1141 if (!in_events) { 1142 if (thread_atoms_insert(sched, sched_in)) 1143 goto out_put; 1144 in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid); 1145 if (!in_events) { 1146 pr_err("in-event: Internal tree error"); 1147 goto out_put; 1148 } 1149 /* 1150 * Take came in we have not heard about yet, 1151 * add in an initial atom in runnable state: 1152 */ 1153 if (add_sched_out_event(in_events, 'R', timestamp)) 1154 goto out_put; 1155 } 1156 add_sched_in_event(in_events, timestamp); 1157 err = 0; 1158 out_put: 1159 thread__put(sched_out); 1160 thread__put(sched_in); 1161 return err; 1162 } 1163 1164 static int latency_runtime_event(struct perf_sched *sched, 1165 struct perf_evsel *evsel, 1166 struct perf_sample *sample, 1167 struct machine *machine) 1168 { 1169 const u32 pid = perf_evsel__intval(evsel, sample, "pid"); 1170 const u64 runtime = perf_evsel__intval(evsel, sample, "runtime"); 1171 struct thread *thread = machine__findnew_thread(machine, -1, pid); 1172 struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid); 1173 u64 timestamp = sample->time; 1174 int cpu = sample->cpu, err = -1; 1175 1176 if (thread == NULL) 1177 return -1; 1178 1179 BUG_ON(cpu >= MAX_CPUS || cpu < 0); 1180 if (!atoms) { 1181 if (thread_atoms_insert(sched, thread)) 1182 goto out_put; 1183 atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid); 1184 if (!atoms) { 1185 pr_err("in-event: Internal tree error"); 1186 goto out_put; 1187 } 1188 if (add_sched_out_event(atoms, 'R', timestamp)) 1189 goto out_put; 1190 } 1191 1192 add_runtime_event(atoms, runtime, timestamp); 1193 err = 0; 1194 out_put: 1195 thread__put(thread); 1196 return err; 1197 } 1198 1199 static int latency_wakeup_event(struct perf_sched *sched, 1200 struct perf_evsel *evsel, 1201 struct perf_sample *sample, 1202 struct machine *machine) 1203 { 1204 const u32 pid = perf_evsel__intval(evsel, sample, "pid"); 1205 struct work_atoms *atoms; 1206 struct work_atom *atom; 1207 struct thread *wakee; 1208 u64 timestamp = sample->time; 1209 int err = -1; 1210 1211 wakee = machine__findnew_thread(machine, -1, pid); 1212 if (wakee == NULL) 1213 return -1; 1214 atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid); 1215 if (!atoms) { 1216 if (thread_atoms_insert(sched, wakee)) 1217 goto out_put; 1218 atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid); 1219 if (!atoms) { 1220 pr_err("wakeup-event: Internal tree error"); 1221 goto out_put; 1222 } 1223 if (add_sched_out_event(atoms, 'S', timestamp)) 1224 goto out_put; 1225 } 1226 1227 BUG_ON(list_empty(&atoms->work_list)); 1228 1229 atom = list_entry(atoms->work_list.prev, struct work_atom, list); 1230 1231 /* 1232 * As we do not guarantee the wakeup event happens when 1233 * task is out of run queue, also may happen when task is 1234 * on run queue and wakeup only change ->state to TASK_RUNNING, 1235 * then we should not set the ->wake_up_time when wake up a 1236 * task which is on run queue. 1237 * 1238 * You WILL be missing events if you've recorded only 1239 * one CPU, or are only looking at only one, so don't 1240 * skip in this case. 1241 */ 1242 if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING) 1243 goto out_ok; 1244 1245 sched->nr_timestamps++; 1246 if (atom->sched_out_time > timestamp) { 1247 sched->nr_unordered_timestamps++; 1248 goto out_ok; 1249 } 1250 1251 atom->state = THREAD_WAIT_CPU; 1252 atom->wake_up_time = timestamp; 1253 out_ok: 1254 err = 0; 1255 out_put: 1256 thread__put(wakee); 1257 return err; 1258 } 1259 1260 static int latency_migrate_task_event(struct perf_sched *sched, 1261 struct perf_evsel *evsel, 1262 struct perf_sample *sample, 1263 struct machine *machine) 1264 { 1265 const u32 pid = perf_evsel__intval(evsel, sample, "pid"); 1266 u64 timestamp = sample->time; 1267 struct work_atoms *atoms; 1268 struct work_atom *atom; 1269 struct thread *migrant; 1270 int err = -1; 1271 1272 /* 1273 * Only need to worry about migration when profiling one CPU. 1274 */ 1275 if (sched->profile_cpu == -1) 1276 return 0; 1277 1278 migrant = machine__findnew_thread(machine, -1, pid); 1279 if (migrant == NULL) 1280 return -1; 1281 atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid); 1282 if (!atoms) { 1283 if (thread_atoms_insert(sched, migrant)) 1284 goto out_put; 1285 register_pid(sched, migrant->tid, thread__comm_str(migrant)); 1286 atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid); 1287 if (!atoms) { 1288 pr_err("migration-event: Internal tree error"); 1289 goto out_put; 1290 } 1291 if (add_sched_out_event(atoms, 'R', timestamp)) 1292 goto out_put; 1293 } 1294 1295 BUG_ON(list_empty(&atoms->work_list)); 1296 1297 atom = list_entry(atoms->work_list.prev, struct work_atom, list); 1298 atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp; 1299 1300 sched->nr_timestamps++; 1301 1302 if (atom->sched_out_time > timestamp) 1303 sched->nr_unordered_timestamps++; 1304 err = 0; 1305 out_put: 1306 thread__put(migrant); 1307 return err; 1308 } 1309 1310 static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list) 1311 { 1312 int i; 1313 int ret; 1314 u64 avg; 1315 char max_lat_at[32]; 1316 1317 if (!work_list->nb_atoms) 1318 return; 1319 /* 1320 * Ignore idle threads: 1321 */ 1322 if (!strcmp(thread__comm_str(work_list->thread), "swapper")) 1323 return; 1324 1325 sched->all_runtime += work_list->total_runtime; 1326 sched->all_count += work_list->nb_atoms; 1327 1328 if (work_list->num_merged > 1) 1329 ret = printf(" %s:(%d) ", thread__comm_str(work_list->thread), work_list->num_merged); 1330 else 1331 ret = printf(" %s:%d ", thread__comm_str(work_list->thread), work_list->thread->tid); 1332 1333 for (i = 0; i < 24 - ret; i++) 1334 printf(" "); 1335 1336 avg = work_list->total_lat / work_list->nb_atoms; 1337 timestamp__scnprintf_usec(work_list->max_lat_at, max_lat_at, sizeof(max_lat_at)); 1338 1339 printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %13s s\n", 1340 (double)work_list->total_runtime / NSEC_PER_MSEC, 1341 work_list->nb_atoms, (double)avg / NSEC_PER_MSEC, 1342 (double)work_list->max_lat / NSEC_PER_MSEC, 1343 max_lat_at); 1344 } 1345 1346 static int pid_cmp(struct work_atoms *l, struct work_atoms *r) 1347 { 1348 if (l->thread == r->thread) 1349 return 0; 1350 if (l->thread->tid < r->thread->tid) 1351 return -1; 1352 if (l->thread->tid > r->thread->tid) 1353 return 1; 1354 return (int)(l->thread - r->thread); 1355 } 1356 1357 static int avg_cmp(struct work_atoms *l, struct work_atoms *r) 1358 { 1359 u64 avgl, avgr; 1360 1361 if (!l->nb_atoms) 1362 return -1; 1363 1364 if (!r->nb_atoms) 1365 return 1; 1366 1367 avgl = l->total_lat / l->nb_atoms; 1368 avgr = r->total_lat / r->nb_atoms; 1369 1370 if (avgl < avgr) 1371 return -1; 1372 if (avgl > avgr) 1373 return 1; 1374 1375 return 0; 1376 } 1377 1378 static int max_cmp(struct work_atoms *l, struct work_atoms *r) 1379 { 1380 if (l->max_lat < r->max_lat) 1381 return -1; 1382 if (l->max_lat > r->max_lat) 1383 return 1; 1384 1385 return 0; 1386 } 1387 1388 static int switch_cmp(struct work_atoms *l, struct work_atoms *r) 1389 { 1390 if (l->nb_atoms < r->nb_atoms) 1391 return -1; 1392 if (l->nb_atoms > r->nb_atoms) 1393 return 1; 1394 1395 return 0; 1396 } 1397 1398 static int runtime_cmp(struct work_atoms *l, struct work_atoms *r) 1399 { 1400 if (l->total_runtime < r->total_runtime) 1401 return -1; 1402 if (l->total_runtime > r->total_runtime) 1403 return 1; 1404 1405 return 0; 1406 } 1407 1408 static int sort_dimension__add(const char *tok, struct list_head *list) 1409 { 1410 size_t i; 1411 static struct sort_dimension avg_sort_dimension = { 1412 .name = "avg", 1413 .cmp = avg_cmp, 1414 }; 1415 static struct sort_dimension max_sort_dimension = { 1416 .name = "max", 1417 .cmp = max_cmp, 1418 }; 1419 static struct sort_dimension pid_sort_dimension = { 1420 .name = "pid", 1421 .cmp = pid_cmp, 1422 }; 1423 static struct sort_dimension runtime_sort_dimension = { 1424 .name = "runtime", 1425 .cmp = runtime_cmp, 1426 }; 1427 static struct sort_dimension switch_sort_dimension = { 1428 .name = "switch", 1429 .cmp = switch_cmp, 1430 }; 1431 struct sort_dimension *available_sorts[] = { 1432 &pid_sort_dimension, 1433 &avg_sort_dimension, 1434 &max_sort_dimension, 1435 &switch_sort_dimension, 1436 &runtime_sort_dimension, 1437 }; 1438 1439 for (i = 0; i < ARRAY_SIZE(available_sorts); i++) { 1440 if (!strcmp(available_sorts[i]->name, tok)) { 1441 list_add_tail(&available_sorts[i]->list, list); 1442 1443 return 0; 1444 } 1445 } 1446 1447 return -1; 1448 } 1449 1450 static void perf_sched__sort_lat(struct perf_sched *sched) 1451 { 1452 struct rb_node *node; 1453 struct rb_root_cached *root = &sched->atom_root; 1454 again: 1455 for (;;) { 1456 struct work_atoms *data; 1457 node = rb_first_cached(root); 1458 if (!node) 1459 break; 1460 1461 rb_erase_cached(node, root); 1462 data = rb_entry(node, struct work_atoms, node); 1463 __thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list); 1464 } 1465 if (root == &sched->atom_root) { 1466 root = &sched->merged_atom_root; 1467 goto again; 1468 } 1469 } 1470 1471 static int process_sched_wakeup_event(struct perf_tool *tool, 1472 struct perf_evsel *evsel, 1473 struct perf_sample *sample, 1474 struct machine *machine) 1475 { 1476 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); 1477 1478 if (sched->tp_handler->wakeup_event) 1479 return sched->tp_handler->wakeup_event(sched, evsel, sample, machine); 1480 1481 return 0; 1482 } 1483 1484 union map_priv { 1485 void *ptr; 1486 bool color; 1487 }; 1488 1489 static bool thread__has_color(struct thread *thread) 1490 { 1491 union map_priv priv = { 1492 .ptr = thread__priv(thread), 1493 }; 1494 1495 return priv.color; 1496 } 1497 1498 static struct thread* 1499 map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid, pid_t tid) 1500 { 1501 struct thread *thread = machine__findnew_thread(machine, pid, tid); 1502 union map_priv priv = { 1503 .color = false, 1504 }; 1505 1506 if (!sched->map.color_pids || !thread || thread__priv(thread)) 1507 return thread; 1508 1509 if (thread_map__has(sched->map.color_pids, tid)) 1510 priv.color = true; 1511 1512 thread__set_priv(thread, priv.ptr); 1513 return thread; 1514 } 1515 1516 static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel, 1517 struct perf_sample *sample, struct machine *machine) 1518 { 1519 const u32 next_pid = perf_evsel__intval(evsel, sample, "next_pid"); 1520 struct thread *sched_in; 1521 struct thread_runtime *tr; 1522 int new_shortname; 1523 u64 timestamp0, timestamp = sample->time; 1524 s64 delta; 1525 int i, this_cpu = sample->cpu; 1526 int cpus_nr; 1527 bool new_cpu = false; 1528 const char *color = PERF_COLOR_NORMAL; 1529 char stimestamp[32]; 1530 1531 BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0); 1532 1533 if (this_cpu > sched->max_cpu) 1534 sched->max_cpu = this_cpu; 1535 1536 if (sched->map.comp) { 1537 cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS); 1538 if (!test_and_set_bit(this_cpu, sched->map.comp_cpus_mask)) { 1539 sched->map.comp_cpus[cpus_nr++] = this_cpu; 1540 new_cpu = true; 1541 } 1542 } else 1543 cpus_nr = sched->max_cpu; 1544 1545 timestamp0 = sched->cpu_last_switched[this_cpu]; 1546 sched->cpu_last_switched[this_cpu] = timestamp; 1547 if (timestamp0) 1548 delta = timestamp - timestamp0; 1549 else 1550 delta = 0; 1551 1552 if (delta < 0) { 1553 pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta); 1554 return -1; 1555 } 1556 1557 sched_in = map__findnew_thread(sched, machine, -1, next_pid); 1558 if (sched_in == NULL) 1559 return -1; 1560 1561 tr = thread__get_runtime(sched_in); 1562 if (tr == NULL) { 1563 thread__put(sched_in); 1564 return -1; 1565 } 1566 1567 sched->curr_thread[this_cpu] = thread__get(sched_in); 1568 1569 printf(" "); 1570 1571 new_shortname = 0; 1572 if (!tr->shortname[0]) { 1573 if (!strcmp(thread__comm_str(sched_in), "swapper")) { 1574 /* 1575 * Don't allocate a letter-number for swapper:0 1576 * as a shortname. Instead, we use '.' for it. 1577 */ 1578 tr->shortname[0] = '.'; 1579 tr->shortname[1] = ' '; 1580 } else { 1581 tr->shortname[0] = sched->next_shortname1; 1582 tr->shortname[1] = sched->next_shortname2; 1583 1584 if (sched->next_shortname1 < 'Z') { 1585 sched->next_shortname1++; 1586 } else { 1587 sched->next_shortname1 = 'A'; 1588 if (sched->next_shortname2 < '9') 1589 sched->next_shortname2++; 1590 else 1591 sched->next_shortname2 = '0'; 1592 } 1593 } 1594 new_shortname = 1; 1595 } 1596 1597 for (i = 0; i < cpus_nr; i++) { 1598 int cpu = sched->map.comp ? sched->map.comp_cpus[i] : i; 1599 struct thread *curr_thread = sched->curr_thread[cpu]; 1600 struct thread_runtime *curr_tr; 1601 const char *pid_color = color; 1602 const char *cpu_color = color; 1603 1604 if (curr_thread && thread__has_color(curr_thread)) 1605 pid_color = COLOR_PIDS; 1606 1607 if (sched->map.cpus && !cpu_map__has(sched->map.cpus, cpu)) 1608 continue; 1609 1610 if (sched->map.color_cpus && cpu_map__has(sched->map.color_cpus, cpu)) 1611 cpu_color = COLOR_CPUS; 1612 1613 if (cpu != this_cpu) 1614 color_fprintf(stdout, color, " "); 1615 else 1616 color_fprintf(stdout, cpu_color, "*"); 1617 1618 if (sched->curr_thread[cpu]) { 1619 curr_tr = thread__get_runtime(sched->curr_thread[cpu]); 1620 if (curr_tr == NULL) { 1621 thread__put(sched_in); 1622 return -1; 1623 } 1624 color_fprintf(stdout, pid_color, "%2s ", curr_tr->shortname); 1625 } else 1626 color_fprintf(stdout, color, " "); 1627 } 1628 1629 if (sched->map.cpus && !cpu_map__has(sched->map.cpus, this_cpu)) 1630 goto out; 1631 1632 timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp)); 1633 color_fprintf(stdout, color, " %12s secs ", stimestamp); 1634 if (new_shortname || tr->comm_changed || (verbose > 0 && sched_in->tid)) { 1635 const char *pid_color = color; 1636 1637 if (thread__has_color(sched_in)) 1638 pid_color = COLOR_PIDS; 1639 1640 color_fprintf(stdout, pid_color, "%s => %s:%d", 1641 tr->shortname, thread__comm_str(sched_in), sched_in->tid); 1642 tr->comm_changed = false; 1643 } 1644 1645 if (sched->map.comp && new_cpu) 1646 color_fprintf(stdout, color, " (CPU %d)", this_cpu); 1647 1648 out: 1649 color_fprintf(stdout, color, "\n"); 1650 1651 thread__put(sched_in); 1652 1653 return 0; 1654 } 1655 1656 static int process_sched_switch_event(struct perf_tool *tool, 1657 struct perf_evsel *evsel, 1658 struct perf_sample *sample, 1659 struct machine *machine) 1660 { 1661 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); 1662 int this_cpu = sample->cpu, err = 0; 1663 u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"), 1664 next_pid = perf_evsel__intval(evsel, sample, "next_pid"); 1665 1666 if (sched->curr_pid[this_cpu] != (u32)-1) { 1667 /* 1668 * Are we trying to switch away a PID that is 1669 * not current? 1670 */ 1671 if (sched->curr_pid[this_cpu] != prev_pid) 1672 sched->nr_context_switch_bugs++; 1673 } 1674 1675 if (sched->tp_handler->switch_event) 1676 err = sched->tp_handler->switch_event(sched, evsel, sample, machine); 1677 1678 sched->curr_pid[this_cpu] = next_pid; 1679 return err; 1680 } 1681 1682 static int process_sched_runtime_event(struct perf_tool *tool, 1683 struct perf_evsel *evsel, 1684 struct perf_sample *sample, 1685 struct machine *machine) 1686 { 1687 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); 1688 1689 if (sched->tp_handler->runtime_event) 1690 return sched->tp_handler->runtime_event(sched, evsel, sample, machine); 1691 1692 return 0; 1693 } 1694 1695 static int perf_sched__process_fork_event(struct perf_tool *tool, 1696 union perf_event *event, 1697 struct perf_sample *sample, 1698 struct machine *machine) 1699 { 1700 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); 1701 1702 /* run the fork event through the perf machineruy */ 1703 perf_event__process_fork(tool, event, sample, machine); 1704 1705 /* and then run additional processing needed for this command */ 1706 if (sched->tp_handler->fork_event) 1707 return sched->tp_handler->fork_event(sched, event, machine); 1708 1709 return 0; 1710 } 1711 1712 static int process_sched_migrate_task_event(struct perf_tool *tool, 1713 struct perf_evsel *evsel, 1714 struct perf_sample *sample, 1715 struct machine *machine) 1716 { 1717 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); 1718 1719 if (sched->tp_handler->migrate_task_event) 1720 return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine); 1721 1722 return 0; 1723 } 1724 1725 typedef int (*tracepoint_handler)(struct perf_tool *tool, 1726 struct perf_evsel *evsel, 1727 struct perf_sample *sample, 1728 struct machine *machine); 1729 1730 static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused, 1731 union perf_event *event __maybe_unused, 1732 struct perf_sample *sample, 1733 struct perf_evsel *evsel, 1734 struct machine *machine) 1735 { 1736 int err = 0; 1737 1738 if (evsel->handler != NULL) { 1739 tracepoint_handler f = evsel->handler; 1740 err = f(tool, evsel, sample, machine); 1741 } 1742 1743 return err; 1744 } 1745 1746 static int perf_sched__process_comm(struct perf_tool *tool __maybe_unused, 1747 union perf_event *event, 1748 struct perf_sample *sample, 1749 struct machine *machine) 1750 { 1751 struct thread *thread; 1752 struct thread_runtime *tr; 1753 int err; 1754 1755 err = perf_event__process_comm(tool, event, sample, machine); 1756 if (err) 1757 return err; 1758 1759 thread = machine__find_thread(machine, sample->pid, sample->tid); 1760 if (!thread) { 1761 pr_err("Internal error: can't find thread\n"); 1762 return -1; 1763 } 1764 1765 tr = thread__get_runtime(thread); 1766 if (tr == NULL) { 1767 thread__put(thread); 1768 return -1; 1769 } 1770 1771 tr->comm_changed = true; 1772 thread__put(thread); 1773 1774 return 0; 1775 } 1776 1777 static int perf_sched__read_events(struct perf_sched *sched) 1778 { 1779 const struct perf_evsel_str_handler handlers[] = { 1780 { "sched:sched_switch", process_sched_switch_event, }, 1781 { "sched:sched_stat_runtime", process_sched_runtime_event, }, 1782 { "sched:sched_wakeup", process_sched_wakeup_event, }, 1783 { "sched:sched_wakeup_new", process_sched_wakeup_event, }, 1784 { "sched:sched_migrate_task", process_sched_migrate_task_event, }, 1785 }; 1786 struct perf_session *session; 1787 struct perf_data data = { 1788 .path = input_name, 1789 .mode = PERF_DATA_MODE_READ, 1790 .force = sched->force, 1791 }; 1792 int rc = -1; 1793 1794 session = perf_session__new(&data, false, &sched->tool); 1795 if (session == NULL) { 1796 pr_debug("No Memory for session\n"); 1797 return -1; 1798 } 1799 1800 symbol__init(&session->header.env); 1801 1802 if (perf_session__set_tracepoints_handlers(session, handlers)) 1803 goto out_delete; 1804 1805 if (perf_session__has_traces(session, "record -R")) { 1806 int err = perf_session__process_events(session); 1807 if (err) { 1808 pr_err("Failed to process events, error %d", err); 1809 goto out_delete; 1810 } 1811 1812 sched->nr_events = session->evlist->stats.nr_events[0]; 1813 sched->nr_lost_events = session->evlist->stats.total_lost; 1814 sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST]; 1815 } 1816 1817 rc = 0; 1818 out_delete: 1819 perf_session__delete(session); 1820 return rc; 1821 } 1822 1823 /* 1824 * scheduling times are printed as msec.usec 1825 */ 1826 static inline void print_sched_time(unsigned long long nsecs, int width) 1827 { 1828 unsigned long msecs; 1829 unsigned long usecs; 1830 1831 msecs = nsecs / NSEC_PER_MSEC; 1832 nsecs -= msecs * NSEC_PER_MSEC; 1833 usecs = nsecs / NSEC_PER_USEC; 1834 printf("%*lu.%03lu ", width, msecs, usecs); 1835 } 1836 1837 /* 1838 * returns runtime data for event, allocating memory for it the 1839 * first time it is used. 1840 */ 1841 static struct evsel_runtime *perf_evsel__get_runtime(struct perf_evsel *evsel) 1842 { 1843 struct evsel_runtime *r = evsel->priv; 1844 1845 if (r == NULL) { 1846 r = zalloc(sizeof(struct evsel_runtime)); 1847 evsel->priv = r; 1848 } 1849 1850 return r; 1851 } 1852 1853 /* 1854 * save last time event was seen per cpu 1855 */ 1856 static void perf_evsel__save_time(struct perf_evsel *evsel, 1857 u64 timestamp, u32 cpu) 1858 { 1859 struct evsel_runtime *r = perf_evsel__get_runtime(evsel); 1860 1861 if (r == NULL) 1862 return; 1863 1864 if ((cpu >= r->ncpu) || (r->last_time == NULL)) { 1865 int i, n = __roundup_pow_of_two(cpu+1); 1866 void *p = r->last_time; 1867 1868 p = realloc(r->last_time, n * sizeof(u64)); 1869 if (!p) 1870 return; 1871 1872 r->last_time = p; 1873 for (i = r->ncpu; i < n; ++i) 1874 r->last_time[i] = (u64) 0; 1875 1876 r->ncpu = n; 1877 } 1878 1879 r->last_time[cpu] = timestamp; 1880 } 1881 1882 /* returns last time this event was seen on the given cpu */ 1883 static u64 perf_evsel__get_time(struct perf_evsel *evsel, u32 cpu) 1884 { 1885 struct evsel_runtime *r = perf_evsel__get_runtime(evsel); 1886 1887 if ((r == NULL) || (r->last_time == NULL) || (cpu >= r->ncpu)) 1888 return 0; 1889 1890 return r->last_time[cpu]; 1891 } 1892 1893 static int comm_width = 30; 1894 1895 static char *timehist_get_commstr(struct thread *thread) 1896 { 1897 static char str[32]; 1898 const char *comm = thread__comm_str(thread); 1899 pid_t tid = thread->tid; 1900 pid_t pid = thread->pid_; 1901 int n; 1902 1903 if (pid == 0) 1904 n = scnprintf(str, sizeof(str), "%s", comm); 1905 1906 else if (tid != pid) 1907 n = scnprintf(str, sizeof(str), "%s[%d/%d]", comm, tid, pid); 1908 1909 else 1910 n = scnprintf(str, sizeof(str), "%s[%d]", comm, tid); 1911 1912 if (n > comm_width) 1913 comm_width = n; 1914 1915 return str; 1916 } 1917 1918 static void timehist_header(struct perf_sched *sched) 1919 { 1920 u32 ncpus = sched->max_cpu + 1; 1921 u32 i, j; 1922 1923 printf("%15s %6s ", "time", "cpu"); 1924 1925 if (sched->show_cpu_visual) { 1926 printf(" "); 1927 for (i = 0, j = 0; i < ncpus; ++i) { 1928 printf("%x", j++); 1929 if (j > 15) 1930 j = 0; 1931 } 1932 printf(" "); 1933 } 1934 1935 printf(" %-*s %9s %9s %9s", comm_width, 1936 "task name", "wait time", "sch delay", "run time"); 1937 1938 if (sched->show_state) 1939 printf(" %s", "state"); 1940 1941 printf("\n"); 1942 1943 /* 1944 * units row 1945 */ 1946 printf("%15s %-6s ", "", ""); 1947 1948 if (sched->show_cpu_visual) 1949 printf(" %*s ", ncpus, ""); 1950 1951 printf(" %-*s %9s %9s %9s", comm_width, 1952 "[tid/pid]", "(msec)", "(msec)", "(msec)"); 1953 1954 if (sched->show_state) 1955 printf(" %5s", ""); 1956 1957 printf("\n"); 1958 1959 /* 1960 * separator 1961 */ 1962 printf("%.15s %.6s ", graph_dotted_line, graph_dotted_line); 1963 1964 if (sched->show_cpu_visual) 1965 printf(" %.*s ", ncpus, graph_dotted_line); 1966 1967 printf(" %.*s %.9s %.9s %.9s", comm_width, 1968 graph_dotted_line, graph_dotted_line, graph_dotted_line, 1969 graph_dotted_line); 1970 1971 if (sched->show_state) 1972 printf(" %.5s", graph_dotted_line); 1973 1974 printf("\n"); 1975 } 1976 1977 static char task_state_char(struct thread *thread, int state) 1978 { 1979 static const char state_to_char[] = TASK_STATE_TO_CHAR_STR; 1980 unsigned bit = state ? ffs(state) : 0; 1981 1982 /* 'I' for idle */ 1983 if (thread->tid == 0) 1984 return 'I'; 1985 1986 return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?'; 1987 } 1988 1989 static void timehist_print_sample(struct perf_sched *sched, 1990 struct perf_evsel *evsel, 1991 struct perf_sample *sample, 1992 struct addr_location *al, 1993 struct thread *thread, 1994 u64 t, int state) 1995 { 1996 struct thread_runtime *tr = thread__priv(thread); 1997 const char *next_comm = perf_evsel__strval(evsel, sample, "next_comm"); 1998 const u32 next_pid = perf_evsel__intval(evsel, sample, "next_pid"); 1999 u32 max_cpus = sched->max_cpu + 1; 2000 char tstr[64]; 2001 char nstr[30]; 2002 u64 wait_time; 2003 2004 timestamp__scnprintf_usec(t, tstr, sizeof(tstr)); 2005 printf("%15s [%04d] ", tstr, sample->cpu); 2006 2007 if (sched->show_cpu_visual) { 2008 u32 i; 2009 char c; 2010 2011 printf(" "); 2012 for (i = 0; i < max_cpus; ++i) { 2013 /* flag idle times with 'i'; others are sched events */ 2014 if (i == sample->cpu) 2015 c = (thread->tid == 0) ? 'i' : 's'; 2016 else 2017 c = ' '; 2018 printf("%c", c); 2019 } 2020 printf(" "); 2021 } 2022 2023 printf(" %-*s ", comm_width, timehist_get_commstr(thread)); 2024 2025 wait_time = tr->dt_sleep + tr->dt_iowait + tr->dt_preempt; 2026 print_sched_time(wait_time, 6); 2027 2028 print_sched_time(tr->dt_delay, 6); 2029 print_sched_time(tr->dt_run, 6); 2030 2031 if (sched->show_state) 2032 printf(" %5c ", task_state_char(thread, state)); 2033 2034 if (sched->show_next) { 2035 snprintf(nstr, sizeof(nstr), "next: %s[%d]", next_comm, next_pid); 2036 printf(" %-*s", comm_width, nstr); 2037 } 2038 2039 if (sched->show_wakeups && !sched->show_next) 2040 printf(" %-*s", comm_width, ""); 2041 2042 if (thread->tid == 0) 2043 goto out; 2044 2045 if (sched->show_callchain) 2046 printf(" "); 2047 2048 sample__fprintf_sym(sample, al, 0, 2049 EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE | 2050 EVSEL__PRINT_CALLCHAIN_ARROW | 2051 EVSEL__PRINT_SKIP_IGNORED, 2052 &callchain_cursor, stdout); 2053 2054 out: 2055 printf("\n"); 2056 } 2057 2058 /* 2059 * Explanation of delta-time stats: 2060 * 2061 * t = time of current schedule out event 2062 * tprev = time of previous sched out event 2063 * also time of schedule-in event for current task 2064 * last_time = time of last sched change event for current task 2065 * (i.e, time process was last scheduled out) 2066 * ready_to_run = time of wakeup for current task 2067 * 2068 * -----|------------|------------|------------|------ 2069 * last ready tprev t 2070 * time to run 2071 * 2072 * |-------- dt_wait --------| 2073 * |- dt_delay -|-- dt_run --| 2074 * 2075 * dt_run = run time of current task 2076 * dt_wait = time between last schedule out event for task and tprev 2077 * represents time spent off the cpu 2078 * dt_delay = time between wakeup and schedule-in of task 2079 */ 2080 2081 static void timehist_update_runtime_stats(struct thread_runtime *r, 2082 u64 t, u64 tprev) 2083 { 2084 r->dt_delay = 0; 2085 r->dt_sleep = 0; 2086 r->dt_iowait = 0; 2087 r->dt_preempt = 0; 2088 r->dt_run = 0; 2089 2090 if (tprev) { 2091 r->dt_run = t - tprev; 2092 if (r->ready_to_run) { 2093 if (r->ready_to_run > tprev) 2094 pr_debug("time travel: wakeup time for task > previous sched_switch event\n"); 2095 else 2096 r->dt_delay = tprev - r->ready_to_run; 2097 } 2098 2099 if (r->last_time > tprev) 2100 pr_debug("time travel: last sched out time for task > previous sched_switch event\n"); 2101 else if (r->last_time) { 2102 u64 dt_wait = tprev - r->last_time; 2103 2104 if (r->last_state == TASK_RUNNING) 2105 r->dt_preempt = dt_wait; 2106 else if (r->last_state == TASK_UNINTERRUPTIBLE) 2107 r->dt_iowait = dt_wait; 2108 else 2109 r->dt_sleep = dt_wait; 2110 } 2111 } 2112 2113 update_stats(&r->run_stats, r->dt_run); 2114 2115 r->total_run_time += r->dt_run; 2116 r->total_delay_time += r->dt_delay; 2117 r->total_sleep_time += r->dt_sleep; 2118 r->total_iowait_time += r->dt_iowait; 2119 r->total_preempt_time += r->dt_preempt; 2120 } 2121 2122 static bool is_idle_sample(struct perf_sample *sample, 2123 struct perf_evsel *evsel) 2124 { 2125 /* pid 0 == swapper == idle task */ 2126 if (strcmp(perf_evsel__name(evsel), "sched:sched_switch") == 0) 2127 return perf_evsel__intval(evsel, sample, "prev_pid") == 0; 2128 2129 return sample->pid == 0; 2130 } 2131 2132 static void save_task_callchain(struct perf_sched *sched, 2133 struct perf_sample *sample, 2134 struct perf_evsel *evsel, 2135 struct machine *machine) 2136 { 2137 struct callchain_cursor *cursor = &callchain_cursor; 2138 struct thread *thread; 2139 2140 /* want main thread for process - has maps */ 2141 thread = machine__findnew_thread(machine, sample->pid, sample->pid); 2142 if (thread == NULL) { 2143 pr_debug("Failed to get thread for pid %d.\n", sample->pid); 2144 return; 2145 } 2146 2147 if (!sched->show_callchain || sample->callchain == NULL) 2148 return; 2149 2150 if (thread__resolve_callchain(thread, cursor, evsel, sample, 2151 NULL, NULL, sched->max_stack + 2) != 0) { 2152 if (verbose > 0) 2153 pr_err("Failed to resolve callchain. Skipping\n"); 2154 2155 return; 2156 } 2157 2158 callchain_cursor_commit(cursor); 2159 2160 while (true) { 2161 struct callchain_cursor_node *node; 2162 struct symbol *sym; 2163 2164 node = callchain_cursor_current(cursor); 2165 if (node == NULL) 2166 break; 2167 2168 sym = node->sym; 2169 if (sym) { 2170 if (!strcmp(sym->name, "schedule") || 2171 !strcmp(sym->name, "__schedule") || 2172 !strcmp(sym->name, "preempt_schedule")) 2173 sym->ignore = 1; 2174 } 2175 2176 callchain_cursor_advance(cursor); 2177 } 2178 } 2179 2180 static int init_idle_thread(struct thread *thread) 2181 { 2182 struct idle_thread_runtime *itr; 2183 2184 thread__set_comm(thread, idle_comm, 0); 2185 2186 itr = zalloc(sizeof(*itr)); 2187 if (itr == NULL) 2188 return -ENOMEM; 2189 2190 init_stats(&itr->tr.run_stats); 2191 callchain_init(&itr->callchain); 2192 callchain_cursor_reset(&itr->cursor); 2193 thread__set_priv(thread, itr); 2194 2195 return 0; 2196 } 2197 2198 /* 2199 * Track idle stats per cpu by maintaining a local thread 2200 * struct for the idle task on each cpu. 2201 */ 2202 static int init_idle_threads(int ncpu) 2203 { 2204 int i, ret; 2205 2206 idle_threads = zalloc(ncpu * sizeof(struct thread *)); 2207 if (!idle_threads) 2208 return -ENOMEM; 2209 2210 idle_max_cpu = ncpu; 2211 2212 /* allocate the actual thread struct if needed */ 2213 for (i = 0; i < ncpu; ++i) { 2214 idle_threads[i] = thread__new(0, 0); 2215 if (idle_threads[i] == NULL) 2216 return -ENOMEM; 2217 2218 ret = init_idle_thread(idle_threads[i]); 2219 if (ret < 0) 2220 return ret; 2221 } 2222 2223 return 0; 2224 } 2225 2226 static void free_idle_threads(void) 2227 { 2228 int i; 2229 2230 if (idle_threads == NULL) 2231 return; 2232 2233 for (i = 0; i < idle_max_cpu; ++i) { 2234 if ((idle_threads[i])) 2235 thread__delete(idle_threads[i]); 2236 } 2237 2238 free(idle_threads); 2239 } 2240 2241 static struct thread *get_idle_thread(int cpu) 2242 { 2243 /* 2244 * expand/allocate array of pointers to local thread 2245 * structs if needed 2246 */ 2247 if ((cpu >= idle_max_cpu) || (idle_threads == NULL)) { 2248 int i, j = __roundup_pow_of_two(cpu+1); 2249 void *p; 2250 2251 p = realloc(idle_threads, j * sizeof(struct thread *)); 2252 if (!p) 2253 return NULL; 2254 2255 idle_threads = (struct thread **) p; 2256 for (i = idle_max_cpu; i < j; ++i) 2257 idle_threads[i] = NULL; 2258 2259 idle_max_cpu = j; 2260 } 2261 2262 /* allocate a new thread struct if needed */ 2263 if (idle_threads[cpu] == NULL) { 2264 idle_threads[cpu] = thread__new(0, 0); 2265 2266 if (idle_threads[cpu]) { 2267 if (init_idle_thread(idle_threads[cpu]) < 0) 2268 return NULL; 2269 } 2270 } 2271 2272 return idle_threads[cpu]; 2273 } 2274 2275 static void save_idle_callchain(struct perf_sched *sched, 2276 struct idle_thread_runtime *itr, 2277 struct perf_sample *sample) 2278 { 2279 if (!sched->show_callchain || sample->callchain == NULL) 2280 return; 2281 2282 callchain_cursor__copy(&itr->cursor, &callchain_cursor); 2283 } 2284 2285 static struct thread *timehist_get_thread(struct perf_sched *sched, 2286 struct perf_sample *sample, 2287 struct machine *machine, 2288 struct perf_evsel *evsel) 2289 { 2290 struct thread *thread; 2291 2292 if (is_idle_sample(sample, evsel)) { 2293 thread = get_idle_thread(sample->cpu); 2294 if (thread == NULL) 2295 pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu); 2296 2297 } else { 2298 /* there were samples with tid 0 but non-zero pid */ 2299 thread = machine__findnew_thread(machine, sample->pid, 2300 sample->tid ?: sample->pid); 2301 if (thread == NULL) { 2302 pr_debug("Failed to get thread for tid %d. skipping sample.\n", 2303 sample->tid); 2304 } 2305 2306 save_task_callchain(sched, sample, evsel, machine); 2307 if (sched->idle_hist) { 2308 struct thread *idle; 2309 struct idle_thread_runtime *itr; 2310 2311 idle = get_idle_thread(sample->cpu); 2312 if (idle == NULL) { 2313 pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu); 2314 return NULL; 2315 } 2316 2317 itr = thread__priv(idle); 2318 if (itr == NULL) 2319 return NULL; 2320 2321 itr->last_thread = thread; 2322 2323 /* copy task callchain when entering to idle */ 2324 if (perf_evsel__intval(evsel, sample, "next_pid") == 0) 2325 save_idle_callchain(sched, itr, sample); 2326 } 2327 } 2328 2329 return thread; 2330 } 2331 2332 static bool timehist_skip_sample(struct perf_sched *sched, 2333 struct thread *thread, 2334 struct perf_evsel *evsel, 2335 struct perf_sample *sample) 2336 { 2337 bool rc = false; 2338 2339 if (thread__is_filtered(thread)) { 2340 rc = true; 2341 sched->skipped_samples++; 2342 } 2343 2344 if (sched->idle_hist) { 2345 if (strcmp(perf_evsel__name(evsel), "sched:sched_switch")) 2346 rc = true; 2347 else if (perf_evsel__intval(evsel, sample, "prev_pid") != 0 && 2348 perf_evsel__intval(evsel, sample, "next_pid") != 0) 2349 rc = true; 2350 } 2351 2352 return rc; 2353 } 2354 2355 static void timehist_print_wakeup_event(struct perf_sched *sched, 2356 struct perf_evsel *evsel, 2357 struct perf_sample *sample, 2358 struct machine *machine, 2359 struct thread *awakened) 2360 { 2361 struct thread *thread; 2362 char tstr[64]; 2363 2364 thread = machine__findnew_thread(machine, sample->pid, sample->tid); 2365 if (thread == NULL) 2366 return; 2367 2368 /* show wakeup unless both awakee and awaker are filtered */ 2369 if (timehist_skip_sample(sched, thread, evsel, sample) && 2370 timehist_skip_sample(sched, awakened, evsel, sample)) { 2371 return; 2372 } 2373 2374 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr)); 2375 printf("%15s [%04d] ", tstr, sample->cpu); 2376 if (sched->show_cpu_visual) 2377 printf(" %*s ", sched->max_cpu + 1, ""); 2378 2379 printf(" %-*s ", comm_width, timehist_get_commstr(thread)); 2380 2381 /* dt spacer */ 2382 printf(" %9s %9s %9s ", "", "", ""); 2383 2384 printf("awakened: %s", timehist_get_commstr(awakened)); 2385 2386 printf("\n"); 2387 } 2388 2389 static int timehist_sched_wakeup_event(struct perf_tool *tool, 2390 union perf_event *event __maybe_unused, 2391 struct perf_evsel *evsel, 2392 struct perf_sample *sample, 2393 struct machine *machine) 2394 { 2395 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); 2396 struct thread *thread; 2397 struct thread_runtime *tr = NULL; 2398 /* want pid of awakened task not pid in sample */ 2399 const u32 pid = perf_evsel__intval(evsel, sample, "pid"); 2400 2401 thread = machine__findnew_thread(machine, 0, pid); 2402 if (thread == NULL) 2403 return -1; 2404 2405 tr = thread__get_runtime(thread); 2406 if (tr == NULL) 2407 return -1; 2408 2409 if (tr->ready_to_run == 0) 2410 tr->ready_to_run = sample->time; 2411 2412 /* show wakeups if requested */ 2413 if (sched->show_wakeups && 2414 !perf_time__skip_sample(&sched->ptime, sample->time)) 2415 timehist_print_wakeup_event(sched, evsel, sample, machine, thread); 2416 2417 return 0; 2418 } 2419 2420 static void timehist_print_migration_event(struct perf_sched *sched, 2421 struct perf_evsel *evsel, 2422 struct perf_sample *sample, 2423 struct machine *machine, 2424 struct thread *migrated) 2425 { 2426 struct thread *thread; 2427 char tstr[64]; 2428 u32 max_cpus = sched->max_cpu + 1; 2429 u32 ocpu, dcpu; 2430 2431 if (sched->summary_only) 2432 return; 2433 2434 max_cpus = sched->max_cpu + 1; 2435 ocpu = perf_evsel__intval(evsel, sample, "orig_cpu"); 2436 dcpu = perf_evsel__intval(evsel, sample, "dest_cpu"); 2437 2438 thread = machine__findnew_thread(machine, sample->pid, sample->tid); 2439 if (thread == NULL) 2440 return; 2441 2442 if (timehist_skip_sample(sched, thread, evsel, sample) && 2443 timehist_skip_sample(sched, migrated, evsel, sample)) { 2444 return; 2445 } 2446 2447 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr)); 2448 printf("%15s [%04d] ", tstr, sample->cpu); 2449 2450 if (sched->show_cpu_visual) { 2451 u32 i; 2452 char c; 2453 2454 printf(" "); 2455 for (i = 0; i < max_cpus; ++i) { 2456 c = (i == sample->cpu) ? 'm' : ' '; 2457 printf("%c", c); 2458 } 2459 printf(" "); 2460 } 2461 2462 printf(" %-*s ", comm_width, timehist_get_commstr(thread)); 2463 2464 /* dt spacer */ 2465 printf(" %9s %9s %9s ", "", "", ""); 2466 2467 printf("migrated: %s", timehist_get_commstr(migrated)); 2468 printf(" cpu %d => %d", ocpu, dcpu); 2469 2470 printf("\n"); 2471 } 2472 2473 static int timehist_migrate_task_event(struct perf_tool *tool, 2474 union perf_event *event __maybe_unused, 2475 struct perf_evsel *evsel, 2476 struct perf_sample *sample, 2477 struct machine *machine) 2478 { 2479 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); 2480 struct thread *thread; 2481 struct thread_runtime *tr = NULL; 2482 /* want pid of migrated task not pid in sample */ 2483 const u32 pid = perf_evsel__intval(evsel, sample, "pid"); 2484 2485 thread = machine__findnew_thread(machine, 0, pid); 2486 if (thread == NULL) 2487 return -1; 2488 2489 tr = thread__get_runtime(thread); 2490 if (tr == NULL) 2491 return -1; 2492 2493 tr->migrations++; 2494 2495 /* show migrations if requested */ 2496 timehist_print_migration_event(sched, evsel, sample, machine, thread); 2497 2498 return 0; 2499 } 2500 2501 static int timehist_sched_change_event(struct perf_tool *tool, 2502 union perf_event *event, 2503 struct perf_evsel *evsel, 2504 struct perf_sample *sample, 2505 struct machine *machine) 2506 { 2507 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); 2508 struct perf_time_interval *ptime = &sched->ptime; 2509 struct addr_location al; 2510 struct thread *thread; 2511 struct thread_runtime *tr = NULL; 2512 u64 tprev, t = sample->time; 2513 int rc = 0; 2514 int state = perf_evsel__intval(evsel, sample, "prev_state"); 2515 2516 2517 if (machine__resolve(machine, &al, sample) < 0) { 2518 pr_err("problem processing %d event. skipping it\n", 2519 event->header.type); 2520 rc = -1; 2521 goto out; 2522 } 2523 2524 thread = timehist_get_thread(sched, sample, machine, evsel); 2525 if (thread == NULL) { 2526 rc = -1; 2527 goto out; 2528 } 2529 2530 if (timehist_skip_sample(sched, thread, evsel, sample)) 2531 goto out; 2532 2533 tr = thread__get_runtime(thread); 2534 if (tr == NULL) { 2535 rc = -1; 2536 goto out; 2537 } 2538 2539 tprev = perf_evsel__get_time(evsel, sample->cpu); 2540 2541 /* 2542 * If start time given: 2543 * - sample time is under window user cares about - skip sample 2544 * - tprev is under window user cares about - reset to start of window 2545 */ 2546 if (ptime->start && ptime->start > t) 2547 goto out; 2548 2549 if (tprev && ptime->start > tprev) 2550 tprev = ptime->start; 2551 2552 /* 2553 * If end time given: 2554 * - previous sched event is out of window - we are done 2555 * - sample time is beyond window user cares about - reset it 2556 * to close out stats for time window interest 2557 */ 2558 if (ptime->end) { 2559 if (tprev > ptime->end) 2560 goto out; 2561 2562 if (t > ptime->end) 2563 t = ptime->end; 2564 } 2565 2566 if (!sched->idle_hist || thread->tid == 0) { 2567 timehist_update_runtime_stats(tr, t, tprev); 2568 2569 if (sched->idle_hist) { 2570 struct idle_thread_runtime *itr = (void *)tr; 2571 struct thread_runtime *last_tr; 2572 2573 BUG_ON(thread->tid != 0); 2574 2575 if (itr->last_thread == NULL) 2576 goto out; 2577 2578 /* add current idle time as last thread's runtime */ 2579 last_tr = thread__get_runtime(itr->last_thread); 2580 if (last_tr == NULL) 2581 goto out; 2582 2583 timehist_update_runtime_stats(last_tr, t, tprev); 2584 /* 2585 * remove delta time of last thread as it's not updated 2586 * and otherwise it will show an invalid value next 2587 * time. we only care total run time and run stat. 2588 */ 2589 last_tr->dt_run = 0; 2590 last_tr->dt_delay = 0; 2591 last_tr->dt_sleep = 0; 2592 last_tr->dt_iowait = 0; 2593 last_tr->dt_preempt = 0; 2594 2595 if (itr->cursor.nr) 2596 callchain_append(&itr->callchain, &itr->cursor, t - tprev); 2597 2598 itr->last_thread = NULL; 2599 } 2600 } 2601 2602 if (!sched->summary_only) 2603 timehist_print_sample(sched, evsel, sample, &al, thread, t, state); 2604 2605 out: 2606 if (sched->hist_time.start == 0 && t >= ptime->start) 2607 sched->hist_time.start = t; 2608 if (ptime->end == 0 || t <= ptime->end) 2609 sched->hist_time.end = t; 2610 2611 if (tr) { 2612 /* time of this sched_switch event becomes last time task seen */ 2613 tr->last_time = sample->time; 2614 2615 /* last state is used to determine where to account wait time */ 2616 tr->last_state = state; 2617 2618 /* sched out event for task so reset ready to run time */ 2619 tr->ready_to_run = 0; 2620 } 2621 2622 perf_evsel__save_time(evsel, sample->time, sample->cpu); 2623 2624 return rc; 2625 } 2626 2627 static int timehist_sched_switch_event(struct perf_tool *tool, 2628 union perf_event *event, 2629 struct perf_evsel *evsel, 2630 struct perf_sample *sample, 2631 struct machine *machine __maybe_unused) 2632 { 2633 return timehist_sched_change_event(tool, event, evsel, sample, machine); 2634 } 2635 2636 static int process_lost(struct perf_tool *tool __maybe_unused, 2637 union perf_event *event, 2638 struct perf_sample *sample, 2639 struct machine *machine __maybe_unused) 2640 { 2641 char tstr[64]; 2642 2643 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr)); 2644 printf("%15s ", tstr); 2645 printf("lost %" PRIu64 " events on cpu %d\n", event->lost.lost, sample->cpu); 2646 2647 return 0; 2648 } 2649 2650 2651 static void print_thread_runtime(struct thread *t, 2652 struct thread_runtime *r) 2653 { 2654 double mean = avg_stats(&r->run_stats); 2655 float stddev; 2656 2657 printf("%*s %5d %9" PRIu64 " ", 2658 comm_width, timehist_get_commstr(t), t->ppid, 2659 (u64) r->run_stats.n); 2660 2661 print_sched_time(r->total_run_time, 8); 2662 stddev = rel_stddev_stats(stddev_stats(&r->run_stats), mean); 2663 print_sched_time(r->run_stats.min, 6); 2664 printf(" "); 2665 print_sched_time((u64) mean, 6); 2666 printf(" "); 2667 print_sched_time(r->run_stats.max, 6); 2668 printf(" "); 2669 printf("%5.2f", stddev); 2670 printf(" %5" PRIu64, r->migrations); 2671 printf("\n"); 2672 } 2673 2674 static void print_thread_waittime(struct thread *t, 2675 struct thread_runtime *r) 2676 { 2677 printf("%*s %5d %9" PRIu64 " ", 2678 comm_width, timehist_get_commstr(t), t->ppid, 2679 (u64) r->run_stats.n); 2680 2681 print_sched_time(r->total_run_time, 8); 2682 print_sched_time(r->total_sleep_time, 6); 2683 printf(" "); 2684 print_sched_time(r->total_iowait_time, 6); 2685 printf(" "); 2686 print_sched_time(r->total_preempt_time, 6); 2687 printf(" "); 2688 print_sched_time(r->total_delay_time, 6); 2689 printf("\n"); 2690 } 2691 2692 struct total_run_stats { 2693 struct perf_sched *sched; 2694 u64 sched_count; 2695 u64 task_count; 2696 u64 total_run_time; 2697 }; 2698 2699 static int __show_thread_runtime(struct thread *t, void *priv) 2700 { 2701 struct total_run_stats *stats = priv; 2702 struct thread_runtime *r; 2703 2704 if (thread__is_filtered(t)) 2705 return 0; 2706 2707 r = thread__priv(t); 2708 if (r && r->run_stats.n) { 2709 stats->task_count++; 2710 stats->sched_count += r->run_stats.n; 2711 stats->total_run_time += r->total_run_time; 2712 2713 if (stats->sched->show_state) 2714 print_thread_waittime(t, r); 2715 else 2716 print_thread_runtime(t, r); 2717 } 2718 2719 return 0; 2720 } 2721 2722 static int show_thread_runtime(struct thread *t, void *priv) 2723 { 2724 if (t->dead) 2725 return 0; 2726 2727 return __show_thread_runtime(t, priv); 2728 } 2729 2730 static int show_deadthread_runtime(struct thread *t, void *priv) 2731 { 2732 if (!t->dead) 2733 return 0; 2734 2735 return __show_thread_runtime(t, priv); 2736 } 2737 2738 static size_t callchain__fprintf_folded(FILE *fp, struct callchain_node *node) 2739 { 2740 const char *sep = " <- "; 2741 struct callchain_list *chain; 2742 size_t ret = 0; 2743 char bf[1024]; 2744 bool first; 2745 2746 if (node == NULL) 2747 return 0; 2748 2749 ret = callchain__fprintf_folded(fp, node->parent); 2750 first = (ret == 0); 2751 2752 list_for_each_entry(chain, &node->val, list) { 2753 if (chain->ip >= PERF_CONTEXT_MAX) 2754 continue; 2755 if (chain->ms.sym && chain->ms.sym->ignore) 2756 continue; 2757 ret += fprintf(fp, "%s%s", first ? "" : sep, 2758 callchain_list__sym_name(chain, bf, sizeof(bf), 2759 false)); 2760 first = false; 2761 } 2762 2763 return ret; 2764 } 2765 2766 static size_t timehist_print_idlehist_callchain(struct rb_root_cached *root) 2767 { 2768 size_t ret = 0; 2769 FILE *fp = stdout; 2770 struct callchain_node *chain; 2771 struct rb_node *rb_node = rb_first_cached(root); 2772 2773 printf(" %16s %8s %s\n", "Idle time (msec)", "Count", "Callchains"); 2774 printf(" %.16s %.8s %.50s\n", graph_dotted_line, graph_dotted_line, 2775 graph_dotted_line); 2776 2777 while (rb_node) { 2778 chain = rb_entry(rb_node, struct callchain_node, rb_node); 2779 rb_node = rb_next(rb_node); 2780 2781 ret += fprintf(fp, " "); 2782 print_sched_time(chain->hit, 12); 2783 ret += 16; /* print_sched_time returns 2nd arg + 4 */ 2784 ret += fprintf(fp, " %8d ", chain->count); 2785 ret += callchain__fprintf_folded(fp, chain); 2786 ret += fprintf(fp, "\n"); 2787 } 2788 2789 return ret; 2790 } 2791 2792 static void timehist_print_summary(struct perf_sched *sched, 2793 struct perf_session *session) 2794 { 2795 struct machine *m = &session->machines.host; 2796 struct total_run_stats totals; 2797 u64 task_count; 2798 struct thread *t; 2799 struct thread_runtime *r; 2800 int i; 2801 u64 hist_time = sched->hist_time.end - sched->hist_time.start; 2802 2803 memset(&totals, 0, sizeof(totals)); 2804 totals.sched = sched; 2805 2806 if (sched->idle_hist) { 2807 printf("\nIdle-time summary\n"); 2808 printf("%*s parent sched-out ", comm_width, "comm"); 2809 printf(" idle-time min-idle avg-idle max-idle stddev migrations\n"); 2810 } else if (sched->show_state) { 2811 printf("\nWait-time summary\n"); 2812 printf("%*s parent sched-in ", comm_width, "comm"); 2813 printf(" run-time sleep iowait preempt delay\n"); 2814 } else { 2815 printf("\nRuntime summary\n"); 2816 printf("%*s parent sched-in ", comm_width, "comm"); 2817 printf(" run-time min-run avg-run max-run stddev migrations\n"); 2818 } 2819 printf("%*s (count) ", comm_width, ""); 2820 printf(" (msec) (msec) (msec) (msec) %s\n", 2821 sched->show_state ? "(msec)" : "%"); 2822 printf("%.117s\n", graph_dotted_line); 2823 2824 machine__for_each_thread(m, show_thread_runtime, &totals); 2825 task_count = totals.task_count; 2826 if (!task_count) 2827 printf("<no still running tasks>\n"); 2828 2829 printf("\nTerminated tasks:\n"); 2830 machine__for_each_thread(m, show_deadthread_runtime, &totals); 2831 if (task_count == totals.task_count) 2832 printf("<no terminated tasks>\n"); 2833 2834 /* CPU idle stats not tracked when samples were skipped */ 2835 if (sched->skipped_samples && !sched->idle_hist) 2836 return; 2837 2838 printf("\nIdle stats:\n"); 2839 for (i = 0; i < idle_max_cpu; ++i) { 2840 t = idle_threads[i]; 2841 if (!t) 2842 continue; 2843 2844 r = thread__priv(t); 2845 if (r && r->run_stats.n) { 2846 totals.sched_count += r->run_stats.n; 2847 printf(" CPU %2d idle for ", i); 2848 print_sched_time(r->total_run_time, 6); 2849 printf(" msec (%6.2f%%)\n", 100.0 * r->total_run_time / hist_time); 2850 } else 2851 printf(" CPU %2d idle entire time window\n", i); 2852 } 2853 2854 if (sched->idle_hist && sched->show_callchain) { 2855 callchain_param.mode = CHAIN_FOLDED; 2856 callchain_param.value = CCVAL_PERIOD; 2857 2858 callchain_register_param(&callchain_param); 2859 2860 printf("\nIdle stats by callchain:\n"); 2861 for (i = 0; i < idle_max_cpu; ++i) { 2862 struct idle_thread_runtime *itr; 2863 2864 t = idle_threads[i]; 2865 if (!t) 2866 continue; 2867 2868 itr = thread__priv(t); 2869 if (itr == NULL) 2870 continue; 2871 2872 callchain_param.sort(&itr->sorted_root.rb_root, &itr->callchain, 2873 0, &callchain_param); 2874 2875 printf(" CPU %2d:", i); 2876 print_sched_time(itr->tr.total_run_time, 6); 2877 printf(" msec\n"); 2878 timehist_print_idlehist_callchain(&itr->sorted_root); 2879 printf("\n"); 2880 } 2881 } 2882 2883 printf("\n" 2884 " Total number of unique tasks: %" PRIu64 "\n" 2885 "Total number of context switches: %" PRIu64 "\n", 2886 totals.task_count, totals.sched_count); 2887 2888 printf(" Total run time (msec): "); 2889 print_sched_time(totals.total_run_time, 2); 2890 printf("\n"); 2891 2892 printf(" Total scheduling time (msec): "); 2893 print_sched_time(hist_time, 2); 2894 printf(" (x %d)\n", sched->max_cpu); 2895 } 2896 2897 typedef int (*sched_handler)(struct perf_tool *tool, 2898 union perf_event *event, 2899 struct perf_evsel *evsel, 2900 struct perf_sample *sample, 2901 struct machine *machine); 2902 2903 static int perf_timehist__process_sample(struct perf_tool *tool, 2904 union perf_event *event, 2905 struct perf_sample *sample, 2906 struct perf_evsel *evsel, 2907 struct machine *machine) 2908 { 2909 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); 2910 int err = 0; 2911 int this_cpu = sample->cpu; 2912 2913 if (this_cpu > sched->max_cpu) 2914 sched->max_cpu = this_cpu; 2915 2916 if (evsel->handler != NULL) { 2917 sched_handler f = evsel->handler; 2918 2919 err = f(tool, event, evsel, sample, machine); 2920 } 2921 2922 return err; 2923 } 2924 2925 static int timehist_check_attr(struct perf_sched *sched, 2926 struct perf_evlist *evlist) 2927 { 2928 struct perf_evsel *evsel; 2929 struct evsel_runtime *er; 2930 2931 list_for_each_entry(evsel, &evlist->entries, node) { 2932 er = perf_evsel__get_runtime(evsel); 2933 if (er == NULL) { 2934 pr_err("Failed to allocate memory for evsel runtime data\n"); 2935 return -1; 2936 } 2937 2938 if (sched->show_callchain && !evsel__has_callchain(evsel)) { 2939 pr_info("Samples do not have callchains.\n"); 2940 sched->show_callchain = 0; 2941 symbol_conf.use_callchain = 0; 2942 } 2943 } 2944 2945 return 0; 2946 } 2947 2948 static int perf_sched__timehist(struct perf_sched *sched) 2949 { 2950 const struct perf_evsel_str_handler handlers[] = { 2951 { "sched:sched_switch", timehist_sched_switch_event, }, 2952 { "sched:sched_wakeup", timehist_sched_wakeup_event, }, 2953 { "sched:sched_wakeup_new", timehist_sched_wakeup_event, }, 2954 }; 2955 const struct perf_evsel_str_handler migrate_handlers[] = { 2956 { "sched:sched_migrate_task", timehist_migrate_task_event, }, 2957 }; 2958 struct perf_data data = { 2959 .path = input_name, 2960 .mode = PERF_DATA_MODE_READ, 2961 .force = sched->force, 2962 }; 2963 2964 struct perf_session *session; 2965 struct perf_evlist *evlist; 2966 int err = -1; 2967 2968 /* 2969 * event handlers for timehist option 2970 */ 2971 sched->tool.sample = perf_timehist__process_sample; 2972 sched->tool.mmap = perf_event__process_mmap; 2973 sched->tool.comm = perf_event__process_comm; 2974 sched->tool.exit = perf_event__process_exit; 2975 sched->tool.fork = perf_event__process_fork; 2976 sched->tool.lost = process_lost; 2977 sched->tool.attr = perf_event__process_attr; 2978 sched->tool.tracing_data = perf_event__process_tracing_data; 2979 sched->tool.build_id = perf_event__process_build_id; 2980 2981 sched->tool.ordered_events = true; 2982 sched->tool.ordering_requires_timestamps = true; 2983 2984 symbol_conf.use_callchain = sched->show_callchain; 2985 2986 session = perf_session__new(&data, false, &sched->tool); 2987 if (session == NULL) 2988 return -ENOMEM; 2989 2990 evlist = session->evlist; 2991 2992 symbol__init(&session->header.env); 2993 2994 if (perf_time__parse_str(&sched->ptime, sched->time_str) != 0) { 2995 pr_err("Invalid time string\n"); 2996 return -EINVAL; 2997 } 2998 2999 if (timehist_check_attr(sched, evlist) != 0) 3000 goto out; 3001 3002 setup_pager(); 3003 3004 /* setup per-evsel handlers */ 3005 if (perf_session__set_tracepoints_handlers(session, handlers)) 3006 goto out; 3007 3008 /* sched_switch event at a minimum needs to exist */ 3009 if (!perf_evlist__find_tracepoint_by_name(session->evlist, 3010 "sched:sched_switch")) { 3011 pr_err("No sched_switch events found. Have you run 'perf sched record'?\n"); 3012 goto out; 3013 } 3014 3015 if (sched->show_migrations && 3016 perf_session__set_tracepoints_handlers(session, migrate_handlers)) 3017 goto out; 3018 3019 /* pre-allocate struct for per-CPU idle stats */ 3020 sched->max_cpu = session->header.env.nr_cpus_online; 3021 if (sched->max_cpu == 0) 3022 sched->max_cpu = 4; 3023 if (init_idle_threads(sched->max_cpu)) 3024 goto out; 3025 3026 /* summary_only implies summary option, but don't overwrite summary if set */ 3027 if (sched->summary_only) 3028 sched->summary = sched->summary_only; 3029 3030 if (!sched->summary_only) 3031 timehist_header(sched); 3032 3033 err = perf_session__process_events(session); 3034 if (err) { 3035 pr_err("Failed to process events, error %d", err); 3036 goto out; 3037 } 3038 3039 sched->nr_events = evlist->stats.nr_events[0]; 3040 sched->nr_lost_events = evlist->stats.total_lost; 3041 sched->nr_lost_chunks = evlist->stats.nr_events[PERF_RECORD_LOST]; 3042 3043 if (sched->summary) 3044 timehist_print_summary(sched, session); 3045 3046 out: 3047 free_idle_threads(); 3048 perf_session__delete(session); 3049 3050 return err; 3051 } 3052 3053 3054 static void print_bad_events(struct perf_sched *sched) 3055 { 3056 if (sched->nr_unordered_timestamps && sched->nr_timestamps) { 3057 printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n", 3058 (double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0, 3059 sched->nr_unordered_timestamps, sched->nr_timestamps); 3060 } 3061 if (sched->nr_lost_events && sched->nr_events) { 3062 printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n", 3063 (double)sched->nr_lost_events/(double)sched->nr_events * 100.0, 3064 sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks); 3065 } 3066 if (sched->nr_context_switch_bugs && sched->nr_timestamps) { 3067 printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)", 3068 (double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0, 3069 sched->nr_context_switch_bugs, sched->nr_timestamps); 3070 if (sched->nr_lost_events) 3071 printf(" (due to lost events?)"); 3072 printf("\n"); 3073 } 3074 } 3075 3076 static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *data) 3077 { 3078 struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL; 3079 struct work_atoms *this; 3080 const char *comm = thread__comm_str(data->thread), *this_comm; 3081 bool leftmost = true; 3082 3083 while (*new) { 3084 int cmp; 3085 3086 this = container_of(*new, struct work_atoms, node); 3087 parent = *new; 3088 3089 this_comm = thread__comm_str(this->thread); 3090 cmp = strcmp(comm, this_comm); 3091 if (cmp > 0) { 3092 new = &((*new)->rb_left); 3093 } else if (cmp < 0) { 3094 new = &((*new)->rb_right); 3095 leftmost = false; 3096 } else { 3097 this->num_merged++; 3098 this->total_runtime += data->total_runtime; 3099 this->nb_atoms += data->nb_atoms; 3100 this->total_lat += data->total_lat; 3101 list_splice(&data->work_list, &this->work_list); 3102 if (this->max_lat < data->max_lat) { 3103 this->max_lat = data->max_lat; 3104 this->max_lat_at = data->max_lat_at; 3105 } 3106 zfree(&data); 3107 return; 3108 } 3109 } 3110 3111 data->num_merged++; 3112 rb_link_node(&data->node, parent, new); 3113 rb_insert_color_cached(&data->node, root, leftmost); 3114 } 3115 3116 static void perf_sched__merge_lat(struct perf_sched *sched) 3117 { 3118 struct work_atoms *data; 3119 struct rb_node *node; 3120 3121 if (sched->skip_merge) 3122 return; 3123 3124 while ((node = rb_first_cached(&sched->atom_root))) { 3125 rb_erase_cached(node, &sched->atom_root); 3126 data = rb_entry(node, struct work_atoms, node); 3127 __merge_work_atoms(&sched->merged_atom_root, data); 3128 } 3129 } 3130 3131 static int perf_sched__lat(struct perf_sched *sched) 3132 { 3133 struct rb_node *next; 3134 3135 setup_pager(); 3136 3137 if (perf_sched__read_events(sched)) 3138 return -1; 3139 3140 perf_sched__merge_lat(sched); 3141 perf_sched__sort_lat(sched); 3142 3143 printf("\n -----------------------------------------------------------------------------------------------------------------\n"); 3144 printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms | Maximum delay at |\n"); 3145 printf(" -----------------------------------------------------------------------------------------------------------------\n"); 3146 3147 next = rb_first_cached(&sched->sorted_atom_root); 3148 3149 while (next) { 3150 struct work_atoms *work_list; 3151 3152 work_list = rb_entry(next, struct work_atoms, node); 3153 output_lat_thread(sched, work_list); 3154 next = rb_next(next); 3155 thread__zput(work_list->thread); 3156 } 3157 3158 printf(" -----------------------------------------------------------------------------------------------------------------\n"); 3159 printf(" TOTAL: |%11.3f ms |%9" PRIu64 " |\n", 3160 (double)sched->all_runtime / NSEC_PER_MSEC, sched->all_count); 3161 3162 printf(" ---------------------------------------------------\n"); 3163 3164 print_bad_events(sched); 3165 printf("\n"); 3166 3167 return 0; 3168 } 3169 3170 static int setup_map_cpus(struct perf_sched *sched) 3171 { 3172 struct cpu_map *map; 3173 3174 sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF); 3175 3176 if (sched->map.comp) { 3177 sched->map.comp_cpus = zalloc(sched->max_cpu * sizeof(int)); 3178 if (!sched->map.comp_cpus) 3179 return -1; 3180 } 3181 3182 if (!sched->map.cpus_str) 3183 return 0; 3184 3185 map = cpu_map__new(sched->map.cpus_str); 3186 if (!map) { 3187 pr_err("failed to get cpus map from %s\n", sched->map.cpus_str); 3188 return -1; 3189 } 3190 3191 sched->map.cpus = map; 3192 return 0; 3193 } 3194 3195 static int setup_color_pids(struct perf_sched *sched) 3196 { 3197 struct thread_map *map; 3198 3199 if (!sched->map.color_pids_str) 3200 return 0; 3201 3202 map = thread_map__new_by_tid_str(sched->map.color_pids_str); 3203 if (!map) { 3204 pr_err("failed to get thread map from %s\n", sched->map.color_pids_str); 3205 return -1; 3206 } 3207 3208 sched->map.color_pids = map; 3209 return 0; 3210 } 3211 3212 static int setup_color_cpus(struct perf_sched *sched) 3213 { 3214 struct cpu_map *map; 3215 3216 if (!sched->map.color_cpus_str) 3217 return 0; 3218 3219 map = cpu_map__new(sched->map.color_cpus_str); 3220 if (!map) { 3221 pr_err("failed to get thread map from %s\n", sched->map.color_cpus_str); 3222 return -1; 3223 } 3224 3225 sched->map.color_cpus = map; 3226 return 0; 3227 } 3228 3229 static int perf_sched__map(struct perf_sched *sched) 3230 { 3231 if (setup_map_cpus(sched)) 3232 return -1; 3233 3234 if (setup_color_pids(sched)) 3235 return -1; 3236 3237 if (setup_color_cpus(sched)) 3238 return -1; 3239 3240 setup_pager(); 3241 if (perf_sched__read_events(sched)) 3242 return -1; 3243 print_bad_events(sched); 3244 return 0; 3245 } 3246 3247 static int perf_sched__replay(struct perf_sched *sched) 3248 { 3249 unsigned long i; 3250 3251 calibrate_run_measurement_overhead(sched); 3252 calibrate_sleep_measurement_overhead(sched); 3253 3254 test_calibrations(sched); 3255 3256 if (perf_sched__read_events(sched)) 3257 return -1; 3258 3259 printf("nr_run_events: %ld\n", sched->nr_run_events); 3260 printf("nr_sleep_events: %ld\n", sched->nr_sleep_events); 3261 printf("nr_wakeup_events: %ld\n", sched->nr_wakeup_events); 3262 3263 if (sched->targetless_wakeups) 3264 printf("target-less wakeups: %ld\n", sched->targetless_wakeups); 3265 if (sched->multitarget_wakeups) 3266 printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups); 3267 if (sched->nr_run_events_optimized) 3268 printf("run atoms optimized: %ld\n", 3269 sched->nr_run_events_optimized); 3270 3271 print_task_traces(sched); 3272 add_cross_task_wakeups(sched); 3273 3274 create_tasks(sched); 3275 printf("------------------------------------------------------------\n"); 3276 for (i = 0; i < sched->replay_repeat; i++) 3277 run_one_test(sched); 3278 3279 return 0; 3280 } 3281 3282 static void setup_sorting(struct perf_sched *sched, const struct option *options, 3283 const char * const usage_msg[]) 3284 { 3285 char *tmp, *tok, *str = strdup(sched->sort_order); 3286 3287 for (tok = strtok_r(str, ", ", &tmp); 3288 tok; tok = strtok_r(NULL, ", ", &tmp)) { 3289 if (sort_dimension__add(tok, &sched->sort_list) < 0) { 3290 usage_with_options_msg(usage_msg, options, 3291 "Unknown --sort key: `%s'", tok); 3292 } 3293 } 3294 3295 free(str); 3296 3297 sort_dimension__add("pid", &sched->cmp_pid); 3298 } 3299 3300 static int __cmd_record(int argc, const char **argv) 3301 { 3302 unsigned int rec_argc, i, j; 3303 const char **rec_argv; 3304 const char * const record_args[] = { 3305 "record", 3306 "-a", 3307 "-R", 3308 "-m", "1024", 3309 "-c", "1", 3310 "-e", "sched:sched_switch", 3311 "-e", "sched:sched_stat_wait", 3312 "-e", "sched:sched_stat_sleep", 3313 "-e", "sched:sched_stat_iowait", 3314 "-e", "sched:sched_stat_runtime", 3315 "-e", "sched:sched_process_fork", 3316 "-e", "sched:sched_wakeup", 3317 "-e", "sched:sched_wakeup_new", 3318 "-e", "sched:sched_migrate_task", 3319 }; 3320 3321 rec_argc = ARRAY_SIZE(record_args) + argc - 1; 3322 rec_argv = calloc(rec_argc + 1, sizeof(char *)); 3323 3324 if (rec_argv == NULL) 3325 return -ENOMEM; 3326 3327 for (i = 0; i < ARRAY_SIZE(record_args); i++) 3328 rec_argv[i] = strdup(record_args[i]); 3329 3330 for (j = 1; j < (unsigned int)argc; j++, i++) 3331 rec_argv[i] = argv[j]; 3332 3333 BUG_ON(i != rec_argc); 3334 3335 return cmd_record(i, rec_argv); 3336 } 3337 3338 int cmd_sched(int argc, const char **argv) 3339 { 3340 static const char default_sort_order[] = "avg, max, switch, runtime"; 3341 struct perf_sched sched = { 3342 .tool = { 3343 .sample = perf_sched__process_tracepoint_sample, 3344 .comm = perf_sched__process_comm, 3345 .namespaces = perf_event__process_namespaces, 3346 .lost = perf_event__process_lost, 3347 .fork = perf_sched__process_fork_event, 3348 .ordered_events = true, 3349 }, 3350 .cmp_pid = LIST_HEAD_INIT(sched.cmp_pid), 3351 .sort_list = LIST_HEAD_INIT(sched.sort_list), 3352 .start_work_mutex = PTHREAD_MUTEX_INITIALIZER, 3353 .work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER, 3354 .sort_order = default_sort_order, 3355 .replay_repeat = 10, 3356 .profile_cpu = -1, 3357 .next_shortname1 = 'A', 3358 .next_shortname2 = '0', 3359 .skip_merge = 0, 3360 .show_callchain = 1, 3361 .max_stack = 5, 3362 }; 3363 const struct option sched_options[] = { 3364 OPT_STRING('i', "input", &input_name, "file", 3365 "input file name"), 3366 OPT_INCR('v', "verbose", &verbose, 3367 "be more verbose (show symbol address, etc)"), 3368 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, 3369 "dump raw trace in ASCII"), 3370 OPT_BOOLEAN('f', "force", &sched.force, "don't complain, do it"), 3371 OPT_END() 3372 }; 3373 const struct option latency_options[] = { 3374 OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]", 3375 "sort by key(s): runtime, switch, avg, max"), 3376 OPT_INTEGER('C', "CPU", &sched.profile_cpu, 3377 "CPU to profile on"), 3378 OPT_BOOLEAN('p', "pids", &sched.skip_merge, 3379 "latency stats per pid instead of per comm"), 3380 OPT_PARENT(sched_options) 3381 }; 3382 const struct option replay_options[] = { 3383 OPT_UINTEGER('r', "repeat", &sched.replay_repeat, 3384 "repeat the workload replay N times (-1: infinite)"), 3385 OPT_PARENT(sched_options) 3386 }; 3387 const struct option map_options[] = { 3388 OPT_BOOLEAN(0, "compact", &sched.map.comp, 3389 "map output in compact mode"), 3390 OPT_STRING(0, "color-pids", &sched.map.color_pids_str, "pids", 3391 "highlight given pids in map"), 3392 OPT_STRING(0, "color-cpus", &sched.map.color_cpus_str, "cpus", 3393 "highlight given CPUs in map"), 3394 OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus", 3395 "display given CPUs in map"), 3396 OPT_PARENT(sched_options) 3397 }; 3398 const struct option timehist_options[] = { 3399 OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, 3400 "file", "vmlinux pathname"), 3401 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, 3402 "file", "kallsyms pathname"), 3403 OPT_BOOLEAN('g', "call-graph", &sched.show_callchain, 3404 "Display call chains if present (default on)"), 3405 OPT_UINTEGER(0, "max-stack", &sched.max_stack, 3406 "Maximum number of functions to display backtrace."), 3407 OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", 3408 "Look for files with symbols relative to this directory"), 3409 OPT_BOOLEAN('s', "summary", &sched.summary_only, 3410 "Show only syscall summary with statistics"), 3411 OPT_BOOLEAN('S', "with-summary", &sched.summary, 3412 "Show all syscalls and summary with statistics"), 3413 OPT_BOOLEAN('w', "wakeups", &sched.show_wakeups, "Show wakeup events"), 3414 OPT_BOOLEAN('n', "next", &sched.show_next, "Show next task"), 3415 OPT_BOOLEAN('M', "migrations", &sched.show_migrations, "Show migration events"), 3416 OPT_BOOLEAN('V', "cpu-visual", &sched.show_cpu_visual, "Add CPU visual"), 3417 OPT_BOOLEAN('I', "idle-hist", &sched.idle_hist, "Show idle events only"), 3418 OPT_STRING(0, "time", &sched.time_str, "str", 3419 "Time span for analysis (start,stop)"), 3420 OPT_BOOLEAN(0, "state", &sched.show_state, "Show task state when sched-out"), 3421 OPT_STRING('p', "pid", &symbol_conf.pid_list_str, "pid[,pid...]", 3422 "analyze events only for given process id(s)"), 3423 OPT_STRING('t', "tid", &symbol_conf.tid_list_str, "tid[,tid...]", 3424 "analyze events only for given thread id(s)"), 3425 OPT_PARENT(sched_options) 3426 }; 3427 3428 const char * const latency_usage[] = { 3429 "perf sched latency [<options>]", 3430 NULL 3431 }; 3432 const char * const replay_usage[] = { 3433 "perf sched replay [<options>]", 3434 NULL 3435 }; 3436 const char * const map_usage[] = { 3437 "perf sched map [<options>]", 3438 NULL 3439 }; 3440 const char * const timehist_usage[] = { 3441 "perf sched timehist [<options>]", 3442 NULL 3443 }; 3444 const char *const sched_subcommands[] = { "record", "latency", "map", 3445 "replay", "script", 3446 "timehist", NULL }; 3447 const char *sched_usage[] = { 3448 NULL, 3449 NULL 3450 }; 3451 struct trace_sched_handler lat_ops = { 3452 .wakeup_event = latency_wakeup_event, 3453 .switch_event = latency_switch_event, 3454 .runtime_event = latency_runtime_event, 3455 .migrate_task_event = latency_migrate_task_event, 3456 }; 3457 struct trace_sched_handler map_ops = { 3458 .switch_event = map_switch_event, 3459 }; 3460 struct trace_sched_handler replay_ops = { 3461 .wakeup_event = replay_wakeup_event, 3462 .switch_event = replay_switch_event, 3463 .fork_event = replay_fork_event, 3464 }; 3465 unsigned int i; 3466 3467 for (i = 0; i < ARRAY_SIZE(sched.curr_pid); i++) 3468 sched.curr_pid[i] = -1; 3469 3470 argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands, 3471 sched_usage, PARSE_OPT_STOP_AT_NON_OPTION); 3472 if (!argc) 3473 usage_with_options(sched_usage, sched_options); 3474 3475 /* 3476 * Aliased to 'perf script' for now: 3477 */ 3478 if (!strcmp(argv[0], "script")) 3479 return cmd_script(argc, argv); 3480 3481 if (!strncmp(argv[0], "rec", 3)) { 3482 return __cmd_record(argc, argv); 3483 } else if (!strncmp(argv[0], "lat", 3)) { 3484 sched.tp_handler = &lat_ops; 3485 if (argc > 1) { 3486 argc = parse_options(argc, argv, latency_options, latency_usage, 0); 3487 if (argc) 3488 usage_with_options(latency_usage, latency_options); 3489 } 3490 setup_sorting(&sched, latency_options, latency_usage); 3491 return perf_sched__lat(&sched); 3492 } else if (!strcmp(argv[0], "map")) { 3493 if (argc) { 3494 argc = parse_options(argc, argv, map_options, map_usage, 0); 3495 if (argc) 3496 usage_with_options(map_usage, map_options); 3497 } 3498 sched.tp_handler = &map_ops; 3499 setup_sorting(&sched, latency_options, latency_usage); 3500 return perf_sched__map(&sched); 3501 } else if (!strncmp(argv[0], "rep", 3)) { 3502 sched.tp_handler = &replay_ops; 3503 if (argc) { 3504 argc = parse_options(argc, argv, replay_options, replay_usage, 0); 3505 if (argc) 3506 usage_with_options(replay_usage, replay_options); 3507 } 3508 return perf_sched__replay(&sched); 3509 } else if (!strcmp(argv[0], "timehist")) { 3510 if (argc) { 3511 argc = parse_options(argc, argv, timehist_options, 3512 timehist_usage, 0); 3513 if (argc) 3514 usage_with_options(timehist_usage, timehist_options); 3515 } 3516 if ((sched.show_wakeups || sched.show_next) && 3517 sched.summary_only) { 3518 pr_err(" Error: -s and -[n|w] are mutually exclusive.\n"); 3519 parse_options_usage(timehist_usage, timehist_options, "s", true); 3520 if (sched.show_wakeups) 3521 parse_options_usage(NULL, timehist_options, "w", true); 3522 if (sched.show_next) 3523 parse_options_usage(NULL, timehist_options, "n", true); 3524 return -EINVAL; 3525 } 3526 3527 return perf_sched__timehist(&sched); 3528 } else { 3529 usage_with_options(sched_usage, sched_options); 3530 } 3531 3532 return 0; 3533 } 3534