// SPDX-License-Identifier: GPL-2.0
#include "builtin.h"
#include "perf.h"
#include "perf-sys.h"

#include "util/cpumap.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/evsel_fprintf.h"
#include "util/mutex.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/cloexec.h"
#include "util/thread_map.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/string2.h"
#include "util/callchain.h"
#include "util/time-utils.h"

#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "util/trace-event.h"

#include "util/debug.h"
#include "util/event.h"
#include "util/util.h"

#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/zalloc.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <inttypes.h>

#include <errno.h>
#include <semaphore.h>
#include <pthread.h>
#include <math.h>
#include <api/fs/fs.h>
#include <perf/cpumap.h>
#include <linux/time64.h>
#include <linux/err.h>

#include <linux/ctype.h>

#define PR_SET_NAME 15 /* Set process name */
#define MAX_CPUS 4096
#define COMM_LEN 20
#define SYM_LEN 129
#define MAX_PID 1024000
#define MAX_PRIO 140

static const char *cpu_list;
static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);

struct sched_atom;

struct task_desc {
	unsigned long nr;
	unsigned long pid;
	char comm[COMM_LEN];

	unsigned long nr_events;
	unsigned long curr_event;
	struct sched_atom **atoms;

	pthread_t thread;

	sem_t ready_for_work;
	sem_t work_done_sem;

	u64 cpu_usage;
};

enum sched_event_type {
	SCHED_EVENT_RUN,
	SCHED_EVENT_SLEEP,
	SCHED_EVENT_WAKEUP,
};

struct sched_atom {
	enum sched_event_type type;
	u64 timestamp;
	u64 duration;
	unsigned long nr;
	sem_t *wait_sem;
	struct task_desc *wakee;
};

enum thread_state {
	THREAD_SLEEPING = 0,
	THREAD_WAIT_CPU,
	THREAD_SCHED_IN,
	THREAD_IGNORE
};

struct work_atom {
	struct list_head list;
	enum thread_state state;
	u64 sched_out_time;
	u64 wake_up_time;
	u64 sched_in_time;
	u64 runtime;
};

struct work_atoms {
	struct list_head work_list;
	struct thread *thread;
	struct rb_node node;
	u64 max_lat;
	u64 max_lat_start;
	u64 max_lat_end;
	u64 total_lat;
	u64 nb_atoms;
	u64 total_runtime;
	int num_merged;
};

typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);

struct perf_sched;

struct trace_sched_handler {
	int (*switch_event)(struct perf_sched *sched, struct evsel *evsel,
			    struct perf_sample *sample, struct machine *machine);

	int (*runtime_event)(struct perf_sched *sched, struct evsel *evsel,
			     struct perf_sample *sample, struct machine *machine);

	int (*wakeup_event)(struct perf_sched *sched, struct evsel *evsel,
			    struct perf_sample *sample, struct machine *machine);

	/* PERF_RECORD_FORK event, not sched_process_fork tracepoint */
	int (*fork_event)(struct perf_sched *sched, union perf_event *event,
			  struct machine *machine);

	int (*migrate_task_event)(struct perf_sched *sched,
				  struct evsel *evsel,
				  struct perf_sample *sample,
				  struct machine *machine);
};

#define COLOR_PIDS PERF_COLOR_BLUE
#define COLOR_CPUS PERF_COLOR_BG_RED

struct perf_sched_map {
	DECLARE_BITMAP(comp_cpus_mask, MAX_CPUS);
	struct perf_cpu *comp_cpus;
	bool comp;
	struct perf_thread_map *color_pids;
	const char *color_pids_str;
	struct perf_cpu_map *color_cpus;
	const char *color_cpus_str;
	const char *task_name;
	struct strlist *task_names;
	bool fuzzy;
	struct perf_cpu_map *cpus;
	const char *cpus_str;
};

struct perf_sched {
	struct perf_tool tool;
	const char *sort_order;
	unsigned long nr_tasks;
	struct task_desc **pid_to_task;
	struct task_desc **tasks;
	const struct trace_sched_handler *tp_handler;
	struct mutex start_work_mutex;
	struct mutex work_done_wait_mutex;
	int profile_cpu;
	/*
	 * Track the current task - that way we can know whether there's any
	 * weird events, such as a task being switched away that is not current.
	 */
	struct perf_cpu max_cpu;
	u32 *curr_pid;
	struct thread **curr_thread;
	struct thread **curr_out_thread;
	char next_shortname1;
	char next_shortname2;
	unsigned int replay_repeat;
	unsigned long nr_run_events;
	unsigned long nr_sleep_events;
	unsigned long nr_wakeup_events;
	unsigned long nr_sleep_corrections;
	unsigned long nr_run_events_optimized;
	unsigned long targetless_wakeups;
	unsigned long multitarget_wakeups;
	unsigned long nr_runs;
	unsigned long nr_timestamps;
	unsigned long nr_unordered_timestamps;
	unsigned long nr_context_switch_bugs;
	unsigned long nr_events;
	unsigned long nr_lost_chunks;
	unsigned long nr_lost_events;
	u64 run_measurement_overhead;
	u64 sleep_measurement_overhead;
	u64 start_time;
	u64 cpu_usage;
	u64 runavg_cpu_usage;
	u64 parent_cpu_usage;
	u64 runavg_parent_cpu_usage;
	u64 sum_runtime;
	u64 sum_fluct;
	u64 run_avg;
	u64 all_runtime;
	u64 all_count;
	u64 *cpu_last_switched;
	struct rb_root_cached atom_root, sorted_atom_root, merged_atom_root;
	struct list_head sort_list, cmp_pid;
	bool force;
	bool skip_merge;
	struct perf_sched_map map;

	/* options for timehist command */
	bool summary;
	bool summary_only;
	bool idle_hist;
	bool show_callchain;
	unsigned int max_stack;
	bool show_cpu_visual;
	bool show_wakeups;
	bool show_next;
	bool show_migrations;
	bool pre_migrations;
	bool show_state;
	bool show_prio;
	u64 skipped_samples;
	const char *time_str;
	struct perf_time_interval ptime;
	struct perf_time_interval hist_time;
	volatile bool thread_funcs_exit;
	const char *prio_str;
	DECLARE_BITMAP(prio_bitmap, MAX_PRIO);
};

/* per thread run time data */
struct thread_runtime {
	u64 last_time;    /* time of previous sched in/out event */
	u64 dt_run;       /* run time */
	u64 dt_sleep;     /* time between CPU access by sleep (off cpu) */
	u64 dt_iowait;    /* time between CPU access by iowait (off cpu) */
	u64 dt_preempt;   /* time between CPU access by preempt (off cpu) */
	u64 dt_delay;     /* time between wakeup and sched-in */
	u64 dt_pre_mig;   /* time between migration and wakeup */
	u64 ready_to_run; /* time of wakeup */
	u64 migrated;     /* time when a thread is migrated */

	struct stats run_stats;
	u64 total_run_time;
	u64 total_sleep_time;
	u64 total_iowait_time;
	u64 total_preempt_time;
	u64 total_delay_time;
	u64 total_pre_mig_time;

	char last_state;

	char shortname[3];
	bool comm_changed;

	u64 migrations;

	int prio;
};

/* per event run time data */
struct evsel_runtime {
	u64 *last_time; /* time this event was last seen per cpu */
	u32 ncpu;       /* highest cpu slot allocated */
};

/* per cpu idle time data */
struct idle_thread_runtime {
	struct thread_runtime tr;
	struct thread *last_thread;
	struct rb_root_cached sorted_root;
	struct callchain_root callchain;
	struct callchain_cursor cursor;
};

/* track idle times per cpu */
static struct thread **idle_threads;
static int idle_max_cpu;
static char idle_comm[] = "<idle>";

static u64 get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);

	return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}

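/*
 * Spin until roughly @nsecs have elapsed, compensating for the cost of
 * the final get_nsecs() call itself as measured by
 * calibrate_run_measurement_overhead() below.
 */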
static void burn_nsecs(struct perf_sched *sched, u64 nsecs)
{
	u64 T0 = get_nsecs(), T1;

	do {
		T1 = get_nsecs();
	} while (T1 + sched->run_measurement_overhead < T0 + nsecs);
}

static void sleep_nsecs(u64 nsecs)
{
	struct timespec ts;

	ts.tv_nsec = nsecs % NSEC_PER_SEC;
	ts.tv_sec = nsecs / NSEC_PER_SEC;

	nanosleep(&ts, NULL);
}

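/*
 * Estimate the fixed cost of the run/sleep primitives above by taking
 * the minimum over ten trials; the results are used to correct the
 * replayed run and sleep times for the cost of measuring them.
 */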
static void calibrate_run_measurement_overhead(struct perf_sched *sched)
{
	u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		burn_nsecs(sched, 0);
		T1 = get_nsecs();
		delta = T1 - T0;
		min_delta = min(min_delta, delta);
	}
	sched->run_measurement_overhead = min_delta;

	printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}

static void calibrate_sleep_measurement_overhead(struct perf_sched *sched)
{
	u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		sleep_nsecs(10000);
		T1 = get_nsecs();
		delta = T1 - T0;
		min_delta = min(min_delta, delta);
	}
	min_delta -= 10000;
	sched->sleep_measurement_overhead = min_delta;

	printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}

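/*
 * Allocate a new atom and append it to @task's event array, growing
 * the array by one slot; the caller fills in the event type and its
 * payload.
 */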
static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)
{
	struct sched_atom *event = zalloc(sizeof(*event));
	unsigned long idx = task->nr_events;
	size_t size;

	event->timestamp = timestamp;
	event->nr = idx;

	task->nr_events++;
	size = sizeof(struct sched_atom *) * task->nr_events;
	task->atoms = realloc(task->atoms, size);
	BUG_ON(!task->atoms);

	task->atoms[idx] = event;

	return event;
}

static struct sched_atom *last_event(struct task_desc *task)
{
	if (!task->nr_events)
		return NULL;

	return task->atoms[task->nr_events - 1];
}

static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
				u64 timestamp, u64 duration)
{
	struct sched_atom *event, *curr_event = last_event(task);

	/*
	 * optimize an existing RUN event by merging this one
	 * to it:
	 */
	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
		sched->nr_run_events_optimized++;
		curr_event->duration += duration;
		return;
	}

	event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_RUN;
	event->duration = duration;

	sched->nr_run_events++;
}

static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
				   u64 timestamp, struct task_desc *wakee)
{
	struct sched_atom *event, *wakee_event;

	event = get_new_event(task, timestamp);
	event->type = SCHED_EVENT_WAKEUP;
	event->wakee = wakee;

	wakee_event = last_event(wakee);
	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
		sched->targetless_wakeups++;
		return;
	}
	if (wakee_event->wait_sem) {
		sched->multitarget_wakeups++;
		return;
	}

	wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
	sem_init(wakee_event->wait_sem, 0, 0);
	event->wait_sem = wakee_event->wait_sem;

	sched->nr_wakeup_events++;
}

static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
				  u64 timestamp)
{
	struct sched_atom *event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_SLEEP;

	sched->nr_sleep_events++;
}

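/*
 * Look up (or create) the replay task for @pid. The pid_to_task table
 * is sized from kernel.pid_max on first use and grown on demand when a
 * larger pid shows up.
 */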
static struct task_desc *register_pid(struct perf_sched *sched,
				      unsigned long pid, const char *comm)
{
	struct task_desc *task;
	static int pid_max;

	if (sched->pid_to_task == NULL) {
		if (sysctl__read_int("kernel/pid_max", &pid_max) < 0)
			pid_max = MAX_PID;
		BUG_ON((sched->pid_to_task = calloc(pid_max, sizeof(struct task_desc *))) == NULL);
	}
	if (pid >= (unsigned long)pid_max) {
		BUG_ON((sched->pid_to_task = realloc(sched->pid_to_task, (pid + 1) *
			sizeof(struct task_desc *))) == NULL);
		while (pid >= (unsigned long)pid_max)
			sched->pid_to_task[pid_max++] = NULL;
	}

	task = sched->pid_to_task[pid];

	if (task)
		return task;

	task = zalloc(sizeof(*task));
	task->pid = pid;
	task->nr = sched->nr_tasks;
	strcpy(task->comm, comm);
	/*
	 * every task starts in sleeping state - this gets ignored
	 * if there's no wakeup pointing to this sleep state:
	 */
	add_sched_event_sleep(sched, task, 0);

	sched->pid_to_task[pid] = task;
	sched->nr_tasks++;
	sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
	BUG_ON(!sched->tasks);
	sched->tasks[task->nr] = task;

	if (verbose > 0)
		printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm);

	return task;
}


static void print_task_traces(struct perf_sched *sched)
{
	struct task_desc *task;
	unsigned long i;

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
			task->nr, task->comm, task->pid, task->nr_events);
	}
}

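/*
 * Link the tasks into a ring of wakeups (task i wakes task i + 1, the
 * last one wakes the first) so that every task has a waker once the
 * traced workload is replayed.
 */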
static void add_cross_task_wakeups(struct perf_sched *sched)
{
	struct task_desc *task1, *task2;
	unsigned long i, j;

	for (i = 0; i < sched->nr_tasks; i++) {
		task1 = sched->tasks[i];
		j = i + 1;
		if (j == sched->nr_tasks)
			j = 0;
		task2 = sched->tasks[j];
		add_sched_event_wakeup(sched, task1, 0, task2);
	}
}

static void perf_sched__process_event(struct perf_sched *sched,
				      struct sched_atom *atom)
{
	int ret = 0;

	switch (atom->type) {
	case SCHED_EVENT_RUN:
		burn_nsecs(sched, atom->duration);
		break;
	case SCHED_EVENT_SLEEP:
		if (atom->wait_sem)
			ret = sem_wait(atom->wait_sem);
		BUG_ON(ret);
		break;
	case SCHED_EVENT_WAKEUP:
		if (atom->wait_sem)
			ret = sem_post(atom->wait_sem);
		BUG_ON(ret);
		break;
	default:
		BUG_ON(1);
	}
}

static u64 get_cpu_usage_nsec_parent(void)
{
	struct rusage ru;
	u64 sum;
	int err;

	err = getrusage(RUSAGE_SELF, &ru);
	BUG_ON(err);

	sum = ru.ru_utime.tv_sec * NSEC_PER_SEC + ru.ru_utime.tv_usec * NSEC_PER_USEC;
	sum += ru.ru_stime.tv_sec * NSEC_PER_SEC + ru.ru_stime.tv_usec * NSEC_PER_USEC;

	return sum;
}

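/*
 * Open a task-clock software counter for the calling thread; it is
 * read before and after each replay round to measure the CPU time the
 * thread really consumed. On EMFILE, sched->force (-f) raises
 * RLIMIT_NOFILE and retries.
 */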
static int self_open_counters(struct perf_sched *sched, unsigned long cur_task)
{
	struct perf_event_attr attr;
	char sbuf[STRERR_BUFSIZE], info[STRERR_BUFSIZE] = "";
	int fd;
	struct rlimit limit;
	bool need_privilege = false;

	memset(&attr, 0, sizeof(attr));

	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;

force_again:
	fd = sys_perf_event_open(&attr, 0, -1, -1,
				 perf_event_open_cloexec_flag());

	if (fd < 0) {
		if (errno == EMFILE) {
			if (sched->force) {
				BUG_ON(getrlimit(RLIMIT_NOFILE, &limit) == -1);
				limit.rlim_cur += sched->nr_tasks - cur_task;
				if (limit.rlim_cur > limit.rlim_max) {
					limit.rlim_max = limit.rlim_cur;
					need_privilege = true;
				}
				if (setrlimit(RLIMIT_NOFILE, &limit) == -1) {
					if (need_privilege && errno == EPERM)
						strcpy(info, "Need privilege\n");
				} else
					goto force_again;
			} else
				strcpy(info, "Have a try with -f option\n");
		}
		pr_err("Error: sys_perf_event_open() syscall returned "
		       "with %d (%s)\n%s", fd,
		       str_error_r(errno, sbuf, sizeof(sbuf)), info);
		exit(EXIT_FAILURE);
	}
	return fd;
}

static u64 get_cpu_usage_nsec_self(int fd)
{
	u64 runtime;
	int ret;

	ret = read(fd, &runtime, sizeof(runtime));
	BUG_ON(ret != sizeof(runtime));

	return runtime;
}

struct sched_thread_parms {
	struct task_desc *task;
	struct perf_sched *sched;
	int fd;
};

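/*
 * Replay worker. Each iteration: signal ready_for_work, wait for the
 * parent to release start_work_mutex, replay the recorded atoms while
 * measuring own CPU time, post work_done_sem, then block on
 * work_done_wait_mutex until the parent re-arms the next round.
 */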
static void *thread_func(void *ctx)
{
	struct sched_thread_parms *parms = ctx;
	struct task_desc *this_task = parms->task;
	struct perf_sched *sched = parms->sched;
	u64 cpu_usage_0, cpu_usage_1;
	unsigned long i, ret;
	char comm2[22];
	int fd = parms->fd;

	zfree(&parms);

	sprintf(comm2, ":%s", this_task->comm);
	prctl(PR_SET_NAME, comm2);
	if (fd < 0)
		return NULL;

	while (!sched->thread_funcs_exit) {
		ret = sem_post(&this_task->ready_for_work);
		BUG_ON(ret);
		mutex_lock(&sched->start_work_mutex);
		mutex_unlock(&sched->start_work_mutex);

		cpu_usage_0 = get_cpu_usage_nsec_self(fd);

		for (i = 0; i < this_task->nr_events; i++) {
			this_task->curr_event = i;
			perf_sched__process_event(sched, this_task->atoms[i]);
		}

		cpu_usage_1 = get_cpu_usage_nsec_self(fd);
		this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
		ret = sem_post(&this_task->work_done_sem);
		BUG_ON(ret);

		mutex_lock(&sched->work_done_wait_mutex);
		mutex_unlock(&sched->work_done_wait_mutex);
	}
	return NULL;
}

static void create_tasks(struct perf_sched *sched)
	EXCLUSIVE_LOCK_FUNCTION(sched->start_work_mutex)
	EXCLUSIVE_LOCK_FUNCTION(sched->work_done_wait_mutex)
{
	struct task_desc *task;
	pthread_attr_t attr;
	unsigned long i;
	int err;

	err = pthread_attr_init(&attr);
	BUG_ON(err);
	err = pthread_attr_setstacksize(&attr,
			(size_t) max(16 * 1024, (int)PTHREAD_STACK_MIN));
	BUG_ON(err);
	mutex_lock(&sched->start_work_mutex);
	mutex_lock(&sched->work_done_wait_mutex);
	for (i = 0; i < sched->nr_tasks; i++) {
		struct sched_thread_parms *parms = malloc(sizeof(*parms));
		BUG_ON(parms == NULL);
		parms->task = task = sched->tasks[i];
		parms->sched = sched;
		parms->fd = self_open_counters(sched, i);
		sem_init(&task->ready_for_work, 0, 0);
		sem_init(&task->work_done_sem, 0, 0);
		task->curr_event = 0;
		err = pthread_create(&task->thread, &attr, thread_func, parms);
		BUG_ON(err);
	}
}

static void destroy_tasks(struct perf_sched *sched)
	UNLOCK_FUNCTION(sched->start_work_mutex)
	UNLOCK_FUNCTION(sched->work_done_wait_mutex)
{
	struct task_desc *task;
	unsigned long i;
	int err;

	mutex_unlock(&sched->start_work_mutex);
	mutex_unlock(&sched->work_done_wait_mutex);
	/* Get rid of threads so they won't be upset by mutex destruction */
	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		err = pthread_join(task->thread, NULL);
		BUG_ON(err);
		sem_destroy(&task->ready_for_work);
		sem_destroy(&task->work_done_sem);
	}
}

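/*
 * Parent side of the handshake in thread_func(): wait until all
 * workers are ready, release them, then collect the per-task CPU usage
 * once every worker has posted work_done_sem, re-arming the mutexes
 * and semaphores for the next round.
 */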
static void wait_for_tasks(struct perf_sched *sched)
	EXCLUSIVE_LOCKS_REQUIRED(sched->work_done_wait_mutex)
	EXCLUSIVE_LOCKS_REQUIRED(sched->start_work_mutex)
{
	u64 cpu_usage_0, cpu_usage_1;
	struct task_desc *task;
	unsigned long i, ret;

	sched->start_time = get_nsecs();
	sched->cpu_usage = 0;
	mutex_unlock(&sched->work_done_wait_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		ret = sem_wait(&task->ready_for_work);
		BUG_ON(ret);
		sem_init(&task->ready_for_work, 0, 0);
	}
	mutex_lock(&sched->work_done_wait_mutex);

	cpu_usage_0 = get_cpu_usage_nsec_parent();

	mutex_unlock(&sched->start_work_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		ret = sem_wait(&task->work_done_sem);
		BUG_ON(ret);
		sem_init(&task->work_done_sem, 0, 0);
		sched->cpu_usage += task->cpu_usage;
		task->cpu_usage = 0;
	}

	cpu_usage_1 = get_cpu_usage_nsec_parent();
	if (!sched->runavg_cpu_usage)
		sched->runavg_cpu_usage = sched->cpu_usage;
	sched->runavg_cpu_usage = (sched->runavg_cpu_usage * (sched->replay_repeat - 1) + sched->cpu_usage) / sched->replay_repeat;

	sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
	if (!sched->runavg_parent_cpu_usage)
		sched->runavg_parent_cpu_usage = sched->parent_cpu_usage;
	sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) +
					  sched->parent_cpu_usage) / sched->replay_repeat;

	mutex_lock(&sched->start_work_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		task->curr_event = 0;
	}
}

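/*
 * Run a single replay iteration and fold the measured wall time and
 * CPU usage into running averages weighted by sched->replay_repeat.
 */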
static void run_one_test(struct perf_sched *sched)
	EXCLUSIVE_LOCKS_REQUIRED(sched->work_done_wait_mutex)
	EXCLUSIVE_LOCKS_REQUIRED(sched->start_work_mutex)
{
	u64 T0, T1, delta, avg_delta, fluct;

	T0 = get_nsecs();
	wait_for_tasks(sched);
	T1 = get_nsecs();

	delta = T1 - T0;
	sched->sum_runtime += delta;
	sched->nr_runs++;

	avg_delta = sched->sum_runtime / sched->nr_runs;
	if (delta < avg_delta)
		fluct = avg_delta - delta;
	else
		fluct = delta - avg_delta;
	sched->sum_fluct += fluct;
	if (!sched->run_avg)
		sched->run_avg = delta;
	sched->run_avg = (sched->run_avg * (sched->replay_repeat - 1) + delta) / sched->replay_repeat;

	printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / NSEC_PER_MSEC);

	printf("ravg: %0.2f, ", (double)sched->run_avg / NSEC_PER_MSEC);

	printf("cpu: %0.2f / %0.2f",
		(double)sched->cpu_usage / NSEC_PER_MSEC, (double)sched->runavg_cpu_usage / NSEC_PER_MSEC);

#if 0
	/*
	 * rusage statistics done by the parent, these are less
	 * accurate than the sched->sum_exec_runtime based statistics:
	 */
	printf(" [%0.2f / %0.2f]",
		(double)sched->parent_cpu_usage / NSEC_PER_MSEC,
		(double)sched->runavg_parent_cpu_usage / NSEC_PER_MSEC);
#endif

	printf("\n");

	if (sched->nr_sleep_corrections)
		printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections);
	sched->nr_sleep_corrections = 0;
}

static void test_calibrations(struct perf_sched *sched)
{
	u64 T0, T1;

	T0 = get_nsecs();
	burn_nsecs(sched, NSEC_PER_MSEC);
	T1 = get_nsecs();

	printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);

	T0 = get_nsecs();
	sleep_nsecs(NSEC_PER_MSEC);
	T1 = get_nsecs();

	printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
}

static int
replay_wakeup_event(struct perf_sched *sched,
		    struct evsel *evsel, struct perf_sample *sample,
		    struct machine *machine __maybe_unused)
{
	const char *comm = evsel__strval(evsel, sample, "comm");
	const u32 pid = evsel__intval(evsel, sample, "pid");
	struct task_desc *waker, *wakee;

	if (verbose > 0) {
		printf("sched_wakeup event %p\n", evsel);

		printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid);
	}

	waker = register_pid(sched, sample->tid, "<unknown>");
	wakee = register_pid(sched, pid, comm);

	add_sched_event_wakeup(sched, waker, sample->time, wakee);
	return 0;
}

static int replay_switch_event(struct perf_sched *sched,
			       struct evsel *evsel,
			       struct perf_sample *sample,
			       struct machine *machine __maybe_unused)
{
	const char *prev_comm = evsel__strval(evsel, sample, "prev_comm"),
		   *next_comm = evsel__strval(evsel, sample, "next_comm");
	const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
		  next_pid = evsel__intval(evsel, sample, "next_pid");
	struct task_desc *prev, __maybe_unused *next;
	u64 timestamp0, timestamp = sample->time;
	int cpu = sample->cpu;
	s64 delta;

	if (verbose > 0)
		printf("sched_switch event %p\n", evsel);

	if (cpu >= MAX_CPUS || cpu < 0)
		return 0;

	timestamp0 = sched->cpu_last_switched[cpu];
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
		return -1;
	}

	pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
		 prev_comm, prev_pid, next_comm, next_pid, delta);

	prev = register_pid(sched, prev_pid, prev_comm);
	next = register_pid(sched, next_pid, next_comm);

	sched->cpu_last_switched[cpu] = timestamp;

	add_sched_event_run(sched, prev, timestamp, delta);
	add_sched_event_sleep(sched, prev, timestamp);

	return 0;
}

static int replay_fork_event(struct perf_sched *sched,
			     union perf_event *event,
			     struct machine *machine)
{
	struct thread *child, *parent;

	child = machine__findnew_thread(machine, event->fork.pid,
					event->fork.tid);
	parent = machine__findnew_thread(machine, event->fork.ppid,
					 event->fork.ptid);

	if (child == NULL || parent == NULL) {
		pr_debug("thread does not exist on fork event: child %p, parent %p\n",
			 child, parent);
		goto out_put;
	}

	if (verbose > 0) {
		printf("fork event\n");
		printf("... parent: %s/%d\n", thread__comm_str(parent), thread__tid(parent));
		printf("... child: %s/%d\n", thread__comm_str(child), thread__tid(child));
	}

	register_pid(sched, thread__tid(parent), thread__comm_str(parent));
	register_pid(sched, thread__tid(child), thread__comm_str(child));
out_put:
	thread__put(child);
	thread__put(parent);
	return 0;
}

struct sort_dimension {
	const char *name;
	sort_fn_t cmp;
	struct list_head list;
};

static inline void init_prio(struct thread_runtime *r)
{
	r->prio = -1;
}

/*
 * handle runtime stats saved per thread
 */
static struct thread_runtime *thread__init_runtime(struct thread *thread)
{
	struct thread_runtime *r;

	r = zalloc(sizeof(struct thread_runtime));
	if (!r)
		return NULL;

	init_stats(&r->run_stats);
	init_prio(r);
	thread__set_priv(thread, r);

	return r;
}

static struct thread_runtime *thread__get_runtime(struct thread *thread)
{
	struct thread_runtime *tr;

	tr = thread__priv(thread);
	if (tr == NULL) {
		tr = thread__init_runtime(thread);
		if (tr == NULL)
			pr_debug("Failed to malloc memory for runtime data.\n");
	}

	return tr;
}

static int
thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
{
	struct sort_dimension *sort;
	int ret = 0;

	BUG_ON(list_empty(list));

	list_for_each_entry(sort, list, list) {
		ret = sort->cmp(l, r);
		if (ret)
			return ret;
	}

	return ret;
}

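/*
 * The latency rbtree is ordered by the comparator list passed in at
 * insertion time (cmp_pid by default), so searches must use the very
 * same sort list or they will not find the node.
 */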
static struct work_atoms *
thread_atoms_search(struct rb_root_cached *root, struct thread *thread,
		    struct list_head *sort_list)
{
	struct rb_node *node = root->rb_root.rb_node;
	struct work_atoms key = { .thread = thread };

	while (node) {
		struct work_atoms *atoms;
		int cmp;

		atoms = container_of(node, struct work_atoms, node);

		cmp = thread_lat_cmp(sort_list, &key, atoms);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else {
			BUG_ON(!RC_CHK_EQUAL(thread, atoms->thread));
			return atoms;
		}
	}
	return NULL;
}

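/*
 * Cached-rbtree insertion: walk down to a leaf, remembering whether we
 * ever descended to the right; if not, the new node becomes the
 * leftmost and rb_insert_color_cached() updates the cached pointer.
 */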
static void
__thread_latency_insert(struct rb_root_cached *root, struct work_atoms *data,
			struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
	bool leftmost = true;

	while (*new) {
		struct work_atoms *this;
		int cmp;

		this = container_of(*new, struct work_atoms, node);
		parent = *new;

		cmp = thread_lat_cmp(sort_list, data, this);

		if (cmp > 0)
			new = &((*new)->rb_left);
		else {
			new = &((*new)->rb_right);
			leftmost = false;
		}
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color_cached(&data->node, root, leftmost);
}

static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
{
	struct work_atoms *atoms = zalloc(sizeof(*atoms));
	if (!atoms) {
		pr_err("No memory at %s\n", __func__);
		return -1;
	}

	atoms->thread = thread__get(thread);
	INIT_LIST_HEAD(&atoms->work_list);
	__thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid);
	return 0;
}

static int
add_sched_out_event(struct work_atoms *atoms,
		    char run_state,
		    u64 timestamp)
{
	struct work_atom *atom = zalloc(sizeof(*atom));
	if (!atom) {
		pr_err("No memory at %s\n", __func__);
		return -1;
	}

	atom->sched_out_time = timestamp;

	if (run_state == 'R') {
		atom->state = THREAD_WAIT_CPU;
		atom->wake_up_time = atom->sched_out_time;
	}

	list_add_tail(&atom->list, &atoms->work_list);
	return 0;
}

static void
add_runtime_event(struct work_atoms *atoms, u64 delta,
		  u64 timestamp __maybe_unused)
{
	struct work_atom *atom;

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	atom->runtime += delta;
	atoms->total_runtime += delta;
}

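/*
 * A sched-in completes the current atom: the wakeup-to-sched-in delta
 * is accounted as scheduling latency, and the per-thread maximum is
 * tracked together with the timestamps that produced it.
 */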
static void
add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
{
	struct work_atom *atom;
	u64 delta;

	if (list_empty(&atoms->work_list))
		return;

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	if (atom->state != THREAD_WAIT_CPU)
		return;

	if (timestamp < atom->wake_up_time) {
		atom->state = THREAD_IGNORE;
		return;
	}

	atom->state = THREAD_SCHED_IN;
	atom->sched_in_time = timestamp;

	delta = atom->sched_in_time - atom->wake_up_time;
	atoms->total_lat += delta;
	if (delta > atoms->max_lat) {
		atoms->max_lat = delta;
		atoms->max_lat_start = atom->wake_up_time;
		atoms->max_lat_end = timestamp;
	}
	atoms->nb_atoms++;
}

static void free_work_atoms(struct work_atoms *atoms)
{
	struct work_atom *atom, *tmp;

	if (atoms == NULL)
		return;

	list_for_each_entry_safe(atom, tmp, &atoms->work_list, list) {
		list_del(&atom->list);
		free(atom);
	}
	thread__zput(atoms->thread);
	free(atoms);
}

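/*
 * sched_switch handler for 'perf sched latency': close the atom of the
 * task scheduled out (a task preempted while still runnable starts
 * waiting for the CPU immediately) and complete the latency atom of
 * the task scheduled in.
 */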
static int latency_switch_event(struct perf_sched *sched,
				struct evsel *evsel,
				struct perf_sample *sample,
				struct machine *machine)
{
	const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
		  next_pid = evsel__intval(evsel, sample, "next_pid");
	const char prev_state = evsel__taskstate(evsel, sample, "prev_state");
	struct work_atoms *out_events, *in_events;
	struct thread *sched_out, *sched_in;
	u64 timestamp0, timestamp = sample->time;
	int cpu = sample->cpu, err = -1;
	s64 delta;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

	timestamp0 = sched->cpu_last_switched[cpu];
	sched->cpu_last_switched[cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
		return -1;
	}

	sched_out = machine__findnew_thread(machine, -1, prev_pid);
	sched_in = machine__findnew_thread(machine, -1, next_pid);
	if (sched_out == NULL || sched_in == NULL)
		goto out_put;

	out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
	if (!out_events) {
		if (thread_atoms_insert(sched, sched_out))
			goto out_put;
		out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
		if (!out_events) {
			pr_err("out-event: Internal tree error");
			goto out_put;
		}
	}
	if (add_sched_out_event(out_events, prev_state, timestamp))
		goto out_put;

	in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
	if (!in_events) {
		if (thread_atoms_insert(sched, sched_in))
			goto out_put;
		in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
		if (!in_events) {
			pr_err("in-event: Internal tree error");
			goto out_put;
		}
		/*
		 * Task came in that we have not heard about yet;
		 * add an initial atom in runnable state:
		 */
		if (add_sched_out_event(in_events, 'R', timestamp))
			goto out_put;
	}
	add_sched_in_event(in_events, timestamp);
	err = 0;
out_put:
	thread__put(sched_out);
	thread__put(sched_in);
	return err;
}

static int latency_runtime_event(struct perf_sched *sched,
				 struct evsel *evsel,
				 struct perf_sample *sample,
				 struct machine *machine)
{
	const u32 pid = evsel__intval(evsel, sample, "pid");
	const u64 runtime = evsel__intval(evsel, sample, "runtime");
	struct thread *thread = machine__findnew_thread(machine, -1, pid);
	struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
	u64 timestamp = sample->time;
	int cpu = sample->cpu, err = -1;

	if (thread == NULL)
		return -1;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
	if (!atoms) {
		if (thread_atoms_insert(sched, thread))
			goto out_put;
		atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
		if (!atoms) {
			pr_err("in-event: Internal tree error");
			goto out_put;
		}
		if (add_sched_out_event(atoms, 'R', timestamp))
			goto out_put;
	}

	add_runtime_event(atoms, runtime, timestamp);
	err = 0;
out_put:
	thread__put(thread);
	return err;
}

static int latency_wakeup_event(struct perf_sched *sched,
				struct evsel *evsel,
				struct perf_sample *sample,
				struct machine *machine)
{
	const u32 pid = evsel__intval(evsel, sample, "pid");
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *wakee;
	u64 timestamp = sample->time;
	int err = -1;

	wakee = machine__findnew_thread(machine, -1, pid);
	if (wakee == NULL)
		return -1;
	atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
	if (!atoms) {
		if (thread_atoms_insert(sched, wakee))
			goto out_put;
		atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
		if (!atoms) {
			pr_err("wakeup-event: Internal tree error");
			goto out_put;
		}
		if (add_sched_out_event(atoms, 'S', timestamp))
			goto out_put;
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	/*
	 * A wakeup event is not guaranteed to arrive while the task is
	 * off the run queue: it may also hit a task that is already on
	 * the run queue, where it merely changes ->state to TASK_RUNNING.
	 * Don't set ->wake_up_time when waking a task that is already
	 * runnable.
	 *
	 * You WILL be missing events if you've recorded only
	 * one CPU, or are only looking at only one, so don't
	 * skip in this case.
	 */
	if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
		goto out_ok;

	sched->nr_timestamps++;
	if (atom->sched_out_time > timestamp) {
		sched->nr_unordered_timestamps++;
		goto out_ok;
	}

	atom->state = THREAD_WAIT_CPU;
	atom->wake_up_time = timestamp;
out_ok:
	err = 0;
out_put:
	thread__put(wakee);
	return err;
}

static int latency_migrate_task_event(struct perf_sched *sched,
				      struct evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)
{
	const u32 pid = evsel__intval(evsel, sample, "pid");
	u64 timestamp = sample->time;
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *migrant;
	int err = -1;

	/*
	 * Only need to worry about migration when profiling one CPU.
	 */
	if (sched->profile_cpu == -1)
		return 0;

	migrant = machine__findnew_thread(machine, -1, pid);
	if (migrant == NULL)
		return -1;
	atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
	if (!atoms) {
		if (thread_atoms_insert(sched, migrant))
			goto out_put;
		register_pid(sched, thread__tid(migrant), thread__comm_str(migrant));
		atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
		if (!atoms) {
			pr_err("migration-event: Internal tree error");
			goto out_put;
		}
		if (add_sched_out_event(atoms, 'R', timestamp))
			goto out_put;
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
	atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;

	sched->nr_timestamps++;

	if (atom->sched_out_time > timestamp)
		sched->nr_unordered_timestamps++;
	err = 0;
out_put:
	thread__put(migrant);
	return err;
}

static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)
{
	int i;
	int ret;
	u64 avg;
	char max_lat_start[32], max_lat_end[32];

	if (!work_list->nb_atoms)
		return;
	/*
	 * Ignore idle threads:
	 */
	if (!strcmp(thread__comm_str(work_list->thread), "swapper"))
		return;

	sched->all_runtime += work_list->total_runtime;
	sched->all_count += work_list->nb_atoms;

	if (work_list->num_merged > 1) {
		ret = printf(" %s:(%d) ", thread__comm_str(work_list->thread),
			     work_list->num_merged);
	} else {
		ret = printf(" %s:%d ", thread__comm_str(work_list->thread),
			     thread__tid(work_list->thread));
	}

	for (i = 0; i < 24 - ret; i++)
		printf(" ");

	avg = work_list->total_lat / work_list->nb_atoms;
	timestamp__scnprintf_usec(work_list->max_lat_start, max_lat_start, sizeof(max_lat_start));
	timestamp__scnprintf_usec(work_list->max_lat_end, max_lat_end, sizeof(max_lat_end));

	printf("|%11.3f ms |%9" PRIu64 " | avg:%8.3f ms | max:%8.3f ms | max start: %12s s | max end: %12s s\n",
		 (double)work_list->total_runtime / NSEC_PER_MSEC,
		 work_list->nb_atoms, (double)avg / NSEC_PER_MSEC,
		 (double)work_list->max_lat / NSEC_PER_MSEC,
		 max_lat_start, max_lat_end);
}

static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
{
	pid_t l_tid, r_tid;

	if (RC_CHK_EQUAL(l->thread, r->thread))
		return 0;
	l_tid = thread__tid(l->thread);
	r_tid = thread__tid(r->thread);
	if (l_tid < r_tid)
		return -1;
	if (l_tid > r_tid)
		return 1;
	return (int)(RC_CHK_ACCESS(l->thread) - RC_CHK_ACCESS(r->thread));
}

static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
{
	u64 avgl, avgr;

	if (!l->nb_atoms)
		return -1;

	if (!r->nb_atoms)
		return 1;

	avgl = l->total_lat / l->nb_atoms;
	avgr = r->total_lat / r->nb_atoms;

	if (avgl < avgr)
		return -1;
	if (avgl > avgr)
		return 1;

	return 0;
}

static int max_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->max_lat < r->max_lat)
		return -1;
	if (l->max_lat > r->max_lat)
		return 1;

	return 0;
}

static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->nb_atoms < r->nb_atoms)
		return -1;
	if (l->nb_atoms > r->nb_atoms)
		return 1;

	return 0;
}

static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->total_runtime < r->total_runtime)
		return -1;
	if (l->total_runtime > r->total_runtime)
		return 1;

	return 0;
}

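/*
 * Map one sort-key token to its comparator and queue it on @list.
 * Valid tokens are "pid", "avg", "max", "switch" and "runtime";
 * anything else is rejected with -1.
 */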
sort_dimension__add(const char * tok,struct list_head * list)1451 static int sort_dimension__add(const char *tok, struct list_head *list)
1452 {
1453 size_t i;
1454 static struct sort_dimension avg_sort_dimension = {
1455 .name = "avg",
1456 .cmp = avg_cmp,
1457 };
1458 static struct sort_dimension max_sort_dimension = {
1459 .name = "max",
1460 .cmp = max_cmp,
1461 };
1462 static struct sort_dimension pid_sort_dimension = {
1463 .name = "pid",
1464 .cmp = pid_cmp,
1465 };
1466 static struct sort_dimension runtime_sort_dimension = {
1467 .name = "runtime",
1468 .cmp = runtime_cmp,
1469 };
1470 static struct sort_dimension switch_sort_dimension = {
1471 .name = "switch",
1472 .cmp = switch_cmp,
1473 };
1474 struct sort_dimension *available_sorts[] = {
1475 &pid_sort_dimension,
1476 &avg_sort_dimension,
1477 &max_sort_dimension,
1478 &switch_sort_dimension,
1479 &runtime_sort_dimension,
1480 };
1481
1482 for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
1483 if (!strcmp(available_sorts[i]->name, tok)) {
1484 list_add_tail(&available_sorts[i]->list, list);
1485
1486 return 0;
1487 }
1488 }
1489
1490 return -1;
1491 }
1492
perf_sched__sort_lat(struct perf_sched * sched)1493 static void perf_sched__sort_lat(struct perf_sched *sched)
1494 {
1495 struct rb_node *node;
1496 struct rb_root_cached *root = &sched->atom_root;
1497 again:
1498 for (;;) {
1499 struct work_atoms *data;
1500 node = rb_first_cached(root);
1501 if (!node)
1502 break;
1503
1504 rb_erase_cached(node, root);
1505 data = rb_entry(node, struct work_atoms, node);
1506 __thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
1507 }
1508 if (root == &sched->atom_root) {
1509 root = &sched->merged_atom_root;
1510 goto again;
1511 }
1512 }
1513
process_sched_wakeup_event(const struct perf_tool * tool,struct evsel * evsel,struct perf_sample * sample,struct machine * machine)1514 static int process_sched_wakeup_event(const struct perf_tool *tool,
1515 struct evsel *evsel,
1516 struct perf_sample *sample,
1517 struct machine *machine)
1518 {
1519 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1520
1521 if (sched->tp_handler->wakeup_event)
1522 return sched->tp_handler->wakeup_event(sched, evsel, sample, machine);
1523
1524 return 0;
1525 }
1526
process_sched_wakeup_ignore(const struct perf_tool * tool __maybe_unused,struct evsel * evsel __maybe_unused,struct perf_sample * sample __maybe_unused,struct machine * machine __maybe_unused)1527 static int process_sched_wakeup_ignore(const struct perf_tool *tool __maybe_unused,
1528 struct evsel *evsel __maybe_unused,
1529 struct perf_sample *sample __maybe_unused,
1530 struct machine *machine __maybe_unused)
1531 {
1532 return 0;
1533 }
1534
thread__has_color(struct thread * thread)1535 static bool thread__has_color(struct thread *thread)
1536 {
1537 return thread__priv(thread) != NULL;
1538 }
1539
1540 static struct thread*
map__findnew_thread(struct perf_sched * sched,struct machine * machine,pid_t pid,pid_t tid)1541 map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid, pid_t tid)
1542 {
1543 struct thread *thread = machine__findnew_thread(machine, pid, tid);
1544 bool color = false;
1545
1546 if (!sched->map.color_pids || !thread || thread__priv(thread))
1547 return thread;
1548
1549 if (thread_map__has(sched->map.color_pids, tid))
1550 color = true;
1551
1552 thread__set_priv(thread, color ? ((void*)1) : NULL);
1553 return thread;
1554 }
1555
sched_match_task(struct perf_sched * sched,const char * comm_str)1556 static bool sched_match_task(struct perf_sched *sched, const char *comm_str)
1557 {
1558 bool fuzzy_match = sched->map.fuzzy;
1559 struct strlist *task_names = sched->map.task_names;
1560 struct str_node *node;
1561
1562 strlist__for_each_entry(node, task_names) {
1563 bool match_found = fuzzy_match ? !!strstr(comm_str, node->s) :
1564 !strcmp(comm_str, node->s);
1565 if (match_found)
1566 return true;
1567 }
1568
1569 return false;
1570 }
1571
print_sched_map(struct perf_sched * sched,struct perf_cpu this_cpu,int cpus_nr,const char * color,bool sched_out)1572 static void print_sched_map(struct perf_sched *sched, struct perf_cpu this_cpu, int cpus_nr,
1573 const char *color, bool sched_out)
1574 {
1575 for (int i = 0; i < cpus_nr; i++) {
1576 struct perf_cpu cpu = {
1577 .cpu = sched->map.comp ? sched->map.comp_cpus[i].cpu : i,
1578 };
1579 struct thread *curr_thread = sched->curr_thread[cpu.cpu];
1580 struct thread *curr_out_thread = sched->curr_out_thread[cpu.cpu];
1581 struct thread_runtime *curr_tr;
1582 const char *pid_color = color;
1583 const char *cpu_color = color;
1584 char symbol = ' ';
1585 struct thread *thread_to_check = sched_out ? curr_out_thread : curr_thread;
1586
1587 if (thread_to_check && thread__has_color(thread_to_check))
1588 pid_color = COLOR_PIDS;
1589
1590 if (sched->map.color_cpus && perf_cpu_map__has(sched->map.color_cpus, cpu))
1591 cpu_color = COLOR_CPUS;
1592
1593 if (cpu.cpu == this_cpu.cpu)
1594 symbol = '*';
1595
1596 color_fprintf(stdout, cpu.cpu != this_cpu.cpu ? color : cpu_color, "%c", symbol);
1597
1598 thread_to_check = sched_out ? sched->curr_out_thread[cpu.cpu] :
1599 sched->curr_thread[cpu.cpu];
1600
1601 if (thread_to_check) {
1602 curr_tr = thread__get_runtime(thread_to_check);
1603 if (curr_tr == NULL)
1604 return;
1605
1606 if (sched_out) {
1607 if (cpu.cpu == this_cpu.cpu)
1608 color_fprintf(stdout, color, "- ");
1609 else {
1610 curr_tr = thread__get_runtime(sched->curr_thread[cpu.cpu]);
1611 if (curr_tr != NULL)
1612 color_fprintf(stdout, pid_color, "%2s ",
1613 curr_tr->shortname);
1614 }
1615 } else
1616 color_fprintf(stdout, pid_color, "%2s ", curr_tr->shortname);
1617 } else
1618 color_fprintf(stdout, color, " ");
1619 }
1620 }
1621
map_switch_event(struct perf_sched * sched,struct evsel * evsel,struct perf_sample * sample,struct machine * machine)1622 static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
1623 struct perf_sample *sample, struct machine *machine)
1624 {
1625 const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
1626 const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid");
1627 struct thread *sched_in, *sched_out;
1628 struct thread_runtime *tr;
1629 int new_shortname;
1630 u64 timestamp0, timestamp = sample->time;
1631 s64 delta;
1632 struct perf_cpu this_cpu = {
1633 .cpu = sample->cpu,
1634 };
1635 int cpus_nr;
1636 int proceed;
1637 bool new_cpu = false;
1638 const char *color = PERF_COLOR_NORMAL;
1639 char stimestamp[32];
1640 const char *str;
1641 int ret = -1;
1642
1643 BUG_ON(this_cpu.cpu >= MAX_CPUS || this_cpu.cpu < 0);
1644
1645 if (this_cpu.cpu > sched->max_cpu.cpu)
1646 sched->max_cpu = this_cpu;
1647
1648 if (sched->map.comp) {
1649 cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS);
1650 if (!__test_and_set_bit(this_cpu.cpu, sched->map.comp_cpus_mask)) {
1651 sched->map.comp_cpus[cpus_nr++] = this_cpu;
1652 new_cpu = true;
1653 }
1654 } else
1655 cpus_nr = sched->max_cpu.cpu;
1656
1657 timestamp0 = sched->cpu_last_switched[this_cpu.cpu];
1658 sched->cpu_last_switched[this_cpu.cpu] = timestamp;
1659 if (timestamp0)
1660 delta = timestamp - timestamp0;
1661 else
1662 delta = 0;
1663
1664 if (delta < 0) {
1665 pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
1666 return -1;
1667 }
1668
1669 sched_in = map__findnew_thread(sched, machine, -1, next_pid);
1670 sched_out = map__findnew_thread(sched, machine, -1, prev_pid);
1671 if (sched_in == NULL || sched_out == NULL)
1672 goto out;
1673
1674 tr = thread__get_runtime(sched_in);
1675 if (tr == NULL)
1676 goto out;
1677
1678 thread__put(sched->curr_thread[this_cpu.cpu]);
1679 thread__put(sched->curr_out_thread[this_cpu.cpu]);
1680
1681 sched->curr_thread[this_cpu.cpu] = thread__get(sched_in);
1682 sched->curr_out_thread[this_cpu.cpu] = thread__get(sched_out);
1683
1684 ret = 0;
1685
1686 str = thread__comm_str(sched_in);
1687 new_shortname = 0;
1688 if (!tr->shortname[0]) {
1689 if (!strcmp(thread__comm_str(sched_in), "swapper")) {
1690 /*
1691 * Don't allocate a letter-number for swapper:0
1692 * as a shortname. Instead, we use '.' for it.
1693 */
1694 tr->shortname[0] = '.';
1695 tr->shortname[1] = ' ';
1696 } else if (!sched->map.task_name || sched_match_task(sched, str)) {
1697 tr->shortname[0] = sched->next_shortname1;
1698 tr->shortname[1] = sched->next_shortname2;
1699
1700 if (sched->next_shortname1 < 'Z') {
1701 sched->next_shortname1++;
1702 } else {
1703 sched->next_shortname1 = 'A';
1704 if (sched->next_shortname2 < '9')
1705 sched->next_shortname2++;
1706 else
1707 sched->next_shortname2 = '0';
1708 }
1709 } else {
1710 tr->shortname[0] = '-';
1711 tr->shortname[1] = ' ';
1712 }
1713 new_shortname = 1;
1714 }
1715
1716 if (sched->map.cpus && !perf_cpu_map__has(sched->map.cpus, this_cpu))
1717 goto out;
1718
1719 proceed = 0;
1720 str = thread__comm_str(sched_in);
1721 /*
1722 * Check which of sched_in and sched_out matches the passed --task-name
1723 * arguments and call the corresponding print_sched_map.
1724 */
1725 if (sched->map.task_name && !sched_match_task(sched, str)) {
1726 if (!sched_match_task(sched, thread__comm_str(sched_out)))
1727 goto out;
1728 else
1729 goto sched_out;
1730
1731 } else {
1732 str = thread__comm_str(sched_out);
1733 if (!(sched->map.task_name && !sched_match_task(sched, str)))
1734 proceed = 1;
1735 }
1736
1737 printf(" ");
1738
1739 print_sched_map(sched, this_cpu, cpus_nr, color, false);
1740
1741 timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp));
1742 color_fprintf(stdout, color, " %12s secs ", stimestamp);
1743 if (new_shortname || tr->comm_changed || (verbose > 0 && thread__tid(sched_in))) {
1744 const char *pid_color = color;
1745
1746 if (thread__has_color(sched_in))
1747 pid_color = COLOR_PIDS;
1748
1749 color_fprintf(stdout, pid_color, "%s => %s:%d",
1750 tr->shortname, thread__comm_str(sched_in), thread__tid(sched_in));
1751 tr->comm_changed = false;
1752 }
1753
1754 if (sched->map.comp && new_cpu)
1755 color_fprintf(stdout, color, " (CPU %d)", this_cpu.cpu);
1756
1757 if (proceed != 1) {
1758 color_fprintf(stdout, color, "\n");
1759 goto out;
1760 }
1761
1762 sched_out:
1763 if (sched->map.task_name) {
1764 tr = thread__get_runtime(sched->curr_out_thread[this_cpu.cpu]);
1765 if (strcmp(tr->shortname, "") == 0)
1766 goto out;
1767
1768 if (proceed == 1)
1769 color_fprintf(stdout, color, "\n");
1770
1771 printf(" ");
1772 print_sched_map(sched, this_cpu, cpus_nr, color, true);
1773 timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp));
1774 color_fprintf(stdout, color, " %12s secs ", stimestamp);
1775 }
1776
1777 color_fprintf(stdout, color, "\n");
1778
1779 out:
1780 thread__put(sched_out);
1781 thread__put(sched_in);
1782
1783 return ret;
1784 }
1785
process_sched_switch_event(const struct perf_tool * tool,struct evsel * evsel,struct perf_sample * sample,struct machine * machine)1786 static int process_sched_switch_event(const struct perf_tool *tool,
1787 struct evsel *evsel,
1788 struct perf_sample *sample,
1789 struct machine *machine)
1790 {
1791 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1792 int this_cpu = sample->cpu, err = 0;
1793 u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
1794 next_pid = evsel__intval(evsel, sample, "next_pid");
1795
1796 if (sched->curr_pid[this_cpu] != (u32)-1) {
1797 /*
1798 * Are we trying to switch away a PID that is
1799 * not current?
1800 */
1801 if (sched->curr_pid[this_cpu] != prev_pid)
1802 sched->nr_context_switch_bugs++;
1803 }
1804
1805 if (sched->tp_handler->switch_event)
1806 err = sched->tp_handler->switch_event(sched, evsel, sample, machine);
1807
1808 sched->curr_pid[this_cpu] = next_pid;
1809 return err;
1810 }
1811
process_sched_runtime_event(const struct perf_tool * tool,struct evsel * evsel,struct perf_sample * sample,struct machine * machine)1812 static int process_sched_runtime_event(const struct perf_tool *tool,
1813 struct evsel *evsel,
1814 struct perf_sample *sample,
1815 struct machine *machine)
1816 {
1817 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1818
1819 if (sched->tp_handler->runtime_event)
1820 return sched->tp_handler->runtime_event(sched, evsel, sample, machine);
1821
1822 return 0;
1823 }
1824
perf_sched__process_fork_event(const struct perf_tool * tool,union perf_event * event,struct perf_sample * sample,struct machine * machine)1825 static int perf_sched__process_fork_event(const struct perf_tool *tool,
1826 union perf_event *event,
1827 struct perf_sample *sample,
1828 struct machine *machine)
1829 {
1830 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1831
1832 /* run the fork event through the perf machinery */
1833 perf_event__process_fork(tool, event, sample, machine);
1834
1835 /* and then run additional processing needed for this command */
1836 if (sched->tp_handler->fork_event)
1837 return sched->tp_handler->fork_event(sched, event, machine);
1838
1839 return 0;
1840 }
1841
process_sched_migrate_task_event(const struct perf_tool * tool,struct evsel * evsel,struct perf_sample * sample,struct machine * machine)1842 static int process_sched_migrate_task_event(const struct perf_tool *tool,
1843 struct evsel *evsel,
1844 struct perf_sample *sample,
1845 struct machine *machine)
1846 {
1847 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1848
1849 if (sched->tp_handler->migrate_task_event)
1850 return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine);
1851
1852 return 0;
1853 }
1854
1855 typedef int (*tracepoint_handler)(const struct perf_tool *tool,
1856 struct evsel *evsel,
1857 struct perf_sample *sample,
1858 struct machine *machine);
1859
perf_sched__process_tracepoint_sample(const struct perf_tool * tool __maybe_unused,union perf_event * event __maybe_unused,struct perf_sample * sample,struct evsel * evsel,struct machine * machine)1860 static int perf_sched__process_tracepoint_sample(const struct perf_tool *tool __maybe_unused,
1861 union perf_event *event __maybe_unused,
1862 struct perf_sample *sample,
1863 struct evsel *evsel,
1864 struct machine *machine)
1865 {
1866 int err = 0;
1867
1868 if (evsel->handler != NULL) {
1869 tracepoint_handler f = evsel->handler;
1870 err = f(tool, evsel, sample, machine);
1871 }
1872
1873 return err;
1874 }
1875
1876 static int perf_sched__process_comm(const struct perf_tool *tool __maybe_unused,
1877 union perf_event *event,
1878 struct perf_sample *sample,
1879 struct machine *machine)
1880 {
1881 struct thread *thread;
1882 struct thread_runtime *tr;
1883 int err;
1884
1885 err = perf_event__process_comm(tool, event, sample, machine);
1886 if (err)
1887 return err;
1888
1889 thread = machine__find_thread(machine, sample->pid, sample->tid);
1890 if (!thread) {
1891 pr_err("Internal error: can't find thread\n");
1892 return -1;
1893 }
1894
1895 tr = thread__get_runtime(thread);
1896 if (tr == NULL) {
1897 thread__put(thread);
1898 return -1;
1899 }
1900
1901 tr->comm_changed = true;
1902 thread__put(thread);
1903
1904 return 0;
1905 }
1906
1907 static int perf_sched__read_events(struct perf_sched *sched)
1908 {
1909 struct evsel_str_handler handlers[] = {
1910 { "sched:sched_switch", process_sched_switch_event, },
1911 { "sched:sched_stat_runtime", process_sched_runtime_event, },
1912 { "sched:sched_wakeup", process_sched_wakeup_event, },
1913 { "sched:sched_waking", process_sched_wakeup_event, },
1914 { "sched:sched_wakeup_new", process_sched_wakeup_event, },
1915 { "sched:sched_migrate_task", process_sched_migrate_task_event, },
1916 };
1917 struct perf_session *session;
1918 struct perf_data data = {
1919 .path = input_name,
1920 .mode = PERF_DATA_MODE_READ,
1921 .force = sched->force,
1922 };
1923 int rc = -1;
1924
1925 session = perf_session__new(&data, &sched->tool);
1926 if (IS_ERR(session)) {
1927 pr_debug("Error creating perf session");
1928 return PTR_ERR(session);
1929 }
1930
1931 symbol__init(perf_session__env(session));
1932
1933 /* prefer sched_waking if it is captured */
1934 if (evlist__find_tracepoint_by_name(session->evlist, "sched:sched_waking"))
1935 handlers[2].handler = process_sched_wakeup_ignore;
1936
1937 if (perf_session__set_tracepoints_handlers(session, handlers))
1938 goto out_delete;
1939
1940 if (perf_session__has_traces(session, "record -R")) {
1941 int err = perf_session__process_events(session);
1942 if (err) {
1943 pr_err("Failed to process events, error %d", err);
1944 goto out_delete;
1945 }
1946
1947 sched->nr_events = session->evlist->stats.nr_events[0];
1948 sched->nr_lost_events = session->evlist->stats.total_lost;
1949 sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
1950 }
1951
1952 rc = 0;
1953 out_delete:
1954 perf_session__delete(session);
1955 return rc;
1956 }
1957
1958 /*
1959 * scheduling times are printed as msec.usec
1960 */
1961 static inline void print_sched_time(unsigned long long nsecs, int width)
1962 {
1963 unsigned long msecs;
1964 unsigned long usecs;
1965
1966 msecs = nsecs / NSEC_PER_MSEC;
1967 nsecs -= msecs * NSEC_PER_MSEC;
1968 usecs = nsecs / NSEC_PER_USEC;
1969 printf("%*lu.%03lu ", width, msecs, usecs);
1970 }
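/*
 * For example, print_sched_time(1234567, 6) prints "     1.234 ":
 * 1234567 ns is 1 msec plus 234 usec, with the msec part
 * right-aligned in a 6-character field.
 */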
1971
1972 /*
1973 * returns runtime data for event, allocating memory for it the
1974 * first time it is used.
1975 */
1976 static struct evsel_runtime *evsel__get_runtime(struct evsel *evsel)
1977 {
1978 struct evsel_runtime *r = evsel->priv;
1979
1980 if (r == NULL) {
1981 r = zalloc(sizeof(struct evsel_runtime));
1982 evsel->priv = r;
1983 }
1984
1985 return r;
1986 }
1987
1988 /*
1989 * save last time event was seen per cpu
1990 */
1991 static void evsel__save_time(struct evsel *evsel, u64 timestamp, u32 cpu)
1992 {
1993 struct evsel_runtime *r = evsel__get_runtime(evsel);
1994
1995 if (r == NULL)
1996 return;
1997
1998 if ((cpu >= r->ncpu) || (r->last_time == NULL)) {
1999 int i, n = __roundup_pow_of_two(cpu+1);
2000 void *p = r->last_time;
2001
2002 p = realloc(r->last_time, n * sizeof(u64));
2003 if (!p)
2004 return;
2005
2006 r->last_time = p;
2007 for (i = r->ncpu; i < n; ++i)
2008 r->last_time[i] = (u64) 0;
2009
2010 r->ncpu = n;
2011 }
2012
2013 r->last_time[cpu] = timestamp;
2014 }
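/*
 * The per-cpu array above grows in powers of two: e.g. the first
 * sample seen on cpu 5 grows last_time to __roundup_pow_of_two(6) == 8
 * entries, and the newly added slots start out zeroed.
 */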
2015
2016 /* returns last time this event was seen on the given cpu */
2017 static u64 evsel__get_time(struct evsel *evsel, u32 cpu)
2018 {
2019 struct evsel_runtime *r = evsel__get_runtime(evsel);
2020
2021 if ((r == NULL) || (r->last_time == NULL) || (cpu >= r->ncpu))
2022 return 0;
2023
2024 return r->last_time[cpu];
2025 }
2026
2027 static void timehist__evsel_priv_destructor(void *priv)
2028 {
2029 struct evsel_runtime *r = priv;
2030
2031 if (r) {
2032 free(r->last_time);
2033 free(r);
2034 }
2035 }
2036
2037 static int comm_width = 30;
2038
2039 static char *timehist_get_commstr(struct thread *thread)
2040 {
2041 static char str[32];
2042 const char *comm = thread__comm_str(thread);
2043 pid_t tid = thread__tid(thread);
2044 pid_t pid = thread__pid(thread);
2045 int n;
2046
2047 if (pid == 0)
2048 n = scnprintf(str, sizeof(str), "%s", comm);
2049
2050 else if (tid != pid)
2051 n = scnprintf(str, sizeof(str), "%s[%d/%d]", comm, tid, pid);
2052
2053 else
2054 n = scnprintf(str, sizeof(str), "%s[%d]", comm, tid);
2055
2056 if (n > comm_width)
2057 comm_width = n;
2058
2059 return str;
2060 }
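/*
 * Example strings returned above: the bare comm for pid 0 (the idle
 * task), "comm[1234]" for a single-threaded task, and
 * "comm[1235/1234]" for thread 1235 of process 1234. comm_width grows
 * to the widest string seen so the task-name column stays aligned.
 */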
2061
2062 /* prio field format: xxx or xxx->yyy */
2063 #define MAX_PRIO_STR_LEN 8
2064 static char *timehist_get_priostr(struct evsel *evsel,
2065 struct thread *thread,
2066 struct perf_sample *sample)
2067 {
2068 static char prio_str[16];
2069 int prev_prio = (int)evsel__intval(evsel, sample, "prev_prio");
2070 struct thread_runtime *tr = thread__priv(thread);
2071
2072 if (tr->prio != prev_prio && tr->prio != -1)
2073 scnprintf(prio_str, sizeof(prio_str), "%d->%d", tr->prio, prev_prio);
2074 else
2075 scnprintf(prio_str, sizeof(prio_str), "%d", prev_prio);
2076
2077 return prio_str;
2078 }
2079
2080 static void timehist_header(struct perf_sched *sched)
2081 {
2082 u32 ncpus = sched->max_cpu.cpu + 1;
2083 u32 i, j;
2084
2085 printf("%15s %6s ", "time", "cpu");
2086
2087 if (sched->show_cpu_visual) {
2088 printf(" ");
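/* print a repeating 0..f hex "ruler", one column per cpu */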
2089 for (i = 0, j = 0; i < ncpus; ++i) {
2090 printf("%x", j++);
2091 if (j > 15)
2092 j = 0;
2093 }
2094 printf(" ");
2095 }
2096
2097 printf(" %-*s", comm_width, "task name");
2098
2099 if (sched->show_prio)
2100 printf(" %-*s", MAX_PRIO_STR_LEN, "prio");
2101
2102 printf(" %9s %9s %9s", "wait time", "sch delay", "run time");
2103
2104 if (sched->pre_migrations)
2105 printf(" %9s", "pre-mig time");
2106
2107 if (sched->show_state)
2108 printf(" %s", "state");
2109
2110 printf("\n");
2111
2112 /*
2113 * units row
2114 */
2115 printf("%15s %-6s ", "", "");
2116
2117 if (sched->show_cpu_visual)
2118 printf(" %*s ", ncpus, "");
2119
2120 printf(" %-*s", comm_width, "[tid/pid]");
2121
2122 if (sched->show_prio)
2123 printf(" %-*s", MAX_PRIO_STR_LEN, "");
2124
2125 printf(" %9s %9s %9s", "(msec)", "(msec)", "(msec)");
2126
2127 if (sched->pre_migrations)
2128 printf(" %9s", "(msec)");
2129
2130 printf("\n");
2131
2132 /*
2133 * separator
2134 */
2135 printf("%.15s %.6s ", graph_dotted_line, graph_dotted_line);
2136
2137 if (sched->show_cpu_visual)
2138 printf(" %.*s ", ncpus, graph_dotted_line);
2139
2140 printf(" %.*s", comm_width, graph_dotted_line);
2141
2142 if (sched->show_prio)
2143 printf(" %.*s", MAX_PRIO_STR_LEN, graph_dotted_line);
2144
2145 printf(" %.9s %.9s %.9s", graph_dotted_line, graph_dotted_line, graph_dotted_line);
2146
2147 if (sched->pre_migrations)
2148 printf(" %.9s", graph_dotted_line);
2149
2150 if (sched->show_state)
2151 printf(" %.5s", graph_dotted_line);
2152
2153 printf("\n");
2154 }
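/*
 * With default options the three header rows printed above look
 * roughly like this (a sketch, not verbatim output):
 *
 *            time    cpu  task name            wait time  sch delay   run time
 *                         [tid/pid]               (msec)     (msec)     (msec)
 * --------------- ------  -------------------  ---------  ---------  ---------
 */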
2155
2156 static void timehist_print_sample(struct perf_sched *sched,
2157 struct evsel *evsel,
2158 struct perf_sample *sample,
2159 struct addr_location *al,
2160 struct thread *thread,
2161 u64 t, const char state)
2162 {
2163 struct thread_runtime *tr = thread__priv(thread);
2164 const char *next_comm = evsel__strval(evsel, sample, "next_comm");
2165 const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
2166 u32 max_cpus = sched->max_cpu.cpu + 1;
2167 char tstr[64];
2168 char nstr[30];
2169 u64 wait_time;
2170
2171 if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
2172 return;
2173
2174 timestamp__scnprintf_usec(t, tstr, sizeof(tstr));
2175 printf("%15s [%04d] ", tstr, sample->cpu);
2176
2177 if (sched->show_cpu_visual) {
2178 u32 i;
2179 char c;
2180
2181 printf(" ");
2182 for (i = 0; i < max_cpus; ++i) {
2183 /* flag idle times with 'i'; others are sched events */
2184 if (i == sample->cpu)
2185 c = (thread__tid(thread) == 0) ? 'i' : 's';
2186 else
2187 c = ' ';
2188 printf("%c", c);
2189 }
2190 printf(" ");
2191 }
2192
2193 if (!thread__comm_set(thread)) {
2194 const char *prev_comm = evsel__strval(evsel, sample, "prev_comm");
2195 thread__set_comm(thread, prev_comm, sample->time);
2196 }
2197
2198 printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2199
2200 if (sched->show_prio)
2201 printf(" %-*s ", MAX_PRIO_STR_LEN, timehist_get_priostr(evsel, thread, sample));
2202
2203 wait_time = tr->dt_sleep + tr->dt_iowait + tr->dt_preempt;
2204 print_sched_time(wait_time, 6);
2205
2206 print_sched_time(tr->dt_delay, 6);
2207 print_sched_time(tr->dt_run, 6);
2208 if (sched->pre_migrations)
2209 print_sched_time(tr->dt_pre_mig, 6);
2210
2211 if (sched->show_state)
2212 printf(" %5c ", thread__tid(thread) == 0 ? 'I' : state);
2213
2214 if (sched->show_next) {
2215 snprintf(nstr, sizeof(nstr), "next: %s[%d]", next_comm, next_pid);
2216 printf(" %-*s", comm_width, nstr);
2217 }
2218
2219 if (sched->show_wakeups && !sched->show_next)
2220 printf(" %-*s", comm_width, "");
2221
2222 if (thread__tid(thread) == 0)
2223 goto out;
2224
2225 if (sched->show_callchain)
2226 printf(" ");
2227
2228 sample__fprintf_sym(sample, al, 0,
2229 EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
2230 EVSEL__PRINT_CALLCHAIN_ARROW |
2231 EVSEL__PRINT_SKIP_IGNORED,
2232 get_tls_callchain_cursor(), symbol_conf.bt_stop_list, stdout);
2233
2234 out:
2235 printf("\n");
2236 }
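/*
 * A typical line printed by this function looks roughly like this
 * (a sketch; values invented for illustration):
 *
 *   79371.874569 [0011]  gcc[31949]      0.014      0.000      1.148
 */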
2237
2238 /*
2239 * Explanation of delta-time stats:
2240 *
2241 * t = time of current schedule out event
2242 * tprev = time of previous sched out event
2243 * also time of schedule-in event for current task
2244 * last_time = time of last sched change event for current task
2245 * (i.e., the time the process was last scheduled out)
2246 * ready_to_run = time of wakeup for current task
2247 * migrated = time of task migration to another CPU
2248 *
2249 * -----|-------------|-------------|-------------|-------------|-----
2250 * last ready migrated tprev t
2251 * time to run
2252 *
2253 * |---------------- dt_wait ----------------|
2254 * |--------- dt_delay ---------|-- dt_run --|
2255 * |- dt_pre_mig -|
2256 *
2257 * dt_run = run time of current task
2258 * dt_wait = time between last schedule out event for task and tprev
2259 * represents time spent off the cpu
2260 * dt_delay = time between wakeup and schedule-in of task
2261 * dt_pre_mig = time between wakeup and migration to another CPU
2262 */
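/*
 * Worked example for the function below (all times in ns): with
 * last_time = 1000, ready_to_run = 3000, migrated = 3500,
 * tprev = 5000 and t = 8000:
 *   dt_run     = t - tprev               = 3000
 *   dt_delay   = tprev - ready_to_run    = 2000
 *   dt_pre_mig = migrated - ready_to_run =  500
 *   dt_wait    = tprev - last_time       = 4000, accounted as
 * preempt, iowait or sleep time depending on last_state.
 */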
2263
2264 static void timehist_update_runtime_stats(struct thread_runtime *r,
2265 u64 t, u64 tprev)
2266 {
2267 r->dt_delay = 0;
2268 r->dt_sleep = 0;
2269 r->dt_iowait = 0;
2270 r->dt_preempt = 0;
2271 r->dt_run = 0;
2272 r->dt_pre_mig = 0;
2273
2274 if (tprev) {
2275 r->dt_run = t - tprev;
2276 if (r->ready_to_run) {
2277 if (r->ready_to_run > tprev)
2278 pr_debug("time travel: wakeup time for task > previous sched_switch event\n");
2279 else
2280 r->dt_delay = tprev - r->ready_to_run;
2281
2282 if ((r->migrated > r->ready_to_run) && (r->migrated < tprev))
2283 r->dt_pre_mig = r->migrated - r->ready_to_run;
2284 }
2285
2286 if (r->last_time > tprev)
2287 pr_debug("time travel: last sched out time for task > previous sched_switch event\n");
2288 else if (r->last_time) {
2289 u64 dt_wait = tprev - r->last_time;
2290
2291 if (r->last_state == 'R')
2292 r->dt_preempt = dt_wait;
2293 else if (r->last_state == 'D')
2294 r->dt_iowait = dt_wait;
2295 else
2296 r->dt_sleep = dt_wait;
2297 }
2298 }
2299
2300 update_stats(&r->run_stats, r->dt_run);
2301
2302 r->total_run_time += r->dt_run;
2303 r->total_delay_time += r->dt_delay;
2304 r->total_sleep_time += r->dt_sleep;
2305 r->total_iowait_time += r->dt_iowait;
2306 r->total_preempt_time += r->dt_preempt;
2307 r->total_pre_mig_time += r->dt_pre_mig;
2308 }
2309
2310 static bool is_idle_sample(struct perf_sample *sample,
2311 struct evsel *evsel)
2312 {
2313 /* pid 0 == swapper == idle task */
2314 if (evsel__name_is(evsel, "sched:sched_switch"))
2315 return evsel__intval(evsel, sample, "prev_pid") == 0;
2316
2317 return sample->pid == 0;
2318 }
2319
2320 static void save_task_callchain(struct perf_sched *sched,
2321 struct perf_sample *sample,
2322 struct evsel *evsel,
2323 struct machine *machine)
2324 {
2325 struct callchain_cursor *cursor;
2326 struct thread *thread;
2327
2328 /* want the main thread for the process - it has the maps */
2329 thread = machine__findnew_thread(machine, sample->pid, sample->pid);
2330 if (thread == NULL) {
2331 pr_debug("Failed to get thread for pid %d.\n", sample->pid);
2332 return;
2333 }
2334
2335 if (!sched->show_callchain || sample->callchain == NULL) {
2336 thread__put(thread);
2337 return;
2338 }
2339
2340 cursor = get_tls_callchain_cursor();
2341
2342 if (thread__resolve_callchain(thread, cursor, evsel, sample,
2343 NULL, NULL, sched->max_stack + 2) != 0) {
2344 if (verbose > 0)
2345 pr_err("Failed to resolve callchain. Skipping\n");
2346
2347 thread__put(thread);
2348 return;
2349 }
2350
2351 callchain_cursor_commit(cursor);
2352 thread__put(thread);
2353
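/*
 * Mark scheduler entry points as ignored so that callchain printing
 * with EVSEL__PRINT_SKIP_IGNORED elides them.
 */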
2354 while (true) {
2355 struct callchain_cursor_node *node;
2356 struct symbol *sym;
2357
2358 node = callchain_cursor_current(cursor);
2359 if (node == NULL)
2360 break;
2361
2362 sym = node->ms.sym;
2363 if (sym) {
2364 if (!strcmp(sym->name, "schedule") ||
2365 !strcmp(sym->name, "__schedule") ||
2366 !strcmp(sym->name, "preempt_schedule"))
2367 sym->ignore = 1;
2368 }
2369
2370 callchain_cursor_advance(cursor);
2371 }
2372 }
2373
2374 static int init_idle_thread(struct thread *thread)
2375 {
2376 struct idle_thread_runtime *itr;
2377
2378 thread__set_comm(thread, idle_comm, 0);
2379
2380 itr = zalloc(sizeof(*itr));
2381 if (itr == NULL)
2382 return -ENOMEM;
2383
2384 init_prio(&itr->tr);
2385 init_stats(&itr->tr.run_stats);
2386 callchain_init(&itr->callchain);
2387 callchain_cursor_reset(&itr->cursor);
2388 thread__set_priv(thread, itr);
2389
2390 return 0;
2391 }
2392
2393 /*
2394 * Track idle stats per cpu by maintaining a local thread
2395 * struct for the idle task on each cpu.
2396 */
2397 static int init_idle_threads(int ncpu)
2398 {
2399 int i, ret;
2400
2401 idle_threads = zalloc(ncpu * sizeof(struct thread *));
2402 if (!idle_threads)
2403 return -ENOMEM;
2404
2405 idle_max_cpu = ncpu;
2406
2407 /* allocate the actual thread struct if needed */
2408 for (i = 0; i < ncpu; ++i) {
2409 idle_threads[i] = thread__new(0, 0);
2410 if (idle_threads[i] == NULL)
2411 return -ENOMEM;
2412
2413 ret = init_idle_thread(idle_threads[i]);
2414 if (ret < 0)
2415 return ret;
2416 }
2417
2418 return 0;
2419 }
2420
2421 static void free_idle_threads(void)
2422 {
2423 int i;
2424
2425 if (idle_threads == NULL)
2426 return;
2427
2428 for (i = 0; i < idle_max_cpu; ++i) {
2429 struct thread *idle = idle_threads[i];
2430
2431 if (idle) {
2432 struct idle_thread_runtime *itr;
2433
2434 itr = thread__priv(idle);
2435 if (itr)
2436 thread__put(itr->last_thread);
2437
2438 thread__delete(idle);
2439 }
2440 }
2441
2442 free(idle_threads);
2443 }
2444
2445 static struct thread *get_idle_thread(int cpu)
2446 {
2447 /*
2448 * expand/allocate array of pointers to local thread
2449 * structs if needed
2450 */
2451 if ((cpu >= idle_max_cpu) || (idle_threads == NULL)) {
2452 int i, j = __roundup_pow_of_two(cpu+1);
2453 void *p;
2454
2455 p = realloc(idle_threads, j * sizeof(struct thread *));
2456 if (!p)
2457 return NULL;
2458
2459 idle_threads = (struct thread **) p;
2460 for (i = idle_max_cpu; i < j; ++i)
2461 idle_threads[i] = NULL;
2462
2463 idle_max_cpu = j;
2464 }
2465
2466 /* allocate a new thread struct if needed */
2467 if (idle_threads[cpu] == NULL) {
2468 idle_threads[cpu] = thread__new(0, 0);
2469
2470 if (idle_threads[cpu]) {
2471 if (init_idle_thread(idle_threads[cpu]) < 0)
2472 return NULL;
2473 }
2474 }
2475
2476 return thread__get(idle_threads[cpu]);
2477 }
2478
2479 static void save_idle_callchain(struct perf_sched *sched,
2480 struct idle_thread_runtime *itr,
2481 struct perf_sample *sample)
2482 {
2483 struct callchain_cursor *cursor;
2484
2485 if (!sched->show_callchain || sample->callchain == NULL)
2486 return;
2487
2488 cursor = get_tls_callchain_cursor();
2489 if (cursor == NULL)
2490 return;
2491
2492 callchain_cursor__copy(&itr->cursor, cursor);
2493 }
2494
2495 static struct thread *timehist_get_thread(struct perf_sched *sched,
2496 struct perf_sample *sample,
2497 struct machine *machine,
2498 struct evsel *evsel)
2499 {
2500 struct thread *thread;
2501
2502 if (is_idle_sample(sample, evsel)) {
2503 thread = get_idle_thread(sample->cpu);
2504 if (thread == NULL)
2505 pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
2506
2507 } else {
2508 /* there were samples with tid 0 but non-zero pid */
2509 thread = machine__findnew_thread(machine, sample->pid,
2510 sample->tid ?: sample->pid);
2511 if (thread == NULL) {
2512 pr_debug("Failed to get thread for tid %d. skipping sample.\n",
2513 sample->tid);
2514 }
2515
2516 save_task_callchain(sched, sample, evsel, machine);
2517 if (sched->idle_hist) {
2518 struct thread *idle;
2519 struct idle_thread_runtime *itr;
2520
2521 idle = get_idle_thread(sample->cpu);
2522 if (idle == NULL) {
2523 pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
2524 return NULL;
2525 }
2526
2527 itr = thread__priv(idle);
2528 if (itr == NULL)
2529 return NULL;
2530
2531 thread__put(itr->last_thread);
2532 itr->last_thread = thread__get(thread);
2533
2534 /* copy task callchain when entering idle */
2535 if (evsel__intval(evsel, sample, "next_pid") == 0)
2536 save_idle_callchain(sched, itr, sample);
2537 }
2538 }
2539
2540 return thread;
2541 }
2542
2543 static bool timehist_skip_sample(struct perf_sched *sched,
2544 struct thread *thread,
2545 struct evsel *evsel,
2546 struct perf_sample *sample)
2547 {
2548 bool rc = false;
2549 int prio = -1;
2550 struct thread_runtime *tr = NULL;
2551
2552 if (thread__is_filtered(thread)) {
2553 rc = true;
2554 sched->skipped_samples++;
2555 }
2556
2557 if (sched->prio_str) {
2558 /*
2559 * Because the priority may change during task execution, first
2560 * read the priority from the prev sched_in event for the current
2561 * task. If no prev sched_in event was saved, then read the
2562 * priority from the current task's sched_out event.
2563 */
2564 tr = thread__get_runtime(thread);
2565 if (tr && tr->prio != -1)
2566 prio = tr->prio;
2567 else if (evsel__name_is(evsel, "sched:sched_switch"))
2568 prio = evsel__intval(evsel, sample, "prev_prio");
2569
2570 if (prio != -1 && !test_bit(prio, sched->prio_bitmap)) {
2571 rc = true;
2572 sched->skipped_samples++;
2573 }
2574 }
2575
2576 if (sched->idle_hist) {
2577 if (!evsel__name_is(evsel, "sched:sched_switch"))
2578 rc = true;
2579 else if (evsel__intval(evsel, sample, "prev_pid") != 0 &&
2580 evsel__intval(evsel, sample, "next_pid") != 0)
2581 rc = true;
2582 }
2583
2584 return rc;
2585 }
2586
2587 static void timehist_print_wakeup_event(struct perf_sched *sched,
2588 struct evsel *evsel,
2589 struct perf_sample *sample,
2590 struct machine *machine,
2591 struct thread *awakened)
2592 {
2593 struct thread *thread;
2594 char tstr[64];
2595
2596 thread = machine__findnew_thread(machine, sample->pid, sample->tid);
2597 if (thread == NULL)
2598 return;
2599
2600 /* show the wakeup unless both the wakee and the waker are filtered */
2601 if (timehist_skip_sample(sched, thread, evsel, sample) &&
2602 timehist_skip_sample(sched, awakened, evsel, sample)) {
2603 thread__put(thread);
2604 return;
2605 }
2606
2607 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2608 printf("%15s [%04d] ", tstr, sample->cpu);
2609 if (sched->show_cpu_visual)
2610 printf(" %*s ", sched->max_cpu.cpu + 1, "");
2611
2612 printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2613
2614 /* dt spacer */
2615 printf(" %9s %9s %9s ", "", "", "");
2616
2617 printf("awakened: %s", timehist_get_commstr(awakened));
2618
2619 printf("\n");
2620
2621 thread__put(thread);
2622 }
2623
2624 static int timehist_sched_wakeup_ignore(const struct perf_tool *tool __maybe_unused,
2625 union perf_event *event __maybe_unused,
2626 struct evsel *evsel __maybe_unused,
2627 struct perf_sample *sample __maybe_unused,
2628 struct machine *machine __maybe_unused)
2629 {
2630 return 0;
2631 }
2632
2633 static int timehist_sched_wakeup_event(const struct perf_tool *tool,
2634 union perf_event *event __maybe_unused,
2635 struct evsel *evsel,
2636 struct perf_sample *sample,
2637 struct machine *machine)
2638 {
2639 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2640 struct thread *thread;
2641 struct thread_runtime *tr = NULL;
2642 /* want pid of awakened task, not pid in sample */
2643 const u32 pid = evsel__intval(evsel, sample, "pid");
2644
2645 thread = machine__findnew_thread(machine, 0, pid);
2646 if (thread == NULL)
2647 return -1;
2648
2649 tr = thread__get_runtime(thread);
2650 if (tr == NULL) {
2651 thread__put(thread);
2652 return -1;
2653 }
2654
2655 if (tr->ready_to_run == 0)
2656 tr->ready_to_run = sample->time;
2657
2658 /* show wakeups if requested */
2659 if (sched->show_wakeups &&
2660 !perf_time__skip_sample(&sched->ptime, sample->time))
2661 timehist_print_wakeup_event(sched, evsel, sample, machine, thread);
2662
2663 thread__put(thread);
2664 return 0;
2665 }
2666
2667 static void timehist_print_migration_event(struct perf_sched *sched,
2668 struct evsel *evsel,
2669 struct perf_sample *sample,
2670 struct machine *machine,
2671 struct thread *migrated)
2672 {
2673 struct thread *thread;
2674 char tstr[64];
2675 u32 max_cpus;
2676 u32 ocpu, dcpu;
2677
2678 if (sched->summary_only)
2679 return;
2680
2681 max_cpus = sched->max_cpu.cpu + 1;
2682 ocpu = evsel__intval(evsel, sample, "orig_cpu");
2683 dcpu = evsel__intval(evsel, sample, "dest_cpu");
2684
2685 thread = machine__findnew_thread(machine, sample->pid, sample->tid);
2686 if (thread == NULL)
2687 return;
2688
2689 if (timehist_skip_sample(sched, thread, evsel, sample) &&
2690 timehist_skip_sample(sched, migrated, evsel, sample)) {
2691 thread__put(thread);
2692 return;
2693 }
2694
2695 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2696 printf("%15s [%04d] ", tstr, sample->cpu);
2697
2698 if (sched->show_cpu_visual) {
2699 u32 i;
2700 char c;
2701
2702 printf(" ");
2703 for (i = 0; i < max_cpus; ++i) {
2704 c = (i == sample->cpu) ? 'm' : ' ';
2705 printf("%c", c);
2706 }
2707 printf(" ");
2708 }
2709
2710 printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2711
2712 /* dt spacer */
2713 printf(" %9s %9s %9s ", "", "", "");
2714
2715 printf("migrated: %s", timehist_get_commstr(migrated));
2716 printf(" cpu %d => %d", ocpu, dcpu);
2717
2718 printf("\n");
2719 thread__put(thread);
2720 }
2721
2722 static int timehist_migrate_task_event(const struct perf_tool *tool,
2723 union perf_event *event __maybe_unused,
2724 struct evsel *evsel,
2725 struct perf_sample *sample,
2726 struct machine *machine)
2727 {
2728 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2729 struct thread *thread;
2730 struct thread_runtime *tr = NULL;
2731 /* want pid of migrated task, not pid in sample */
2732 const u32 pid = evsel__intval(evsel, sample, "pid");
2733
2734 thread = machine__findnew_thread(machine, 0, pid);
2735 if (thread == NULL)
2736 return -1;
2737
2738 tr = thread__get_runtime(thread);
2739 if (tr == NULL) {
2740 thread__put(thread);
2741 return -1;
2742 }
2743
2744 tr->migrations++;
2745 tr->migrated = sample->time;
2746
2747 /* show migrations if requested */
2748 if (sched->show_migrations) {
2749 timehist_print_migration_event(sched, evsel, sample,
2750 machine, thread);
2751 }
2752 thread__put(thread);
2753
2754 return 0;
2755 }
2756
2757 static void timehist_update_task_prio(struct evsel *evsel,
2758 struct perf_sample *sample,
2759 struct machine *machine)
2760 {
2761 struct thread *thread;
2762 struct thread_runtime *tr = NULL;
2763 const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
2764 const u32 next_prio = evsel__intval(evsel, sample, "next_prio");
2765
2766 if (next_pid == 0)
2767 thread = get_idle_thread(sample->cpu);
2768 else
2769 thread = machine__findnew_thread(machine, -1, next_pid);
2770
2771 if (thread == NULL)
2772 return;
2773
2774 tr = thread__get_runtime(thread);
2775 if (tr != NULL)
2776 tr->prio = next_prio;
2777
2778 thread__put(thread);
2779 }
2780
2781 static int timehist_sched_change_event(const struct perf_tool *tool,
2782 union perf_event *event,
2783 struct evsel *evsel,
2784 struct perf_sample *sample,
2785 struct machine *machine)
2786 {
2787 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2788 struct perf_time_interval *ptime = &sched->ptime;
2789 struct addr_location al;
2790 struct thread *thread = NULL;
2791 struct thread_runtime *tr = NULL;
2792 u64 tprev, t = sample->time;
2793 int rc = 0;
2794 const char state = evsel__taskstate(evsel, sample, "prev_state");
2795
2796 addr_location__init(&al);
2797 if (machine__resolve(machine, &al, sample) < 0) {
2798 pr_err("problem processing %d event. skipping it\n",
2799 event->header.type);
2800 rc = -1;
2801 goto out;
2802 }
2803
2804 if (sched->show_prio || sched->prio_str)
2805 timehist_update_task_prio(evsel, sample, machine);
2806
2807 thread = timehist_get_thread(sched, sample, machine, evsel);
2808 if (thread == NULL) {
2809 rc = -1;
2810 goto out;
2811 }
2812
2813 if (timehist_skip_sample(sched, thread, evsel, sample))
2814 goto out;
2815
2816 tr = thread__get_runtime(thread);
2817 if (tr == NULL) {
2818 rc = -1;
2819 goto out;
2820 }
2821
2822 tprev = evsel__get_time(evsel, sample->cpu);
2823
2824 /*
2825 * If a start time is given:
2826 * - if the sample time is before the window of interest, skip the sample
2827 * - if tprev is before the window, reset it to the start of the window
2828 */
2829 if (ptime->start && ptime->start > t)
2830 goto out;
2831
2832 if (tprev && ptime->start > tprev)
2833 tprev = ptime->start;
2834
2835 /*
2836 * If an end time is given:
2837 * - if the previous sched event is beyond the window, we are done
2838 * - if the sample time is beyond the window, reset it to the end
2839 * of the window to close out stats for the window of interest
2840 * - if tprev is 0, i.e. the sched_in event for the current task
2841 * was not recorded, we cannot determine whether the sched_in
2842 * event is within the window of interest - ignore it
2843 */
2844 if (ptime->end) {
2845 if (!tprev || tprev > ptime->end)
2846 goto out;
2847
2848 if (t > ptime->end)
2849 t = ptime->end;
2850 }
2851
2852 if (!sched->idle_hist || thread__tid(thread) == 0) {
2853 if (!cpu_list || test_bit(sample->cpu, cpu_bitmap))
2854 timehist_update_runtime_stats(tr, t, tprev);
2855
2856 if (sched->idle_hist) {
2857 struct idle_thread_runtime *itr = (void *)tr;
2858 struct thread_runtime *last_tr;
2859
2860 if (itr->last_thread == NULL)
2861 goto out;
2862
2863 /* add current idle time as last thread's runtime */
2864 last_tr = thread__get_runtime(itr->last_thread);
2865 if (last_tr == NULL)
2866 goto out;
2867
2868 timehist_update_runtime_stats(last_tr, t, tprev);
2869 /*
2870 * Reset the delta times of the last thread since they are not
2871 * updated here and would otherwise show stale values next time.
2872 * We only care about the total run time and run stats.
2873 */
2874 last_tr->dt_run = 0;
2875 last_tr->dt_delay = 0;
2876 last_tr->dt_sleep = 0;
2877 last_tr->dt_iowait = 0;
2878 last_tr->dt_preempt = 0;
2879
2880 if (itr->cursor.nr)
2881 callchain_append(&itr->callchain, &itr->cursor, t - tprev);
2882
2883 itr->last_thread = NULL;
2884 }
2885
2886 if (!sched->summary_only)
2887 timehist_print_sample(sched, evsel, sample, &al, thread, t, state);
2888 }
2889
2890 out:
2891 if (sched->hist_time.start == 0 && t >= ptime->start)
2892 sched->hist_time.start = t;
2893 if (ptime->end == 0 || t <= ptime->end)
2894 sched->hist_time.end = t;
2895
2896 if (tr) {
2897 /* time of this sched_switch event becomes last time task seen */
2898 tr->last_time = sample->time;
2899
2900 /* last state is used to determine where to account wait time */
2901 tr->last_state = state;
2902
2903 /* sched out event for task so reset ready to run time and migrated time */
2904 if (state == 'R')
2905 tr->ready_to_run = t;
2906 else
2907 tr->ready_to_run = 0;
2908
2909 tr->migrated = 0;
2910 }
2911
2912 evsel__save_time(evsel, sample->time, sample->cpu);
2913
2914 thread__put(thread);
2915 addr_location__exit(&al);
2916 return rc;
2917 }
2918
2919 static int timehist_sched_switch_event(const struct perf_tool *tool,
2920 union perf_event *event,
2921 struct evsel *evsel,
2922 struct perf_sample *sample,
2923 struct machine *machine __maybe_unused)
2924 {
2925 return timehist_sched_change_event(tool, event, evsel, sample, machine);
2926 }
2927
2928 static int process_lost(const struct perf_tool *tool __maybe_unused,
2929 union perf_event *event,
2930 struct perf_sample *sample,
2931 struct machine *machine __maybe_unused)
2932 {
2933 char tstr[64];
2934
2935 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2936 printf("%15s ", tstr);
2937 printf("lost %" PRI_lu64 " events on cpu %d\n", event->lost.lost, sample->cpu);
2938
2939 return 0;
2940 }
2941
2942
2943 static void print_thread_runtime(struct thread *t,
2944 struct thread_runtime *r)
2945 {
2946 double mean = avg_stats(&r->run_stats);
2947 float stddev;
2948
2949 printf("%*s %5d %9" PRIu64 " ",
2950 comm_width, timehist_get_commstr(t), thread__ppid(t),
2951 (u64) r->run_stats.n);
2952
2953 print_sched_time(r->total_run_time, 8);
2954 stddev = rel_stddev_stats(stddev_stats(&r->run_stats), mean);
2955 print_sched_time(r->run_stats.min, 6);
2956 printf(" ");
2957 print_sched_time((u64) mean, 6);
2958 printf(" ");
2959 print_sched_time(r->run_stats.max, 6);
2960 printf(" ");
2961 printf("%5.2f", stddev);
2962 printf(" %5" PRIu64, r->migrations);
2963 printf("\n");
2964 }
2965
2966 static void print_thread_waittime(struct thread *t,
2967 struct thread_runtime *r)
2968 {
2969 printf("%*s %5d %9" PRIu64 " ",
2970 comm_width, timehist_get_commstr(t), thread__ppid(t),
2971 (u64) r->run_stats.n);
2972
2973 print_sched_time(r->total_run_time, 8);
2974 print_sched_time(r->total_sleep_time, 6);
2975 printf(" ");
2976 print_sched_time(r->total_iowait_time, 6);
2977 printf(" ");
2978 print_sched_time(r->total_preempt_time, 6);
2979 printf(" ");
2980 print_sched_time(r->total_delay_time, 6);
2981 printf("\n");
2982 }
2983
2984 struct total_run_stats {
2985 struct perf_sched *sched;
2986 u64 sched_count;
2987 u64 task_count;
2988 u64 total_run_time;
2989 };
2990
2991 static int show_thread_runtime(struct thread *t, void *priv)
2992 {
2993 struct total_run_stats *stats = priv;
2994 struct thread_runtime *r;
2995
2996 if (thread__is_filtered(t))
2997 return 0;
2998
2999 r = thread__priv(t);
3000 if (r && r->run_stats.n) {
3001 stats->task_count++;
3002 stats->sched_count += r->run_stats.n;
3003 stats->total_run_time += r->total_run_time;
3004
3005 if (stats->sched->show_state)
3006 print_thread_waittime(t, r);
3007 else
3008 print_thread_runtime(t, r);
3009 }
3010
3011 return 0;
3012 }
3013
3014 static size_t callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
3015 {
3016 const char *sep = " <- ";
3017 struct callchain_list *chain;
3018 size_t ret = 0;
3019 char bf[1024];
3020 bool first;
3021
3022 if (node == NULL)
3023 return 0;
3024
3025 ret = callchain__fprintf_folded(fp, node->parent);
3026 first = (ret == 0);
3027
3028 list_for_each_entry(chain, &node->val, list) {
3029 if (chain->ip >= PERF_CONTEXT_MAX)
3030 continue;
3031 if (chain->ms.sym && chain->ms.sym->ignore)
3032 continue;
3033 ret += fprintf(fp, "%s%s", first ? "" : sep,
3034 callchain_list__sym_name(chain, bf, sizeof(bf),
3035 false));
3036 first = false;
3037 }
3038
3039 return ret;
3040 }
3041
3042 static size_t timehist_print_idlehist_callchain(struct rb_root_cached *root)
3043 {
3044 size_t ret = 0;
3045 FILE *fp = stdout;
3046 struct callchain_node *chain;
3047 struct rb_node *rb_node = rb_first_cached(root);
3048
3049 printf(" %16s %8s %s\n", "Idle time (msec)", "Count", "Callchains");
3050 printf(" %.16s %.8s %.50s\n", graph_dotted_line, graph_dotted_line,
3051 graph_dotted_line);
3052
3053 while (rb_node) {
3054 chain = rb_entry(rb_node, struct callchain_node, rb_node);
3055 rb_node = rb_next(rb_node);
3056
3057 ret += fprintf(fp, " ");
3058 print_sched_time(chain->hit, 12);
3059 ret += 17; /* print_sched_time prints its 2nd arg + 5 chars (".xxx" and a trailing space) */
3060 ret += fprintf(fp, " %8d ", chain->count);
3061 ret += callchain__fprintf_folded(fp, chain);
3062 ret += fprintf(fp, "\n");
3063 }
3064
3065 return ret;
3066 }
3067
3068 static void timehist_print_summary(struct perf_sched *sched,
3069 struct perf_session *session)
3070 {
3071 struct machine *m = &session->machines.host;
3072 struct total_run_stats totals;
3073 u64 task_count;
3074 struct thread *t;
3075 struct thread_runtime *r;
3076 int i;
3077 u64 hist_time = sched->hist_time.end - sched->hist_time.start;
3078
3079 memset(&totals, 0, sizeof(totals));
3080 totals.sched = sched;
3081
3082 if (sched->idle_hist) {
3083 printf("\nIdle-time summary\n");
3084 printf("%*s parent sched-out ", comm_width, "comm");
3085 printf(" idle-time min-idle avg-idle max-idle stddev migrations\n");
3086 } else if (sched->show_state) {
3087 printf("\nWait-time summary\n");
3088 printf("%*s parent sched-in ", comm_width, "comm");
3089 printf(" run-time sleep iowait preempt delay\n");
3090 } else {
3091 printf("\nRuntime summary\n");
3092 printf("%*s parent sched-in ", comm_width, "comm");
3093 printf(" run-time min-run avg-run max-run stddev migrations\n");
3094 }
3095 printf("%*s (count) ", comm_width, "");
3096 printf(" (msec) (msec) (msec) (msec) %s\n",
3097 sched->show_state ? "(msec)" : "%");
3098 printf("%.117s\n", graph_dotted_line);
3099
3100 machine__for_each_thread(m, show_thread_runtime, &totals);
3101 task_count = totals.task_count;
3102 if (!task_count)
3103 printf("<no still running tasks>\n");
3104
3105 /* CPU idle stats not tracked when samples were skipped */
3106 if (sched->skipped_samples && !sched->idle_hist)
3107 return;
3108
3109 printf("\nIdle stats:\n");
3110 for (i = 0; i < idle_max_cpu; ++i) {
3111 if (cpu_list && !test_bit(i, cpu_bitmap))
3112 continue;
3113
3114 t = idle_threads[i];
3115 if (!t)
3116 continue;
3117
3118 r = thread__priv(t);
3119 if (r && r->run_stats.n) {
3120 totals.sched_count += r->run_stats.n;
3121 printf(" CPU %2d idle for ", i);
3122 print_sched_time(r->total_run_time, 6);
3123 printf(" msec (%6.2f%%)\n", 100.0 * r->total_run_time / hist_time);
3124 } else
3125 printf(" CPU %2d idle entire time window\n", i);
3126 }
3127
3128 if (sched->idle_hist && sched->show_callchain) {
3129 callchain_param.mode = CHAIN_FOLDED;
3130 callchain_param.value = CCVAL_PERIOD;
3131
3132 callchain_register_param(&callchain_param);
3133
3134 printf("\nIdle stats by callchain:\n");
3135 for (i = 0; i < idle_max_cpu; ++i) {
3136 struct idle_thread_runtime *itr;
3137
3138 t = idle_threads[i];
3139 if (!t)
3140 continue;
3141
3142 itr = thread__priv(t);
3143 if (itr == NULL)
3144 continue;
3145
3146 callchain_param.sort(&itr->sorted_root.rb_root, &itr->callchain,
3147 0, &callchain_param);
3148
3149 printf(" CPU %2d:", i);
3150 print_sched_time(itr->tr.total_run_time, 6);
3151 printf(" msec\n");
3152 timehist_print_idlehist_callchain(&itr->sorted_root);
3153 printf("\n");
3154 }
3155 }
3156
3157 printf("\n"
3158 " Total number of unique tasks: %" PRIu64 "\n"
3159 "Total number of context switches: %" PRIu64 "\n",
3160 totals.task_count, totals.sched_count);
3161
3162 printf(" Total run time (msec): ");
3163 print_sched_time(totals.total_run_time, 2);
3164 printf("\n");
3165
3166 printf(" Total scheduling time (msec): ");
3167 print_sched_time(hist_time, 2);
3168 printf(" (x %d)\n", sched->max_cpu.cpu);
3169 }
3170
3171 typedef int (*sched_handler)(const struct perf_tool *tool,
3172 union perf_event *event,
3173 struct evsel *evsel,
3174 struct perf_sample *sample,
3175 struct machine *machine);
3176
3177 static int perf_timehist__process_sample(const struct perf_tool *tool,
3178 union perf_event *event,
3179 struct perf_sample *sample,
3180 struct evsel *evsel,
3181 struct machine *machine)
3182 {
3183 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
3184 int err = 0;
3185 struct perf_cpu this_cpu = {
3186 .cpu = sample->cpu,
3187 };
3188
3189 if (this_cpu.cpu > sched->max_cpu.cpu)
3190 sched->max_cpu = this_cpu;
3191
3192 if (evsel->handler != NULL) {
3193 sched_handler f = evsel->handler;
3194
3195 err = f(tool, event, evsel, sample, machine);
3196 }
3197
3198 return err;
3199 }
3200
3201 static int timehist_check_attr(struct perf_sched *sched,
3202 struct evlist *evlist)
3203 {
3204 struct evsel *evsel;
3205 struct evsel_runtime *er;
3206
3207 list_for_each_entry(evsel, &evlist->core.entries, core.node) {
3208 er = evsel__get_runtime(evsel);
3209 if (er == NULL) {
3210 pr_err("Failed to allocate memory for evsel runtime data\n");
3211 return -1;
3212 }
3213
3214 /* only need to save callchain related to sched_switch event */
3215 if (sched->show_callchain &&
3216 evsel__name_is(evsel, "sched:sched_switch") &&
3217 !evsel__has_callchain(evsel)) {
3218 pr_info("Samples of sched_switch event do not have callchains.\n");
3219 sched->show_callchain = 0;
3220 symbol_conf.use_callchain = 0;
3221 }
3222 }
3223
3224 return 0;
3225 }
3226
3227 static int timehist_parse_prio_str(struct perf_sched *sched)
3228 {
3229 char *p;
3230 unsigned long start_prio, end_prio;
3231 const char *str = sched->prio_str;
3232
3233 if (!str)
3234 return 0;
3235
3236 while (isdigit(*str)) {
3237 p = NULL;
3238 start_prio = strtoul(str, &p, 0);
3239 if (start_prio >= MAX_PRIO || (*p != '\0' && *p != ',' && *p != '-'))
3240 return -1;
3241
3242 if (*p == '-') {
3243 str = ++p;
3244 p = NULL;
3245 end_prio = strtoul(str, &p, 0);
3246
3247 if (end_prio >= MAX_PRIO || (*p != '\0' && *p != ','))
3248 return -1;
3249
3250 if (end_prio < start_prio)
3251 return -1;
3252 } else {
3253 end_prio = start_prio;
3254 }
3255
3256 for (; start_prio <= end_prio; start_prio++)
3257 __set_bit(start_prio, sched->prio_bitmap);
3258
3259 if (*p)
3260 ++p;
3261
3262 str = p;
3263 }
3264
3265 return 0;
3266 }
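/*
 * For example, a prio string such as "0,120-125" sets bits 0 and
 * 120..125 in prio_bitmap, while "140" or "5-3" is rejected with -1:
 * priorities must be below MAX_PRIO and ranges must not be reversed.
 */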
3267
3268 static int perf_sched__timehist(struct perf_sched *sched)
3269 {
3270 struct evsel_str_handler handlers[] = {
3271 { "sched:sched_switch", timehist_sched_switch_event, },
3272 { "sched:sched_wakeup", timehist_sched_wakeup_event, },
3273 { "sched:sched_waking", timehist_sched_wakeup_event, },
3274 { "sched:sched_wakeup_new", timehist_sched_wakeup_event, },
3275 };
3276 const struct evsel_str_handler migrate_handlers[] = {
3277 { "sched:sched_migrate_task", timehist_migrate_task_event, },
3278 };
3279 struct perf_data data = {
3280 .path = input_name,
3281 .mode = PERF_DATA_MODE_READ,
3282 .force = sched->force,
3283 };
3284
3285 struct perf_session *session;
3286 struct perf_env *env;
3287 struct evlist *evlist;
3288 int err = -1;
3289
3290 /*
3291 * event handlers for timehist option
3292 */
3293 sched->tool.sample = perf_timehist__process_sample;
3294 sched->tool.mmap = perf_event__process_mmap;
3295 sched->tool.comm = perf_event__process_comm;
3296 sched->tool.exit = perf_event__process_exit;
3297 sched->tool.fork = perf_event__process_fork;
3298 sched->tool.lost = process_lost;
3299 sched->tool.attr = perf_event__process_attr;
3300 sched->tool.tracing_data = perf_event__process_tracing_data;
3301 sched->tool.build_id = perf_event__process_build_id;
3302
3303 sched->tool.ordering_requires_timestamps = true;
3304
3305 symbol_conf.use_callchain = sched->show_callchain;
3306
3307 session = perf_session__new(&data, &sched->tool);
3308 if (IS_ERR(session))
3309 return PTR_ERR(session);
3310
3311 env = perf_session__env(session);
3312 if (cpu_list) {
3313 err = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap);
3314 if (err < 0)
3315 goto out;
3316 }
3317
3318 evlist = session->evlist;
3319
3320 symbol__init(env);
3321
3322 if (perf_time__parse_str(&sched->ptime, sched->time_str) != 0) {
3323 pr_err("Invalid time string\n");
3324 err = -EINVAL;
3325 goto out;
3326 }
3327
3328 if (timehist_check_attr(sched, evlist) != 0)
3329 goto out;
3330
3331 if (timehist_parse_prio_str(sched) != 0) {
3332 pr_err("Invalid prio string\n");
3333 goto out;
3334 }
3335
3336 setup_pager();
3337
3338 evsel__set_priv_destructor(timehist__evsel_priv_destructor);
3339
3340 /* prefer sched_waking if it is captured */
3341 if (evlist__find_tracepoint_by_name(session->evlist, "sched:sched_waking"))
3342 handlers[1].handler = timehist_sched_wakeup_ignore;
3343
3344 /* setup per-evsel handlers */
3345 if (perf_session__set_tracepoints_handlers(session, handlers))
3346 goto out;
3347
3348 /* sched_switch event at a minimum needs to exist */
3349 if (!evlist__find_tracepoint_by_name(session->evlist, "sched:sched_switch")) {
3350 pr_err("No sched_switch events found. Have you run 'perf sched record'?\n");
3351 goto out;
3352 }
3353
3354 if ((sched->show_migrations || sched->pre_migrations) &&
3355 perf_session__set_tracepoints_handlers(session, migrate_handlers))
3356 goto out;
3357
3358 /* pre-allocate struct for per-CPU idle stats */
3359 sched->max_cpu.cpu = env->nr_cpus_online;
3360 if (sched->max_cpu.cpu == 0)
3361 sched->max_cpu.cpu = 4;
3362 if (init_idle_threads(sched->max_cpu.cpu))
3363 goto out;
3364
3365 /* summary_only implies summary option, but don't overwrite summary if set */
3366 if (sched->summary_only)
3367 sched->summary = sched->summary_only;
3368
3369 if (!sched->summary_only)
3370 timehist_header(sched);
3371
3372 err = perf_session__process_events(session);
3373 if (err) {
3374 pr_err("Failed to process events, error %d", err);
3375 goto out;
3376 }
3377
3378 sched->nr_events = evlist->stats.nr_events[0];
3379 sched->nr_lost_events = evlist->stats.total_lost;
3380 sched->nr_lost_chunks = evlist->stats.nr_events[PERF_RECORD_LOST];
3381
3382 if (sched->summary)
3383 timehist_print_summary(sched, session);
3384
3385 out:
3386 free_idle_threads();
3387 perf_session__delete(session);
3388
3389 return err;
3390 }
3391
3392
3393 static void print_bad_events(struct perf_sched *sched)
3394 {
3395 if (sched->nr_unordered_timestamps && sched->nr_timestamps) {
3396 printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
3397 (double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0,
3398 sched->nr_unordered_timestamps, sched->nr_timestamps);
3399 }
3400 if (sched->nr_lost_events && sched->nr_events) {
3401 printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
3402 (double)sched->nr_lost_events/(double)sched->nr_events * 100.0,
3403 sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks);
3404 }
3405 if (sched->nr_context_switch_bugs && sched->nr_timestamps) {
3406 printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)",
3407 (double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0,
3408 sched->nr_context_switch_bugs, sched->nr_timestamps);
3409 if (sched->nr_lost_events)
3410 printf(" (due to lost events?)");
3411 printf("\n");
3412 }
3413 }
3414
3415 static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *data)
3416 {
3417 struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
3418 struct work_atoms *this;
3419 const char *comm = thread__comm_str(data->thread), *this_comm;
3420 bool leftmost = true;
3421
3422 while (*new) {
3423 int cmp;
3424
3425 this = container_of(*new, struct work_atoms, node);
3426 parent = *new;
3427
3428 this_comm = thread__comm_str(this->thread);
3429 cmp = strcmp(comm, this_comm);
3430 if (cmp > 0) {
3431 new = &((*new)->rb_left);
3432 } else if (cmp < 0) {
3433 new = &((*new)->rb_right);
3434 leftmost = false;
3435 } else {
3436 this->num_merged++;
3437 this->total_runtime += data->total_runtime;
3438 this->nb_atoms += data->nb_atoms;
3439 this->total_lat += data->total_lat;
3440 list_splice_init(&data->work_list, &this->work_list);
3441 if (this->max_lat < data->max_lat) {
3442 this->max_lat = data->max_lat;
3443 this->max_lat_start = data->max_lat_start;
3444 this->max_lat_end = data->max_lat_end;
3445 }
3446 free_work_atoms(data);
3447 return;
3448 }
3449 }
3450
3451 data->num_merged++;
3452 rb_link_node(&data->node, parent, new);
3453 rb_insert_color_cached(&data->node, root, leftmost);
3454 }
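/*
 * E.g. if two tasks both named "kworker/0:1" each have a work_atoms
 * entry, the one inserted second is folded into the first: runtimes,
 * atom counts and total latencies are summed, the larger max latency
 * (with its start/end stamps) wins, and the duplicate is freed.
 */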
3455
3456 static void perf_sched__merge_lat(struct perf_sched *sched)
3457 {
3458 struct work_atoms *data;
3459 struct rb_node *node;
3460
3461 if (sched->skip_merge)
3462 return;
3463
3464 while ((node = rb_first_cached(&sched->atom_root))) {
3465 rb_erase_cached(node, &sched->atom_root);
3466 data = rb_entry(node, struct work_atoms, node);
3467 __merge_work_atoms(&sched->merged_atom_root, data);
3468 }
3469 }
3470
3471 static int setup_cpus_switch_event(struct perf_sched *sched)
3472 {
3473 unsigned int i;
3474
3475 sched->cpu_last_switched = calloc(MAX_CPUS, sizeof(*(sched->cpu_last_switched)));
3476 if (!sched->cpu_last_switched)
3477 return -1;
3478
3479 sched->curr_pid = malloc(MAX_CPUS * sizeof(*(sched->curr_pid)));
3480 if (!sched->curr_pid) {
3481 zfree(&sched->cpu_last_switched);
3482 return -1;
3483 }
3484
3485 for (i = 0; i < MAX_CPUS; i++)
3486 sched->curr_pid[i] = -1;
3487
3488 return 0;
3489 }
3490
3491 static void free_cpus_switch_event(struct perf_sched *sched)
3492 {
3493 zfree(&sched->curr_pid);
3494 zfree(&sched->cpu_last_switched);
3495 }
3496
3497 static int perf_sched__lat(struct perf_sched *sched)
3498 {
3499 int rc = -1;
3500 struct rb_node *next;
3501
3502 setup_pager();
3503
3504 if (setup_cpus_switch_event(sched))
3505 return rc;
3506
3507 if (perf_sched__read_events(sched))
3508 goto out_free_cpus_switch_event;
3509
3510 perf_sched__merge_lat(sched);
3511 perf_sched__sort_lat(sched);
3512
3513 printf("\n -------------------------------------------------------------------------------------------------------------------------------------------\n");
3514 printf(" Task | Runtime ms | Count | Avg delay ms | Max delay ms | Max delay start | Max delay end |\n");
3515 printf(" -------------------------------------------------------------------------------------------------------------------------------------------\n");
3516
3517 next = rb_first_cached(&sched->sorted_atom_root);
3518
3519 while (next) {
3520 struct work_atoms *work_list;
3521
3522 work_list = rb_entry(next, struct work_atoms, node);
3523 output_lat_thread(sched, work_list);
3524 next = rb_next(next);
3525 }
3526
3527 printf(" -----------------------------------------------------------------------------------------------------------------\n");
3528 printf(" TOTAL: |%11.3f ms |%9" PRIu64 " |\n",
3529 (double)sched->all_runtime / NSEC_PER_MSEC, sched->all_count);
3530
3531 printf(" ---------------------------------------------------\n");
3532
3533 print_bad_events(sched);
3534 printf("\n");
3535
3536 rc = 0;
3537
3538 while ((next = rb_first_cached(&sched->sorted_atom_root))) {
3539 struct work_atoms *data;
3540
3541 data = rb_entry(next, struct work_atoms, node);
3542 rb_erase_cached(next, &sched->sorted_atom_root);
3543 free_work_atoms(data);
3544 }
3545 out_free_cpus_switch_event:
3546 free_cpus_switch_event(sched);
3547 return rc;
3548 }
3549
3550 static int setup_map_cpus(struct perf_sched *sched)
3551 {
3552 sched->max_cpu.cpu = sysconf(_SC_NPROCESSORS_CONF);
3553
3554 if (sched->map.comp) {
3555 sched->map.comp_cpus = zalloc(sched->max_cpu.cpu * sizeof(int));
3556 if (!sched->map.comp_cpus)
3557 return -1;
3558 }
3559
3560 if (sched->map.cpus_str) {
3561 sched->map.cpus = perf_cpu_map__new(sched->map.cpus_str);
3562 if (!sched->map.cpus) {
3563 pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
3564 zfree(&sched->map.comp_cpus);
3565 return -1;
3566 }
3567 }
3568
3569 return 0;
3570 }
3571
3572 static int setup_color_pids(struct perf_sched *sched)
3573 {
3574 struct perf_thread_map *map;
3575
3576 if (!sched->map.color_pids_str)
3577 return 0;
3578
3579 map = thread_map__new_by_tid_str(sched->map.color_pids_str);
3580 if (!map) {
3581 pr_err("failed to get thread map from %s\n", sched->map.color_pids_str);
3582 return -1;
3583 }
3584
3585 sched->map.color_pids = map;
3586 return 0;
3587 }
3588
3589 static int setup_color_cpus(struct perf_sched *sched)
3590 {
3591 struct perf_cpu_map *map;
3592
3593 if (!sched->map.color_cpus_str)
3594 return 0;
3595
3596 map = perf_cpu_map__new(sched->map.color_cpus_str);
3597 if (!map) {
3598 pr_err("failed to get thread map from %s\n", sched->map.color_cpus_str);
3599 return -1;
3600 }
3601
3602 sched->map.color_cpus = map;
3603 return 0;
3604 }
3605
3606 static int perf_sched__map(struct perf_sched *sched)
3607 {
3608 int rc = -1;
3609
3610 sched->curr_thread = calloc(MAX_CPUS, sizeof(*(sched->curr_thread)));
3611 if (!sched->curr_thread)
3612 return rc;
3613
3614 sched->curr_out_thread = calloc(MAX_CPUS, sizeof(*(sched->curr_out_thread)));
3615 if (!sched->curr_out_thread)
3616 goto out_free_curr_thread;
3617
3618 if (setup_cpus_switch_event(sched))
3619 goto out_free_curr_out_thread;
3620
3621 if (setup_map_cpus(sched))
3622 goto out_free_cpus_switch_event;
3623
3624 if (setup_color_pids(sched))
3625 goto out_put_map_cpus;
3626
3627 if (setup_color_cpus(sched))
3628 goto out_put_color_pids;
3629
3630 setup_pager();
3631 if (perf_sched__read_events(sched))
3632 goto out_put_color_cpus;
3633
3634 rc = 0;
3635 print_bad_events(sched);
3636
3637 out_put_color_cpus:
3638 perf_cpu_map__put(sched->map.color_cpus);
3639
3640 out_put_color_pids:
3641 perf_thread_map__put(sched->map.color_pids);
3642
3643 out_put_map_cpus:
3644 zfree(&sched->map.comp_cpus);
3645 perf_cpu_map__put(sched->map.cpus);
3646
3647 out_free_cpus_switch_event:
3648 free_cpus_switch_event(sched);
3649
3650 out_free_curr_out_thread:
3651 for (int i = 0; i < MAX_CPUS; i++)
3652 thread__put(sched->curr_out_thread[i]);
3653 zfree(&sched->curr_out_thread);
3654
3655 out_free_curr_thread:
3656 for (int i = 0; i < MAX_CPUS; i++)
3657 thread__put(sched->curr_thread[i]);
3658 zfree(&sched->curr_thread);
3659 return rc;
3660 }
3661
3662 static int perf_sched__replay(struct perf_sched *sched)
3663 {
3664 int ret;
3665 unsigned long i;
3666
3667 mutex_init(&sched->start_work_mutex);
3668 mutex_init(&sched->work_done_wait_mutex);
3669
3670 ret = setup_cpus_switch_event(sched);
3671 if (ret)
3672 goto out_mutex_destroy;
3673
3674 calibrate_run_measurement_overhead(sched);
3675 calibrate_sleep_measurement_overhead(sched);
3676
3677 test_calibrations(sched);
3678
3679 ret = perf_sched__read_events(sched);
3680 if (ret)
3681 goto out_free_cpus_switch_event;
3682
3683 printf("nr_run_events: %ld\n", sched->nr_run_events);
3684 printf("nr_sleep_events: %ld\n", sched->nr_sleep_events);
3685 printf("nr_wakeup_events: %ld\n", sched->nr_wakeup_events);
3686
3687 if (sched->targetless_wakeups)
3688 printf("target-less wakeups: %ld\n", sched->targetless_wakeups);
3689 if (sched->multitarget_wakeups)
3690 printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups);
3691 if (sched->nr_run_events_optimized)
3692 printf("run atoms optimized: %ld\n",
3693 sched->nr_run_events_optimized);
3694
3695 print_task_traces(sched);
3696 add_cross_task_wakeups(sched);
3697
3698 sched->thread_funcs_exit = false;
3699 create_tasks(sched);
3700 printf("------------------------------------------------------------\n");
3701 if (sched->replay_repeat == 0)
3702 sched->replay_repeat = UINT_MAX;
3703
3704 for (i = 0; i < sched->replay_repeat; i++)
3705 run_one_test(sched);
3706
3707 sched->thread_funcs_exit = true;
3708 destroy_tasks(sched);
3709
3710 out_free_cpus_switch_event:
3711 free_cpus_switch_event(sched);
3712
3713 out_mutex_destroy:
3714 mutex_destroy(&sched->start_work_mutex);
3715 mutex_destroy(&sched->work_done_wait_mutex);
3716 return ret;
3717 }
3718
3719 static void setup_sorting(struct perf_sched *sched, const struct option *options,
3720 const char * const usage_msg[])
3721 {
3722 char *tmp, *tok, *str = strdup(sched->sort_order);
3723
3724 for (tok = strtok_r(str, ", ", &tmp);
3725 tok; tok = strtok_r(NULL, ", ", &tmp)) {
3726 if (sort_dimension__add(tok, &sched->sort_list) < 0) {
3727 usage_with_options_msg(usage_msg, options,
3728 "Unknown --sort key: `%s'", tok);
3729 }
3730 }
3731
3732 free(str);
3733
3734 sort_dimension__add("pid", &sched->cmp_pid);
3735 }
3736
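/*
 * The sched_stat_* tracepoints depend on CONFIG_SCHEDSTATS; if one
 * of them resolves, assume the others are available too.
 */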
3737 static bool schedstat_events_exposed(void)
3738 {
3739 /*
3740 * Probe the "sched:sched_stat_wait" tracepoint to determine
3741 * whether the schedstat tracepoints are exposed at all.
3742 */
3743 return IS_ERR(trace_event__tp_format("sched", "sched_stat_wait")) ?
3744 false : true;
3745 }
3746
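/*
 * 'perf sched record': synthesize a 'perf record' command line with
 * the sched tracepoints this tool consumes, append any extra user
 * arguments, and hand the result to cmd_record().
 */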
3747 static int __cmd_record(int argc, const char **argv)
3748 {
3749 unsigned int rec_argc, i, j;
3750 char **rec_argv;
3751 const char **rec_argv_copy;
3752 const char * const record_args[] = {
3753 "record",
3754 "-a",
3755 "-R",
3756 "-m", "1024",
3757 "-c", "1",
3758 "-e", "sched:sched_switch",
3759 "-e", "sched:sched_stat_runtime",
3760 "-e", "sched:sched_process_fork",
3761 "-e", "sched:sched_wakeup_new",
3762 "-e", "sched:sched_migrate_task",
3763 };
3764
3765 /*
3766 * The trace_sched_stat_{wait,sleep,iowait} tracepoints are only
3767 * exposed when CONFIG_SCHEDSTATS is set. Probe for them at
3768 * runtime so that "perf sched record" does not fail on kernels
3769 * where they are absent.
3770 */
3771 const char * const schedstat_args[] = {
3772 "-e", "sched:sched_stat_wait",
3773 "-e", "sched:sched_stat_sleep",
3774 "-e", "sched:sched_stat_iowait",
3775 };
3776 unsigned int schedstat_argc = schedstat_events_exposed() ?
3777 ARRAY_SIZE(schedstat_args) : 0;
3778
3779 struct tep_event *waking_event;
3780 int ret;
3781
3782 /*
3783 * +2 for either "-e", "sched:sched_wakeup" or
3784 * "-e", "sched:sched_waking"
3785 */
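/*
 * Slots needed: the fixed record_args, the two wakeup/waking slots
 * accounted above, the optional schedstat events, and the caller's
 * trailing arguments (argv[0], the subcommand name itself, is
 * dropped, hence "argc - 1").
 */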
3786 rec_argc = ARRAY_SIZE(record_args) + 2 + schedstat_argc + argc - 1;
3787 rec_argv = calloc(rec_argc + 1, sizeof(char *));
3788 if (rec_argv == NULL)
3789 return -ENOMEM;
3790 rec_argv_copy = calloc(rec_argc + 1, sizeof(char *));
3791 if (rec_argv_copy == NULL) {
3792 free(rec_argv);
3793 return -ENOMEM;
3794 }
3795
3796 for (i = 0; i < ARRAY_SIZE(record_args); i++)
3797 rec_argv[i] = strdup(record_args[i]);
3798
3799 rec_argv[i++] = strdup("-e");
3800 waking_event = trace_event__tp_format("sched", "sched_waking");
3801 if (!IS_ERR(waking_event))
3802 rec_argv[i++] = strdup("sched:sched_waking");
3803 else
3804 rec_argv[i++] = strdup("sched:sched_wakeup");
3805
3806 for (j = 0; j < schedstat_argc; j++)
3807 rec_argv[i++] = strdup(schedstat_args[j]);
3808
3809 for (j = 1; j < (unsigned int)argc; j++, i++)
3810 rec_argv[i] = strdup(argv[j]);
3811
3812 BUG_ON(i != rec_argc);
3813
3814 memcpy(rec_argv_copy, rec_argv, sizeof(char *) * rec_argc);
3815 ret = cmd_record(rec_argc, rec_argv_copy);
3816
3817 for (i = 0; i < rec_argc; i++)
3818 free(rec_argv[i]);
3819 free(rec_argv);
3820 free(rec_argv_copy);
3821
3822 return ret;
3823 }
3824
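/*
 * Entry point for 'perf sched'. Illustrative invocations of the
 * subcommands dispatched below:
 *
 *   perf sched record -- sleep 1    # capture sched tracepoints
 *   perf sched latency --sort max   # wakeup-to-run latency table
 *   perf sched map --compact        # per-CPU context-switch map
 *   perf sched replay -r 10         # re-run the recorded workload
 *   perf sched timehist -w          # per-event time history
 */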
3825 int cmd_sched(int argc, const char **argv)
3826 {
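/*
 * Defaults: latency output sorted by "avg, max, switch, runtime",
 * 10 replay iterations, callchains shown with a 5-frame cap;
 * next_shortname1/2 seed the two-character task labels used by
 * the map view.
 */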
3827 static const char default_sort_order[] = "avg, max, switch, runtime";
3828 struct perf_sched sched = {
3829 .cmp_pid = LIST_HEAD_INIT(sched.cmp_pid),
3830 .sort_list = LIST_HEAD_INIT(sched.sort_list),
3831 .sort_order = default_sort_order,
3832 .replay_repeat = 10,
3833 .profile_cpu = -1,
3834 .next_shortname1 = 'A',
3835 .next_shortname2 = '0',
3836 .skip_merge = 0,
3837 .show_callchain = 1,
3838 .max_stack = 5,
3839 };
3840 const struct option sched_options[] = {
3841 OPT_STRING('i', "input", &input_name, "file",
3842 "input file name"),
3843 OPT_INCR('v', "verbose", &verbose,
3844 "be more verbose (show symbol address, etc)"),
3845 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
3846 "dump raw trace in ASCII"),
3847 OPT_BOOLEAN('f', "force", &sched.force, "don't complain, do it"),
3848 OPT_END()
3849 };
3850 const struct option latency_options[] = {
3851 OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
3852 "sort by key(s): runtime, switch, avg, max"),
3853 OPT_INTEGER('C', "CPU", &sched.profile_cpu,
3854 "CPU to profile on"),
3855 OPT_BOOLEAN('p', "pids", &sched.skip_merge,
3856 "latency stats per pid instead of per comm"),
3857 OPT_PARENT(sched_options)
3858 };
3859 const struct option replay_options[] = {
3860 OPT_UINTEGER('r', "repeat", &sched.replay_repeat,
3861 "repeat the workload replay N times (0: infinite)"),
3862 OPT_PARENT(sched_options)
3863 };
3864 const struct option map_options[] = {
3865 OPT_BOOLEAN(0, "compact", &sched.map.comp,
3866 "map output in compact mode"),
3867 OPT_STRING(0, "color-pids", &sched.map.color_pids_str, "pids",
3868 "highlight given pids in map"),
3869 OPT_STRING(0, "color-cpus", &sched.map.color_cpus_str, "cpus",
3870 "highlight given CPUs in map"),
3871 OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus",
3872 "display given CPUs in map"),
3873 OPT_STRING(0, "task-name", &sched.map.task_name, "task",
3874 "map output only for the given task name(s)."),
3875 OPT_BOOLEAN(0, "fuzzy-name", &sched.map.fuzzy,
3876 "given command name can be partially matched (fuzzy matching)"),
3877 OPT_PARENT(sched_options)
3878 };
3879 const struct option timehist_options[] = {
3880 OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
3881 "file", "vmlinux pathname"),
3882 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
3883 "file", "kallsyms pathname"),
3884 OPT_BOOLEAN('g', "call-graph", &sched.show_callchain,
3885 "Display call chains if present (default on)"),
3886 OPT_UINTEGER(0, "max-stack", &sched.max_stack,
3887 "Maximum number of functions to display backtrace."),
3888 OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
3889 "Look for files with symbols relative to this directory"),
3890 OPT_BOOLEAN('s', "summary", &sched.summary_only,
3891 "Show only syscall summary with statistics"),
3892 OPT_BOOLEAN('S', "with-summary", &sched.summary,
3893 "Show all syscalls and summary with statistics"),
3894 OPT_BOOLEAN('w', "wakeups", &sched.show_wakeups, "Show wakeup events"),
3895 OPT_BOOLEAN('n', "next", &sched.show_next, "Show next task"),
3896 OPT_BOOLEAN('M', "migrations", &sched.show_migrations, "Show migration events"),
3897 OPT_BOOLEAN('V', "cpu-visual", &sched.show_cpu_visual, "Add CPU visual"),
3898 OPT_BOOLEAN('I', "idle-hist", &sched.idle_hist, "Show idle events only"),
3899 OPT_STRING(0, "time", &sched.time_str, "str",
3900 "Time span for analysis (start,stop)"),
3901 OPT_BOOLEAN(0, "state", &sched.show_state, "Show task state when sched-out"),
3902 OPT_STRING('p', "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
3903 "analyze events only for given process id(s)"),
3904 OPT_STRING('t', "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
3905 "analyze events only for given thread id(s)"),
3906 OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
3907 OPT_BOOLEAN(0, "show-prio", &sched.show_prio, "Show task priority"),
3908 OPT_STRING(0, "prio", &sched.prio_str, "prio",
3909 "analyze events only for given task priority(ies)"),
3910 OPT_BOOLEAN('P', "pre-migrations", &sched.pre_migrations, "Show pre-migration wait time"),
3911 OPT_PARENT(sched_options)
3912 };
3913
3914 const char * const latency_usage[] = {
3915 "perf sched latency [<options>]",
3916 NULL
3917 };
3918 const char * const replay_usage[] = {
3919 "perf sched replay [<options>]",
3920 NULL
3921 };
3922 const char * const map_usage[] = {
3923 "perf sched map [<options>]",
3924 NULL
3925 };
3926 const char * const timehist_usage[] = {
3927 "perf sched timehist [<options>]",
3928 NULL
3929 };
3930 const char *const sched_subcommands[] = { "record", "latency", "map",
3931 "replay", "script",
3932 "timehist", NULL };
3933 const char *sched_usage[] = {
3934 NULL,
3935 NULL
3936 };
3937 struct trace_sched_handler lat_ops = {
3938 .wakeup_event = latency_wakeup_event,
3939 .switch_event = latency_switch_event,
3940 .runtime_event = latency_runtime_event,
3941 .migrate_task_event = latency_migrate_task_event,
3942 };
3943 struct trace_sched_handler map_ops = {
3944 .switch_event = map_switch_event,
3945 };
3946 struct trace_sched_handler replay_ops = {
3947 .wakeup_event = replay_wakeup_event,
3948 .switch_event = replay_switch_event,
3949 .fork_event = replay_fork_event,
3950 };
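/*
 * Each mode installs its own callback table as sched.tp_handler;
 * the tracepoint dispatch code invokes these per sample.
 */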
3951 int ret;
3952
3953 perf_tool__init(&sched.tool, /*ordered_events=*/true);
3954 sched.tool.sample = perf_sched__process_tracepoint_sample;
3955 sched.tool.comm = perf_sched__process_comm;
3956 sched.tool.namespaces = perf_event__process_namespaces;
3957 sched.tool.lost = perf_event__process_lost;
3958 sched.tool.fork = perf_sched__process_fork_event;
3959
3960 argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
3961 sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
3962 if (!argc)
3963 usage_with_options(sched_usage, sched_options);
3964
3965 thread__set_priv_destructor(free);
3966
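/*
 * The prefix matches below accept abbreviations of at least three
 * characters: "rec" selects record, "lat" latency, "rep" replay;
 * "script", "map" and "timehist" must be spelled out.
 */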
3967 /*
3968 * Aliased to 'perf script' for now:
3969 */
3970 if (!strcmp(argv[0], "script")) {
3971 ret = cmd_script(argc, argv);
3972 } else if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
3973 ret = __cmd_record(argc, argv);
3974 } else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
3975 sched.tp_handler = &lat_ops;
3976 if (argc > 1) {
3977 argc = parse_options(argc, argv, latency_options, latency_usage, 0);
3978 if (argc)
3979 usage_with_options(latency_usage, latency_options);
3980 }
3981 setup_sorting(&sched, latency_options, latency_usage);
3982 ret = perf_sched__lat(&sched);
3983 } else if (!strcmp(argv[0], "map")) {
3984 if (argc) {
3985 argc = parse_options(argc, argv, map_options, map_usage, 0);
3986 if (argc)
3987 usage_with_options(map_usage, map_options);
3988
3989 if (sched.map.task_name) {
3990 sched.map.task_names = strlist__new(sched.map.task_name, NULL);
3991 if (sched.map.task_names == NULL) {
3992 fprintf(stderr, "Failed to parse task names\n");
3993 ret = -1;
3994 goto out;
3995 }
3996 }
3997 }
3998 sched.tp_handler = &map_ops;
3999 setup_sorting(&sched, latency_options, latency_usage);
4000 ret = perf_sched__map(&sched);
4001 } else if (strlen(argv[0]) > 2 && strstarts("replay", argv[0])) {
4002 sched.tp_handler = &replay_ops;
4003 if (argc) {
4004 argc = parse_options(argc, argv, replay_options, replay_usage, 0);
4005 if (argc)
4006 usage_with_options(replay_usage, replay_options);
4007 }
4008 ret = perf_sched__replay(&sched);
4009 } else if (!strcmp(argv[0], "timehist")) {
4010 if (argc) {
4011 argc = parse_options(argc, argv, timehist_options,
4012 timehist_usage, 0);
4013 if (argc)
4014 usage_with_options(timehist_usage, timehist_options);
4015 }
4016 if ((sched.show_wakeups || sched.show_next) &&
4017 sched.summary_only) {
4018 pr_err(" Error: -s and -[n|w] are mutually exclusive.\n");
4019 parse_options_usage(timehist_usage, timehist_options, "s", true);
4020 if (sched.show_wakeups)
4021 parse_options_usage(NULL, timehist_options, "w", true);
4022 if (sched.show_next)
4023 parse_options_usage(NULL, timehist_options, "n", true);
4024 ret = -EINVAL;
4025 goto out;
4026 }
4027 ret = symbol__validate_sym_arguments();
4028 if (!ret)
4029 ret = perf_sched__timehist(&sched);
4030 } else {
4031 usage_with_options(sched_usage, sched_options);
4032 }
4033
4034 out:
4035 /* free usage string allocated by parse_options_subcommand */
4036 free((void *)sched_usage[0]);
4037
4038 return ret;
4039 }
4040