// SPDX-License-Identifier: GPL-2.0
#include "builtin.h"
#include "perf.h"
#include "perf-sys.h"

#include "util/cpumap.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/evsel_fprintf.h"
#include "util/mutex.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/cloexec.h"
#include "util/thread_map.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/string2.h"
#include "util/callchain.h"
#include "util/time-utils.h"

#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "util/trace-event.h"

#include "util/debug.h"
#include "util/event.h"
#include "util/util.h"

#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/zalloc.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <inttypes.h>

#include <errno.h>
#include <semaphore.h>
#include <pthread.h>
#include <math.h>
#include <api/fs/fs.h>
#include <perf/cpumap.h>
#include <linux/time64.h>
#include <linux/err.h>

#include <linux/ctype.h>

#define PR_SET_NAME 15 /* Set process name */
#define MAX_CPUS 4096
#define COMM_LEN 20
#define SYM_LEN 129
#define MAX_PID 1024000
#define MAX_PRIO 140

static const char *cpu_list;
static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);

struct sched_atom;

struct task_desc {
        unsigned long nr;
        unsigned long pid;
        char comm[COMM_LEN];

        unsigned long nr_events;
        unsigned long curr_event;
        struct sched_atom **atoms;

        pthread_t thread;

        sem_t ready_for_work;
        sem_t work_done_sem;

        u64 cpu_usage;
};

enum sched_event_type {
        SCHED_EVENT_RUN,
        SCHED_EVENT_SLEEP,
        SCHED_EVENT_WAKEUP,
};

struct sched_atom {
        enum sched_event_type type;
        u64 timestamp;
        u64 duration;
        unsigned long nr;
        sem_t *wait_sem;
        struct task_desc *wakee;
};

enum thread_state {
        THREAD_SLEEPING = 0,
        THREAD_WAIT_CPU,
        THREAD_SCHED_IN,
        THREAD_IGNORE
};

struct work_atom {
        struct list_head list;
        enum thread_state state;
        u64 sched_out_time;
        u64 wake_up_time;
        u64 sched_in_time;
        u64 runtime;
};

struct work_atoms {
        struct list_head work_list;
        struct thread *thread;
        struct rb_node node;
        u64 max_lat;
        u64 max_lat_start;
        u64 max_lat_end;
        u64 total_lat;
        u64 nb_atoms;
        u64 total_runtime;
        int num_merged;
};

typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);

struct perf_sched;

struct trace_sched_handler {
        int (*switch_event)(struct perf_sched *sched, struct evsel *evsel,
                            struct perf_sample *sample, struct machine *machine);

        int (*runtime_event)(struct perf_sched *sched, struct evsel *evsel,
                             struct perf_sample *sample, struct machine *machine);

        int (*wakeup_event)(struct perf_sched *sched, struct evsel *evsel,
                            struct perf_sample *sample, struct machine *machine);

        /* PERF_RECORD_FORK event, not sched_process_fork tracepoint */
        int (*fork_event)(struct perf_sched *sched, union perf_event *event,
                          struct machine *machine);

        int (*migrate_task_event)(struct perf_sched *sched,
                                  struct evsel *evsel,
                                  struct perf_sample *sample,
                                  struct machine *machine);
};

#define COLOR_PIDS PERF_COLOR_BLUE
#define COLOR_CPUS PERF_COLOR_BG_RED

struct perf_sched_map {
        DECLARE_BITMAP(comp_cpus_mask, MAX_CPUS);
        struct perf_cpu *comp_cpus;
        bool comp;
        struct perf_thread_map *color_pids;
        const char *color_pids_str;
        struct perf_cpu_map *color_cpus;
        const char *color_cpus_str;
        const char *task_name;
        struct strlist *task_names;
        bool fuzzy;
        struct perf_cpu_map *cpus;
        const char *cpus_str;
};

struct perf_sched {
        struct perf_tool tool;
        const char *sort_order;
        unsigned long nr_tasks;
        struct task_desc **pid_to_task;
        struct task_desc **tasks;
        const struct trace_sched_handler *tp_handler;
        struct mutex start_work_mutex;
        struct mutex work_done_wait_mutex;
        int profile_cpu;
        /*
         * Track the current task - that way we can know whether there's any
         * weird events, such as a task being switched away that is not current.
         */
        struct perf_cpu max_cpu;
        u32 *curr_pid;
        struct thread **curr_thread;
        struct thread **curr_out_thread;
        char next_shortname1;
        char next_shortname2;
        unsigned int replay_repeat;
        unsigned long nr_run_events;
        unsigned long nr_sleep_events;
        unsigned long nr_wakeup_events;
        unsigned long nr_sleep_corrections;
        unsigned long nr_run_events_optimized;
        unsigned long targetless_wakeups;
        unsigned long multitarget_wakeups;
        unsigned long nr_runs;
        unsigned long nr_timestamps;
        unsigned long nr_unordered_timestamps;
        unsigned long nr_context_switch_bugs;
        unsigned long nr_events;
        unsigned long nr_lost_chunks;
        unsigned long nr_lost_events;
        u64 run_measurement_overhead;
        u64 sleep_measurement_overhead;
        u64 start_time;
        u64 cpu_usage;
        u64 runavg_cpu_usage;
        u64 parent_cpu_usage;
        u64 runavg_parent_cpu_usage;
        u64 sum_runtime;
        u64 sum_fluct;
        u64 run_avg;
        u64 all_runtime;
        u64 all_count;
        u64 *cpu_last_switched;
        struct rb_root_cached atom_root, sorted_atom_root, merged_atom_root;
        struct list_head sort_list, cmp_pid;
        bool force;
        bool skip_merge;
        struct perf_sched_map map;

        /* options for timehist command */
        bool summary;
        bool summary_only;
        bool idle_hist;
        bool show_callchain;
        unsigned int max_stack;
        bool show_cpu_visual;
        bool show_wakeups;
        bool show_next;
        bool show_migrations;
        bool pre_migrations;
        bool show_state;
        bool show_prio;
        u64 skipped_samples;
        const char *time_str;
        struct perf_time_interval ptime;
        struct perf_time_interval hist_time;
        volatile bool thread_funcs_exit;
        const char *prio_str;
        DECLARE_BITMAP(prio_bitmap, MAX_PRIO);
};

/* per thread run time data */
struct thread_runtime {
        u64 last_time;      /* time of previous sched in/out event */
        u64 dt_run;         /* run time */
        u64 dt_sleep;       /* time spent sleeping between runs (off cpu) */
        u64 dt_iowait;      /* time spent in iowait between runs (off cpu) */
        u64 dt_preempt;     /* time spent preempted between runs (off cpu) */
        u64 dt_delay;       /* time between wakeup and sched-in */
        u64 dt_pre_mig;     /* time between migration and wakeup */
        u64 ready_to_run;   /* time of wakeup */
        u64 migrated;       /* time when a thread is migrated */

        struct stats run_stats;
        u64 total_run_time;
        u64 total_sleep_time;
        u64 total_iowait_time;
        u64 total_preempt_time;
        u64 total_delay_time;
        u64 total_pre_mig_time;

        char last_state;

        char shortname[3];
        bool comm_changed;

        u64 migrations;

        int prio;
};

/* per event run time data */
struct evsel_runtime {
        u64 *last_time; /* time this event was last seen per cpu */
        u32 ncpu;       /* highest cpu slot allocated */
};

/* per cpu idle time data */
struct idle_thread_runtime {
        struct thread_runtime tr;
        struct thread *last_thread;
        struct rb_root_cached sorted_root;
        struct callchain_root callchain;
        struct callchain_cursor cursor;
};

/* track idle times per cpu */
static struct thread **idle_threads;
static int idle_max_cpu;
static char idle_comm[] = "<idle>";

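/* Read CLOCK_MONOTONIC and fold it into a single nanosecond count. */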
static u64 get_nsecs(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);

        return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}

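/*
 * Busy-loop for @nsecs, ending run_measurement_overhead early to
 * compensate for the cost of the timing loop's own clock reads.
 */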
static void burn_nsecs(struct perf_sched *sched, u64 nsecs)
{
        u64 T0 = get_nsecs(), T1;

        do {
                T1 = get_nsecs();
        } while (T1 + sched->run_measurement_overhead < T0 + nsecs);
}

static void sleep_nsecs(u64 nsecs)
{
        struct timespec ts;

        ts.tv_nsec = nsecs % 999999999;
        ts.tv_sec = nsecs / 999999999;

        nanosleep(&ts, NULL);
}

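/*
 * Estimate the fixed cost of one timed burn/sleep round trip by taking the
 * minimum over ten runs; replay uses these overheads so that simulated
 * runtimes track the recorded ones more closely.
 */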
static void calibrate_run_measurement_overhead(struct perf_sched *sched)
{
        u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
        int i;

        for (i = 0; i < 10; i++) {
                T0 = get_nsecs();
                burn_nsecs(sched, 0);
                T1 = get_nsecs();
                delta = T1 - T0;
                min_delta = min(min_delta, delta);
        }
        sched->run_measurement_overhead = min_delta;

        printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}

static void calibrate_sleep_measurement_overhead(struct perf_sched *sched)
{
        u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
        int i;

        for (i = 0; i < 10; i++) {
                T0 = get_nsecs();
                sleep_nsecs(10000);
                T1 = get_nsecs();
                delta = T1 - T0;
                min_delta = min(min_delta, delta);
        }
        min_delta -= 10000;
        sched->sleep_measurement_overhead = min_delta;

        printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}

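/*
 * Append a new atom to a task's event array, growing the array as needed.
 */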
static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)
{
        struct sched_atom *event = zalloc(sizeof(*event));
        unsigned long idx = task->nr_events;
        size_t size;

        event->timestamp = timestamp;
        event->nr = idx;

        task->nr_events++;
        size = sizeof(struct sched_atom *) * task->nr_events;
        task->atoms = realloc(task->atoms, size);
        BUG_ON(!task->atoms);

        task->atoms[idx] = event;

        return event;
}

static struct sched_atom *last_event(struct task_desc *task)
{
        if (!task->nr_events)
                return NULL;

        return task->atoms[task->nr_events - 1];
}

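/*
 * The add_sched_event_*() helpers translate trace records into replayable
 * atoms: RUN atoms burn CPU time, SLEEP atoms wait on a semaphore, and
 * WAKEUP atoms post the matching wakee's semaphore.
 */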
static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
                                u64 timestamp, u64 duration)
{
        struct sched_atom *event, *curr_event = last_event(task);

        /*
         * optimize an existing RUN event by merging this one
         * to it:
         */
        if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
                sched->nr_run_events_optimized++;
                curr_event->duration += duration;
                return;
        }

        event = get_new_event(task, timestamp);

        event->type = SCHED_EVENT_RUN;
        event->duration = duration;

        sched->nr_run_events++;
}

static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
                                   u64 timestamp, struct task_desc *wakee)
{
        struct sched_atom *event, *wakee_event;

        event = get_new_event(task, timestamp);
        event->type = SCHED_EVENT_WAKEUP;
        event->wakee = wakee;

        wakee_event = last_event(wakee);
        if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
                sched->targetless_wakeups++;
                return;
        }
        if (wakee_event->wait_sem) {
                sched->multitarget_wakeups++;
                return;
        }

        wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
        sem_init(wakee_event->wait_sem, 0, 0);
        event->wait_sem = wakee_event->wait_sem;

        sched->nr_wakeup_events++;
}

static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
                                  u64 timestamp)
{
        struct sched_atom *event = get_new_event(task, timestamp);

        event->type = SCHED_EVENT_SLEEP;

        sched->nr_sleep_events++;
}

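/*
 * Look up (or create) the replay task for a recorded PID. The pid_to_task
 * table is sized from kernel/pid_max and grown on demand for larger PIDs.
 */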
static struct task_desc *register_pid(struct perf_sched *sched,
                                      unsigned long pid, const char *comm)
{
        struct task_desc *task;
        static int pid_max;

        if (sched->pid_to_task == NULL) {
                if (sysctl__read_int("kernel/pid_max", &pid_max) < 0)
                        pid_max = MAX_PID;
                BUG_ON((sched->pid_to_task = calloc(pid_max, sizeof(struct task_desc *))) == NULL);
        }
        if (pid >= (unsigned long)pid_max) {
                BUG_ON((sched->pid_to_task = realloc(sched->pid_to_task, (pid + 1) *
                        sizeof(struct task_desc *))) == NULL);
                while (pid >= (unsigned long)pid_max)
                        sched->pid_to_task[pid_max++] = NULL;
        }

        task = sched->pid_to_task[pid];

        if (task)
                return task;

        task = zalloc(sizeof(*task));
        task->pid = pid;
        task->nr = sched->nr_tasks;
        strcpy(task->comm, comm);
        /*
         * every task starts in sleeping state - this gets ignored
         * if there's no wakeup pointing to this sleep state:
         */
        add_sched_event_sleep(sched, task, 0);

        sched->pid_to_task[pid] = task;
        sched->nr_tasks++;
        sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
        BUG_ON(!sched->tasks);
        sched->tasks[task->nr] = task;

        if (verbose > 0)
                printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm);

        return task;
}


static void print_task_traces(struct perf_sched *sched)
{
        struct task_desc *task;
        unsigned long i;

        for (i = 0; i < sched->nr_tasks; i++) {
                task = sched->tasks[i];
                printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
                        task->nr, task->comm, task->pid, task->nr_events);
        }
}

static void add_cross_task_wakeups(struct perf_sched *sched)
{
        struct task_desc *task1, *task2;
        unsigned long i, j;

        for (i = 0; i < sched->nr_tasks; i++) {
                task1 = sched->tasks[i];
                j = i + 1;
                if (j == sched->nr_tasks)
                        j = 0;
                task2 = sched->tasks[j];
                add_sched_event_wakeup(sched, task1, 0, task2);
        }
}

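/* Execute one replay atom on behalf of the calling worker thread. */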
static void perf_sched__process_event(struct perf_sched *sched,
                                      struct sched_atom *atom)
{
        int ret = 0;

        switch (atom->type) {
        case SCHED_EVENT_RUN:
                burn_nsecs(sched, atom->duration);
                break;
        case SCHED_EVENT_SLEEP:
                if (atom->wait_sem)
                        ret = sem_wait(atom->wait_sem);
                BUG_ON(ret);
                break;
        case SCHED_EVENT_WAKEUP:
                if (atom->wait_sem)
                        ret = sem_post(atom->wait_sem);
                BUG_ON(ret);
                break;
        default:
                BUG_ON(1);
        }
}

static u64 get_cpu_usage_nsec_parent(void)
{
        struct rusage ru;
        u64 sum;
        int err;

        err = getrusage(RUSAGE_SELF, &ru);
        BUG_ON(err);

        sum = ru.ru_utime.tv_sec * NSEC_PER_SEC + ru.ru_utime.tv_usec * NSEC_PER_USEC;
        sum += ru.ru_stime.tv_sec * NSEC_PER_SEC + ru.ru_stime.tv_usec * NSEC_PER_USEC;

        return sum;
}

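/*
 * Open a task-clock software counter for the calling thread. If the open
 * fails with EMFILE and -f was given, try to raise RLIMIT_NOFILE and retry.
 */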
static int self_open_counters(struct perf_sched *sched, unsigned long cur_task)
{
        struct perf_event_attr attr;
        char sbuf[STRERR_BUFSIZE], info[STRERR_BUFSIZE];
        int fd;
        struct rlimit limit;
        bool need_privilege = false;

        memset(&attr, 0, sizeof(attr));

        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_TASK_CLOCK;

force_again:
        fd = sys_perf_event_open(&attr, 0, -1, -1,
                                 perf_event_open_cloexec_flag());

        if (fd < 0) {
                if (errno == EMFILE) {
                        if (sched->force) {
                                BUG_ON(getrlimit(RLIMIT_NOFILE, &limit) == -1);
                                limit.rlim_cur += sched->nr_tasks - cur_task;
                                if (limit.rlim_cur > limit.rlim_max) {
                                        limit.rlim_max = limit.rlim_cur;
                                        need_privilege = true;
                                }
                                if (setrlimit(RLIMIT_NOFILE, &limit) == -1) {
                                        if (need_privilege && errno == EPERM)
                                                strcpy(info, "Need privilege\n");
                                } else
                                        goto force_again;
                        } else
                                strcpy(info, "Have a try with -f option\n");
                }
                pr_err("Error: sys_perf_event_open() syscall returned "
                       "with %d (%s)\n%s", fd,
                       str_error_r(errno, sbuf, sizeof(sbuf)), info);
                exit(EXIT_FAILURE);
        }
        return fd;
}

static u64 get_cpu_usage_nsec_self(int fd)
{
        u64 runtime;
        int ret;

        ret = read(fd, &runtime, sizeof(runtime));
        BUG_ON(ret != sizeof(runtime));

        return runtime;
}

struct sched_thread_parms {
        struct task_desc *task;
        struct perf_sched *sched;
        int fd;
};

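/*
 * Worker body for one replayed task: signal readiness, wait for the parent
 * to release start_work_mutex, replay all atoms while measuring own CPU
 * usage via the counter fd, then park on work_done_wait_mutex until the
 * next iteration.
 */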
static void *thread_func(void *ctx)
{
        struct sched_thread_parms *parms = ctx;
        struct task_desc *this_task = parms->task;
        struct perf_sched *sched = parms->sched;
        u64 cpu_usage_0, cpu_usage_1;
        unsigned long i, ret;
        char comm2[22];
        int fd = parms->fd;

        zfree(&parms);

        sprintf(comm2, ":%s", this_task->comm);
        prctl(PR_SET_NAME, comm2);
        if (fd < 0)
                return NULL;

        while (!sched->thread_funcs_exit) {
                ret = sem_post(&this_task->ready_for_work);
                BUG_ON(ret);
                mutex_lock(&sched->start_work_mutex);
                mutex_unlock(&sched->start_work_mutex);

                cpu_usage_0 = get_cpu_usage_nsec_self(fd);

                for (i = 0; i < this_task->nr_events; i++) {
                        this_task->curr_event = i;
                        perf_sched__process_event(sched, this_task->atoms[i]);
                }

                cpu_usage_1 = get_cpu_usage_nsec_self(fd);
                this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
                ret = sem_post(&this_task->work_done_sem);
                BUG_ON(ret);

                mutex_lock(&sched->work_done_wait_mutex);
                mutex_unlock(&sched->work_done_wait_mutex);
        }
        return NULL;
}

static void create_tasks(struct perf_sched *sched)
        EXCLUSIVE_LOCK_FUNCTION(sched->start_work_mutex)
        EXCLUSIVE_LOCK_FUNCTION(sched->work_done_wait_mutex)
{
        struct task_desc *task;
        pthread_attr_t attr;
        unsigned long i;
        int err;

        err = pthread_attr_init(&attr);
        BUG_ON(err);
        err = pthread_attr_setstacksize(&attr,
                        (size_t) max(16 * 1024, (int)PTHREAD_STACK_MIN));
        BUG_ON(err);
        mutex_lock(&sched->start_work_mutex);
        mutex_lock(&sched->work_done_wait_mutex);
        for (i = 0; i < sched->nr_tasks; i++) {
                struct sched_thread_parms *parms = malloc(sizeof(*parms));
                BUG_ON(parms == NULL);
                parms->task = task = sched->tasks[i];
                parms->sched = sched;
                parms->fd = self_open_counters(sched, i);
                sem_init(&task->ready_for_work, 0, 0);
                sem_init(&task->work_done_sem, 0, 0);
                task->curr_event = 0;
                err = pthread_create(&task->thread, &attr, thread_func, parms);
                BUG_ON(err);
        }
}

static void destroy_tasks(struct perf_sched *sched)
        UNLOCK_FUNCTION(sched->start_work_mutex)
        UNLOCK_FUNCTION(sched->work_done_wait_mutex)
{
        struct task_desc *task;
        unsigned long i;
        int err;

        mutex_unlock(&sched->start_work_mutex);
        mutex_unlock(&sched->work_done_wait_mutex);
        /* Get rid of threads so they won't be upset by mutex destruction */
        for (i = 0; i < sched->nr_tasks; i++) {
                task = sched->tasks[i];
                err = pthread_join(task->thread, NULL);
                BUG_ON(err);
                sem_destroy(&task->ready_for_work);
                sem_destroy(&task->work_done_sem);
        }
}

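/*
 * Drive one replay iteration from the parent: wait until every worker is
 * ready, release them, collect per-task CPU usage, and maintain running
 * averages weighted over sched->replay_repeat iterations.
 */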
static void wait_for_tasks(struct perf_sched *sched)
        EXCLUSIVE_LOCKS_REQUIRED(sched->work_done_wait_mutex)
        EXCLUSIVE_LOCKS_REQUIRED(sched->start_work_mutex)
{
        u64 cpu_usage_0, cpu_usage_1;
        struct task_desc *task;
        unsigned long i, ret;

        sched->start_time = get_nsecs();
        sched->cpu_usage = 0;
        mutex_unlock(&sched->work_done_wait_mutex);

        for (i = 0; i < sched->nr_tasks; i++) {
                task = sched->tasks[i];
                ret = sem_wait(&task->ready_for_work);
                BUG_ON(ret);
                sem_init(&task->ready_for_work, 0, 0);
        }
        mutex_lock(&sched->work_done_wait_mutex);

        cpu_usage_0 = get_cpu_usage_nsec_parent();

        mutex_unlock(&sched->start_work_mutex);

        for (i = 0; i < sched->nr_tasks; i++) {
                task = sched->tasks[i];
                ret = sem_wait(&task->work_done_sem);
                BUG_ON(ret);
                sem_init(&task->work_done_sem, 0, 0);
                sched->cpu_usage += task->cpu_usage;
                task->cpu_usage = 0;
        }

        cpu_usage_1 = get_cpu_usage_nsec_parent();
        if (!sched->runavg_cpu_usage)
                sched->runavg_cpu_usage = sched->cpu_usage;
        sched->runavg_cpu_usage = (sched->runavg_cpu_usage * (sched->replay_repeat - 1) + sched->cpu_usage) / sched->replay_repeat;

        sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
        if (!sched->runavg_parent_cpu_usage)
                sched->runavg_parent_cpu_usage = sched->parent_cpu_usage;
        sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) +
                                          sched->parent_cpu_usage) / sched->replay_repeat;

        mutex_lock(&sched->start_work_mutex);

        for (i = 0; i < sched->nr_tasks; i++) {
                task = sched->tasks[i];
                task->curr_event = 0;
        }
}

static void run_one_test(struct perf_sched *sched)
        EXCLUSIVE_LOCKS_REQUIRED(sched->work_done_wait_mutex)
        EXCLUSIVE_LOCKS_REQUIRED(sched->start_work_mutex)
{
        u64 T0, T1, delta, avg_delta, fluct;

        T0 = get_nsecs();
        wait_for_tasks(sched);
        T1 = get_nsecs();

        delta = T1 - T0;
        sched->sum_runtime += delta;
        sched->nr_runs++;

        avg_delta = sched->sum_runtime / sched->nr_runs;
        if (delta < avg_delta)
                fluct = avg_delta - delta;
        else
                fluct = delta - avg_delta;
        sched->sum_fluct += fluct;
        if (!sched->run_avg)
                sched->run_avg = delta;
        sched->run_avg = (sched->run_avg * (sched->replay_repeat - 1) + delta) / sched->replay_repeat;

        printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / NSEC_PER_MSEC);

        printf("ravg: %0.2f, ", (double)sched->run_avg / NSEC_PER_MSEC);

        printf("cpu: %0.2f / %0.2f",
                (double)sched->cpu_usage / NSEC_PER_MSEC, (double)sched->runavg_cpu_usage / NSEC_PER_MSEC);

#if 0
        /*
         * rusage statistics done by the parent, these are less
         * accurate than the sched->sum_exec_runtime based statistics:
         */
        printf(" [%0.2f / %0.2f]",
                (double)sched->parent_cpu_usage / NSEC_PER_MSEC,
                (double)sched->runavg_parent_cpu_usage / NSEC_PER_MSEC);
#endif

        printf("\n");

        if (sched->nr_sleep_corrections)
                printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections);
        sched->nr_sleep_corrections = 0;
}

static void test_calibrations(struct perf_sched *sched)
{
        u64 T0, T1;

        T0 = get_nsecs();
        burn_nsecs(sched, NSEC_PER_MSEC);
        T1 = get_nsecs();

        printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);

        T0 = get_nsecs();
        sleep_nsecs(NSEC_PER_MSEC);
        T1 = get_nsecs();

        printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
}

static int
replay_wakeup_event(struct perf_sched *sched,
                    struct evsel *evsel, struct perf_sample *sample,
                    struct machine *machine __maybe_unused)
{
        const char *comm = evsel__strval(evsel, sample, "comm");
        const u32 pid = evsel__intval(evsel, sample, "pid");
        struct task_desc *waker, *wakee;

        if (verbose > 0) {
                printf("sched_wakeup event %p\n", evsel);

                printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid);
        }

        waker = register_pid(sched, sample->tid, "<unknown>");
        wakee = register_pid(sched, pid, comm);

        add_sched_event_wakeup(sched, waker, sample->time, wakee);
        return 0;
}

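/*
 * Convert a sched_switch record into replay atoms: the task being switched
 * out gets a RUN atom covering the time since the last switch on this CPU,
 * followed by a SLEEP atom.
 */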
static int replay_switch_event(struct perf_sched *sched,
                               struct evsel *evsel,
                               struct perf_sample *sample,
                               struct machine *machine __maybe_unused)
{
        const char *prev_comm = evsel__strval(evsel, sample, "prev_comm"),
                   *next_comm = evsel__strval(evsel, sample, "next_comm");
        const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
                  next_pid = evsel__intval(evsel, sample, "next_pid");
        struct task_desc *prev, __maybe_unused *next;
        u64 timestamp0, timestamp = sample->time;
        int cpu = sample->cpu;
        s64 delta;

        if (verbose > 0)
                printf("sched_switch event %p\n", evsel);

        if (cpu >= MAX_CPUS || cpu < 0)
                return 0;

        timestamp0 = sched->cpu_last_switched[cpu];
        if (timestamp0)
                delta = timestamp - timestamp0;
        else
                delta = 0;

        if (delta < 0) {
                pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
                return -1;
        }

        pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
                 prev_comm, prev_pid, next_comm, next_pid, delta);

        prev = register_pid(sched, prev_pid, prev_comm);
        next = register_pid(sched, next_pid, next_comm);

        sched->cpu_last_switched[cpu] = timestamp;

        add_sched_event_run(sched, prev, timestamp, delta);
        add_sched_event_sleep(sched, prev, timestamp);

        return 0;
}

static int replay_fork_event(struct perf_sched *sched,
                             union perf_event *event,
                             struct machine *machine)
{
        struct thread *child, *parent;

        child = machine__findnew_thread(machine, event->fork.pid,
                                        event->fork.tid);
        parent = machine__findnew_thread(machine, event->fork.ppid,
                                         event->fork.ptid);

        if (child == NULL || parent == NULL) {
                pr_debug("thread does not exist on fork event: child %p, parent %p\n",
                         child, parent);
                goto out_put;
        }

        if (verbose > 0) {
                printf("fork event\n");
                printf("... parent: %s/%d\n", thread__comm_str(parent), thread__tid(parent));
                printf("... child: %s/%d\n", thread__comm_str(child), thread__tid(child));
        }

        register_pid(sched, thread__tid(parent), thread__comm_str(parent));
        register_pid(sched, thread__tid(child), thread__comm_str(child));
out_put:
        thread__put(child);
        thread__put(parent);
        return 0;
}

struct sort_dimension {
        const char *name;
        sort_fn_t cmp;
        struct list_head list;
};

static inline void init_prio(struct thread_runtime *r)
{
        r->prio = -1;
}

/*
 * handle runtime stats saved per thread
 */
static struct thread_runtime *thread__init_runtime(struct thread *thread)
{
        struct thread_runtime *r;

        r = zalloc(sizeof(struct thread_runtime));
        if (!r)
                return NULL;

        init_stats(&r->run_stats);
        init_prio(r);
        thread__set_priv(thread, r);

        return r;
}

static struct thread_runtime *thread__get_runtime(struct thread *thread)
{
        struct thread_runtime *tr;

        tr = thread__priv(thread);
        if (tr == NULL) {
                tr = thread__init_runtime(thread);
                if (tr == NULL)
                        pr_debug("Failed to malloc memory for runtime data.\n");
        }

        return tr;
}

static int
thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
{
        struct sort_dimension *sort;
        int ret = 0;

        BUG_ON(list_empty(list));

        list_for_each_entry(sort, list, list) {
                ret = sort->cmp(l, r);
                if (ret)
                        return ret;
        }

        return ret;
}

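/*
 * Binary-search the latency rbtree for the entry belonging to @thread,
 * using the same comparison chain that ordered the tree.
 */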
static struct work_atoms *
thread_atoms_search(struct rb_root_cached *root, struct thread *thread,
                    struct list_head *sort_list)
{
        struct rb_node *node = root->rb_root.rb_node;
        struct work_atoms key = { .thread = thread };

        while (node) {
                struct work_atoms *atoms;
                int cmp;

                atoms = container_of(node, struct work_atoms, node);

                cmp = thread_lat_cmp(sort_list, &key, atoms);
                if (cmp > 0)
                        node = node->rb_left;
                else if (cmp < 0)
                        node = node->rb_right;
                else {
                        BUG_ON(!RC_CHK_EQUAL(thread, atoms->thread));
                        return atoms;
                }
        }
        return NULL;
}

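/*
 * Standard rbtree insertion keyed by the sort-dimension chain; the cached
 * leftmost node keeps in-order traversal cheap when the results are printed.
 */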
static void
__thread_latency_insert(struct rb_root_cached *root, struct work_atoms *data,
                        struct list_head *sort_list)
{
        struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
        bool leftmost = true;

        while (*new) {
                struct work_atoms *this;
                int cmp;

                this = container_of(*new, struct work_atoms, node);
                parent = *new;

                cmp = thread_lat_cmp(sort_list, data, this);

                if (cmp > 0)
                        new = &((*new)->rb_left);
                else {
                        new = &((*new)->rb_right);
                        leftmost = false;
                }
        }

        rb_link_node(&data->node, parent, new);
        rb_insert_color_cached(&data->node, root, leftmost);
}

static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
{
        struct work_atoms *atoms = zalloc(sizeof(*atoms));
        if (!atoms) {
                pr_err("No memory at %s\n", __func__);
                return -1;
        }

        atoms->thread = thread__get(thread);
        INIT_LIST_HEAD(&atoms->work_list);
        __thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid);
        return 0;
}

static int
add_sched_out_event(struct work_atoms *atoms,
                    char run_state,
                    u64 timestamp)
{
        struct work_atom *atom = zalloc(sizeof(*atom));
        if (!atom) {
                pr_err("No memory at %s\n", __func__);
                return -1;
        }

        atom->sched_out_time = timestamp;

        if (run_state == 'R') {
                atom->state = THREAD_WAIT_CPU;
                atom->wake_up_time = atom->sched_out_time;
        }

        list_add_tail(&atom->list, &atoms->work_list);
        return 0;
}

static void
add_runtime_event(struct work_atoms *atoms, u64 delta,
                  u64 timestamp __maybe_unused)
{
        struct work_atom *atom;

        BUG_ON(list_empty(&atoms->work_list));

        atom = list_entry(atoms->work_list.prev, struct work_atom, list);

        atom->runtime += delta;
        atoms->total_runtime += delta;
}

static void
add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
{
        struct work_atom *atom;
        u64 delta;

        if (list_empty(&atoms->work_list))
                return;

        atom = list_entry(atoms->work_list.prev, struct work_atom, list);

        if (atom->state != THREAD_WAIT_CPU)
                return;

        if (timestamp < atom->wake_up_time) {
                atom->state = THREAD_IGNORE;
                return;
        }

        atom->state = THREAD_SCHED_IN;
        atom->sched_in_time = timestamp;

        delta = atom->sched_in_time - atom->wake_up_time;
        atoms->total_lat += delta;
        if (delta > atoms->max_lat) {
                atoms->max_lat = delta;
                atoms->max_lat_start = atom->wake_up_time;
                atoms->max_lat_end = timestamp;
        }
        atoms->nb_atoms++;
}

static void free_work_atoms(struct work_atoms *atoms)
{
        struct work_atom *atom, *tmp;

        if (atoms == NULL)
                return;

        list_for_each_entry_safe(atom, tmp, &atoms->work_list, list) {
                list_del(&atom->list);
                free(atom);
        }
        thread__zput(atoms->thread);
        free(atoms);
}

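/*
 * Latency bookkeeping for sched_switch: close the atom of the task being
 * scheduled out and complete the wakeup-to-sched-in latency of the task
 * coming in, creating tree entries on first sight of either thread.
 */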
static int latency_switch_event(struct perf_sched *sched,
                                struct evsel *evsel,
                                struct perf_sample *sample,
                                struct machine *machine)
{
        const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
                  next_pid = evsel__intval(evsel, sample, "next_pid");
        const char prev_state = evsel__taskstate(evsel, sample, "prev_state");
        struct work_atoms *out_events, *in_events;
        struct thread *sched_out, *sched_in;
        u64 timestamp0, timestamp = sample->time;
        int cpu = sample->cpu, err = -1;
        s64 delta;

        BUG_ON(cpu >= MAX_CPUS || cpu < 0);

        timestamp0 = sched->cpu_last_switched[cpu];
        sched->cpu_last_switched[cpu] = timestamp;
        if (timestamp0)
                delta = timestamp - timestamp0;
        else
                delta = 0;

        if (delta < 0) {
                pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
                return -1;
        }

        sched_out = machine__findnew_thread(machine, -1, prev_pid);
        sched_in = machine__findnew_thread(machine, -1, next_pid);
        if (sched_out == NULL || sched_in == NULL)
                goto out_put;

        out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
        if (!out_events) {
                if (thread_atoms_insert(sched, sched_out))
                        goto out_put;
                out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
                if (!out_events) {
                        pr_err("out-event: Internal tree error");
                        goto out_put;
                }
        }
        if (add_sched_out_event(out_events, prev_state, timestamp))
                return -1;

        in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
        if (!in_events) {
                if (thread_atoms_insert(sched, sched_in))
                        goto out_put;
                in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
                if (!in_events) {
                        pr_err("in-event: Internal tree error");
                        goto out_put;
                }
                /*
                 * Task came in that we have not heard about yet,
                 * add in an initial atom in runnable state:
                 */
                if (add_sched_out_event(in_events, 'R', timestamp))
                        goto out_put;
        }
        add_sched_in_event(in_events, timestamp);
        err = 0;
out_put:
        thread__put(sched_out);
        thread__put(sched_in);
        return err;
}

static int latency_runtime_event(struct perf_sched *sched,
                                 struct evsel *evsel,
                                 struct perf_sample *sample,
                                 struct machine *machine)
{
        const u32 pid = evsel__intval(evsel, sample, "pid");
        const u64 runtime = evsel__intval(evsel, sample, "runtime");
        struct thread *thread = machine__findnew_thread(machine, -1, pid);
        struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
        u64 timestamp = sample->time;
        int cpu = sample->cpu, err = -1;

        if (thread == NULL)
                return -1;

        BUG_ON(cpu >= MAX_CPUS || cpu < 0);
        if (!atoms) {
                if (thread_atoms_insert(sched, thread))
                        goto out_put;
                atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
                if (!atoms) {
                        pr_err("in-event: Internal tree error");
                        goto out_put;
                }
                if (add_sched_out_event(atoms, 'R', timestamp))
                        goto out_put;
        }

        add_runtime_event(atoms, runtime, timestamp);
        err = 0;
out_put:
        thread__put(thread);
        return err;
}

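/*
 * Record the wakeup timestamp for the wakee so that its next sched-in can
 * be turned into a wakeup latency.
 */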
static int latency_wakeup_event(struct perf_sched *sched,
                                struct evsel *evsel,
                                struct perf_sample *sample,
                                struct machine *machine)
{
        const u32 pid = evsel__intval(evsel, sample, "pid");
        struct work_atoms *atoms;
        struct work_atom *atom;
        struct thread *wakee;
        u64 timestamp = sample->time;
        int err = -1;

        wakee = machine__findnew_thread(machine, -1, pid);
        if (wakee == NULL)
                return -1;
        atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
        if (!atoms) {
                if (thread_atoms_insert(sched, wakee))
                        goto out_put;
                atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
                if (!atoms) {
                        pr_err("wakeup-event: Internal tree error");
                        goto out_put;
                }
                if (add_sched_out_event(atoms, 'S', timestamp))
                        goto out_put;
        }

        BUG_ON(list_empty(&atoms->work_list));

        atom = list_entry(atoms->work_list.prev, struct work_atom, list);

        /*
         * As we do not guarantee the wakeup event happens when
         * task is out of run queue, also may happen when task is
         * on run queue and wakeup only change ->state to TASK_RUNNING,
         * then we should not set the ->wake_up_time when wake up a
         * task which is on run queue.
         *
         * You WILL be missing events if you've recorded only
         * one CPU, or are only looking at one, so don't
         * skip in this case.
         */
        if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
                goto out_ok;

        sched->nr_timestamps++;
        if (atom->sched_out_time > timestamp) {
                sched->nr_unordered_timestamps++;
                goto out_ok;
        }

        atom->state = THREAD_WAIT_CPU;
        atom->wake_up_time = timestamp;
out_ok:
        err = 0;
out_put:
        thread__put(wakee);
        return err;
}

static int latency_migrate_task_event(struct perf_sched *sched,
                                      struct evsel *evsel,
                                      struct perf_sample *sample,
                                      struct machine *machine)
{
        const u32 pid = evsel__intval(evsel, sample, "pid");
        u64 timestamp = sample->time;
        struct work_atoms *atoms;
        struct work_atom *atom;
        struct thread *migrant;
        int err = -1;

        /*
         * Only need to worry about migration when profiling one CPU.
         */
        if (sched->profile_cpu == -1)
                return 0;

        migrant = machine__findnew_thread(machine, -1, pid);
        if (migrant == NULL)
                return -1;
        atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
        if (!atoms) {
                if (thread_atoms_insert(sched, migrant))
                        goto out_put;
                register_pid(sched, thread__tid(migrant), thread__comm_str(migrant));
                atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
                if (!atoms) {
                        pr_err("migration-event: Internal tree error");
                        goto out_put;
                }
                if (add_sched_out_event(atoms, 'R', timestamp))
                        goto out_put;
        }

        BUG_ON(list_empty(&atoms->work_list));

        atom = list_entry(atoms->work_list.prev, struct work_atom, list);
        atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;

        sched->nr_timestamps++;

        if (atom->sched_out_time > timestamp)
                sched->nr_unordered_timestamps++;
        err = 0;
out_put:
        thread__put(migrant);
        return err;
}

static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)
{
        int i;
        int ret;
        u64 avg;
        char max_lat_start[32], max_lat_end[32];

        if (!work_list->nb_atoms)
                return;
        /*
         * Ignore idle threads:
         */
        if (!strcmp(thread__comm_str(work_list->thread), "swapper"))
                return;

        sched->all_runtime += work_list->total_runtime;
        sched->all_count += work_list->nb_atoms;

        if (work_list->num_merged > 1) {
                ret = printf(" %s:(%d) ", thread__comm_str(work_list->thread),
                             work_list->num_merged);
        } else {
                ret = printf(" %s:%d ", thread__comm_str(work_list->thread),
                             thread__tid(work_list->thread));
        }

        for (i = 0; i < 24 - ret; i++)
                printf(" ");

        avg = work_list->total_lat / work_list->nb_atoms;
        timestamp__scnprintf_usec(work_list->max_lat_start, max_lat_start, sizeof(max_lat_start));
        timestamp__scnprintf_usec(work_list->max_lat_end, max_lat_end, sizeof(max_lat_end));

        printf("|%11.3f ms |%9" PRIu64 " | avg:%8.3f ms | max:%8.3f ms | max start: %12s s | max end: %12s s\n",
               (double)work_list->total_runtime / NSEC_PER_MSEC,
               work_list->nb_atoms, (double)avg / NSEC_PER_MSEC,
               (double)work_list->max_lat / NSEC_PER_MSEC,
               max_lat_start, max_lat_end);
}

static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
{
        pid_t l_tid, r_tid;

        if (RC_CHK_EQUAL(l->thread, r->thread))
                return 0;
        l_tid = thread__tid(l->thread);
        r_tid = thread__tid(r->thread);
        if (l_tid < r_tid)
                return -1;
        if (l_tid > r_tid)
                return 1;
        return (int)(RC_CHK_ACCESS(l->thread) - RC_CHK_ACCESS(r->thread));
}

static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
{
        u64 avgl, avgr;

        if (!l->nb_atoms)
                return -1;

        if (!r->nb_atoms)
                return 1;

        avgl = l->total_lat / l->nb_atoms;
        avgr = r->total_lat / r->nb_atoms;

        if (avgl < avgr)
                return -1;
        if (avgl > avgr)
                return 1;

        return 0;
}

static int max_cmp(struct work_atoms *l, struct work_atoms *r)
{
        if (l->max_lat < r->max_lat)
                return -1;
        if (l->max_lat > r->max_lat)
                return 1;

        return 0;
}

static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
{
        if (l->nb_atoms < r->nb_atoms)
                return -1;
        if (l->nb_atoms > r->nb_atoms)
                return 1;

        return 0;
}

static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
{
        if (l->total_runtime < r->total_runtime)
                return -1;
        if (l->total_runtime > r->total_runtime)
                return 1;

        return 0;
}

static int sort_dimension__add(const char *tok, struct list_head *list)
{
        size_t i;
        static struct sort_dimension avg_sort_dimension = {
                .name = "avg",
                .cmp = avg_cmp,
        };
        static struct sort_dimension max_sort_dimension = {
                .name = "max",
                .cmp = max_cmp,
        };
        static struct sort_dimension pid_sort_dimension = {
                .name = "pid",
                .cmp = pid_cmp,
        };
        static struct sort_dimension runtime_sort_dimension = {
                .name = "runtime",
                .cmp = runtime_cmp,
        };
        static struct sort_dimension switch_sort_dimension = {
                .name = "switch",
                .cmp = switch_cmp,
        };
        struct sort_dimension *available_sorts[] = {
                &pid_sort_dimension,
                &avg_sort_dimension,
                &max_sort_dimension,
                &switch_sort_dimension,
                &runtime_sort_dimension,
        };

        for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
                if (!strcmp(available_sorts[i]->name, tok)) {
                        list_add_tail(&available_sorts[i]->list, list);

                        return 0;
                }
        }

        return -1;
}

static void perf_sched__sort_lat(struct perf_sched *sched)
{
        struct rb_node *node;
        struct rb_root_cached *root = &sched->atom_root;
again:
        for (;;) {
                struct work_atoms *data;
                node = rb_first_cached(root);
                if (!node)
                        break;

                rb_erase_cached(node, root);
                data = rb_entry(node, struct work_atoms, node);
                __thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
        }
        if (root == &sched->atom_root) {
                root = &sched->merged_atom_root;
                goto again;
        }
}

static int process_sched_wakeup_event(const struct perf_tool *tool,
                                      struct evsel *evsel,
                                      struct perf_sample *sample,
                                      struct machine *machine)
{
        struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

        if (sched->tp_handler->wakeup_event)
                return sched->tp_handler->wakeup_event(sched, evsel, sample, machine);

        return 0;
}

static int process_sched_wakeup_ignore(const struct perf_tool *tool __maybe_unused,
                                       struct evsel *evsel __maybe_unused,
                                       struct perf_sample *sample __maybe_unused,
                                       struct machine *machine __maybe_unused)
{
        return 0;
}

union map_priv {
        void *ptr;
        bool color;
};

static bool thread__has_color(struct thread *thread)
{
        union map_priv priv = {
                .ptr = thread__priv(thread),
        };

        return priv.color;
}

static struct thread*
map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid, pid_t tid)
{
        struct thread *thread = machine__findnew_thread(machine, pid, tid);
        union map_priv priv = {
                .color = false,
        };

        if (!sched->map.color_pids || !thread || thread__priv(thread))
                return thread;

        if (thread_map__has(sched->map.color_pids, tid))
                priv.color = true;

        thread__set_priv(thread, priv.ptr);
        return thread;
}

static bool sched_match_task(struct perf_sched *sched, const char *comm_str)
{
        bool fuzzy_match = sched->map.fuzzy;
        struct strlist *task_names = sched->map.task_names;
        struct str_node *node;

        strlist__for_each_entry(node, task_names) {
                bool match_found = fuzzy_match ? !!strstr(comm_str, node->s) :
                                                 !strcmp(comm_str, node->s);
                if (match_found)
                        return true;
        }

        return false;
}

static void print_sched_map(struct perf_sched *sched, struct perf_cpu this_cpu, int cpus_nr,
                            const char *color, bool sched_out)
{
        for (int i = 0; i < cpus_nr; i++) {
                struct perf_cpu cpu = {
                        .cpu = sched->map.comp ? sched->map.comp_cpus[i].cpu : i,
                };
                struct thread *curr_thread = sched->curr_thread[cpu.cpu];
                struct thread *curr_out_thread = sched->curr_out_thread[cpu.cpu];
                struct thread_runtime *curr_tr;
                const char *pid_color = color;
                const char *cpu_color = color;
                char symbol = ' ';
                struct thread *thread_to_check = sched_out ? curr_out_thread : curr_thread;

                if (thread_to_check && thread__has_color(thread_to_check))
                        pid_color = COLOR_PIDS;

                if (sched->map.color_cpus && perf_cpu_map__has(sched->map.color_cpus, cpu))
                        cpu_color = COLOR_CPUS;

                if (cpu.cpu == this_cpu.cpu)
                        symbol = '*';

                color_fprintf(stdout, cpu.cpu != this_cpu.cpu ? color : cpu_color, "%c", symbol);

                thread_to_check = sched_out ? sched->curr_out_thread[cpu.cpu] :
                                              sched->curr_thread[cpu.cpu];

                if (thread_to_check) {
                        curr_tr = thread__get_runtime(thread_to_check);
                        if (curr_tr == NULL)
                                return;

                        if (sched_out) {
                                if (cpu.cpu == this_cpu.cpu)
                                        color_fprintf(stdout, color, "- ");
                                else {
                                        curr_tr = thread__get_runtime(sched->curr_thread[cpu.cpu]);
                                        if (curr_tr != NULL)
                                                color_fprintf(stdout, pid_color, "%2s ",
                                                              curr_tr->shortname);
                                }
                        } else
                                color_fprintf(stdout, pid_color, "%2s ", curr_tr->shortname);
                } else
                        color_fprintf(stdout, color, " ");
        }
}

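/*
 * 'perf sched map' view of a context switch: assign a two-character
 * shortname to the incoming task on first sight and print one row showing
 * what every CPU is running, roughly:
 *
 *   *A0 .  B0  123456.789012 secs A0 => mytask:1234
 *
 * (the column layout above is illustrative; see print_sched_map() for the
 * exact format)
 */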
map_switch_event(struct perf_sched * sched,struct evsel * evsel,struct perf_sample * sample,struct machine * machine)1633 static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
1634 struct perf_sample *sample, struct machine *machine)
1635 {
1636 const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
1637 const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid");
1638 struct thread *sched_in, *sched_out;
1639 struct thread_runtime *tr;
1640 int new_shortname;
1641 u64 timestamp0, timestamp = sample->time;
1642 s64 delta;
1643 struct perf_cpu this_cpu = {
1644 .cpu = sample->cpu,
1645 };
1646 int cpus_nr;
1647 int proceed;
1648 bool new_cpu = false;
1649 const char *color = PERF_COLOR_NORMAL;
1650 char stimestamp[32];
1651 const char *str;
1652 int ret = -1;
1653
1654 BUG_ON(this_cpu.cpu >= MAX_CPUS || this_cpu.cpu < 0);
1655
1656 if (this_cpu.cpu > sched->max_cpu.cpu)
1657 sched->max_cpu = this_cpu;
1658
1659 if (sched->map.comp) {
1660 cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS);
1661 if (!__test_and_set_bit(this_cpu.cpu, sched->map.comp_cpus_mask)) {
1662 sched->map.comp_cpus[cpus_nr++] = this_cpu;
1663 new_cpu = true;
1664 }
1665 } else
1666 cpus_nr = sched->max_cpu.cpu;
1667
1668 timestamp0 = sched->cpu_last_switched[this_cpu.cpu];
1669 sched->cpu_last_switched[this_cpu.cpu] = timestamp;
1670 if (timestamp0)
1671 delta = timestamp - timestamp0;
1672 else
1673 delta = 0;
1674
1675 if (delta < 0) {
1676 pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
1677 return -1;
1678 }
1679
1680 sched_in = map__findnew_thread(sched, machine, -1, next_pid);
1681 sched_out = map__findnew_thread(sched, machine, -1, prev_pid);
1682 if (sched_in == NULL || sched_out == NULL)
1683 goto out;
1684
1685 tr = thread__get_runtime(sched_in);
1686 if (tr == NULL)
1687 goto out;
1688
1689 thread__put(sched->curr_thread[this_cpu.cpu]);
1690 thread__put(sched->curr_out_thread[this_cpu.cpu]);
1691
1692 sched->curr_thread[this_cpu.cpu] = thread__get(sched_in);
1693 sched->curr_out_thread[this_cpu.cpu] = thread__get(sched_out);
1694
1695 ret = 0;
1696
1697 str = thread__comm_str(sched_in);
1698 new_shortname = 0;
1699 if (!tr->shortname[0]) {
1700 if (!strcmp(thread__comm_str(sched_in), "swapper")) {
1701 /*
1702 * Don't allocate a letter-number for swapper:0
1703 * as a shortname. Instead, we use '.' for it.
1704 */
1705 tr->shortname[0] = '.';
1706 tr->shortname[1] = ' ';
1707 } else if (!sched->map.task_name || sched_match_task(sched, str)) {
1708 tr->shortname[0] = sched->next_shortname1;
1709 tr->shortname[1] = sched->next_shortname2;
1710
1711 if (sched->next_shortname1 < 'Z') {
1712 sched->next_shortname1++;
1713 } else {
1714 sched->next_shortname1 = 'A';
1715 if (sched->next_shortname2 < '9')
1716 sched->next_shortname2++;
1717 else
1718 sched->next_shortname2 = '0';
1719 }
1720 } else {
1721 tr->shortname[0] = '-';
1722 tr->shortname[1] = ' ';
1723 }
1724 new_shortname = 1;
1725 }
1726
1727 if (sched->map.cpus && !perf_cpu_map__has(sched->map.cpus, this_cpu))
1728 goto out;
1729
1730 proceed = 0;
1731 str = thread__comm_str(sched_in);
1732 /*
1733 * Check which of sched_in and sched_out matches the passed --task-name
1734 * arguments and call the corresponding print_sched_map.
1735 */
1736 if (sched->map.task_name && !sched_match_task(sched, str)) {
1737 if (!sched_match_task(sched, thread__comm_str(sched_out)))
1738 goto out;
1739 else
1740 goto sched_out;
1741
1742 } else {
1743 str = thread__comm_str(sched_out);
1744 if (!(sched->map.task_name && !sched_match_task(sched, str)))
1745 proceed = 1;
1746 }
1747
1748 printf(" ");
1749
1750 print_sched_map(sched, this_cpu, cpus_nr, color, false);
1751
1752 timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp));
1753 color_fprintf(stdout, color, " %12s secs ", stimestamp);
1754 if (new_shortname || tr->comm_changed || (verbose > 0 && thread__tid(sched_in))) {
1755 const char *pid_color = color;
1756
1757 if (thread__has_color(sched_in))
1758 pid_color = COLOR_PIDS;
1759
1760 color_fprintf(stdout, pid_color, "%s => %s:%d",
1761 tr->shortname, thread__comm_str(sched_in), thread__tid(sched_in));
1762 tr->comm_changed = false;
1763 }
1764
1765 if (sched->map.comp && new_cpu)
1766 color_fprintf(stdout, color, " (CPU %d)", this_cpu.cpu);
1767
1768 if (proceed != 1) {
1769 color_fprintf(stdout, color, "\n");
1770 goto out;
1771 }
1772
1773 sched_out:
1774 if (sched->map.task_name) {
1775 tr = thread__get_runtime(sched->curr_out_thread[this_cpu.cpu]);
1776 if (strcmp(tr->shortname, "") == 0)
1777 goto out;
1778
1779 if (proceed == 1)
1780 color_fprintf(stdout, color, "\n");
1781
1782 printf(" ");
1783 print_sched_map(sched, this_cpu, cpus_nr, color, true);
1784 timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp));
1785 color_fprintf(stdout, color, " %12s secs ", stimestamp);
1786 }
1787
1788 color_fprintf(stdout, color, "\n");
1789
1790 out:
1791 thread__put(sched_out);
1792 thread__put(sched_in);
1793
1794 return ret;
1795 }
1796
process_sched_switch_event(const struct perf_tool * tool,struct evsel * evsel,struct perf_sample * sample,struct machine * machine)1797 static int process_sched_switch_event(const struct perf_tool *tool,
1798 struct evsel *evsel,
1799 struct perf_sample *sample,
1800 struct machine *machine)
1801 {
1802 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1803 int this_cpu = sample->cpu, err = 0;
1804 u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
1805 next_pid = evsel__intval(evsel, sample, "next_pid");
1806
1807 if (sched->curr_pid[this_cpu] != (u32)-1) {
1808 /*
1809 * Are we trying to switch away a PID that is
1810 * not current?
1811 */
1812 if (sched->curr_pid[this_cpu] != prev_pid)
1813 sched->nr_context_switch_bugs++;
1814 }
1815
1816 if (sched->tp_handler->switch_event)
1817 err = sched->tp_handler->switch_event(sched, evsel, sample, machine);
1818
1819 sched->curr_pid[this_cpu] = next_pid;
1820 return err;
1821 }
1822
process_sched_runtime_event(const struct perf_tool * tool,struct evsel * evsel,struct perf_sample * sample,struct machine * machine)1823 static int process_sched_runtime_event(const struct perf_tool *tool,
1824 struct evsel *evsel,
1825 struct perf_sample *sample,
1826 struct machine *machine)
1827 {
1828 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1829
1830 if (sched->tp_handler->runtime_event)
1831 return sched->tp_handler->runtime_event(sched, evsel, sample, machine);
1832
1833 return 0;
1834 }
1835
perf_sched__process_fork_event(const struct perf_tool * tool,union perf_event * event,struct perf_sample * sample,struct machine * machine)1836 static int perf_sched__process_fork_event(const struct perf_tool *tool,
1837 union perf_event *event,
1838 struct perf_sample *sample,
1839 struct machine *machine)
1840 {
1841 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1842
1843 /* run the fork event through the perf machinery */
1844 perf_event__process_fork(tool, event, sample, machine);
1845
1846 /* and then run additional processing needed for this command */
1847 if (sched->tp_handler->fork_event)
1848 return sched->tp_handler->fork_event(sched, event, machine);
1849
1850 return 0;
1851 }
1852
process_sched_migrate_task_event(const struct perf_tool * tool,struct evsel * evsel,struct perf_sample * sample,struct machine * machine)1853 static int process_sched_migrate_task_event(const struct perf_tool *tool,
1854 struct evsel *evsel,
1855 struct perf_sample *sample,
1856 struct machine *machine)
1857 {
1858 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1859
1860 if (sched->tp_handler->migrate_task_event)
1861 return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine);
1862
1863 return 0;
1864 }
1865
1866 typedef int (*tracepoint_handler)(const struct perf_tool *tool,
1867 struct evsel *evsel,
1868 struct perf_sample *sample,
1869 struct machine *machine);
1870
perf_sched__process_tracepoint_sample(const struct perf_tool * tool __maybe_unused,union perf_event * event __maybe_unused,struct perf_sample * sample,struct evsel * evsel,struct machine * machine)1871 static int perf_sched__process_tracepoint_sample(const struct perf_tool *tool __maybe_unused,
1872 union perf_event *event __maybe_unused,
1873 struct perf_sample *sample,
1874 struct evsel *evsel,
1875 struct machine *machine)
1876 {
1877 int err = 0;
1878
1879 if (evsel->handler != NULL) {
1880 tracepoint_handler f = evsel->handler;
1881 err = f(tool, evsel, sample, machine);
1882 }
1883
1884 return err;
1885 }
1886
1887 static int perf_sched__process_comm(const struct perf_tool *tool __maybe_unused,
1888 union perf_event *event,
1889 struct perf_sample *sample,
1890 struct machine *machine)
1891 {
1892 struct thread *thread;
1893 struct thread_runtime *tr;
1894 int err;
1895
1896 err = perf_event__process_comm(tool, event, sample, machine);
1897 if (err)
1898 return err;
1899
1900 thread = machine__find_thread(machine, sample->pid, sample->tid);
1901 if (!thread) {
1902 pr_err("Internal error: can't find thread\n");
1903 return -1;
1904 }
1905
1906 tr = thread__get_runtime(thread);
1907 if (tr == NULL) {
1908 thread__put(thread);
1909 return -1;
1910 }
1911
1912 tr->comm_changed = true;
1913 thread__put(thread);
1914
1915 return 0;
1916 }
1917
1918 static int perf_sched__read_events(struct perf_sched *sched)
1919 {
1920 struct evsel_str_handler handlers[] = {
1921 { "sched:sched_switch", process_sched_switch_event, },
1922 { "sched:sched_stat_runtime", process_sched_runtime_event, },
1923 { "sched:sched_wakeup", process_sched_wakeup_event, },
1924 { "sched:sched_waking", process_sched_wakeup_event, },
1925 { "sched:sched_wakeup_new", process_sched_wakeup_event, },
1926 { "sched:sched_migrate_task", process_sched_migrate_task_event, },
1927 };
1928 struct perf_session *session;
1929 struct perf_data data = {
1930 .path = input_name,
1931 .mode = PERF_DATA_MODE_READ,
1932 .force = sched->force,
1933 };
1934 int rc = -1;
1935
1936 session = perf_session__new(&data, &sched->tool);
1937 if (IS_ERR(session)) {
1938 pr_debug("Error creating perf session\n");
1939 return PTR_ERR(session);
1940 }
1941
1942 symbol__init(perf_session__env(session));
1943
1944 /* prefer sched_waking if it is captured */
1945 if (evlist__find_tracepoint_by_name(session->evlist, "sched:sched_waking"))
1946 handlers[2].handler = process_sched_wakeup_ignore;
1947
1948 if (perf_session__set_tracepoints_handlers(session, handlers))
1949 goto out_delete;
1950
1951 if (perf_session__has_traces(session, "record -R")) {
1952 int err = perf_session__process_events(session);
1953 if (err) {
1954 pr_err("Failed to process events, error %d\n", err);
1955 goto out_delete;
1956 }
1957
1958 sched->nr_events = session->evlist->stats.nr_events[0];
1959 sched->nr_lost_events = session->evlist->stats.total_lost;
1960 sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
1961 }
1962
1963 rc = 0;
1964 out_delete:
1965 perf_session__delete(session);
1966 return rc;
1967 }
1968
1969 /*
1970 * scheduling times are printed as msec.usec
1971 */
1972 static inline void print_sched_time(unsigned long long nsecs, int width)
1973 {
1974 unsigned long msecs;
1975 unsigned long usecs;
1976
1977 msecs = nsecs / NSEC_PER_MSEC;
1978 nsecs -= msecs * NSEC_PER_MSEC;
1979 usecs = nsecs / NSEC_PER_USEC;
1980 printf("%*lu.%03lu ", width, msecs, usecs);
1981 }
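/*
 * Editor's note - a minimal standalone sketch of the msec.usec split
 * above, for illustration only. The constants mirror <linux/time64.h>;
 * this block is not part of the file and is compiled out:
 */
#if 0
#include <stdio.h>

#define NSEC_PER_MSEC 1000000ULL
#define NSEC_PER_USEC 1000ULL

int main(void)
{
	unsigned long long nsecs = 1234567;	/* 1.234567 ms */
	unsigned long msecs = nsecs / NSEC_PER_MSEC;			/* 1 */
	unsigned long usecs = (nsecs % NSEC_PER_MSEC) / NSEC_PER_USEC;	/* 234, truncated */

	/* width 6 pads the msec part: prints "     1.234 " */
	printf("%*lu.%03lu \n", 6, msecs, usecs);
	return 0;
}
#endif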
1982
1983 /*
1984 * returns runtime data for event, allocating memory for it the
1985 * first time it is used.
1986 */
1987 static struct evsel_runtime *evsel__get_runtime(struct evsel *evsel)
1988 {
1989 struct evsel_runtime *r = evsel->priv;
1990
1991 if (r == NULL) {
1992 r = zalloc(sizeof(struct evsel_runtime));
1993 evsel->priv = r;
1994 }
1995
1996 return r;
1997 }
1998
1999 /*
2000 * save last time event was seen per cpu
2001 */
2002 static void evsel__save_time(struct evsel *evsel, u64 timestamp, u32 cpu)
2003 {
2004 struct evsel_runtime *r = evsel__get_runtime(evsel);
2005
2006 if (r == NULL)
2007 return;
2008
2009 if ((cpu >= r->ncpu) || (r->last_time == NULL)) {
2010 int i, n = __roundup_pow_of_two(cpu+1);
2011 void *p = r->last_time;
2012
2013 p = realloc(r->last_time, n * sizeof(u64));
2014 if (!p)
2015 return;
2016
2017 r->last_time = p;
2018 for (i = r->ncpu; i < n; ++i)
2019 r->last_time[i] = (u64) 0;
2020
2021 r->ncpu = n;
2022 }
2023
2024 r->last_time[cpu] = timestamp;
2025 }
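/*
 * Editor's example: the per-cpu array grows in powers of two. The first
 * event seen on cpu 5 allocates __roundup_pow_of_two(6) == 8 slots,
 * zero-fills slots r->ncpu..7, sets r->ncpu to 8, and then stores the
 * timestamp in slot 5; later events on cpus 0-7 need no reallocation.
 */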
2026
2027 /* returns last time this event was seen on the given cpu */
2028 static u64 evsel__get_time(struct evsel *evsel, u32 cpu)
2029 {
2030 struct evsel_runtime *r = evsel__get_runtime(evsel);
2031
2032 if ((r == NULL) || (r->last_time == NULL) || (cpu >= r->ncpu))
2033 return 0;
2034
2035 return r->last_time[cpu];
2036 }
2037
2038 static void timehist__evsel_priv_destructor(void *priv)
2039 {
2040 struct evsel_runtime *r = priv;
2041
2042 if (r) {
2043 free(r->last_time);
2044 free(r);
2045 }
2046 }
2047
2048 static int comm_width = 30;
2049
2050 static char *timehist_get_commstr(struct thread *thread)
2051 {
2052 static char str[32];
2053 const char *comm = thread__comm_str(thread);
2054 pid_t tid = thread__tid(thread);
2055 pid_t pid = thread__pid(thread);
2056 int n;
2057
2058 if (pid == 0)
2059 n = scnprintf(str, sizeof(str), "%s", comm);
2060
2061 else if (tid != pid)
2062 n = scnprintf(str, sizeof(str), "%s[%d/%d]", comm, tid, pid);
2063
2064 else
2065 n = scnprintf(str, sizeof(str), "%s[%d]", comm, tid);
2066
2067 if (n > comm_width)
2068 comm_width = n;
2069
2070 return str;
2071 }
2072
2073 /* prio field format: xxx or xxx->yyy */
2074 #define MAX_PRIO_STR_LEN 8
2075 static char *timehist_get_priostr(struct evsel *evsel,
2076 struct thread *thread,
2077 struct perf_sample *sample)
2078 {
2079 static char prio_str[16];
2080 int prev_prio = (int)evsel__intval(evsel, sample, "prev_prio");
2081 struct thread_runtime *tr = thread__priv(thread);
2082
2083 if (tr->prio != prev_prio && tr->prio != -1)
2084 scnprintf(prio_str, sizeof(prio_str), "%d->%d", tr->prio, prev_prio);
2085 else
2086 scnprintf(prio_str, sizeof(prio_str), "%d", prev_prio);
2087
2088 return prio_str;
2089 }
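/*
 * Editor's example: if the task previously ran at prio 120 and is
 * switched out with prev_prio 110, the field reads "120->110"; if the
 * priority is unchanged (or was never seen), it reads just "110".
 */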
2090
2091 static void timehist_header(struct perf_sched *sched)
2092 {
2093 u32 ncpus = sched->max_cpu.cpu + 1;
2094 u32 i, j;
2095
2096 printf("%15s %6s ", "time", "cpu");
2097
2098 if (sched->show_cpu_visual) {
2099 printf(" ");
2100 for (i = 0, j = 0; i < ncpus; ++i) {
2101 printf("%x", j++);
2102 if (j > 15)
2103 j = 0;
2104 }
2105 printf(" ");
2106 }
2107
2108 printf(" %-*s", comm_width, "task name");
2109
2110 if (sched->show_prio)
2111 printf(" %-*s", MAX_PRIO_STR_LEN, "prio");
2112
2113 printf(" %9s %9s %9s", "wait time", "sch delay", "run time");
2114
2115 if (sched->pre_migrations)
2116 printf(" %9s", "pre-mig time");
2117
2118 if (sched->show_state)
2119 printf(" %s", "state");
2120
2121 printf("\n");
2122
2123 /*
2124 * units row
2125 */
2126 printf("%15s %-6s ", "", "");
2127
2128 if (sched->show_cpu_visual)
2129 printf(" %*s ", ncpus, "");
2130
2131 printf(" %-*s", comm_width, "[tid/pid]");
2132
2133 if (sched->show_prio)
2134 printf(" %-*s", MAX_PRIO_STR_LEN, "");
2135
2136 printf(" %9s %9s %9s", "(msec)", "(msec)", "(msec)");
2137
2138 if (sched->pre_migrations)
2139 printf(" %9s", "(msec)");
2140
2141 printf("\n");
2142
2143 /*
2144 * separator
2145 */
2146 printf("%.15s %.6s ", graph_dotted_line, graph_dotted_line);
2147
2148 if (sched->show_cpu_visual)
2149 printf(" %.*s ", ncpus, graph_dotted_line);
2150
2151 printf(" %.*s", comm_width, graph_dotted_line);
2152
2153 if (sched->show_prio)
2154 printf(" %.*s", MAX_PRIO_STR_LEN, graph_dotted_line);
2155
2156 printf(" %.9s %.9s %.9s", graph_dotted_line, graph_dotted_line, graph_dotted_line);
2157
2158 if (sched->pre_migrations)
2159 printf(" %.9s", graph_dotted_line);
2160
2161 if (sched->show_state)
2162 printf(" %.5s", graph_dotted_line);
2163
2164 printf("\n");
2165 }
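/*
 * Editor's note: with default options the three rows printed above look
 * roughly like:
 *
 *            time    cpu  task name                       wait time  sch delay   run time
 *                         [tid/pid]                           (msec)     (msec)     (msec)
 * --------------- ------  ------------------------------  ---------  ---------  ---------
 */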
2166
2167 static void timehist_print_sample(struct perf_sched *sched,
2168 struct evsel *evsel,
2169 struct perf_sample *sample,
2170 struct addr_location *al,
2171 struct thread *thread,
2172 u64 t, const char state)
2173 {
2174 struct thread_runtime *tr = thread__priv(thread);
2175 const char *next_comm = evsel__strval(evsel, sample, "next_comm");
2176 const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
2177 u32 max_cpus = sched->max_cpu.cpu + 1;
2178 char tstr[64];
2179 char nstr[30];
2180 u64 wait_time;
2181
2182 if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
2183 return;
2184
2185 timestamp__scnprintf_usec(t, tstr, sizeof(tstr));
2186 printf("%15s [%04d] ", tstr, sample->cpu);
2187
2188 if (sched->show_cpu_visual) {
2189 u32 i;
2190 char c;
2191
2192 printf(" ");
2193 for (i = 0; i < max_cpus; ++i) {
2194 /* flag idle times with 'i'; others are sched events */
2195 if (i == sample->cpu)
2196 c = (thread__tid(thread) == 0) ? 'i' : 's';
2197 else
2198 c = ' ';
2199 printf("%c", c);
2200 }
2201 printf(" ");
2202 }
2203
2204 if (!thread__comm_set(thread)) {
2205 const char *prev_comm = evsel__strval(evsel, sample, "prev_comm");
2206 thread__set_comm(thread, prev_comm, sample->time);
2207 }
2208
2209 printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2210
2211 if (sched->show_prio)
2212 printf(" %-*s ", MAX_PRIO_STR_LEN, timehist_get_priostr(evsel, thread, sample));
2213
2214 wait_time = tr->dt_sleep + tr->dt_iowait + tr->dt_preempt;
2215 print_sched_time(wait_time, 6);
2216
2217 print_sched_time(tr->dt_delay, 6);
2218 print_sched_time(tr->dt_run, 6);
2219 if (sched->pre_migrations)
2220 print_sched_time(tr->dt_pre_mig, 6);
2221
2222 if (sched->show_state)
2223 printf(" %5c ", thread__tid(thread) == 0 ? 'I' : state);
2224
2225 if (sched->show_next) {
2226 snprintf(nstr, sizeof(nstr), "next: %s[%d]", next_comm, next_pid);
2227 printf(" %-*s", comm_width, nstr);
2228 }
2229
2230 if (sched->show_wakeups && !sched->show_next)
2231 printf(" %-*s", comm_width, "");
2232
2233 if (thread__tid(thread) == 0)
2234 goto out;
2235
2236 if (sched->show_callchain)
2237 printf(" ");
2238
2239 sample__fprintf_sym(sample, al, 0,
2240 EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
2241 EVSEL__PRINT_CALLCHAIN_ARROW |
2242 EVSEL__PRINT_SKIP_IGNORED,
2243 get_tls_callchain_cursor(), symbol_conf.bt_stop_list, stdout);
2244
2245 out:
2246 printf("\n");
2247 }
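/*
 * Editor's note: a typical line printed above looks along the lines of
 *
 *     2054.472902 [0000]  bash[8526]      0.001      0.000      0.004
 *
 * i.e. timestamp, cpu, task, then wait time, scheduling delay and run
 * time in msec for the task being switched out.
 */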
2248
2249 /*
2250 * Explanation of delta-time stats:
2251 *
2252 * t = time of current schedule out event
2253 * tprev = time of previous sched out event
2254 * also time of schedule-in event for current task
2255 * last_time = time of last sched change event for current task
2256 * (i.e, time process was last scheduled out)
2257 * ready_to_run = time of wakeup for current task
2258 * migrated = time of task migration to another CPU
2259 *
2260 * -----|-------------|-------------|-------------|-------------|-----
2261 * last ready migrated tprev t
2262 * time to run
2263 *
2264 * |---------------- dt_wait ----------------|
2265 * |--------- dt_delay ---------|-- dt_run --|
2266 * |- dt_pre_mig -|
2267 *
2268 * dt_run = run time of current task
2269 * dt_wait = time between last schedule out event for task and tprev
2270 * represents time spent off the cpu
2271 * dt_delay = time between wakeup and schedule-in of task
2272 * dt_pre_mig = time between wakeup and migration to another CPU
2273 */
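/*
 * Editor's worked example for the diagram above (all times in usec):
 * last_time = 100, ready_to_run = 150, migrated = 160, tprev = 180,
 * t = 200 gives
 *   dt_run     = t - tprev               = 20
 *   dt_wait    = tprev - last_time       = 80 (booked as sleep, iowait
 *                or preempt depending on last_state)
 *   dt_delay   = tprev - ready_to_run    = 30
 *   dt_pre_mig = migrated - ready_to_run = 10
 */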
2274
2275 static void timehist_update_runtime_stats(struct thread_runtime *r,
2276 u64 t, u64 tprev)
2277 {
2278 r->dt_delay = 0;
2279 r->dt_sleep = 0;
2280 r->dt_iowait = 0;
2281 r->dt_preempt = 0;
2282 r->dt_run = 0;
2283 r->dt_pre_mig = 0;
2284
2285 if (tprev) {
2286 r->dt_run = t - tprev;
2287 if (r->ready_to_run) {
2288 if (r->ready_to_run > tprev)
2289 pr_debug("time travel: wakeup time for task > previous sched_switch event\n");
2290 else
2291 r->dt_delay = tprev - r->ready_to_run;
2292
2293 if ((r->migrated > r->ready_to_run) && (r->migrated < tprev))
2294 r->dt_pre_mig = r->migrated - r->ready_to_run;
2295 }
2296
2297 if (r->last_time > tprev)
2298 pr_debug("time travel: last sched out time for task > previous sched_switch event\n");
2299 else if (r->last_time) {
2300 u64 dt_wait = tprev - r->last_time;
2301
2302 if (r->last_state == 'R')
2303 r->dt_preempt = dt_wait;
2304 else if (r->last_state == 'D')
2305 r->dt_iowait = dt_wait;
2306 else
2307 r->dt_sleep = dt_wait;
2308 }
2309 }
2310
2311 update_stats(&r->run_stats, r->dt_run);
2312
2313 r->total_run_time += r->dt_run;
2314 r->total_delay_time += r->dt_delay;
2315 r->total_sleep_time += r->dt_sleep;
2316 r->total_iowait_time += r->dt_iowait;
2317 r->total_preempt_time += r->dt_preempt;
2318 r->total_pre_mig_time += r->dt_pre_mig;
2319 }
2320
2321 static bool is_idle_sample(struct perf_sample *sample,
2322 struct evsel *evsel)
2323 {
2324 /* pid 0 == swapper == idle task */
2325 if (evsel__name_is(evsel, "sched:sched_switch"))
2326 return evsel__intval(evsel, sample, "prev_pid") == 0;
2327
2328 return sample->pid == 0;
2329 }
2330
2331 static void save_task_callchain(struct perf_sched *sched,
2332 struct perf_sample *sample,
2333 struct evsel *evsel,
2334 struct machine *machine)
2335 {
2336 struct callchain_cursor *cursor;
2337 struct thread *thread;
2338
2339 /* want main thread for process - has maps */
2340 thread = machine__findnew_thread(machine, sample->pid, sample->pid);
2341 if (thread == NULL) {
2342 pr_debug("Failed to get thread for pid %d.\n", sample->pid);
2343 return;
2344 }
2345
2346 if (!sched->show_callchain || sample->callchain == NULL) {
2347 thread__put(thread);
2348 return;
2349 }
2350
2351 cursor = get_tls_callchain_cursor();
2352
2353 if (thread__resolve_callchain(thread, cursor, evsel, sample,
2354 NULL, NULL, sched->max_stack + 2) != 0) {
2355 if (verbose > 0)
2356 pr_err("Failed to resolve callchain. Skipping\n");
2357
2358 thread__put(thread);
2359 return;
2360 }
2361
2362 callchain_cursor_commit(cursor);
2363 thread__put(thread);
2364
2365 while (true) {
2366 struct callchain_cursor_node *node;
2367 struct symbol *sym;
2368
2369 node = callchain_cursor_current(cursor);
2370 if (node == NULL)
2371 break;
2372
2373 sym = node->ms.sym;
2374 if (sym) {
2375 if (!strcmp(sym->name, "schedule") ||
2376 !strcmp(sym->name, "__schedule") ||
2377 !strcmp(sym->name, "preempt_schedule"))
2378 sym->ignore = 1;
2379 }
2380
2381 callchain_cursor_advance(cursor);
2382 }
2383 }
2384
2385 static int init_idle_thread(struct thread *thread)
2386 {
2387 struct idle_thread_runtime *itr;
2388
2389 thread__set_comm(thread, idle_comm, 0);
2390
2391 itr = zalloc(sizeof(*itr));
2392 if (itr == NULL)
2393 return -ENOMEM;
2394
2395 init_prio(&itr->tr);
2396 init_stats(&itr->tr.run_stats);
2397 callchain_init(&itr->callchain);
2398 callchain_cursor_reset(&itr->cursor);
2399 thread__set_priv(thread, itr);
2400
2401 return 0;
2402 }
2403
2404 /*
2405 * Track idle stats per cpu by maintaining a local thread
2406 * struct for the idle task on each cpu.
2407 */
2408 static int init_idle_threads(int ncpu)
2409 {
2410 int i, ret;
2411
2412 idle_threads = zalloc(ncpu * sizeof(struct thread *));
2413 if (!idle_threads)
2414 return -ENOMEM;
2415
2416 idle_max_cpu = ncpu;
2417
2418 /* allocate the actual thread struct for each cpu */
2419 for (i = 0; i < ncpu; ++i) {
2420 idle_threads[i] = thread__new(0, 0);
2421 if (idle_threads[i] == NULL)
2422 return -ENOMEM;
2423
2424 ret = init_idle_thread(idle_threads[i]);
2425 if (ret < 0)
2426 return ret;
2427 }
2428
2429 return 0;
2430 }
2431
2432 static void free_idle_threads(void)
2433 {
2434 int i;
2435
2436 if (idle_threads == NULL)
2437 return;
2438
2439 for (i = 0; i < idle_max_cpu; ++i) {
2440 struct thread *idle = idle_threads[i];
2441
2442 if (idle) {
2443 struct idle_thread_runtime *itr;
2444
2445 itr = thread__priv(idle);
2446 if (itr)
2447 thread__put(itr->last_thread);
2448
2449 thread__delete(idle);
2450 }
2451 }
2452
2453 free(idle_threads);
2454 }
2455
2456 static struct thread *get_idle_thread(int cpu)
2457 {
2458 /*
2459 * expand/allocate array of pointers to local thread
2460 * structs if needed
2461 */
2462 if ((cpu >= idle_max_cpu) || (idle_threads == NULL)) {
2463 int i, j = __roundup_pow_of_two(cpu+1);
2464 void *p;
2465
2466 p = realloc(idle_threads, j * sizeof(struct thread *));
2467 if (!p)
2468 return NULL;
2469
2470 idle_threads = (struct thread **) p;
2471 for (i = idle_max_cpu; i < j; ++i)
2472 idle_threads[i] = NULL;
2473
2474 idle_max_cpu = j;
2475 }
2476
2477 /* allocate a new thread struct if needed */
2478 if (idle_threads[cpu] == NULL) {
2479 idle_threads[cpu] = thread__new(0, 0);
2480
2481 if (idle_threads[cpu]) {
2482 if (init_idle_thread(idle_threads[cpu]) < 0)
2483 return NULL;
2484 }
2485 }
2486
2487 return thread__get(idle_threads[cpu]);
2488 }
2489
2490 static void save_idle_callchain(struct perf_sched *sched,
2491 struct idle_thread_runtime *itr,
2492 struct perf_sample *sample)
2493 {
2494 struct callchain_cursor *cursor;
2495
2496 if (!sched->show_callchain || sample->callchain == NULL)
2497 return;
2498
2499 cursor = get_tls_callchain_cursor();
2500 if (cursor == NULL)
2501 return;
2502
2503 callchain_cursor__copy(&itr->cursor, cursor);
2504 }
2505
2506 static struct thread *timehist_get_thread(struct perf_sched *sched,
2507 struct perf_sample *sample,
2508 struct machine *machine,
2509 struct evsel *evsel)
2510 {
2511 struct thread *thread;
2512
2513 if (is_idle_sample(sample, evsel)) {
2514 thread = get_idle_thread(sample->cpu);
2515 if (thread == NULL)
2516 pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
2517
2518 } else {
2519 /* there were samples with tid 0 but non-zero pid */
2520 thread = machine__findnew_thread(machine, sample->pid,
2521 sample->tid ?: sample->pid);
2522 if (thread == NULL) {
2523 pr_debug("Failed to get thread for tid %d. skipping sample.\n",
2524 sample->tid);
2525 }
2526
2527 save_task_callchain(sched, sample, evsel, machine);
2528 if (sched->idle_hist) {
2529 struct thread *idle;
2530 struct idle_thread_runtime *itr;
2531
2532 idle = get_idle_thread(sample->cpu);
2533 if (idle == NULL) {
2534 pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
2535 return NULL;
2536 }
2537
2538 itr = thread__priv(idle);
2539 if (itr == NULL)
2540 return NULL;
2541
2542 thread__put(itr->last_thread);
2543 itr->last_thread = thread__get(thread);
2544
2545 /* copy the task callchain when entering idle */
2546 if (evsel__intval(evsel, sample, "next_pid") == 0)
2547 save_idle_callchain(sched, itr, sample);
2548 }
2549 }
2550
2551 return thread;
2552 }
2553
2554 static bool timehist_skip_sample(struct perf_sched *sched,
2555 struct thread *thread,
2556 struct evsel *evsel,
2557 struct perf_sample *sample)
2558 {
2559 bool rc = false;
2560 int prio = -1;
2561 struct thread_runtime *tr = NULL;
2562
2563 if (thread__is_filtered(thread)) {
2564 rc = true;
2565 sched->skipped_samples++;
2566 }
2567
2568 if (sched->prio_str) {
2569 /*
2570 * Because the priority may change during task execution, first
2571 * read the priority from the previous sched-in event of the
2572 * current task. If that was not saved, read the priority from
2573 * the current task's sched-out event instead.
2574 */
2575 tr = thread__get_runtime(thread);
2576 if (tr && tr->prio != -1)
2577 prio = tr->prio;
2578 else if (evsel__name_is(evsel, "sched:sched_switch"))
2579 prio = evsel__intval(evsel, sample, "prev_prio");
2580
2581 if (prio != -1 && !test_bit(prio, sched->prio_bitmap)) {
2582 rc = true;
2583 sched->skipped_samples++;
2584 }
2585 }
2586
2587 if (sched->idle_hist) {
2588 if (!evsel__name_is(evsel, "sched:sched_switch"))
2589 rc = true;
2590 else if (evsel__intval(evsel, sample, "prev_pid") != 0 &&
2591 evsel__intval(evsel, sample, "next_pid") != 0)
2592 rc = true;
2593 }
2594
2595 return rc;
2596 }
2597
2598 static void timehist_print_wakeup_event(struct perf_sched *sched,
2599 struct evsel *evsel,
2600 struct perf_sample *sample,
2601 struct machine *machine,
2602 struct thread *awakened)
2603 {
2604 struct thread *thread;
2605 char tstr[64];
2606
2607 thread = machine__findnew_thread(machine, sample->pid, sample->tid);
2608 if (thread == NULL)
2609 return;
2610
2611 /* show the wakeup unless both the wakee and the waker are filtered */
2612 if (timehist_skip_sample(sched, thread, evsel, sample) &&
2613 timehist_skip_sample(sched, awakened, evsel, sample)) {
2614 thread__put(thread);
2615 return;
2616 }
2617
2618 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2619 printf("%15s [%04d] ", tstr, sample->cpu);
2620 if (sched->show_cpu_visual)
2621 printf(" %*s ", sched->max_cpu.cpu + 1, "");
2622
2623 printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2624
2625 /* dt spacer */
2626 printf(" %9s %9s %9s ", "", "", "");
2627
2628 printf("awakened: %s", timehist_get_commstr(awakened));
2629
2630 printf("\n");
2631
2632 thread__put(thread);
2633 }
2634
2635 static int timehist_sched_wakeup_ignore(const struct perf_tool *tool __maybe_unused,
2636 union perf_event *event __maybe_unused,
2637 struct evsel *evsel __maybe_unused,
2638 struct perf_sample *sample __maybe_unused,
2639 struct machine *machine __maybe_unused)
2640 {
2641 return 0;
2642 }
2643
2644 static int timehist_sched_wakeup_event(const struct perf_tool *tool,
2645 union perf_event *event __maybe_unused,
2646 struct evsel *evsel,
2647 struct perf_sample *sample,
2648 struct machine *machine)
2649 {
2650 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2651 struct thread *thread;
2652 struct thread_runtime *tr = NULL;
2653 /* want pid of awakened task not pid in sample */
2654 const u32 pid = evsel__intval(evsel, sample, "pid");
2655
2656 thread = machine__findnew_thread(machine, 0, pid);
2657 if (thread == NULL)
2658 return -1;
2659
2660 tr = thread__get_runtime(thread);
2661 if (tr == NULL) {
2662 thread__put(thread);
2663 return -1;
2664 }
2665
2666 if (tr->ready_to_run == 0)
2667 tr->ready_to_run = sample->time;
2668
2669 /* show wakeups if requested */
2670 if (sched->show_wakeups &&
2671 !perf_time__skip_sample(&sched->ptime, sample->time))
2672 timehist_print_wakeup_event(sched, evsel, sample, machine, thread);
2673
2674 thread__put(thread);
2675 return 0;
2676 }
2677
2678 static void timehist_print_migration_event(struct perf_sched *sched,
2679 struct evsel *evsel,
2680 struct perf_sample *sample,
2681 struct machine *machine,
2682 struct thread *migrated)
2683 {
2684 struct thread *thread;
2685 char tstr[64];
2686 u32 max_cpus;
2687 u32 ocpu, dcpu;
2688
2689 if (sched->summary_only)
2690 return;
2691
2692 max_cpus = sched->max_cpu.cpu + 1;
2693 ocpu = evsel__intval(evsel, sample, "orig_cpu");
2694 dcpu = evsel__intval(evsel, sample, "dest_cpu");
2695
2696 thread = machine__findnew_thread(machine, sample->pid, sample->tid);
2697 if (thread == NULL)
2698 return;
2699
2700 if (timehist_skip_sample(sched, thread, evsel, sample) &&
2701 timehist_skip_sample(sched, migrated, evsel, sample)) {
2702 thread__put(thread);
2703 return;
2704 }
2705
2706 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2707 printf("%15s [%04d] ", tstr, sample->cpu);
2708
2709 if (sched->show_cpu_visual) {
2710 u32 i;
2711 char c;
2712
2713 printf(" ");
2714 for (i = 0; i < max_cpus; ++i) {
2715 c = (i == sample->cpu) ? 'm' : ' ';
2716 printf("%c", c);
2717 }
2718 printf(" ");
2719 }
2720
2721 printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2722
2723 /* dt spacer */
2724 printf(" %9s %9s %9s ", "", "", "");
2725
2726 printf("migrated: %s", timehist_get_commstr(migrated));
2727 printf(" cpu %d => %d", ocpu, dcpu);
2728
2729 printf("\n");
2730 thread__put(thread);
2731 }
2732
2733 static int timehist_migrate_task_event(const struct perf_tool *tool,
2734 union perf_event *event __maybe_unused,
2735 struct evsel *evsel,
2736 struct perf_sample *sample,
2737 struct machine *machine)
2738 {
2739 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2740 struct thread *thread;
2741 struct thread_runtime *tr = NULL;
2742 /* want pid of migrated task not pid in sample */
2743 const u32 pid = evsel__intval(evsel, sample, "pid");
2744
2745 thread = machine__findnew_thread(machine, 0, pid);
2746 if (thread == NULL)
2747 return -1;
2748
2749 tr = thread__get_runtime(thread);
2750 if (tr == NULL) {
2751 thread__put(thread);
2752 return -1;
2753 }
2754
2755 tr->migrations++;
2756 tr->migrated = sample->time;
2757
2758 /* show migrations if requested */
2759 if (sched->show_migrations) {
2760 timehist_print_migration_event(sched, evsel, sample,
2761 machine, thread);
2762 }
2763 thread__put(thread);
2764
2765 return 0;
2766 }
2767
2768 static void timehist_update_task_prio(struct evsel *evsel,
2769 struct perf_sample *sample,
2770 struct machine *machine)
2771 {
2772 struct thread *thread;
2773 struct thread_runtime *tr = NULL;
2774 const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
2775 const u32 next_prio = evsel__intval(evsel, sample, "next_prio");
2776
2777 if (next_pid == 0)
2778 thread = get_idle_thread(sample->cpu);
2779 else
2780 thread = machine__findnew_thread(machine, -1, next_pid);
2781
2782 if (thread == NULL)
2783 return;
2784
2785 tr = thread__get_runtime(thread);
2786 if (tr != NULL)
2787 tr->prio = next_prio;
2788
2789 thread__put(thread);
2790 }
2791
2792 static int timehist_sched_change_event(const struct perf_tool *tool,
2793 union perf_event *event,
2794 struct evsel *evsel,
2795 struct perf_sample *sample,
2796 struct machine *machine)
2797 {
2798 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2799 struct perf_time_interval *ptime = &sched->ptime;
2800 struct addr_location al;
2801 struct thread *thread = NULL;
2802 struct thread_runtime *tr = NULL;
2803 u64 tprev, t = sample->time;
2804 int rc = 0;
2805 const char state = evsel__taskstate(evsel, sample, "prev_state");
2806
2807 addr_location__init(&al);
2808 if (machine__resolve(machine, &al, sample) < 0) {
2809 pr_err("problem processing %d event. skipping it\n",
2810 event->header.type);
2811 rc = -1;
2812 goto out;
2813 }
2814
2815 if (sched->show_prio || sched->prio_str)
2816 timehist_update_task_prio(evsel, sample, machine);
2817
2818 thread = timehist_get_thread(sched, sample, machine, evsel);
2819 if (thread == NULL) {
2820 rc = -1;
2821 goto out;
2822 }
2823
2824 if (timehist_skip_sample(sched, thread, evsel, sample))
2825 goto out;
2826
2827 tr = thread__get_runtime(thread);
2828 if (tr == NULL) {
2829 rc = -1;
2830 goto out;
2831 }
2832
2833 tprev = evsel__get_time(evsel, sample->cpu);
2834
2835 /*
2836 * If a start time is given:
2837 * - sample time is before the window of interest - skip the sample
2838 * - tprev is before the window of interest - reset it to the window start
2839 */
2840 if (ptime->start && ptime->start > t)
2841 goto out;
2842
2843 if (tprev && ptime->start > tprev)
2844 tprev = ptime->start;
2845
2846 /*
2847 * If an end time is given:
2848 * - previous sched event is out of the window - we are done
2849 * - sample time is beyond the window of interest - reset it to the
2850 * window end to close out stats for the window
2851 * - if tprev is 0, i.e. the sched-in event for the current task was
2852 * not recorded, we cannot tell whether the interval falls within
2853 * the window of interest - ignore it
2854 */
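/*
 * Editor's example: if the window end is 2000 (same units as t), a
 * switch at t = 2100 whose tprev = 1900 is clamped to t = 2000 so the
 * interval closes at the window edge, while one whose tprev = 2050 is
 * skipped entirely, as is one with tprev == 0.
 */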
2855 if (ptime->end) {
2856 if (!tprev || tprev > ptime->end)
2857 goto out;
2858
2859 if (t > ptime->end)
2860 t = ptime->end;
2861 }
2862
2863 if (!sched->idle_hist || thread__tid(thread) == 0) {
2864 if (!cpu_list || test_bit(sample->cpu, cpu_bitmap))
2865 timehist_update_runtime_stats(tr, t, tprev);
2866
2867 if (sched->idle_hist) {
2868 struct idle_thread_runtime *itr = (void *)tr;
2869 struct thread_runtime *last_tr;
2870
2871 if (itr->last_thread == NULL)
2872 goto out;
2873
2874 /* add current idle time as last thread's runtime */
2875 last_tr = thread__get_runtime(itr->last_thread);
2876 if (last_tr == NULL)
2877 goto out;
2878
2879 timehist_update_runtime_stats(last_tr, t, tprev);
2880 /*
2881 * Remove the delta times of the last thread as they are not
2882 * updated and would otherwise show invalid values next time.
2883 * We only care about total run time and run stats here.
2884 */
2885 last_tr->dt_run = 0;
2886 last_tr->dt_delay = 0;
2887 last_tr->dt_sleep = 0;
2888 last_tr->dt_iowait = 0;
2889 last_tr->dt_preempt = 0;
2890
2891 if (itr->cursor.nr)
2892 callchain_append(&itr->callchain, &itr->cursor, t - tprev);
2893
2894 itr->last_thread = NULL;
2895 }
2896
2897 if (!sched->summary_only)
2898 timehist_print_sample(sched, evsel, sample, &al, thread, t, state);
2899 }
2900
2901 out:
2902 if (sched->hist_time.start == 0 && t >= ptime->start)
2903 sched->hist_time.start = t;
2904 if (ptime->end == 0 || t <= ptime->end)
2905 sched->hist_time.end = t;
2906
2907 if (tr) {
2908 /* time of this sched_switch event becomes last time task seen */
2909 tr->last_time = sample->time;
2910
2911 /* last state is used to determine where to account wait time */
2912 tr->last_state = state;
2913
2914 /* sched-out event for the task, so reset its ready-to-run and migrated times */
2915 if (state == 'R')
2916 tr->ready_to_run = t;
2917 else
2918 tr->ready_to_run = 0;
2919
2920 tr->migrated = 0;
2921 }
2922
2923 evsel__save_time(evsel, sample->time, sample->cpu);
2924
2925 thread__put(thread);
2926 addr_location__exit(&al);
2927 return rc;
2928 }
2929
2930 static int timehist_sched_switch_event(const struct perf_tool *tool,
2931 union perf_event *event,
2932 struct evsel *evsel,
2933 struct perf_sample *sample,
2934 struct machine *machine __maybe_unused)
2935 {
2936 return timehist_sched_change_event(tool, event, evsel, sample, machine);
2937 }
2938
2939 static int process_lost(const struct perf_tool *tool __maybe_unused,
2940 union perf_event *event,
2941 struct perf_sample *sample,
2942 struct machine *machine __maybe_unused)
2943 {
2944 char tstr[64];
2945
2946 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2947 printf("%15s ", tstr);
2948 printf("lost %" PRI_lu64 " events on cpu %d\n", event->lost.lost, sample->cpu);
2949
2950 return 0;
2951 }
2952
2953
2954 static void print_thread_runtime(struct thread *t,
2955 struct thread_runtime *r)
2956 {
2957 double mean = avg_stats(&r->run_stats);
2958 float stddev;
2959
2960 printf("%*s %5d %9" PRIu64 " ",
2961 comm_width, timehist_get_commstr(t), thread__ppid(t),
2962 (u64) r->run_stats.n);
2963
2964 print_sched_time(r->total_run_time, 8);
2965 stddev = rel_stddev_stats(stddev_stats(&r->run_stats), mean);
2966 print_sched_time(r->run_stats.min, 6);
2967 printf(" ");
2968 print_sched_time((u64) mean, 6);
2969 printf(" ");
2970 print_sched_time(r->run_stats.max, 6);
2971 printf(" ");
2972 printf("%5.2f", stddev);
2973 printf(" %5" PRIu64, r->migrations);
2974 printf("\n");
2975 }
2976
2977 static void print_thread_waittime(struct thread *t,
2978 struct thread_runtime *r)
2979 {
2980 printf("%*s %5d %9" PRIu64 " ",
2981 comm_width, timehist_get_commstr(t), thread__ppid(t),
2982 (u64) r->run_stats.n);
2983
2984 print_sched_time(r->total_run_time, 8);
2985 print_sched_time(r->total_sleep_time, 6);
2986 printf(" ");
2987 print_sched_time(r->total_iowait_time, 6);
2988 printf(" ");
2989 print_sched_time(r->total_preempt_time, 6);
2990 printf(" ");
2991 print_sched_time(r->total_delay_time, 6);
2992 printf("\n");
2993 }
2994
2995 struct total_run_stats {
2996 struct perf_sched *sched;
2997 u64 sched_count;
2998 u64 task_count;
2999 u64 total_run_time;
3000 };
3001
3002 static int show_thread_runtime(struct thread *t, void *priv)
3003 {
3004 struct total_run_stats *stats = priv;
3005 struct thread_runtime *r;
3006
3007 if (thread__is_filtered(t))
3008 return 0;
3009
3010 r = thread__priv(t);
3011 if (r && r->run_stats.n) {
3012 stats->task_count++;
3013 stats->sched_count += r->run_stats.n;
3014 stats->total_run_time += r->total_run_time;
3015
3016 if (stats->sched->show_state)
3017 print_thread_waittime(t, r);
3018 else
3019 print_thread_runtime(t, r);
3020 }
3021
3022 return 0;
3023 }
3024
3025 static size_t callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
3026 {
3027 const char *sep = " <- ";
3028 struct callchain_list *chain;
3029 size_t ret = 0;
3030 char bf[1024];
3031 bool first;
3032
3033 if (node == NULL)
3034 return 0;
3035
3036 ret = callchain__fprintf_folded(fp, node->parent);
3037 first = (ret == 0);
3038
3039 list_for_each_entry(chain, &node->val, list) {
3040 if (chain->ip >= PERF_CONTEXT_MAX)
3041 continue;
3042 if (chain->ms.sym && chain->ms.sym->ignore)
3043 continue;
3044 ret += fprintf(fp, "%s%s", first ? "" : sep,
3045 callchain_list__sym_name(chain, bf, sizeof(bf),
3046 false));
3047 first = false;
3048 }
3049
3050 return ret;
3051 }
3052
3053 static size_t timehist_print_idlehist_callchain(struct rb_root_cached *root)
3054 {
3055 size_t ret = 0;
3056 FILE *fp = stdout;
3057 struct callchain_node *chain;
3058 struct rb_node *rb_node = rb_first_cached(root);
3059
3060 printf(" %16s %8s %s\n", "Idle time (msec)", "Count", "Callchains");
3061 printf(" %.16s %.8s %.50s\n", graph_dotted_line, graph_dotted_line,
3062 graph_dotted_line);
3063
3064 while (rb_node) {
3065 chain = rb_entry(rb_node, struct callchain_node, rb_node);
3066 rb_node = rb_next(rb_node);
3067
3068 ret += fprintf(fp, " ");
3069 print_sched_time(chain->hit, 12);
3070 ret += 16; /* print_sched_time prints its width arg + 4 chars */
3071 ret += fprintf(fp, " %8d ", chain->count);
3072 ret += callchain__fprintf_folded(fp, chain);
3073 ret += fprintf(fp, "\n");
3074 }
3075
3076 return ret;
3077 }
3078
3079 static void timehist_print_summary(struct perf_sched *sched,
3080 struct perf_session *session)
3081 {
3082 struct machine *m = &session->machines.host;
3083 struct total_run_stats totals;
3084 u64 task_count;
3085 struct thread *t;
3086 struct thread_runtime *r;
3087 int i;
3088 u64 hist_time = sched->hist_time.end - sched->hist_time.start;
3089
3090 memset(&totals, 0, sizeof(totals));
3091 totals.sched = sched;
3092
3093 if (sched->idle_hist) {
3094 printf("\nIdle-time summary\n");
3095 printf("%*s parent sched-out ", comm_width, "comm");
3096 printf(" idle-time min-idle avg-idle max-idle stddev migrations\n");
3097 } else if (sched->show_state) {
3098 printf("\nWait-time summary\n");
3099 printf("%*s parent sched-in ", comm_width, "comm");
3100 printf(" run-time sleep iowait preempt delay\n");
3101 } else {
3102 printf("\nRuntime summary\n");
3103 printf("%*s parent sched-in ", comm_width, "comm");
3104 printf(" run-time min-run avg-run max-run stddev migrations\n");
3105 }
3106 printf("%*s (count) ", comm_width, "");
3107 printf(" (msec) (msec) (msec) (msec) %s\n",
3108 sched->show_state ? "(msec)" : "%");
3109 printf("%.117s\n", graph_dotted_line);
3110
3111 machine__for_each_thread(m, show_thread_runtime, &totals);
3112 task_count = totals.task_count;
3113 if (!task_count)
3114 printf("<no still running tasks>\n");
3115
3116 /* CPU idle stats not tracked when samples were skipped */
3117 if (sched->skipped_samples && !sched->idle_hist)
3118 return;
3119
3120 printf("\nIdle stats:\n");
3121 for (i = 0; i < idle_max_cpu; ++i) {
3122 if (cpu_list && !test_bit(i, cpu_bitmap))
3123 continue;
3124
3125 t = idle_threads[i];
3126 if (!t)
3127 continue;
3128
3129 r = thread__priv(t);
3130 if (r && r->run_stats.n) {
3131 totals.sched_count += r->run_stats.n;
3132 printf(" CPU %2d idle for ", i);
3133 print_sched_time(r->total_run_time, 6);
3134 printf(" msec (%6.2f%%)\n", 100.0 * r->total_run_time / hist_time);
3135 } else
3136 printf(" CPU %2d idle entire time window\n", i);
3137 }
3138
3139 if (sched->idle_hist && sched->show_callchain) {
3140 callchain_param.mode = CHAIN_FOLDED;
3141 callchain_param.value = CCVAL_PERIOD;
3142
3143 callchain_register_param(&callchain_param);
3144
3145 printf("\nIdle stats by callchain:\n");
3146 for (i = 0; i < idle_max_cpu; ++i) {
3147 struct idle_thread_runtime *itr;
3148
3149 t = idle_threads[i];
3150 if (!t)
3151 continue;
3152
3153 itr = thread__priv(t);
3154 if (itr == NULL)
3155 continue;
3156
3157 callchain_param.sort(&itr->sorted_root.rb_root, &itr->callchain,
3158 0, &callchain_param);
3159
3160 printf(" CPU %2d:", i);
3161 print_sched_time(itr->tr.total_run_time, 6);
3162 printf(" msec\n");
3163 timehist_print_idlehist_callchain(&itr->sorted_root);
3164 printf("\n");
3165 }
3166 }
3167
3168 printf("\n"
3169 " Total number of unique tasks: %" PRIu64 "\n"
3170 "Total number of context switches: %" PRIu64 "\n",
3171 totals.task_count, totals.sched_count);
3172
3173 printf(" Total run time (msec): ");
3174 print_sched_time(totals.total_run_time, 2);
3175 printf("\n");
3176
3177 printf(" Total scheduling time (msec): ");
3178 print_sched_time(hist_time, 2);
3179 printf(" (x %d)\n", sched->max_cpu.cpu);
3180 }
3181
3182 typedef int (*sched_handler)(const struct perf_tool *tool,
3183 union perf_event *event,
3184 struct evsel *evsel,
3185 struct perf_sample *sample,
3186 struct machine *machine);
3187
3188 static int perf_timehist__process_sample(const struct perf_tool *tool,
3189 union perf_event *event,
3190 struct perf_sample *sample,
3191 struct evsel *evsel,
3192 struct machine *machine)
3193 {
3194 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
3195 int err = 0;
3196 struct perf_cpu this_cpu = {
3197 .cpu = sample->cpu,
3198 };
3199
3200 if (this_cpu.cpu > sched->max_cpu.cpu)
3201 sched->max_cpu = this_cpu;
3202
3203 if (evsel->handler != NULL) {
3204 sched_handler f = evsel->handler;
3205
3206 err = f(tool, event, evsel, sample, machine);
3207 }
3208
3209 return err;
3210 }
3211
3212 static int timehist_check_attr(struct perf_sched *sched,
3213 struct evlist *evlist)
3214 {
3215 struct evsel *evsel;
3216 struct evsel_runtime *er;
3217
3218 list_for_each_entry(evsel, &evlist->core.entries, core.node) {
3219 er = evsel__get_runtime(evsel);
3220 if (er == NULL) {
3221 pr_err("Failed to allocate memory for evsel runtime data\n");
3222 return -1;
3223 }
3224
3225 /* only need to save callchain related to sched_switch event */
3226 if (sched->show_callchain &&
3227 evsel__name_is(evsel, "sched:sched_switch") &&
3228 !evsel__has_callchain(evsel)) {
3229 pr_info("Samples of sched_switch event do not have callchains.\n");
3230 sched->show_callchain = 0;
3231 symbol_conf.use_callchain = 0;
3232 }
3233 }
3234
3235 return 0;
3236 }
3237
3238 static int timehist_parse_prio_str(struct perf_sched *sched)
3239 {
3240 char *p;
3241 unsigned long start_prio, end_prio;
3242 const char *str = sched->prio_str;
3243
3244 if (!str)
3245 return 0;
3246
3247 while (isdigit(*str)) {
3248 p = NULL;
3249 start_prio = strtoul(str, &p, 0);
3250 if (start_prio >= MAX_PRIO || (*p != '\0' && *p != ',' && *p != '-'))
3251 return -1;
3252
3253 if (*p == '-') {
3254 str = ++p;
3255 p = NULL;
3256 end_prio = strtoul(str, &p, 0);
3257
3258 if (end_prio >= MAX_PRIO || (*p != '\0' && *p != ','))
3259 return -1;
3260
3261 if (end_prio < start_prio)
3262 return -1;
3263 } else {
3264 end_prio = start_prio;
3265 }
3266
3267 for (; start_prio <= end_prio; start_prio++)
3268 __set_bit(start_prio, sched->prio_bitmap);
3269
3270 if (*p)
3271 ++p;
3272
3273 str = p;
3274 }
3275
3276 return 0;
3277 }
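/*
 * Editor's example: --prio 9,120-122 sets bits 9, 120, 121 and 122 in
 * prio_bitmap; malformed strings such as "5-3" (reversed range) or
 * "300" (>= MAX_PRIO) make the checks above return -1.
 */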
3278
3279 static int perf_sched__timehist(struct perf_sched *sched)
3280 {
3281 struct evsel_str_handler handlers[] = {
3282 { "sched:sched_switch", timehist_sched_switch_event, },
3283 { "sched:sched_wakeup", timehist_sched_wakeup_event, },
3284 { "sched:sched_waking", timehist_sched_wakeup_event, },
3285 { "sched:sched_wakeup_new", timehist_sched_wakeup_event, },
3286 };
3287 const struct evsel_str_handler migrate_handlers[] = {
3288 { "sched:sched_migrate_task", timehist_migrate_task_event, },
3289 };
3290 struct perf_data data = {
3291 .path = input_name,
3292 .mode = PERF_DATA_MODE_READ,
3293 .force = sched->force,
3294 };
3295
3296 struct perf_session *session;
3297 struct perf_env *env;
3298 struct evlist *evlist;
3299 int err = -1;
3300
3301 /*
3302 * event handlers for timehist option
3303 */
3304 sched->tool.sample = perf_timehist__process_sample;
3305 sched->tool.mmap = perf_event__process_mmap;
3306 sched->tool.comm = perf_event__process_comm;
3307 sched->tool.exit = perf_event__process_exit;
3308 sched->tool.fork = perf_event__process_fork;
3309 sched->tool.lost = process_lost;
3310 sched->tool.attr = perf_event__process_attr;
3311 sched->tool.tracing_data = perf_event__process_tracing_data;
3312 sched->tool.build_id = perf_event__process_build_id;
3313
3314 sched->tool.ordering_requires_timestamps = true;
3315
3316 symbol_conf.use_callchain = sched->show_callchain;
3317
3318 session = perf_session__new(&data, &sched->tool);
3319 if (IS_ERR(session))
3320 return PTR_ERR(session);
3321
3322 env = perf_session__env(session);
3323 if (cpu_list) {
3324 err = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap);
3325 if (err < 0)
3326 goto out;
3327 }
3328
3329 evlist = session->evlist;
3330
3331 symbol__init(env);
3332
3333 if (perf_time__parse_str(&sched->ptime, sched->time_str) != 0) {
3334 pr_err("Invalid time string\n");
3335 err = -EINVAL;
3336 goto out;
3337 }
3338
3339 if (timehist_check_attr(sched, evlist) != 0)
3340 goto out;
3341
3342 if (timehist_parse_prio_str(sched) != 0) {
3343 pr_err("Invalid prio string\n");
3344 goto out;
3345 }
3346
3347 setup_pager();
3348
3349 evsel__set_priv_destructor(timehist__evsel_priv_destructor);
3350
3351 /* prefer sched_waking if it is captured */
3352 if (evlist__find_tracepoint_by_name(session->evlist, "sched:sched_waking"))
3353 handlers[1].handler = timehist_sched_wakeup_ignore;
3354
3355 /* setup per-evsel handlers */
3356 if (perf_session__set_tracepoints_handlers(session, handlers))
3357 goto out;
3358
3359 /* sched_switch event at a minimum needs to exist */
3360 if (!evlist__find_tracepoint_by_name(session->evlist, "sched:sched_switch")) {
3361 pr_err("No sched_switch events found. Have you run 'perf sched record'?\n");
3362 goto out;
3363 }
3364
3365 if ((sched->show_migrations || sched->pre_migrations) &&
3366 perf_session__set_tracepoints_handlers(session, migrate_handlers))
3367 goto out;
3368
3369 /* pre-allocate struct for per-CPU idle stats */
3370 sched->max_cpu.cpu = env->nr_cpus_online;
3371 if (sched->max_cpu.cpu == 0)
3372 sched->max_cpu.cpu = 4;
3373 if (init_idle_threads(sched->max_cpu.cpu))
3374 goto out;
3375
3376 /* summary_only implies summary option, but don't overwrite summary if set */
3377 if (sched->summary_only)
3378 sched->summary = sched->summary_only;
3379
3380 if (!sched->summary_only)
3381 timehist_header(sched);
3382
3383 err = perf_session__process_events(session);
3384 if (err) {
3385 pr_err("Failed to process events, error %d\n", err);
3386 goto out;
3387 }
3388
3389 sched->nr_events = evlist->stats.nr_events[0];
3390 sched->nr_lost_events = evlist->stats.total_lost;
3391 sched->nr_lost_chunks = evlist->stats.nr_events[PERF_RECORD_LOST];
3392
3393 if (sched->summary)
3394 timehist_print_summary(sched, session);
3395
3396 out:
3397 free_idle_threads();
3398 perf_session__delete(session);
3399
3400 return err;
3401 }
3402
3403
3404 static void print_bad_events(struct perf_sched *sched)
3405 {
3406 if (sched->nr_unordered_timestamps && sched->nr_timestamps) {
3407 printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
3408 (double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0,
3409 sched->nr_unordered_timestamps, sched->nr_timestamps);
3410 }
3411 if (sched->nr_lost_events && sched->nr_events) {
3412 printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
3413 (double)sched->nr_lost_events/(double)sched->nr_events * 100.0,
3414 sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks);
3415 }
3416 if (sched->nr_context_switch_bugs && sched->nr_timestamps) {
3417 printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)",
3418 (double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0,
3419 sched->nr_context_switch_bugs, sched->nr_timestamps);
3420 if (sched->nr_lost_events)
3421 printf(" (due to lost events?)");
3422 printf("\n");
3423 }
3424 }
3425
3426 static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *data)
3427 {
3428 struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
3429 struct work_atoms *this;
3430 const char *comm = thread__comm_str(data->thread), *this_comm;
3431 bool leftmost = true;
3432
3433 while (*new) {
3434 int cmp;
3435
3436 this = container_of(*new, struct work_atoms, node);
3437 parent = *new;
3438
3439 this_comm = thread__comm_str(this->thread);
3440 cmp = strcmp(comm, this_comm);
3441 if (cmp > 0) {
3442 new = &((*new)->rb_left);
3443 } else if (cmp < 0) {
3444 new = &((*new)->rb_right);
3445 leftmost = false;
3446 } else {
3447 this->num_merged++;
3448 this->total_runtime += data->total_runtime;
3449 this->nb_atoms += data->nb_atoms;
3450 this->total_lat += data->total_lat;
3451 list_splice_init(&data->work_list, &this->work_list);
3452 if (this->max_lat < data->max_lat) {
3453 this->max_lat = data->max_lat;
3454 this->max_lat_start = data->max_lat_start;
3455 this->max_lat_end = data->max_lat_end;
3456 }
3457 free_work_atoms(data);
3458 return;
3459 }
3460 }
3461
3462 data->num_merged++;
3463 rb_link_node(&data->node, parent, new);
3464 rb_insert_color_cached(&data->node, root, leftmost);
3465 }
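/*
 * Editor's example: two threads both named "gcc" end up in a single
 * rb-tree node whose runtime, latency and atom counts are summed and
 * whose num_merged is 2, which the latency table can then report as
 * "gcc:(2)".
 */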
3466
3467 static void perf_sched__merge_lat(struct perf_sched *sched)
3468 {
3469 struct work_atoms *data;
3470 struct rb_node *node;
3471
3472 if (sched->skip_merge)
3473 return;
3474
3475 while ((node = rb_first_cached(&sched->atom_root))) {
3476 rb_erase_cached(node, &sched->atom_root);
3477 data = rb_entry(node, struct work_atoms, node);
3478 __merge_work_atoms(&sched->merged_atom_root, data);
3479 }
3480 }
3481
3482 static int setup_cpus_switch_event(struct perf_sched *sched)
3483 {
3484 unsigned int i;
3485
3486 sched->cpu_last_switched = calloc(MAX_CPUS, sizeof(*(sched->cpu_last_switched)));
3487 if (!sched->cpu_last_switched)
3488 return -1;
3489
3490 sched->curr_pid = malloc(MAX_CPUS * sizeof(*(sched->curr_pid)));
3491 if (!sched->curr_pid) {
3492 zfree(&sched->cpu_last_switched);
3493 return -1;
3494 }
3495
3496 for (i = 0; i < MAX_CPUS; i++)
3497 sched->curr_pid[i] = -1;
3498
3499 return 0;
3500 }
3501
3502 static void free_cpus_switch_event(struct perf_sched *sched)
3503 {
3504 zfree(&sched->curr_pid);
3505 zfree(&sched->cpu_last_switched);
3506 }
3507
3508 static int perf_sched__lat(struct perf_sched *sched)
3509 {
3510 int rc = -1;
3511 struct rb_node *next;
3512
3513 setup_pager();
3514
3515 if (setup_cpus_switch_event(sched))
3516 return rc;
3517
3518 if (perf_sched__read_events(sched))
3519 goto out_free_cpus_switch_event;
3520
3521 perf_sched__merge_lat(sched);
3522 perf_sched__sort_lat(sched);
3523
3524 printf("\n -------------------------------------------------------------------------------------------------------------------------------------------\n");
3525 printf(" Task | Runtime ms | Count | Avg delay ms | Max delay ms | Max delay start | Max delay end |\n");
3526 printf(" -------------------------------------------------------------------------------------------------------------------------------------------\n");
3527
3528 next = rb_first_cached(&sched->sorted_atom_root);
3529
3530 while (next) {
3531 struct work_atoms *work_list;
3532
3533 work_list = rb_entry(next, struct work_atoms, node);
3534 output_lat_thread(sched, work_list);
3535 next = rb_next(next);
3536 }
3537
3538 printf(" -----------------------------------------------------------------------------------------------------------------\n");
3539 printf(" TOTAL: |%11.3f ms |%9" PRIu64 " |\n",
3540 (double)sched->all_runtime / NSEC_PER_MSEC, sched->all_count);
3541
3542 printf(" ---------------------------------------------------\n");
3543
3544 print_bad_events(sched);
3545 printf("\n");
3546
3547 rc = 0;
3548
3549 while ((next = rb_first_cached(&sched->sorted_atom_root))) {
3550 struct work_atoms *data;
3551
3552 data = rb_entry(next, struct work_atoms, node);
3553 rb_erase_cached(next, &sched->sorted_atom_root);
3554 free_work_atoms(data);
3555 }
3556 out_free_cpus_switch_event:
3557 free_cpus_switch_event(sched);
3558 return rc;
3559 }
3560
3561 static int setup_map_cpus(struct perf_sched *sched)
3562 {
3563 sched->max_cpu.cpu = sysconf(_SC_NPROCESSORS_CONF);
3564
3565 if (sched->map.comp) {
3566 sched->map.comp_cpus = zalloc(sched->max_cpu.cpu * sizeof(int));
3567 if (!sched->map.comp_cpus)
3568 return -1;
3569 }
3570
3571 if (sched->map.cpus_str) {
3572 sched->map.cpus = perf_cpu_map__new(sched->map.cpus_str);
3573 if (!sched->map.cpus) {
3574 pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
3575 zfree(&sched->map.comp_cpus);
3576 return -1;
3577 }
3578 }
3579
3580 return 0;
3581 }
3582
3583 static int setup_color_pids(struct perf_sched *sched)
3584 {
3585 struct perf_thread_map *map;
3586
3587 if (!sched->map.color_pids_str)
3588 return 0;
3589
3590 map = thread_map__new_by_tid_str(sched->map.color_pids_str);
3591 if (!map) {
3592 pr_err("failed to get thread map from %s\n", sched->map.color_pids_str);
3593 return -1;
3594 }
3595
3596 sched->map.color_pids = map;
3597 return 0;
3598 }
3599
3600 static int setup_color_cpus(struct perf_sched *sched)
3601 {
3602 struct perf_cpu_map *map;
3603
3604 if (!sched->map.color_cpus_str)
3605 return 0;
3606
3607 map = perf_cpu_map__new(sched->map.color_cpus_str);
3608 if (!map) {
3609 pr_err("failed to get cpu map from %s\n", sched->map.color_cpus_str);
3610 return -1;
3611 }
3612
3613 sched->map.color_cpus = map;
3614 return 0;
3615 }
3616
3617 static int perf_sched__map(struct perf_sched *sched)
3618 {
3619 int rc = -1;
3620
3621 sched->curr_thread = calloc(MAX_CPUS, sizeof(*(sched->curr_thread)));
3622 if (!sched->curr_thread)
3623 return rc;
3624
3625 sched->curr_out_thread = calloc(MAX_CPUS, sizeof(*(sched->curr_out_thread)));
3626 if (!sched->curr_out_thread)
3627 goto out_free_curr_thread;
3628
3629 if (setup_cpus_switch_event(sched))
3630 goto out_free_curr_out_thread;
3631
3632 if (setup_map_cpus(sched))
3633 goto out_free_cpus_switch_event;
3634
3635 if (setup_color_pids(sched))
3636 goto out_put_map_cpus;
3637
3638 if (setup_color_cpus(sched))
3639 goto out_put_color_pids;
3640
3641 setup_pager();
3642 if (perf_sched__read_events(sched))
3643 goto out_put_color_cpus;
3644
3645 rc = 0;
3646 print_bad_events(sched);
3647
3648 out_put_color_cpus:
3649 perf_cpu_map__put(sched->map.color_cpus);
3650
3651 out_put_color_pids:
3652 perf_thread_map__put(sched->map.color_pids);
3653
3654 out_put_map_cpus:
3655 zfree(&sched->map.comp_cpus);
3656 perf_cpu_map__put(sched->map.cpus);
3657
3658 out_free_cpus_switch_event:
3659 free_cpus_switch_event(sched);
3660
3661 out_free_curr_out_thread:
3662 for (int i = 0; i < MAX_CPUS; i++)
3663 thread__put(sched->curr_out_thread[i]);
3664 zfree(&sched->curr_out_thread);
3665
3666 out_free_curr_thread:
3667 for (int i = 0; i < MAX_CPUS; i++)
3668 thread__put(sched->curr_thread[i]);
3669 zfree(&sched->curr_thread);
3670 return rc;
3671 }
3672
3673 static int perf_sched__replay(struct perf_sched *sched)
3674 {
3675 int ret;
3676 unsigned long i;
3677
3678 mutex_init(&sched->start_work_mutex);
3679 mutex_init(&sched->work_done_wait_mutex);
3680
3681 ret = setup_cpus_switch_event(sched);
3682 if (ret)
3683 goto out_mutex_destroy;
3684
3685 calibrate_run_measurement_overhead(sched);
3686 calibrate_sleep_measurement_overhead(sched);
3687
3688 test_calibrations(sched);
3689
3690 ret = perf_sched__read_events(sched);
3691 if (ret)
3692 goto out_free_cpus_switch_event;
3693
3694 printf("nr_run_events: %ld\n", sched->nr_run_events);
3695 printf("nr_sleep_events: %ld\n", sched->nr_sleep_events);
3696 printf("nr_wakeup_events: %ld\n", sched->nr_wakeup_events);
3697
3698 if (sched->targetless_wakeups)
3699 printf("target-less wakeups: %ld\n", sched->targetless_wakeups);
3700 if (sched->multitarget_wakeups)
3701 printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups);
3702 if (sched->nr_run_events_optimized)
3703 printf("run atoms optimized: %ld\n",
3704 sched->nr_run_events_optimized);
3705
3706 print_task_traces(sched);
3707 add_cross_task_wakeups(sched);
3708
3709 sched->thread_funcs_exit = false;
3710 create_tasks(sched);
3711 printf("------------------------------------------------------------\n");
3712 if (sched->replay_repeat == 0)
3713 sched->replay_repeat = UINT_MAX;
3714
3715 for (i = 0; i < sched->replay_repeat; i++)
3716 run_one_test(sched);
3717
3718 sched->thread_funcs_exit = true;
3719 destroy_tasks(sched);
3720
3721 out_free_cpus_switch_event:
3722 free_cpus_switch_event(sched);
3723
3724 out_mutex_destroy:
3725 mutex_destroy(&sched->start_work_mutex);
3726 mutex_destroy(&sched->work_done_wait_mutex);
3727 return ret;
3728 }
3729
3730 static void setup_sorting(struct perf_sched *sched, const struct option *options,
3731 const char * const usage_msg[])
3732 {
        char *tmp, *tok, *str = strdup(sched->sort_order);

        /* Defensive: strtok_r() must not be handed the result of a failed strdup(). */
        if (!str)
                goto out;

        for (tok = strtok_r(str, ", ", &tmp);
             tok; tok = strtok_r(NULL, ", ", &tmp)) {
                if (sort_dimension__add(tok, &sched->sort_list) < 0) {
                        usage_with_options_msg(usage_msg, options,
                                        "Unknown --sort key: `%s'", tok);
                }
        }

        free(str);
out:
        sort_dimension__add("pid", &sched->cmp_pid);
}

static bool schedstat_events_exposed(void)
{
        /*
         * Use the "sched:sched_stat_wait" event to check whether the
         * schedstat tracepoints are exposed at all: trace_event__tp_format()
         * returns an error pointer when the tracepoint's format is
         * unavailable (e.g. CONFIG_SCHEDSTATS is not set).
         */
        return !IS_ERR(trace_event__tp_format("sched", "sched_stat_wait"));
}

static int __cmd_record(int argc, const char **argv)
{
        unsigned int rec_argc, i, j;
        char **rec_argv;
        const char **rec_argv_copy;
        const char * const record_args[] = {
                "record",
                "-a",
                "-R",
                "-m", "1024",
                "-c", "1",
                "-e", "sched:sched_switch",
                "-e", "sched:sched_stat_runtime",
                "-e", "sched:sched_process_fork",
                "-e", "sched:sched_wakeup_new",
                "-e", "sched:sched_migrate_task",
        };

        /*
         * The tracepoints trace_sched_stat_{wait, sleep, iowait} are not
         * exposed to user space when CONFIG_SCHEDSTATS is not set. To keep
         * "perf sched record" from failing in that case, only request the
         * schedstat events when they are actually available.
         */
        const char * const schedstat_args[] = {
                "-e", "sched:sched_stat_wait",
                "-e", "sched:sched_stat_sleep",
                "-e", "sched:sched_stat_iowait",
        };
        unsigned int schedstat_argc = schedstat_events_exposed() ?
                                      ARRAY_SIZE(schedstat_args) : 0;

        struct tep_event *waking_event;
        int ret;

        /*
         * +2 for either "-e", "sched:sched_wakeup" or
         * "-e", "sched:sched_waking"
         */
        rec_argc = ARRAY_SIZE(record_args) + 2 + schedstat_argc + argc - 1;
        rec_argv = calloc(rec_argc + 1, sizeof(char *));
        if (rec_argv == NULL)
                return -ENOMEM;
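
        /*
         * cmd_record() may rearrange the argv array it is given while
         * parsing options, so pass it a disposable copy and keep rec_argv
         * intact so every strdup()ed string can still be freed afterwards.
         */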
        rec_argv_copy = calloc(rec_argc + 1, sizeof(char *));
        if (rec_argv_copy == NULL) {
                free(rec_argv);
                return -ENOMEM;
        }

        for (i = 0; i < ARRAY_SIZE(record_args); i++)
                rec_argv[i] = strdup(record_args[i]);

        rec_argv[i++] = strdup("-e");
        waking_event = trace_event__tp_format("sched", "sched_waking");
        if (!IS_ERR(waking_event))
                rec_argv[i++] = strdup("sched:sched_waking");
        else
                rec_argv[i++] = strdup("sched:sched_wakeup");

        for (j = 0; j < schedstat_argc; j++)
                rec_argv[i++] = strdup(schedstat_args[j]);

        for (j = 1; j < (unsigned int)argc; j++, i++)
                rec_argv[i] = strdup(argv[j]);

        BUG_ON(i != rec_argc);

        memcpy(rec_argv_copy, rec_argv, sizeof(char *) * rec_argc);
        ret = cmd_record(rec_argc, rec_argv_copy);

        for (i = 0; i < rec_argc; i++)
                free(rec_argv[i]);
        free(rec_argv);
        free(rec_argv_copy);

        return ret;
}
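
/*
 * For illustration, when the schedstat tracepoints are unavailable the
 * command line assembled above is equivalent to:
 *
 *   perf record -a -R -m 1024 -c 1 \
 *      -e sched:sched_switch -e sched:sched_stat_runtime \
 *      -e sched:sched_process_fork -e sched:sched_wakeup_new \
 *      -e sched:sched_migrate_task -e sched:sched_waking <user args>
 *
 * with sched:sched_wakeup substituted when sched_waking does not exist.
 */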

int cmd_sched(int argc, const char **argv)
{
        static const char default_sort_order[] = "avg, max, switch, runtime";
        struct perf_sched sched = {
                .cmp_pid         = LIST_HEAD_INIT(sched.cmp_pid),
                .sort_list       = LIST_HEAD_INIT(sched.sort_list),
                .sort_order      = default_sort_order,
                .replay_repeat   = 10,
                .profile_cpu     = -1,
                .next_shortname1 = 'A',
                .next_shortname2 = '0',
                .skip_merge      = 0,
                .show_callchain  = 1,
                .max_stack       = 5,
        };
        const struct option sched_options[] = {
        OPT_STRING('i', "input", &input_name, "file",
                   "input file name"),
        OPT_INCR('v', "verbose", &verbose,
                 "be more verbose (show symbol address, etc)"),
        OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
                    "dump raw trace in ASCII"),
        OPT_BOOLEAN('f', "force", &sched.force, "don't complain, do it"),
        OPT_END()
        };
        const struct option latency_options[] = {
        OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
                   "sort by key(s): runtime, switch, avg, max"),
        OPT_INTEGER('C', "CPU", &sched.profile_cpu,
                    "CPU to profile on"),
        OPT_BOOLEAN('p', "pids", &sched.skip_merge,
                    "latency stats per pid instead of per comm"),
        OPT_PARENT(sched_options)
        };
        const struct option replay_options[] = {
        OPT_UINTEGER('r', "repeat", &sched.replay_repeat,
                     "repeat the workload replay N times (0: infinite)"),
        OPT_PARENT(sched_options)
        };
        const struct option map_options[] = {
        OPT_BOOLEAN(0, "compact", &sched.map.comp,
                    "map output in compact mode"),
        OPT_STRING(0, "color-pids", &sched.map.color_pids_str, "pids",
                   "highlight given pids in map"),
        OPT_STRING(0, "color-cpus", &sched.map.color_cpus_str, "cpus",
                   "highlight given CPUs in map"),
        OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus",
                   "display given CPUs in map"),
        OPT_STRING(0, "task-name", &sched.map.task_name, "task",
                   "map output only for the given task name(s)."),
        OPT_BOOLEAN(0, "fuzzy-name", &sched.map.fuzzy,
                    "given command name can be partially matched (fuzzy matching)"),
        OPT_PARENT(sched_options)
        };
        const struct option timehist_options[] = {
        OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
                   "file", "vmlinux pathname"),
        OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
                   "file", "kallsyms pathname"),
        OPT_BOOLEAN('g', "call-graph", &sched.show_callchain,
                    "Display call chains if present (default on)"),
        OPT_UINTEGER(0, "max-stack", &sched.max_stack,
                     "Maximum number of functions to display in backtrace"),
        OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
                   "Look for files with symbols relative to this directory"),
        OPT_BOOLEAN('s', "summary", &sched.summary_only,
                    "Show only the summary with statistics"),
        OPT_BOOLEAN('S', "with-summary", &sched.summary,
                    "Show all events and the summary with statistics"),
        OPT_BOOLEAN('w', "wakeups", &sched.show_wakeups, "Show wakeup events"),
        OPT_BOOLEAN('n', "next", &sched.show_next, "Show next task"),
        OPT_BOOLEAN('M', "migrations", &sched.show_migrations, "Show migration events"),
        OPT_BOOLEAN('V', "cpu-visual", &sched.show_cpu_visual, "Add CPU visual"),
        OPT_BOOLEAN('I', "idle-hist", &sched.idle_hist, "Show idle events only"),
        OPT_STRING(0, "time", &sched.time_str, "str",
                   "Time span for analysis (start,stop)"),
        OPT_BOOLEAN(0, "state", &sched.show_state, "Show task state when sched-out"),
        OPT_STRING('p', "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
                   "analyze events only for given process id(s)"),
        OPT_STRING('t', "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
                   "analyze events only for given thread id(s)"),
        OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
        OPT_BOOLEAN(0, "show-prio", &sched.show_prio, "Show task priority"),
        OPT_STRING(0, "prio", &sched.prio_str, "prio",
                   "analyze events only for given task priority(ies)"),
        OPT_BOOLEAN('P', "pre-migrations", &sched.pre_migrations, "Show pre-migration wait time"),
        OPT_PARENT(sched_options)
        };

        const char * const latency_usage[] = {
                "perf sched latency [<options>]",
                NULL
        };
        const char * const replay_usage[] = {
                "perf sched replay [<options>]",
                NULL
        };
        const char * const map_usage[] = {
                "perf sched map [<options>]",
                NULL
        };
        const char * const timehist_usage[] = {
                "perf sched timehist [<options>]",
                NULL
        };
        const char *const sched_subcommands[] = { "record", "latency", "map",
                                                  "replay", "script",
                                                  "timehist", NULL };
        const char *sched_usage[] = {
                NULL,
                NULL
        };
        struct trace_sched_handler lat_ops = {
                .wakeup_event       = latency_wakeup_event,
                .switch_event       = latency_switch_event,
                .runtime_event      = latency_runtime_event,
                .migrate_task_event = latency_migrate_task_event,
        };
        struct trace_sched_handler map_ops = {
                .switch_event = map_switch_event,
        };
        struct trace_sched_handler replay_ops = {
                .wakeup_event = replay_wakeup_event,
                .switch_event = replay_switch_event,
                .fork_event   = replay_fork_event,
        };
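
        /*
         * Each sub-command installs one of the handler tables above;
         * perf_sched__process_tracepoint_sample() then dispatches the
         * recorded sched tracepoints to whichever table sched.tp_handler
         * points at.
         */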
        int ret;

        perf_tool__init(&sched.tool, /*ordered_events=*/true);
        sched.tool.sample = perf_sched__process_tracepoint_sample;
        sched.tool.comm = perf_sched__process_comm;
        sched.tool.namespaces = perf_event__process_namespaces;
        sched.tool.lost = perf_event__process_lost;
        sched.tool.fork = perf_sched__process_fork_event;

        argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
                                        sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
        if (!argc)
                usage_with_options(sched_usage, sched_options);

        thread__set_priv_destructor(free);

        /*
         * "script" is aliased to 'perf script' for now. The remaining
         * sub-commands may be abbreviated to any prefix longer than two
         * characters, e.g. "rec" for "record" or "lat" for "latency".
         */
        if (!strcmp(argv[0], "script")) {
                ret = cmd_script(argc, argv);
        } else if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
                ret = __cmd_record(argc, argv);
        } else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
                sched.tp_handler = &lat_ops;
                if (argc > 1) {
                        argc = parse_options(argc, argv, latency_options, latency_usage, 0);
                        if (argc)
                                usage_with_options(latency_usage, latency_options);
                }
                setup_sorting(&sched, latency_options, latency_usage);
                ret = perf_sched__lat(&sched);
        } else if (!strcmp(argv[0], "map")) {
                if (argc) {
                        argc = parse_options(argc, argv, map_options, map_usage, 0);
                        if (argc)
                                usage_with_options(map_usage, map_options);

                        if (sched.map.task_name) {
                                sched.map.task_names = strlist__new(sched.map.task_name, NULL);
                                if (sched.map.task_names == NULL) {
                                        fprintf(stderr, "Failed to parse task names\n");
                                        ret = -1;
                                        goto out;
                                }
                        }
                }
                sched.tp_handler = &map_ops;
                setup_sorting(&sched, latency_options, latency_usage);
                ret = perf_sched__map(&sched);
        } else if (strlen(argv[0]) > 2 && strstarts("replay", argv[0])) {
                sched.tp_handler = &replay_ops;
                if (argc) {
                        argc = parse_options(argc, argv, replay_options, replay_usage, 0);
                        if (argc)
                                usage_with_options(replay_usage, replay_options);
                }
                ret = perf_sched__replay(&sched);
        } else if (!strcmp(argv[0], "timehist")) {
                if (argc) {
                        argc = parse_options(argc, argv, timehist_options,
                                             timehist_usage, 0);
                        if (argc)
                                usage_with_options(timehist_usage, timehist_options);
                }
                if ((sched.show_wakeups || sched.show_next) &&
                    sched.summary_only) {
                        pr_err(" Error: -s and -[n|w] are mutually exclusive.\n");
                        parse_options_usage(timehist_usage, timehist_options, "s", true);
                        if (sched.show_wakeups)
                                parse_options_usage(NULL, timehist_options, "w", true);
                        if (sched.show_next)
                                parse_options_usage(NULL, timehist_options, "n", true);
                        ret = -EINVAL;
                        goto out;
                }
                ret = symbol__validate_sym_arguments();
                if (!ret)
                        ret = perf_sched__timehist(&sched);
        } else {
                usage_with_options(sched_usage, sched_options);
        }

out:
        /* free usage string allocated by parse_options_subcommand */
        free((void *)sched_usage[0]);

        return ret;
}