// SPDX-License-Identifier: GPL-2.0
#include "builtin.h"
#include "perf-sys.h"

#include "util/cpumap.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/evsel_fprintf.h"
#include "util/mutex.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/cloexec.h"
#include "util/thread_map.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/string2.h"
#include "util/callchain.h"
#include "util/time-utils.h"

#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "util/trace-event.h"

#include "util/debug.h"
#include "util/event.h"
#include "util/util.h"

#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/zalloc.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <inttypes.h>

#include <errno.h>
#include <semaphore.h>
#include <pthread.h>
#include <math.h>
#include <api/fs/fs.h>
#include <perf/cpumap.h>
#include <linux/time64.h>
#include <linux/err.h>

#include <linux/ctype.h>

#define PR_SET_NAME	15	/* Set process name */
#define MAX_CPUS	4096
#define COMM_LEN	20
#define SYM_LEN		129
#define MAX_PID		1024000
#define MAX_PRIO	140

static const char *cpu_list;
static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);

struct sched_atom;

struct task_desc {
	unsigned long nr;
	unsigned long pid;
	char comm[COMM_LEN];

	unsigned long nr_events;
	unsigned long curr_event;
	struct sched_atom **atoms;

	pthread_t thread;
	sem_t sleep_sem;

	sem_t ready_for_work;
	sem_t work_done_sem;

	u64 cpu_usage;
};

enum sched_event_type {
	SCHED_EVENT_RUN,
	SCHED_EVENT_SLEEP,
	SCHED_EVENT_WAKEUP,
	SCHED_EVENT_MIGRATION,
};

struct sched_atom {
	enum sched_event_type type;
	int specific_wait;
	u64 timestamp;
	u64 duration;
	unsigned long nr;
	sem_t *wait_sem;
	struct task_desc *wakee;
};

enum thread_state {
	THREAD_SLEEPING = 0,
	THREAD_WAIT_CPU,
	THREAD_SCHED_IN,
	THREAD_IGNORE
};

struct work_atom {
	struct list_head list;
	enum thread_state state;
	u64 sched_out_time;
	u64 wake_up_time;
	u64 sched_in_time;
	u64 runtime;
};

struct work_atoms {
	struct list_head work_list;
	struct thread *thread;
	struct rb_node node;
	u64 max_lat;
	u64 max_lat_start;
	u64 max_lat_end;
	u64 total_lat;
	u64 nb_atoms;
	u64 total_runtime;
	int num_merged;
};

typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);

struct perf_sched;

struct trace_sched_handler {
	int (*switch_event)(struct perf_sched *sched, struct evsel *evsel,
			    struct perf_sample *sample, struct machine *machine);

	int (*runtime_event)(struct perf_sched *sched, struct evsel *evsel,
			     struct perf_sample *sample, struct machine *machine);

	int (*wakeup_event)(struct perf_sched *sched, struct evsel *evsel,
			    struct perf_sample *sample, struct machine *machine);

	/* PERF_RECORD_FORK event, not sched_process_fork tracepoint */
	int (*fork_event)(struct perf_sched *sched, union perf_event *event,
			  struct machine *machine);

	int (*migrate_task_event)(struct perf_sched *sched,
				  struct evsel *evsel,
				  struct perf_sample *sample,
				  struct machine *machine);
};

#define COLOR_PIDS PERF_COLOR_BLUE
#define COLOR_CPUS PERF_COLOR_BG_RED

struct perf_sched_map {
	DECLARE_BITMAP(comp_cpus_mask, MAX_CPUS);
	struct perf_cpu *comp_cpus;
	bool comp;
	struct perf_thread_map *color_pids;
	const char *color_pids_str;
	struct perf_cpu_map *color_cpus;
	const char *color_cpus_str;
	const char *task_name;
	struct strlist *task_names;
	bool fuzzy;
	struct perf_cpu_map *cpus;
	const char *cpus_str;
};

struct perf_sched {
	struct perf_tool tool;
	const char *sort_order;
	unsigned long nr_tasks;
	struct task_desc **pid_to_task;
	struct task_desc **tasks;
	const struct trace_sched_handler *tp_handler;
	struct mutex start_work_mutex;
	struct mutex work_done_wait_mutex;
	int profile_cpu;
	/*
	 * Track the current task - that way we can know whether there's any
	 * weird events, such as a task being switched away that is not current.
	 */
	struct perf_cpu max_cpu;
	u32 *curr_pid;
	struct thread **curr_thread;
	struct thread **curr_out_thread;
	char next_shortname1;
	char next_shortname2;
	unsigned int replay_repeat;
	unsigned long nr_run_events;
	unsigned long nr_sleep_events;
	unsigned long nr_wakeup_events;
	unsigned long nr_sleep_corrections;
	unsigned long nr_run_events_optimized;
	unsigned long targetless_wakeups;
	unsigned long multitarget_wakeups;
	unsigned long nr_runs;
	unsigned long nr_timestamps;
	unsigned long nr_unordered_timestamps;
	unsigned long nr_context_switch_bugs;
	unsigned long nr_events;
	unsigned long nr_lost_chunks;
	unsigned long nr_lost_events;
	u64 run_measurement_overhead;
	u64 sleep_measurement_overhead;
	u64 start_time;
	u64 cpu_usage;
	u64 runavg_cpu_usage;
	u64 parent_cpu_usage;
	u64 runavg_parent_cpu_usage;
	u64 sum_runtime;
	u64 sum_fluct;
	u64 run_avg;
	u64 all_runtime;
	u64 all_count;
	u64 *cpu_last_switched;
	struct rb_root_cached atom_root, sorted_atom_root, merged_atom_root;
	struct list_head sort_list, cmp_pid;
	bool force;
	bool skip_merge;
	struct perf_sched_map map;

	/* options for timehist command */
	bool summary;
	bool summary_only;
	bool idle_hist;
	bool show_callchain;
	unsigned int max_stack;
	bool show_cpu_visual;
	bool show_wakeups;
	bool show_next;
	bool show_migrations;
	bool show_state;
	bool show_prio;
	u64 skipped_samples;
	const char *time_str;
	struct perf_time_interval ptime;
	struct perf_time_interval hist_time;
	volatile bool thread_funcs_exit;
	const char *prio_str;
	DECLARE_BITMAP(prio_bitmap, MAX_PRIO);
};

/* per thread run time data */
struct thread_runtime {
	u64 last_time;    /* time of previous sched in/out event */
	u64 dt_run;       /* run time */
	u64 dt_sleep;     /* time between CPU access by sleep (off cpu) */
	u64 dt_iowait;    /* time between CPU access by iowait (off cpu) */
	u64 dt_preempt;   /* time between CPU access by preempt (off cpu) */
	u64 dt_delay;     /* time between wakeup and sched-in */
	u64 ready_to_run; /* time of wakeup */

	struct stats run_stats;
	u64 total_run_time;
	u64 total_sleep_time;
	u64 total_iowait_time;
	u64 total_preempt_time;
	u64 total_delay_time;

	char last_state;

	char shortname[3];
	bool comm_changed;

	u64 migrations;

	int prio;
};

/* per event run time data */
struct evsel_runtime {
	u64 *last_time; /* time this event was last seen per cpu */
	u32 ncpu;       /* highest cpu slot allocated */
};

/* per cpu idle time data */
struct idle_thread_runtime {
	struct thread_runtime tr;
	struct thread *last_thread;
	struct rb_root_cached sorted_root;
	struct callchain_root callchain;
	struct callchain_cursor cursor;
};

/* track idle times per cpu */
static struct thread **idle_threads;
static int idle_max_cpu;
static char idle_comm[] = "<idle>";

static u64 get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);

	return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}

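/*
 * Busy-loop for roughly @nsecs, compensating for the cost of the
 * get_nsecs() calls themselves (run_measurement_overhead, measured by
 * calibrate_run_measurement_overhead() below).
 */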
static void burn_nsecs(struct perf_sched *sched, u64 nsecs)
{
	u64 T0 = get_nsecs(), T1;

	do {
		T1 = get_nsecs();
	} while (T1 + sched->run_measurement_overhead < T0 + nsecs);
}

static void sleep_nsecs(u64 nsecs)
{
	struct timespec ts;

	ts.tv_nsec = nsecs % NSEC_PER_SEC;
	ts.tv_sec = nsecs / NSEC_PER_SEC;

	nanosleep(&ts, NULL);
}

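/*
 * Take the minimum over ten zero-length burn_nsecs() runs as the fixed
 * cost of one measurement; using the minimum (rather than the mean)
 * discards runs inflated by preemption.
 */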
static void calibrate_run_measurement_overhead(struct perf_sched *sched)
{
	u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		burn_nsecs(sched, 0);
		T1 = get_nsecs();
		delta = T1 - T0;
		min_delta = min(min_delta, delta);
	}
	sched->run_measurement_overhead = min_delta;

	printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}

static void calibrate_sleep_measurement_overhead(struct perf_sched *sched)
{
	u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		sleep_nsecs(10000);
		T1 = get_nsecs();
		delta = T1 - T0;
		min_delta = min(min_delta, delta);
	}
	min_delta -= 10000;
	sched->sleep_measurement_overhead = min_delta;

	printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}

static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)
{
	struct sched_atom *event = zalloc(sizeof(*event));
	unsigned long idx = task->nr_events;
	size_t size;

	event->timestamp = timestamp;
	event->nr = idx;

	task->nr_events++;
	size = sizeof(struct sched_atom *) * task->nr_events;
	task->atoms = realloc(task->atoms, size);
	BUG_ON(!task->atoms);

	task->atoms[idx] = event;

	return event;
}

static struct sched_atom *last_event(struct task_desc *task)
{
	if (!task->nr_events)
		return NULL;

	return task->atoms[task->nr_events - 1];
}

static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
				u64 timestamp, u64 duration)
{
	struct sched_atom *event, *curr_event = last_event(task);

	/*
	 * optimize an existing RUN event by merging this one
	 * to it:
	 */
	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
		sched->nr_run_events_optimized++;
		curr_event->duration += duration;
		return;
	}

	event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_RUN;
	event->duration = duration;

	sched->nr_run_events++;
}

static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
				   u64 timestamp, struct task_desc *wakee)
{
	struct sched_atom *event, *wakee_event;

	event = get_new_event(task, timestamp);
	event->type = SCHED_EVENT_WAKEUP;
	event->wakee = wakee;

	wakee_event = last_event(wakee);
	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
		sched->targetless_wakeups++;
		return;
	}
	if (wakee_event->wait_sem) {
		sched->multitarget_wakeups++;
		return;
	}

	wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
	sem_init(wakee_event->wait_sem, 0, 0);
	wakee_event->specific_wait = 1;
	event->wait_sem = wakee_event->wait_sem;

	sched->nr_wakeup_events++;
}

static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
				  u64 timestamp, const char task_state __maybe_unused)
{
	struct sched_atom *event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_SLEEP;

	sched->nr_sleep_events++;
}

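/*
 * Map a pid to its replay task, allocating the task on first sight.
 * The pid_to_task table is sized from kernel/pid_max and grown on
 * demand if a recorded pid exceeds it.
 */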
static struct task_desc *register_pid(struct perf_sched *sched,
				      unsigned long pid, const char *comm)
{
	struct task_desc *task;
	static int pid_max;

	if (sched->pid_to_task == NULL) {
		if (sysctl__read_int("kernel/pid_max", &pid_max) < 0)
			pid_max = MAX_PID;
		BUG_ON((sched->pid_to_task = calloc(pid_max, sizeof(struct task_desc *))) == NULL);
	}
	if (pid >= (unsigned long)pid_max) {
		BUG_ON((sched->pid_to_task = realloc(sched->pid_to_task, (pid + 1) *
			sizeof(struct task_desc *))) == NULL);
		while (pid >= (unsigned long)pid_max)
			sched->pid_to_task[pid_max++] = NULL;
	}

	task = sched->pid_to_task[pid];

	if (task)
		return task;

	task = zalloc(sizeof(*task));
	task->pid = pid;
	task->nr = sched->nr_tasks;
	strcpy(task->comm, comm);
	/*
	 * every task starts in sleeping state - this gets ignored
	 * if there's no wakeup pointing to this sleep state:
	 */
	add_sched_event_sleep(sched, task, 0, 0);

	sched->pid_to_task[pid] = task;
	sched->nr_tasks++;
	sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
	BUG_ON(!sched->tasks);
	sched->tasks[task->nr] = task;

	if (verbose > 0)
		printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm);

	return task;
}


static void print_task_traces(struct perf_sched *sched)
{
	struct task_desc *task;
	unsigned long i;

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
			task->nr, task->comm, task->pid, task->nr_events);
	}
}

static void add_cross_task_wakeups(struct perf_sched *sched)
{
	struct task_desc *task1, *task2;
	unsigned long i, j;

	for (i = 0; i < sched->nr_tasks; i++) {
		task1 = sched->tasks[i];
		j = i + 1;
		if (j == sched->nr_tasks)
			j = 0;
		task2 = sched->tasks[j];
		add_sched_event_wakeup(sched, task1, 0, task2);
	}
}

static void perf_sched__process_event(struct perf_sched *sched,
				      struct sched_atom *atom)
{
	int ret = 0;

	switch (atom->type) {
	case SCHED_EVENT_RUN:
		burn_nsecs(sched, atom->duration);
		break;
	case SCHED_EVENT_SLEEP:
		if (atom->wait_sem)
			ret = sem_wait(atom->wait_sem);
		BUG_ON(ret);
		break;
	case SCHED_EVENT_WAKEUP:
		if (atom->wait_sem)
			ret = sem_post(atom->wait_sem);
		BUG_ON(ret);
		break;
	case SCHED_EVENT_MIGRATION:
		break;
	default:
		BUG_ON(1);
	}
}

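/* Sum of user and system time for the whole process, in nanoseconds. */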
static u64 get_cpu_usage_nsec_parent(void)
{
	struct rusage ru;
	u64 sum;
	int err;

	err = getrusage(RUSAGE_SELF, &ru);
	BUG_ON(err);

	sum = ru.ru_utime.tv_sec * NSEC_PER_SEC + ru.ru_utime.tv_usec * NSEC_PER_USEC;
	sum += ru.ru_stime.tv_sec * NSEC_PER_SEC + ru.ru_stime.tv_usec * NSEC_PER_USEC;

	return sum;
}

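/*
 * Open a per-thread PERF_COUNT_SW_TASK_CLOCK counter so each replay
 * thread can read its own CPU usage. One fd per task can exhaust
 * RLIMIT_NOFILE; with -f (sched->force) the limit is raised and the
 * open retried, otherwise the user is told to try the -f option.
 */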
static int self_open_counters(struct perf_sched *sched, unsigned long cur_task)
{
	struct perf_event_attr attr;
	char sbuf[STRERR_BUFSIZE], info[STRERR_BUFSIZE] = "";
	int fd;
	struct rlimit limit;
	bool need_privilege = false;

	memset(&attr, 0, sizeof(attr));

	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;

force_again:
	fd = sys_perf_event_open(&attr, 0, -1, -1,
				 perf_event_open_cloexec_flag());

	if (fd < 0) {
		if (errno == EMFILE) {
			if (sched->force) {
				BUG_ON(getrlimit(RLIMIT_NOFILE, &limit) == -1);
				limit.rlim_cur += sched->nr_tasks - cur_task;
				if (limit.rlim_cur > limit.rlim_max) {
					limit.rlim_max = limit.rlim_cur;
					need_privilege = true;
				}
				if (setrlimit(RLIMIT_NOFILE, &limit) == -1) {
					if (need_privilege && errno == EPERM)
						strcpy(info, "Need privilege\n");
				} else
					goto force_again;
			} else
				strcpy(info, "Have a try with -f option\n");
		}
		pr_err("Error: sys_perf_event_open() syscall returned "
		       "with %d (%s)\n%s", fd,
		       str_error_r(errno, sbuf, sizeof(sbuf)), info);
		exit(EXIT_FAILURE);
	}
	return fd;
}

static u64 get_cpu_usage_nsec_self(int fd)
{
	u64 runtime;
	int ret;

	ret = read(fd, &runtime, sizeof(runtime));
	BUG_ON(ret != sizeof(runtime));

	return runtime;
}

struct sched_thread_parms {
	struct task_desc *task;
	struct perf_sched *sched;
	int fd;
};

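/*
 * Body of every replay thread. start_work_mutex and work_done_wait_mutex
 * are used as gates: the parent holds them and the workers block on
 * lock/unlock pairs, so the parent can release all workers at once and
 * then wait for them via the two semaphores.
 */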
static void *thread_func(void *ctx)
{
	struct sched_thread_parms *parms = ctx;
	struct task_desc *this_task = parms->task;
	struct perf_sched *sched = parms->sched;
	u64 cpu_usage_0, cpu_usage_1;
	unsigned long i, ret;
	char comm2[22];
	int fd = parms->fd;

	zfree(&parms);

	sprintf(comm2, ":%s", this_task->comm);
	prctl(PR_SET_NAME, comm2);
	if (fd < 0)
		return NULL;

	while (!sched->thread_funcs_exit) {
		ret = sem_post(&this_task->ready_for_work);
		BUG_ON(ret);
		mutex_lock(&sched->start_work_mutex);
		mutex_unlock(&sched->start_work_mutex);

		cpu_usage_0 = get_cpu_usage_nsec_self(fd);

		for (i = 0; i < this_task->nr_events; i++) {
			this_task->curr_event = i;
			perf_sched__process_event(sched, this_task->atoms[i]);
		}

		cpu_usage_1 = get_cpu_usage_nsec_self(fd);
		this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
		ret = sem_post(&this_task->work_done_sem);
		BUG_ON(ret);

		mutex_lock(&sched->work_done_wait_mutex);
		mutex_unlock(&sched->work_done_wait_mutex);
	}
	return NULL;
}

static void create_tasks(struct perf_sched *sched)
	EXCLUSIVE_LOCK_FUNCTION(sched->start_work_mutex)
	EXCLUSIVE_LOCK_FUNCTION(sched->work_done_wait_mutex)
{
	struct task_desc *task;
	pthread_attr_t attr;
	unsigned long i;
	int err;

	err = pthread_attr_init(&attr);
	BUG_ON(err);
	err = pthread_attr_setstacksize(&attr,
			(size_t) max(16 * 1024, (int)PTHREAD_STACK_MIN));
	BUG_ON(err);
	mutex_lock(&sched->start_work_mutex);
	mutex_lock(&sched->work_done_wait_mutex);
	for (i = 0; i < sched->nr_tasks; i++) {
		struct sched_thread_parms *parms = malloc(sizeof(*parms));
		BUG_ON(parms == NULL);
		parms->task = task = sched->tasks[i];
		parms->sched = sched;
		parms->fd = self_open_counters(sched, i);
		sem_init(&task->sleep_sem, 0, 0);
		sem_init(&task->ready_for_work, 0, 0);
		sem_init(&task->work_done_sem, 0, 0);
		task->curr_event = 0;
		err = pthread_create(&task->thread, &attr, thread_func, parms);
		BUG_ON(err);
	}
}

static void destroy_tasks(struct perf_sched *sched)
	UNLOCK_FUNCTION(sched->start_work_mutex)
	UNLOCK_FUNCTION(sched->work_done_wait_mutex)
{
	struct task_desc *task;
	unsigned long i;
	int err;

	mutex_unlock(&sched->start_work_mutex);
	mutex_unlock(&sched->work_done_wait_mutex);
	/* Get rid of threads so they won't be upset by mutex destruction */
	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		err = pthread_join(task->thread, NULL);
		BUG_ON(err);
		sem_destroy(&task->sleep_sem);
		sem_destroy(&task->ready_for_work);
		sem_destroy(&task->work_done_sem);
	}
}

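/*
 * One replay iteration: release the workers, wait for all of them to
 * finish their event lists, and fold their measured CPU usage into the
 * running averages. The mutex lock/unlock order here pairs with
 * thread_func() above.
 */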
static void wait_for_tasks(struct perf_sched *sched)
	EXCLUSIVE_LOCKS_REQUIRED(sched->work_done_wait_mutex)
	EXCLUSIVE_LOCKS_REQUIRED(sched->start_work_mutex)
{
	u64 cpu_usage_0, cpu_usage_1;
	struct task_desc *task;
	unsigned long i, ret;

	sched->start_time = get_nsecs();
	sched->cpu_usage = 0;
	mutex_unlock(&sched->work_done_wait_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		ret = sem_wait(&task->ready_for_work);
		BUG_ON(ret);
		sem_init(&task->ready_for_work, 0, 0);
	}
	mutex_lock(&sched->work_done_wait_mutex);

	cpu_usage_0 = get_cpu_usage_nsec_parent();

	mutex_unlock(&sched->start_work_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		ret = sem_wait(&task->work_done_sem);
		BUG_ON(ret);
		sem_init(&task->work_done_sem, 0, 0);
		sched->cpu_usage += task->cpu_usage;
		task->cpu_usage = 0;
	}

	cpu_usage_1 = get_cpu_usage_nsec_parent();
	if (!sched->runavg_cpu_usage)
		sched->runavg_cpu_usage = sched->cpu_usage;
	sched->runavg_cpu_usage = (sched->runavg_cpu_usage * (sched->replay_repeat - 1) + sched->cpu_usage) / sched->replay_repeat;

	sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
	if (!sched->runavg_parent_cpu_usage)
		sched->runavg_parent_cpu_usage = sched->parent_cpu_usage;
	sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) +
					  sched->parent_cpu_usage) / sched->replay_repeat;

	mutex_lock(&sched->start_work_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		task->curr_event = 0;
	}
}

static void run_one_test(struct perf_sched *sched)
	EXCLUSIVE_LOCKS_REQUIRED(sched->work_done_wait_mutex)
	EXCLUSIVE_LOCKS_REQUIRED(sched->start_work_mutex)
{
	u64 T0, T1, delta, avg_delta, fluct;

	T0 = get_nsecs();
	wait_for_tasks(sched);
	T1 = get_nsecs();

	delta = T1 - T0;
	sched->sum_runtime += delta;
	sched->nr_runs++;

	avg_delta = sched->sum_runtime / sched->nr_runs;
	if (delta < avg_delta)
		fluct = avg_delta - delta;
	else
		fluct = delta - avg_delta;
	sched->sum_fluct += fluct;
	if (!sched->run_avg)
		sched->run_avg = delta;
	sched->run_avg = (sched->run_avg * (sched->replay_repeat - 1) + delta) / sched->replay_repeat;

	printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / NSEC_PER_MSEC);

	printf("ravg: %0.2f, ", (double)sched->run_avg / NSEC_PER_MSEC);

	printf("cpu: %0.2f / %0.2f",
		(double)sched->cpu_usage / NSEC_PER_MSEC, (double)sched->runavg_cpu_usage / NSEC_PER_MSEC);

#if 0
	/*
	 * rusage statistics done by the parent, these are less
	 * accurate than the sched->sum_exec_runtime based statistics:
	 */
	printf(" [%0.2f / %0.2f]",
		(double)sched->parent_cpu_usage / NSEC_PER_MSEC,
		(double)sched->runavg_parent_cpu_usage / NSEC_PER_MSEC);
#endif

	printf("\n");

	if (sched->nr_sleep_corrections)
		printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections);
	sched->nr_sleep_corrections = 0;
}

static void test_calibrations(struct perf_sched *sched)
{
	u64 T0, T1;

	T0 = get_nsecs();
	burn_nsecs(sched, NSEC_PER_MSEC);
	T1 = get_nsecs();

	printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);

	T0 = get_nsecs();
	sleep_nsecs(NSEC_PER_MSEC);
	T1 = get_nsecs();

	printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
}

static int
replay_wakeup_event(struct perf_sched *sched,
		    struct evsel *evsel, struct perf_sample *sample,
		    struct machine *machine __maybe_unused)
{
	const char *comm = evsel__strval(evsel, sample, "comm");
	const u32 pid = evsel__intval(evsel, sample, "pid");
	struct task_desc *waker, *wakee;

	if (verbose > 0) {
		printf("sched_wakeup event %p\n", evsel);

		printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid);
	}

	waker = register_pid(sched, sample->tid, "<unknown>");
	wakee = register_pid(sched, pid, comm);

	add_sched_event_wakeup(sched, waker, sample->time, wakee);
	return 0;
}

static int replay_switch_event(struct perf_sched *sched,
			       struct evsel *evsel,
			       struct perf_sample *sample,
			       struct machine *machine __maybe_unused)
{
	const char *prev_comm = evsel__strval(evsel, sample, "prev_comm"),
		   *next_comm = evsel__strval(evsel, sample, "next_comm");
	const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
		  next_pid = evsel__intval(evsel, sample, "next_pid");
	const char prev_state = evsel__taskstate(evsel, sample, "prev_state");
	struct task_desc *prev, __maybe_unused *next;
	u64 timestamp0, timestamp = sample->time;
	int cpu = sample->cpu;
	s64 delta;

	if (verbose > 0)
		printf("sched_switch event %p\n", evsel);

	if (cpu >= MAX_CPUS || cpu < 0)
		return 0;

	timestamp0 = sched->cpu_last_switched[cpu];
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
		return -1;
	}

	pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
		 prev_comm, prev_pid, next_comm, next_pid, delta);

	prev = register_pid(sched, prev_pid, prev_comm);
	next = register_pid(sched, next_pid, next_comm);

	sched->cpu_last_switched[cpu] = timestamp;

	add_sched_event_run(sched, prev, timestamp, delta);
	add_sched_event_sleep(sched, prev, timestamp, prev_state);

	return 0;
}

static int replay_fork_event(struct perf_sched *sched,
			     union perf_event *event,
			     struct machine *machine)
{
	struct thread *child, *parent;

	child = machine__findnew_thread(machine, event->fork.pid,
					event->fork.tid);
	parent = machine__findnew_thread(machine, event->fork.ppid,
					 event->fork.ptid);

	if (child == NULL || parent == NULL) {
		pr_debug("thread does not exist on fork event: child %p, parent %p\n",
			 child, parent);
		goto out_put;
	}

	if (verbose > 0) {
		printf("fork event\n");
		printf("... parent: %s/%d\n", thread__comm_str(parent), thread__tid(parent));
		printf("... child: %s/%d\n", thread__comm_str(child), thread__tid(child));
	}

	register_pid(sched, thread__tid(parent), thread__comm_str(parent));
	register_pid(sched, thread__tid(child), thread__comm_str(child));
out_put:
	thread__put(child);
	thread__put(parent);
	return 0;
}

struct sort_dimension {
	const char *name;
	sort_fn_t cmp;
	struct list_head list;
};

static inline void init_prio(struct thread_runtime *r)
{
	r->prio = -1;
}

/*
 * handle runtime stats saved per thread
 */
static struct thread_runtime *thread__init_runtime(struct thread *thread)
{
	struct thread_runtime *r;

	r = zalloc(sizeof(struct thread_runtime));
	if (!r)
		return NULL;

	init_stats(&r->run_stats);
	init_prio(r);
	thread__set_priv(thread, r);

	return r;
}

static struct thread_runtime *thread__get_runtime(struct thread *thread)
{
	struct thread_runtime *tr;

	tr = thread__priv(thread);
	if (tr == NULL) {
		tr = thread__init_runtime(thread);
		if (tr == NULL)
			pr_debug("Failed to malloc memory for runtime data.\n");
	}

	return tr;
}

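/*
 * Compare two threads' work_atoms with each sort key in @list in turn;
 * the first key that differs decides the order.
 */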
static int
thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
{
	struct sort_dimension *sort;
	int ret = 0;

	BUG_ON(list_empty(list));

	list_for_each_entry(sort, list, list) {
		ret = sort->cmp(l, r);
		if (ret)
			return ret;
	}

	return ret;
}

static struct work_atoms *
thread_atoms_search(struct rb_root_cached *root, struct thread *thread,
		    struct list_head *sort_list)
{
	struct rb_node *node = root->rb_root.rb_node;
	struct work_atoms key = { .thread = thread };

	while (node) {
		struct work_atoms *atoms;
		int cmp;

		atoms = container_of(node, struct work_atoms, node);

		cmp = thread_lat_cmp(sort_list, &key, atoms);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else {
			BUG_ON(thread != atoms->thread);
			return atoms;
		}
	}
	return NULL;
}

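/*
 * Standard rbtree insertion keyed by thread_lat_cmp(); the cached
 * variant remembers the leftmost node so rb_first_cached() is O(1).
 */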
static void
__thread_latency_insert(struct rb_root_cached *root, struct work_atoms *data,
			struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
	bool leftmost = true;

	while (*new) {
		struct work_atoms *this;
		int cmp;

		this = container_of(*new, struct work_atoms, node);
		parent = *new;

		cmp = thread_lat_cmp(sort_list, data, this);

		if (cmp > 0)
			new = &((*new)->rb_left);
		else {
			new = &((*new)->rb_right);
			leftmost = false;
		}
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color_cached(&data->node, root, leftmost);
}

static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
{
	struct work_atoms *atoms = zalloc(sizeof(*atoms));
	if (!atoms) {
		pr_err("No memory at %s\n", __func__);
		return -1;
	}

	atoms->thread = thread__get(thread);
	INIT_LIST_HEAD(&atoms->work_list);
	__thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid);
	return 0;
}

static int
add_sched_out_event(struct work_atoms *atoms,
		    char run_state,
		    u64 timestamp)
{
	struct work_atom *atom = zalloc(sizeof(*atom));
	if (!atom) {
		pr_err("No memory at %s\n", __func__);
		return -1;
	}

	atom->sched_out_time = timestamp;

	if (run_state == 'R') {
		atom->state = THREAD_WAIT_CPU;
		atom->wake_up_time = atom->sched_out_time;
	}

	list_add_tail(&atom->list, &atoms->work_list);
	return 0;
}

static void
add_runtime_event(struct work_atoms *atoms, u64 delta,
		  u64 timestamp __maybe_unused)
{
	struct work_atom *atom;

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	atom->runtime += delta;
	atoms->total_runtime += delta;
}

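/*
 * Complete the latest atom on a sched-in: the scheduling latency is
 * sched_in_time - wake_up_time, accumulated into total_lat and tracked
 * as max_lat together with its start/end timestamps.
 */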
static void
add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
{
	struct work_atom *atom;
	u64 delta;

	if (list_empty(&atoms->work_list))
		return;

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	if (atom->state != THREAD_WAIT_CPU)
		return;

	if (timestamp < atom->wake_up_time) {
		atom->state = THREAD_IGNORE;
		return;
	}

	atom->state = THREAD_SCHED_IN;
	atom->sched_in_time = timestamp;

	delta = atom->sched_in_time - atom->wake_up_time;
	atoms->total_lat += delta;
	if (delta > atoms->max_lat) {
		atoms->max_lat = delta;
		atoms->max_lat_start = atom->wake_up_time;
		atoms->max_lat_end = timestamp;
	}
	atoms->nb_atoms++;
}

static int latency_switch_event(struct perf_sched *sched,
				struct evsel *evsel,
				struct perf_sample *sample,
				struct machine *machine)
{
	const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
		  next_pid = evsel__intval(evsel, sample, "next_pid");
	const char prev_state = evsel__taskstate(evsel, sample, "prev_state");
	struct work_atoms *out_events, *in_events;
	struct thread *sched_out, *sched_in;
	u64 timestamp0, timestamp = sample->time;
	int cpu = sample->cpu, err = -1;
	s64 delta;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

	timestamp0 = sched->cpu_last_switched[cpu];
	sched->cpu_last_switched[cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
		return -1;
	}

	sched_out = machine__findnew_thread(machine, -1, prev_pid);
	sched_in = machine__findnew_thread(machine, -1, next_pid);
	if (sched_out == NULL || sched_in == NULL)
		goto out_put;

	out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
	if (!out_events) {
		if (thread_atoms_insert(sched, sched_out))
			goto out_put;
		out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
		if (!out_events) {
			pr_err("out-event: Internal tree error");
			goto out_put;
		}
	}
	if (add_sched_out_event(out_events, prev_state, timestamp))
		return -1;

	in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
	if (!in_events) {
		if (thread_atoms_insert(sched, sched_in))
			goto out_put;
		in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
		if (!in_events) {
			pr_err("in-event: Internal tree error");
			goto out_put;
		}
		/*
		 * Task came in that we have not heard about yet;
		 * add an initial atom in runnable state:
		 */
		if (add_sched_out_event(in_events, 'R', timestamp))
			goto out_put;
	}
	add_sched_in_event(in_events, timestamp);
	err = 0;
out_put:
	thread__put(sched_out);
	thread__put(sched_in);
	return err;
}

static int latency_runtime_event(struct perf_sched *sched,
				 struct evsel *evsel,
				 struct perf_sample *sample,
				 struct machine *machine)
{
	const u32 pid = evsel__intval(evsel, sample, "pid");
	const u64 runtime = evsel__intval(evsel, sample, "runtime");
	struct thread *thread = machine__findnew_thread(machine, -1, pid);
	struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
	u64 timestamp = sample->time;
	int cpu = sample->cpu, err = -1;

	if (thread == NULL)
		return -1;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
	if (!atoms) {
		if (thread_atoms_insert(sched, thread))
			goto out_put;
		atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
		if (!atoms) {
			pr_err("in-event: Internal tree error");
			goto out_put;
		}
		if (add_sched_out_event(atoms, 'R', timestamp))
			goto out_put;
	}

	add_runtime_event(atoms, runtime, timestamp);
	err = 0;
out_put:
	thread__put(thread);
	return err;
}

static int latency_wakeup_event(struct perf_sched *sched,
				struct evsel *evsel,
				struct perf_sample *sample,
				struct machine *machine)
{
	const u32 pid = evsel__intval(evsel, sample, "pid");
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *wakee;
	u64 timestamp = sample->time;
	int err = -1;

	wakee = machine__findnew_thread(machine, -1, pid);
	if (wakee == NULL)
		return -1;
	atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
	if (!atoms) {
		if (thread_atoms_insert(sched, wakee))
			goto out_put;
		atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
		if (!atoms) {
			pr_err("wakeup-event: Internal tree error");
			goto out_put;
		}
		if (add_sched_out_event(atoms, 'S', timestamp))
			goto out_put;
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	/*
	 * A wakeup event is not guaranteed to arrive while the task is
	 * off the run queue: it can also fire while the task is already
	 * on the run queue and merely flips ->state to TASK_RUNNING. In
	 * that case don't set ->wake_up_time.
	 *
	 * You WILL be missing events if you've recorded only
	 * one CPU, or are only looking at only one, so don't
	 * skip in this case.
	 */
	if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
		goto out_ok;

	sched->nr_timestamps++;
	if (atom->sched_out_time > timestamp) {
		sched->nr_unordered_timestamps++;
		goto out_ok;
	}

	atom->state = THREAD_WAIT_CPU;
	atom->wake_up_time = timestamp;
out_ok:
	err = 0;
out_put:
	thread__put(wakee);
	return err;
}

static int latency_migrate_task_event(struct perf_sched *sched,
				      struct evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)
{
	const u32 pid = evsel__intval(evsel, sample, "pid");
	u64 timestamp = sample->time;
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *migrant;
	int err = -1;

	/*
	 * Only need to worry about migration when profiling one CPU.
	 */
	if (sched->profile_cpu == -1)
		return 0;

	migrant = machine__findnew_thread(machine, -1, pid);
	if (migrant == NULL)
		return -1;
	atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
	if (!atoms) {
		if (thread_atoms_insert(sched, migrant))
			goto out_put;
		register_pid(sched, thread__tid(migrant), thread__comm_str(migrant));
		atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
		if (!atoms) {
			pr_err("migration-event: Internal tree error");
			goto out_put;
		}
		if (add_sched_out_event(atoms, 'R', timestamp))
			goto out_put;
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
	atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;

	sched->nr_timestamps++;

	if (atom->sched_out_time > timestamp)
		sched->nr_unordered_timestamps++;
	err = 0;
out_put:
	thread__put(migrant);
	return err;
}

static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)
{
	int i;
	int ret;
	u64 avg;
	char max_lat_start[32], max_lat_end[32];

	if (!work_list->nb_atoms)
		return;
	/*
	 * Ignore idle threads:
	 */
	if (!strcmp(thread__comm_str(work_list->thread), "swapper"))
		return;

	sched->all_runtime += work_list->total_runtime;
	sched->all_count += work_list->nb_atoms;

	if (work_list->num_merged > 1) {
		ret = printf(" %s:(%d) ", thread__comm_str(work_list->thread),
			     work_list->num_merged);
	} else {
		ret = printf(" %s:%d ", thread__comm_str(work_list->thread),
			     thread__tid(work_list->thread));
	}

	for (i = 0; i < 24 - ret; i++)
		printf(" ");

	avg = work_list->total_lat / work_list->nb_atoms;
	timestamp__scnprintf_usec(work_list->max_lat_start, max_lat_start, sizeof(max_lat_start));
	timestamp__scnprintf_usec(work_list->max_lat_end, max_lat_end, sizeof(max_lat_end));

	printf("|%11.3f ms |%9" PRIu64 " | avg:%8.3f ms | max:%8.3f ms | max start: %12s s | max end: %12s s\n",
	       (double)work_list->total_runtime / NSEC_PER_MSEC,
	       work_list->nb_atoms, (double)avg / NSEC_PER_MSEC,
	       (double)work_list->max_lat / NSEC_PER_MSEC,
	       max_lat_start, max_lat_end);
}

static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
{
	pid_t l_tid, r_tid;

	if (RC_CHK_EQUAL(l->thread, r->thread))
		return 0;
	l_tid = thread__tid(l->thread);
	r_tid = thread__tid(r->thread);
	if (l_tid < r_tid)
		return -1;
	if (l_tid > r_tid)
		return 1;
	return (int)(RC_CHK_ACCESS(l->thread) - RC_CHK_ACCESS(r->thread));
}

static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
{
	u64 avgl, avgr;

	if (!l->nb_atoms)
		return -1;

	if (!r->nb_atoms)
		return 1;

	avgl = l->total_lat / l->nb_atoms;
	avgr = r->total_lat / r->nb_atoms;

	if (avgl < avgr)
		return -1;
	if (avgl > avgr)
		return 1;

	return 0;
}

static int max_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->max_lat < r->max_lat)
		return -1;
	if (l->max_lat > r->max_lat)
		return 1;

	return 0;
}

static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->nb_atoms < r->nb_atoms)
		return -1;
	if (l->nb_atoms > r->nb_atoms)
		return 1;

	return 0;
}

static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->total_runtime < r->total_runtime)
		return -1;
	if (l->total_runtime > r->total_runtime)
		return 1;

	return 0;
}

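/*
 * Parse one sort-key token (e.g. "max" or "runtime") and append its
 * comparator to @list; returns -1 for an unknown key so the caller can
 * report the bad token.
 */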
static int sort_dimension__add(const char *tok, struct list_head *list)
{
	size_t i;
	static struct sort_dimension avg_sort_dimension = {
		.name = "avg",
		.cmp = avg_cmp,
	};
	static struct sort_dimension max_sort_dimension = {
		.name = "max",
		.cmp = max_cmp,
	};
	static struct sort_dimension pid_sort_dimension = {
		.name = "pid",
		.cmp = pid_cmp,
	};
	static struct sort_dimension runtime_sort_dimension = {
		.name = "runtime",
		.cmp = runtime_cmp,
	};
	static struct sort_dimension switch_sort_dimension = {
		.name = "switch",
		.cmp = switch_cmp,
	};
	struct sort_dimension *available_sorts[] = {
		&pid_sort_dimension,
		&avg_sort_dimension,
		&max_sort_dimension,
		&switch_sort_dimension,
		&runtime_sort_dimension,
	};

	for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
		if (!strcmp(available_sorts[i]->name, tok)) {
			list_add_tail(&available_sorts[i]->list, list);

			return 0;
		}
	}

	return -1;
}

static void perf_sched__sort_lat(struct perf_sched *sched)
{
	struct rb_node *node;
	struct rb_root_cached *root = &sched->atom_root;
again:
	for (;;) {
		struct work_atoms *data;
		node = rb_first_cached(root);
		if (!node)
			break;

		rb_erase_cached(node, root);
		data = rb_entry(node, struct work_atoms, node);
		__thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
	}
	if (root == &sched->atom_root) {
		root = &sched->merged_atom_root;
		goto again;
	}
}

static int process_sched_wakeup_event(const struct perf_tool *tool,
				      struct evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->wakeup_event)
		return sched->tp_handler->wakeup_event(sched, evsel, sample, machine);

	return 0;
}

static int process_sched_wakeup_ignore(const struct perf_tool *tool __maybe_unused,
				       struct evsel *evsel __maybe_unused,
				       struct perf_sample *sample __maybe_unused,
				       struct machine *machine __maybe_unused)
{
	return 0;
}

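/*
 * 'perf sched map' reuses the per-thread priv pointer as a plain
 * boolean: a thread is colored if its tid is in sched->map.color_pids.
 */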
union map_priv {
	void *ptr;
	bool color;
};

static bool thread__has_color(struct thread *thread)
{
	union map_priv priv = {
		.ptr = thread__priv(thread),
	};

	return priv.color;
}

static struct thread*
map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid, pid_t tid)
{
	struct thread *thread = machine__findnew_thread(machine, pid, tid);
	union map_priv priv = {
		.color = false,
	};

	if (!sched->map.color_pids || !thread || thread__priv(thread))
		return thread;

	if (thread_map__has(sched->map.color_pids, tid))
		priv.color = true;

	thread__set_priv(thread, priv.ptr);
	return thread;
}

static bool sched_match_task(struct perf_sched *sched, const char *comm_str)
{
	bool fuzzy_match = sched->map.fuzzy;
	struct strlist *task_names = sched->map.task_names;
	struct str_node *node;

	strlist__for_each_entry(node, task_names) {
		bool match_found = fuzzy_match ? !!strstr(comm_str, node->s) :
						 !strcmp(comm_str, node->s);
		if (match_found)
			return true;
	}

	return false;
}

static void print_sched_map(struct perf_sched *sched, struct perf_cpu this_cpu, int cpus_nr,
			    const char *color, bool sched_out)
{
	for (int i = 0; i < cpus_nr; i++) {
		struct perf_cpu cpu = {
			.cpu = sched->map.comp ? sched->map.comp_cpus[i].cpu : i,
		};
		struct thread *curr_thread = sched->curr_thread[cpu.cpu];
		struct thread *curr_out_thread = sched->curr_out_thread[cpu.cpu];
		struct thread_runtime *curr_tr;
		const char *pid_color = color;
		const char *cpu_color = color;
		char symbol = ' ';
		struct thread *thread_to_check = sched_out ? curr_out_thread : curr_thread;

		if (thread_to_check && thread__has_color(thread_to_check))
			pid_color = COLOR_PIDS;

		if (sched->map.color_cpus && perf_cpu_map__has(sched->map.color_cpus, cpu))
			cpu_color = COLOR_CPUS;

		if (cpu.cpu == this_cpu.cpu)
			symbol = '*';

		color_fprintf(stdout, cpu.cpu != this_cpu.cpu ? color : cpu_color, "%c", symbol);

		thread_to_check = sched_out ? sched->curr_out_thread[cpu.cpu] :
					      sched->curr_thread[cpu.cpu];

		if (thread_to_check) {
			curr_tr = thread__get_runtime(thread_to_check);
			if (curr_tr == NULL)
				return;

			if (sched_out) {
				if (cpu.cpu == this_cpu.cpu)
					color_fprintf(stdout, color, "- ");
				else {
					curr_tr = thread__get_runtime(sched->curr_thread[cpu.cpu]);
					if (curr_tr != NULL)
						color_fprintf(stdout, pid_color, "%2s ",
							      curr_tr->shortname);
				}
			} else
				color_fprintf(stdout, pid_color, "%2s ", curr_tr->shortname);
		} else
			color_fprintf(stdout, color, " ");
	}
}

static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
			    struct perf_sample *sample, struct machine *machine)
{
	const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
	const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid");
	struct thread *sched_in, *sched_out;
	struct thread_runtime *tr;
	int new_shortname;
	u64 timestamp0, timestamp = sample->time;
	s64 delta;
	struct perf_cpu this_cpu = {
		.cpu = sample->cpu,
	};
	int cpus_nr;
	int proceed;
	bool new_cpu = false;
	const char *color = PERF_COLOR_NORMAL;
	char stimestamp[32];
	const char *str;

	BUG_ON(this_cpu.cpu >= MAX_CPUS || this_cpu.cpu < 0);

	if (this_cpu.cpu > sched->max_cpu.cpu)
		sched->max_cpu = this_cpu;

	if (sched->map.comp) {
		cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS);
		if (!__test_and_set_bit(this_cpu.cpu, sched->map.comp_cpus_mask)) {
			sched->map.comp_cpus[cpus_nr++] = this_cpu;
			new_cpu = true;
		}
	} else
		cpus_nr = sched->max_cpu.cpu;

	timestamp0 = sched->cpu_last_switched[this_cpu.cpu];
	sched->cpu_last_switched[this_cpu.cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
		return -1;
	}

	sched_in = map__findnew_thread(sched, machine, -1, next_pid);
	sched_out = map__findnew_thread(sched, machine, -1, prev_pid);
	if (sched_in == NULL || sched_out == NULL)
		return -1;

	tr = thread__get_runtime(sched_in);
	if (tr == NULL) {
		thread__put(sched_in);
		return -1;
	}

	sched->curr_thread[this_cpu.cpu] = thread__get(sched_in);
	sched->curr_out_thread[this_cpu.cpu] = thread__get(sched_out);

	str = thread__comm_str(sched_in);
	new_shortname = 0;
	if (!tr->shortname[0]) {
		if (!strcmp(thread__comm_str(sched_in), "swapper")) {
			/*
			 * Don't allocate a letter-number for swapper:0
			 * as a shortname. Instead, we use '.' for it.
			 */
			tr->shortname[0] = '.';
			tr->shortname[1] = ' ';
		} else if (!sched->map.task_name || sched_match_task(sched, str)) {
			tr->shortname[0] = sched->next_shortname1;
			tr->shortname[1] = sched->next_shortname2;

			if (sched->next_shortname1 < 'Z') {
				sched->next_shortname1++;
			} else {
				sched->next_shortname1 = 'A';
				if (sched->next_shortname2 < '9')
					sched->next_shortname2++;
				else
					sched->next_shortname2 = '0';
			}
		} else {
			tr->shortname[0] = '-';
			tr->shortname[1] = ' ';
		}
		new_shortname = 1;
	}

	if (sched->map.cpus && !perf_cpu_map__has(sched->map.cpus, this_cpu))
		goto out;

	proceed = 0;
	str = thread__comm_str(sched_in);
	/*
	 * Check which of sched_in and sched_out matches the passed --task-name
	 * arguments and call the corresponding print_sched_map.
	 */
	if (sched->map.task_name && !sched_match_task(sched, str)) {
		if (!sched_match_task(sched, thread__comm_str(sched_out)))
			goto out;
		else
			goto sched_out;

	} else {
		str = thread__comm_str(sched_out);
		if (!(sched->map.task_name && !sched_match_task(sched, str)))
			proceed = 1;
	}

	printf(" ");

	print_sched_map(sched, this_cpu, cpus_nr, color, false);

	timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp));
	color_fprintf(stdout, color, " %12s secs ", stimestamp);
	if (new_shortname || tr->comm_changed || (verbose > 0 && thread__tid(sched_in))) {
		const char *pid_color = color;

		if (thread__has_color(sched_in))
			pid_color = COLOR_PIDS;

		color_fprintf(stdout, pid_color, "%s => %s:%d",
			      tr->shortname, thread__comm_str(sched_in), thread__tid(sched_in));
		tr->comm_changed = false;
	}

	if (sched->map.comp && new_cpu)
		color_fprintf(stdout, color, " (CPU %d)", this_cpu.cpu);
1753
1754 if (proceed != 1) {
1755 color_fprintf(stdout, color, "\n");
1756 goto out;
1757 }
1758
1759 sched_out:
1760 if (sched->map.task_name) {
1761 tr = thread__get_runtime(sched->curr_out_thread[this_cpu.cpu]);
1762 if (strcmp(tr->shortname, "") == 0)
1763 goto out;
1764
1765 if (proceed == 1)
1766 color_fprintf(stdout, color, "\n");
1767
1768 printf(" ");
1769 print_sched_map(sched, this_cpu, cpus_nr, color, true);
1770 timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp));
1771 color_fprintf(stdout, color, " %12s secs ", stimestamp);
1772 }
1773
1774 color_fprintf(stdout, color, "\n");
1775
1776 out:
1777 if (sched->map.task_name)
1778 thread__put(sched_out);
1779
1780 thread__put(sched_in);
1781
1782 return 0;
1783 }
1784
process_sched_switch_event(const struct perf_tool * tool,struct evsel * evsel,struct perf_sample * sample,struct machine * machine)1785 static int process_sched_switch_event(const struct perf_tool *tool,
1786 struct evsel *evsel,
1787 struct perf_sample *sample,
1788 struct machine *machine)
1789 {
1790 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1791 int this_cpu = sample->cpu, err = 0;
1792 u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
1793 next_pid = evsel__intval(evsel, sample, "next_pid");
1794
1795 if (sched->curr_pid[this_cpu] != (u32)-1) {
1796 /*
1797 * Are we trying to switch away a PID that is
1798 * not current?
1799 */
1800 if (sched->curr_pid[this_cpu] != prev_pid)
1801 sched->nr_context_switch_bugs++;
1802 }
1803
1804 if (sched->tp_handler->switch_event)
1805 err = sched->tp_handler->switch_event(sched, evsel, sample, machine);
1806
1807 sched->curr_pid[this_cpu] = next_pid;
1808 return err;
1809 }
1810
process_sched_runtime_event(const struct perf_tool * tool,struct evsel * evsel,struct perf_sample * sample,struct machine * machine)1811 static int process_sched_runtime_event(const struct perf_tool *tool,
1812 struct evsel *evsel,
1813 struct perf_sample *sample,
1814 struct machine *machine)
1815 {
1816 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1817
1818 if (sched->tp_handler->runtime_event)
1819 return sched->tp_handler->runtime_event(sched, evsel, sample, machine);
1820
1821 return 0;
1822 }
1823
perf_sched__process_fork_event(const struct perf_tool * tool,union perf_event * event,struct perf_sample * sample,struct machine * machine)1824 static int perf_sched__process_fork_event(const struct perf_tool *tool,
1825 union perf_event *event,
1826 struct perf_sample *sample,
1827 struct machine *machine)
1828 {
1829 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1830
1831 /* run the fork event through the perf machinery */
1832 perf_event__process_fork(tool, event, sample, machine);
1833
1834 /* and then run additional processing needed for this command */
1835 if (sched->tp_handler->fork_event)
1836 return sched->tp_handler->fork_event(sched, event, machine);
1837
1838 return 0;
1839 }
1840
process_sched_migrate_task_event(const struct perf_tool * tool,struct evsel * evsel,struct perf_sample * sample,struct machine * machine)1841 static int process_sched_migrate_task_event(const struct perf_tool *tool,
1842 struct evsel *evsel,
1843 struct perf_sample *sample,
1844 struct machine *machine)
1845 {
1846 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1847
1848 if (sched->tp_handler->migrate_task_event)
1849 return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine);
1850
1851 return 0;
1852 }
1853
1854 typedef int (*tracepoint_handler)(const struct perf_tool *tool,
1855 struct evsel *evsel,
1856 struct perf_sample *sample,
1857 struct machine *machine);
1858
perf_sched__process_tracepoint_sample(const struct perf_tool * tool __maybe_unused,union perf_event * event __maybe_unused,struct perf_sample * sample,struct evsel * evsel,struct machine * machine)1859 static int perf_sched__process_tracepoint_sample(const struct perf_tool *tool __maybe_unused,
1860 union perf_event *event __maybe_unused,
1861 struct perf_sample *sample,
1862 struct evsel *evsel,
1863 struct machine *machine)
1864 {
1865 int err = 0;
1866
1867 if (evsel->handler != NULL) {
1868 tracepoint_handler f = evsel->handler;
1869 err = f(tool, evsel, sample, machine);
1870 }
1871
1872 return err;
1873 }
1874
1875 static int perf_sched__process_comm(const struct perf_tool *tool __maybe_unused,
1876 union perf_event *event,
1877 struct perf_sample *sample,
1878 struct machine *machine)
1879 {
1880 struct thread *thread;
1881 struct thread_runtime *tr;
1882 int err;
1883
1884 err = perf_event__process_comm(tool, event, sample, machine);
1885 if (err)
1886 return err;
1887
1888 thread = machine__find_thread(machine, sample->pid, sample->tid);
1889 if (!thread) {
1890 pr_err("Internal error: can't find thread\n");
1891 return -1;
1892 }
1893
1894 tr = thread__get_runtime(thread);
1895 if (tr == NULL) {
1896 thread__put(thread);
1897 return -1;
1898 }
1899
1900 tr->comm_changed = true;
1901 thread__put(thread);
1902
1903 return 0;
1904 }
1905
1906 static int perf_sched__read_events(struct perf_sched *sched)
1907 {
1908 struct evsel_str_handler handlers[] = {
1909 { "sched:sched_switch", process_sched_switch_event, },
1910 { "sched:sched_stat_runtime", process_sched_runtime_event, },
1911 { "sched:sched_wakeup", process_sched_wakeup_event, },
1912 { "sched:sched_waking", process_sched_wakeup_event, },
1913 { "sched:sched_wakeup_new", process_sched_wakeup_event, },
1914 { "sched:sched_migrate_task", process_sched_migrate_task_event, },
1915 };
1916 struct perf_session *session;
1917 struct perf_data data = {
1918 .path = input_name,
1919 .mode = PERF_DATA_MODE_READ,
1920 .force = sched->force,
1921 };
1922 int rc = -1;
1923
1924 session = perf_session__new(&data, &sched->tool);
1925 if (IS_ERR(session)) {
1926 pr_debug("Error creating perf session\n");
1927 return PTR_ERR(session);
1928 }
1929
1930 symbol__init(&session->header.env);
1931
1932 /* prefer sched_waking if it is captured */
1933 if (evlist__find_tracepoint_by_name(session->evlist, "sched:sched_waking"))
1934 handlers[2].handler = process_sched_wakeup_ignore;
1935
1936 if (perf_session__set_tracepoints_handlers(session, handlers))
1937 goto out_delete;
1938
1939 if (perf_session__has_traces(session, "record -R")) {
1940 int err = perf_session__process_events(session);
1941 if (err) {
1942 pr_err("Failed to process events, error %d\n", err);
1943 goto out_delete;
1944 }
1945
1946 sched->nr_events = session->evlist->stats.nr_events[0];
1947 sched->nr_lost_events = session->evlist->stats.total_lost;
1948 sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
1949 }
1950
1951 rc = 0;
1952 out_delete:
1953 perf_session__delete(session);
1954 return rc;
1955 }
1956
1957 /*
1958 * scheduling times are printed as msec.usec
1959 */
1960 static inline void print_sched_time(unsigned long long nsecs, int width)
1961 {
1962 unsigned long msecs;
1963 unsigned long usecs;
1964
1965 msecs = nsecs / NSEC_PER_MSEC;
1966 nsecs -= msecs * NSEC_PER_MSEC;
1967 usecs = nsecs / NSEC_PER_USEC;
1968 printf("%*lu.%03lu ", width, msecs, usecs);
1969 }
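/*
 * A quick worked example (hypothetical input, not from a trace):
 * print_sched_time(1234567, 6) splits 1234567 ns into 1 msec plus
 * 234567 ns, truncates the remainder to whole usecs (234), and
 * prints "     1.234 " with the msec part right-aligned to width 6.
 */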
1970
1971 /*
1972 * returns runtime data for event, allocating memory for it the
1973 * first time it is used.
1974 */
1975 static struct evsel_runtime *evsel__get_runtime(struct evsel *evsel)
1976 {
1977 struct evsel_runtime *r = evsel->priv;
1978
1979 if (r == NULL) {
1980 r = zalloc(sizeof(struct evsel_runtime));
1981 evsel->priv = r;
1982 }
1983
1984 return r;
1985 }
1986
1987 /*
1988 * save last time event was seen per cpu
1989 */
1990 static void evsel__save_time(struct evsel *evsel, u64 timestamp, u32 cpu)
1991 {
1992 struct evsel_runtime *r = evsel__get_runtime(evsel);
1993
1994 if (r == NULL)
1995 return;
1996
1997 if ((cpu >= r->ncpu) || (r->last_time == NULL)) {
1998 int i, n = __roundup_pow_of_two(cpu+1);
1999 void *p = r->last_time;
2000
2001 p = realloc(r->last_time, n * sizeof(u64));
2002 if (!p)
2003 return;
2004
2005 r->last_time = p;
2006 for (i = r->ncpu; i < n; ++i)
2007 r->last_time[i] = (u64) 0;
2008
2009 r->ncpu = n;
2010 }
2011
2012 r->last_time[cpu] = timestamp;
2013 }
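/*
 * Worked example of the growth above (hypothetical CPU numbers): the
 * first event seen on cpu 5 rounds 5 + 1 up to the next power of two,
 * so last_time[] is (re)allocated to 8 entries, slots ncpu..7 are
 * zeroed, and ncpu becomes 8; later events on cpus 0-7 then skip the
 * realloc path entirely.
 */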
2014
2015 /* returns last time this event was seen on the given cpu */
2016 static u64 evsel__get_time(struct evsel *evsel, u32 cpu)
2017 {
2018 struct evsel_runtime *r = evsel__get_runtime(evsel);
2019
2020 if ((r == NULL) || (r->last_time == NULL) || (cpu >= r->ncpu))
2021 return 0;
2022
2023 return r->last_time[cpu];
2024 }
2025
2026 static int comm_width = 30;
2027
2028 static char *timehist_get_commstr(struct thread *thread)
2029 {
2030 static char str[32];
2031 const char *comm = thread__comm_str(thread);
2032 pid_t tid = thread__tid(thread);
2033 pid_t pid = thread__pid(thread);
2034 int n;
2035
2036 if (pid == 0)
2037 n = scnprintf(str, sizeof(str), "%s", comm);
2038
2039 else if (tid != pid)
2040 n = scnprintf(str, sizeof(str), "%s[%d/%d]", comm, tid, pid);
2041
2042 else
2043 n = scnprintf(str, sizeof(str), "%s[%d]", comm, tid);
2044
2045 if (n > comm_width)
2046 comm_width = n;
2047
2048 return str;
2049 }
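/*
 * Hypothetical outputs: a pid-0 task prints just its comm, a
 * single-threaded task prints as "bash[1234]", and a secondary thread
 * prints as "worker[1235/1234]" (tid/pid); comm_width is widened to
 * the largest string produced so far so later columns stay aligned.
 */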
2050
2051 /* prio field format: xxx or xxx->yyy */
2052 #define MAX_PRIO_STR_LEN 8
2053 static char *timehist_get_priostr(struct evsel *evsel,
2054 struct thread *thread,
2055 struct perf_sample *sample)
2056 {
2057 static char prio_str[16];
2058 int prev_prio = (int)evsel__intval(evsel, sample, "prev_prio");
2059 struct thread_runtime *tr = thread__priv(thread);
2060
2061 if (tr->prio != prev_prio && tr->prio != -1)
2062 scnprintf(prio_str, sizeof(prio_str), "%d->%d", tr->prio, prev_prio);
2063 else
2064 scnprintf(prio_str, sizeof(prio_str), "%d", prev_prio);
2065
2066 return prio_str;
2067 }
2068
2069 static void timehist_header(struct perf_sched *sched)
2070 {
2071 u32 ncpus = sched->max_cpu.cpu + 1;
2072 u32 i, j;
2073
2074 printf("%15s %6s ", "time", "cpu");
2075
2076 if (sched->show_cpu_visual) {
2077 printf(" ");
2078 for (i = 0, j = 0; i < ncpus; ++i) {
2079 printf("%x", j++);
2080 if (j > 15)
2081 j = 0;
2082 }
2083 printf(" ");
2084 }
2085
2086 if (sched->show_prio) {
2087 printf(" %-*s %-*s %9s %9s %9s",
2088 comm_width, "task name", MAX_PRIO_STR_LEN, "prio",
2089 "wait time", "sch delay", "run time");
2090 } else {
2091 printf(" %-*s %9s %9s %9s", comm_width,
2092 "task name", "wait time", "sch delay", "run time");
2093 }
2094
2095 if (sched->show_state)
2096 printf(" %s", "state");
2097
2098 printf("\n");
2099
2100 /*
2101 * units row
2102 */
2103 printf("%15s %-6s ", "", "");
2104
2105 if (sched->show_cpu_visual)
2106 printf(" %*s ", ncpus, "");
2107
2108 if (sched->show_prio) {
2109 printf(" %-*s %-*s %9s %9s %9s",
2110 comm_width, "[tid/pid]", MAX_PRIO_STR_LEN, "",
2111 "(msec)", "(msec)", "(msec)");
2112 } else {
2113 printf(" %-*s %9s %9s %9s", comm_width,
2114 "[tid/pid]", "(msec)", "(msec)", "(msec)");
2115 }
2116
2117 if (sched->show_state)
2118 printf(" %5s", "");
2119
2120 printf("\n");
2121
2122 /*
2123 * separator
2124 */
2125 printf("%.15s %.6s ", graph_dotted_line, graph_dotted_line);
2126
2127 if (sched->show_cpu_visual)
2128 printf(" %.*s ", ncpus, graph_dotted_line);
2129
2130 if (sched->show_prio) {
2131 printf(" %.*s %.*s %.9s %.9s %.9s",
2132 comm_width, graph_dotted_line, MAX_PRIO_STR_LEN, graph_dotted_line,
2133 graph_dotted_line, graph_dotted_line, graph_dotted_line);
2134 } else {
2135 printf(" %.*s %.9s %.9s %.9s", comm_width,
2136 graph_dotted_line, graph_dotted_line, graph_dotted_line,
2137 graph_dotted_line);
2138 }
2139
2140 if (sched->show_state)
2141 printf(" %.5s", graph_dotted_line);
2142
2143 printf("\n");
2144 }
2145
2146 static void timehist_print_sample(struct perf_sched *sched,
2147 struct evsel *evsel,
2148 struct perf_sample *sample,
2149 struct addr_location *al,
2150 struct thread *thread,
2151 u64 t, const char state)
2152 {
2153 struct thread_runtime *tr = thread__priv(thread);
2154 const char *next_comm = evsel__strval(evsel, sample, "next_comm");
2155 const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
2156 u32 max_cpus = sched->max_cpu.cpu + 1;
2157 char tstr[64];
2158 char nstr[30];
2159 u64 wait_time;
2160
2161 if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
2162 return;
2163
2164 timestamp__scnprintf_usec(t, tstr, sizeof(tstr));
2165 printf("%15s [%04d] ", tstr, sample->cpu);
2166
2167 if (sched->show_cpu_visual) {
2168 u32 i;
2169 char c;
2170
2171 printf(" ");
2172 for (i = 0; i < max_cpus; ++i) {
2173 /* flag idle times with 'i'; others are sched events */
2174 if (i == sample->cpu)
2175 c = (thread__tid(thread) == 0) ? 'i' : 's';
2176 else
2177 c = ' ';
2178 printf("%c", c);
2179 }
2180 printf(" ");
2181 }
2182
2183 printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2184
2185 if (sched->show_prio)
2186 printf(" %-*s ", MAX_PRIO_STR_LEN, timehist_get_priostr(evsel, thread, sample));
2187
2188 wait_time = tr->dt_sleep + tr->dt_iowait + tr->dt_preempt;
2189 print_sched_time(wait_time, 6);
2190
2191 print_sched_time(tr->dt_delay, 6);
2192 print_sched_time(tr->dt_run, 6);
2193
2194 if (sched->show_state)
2195 printf(" %5c ", thread__tid(thread) == 0 ? 'I' : state);
2196
2197 if (sched->show_next) {
2198 snprintf(nstr, sizeof(nstr), "next: %s[%d]", next_comm, next_pid);
2199 printf(" %-*s", comm_width, nstr);
2200 }
2201
2202 if (sched->show_wakeups && !sched->show_next)
2203 printf(" %-*s", comm_width, "");
2204
2205 if (thread__tid(thread) == 0)
2206 goto out;
2207
2208 if (sched->show_callchain)
2209 printf(" ");
2210
2211 sample__fprintf_sym(sample, al, 0,
2212 EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
2213 EVSEL__PRINT_CALLCHAIN_ARROW |
2214 EVSEL__PRINT_SKIP_IGNORED,
2215 get_tls_callchain_cursor(), symbol_conf.bt_stop_list, stdout);
2216
2217 out:
2218 printf("\n");
2219 }
2220
2221 /*
2222 * Explanation of delta-time stats:
2223 *
2224 * t = time of current schedule out event
2225 * tprev = time of previous sched out event
2226 * also time of schedule-in event for current task
2227 * last_time = time of last sched change event for current task
2228 * (i.e., the time the process was last scheduled out)
2229 * ready_to_run = time of wakeup for current task
2230 *
2231 * -----|------------|------------|------------|------
2232 * last ready tprev t
2233 * time to run
2234 *
2235 * |-------- dt_wait --------|
2236 * |- dt_delay -|-- dt_run --|
2237 *
2238 * dt_run = run time of current task
2239 * dt_wait = time between last schedule out event for task and tprev
2240 * represents time spent off the cpu
2241 * dt_delay = time between wakeup and schedule-in of task
2242 */
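/*
 * A hypothetical worked example of the timeline above: with
 * last_time = 100, ready_to_run = 400, tprev = 600 and t = 900 (ns),
 * dt_wait = 600 - 100 = 500 (accounted to dt_sleep, dt_iowait or
 * dt_preempt depending on last_state), dt_delay = 600 - 400 = 200,
 * and dt_run = 900 - 600 = 300.
 */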
2243
2244 static void timehist_update_runtime_stats(struct thread_runtime *r,
2245 u64 t, u64 tprev)
2246 {
2247 r->dt_delay = 0;
2248 r->dt_sleep = 0;
2249 r->dt_iowait = 0;
2250 r->dt_preempt = 0;
2251 r->dt_run = 0;
2252
2253 if (tprev) {
2254 r->dt_run = t - tprev;
2255 if (r->ready_to_run) {
2256 if (r->ready_to_run > tprev)
2257 pr_debug("time travel: wakeup time for task > previous sched_switch event\n");
2258 else
2259 r->dt_delay = tprev - r->ready_to_run;
2260 }
2261
2262 if (r->last_time > tprev)
2263 pr_debug("time travel: last sched out time for task > previous sched_switch event\n");
2264 else if (r->last_time) {
2265 u64 dt_wait = tprev - r->last_time;
2266
2267 if (r->last_state == 'R')
2268 r->dt_preempt = dt_wait;
2269 else if (r->last_state == 'D')
2270 r->dt_iowait = dt_wait;
2271 else
2272 r->dt_sleep = dt_wait;
2273 }
2274 }
2275
2276 update_stats(&r->run_stats, r->dt_run);
2277
2278 r->total_run_time += r->dt_run;
2279 r->total_delay_time += r->dt_delay;
2280 r->total_sleep_time += r->dt_sleep;
2281 r->total_iowait_time += r->dt_iowait;
2282 r->total_preempt_time += r->dt_preempt;
2283 }
2284
2285 static bool is_idle_sample(struct perf_sample *sample,
2286 struct evsel *evsel)
2287 {
2288 /* pid 0 == swapper == idle task */
2289 if (evsel__name_is(evsel, "sched:sched_switch"))
2290 return evsel__intval(evsel, sample, "prev_pid") == 0;
2291
2292 return sample->pid == 0;
2293 }
2294
2295 static void save_task_callchain(struct perf_sched *sched,
2296 struct perf_sample *sample,
2297 struct evsel *evsel,
2298 struct machine *machine)
2299 {
2300 struct callchain_cursor *cursor;
2301 struct thread *thread;
2302
2303 /* want the main thread for the process - it has the maps */
2304 thread = machine__findnew_thread(machine, sample->pid, sample->pid);
2305 if (thread == NULL) {
2306 pr_debug("Failed to get thread for pid %d.\n", sample->pid);
2307 return;
2308 }
2309
2310 if (!sched->show_callchain || sample->callchain == NULL)
2311 return;
2312
2313 cursor = get_tls_callchain_cursor();
2314
2315 if (thread__resolve_callchain(thread, cursor, evsel, sample,
2316 NULL, NULL, sched->max_stack + 2) != 0) {
2317 if (verbose > 0)
2318 pr_err("Failed to resolve callchain. Skipping\n");
2319
2320 return;
2321 }
2322
2323 callchain_cursor_commit(cursor);
2324
2325 while (true) {
2326 struct callchain_cursor_node *node;
2327 struct symbol *sym;
2328
2329 node = callchain_cursor_current(cursor);
2330 if (node == NULL)
2331 break;
2332
2333 sym = node->ms.sym;
2334 if (sym) {
2335 if (!strcmp(sym->name, "schedule") ||
2336 !strcmp(sym->name, "__schedule") ||
2337 !strcmp(sym->name, "preempt_schedule"))
2338 sym->ignore = 1;
2339 }
2340
2341 callchain_cursor_advance(cursor);
2342 }
2343 }
2344
2345 static int init_idle_thread(struct thread *thread)
2346 {
2347 struct idle_thread_runtime *itr;
2348
2349 thread__set_comm(thread, idle_comm, 0);
2350
2351 itr = zalloc(sizeof(*itr));
2352 if (itr == NULL)
2353 return -ENOMEM;
2354
2355 init_prio(&itr->tr);
2356 init_stats(&itr->tr.run_stats);
2357 callchain_init(&itr->callchain);
2358 callchain_cursor_reset(&itr->cursor);
2359 thread__set_priv(thread, itr);
2360
2361 return 0;
2362 }
2363
2364 /*
2365 * Track idle stats per cpu by maintaining a local thread
2366 * struct for the idle task on each cpu.
2367 */
2368 static int init_idle_threads(int ncpu)
2369 {
2370 int i, ret;
2371
2372 idle_threads = zalloc(ncpu * sizeof(struct thread *));
2373 if (!idle_threads)
2374 return -ENOMEM;
2375
2376 idle_max_cpu = ncpu;
2377
2378 /* allocate the actual thread struct if needed */
2379 for (i = 0; i < ncpu; ++i) {
2380 idle_threads[i] = thread__new(0, 0);
2381 if (idle_threads[i] == NULL)
2382 return -ENOMEM;
2383
2384 ret = init_idle_thread(idle_threads[i]);
2385 if (ret < 0)
2386 return ret;
2387 }
2388
2389 return 0;
2390 }
2391
2392 static void free_idle_threads(void)
2393 {
2394 int i;
2395
2396 if (idle_threads == NULL)
2397 return;
2398
2399 for (i = 0; i < idle_max_cpu; ++i) {
2400 if ((idle_threads[i]))
2401 thread__delete(idle_threads[i]);
2402 }
2403
2404 free(idle_threads);
2405 }
2406
2407 static struct thread *get_idle_thread(int cpu)
2408 {
2409 /*
2410 * expand/allocate array of pointers to local thread
2411 * structs if needed
2412 */
2413 if ((cpu >= idle_max_cpu) || (idle_threads == NULL)) {
2414 int i, j = __roundup_pow_of_two(cpu+1);
2415 void *p;
2416
2417 p = realloc(idle_threads, j * sizeof(struct thread *));
2418 if (!p)
2419 return NULL;
2420
2421 idle_threads = (struct thread **) p;
2422 for (i = idle_max_cpu; i < j; ++i)
2423 idle_threads[i] = NULL;
2424
2425 idle_max_cpu = j;
2426 }
2427
2428 /* allocate a new thread struct if needed */
2429 if (idle_threads[cpu] == NULL) {
2430 idle_threads[cpu] = thread__new(0, 0);
2431
2432 if (idle_threads[cpu]) {
2433 if (init_idle_thread(idle_threads[cpu]) < 0)
2434 return NULL;
2435 }
2436 }
2437
2438 return idle_threads[cpu];
2439 }
2440
2441 static void save_idle_callchain(struct perf_sched *sched,
2442 struct idle_thread_runtime *itr,
2443 struct perf_sample *sample)
2444 {
2445 struct callchain_cursor *cursor;
2446
2447 if (!sched->show_callchain || sample->callchain == NULL)
2448 return;
2449
2450 cursor = get_tls_callchain_cursor();
2451 if (cursor == NULL)
2452 return;
2453
2454 callchain_cursor__copy(&itr->cursor, cursor);
2455 }
2456
2457 static struct thread *timehist_get_thread(struct perf_sched *sched,
2458 struct perf_sample *sample,
2459 struct machine *machine,
2460 struct evsel *evsel)
2461 {
2462 struct thread *thread;
2463
2464 if (is_idle_sample(sample, evsel)) {
2465 thread = get_idle_thread(sample->cpu);
2466 if (thread == NULL)
2467 pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
2468
2469 } else {
2470 /* there were samples with tid 0 but non-zero pid */
2471 thread = machine__findnew_thread(machine, sample->pid,
2472 sample->tid ?: sample->pid);
2473 if (thread == NULL) {
2474 pr_debug("Failed to get thread for tid %d. skipping sample.\n",
2475 sample->tid);
2476 }
2477
2478 save_task_callchain(sched, sample, evsel, machine);
2479 if (sched->idle_hist) {
2480 struct thread *idle;
2481 struct idle_thread_runtime *itr;
2482
2483 idle = get_idle_thread(sample->cpu);
2484 if (idle == NULL) {
2485 pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
2486 return NULL;
2487 }
2488
2489 itr = thread__priv(idle);
2490 if (itr == NULL)
2491 return NULL;
2492
2493 itr->last_thread = thread;
2494
2495 /* copy the task callchain when entering idle */
2496 if (evsel__intval(evsel, sample, "next_pid") == 0)
2497 save_idle_callchain(sched, itr, sample);
2498 }
2499 }
2500
2501 return thread;
2502 }
2503
2504 static bool timehist_skip_sample(struct perf_sched *sched,
2505 struct thread *thread,
2506 struct evsel *evsel,
2507 struct perf_sample *sample)
2508 {
2509 bool rc = false;
2510 int prio = -1;
2511 struct thread_runtime *tr = NULL;
2512
2513 if (thread__is_filtered(thread)) {
2514 rc = true;
2515 sched->skipped_samples++;
2516 }
2517
2518 if (sched->prio_str) {
2519 /*
2520 * Because the priority may change while a task runs, first read
2521 * the priority saved from the previous sched_in event for the
2522 * current task. If no previous sched_in event was saved, fall
2523 * back to the priority in the current task's sched_out event.
2524 */
2525 tr = thread__get_runtime(thread);
2526 if (tr && tr->prio != -1)
2527 prio = tr->prio;
2528 else if (evsel__name_is(evsel, "sched:sched_switch"))
2529 prio = evsel__intval(evsel, sample, "prev_prio");
2530
2531 if (prio != -1 && !test_bit(prio, sched->prio_bitmap)) {
2532 rc = true;
2533 sched->skipped_samples++;
2534 }
2535 }
2536
2537 if (sched->idle_hist) {
2538 if (!evsel__name_is(evsel, "sched:sched_switch"))
2539 rc = true;
2540 else if (evsel__intval(evsel, sample, "prev_pid") != 0 &&
2541 evsel__intval(evsel, sample, "next_pid") != 0)
2542 rc = true;
2543 }
2544
2545 return rc;
2546 }
2547
2548 static void timehist_print_wakeup_event(struct perf_sched *sched,
2549 struct evsel *evsel,
2550 struct perf_sample *sample,
2551 struct machine *machine,
2552 struct thread *awakened)
2553 {
2554 struct thread *thread;
2555 char tstr[64];
2556
2557 thread = machine__findnew_thread(machine, sample->pid, sample->tid);
2558 if (thread == NULL)
2559 return;
2560
2561 /* show the wakeup unless both the woken and the waking task are filtered */
2562 if (timehist_skip_sample(sched, thread, evsel, sample) &&
2563 timehist_skip_sample(sched, awakened, evsel, sample)) {
2564 return;
2565 }
2566
2567 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2568 printf("%15s [%04d] ", tstr, sample->cpu);
2569 if (sched->show_cpu_visual)
2570 printf(" %*s ", sched->max_cpu.cpu + 1, "");
2571
2572 printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2573
2574 /* dt spacer */
2575 printf(" %9s %9s %9s ", "", "", "");
2576
2577 printf("awakened: %s", timehist_get_commstr(awakened));
2578
2579 printf("\n");
2580 }
2581
2582 static int timehist_sched_wakeup_ignore(const struct perf_tool *tool __maybe_unused,
2583 union perf_event *event __maybe_unused,
2584 struct evsel *evsel __maybe_unused,
2585 struct perf_sample *sample __maybe_unused,
2586 struct machine *machine __maybe_unused)
2587 {
2588 return 0;
2589 }
2590
2591 static int timehist_sched_wakeup_event(const struct perf_tool *tool,
2592 union perf_event *event __maybe_unused,
2593 struct evsel *evsel,
2594 struct perf_sample *sample,
2595 struct machine *machine)
2596 {
2597 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2598 struct thread *thread;
2599 struct thread_runtime *tr = NULL;
2600 /* want the pid of the awakened task, not the pid in the sample */
2601 const u32 pid = evsel__intval(evsel, sample, "pid");
2602
2603 thread = machine__findnew_thread(machine, 0, pid);
2604 if (thread == NULL)
2605 return -1;
2606
2607 tr = thread__get_runtime(thread);
2608 if (tr == NULL)
2609 return -1;
2610
2611 if (tr->ready_to_run == 0)
2612 tr->ready_to_run = sample->time;
2613
2614 /* show wakeups if requested */
2615 if (sched->show_wakeups &&
2616 !perf_time__skip_sample(&sched->ptime, sample->time))
2617 timehist_print_wakeup_event(sched, evsel, sample, machine, thread);
2618
2619 return 0;
2620 }
2621
2622 static void timehist_print_migration_event(struct perf_sched *sched,
2623 struct evsel *evsel,
2624 struct perf_sample *sample,
2625 struct machine *machine,
2626 struct thread *migrated)
2627 {
2628 struct thread *thread;
2629 char tstr[64];
2630 u32 max_cpus;
2631 u32 ocpu, dcpu;
2632
2633 if (sched->summary_only)
2634 return;
2635
2636 max_cpus = sched->max_cpu.cpu + 1;
2637 ocpu = evsel__intval(evsel, sample, "orig_cpu");
2638 dcpu = evsel__intval(evsel, sample, "dest_cpu");
2639
2640 thread = machine__findnew_thread(machine, sample->pid, sample->tid);
2641 if (thread == NULL)
2642 return;
2643
2644 if (timehist_skip_sample(sched, thread, evsel, sample) &&
2645 timehist_skip_sample(sched, migrated, evsel, sample)) {
2646 return;
2647 }
2648
2649 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2650 printf("%15s [%04d] ", tstr, sample->cpu);
2651
2652 if (sched->show_cpu_visual) {
2653 u32 i;
2654 char c;
2655
2656 printf(" ");
2657 for (i = 0; i < max_cpus; ++i) {
2658 c = (i == sample->cpu) ? 'm' : ' ';
2659 printf("%c", c);
2660 }
2661 printf(" ");
2662 }
2663
2664 printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2665
2666 /* dt spacer */
2667 printf(" %9s %9s %9s ", "", "", "");
2668
2669 printf("migrated: %s", timehist_get_commstr(migrated));
2670 printf(" cpu %d => %d", ocpu, dcpu);
2671
2672 printf("\n");
2673 }
2674
2675 static int timehist_migrate_task_event(const struct perf_tool *tool,
2676 union perf_event *event __maybe_unused,
2677 struct evsel *evsel,
2678 struct perf_sample *sample,
2679 struct machine *machine)
2680 {
2681 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2682 struct thread *thread;
2683 struct thread_runtime *tr = NULL;
2684 /* want the pid of the migrated task, not the pid in the sample */
2685 const u32 pid = evsel__intval(evsel, sample, "pid");
2686
2687 thread = machine__findnew_thread(machine, 0, pid);
2688 if (thread == NULL)
2689 return -1;
2690
2691 tr = thread__get_runtime(thread);
2692 if (tr == NULL)
2693 return -1;
2694
2695 tr->migrations++;
2696
2697 /* show migrations if requested */
2698 timehist_print_migration_event(sched, evsel, sample, machine, thread);
2699
2700 return 0;
2701 }
2702
2703 static void timehist_update_task_prio(struct evsel *evsel,
2704 struct perf_sample *sample,
2705 struct machine *machine)
2706 {
2707 struct thread *thread;
2708 struct thread_runtime *tr = NULL;
2709 const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
2710 const u32 next_prio = evsel__intval(evsel, sample, "next_prio");
2711
2712 if (next_pid == 0)
2713 thread = get_idle_thread(sample->cpu);
2714 else
2715 thread = machine__findnew_thread(machine, -1, next_pid);
2716
2717 if (thread == NULL)
2718 return;
2719
2720 tr = thread__get_runtime(thread);
2721 if (tr == NULL)
2722 return;
2723
2724 tr->prio = next_prio;
2725 }
2726
2727 static int timehist_sched_change_event(const struct perf_tool *tool,
2728 union perf_event *event,
2729 struct evsel *evsel,
2730 struct perf_sample *sample,
2731 struct machine *machine)
2732 {
2733 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2734 struct perf_time_interval *ptime = &sched->ptime;
2735 struct addr_location al;
2736 struct thread *thread;
2737 struct thread_runtime *tr = NULL;
2738 u64 tprev, t = sample->time;
2739 int rc = 0;
2740 const char state = evsel__taskstate(evsel, sample, "prev_state");
2741
2742 addr_location__init(&al);
2743 if (machine__resolve(machine, &al, sample) < 0) {
2744 pr_err("problem processing %d event. skipping it\n",
2745 event->header.type);
2746 rc = -1;
2747 goto out;
2748 }
2749
2750 if (sched->show_prio || sched->prio_str)
2751 timehist_update_task_prio(evsel, sample, machine);
2752
2753 thread = timehist_get_thread(sched, sample, machine, evsel);
2754 if (thread == NULL) {
2755 rc = -1;
2756 goto out;
2757 }
2758
2759 if (timehist_skip_sample(sched, thread, evsel, sample))
2760 goto out;
2761
2762 tr = thread__get_runtime(thread);
2763 if (tr == NULL) {
2764 rc = -1;
2765 goto out;
2766 }
2767
2768 tprev = evsel__get_time(evsel, sample->cpu);
2769
2770 /*
2771 * If a start time is given:
2772 * - sample time is before the window the user cares about - skip the sample
2773 * - tprev is before the window - reset it to the start of the window
2774 */
2775 if (ptime->start && ptime->start > t)
2776 goto out;
2777
2778 if (tprev && ptime->start > tprev)
2779 tprev = ptime->start;
2780
2781 /*
2782 * If an end time is given:
2783 * - the previous sched event is out of the window - we are done
2784 * - the sample time is beyond the window the user cares about -
2785 * clamp it to close out stats for the time window of interest
2786 * - if tprev is 0, i.e. the sched_in event for the current task
2787 * was not recorded, we cannot tell whether the event falls
2788 * within the time window of interest - ignore it
2789 */
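/*
 * For instance (hypothetical numbers): with --time 2.0,3.0 a switch
 * at t = 3.2s with tprev = 2.8s is clamped to t = 3.0s so the
 * 2.8-3.0 interval is still accounted, whereas tprev = 3.1s lies
 * entirely past the window and the sample is skipped.
 */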
2790 if (ptime->end) {
2791 if (!tprev || tprev > ptime->end)
2792 goto out;
2793
2794 if (t > ptime->end)
2795 t = ptime->end;
2796 }
2797
2798 if (!sched->idle_hist || thread__tid(thread) == 0) {
2799 if (!cpu_list || test_bit(sample->cpu, cpu_bitmap))
2800 timehist_update_runtime_stats(tr, t, tprev);
2801
2802 if (sched->idle_hist) {
2803 struct idle_thread_runtime *itr = (void *)tr;
2804 struct thread_runtime *last_tr;
2805
2806 if (itr->last_thread == NULL)
2807 goto out;
2808
2809 /* add current idle time as last thread's runtime */
2810 last_tr = thread__get_runtime(itr->last_thread);
2811 if (last_tr == NULL)
2812 goto out;
2813
2814 timehist_update_runtime_stats(last_tr, t, tprev);
2815 /*
2816 * remove the delta times of the last thread as they are not
2817 * updated and would otherwise show stale values next time;
2818 * we only care about total run time and run stats here.
2819 */
2820 last_tr->dt_run = 0;
2821 last_tr->dt_delay = 0;
2822 last_tr->dt_sleep = 0;
2823 last_tr->dt_iowait = 0;
2824 last_tr->dt_preempt = 0;
2825
2826 if (itr->cursor.nr)
2827 callchain_append(&itr->callchain, &itr->cursor, t - tprev);
2828
2829 itr->last_thread = NULL;
2830 }
2831
2832 if (!sched->summary_only)
2833 timehist_print_sample(sched, evsel, sample, &al, thread, t, state);
2834 }
2835
2836 out:
2837 if (sched->hist_time.start == 0 && t >= ptime->start)
2838 sched->hist_time.start = t;
2839 if (ptime->end == 0 || t <= ptime->end)
2840 sched->hist_time.end = t;
2841
2842 if (tr) {
2843 /* time of this sched_switch event becomes last time task seen */
2844 tr->last_time = sample->time;
2845
2846 /* last state is used to determine where to account wait time */
2847 tr->last_state = state;
2848
2849 /* sched out event for task so reset ready to run time */
2850 if (state == 'R')
2851 tr->ready_to_run = t;
2852 else
2853 tr->ready_to_run = 0;
2854 }
2855
2856 evsel__save_time(evsel, sample->time, sample->cpu);
2857
2858 addr_location__exit(&al);
2859 return rc;
2860 }
2861
2862 static int timehist_sched_switch_event(const struct perf_tool *tool,
2863 union perf_event *event,
2864 struct evsel *evsel,
2865 struct perf_sample *sample,
2866 struct machine *machine __maybe_unused)
2867 {
2868 return timehist_sched_change_event(tool, event, evsel, sample, machine);
2869 }
2870
2871 static int process_lost(const struct perf_tool *tool __maybe_unused,
2872 union perf_event *event,
2873 struct perf_sample *sample,
2874 struct machine *machine __maybe_unused)
2875 {
2876 char tstr[64];
2877
2878 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2879 printf("%15s ", tstr);
2880 printf("lost %" PRI_lu64 " events on cpu %d\n", event->lost.lost, sample->cpu);
2881
2882 return 0;
2883 }
2884
2885
2886 static void print_thread_runtime(struct thread *t,
2887 struct thread_runtime *r)
2888 {
2889 double mean = avg_stats(&r->run_stats);
2890 float stddev;
2891
2892 printf("%*s %5d %9" PRIu64 " ",
2893 comm_width, timehist_get_commstr(t), thread__ppid(t),
2894 (u64) r->run_stats.n);
2895
2896 print_sched_time(r->total_run_time, 8);
2897 stddev = rel_stddev_stats(stddev_stats(&r->run_stats), mean);
2898 print_sched_time(r->run_stats.min, 6);
2899 printf(" ");
2900 print_sched_time((u64) mean, 6);
2901 printf(" ");
2902 print_sched_time(r->run_stats.max, 6);
2903 printf(" ");
2904 printf("%5.2f", stddev);
2905 printf(" %5" PRIu64, r->migrations);
2906 printf("\n");
2907 }
2908
2909 static void print_thread_waittime(struct thread *t,
2910 struct thread_runtime *r)
2911 {
2912 printf("%*s %5d %9" PRIu64 " ",
2913 comm_width, timehist_get_commstr(t), thread__ppid(t),
2914 (u64) r->run_stats.n);
2915
2916 print_sched_time(r->total_run_time, 8);
2917 print_sched_time(r->total_sleep_time, 6);
2918 printf(" ");
2919 print_sched_time(r->total_iowait_time, 6);
2920 printf(" ");
2921 print_sched_time(r->total_preempt_time, 6);
2922 printf(" ");
2923 print_sched_time(r->total_delay_time, 6);
2924 printf("\n");
2925 }
2926
2927 struct total_run_stats {
2928 struct perf_sched *sched;
2929 u64 sched_count;
2930 u64 task_count;
2931 u64 total_run_time;
2932 };
2933
2934 static int show_thread_runtime(struct thread *t, void *priv)
2935 {
2936 struct total_run_stats *stats = priv;
2937 struct thread_runtime *r;
2938
2939 if (thread__is_filtered(t))
2940 return 0;
2941
2942 r = thread__priv(t);
2943 if (r && r->run_stats.n) {
2944 stats->task_count++;
2945 stats->sched_count += r->run_stats.n;
2946 stats->total_run_time += r->total_run_time;
2947
2948 if (stats->sched->show_state)
2949 print_thread_waittime(t, r);
2950 else
2951 print_thread_runtime(t, r);
2952 }
2953
2954 return 0;
2955 }
2956
2957 static size_t callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
2958 {
2959 const char *sep = " <- ";
2960 struct callchain_list *chain;
2961 size_t ret = 0;
2962 char bf[1024];
2963 bool first;
2964
2965 if (node == NULL)
2966 return 0;
2967
2968 ret = callchain__fprintf_folded(fp, node->parent);
2969 first = (ret == 0);
2970
2971 list_for_each_entry(chain, &node->val, list) {
2972 if (chain->ip >= PERF_CONTEXT_MAX)
2973 continue;
2974 if (chain->ms.sym && chain->ms.sym->ignore)
2975 continue;
2976 ret += fprintf(fp, "%s%s", first ? "" : sep,
2977 callchain_list__sym_name(chain, bf, sizeof(bf),
2978 false));
2979 first = false;
2980 }
2981
2982 return ret;
2983 }
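/*
 * Sketch of the output shape (hypothetical symbols): a node's chain
 * is emitted on one line as "futex_wait <- do_futex <- sys_futex",
 * skipping context markers (ip >= PERF_CONTEXT_MAX) and symbols
 * flagged as ignored, such as the schedule variants marked in
 * save_task_callchain().
 */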
2984
2985 static size_t timehist_print_idlehist_callchain(struct rb_root_cached *root)
2986 {
2987 size_t ret = 0;
2988 FILE *fp = stdout;
2989 struct callchain_node *chain;
2990 struct rb_node *rb_node = rb_first_cached(root);
2991
2992 printf(" %16s %8s %s\n", "Idle time (msec)", "Count", "Callchains");
2993 printf(" %.16s %.8s %.50s\n", graph_dotted_line, graph_dotted_line,
2994 graph_dotted_line);
2995
2996 while (rb_node) {
2997 chain = rb_entry(rb_node, struct callchain_node, rb_node);
2998 rb_node = rb_next(rb_node);
2999
3000 ret += fprintf(fp, " ");
3001 print_sched_time(chain->hit, 12);
3002 ret += 16; /* print_sched_time returns 2nd arg + 4 */
3003 ret += fprintf(fp, " %8d ", chain->count);
3004 ret += callchain__fprintf_folded(fp, chain);
3005 ret += fprintf(fp, "\n");
3006 }
3007
3008 return ret;
3009 }
3010
3011 static void timehist_print_summary(struct perf_sched *sched,
3012 struct perf_session *session)
3013 {
3014 struct machine *m = &session->machines.host;
3015 struct total_run_stats totals;
3016 u64 task_count;
3017 struct thread *t;
3018 struct thread_runtime *r;
3019 int i;
3020 u64 hist_time = sched->hist_time.end - sched->hist_time.start;
3021
3022 memset(&totals, 0, sizeof(totals));
3023 totals.sched = sched;
3024
3025 if (sched->idle_hist) {
3026 printf("\nIdle-time summary\n");
3027 printf("%*s parent sched-out ", comm_width, "comm");
3028 printf(" idle-time min-idle avg-idle max-idle stddev migrations\n");
3029 } else if (sched->show_state) {
3030 printf("\nWait-time summary\n");
3031 printf("%*s parent sched-in ", comm_width, "comm");
3032 printf(" run-time sleep iowait preempt delay\n");
3033 } else {
3034 printf("\nRuntime summary\n");
3035 printf("%*s parent sched-in ", comm_width, "comm");
3036 printf(" run-time min-run avg-run max-run stddev migrations\n");
3037 }
3038 printf("%*s (count) ", comm_width, "");
3039 printf(" (msec) (msec) (msec) (msec) %s\n",
3040 sched->show_state ? "(msec)" : "%");
3041 printf("%.117s\n", graph_dotted_line);
3042
3043 machine__for_each_thread(m, show_thread_runtime, &totals);
3044 task_count = totals.task_count;
3045 if (!task_count)
3046 printf("<no still running tasks>\n");
3047
3048 /* CPU idle stats not tracked when samples were skipped */
3049 if (sched->skipped_samples && !sched->idle_hist)
3050 return;
3051
3052 printf("\nIdle stats:\n");
3053 for (i = 0; i < idle_max_cpu; ++i) {
3054 if (cpu_list && !test_bit(i, cpu_bitmap))
3055 continue;
3056
3057 t = idle_threads[i];
3058 if (!t)
3059 continue;
3060
3061 r = thread__priv(t);
3062 if (r && r->run_stats.n) {
3063 totals.sched_count += r->run_stats.n;
3064 printf(" CPU %2d idle for ", i);
3065 print_sched_time(r->total_run_time, 6);
3066 printf(" msec (%6.2f%%)\n", 100.0 * r->total_run_time / hist_time);
3067 } else
3068 printf(" CPU %2d idle entire time window\n", i);
3069 }
3070
3071 if (sched->idle_hist && sched->show_callchain) {
3072 callchain_param.mode = CHAIN_FOLDED;
3073 callchain_param.value = CCVAL_PERIOD;
3074
3075 callchain_register_param(&callchain_param);
3076
3077 printf("\nIdle stats by callchain:\n");
3078 for (i = 0; i < idle_max_cpu; ++i) {
3079 struct idle_thread_runtime *itr;
3080
3081 t = idle_threads[i];
3082 if (!t)
3083 continue;
3084
3085 itr = thread__priv(t);
3086 if (itr == NULL)
3087 continue;
3088
3089 callchain_param.sort(&itr->sorted_root.rb_root, &itr->callchain,
3090 0, &callchain_param);
3091
3092 printf(" CPU %2d:", i);
3093 print_sched_time(itr->tr.total_run_time, 6);
3094 printf(" msec\n");
3095 timehist_print_idlehist_callchain(&itr->sorted_root);
3096 printf("\n");
3097 }
3098 }
3099
3100 printf("\n"
3101 " Total number of unique tasks: %" PRIu64 "\n"
3102 "Total number of context switches: %" PRIu64 "\n",
3103 totals.task_count, totals.sched_count);
3104
3105 printf(" Total run time (msec): ");
3106 print_sched_time(totals.total_run_time, 2);
3107 printf("\n");
3108
3109 printf(" Total scheduling time (msec): ");
3110 print_sched_time(hist_time, 2);
3111 printf(" (x %d)\n", sched->max_cpu.cpu);
3112 }
3113
3114 typedef int (*sched_handler)(const struct perf_tool *tool,
3115 union perf_event *event,
3116 struct evsel *evsel,
3117 struct perf_sample *sample,
3118 struct machine *machine);
3119
3120 static int perf_timehist__process_sample(const struct perf_tool *tool,
3121 union perf_event *event,
3122 struct perf_sample *sample,
3123 struct evsel *evsel,
3124 struct machine *machine)
3125 {
3126 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
3127 int err = 0;
3128 struct perf_cpu this_cpu = {
3129 .cpu = sample->cpu,
3130 };
3131
3132 if (this_cpu.cpu > sched->max_cpu.cpu)
3133 sched->max_cpu = this_cpu;
3134
3135 if (evsel->handler != NULL) {
3136 sched_handler f = evsel->handler;
3137
3138 err = f(tool, event, evsel, sample, machine);
3139 }
3140
3141 return err;
3142 }
3143
3144 static int timehist_check_attr(struct perf_sched *sched,
3145 struct evlist *evlist)
3146 {
3147 struct evsel *evsel;
3148 struct evsel_runtime *er;
3149
3150 list_for_each_entry(evsel, &evlist->core.entries, core.node) {
3151 er = evsel__get_runtime(evsel);
3152 if (er == NULL) {
3153 pr_err("Failed to allocate memory for evsel runtime data\n");
3154 return -1;
3155 }
3156
3157 /* only need to save callchain related to sched_switch event */
3158 if (sched->show_callchain &&
3159 evsel__name_is(evsel, "sched:sched_switch") &&
3160 !evsel__has_callchain(evsel)) {
3161 pr_info("Samples of sched_switch event do not have callchains.\n");
3162 sched->show_callchain = 0;
3163 symbol_conf.use_callchain = 0;
3164 }
3165 }
3166
3167 return 0;
3168 }
3169
3170 static int timehist_parse_prio_str(struct perf_sched *sched)
3171 {
3172 char *p;
3173 unsigned long start_prio, end_prio;
3174 const char *str = sched->prio_str;
3175
3176 if (!str)
3177 return 0;
3178
3179 while (isdigit(*str)) {
3180 p = NULL;
3181 start_prio = strtoul(str, &p, 0);
3182 if (start_prio >= MAX_PRIO || (*p != '\0' && *p != ',' && *p != '-'))
3183 return -1;
3184
3185 if (*p == '-') {
3186 str = ++p;
3187 p = NULL;
3188 end_prio = strtoul(str, &p, 0);
3189
3190 if (end_prio >= MAX_PRIO || (*p != '\0' && *p != ','))
3191 return -1;
3192
3193 if (end_prio < start_prio)
3194 return -1;
3195 } else {
3196 end_prio = start_prio;
3197 }
3198
3199 for (; start_prio <= end_prio; start_prio++)
3200 __set_bit(start_prio, sched->prio_bitmap);
3201
3202 if (*p)
3203 ++p;
3204
3205 str = p;
3206 }
3207
3208 return 0;
3209 }
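/*
 * Example inputs (hypothetical): "140" is rejected since it is not
 * below MAX_PRIO, "9" sets only bit 9, and "0-9,120-139" sets bits
 * 0..9 and 120..139 in sched->prio_bitmap.
 */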
3210
3211 static int perf_sched__timehist(struct perf_sched *sched)
3212 {
3213 struct evsel_str_handler handlers[] = {
3214 { "sched:sched_switch", timehist_sched_switch_event, },
3215 { "sched:sched_wakeup", timehist_sched_wakeup_event, },
3216 { "sched:sched_waking", timehist_sched_wakeup_event, },
3217 { "sched:sched_wakeup_new", timehist_sched_wakeup_event, },
3218 };
3219 const struct evsel_str_handler migrate_handlers[] = {
3220 { "sched:sched_migrate_task", timehist_migrate_task_event, },
3221 };
3222 struct perf_data data = {
3223 .path = input_name,
3224 .mode = PERF_DATA_MODE_READ,
3225 .force = sched->force,
3226 };
3227
3228 struct perf_session *session;
3229 struct evlist *evlist;
3230 int err = -1;
3231
3232 /*
3233 * event handlers for timehist option
3234 */
3235 sched->tool.sample = perf_timehist__process_sample;
3236 sched->tool.mmap = perf_event__process_mmap;
3237 sched->tool.comm = perf_event__process_comm;
3238 sched->tool.exit = perf_event__process_exit;
3239 sched->tool.fork = perf_event__process_fork;
3240 sched->tool.lost = process_lost;
3241 sched->tool.attr = perf_event__process_attr;
3242 sched->tool.tracing_data = perf_event__process_tracing_data;
3243 sched->tool.build_id = perf_event__process_build_id;
3244
3245 sched->tool.ordering_requires_timestamps = true;
3246
3247 symbol_conf.use_callchain = sched->show_callchain;
3248
3249 session = perf_session__new(&data, &sched->tool);
3250 if (IS_ERR(session))
3251 return PTR_ERR(session);
3252
3253 if (cpu_list) {
3254 err = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap);
3255 if (err < 0)
3256 goto out;
3257 }
3258
3259 evlist = session->evlist;
3260
3261 symbol__init(&session->header.env);
3262
3263 if (perf_time__parse_str(&sched->ptime, sched->time_str) != 0) {
3264 pr_err("Invalid time string\n");
3265 err = -EINVAL;
3266 goto out;
3267 }
3268
3269 if (timehist_check_attr(sched, evlist) != 0)
3270 goto out;
3271
3272 if (timehist_parse_prio_str(sched) != 0) {
3273 pr_err("Invalid prio string\n");
3274 goto out;
3275 }
3276
3277 setup_pager();
3278
3279 /* prefer sched_waking if it is captured */
3280 if (evlist__find_tracepoint_by_name(session->evlist, "sched:sched_waking"))
3281 handlers[1].handler = timehist_sched_wakeup_ignore;
3282
3283 /* setup per-evsel handlers */
3284 if (perf_session__set_tracepoints_handlers(session, handlers))
3285 goto out;
3286
3287 /* a sched_switch event, at a minimum, needs to exist */
3288 if (!evlist__find_tracepoint_by_name(session->evlist, "sched:sched_switch")) {
3289 pr_err("No sched_switch events found. Have you run 'perf sched record'?\n");
3290 goto out;
3291 }
3292
3293 if (sched->show_migrations &&
3294 perf_session__set_tracepoints_handlers(session, migrate_handlers))
3295 goto out;
3296
3297 /* pre-allocate struct for per-CPU idle stats */
3298 sched->max_cpu.cpu = session->header.env.nr_cpus_online;
3299 if (sched->max_cpu.cpu == 0)
3300 sched->max_cpu.cpu = 4;
3301 if (init_idle_threads(sched->max_cpu.cpu))
3302 goto out;
3303
3304 /* summary_only implies summary option, but don't overwrite summary if set */
3305 if (sched->summary_only)
3306 sched->summary = sched->summary_only;
3307
3308 if (!sched->summary_only)
3309 timehist_header(sched);
3310
3311 err = perf_session__process_events(session);
3312 if (err) {
3313 pr_err("Failed to process events, error %d\n", err);
3314 goto out;
3315 }
3316
3317 sched->nr_events = evlist->stats.nr_events[0];
3318 sched->nr_lost_events = evlist->stats.total_lost;
3319 sched->nr_lost_chunks = evlist->stats.nr_events[PERF_RECORD_LOST];
3320
3321 if (sched->summary)
3322 timehist_print_summary(sched, session);
3323
3324 out:
3325 free_idle_threads();
3326 perf_session__delete(session);
3327
3328 return err;
3329 }
3330
3331
3332 static void print_bad_events(struct perf_sched *sched)
3333 {
3334 if (sched->nr_unordered_timestamps && sched->nr_timestamps) {
3335 printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
3336 (double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0,
3337 sched->nr_unordered_timestamps, sched->nr_timestamps);
3338 }
3339 if (sched->nr_lost_events && sched->nr_events) {
3340 printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
3341 (double)sched->nr_lost_events/(double)sched->nr_events * 100.0,
3342 sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks);
3343 }
3344 if (sched->nr_context_switch_bugs && sched->nr_timestamps) {
3345 printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)",
3346 (double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0,
3347 sched->nr_context_switch_bugs, sched->nr_timestamps);
3348 if (sched->nr_lost_events)
3349 printf(" (due to lost events?)");
3350 printf("\n");
3351 }
3352 }
3353
3354 static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *data)
3355 {
3356 struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
3357 struct work_atoms *this;
3358 const char *comm = thread__comm_str(data->thread), *this_comm;
3359 bool leftmost = true;
3360
3361 while (*new) {
3362 int cmp;
3363
3364 this = container_of(*new, struct work_atoms, node);
3365 parent = *new;
3366
3367 this_comm = thread__comm_str(this->thread);
3368 cmp = strcmp(comm, this_comm);
3369 if (cmp > 0) {
3370 new = &((*new)->rb_left);
3371 } else if (cmp < 0) {
3372 new = &((*new)->rb_right);
3373 leftmost = false;
3374 } else {
3375 this->num_merged++;
3376 this->total_runtime += data->total_runtime;
3377 this->nb_atoms += data->nb_atoms;
3378 this->total_lat += data->total_lat;
3379 list_splice(&data->work_list, &this->work_list);
3380 if (this->max_lat < data->max_lat) {
3381 this->max_lat = data->max_lat;
3382 this->max_lat_start = data->max_lat_start;
3383 this->max_lat_end = data->max_lat_end;
3384 }
3385 zfree(&data);
3386 return;
3387 }
3388 }
3389
3390 data->num_merged++;
3391 rb_link_node(&data->node, parent, new);
3392 rb_insert_color_cached(&data->node, root, leftmost);
3393 }
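/*
 * Sketch of the effect (hypothetical threads): two work_atoms entries
 * whose threads both have the comm "kworker/0:1" collapse into one
 * node whose runtime, atom count and total latency are summed, and
 * whose max_lat (with its start/end stamps) is the larger of the two.
 */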
3394
3395 static void perf_sched__merge_lat(struct perf_sched *sched)
3396 {
3397 struct work_atoms *data;
3398 struct rb_node *node;
3399
3400 if (sched->skip_merge)
3401 return;
3402
3403 while ((node = rb_first_cached(&sched->atom_root))) {
3404 rb_erase_cached(node, &sched->atom_root);
3405 data = rb_entry(node, struct work_atoms, node);
3406 __merge_work_atoms(&sched->merged_atom_root, data);
3407 }
3408 }
3409
3410 static int setup_cpus_switch_event(struct perf_sched *sched)
3411 {
3412 unsigned int i;
3413
3414 sched->cpu_last_switched = calloc(MAX_CPUS, sizeof(*(sched->cpu_last_switched)));
3415 if (!sched->cpu_last_switched)
3416 return -1;
3417
3418 sched->curr_pid = malloc(MAX_CPUS * sizeof(*(sched->curr_pid)));
3419 if (!sched->curr_pid) {
3420 zfree(&sched->cpu_last_switched);
3421 return -1;
3422 }
3423
3424 for (i = 0; i < MAX_CPUS; i++)
3425 sched->curr_pid[i] = -1;
3426
3427 return 0;
3428 }
3429
3430 static void free_cpus_switch_event(struct perf_sched *sched)
3431 {
3432 zfree(&sched->curr_pid);
3433 zfree(&sched->cpu_last_switched);
3434 }
3435
3436 static int perf_sched__lat(struct perf_sched *sched)
3437 {
3438 int rc = -1;
3439 struct rb_node *next;
3440
3441 setup_pager();
3442
3443 if (setup_cpus_switch_event(sched))
3444 return rc;
3445
3446 if (perf_sched__read_events(sched))
3447 goto out_free_cpus_switch_event;
3448
3449 perf_sched__merge_lat(sched);
3450 perf_sched__sort_lat(sched);
3451
3452 printf("\n -------------------------------------------------------------------------------------------------------------------------------------------\n");
3453 printf(" Task | Runtime ms | Count | Avg delay ms | Max delay ms | Max delay start | Max delay end |\n");
3454 printf(" -------------------------------------------------------------------------------------------------------------------------------------------\n");
3455
3456 next = rb_first_cached(&sched->sorted_atom_root);
3457
3458 while (next) {
3459 struct work_atoms *work_list;
3460
3461 work_list = rb_entry(next, struct work_atoms, node);
3462 output_lat_thread(sched, work_list);
3463 next = rb_next(next);
3464 thread__zput(work_list->thread);
3465 }
3466
3467 printf(" -----------------------------------------------------------------------------------------------------------------\n");
3468 printf(" TOTAL: |%11.3f ms |%9" PRIu64 " |\n",
3469 (double)sched->all_runtime / NSEC_PER_MSEC, sched->all_count);
3470
3471 printf(" ---------------------------------------------------\n");
3472
3473 print_bad_events(sched);
3474 printf("\n");
3475
3476 rc = 0;
3477
3478 out_free_cpus_switch_event:
3479 free_cpus_switch_event(sched);
3480 return rc;
3481 }
3482
3483 static int setup_map_cpus(struct perf_sched *sched)
3484 {
3485 sched->max_cpu.cpu = sysconf(_SC_NPROCESSORS_CONF);
3486
3487 if (sched->map.comp) {
3488 sched->map.comp_cpus = zalloc(sched->max_cpu.cpu * sizeof(int));
3489 if (!sched->map.comp_cpus)
3490 return -1;
3491 }
3492
3493 if (sched->map.cpus_str) {
3494 sched->map.cpus = perf_cpu_map__new(sched->map.cpus_str);
3495 if (!sched->map.cpus) {
3496 pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
3497 zfree(&sched->map.comp_cpus);
3498 return -1;
3499 }
3500 }
3501
3502 return 0;
3503 }
3504
3505 static int setup_color_pids(struct perf_sched *sched)
3506 {
3507 struct perf_thread_map *map;
3508
3509 if (!sched->map.color_pids_str)
3510 return 0;
3511
3512 map = thread_map__new_by_tid_str(sched->map.color_pids_str);
3513 if (!map) {
3514 pr_err("failed to get thread map from %s\n", sched->map.color_pids_str);
3515 return -1;
3516 }
3517
3518 sched->map.color_pids = map;
3519 return 0;
3520 }
3521
3522 static int setup_color_cpus(struct perf_sched *sched)
3523 {
3524 struct perf_cpu_map *map;
3525
3526 if (!sched->map.color_cpus_str)
3527 return 0;
3528
3529 map = perf_cpu_map__new(sched->map.color_cpus_str);
3530 if (!map) {
3531 pr_err("failed to get thread map from %s\n", sched->map.color_cpus_str);
3532 return -1;
3533 }
3534
3535 sched->map.color_cpus = map;
3536 return 0;
3537 }
3538
3539 static int perf_sched__map(struct perf_sched *sched)
3540 {
3541 int rc = -1;
3542
3543 sched->curr_thread = calloc(MAX_CPUS, sizeof(*(sched->curr_thread)));
3544 if (!sched->curr_thread)
3545 return rc;
3546
3547 sched->curr_out_thread = calloc(MAX_CPUS, sizeof(*(sched->curr_out_thread)));
3548 if (!sched->curr_out_thread)
3549 return rc;
3550
3551 if (setup_cpus_switch_event(sched))
3552 goto out_free_curr_thread;
3553
3554 if (setup_map_cpus(sched))
3555 goto out_free_cpus_switch_event;
3556
3557 if (setup_color_pids(sched))
3558 goto out_put_map_cpus;
3559
3560 if (setup_color_cpus(sched))
3561 goto out_put_color_pids;
3562
3563 setup_pager();
3564 if (perf_sched__read_events(sched))
3565 goto out_put_color_cpus;
3566
3567 rc = 0;
3568 print_bad_events(sched);
3569
3570 out_put_color_cpus:
3571 perf_cpu_map__put(sched->map.color_cpus);
3572
3573 out_put_color_pids:
3574 perf_thread_map__put(sched->map.color_pids);
3575
3576 out_put_map_cpus:
3577 zfree(&sched->map.comp_cpus);
3578 perf_cpu_map__put(sched->map.cpus);
3579
3580 out_free_cpus_switch_event:
3581 free_cpus_switch_event(sched);
3582
3583 out_free_curr_thread:
3584 zfree(&sched->curr_thread);
3585 return rc;
3586 }
3587
3588 static int perf_sched__replay(struct perf_sched *sched)
3589 {
3590 int ret;
3591 unsigned long i;
3592
3593 mutex_init(&sched->start_work_mutex);
3594 mutex_init(&sched->work_done_wait_mutex);
3595
3596 ret = setup_cpus_switch_event(sched);
3597 if (ret)
3598 goto out_mutex_destroy;
3599
3600 calibrate_run_measurement_overhead(sched);
3601 calibrate_sleep_measurement_overhead(sched);
3602
3603 test_calibrations(sched);
3604
3605 ret = perf_sched__read_events(sched);
3606 if (ret)
3607 goto out_free_cpus_switch_event;
3608
3609 printf("nr_run_events: %ld\n", sched->nr_run_events);
3610 printf("nr_sleep_events: %ld\n", sched->nr_sleep_events);
3611 printf("nr_wakeup_events: %ld\n", sched->nr_wakeup_events);
3612
3613 if (sched->targetless_wakeups)
3614 printf("target-less wakeups: %ld\n", sched->targetless_wakeups);
3615 if (sched->multitarget_wakeups)
3616 printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups);
3617 if (sched->nr_run_events_optimized)
3618 printf("run atoms optimized: %ld\n",
3619 sched->nr_run_events_optimized);
3620
3621 print_task_traces(sched);
3622 add_cross_task_wakeups(sched);
3623
3624 sched->thread_funcs_exit = false;
3625 create_tasks(sched);
3626 printf("------------------------------------------------------------\n");
3627 if (sched->replay_repeat == 0)
3628 sched->replay_repeat = UINT_MAX;
3629
3630 for (i = 0; i < sched->replay_repeat; i++)
3631 run_one_test(sched);
3632
3633 sched->thread_funcs_exit = true;
3634 destroy_tasks(sched);
3635
3636 out_free_cpus_switch_event:
3637 free_cpus_switch_event(sched);
3638
3639 out_mutex_destroy:
3640 mutex_destroy(&sched->start_work_mutex);
3641 mutex_destroy(&sched->work_done_wait_mutex);
3642 return ret;
3643 }
3644
3645 static void setup_sorting(struct perf_sched *sched, const struct option *options,
3646 const char * const usage_msg[])
3647 {
3648 char *tmp, *tok, *str = strdup(sched->sort_order);
3649
3650 for (tok = strtok_r(str, ", ", &tmp);
3651 tok; tok = strtok_r(NULL, ", ", &tmp)) {
3652 if (sort_dimension__add(tok, &sched->sort_list) < 0) {
3653 usage_with_options_msg(usage_msg, options,
3654 "Unknown --sort key: `%s'", tok);
3655 }
3656 }
3657
3658 free(str);
3659
3660 sort_dimension__add("pid", &sched->cmp_pid);
3661 }
3662
3663 static bool schedstat_events_exposed(void)
3664 {
3665 /*
3666 * Select "sched:sched_stat_wait" event to check
3667 * whether schedstat tracepoints are exposed.
3668 */
3669 return IS_ERR(trace_event__tp_format("sched", "sched_stat_wait")) ?
3670 false : true;
3671 }
3672
3673 static int __cmd_record(int argc, const char **argv)
3674 {
3675 unsigned int rec_argc, i, j;
3676 char **rec_argv;
3677 const char **rec_argv_copy;
3678 const char * const record_args[] = {
3679 "record",
3680 "-a",
3681 "-R",
3682 "-m", "1024",
3683 "-c", "1",
3684 "-e", "sched:sched_switch",
3685 "-e", "sched:sched_stat_runtime",
3686 "-e", "sched:sched_process_fork",
3687 "-e", "sched:sched_wakeup_new",
3688 "-e", "sched:sched_migrate_task",
3689 };
3690
3691 /*
3692 * The tracepoints trace_sched_stat_{wait, sleep, iowait} are not
3693 * exposed to userspace if CONFIG_SCHEDSTATS is not set. To keep
3694 * "perf sched record" from failing, record the schedstat events
3695 * only when they are actually exposed.
3696 */
	const char * const schedstat_args[] = {
		"-e", "sched:sched_stat_wait",
		"-e", "sched:sched_stat_sleep",
		"-e", "sched:sched_stat_iowait",
	};
	unsigned int schedstat_argc = schedstat_events_exposed() ?
		ARRAY_SIZE(schedstat_args) : 0;

	struct tep_event *waking_event;
	int ret;

	/*
	 * +2 for either "-e", "sched:sched_wakeup" or
	 * "-e", "sched:sched_waking"
	 */
	rec_argc = ARRAY_SIZE(record_args) + 2 + schedstat_argc + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	if (rec_argv == NULL)
		return -ENOMEM;
	rec_argv_copy = calloc(rec_argc + 1, sizeof(char *));
	if (rec_argv_copy == NULL) {
		free(rec_argv);
		return -ENOMEM;
	}

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	rec_argv[i++] = strdup("-e");
	waking_event = trace_event__tp_format("sched", "sched_waking");
	if (!IS_ERR(waking_event))
		rec_argv[i++] = strdup("sched:sched_waking");
	else
		rec_argv[i++] = strdup("sched:sched_wakeup");

	for (j = 0; j < schedstat_argc; j++)
		rec_argv[i++] = strdup(schedstat_args[j]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = strdup(argv[j]);

	BUG_ON(i != rec_argc);

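	/*
	 * cmd_record() may rewrite the argv it is handed, so pass it a
	 * shallow copy and keep the original pointers in rec_argv so that
	 * every strdup()ed element can still be freed afterwards.
	 */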
	memcpy(rec_argv_copy, rec_argv, sizeof(char *) * rec_argc);
	ret = cmd_record(rec_argc, rec_argv_copy);

	for (i = 0; i < rec_argc; i++)
		free(rec_argv[i]);
	free(rec_argv);
	free(rec_argv_copy);

	return ret;
}

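/*
 * Entry point for 'perf sched'.  Sets up the shared tool callbacks and
 * the per-subcommand option tables, then dispatches to one of record,
 * latency, map, replay, script or timehist.  A typical flow would be:
 *
 *   perf sched record -- sleep 1
 *   perf sched latency
 */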
int cmd_sched(int argc, const char **argv)
{
	static const char default_sort_order[] = "avg, max, switch, runtime";
	struct perf_sched sched = {
		.cmp_pid = LIST_HEAD_INIT(sched.cmp_pid),
		.sort_list = LIST_HEAD_INIT(sched.sort_list),
		.sort_order = default_sort_order,
		.replay_repeat = 10,
		.profile_cpu = -1,
		.next_shortname1 = 'A',
		.next_shortname2 = '0',
		.skip_merge = 0,
		.show_callchain = 1,
		.max_stack = 5,
	};
	const struct option sched_options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		   "input file name"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_BOOLEAN('f', "force", &sched.force, "don't complain, do it"),
	OPT_END()
	};
	const struct option latency_options[] = {
	OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
		   "sort by key(s): runtime, switch, avg, max"),
	OPT_INTEGER('C', "CPU", &sched.profile_cpu,
		    "CPU to profile on"),
	OPT_BOOLEAN('p', "pids", &sched.skip_merge,
		    "latency stats per pid instead of per comm"),
	OPT_PARENT(sched_options)
	};
	const struct option replay_options[] = {
	OPT_UINTEGER('r', "repeat", &sched.replay_repeat,
		     "repeat the workload replay N times (0: infinite)"),
	OPT_PARENT(sched_options)
	};
	const struct option map_options[] = {
	OPT_BOOLEAN(0, "compact", &sched.map.comp,
		    "map output in compact mode"),
	OPT_STRING(0, "color-pids", &sched.map.color_pids_str, "pids",
		   "highlight given pids in map"),
	OPT_STRING(0, "color-cpus", &sched.map.color_cpus_str, "cpus",
		   "highlight given CPUs in map"),
	OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus",
		   "display given CPUs in map"),
	OPT_STRING(0, "task-name", &sched.map.task_name, "task",
		   "map output only for the given task name(s)"),
	OPT_BOOLEAN(0, "fuzzy-name", &sched.map.fuzzy,
		    "given command name can be partially matched (fuzzy matching)"),
	OPT_PARENT(sched_options)
	};
	const struct option timehist_options[] = {
	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
		   "file", "kallsyms pathname"),
	OPT_BOOLEAN('g', "call-graph", &sched.show_callchain,
		    "Display call chains if present (default on)"),
	OPT_UINTEGER(0, "max-stack", &sched.max_stack,
		     "Maximum number of functions to display in a backtrace"),
	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
		   "Look for files with symbols relative to this directory"),
	OPT_BOOLEAN('s', "summary", &sched.summary_only,
		    "Show only the summary with statistics"),
	OPT_BOOLEAN('S', "with-summary", &sched.summary,
		    "Show all events and the summary with statistics"),
	OPT_BOOLEAN('w', "wakeups", &sched.show_wakeups, "Show wakeup events"),
	OPT_BOOLEAN('n', "next", &sched.show_next, "Show next task"),
	OPT_BOOLEAN('M', "migrations", &sched.show_migrations, "Show migration events"),
	OPT_BOOLEAN('V', "cpu-visual", &sched.show_cpu_visual, "Add CPU visual"),
	OPT_BOOLEAN('I', "idle-hist", &sched.idle_hist, "Show idle events only"),
	OPT_STRING(0, "time", &sched.time_str, "str",
		   "Time span for analysis (start,stop)"),
	OPT_BOOLEAN(0, "state", &sched.show_state, "Show task state when scheduled out"),
	OPT_STRING('p', "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
		   "analyze events only for given process id(s)"),
	OPT_STRING('t', "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
		   "analyze events only for given thread id(s)"),
	OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
	OPT_BOOLEAN(0, "show-prio", &sched.show_prio, "Show task priority"),
	OPT_STRING(0, "prio", &sched.prio_str, "prio",
		   "analyze events only for given task priorities"),
	OPT_PARENT(sched_options)
	};

	const char * const latency_usage[] = {
		"perf sched latency [<options>]",
		NULL
	};
	const char * const replay_usage[] = {
		"perf sched replay [<options>]",
		NULL
	};
	const char * const map_usage[] = {
		"perf sched map [<options>]",
		NULL
	};
	const char * const timehist_usage[] = {
		"perf sched timehist [<options>]",
		NULL
	};
	const char *const sched_subcommands[] = { "record", "latency", "map",
						  "replay", "script",
						  "timehist", NULL };
	/* [0] is filled in by parse_options_subcommand() */
	const char *sched_usage[] = {
		NULL,
		NULL
	};
	struct trace_sched_handler lat_ops = {
		.wakeup_event = latency_wakeup_event,
		.switch_event = latency_switch_event,
		.runtime_event = latency_runtime_event,
		.migrate_task_event = latency_migrate_task_event,
	};
	struct trace_sched_handler map_ops = {
		.switch_event = map_switch_event,
	};
	struct trace_sched_handler replay_ops = {
		.wakeup_event = replay_wakeup_event,
		.switch_event = replay_switch_event,
		.fork_event = replay_fork_event,
	};
	int ret;

	perf_tool__init(&sched.tool, /*ordered_events=*/true);
	sched.tool.sample = perf_sched__process_tracepoint_sample;
	sched.tool.comm = perf_sched__process_comm;
	sched.tool.namespaces = perf_event__process_namespaces;
	sched.tool.lost = perf_event__process_lost;
	sched.tool.fork = perf_sched__process_fork_event;

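	/*
	 * Parse only the common options here; PARSE_OPT_STOP_AT_NON_OPTION
	 * leaves the subcommand name and its arguments for the
	 * per-subcommand parsers below.
	 */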
	argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
					sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(sched_usage, sched_options);

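	/*
	 * Dispatch on the subcommand name; the strstarts() checks accept
	 * any prefix of at least three characters, e.g. "lat" for
	 * "latency" or "rep" for "replay".
	 */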
	/*
	 * Aliased to 'perf script' for now:
	 */
	if (!strcmp(argv[0], "script")) {
		return cmd_script(argc, argv);
	} else if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
		return __cmd_record(argc, argv);
	} else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
		sched.tp_handler = &lat_ops;
		if (argc > 1) {
			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
			if (argc)
				usage_with_options(latency_usage, latency_options);
		}
		setup_sorting(&sched, latency_options, latency_usage);
		return perf_sched__lat(&sched);
	} else if (!strcmp(argv[0], "map")) {
		if (argc) {
			argc = parse_options(argc, argv, map_options, map_usage, 0);
			if (argc)
				usage_with_options(map_usage, map_options);

			if (sched.map.task_name) {
				sched.map.task_names = strlist__new(sched.map.task_name, NULL);
				if (sched.map.task_names == NULL) {
					fprintf(stderr, "Failed to parse task names\n");
					return -1;
				}
			}
		}
		sched.tp_handler = &map_ops;
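		/* the map view reuses the latency code's sort setup */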
		setup_sorting(&sched, latency_options, latency_usage);
		return perf_sched__map(&sched);
	} else if (strlen(argv[0]) > 2 && strstarts("replay", argv[0])) {
		sched.tp_handler = &replay_ops;
		if (argc) {
			argc = parse_options(argc, argv, replay_options, replay_usage, 0);
			if (argc)
				usage_with_options(replay_usage, replay_options);
		}
		return perf_sched__replay(&sched);
	} else if (!strcmp(argv[0], "timehist")) {
		if (argc) {
			argc = parse_options(argc, argv, timehist_options,
					     timehist_usage, 0);
			if (argc)
				usage_with_options(timehist_usage, timehist_options);
		}
		if ((sched.show_wakeups || sched.show_next) &&
		    sched.summary_only) {
			pr_err(" Error: -s and -[n|w] are mutually exclusive.\n");
			parse_options_usage(timehist_usage, timehist_options, "s", true);
			if (sched.show_wakeups)
				parse_options_usage(NULL, timehist_options, "w", true);
			if (sched.show_next)
				parse_options_usage(NULL, timehist_options, "n", true);
			return -EINVAL;
		}
		ret = symbol__validate_sym_arguments();
		if (ret)
			return ret;

		return perf_sched__timehist(&sched);
	} else {
		usage_with_options(sched_usage, sched_options);
	}

	/* free usage string allocated by parse_options_subcommand */
	free((void *)sched_usage[0]);

	return 0;
}
