xref: /linux/tools/perf/builtin-sched.c (revision b93fb9cf45a99042f4472678fc67afd1de47c32e)
1 // SPDX-License-Identifier: GPL-2.0
2 #include "builtin.h"
3 #include "perf-sys.h"
4 
5 #include "util/cpumap.h"
6 #include "util/evlist.h"
7 #include "util/evsel.h"
8 #include "util/evsel_fprintf.h"
9 #include "util/mutex.h"
10 #include "util/symbol.h"
11 #include "util/thread.h"
12 #include "util/header.h"
13 #include "util/session.h"
14 #include "util/tool.h"
15 #include "util/cloexec.h"
16 #include "util/thread_map.h"
17 #include "util/color.h"
18 #include "util/stat.h"
19 #include "util/string2.h"
20 #include "util/callchain.h"
21 #include "util/time-utils.h"
22 
23 #include <subcmd/pager.h>
24 #include <subcmd/parse-options.h>
25 #include "util/trace-event.h"
26 
27 #include "util/debug.h"
28 #include "util/event.h"
29 #include "util/util.h"
30 
31 #include <linux/kernel.h>
32 #include <linux/log2.h>
33 #include <linux/zalloc.h>
34 #include <sys/prctl.h>
35 #include <sys/resource.h>
36 #include <inttypes.h>
37 
38 #include <errno.h>
39 #include <semaphore.h>
40 #include <pthread.h>
41 #include <math.h>
42 #include <api/fs/fs.h>
43 #include <perf/cpumap.h>
44 #include <linux/time64.h>
45 #include <linux/err.h>
46 
47 #include <linux/ctype.h>
48 
49 #define PR_SET_NAME		15               /* Set process name */
50 #define MAX_CPUS		4096
51 #define COMM_LEN		20
52 #define SYM_LEN			129
53 #define MAX_PID			1024000
54 
55 static const char *cpu_list;
56 static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
57 
58 struct sched_atom;
59 
60 struct task_desc {
61 	unsigned long		nr;
62 	unsigned long		pid;
63 	char			comm[COMM_LEN];
64 
65 	unsigned long		nr_events;
66 	unsigned long		curr_event;
67 	struct sched_atom	**atoms;
68 
69 	pthread_t		thread;
70 	sem_t			sleep_sem;
71 
72 	sem_t			ready_for_work;
73 	sem_t			work_done_sem;
74 
75 	u64			cpu_usage;
76 };
77 
78 enum sched_event_type {
79 	SCHED_EVENT_RUN,
80 	SCHED_EVENT_SLEEP,
81 	SCHED_EVENT_WAKEUP,
82 	SCHED_EVENT_MIGRATION,
83 };
84 
85 struct sched_atom {
86 	enum sched_event_type	type;
87 	int			specific_wait;
88 	u64			timestamp;
89 	u64			duration;
90 	unsigned long		nr;
91 	sem_t			*wait_sem;
92 	struct task_desc	*wakee;
93 };
94 
95 enum thread_state {
96 	THREAD_SLEEPING = 0,
97 	THREAD_WAIT_CPU,
98 	THREAD_SCHED_IN,
99 	THREAD_IGNORE
100 };
101 
102 struct work_atom {
103 	struct list_head	list;
104 	enum thread_state	state;
105 	u64			sched_out_time;
106 	u64			wake_up_time;
107 	u64			sched_in_time;
108 	u64			runtime;
109 };
110 
111 struct work_atoms {
112 	struct list_head	work_list;
113 	struct thread		*thread;
114 	struct rb_node		node;
115 	u64			max_lat;
116 	u64			max_lat_start;
117 	u64			max_lat_end;
118 	u64			total_lat;
119 	u64			nb_atoms;
120 	u64			total_runtime;
121 	int			num_merged;
122 };
123 
124 typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);
125 
126 struct perf_sched;
127 
128 struct trace_sched_handler {
129 	int (*switch_event)(struct perf_sched *sched, struct evsel *evsel,
130 			    struct perf_sample *sample, struct machine *machine);
131 
132 	int (*runtime_event)(struct perf_sched *sched, struct evsel *evsel,
133 			     struct perf_sample *sample, struct machine *machine);
134 
135 	int (*wakeup_event)(struct perf_sched *sched, struct evsel *evsel,
136 			    struct perf_sample *sample, struct machine *machine);
137 
138 	/* PERF_RECORD_FORK event, not sched_process_fork tracepoint */
139 	int (*fork_event)(struct perf_sched *sched, union perf_event *event,
140 			  struct machine *machine);
141 
142 	int (*migrate_task_event)(struct perf_sched *sched,
143 				  struct evsel *evsel,
144 				  struct perf_sample *sample,
145 				  struct machine *machine);
146 };
147 
148 #define COLOR_PIDS PERF_COLOR_BLUE
149 #define COLOR_CPUS PERF_COLOR_BG_RED
150 
151 struct perf_sched_map {
152 	DECLARE_BITMAP(comp_cpus_mask, MAX_CPUS);
153 	struct perf_cpu		*comp_cpus;
154 	bool			 comp;
155 	struct perf_thread_map *color_pids;
156 	const char		*color_pids_str;
157 	struct perf_cpu_map	*color_cpus;
158 	const char		*color_cpus_str;
159 	const char		*task_name;
160 	struct strlist		*task_names;
161 	bool			fuzzy;
162 	struct perf_cpu_map	*cpus;
163 	const char		*cpus_str;
164 };
165 
166 struct perf_sched {
167 	struct perf_tool tool;
168 	const char	 *sort_order;
169 	unsigned long	 nr_tasks;
170 	struct task_desc **pid_to_task;
171 	struct task_desc **tasks;
172 	const struct trace_sched_handler *tp_handler;
173 	struct mutex	 start_work_mutex;
174 	struct mutex	 work_done_wait_mutex;
175 	int		 profile_cpu;
176 /*
177  * Track the current task - that way we can know whether there are any
178  * weird events, such as a task being switched away that is not current.
179  */
180 	struct perf_cpu	 max_cpu;
181 	u32		 *curr_pid;
182 	struct thread	 **curr_thread;
183 	struct thread	 **curr_out_thread;
184 	char		 next_shortname1;
185 	char		 next_shortname2;
186 	unsigned int	 replay_repeat;
187 	unsigned long	 nr_run_events;
188 	unsigned long	 nr_sleep_events;
189 	unsigned long	 nr_wakeup_events;
190 	unsigned long	 nr_sleep_corrections;
191 	unsigned long	 nr_run_events_optimized;
192 	unsigned long	 targetless_wakeups;
193 	unsigned long	 multitarget_wakeups;
194 	unsigned long	 nr_runs;
195 	unsigned long	 nr_timestamps;
196 	unsigned long	 nr_unordered_timestamps;
197 	unsigned long	 nr_context_switch_bugs;
198 	unsigned long	 nr_events;
199 	unsigned long	 nr_lost_chunks;
200 	unsigned long	 nr_lost_events;
201 	u64		 run_measurement_overhead;
202 	u64		 sleep_measurement_overhead;
203 	u64		 start_time;
204 	u64		 cpu_usage;
205 	u64		 runavg_cpu_usage;
206 	u64		 parent_cpu_usage;
207 	u64		 runavg_parent_cpu_usage;
208 	u64		 sum_runtime;
209 	u64		 sum_fluct;
210 	u64		 run_avg;
211 	u64		 all_runtime;
212 	u64		 all_count;
213 	u64		 *cpu_last_switched;
214 	struct rb_root_cached atom_root, sorted_atom_root, merged_atom_root;
215 	struct list_head sort_list, cmp_pid;
216 	bool force;
217 	bool skip_merge;
218 	struct perf_sched_map map;
219 
220 	/* options for timehist command */
221 	bool		summary;
222 	bool		summary_only;
223 	bool		idle_hist;
224 	bool		show_callchain;
225 	unsigned int	max_stack;
226 	bool		show_cpu_visual;
227 	bool		show_wakeups;
228 	bool		show_next;
229 	bool		show_migrations;
230 	bool		show_state;
231 	u64		skipped_samples;
232 	const char	*time_str;
233 	struct perf_time_interval ptime;
234 	struct perf_time_interval hist_time;
235 	volatile bool   thread_funcs_exit;
236 };
237 
238 /* per thread run time data */
239 struct thread_runtime {
240 	u64 last_time;      /* time of previous sched in/out event */
241 	u64 dt_run;         /* run time */
242 	u64 dt_sleep;       /* time spent sleeping between CPU accesses (off cpu) */
243 	u64 dt_iowait;      /* time spent in iowait between CPU accesses (off cpu) */
244 	u64 dt_preempt;     /* time spent preempted between CPU accesses (off cpu) */
245 	u64 dt_delay;       /* time between wakeup and sched-in */
246 	u64 ready_to_run;   /* time of wakeup */
247 
248 	struct stats run_stats;
249 	u64 total_run_time;
250 	u64 total_sleep_time;
251 	u64 total_iowait_time;
252 	u64 total_preempt_time;
253 	u64 total_delay_time;
254 
255 	char last_state;
256 
257 	char shortname[3];
258 	bool comm_changed;
259 
260 	u64 migrations;
261 };
262 
263 /* per event run time data */
264 struct evsel_runtime {
265 	u64 *last_time; /* time this event was last seen per cpu */
266 	u32 ncpu;       /* highest cpu slot allocated */
267 };
268 
269 /* per cpu idle time data */
270 struct idle_thread_runtime {
271 	struct thread_runtime	tr;
272 	struct thread		*last_thread;
273 	struct rb_root_cached	sorted_root;
274 	struct callchain_root	callchain;
275 	struct callchain_cursor	cursor;
276 };
277 
278 /* track idle times per cpu */
279 static struct thread **idle_threads;
280 static int idle_max_cpu;
281 static char idle_comm[] = "<idle>";
282 
283 static u64 get_nsecs(void)
284 {
285 	struct timespec ts;
286 
287 	clock_gettime(CLOCK_MONOTONIC, &ts);
288 
289 	return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
290 }
291 
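/*
 * Spin until roughly @nsecs have elapsed, compensating for the
 * calibrated cost of the measurement loop itself: with, say, a
 * run_measurement_overhead of 50 nsecs, burn_nsecs(sched, 1000)
 * stops spinning once ~950 nsecs have passed, so the total cost
 * of the call comes out close to the requested 1000 nsecs.
 */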
292 static void burn_nsecs(struct perf_sched *sched, u64 nsecs)
293 {
294 	u64 T0 = get_nsecs(), T1;
295 
296 	do {
297 		T1 = get_nsecs();
298 	} while (T1 + sched->run_measurement_overhead < T0 + nsecs);
299 }
300 
301 static void sleep_nsecs(u64 nsecs)
302 {
303 	struct timespec ts;
304 
305 	ts.tv_nsec = nsecs % NSEC_PER_SEC;
306 	ts.tv_sec = nsecs / NSEC_PER_SEC;
307 
308 	nanosleep(&ts, NULL);
309 }
310 
311 static void calibrate_run_measurement_overhead(struct perf_sched *sched)
312 {
313 	u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
314 	int i;
315 
316 	for (i = 0; i < 10; i++) {
317 		T0 = get_nsecs();
318 		burn_nsecs(sched, 0);
319 		T1 = get_nsecs();
320 		delta = T1-T0;
321 		min_delta = min(min_delta, delta);
322 	}
323 	sched->run_measurement_overhead = min_delta;
324 
325 	printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
326 }
327 
328 static void calibrate_sleep_measurement_overhead(struct perf_sched *sched)
329 {
330 	u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
331 	int i;
332 
333 	for (i = 0; i < 10; i++) {
334 		T0 = get_nsecs();
335 		sleep_nsecs(10000);
336 		T1 = get_nsecs();
337 		delta = T1-T0;
338 		min_delta = min(min_delta, delta);
339 	}
340 	min_delta -= 10000;
341 	sched->sleep_measurement_overhead = min_delta;
342 
343 	printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
344 }
345 
346 static struct sched_atom *
347 get_new_event(struct task_desc *task, u64 timestamp)
348 {
349 	struct sched_atom *event = zalloc(sizeof(*event));
350 	unsigned long idx = task->nr_events;
351 	size_t size;
352 
353 	event->timestamp = timestamp;
354 	event->nr = idx;
355 
356 	task->nr_events++;
357 	size = sizeof(struct sched_atom *) * task->nr_events;
358 	task->atoms = realloc(task->atoms, size);
359 	BUG_ON(!task->atoms);
360 
361 	task->atoms[idx] = event;
362 
363 	return event;
364 }
365 
366 static struct sched_atom *last_event(struct task_desc *task)
367 {
368 	if (!task->nr_events)
369 		return NULL;
370 
371 	return task->atoms[task->nr_events - 1];
372 }
373 
374 static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
375 				u64 timestamp, u64 duration)
376 {
377 	struct sched_atom *event, *curr_event = last_event(task);
378 
379 	/*
380 	 * optimize an existing RUN event by merging this one
381 	 * into it:
382 	 */
383 	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
384 		sched->nr_run_events_optimized++;
385 		curr_event->duration += duration;
386 		return;
387 	}
388 
389 	event = get_new_event(task, timestamp);
390 
391 	event->type = SCHED_EVENT_RUN;
392 	event->duration = duration;
393 
394 	sched->nr_run_events++;
395 }
396 
397 static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
398 				   u64 timestamp, struct task_desc *wakee)
399 {
400 	struct sched_atom *event, *wakee_event;
401 
402 	event = get_new_event(task, timestamp);
403 	event->type = SCHED_EVENT_WAKEUP;
404 	event->wakee = wakee;
405 
406 	wakee_event = last_event(wakee);
407 	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
408 		sched->targetless_wakeups++;
409 		return;
410 	}
411 	if (wakee_event->wait_sem) {
412 		sched->multitarget_wakeups++;
413 		return;
414 	}
415 
416 	wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
417 	sem_init(wakee_event->wait_sem, 0, 0);
418 	wakee_event->specific_wait = 1;
419 	event->wait_sem = wakee_event->wait_sem;
420 
421 	sched->nr_wakeup_events++;
422 }
423 
424 static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
425 				  u64 timestamp, const char task_state __maybe_unused)
426 {
427 	struct sched_atom *event = get_new_event(task, timestamp);
428 
429 	event->type = SCHED_EVENT_SLEEP;
430 
431 	sched->nr_sleep_events++;
432 }
433 
434 static struct task_desc *register_pid(struct perf_sched *sched,
435 				      unsigned long pid, const char *comm)
436 {
437 	struct task_desc *task;
438 	static int pid_max;
439 
440 	if (sched->pid_to_task == NULL) {
441 		if (sysctl__read_int("kernel/pid_max", &pid_max) < 0)
442 			pid_max = MAX_PID;
443 		BUG_ON((sched->pid_to_task = calloc(pid_max, sizeof(struct task_desc *))) == NULL);
444 	}
445 	if (pid >= (unsigned long)pid_max) {
446 		BUG_ON((sched->pid_to_task = realloc(sched->pid_to_task, (pid + 1) *
447 			sizeof(struct task_desc *))) == NULL);
448 		while (pid >= (unsigned long)pid_max)
449 			sched->pid_to_task[pid_max++] = NULL;
450 	}
451 
452 	task = sched->pid_to_task[pid];
453 
454 	if (task)
455 		return task;
456 
457 	task = zalloc(sizeof(*task));
458 	task->pid = pid;
459 	task->nr = sched->nr_tasks;
460 	strcpy(task->comm, comm);
461 	/*
462 	 * every task starts in the sleeping state - this gets ignored
463 	 * if there's no wakeup pointing to this sleep state:
464 	 */
465 	add_sched_event_sleep(sched, task, 0, 0);
466 
467 	sched->pid_to_task[pid] = task;
468 	sched->nr_tasks++;
469 	sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
470 	BUG_ON(!sched->tasks);
471 	sched->tasks[task->nr] = task;
472 
473 	if (verbose > 0)
474 		printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm);
475 
476 	return task;
477 }
478 
479 
480 static void print_task_traces(struct perf_sched *sched)
481 {
482 	struct task_desc *task;
483 	unsigned long i;
484 
485 	for (i = 0; i < sched->nr_tasks; i++) {
486 		task = sched->tasks[i];
487 		printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
488 			task->nr, task->comm, task->pid, task->nr_events);
489 	}
490 }
491 
492 static void add_cross_task_wakeups(struct perf_sched *sched)
493 {
494 	struct task_desc *task1, *task2;
495 	unsigned long i, j;
496 
497 	for (i = 0; i < sched->nr_tasks; i++) {
498 		task1 = sched->tasks[i];
499 		j = i + 1;
500 		if (j == sched->nr_tasks)
501 			j = 0;
502 		task2 = sched->tasks[j];
503 		add_sched_event_wakeup(sched, task1, 0, task2);
504 	}
505 }
506 
507 static void perf_sched__process_event(struct perf_sched *sched,
508 				      struct sched_atom *atom)
509 {
510 	int ret = 0;
511 
512 	switch (atom->type) {
513 	case SCHED_EVENT_RUN:
514 		burn_nsecs(sched, atom->duration);
515 		break;
516 	case SCHED_EVENT_SLEEP:
517 		if (atom->wait_sem)
518 			ret = sem_wait(atom->wait_sem);
519 		BUG_ON(ret);
520 		break;
521 	case SCHED_EVENT_WAKEUP:
522 		if (atom->wait_sem)
523 			ret = sem_post(atom->wait_sem);
524 		BUG_ON(ret);
525 		break;
526 	case SCHED_EVENT_MIGRATION:
527 		break;
528 	default:
529 		BUG_ON(1);
530 	}
531 }
532 
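/*
 * Total user + system CPU time consumed by the parent process so
 * far, in nsecs, as reported by getrusage(RUSAGE_SELF).
 */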
533 static u64 get_cpu_usage_nsec_parent(void)
534 {
535 	struct rusage ru;
536 	u64 sum;
537 	int err;
538 
539 	err = getrusage(RUSAGE_SELF, &ru);
540 	BUG_ON(err);
541 
542 	sum =  ru.ru_utime.tv_sec * NSEC_PER_SEC + ru.ru_utime.tv_usec * NSEC_PER_USEC;
543 	sum += ru.ru_stime.tv_sec * NSEC_PER_SEC + ru.ru_stime.tv_usec * NSEC_PER_USEC;
544 
545 	return sum;
546 }
547 
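/*
 * Open a software task-clock counter for the calling thread, so each
 * worker can measure its own CPU usage in nsecs. One counter fd is
 * needed per task; on EMFILE, -f/--force tries to raise RLIMIT_NOFILE
 * by the number of still-unopened tasks and retries.
 */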
548 static int self_open_counters(struct perf_sched *sched, unsigned long cur_task)
549 {
550 	struct perf_event_attr attr;
551 	char sbuf[STRERR_BUFSIZE], info[STRERR_BUFSIZE] = "";
552 	int fd;
553 	struct rlimit limit;
554 	bool need_privilege = false;
555 
556 	memset(&attr, 0, sizeof(attr));
557 
558 	attr.type = PERF_TYPE_SOFTWARE;
559 	attr.config = PERF_COUNT_SW_TASK_CLOCK;
560 
561 force_again:
562 	fd = sys_perf_event_open(&attr, 0, -1, -1,
563 				 perf_event_open_cloexec_flag());
564 
565 	if (fd < 0) {
566 		if (errno == EMFILE) {
567 			if (sched->force) {
568 				BUG_ON(getrlimit(RLIMIT_NOFILE, &limit) == -1);
569 				limit.rlim_cur += sched->nr_tasks - cur_task;
570 				if (limit.rlim_cur > limit.rlim_max) {
571 					limit.rlim_max = limit.rlim_cur;
572 					need_privilege = true;
573 				}
574 				if (setrlimit(RLIMIT_NOFILE, &limit) == -1) {
575 					if (need_privilege && errno == EPERM)
576 						strcpy(info, "Need privilege\n");
577 				} else
578 					goto force_again;
579 			} else
580 				strcpy(info, "Have a try with -f option\n");
581 		}
582 		pr_err("Error: sys_perf_event_open() syscall returned "
583 		       "with %d (%s)\n%s", fd,
584 		       str_error_r(errno, sbuf, sizeof(sbuf)), info);
585 		exit(EXIT_FAILURE);
586 	}
587 	return fd;
588 }
589 
590 static u64 get_cpu_usage_nsec_self(int fd)
591 {
592 	u64 runtime;
593 	int ret;
594 
595 	ret = read(fd, &runtime, sizeof(runtime));
596 	BUG_ON(ret != sizeof(runtime));
597 
598 	return runtime;
599 }
600 
601 struct sched_thread_parms {
602 	struct task_desc  *task;
603 	struct perf_sched *sched;
604 	int fd;
605 };
606 
607 static void *thread_func(void *ctx)
608 {
609 	struct sched_thread_parms *parms = ctx;
610 	struct task_desc *this_task = parms->task;
611 	struct perf_sched *sched = parms->sched;
612 	u64 cpu_usage_0, cpu_usage_1;
613 	unsigned long i, ret;
614 	char comm2[22];
615 	int fd = parms->fd;
616 
617 	zfree(&parms);
618 
619 	sprintf(comm2, ":%s", this_task->comm);
620 	prctl(PR_SET_NAME, comm2);
621 	if (fd < 0)
622 		return NULL;
623 
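	/*
	 * Both mutexes are used as gates: the parent holds them across
	 * pthread_create() (see create_tasks()), so a plain lock+unlock
	 * here blocks until wait_for_tasks() releases the corresponding
	 * mutex to start (and later to finish) one replay iteration.
	 */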
624 	while (!sched->thread_funcs_exit) {
625 		ret = sem_post(&this_task->ready_for_work);
626 		BUG_ON(ret);
627 		mutex_lock(&sched->start_work_mutex);
628 		mutex_unlock(&sched->start_work_mutex);
629 
630 		cpu_usage_0 = get_cpu_usage_nsec_self(fd);
631 
632 		for (i = 0; i < this_task->nr_events; i++) {
633 			this_task->curr_event = i;
634 			perf_sched__process_event(sched, this_task->atoms[i]);
635 		}
636 
637 		cpu_usage_1 = get_cpu_usage_nsec_self(fd);
638 		this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
639 		ret = sem_post(&this_task->work_done_sem);
640 		BUG_ON(ret);
641 
642 		mutex_lock(&sched->work_done_wait_mutex);
643 		mutex_unlock(&sched->work_done_wait_mutex);
644 	}
645 	return NULL;
646 }
647 
648 static void create_tasks(struct perf_sched *sched)
649 	EXCLUSIVE_LOCK_FUNCTION(sched->start_work_mutex)
650 	EXCLUSIVE_LOCK_FUNCTION(sched->work_done_wait_mutex)
651 {
652 	struct task_desc *task;
653 	pthread_attr_t attr;
654 	unsigned long i;
655 	int err;
656 
657 	err = pthread_attr_init(&attr);
658 	BUG_ON(err);
659 	err = pthread_attr_setstacksize(&attr,
660 			(size_t) max(16 * 1024, (int)PTHREAD_STACK_MIN));
661 	BUG_ON(err);
662 	mutex_lock(&sched->start_work_mutex);
663 	mutex_lock(&sched->work_done_wait_mutex);
664 	for (i = 0; i < sched->nr_tasks; i++) {
665 		struct sched_thread_parms *parms = malloc(sizeof(*parms));
666 		BUG_ON(parms == NULL);
667 		parms->task = task = sched->tasks[i];
668 		parms->sched = sched;
669 		parms->fd = self_open_counters(sched, i);
670 		sem_init(&task->sleep_sem, 0, 0);
671 		sem_init(&task->ready_for_work, 0, 0);
672 		sem_init(&task->work_done_sem, 0, 0);
673 		task->curr_event = 0;
674 		err = pthread_create(&task->thread, &attr, thread_func, parms);
675 		BUG_ON(err);
676 	}
677 }
678 
679 static void destroy_tasks(struct perf_sched *sched)
680 	UNLOCK_FUNCTION(sched->start_work_mutex)
681 	UNLOCK_FUNCTION(sched->work_done_wait_mutex)
682 {
683 	struct task_desc *task;
684 	unsigned long i;
685 	int err;
686 
687 	mutex_unlock(&sched->start_work_mutex);
688 	mutex_unlock(&sched->work_done_wait_mutex);
689 	/* Get rid of threads so they won't be upset by mutex destruction */
690 	for (i = 0; i < sched->nr_tasks; i++) {
691 		task = sched->tasks[i];
692 		err = pthread_join(task->thread, NULL);
693 		BUG_ON(err);
694 		sem_destroy(&task->sleep_sem);
695 		sem_destroy(&task->ready_for_work);
696 		sem_destroy(&task->work_done_sem);
697 	}
698 }
699 
700 static void wait_for_tasks(struct perf_sched *sched)
701 	EXCLUSIVE_LOCKS_REQUIRED(sched->work_done_wait_mutex)
702 	EXCLUSIVE_LOCKS_REQUIRED(sched->start_work_mutex)
703 {
704 	u64 cpu_usage_0, cpu_usage_1;
705 	struct task_desc *task;
706 	unsigned long i, ret;
707 
708 	sched->start_time = get_nsecs();
709 	sched->cpu_usage = 0;
710 	mutex_unlock(&sched->work_done_wait_mutex);
711 
712 	for (i = 0; i < sched->nr_tasks; i++) {
713 		task = sched->tasks[i];
714 		ret = sem_wait(&task->ready_for_work);
715 		BUG_ON(ret);
716 		sem_init(&task->ready_for_work, 0, 0);
717 	}
718 	mutex_lock(&sched->work_done_wait_mutex);
719 
720 	cpu_usage_0 = get_cpu_usage_nsec_parent();
721 
722 	mutex_unlock(&sched->start_work_mutex);
723 
724 	for (i = 0; i < sched->nr_tasks; i++) {
725 		task = sched->tasks[i];
726 		ret = sem_wait(&task->work_done_sem);
727 		BUG_ON(ret);
728 		sem_init(&task->work_done_sem, 0, 0);
729 		sched->cpu_usage += task->cpu_usage;
730 		task->cpu_usage = 0;
731 	}
732 
733 	cpu_usage_1 = get_cpu_usage_nsec_parent();
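	/*
	 * Fold this run into a running average with weight 1/replay_repeat:
	 *
	 *   runavg' = (runavg * (replay_repeat - 1) + sample) / replay_repeat
	 */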
734 	if (!sched->runavg_cpu_usage)
735 		sched->runavg_cpu_usage = sched->cpu_usage;
736 	sched->runavg_cpu_usage = (sched->runavg_cpu_usage * (sched->replay_repeat - 1) + sched->cpu_usage) / sched->replay_repeat;
737 
738 	sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
739 	if (!sched->runavg_parent_cpu_usage)
740 		sched->runavg_parent_cpu_usage = sched->parent_cpu_usage;
741 	sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) +
742 					 sched->parent_cpu_usage)/sched->replay_repeat;
743 
744 	mutex_lock(&sched->start_work_mutex);
745 
746 	for (i = 0; i < sched->nr_tasks; i++) {
747 		task = sched->tasks[i];
748 		sem_init(&task->sleep_sem, 0, 0);
749 		task->curr_event = 0;
750 	}
751 }
752 
753 static void run_one_test(struct perf_sched *sched)
754 	EXCLUSIVE_LOCKS_REQUIRED(sched->work_done_wait_mutex)
755 	EXCLUSIVE_LOCKS_REQUIRED(sched->start_work_mutex)
756 {
757 	u64 T0, T1, delta, avg_delta, fluct;
758 
759 	T0 = get_nsecs();
760 	wait_for_tasks(sched);
761 	T1 = get_nsecs();
762 
763 	delta = T1 - T0;
764 	sched->sum_runtime += delta;
765 	sched->nr_runs++;
766 
767 	avg_delta = sched->sum_runtime / sched->nr_runs;
768 	if (delta < avg_delta)
769 		fluct = avg_delta - delta;
770 	else
771 		fluct = delta - avg_delta;
772 	sched->sum_fluct += fluct;
773 	if (!sched->run_avg)
774 		sched->run_avg = delta;
775 	sched->run_avg = (sched->run_avg * (sched->replay_repeat - 1) + delta) / sched->replay_repeat;
776 
777 	printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / NSEC_PER_MSEC);
778 
779 	printf("ravg: %0.2f, ", (double)sched->run_avg / NSEC_PER_MSEC);
780 
781 	printf("cpu: %0.2f / %0.2f",
782 		(double)sched->cpu_usage / NSEC_PER_MSEC, (double)sched->runavg_cpu_usage / NSEC_PER_MSEC);
783 
784 #if 0
785 	/*
786 	 * rusage statistics done by the parent, these are less
787 	 * accurate than the sched->sum_exec_runtime based statistics:
788 	 */
789 	printf(" [%0.2f / %0.2f]",
790 		(double)sched->parent_cpu_usage / NSEC_PER_MSEC,
791 		(double)sched->runavg_parent_cpu_usage / NSEC_PER_MSEC);
792 #endif
793 
794 	printf("\n");
795 
796 	if (sched->nr_sleep_corrections)
797 		printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections);
798 	sched->nr_sleep_corrections = 0;
799 }
800 
801 static void test_calibrations(struct perf_sched *sched)
802 {
803 	u64 T0, T1;
804 
805 	T0 = get_nsecs();
806 	burn_nsecs(sched, NSEC_PER_MSEC);
807 	T1 = get_nsecs();
808 
809 	printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);
810 
811 	T0 = get_nsecs();
812 	sleep_nsecs(NSEC_PER_MSEC);
813 	T1 = get_nsecs();
814 
815 	printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
816 }
817 
818 static int
819 replay_wakeup_event(struct perf_sched *sched,
820 		    struct evsel *evsel, struct perf_sample *sample,
821 		    struct machine *machine __maybe_unused)
822 {
823 	const char *comm = evsel__strval(evsel, sample, "comm");
824 	const u32 pid	 = evsel__intval(evsel, sample, "pid");
825 	struct task_desc *waker, *wakee;
826 
827 	if (verbose > 0) {
828 		printf("sched_wakeup event %p\n", evsel);
829 
830 		printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid);
831 	}
832 
833 	waker = register_pid(sched, sample->tid, "<unknown>");
834 	wakee = register_pid(sched, pid, comm);
835 
836 	add_sched_event_wakeup(sched, waker, sample->time, wakee);
837 	return 0;
838 }
839 
840 static int replay_switch_event(struct perf_sched *sched,
841 			       struct evsel *evsel,
842 			       struct perf_sample *sample,
843 			       struct machine *machine __maybe_unused)
844 {
845 	const char *prev_comm  = evsel__strval(evsel, sample, "prev_comm"),
846 		   *next_comm  = evsel__strval(evsel, sample, "next_comm");
847 	const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
848 		  next_pid = evsel__intval(evsel, sample, "next_pid");
849 	const char prev_state = evsel__taskstate(evsel, sample, "prev_state");
850 	struct task_desc *prev, __maybe_unused *next;
851 	u64 timestamp0, timestamp = sample->time;
852 	int cpu = sample->cpu;
853 	s64 delta;
854 
855 	if (verbose > 0)
856 		printf("sched_switch event %p\n", evsel);
857 
858 	if (cpu >= MAX_CPUS || cpu < 0)
859 		return 0;
860 
861 	timestamp0 = sched->cpu_last_switched[cpu];
862 	if (timestamp0)
863 		delta = timestamp - timestamp0;
864 	else
865 		delta = 0;
866 
867 	if (delta < 0) {
868 		pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
869 		return -1;
870 	}
871 
872 	pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
873 		 prev_comm, prev_pid, next_comm, next_pid, delta);
874 
875 	prev = register_pid(sched, prev_pid, prev_comm);
876 	next = register_pid(sched, next_pid, next_comm);
877 
878 	sched->cpu_last_switched[cpu] = timestamp;
879 
880 	add_sched_event_run(sched, prev, timestamp, delta);
881 	add_sched_event_sleep(sched, prev, timestamp, prev_state);
882 
883 	return 0;
884 }
885 
886 static int replay_fork_event(struct perf_sched *sched,
887 			     union perf_event *event,
888 			     struct machine *machine)
889 {
890 	struct thread *child, *parent;
891 
892 	child = machine__findnew_thread(machine, event->fork.pid,
893 					event->fork.tid);
894 	parent = machine__findnew_thread(machine, event->fork.ppid,
895 					 event->fork.ptid);
896 
897 	if (child == NULL || parent == NULL) {
898 		pr_debug("thread does not exist on fork event: child %p, parent %p\n",
899 				 child, parent);
900 		goto out_put;
901 	}
902 
903 	if (verbose > 0) {
904 		printf("fork event\n");
905 		printf("... parent: %s/%d\n", thread__comm_str(parent), thread__tid(parent));
906 		printf("...  child: %s/%d\n", thread__comm_str(child), thread__tid(child));
907 	}
908 
909 	register_pid(sched, thread__tid(parent), thread__comm_str(parent));
910 	register_pid(sched, thread__tid(child), thread__comm_str(child));
911 out_put:
912 	thread__put(child);
913 	thread__put(parent);
914 	return 0;
915 }
916 
917 struct sort_dimension {
918 	const char		*name;
919 	sort_fn_t		cmp;
920 	struct list_head	list;
921 };
922 
923 /*
924  * handle runtime stats saved per thread
925  */
926 static struct thread_runtime *thread__init_runtime(struct thread *thread)
927 {
928 	struct thread_runtime *r;
929 
930 	r = zalloc(sizeof(struct thread_runtime));
931 	if (!r)
932 		return NULL;
933 
934 	init_stats(&r->run_stats);
935 	thread__set_priv(thread, r);
936 
937 	return r;
938 }
939 
940 static struct thread_runtime *thread__get_runtime(struct thread *thread)
941 {
942 	struct thread_runtime *tr;
943 
944 	tr = thread__priv(thread);
945 	if (tr == NULL) {
946 		tr = thread__init_runtime(thread);
947 		if (tr == NULL)
948 			pr_debug("Failed to malloc memory for runtime data.\n");
949 	}
950 
951 	return tr;
952 }
953 
954 static int
955 thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
956 {
957 	struct sort_dimension *sort;
958 	int ret = 0;
959 
960 	BUG_ON(list_empty(list));
961 
962 	list_for_each_entry(sort, list, list) {
963 		ret = sort->cmp(l, r);
964 		if (ret)
965 			return ret;
966 	}
967 
968 	return ret;
969 }
970 
971 static struct work_atoms *
972 thread_atoms_search(struct rb_root_cached *root, struct thread *thread,
973 			 struct list_head *sort_list)
974 {
975 	struct rb_node *node = root->rb_root.rb_node;
976 	struct work_atoms key = { .thread = thread };
977 
978 	while (node) {
979 		struct work_atoms *atoms;
980 		int cmp;
981 
982 		atoms = container_of(node, struct work_atoms, node);
983 
984 		cmp = thread_lat_cmp(sort_list, &key, atoms);
985 		if (cmp > 0)
986 			node = node->rb_left;
987 		else if (cmp < 0)
988 			node = node->rb_right;
989 		else {
990 			BUG_ON(thread != atoms->thread);
991 			return atoms;
992 		}
993 	}
994 	return NULL;
995 }
996 
997 static void
998 __thread_latency_insert(struct rb_root_cached *root, struct work_atoms *data,
999 			 struct list_head *sort_list)
1000 {
1001 	struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
1002 	bool leftmost = true;
1003 
1004 	while (*new) {
1005 		struct work_atoms *this;
1006 		int cmp;
1007 
1008 		this = container_of(*new, struct work_atoms, node);
1009 		parent = *new;
1010 
1011 		cmp = thread_lat_cmp(sort_list, data, this);
1012 
1013 		if (cmp > 0)
1014 			new = &((*new)->rb_left);
1015 		else {
1016 			new = &((*new)->rb_right);
1017 			leftmost = false;
1018 		}
1019 	}
1020 
1021 	rb_link_node(&data->node, parent, new);
1022 	rb_insert_color_cached(&data->node, root, leftmost);
1023 }
1024 
1025 static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
1026 {
1027 	struct work_atoms *atoms = zalloc(sizeof(*atoms));
1028 	if (!atoms) {
1029 		pr_err("No memory at %s\n", __func__);
1030 		return -1;
1031 	}
1032 
1033 	atoms->thread = thread__get(thread);
1034 	INIT_LIST_HEAD(&atoms->work_list);
1035 	__thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid);
1036 	return 0;
1037 }
1038 
1039 static int
1040 add_sched_out_event(struct work_atoms *atoms,
1041 		    char run_state,
1042 		    u64 timestamp)
1043 {
1044 	struct work_atom *atom = zalloc(sizeof(*atom));
1045 	if (!atom) {
1046 		pr_err("Non memory at %s", __func__);
1047 		return -1;
1048 	}
1049 
1050 	atom->sched_out_time = timestamp;
1051 
1052 	if (run_state == 'R') {
1053 		atom->state = THREAD_WAIT_CPU;
1054 		atom->wake_up_time = atom->sched_out_time;
1055 	}
1056 
1057 	list_add_tail(&atom->list, &atoms->work_list);
1058 	return 0;
1059 }
1060 
1061 static void
1062 add_runtime_event(struct work_atoms *atoms, u64 delta,
1063 		  u64 timestamp __maybe_unused)
1064 {
1065 	struct work_atom *atom;
1066 
1067 	BUG_ON(list_empty(&atoms->work_list));
1068 
1069 	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1070 
1071 	atom->runtime += delta;
1072 	atoms->total_runtime += delta;
1073 }
1074 
1075 static void
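/*
 * A task is being scheduled in: if its last atom is waiting for a CPU,
 * charge the wakeup -> sched-in interval as scheduling latency, and
 * track the maximum latency along with when it started and ended.
 */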
1076 add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
1077 {
1078 	struct work_atom *atom;
1079 	u64 delta;
1080 
1081 	if (list_empty(&atoms->work_list))
1082 		return;
1083 
1084 	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1085 
1086 	if (atom->state != THREAD_WAIT_CPU)
1087 		return;
1088 
1089 	if (timestamp < atom->wake_up_time) {
1090 		atom->state = THREAD_IGNORE;
1091 		return;
1092 	}
1093 
1094 	atom->state = THREAD_SCHED_IN;
1095 	atom->sched_in_time = timestamp;
1096 
1097 	delta = atom->sched_in_time - atom->wake_up_time;
1098 	atoms->total_lat += delta;
1099 	if (delta > atoms->max_lat) {
1100 		atoms->max_lat = delta;
1101 		atoms->max_lat_start = atom->wake_up_time;
1102 		atoms->max_lat_end = timestamp;
1103 	}
1104 	atoms->nb_atoms++;
1105 }
1106 
1107 static int latency_switch_event(struct perf_sched *sched,
1108 				struct evsel *evsel,
1109 				struct perf_sample *sample,
1110 				struct machine *machine)
1111 {
1112 	const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
1113 		  next_pid = evsel__intval(evsel, sample, "next_pid");
1114 	const char prev_state = evsel__taskstate(evsel, sample, "prev_state");
1115 	struct work_atoms *out_events, *in_events;
1116 	struct thread *sched_out, *sched_in;
1117 	u64 timestamp0, timestamp = sample->time;
1118 	int cpu = sample->cpu, err = -1;
1119 	s64 delta;
1120 
1121 	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
1122 
1123 	timestamp0 = sched->cpu_last_switched[cpu];
1124 	sched->cpu_last_switched[cpu] = timestamp;
1125 	if (timestamp0)
1126 		delta = timestamp - timestamp0;
1127 	else
1128 		delta = 0;
1129 
1130 	if (delta < 0) {
1131 		pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
1132 		return -1;
1133 	}
1134 
1135 	sched_out = machine__findnew_thread(machine, -1, prev_pid);
1136 	sched_in = machine__findnew_thread(machine, -1, next_pid);
1137 	if (sched_out == NULL || sched_in == NULL)
1138 		goto out_put;
1139 
1140 	out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
1141 	if (!out_events) {
1142 		if (thread_atoms_insert(sched, sched_out))
1143 			goto out_put;
1144 		out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
1145 		if (!out_events) {
1146 			pr_err("out-event: Internal tree error");
1147 			goto out_put;
1148 		}
1149 	}
1150 	if (add_sched_out_event(out_events, prev_state, timestamp))
1151 		goto out_put;
1152 
1153 	in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
1154 	if (!in_events) {
1155 		if (thread_atoms_insert(sched, sched_in))
1156 			goto out_put;
1157 		in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
1158 		if (!in_events) {
1159 			pr_err("in-event: Internal tree error");
1160 			goto out_put;
1161 		}
1162 		/*
1163 		 * A task came in that we have not heard about yet;
1164 		 * add an initial atom in the runnable state:
1165 		 */
1166 		if (add_sched_out_event(in_events, 'R', timestamp))
1167 			goto out_put;
1168 	}
1169 	add_sched_in_event(in_events, timestamp);
1170 	err = 0;
1171 out_put:
1172 	thread__put(sched_out);
1173 	thread__put(sched_in);
1174 	return err;
1175 }
1176 
1177 static int latency_runtime_event(struct perf_sched *sched,
1178 				 struct evsel *evsel,
1179 				 struct perf_sample *sample,
1180 				 struct machine *machine)
1181 {
1182 	const u32 pid	   = evsel__intval(evsel, sample, "pid");
1183 	const u64 runtime  = evsel__intval(evsel, sample, "runtime");
1184 	struct thread *thread = machine__findnew_thread(machine, -1, pid);
1185 	struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
1186 	u64 timestamp = sample->time;
1187 	int cpu = sample->cpu, err = -1;
1188 
1189 	if (thread == NULL)
1190 		return -1;
1191 
1192 	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
1193 	if (!atoms) {
1194 		if (thread_atoms_insert(sched, thread))
1195 			goto out_put;
1196 		atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
1197 		if (!atoms) {
1198 			pr_err("in-event: Internal tree error");
1199 			goto out_put;
1200 		}
1201 		if (add_sched_out_event(atoms, 'R', timestamp))
1202 			goto out_put;
1203 	}
1204 
1205 	add_runtime_event(atoms, runtime, timestamp);
1206 	err = 0;
1207 out_put:
1208 	thread__put(thread);
1209 	return err;
1210 }
1211 
1212 static int latency_wakeup_event(struct perf_sched *sched,
1213 				struct evsel *evsel,
1214 				struct perf_sample *sample,
1215 				struct machine *machine)
1216 {
1217 	const u32 pid	  = evsel__intval(evsel, sample, "pid");
1218 	struct work_atoms *atoms;
1219 	struct work_atom *atom;
1220 	struct thread *wakee;
1221 	u64 timestamp = sample->time;
1222 	int err = -1;
1223 
1224 	wakee = machine__findnew_thread(machine, -1, pid);
1225 	if (wakee == NULL)
1226 		return -1;
1227 	atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
1228 	if (!atoms) {
1229 		if (thread_atoms_insert(sched, wakee))
1230 			goto out_put;
1231 		atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
1232 		if (!atoms) {
1233 			pr_err("wakeup-event: Internal tree error");
1234 			goto out_put;
1235 		}
1236 		if (add_sched_out_event(atoms, 'S', timestamp))
1237 			goto out_put;
1238 	}
1239 
1240 	BUG_ON(list_empty(&atoms->work_list));
1241 
1242 	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1243 
1244 	/*
1245 	 * As we do not guarantee the wakeup event happens when
1246 	 * task is out of run queue, also may happen when task is
1247 	 * on run queue and wakeup only change ->state to TASK_RUNNING,
1248 	 * then we should not set the ->wake_up_time when wake up a
1249 	 * task which is on run queue.
1250 	 *
1251 	 * You WILL be missing events if you've recorded only
1252 	 * one CPU, or are only looking at only one, so don't
1253 	 * skip in this case.
1254 	 */
1255 	if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
1256 		goto out_ok;
1257 
1258 	sched->nr_timestamps++;
1259 	if (atom->sched_out_time > timestamp) {
1260 		sched->nr_unordered_timestamps++;
1261 		goto out_ok;
1262 	}
1263 
1264 	atom->state = THREAD_WAIT_CPU;
1265 	atom->wake_up_time = timestamp;
1266 out_ok:
1267 	err = 0;
1268 out_put:
1269 	thread__put(wakee);
1270 	return err;
1271 }
1272 
1273 static int latency_migrate_task_event(struct perf_sched *sched,
1274 				      struct evsel *evsel,
1275 				      struct perf_sample *sample,
1276 				      struct machine *machine)
1277 {
1278 	const u32 pid = evsel__intval(evsel, sample, "pid");
1279 	u64 timestamp = sample->time;
1280 	struct work_atoms *atoms;
1281 	struct work_atom *atom;
1282 	struct thread *migrant;
1283 	int err = -1;
1284 
1285 	/*
1286 	 * Only need to worry about migration when profiling one CPU.
1287 	 */
1288 	if (sched->profile_cpu == -1)
1289 		return 0;
1290 
1291 	migrant = machine__findnew_thread(machine, -1, pid);
1292 	if (migrant == NULL)
1293 		return -1;
1294 	atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
1295 	if (!atoms) {
1296 		if (thread_atoms_insert(sched, migrant))
1297 			goto out_put;
1298 		register_pid(sched, thread__tid(migrant), thread__comm_str(migrant));
1299 		atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
1300 		if (!atoms) {
1301 			pr_err("migration-event: Internal tree error");
1302 			goto out_put;
1303 		}
1304 		if (add_sched_out_event(atoms, 'R', timestamp))
1305 			goto out_put;
1306 	}
1307 
1308 	BUG_ON(list_empty(&atoms->work_list));
1309 
1310 	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1311 	atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;
1312 
1313 	sched->nr_timestamps++;
1314 
1315 	if (atom->sched_out_time > timestamp)
1316 		sched->nr_unordered_timestamps++;
1317 	err = 0;
1318 out_put:
1319 	thread__put(migrant);
1320 	return err;
1321 }
1322 
1323 static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)
1324 {
1325 	int i;
1326 	int ret;
1327 	u64 avg;
1328 	char max_lat_start[32], max_lat_end[32];
1329 
1330 	if (!work_list->nb_atoms)
1331 		return;
1332 	/*
1333 	 * Ignore idle threads:
1334 	 */
1335 	if (!strcmp(thread__comm_str(work_list->thread), "swapper"))
1336 		return;
1337 
1338 	sched->all_runtime += work_list->total_runtime;
1339 	sched->all_count   += work_list->nb_atoms;
1340 
1341 	if (work_list->num_merged > 1) {
1342 		ret = printf("  %s:(%d) ", thread__comm_str(work_list->thread),
1343 			     work_list->num_merged);
1344 	} else {
1345 		ret = printf("  %s:%d ", thread__comm_str(work_list->thread),
1346 			     thread__tid(work_list->thread));
1347 	}
1348 
1349 	for (i = 0; i < 24 - ret; i++)
1350 		printf(" ");
1351 
1352 	avg = work_list->total_lat / work_list->nb_atoms;
1353 	timestamp__scnprintf_usec(work_list->max_lat_start, max_lat_start, sizeof(max_lat_start));
1354 	timestamp__scnprintf_usec(work_list->max_lat_end, max_lat_end, sizeof(max_lat_end));
1355 
1356 	printf("|%11.3f ms |%9" PRIu64 " | avg:%8.3f ms | max:%8.3f ms | max start: %12s s | max end: %12s s\n",
1357 	       (double)work_list->total_runtime / NSEC_PER_MSEC,
1358 	       work_list->nb_atoms, (double)avg / NSEC_PER_MSEC,
1359 	       (double)work_list->max_lat / NSEC_PER_MSEC,
1360 	       max_lat_start, max_lat_end);
1361 }
1362 
1363 static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
1364 {
1365 	pid_t l_tid, r_tid;
1366 
1367 	if (RC_CHK_EQUAL(l->thread, r->thread))
1368 		return 0;
1369 	l_tid = thread__tid(l->thread);
1370 	r_tid = thread__tid(r->thread);
1371 	if (l_tid < r_tid)
1372 		return -1;
1373 	if (l_tid > r_tid)
1374 		return 1;
1375 	return (int)(RC_CHK_ACCESS(l->thread) - RC_CHK_ACCESS(r->thread));
1376 }
1377 
1378 static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
1379 {
1380 	u64 avgl, avgr;
1381 
1382 	if (!l->nb_atoms)
1383 		return -1;
1384 
1385 	if (!r->nb_atoms)
1386 		return 1;
1387 
1388 	avgl = l->total_lat / l->nb_atoms;
1389 	avgr = r->total_lat / r->nb_atoms;
1390 
1391 	if (avgl < avgr)
1392 		return -1;
1393 	if (avgl > avgr)
1394 		return 1;
1395 
1396 	return 0;
1397 }
1398 
1399 static int max_cmp(struct work_atoms *l, struct work_atoms *r)
1400 {
1401 	if (l->max_lat < r->max_lat)
1402 		return -1;
1403 	if (l->max_lat > r->max_lat)
1404 		return 1;
1405 
1406 	return 0;
1407 }
1408 
1409 static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
1410 {
1411 	if (l->nb_atoms < r->nb_atoms)
1412 		return -1;
1413 	if (l->nb_atoms > r->nb_atoms)
1414 		return 1;
1415 
1416 	return 0;
1417 }
1418 
1419 static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
1420 {
1421 	if (l->total_runtime < r->total_runtime)
1422 		return -1;
1423 	if (l->total_runtime > r->total_runtime)
1424 		return 1;
1425 
1426 	return 0;
1427 }
1428 
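/*
 * Add one sort key to @list. Valid keys are "pid", "avg", "max",
 * "switch" and "runtime"; returns -1 for an unknown key.
 */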
1429 static int sort_dimension__add(const char *tok, struct list_head *list)
1430 {
1431 	size_t i;
1432 	static struct sort_dimension avg_sort_dimension = {
1433 		.name = "avg",
1434 		.cmp  = avg_cmp,
1435 	};
1436 	static struct sort_dimension max_sort_dimension = {
1437 		.name = "max",
1438 		.cmp  = max_cmp,
1439 	};
1440 	static struct sort_dimension pid_sort_dimension = {
1441 		.name = "pid",
1442 		.cmp  = pid_cmp,
1443 	};
1444 	static struct sort_dimension runtime_sort_dimension = {
1445 		.name = "runtime",
1446 		.cmp  = runtime_cmp,
1447 	};
1448 	static struct sort_dimension switch_sort_dimension = {
1449 		.name = "switch",
1450 		.cmp  = switch_cmp,
1451 	};
1452 	struct sort_dimension *available_sorts[] = {
1453 		&pid_sort_dimension,
1454 		&avg_sort_dimension,
1455 		&max_sort_dimension,
1456 		&switch_sort_dimension,
1457 		&runtime_sort_dimension,
1458 	};
1459 
1460 	for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
1461 		if (!strcmp(available_sorts[i]->name, tok)) {
1462 			list_add_tail(&available_sorts[i]->list, list);
1463 
1464 			return 0;
1465 		}
1466 	}
1467 
1468 	return -1;
1469 }
1470 
1471 static void perf_sched__sort_lat(struct perf_sched *sched)
1472 {
1473 	struct rb_node *node;
1474 	struct rb_root_cached *root = &sched->atom_root;
1475 again:
1476 	for (;;) {
1477 		struct work_atoms *data;
1478 		node = rb_first_cached(root);
1479 		if (!node)
1480 			break;
1481 
1482 		rb_erase_cached(node, root);
1483 		data = rb_entry(node, struct work_atoms, node);
1484 		__thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
1485 	}
1486 	if (root == &sched->atom_root) {
1487 		root = &sched->merged_atom_root;
1488 		goto again;
1489 	}
1490 }
1491 
1492 static int process_sched_wakeup_event(const struct perf_tool *tool,
1493 				      struct evsel *evsel,
1494 				      struct perf_sample *sample,
1495 				      struct machine *machine)
1496 {
1497 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1498 
1499 	if (sched->tp_handler->wakeup_event)
1500 		return sched->tp_handler->wakeup_event(sched, evsel, sample, machine);
1501 
1502 	return 0;
1503 }
1504 
1505 static int process_sched_wakeup_ignore(const struct perf_tool *tool __maybe_unused,
1506 				      struct evsel *evsel __maybe_unused,
1507 				      struct perf_sample *sample __maybe_unused,
1508 				      struct machine *machine __maybe_unused)
1509 {
1510 	return 0;
1511 }
1512 
1513 union map_priv {
1514 	void	*ptr;
1515 	bool	 color;
1516 };
1517 
1518 static bool thread__has_color(struct thread *thread)
1519 {
1520 	union map_priv priv = {
1521 		.ptr = thread__priv(thread),
1522 	};
1523 
1524 	return priv.color;
1525 }
1526 
1527 static struct thread*
1528 map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid, pid_t tid)
1529 {
1530 	struct thread *thread = machine__findnew_thread(machine, pid, tid);
1531 	union map_priv priv = {
1532 		.color = false,
1533 	};
1534 
1535 	if (!sched->map.color_pids || !thread || thread__priv(thread))
1536 		return thread;
1537 
1538 	if (thread_map__has(sched->map.color_pids, tid))
1539 		priv.color = true;
1540 
1541 	thread__set_priv(thread, priv.ptr);
1542 	return thread;
1543 }
1544 
1545 static bool sched_match_task(struct perf_sched *sched, const char *comm_str)
1546 {
1547 	bool fuzzy_match = sched->map.fuzzy;
1548 	struct strlist *task_names = sched->map.task_names;
1549 	struct str_node *node;
1550 
1551 	strlist__for_each_entry(node, task_names) {
1552 		bool match_found = fuzzy_match ? !!strstr(comm_str, node->s) :
1553 							!strcmp(comm_str, node->s);
1554 		if (match_found)
1555 			return true;
1556 	}
1557 
1558 	return false;
1559 }
1560 
1561 static void print_sched_map(struct perf_sched *sched, struct perf_cpu this_cpu, int cpus_nr,
1562 								const char *color, bool sched_out)
1563 {
1564 	for (int i = 0; i < cpus_nr; i++) {
1565 		struct perf_cpu cpu = {
1566 			.cpu = sched->map.comp ? sched->map.comp_cpus[i].cpu : i,
1567 		};
1568 		struct thread *curr_thread = sched->curr_thread[cpu.cpu];
1569 		struct thread *curr_out_thread = sched->curr_out_thread[cpu.cpu];
1570 		struct thread_runtime *curr_tr;
1571 		const char *pid_color = color;
1572 		const char *cpu_color = color;
1573 		char symbol = ' ';
1574 		struct thread *thread_to_check = sched_out ? curr_out_thread : curr_thread;
1575 
1576 		if (thread_to_check && thread__has_color(thread_to_check))
1577 			pid_color = COLOR_PIDS;
1578 
1579 		if (sched->map.color_cpus && perf_cpu_map__has(sched->map.color_cpus, cpu))
1580 			cpu_color = COLOR_CPUS;
1581 
1582 		if (cpu.cpu == this_cpu.cpu)
1583 			symbol = '*';
1584 
1585 		color_fprintf(stdout, cpu.cpu != this_cpu.cpu ? color : cpu_color, "%c", symbol);
1586 
1587 		thread_to_check = sched_out ? sched->curr_out_thread[cpu.cpu] :
1588 								sched->curr_thread[cpu.cpu];
1589 
1590 		if (thread_to_check) {
1591 			curr_tr = thread__get_runtime(thread_to_check);
1592 			if (curr_tr == NULL)
1593 				return;
1594 
1595 			if (sched_out) {
1596 				if (cpu.cpu == this_cpu.cpu)
1597 					color_fprintf(stdout, color, "-  ");
1598 				else {
1599 					curr_tr = thread__get_runtime(sched->curr_thread[cpu.cpu]);
1600 					if (curr_tr != NULL)
1601 						color_fprintf(stdout, pid_color, "%2s ",
1602 										curr_tr->shortname);
1603 				}
1604 			} else
1605 				color_fprintf(stdout, pid_color, "%2s ", curr_tr->shortname);
1606 		} else
1607 			color_fprintf(stdout, color, "   ");
1608 	}
1609 }
1610 
1611 static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
1612 			    struct perf_sample *sample, struct machine *machine)
1613 {
1614 	const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
1615 	const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid");
1616 	struct thread *sched_in, *sched_out;
1617 	struct thread_runtime *tr;
1618 	int new_shortname;
1619 	u64 timestamp0, timestamp = sample->time;
1620 	s64 delta;
1621 	struct perf_cpu this_cpu = {
1622 		.cpu = sample->cpu,
1623 	};
1624 	int cpus_nr;
1625 	int proceed;
1626 	bool new_cpu = false;
1627 	const char *color = PERF_COLOR_NORMAL;
1628 	char stimestamp[32];
1629 	const char *str;
1630 
1631 	BUG_ON(this_cpu.cpu >= MAX_CPUS || this_cpu.cpu < 0);
1632 
1633 	if (this_cpu.cpu > sched->max_cpu.cpu)
1634 		sched->max_cpu = this_cpu;
1635 
1636 	if (sched->map.comp) {
1637 		cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS);
1638 		if (!__test_and_set_bit(this_cpu.cpu, sched->map.comp_cpus_mask)) {
1639 			sched->map.comp_cpus[cpus_nr++] = this_cpu;
1640 			new_cpu = true;
1641 		}
1642 	} else
1643 		cpus_nr = sched->max_cpu.cpu;
1644 
1645 	timestamp0 = sched->cpu_last_switched[this_cpu.cpu];
1646 	sched->cpu_last_switched[this_cpu.cpu] = timestamp;
1647 	if (timestamp0)
1648 		delta = timestamp - timestamp0;
1649 	else
1650 		delta = 0;
1651 
1652 	if (delta < 0) {
1653 		pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
1654 		return -1;
1655 	}
1656 
1657 	sched_in = map__findnew_thread(sched, machine, -1, next_pid);
1658 	sched_out = map__findnew_thread(sched, machine, -1, prev_pid);
1659 	if (sched_in == NULL || sched_out == NULL)
1660 		return -1;
1661 
1662 	tr = thread__get_runtime(sched_in);
1663 	if (tr == NULL) {
1664 		thread__put(sched_in);
1665 		return -1;
1666 	}
1667 
1668 	sched->curr_thread[this_cpu.cpu] = thread__get(sched_in);
1669 	sched->curr_out_thread[this_cpu.cpu] = thread__get(sched_out);
1670 
1671 	str = thread__comm_str(sched_in);
1672 	new_shortname = 0;
1673 	if (!tr->shortname[0]) {
1674 		if (!strcmp(thread__comm_str(sched_in), "swapper")) {
1675 			/*
1676 			 * Don't allocate a letter-number for swapper:0
1677 			 * as a shortname. Instead, we use '.' for it.
1678 			 */
1679 			tr->shortname[0] = '.';
1680 			tr->shortname[1] = ' ';
1681 		} else if (!sched->map.task_name || sched_match_task(sched, str)) {
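			/*
			 * Hand out the next two-character shortname,
			 * cycling A0, B0, ..., Z0, A1, ... up to Z9
			 * and then wrapping around to A0 again.
			 */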
1682 			tr->shortname[0] = sched->next_shortname1;
1683 			tr->shortname[1] = sched->next_shortname2;
1684 
1685 			if (sched->next_shortname1 < 'Z') {
1686 				sched->next_shortname1++;
1687 			} else {
1688 				sched->next_shortname1 = 'A';
1689 				if (sched->next_shortname2 < '9')
1690 					sched->next_shortname2++;
1691 				else
1692 					sched->next_shortname2 = '0';
1693 			}
1694 		} else {
1695 			tr->shortname[0] = '-';
1696 			tr->shortname[1] = ' ';
1697 		}
1698 		new_shortname = 1;
1699 	}
1700 
1701 	if (sched->map.cpus && !perf_cpu_map__has(sched->map.cpus, this_cpu))
1702 		goto out;
1703 
1704 	proceed = 0;
1705 	str = thread__comm_str(sched_in);
1706 	/*
1707 	 * Check which of sched_in and sched_out matches the passed --task-name
1708 	 * arguments and call the corresponding print_sched_map.
1709 	 */
1710 	if (sched->map.task_name && !sched_match_task(sched, str)) {
1711 		if (!sched_match_task(sched, thread__comm_str(sched_out)))
1712 			goto out;
1713 		else
1714 			goto sched_out;
1715 
1716 	} else {
1717 		str = thread__comm_str(sched_out);
1718 		if (!(sched->map.task_name && !sched_match_task(sched, str)))
1719 			proceed = 1;
1720 	}
1721 
1722 	printf("  ");
1723 
1724 	print_sched_map(sched, this_cpu, cpus_nr, color, false);
1725 
1726 	timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp));
1727 	color_fprintf(stdout, color, "  %12s secs ", stimestamp);
1728 	if (new_shortname || tr->comm_changed || (verbose > 0 && thread__tid(sched_in))) {
1729 		const char *pid_color = color;
1730 
1731 		if (thread__has_color(sched_in))
1732 			pid_color = COLOR_PIDS;
1733 
1734 		color_fprintf(stdout, pid_color, "%s => %s:%d",
1735 			tr->shortname, thread__comm_str(sched_in), thread__tid(sched_in));
1736 		tr->comm_changed = false;
1737 	}
1738 
1739 	if (sched->map.comp && new_cpu)
1740 		color_fprintf(stdout, color, " (CPU %d)", this_cpu);
1741 
1742 	if (proceed != 1) {
1743 		color_fprintf(stdout, color, "\n");
1744 		goto out;
1745 	}
1746 
1747 sched_out:
1748 	if (sched->map.task_name) {
1749 		tr = thread__get_runtime(sched->curr_out_thread[this_cpu.cpu]);
1750 		if (tr == NULL || strcmp(tr->shortname, "") == 0)
1751 			goto out;
1752 
1753 		if (proceed == 1)
1754 			color_fprintf(stdout, color, "\n");
1755 
1756 		printf("  ");
1757 		print_sched_map(sched, this_cpu, cpus_nr, color, true);
1758 		timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp));
1759 		color_fprintf(stdout, color, "  %12s secs ", stimestamp);
1760 	}
1761 
1762 	color_fprintf(stdout, color, "\n");
1763 
1764 out:
1765 	if (sched->map.task_name)
1766 		thread__put(sched_out);
1767 
1768 	thread__put(sched_in);
1769 
1770 	return 0;
1771 }
1772 
1773 static int process_sched_switch_event(const struct perf_tool *tool,
1774 				      struct evsel *evsel,
1775 				      struct perf_sample *sample,
1776 				      struct machine *machine)
1777 {
1778 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1779 	int this_cpu = sample->cpu, err = 0;
1780 	u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
1781 	    next_pid = evsel__intval(evsel, sample, "next_pid");
1782 
1783 	if (sched->curr_pid[this_cpu] != (u32)-1) {
1784 		/*
1785 		 * Are we trying to switch away a PID that is
1786 		 * not current?
1787 		 */
1788 		if (sched->curr_pid[this_cpu] != prev_pid)
1789 			sched->nr_context_switch_bugs++;
1790 	}
1791 
1792 	if (sched->tp_handler->switch_event)
1793 		err = sched->tp_handler->switch_event(sched, evsel, sample, machine);
1794 
1795 	sched->curr_pid[this_cpu] = next_pid;
1796 	return err;
1797 }
1798 
1799 static int process_sched_runtime_event(const struct perf_tool *tool,
1800 				       struct evsel *evsel,
1801 				       struct perf_sample *sample,
1802 				       struct machine *machine)
1803 {
1804 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1805 
1806 	if (sched->tp_handler->runtime_event)
1807 		return sched->tp_handler->runtime_event(sched, evsel, sample, machine);
1808 
1809 	return 0;
1810 }
1811 
1812 static int perf_sched__process_fork_event(const struct perf_tool *tool,
1813 					  union perf_event *event,
1814 					  struct perf_sample *sample,
1815 					  struct machine *machine)
1816 {
1817 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1818 
1819 	/* run the fork event through the perf machinery */
1820 	perf_event__process_fork(tool, event, sample, machine);
1821 
1822 	/* and then run additional processing needed for this command */
1823 	if (sched->tp_handler->fork_event)
1824 		return sched->tp_handler->fork_event(sched, event, machine);
1825 
1826 	return 0;
1827 }
1828 
1829 static int process_sched_migrate_task_event(const struct perf_tool *tool,
1830 					    struct evsel *evsel,
1831 					    struct perf_sample *sample,
1832 					    struct machine *machine)
1833 {
1834 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1835 
1836 	if (sched->tp_handler->migrate_task_event)
1837 		return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine);
1838 
1839 	return 0;
1840 }
1841 
1842 typedef int (*tracepoint_handler)(const struct perf_tool *tool,
1843 				  struct evsel *evsel,
1844 				  struct perf_sample *sample,
1845 				  struct machine *machine);
1846 
1847 static int perf_sched__process_tracepoint_sample(const struct perf_tool *tool __maybe_unused,
1848 						 union perf_event *event __maybe_unused,
1849 						 struct perf_sample *sample,
1850 						 struct evsel *evsel,
1851 						 struct machine *machine)
1852 {
1853 	int err = 0;
1854 
1855 	if (evsel->handler != NULL) {
1856 		tracepoint_handler f = evsel->handler;
1857 		err = f(tool, evsel, sample, machine);
1858 	}
1859 
1860 	return err;
1861 }
1862 
1863 static int perf_sched__process_comm(const struct perf_tool *tool __maybe_unused,
1864 				    union perf_event *event,
1865 				    struct perf_sample *sample,
1866 				    struct machine *machine)
1867 {
1868 	struct thread *thread;
1869 	struct thread_runtime *tr;
1870 	int err;
1871 
1872 	err = perf_event__process_comm(tool, event, sample, machine);
1873 	if (err)
1874 		return err;
1875 
1876 	thread = machine__find_thread(machine, sample->pid, sample->tid);
1877 	if (!thread) {
1878 		pr_err("Internal error: can't find thread\n");
1879 		return -1;
1880 	}
1881 
1882 	tr = thread__get_runtime(thread);
1883 	if (tr == NULL) {
1884 		thread__put(thread);
1885 		return -1;
1886 	}
1887 
1888 	tr->comm_changed = true;
1889 	thread__put(thread);
1890 
1891 	return 0;
1892 }
1893 
1894 static int perf_sched__read_events(struct perf_sched *sched)
1895 {
1896 	struct evsel_str_handler handlers[] = {
1897 		{ "sched:sched_switch",	      process_sched_switch_event, },
1898 		{ "sched:sched_stat_runtime", process_sched_runtime_event, },
1899 		{ "sched:sched_wakeup",	      process_sched_wakeup_event, },
1900 		{ "sched:sched_waking",	      process_sched_wakeup_event, },
1901 		{ "sched:sched_wakeup_new",   process_sched_wakeup_event, },
1902 		{ "sched:sched_migrate_task", process_sched_migrate_task_event, },
1903 	};
1904 	struct perf_session *session;
1905 	struct perf_data data = {
1906 		.path  = input_name,
1907 		.mode  = PERF_DATA_MODE_READ,
1908 		.force = sched->force,
1909 	};
1910 	int rc = -1;
1911 
1912 	session = perf_session__new(&data, &sched->tool);
1913 	if (IS_ERR(session)) {
1914 		pr_debug("Error creating perf session");
1915 		return PTR_ERR(session);
1916 	}
1917 
1918 	symbol__init(&session->header.env);
1919 
1920 	/* prefer sched_waking if it is captured */
1921 	if (evlist__find_tracepoint_by_name(session->evlist, "sched:sched_waking"))
1922 		handlers[2].handler = process_sched_wakeup_ignore;
1923 
1924 	if (perf_session__set_tracepoints_handlers(session, handlers))
1925 		goto out_delete;
1926 
1927 	if (perf_session__has_traces(session, "record -R")) {
1928 		int err = perf_session__process_events(session);
1929 		if (err) {
1930 			pr_err("Failed to process events, error %d\n", err);
1931 			goto out_delete;
1932 		}
1933 
1934 		sched->nr_events      = session->evlist->stats.nr_events[0];
1935 		sched->nr_lost_events = session->evlist->stats.total_lost;
1936 		sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
1937 	}
1938 
1939 	rc = 0;
1940 out_delete:
1941 	perf_session__delete(session);
1942 	return rc;
1943 }
1944 
1945 /*
1946  * scheduling times are printed as msec.usec
1947  */
1948 static inline void print_sched_time(unsigned long long nsecs, int width)
1949 {
1950 	unsigned long msecs;
1951 	unsigned long usecs;
1952 
1953 	msecs  = nsecs / NSEC_PER_MSEC;
1954 	nsecs -= msecs * NSEC_PER_MSEC;
1955 	usecs  = nsecs / NSEC_PER_USEC;
1956 	printf("%*lu.%03lu ", width, msecs, usecs);
1957 }
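
/*
 * e.g. print_sched_time(12345678, 6) prints "    12.345 ": 12 msec,
 * 345 usec, with the sub-microsecond remainder truncated.
 */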
1958 
1959 /*
1960  * returns runtime data for event, allocating memory for it the
1961  * first time it is used.
1962  */
1963 static struct evsel_runtime *evsel__get_runtime(struct evsel *evsel)
1964 {
1965 	struct evsel_runtime *r = evsel->priv;
1966 
1967 	if (r == NULL) {
1968 		r = zalloc(sizeof(struct evsel_runtime));
1969 		evsel->priv = r;
1970 	}
1971 
1972 	return r;
1973 }
1974 
1975 /*
1976  * save last time event was seen per cpu
1977  */
1978 static void evsel__save_time(struct evsel *evsel, u64 timestamp, u32 cpu)
1979 {
1980 	struct evsel_runtime *r = evsel__get_runtime(evsel);
1981 
1982 	if (r == NULL)
1983 		return;
1984 
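	/*
	 * grow the per-cpu timestamp array to the next power of two
	 * above this cpu, zero-filling the newly added slots
	 */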
1985 	if ((cpu >= r->ncpu) || (r->last_time == NULL)) {
1986 		int i, n = __roundup_pow_of_two(cpu+1);
1987 		void *p = r->last_time;
1988 
1989 		p = realloc(r->last_time, n * sizeof(u64));
1990 		if (!p)
1991 			return;
1992 
1993 		r->last_time = p;
1994 		for (i = r->ncpu; i < n; ++i)
1995 			r->last_time[i] = (u64) 0;
1996 
1997 		r->ncpu = n;
1998 	}
1999 
2000 	r->last_time[cpu] = timestamp;
2001 }
2002 
2003 /* returns last time this event was seen on the given cpu */
2004 static u64 evsel__get_time(struct evsel *evsel, u32 cpu)
2005 {
2006 	struct evsel_runtime *r = evsel__get_runtime(evsel);
2007 
2008 	if ((r == NULL) || (r->last_time == NULL) || (cpu >= r->ncpu))
2009 		return 0;
2010 
2011 	return r->last_time[cpu];
2012 }
2013 
2014 static int comm_width = 30;
2015 
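/*
 * Format "comm[tid/pid]" into a static buffer (so not thread-safe) and
 * widen the global comm_width as needed to keep later rows aligned.
 */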
2016 static char *timehist_get_commstr(struct thread *thread)
2017 {
2018 	static char str[32];
2019 	const char *comm = thread__comm_str(thread);
2020 	pid_t tid = thread__tid(thread);
2021 	pid_t pid = thread__pid(thread);
2022 	int n;
2023 
2024 	if (pid == 0)
2025 		n = scnprintf(str, sizeof(str), "%s", comm);
2026 
2027 	else if (tid != pid)
2028 		n = scnprintf(str, sizeof(str), "%s[%d/%d]", comm, tid, pid);
2029 
2030 	else
2031 		n = scnprintf(str, sizeof(str), "%s[%d]", comm, tid);
2032 
2033 	if (n > comm_width)
2034 		comm_width = n;
2035 
2036 	return str;
2037 }
2038 
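/*
 * e.g., with default widths and neither --cpu-visual nor --state, the
 * header looks roughly like:
 *
 *            time    cpu  task name                       wait time  sch delay   run time
 *                         [tid/pid]                           (msec)     (msec)     (msec)
 * --------------- ------  ------------------------------  ---------  ---------  ---------
 */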
2039 static void timehist_header(struct perf_sched *sched)
2040 {
2041 	u32 ncpus = sched->max_cpu.cpu + 1;
2042 	u32 i, j;
2043 
2044 	printf("%15s %6s ", "time", "cpu");
2045 
2046 	if (sched->show_cpu_visual) {
2047 		printf(" ");
2048 		for (i = 0, j = 0; i < ncpus; ++i) {
2049 			printf("%x", j++);
2050 			if (j > 15)
2051 				j = 0;
2052 		}
2053 		printf(" ");
2054 	}
2055 
2056 	printf(" %-*s  %9s  %9s  %9s", comm_width,
2057 		"task name", "wait time", "sch delay", "run time");
2058 
2059 	if (sched->show_state)
2060 		printf("  %s", "state");
2061 
2062 	printf("\n");
2063 
2064 	/*
2065 	 * units row
2066 	 */
2067 	printf("%15s %-6s ", "", "");
2068 
2069 	if (sched->show_cpu_visual)
2070 		printf(" %*s ", ncpus, "");
2071 
2072 	printf(" %-*s  %9s  %9s  %9s", comm_width,
2073 	       "[tid/pid]", "(msec)", "(msec)", "(msec)");
2074 
2075 	if (sched->show_state)
2076 		printf("  %5s", "");
2077 
2078 	printf("\n");
2079 
2080 	/*
2081 	 * separator
2082 	 */
2083 	printf("%.15s %.6s ", graph_dotted_line, graph_dotted_line);
2084 
2085 	if (sched->show_cpu_visual)
2086 		printf(" %.*s ", ncpus, graph_dotted_line);
2087 
2088 	printf(" %.*s  %.9s  %.9s  %.9s", comm_width,
2089 		graph_dotted_line, graph_dotted_line, graph_dotted_line,
2090 		graph_dotted_line);
2091 
2092 	if (sched->show_state)
2093 		printf("  %.5s", graph_dotted_line);
2094 
2095 	printf("\n");
2096 }
2097 
2098 static void timehist_print_sample(struct perf_sched *sched,
2099 				  struct evsel *evsel,
2100 				  struct perf_sample *sample,
2101 				  struct addr_location *al,
2102 				  struct thread *thread,
2103 				  u64 t, const char state)
2104 {
2105 	struct thread_runtime *tr = thread__priv(thread);
2106 	const char *next_comm = evsel__strval(evsel, sample, "next_comm");
2107 	const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
2108 	u32 max_cpus = sched->max_cpu.cpu + 1;
2109 	char tstr[64];
2110 	char nstr[30];
2111 	u64 wait_time;
2112 
2113 	if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
2114 		return;
2115 
2116 	timestamp__scnprintf_usec(t, tstr, sizeof(tstr));
2117 	printf("%15s [%04d] ", tstr, sample->cpu);
2118 
2119 	if (sched->show_cpu_visual) {
2120 		u32 i;
2121 		char c;
2122 
2123 		printf(" ");
2124 		for (i = 0; i < max_cpus; ++i) {
2125 			/* flag idle times with 'i'; others are sched events */
2126 			if (i == sample->cpu)
2127 				c = (thread__tid(thread) == 0) ? 'i' : 's';
2128 			else
2129 				c = ' ';
2130 			printf("%c", c);
2131 		}
2132 		printf(" ");
2133 	}
2134 
2135 	printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2136 
2137 	wait_time = tr->dt_sleep + tr->dt_iowait + tr->dt_preempt;
2138 	print_sched_time(wait_time, 6);
2139 
2140 	print_sched_time(tr->dt_delay, 6);
2141 	print_sched_time(tr->dt_run, 6);
2142 
2143 	if (sched->show_state)
2144 		printf(" %5c ", thread__tid(thread) == 0 ? 'I' : state);
2145 
2146 	if (sched->show_next) {
2147 		snprintf(nstr, sizeof(nstr), "next: %s[%d]", next_comm, next_pid);
2148 		printf(" %-*s", comm_width, nstr);
2149 	}
2150 
2151 	if (sched->show_wakeups && !sched->show_next)
2152 		printf("  %-*s", comm_width, "");
2153 
2154 	if (thread__tid(thread) == 0)
2155 		goto out;
2156 
2157 	if (sched->show_callchain)
2158 		printf("  ");
2159 
2160 	sample__fprintf_sym(sample, al, 0,
2161 			    EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
2162 			    EVSEL__PRINT_CALLCHAIN_ARROW |
2163 			    EVSEL__PRINT_SKIP_IGNORED,
2164 			    get_tls_callchain_cursor(), symbol_conf.bt_stop_list,  stdout);
2165 
2166 out:
2167 	printf("\n");
2168 }
2169 
2170 /*
2171  * Explanation of delta-time stats:
2172  *
2173  *            t = time of current schedule out event
2174  *        tprev = time of previous sched out event
2175  *                also time of schedule-in event for current task
2176  *    last_time = time of last sched change event for current task
2177  *                (i.e., time the process was last scheduled out)
2178  * ready_to_run = time of wakeup for current task
2179  *
2180  * -----|------------|------------|------------|------
2181  *    last         ready        tprev          t
2182  *    time         to run
2183  *
2184  *      |-------- dt_wait --------|
2185  *                   |- dt_delay -|-- dt_run --|
2186  *
2187  *   dt_run = run time of current task
2188  *  dt_wait = time between last schedule out event for task and tprev
2189  *            represents time spent off the cpu
2190  * dt_delay = time between wakeup and schedule-in of task
2191  */
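
/*
 * Worked example (hypothetical timestamps, in nsec):
 *   last_time = 100, ready_to_run = 400, tprev = 600, t = 1000
 *   dt_run   = t - tprev            = 400
 *   dt_delay = tprev - ready_to_run = 200
 *   dt_wait  = tprev - last_time    = 500, charged to dt_sleep,
 *              dt_iowait or dt_preempt depending on last_state
 */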
2192 
2193 static void timehist_update_runtime_stats(struct thread_runtime *r,
2194 					 u64 t, u64 tprev)
2195 {
2196 	r->dt_delay   = 0;
2197 	r->dt_sleep   = 0;
2198 	r->dt_iowait  = 0;
2199 	r->dt_preempt = 0;
2200 	r->dt_run     = 0;
2201 
2202 	if (tprev) {
2203 		r->dt_run = t - tprev;
2204 		if (r->ready_to_run) {
2205 			if (r->ready_to_run > tprev)
2206 				pr_debug("time travel: wakeup time for task > previous sched_switch event\n");
2207 			else
2208 				r->dt_delay = tprev - r->ready_to_run;
2209 		}
2210 
2211 		if (r->last_time > tprev)
2212 			pr_debug("time travel: last sched out time for task > previous sched_switch event\n");
2213 		else if (r->last_time) {
2214 			u64 dt_wait = tprev - r->last_time;
2215 
2216 			if (r->last_state == 'R')
2217 				r->dt_preempt = dt_wait;
2218 			else if (r->last_state == 'D')
2219 				r->dt_iowait = dt_wait;
2220 			else
2221 				r->dt_sleep = dt_wait;
2222 		}
2223 	}
2224 
2225 	update_stats(&r->run_stats, r->dt_run);
2226 
2227 	r->total_run_time     += r->dt_run;
2228 	r->total_delay_time   += r->dt_delay;
2229 	r->total_sleep_time   += r->dt_sleep;
2230 	r->total_iowait_time  += r->dt_iowait;
2231 	r->total_preempt_time += r->dt_preempt;
2232 }
2233 
2234 static bool is_idle_sample(struct perf_sample *sample,
2235 			   struct evsel *evsel)
2236 {
2237 	/* pid 0 == swapper == idle task */
2238 	if (evsel__name_is(evsel, "sched:sched_switch"))
2239 		return evsel__intval(evsel, sample, "prev_pid") == 0;
2240 
2241 	return sample->pid == 0;
2242 }
2243 
2244 static void save_task_callchain(struct perf_sched *sched,
2245 				struct perf_sample *sample,
2246 				struct evsel *evsel,
2247 				struct machine *machine)
2248 {
2249 	struct callchain_cursor *cursor;
2250 	struct thread *thread;
2251 
2252 	/* want main thread for process - has maps */
2253 	thread = machine__findnew_thread(machine, sample->pid, sample->pid);
2254 	if (thread == NULL) {
2255 		pr_debug("Failed to get thread for pid %d.\n", sample->pid);
2256 		return;
2257 	}
2258 
2259 	if (!sched->show_callchain || sample->callchain == NULL)
2260 		return;
2261 
2262 	cursor = get_tls_callchain_cursor();
2263 
2264 	if (thread__resolve_callchain(thread, cursor, evsel, sample,
2265 				      NULL, NULL, sched->max_stack + 2) != 0) {
2266 		if (verbose > 0)
2267 			pr_err("Failed to resolve callchain. Skipping\n");
2268 
2269 		return;
2270 	}
2271 
2272 	callchain_cursor_commit(cursor);
2273 
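	/*
	 * mark scheduler-internal entry points as ignored so printed
	 * chains (EVSEL__PRINT_SKIP_IGNORED) start at the frame that
	 * actually blocked
	 */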
2274 	while (true) {
2275 		struct callchain_cursor_node *node;
2276 		struct symbol *sym;
2277 
2278 		node = callchain_cursor_current(cursor);
2279 		if (node == NULL)
2280 			break;
2281 
2282 		sym = node->ms.sym;
2283 		if (sym) {
2284 			if (!strcmp(sym->name, "schedule") ||
2285 			    !strcmp(sym->name, "__schedule") ||
2286 			    !strcmp(sym->name, "preempt_schedule"))
2287 				sym->ignore = 1;
2288 		}
2289 
2290 		callchain_cursor_advance(cursor);
2291 	}
2292 }
2293 
2294 static int init_idle_thread(struct thread *thread)
2295 {
2296 	struct idle_thread_runtime *itr;
2297 
2298 	thread__set_comm(thread, idle_comm, 0);
2299 
2300 	itr = zalloc(sizeof(*itr));
2301 	if (itr == NULL)
2302 		return -ENOMEM;
2303 
2304 	init_stats(&itr->tr.run_stats);
2305 	callchain_init(&itr->callchain);
2306 	callchain_cursor_reset(&itr->cursor);
2307 	thread__set_priv(thread, itr);
2308 
2309 	return 0;
2310 }
2311 
2312 /*
2313  * Track idle stats per cpu by maintaining a local thread
2314  * struct for the idle task on each cpu.
2315  */
2316 static int init_idle_threads(int ncpu)
2317 {
2318 	int i, ret;
2319 
2320 	idle_threads = zalloc(ncpu * sizeof(struct thread *));
2321 	if (!idle_threads)
2322 		return -ENOMEM;
2323 
2324 	idle_max_cpu = ncpu;
2325 
2326 	/* allocate the actual thread struct if needed */
2327 	for (i = 0; i < ncpu; ++i) {
2328 		idle_threads[i] = thread__new(0, 0);
2329 		if (idle_threads[i] == NULL)
2330 			return -ENOMEM;
2331 
2332 		ret = init_idle_thread(idle_threads[i]);
2333 		if (ret < 0)
2334 			return ret;
2335 	}
2336 
2337 	return 0;
2338 }
2339 
2340 static void free_idle_threads(void)
2341 {
2342 	int i;
2343 
2344 	if (idle_threads == NULL)
2345 		return;
2346 
2347 	for (i = 0; i < idle_max_cpu; ++i) {
2348 		if (idle_threads[i])
2349 			thread__delete(idle_threads[i]);
2350 	}
2351 
2352 	free(idle_threads);
2353 }
2354 
2355 static struct thread *get_idle_thread(int cpu)
2356 {
2357 	/*
2358 	 * expand/allocate array of pointers to local thread
2359 	 * structs if needed
2360 	 */
2361 	if ((cpu >= idle_max_cpu) || (idle_threads == NULL)) {
2362 		int i, j = __roundup_pow_of_two(cpu+1);
2363 		void *p;
2364 
2365 		p = realloc(idle_threads, j * sizeof(struct thread *));
2366 		if (!p)
2367 			return NULL;
2368 
2369 		idle_threads = (struct thread **) p;
2370 		for (i = idle_max_cpu; i < j; ++i)
2371 			idle_threads[i] = NULL;
2372 
2373 		idle_max_cpu = j;
2374 	}
2375 
2376 	/* allocate a new thread struct if needed */
2377 	if (idle_threads[cpu] == NULL) {
2378 		idle_threads[cpu] = thread__new(0, 0);
2379 
2380 		if (idle_threads[cpu]) {
2381 			if (init_idle_thread(idle_threads[cpu]) < 0)
2382 				return NULL;
2383 		}
2384 	}
2385 
2386 	return idle_threads[cpu];
2387 }
2388 
2389 static void save_idle_callchain(struct perf_sched *sched,
2390 				struct idle_thread_runtime *itr,
2391 				struct perf_sample *sample)
2392 {
2393 	struct callchain_cursor *cursor;
2394 
2395 	if (!sched->show_callchain || sample->callchain == NULL)
2396 		return;
2397 
2398 	cursor = get_tls_callchain_cursor();
2399 	if (cursor == NULL)
2400 		return;
2401 
2402 	callchain_cursor__copy(&itr->cursor, cursor);
2403 }
2404 
2405 static struct thread *timehist_get_thread(struct perf_sched *sched,
2406 					  struct perf_sample *sample,
2407 					  struct machine *machine,
2408 					  struct evsel *evsel)
2409 {
2410 	struct thread *thread;
2411 
2412 	if (is_idle_sample(sample, evsel)) {
2413 		thread = get_idle_thread(sample->cpu);
2414 		if (thread == NULL)
2415 			pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
2416 
2417 	} else {
2418 		/* there were samples with tid 0 but non-zero pid */
2419 		thread = machine__findnew_thread(machine, sample->pid,
2420 						 sample->tid ?: sample->pid);
2421 		if (thread == NULL) {
2422 			pr_debug("Failed to get thread for tid %d. skipping sample.\n",
2423 				 sample->tid);
2424 		}
2425 
2426 		save_task_callchain(sched, sample, evsel, machine);
2427 		if (sched->idle_hist) {
2428 			struct thread *idle;
2429 			struct idle_thread_runtime *itr;
2430 
2431 			idle = get_idle_thread(sample->cpu);
2432 			if (idle == NULL) {
2433 				pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
2434 				return NULL;
2435 			}
2436 
2437 			itr = thread__priv(idle);
2438 			if (itr == NULL)
2439 				return NULL;
2440 
2441 			itr->last_thread = thread;
2442 
2443 			/* copy task callchain when entering idle */
2444 			if (evsel__intval(evsel, sample, "next_pid") == 0)
2445 				save_idle_callchain(sched, itr, sample);
2446 		}
2447 	}
2448 
2449 	return thread;
2450 }
2451 
2452 static bool timehist_skip_sample(struct perf_sched *sched,
2453 				 struct thread *thread,
2454 				 struct evsel *evsel,
2455 				 struct perf_sample *sample)
2456 {
2457 	bool rc = false;
2458 
2459 	if (thread__is_filtered(thread)) {
2460 		rc = true;
2461 		sched->skipped_samples++;
2462 	}
2463 
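	/* with --idle-hist, keep only switches into or out of idle */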
2464 	if (sched->idle_hist) {
2465 		if (!evsel__name_is(evsel, "sched:sched_switch"))
2466 			rc = true;
2467 		else if (evsel__intval(evsel, sample, "prev_pid") != 0 &&
2468 			 evsel__intval(evsel, sample, "next_pid") != 0)
2469 			rc = true;
2470 	}
2471 
2472 	return rc;
2473 }
2474 
2475 static void timehist_print_wakeup_event(struct perf_sched *sched,
2476 					struct evsel *evsel,
2477 					struct perf_sample *sample,
2478 					struct machine *machine,
2479 					struct thread *awakened)
2480 {
2481 	struct thread *thread;
2482 	char tstr[64];
2483 
2484 	thread = machine__findnew_thread(machine, sample->pid, sample->tid);
2485 	if (thread == NULL)
2486 		return;
2487 
2488 	/* show the wakeup unless both the wakee and the waker are filtered */
2489 	if (timehist_skip_sample(sched, thread, evsel, sample) &&
2490 	    timehist_skip_sample(sched, awakened, evsel, sample)) {
2491 		return;
2492 	}
2493 
2494 	timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2495 	printf("%15s [%04d] ", tstr, sample->cpu);
2496 	if (sched->show_cpu_visual)
2497 		printf(" %*s ", sched->max_cpu.cpu + 1, "");
2498 
2499 	printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2500 
2501 	/* dt spacer */
2502 	printf("  %9s  %9s  %9s ", "", "", "");
2503 
2504 	printf("awakened: %s", timehist_get_commstr(awakened));
2505 
2506 	printf("\n");
2507 }
2508 
2509 static int timehist_sched_wakeup_ignore(const struct perf_tool *tool __maybe_unused,
2510 					union perf_event *event __maybe_unused,
2511 					struct evsel *evsel __maybe_unused,
2512 					struct perf_sample *sample __maybe_unused,
2513 					struct machine *machine __maybe_unused)
2514 {
2515 	return 0;
2516 }
2517 
2518 static int timehist_sched_wakeup_event(const struct perf_tool *tool,
2519 				       union perf_event *event __maybe_unused,
2520 				       struct evsel *evsel,
2521 				       struct perf_sample *sample,
2522 				       struct machine *machine)
2523 {
2524 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2525 	struct thread *thread;
2526 	struct thread_runtime *tr = NULL;
2527 	/* want pid of awakened task not pid in sample */
2528 	const u32 pid = evsel__intval(evsel, sample, "pid");
2529 
2530 	thread = machine__findnew_thread(machine, 0, pid);
2531 	if (thread == NULL)
2532 		return -1;
2533 
2534 	tr = thread__get_runtime(thread);
2535 	if (tr == NULL)
2536 		return -1;
2537 
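	/* only the first wakeup after a switch-out sets the sch delay baseline */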
2538 	if (tr->ready_to_run == 0)
2539 		tr->ready_to_run = sample->time;
2540 
2541 	/* show wakeups if requested */
2542 	if (sched->show_wakeups &&
2543 	    !perf_time__skip_sample(&sched->ptime, sample->time))
2544 		timehist_print_wakeup_event(sched, evsel, sample, machine, thread);
2545 
2546 	return 0;
2547 }
2548 
2549 static void timehist_print_migration_event(struct perf_sched *sched,
2550 					struct evsel *evsel,
2551 					struct perf_sample *sample,
2552 					struct machine *machine,
2553 					struct thread *migrated)
2554 {
2555 	struct thread *thread;
2556 	char tstr[64];
2557 	u32 max_cpus;
2558 	u32 ocpu, dcpu;
2559 
2560 	if (sched->summary_only)
2561 		return;
2562 
2563 	max_cpus = sched->max_cpu.cpu + 1;
2564 	ocpu = evsel__intval(evsel, sample, "orig_cpu");
2565 	dcpu = evsel__intval(evsel, sample, "dest_cpu");
2566 
2567 	thread = machine__findnew_thread(machine, sample->pid, sample->tid);
2568 	if (thread == NULL)
2569 		return;
2570 
2571 	if (timehist_skip_sample(sched, thread, evsel, sample) &&
2572 	    timehist_skip_sample(sched, migrated, evsel, sample)) {
2573 		return;
2574 	}
2575 
2576 	timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2577 	printf("%15s [%04d] ", tstr, sample->cpu);
2578 
2579 	if (sched->show_cpu_visual) {
2580 		u32 i;
2581 		char c;
2582 
2583 		printf("  ");
2584 		for (i = 0; i < max_cpus; ++i) {
2585 			c = (i == sample->cpu) ? 'm' : ' ';
2586 			printf("%c", c);
2587 		}
2588 		printf("  ");
2589 	}
2590 
2591 	printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2592 
2593 	/* dt spacer */
2594 	printf("  %9s  %9s  %9s ", "", "", "");
2595 
2596 	printf("migrated: %s", timehist_get_commstr(migrated));
2597 	printf(" cpu %d => %d", ocpu, dcpu);
2598 
2599 	printf("\n");
2600 }
2601 
2602 static int timehist_migrate_task_event(const struct perf_tool *tool,
2603 				       union perf_event *event __maybe_unused,
2604 				       struct evsel *evsel,
2605 				       struct perf_sample *sample,
2606 				       struct machine *machine)
2607 {
2608 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2609 	struct thread *thread;
2610 	struct thread_runtime *tr = NULL;
2611 	/* want pid of migrated task not pid in sample */
2612 	const u32 pid = evsel__intval(evsel, sample, "pid");
2613 
2614 	thread = machine__findnew_thread(machine, 0, pid);
2615 	if (thread == NULL)
2616 		return -1;
2617 
2618 	tr = thread__get_runtime(thread);
2619 	if (tr == NULL)
2620 		return -1;
2621 
2622 	tr->migrations++;
2623 
2624 	/* show migrations if requested */
2625 	timehist_print_migration_event(sched, evsel, sample, machine, thread);
2626 
2627 	return 0;
2628 }
2629 
2630 static int timehist_sched_change_event(const struct perf_tool *tool,
2631 				       union perf_event *event,
2632 				       struct evsel *evsel,
2633 				       struct perf_sample *sample,
2634 				       struct machine *machine)
2635 {
2636 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2637 	struct perf_time_interval *ptime = &sched->ptime;
2638 	struct addr_location al;
2639 	struct thread *thread;
2640 	struct thread_runtime *tr = NULL;
2641 	u64 tprev, t = sample->time;
2642 	int rc = 0;
2643 	const char state = evsel__taskstate(evsel, sample, "prev_state");
2644 
2645 	addr_location__init(&al);
2646 	if (machine__resolve(machine, &al, sample) < 0) {
2647 		pr_err("problem processing %d event. skipping it\n",
2648 		       event->header.type);
2649 		rc = -1;
2650 		goto out;
2651 	}
2652 
2653 	thread = timehist_get_thread(sched, sample, machine, evsel);
2654 	if (thread == NULL) {
2655 		rc = -1;
2656 		goto out;
2657 	}
2658 
2659 	if (timehist_skip_sample(sched, thread, evsel, sample))
2660 		goto out;
2661 
2662 	tr = thread__get_runtime(thread);
2663 	if (tr == NULL) {
2664 		rc = -1;
2665 		goto out;
2666 	}
2667 
2668 	tprev = evsel__get_time(evsel, sample->cpu);
2669 
2670 	/*
2671 	 * If a start time is given:
2672 	 * - skip the sample if its time falls before the window
2673 	 * - clamp tprev to the window start if it falls before it
2674 	 */
2675 	if (ptime->start && ptime->start > t)
2676 		goto out;
2677 
2678 	if (tprev && ptime->start > tprev)
2679 		tprev = ptime->start;
2680 
2681 	/*
2682 	 * If an end time is given:
2683 	 * - we are done if the previous sched event is already past the window
2684 	 * - clamp the sample time to the window end so that stats for the
2685 	 *   window of interest are closed out correctly
2686 	 * - if tprev is 0 (i.e., no sched-in event was recorded for the
2687 	 *   current task), we cannot tell whether the sched-in falls within
2688 	 *   the window, so ignore the sample
2689 	 */
2690 	if (ptime->end) {
2691 		if (!tprev || tprev > ptime->end)
2692 			goto out;
2693 
2694 		if (t > ptime->end)
2695 			t = ptime->end;
2696 	}
2697 
2698 	if (!sched->idle_hist || thread__tid(thread) == 0) {
2699 		if (!cpu_list || test_bit(sample->cpu, cpu_bitmap))
2700 			timehist_update_runtime_stats(tr, t, tprev);
2701 
2702 		if (sched->idle_hist) {
2703 			struct idle_thread_runtime *itr = (void *)tr;
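			/*
			 * valid: struct thread_runtime is the first member
			 * of struct idle_thread_runtime
			 */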
2704 			struct thread_runtime *last_tr;
2705 
2706 			if (itr->last_thread == NULL)
2707 				goto out;
2708 
2709 			/* add current idle time as last thread's runtime */
2710 			last_tr = thread__get_runtime(itr->last_thread);
2711 			if (last_tr == NULL)
2712 				goto out;
2713 
2714 			timehist_update_runtime_stats(last_tr, t, tprev);
2715 			/*
2716 			 * zero the last thread's delta times: they are not updated
2717 			 * here and would otherwise show stale values next time;
2718 			 * only the total run time and run stats matter
2719 			 */
2720 			last_tr->dt_run = 0;
2721 			last_tr->dt_delay = 0;
2722 			last_tr->dt_sleep = 0;
2723 			last_tr->dt_iowait = 0;
2724 			last_tr->dt_preempt = 0;
2725 
2726 			if (itr->cursor.nr)
2727 				callchain_append(&itr->callchain, &itr->cursor, t - tprev);
2728 
2729 			itr->last_thread = NULL;
2730 		}
2731 
2732 		if (!sched->summary_only)
2733 			timehist_print_sample(sched, evsel, sample, &al, thread, t, state);
2734 	}
2735 
2736 out:
2737 	if (sched->hist_time.start == 0 && t >= ptime->start)
2738 		sched->hist_time.start = t;
2739 	if (ptime->end == 0 || t <= ptime->end)
2740 		sched->hist_time.end = t;
2741 
2742 	if (tr) {
2743 		/* time of this sched_switch event becomes last time task seen */
2744 		tr->last_time = sample->time;
2745 
2746 		/* last state is used to determine where to account wait time */
2747 		tr->last_state = state;
2748 
2749 		/* sched out event for task so reset ready to run time */
2750 		if (state == 'R')
2751 			tr->ready_to_run = t;
2752 		else
2753 			tr->ready_to_run = 0;
2754 	}
2755 
2756 	evsel__save_time(evsel, sample->time, sample->cpu);
2757 
2758 	addr_location__exit(&al);
2759 	return rc;
2760 }
2761 
2762 static int timehist_sched_switch_event(const struct perf_tool *tool,
2763 			     union perf_event *event,
2764 			     struct evsel *evsel,
2765 			     struct perf_sample *sample,
2766 			     struct machine *machine __maybe_unused)
2767 {
2768 	return timehist_sched_change_event(tool, event, evsel, sample, machine);
2769 }
2770 
2771 static int process_lost(const struct perf_tool *tool __maybe_unused,
2772 			union perf_event *event,
2773 			struct perf_sample *sample,
2774 			struct machine *machine __maybe_unused)
2775 {
2776 	char tstr[64];
2777 
2778 	timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2779 	printf("%15s ", tstr);
2780 	printf("lost %" PRI_lu64 " events on cpu %d\n", event->lost.lost, sample->cpu);
2781 
2782 	return 0;
2783 }
2784 
2785 
2786 static void print_thread_runtime(struct thread *t,
2787 				 struct thread_runtime *r)
2788 {
2789 	double mean = avg_stats(&r->run_stats);
2790 	float stddev;
2791 
2792 	printf("%*s   %5d  %9" PRIu64 " ",
2793 	       comm_width, timehist_get_commstr(t), thread__ppid(t),
2794 	       (u64) r->run_stats.n);
2795 
2796 	print_sched_time(r->total_run_time, 8);
2797 	stddev = rel_stddev_stats(stddev_stats(&r->run_stats), mean);
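	/* stddev is relative to the mean; the summary header shows it under "%" */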
2798 	print_sched_time(r->run_stats.min, 6);
2799 	printf(" ");
2800 	print_sched_time((u64) mean, 6);
2801 	printf(" ");
2802 	print_sched_time(r->run_stats.max, 6);
2803 	printf("  ");
2804 	printf("%5.2f", stddev);
2805 	printf("   %5" PRIu64, r->migrations);
2806 	printf("\n");
2807 }
2808 
2809 static void print_thread_waittime(struct thread *t,
2810 				  struct thread_runtime *r)
2811 {
2812 	printf("%*s   %5d  %9" PRIu64 " ",
2813 	       comm_width, timehist_get_commstr(t), thread__ppid(t),
2814 	       (u64) r->run_stats.n);
2815 
2816 	print_sched_time(r->total_run_time, 8);
2817 	print_sched_time(r->total_sleep_time, 6);
2818 	printf(" ");
2819 	print_sched_time(r->total_iowait_time, 6);
2820 	printf(" ");
2821 	print_sched_time(r->total_preempt_time, 6);
2822 	printf(" ");
2823 	print_sched_time(r->total_delay_time, 6);
2824 	printf("\n");
2825 }
2826 
2827 struct total_run_stats {
2828 	struct perf_sched *sched;
2829 	u64  sched_count;
2830 	u64  task_count;
2831 	u64  total_run_time;
2832 };
2833 
2834 static int show_thread_runtime(struct thread *t, void *priv)
2835 {
2836 	struct total_run_stats *stats = priv;
2837 	struct thread_runtime *r;
2838 
2839 	if (thread__is_filtered(t))
2840 		return 0;
2841 
2842 	r = thread__priv(t);
2843 	if (r && r->run_stats.n) {
2844 		stats->task_count++;
2845 		stats->sched_count += r->run_stats.n;
2846 		stats->total_run_time += r->total_run_time;
2847 
2848 		if (stats->sched->show_state)
2849 			print_thread_waittime(t, r);
2850 		else
2851 			print_thread_runtime(t, r);
2852 	}
2853 
2854 	return 0;
2855 }
2856 
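/*
 * print one callchain in folded form, parent entries first:
 *   "symA <- symB <- symC"
 * context markers and ignored symbols are skipped
 */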
2857 static size_t callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
2858 {
2859 	const char *sep = " <- ";
2860 	struct callchain_list *chain;
2861 	size_t ret = 0;
2862 	char bf[1024];
2863 	bool first;
2864 
2865 	if (node == NULL)
2866 		return 0;
2867 
2868 	ret = callchain__fprintf_folded(fp, node->parent);
2869 	first = (ret == 0);
2870 
2871 	list_for_each_entry(chain, &node->val, list) {
2872 		if (chain->ip >= PERF_CONTEXT_MAX)
2873 			continue;
2874 		if (chain->ms.sym && chain->ms.sym->ignore)
2875 			continue;
2876 		ret += fprintf(fp, "%s%s", first ? "" : sep,
2877 			       callchain_list__sym_name(chain, bf, sizeof(bf),
2878 							false));
2879 		first = false;
2880 	}
2881 
2882 	return ret;
2883 }
2884 
2885 static size_t timehist_print_idlehist_callchain(struct rb_root_cached *root)
2886 {
2887 	size_t ret = 0;
2888 	FILE *fp = stdout;
2889 	struct callchain_node *chain;
2890 	struct rb_node *rb_node = rb_first_cached(root);
2891 
2892 	printf("  %16s  %8s  %s\n", "Idle time (msec)", "Count", "Callchains");
2893 	printf("  %.16s  %.8s  %.50s\n", graph_dotted_line, graph_dotted_line,
2894 	       graph_dotted_line);
2895 
2896 	while (rb_node) {
2897 		chain = rb_entry(rb_node, struct callchain_node, rb_node);
2898 		rb_node = rb_next(rb_node);
2899 
2900 		ret += fprintf(fp, "  ");
2901 		print_sched_time(chain->hit, 12);
2902 		ret += 16;  /* print_sched_time prints width (2nd arg) + 4 chars */
2903 		ret += fprintf(fp, " %8d  ", chain->count);
2904 		ret += callchain__fprintf_folded(fp, chain);
2905 		ret += fprintf(fp, "\n");
2906 	}
2907 
2908 	return ret;
2909 }
2910 
2911 static void timehist_print_summary(struct perf_sched *sched,
2912 				   struct perf_session *session)
2913 {
2914 	struct machine *m = &session->machines.host;
2915 	struct total_run_stats totals;
2916 	u64 task_count;
2917 	struct thread *t;
2918 	struct thread_runtime *r;
2919 	int i;
2920 	u64 hist_time = sched->hist_time.end - sched->hist_time.start;
2921 
2922 	memset(&totals, 0, sizeof(totals));
2923 	totals.sched = sched;
2924 
2925 	if (sched->idle_hist) {
2926 		printf("\nIdle-time summary\n");
2927 		printf("%*s  parent  sched-out  ", comm_width, "comm");
2928 		printf("  idle-time   min-idle    avg-idle    max-idle  stddev  migrations\n");
2929 	} else if (sched->show_state) {
2930 		printf("\nWait-time summary\n");
2931 		printf("%*s  parent   sched-in  ", comm_width, "comm");
2932 		printf("   run-time      sleep      iowait     preempt       delay\n");
2933 	} else {
2934 		printf("\nRuntime summary\n");
2935 		printf("%*s  parent   sched-in  ", comm_width, "comm");
2936 		printf("   run-time    min-run     avg-run     max-run  stddev  migrations\n");
2937 	}
2938 	printf("%*s            (count)  ", comm_width, "");
2939 	printf("     (msec)     (msec)      (msec)      (msec)       %s\n",
2940 	       sched->show_state ? "(msec)" : "%");
2941 	printf("%.117s\n", graph_dotted_line);
2942 
2943 	machine__for_each_thread(m, show_thread_runtime, &totals);
2944 	task_count = totals.task_count;
2945 	if (!task_count)
2946 		printf("<no still running tasks>\n");
2947 
2948 	/* CPU idle stats not tracked when samples were skipped */
2949 	if (sched->skipped_samples && !sched->idle_hist)
2950 		return;
2951 
2952 	printf("\nIdle stats:\n");
2953 	for (i = 0; i < idle_max_cpu; ++i) {
2954 		if (cpu_list && !test_bit(i, cpu_bitmap))
2955 			continue;
2956 
2957 		t = idle_threads[i];
2958 		if (!t)
2959 			continue;
2960 
2961 		r = thread__priv(t);
2962 		if (r && r->run_stats.n) {
2963 			totals.sched_count += r->run_stats.n;
2964 			printf("    CPU %2d idle for ", i);
2965 			print_sched_time(r->total_run_time, 6);
2966 			printf(" msec  (%6.2f%%)\n", 100.0 * r->total_run_time / hist_time);
2967 		} else
2968 			printf("    CPU %2d idle entire time window\n", i);
2969 	}
2970 
2971 	if (sched->idle_hist && sched->show_callchain) {
2972 		callchain_param.mode  = CHAIN_FOLDED;
2973 		callchain_param.value = CCVAL_PERIOD;
2974 
2975 		callchain_register_param(&callchain_param);
2976 
2977 		printf("\nIdle stats by callchain:\n");
2978 		for (i = 0; i < idle_max_cpu; ++i) {
2979 			struct idle_thread_runtime *itr;
2980 
2981 			t = idle_threads[i];
2982 			if (!t)
2983 				continue;
2984 
2985 			itr = thread__priv(t);
2986 			if (itr == NULL)
2987 				continue;
2988 
2989 			callchain_param.sort(&itr->sorted_root.rb_root, &itr->callchain,
2990 					     0, &callchain_param);
2991 
2992 			printf("  CPU %2d:", i);
2993 			print_sched_time(itr->tr.total_run_time, 6);
2994 			printf(" msec\n");
2995 			timehist_print_idlehist_callchain(&itr->sorted_root);
2996 			printf("\n");
2997 		}
2998 	}
2999 
3000 	printf("\n"
3001 	       "    Total number of unique tasks: %" PRIu64 "\n"
3002 	       "Total number of context switches: %" PRIu64 "\n",
3003 	       totals.task_count, totals.sched_count);
3004 
3005 	printf("           Total run time (msec): ");
3006 	print_sched_time(totals.total_run_time, 2);
3007 	printf("\n");
3008 
3009 	printf("    Total scheduling time (msec): ");
3010 	print_sched_time(hist_time, 2);
3011 	printf(" (x %d)\n", sched->max_cpu.cpu + 1);
3012 }
3013 
3014 typedef int (*sched_handler)(const struct perf_tool *tool,
3015 			  union perf_event *event,
3016 			  struct evsel *evsel,
3017 			  struct perf_sample *sample,
3018 			  struct machine *machine);
3019 
3020 static int perf_timehist__process_sample(const struct perf_tool *tool,
3021 					 union perf_event *event,
3022 					 struct perf_sample *sample,
3023 					 struct evsel *evsel,
3024 					 struct machine *machine)
3025 {
3026 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
3027 	int err = 0;
3028 	struct perf_cpu this_cpu = {
3029 		.cpu = sample->cpu,
3030 	};
3031 
3032 	if (this_cpu.cpu > sched->max_cpu.cpu)
3033 		sched->max_cpu = this_cpu;
3034 
3035 	if (evsel->handler != NULL) {
3036 		sched_handler f = evsel->handler;
3037 
3038 		err = f(tool, event, evsel, sample, machine);
3039 	}
3040 
3041 	return err;
3042 }
3043 
3044 static int timehist_check_attr(struct perf_sched *sched,
3045 			       struct evlist *evlist)
3046 {
3047 	struct evsel *evsel;
3048 	struct evsel_runtime *er;
3049 
3050 	list_for_each_entry(evsel, &evlist->core.entries, core.node) {
3051 		er = evsel__get_runtime(evsel);
3052 		if (er == NULL) {
3053 			pr_err("Failed to allocate memory for evsel runtime data\n");
3054 			return -1;
3055 		}
3056 
3057 		/* only need to save callchain related to sched_switch event */
3058 		if (sched->show_callchain &&
3059 		    evsel__name_is(evsel, "sched:sched_switch") &&
3060 		    !evsel__has_callchain(evsel)) {
3061 			pr_info("Samples of sched_switch event do not have callchains.\n");
3062 			sched->show_callchain = 0;
3063 			symbol_conf.use_callchain = 0;
3064 		}
3065 	}
3066 
3067 	return 0;
3068 }
3069 
3070 static int perf_sched__timehist(struct perf_sched *sched)
3071 {
3072 	struct evsel_str_handler handlers[] = {
3073 		{ "sched:sched_switch",       timehist_sched_switch_event, },
3074 		{ "sched:sched_wakeup",	      timehist_sched_wakeup_event, },
3075 		{ "sched:sched_waking",       timehist_sched_wakeup_event, },
3076 		{ "sched:sched_wakeup_new",   timehist_sched_wakeup_event, },
3077 	};
3078 	const struct evsel_str_handler migrate_handlers[] = {
3079 		{ "sched:sched_migrate_task", timehist_migrate_task_event, },
3080 	};
3081 	struct perf_data data = {
3082 		.path  = input_name,
3083 		.mode  = PERF_DATA_MODE_READ,
3084 		.force = sched->force,
3085 	};
3086 
3087 	struct perf_session *session;
3088 	struct evlist *evlist;
3089 	int err = -1;
3090 
3091 	/*
3092 	 * event handlers for timehist option
3093 	 */
3094 	sched->tool.sample	 = perf_timehist__process_sample;
3095 	sched->tool.mmap	 = perf_event__process_mmap;
3096 	sched->tool.comm	 = perf_event__process_comm;
3097 	sched->tool.exit	 = perf_event__process_exit;
3098 	sched->tool.fork	 = perf_event__process_fork;
3099 	sched->tool.lost	 = process_lost;
3100 	sched->tool.attr	 = perf_event__process_attr;
3101 	sched->tool.tracing_data = perf_event__process_tracing_data;
3102 	sched->tool.build_id	 = perf_event__process_build_id;
3103 
3104 	sched->tool.ordering_requires_timestamps = true;
3105 
3106 	symbol_conf.use_callchain = sched->show_callchain;
3107 
3108 	session = perf_session__new(&data, &sched->tool);
3109 	if (IS_ERR(session))
3110 		return PTR_ERR(session);
3111 
3112 	if (cpu_list) {
3113 		err = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap);
3114 		if (err < 0)
3115 			goto out;
3116 	}
3117 
3118 	evlist = session->evlist;
3119 
3120 	symbol__init(&session->header.env);
3121 
3122 	if (perf_time__parse_str(&sched->ptime, sched->time_str) != 0) {
3123 		pr_err("Invalid time string\n");
3124 		err = -EINVAL;
3125 		goto out;
3126 	}
3127 
3128 	if (timehist_check_attr(sched, evlist) != 0)
3129 		goto out;
3130 
3131 	setup_pager();
3132 
3133 	/* prefer sched_waking if it is captured */
3134 	if (evlist__find_tracepoint_by_name(session->evlist, "sched:sched_waking"))
3135 		handlers[1].handler = timehist_sched_wakeup_ignore;
3136 
3137 	/* setup per-evsel handlers */
3138 	if (perf_session__set_tracepoints_handlers(session, handlers))
3139 		goto out;
3140 
3141 	/* sched_switch event at a minimum needs to exist */
3142 	if (!evlist__find_tracepoint_by_name(session->evlist, "sched:sched_switch")) {
3143 		pr_err("No sched_switch events found. Have you run 'perf sched record'?\n");
3144 		goto out;
3145 	}
3146 
3147 	if (sched->show_migrations &&
3148 	    perf_session__set_tracepoints_handlers(session, migrate_handlers))
3149 		goto out;
3150 
3151 	/* pre-allocate struct for per-CPU idle stats */
3152 	sched->max_cpu.cpu = session->header.env.nr_cpus_online;
3153 	if (sched->max_cpu.cpu == 0)
3154 		sched->max_cpu.cpu = 4;
3155 	if (init_idle_threads(sched->max_cpu.cpu))
3156 		goto out;
3157 
3158 	/* summary_only implies summary option, but don't overwrite summary if set */
3159 	if (sched->summary_only)
3160 		sched->summary = sched->summary_only;
3161 
3162 	if (!sched->summary_only)
3163 		timehist_header(sched);
3164 
3165 	err = perf_session__process_events(session);
3166 	if (err) {
3167 		pr_err("Failed to process events, error %d\n", err);
3168 		goto out;
3169 	}
3170 
3171 	sched->nr_events      = evlist->stats.nr_events[0];
3172 	sched->nr_lost_events = evlist->stats.total_lost;
3173 	sched->nr_lost_chunks = evlist->stats.nr_events[PERF_RECORD_LOST];
3174 
3175 	if (sched->summary)
3176 		timehist_print_summary(sched, session);
3177 
3178 out:
3179 	free_idle_threads();
3180 	perf_session__delete(session);
3181 
3182 	return err;
3183 }
3184 
3185 
3186 static void print_bad_events(struct perf_sched *sched)
3187 {
3188 	if (sched->nr_unordered_timestamps && sched->nr_timestamps) {
3189 		printf("  INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
3190 			(double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0,
3191 			sched->nr_unordered_timestamps, sched->nr_timestamps);
3192 	}
3193 	if (sched->nr_lost_events && sched->nr_events) {
3194 		printf("  INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
3195 			(double)sched->nr_lost_events/(double)sched->nr_events * 100.0,
3196 			sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks);
3197 	}
3198 	if (sched->nr_context_switch_bugs && sched->nr_timestamps) {
3199 		printf("  INFO: %.3f%% context switch bugs (%ld out of %ld)",
3200 			(double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0,
3201 			sched->nr_context_switch_bugs, sched->nr_timestamps);
3202 		if (sched->nr_lost_events)
3203 			printf(" (due to lost events?)");
3204 		printf("\n");
3205 	}
3206 }
3207 
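/*
 * insert 'data' into the comm-keyed rbtree, folding its totals into an
 * existing node with the same comm; 'perf sched latency' (without -p)
 * uses this to merge per-thread atoms back into per-comm entries
 */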
3208 static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *data)
3209 {
3210 	struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
3211 	struct work_atoms *this;
3212 	const char *comm = thread__comm_str(data->thread), *this_comm;
3213 	bool leftmost = true;
3214 
3215 	while (*new) {
3216 		int cmp;
3217 
3218 		this = container_of(*new, struct work_atoms, node);
3219 		parent = *new;
3220 
3221 		this_comm = thread__comm_str(this->thread);
3222 		cmp = strcmp(comm, this_comm);
3223 		if (cmp > 0) {
3224 			new = &((*new)->rb_left);
3225 		} else if (cmp < 0) {
3226 			new = &((*new)->rb_right);
3227 			leftmost = false;
3228 		} else {
3229 			this->num_merged++;
3230 			this->total_runtime += data->total_runtime;
3231 			this->nb_atoms += data->nb_atoms;
3232 			this->total_lat += data->total_lat;
3233 			list_splice(&data->work_list, &this->work_list);
3234 			if (this->max_lat < data->max_lat) {
3235 				this->max_lat = data->max_lat;
3236 				this->max_lat_start = data->max_lat_start;
3237 				this->max_lat_end = data->max_lat_end;
3238 			}
3239 			zfree(&data);
3240 			return;
3241 		}
3242 	}
3243 
3244 	data->num_merged++;
3245 	rb_link_node(&data->node, parent, new);
3246 	rb_insert_color_cached(&data->node, root, leftmost);
3247 }
3248 
3249 static void perf_sched__merge_lat(struct perf_sched *sched)
3250 {
3251 	struct work_atoms *data;
3252 	struct rb_node *node;
3253 
3254 	if (sched->skip_merge)
3255 		return;
3256 
3257 	while ((node = rb_first_cached(&sched->atom_root))) {
3258 		rb_erase_cached(node, &sched->atom_root);
3259 		data = rb_entry(node, struct work_atoms, node);
3260 		__merge_work_atoms(&sched->merged_atom_root, data);
3261 	}
3262 }
3263 
3264 static int setup_cpus_switch_event(struct perf_sched *sched)
3265 {
3266 	unsigned int i;
3267 
3268 	sched->cpu_last_switched = calloc(MAX_CPUS, sizeof(*(sched->cpu_last_switched)));
3269 	if (!sched->cpu_last_switched)
3270 		return -1;
3271 
3272 	sched->curr_pid = malloc(MAX_CPUS * sizeof(*(sched->curr_pid)));
3273 	if (!sched->curr_pid) {
3274 		zfree(&sched->cpu_last_switched);
3275 		return -1;
3276 	}
3277 
3278 	for (i = 0; i < MAX_CPUS; i++)
3279 		sched->curr_pid[i] = -1;
3280 
3281 	return 0;
3282 }
3283 
3284 static void free_cpus_switch_event(struct perf_sched *sched)
3285 {
3286 	zfree(&sched->curr_pid);
3287 	zfree(&sched->cpu_last_switched);
3288 }
3289 
3290 static int perf_sched__lat(struct perf_sched *sched)
3291 {
3292 	int rc = -1;
3293 	struct rb_node *next;
3294 
3295 	setup_pager();
3296 
3297 	if (setup_cpus_switch_event(sched))
3298 		return rc;
3299 
3300 	if (perf_sched__read_events(sched))
3301 		goto out_free_cpus_switch_event;
3302 
3303 	perf_sched__merge_lat(sched);
3304 	perf_sched__sort_lat(sched);
3305 
3306 	printf("\n -------------------------------------------------------------------------------------------------------------------------------------------\n");
3307 	printf("  Task                  |   Runtime ms  |  Count   | Avg delay ms    | Max delay ms    | Max delay start           | Max delay end          |\n");
3308 	printf(" -------------------------------------------------------------------------------------------------------------------------------------------\n");
3309 
3310 	next = rb_first_cached(&sched->sorted_atom_root);
3311 
3312 	while (next) {
3313 		struct work_atoms *work_list;
3314 
3315 		work_list = rb_entry(next, struct work_atoms, node);
3316 		output_lat_thread(sched, work_list);
3317 		next = rb_next(next);
3318 		thread__zput(work_list->thread);
3319 	}
3320 
3321 	printf(" -----------------------------------------------------------------------------------------------------------------\n");
3322 	printf("  TOTAL:                |%11.3f ms |%9" PRIu64 " |\n",
3323 		(double)sched->all_runtime / NSEC_PER_MSEC, sched->all_count);
3324 
3325 	printf(" ---------------------------------------------------\n");
3326 
3327 	print_bad_events(sched);
3328 	printf("\n");
3329 
3330 	rc = 0;
3331 
3332 out_free_cpus_switch_event:
3333 	free_cpus_switch_event(sched);
3334 	return rc;
3335 }
3336 
3337 static int setup_map_cpus(struct perf_sched *sched)
3338 {
3339 	sched->max_cpu.cpu  = sysconf(_SC_NPROCESSORS_CONF);
3340 
3341 	if (sched->map.comp) {
3342 		sched->map.comp_cpus = zalloc(sched->max_cpu.cpu * sizeof(int));
3343 		if (!sched->map.comp_cpus)
3344 			return -1;
3345 	}
3346 
3347 	if (sched->map.cpus_str) {
3348 		sched->map.cpus = perf_cpu_map__new(sched->map.cpus_str);
3349 		if (!sched->map.cpus) {
3350 			pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
3351 			zfree(&sched->map.comp_cpus);
3352 			return -1;
3353 		}
3354 	}
3355 
3356 	return 0;
3357 }
3358 
3359 static int setup_color_pids(struct perf_sched *sched)
3360 {
3361 	struct perf_thread_map *map;
3362 
3363 	if (!sched->map.color_pids_str)
3364 		return 0;
3365 
3366 	map = thread_map__new_by_tid_str(sched->map.color_pids_str);
3367 	if (!map) {
3368 		pr_err("failed to get thread map from %s\n", sched->map.color_pids_str);
3369 		return -1;
3370 	}
3371 
3372 	sched->map.color_pids = map;
3373 	return 0;
3374 }
3375 
3376 static int setup_color_cpus(struct perf_sched *sched)
3377 {
3378 	struct perf_cpu_map *map;
3379 
3380 	if (!sched->map.color_cpus_str)
3381 		return 0;
3382 
3383 	map = perf_cpu_map__new(sched->map.color_cpus_str);
3384 	if (!map) {
3385 		pr_err("failed to get cpu map from %s\n", sched->map.color_cpus_str);
3386 		return -1;
3387 	}
3388 
3389 	sched->map.color_cpus = map;
3390 	return 0;
3391 }
3392 
3393 static int perf_sched__map(struct perf_sched *sched)
3394 {
3395 	int rc = -1;
3396 
3397 	sched->curr_thread = calloc(MAX_CPUS, sizeof(*(sched->curr_thread)));
3398 	if (!sched->curr_thread)
3399 		return rc;
3400 
3401 	sched->curr_out_thread = calloc(MAX_CPUS, sizeof(*(sched->curr_out_thread)));
3402 	if (!sched->curr_out_thread)
3403 		goto out_free_curr_thread;
3404 
3405 	if (setup_cpus_switch_event(sched))
3406 		goto out_free_curr_thread;
3407 
3408 	if (setup_map_cpus(sched))
3409 		goto out_free_cpus_switch_event;
3410 
3411 	if (setup_color_pids(sched))
3412 		goto out_put_map_cpus;
3413 
3414 	if (setup_color_cpus(sched))
3415 		goto out_put_color_pids;
3416 
3417 	setup_pager();
3418 	if (perf_sched__read_events(sched))
3419 		goto out_put_color_cpus;
3420 
3421 	rc = 0;
3422 	print_bad_events(sched);
3423 
3424 out_put_color_cpus:
3425 	perf_cpu_map__put(sched->map.color_cpus);
3426 
3427 out_put_color_pids:
3428 	perf_thread_map__put(sched->map.color_pids);
3429 
3430 out_put_map_cpus:
3431 	zfree(&sched->map.comp_cpus);
3432 	perf_cpu_map__put(sched->map.cpus);
3433 
3434 out_free_cpus_switch_event:
3435 	free_cpus_switch_event(sched);
3436 
3437 out_free_curr_thread:
3438 	zfree(&sched->curr_thread);
	zfree(&sched->curr_out_thread);
3439 	return rc;
3440 }
3441 
3442 static int perf_sched__replay(struct perf_sched *sched)
3443 {
3444 	int ret;
3445 	unsigned long i;
3446 
3447 	mutex_init(&sched->start_work_mutex);
3448 	mutex_init(&sched->work_done_wait_mutex);
3449 
3450 	ret = setup_cpus_switch_event(sched);
3451 	if (ret)
3452 		goto out_mutex_destroy;
3453 
3454 	calibrate_run_measurement_overhead(sched);
3455 	calibrate_sleep_measurement_overhead(sched);
3456 
3457 	test_calibrations(sched);
3458 
3459 	ret = perf_sched__read_events(sched);
3460 	if (ret)
3461 		goto out_free_cpus_switch_event;
3462 
3463 	printf("nr_run_events:        %ld\n", sched->nr_run_events);
3464 	printf("nr_sleep_events:      %ld\n", sched->nr_sleep_events);
3465 	printf("nr_wakeup_events:     %ld\n", sched->nr_wakeup_events);
3466 
3467 	if (sched->targetless_wakeups)
3468 		printf("target-less wakeups:  %ld\n", sched->targetless_wakeups);
3469 	if (sched->multitarget_wakeups)
3470 		printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups);
3471 	if (sched->nr_run_events_optimized)
3472 		printf("run atoms optimized: %ld\n",
3473 			sched->nr_run_events_optimized);
3474 
3475 	print_task_traces(sched);
3476 	add_cross_task_wakeups(sched);
3477 
3478 	sched->thread_funcs_exit = false;
3479 	create_tasks(sched);
3480 	printf("------------------------------------------------------------\n");
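	/* --repeat 0 is documented as infinite; approximate it with UINT_MAX runs */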
3481 	if (sched->replay_repeat == 0)
3482 		sched->replay_repeat = UINT_MAX;
3483 
3484 	for (i = 0; i < sched->replay_repeat; i++)
3485 		run_one_test(sched);
3486 
3487 	sched->thread_funcs_exit = true;
3488 	destroy_tasks(sched);
3489 
3490 out_free_cpus_switch_event:
3491 	free_cpus_switch_event(sched);
3492 
3493 out_mutex_destroy:
3494 	mutex_destroy(&sched->start_work_mutex);
3495 	mutex_destroy(&sched->work_done_wait_mutex);
3496 	return ret;
3497 }
3498 
3499 static void setup_sorting(struct perf_sched *sched, const struct option *options,
3500 			  const char * const usage_msg[])
3501 {
3502 	char *tmp, *tok, *str = strdup(sched->sort_order);
3503 
3504 	for (tok = strtok_r(str, ", ", &tmp);
3505 			tok; tok = strtok_r(NULL, ", ", &tmp)) {
3506 		if (sort_dimension__add(tok, &sched->sort_list) < 0) {
3507 			usage_with_options_msg(usage_msg, options,
3508 					"Unknown --sort key: `%s'", tok);
3509 		}
3510 	}
3511 
3512 	free(str);
3513 
3514 	sort_dimension__add("pid", &sched->cmp_pid);
3515 }
3516 
3517 static bool schedstat_events_exposed(void)
3518 {
3519 	/*
3520 	 * Select "sched:sched_stat_wait" event to check
3521 	 * whether schedstat tracepoints are exposed.
3522 	 */
3523 	return IS_ERR(trace_event__tp_format("sched", "sched_stat_wait")) ?
3524 		false : true;
3525 }
3526 
3527 static int __cmd_record(int argc, const char **argv)
3528 {
3529 	unsigned int rec_argc, i, j;
3530 	char **rec_argv;
3531 	const char **rec_argv_copy;
3532 	const char * const record_args[] = {
3533 		"record",
3534 		"-a",
3535 		"-R",
3536 		"-m", "1024",
3537 		"-c", "1",
3538 		"-e", "sched:sched_switch",
3539 		"-e", "sched:sched_stat_runtime",
3540 		"-e", "sched:sched_process_fork",
3541 		"-e", "sched:sched_wakeup_new",
3542 		"-e", "sched:sched_migrate_task",
3543 	};
3544 
3545 	/*
3546 	 * The tracepoints trace_sched_stat_{wait, sleep, iowait} are not
3547 	 * exposed to userspace when CONFIG_SCHEDSTATS is not set. To keep
3548 	 * "perf sched record" from failing, check at runtime whether they
3549 	 * exist before asking to record them.
3550 	 */
3551 	const char * const schedstat_args[] = {
3552 		"-e", "sched:sched_stat_wait",
3553 		"-e", "sched:sched_stat_sleep",
3554 		"-e", "sched:sched_stat_iowait",
3555 	};
3556 	unsigned int schedstat_argc = schedstat_events_exposed() ?
3557 		ARRAY_SIZE(schedstat_args) : 0;
3558 
3559 	struct tep_event *waking_event;
3560 	int ret;
3561 
3562 	/*
3563 	 * +2 for either "-e", "sched:sched_wakeup" or
3564 	 * "-e", "sched:sched_waking"
3565 	 */
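	/*
	 * e.g. "perf sched record -- sleep 1" would expand to roughly:
	 *   record -a -R -m 1024 -c 1 -e sched:sched_switch ...
	 *     -e sched:sched_waking [schedstat events] sleep 1
	 */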
3566 	rec_argc = ARRAY_SIZE(record_args) + 2 + schedstat_argc + argc - 1;
3567 	rec_argv = calloc(rec_argc + 1, sizeof(char *));
3568 	if (rec_argv == NULL)
3569 		return -ENOMEM;
3570 	rec_argv_copy = calloc(rec_argc + 1, sizeof(char *));
3571 	if (rec_argv_copy == NULL) {
3572 		free(rec_argv);
3573 		return -ENOMEM;
3574 	}
3575 
3576 	for (i = 0; i < ARRAY_SIZE(record_args); i++)
3577 		rec_argv[i] = strdup(record_args[i]);
3578 
3579 	rec_argv[i++] = strdup("-e");
3580 	waking_event = trace_event__tp_format("sched", "sched_waking");
3581 	if (!IS_ERR(waking_event))
3582 		rec_argv[i++] = strdup("sched:sched_waking");
3583 	else
3584 		rec_argv[i++] = strdup("sched:sched_wakeup");
3585 
3586 	for (j = 0; j < schedstat_argc; j++)
3587 		rec_argv[i++] = strdup(schedstat_args[j]);
3588 
3589 	for (j = 1; j < (unsigned int)argc; j++, i++)
3590 		rec_argv[i] = strdup(argv[j]);
3591 
3592 	BUG_ON(i != rec_argc);
3593 
3594 	memcpy(rec_argv_copy, rec_argv, sizeof(char *) * rec_argc);
3595 	ret = cmd_record(rec_argc, rec_argv_copy);
3596 
3597 	for (i = 0; i < rec_argc; i++)
3598 		free(rec_argv[i]);
3599 	free(rec_argv);
3600 	free(rec_argv_copy);
3601 
3602 	return ret;
3603 }
3604 
3605 int cmd_sched(int argc, const char **argv)
3606 {
3607 	static const char default_sort_order[] = "avg, max, switch, runtime";
3608 	struct perf_sched sched = {
3609 		.cmp_pid	      = LIST_HEAD_INIT(sched.cmp_pid),
3610 		.sort_list	      = LIST_HEAD_INIT(sched.sort_list),
3611 		.sort_order	      = default_sort_order,
3612 		.replay_repeat	      = 10,
3613 		.profile_cpu	      = -1,
3614 		.next_shortname1      = 'A',
3615 		.next_shortname2      = '0',
3616 		.skip_merge           = 0,
3617 		.show_callchain	      = 1,
3618 		.max_stack            = 5,
3619 	};
3620 	const struct option sched_options[] = {
3621 	OPT_STRING('i', "input", &input_name, "file",
3622 		    "input file name"),
3623 	OPT_INCR('v', "verbose", &verbose,
3624 		    "be more verbose (show symbol address, etc)"),
3625 	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
3626 		    "dump raw trace in ASCII"),
3627 	OPT_BOOLEAN('f', "force", &sched.force, "don't complain, do it"),
3628 	OPT_END()
3629 	};
3630 	const struct option latency_options[] = {
3631 	OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
3632 		   "sort by key(s): runtime, switch, avg, max"),
3633 	OPT_INTEGER('C', "CPU", &sched.profile_cpu,
3634 		    "CPU to profile on"),
3635 	OPT_BOOLEAN('p', "pids", &sched.skip_merge,
3636 		    "latency stats per pid instead of per comm"),
3637 	OPT_PARENT(sched_options)
3638 	};
3639 	const struct option replay_options[] = {
3640 	OPT_UINTEGER('r', "repeat", &sched.replay_repeat,
3641 		     "repeat the workload replay N times (0: infinite)"),
3642 	OPT_PARENT(sched_options)
3643 	};
3644 	const struct option map_options[] = {
3645 	OPT_BOOLEAN(0, "compact", &sched.map.comp,
3646 		    "map output in compact mode"),
3647 	OPT_STRING(0, "color-pids", &sched.map.color_pids_str, "pids",
3648 		   "highlight given pids in map"),
3649 	OPT_STRING(0, "color-cpus", &sched.map.color_cpus_str, "cpus",
3650 		   "highlight given CPUs in map"),
3651 	OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus",
3652 		   "display given CPUs in map"),
3653 	OPT_STRING(0, "task-name", &sched.map.task_name, "task",
3654 		"map output only for the given task name(s)."),
3655 	OPT_BOOLEAN(0, "fuzzy-name", &sched.map.fuzzy,
3656 		"given command name can be partially matched (fuzzy matching)"),
3657 	OPT_PARENT(sched_options)
3658 	};
3659 	const struct option timehist_options[] = {
3660 	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
3661 		   "file", "vmlinux pathname"),
3662 	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
3663 		   "file", "kallsyms pathname"),
3664 	OPT_BOOLEAN('g', "call-graph", &sched.show_callchain,
3665 		    "Display call chains if present (default on)"),
3666 	OPT_UINTEGER(0, "max-stack", &sched.max_stack,
3667 		   "Maximum number of functions to display backtrace."),
3668 	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
3669 		    "Look for files with symbols relative to this directory"),
3670 	OPT_BOOLEAN('s', "summary", &sched.summary_only,
3671 		    "Show only a summary of scheduling events with statistics"),
3672 	OPT_BOOLEAN('S', "with-summary", &sched.summary,
3673 		    "Show all scheduling events and a summary with statistics"),
3674 	OPT_BOOLEAN('w', "wakeups", &sched.show_wakeups, "Show wakeup events"),
3675 	OPT_BOOLEAN('n', "next", &sched.show_next, "Show next task"),
3676 	OPT_BOOLEAN('M', "migrations", &sched.show_migrations, "Show migration events"),
3677 	OPT_BOOLEAN('V', "cpu-visual", &sched.show_cpu_visual, "Add CPU visual"),
3678 	OPT_BOOLEAN('I', "idle-hist", &sched.idle_hist, "Show idle events only"),
3679 	OPT_STRING(0, "time", &sched.time_str, "str",
3680 		   "Time span for analysis (start,stop)"),
3681 	OPT_BOOLEAN(0, "state", &sched.show_state, "Show task state when sched-out"),
3682 	OPT_STRING('p', "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
3683 		   "analyze events only for given process id(s)"),
3684 	OPT_STRING('t', "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
3685 		   "analyze events only for given thread id(s)"),
3686 	OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
3687 	OPT_PARENT(sched_options)
3688 	};
3689 
3690 	const char * const latency_usage[] = {
3691 		"perf sched latency [<options>]",
3692 		NULL
3693 	};
3694 	const char * const replay_usage[] = {
3695 		"perf sched replay [<options>]",
3696 		NULL
3697 	};
3698 	const char * const map_usage[] = {
3699 		"perf sched map [<options>]",
3700 		NULL
3701 	};
3702 	const char * const timehist_usage[] = {
3703 		"perf sched timehist [<options>]",
3704 		NULL
3705 	};
3706 	const char *const sched_subcommands[] = { "record", "latency", "map",
3707 						  "replay", "script",
3708 						  "timehist", NULL };
3709 	const char *sched_usage[] = {
3710 		NULL,
3711 		NULL
3712 	};
3713 	struct trace_sched_handler lat_ops  = {
3714 		.wakeup_event	    = latency_wakeup_event,
3715 		.switch_event	    = latency_switch_event,
3716 		.runtime_event	    = latency_runtime_event,
3717 		.migrate_task_event = latency_migrate_task_event,
3718 	};
3719 	struct trace_sched_handler map_ops  = {
3720 		.switch_event	    = map_switch_event,
3721 	};
3722 	struct trace_sched_handler replay_ops  = {
3723 		.wakeup_event	    = replay_wakeup_event,
3724 		.switch_event	    = replay_switch_event,
3725 		.fork_event	    = replay_fork_event,
3726 	};
3727 	int ret;
3728 
3729 	perf_tool__init(&sched.tool, /*ordered_events=*/true);
3730 	sched.tool.sample	 = perf_sched__process_tracepoint_sample;
3731 	sched.tool.comm		 = perf_sched__process_comm;
3732 	sched.tool.namespaces	 = perf_event__process_namespaces;
3733 	sched.tool.lost		 = perf_event__process_lost;
3734 	sched.tool.fork		 = perf_sched__process_fork_event;
3735 
3736 	argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
3737 					sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
3738 	if (!argc)
3739 		usage_with_options(sched_usage, sched_options);
3740 
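	/* "record", "latency" and "replay" may be abbreviated to >= 3 characters */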
3741 	/*
3742 	 * Aliased to 'perf script' for now:
3743 	 */
3744 	if (!strcmp(argv[0], "script")) {
3745 		return cmd_script(argc, argv);
3746 	} else if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
3747 		return __cmd_record(argc, argv);
3748 	} else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
3749 		sched.tp_handler = &lat_ops;
3750 		if (argc > 1) {
3751 			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
3752 			if (argc)
3753 				usage_with_options(latency_usage, latency_options);
3754 		}
3755 		setup_sorting(&sched, latency_options, latency_usage);
3756 		return perf_sched__lat(&sched);
3757 	} else if (!strcmp(argv[0], "map")) {
3758 		if (argc) {
3759 			argc = parse_options(argc, argv, map_options, map_usage, 0);
3760 			if (argc)
3761 				usage_with_options(map_usage, map_options);
3762 
3763 			if (sched.map.task_name) {
3764 				sched.map.task_names = strlist__new(sched.map.task_name, NULL);
3765 				if (sched.map.task_names == NULL) {
3766 					fprintf(stderr, "Failed to parse task names\n");
3767 					return -1;
3768 				}
3769 			}
3770 		}
3771 		sched.tp_handler = &map_ops;
3772 		setup_sorting(&sched, latency_options, latency_usage);
3773 		return perf_sched__map(&sched);
3774 	} else if (strlen(argv[0]) > 2 && strstarts("replay", argv[0])) {
3775 		sched.tp_handler = &replay_ops;
3776 		if (argc) {
3777 			argc = parse_options(argc, argv, replay_options, replay_usage, 0);
3778 			if (argc)
3779 				usage_with_options(replay_usage, replay_options);
3780 		}
3781 		return perf_sched__replay(&sched);
3782 	} else if (!strcmp(argv[0], "timehist")) {
3783 		if (argc) {
3784 			argc = parse_options(argc, argv, timehist_options,
3785 					     timehist_usage, 0);
3786 			if (argc)
3787 				usage_with_options(timehist_usage, timehist_options);
3788 		}
3789 		if ((sched.show_wakeups || sched.show_next) &&
3790 		    sched.summary_only) {
3791 			pr_err(" Error: -s and -[n|w] are mutually exclusive.\n");
3792 			parse_options_usage(timehist_usage, timehist_options, "s", true);
3793 			if (sched.show_wakeups)
3794 				parse_options_usage(NULL, timehist_options, "w", true);
3795 			if (sched.show_next)
3796 				parse_options_usage(NULL, timehist_options, "n", true);
3797 			return -EINVAL;
3798 		}
3799 		ret = symbol__validate_sym_arguments();
3800 		if (ret)
3801 			return ret;
3802 
3803 		return perf_sched__timehist(&sched);
3804 	} else {
3805 		usage_with_options(sched_usage, sched_options);
3806 	}
3807 
3808 	return 0;
3809 }
3810