xref: /linux/kernel/sched/debug.c (revision 4ac6d90867a4de2e12117e755dbd76e08d88697f)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree and other debugging details
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 */
#include "sched.h"

/*
 * This allows printing both to /sys/kernel/debug/sched/debug and
 * to the console (when the seq_file pointer is NULL, e.g. from
 * sysrq_sched_debug_show()):
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		pr_cont(x);			\
 } while (0)

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)

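/*
 * SPLIT_NS() feeds a "%Ld.%06ld" format: the integer part is the value in
 * milliseconds, the six fractional digits are the nanosecond remainder.
 * E.g. 1234567890 ns prints as "1234.567890":
 *
 *	SEQ_printf(m, "%Ld.%06ld\n", SPLIT_NS(1234567890LL));
 */
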
#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT

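/*
 * With the SCHED_FEAT() definition above, each line of features.h, e.g.:
 *
 *	SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)
 *
 * expands to the string literal "GENTLE_FAIR_SLEEPERS", so the array holds
 * one name per feature, indexed like the __SCHED_FEAT_* enum values.
 */
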
static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef CONFIG_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable_cpuslocked(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { }
static void sched_feat_enable(int i) { }
#endif /* CONFIG_JUMP_LABEL */

static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
	if (i < 0)
		return i;

	if (neg) {
		sysctl_sched_features &= ~(1UL << i);
		sched_feat_disable(i);
	} else {
		sysctl_sched_features |= (1UL << i);
		sched_feat_enable(i);
	}

	return 0;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int ret;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	cpus_read_lock();
	inode_lock(inode);
	ret = sched_feat_set(cmp);
	inode_unlock(inode);
	cpus_read_unlock();
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

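/*
 * Usage sketch (paths as created by sched_init_debug() below): reading
 * "features" lists every feature, with disabled ones prefixed "NO_";
 * writing one (optionally "NO_"-prefixed) name toggles it. E.g.:
 *
 *	# cat /sys/kernel/debug/sched/features
 *	GENTLE_FAIR_SLEEPERS START_DEBIT ...
 *	# echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched/features
 */
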
#ifdef CONFIG_SMP

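/*
 * "tunable_scaling" selects how the latency/granularity tunables are
 * scaled with the number of online CPUs: 0 = none, 1 = logarithmic (the
 * default), 2 = linear; see sched_tunable_scaling_names[] below. E.g.:
 *
 *	# echo 0 > /sys/kernel/debug/sched/tunable_scaling
 */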
static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	char buf[16];

	if (cnt > 15)
		cnt = 15;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	/* Make sure kstrtouint() sees a NUL-terminated string: */
	buf[cnt] = '\0';

	if (kstrtouint(buf, 10, &sysctl_sched_tunable_scaling))
		return -EINVAL;

	if (sched_update_scaling())
		return -EINVAL;

	*ppos += cnt;
	return cnt;
}

static int sched_scaling_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", sysctl_sched_tunable_scaling);
	return 0;
}

static int sched_scaling_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_scaling_show, NULL);
}

static const struct file_operations sched_scaling_fops = {
	.open		= sched_scaling_open,
	.write		= sched_scaling_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#endif /* CONFIG_SMP */

#ifdef CONFIG_PREEMPT_DYNAMIC

static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	char buf[16];
	int mode;

	if (cnt > 15)
		cnt = 15;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	mode = sched_dynamic_mode(strstrip(buf));
	if (mode < 0)
		return mode;

	sched_dynamic_update(mode);

	*ppos += cnt;

	return cnt;
}

static int sched_dynamic_show(struct seq_file *m, void *v)
{
	static const char *preempt_modes[] = {
		"none", "voluntary", "full"
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(preempt_modes); i++) {
		if (preempt_dynamic_mode == i)
			seq_puts(m, "(");
		seq_puts(m, preempt_modes[i]);
		if (preempt_dynamic_mode == i)
			seq_puts(m, ")");

		seq_puts(m, " ");
	}

	seq_puts(m, "\n");
	return 0;
}

static int sched_dynamic_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_dynamic_show, NULL);
}

static const struct file_operations sched_dynamic_fops = {
	.open		= sched_dynamic_open,
	.write		= sched_dynamic_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#endif /* CONFIG_PREEMPT_DYNAMIC */

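/*
 * Reading "preempt" marks the active preemption model with parentheses;
 * writing a model name switches it at runtime. E.g.:
 *
 *	# cat /sys/kernel/debug/sched/preempt
 *	none voluntary (full)
 *	# echo voluntary > /sys/kernel/debug/sched/preempt
 */
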
__read_mostly bool sched_debug_verbose;

static const struct seq_operations sched_debug_sops;

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &sched_debug_sops);
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static struct dentry *debugfs_sched;

static __init int sched_init_debug(void)
{
	struct dentry __maybe_unused *numa;

	debugfs_sched = debugfs_create_dir("sched", NULL);

	debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
	debugfs_create_bool("verbose", 0644, debugfs_sched, &sched_debug_verbose);
#ifdef CONFIG_PREEMPT_DYNAMIC
	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
#endif

	debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency);
	debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
	debugfs_create_u32("wakeup_granularity_ns", 0644, debugfs_sched, &sysctl_sched_wakeup_granularity);

	debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
	debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);

#ifdef CONFIG_SMP
	debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
	debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
	debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);

	mutex_lock(&sched_domains_mutex);
	update_sched_domain_debugfs();
	mutex_unlock(&sched_domains_mutex);
#endif

#ifdef CONFIG_NUMA_BALANCING
	numa = debugfs_create_dir("numa_balancing", debugfs_sched);

	debugfs_create_u32("scan_delay_ms", 0644, numa, &sysctl_numa_balancing_scan_delay);
	debugfs_create_u32("scan_period_min_ms", 0644, numa, &sysctl_numa_balancing_scan_period_min);
	debugfs_create_u32("scan_period_max_ms", 0644, numa, &sysctl_numa_balancing_scan_period_max);
	debugfs_create_u32("scan_size_mb", 0644, numa, &sysctl_numa_balancing_scan_size);
#endif

	debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);

	return 0;
}
late_initcall(sched_init_debug);

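/*
 * With everything configured in, /sys/kernel/debug/sched/ then contains
 * roughly:
 *
 *	debug  features  preempt  verbose
 *	latency_ns  min_granularity_ns  wakeup_granularity_ns
 *	latency_warn_ms  latency_warn_once
 *	migration_cost_ns  nr_migrate  tunable_scaling
 *	domains/  numa_balancing/
 */
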
#ifdef CONFIG_SMP

static cpumask_var_t		sd_sysctl_cpus;
static struct dentry		*sd_dentry;

static int sd_flags_show(struct seq_file *m, void *v)
{
	unsigned long flags = *(unsigned int *)m->private;
	int idx;

	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
		seq_puts(m, sd_flag_debug[idx].name);
		seq_puts(m, " ");
	}
	seq_puts(m, "\n");

	return 0;
}

static int sd_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, sd_flags_show, inode->i_private);
}

static const struct file_operations sd_flags_fops = {
	.open		= sd_flags_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void register_sd(struct sched_domain *sd, struct dentry *parent)
{
#define SDM(type, mode, member)	\
	debugfs_create_##type(#member, mode, parent, &sd->member)

	SDM(ulong, 0644, min_interval);
	SDM(ulong, 0644, max_interval);
	SDM(u64,   0644, max_newidle_lb_cost);
	SDM(u32,   0644, busy_factor);
	SDM(u32,   0644, imbalance_pct);
	SDM(u32,   0644, cache_nice_tries);
	SDM(str,   0444, name);

#undef SDM

	debugfs_create_file("flags", 0444, parent, &sd->flags, &sd_flags_fops);
}

void update_sched_domain_debugfs(void)
{
	int cpu, i;

	/*
	 * This can unfortunately be invoked before sched_init_debug() creates
	 * the debug directory. Don't touch sd_sysctl_cpus until then.
	 */
	if (!debugfs_sched)
		return;

	if (!cpumask_available(sd_sysctl_cpus)) {
		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
			return;
		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
	}

	if (!sd_dentry)
		sd_dentry = debugfs_create_dir("domains", debugfs_sched);

	for_each_cpu(cpu, sd_sysctl_cpus) {
		struct sched_domain *sd;
		struct dentry *d_cpu;
		char buf[32];

		snprintf(buf, sizeof(buf), "cpu%d", cpu);
		debugfs_remove(debugfs_lookup(buf, sd_dentry));
		d_cpu = debugfs_create_dir(buf, sd_dentry);

		i = 0;
		for_each_domain(cpu, sd) {
			struct dentry *d_sd;

			snprintf(buf, sizeof(buf), "domain%d", i);
			d_sd = debugfs_create_dir(buf, d_cpu);

			register_sd(sd, d_sd);
			i++;
		}

		__cpumask_clear_cpu(cpu, sd_sysctl_cpus);
	}
}

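/*
 * Per-domain knobs thus end up under, e.g.:
 *
 *	/sys/kernel/debug/sched/domains/cpu0/domain0/flags
 *	/sys/kernel/debug/sched/domains/cpu0/domain0/min_interval
 *
 * with one domainN directory per sched_domain level of each CPU.
 */
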
void dirty_sched_domain_sysctl(int cpu)
{
	if (cpumask_available(sd_sysctl_cpus))
		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
}

#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F)		SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)F)
#define P_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)schedstat_val(F))
#define PN(F)		SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);

	if (schedstat_enabled()) {
		PN_SCHEDSTAT(se->statistics.wait_start);
		PN_SCHEDSTAT(se->statistics.sleep_start);
		PN_SCHEDSTAT(se->statistics.block_start);
		PN_SCHEDSTAT(se->statistics.sleep_max);
		PN_SCHEDSTAT(se->statistics.block_max);
		PN_SCHEDSTAT(se->statistics.exec_max);
		PN_SCHEDSTAT(se->statistics.slice_max);
		PN_SCHEDSTAT(se->statistics.wait_max);
		PN_SCHEDSTAT(se->statistics.wait_sum);
		P_SCHEDSTAT(se->statistics.wait_count);
	}

	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.load_avg);
	P(se->avg.util_avg);
	P(se->avg.runnable_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static DEFINE_SPINLOCK(sched_debug_lock);
static char group_path[PATH_MAX];

static void task_group_path(struct task_group *tg, char *path, int plen)
{
	if (autogroup_path(tg, path, plen))
		return;

	cgroup_path(tg->css.cgroup, path, plen);
}

/*
 * Only one SEQ_printf_task_group_path() caller at a time can use the
 * full-length group_path[] buffer for the cgroup path; any simultaneous
 * caller falls back to a short stack buffer instead. A "..." suffix is
 * placed at the end of that stack buffer so that truncation is visible
 * whenever the path fills the whole buffer.
 */
#define SEQ_printf_task_group_path(m, tg, fmt...)			\
{									\
	if (spin_trylock(&sched_debug_lock)) {				\
		task_group_path(tg, group_path, sizeof(group_path));	\
		SEQ_printf(m, fmt, group_path);				\
		spin_unlock(&sched_debug_lock);				\
	} else {							\
		char buf[128];						\
		char *bufend = buf + sizeof(buf) - 3;			\
		task_group_path(tg, buf, bufend - buf);			\
		strcpy(bufend - 1, "...");				\
		SEQ_printf(m, fmt, buf);				\
	}								\
}
#endif

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (task_current(rq, p))
		SEQ_printf(m, ">R");
	else
		SEQ_printf(m, " %c", task_state_to_char(p));

	SEQ_printf(m, " %15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);

	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf_task_group_path(m, task_group(p), " %s")
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "runnable tasks:\n");
	SEQ_printf(m, " S            task   PID         tree-key  switches  prio"
		   "     wait-time             sum-exec        sum-sleep\n");
	SEQ_printf(m, "-------------------------------------------------------"
		   "------------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_rq_lock_irqsave(rq, flags);
	if (rb_first_cached(&cfs_rq->tasks_timeline))
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_rq_unlock_irqrestore(rq, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running);
	SEQ_printf(m, "  .%-30s: %d\n", "idle_h_nr_running",
			cfs_rq->idle_h_nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_avg",
			cfs_rq->avg.runnable_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, "  .%-30s: %u\n", "util_est_enqueued",
			cfs_rq->avg.util_est.enqueued);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
			cfs_rq->removed.load_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
			cfs_rq->removed.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_avg",
			cfs_rq->removed.runnable_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, rt_rq->tg, "rt_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	PU(rt_nr_running);
#ifdef CONFIG_SMP
	PU(rt_nr_migratory);
#endif
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "dl_rq[%d]:\n", cpu);

#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

	PU(dl_nr_running);
#ifdef CONFIG_SMP
	PU(dl_nr_migratory);
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
	dl_bw = &dl_rq->dl_bw;
#endif
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	P(nr_switches);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu) {
		/*
		 * Need to reset softlockup watchdogs on all CPUs, because
		 * another CPU might be blocked waiting for us to process
		 * an IPI or stop_machine.
		 */
		touch_nmi_watchdog();
		touch_all_softlockup_watchdogs();
		print_cpu(NULL, cpu);
	}
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position, which means 2 is CPU 0, 3 is
 * CPU 1, and so on.
 * In a hotplugged system some CPUs, including CPU 0, may be missing, so we
 * have to use cpumask_*() to iterate over the CPUs.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start		= sched_debug_start,
	.next		= sched_debug_next,
	.stop		= sched_debug_stop,
	.show		= sched_debug_show,
};

#define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
#define __P(F) __PS(#F, F)
#define   P(F) __PS(#F, p->F)
#define   PM(F, M) __PS(#F, p->F & (M))
#define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
#define __PN(F) __PSN(#F, F)
#define   PN(F) __PSN(#F, p->F)

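/*
 * For example, P(se.nr_migrations) would emit a /proc/<pid>/sched line
 * along the lines of:
 *
 *	se.nr_migrations                             :                   42
 *
 * while PN() prints the value via SPLIT_NS(), i.e. in milliseconds.
 */
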
#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif

static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	struct mempolicy *pol;

	if (p->mm)
		P(mm->numa_scan_seq);

	task_lock(p);
	pol = p->mempolicy;
	if (pol && !(pol->flags & MPOL_F_MORON))
		pol = NULL;
	mpol_get(pol);
	task_unlock(p);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
	mpol_put(pol);
#endif
}

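/*
 * Backs /proc/<pid>/sched: e.g. "cat /proc/self/sched" dumps these
 * per-task statistics, and a write to that file resets them via
 * proc_sched_set_task() below.
 */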
void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
						  struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");

#define P_SCHEDSTAT(F)  __PS(#F, schedstat_val(p->F))
#define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
		PN_SCHEDSTAT(se.statistics.wait_start);
		PN_SCHEDSTAT(se.statistics.sleep_start);
		PN_SCHEDSTAT(se.statistics.block_start);
		PN_SCHEDSTAT(se.statistics.sleep_max);
		PN_SCHEDSTAT(se.statistics.block_max);
		PN_SCHEDSTAT(se.statistics.exec_max);
		PN_SCHEDSTAT(se.statistics.slice_max);
		PN_SCHEDSTAT(se.statistics.wait_max);
		PN_SCHEDSTAT(se.statistics.wait_sum);
		P_SCHEDSTAT(se.statistics.wait_count);
		PN_SCHEDSTAT(se.statistics.iowait_sum);
		P_SCHEDSTAT(se.statistics.iowait_count);
		P_SCHEDSTAT(se.statistics.nr_migrations_cold);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
		P_SCHEDSTAT(se.statistics.nr_forced_migrations);
		P_SCHEDSTAT(se.statistics.nr_wakeups);
		P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
		P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
		P_SCHEDSTAT(se.statistics.nr_wakeups_local);
		P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
		P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
		P_SCHEDSTAT(se.statistics.nr_wakeups_idle);

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}

	__P(nr_switches);
	__PS("nr_voluntary_switches", p->nvcsw);
	__PS("nr_involuntary_switches", p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.load_sum);
	P(se.avg.runnable_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.runnable_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
	P(se.avg.util_est.ewma);
	PM(se.avg.util_est.enqueued, ~UTIL_AVG_UNCHANGED);
#endif
#ifdef CONFIG_UCLAMP_TASK
	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
	__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
	__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
	__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
#endif
	P(policy);
	P(prio);
	if (task_has_dl_policy(p)) {
		P(dl.runtime);
		P(dl.deadline);
	}
#undef PN_SCHEDSTAT
#undef P_SCHEDSTAT

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		__PS("clock-delta", t1-t0);
	}

	sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}

void resched_latency_warn(int cpu, u64 latency)
{
	static DEFINE_RATELIMIT_STATE(latency_check_ratelimit, 60 * 60 * HZ, 1);

	WARN(__ratelimit(&latency_check_ratelimit),
	     "sched: CPU %d need_resched set for > %llu ns (%d ticks) "
	     "without schedule\n",
	     cpu, latency, cpu_rq(cpu)->ticks_without_resched);
}