xref: /linux/kernel/sched/debug.c (revision 23b0f90ba871f096474e1c27c3d14f455189d2d9)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree and other debugging details
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 */
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include "sched.h"

/*
 * This allows printing both to /sys/kernel/debug/sched/debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		pr_cont(x);			\
 } while (0)
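
/*
 * Example: SEQ_printf(NULL, ...) ends up in pr_cont() and thus on the
 * console; sysrq_sched_debug_show() below relies on this by passing a
 * NULL seq_file to sched_debug_header() and print_cpu().
 */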

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
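
/*
 * do_div(n, base) divides n in place and returns the remainder, so
 * nsec_high() yields the quotient (whole milliseconds) and nsec_low()
 * the remainder (nanoseconds within the millisecond). Worked example:
 * SPLIT_NS(3001234567) expands to 3001, 234567 and is printed as
 * "3001.234567" by the "%Ld.%06ld" formats used below.
 */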

#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT
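
/*
 * The SCHED_FEAT() x-macro above stringifies every entry of features.h:
 * a hypothetical entry SCHED_FEAT(FOO, true) contributes the string
 * "FOO" to sched_feat_names[], keeping this table in sync with the
 * feature enum built from the same header.
 */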

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef CONFIG_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable_cpuslocked(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else /* !CONFIG_JUMP_LABEL: */
static void sched_feat_disable(int i) { }
static void sched_feat_enable(int i) { }
#endif /* !CONFIG_JUMP_LABEL */

static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
	if (i < 0)
		return i;

	if (neg) {
		sysctl_sched_features &= ~(1UL << i);
		sched_feat_disable(i);
	} else {
		sysctl_sched_features |= (1UL << i);
		sched_feat_enable(i);
	}

	return 0;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int ret;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	cpus_read_lock();
	inode_lock(inode);
	ret = sched_feat_set(cmp);
	inode_unlock(inode);
	cpus_read_unlock();
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
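
/*
 * Usage sketch (actual feature names come from features.h and vary by
 * kernel version):
 *
 *   # cat /sys/kernel/debug/sched/features
 *   PLACE_LAG NO_RUN_TO_PARITY ...
 *   # echo NO_PLACE_LAG > /sys/kernel/debug/sched/features
 *
 * Writing "NAME" enables a feature and "NO_NAME" disables it; an
 * unknown name makes the write fail with the error from match_string().
 */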

static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	unsigned int scaling;
	int ret;

	ret = kstrtouint_from_user(ubuf, cnt, 10, &scaling);
	if (ret)
		return ret;

	if (scaling >= SCHED_TUNABLESCALING_END)
		return -EINVAL;

	sysctl_sched_tunable_scaling = scaling;
	if (sched_update_scaling())
		return -EINVAL;

	*ppos += cnt;
	return cnt;
}

static int sched_scaling_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", sysctl_sched_tunable_scaling);
	return 0;
}

static int sched_scaling_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_scaling_show, NULL);
}

static const struct file_operations sched_scaling_fops = {
	.open		= sched_scaling_open,
	.write		= sched_scaling_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
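
/*
 * The accepted values mirror sched_tunable_scaling_names[] further down
 * in this file: 0 = none, 1 = logarithmic, 2 = linear, e.g.:
 *
 *   # echo 1 > /sys/kernel/debug/sched/tunable_scaling
 */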

#ifdef CONFIG_PREEMPT_DYNAMIC

static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	char buf[16];
	int mode;

	if (cnt > 15)
		cnt = 15;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	mode = sched_dynamic_mode(strstrip(buf));
	if (mode < 0)
		return mode;

	sched_dynamic_update(mode);

	*ppos += cnt;

	return cnt;
}

static int sched_dynamic_show(struct seq_file *m, void *v)
{
	int i = (IS_ENABLED(CONFIG_PREEMPT_RT) || IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY)) * 2;
	int j;

	/* Count entries in the NULL-terminated preempt_modes[] */
	for (j = 0; preempt_modes[j]; j++)
		;
	j -= !IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY);

	for (; i < j; i++) {
		if (preempt_dynamic_mode == i)
			seq_puts(m, "(");
		seq_puts(m, preempt_modes[i]);
		if (preempt_dynamic_mode == i)
			seq_puts(m, ")");

		seq_puts(m, " ");
	}

	seq_puts(m, "\n");
	return 0;
}

static int sched_dynamic_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_dynamic_show, NULL);
}

static const struct file_operations sched_dynamic_fops = {
	.open		= sched_dynamic_open,
	.write		= sched_dynamic_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#endif /* CONFIG_PREEMPT_DYNAMIC */
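
/*
 * Reading the file lists the modes valid for this build with the
 * current one in parentheses, e.g. "none voluntary (full) lazy";
 * writing a mode name switches the preemption model at runtime:
 *
 *   # echo full > /sys/kernel/debug/sched/preempt
 */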

__read_mostly bool sched_debug_verbose;

static struct dentry           *sd_dentry;


static ssize_t sched_verbose_write(struct file *filp, const char __user *ubuf,
				  size_t cnt, loff_t *ppos)
{
	ssize_t result;
	bool orig;

	cpus_read_lock();
	sched_domains_mutex_lock();

	orig = sched_debug_verbose;
	result = debugfs_write_file_bool(filp, ubuf, cnt, ppos);

	if (sched_debug_verbose && !orig)
		update_sched_domain_debugfs();
	else if (!sched_debug_verbose && orig) {
		debugfs_remove(sd_dentry);
		sd_dentry = NULL;
	}

	sched_domains_mutex_unlock();
	cpus_read_unlock();

	return result;
}

static const struct file_operations sched_verbose_fops = {
	.read =         debugfs_read_file_bool,
	.write =        sched_verbose_write,
	.open =         simple_open,
	.llseek =       default_llseek,
};

static const struct seq_operations sched_debug_sops;

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &sched_debug_sops);
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

enum dl_param {
	DL_RUNTIME = 0,
	DL_PERIOD,
};

static unsigned long dl_server_period_max = (1UL << 22) * NSEC_PER_USEC; /* ~4 seconds */
static unsigned long dl_server_period_min = (100) * NSEC_PER_USEC;     /* 100 us */
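
/*
 * Worked out: (1UL << 22) us = 4194304 us, so the longest accepted
 * server period is ~4.19 s and the shortest 100 us. The debugfs files
 * below take values in nanoseconds and check them against these bounds.
 */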

static ssize_t sched_server_write_common(struct file *filp, const char __user *ubuf,
					 size_t cnt, loff_t *ppos, enum dl_param param,
					 void *server)
{
	long cpu = (long) ((struct seq_file *) filp->private_data)->private;
	struct sched_dl_entity *dl_se = (struct sched_dl_entity *)server;
	u64 old_runtime, runtime, period;
	struct rq *rq = cpu_rq(cpu);
	int retval = 0;
	size_t err;
	u64 value;

	err = kstrtoull_from_user(ubuf, cnt, 10, &value);
	if (err)
		return err;

	scoped_guard (rq_lock_irqsave, rq) {
		old_runtime = runtime = dl_se->dl_runtime;
		period = dl_se->dl_period;

		switch (param) {
		case DL_RUNTIME:
			if (runtime == value)
				break;
			runtime = value;
			break;
		case DL_PERIOD:
			if (value == period)
				break;
			period = value;
			break;
		}

		if (runtime > period ||
		    period > dl_server_period_max ||
		    period < dl_server_period_min) {
			return -EINVAL;
		}

		update_rq_clock(rq);
		dl_server_stop(dl_se);
		retval = dl_server_apply_params(dl_se, runtime, period, 0);
		dl_server_start(dl_se);

		if (retval < 0)
			return retval;
	}

	if (!!old_runtime ^ !!runtime) {
		pr_info("%s server %sabled on CPU %d%s.\n",
			server == &rq->fair_server ? "Fair" : "Ext",
			runtime ? "en" : "dis",
			cpu_of(rq),
			runtime ? "" : ", system may malfunction due to starvation");
	}

	*ppos += cnt;
	return cnt;
}

static size_t sched_server_show_common(struct seq_file *m, void *v, enum dl_param param,
				       void *server)
{
	struct sched_dl_entity *dl_se = (struct sched_dl_entity *)server;
	u64 value;

	switch (param) {
	case DL_RUNTIME:
		value = dl_se->dl_runtime;
		break;
	case DL_PERIOD:
		value = dl_se->dl_period;
		break;
	}

	seq_printf(m, "%llu\n", value);
	return 0;
}

static ssize_t
sched_fair_server_runtime_write(struct file *filp, const char __user *ubuf,
				size_t cnt, loff_t *ppos)
{
	long cpu = (long) ((struct seq_file *) filp->private_data)->private;
	struct rq *rq = cpu_rq(cpu);

	return sched_server_write_common(filp, ubuf, cnt, ppos, DL_RUNTIME,
					&rq->fair_server);
}

static int sched_fair_server_runtime_show(struct seq_file *m, void *v)
{
	unsigned long cpu = (unsigned long) m->private;
	struct rq *rq = cpu_rq(cpu);

	return sched_server_show_common(m, v, DL_RUNTIME, &rq->fair_server);
}

static int sched_fair_server_runtime_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_fair_server_runtime_show, inode->i_private);
}

static const struct file_operations fair_server_runtime_fops = {
	.open		= sched_fair_server_runtime_open,
	.write		= sched_fair_server_runtime_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#ifdef CONFIG_SCHED_CLASS_EXT
static ssize_t
sched_ext_server_runtime_write(struct file *filp, const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	long cpu = (long) ((struct seq_file *) filp->private_data)->private;
	struct rq *rq = cpu_rq(cpu);

	return sched_server_write_common(filp, ubuf, cnt, ppos, DL_RUNTIME,
					&rq->ext_server);
}

static int sched_ext_server_runtime_show(struct seq_file *m, void *v)
{
	unsigned long cpu = (unsigned long) m->private;
	struct rq *rq = cpu_rq(cpu);

	return sched_server_show_common(m, v, DL_RUNTIME, &rq->ext_server);
}

static int sched_ext_server_runtime_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_ext_server_runtime_show, inode->i_private);
}

static const struct file_operations ext_server_runtime_fops = {
	.open		= sched_ext_server_runtime_open,
	.write		= sched_ext_server_runtime_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif /* CONFIG_SCHED_CLASS_EXT */

static ssize_t
sched_fair_server_period_write(struct file *filp, const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	long cpu = (long) ((struct seq_file *) filp->private_data)->private;
	struct rq *rq = cpu_rq(cpu);

	return sched_server_write_common(filp, ubuf, cnt, ppos, DL_PERIOD,
					&rq->fair_server);
}

static int sched_fair_server_period_show(struct seq_file *m, void *v)
{
	unsigned long cpu = (unsigned long) m->private;
	struct rq *rq = cpu_rq(cpu);

	return sched_server_show_common(m, v, DL_PERIOD, &rq->fair_server);
}

static int sched_fair_server_period_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_fair_server_period_show, inode->i_private);
}

static const struct file_operations fair_server_period_fops = {
	.open		= sched_fair_server_period_open,
	.write		= sched_fair_server_period_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#ifdef CONFIG_SCHED_CLASS_EXT
static ssize_t
sched_ext_server_period_write(struct file *filp, const char __user *ubuf,
			      size_t cnt, loff_t *ppos)
{
	long cpu = (long) ((struct seq_file *) filp->private_data)->private;
	struct rq *rq = cpu_rq(cpu);

	return sched_server_write_common(filp, ubuf, cnt, ppos, DL_PERIOD,
					&rq->ext_server);
}

static int sched_ext_server_period_show(struct seq_file *m, void *v)
{
	unsigned long cpu = (unsigned long) m->private;
	struct rq *rq = cpu_rq(cpu);

	return sched_server_show_common(m, v, DL_PERIOD, &rq->ext_server);
}

static int sched_ext_server_period_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_ext_server_period_show, inode->i_private);
}

static const struct file_operations ext_server_period_fops = {
	.open		= sched_ext_server_period_open,
	.write		= sched_ext_server_period_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif /* CONFIG_SCHED_CLASS_EXT */

static struct dentry *debugfs_sched;

static void debugfs_fair_server_init(void)
{
	struct dentry *d_fair;
	unsigned long cpu;

	d_fair = debugfs_create_dir("fair_server", debugfs_sched);
	if (!d_fair)
		return;

	for_each_possible_cpu(cpu) {
		struct dentry *d_cpu;
		char buf[32];

		snprintf(buf, sizeof(buf), "cpu%lu", cpu);
		d_cpu = debugfs_create_dir(buf, d_fair);

		debugfs_create_file("runtime", 0644, d_cpu, (void *) cpu, &fair_server_runtime_fops);
		debugfs_create_file("period", 0644, d_cpu, (void *) cpu, &fair_server_period_fops);
	}
}
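
/*
 * Resulting layout, one directory per possible CPU, values in
 * nanoseconds:
 *
 *   /sys/kernel/debug/sched/fair_server/cpu0/runtime
 *   /sys/kernel/debug/sched/fair_server/cpu0/period
 *
 * For example, "echo 25000000 > .../cpu0/runtime" requests 25 ms of
 * fair-server runtime per period on CPU 0, subject to the validation in
 * sched_server_write_common().
 */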

#ifdef CONFIG_SCHED_CLASS_EXT
static void debugfs_ext_server_init(void)
{
	struct dentry *d_ext;
	unsigned long cpu;

	d_ext = debugfs_create_dir("ext_server", debugfs_sched);
	if (!d_ext)
		return;

	for_each_possible_cpu(cpu) {
		struct dentry *d_cpu;
		char buf[32];

		snprintf(buf, sizeof(buf), "cpu%lu", cpu);
		d_cpu = debugfs_create_dir(buf, d_ext);

		debugfs_create_file("runtime", 0644, d_cpu, (void *) cpu, &ext_server_runtime_fops);
		debugfs_create_file("period", 0644, d_cpu, (void *) cpu, &ext_server_period_fops);
	}
}
#endif /* CONFIG_SCHED_CLASS_EXT */

static __init int sched_init_debug(void)
{
	struct dentry __maybe_unused *numa;

	debugfs_sched = debugfs_create_dir("sched", NULL);

	debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
	debugfs_create_file_unsafe("verbose", 0644, debugfs_sched, &sched_debug_verbose, &sched_verbose_fops);
#ifdef CONFIG_PREEMPT_DYNAMIC
	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
#endif

	debugfs_create_u32("base_slice_ns", 0644, debugfs_sched, &sysctl_sched_base_slice);

	debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
	debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);

	debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
	debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
	debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);

	sched_domains_mutex_lock();
	update_sched_domain_debugfs();
	sched_domains_mutex_unlock();

#ifdef CONFIG_NUMA_BALANCING
	numa = debugfs_create_dir("numa_balancing", debugfs_sched);

	debugfs_create_u32("scan_delay_ms", 0644, numa, &sysctl_numa_balancing_scan_delay);
	debugfs_create_u32("scan_period_min_ms", 0644, numa, &sysctl_numa_balancing_scan_period_min);
	debugfs_create_u32("scan_period_max_ms", 0644, numa, &sysctl_numa_balancing_scan_period_max);
	debugfs_create_u32("scan_size_mb", 0644, numa, &sysctl_numa_balancing_scan_size);
	debugfs_create_u32("hot_threshold_ms", 0644, numa, &sysctl_numa_balancing_hot_threshold);
#endif /* CONFIG_NUMA_BALANCING */

	debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);

	debugfs_fair_server_init();
#ifdef CONFIG_SCHED_CLASS_EXT
	debugfs_ext_server_init();
#endif

	return 0;
}
late_initcall(sched_init_debug);

static cpumask_var_t		sd_sysctl_cpus;

static int sd_flags_show(struct seq_file *m, void *v)
{
	unsigned long flags = *(unsigned int *)m->private;
	int idx;

	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
		seq_puts(m, sd_flag_debug[idx].name);
		seq_puts(m, " ");
	}
	seq_puts(m, "\n");

	return 0;
}

static int sd_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, sd_flags_show, inode->i_private);
}

static const struct file_operations sd_flags_fops = {
	.open		= sd_flags_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void register_sd(struct sched_domain *sd, struct dentry *parent)
{
#define SDM(type, mode, member)	\
	debugfs_create_##type(#member, mode, parent, &sd->member)

	SDM(ulong, 0644, min_interval);
	SDM(ulong, 0644, max_interval);
	SDM(u64,   0644, max_newidle_lb_cost);
	SDM(u32,   0644, busy_factor);
	SDM(u32,   0644, imbalance_pct);
	SDM(u32,   0644, cache_nice_tries);
	SDM(str,   0444, name);

#undef SDM

	debugfs_create_file("flags", 0444, parent, &sd->flags, &sd_flags_fops);
	debugfs_create_file("groups_flags", 0444, parent, &sd->groups->flags, &sd_flags_fops);
	debugfs_create_u32("level", 0444, parent, (u32 *)&sd->level);

	if (sd->flags & SD_ASYM_PACKING)
		debugfs_create_u32("group_asym_prefer_cpu", 0444, parent,
				   (u32 *)&sd->groups->asym_prefer_cpu);
}

void update_sched_domain_debugfs(void)
{
	int cpu, i;

	/*
	 * This can unfortunately be invoked before sched_debug_init() creates
	 * the debug directory. Don't touch sd_sysctl_cpus until then.
	 */
	if (!debugfs_sched)
		return;

	if (!sched_debug_verbose)
		return;

	if (!cpumask_available(sd_sysctl_cpus)) {
		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
			return;
		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
	}

	if (!sd_dentry) {
		sd_dentry = debugfs_create_dir("domains", debugfs_sched);

		/* rebuild sd_sysctl_cpus if empty since it gets cleared below */
		if (cpumask_empty(sd_sysctl_cpus))
			cpumask_copy(sd_sysctl_cpus, cpu_online_mask);
	}

	for_each_cpu(cpu, sd_sysctl_cpus) {
		struct sched_domain *sd;
		struct dentry *d_cpu;
		char buf[32];

		snprintf(buf, sizeof(buf), "cpu%d", cpu);
		debugfs_lookup_and_remove(buf, sd_dentry);
		d_cpu = debugfs_create_dir(buf, sd_dentry);

		i = 0;
		for_each_domain(cpu, sd) {
			struct dentry *d_sd;

			snprintf(buf, sizeof(buf), "domain%d", i);
			d_sd = debugfs_create_dir(buf, d_cpu);

			register_sd(sd, d_sd);
			i++;
		}

		__cpumask_clear_cpu(cpu, sd_sysctl_cpus);
	}
}
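
/*
 * With verbose enabled this builds, per pending CPU and per domain
 * level, directories such as:
 *
 *   /sys/kernel/debug/sched/domains/cpu0/domain0/
 *       {min_interval,max_interval,busy_factor,imbalance_pct,
 *        cache_nice_tries,name,flags,groups_flags,level,...}
 */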

void dirty_sched_domain_sysctl(int cpu)
{
	if (cpumask_available(sd_sysctl_cpus))
		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F)		SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)F)
#define P_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld\n",	\
		#F, (long long)schedstat_val(stats->F))
#define PN(F)		SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", \
		#F, SPLIT_NS((long long)schedstat_val(stats->F)))

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);

	if (schedstat_enabled()) {
		struct sched_statistics *stats;
		stats = __schedstats_from_se(se);

		PN_SCHEDSTAT(wait_start);
		PN_SCHEDSTAT(sleep_start);
		PN_SCHEDSTAT(block_start);
		PN_SCHEDSTAT(sleep_max);
		PN_SCHEDSTAT(block_max);
		PN_SCHEDSTAT(exec_max);
		PN_SCHEDSTAT(slice_max);
		PN_SCHEDSTAT(wait_max);
		PN_SCHEDSTAT(wait_sum);
		P_SCHEDSTAT(wait_count);
	}

	P(se->load.weight);
	P(se->avg.load_avg);
	P(se->avg.util_avg);
	P(se->avg.runnable_avg);

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif /* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_CGROUP_SCHED
static DEFINE_SPINLOCK(sched_debug_lock);
static char group_path[PATH_MAX];

static void task_group_path(struct task_group *tg, char *path, int plen)
{
	if (autogroup_path(tg, path, plen))
		return;

	cgroup_path(tg->css.cgroup, path, plen);
}

/*
 * Only 1 SEQ_printf_task_group_path() caller can use the full length
 * group_path[] for cgroup path. Other simultaneous callers will have
 * to use a shorter stack buffer. A "..." suffix is appended at the end
 * of the stack buffer so that it will show up in case the output length
 * matches the given buffer size to indicate possible path name truncation.
 */
#define SEQ_printf_task_group_path(m, tg, fmt...)			\
{									\
	if (spin_trylock(&sched_debug_lock)) {				\
		task_group_path(tg, group_path, sizeof(group_path));	\
		SEQ_printf(m, fmt, group_path);				\
		spin_unlock(&sched_debug_lock);				\
	} else {							\
		char buf[128];						\
		char *bufend = buf + sizeof(buf) - 3;			\
		task_group_path(tg, buf, bufend - buf);			\
		strcpy(bufend - 1, "...");				\
		SEQ_printf(m, fmt, buf);				\
	}								\
}
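
/*
 * Sizing note on the trylock-failure path above: bufend - 1 is
 * &buf[124], exactly where a path filling all of task_group_path()'s
 * 125 usable bytes puts its terminating NUL; shorter paths terminate
 * earlier, so the appended "..." only becomes visible when the path
 * actually filled the buffer, i.e. when truncation may have occurred.
 */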
#endif

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (task_current(rq, p))
		SEQ_printf(m, ">R");
	else
		SEQ_printf(m, " %c", task_state_to_char(p));

	SEQ_printf(m, " %15s %5d %9Ld.%06ld   %c   %9Ld.%06ld %c %9Ld.%06ld %9Ld.%06ld %9Ld   %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		entity_eligible(cfs_rq_of(&p->se), &p->se) ? 'E' : 'N',
		SPLIT_NS(p->se.deadline),
		p->se.custom_slice ? 'S' : ' ',
		SPLIT_NS(p->se.slice),
		SPLIT_NS(p->se.sum_exec_runtime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);

	SEQ_printf(m, "%9lld.%06ld %9lld.%06ld %9lld.%06ld",
		SPLIT_NS(schedstat_val_or_zero(p->stats.wait_sum)),
		SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)),
		SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, "   %d      %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf_task_group_path(m, task_group(p), "        %s")
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "runnable tasks:\n");
	SEQ_printf(m, " S            task   PID       vruntime   eligible    "
		   "deadline             slice          sum-exec      switches  "
		   "prio         wait-time        sum-sleep       sum-block"
#ifdef CONFIG_NUMA_BALANCING
		   "  node   group-id"
#endif
#ifdef CONFIG_CGROUP_SCHED
		   "  group-path"
#endif
		   "\n");
	SEQ_printf(m, "-------------------------------------------------------"
		   "------------------------------------------------------"
		   "------------------------------------------------------"
#ifdef CONFIG_NUMA_BALANCING
		   "--------------"
#endif
#ifdef CONFIG_CGROUP_SCHED
		   "--------------"
#endif
		   "\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 left_vruntime = -1, zero_vruntime, right_vruntime = -1, left_deadline = -1, spread;
	struct sched_entity *last, *first, *root;
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif

	raw_spin_rq_lock_irqsave(rq, flags);
	root = __pick_root_entity(cfs_rq);
	if (root)
		left_vruntime = root->min_vruntime;
	first = __pick_first_entity(cfs_rq);
	if (first)
		left_deadline = first->deadline;
	last = __pick_last_entity(cfs_rq);
	if (last)
		right_vruntime = last->vruntime;
	zero_vruntime = cfs_rq->zero_vruntime;
	raw_spin_rq_unlock_irqrestore(rq, flags);

	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "left_deadline",
			SPLIT_NS(left_deadline));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "left_vruntime",
			SPLIT_NS(left_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "zero_vruntime",
			SPLIT_NS(zero_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "avg_vruntime",
			SPLIT_NS(avg_vruntime(cfs_rq)));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "right_vruntime",
			SPLIT_NS(right_vruntime));
	spread = right_vruntime - left_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_queued", cfs_rq->nr_queued);
	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_runnable", cfs_rq->h_nr_runnable);
	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_queued", cfs_rq->h_nr_queued);
	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_idle", cfs_rq->h_nr_idle);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_avg",
			cfs_rq->avg.runnable_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, "  .%-30s: %u\n", "util_est",
			cfs_rq->avg.util_est);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
			cfs_rq->removed.load_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
			cfs_rq->removed.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_avg",
			cfs_rq->removed.runnable_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, rt_rq->tg, "rt_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	PU(rt_nr_running);

#ifdef CONFIG_RT_GROUP_SCHED
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);
#endif

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "dl_rq[%d]:\n", cpu);

#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

	PU(dl_nr_running);
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else /* !CONFIG_X86: */
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif /* !CONFIG_X86 */

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %d\n", #x, (int)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	P(nr_switches);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
#undef P
#undef PN

#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_base_slice);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu) {
		/*
		 * Need to reset softlockup watchdogs on all CPUs, because
		 * another CPU might be blocked waiting for us to process
		 * an IPI or stop_machine.
		 */
		touch_nmi_watchdog();
		touch_all_softlockup_watchdogs();
		print_cpu(NULL, cpu);
	}
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is CPU 0.
 * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}
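
/*
 * Example with CPUs 0 and 2 online: *offset == 0 returns the header
 * token (1); *offset == 1 returns CPU 0 as 0 + 2; the following
 * iteration takes the cpumask_next() branch and returns CPU 2 as 2 + 2.
 * sched_debug_show() undoes the bias with cpu = (unsigned long)(v - 2).
 */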

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start		= sched_debug_start,
	.next		= sched_debug_next,
	.stop		= sched_debug_stop,
	.show		= sched_debug_show,
};

#define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
#define __P(F) __PS(#F, F)
#define   P(F) __PS(#F, p->F)
#define   PM(F, M) __PS(#F, p->F & (M))
#define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
#define __PN(F) __PSN(#F, F)
#define   PN(F) __PSN(#F, p->F)


#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif


static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	if (p->mm)
		P(mm->numa_scan_seq);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
#endif /* CONFIG_NUMA_BALANCING */
}

void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
						  struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");

#define P_SCHEDSTAT(F)  __PS(#F, schedstat_val(p->stats.F))
#define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->stats.F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(sum_sleep_runtime);
		PN_SCHEDSTAT(sum_block_runtime);
		PN_SCHEDSTAT(wait_start);
		PN_SCHEDSTAT(sleep_start);
		PN_SCHEDSTAT(block_start);
		PN_SCHEDSTAT(sleep_max);
		PN_SCHEDSTAT(block_max);
		PN_SCHEDSTAT(exec_max);
		PN_SCHEDSTAT(slice_max);
		PN_SCHEDSTAT(wait_max);
		PN_SCHEDSTAT(wait_sum);
		P_SCHEDSTAT(wait_count);
		PN_SCHEDSTAT(iowait_sum);
		P_SCHEDSTAT(iowait_count);
		P_SCHEDSTAT(nr_migrations_cold);
		P_SCHEDSTAT(nr_failed_migrations_affine);
		P_SCHEDSTAT(nr_failed_migrations_running);
		P_SCHEDSTAT(nr_failed_migrations_hot);
		P_SCHEDSTAT(nr_forced_migrations);
		P_SCHEDSTAT(nr_wakeups);
		P_SCHEDSTAT(nr_wakeups_sync);
		P_SCHEDSTAT(nr_wakeups_migrate);
		P_SCHEDSTAT(nr_wakeups_local);
		P_SCHEDSTAT(nr_wakeups_remote);
		P_SCHEDSTAT(nr_wakeups_affine);
		P_SCHEDSTAT(nr_wakeups_affine_attempts);
		P_SCHEDSTAT(nr_wakeups_passive);
		P_SCHEDSTAT(nr_wakeups_idle);

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);

#ifdef CONFIG_SCHED_CORE
		PN_SCHEDSTAT(core_forceidle_sum);
#endif
	}

	__P(nr_switches);
	__PS("nr_voluntary_switches", p->nvcsw);
	__PS("nr_involuntary_switches", p->nivcsw);

	P(se.load.weight);
	P(se.avg.load_sum);
	P(se.avg.runnable_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.runnable_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
	PM(se.avg.util_est, ~UTIL_AVG_UNCHANGED);
#ifdef CONFIG_UCLAMP_TASK
	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
	__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
	__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
	__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
#endif /* CONFIG_UCLAMP_TASK */
	P(policy);
	P(prio);
	if (task_has_dl_policy(p)) {
		P(dl.runtime);
		P(dl.deadline);
	} else if (fair_policy(p->policy)) {
		P(se.slice);
	}
#ifdef CONFIG_SCHED_CLASS_EXT
	__PS("ext.enabled", task_on_scx(p));
#endif
#undef PN_SCHEDSTAT
#undef P_SCHEDSTAT

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		__PS("clock-delta", t1-t0);
	}

	sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->stats, 0, sizeof(p->stats));
#endif
}

void resched_latency_warn(int cpu, u64 latency)
{
	static DEFINE_RATELIMIT_STATE(latency_check_ratelimit, 60 * 60 * HZ, 1);

	if (likely(!__ratelimit(&latency_check_ratelimit)))
		return;

	pr_err("sched: CPU %d need_resched set for > %llu ns (%d ticks) without schedule\n",
	       cpu, latency, cpu_rq(cpu)->ticks_without_resched);
	dump_stack();
}
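
/*
 * Note on the ratelimit: DEFINE_RATELIMIT_STATE(..., 60 * 60 * HZ, 1)
 * allows a burst of one message per one-hour interval, so the warning
 * above is printed at most once an hour even if the condition keeps
 * triggering.
 */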
1390