// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree and other debugging details
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 */
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include "sched.h"

/*
 * This allows printing both to /sys/kernel/debug/sched/debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		pr_cont(x);			\
 } while (0)

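/*
 * Note (illustrative): the print helpers below all accept a NULL seq_file;
 * sysrq_sched_debug_show() relies on this to dump the same report to the
 * console via pr_cont().
 */
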
/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)

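/*
 * Example (illustrative): SPLIT_NS() expands to a high/low argument pair
 * for a "%Ld.%06ld" format, so 1234567 ns prints as "1.234567", i.e.
 * milliseconds with a six-digit fractional part:
 *
 *	SEQ_printf(m, "%Ld.%06ld\n", SPLIT_NS(1234567LL));
 */
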
#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef CONFIG_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable_cpuslocked(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else /* !CONFIG_JUMP_LABEL: */
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* !CONFIG_JUMP_LABEL */

static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
	if (i < 0)
		return i;

	if (neg) {
		sysctl_sched_features &= ~(1UL << i);
		sched_feat_disable(i);
	} else {
		sysctl_sched_features |= (1UL << i);
		sched_feat_enable(i);
	}

	return 0;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int ret;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	cpus_read_lock();
	inode_lock(inode);
	ret = sched_feat_set(cmp);
	inode_unlock(inode);
	cpus_read_unlock();
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

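/*
 * Illustrative usage, assuming debugfs is mounted at /sys/kernel/debug
 * (feature names come from features.h; a "NO_" prefix clears the bit):
 *
 *	cat /sys/kernel/debug/sched/features
 *	echo NO_TTWU_QUEUE > /sys/kernel/debug/sched/features
 */
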
static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	char buf[16];
	unsigned int scaling;

	if (cnt > 15)
		cnt = 15;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;
	buf[cnt] = '\0';

	if (kstrtouint(buf, 10, &scaling))
		return -EINVAL;

	if (scaling >= SCHED_TUNABLESCALING_END)
		return -EINVAL;

	sysctl_sched_tunable_scaling = scaling;
	if (sched_update_scaling())
		return -EINVAL;

	*ppos += cnt;
	return cnt;
}

static int sched_scaling_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", sysctl_sched_tunable_scaling);
	return 0;
}

static int sched_scaling_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_scaling_show, NULL);
}

static const struct file_operations sched_scaling_fops = {
	.open		= sched_scaling_open,
	.write		= sched_scaling_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

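/*
 * Illustrative usage: tunable_scaling accepts 0 (none), 1 (logarithmic)
 * or 2 (linear); see sched_tunable_scaling_names[] below. Anything
 * >= SCHED_TUNABLESCALING_END is rejected with -EINVAL:
 *
 *	echo 1 > /sys/kernel/debug/sched/tunable_scaling
 */
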
#ifdef CONFIG_PREEMPT_DYNAMIC

static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	char buf[16];
	int mode;

	if (cnt > 15)
		cnt = 15;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	mode = sched_dynamic_mode(strstrip(buf));
	if (mode < 0)
		return mode;

	sched_dynamic_update(mode);

	*ppos += cnt;

	return cnt;
}

static int sched_dynamic_show(struct seq_file *m, void *v)
{
	int i = IS_ENABLED(CONFIG_PREEMPT_RT) * 2;
	int j;

	/* Count entries in NULL terminated preempt_modes */
	for (j = 0; preempt_modes[j]; j++)
		;
	j -= !IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY);

	for (; i < j; i++) {
		if (preempt_dynamic_mode == i)
			seq_puts(m, "(");
		seq_puts(m, preempt_modes[i]);
		if (preempt_dynamic_mode == i)
			seq_puts(m, ")");

		seq_puts(m, " ");
	}

	seq_puts(m, "\n");
	return 0;
}

static int sched_dynamic_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_dynamic_show, NULL);
}

static const struct file_operations sched_dynamic_fops = {
	.open		= sched_dynamic_open,
	.write		= sched_dynamic_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#endif /* CONFIG_PREEMPT_DYNAMIC */

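/*
 * Illustrative usage: reading "preempt" lists the available dynamic
 * preemption modes with the current one in parentheses; writing a mode
 * name switches it at runtime:
 *
 *	cat /sys/kernel/debug/sched/preempt
 *	echo full > /sys/kernel/debug/sched/preempt
 */
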
__read_mostly bool sched_debug_verbose;

static struct dentry           *sd_dentry;


static ssize_t sched_verbose_write(struct file *filp, const char __user *ubuf,
				  size_t cnt, loff_t *ppos)
{
	ssize_t result;
	bool orig;

	cpus_read_lock();
	sched_domains_mutex_lock();

	orig = sched_debug_verbose;
	result = debugfs_write_file_bool(filp, ubuf, cnt, ppos);

	if (sched_debug_verbose && !orig)
		update_sched_domain_debugfs();
	else if (!sched_debug_verbose && orig) {
		debugfs_remove(sd_dentry);
		sd_dentry = NULL;
	}

	sched_domains_mutex_unlock();
	cpus_read_unlock();

	return result;
}

static const struct file_operations sched_verbose_fops = {
	.read =         debugfs_read_file_bool,
	.write =        sched_verbose_write,
	.open =         simple_open,
	.llseek =       default_llseek,
};

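/*
 * Illustrative usage: flipping "verbose" on populates the "domains"
 * directory (see update_sched_domain_debugfs() below); flipping it off
 * removes the directory again:
 *
 *	echo 1 > /sys/kernel/debug/sched/verbose
 */
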
static const struct seq_operations sched_debug_sops;

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &sched_debug_sops);
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

enum dl_param {
	DL_RUNTIME = 0,
	DL_PERIOD,
};

static unsigned long fair_server_period_max = (1UL << 22) * NSEC_PER_USEC; /* ~4 seconds */
static unsigned long fair_server_period_min = (100) * NSEC_PER_USEC;     /* 100 us */

static ssize_t sched_fair_server_write(struct file *filp, const char __user *ubuf,
				       size_t cnt, loff_t *ppos, enum dl_param param)
{
	long cpu = (long) ((struct seq_file *) filp->private_data)->private;
	struct rq *rq = cpu_rq(cpu);
	u64 runtime, period;
	size_t err;
	int retval;
	u64 value;

	err = kstrtoull_from_user(ubuf, cnt, 10, &value);
	if (err)
		return err;

	scoped_guard (rq_lock_irqsave, rq) {
		runtime  = rq->fair_server.dl_runtime;
		period = rq->fair_server.dl_period;

		switch (param) {
		case DL_RUNTIME:
			if (runtime == value)
				break;
			runtime = value;
			break;
		case DL_PERIOD:
			if (value == period)
				break;
			period = value;
			break;
		}

		if (runtime > period ||
		    period > fair_server_period_max ||
		    period < fair_server_period_min) {
			return  -EINVAL;
		}

		if (rq->cfs.h_nr_queued) {
			update_rq_clock(rq);
			dl_server_stop(&rq->fair_server);
		}

		retval = dl_server_apply_params(&rq->fair_server, runtime, period, 0);
		if (retval)
			cnt = retval;

		if (!runtime)
			printk_deferred("Fair server disabled in CPU %d, system may crash due to starvation.\n",
					cpu_of(rq));

		if (rq->cfs.h_nr_queued)
			dl_server_start(&rq->fair_server);
	}

	*ppos += cnt;
	return cnt;
}

static size_t sched_fair_server_show(struct seq_file *m, void *v, enum dl_param param)
{
	unsigned long cpu = (unsigned long) m->private;
	struct rq *rq = cpu_rq(cpu);
	u64 value;

	switch (param) {
	case DL_RUNTIME:
		value = rq->fair_server.dl_runtime;
		break;
	case DL_PERIOD:
		value = rq->fair_server.dl_period;
		break;
	}

	seq_printf(m, "%llu\n", value);
	return 0;

}

static ssize_t
sched_fair_server_runtime_write(struct file *filp, const char __user *ubuf,
				size_t cnt, loff_t *ppos)
{
	return sched_fair_server_write(filp, ubuf, cnt, ppos, DL_RUNTIME);
}

static int sched_fair_server_runtime_show(struct seq_file *m, void *v)
{
	return sched_fair_server_show(m, v, DL_RUNTIME);
}

static int sched_fair_server_runtime_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_fair_server_runtime_show, inode->i_private);
}

static const struct file_operations fair_server_runtime_fops = {
	.open		= sched_fair_server_runtime_open,
	.write		= sched_fair_server_runtime_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t
sched_fair_server_period_write(struct file *filp, const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	return sched_fair_server_write(filp, ubuf, cnt, ppos, DL_PERIOD);
}

static int sched_fair_server_period_show(struct seq_file *m, void *v)
{
	return sched_fair_server_show(m, v, DL_PERIOD);
}

static int sched_fair_server_period_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_fair_server_period_show, inode->i_private);
}

static const struct file_operations fair_server_period_fops = {
	.open		= sched_fair_server_period_open,
	.write		= sched_fair_server_period_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

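/*
 * Illustrative usage: each CPU gets runtime/period knobs, in nanoseconds,
 * under fair_server/ (created by debugfs_fair_server_init() below):
 *
 *	cat /sys/kernel/debug/sched/fair_server/cpu0/runtime
 *	echo 50000000 > /sys/kernel/debug/sched/fair_server/cpu0/runtime
 */
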
static struct dentry *debugfs_sched;

static void debugfs_fair_server_init(void)
{
	struct dentry *d_fair;
	unsigned long cpu;

	d_fair = debugfs_create_dir("fair_server", debugfs_sched);
	if (!d_fair)
		return;

	for_each_possible_cpu(cpu) {
		struct dentry *d_cpu;
		char buf[32];

		snprintf(buf, sizeof(buf), "cpu%lu", cpu);
		d_cpu = debugfs_create_dir(buf, d_fair);

		debugfs_create_file("runtime", 0644, d_cpu, (void *) cpu, &fair_server_runtime_fops);
		debugfs_create_file("period", 0644, d_cpu, (void *) cpu, &fair_server_period_fops);
	}
}

static __init int sched_init_debug(void)
{
	struct dentry __maybe_unused *numa;

	debugfs_sched = debugfs_create_dir("sched", NULL);

	debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
	debugfs_create_file_unsafe("verbose", 0644, debugfs_sched, &sched_debug_verbose, &sched_verbose_fops);
#ifdef CONFIG_PREEMPT_DYNAMIC
	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
#endif

	debugfs_create_u32("base_slice_ns", 0644, debugfs_sched, &sysctl_sched_base_slice);

	debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
	debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);

	debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
	debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
	debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);

	sched_domains_mutex_lock();
	update_sched_domain_debugfs();
	sched_domains_mutex_unlock();

#ifdef CONFIG_NUMA_BALANCING
	numa = debugfs_create_dir("numa_balancing", debugfs_sched);

	debugfs_create_u32("scan_delay_ms", 0644, numa, &sysctl_numa_balancing_scan_delay);
	debugfs_create_u32("scan_period_min_ms", 0644, numa, &sysctl_numa_balancing_scan_period_min);
	debugfs_create_u32("scan_period_max_ms", 0644, numa, &sysctl_numa_balancing_scan_period_max);
	debugfs_create_u32("scan_size_mb", 0644, numa, &sysctl_numa_balancing_scan_size);
	debugfs_create_u32("hot_threshold_ms", 0644, numa, &sysctl_numa_balancing_hot_threshold);
#endif /* CONFIG_NUMA_BALANCING */

	debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);

	debugfs_fair_server_init();

	return 0;
}
late_initcall(sched_init_debug);

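/*
 * Resulting layout (sketch; entries vary with the kernel configuration):
 *
 *	/sys/kernel/debug/sched/
 *	  features  verbose  preempt  tunable_scaling  debug
 *	  base_slice_ns  migration_cost_ns  nr_migrate
 *	  latency_warn_ms  latency_warn_once
 *	  numa_balancing/  fair_server/  domains/ (with verbose set)
 */
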
static cpumask_var_t		sd_sysctl_cpus;

static int sd_flags_show(struct seq_file *m, void *v)
{
	unsigned long flags = *(unsigned int *)m->private;
	int idx;

	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
		seq_puts(m, sd_flag_debug[idx].name);
		seq_puts(m, " ");
	}
	seq_puts(m, "\n");

	return 0;
}

static int sd_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, sd_flags_show, inode->i_private);
}

static const struct file_operations sd_flags_fops = {
	.open		= sd_flags_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void register_sd(struct sched_domain *sd, struct dentry *parent)
{
#define SDM(type, mode, member)	\
	debugfs_create_##type(#member, mode, parent, &sd->member)

	SDM(ulong, 0644, min_interval);
	SDM(ulong, 0644, max_interval);
	SDM(u64,   0644, max_newidle_lb_cost);
	SDM(u32,   0644, busy_factor);
	SDM(u32,   0644, imbalance_pct);
	SDM(u32,   0644, cache_nice_tries);
	SDM(str,   0444, name);

#undef SDM

	debugfs_create_file("flags", 0444, parent, &sd->flags, &sd_flags_fops);
	debugfs_create_file("groups_flags", 0444, parent, &sd->groups->flags, &sd_flags_fops);
	debugfs_create_u32("level", 0444, parent, (u32 *)&sd->level);

	if (sd->flags & SD_ASYM_PACKING)
		debugfs_create_u32("group_asym_prefer_cpu", 0444, parent,
				   (u32 *)&sd->groups->asym_prefer_cpu);
}

void update_sched_domain_debugfs(void)
{
	int cpu, i;

	/*
	 * This can unfortunately be invoked before sched_init_debug() creates
	 * the debug directory. Don't touch sd_sysctl_cpus until then.
	 */
	if (!debugfs_sched)
		return;

	if (!sched_debug_verbose)
		return;

	if (!cpumask_available(sd_sysctl_cpus)) {
		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
			return;
		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
	}

	if (!sd_dentry) {
		sd_dentry = debugfs_create_dir("domains", debugfs_sched);

		/* rebuild sd_sysctl_cpus if empty since it gets cleared below */
		if (cpumask_empty(sd_sysctl_cpus))
			cpumask_copy(sd_sysctl_cpus, cpu_online_mask);
	}

	for_each_cpu(cpu, sd_sysctl_cpus) {
		struct sched_domain *sd;
		struct dentry *d_cpu;
		char buf[32];

		snprintf(buf, sizeof(buf), "cpu%d", cpu);
		debugfs_lookup_and_remove(buf, sd_dentry);
		d_cpu = debugfs_create_dir(buf, sd_dentry);

		i = 0;
		for_each_domain(cpu, sd) {
			struct dentry *d_sd;

			snprintf(buf, sizeof(buf), "domain%d", i);
			d_sd = debugfs_create_dir(buf, d_cpu);

			register_sd(sd, d_sd);
			i++;
		}

		__cpumask_clear_cpu(cpu, sd_sysctl_cpus);
	}
}

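/*
 * The result is one directory per CPU and per domain level, e.g.
 * (illustrative) domains/cpu0/domain0/{flags,min_interval,...}, with
 * "domain0" the lowest level returned by for_each_domain().
 */
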
void dirty_sched_domain_sysctl(int cpu)
{
	if (cpumask_available(sd_sysctl_cpus))
		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F)		SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)F)
#define P_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld\n",	\
		#F, (long long)schedstat_val(stats->F))
#define PN(F)		SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", \
		#F, SPLIT_NS((long long)schedstat_val(stats->F)))

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);

	if (schedstat_enabled()) {
		struct sched_statistics *stats;
		stats = __schedstats_from_se(se);

		PN_SCHEDSTAT(wait_start);
		PN_SCHEDSTAT(sleep_start);
		PN_SCHEDSTAT(block_start);
		PN_SCHEDSTAT(sleep_max);
		PN_SCHEDSTAT(block_max);
		PN_SCHEDSTAT(exec_max);
		PN_SCHEDSTAT(slice_max);
		PN_SCHEDSTAT(wait_max);
		PN_SCHEDSTAT(wait_sum);
		P_SCHEDSTAT(wait_count);
	}

	P(se->load.weight);
	P(se->avg.load_avg);
	P(se->avg.util_avg);
	P(se->avg.runnable_avg);

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif /* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_CGROUP_SCHED
static DEFINE_SPINLOCK(sched_debug_lock);
static char group_path[PATH_MAX];

static void task_group_path(struct task_group *tg, char *path, int plen)
{
	if (autogroup_path(tg, path, plen))
		return;

	cgroup_path(tg->css.cgroup, path, plen);
}

/*
 * Only one SEQ_printf_task_group_path() caller can use the full-length
 * group_path[] for the cgroup path. Other simultaneous callers have to
 * fall back to a shorter stack buffer. A "..." suffix is appended to the
 * stack buffer so that truncation is visible whenever the path fills the
 * whole buffer.
 */
#define SEQ_printf_task_group_path(m, tg, fmt...)			\
{									\
	if (spin_trylock(&sched_debug_lock)) {				\
		task_group_path(tg, group_path, sizeof(group_path));	\
		SEQ_printf(m, fmt, group_path);				\
		spin_unlock(&sched_debug_lock);				\
	} else {							\
		char buf[128];						\
		char *bufend = buf + sizeof(buf) - 3;			\
		task_group_path(tg, buf, bufend - buf);			\
		strcpy(bufend - 1, "...");				\
		SEQ_printf(m, fmt, buf);				\
	}								\
}
#endif

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (task_current(rq, p))
		SEQ_printf(m, ">R");
	else
		SEQ_printf(m, " %c", task_state_to_char(p));

	SEQ_printf(m, " %15s %5d %9Ld.%06ld   %c   %9Ld.%06ld %c %9Ld.%06ld %9Ld.%06ld %9Ld   %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		entity_eligible(cfs_rq_of(&p->se), &p->se) ? 'E' : 'N',
		SPLIT_NS(p->se.deadline),
		p->se.custom_slice ? 'S' : ' ',
		SPLIT_NS(p->se.slice),
		SPLIT_NS(p->se.sum_exec_runtime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);

	SEQ_printf(m, "%9lld.%06ld %9lld.%06ld %9lld.%06ld",
		SPLIT_NS(schedstat_val_or_zero(p->stats.wait_sum)),
		SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)),
		SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, "   %d      %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf_task_group_path(m, task_group(p), "        %s")
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "runnable tasks:\n");
	SEQ_printf(m, " S            task   PID       vruntime   eligible    "
		   "deadline             slice          sum-exec      switches  "
		   "prio         wait-time        sum-sleep       sum-block"
#ifdef CONFIG_NUMA_BALANCING
		   "  node   group-id"
#endif
#ifdef CONFIG_CGROUP_SCHED
		   "  group-path"
#endif
		   "\n");
	SEQ_printf(m, "-------------------------------------------------------"
		   "------------------------------------------------------"
		   "------------------------------------------------------"
#ifdef CONFIG_NUMA_BALANCING
		   "--------------"
#endif
#ifdef CONFIG_CGROUP_SCHED
		   "--------------"
#endif
		   "\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 left_vruntime = -1, min_vruntime, right_vruntime = -1, left_deadline = -1, spread;
	struct sched_entity *last, *first, *root;
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif

	raw_spin_rq_lock_irqsave(rq, flags);
	root = __pick_root_entity(cfs_rq);
	if (root)
		left_vruntime = root->min_vruntime;
	first = __pick_first_entity(cfs_rq);
	if (first)
		left_deadline = first->deadline;
	last = __pick_last_entity(cfs_rq);
	if (last)
		right_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	raw_spin_rq_unlock_irqrestore(rq, flags);

	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "left_deadline",
			SPLIT_NS(left_deadline));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "left_vruntime",
			SPLIT_NS(left_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "avg_vruntime",
			SPLIT_NS(avg_vruntime(cfs_rq)));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "right_vruntime",
			SPLIT_NS(right_vruntime));
	spread = right_vruntime - left_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_queued", cfs_rq->nr_queued);
	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_runnable", cfs_rq->h_nr_runnable);
	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_queued", cfs_rq->h_nr_queued);
	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_idle", cfs_rq->h_nr_idle);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_avg",
			cfs_rq->avg.runnable_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, "  .%-30s: %u\n", "util_est",
			cfs_rq->avg.util_est);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
			cfs_rq->removed.load_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
			cfs_rq->removed.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_avg",
			cfs_rq->removed.runnable_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, rt_rq->tg, "rt_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	PU(rt_nr_running);

#ifdef CONFIG_RT_GROUP_SCHED
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);
#endif

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "dl_rq[%d]:\n", cpu);

#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

	PU(dl_nr_running);
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else /* !CONFIG_X86: */
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif /* !CONFIG_X86 */

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %d\n", #x, (int)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	P(nr_switches);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
#undef P
#undef PN

#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_base_slice);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu) {
		/*
		 * Need to reset softlockup watchdogs on all CPUs, because
		 * another CPU might be blocked waiting for us to process
		 * an IPI or stop_machine.
		 */
		touch_nmi_watchdog();
		touch_all_softlockup_watchdogs();
		print_cpu(NULL, cpu);
	}
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is CPU 0.
 * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}

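/*
 * Worked example (illustrative): with CPUs 0 and 2 online, *offset maps
 * 0 -> header, 1 -> CPU 0, 2 -> CPU 2 (cpumask_next() skips the offline
 * CPU 1), and the next position runs off the mask and ends the sequence.
 */
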
static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start		= sched_debug_start,
	.next		= sched_debug_next,
	.stop		= sched_debug_stop,
	.show		= sched_debug_show,
};

#define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
#define __P(F) __PS(#F, F)
#define   P(F) __PS(#F, p->F)
#define   PM(F, M) __PS(#F, p->F & (M))
#define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
#define __PN(F) __PSN(#F, F)
#define   PN(F) __PSN(#F, p->F)


#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif


static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	if (p->mm)
		P(mm->numa_scan_seq);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
#endif /* CONFIG_NUMA_BALANCING */
}

void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
						  struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");

#define P_SCHEDSTAT(F)  __PS(#F, schedstat_val(p->stats.F))
#define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->stats.F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(sum_sleep_runtime);
		PN_SCHEDSTAT(sum_block_runtime);
		PN_SCHEDSTAT(wait_start);
		PN_SCHEDSTAT(sleep_start);
		PN_SCHEDSTAT(block_start);
		PN_SCHEDSTAT(sleep_max);
		PN_SCHEDSTAT(block_max);
		PN_SCHEDSTAT(exec_max);
		PN_SCHEDSTAT(slice_max);
		PN_SCHEDSTAT(wait_max);
		PN_SCHEDSTAT(wait_sum);
		P_SCHEDSTAT(wait_count);
		PN_SCHEDSTAT(iowait_sum);
		P_SCHEDSTAT(iowait_count);
		P_SCHEDSTAT(nr_migrations_cold);
		P_SCHEDSTAT(nr_failed_migrations_affine);
		P_SCHEDSTAT(nr_failed_migrations_running);
		P_SCHEDSTAT(nr_failed_migrations_hot);
		P_SCHEDSTAT(nr_forced_migrations);
		P_SCHEDSTAT(nr_wakeups);
		P_SCHEDSTAT(nr_wakeups_sync);
		P_SCHEDSTAT(nr_wakeups_migrate);
		P_SCHEDSTAT(nr_wakeups_local);
		P_SCHEDSTAT(nr_wakeups_remote);
		P_SCHEDSTAT(nr_wakeups_affine);
		P_SCHEDSTAT(nr_wakeups_affine_attempts);
		P_SCHEDSTAT(nr_wakeups_passive);
		P_SCHEDSTAT(nr_wakeups_idle);

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);

#ifdef CONFIG_SCHED_CORE
		PN_SCHEDSTAT(core_forceidle_sum);
#endif
	}

	__P(nr_switches);
	__PS("nr_voluntary_switches", p->nvcsw);
	__PS("nr_involuntary_switches", p->nivcsw);

	P(se.load.weight);
	P(se.avg.load_sum);
	P(se.avg.runnable_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.runnable_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
	PM(se.avg.util_est, ~UTIL_AVG_UNCHANGED);
#ifdef CONFIG_UCLAMP_TASK
	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
	__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
	__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
	__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
#endif /* CONFIG_UCLAMP_TASK */
	P(policy);
	P(prio);
	if (task_has_dl_policy(p)) {
		P(dl.runtime);
		P(dl.deadline);
	} else if (fair_policy(p->policy)) {
		P(se.slice);
	}
#ifdef CONFIG_SCHED_CLASS_EXT
	__PS("ext.enabled", task_on_scx(p));
#endif
#undef PN_SCHEDSTAT
#undef P_SCHEDSTAT

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		__PS("clock-delta", t1-t0);
	}

	sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->stats, 0, sizeof(p->stats));
#endif
}

void resched_latency_warn(int cpu, u64 latency)
{
	static DEFINE_RATELIMIT_STATE(latency_check_ratelimit, 60 * 60 * HZ, 1);

	if (likely(!__ratelimit(&latency_check_ratelimit)))
		return;

	pr_err("sched: CPU %d need_resched set for > %llu ns (%d ticks) without schedule\n",
	       cpu, latency, cpu_rq(cpu)->ticks_without_resched);
	dump_stack();
}