// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree and other debugging details
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 */

/*
 * This allows printing both to /sys/kernel/debug/sched/debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		pr_cont(x);			\
} while (0)
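
/*
 * For example, the sysrq dump path below calls print_cpu(NULL, cpu):
 * with a NULL seq_file every SEQ_printf() falls back to pr_cont(), so
 * the same dump code can target either the debugfs file or the console.
 */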

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
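
/*
 * SPLIT_NS() expands to *two* arguments and therefore has to be paired
 * with a "%Ld.%06ld"-style format. E.g. (illustrative) 1234567890 ns
 * prints as "1234.567890": nsec_high() yields the quotient (1234 ms)
 * and nsec_low() the nanosecond remainder (567890).
 */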

#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT
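
/*
 * The above is the usual X-macro pattern: features.h is a list of
 * SCHED_FEAT(name, enabled) entries, so under the temporary definition
 * above an entry such as SCHED_FEAT(FEAT_X, true) (FEAT_X being a
 * placeholder) expands to the string literal "FEAT_X", building the
 * name table in feature-bit order.
 */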

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef CONFIG_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable_cpuslocked(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* CONFIG_JUMP_LABEL */

static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
	if (i < 0)
		return i;

	if (neg) {
		sysctl_sched_features &= ~(1UL << i);
		sched_feat_disable(i);
	} else {
		sysctl_sched_features |= (1UL << i);
		sched_feat_enable(i);
	}

	return 0;
}
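
/*
 * From userspace this looks like (FEAT_X/FEAT_Y being placeholder
 * feature names):
 *
 *	# cat /sys/kernel/debug/sched/features
 *	FEAT_X NO_FEAT_Y ...
 *	# echo NO_FEAT_X > /sys/kernel/debug/sched/features
 *
 * which clears the bit in sysctl_sched_features and flips the matching
 * static key.
 */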

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int ret;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	cpus_read_lock();
	inode_lock(inode);
	ret = sched_feat_set(cmp);
	inode_unlock(inode);
	cpus_read_unlock();
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#ifdef CONFIG_SMP

static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	char buf[16];
	unsigned int scaling;

	if (cnt > 15)
		cnt = 15;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;
	buf[cnt] = '\0';

	if (kstrtouint(buf, 10, &scaling))
		return -EINVAL;

	if (scaling >= SCHED_TUNABLESCALING_END)
		return -EINVAL;

	sysctl_sched_tunable_scaling = scaling;
	if (sched_update_scaling())
		return -EINVAL;

	*ppos += cnt;
	return cnt;
}

static int sched_scaling_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", sysctl_sched_tunable_scaling);
	return 0;
}

static int sched_scaling_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_scaling_show, NULL);
}

static const struct file_operations sched_scaling_fops = {
	.open		= sched_scaling_open,
	.write		= sched_scaling_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
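
/*
 * Accepted values index sched_tunable_scaling_names[] further down:
 * 0 (none), 1 (logarithmic), 2 (linear). E.g. (illustrative):
 *
 *	# echo 0 > /sys/kernel/debug/sched/tunable_scaling
 *
 * selects "none", i.e. no rescaling of the base tunables with the
 * online CPU count.
 */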

#endif /* CONFIG_SMP */

#ifdef CONFIG_PREEMPT_DYNAMIC

static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	char buf[16];
	int mode;

	if (cnt > 15)
		cnt = 15;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	mode = sched_dynamic_mode(strstrip(buf));
	if (mode < 0)
		return mode;

	sched_dynamic_update(mode);

	*ppos += cnt;

	return cnt;
}

static int sched_dynamic_show(struct seq_file *m, void *v)
{
	static const char * preempt_modes[] = {
		"none", "voluntary", "full", "lazy",
	};
	int j = ARRAY_SIZE(preempt_modes) - !IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY);
	int i = IS_ENABLED(CONFIG_PREEMPT_RT) * 2;

	for (; i < j; i++) {
		if (preempt_dynamic_mode == i)
			seq_puts(m, "(");
		seq_puts(m, preempt_modes[i]);
		if (preempt_dynamic_mode == i)
			seq_puts(m, ")");

		seq_puts(m, " ");
	}

	seq_puts(m, "\n");
	return 0;
}
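
/*
 * The current mode is shown in parentheses; e.g. (illustrative) a
 * !CONFIG_PREEMPT_RT kernel running fully preemptible prints:
 *
 *	# cat /sys/kernel/debug/sched/preempt
 *	none voluntary (full) lazy
 *
 * "lazy" only appears with CONFIG_ARCH_HAS_PREEMPT_LAZY, and RT
 * kernels hide "none" and "voluntary" (hence i starting at 2 above).
 */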

static int sched_dynamic_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_dynamic_show, NULL);
}

static const struct file_operations sched_dynamic_fops = {
	.open		= sched_dynamic_open,
	.write		= sched_dynamic_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#endif /* CONFIG_PREEMPT_DYNAMIC */

__read_mostly bool sched_debug_verbose;

#ifdef CONFIG_SMP
static struct dentry *sd_dentry;

static ssize_t sched_verbose_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	ssize_t result;
	bool orig;

	cpus_read_lock();
	mutex_lock(&sched_domains_mutex);

	orig = sched_debug_verbose;
	result = debugfs_write_file_bool(filp, ubuf, cnt, ppos);

	if (sched_debug_verbose && !orig)
		update_sched_domain_debugfs();
	else if (!sched_debug_verbose && orig) {
		debugfs_remove(sd_dentry);
		sd_dentry = NULL;
	}

	mutex_unlock(&sched_domains_mutex);
	cpus_read_unlock();

	return result;
}
#else
#define sched_verbose_write debugfs_write_file_bool
#endif

static const struct file_operations sched_verbose_fops = {
	.read		= debugfs_read_file_bool,
	.write		= sched_verbose_write,
	.open		= simple_open,
	.llseek		= default_llseek,
};
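
/*
 * I.e. (illustrative):
 *
 *	# echo Y > /sys/kernel/debug/sched/verbose
 *
 * populates the per-CPU sched-domain hierarchy under
 * /sys/kernel/debug/sched/domains/, and echoing N tears it down again;
 * the cpus/sched_domains locking above keeps the rebuild coherent
 * against concurrent hotplug.
 */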

static const struct seq_operations sched_debug_sops;

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &sched_debug_sops);
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

enum dl_param {
	DL_RUNTIME = 0,
	DL_PERIOD,
};

static unsigned long fair_server_period_max = (1UL << 22) * NSEC_PER_USEC; /* ~4 seconds */
static unsigned long fair_server_period_min = (100) * NSEC_PER_USEC;	    /* 100 us */
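
/*
 * I.e. the period must lie in [100 us, (1 << 22) us ~= 4.19 s];
 * the write handler below additionally rejects runtime > period.
 */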

static ssize_t sched_fair_server_write(struct file *filp, const char __user *ubuf,
				       size_t cnt, loff_t *ppos, enum dl_param param)
{
	long cpu = (long) ((struct seq_file *) filp->private_data)->private;
	struct rq *rq = cpu_rq(cpu);
	u64 runtime, period;
	size_t err;
	int retval;
	u64 value;

	err = kstrtoull_from_user(ubuf, cnt, 10, &value);
	if (err)
		return err;

	scoped_guard (rq_lock_irqsave, rq) {
		runtime = rq->fair_server.dl_runtime;
		period = rq->fair_server.dl_period;

		switch (param) {
		case DL_RUNTIME:
			if (runtime == value)
				break;
			runtime = value;
			break;
		case DL_PERIOD:
			if (value == period)
				break;
			period = value;
			break;
		}

		if (runtime > period ||
		    period > fair_server_period_max ||
		    period < fair_server_period_min) {
			return -EINVAL;
		}

		if (rq->cfs.h_nr_running) {
			update_rq_clock(rq);
			dl_server_stop(&rq->fair_server);
		}

		retval = dl_server_apply_params(&rq->fair_server, runtime, period, 0);
		if (retval)
			cnt = retval;

		if (!runtime)
			printk_deferred("Fair server disabled in CPU %d, system may crash due to starvation.\n",
					cpu_of(rq));

		if (rq->cfs.h_nr_running)
			dl_server_start(&rq->fair_server);
	}

	*ppos += cnt;
	return cnt;
}

static size_t sched_fair_server_show(struct seq_file *m, void *v, enum dl_param param)
{
	unsigned long cpu = (unsigned long) m->private;
	struct rq *rq = cpu_rq(cpu);
	u64 value;

	switch (param) {
	case DL_RUNTIME:
		value = rq->fair_server.dl_runtime;
		break;
	case DL_PERIOD:
		value = rq->fair_server.dl_period;
		break;
	}

	seq_printf(m, "%llu\n", value);
	return 0;
}

static ssize_t
sched_fair_server_runtime_write(struct file *filp, const char __user *ubuf,
				size_t cnt, loff_t *ppos)
{
	return sched_fair_server_write(filp, ubuf, cnt, ppos, DL_RUNTIME);
}

static int sched_fair_server_runtime_show(struct seq_file *m, void *v)
{
	return sched_fair_server_show(m, v, DL_RUNTIME);
}

static int sched_fair_server_runtime_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_fair_server_runtime_show, inode->i_private);
}

static const struct file_operations fair_server_runtime_fops = {
	.open		= sched_fair_server_runtime_open,
	.write		= sched_fair_server_runtime_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t
sched_fair_server_period_write(struct file *filp, const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	return sched_fair_server_write(filp, ubuf, cnt, ppos, DL_PERIOD);
}

static int sched_fair_server_period_show(struct seq_file *m, void *v)
{
	return sched_fair_server_show(m, v, DL_PERIOD);
}

static int sched_fair_server_period_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_fair_server_period_show, inode->i_private);
}

static const struct file_operations fair_server_period_fops = {
	.open		= sched_fair_server_period_open,
	.write		= sched_fair_server_period_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static struct dentry *debugfs_sched;

static void debugfs_fair_server_init(void)
{
	struct dentry *d_fair;
	unsigned long cpu;

	d_fair = debugfs_create_dir("fair_server", debugfs_sched);
	if (!d_fair)
		return;

	for_each_possible_cpu(cpu) {
		struct dentry *d_cpu;
		char buf[32];

		snprintf(buf, sizeof(buf), "cpu%lu", cpu);
		d_cpu = debugfs_create_dir(buf, d_fair);

		debugfs_create_file("runtime", 0644, d_cpu, (void *) cpu, &fair_server_runtime_fops);
		debugfs_create_file("period", 0644, d_cpu, (void *) cpu, &fair_server_period_fops);
	}
}
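
/*
 * The resulting layout per CPU is (illustrative, for CPU 0):
 *
 *	/sys/kernel/debug/sched/fair_server/cpu0/runtime
 *	/sys/kernel/debug/sched/fair_server/cpu0/period
 *
 * with both files taking plain decimal nanosecond values.
 */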

static __init int sched_init_debug(void)
{
	struct dentry __maybe_unused *numa;

	debugfs_sched = debugfs_create_dir("sched", NULL);

	debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
	debugfs_create_file_unsafe("verbose", 0644, debugfs_sched, &sched_debug_verbose, &sched_verbose_fops);
#ifdef CONFIG_PREEMPT_DYNAMIC
	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
#endif

	debugfs_create_u32("base_slice_ns", 0644, debugfs_sched, &sysctl_sched_base_slice);

	debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
	debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);

#ifdef CONFIG_SMP
	debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
	debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
	debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);

	mutex_lock(&sched_domains_mutex);
	update_sched_domain_debugfs();
	mutex_unlock(&sched_domains_mutex);
#endif

#ifdef CONFIG_NUMA_BALANCING
	numa = debugfs_create_dir("numa_balancing", debugfs_sched);

	debugfs_create_u32("scan_delay_ms", 0644, numa, &sysctl_numa_balancing_scan_delay);
	debugfs_create_u32("scan_period_min_ms", 0644, numa, &sysctl_numa_balancing_scan_period_min);
	debugfs_create_u32("scan_period_max_ms", 0644, numa, &sysctl_numa_balancing_scan_period_max);
	debugfs_create_u32("scan_size_mb", 0644, numa, &sysctl_numa_balancing_scan_size);
	debugfs_create_u32("hot_threshold_ms", 0644, numa, &sysctl_numa_balancing_hot_threshold);
#endif

	debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);

	debugfs_fair_server_init();

	return 0;
}
late_initcall(sched_init_debug);

#ifdef CONFIG_SMP

static cpumask_var_t sd_sysctl_cpus;

static int sd_flags_show(struct seq_file *m, void *v)
{
	unsigned long flags = *(unsigned int *)m->private;
	int idx;

	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
		seq_puts(m, sd_flag_debug[idx].name);
		seq_puts(m, " ");
	}
	seq_puts(m, "\n");

	return 0;
}

static int sd_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, sd_flags_show, inode->i_private);
}

static const struct file_operations sd_flags_fops = {
	.open		= sd_flags_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void register_sd(struct sched_domain *sd, struct dentry *parent)
{
#define SDM(type, mode, member)	\
	debugfs_create_##type(#member, mode, parent, &sd->member)

	SDM(ulong, 0644, min_interval);
	SDM(ulong, 0644, max_interval);
	SDM(u64, 0644, max_newidle_lb_cost);
	SDM(u32, 0644, busy_factor);
	SDM(u32, 0644, imbalance_pct);
	SDM(u32, 0644, cache_nice_tries);
	SDM(str, 0444, name);

#undef SDM

	debugfs_create_file("flags", 0444, parent, &sd->flags, &sd_flags_fops);
	debugfs_create_file("groups_flags", 0444, parent, &sd->groups->flags, &sd_flags_fops);
	debugfs_create_u32("level", 0444, parent, (u32 *)&sd->level);
}

void update_sched_domain_debugfs(void)
{
	int cpu, i;

	/*
	 * This can unfortunately be invoked before sched_init_debug() creates
	 * the debug directory. Don't touch sd_sysctl_cpus until then.
	 */
	if (!debugfs_sched)
		return;

	if (!sched_debug_verbose)
		return;

	if (!cpumask_available(sd_sysctl_cpus)) {
		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
			return;
		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
	}

	if (!sd_dentry) {
		sd_dentry = debugfs_create_dir("domains", debugfs_sched);

		/* rebuild sd_sysctl_cpus if empty since it gets cleared below */
		if (cpumask_empty(sd_sysctl_cpus))
			cpumask_copy(sd_sysctl_cpus, cpu_online_mask);
	}

	for_each_cpu(cpu, sd_sysctl_cpus) {
		struct sched_domain *sd;
		struct dentry *d_cpu;
		char buf[32];

		snprintf(buf, sizeof(buf), "cpu%d", cpu);
		debugfs_lookup_and_remove(buf, sd_dentry);
		d_cpu = debugfs_create_dir(buf, sd_dentry);

		i = 0;
		for_each_domain(cpu, sd) {
			struct dentry *d_sd;

			snprintf(buf, sizeof(buf), "domain%d", i);
			d_sd = debugfs_create_dir(buf, d_cpu);

			register_sd(sd, d_sd);
			i++;
		}

		__cpumask_clear_cpu(cpu, sd_sysctl_cpus);
	}
}
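
/*
 * With verbose enabled this yields one directory per CPU and domain
 * level, e.g. (illustrative):
 *
 *	/sys/kernel/debug/sched/domains/cpu0/domain0/{name,flags,
 *	groups_flags,level,min_interval,max_interval,...}
 */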

void dirty_sched_domain_sysctl(int cpu)
{
	if (cpumask_available(sd_sysctl_cpus))
		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
}

#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F)		SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)F)
#define P_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld\n",	\
		#F, (long long)schedstat_val(stats->F))
#define PN(F)		SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", \
		#F, SPLIT_NS((long long)schedstat_val(stats->F)))

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);

	if (schedstat_enabled()) {
		struct sched_statistics *stats;
		stats = __schedstats_from_se(se);

		PN_SCHEDSTAT(wait_start);
		PN_SCHEDSTAT(sleep_start);
		PN_SCHEDSTAT(block_start);
		PN_SCHEDSTAT(sleep_max);
		PN_SCHEDSTAT(block_max);
		PN_SCHEDSTAT(exec_max);
		PN_SCHEDSTAT(slice_max);
		PN_SCHEDSTAT(wait_max);
		PN_SCHEDSTAT(wait_sum);
		P_SCHEDSTAT(wait_count);
	}

	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.load_avg);
	P(se->avg.util_avg);
	P(se->avg.runnable_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static DEFINE_SPINLOCK(sched_debug_lock);
static char group_path[PATH_MAX];

static void task_group_path(struct task_group *tg, char *path, int plen)
{
	if (autogroup_path(tg, path, plen))
		return;

	cgroup_path(tg->css.cgroup, path, plen);
}

/*
 * Only 1 SEQ_printf_task_group_path() caller can use the full length
 * group_path[] for cgroup path. Other simultaneous callers will have
 * to use a shorter stack buffer. A "..." suffix is appended at the end
 * of the stack buffer so that it will show up in case the output length
 * matches the given buffer size to indicate possible path name truncation.
 */
#define SEQ_printf_task_group_path(m, tg, fmt...)			\
{									\
	if (spin_trylock(&sched_debug_lock)) {				\
		task_group_path(tg, group_path, sizeof(group_path));	\
		SEQ_printf(m, fmt, group_path);				\
		spin_unlock(&sched_debug_lock);				\
	} else {							\
		char buf[128];						\
		char *bufend = buf + sizeof(buf) - 3;			\
		task_group_path(tg, buf, bufend - buf);			\
		strcpy(bufend - 1, "...");				\
		SEQ_printf(m, fmt, buf);				\
	}								\
}
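
/*
 * In the contended fallback the path lands in the 128-byte stack
 * buffer, so a hierarchy deep enough to fill it (illustrative:
 * /a/b/.../long-leaf-name) is printed with a trailing "..." and the
 * truncation stays visible in the dump.
 */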
#endif

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (task_current(rq, p))
		SEQ_printf(m, ">R");
	else
		SEQ_printf(m, " %c", task_state_to_char(p));

	SEQ_printf(m, " %15s %5d %9Ld.%06ld %c %9Ld.%06ld %c %9Ld.%06ld %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		entity_eligible(cfs_rq_of(&p->se), &p->se) ? 'E' : 'N',
		SPLIT_NS(p->se.deadline),
		p->se.custom_slice ? 'S' : ' ',
		SPLIT_NS(p->se.slice),
		SPLIT_NS(p->se.sum_exec_runtime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);

	SEQ_printf(m, "%9lld.%06ld %9lld.%06ld %9lld.%06ld",
		SPLIT_NS(schedstat_val_or_zero(p->stats.wait_sum)),
		SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)),
		SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf_task_group_path(m, task_group(p), " %s")
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "runnable tasks:\n");
	SEQ_printf(m, " S            task   PID       vruntime   eligible    "
		   "deadline             slice          sum-exec      switches  "
		   "prio         wait-time        sum-sleep       sum-block"
#ifdef CONFIG_NUMA_BALANCING
		   "  node   group-id"
#endif
#ifdef CONFIG_CGROUP_SCHED
		   "  group-path"
#endif
		   "\n");
	SEQ_printf(m, "-------------------------------------------------------"
		   "------------------------------------------------------"
		   "------------------------------------------------------"
#ifdef CONFIG_NUMA_BALANCING
		   "--------------"
#endif
#ifdef CONFIG_CGROUP_SCHED
		   "--------------"
#endif
		   "\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 left_vruntime = -1, min_vruntime, right_vruntime = -1, left_deadline = -1, spread;
	struct sched_entity *last, *first, *root;
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif

	raw_spin_rq_lock_irqsave(rq, flags);
	root = __pick_root_entity(cfs_rq);
	if (root)
		left_vruntime = root->min_vruntime;
	first = __pick_first_entity(cfs_rq);
	if (first)
		left_deadline = first->deadline;
	last = __pick_last_entity(cfs_rq);
	if (last)
		right_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	raw_spin_rq_unlock_irqrestore(rq, flags);

	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "left_deadline",
			SPLIT_NS(left_deadline));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "left_vruntime",
			SPLIT_NS(left_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "avg_vruntime",
			SPLIT_NS(avg_vruntime(cfs_rq)));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "right_vruntime",
			SPLIT_NS(right_vruntime));
	spread = right_vruntime - left_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running);
	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_delayed", cfs_rq->h_nr_delayed);
	SEQ_printf(m, "  .%-30s: %d\n", "idle_nr_running",
			cfs_rq->idle_nr_running);
	SEQ_printf(m, "  .%-30s: %d\n", "idle_h_nr_running",
			cfs_rq->idle_h_nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_avg",
			cfs_rq->avg.runnable_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, "  .%-30s: %u\n", "util_est",
			cfs_rq->avg.util_est);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
			cfs_rq->removed.load_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
			cfs_rq->removed.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_avg",
			cfs_rq->removed.runnable_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, rt_rq->tg, "rt_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	PU(rt_nr_running);

#ifdef CONFIG_RT_GROUP_SCHED
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);
#endif

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "dl_rq[%d]:\n", cpu);

#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

	PU(dl_nr_running);
#ifdef CONFIG_SMP
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
	dl_bw = &dl_rq->dl_bw;
#endif
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %d\n", #x, (int)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	P(nr_switches);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_base_slice);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu) {
		/*
		 * Need to reset softlockup watchdogs on all CPUs, because
		 * another CPU might be blocked waiting for us to process
		 * an IPI or stop_machine.
		 */
		touch_nmi_watchdog();
		touch_all_softlockup_watchdogs();
		print_cpu(NULL, cpu);
	}
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is CPU 0.
 * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
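/*
 * Concretely (illustrative), with online CPUs {0, 2}: offset 0 yields
 * (void *)1 (the header), offset 1 yields (void *)2 (CPU 0), the next
 * iteration yields (void *)4 (CPU 2, *offset jumping to 3) and the one
 * after that returns NULL (EOF). sched_debug_show() undoes the bias
 * with (v - 2), the header mapping to -1.
 */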
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start		= sched_debug_start,
	.next		= sched_debug_next,
	.stop		= sched_debug_stop,
	.show		= sched_debug_show,
};

#define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
#define __P(F) __PS(#F, F)
#define   P(F) __PS(#F, p->F)
#define   PM(F, M) __PS(#F, p->F & (M))
#define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
#define __PN(F) __PSN(#F, F)
#define   PN(F) __PSN(#F, p->F)
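
/*
 * The double-underscore variants take an explicit expression while the
 * plain ones dereference the task; e.g. (illustrative) P(policy)
 * prints "policy ... : 0" for a SCHED_NORMAL task, and PN(se.vruntime)
 * splits the nanosecond value into the "%Ld.%06ld" form via SPLIT_NS().
 */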

#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif

static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	if (p->mm)
		P(mm->numa_scan_seq);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
#endif
}

void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
						  struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");

#define P_SCHEDSTAT(F)  __PS(#F, schedstat_val(p->stats.F))
#define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->stats.F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(sum_sleep_runtime);
		PN_SCHEDSTAT(sum_block_runtime);
		PN_SCHEDSTAT(wait_start);
		PN_SCHEDSTAT(sleep_start);
		PN_SCHEDSTAT(block_start);
		PN_SCHEDSTAT(sleep_max);
		PN_SCHEDSTAT(block_max);
		PN_SCHEDSTAT(exec_max);
		PN_SCHEDSTAT(slice_max);
		PN_SCHEDSTAT(wait_max);
		PN_SCHEDSTAT(wait_sum);
		P_SCHEDSTAT(wait_count);
		PN_SCHEDSTAT(iowait_sum);
		P_SCHEDSTAT(iowait_count);
		P_SCHEDSTAT(nr_migrations_cold);
		P_SCHEDSTAT(nr_failed_migrations_affine);
		P_SCHEDSTAT(nr_failed_migrations_running);
		P_SCHEDSTAT(nr_failed_migrations_hot);
		P_SCHEDSTAT(nr_forced_migrations);
		P_SCHEDSTAT(nr_wakeups);
		P_SCHEDSTAT(nr_wakeups_sync);
		P_SCHEDSTAT(nr_wakeups_migrate);
		P_SCHEDSTAT(nr_wakeups_local);
		P_SCHEDSTAT(nr_wakeups_remote);
		P_SCHEDSTAT(nr_wakeups_affine);
		P_SCHEDSTAT(nr_wakeups_affine_attempts);
		P_SCHEDSTAT(nr_wakeups_passive);
		P_SCHEDSTAT(nr_wakeups_idle);

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);

#ifdef CONFIG_SCHED_CORE
		PN_SCHEDSTAT(core_forceidle_sum);
#endif
	}

	__P(nr_switches);
	__PS("nr_voluntary_switches", p->nvcsw);
	__PS("nr_involuntary_switches", p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.load_sum);
	P(se.avg.runnable_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.runnable_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
	PM(se.avg.util_est, ~UTIL_AVG_UNCHANGED);
#endif
#ifdef CONFIG_UCLAMP_TASK
	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
	__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
	__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
	__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
#endif
	P(policy);
	P(prio);
	if (task_has_dl_policy(p)) {
		P(dl.runtime);
		P(dl.deadline);
	}
#ifdef CONFIG_SCHED_CLASS_EXT
	__PS("ext.enabled", task_on_scx(p));
#endif
#undef PN_SCHEDSTAT
#undef P_SCHEDSTAT

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		__PS("clock-delta", t1-t0);
	}

	sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->stats, 0, sizeof(p->stats));
#endif
}

void resched_latency_warn(int cpu, u64 latency)
{
	static DEFINE_RATELIMIT_STATE(latency_check_ratelimit, 60 * 60 * HZ, 1);

	WARN(__ratelimit(&latency_check_ratelimit),
	     "sched: CPU %d need_resched set for > %llu ns (%d ticks) "
	     "without schedule\n",
	     cpu, latency, cpu_rq(cpu)->ticks_without_resched);
}