/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/proc_fs.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>
#include <linux/mempolicy.h>
#include <linux/debugfs.h>

#include "sched.h"

static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);	\
	else					\
		printk(x);			\
 } while (0)

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)

#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef HAVE_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { }
static void sched_feat_enable(int i) { }
#endif /* HAVE_JUMP_LABEL */

static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (strcmp(cmp, sched_feat_names[i]) == 0) {
			if (neg) {
				sysctl_sched_features &= ~(1UL << i);
				sched_feat_disable(i);
			} else {
				sysctl_sched_features |= (1UL << i);
				sched_feat_enable(i);
			}
			break;
		}
	}

	return i;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int i;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	inode_lock(inode);
	i = sched_feat_set(cmp);
	inode_unlock(inode);
	if (i == __SCHED_FEAT_NR)
		return -EINVAL;

	*ppos += cnt;

	return cnt;
}
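/*
 * Illustrative usage of the interface implemented below (assuming debugfs
 * is mounted at /sys/kernel/debug; the feature names come from features.h,
 * GENTLE_FAIR_SLEEPERS is just an example):
 *
 *	# cat /sys/kernel/debug/sched_features
 *	GENTLE_FAIR_SLEEPERS START_DEBIT ...
 *	# echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched_features
 *
 * A "NO_" prefix clears the feature bit; a string matching no known
 * feature name makes the write fail with -EINVAL.
 */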
static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	return 0;
}
late_initcall(sched_init_debug);

#ifdef CONFIG_SMP

#ifdef CONFIG_SYSCTL

static struct ctl_table sd_ctl_dir[] = {
	{
		.procname	= "sched_domain",
		.mode		= 0555,
	},
	{}
};

static struct ctl_table sd_ctl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= sd_ctl_dir,
	},
	{}
};

static struct ctl_table *sd_alloc_ctl_entry(int n)
{
	struct ctl_table *entry =
		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);

	return entry;
}

static void sd_free_ctl_entry(struct ctl_table **tablep)
{
	struct ctl_table *entry;

	/*
	 * In the intermediate directories, both the child directory and
	 * procname are dynamically allocated and could fail but the mode
	 * will always be set. In the lowest directory the names are
	 * static strings and all have proc handlers.
	 */
	for (entry = *tablep; entry->mode; entry++) {
		if (entry->child)
			sd_free_ctl_entry(&entry->child);
		if (entry->proc_handler == NULL)
			kfree(entry->procname);
	}

	kfree(*tablep);
	*tablep = NULL;
}

static int min_load_idx = 0;
static int max_load_idx = CPU_LOAD_IDX_MAX-1;

static void
set_table_entry(struct ctl_table *entry,
		const char *procname, void *data, int maxlen,
		umode_t mode, proc_handler *proc_handler,
		bool load_idx)
{
	entry->procname = procname;
	entry->data = data;
	entry->maxlen = maxlen;
	entry->mode = mode;
	entry->proc_handler = proc_handler;

	if (load_idx) {
		entry->extra1 = &min_load_idx;
		entry->extra2 = &max_load_idx;
	}
}
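/*
 * The tables built below chain together via .child into a hierarchy that
 * sysctl exposes under /proc/sys. The resulting layout (cpu and domain
 * counts are machine dependent, of course) looks roughly like:
 *
 *	/proc/sys/kernel/sched_domain/
 *		cpu0/
 *			domain0/
 *				min_interval, max_interval, busy_idx, ...
 *			domain1/
 *		cpu1/
 *		...
 */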
static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
	struct ctl_table *table = sd_alloc_ctl_entry(14);

	if (table == NULL)
		return NULL;

	set_table_entry(&table[0], "min_interval", &sd->min_interval,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[1], "max_interval", &sd->max_interval,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[9], "cache_nice_tries",
		&sd->cache_nice_tries,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[10], "flags", &sd->flags,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[11], "max_newidle_lb_cost",
		&sd->max_newidle_lb_cost,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[12], "name", sd->name,
		CORENAME_MAX_SIZE, 0444, proc_dostring, false);
	/* &table[13] is terminator */

	return table;
}

static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
	struct ctl_table *entry, *table;
	struct sched_domain *sd;
	int domain_num = 0, i;
	char buf[32];

	for_each_domain(cpu, sd)
		domain_num++;
	entry = table = sd_alloc_ctl_entry(domain_num + 1);
	if (table == NULL)
		return NULL;

	i = 0;
	for_each_domain(cpu, sd) {
		snprintf(buf, 32, "domain%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_domain_table(sd);
		entry++;
		i++;
	}
	return table;
}

static struct ctl_table_header *sd_sysctl_header;
void register_sched_domain_sysctl(void)
{
	int i, cpu_num = num_possible_cpus();
	struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
	char buf[32];

	WARN_ON(sd_ctl_dir[0].child);
	sd_ctl_dir[0].child = entry;

	if (entry == NULL)
		return;

	for_each_possible_cpu(i) {
		snprintf(buf, 32, "cpu%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_cpu_table(i);
		entry++;
	}

	WARN_ON(sd_sysctl_header);
	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}

/* may be called multiple times per register */
void unregister_sched_domain_sysctl(void)
{
	unregister_sysctl_table(sd_sysctl_header);
	sd_sysctl_header = NULL;
	if (sd_ctl_dir[0].child)
		sd_free_ctl_entry(&sd_ctl_dir[0].child);
}
#endif /* CONFIG_SYSCTL */
#endif /* CONFIG_SMP */
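/*
 * Illustrative userspace usage of the knobs registered above (paths are
 * machine dependent; the values shown are invented):
 *
 *	# cat /proc/sys/kernel/sched_domain/cpu0/domain0/name
 *	MC
 *	# echo 117 > /proc/sys/kernel/sched_domain/cpu0/domain0/imbalance_pct
 *
 * "name" is read-only (0444, proc_dostring); the integer knobs are 0644
 * and are range-checked by proc_dointvec_minmax/proc_doulongvec_minmax.
 */
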
#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F) \
	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define P_SCHEDSTAT(F) \
	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)schedstat_val(F))
#define PN(F) \
	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F) \
	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);
	if (schedstat_enabled()) {
		PN_SCHEDSTAT(se->statistics.wait_start);
		PN_SCHEDSTAT(se->statistics.sleep_start);
		PN_SCHEDSTAT(se->statistics.block_start);
		PN_SCHEDSTAT(se->statistics.sleep_max);
		PN_SCHEDSTAT(se->statistics.block_max);
		PN_SCHEDSTAT(se->statistics.exec_max);
		PN_SCHEDSTAT(se->statistics.slice_max);
		PN_SCHEDSTAT(se->statistics.wait_max);
		PN_SCHEDSTAT(se->statistics.wait_sum);
		P_SCHEDSTAT(se->statistics.wait_count);
	}
	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.load_avg);
	P(se->avg.util_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static char group_path[PATH_MAX];

static char *task_group_path(struct task_group *tg)
{
	if (autogroup_path(tg, group_path, PATH_MAX))
		return group_path;

	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
	return group_path;
}
#endif

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (rq->curr == p)
		SEQ_printf(m, "R");
	else
		SEQ_printf(m, " ");

	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);

	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m,
	"\nrunnable tasks:\n"
	"            task   PID         tree-key  switches  prio"
	"     wait-time             sum-exec        sum-sleep\n"
	"------------------------------------------------------"
	"----------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}
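/*
 * Example of the section print_rq() produces (abridged, values invented;
 * the leading "R" marks the task currently running on the rq):
 *
 *	runnable tasks:
 *	            task   PID         tree-key  switches  prio ...
 *	--------------------------------------------------------...
 *	R            cat  2645     62793.424043        12   120 ...
 */
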
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (cfs_rq->rb_leftmost)
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_load_avg",
			cfs_rq->runnable_load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed_load_avg",
			atomic_long_read(&cfs_rq->removed_load_avg));
	SEQ_printf(m, "  .%-30s: %ld\n", "removed_util_avg",
			atomic_long_read(&cfs_rq->removed_util_avg));
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
	SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	P(rt_nr_running);
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\ndl_rq[%d]:\n", cpu);
	SEQ_printf(m, "  .%-30s: %ld\n", "dl_nr_running", dl_rq->dl_nr_running);
#ifdef CONFIG_SMP
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
	dl_bw = &dl_rq->dl_bw;
#endif
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);
}

extern __read_mostly int sched_clock_running;

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	SEQ_printf(m, "  .%-30s: %lu\n", "load",
		   rq->load.weight);
	P(nr_switches);
	P(nr_load_updates);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
	P(cpu_load[0]);
	P(cpu_load[1]);
	P(cpu_load[2]);
	P(cpu_load[3]);
	P(cpu_load[4]);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	spin_lock_irqsave(&sched_debug_lock, flags);
	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	spin_unlock_irqrestore(&sched_debug_lock, flags);
	SEQ_printf(m, "\n");
}
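/*
 * The per-cpu block printed above comes out roughly as follows
 * (abridged, values invented):
 *
 *	cpu#0, 2400.000 MHz
 *	  .nr_running                    : 1
 *	  .load                          : 1024
 *	  .nr_switches                   : 186591
 *	  ...
 */
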
static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}
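/*
 * Called with a NULL seq_file from the task dump path (e.g. the SysRq
 * task dump, "echo t > /proc/sysrq-trigger"), so everything goes out
 * through the printk() branch of SEQ_printf.
 */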
void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu)
		print_cpu(NULL, cpu);
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some cpus, including cpu 0, may be missing so we have
 * to use cpumask_* to iterate over the cpus.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);
	return NULL;
}

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start = sched_debug_start,
	.next = sched_debug_next,
	.stop = sched_debug_stop,
	.show = sched_debug_show,
};

static int sched_debug_release(struct inode *inode, struct file *file)
{
	seq_release(inode, file);

	return 0;
}

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	int ret = 0;

	ret = seq_open(filp, &sched_debug_sops);

	return ret;
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= sched_debug_release,
};

static int __init init_sched_debug_procfs(void)
{
	struct proc_dir_entry *pe;

	pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
	if (!pe)
		return -ENOMEM;
	return 0;
}

__initcall(init_sched_debug_procfs);

#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))


#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tsf, tpf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gsf, gpf);
}
#endif


static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	struct mempolicy *pol;

	if (p->mm)
		P(mm->numa_scan_seq);

	task_lock(p);
	pol = p->mempolicy;
	if (pol && !(pol->flags & MPOL_F_MORON))
		pol = NULL;
	mpol_get(pol);
	task_unlock(p);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
	mpol_put(pol);
#endif
}
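/*
 * Backs /proc/<pid>/sched. Illustrative read (abridged, values invented):
 *
 *	$ cat /proc/self/sched
 *	cat (2645, #threads: 1)
 *	-------------------------------------------------------------------
 *	se.exec_start                                :     267867.783425
 *	se.vruntime                                  :      62793.424043
 *	...
 */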
void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");
#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define P_SCHEDSTAT(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)schedstat_val(p->F))
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
#define PN_SCHEDSTAT(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(p->F)))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
		PN_SCHEDSTAT(se.statistics.wait_start);
		PN_SCHEDSTAT(se.statistics.sleep_start);
		PN_SCHEDSTAT(se.statistics.block_start);
		PN_SCHEDSTAT(se.statistics.sleep_max);
		PN_SCHEDSTAT(se.statistics.block_max);
		PN_SCHEDSTAT(se.statistics.exec_max);
		PN_SCHEDSTAT(se.statistics.slice_max);
		PN_SCHEDSTAT(se.statistics.wait_max);
		PN_SCHEDSTAT(se.statistics.wait_sum);
		P_SCHEDSTAT(se.statistics.wait_count);
		PN_SCHEDSTAT(se.statistics.iowait_sum);
		P_SCHEDSTAT(se.statistics.iowait_count);
		P_SCHEDSTAT(se.statistics.nr_migrations_cold);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
		P_SCHEDSTAT(se.statistics.nr_forced_migrations);
		P_SCHEDSTAT(se.statistics.nr_wakeups);
		P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
		P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
		P_SCHEDSTAT(se.statistics.nr_wakeups_local);
		P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
		P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
		P_SCHEDSTAT(se.statistics.nr_wakeups_idle);

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}

	__P(nr_switches);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_voluntary_switches", (long long)p->nvcsw);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_involuntary_switches", (long long)p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.load_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
#endif
	P(policy);
	P(prio);
	if (p->policy == SCHED_DEADLINE) {
		P(dl.runtime);
		P(dl.deadline);
	}
#undef PN_SCHEDSTAT
#undef PN
#undef __PN
#undef P_SCHEDSTAT
#undef P
#undef __P

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		SEQ_printf(m, "%-45s:%21Ld\n",
			   "clock-delta", (long long)(t1-t0));
	}

	sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}
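/*
 * proc_sched_set_task() is the reset hook for the statistics shown above;
 * it runs when userspace writes to /proc/<pid>/sched, e.g.:
 *
 *	# echo 0 > /proc/2645/sched
 *
 * Only the schedstats block is cleared; the scheduling-entity fields
 * themselves (vruntime, exec_start, ...) are left untouched.
 */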