1 /* 2 * kernel/sched/core.c 3 * 4 * Kernel scheduler and related syscalls 5 * 6 * Copyright (C) 1991-2002 Linus Torvalds 7 * 8 * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and 9 * make semaphores SMP safe 10 * 1998-11-19 Implemented schedule_timeout() and related stuff 11 * by Andrea Arcangeli 12 * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar: 13 * hybrid priority-list and round-robin design with 14 * an array-switch method of distributing timeslices 15 * and per-CPU runqueues. Cleanups and useful suggestions 16 * by Davide Libenzi, preemptible kernel bits by Robert Love. 17 * 2003-09-03 Interactivity tuning by Con Kolivas. 18 * 2004-04-02 Scheduler domains code by Nick Piggin 19 * 2007-04-15 Work begun on replacing all interactivity tuning with a 20 * fair scheduling design by Con Kolivas. 21 * 2007-05-05 Load balancing (smp-nice) and other improvements 22 * by Peter Williams 23 * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith 24 * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri 25 * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins, 26 * Thomas Gleixner, Mike Kravetz 27 */ 28 29 #include <linux/kasan.h> 30 #include <linux/mm.h> 31 #include <linux/module.h> 32 #include <linux/nmi.h> 33 #include <linux/init.h> 34 #include <linux/uaccess.h> 35 #include <linux/highmem.h> 36 #include <asm/mmu_context.h> 37 #include <linux/interrupt.h> 38 #include <linux/capability.h> 39 #include <linux/completion.h> 40 #include <linux/kernel_stat.h> 41 #include <linux/debug_locks.h> 42 #include <linux/perf_event.h> 43 #include <linux/security.h> 44 #include <linux/notifier.h> 45 #include <linux/profile.h> 46 #include <linux/freezer.h> 47 #include <linux/vmalloc.h> 48 #include <linux/blkdev.h> 49 #include <linux/delay.h> 50 #include <linux/pid_namespace.h> 51 #include <linux/smp.h> 52 #include <linux/threads.h> 53 #include <linux/timer.h> 54 #include <linux/rcupdate.h> 55 #include <linux/cpu.h> 56 #include <linux/cpuset.h> 57 #include <linux/percpu.h> 58 #include <linux/proc_fs.h> 59 #include <linux/seq_file.h> 60 #include <linux/sysctl.h> 61 #include <linux/syscalls.h> 62 #include <linux/times.h> 63 #include <linux/tsacct_kern.h> 64 #include <linux/kprobes.h> 65 #include <linux/delayacct.h> 66 #include <linux/unistd.h> 67 #include <linux/pagemap.h> 68 #include <linux/hrtimer.h> 69 #include <linux/tick.h> 70 #include <linux/ctype.h> 71 #include <linux/ftrace.h> 72 #include <linux/slab.h> 73 #include <linux/init_task.h> 74 #include <linux/context_tracking.h> 75 #include <linux/compiler.h> 76 77 #include <asm/switch_to.h> 78 #include <asm/tlb.h> 79 #include <asm/irq_regs.h> 80 #include <asm/mutex.h> 81 #ifdef CONFIG_PARAVIRT 82 #include <asm/paravirt.h> 83 #endif 84 85 #include "sched.h" 86 #include "../workqueue_internal.h" 87 #include "../smpboot.h" 88 89 #define CREATE_TRACE_POINTS 90 #include <trace/events/sched.h> 91 92 DEFINE_MUTEX(sched_domains_mutex); 93 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); 94 95 static void update_rq_clock_task(struct rq *rq, s64 delta); 96 97 void update_rq_clock(struct rq *rq) 98 { 99 s64 delta; 100 101 lockdep_assert_held(&rq->lock); 102 103 if (rq->clock_skip_update & RQCF_ACT_SKIP) 104 return; 105 106 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; 107 if (delta < 0) 108 return; 109 rq->clock += delta; 110 update_rq_clock_task(rq, delta); 111 } 112 113 /* 114 * Debugging: various feature bits 115 */ 116 117 #define SCHED_FEAT(name, enabled) \ 118 (1UL << __SCHED_FEAT_##name) * 
enabled | 119 120 const_debug unsigned int sysctl_sched_features = 121 #include "features.h" 122 0; 123 124 #undef SCHED_FEAT 125 126 /* 127 * Number of tasks to iterate in a single balance run. 128 * Limited because this is done with IRQs disabled. 129 */ 130 const_debug unsigned int sysctl_sched_nr_migrate = 32; 131 132 /* 133 * period over which we average the RT time consumption, measured 134 * in ms. 135 * 136 * default: 1s 137 */ 138 const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC; 139 140 /* 141 * period over which we measure -rt task cpu usage in us. 142 * default: 1s 143 */ 144 unsigned int sysctl_sched_rt_period = 1000000; 145 146 __read_mostly int scheduler_running; 147 148 /* 149 * part of the period that we allow rt tasks to run in us. 150 * default: 0.95s 151 */ 152 int sysctl_sched_rt_runtime = 950000; 153 154 /* cpus with isolated domains */ 155 cpumask_var_t cpu_isolated_map; 156 157 /* 158 * this_rq_lock - lock this runqueue and disable interrupts. 159 */ 160 static struct rq *this_rq_lock(void) 161 __acquires(rq->lock) 162 { 163 struct rq *rq; 164 165 local_irq_disable(); 166 rq = this_rq(); 167 raw_spin_lock(&rq->lock); 168 169 return rq; 170 } 171 172 #ifdef CONFIG_SCHED_HRTICK 173 /* 174 * Use HR-timers to deliver accurate preemption points. 175 */ 176 177 static void hrtick_clear(struct rq *rq) 178 { 179 if (hrtimer_active(&rq->hrtick_timer)) 180 hrtimer_cancel(&rq->hrtick_timer); 181 } 182 183 /* 184 * High-resolution timer tick. 185 * Runs from hardirq context with interrupts disabled. 186 */ 187 static enum hrtimer_restart hrtick(struct hrtimer *timer) 188 { 189 struct rq *rq = container_of(timer, struct rq, hrtick_timer); 190 191 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); 192 193 raw_spin_lock(&rq->lock); 194 update_rq_clock(rq); 195 rq->curr->sched_class->task_tick(rq, rq->curr, 1); 196 raw_spin_unlock(&rq->lock); 197 198 return HRTIMER_NORESTART; 199 } 200 201 #ifdef CONFIG_SMP 202 203 static void __hrtick_restart(struct rq *rq) 204 { 205 struct hrtimer *timer = &rq->hrtick_timer; 206 207 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED); 208 } 209 210 /* 211 * called from hardirq (IPI) context 212 */ 213 static void __hrtick_start(void *arg) 214 { 215 struct rq *rq = arg; 216 217 raw_spin_lock(&rq->lock); 218 __hrtick_restart(rq); 219 rq->hrtick_csd_pending = 0; 220 raw_spin_unlock(&rq->lock); 221 } 222 223 /* 224 * Called to set the hrtick timer state. 225 * 226 * called with rq->lock held and irqs disabled 227 */ 228 void hrtick_start(struct rq *rq, u64 delay) 229 { 230 struct hrtimer *timer = &rq->hrtick_timer; 231 ktime_t time; 232 s64 delta; 233 234 /* 235 * Don't schedule slices shorter than 10000ns, that just 236 * doesn't make sense and can cause timer DoS. 
237 */ 238 delta = max_t(s64, delay, 10000LL); 239 time = ktime_add_ns(timer->base->get_time(), delta); 240 241 hrtimer_set_expires(timer, time); 242 243 if (rq == this_rq()) { 244 __hrtick_restart(rq); 245 } else if (!rq->hrtick_csd_pending) { 246 smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd); 247 rq->hrtick_csd_pending = 1; 248 } 249 } 250 251 static int 252 hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu) 253 { 254 int cpu = (int)(long)hcpu; 255 256 switch (action) { 257 case CPU_UP_CANCELED: 258 case CPU_UP_CANCELED_FROZEN: 259 case CPU_DOWN_PREPARE: 260 case CPU_DOWN_PREPARE_FROZEN: 261 case CPU_DEAD: 262 case CPU_DEAD_FROZEN: 263 hrtick_clear(cpu_rq(cpu)); 264 return NOTIFY_OK; 265 } 266 267 return NOTIFY_DONE; 268 } 269 270 static __init void init_hrtick(void) 271 { 272 hotcpu_notifier(hotplug_hrtick, 0); 273 } 274 #else 275 /* 276 * Called to set the hrtick timer state. 277 * 278 * called with rq->lock held and irqs disabled 279 */ 280 void hrtick_start(struct rq *rq, u64 delay) 281 { 282 /* 283 * Don't schedule slices shorter than 10000ns, that just 284 * doesn't make sense. Rely on vruntime for fairness. 285 */ 286 delay = max_t(u64, delay, 10000LL); 287 hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), 288 HRTIMER_MODE_REL_PINNED); 289 } 290 291 static inline void init_hrtick(void) 292 { 293 } 294 #endif /* CONFIG_SMP */ 295 296 static void init_rq_hrtick(struct rq *rq) 297 { 298 #ifdef CONFIG_SMP 299 rq->hrtick_csd_pending = 0; 300 301 rq->hrtick_csd.flags = 0; 302 rq->hrtick_csd.func = __hrtick_start; 303 rq->hrtick_csd.info = rq; 304 #endif 305 306 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 307 rq->hrtick_timer.function = hrtick; 308 } 309 #else /* CONFIG_SCHED_HRTICK */ 310 static inline void hrtick_clear(struct rq *rq) 311 { 312 } 313 314 static inline void init_rq_hrtick(struct rq *rq) 315 { 316 } 317 318 static inline void init_hrtick(void) 319 { 320 } 321 #endif /* CONFIG_SCHED_HRTICK */ 322 323 #if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG) 324 /* 325 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG, 326 * this avoids any races wrt polling state changes and thereby avoids 327 * spurious IPIs. 328 */ 329 static bool set_nr_and_not_polling(struct task_struct *p) 330 { 331 struct thread_info *ti = task_thread_info(p); 332 return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG); 333 } 334 335 /* 336 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set. 337 * 338 * If this returns true, then the idle task promises to call 339 * sched_ttwu_pending() and reschedule soon. 
340 */ 341 static bool set_nr_if_polling(struct task_struct *p) 342 { 343 struct thread_info *ti = task_thread_info(p); 344 typeof(ti->flags) old, val = READ_ONCE(ti->flags); 345 346 for (;;) { 347 if (!(val & _TIF_POLLING_NRFLAG)) 348 return false; 349 if (val & _TIF_NEED_RESCHED) 350 return true; 351 old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED); 352 if (old == val) 353 break; 354 val = old; 355 } 356 return true; 357 } 358 359 #else 360 static bool set_nr_and_not_polling(struct task_struct *p) 361 { 362 set_tsk_need_resched(p); 363 return true; 364 } 365 366 #ifdef CONFIG_SMP 367 static bool set_nr_if_polling(struct task_struct *p) 368 { 369 return false; 370 } 371 #endif 372 #endif 373 374 void wake_q_add(struct wake_q_head *head, struct task_struct *task) 375 { 376 struct wake_q_node *node = &task->wake_q; 377 378 /* 379 * Atomically grab the task, if ->wake_q is !nil already it means 380 * its already queued (either by us or someone else) and will get the 381 * wakeup due to that. 382 * 383 * This cmpxchg() implies a full barrier, which pairs with the write 384 * barrier implied by the wakeup in wake_up_list(). 385 */ 386 if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL)) 387 return; 388 389 get_task_struct(task); 390 391 /* 392 * The head is context local, there can be no concurrency. 393 */ 394 *head->lastp = node; 395 head->lastp = &node->next; 396 } 397 398 void wake_up_q(struct wake_q_head *head) 399 { 400 struct wake_q_node *node = head->first; 401 402 while (node != WAKE_Q_TAIL) { 403 struct task_struct *task; 404 405 task = container_of(node, struct task_struct, wake_q); 406 BUG_ON(!task); 407 /* task can safely be re-inserted now */ 408 node = node->next; 409 task->wake_q.next = NULL; 410 411 /* 412 * wake_up_process() implies a wmb() to pair with the queueing 413 * in wake_q_add() so as not to miss wakeups. 414 */ 415 wake_up_process(task); 416 put_task_struct(task); 417 } 418 } 419 420 /* 421 * resched_curr - mark rq's current task 'to be rescheduled now'. 422 * 423 * On UP this means the setting of the need_resched flag, on SMP it 424 * might also involve a cross-CPU call to trigger the scheduler on 425 * the target CPU. 426 */ 427 void resched_curr(struct rq *rq) 428 { 429 struct task_struct *curr = rq->curr; 430 int cpu; 431 432 lockdep_assert_held(&rq->lock); 433 434 if (test_tsk_need_resched(curr)) 435 return; 436 437 cpu = cpu_of(rq); 438 439 if (cpu == smp_processor_id()) { 440 set_tsk_need_resched(curr); 441 set_preempt_need_resched(); 442 return; 443 } 444 445 if (set_nr_and_not_polling(curr)) 446 smp_send_reschedule(cpu); 447 else 448 trace_sched_wake_idle_without_ipi(cpu); 449 } 450 451 void resched_cpu(int cpu) 452 { 453 struct rq *rq = cpu_rq(cpu); 454 unsigned long flags; 455 456 if (!raw_spin_trylock_irqsave(&rq->lock, flags)) 457 return; 458 resched_curr(rq); 459 raw_spin_unlock_irqrestore(&rq->lock, flags); 460 } 461 462 #ifdef CONFIG_SMP 463 #ifdef CONFIG_NO_HZ_COMMON 464 /* 465 * In the semi idle case, use the nearest busy cpu for migrating timers 466 * from an idle cpu. This is good for power-savings. 467 * 468 * We don't do similar optimization for completely idle system, as 469 * selecting an idle cpu will add more delays to the timers than intended 470 * (as that cpu's timer base may not be uptodate wrt jiffies etc). 
471 */ 472 int get_nohz_timer_target(void) 473 { 474 int i, cpu = smp_processor_id(); 475 struct sched_domain *sd; 476 477 if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu)) 478 return cpu; 479 480 rcu_read_lock(); 481 for_each_domain(cpu, sd) { 482 for_each_cpu(i, sched_domain_span(sd)) { 483 if (!idle_cpu(i) && is_housekeeping_cpu(cpu)) { 484 cpu = i; 485 goto unlock; 486 } 487 } 488 } 489 490 if (!is_housekeeping_cpu(cpu)) 491 cpu = housekeeping_any_cpu(); 492 unlock: 493 rcu_read_unlock(); 494 return cpu; 495 } 496 /* 497 * When add_timer_on() enqueues a timer into the timer wheel of an 498 * idle CPU then this timer might expire before the next timer event 499 * which is scheduled to wake up that CPU. In case of a completely 500 * idle system the next event might even be infinite time into the 501 * future. wake_up_idle_cpu() ensures that the CPU is woken up and 502 * leaves the inner idle loop so the newly added timer is taken into 503 * account when the CPU goes back to idle and evaluates the timer 504 * wheel for the next timer event. 505 */ 506 static void wake_up_idle_cpu(int cpu) 507 { 508 struct rq *rq = cpu_rq(cpu); 509 510 if (cpu == smp_processor_id()) 511 return; 512 513 if (set_nr_and_not_polling(rq->idle)) 514 smp_send_reschedule(cpu); 515 else 516 trace_sched_wake_idle_without_ipi(cpu); 517 } 518 519 static bool wake_up_full_nohz_cpu(int cpu) 520 { 521 /* 522 * We just need the target to call irq_exit() and re-evaluate 523 * the next tick. The nohz full kick at least implies that. 524 * If needed we can still optimize that later with an 525 * empty IRQ. 526 */ 527 if (tick_nohz_full_cpu(cpu)) { 528 if (cpu != smp_processor_id() || 529 tick_nohz_tick_stopped()) 530 tick_nohz_full_kick_cpu(cpu); 531 return true; 532 } 533 534 return false; 535 } 536 537 void wake_up_nohz_cpu(int cpu) 538 { 539 if (!wake_up_full_nohz_cpu(cpu)) 540 wake_up_idle_cpu(cpu); 541 } 542 543 static inline bool got_nohz_idle_kick(void) 544 { 545 int cpu = smp_processor_id(); 546 547 if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu))) 548 return false; 549 550 if (idle_cpu(cpu) && !need_resched()) 551 return true; 552 553 /* 554 * We can't run Idle Load Balance on this CPU for this time so we 555 * cancel it and clear NOHZ_BALANCE_KICK 556 */ 557 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)); 558 return false; 559 } 560 561 #else /* CONFIG_NO_HZ_COMMON */ 562 563 static inline bool got_nohz_idle_kick(void) 564 { 565 return false; 566 } 567 568 #endif /* CONFIG_NO_HZ_COMMON */ 569 570 #ifdef CONFIG_NO_HZ_FULL 571 bool sched_can_stop_tick(struct rq *rq) 572 { 573 int fifo_nr_running; 574 575 /* Deadline tasks, even if single, need the tick */ 576 if (rq->dl.dl_nr_running) 577 return false; 578 579 /* 580 * FIFO realtime policy runs the highest priority task (after DEADLINE). 581 * Other runnable tasks are of a lower priority. The scheduler tick 582 * isn't needed. 583 */ 584 fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running; 585 if (fifo_nr_running) 586 return true; 587 588 /* 589 * Round-robin realtime tasks time slice with other tasks at the same 590 * realtime priority. 
591 */ 592 if (rq->rt.rr_nr_running) { 593 if (rq->rt.rr_nr_running == 1) 594 return true; 595 else 596 return false; 597 } 598 599 /* Normal multitasking need periodic preemption checks */ 600 if (rq->cfs.nr_running > 1) 601 return false; 602 603 return true; 604 } 605 #endif /* CONFIG_NO_HZ_FULL */ 606 607 void sched_avg_update(struct rq *rq) 608 { 609 s64 period = sched_avg_period(); 610 611 while ((s64)(rq_clock(rq) - rq->age_stamp) > period) { 612 /* 613 * Inline assembly required to prevent the compiler 614 * optimising this loop into a divmod call. 615 * See __iter_div_u64_rem() for another example of this. 616 */ 617 asm("" : "+rm" (rq->age_stamp)); 618 rq->age_stamp += period; 619 rq->rt_avg /= 2; 620 } 621 } 622 623 #endif /* CONFIG_SMP */ 624 625 #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \ 626 (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH))) 627 /* 628 * Iterate task_group tree rooted at *from, calling @down when first entering a 629 * node and @up when leaving it for the final time. 630 * 631 * Caller must hold rcu_lock or sufficient equivalent. 632 */ 633 int walk_tg_tree_from(struct task_group *from, 634 tg_visitor down, tg_visitor up, void *data) 635 { 636 struct task_group *parent, *child; 637 int ret; 638 639 parent = from; 640 641 down: 642 ret = (*down)(parent, data); 643 if (ret) 644 goto out; 645 list_for_each_entry_rcu(child, &parent->children, siblings) { 646 parent = child; 647 goto down; 648 649 up: 650 continue; 651 } 652 ret = (*up)(parent, data); 653 if (ret || parent == from) 654 goto out; 655 656 child = parent; 657 parent = parent->parent; 658 if (parent) 659 goto up; 660 out: 661 return ret; 662 } 663 664 int tg_nop(struct task_group *tg, void *data) 665 { 666 return 0; 667 } 668 #endif 669 670 static void set_load_weight(struct task_struct *p) 671 { 672 int prio = p->static_prio - MAX_RT_PRIO; 673 struct load_weight *load = &p->se.load; 674 675 /* 676 * SCHED_IDLE tasks get minimal weight: 677 */ 678 if (idle_policy(p->policy)) { 679 load->weight = scale_load(WEIGHT_IDLEPRIO); 680 load->inv_weight = WMULT_IDLEPRIO; 681 return; 682 } 683 684 load->weight = scale_load(sched_prio_to_weight[prio]); 685 load->inv_weight = sched_prio_to_wmult[prio]; 686 } 687 688 static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags) 689 { 690 update_rq_clock(rq); 691 if (!(flags & ENQUEUE_RESTORE)) 692 sched_info_queued(rq, p); 693 p->sched_class->enqueue_task(rq, p, flags); 694 } 695 696 static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags) 697 { 698 update_rq_clock(rq); 699 if (!(flags & DEQUEUE_SAVE)) 700 sched_info_dequeued(rq, p); 701 p->sched_class->dequeue_task(rq, p, flags); 702 } 703 704 void activate_task(struct rq *rq, struct task_struct *p, int flags) 705 { 706 if (task_contributes_to_load(p)) 707 rq->nr_uninterruptible--; 708 709 enqueue_task(rq, p, flags); 710 } 711 712 void deactivate_task(struct rq *rq, struct task_struct *p, int flags) 713 { 714 if (task_contributes_to_load(p)) 715 rq->nr_uninterruptible++; 716 717 dequeue_task(rq, p, flags); 718 } 719 720 static void update_rq_clock_task(struct rq *rq, s64 delta) 721 { 722 /* 723 * In theory, the compile should just see 0 here, and optimize out the call 724 * to sched_rt_avg_update. But I don't trust it... 
725 */ 726 #if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING) 727 s64 steal = 0, irq_delta = 0; 728 #endif 729 #ifdef CONFIG_IRQ_TIME_ACCOUNTING 730 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; 731 732 /* 733 * Since irq_time is only updated on {soft,}irq_exit, we might run into 734 * this case when a previous update_rq_clock() happened inside a 735 * {soft,}irq region. 736 * 737 * When this happens, we stop ->clock_task and only update the 738 * prev_irq_time stamp to account for the part that fit, so that a next 739 * update will consume the rest. This ensures ->clock_task is 740 * monotonic. 741 * 742 * It does however cause some slight miss-attribution of {soft,}irq 743 * time, a more accurate solution would be to update the irq_time using 744 * the current rq->clock timestamp, except that would require using 745 * atomic ops. 746 */ 747 if (irq_delta > delta) 748 irq_delta = delta; 749 750 rq->prev_irq_time += irq_delta; 751 delta -= irq_delta; 752 #endif 753 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING 754 if (static_key_false((¶virt_steal_rq_enabled))) { 755 steal = paravirt_steal_clock(cpu_of(rq)); 756 steal -= rq->prev_steal_time_rq; 757 758 if (unlikely(steal > delta)) 759 steal = delta; 760 761 rq->prev_steal_time_rq += steal; 762 delta -= steal; 763 } 764 #endif 765 766 rq->clock_task += delta; 767 768 #if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING) 769 if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY)) 770 sched_rt_avg_update(rq, irq_delta + steal); 771 #endif 772 } 773 774 void sched_set_stop_task(int cpu, struct task_struct *stop) 775 { 776 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; 777 struct task_struct *old_stop = cpu_rq(cpu)->stop; 778 779 if (stop) { 780 /* 781 * Make it appear like a SCHED_FIFO task, its something 782 * userspace knows about and won't get confused about. 783 * 784 * Also, it will make PI more or less work without too 785 * much confusion -- but then, stop work should not 786 * rely on PI working anyway. 787 */ 788 sched_setscheduler_nocheck(stop, SCHED_FIFO, ¶m); 789 790 stop->sched_class = &stop_sched_class; 791 } 792 793 cpu_rq(cpu)->stop = stop; 794 795 if (old_stop) { 796 /* 797 * Reset it back to a normal scheduling class so that 798 * it can die in pieces. 799 */ 800 old_stop->sched_class = &rt_sched_class; 801 } 802 } 803 804 /* 805 * __normal_prio - return the priority that is based on the static prio 806 */ 807 static inline int __normal_prio(struct task_struct *p) 808 { 809 return p->static_prio; 810 } 811 812 /* 813 * Calculate the expected normal priority: i.e. priority 814 * without taking RT-inheritance into account. Might be 815 * boosted by interactivity modifiers. Changes upon fork, 816 * setprio syscalls, and whenever the interactivity 817 * estimator recalculates. 818 */ 819 static inline int normal_prio(struct task_struct *p) 820 { 821 int prio; 822 823 if (task_has_dl_policy(p)) 824 prio = MAX_DL_PRIO-1; 825 else if (task_has_rt_policy(p)) 826 prio = MAX_RT_PRIO-1 - p->rt_priority; 827 else 828 prio = __normal_prio(p); 829 return prio; 830 } 831 832 /* 833 * Calculate the current priority, i.e. the priority 834 * taken into account by the scheduler. This value might 835 * be boosted by RT tasks, or might be boosted by 836 * interactivity modifiers. Will be RT if the task got 837 * RT-boosted. If not then it returns p->normal_prio. 
838 */ 839 static int effective_prio(struct task_struct *p) 840 { 841 p->normal_prio = normal_prio(p); 842 /* 843 * If we are RT tasks or we were boosted to RT priority, 844 * keep the priority unchanged. Otherwise, update priority 845 * to the normal priority: 846 */ 847 if (!rt_prio(p->prio)) 848 return p->normal_prio; 849 return p->prio; 850 } 851 852 /** 853 * task_curr - is this task currently executing on a CPU? 854 * @p: the task in question. 855 * 856 * Return: 1 if the task is currently executing. 0 otherwise. 857 */ 858 inline int task_curr(const struct task_struct *p) 859 { 860 return cpu_curr(task_cpu(p)) == p; 861 } 862 863 /* 864 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock, 865 * use the balance_callback list if you want balancing. 866 * 867 * this means any call to check_class_changed() must be followed by a call to 868 * balance_callback(). 869 */ 870 static inline void check_class_changed(struct rq *rq, struct task_struct *p, 871 const struct sched_class *prev_class, 872 int oldprio) 873 { 874 if (prev_class != p->sched_class) { 875 if (prev_class->switched_from) 876 prev_class->switched_from(rq, p); 877 878 p->sched_class->switched_to(rq, p); 879 } else if (oldprio != p->prio || dl_task(p)) 880 p->sched_class->prio_changed(rq, p, oldprio); 881 } 882 883 void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) 884 { 885 const struct sched_class *class; 886 887 if (p->sched_class == rq->curr->sched_class) { 888 rq->curr->sched_class->check_preempt_curr(rq, p, flags); 889 } else { 890 for_each_class(class) { 891 if (class == rq->curr->sched_class) 892 break; 893 if (class == p->sched_class) { 894 resched_curr(rq); 895 break; 896 } 897 } 898 } 899 900 /* 901 * A queue event has occurred, and we're going to schedule. In 902 * this case, we can save a useless back to back clock update. 903 */ 904 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr)) 905 rq_clock_skip_update(rq, true); 906 } 907 908 #ifdef CONFIG_SMP 909 /* 910 * This is how migration works: 911 * 912 * 1) we invoke migration_cpu_stop() on the target CPU using 913 * stop_one_cpu(). 914 * 2) stopper starts to run (implicitly forcing the migrated thread 915 * off the CPU) 916 * 3) it checks whether the migrated task is still in the wrong runqueue. 917 * 4) if it's in the wrong runqueue then the migration thread removes 918 * it and puts it into the right queue. 919 * 5) stopper completes and stop_one_cpu() returns and the migration 920 * is done. 921 */ 922 923 /* 924 * move_queued_task - move a queued task to new rq. 925 * 926 * Returns (locked) new rq. Old rq's lock is released. 927 */ 928 static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new_cpu) 929 { 930 lockdep_assert_held(&rq->lock); 931 932 p->on_rq = TASK_ON_RQ_MIGRATING; 933 dequeue_task(rq, p, 0); 934 set_task_cpu(p, new_cpu); 935 raw_spin_unlock(&rq->lock); 936 937 rq = cpu_rq(new_cpu); 938 939 raw_spin_lock(&rq->lock); 940 BUG_ON(task_cpu(p) != new_cpu); 941 enqueue_task(rq, p, 0); 942 p->on_rq = TASK_ON_RQ_QUEUED; 943 check_preempt_curr(rq, p, 0); 944 945 return rq; 946 } 947 948 struct migration_arg { 949 struct task_struct *task; 950 int dest_cpu; 951 }; 952 953 /* 954 * Move (not current) task off this cpu, onto dest cpu. We're doing 955 * this because either it can't run here any more (set_cpus_allowed() 956 * away from this CPU, or CPU going down), or because we're 957 * attempting to rebalance this task on exec (sched_exec). 
958 * 959 * So we race with normal scheduler movements, but that's OK, as long 960 * as the task is no longer on this CPU. 961 */ 962 static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_cpu) 963 { 964 if (unlikely(!cpu_active(dest_cpu))) 965 return rq; 966 967 /* Affinity changed (again). */ 968 if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p))) 969 return rq; 970 971 rq = move_queued_task(rq, p, dest_cpu); 972 973 return rq; 974 } 975 976 /* 977 * migration_cpu_stop - this will be executed by a highprio stopper thread 978 * and performs thread migration by bumping thread off CPU then 979 * 'pushing' onto another runqueue. 980 */ 981 static int migration_cpu_stop(void *data) 982 { 983 struct migration_arg *arg = data; 984 struct task_struct *p = arg->task; 985 struct rq *rq = this_rq(); 986 987 /* 988 * The original target cpu might have gone down and we might 989 * be on another cpu but it doesn't matter. 990 */ 991 local_irq_disable(); 992 /* 993 * We need to explicitly wake pending tasks before running 994 * __migrate_task() such that we will not miss enforcing cpus_allowed 995 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test. 996 */ 997 sched_ttwu_pending(); 998 999 raw_spin_lock(&p->pi_lock); 1000 raw_spin_lock(&rq->lock); 1001 /* 1002 * If task_rq(p) != rq, it cannot be migrated here, because we're 1003 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because 1004 * we're holding p->pi_lock. 1005 */ 1006 if (task_rq(p) == rq && task_on_rq_queued(p)) 1007 rq = __migrate_task(rq, p, arg->dest_cpu); 1008 raw_spin_unlock(&rq->lock); 1009 raw_spin_unlock(&p->pi_lock); 1010 1011 local_irq_enable(); 1012 return 0; 1013 } 1014 1015 /* 1016 * sched_class::set_cpus_allowed must do the below, but is not required to 1017 * actually call this function. 1018 */ 1019 void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask) 1020 { 1021 cpumask_copy(&p->cpus_allowed, new_mask); 1022 p->nr_cpus_allowed = cpumask_weight(new_mask); 1023 } 1024 1025 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) 1026 { 1027 struct rq *rq = task_rq(p); 1028 bool queued, running; 1029 1030 lockdep_assert_held(&p->pi_lock); 1031 1032 queued = task_on_rq_queued(p); 1033 running = task_current(rq, p); 1034 1035 if (queued) { 1036 /* 1037 * Because __kthread_bind() calls this on blocked tasks without 1038 * holding rq->lock. 1039 */ 1040 lockdep_assert_held(&rq->lock); 1041 dequeue_task(rq, p, DEQUEUE_SAVE); 1042 } 1043 if (running) 1044 put_prev_task(rq, p); 1045 1046 p->sched_class->set_cpus_allowed(p, new_mask); 1047 1048 if (running) 1049 p->sched_class->set_curr_task(rq); 1050 if (queued) 1051 enqueue_task(rq, p, ENQUEUE_RESTORE); 1052 } 1053 1054 /* 1055 * Change a given task's CPU affinity. Migrate the thread to a 1056 * proper CPU and schedule it away if the CPU it's executing on 1057 * is removed from the allowed bitmask. 1058 * 1059 * NOTE: the caller must have a valid reference to the task, the 1060 * task must not exit() & deallocate itself prematurely. The 1061 * call is not atomic; no spinlocks may be held. 
1062 */ 1063 static int __set_cpus_allowed_ptr(struct task_struct *p, 1064 const struct cpumask *new_mask, bool check) 1065 { 1066 unsigned long flags; 1067 struct rq *rq; 1068 unsigned int dest_cpu; 1069 int ret = 0; 1070 1071 rq = task_rq_lock(p, &flags); 1072 1073 /* 1074 * Must re-check here, to close a race against __kthread_bind(), 1075 * sched_setaffinity() is not guaranteed to observe the flag. 1076 */ 1077 if (check && (p->flags & PF_NO_SETAFFINITY)) { 1078 ret = -EINVAL; 1079 goto out; 1080 } 1081 1082 if (cpumask_equal(&p->cpus_allowed, new_mask)) 1083 goto out; 1084 1085 if (!cpumask_intersects(new_mask, cpu_active_mask)) { 1086 ret = -EINVAL; 1087 goto out; 1088 } 1089 1090 do_set_cpus_allowed(p, new_mask); 1091 1092 /* Can the task run on the task's current CPU? If so, we're done */ 1093 if (cpumask_test_cpu(task_cpu(p), new_mask)) 1094 goto out; 1095 1096 dest_cpu = cpumask_any_and(cpu_active_mask, new_mask); 1097 if (task_running(rq, p) || p->state == TASK_WAKING) { 1098 struct migration_arg arg = { p, dest_cpu }; 1099 /* Need help from migration thread: drop lock and wait. */ 1100 task_rq_unlock(rq, p, &flags); 1101 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); 1102 tlb_migrate_finish(p->mm); 1103 return 0; 1104 } else if (task_on_rq_queued(p)) { 1105 /* 1106 * OK, since we're going to drop the lock immediately 1107 * afterwards anyway. 1108 */ 1109 lockdep_unpin_lock(&rq->lock); 1110 rq = move_queued_task(rq, p, dest_cpu); 1111 lockdep_pin_lock(&rq->lock); 1112 } 1113 out: 1114 task_rq_unlock(rq, p, &flags); 1115 1116 return ret; 1117 } 1118 1119 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) 1120 { 1121 return __set_cpus_allowed_ptr(p, new_mask, false); 1122 } 1123 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); 1124 1125 void set_task_cpu(struct task_struct *p, unsigned int new_cpu) 1126 { 1127 #ifdef CONFIG_SCHED_DEBUG 1128 /* 1129 * We should never call set_task_cpu() on a blocked task, 1130 * ttwu() will sort out the placement. 1131 */ 1132 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING && 1133 !p->on_rq); 1134 1135 /* 1136 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING, 1137 * because schedstat_wait_{start,end} rebase migrating task's wait_start 1138 * time relying on p->on_rq. 1139 */ 1140 WARN_ON_ONCE(p->state == TASK_RUNNING && 1141 p->sched_class == &fair_sched_class && 1142 (p->on_rq && !task_on_rq_migrating(p))); 1143 1144 #ifdef CONFIG_LOCKDEP 1145 /* 1146 * The caller should hold either p->pi_lock or rq->lock, when changing 1147 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks. 1148 * 1149 * sched_move_task() holds both and thus holding either pins the cgroup, 1150 * see task_group(). 1151 * 1152 * Furthermore, all task_rq users should acquire both locks, see 1153 * task_rq_lock(). 
1154 */ 1155 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || 1156 lockdep_is_held(&task_rq(p)->lock))); 1157 #endif 1158 #endif 1159 1160 trace_sched_migrate_task(p, new_cpu); 1161 1162 if (task_cpu(p) != new_cpu) { 1163 if (p->sched_class->migrate_task_rq) 1164 p->sched_class->migrate_task_rq(p); 1165 p->se.nr_migrations++; 1166 perf_event_task_migrate(p); 1167 } 1168 1169 __set_task_cpu(p, new_cpu); 1170 } 1171 1172 static void __migrate_swap_task(struct task_struct *p, int cpu) 1173 { 1174 if (task_on_rq_queued(p)) { 1175 struct rq *src_rq, *dst_rq; 1176 1177 src_rq = task_rq(p); 1178 dst_rq = cpu_rq(cpu); 1179 1180 p->on_rq = TASK_ON_RQ_MIGRATING; 1181 deactivate_task(src_rq, p, 0); 1182 set_task_cpu(p, cpu); 1183 activate_task(dst_rq, p, 0); 1184 p->on_rq = TASK_ON_RQ_QUEUED; 1185 check_preempt_curr(dst_rq, p, 0); 1186 } else { 1187 /* 1188 * Task isn't running anymore; make it appear like we migrated 1189 * it before it went to sleep. This means on wakeup we make the 1190 * previous cpu our targer instead of where it really is. 1191 */ 1192 p->wake_cpu = cpu; 1193 } 1194 } 1195 1196 struct migration_swap_arg { 1197 struct task_struct *src_task, *dst_task; 1198 int src_cpu, dst_cpu; 1199 }; 1200 1201 static int migrate_swap_stop(void *data) 1202 { 1203 struct migration_swap_arg *arg = data; 1204 struct rq *src_rq, *dst_rq; 1205 int ret = -EAGAIN; 1206 1207 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu)) 1208 return -EAGAIN; 1209 1210 src_rq = cpu_rq(arg->src_cpu); 1211 dst_rq = cpu_rq(arg->dst_cpu); 1212 1213 double_raw_lock(&arg->src_task->pi_lock, 1214 &arg->dst_task->pi_lock); 1215 double_rq_lock(src_rq, dst_rq); 1216 1217 if (task_cpu(arg->dst_task) != arg->dst_cpu) 1218 goto unlock; 1219 1220 if (task_cpu(arg->src_task) != arg->src_cpu) 1221 goto unlock; 1222 1223 if (!cpumask_test_cpu(arg->dst_cpu, tsk_cpus_allowed(arg->src_task))) 1224 goto unlock; 1225 1226 if (!cpumask_test_cpu(arg->src_cpu, tsk_cpus_allowed(arg->dst_task))) 1227 goto unlock; 1228 1229 __migrate_swap_task(arg->src_task, arg->dst_cpu); 1230 __migrate_swap_task(arg->dst_task, arg->src_cpu); 1231 1232 ret = 0; 1233 1234 unlock: 1235 double_rq_unlock(src_rq, dst_rq); 1236 raw_spin_unlock(&arg->dst_task->pi_lock); 1237 raw_spin_unlock(&arg->src_task->pi_lock); 1238 1239 return ret; 1240 } 1241 1242 /* 1243 * Cross migrate two tasks 1244 */ 1245 int migrate_swap(struct task_struct *cur, struct task_struct *p) 1246 { 1247 struct migration_swap_arg arg; 1248 int ret = -EINVAL; 1249 1250 arg = (struct migration_swap_arg){ 1251 .src_task = cur, 1252 .src_cpu = task_cpu(cur), 1253 .dst_task = p, 1254 .dst_cpu = task_cpu(p), 1255 }; 1256 1257 if (arg.src_cpu == arg.dst_cpu) 1258 goto out; 1259 1260 /* 1261 * These three tests are all lockless; this is OK since all of them 1262 * will be re-checked with proper locks held further down the line. 1263 */ 1264 if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu)) 1265 goto out; 1266 1267 if (!cpumask_test_cpu(arg.dst_cpu, tsk_cpus_allowed(arg.src_task))) 1268 goto out; 1269 1270 if (!cpumask_test_cpu(arg.src_cpu, tsk_cpus_allowed(arg.dst_task))) 1271 goto out; 1272 1273 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu); 1274 ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg); 1275 1276 out: 1277 return ret; 1278 } 1279 1280 /* 1281 * wait_task_inactive - wait for a thread to unschedule. 1282 * 1283 * If @match_state is nonzero, it's the @p->state value just checked and 1284 * not expected to change. If it changes, i.e. 
@p might have woken up, 1285 * then return zero. When we succeed in waiting for @p to be off its CPU, 1286 * we return a positive number (its total switch count). If a second call 1287 * a short while later returns the same number, the caller can be sure that 1288 * @p has remained unscheduled the whole time. 1289 * 1290 * The caller must ensure that the task *will* unschedule sometime soon, 1291 * else this function might spin for a *long* time. This function can't 1292 * be called with interrupts off, or it may introduce deadlock with 1293 * smp_call_function() if an IPI is sent by the same process we are 1294 * waiting to become inactive. 1295 */ 1296 unsigned long wait_task_inactive(struct task_struct *p, long match_state) 1297 { 1298 unsigned long flags; 1299 int running, queued; 1300 unsigned long ncsw; 1301 struct rq *rq; 1302 1303 for (;;) { 1304 /* 1305 * We do the initial early heuristics without holding 1306 * any task-queue locks at all. We'll only try to get 1307 * the runqueue lock when things look like they will 1308 * work out! 1309 */ 1310 rq = task_rq(p); 1311 1312 /* 1313 * If the task is actively running on another CPU 1314 * still, just relax and busy-wait without holding 1315 * any locks. 1316 * 1317 * NOTE! Since we don't hold any locks, it's not 1318 * even sure that "rq" stays as the right runqueue! 1319 * But we don't care, since "task_running()" will 1320 * return false if the runqueue has changed and p 1321 * is actually now running somewhere else! 1322 */ 1323 while (task_running(rq, p)) { 1324 if (match_state && unlikely(p->state != match_state)) 1325 return 0; 1326 cpu_relax(); 1327 } 1328 1329 /* 1330 * Ok, time to look more closely! We need the rq 1331 * lock now, to be *sure*. If we're wrong, we'll 1332 * just go back and repeat. 1333 */ 1334 rq = task_rq_lock(p, &flags); 1335 trace_sched_wait_task(p); 1336 running = task_running(rq, p); 1337 queued = task_on_rq_queued(p); 1338 ncsw = 0; 1339 if (!match_state || p->state == match_state) 1340 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ 1341 task_rq_unlock(rq, p, &flags); 1342 1343 /* 1344 * If it changed from the expected state, bail out now. 1345 */ 1346 if (unlikely(!ncsw)) 1347 break; 1348 1349 /* 1350 * Was it really running after all now that we 1351 * checked with the proper locks actually held? 1352 * 1353 * Oops. Go back and try again.. 1354 */ 1355 if (unlikely(running)) { 1356 cpu_relax(); 1357 continue; 1358 } 1359 1360 /* 1361 * It's not enough that it's not actively running, 1362 * it must be off the runqueue _entirely_, and not 1363 * preempted! 1364 * 1365 * So if it was still runnable (but just not actively 1366 * running right now), it's preempted, and we should 1367 * yield - it could be a while. 1368 */ 1369 if (unlikely(queued)) { 1370 ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ); 1371 1372 set_current_state(TASK_UNINTERRUPTIBLE); 1373 schedule_hrtimeout(&to, HRTIMER_MODE_REL); 1374 continue; 1375 } 1376 1377 /* 1378 * Ahh, all good. It wasn't running, and it wasn't 1379 * runnable, which means that it will never become 1380 * running in the future either. We're all done! 1381 */ 1382 break; 1383 } 1384 1385 return ncsw; 1386 } 1387 1388 /*** 1389 * kick_process - kick a running thread to enter/exit the kernel 1390 * @p: the to-be-kicked thread 1391 * 1392 * Cause a process which is running on another CPU to enter 1393 * kernel-mode, without any delay. (to get signals handled.) 
1394 * 1395 * NOTE: this function doesn't have to take the runqueue lock, 1396 * because all it wants to ensure is that the remote task enters 1397 * the kernel. If the IPI races and the task has been migrated 1398 * to another CPU then no harm is done and the purpose has been 1399 * achieved as well. 1400 */ 1401 void kick_process(struct task_struct *p) 1402 { 1403 int cpu; 1404 1405 preempt_disable(); 1406 cpu = task_cpu(p); 1407 if ((cpu != smp_processor_id()) && task_curr(p)) 1408 smp_send_reschedule(cpu); 1409 preempt_enable(); 1410 } 1411 EXPORT_SYMBOL_GPL(kick_process); 1412 1413 /* 1414 * ->cpus_allowed is protected by both rq->lock and p->pi_lock 1415 */ 1416 static int select_fallback_rq(int cpu, struct task_struct *p) 1417 { 1418 int nid = cpu_to_node(cpu); 1419 const struct cpumask *nodemask = NULL; 1420 enum { cpuset, possible, fail } state = cpuset; 1421 int dest_cpu; 1422 1423 /* 1424 * If the node that the cpu is on has been offlined, cpu_to_node() 1425 * will return -1. There is no cpu on the node, and we should 1426 * select the cpu on the other node. 1427 */ 1428 if (nid != -1) { 1429 nodemask = cpumask_of_node(nid); 1430 1431 /* Look for allowed, online CPU in same node. */ 1432 for_each_cpu(dest_cpu, nodemask) { 1433 if (!cpu_online(dest_cpu)) 1434 continue; 1435 if (!cpu_active(dest_cpu)) 1436 continue; 1437 if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p))) 1438 return dest_cpu; 1439 } 1440 } 1441 1442 for (;;) { 1443 /* Any allowed, online CPU? */ 1444 for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) { 1445 if (!cpu_online(dest_cpu)) 1446 continue; 1447 if (!cpu_active(dest_cpu)) 1448 continue; 1449 goto out; 1450 } 1451 1452 /* No more Mr. Nice Guy. */ 1453 switch (state) { 1454 case cpuset: 1455 if (IS_ENABLED(CONFIG_CPUSETS)) { 1456 cpuset_cpus_allowed_fallback(p); 1457 state = possible; 1458 break; 1459 } 1460 /* fall-through */ 1461 case possible: 1462 do_set_cpus_allowed(p, cpu_possible_mask); 1463 state = fail; 1464 break; 1465 1466 case fail: 1467 BUG(); 1468 break; 1469 } 1470 } 1471 1472 out: 1473 if (state != cpuset) { 1474 /* 1475 * Don't tell them about moving exiting tasks or 1476 * kernel threads (both mm NULL), since they never 1477 * leave kernel. 1478 */ 1479 if (p->mm && printk_ratelimit()) { 1480 printk_deferred("process %d (%s) no longer affine to cpu%d\n", 1481 task_pid_nr(p), p->comm, cpu); 1482 } 1483 } 1484 1485 return dest_cpu; 1486 } 1487 1488 /* 1489 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable. 1490 */ 1491 static inline 1492 int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) 1493 { 1494 lockdep_assert_held(&p->pi_lock); 1495 1496 if (p->nr_cpus_allowed > 1) 1497 cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags); 1498 1499 /* 1500 * In order not to call set_task_cpu() on a blocking task we need 1501 * to rely on ttwu() to place the task on a valid ->cpus_allowed 1502 * cpu. 1503 * 1504 * Since this is common to all placement strategies, this lives here. 
1505 * 1506 * [ this allows ->select_task() to simply return task_cpu(p) and 1507 * not worry about this generic constraint ] 1508 */ 1509 if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) || 1510 !cpu_online(cpu))) 1511 cpu = select_fallback_rq(task_cpu(p), p); 1512 1513 return cpu; 1514 } 1515 1516 static void update_avg(u64 *avg, u64 sample) 1517 { 1518 s64 diff = sample - *avg; 1519 *avg += diff >> 3; 1520 } 1521 1522 #else 1523 1524 static inline int __set_cpus_allowed_ptr(struct task_struct *p, 1525 const struct cpumask *new_mask, bool check) 1526 { 1527 return set_cpus_allowed_ptr(p, new_mask); 1528 } 1529 1530 #endif /* CONFIG_SMP */ 1531 1532 static void 1533 ttwu_stat(struct task_struct *p, int cpu, int wake_flags) 1534 { 1535 #ifdef CONFIG_SCHEDSTATS 1536 struct rq *rq = this_rq(); 1537 1538 #ifdef CONFIG_SMP 1539 int this_cpu = smp_processor_id(); 1540 1541 if (cpu == this_cpu) { 1542 schedstat_inc(rq, ttwu_local); 1543 schedstat_inc(p, se.statistics.nr_wakeups_local); 1544 } else { 1545 struct sched_domain *sd; 1546 1547 schedstat_inc(p, se.statistics.nr_wakeups_remote); 1548 rcu_read_lock(); 1549 for_each_domain(this_cpu, sd) { 1550 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { 1551 schedstat_inc(sd, ttwu_wake_remote); 1552 break; 1553 } 1554 } 1555 rcu_read_unlock(); 1556 } 1557 1558 if (wake_flags & WF_MIGRATED) 1559 schedstat_inc(p, se.statistics.nr_wakeups_migrate); 1560 1561 #endif /* CONFIG_SMP */ 1562 1563 schedstat_inc(rq, ttwu_count); 1564 schedstat_inc(p, se.statistics.nr_wakeups); 1565 1566 if (wake_flags & WF_SYNC) 1567 schedstat_inc(p, se.statistics.nr_wakeups_sync); 1568 1569 #endif /* CONFIG_SCHEDSTATS */ 1570 } 1571 1572 static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags) 1573 { 1574 activate_task(rq, p, en_flags); 1575 p->on_rq = TASK_ON_RQ_QUEUED; 1576 1577 /* if a worker is waking up, notify workqueue */ 1578 if (p->flags & PF_WQ_WORKER) 1579 wq_worker_waking_up(p, cpu_of(rq)); 1580 } 1581 1582 /* 1583 * Mark the task runnable and perform wakeup-preemption. 1584 */ 1585 static void 1586 ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) 1587 { 1588 check_preempt_curr(rq, p, wake_flags); 1589 p->state = TASK_RUNNING; 1590 trace_sched_wakeup(p); 1591 1592 #ifdef CONFIG_SMP 1593 if (p->sched_class->task_woken) { 1594 /* 1595 * Our task @p is fully woken up and running; so its safe to 1596 * drop the rq->lock, hereafter rq is only used for statistics. 1597 */ 1598 lockdep_unpin_lock(&rq->lock); 1599 p->sched_class->task_woken(rq, p); 1600 lockdep_pin_lock(&rq->lock); 1601 } 1602 1603 if (rq->idle_stamp) { 1604 u64 delta = rq_clock(rq) - rq->idle_stamp; 1605 u64 max = 2*rq->max_idle_balance_cost; 1606 1607 update_avg(&rq->avg_idle, delta); 1608 1609 if (rq->avg_idle > max) 1610 rq->avg_idle = max; 1611 1612 rq->idle_stamp = 0; 1613 } 1614 #endif 1615 } 1616 1617 static void 1618 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags) 1619 { 1620 lockdep_assert_held(&rq->lock); 1621 1622 #ifdef CONFIG_SMP 1623 if (p->sched_contributes_to_load) 1624 rq->nr_uninterruptible--; 1625 #endif 1626 1627 ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING); 1628 ttwu_do_wakeup(rq, p, wake_flags); 1629 } 1630 1631 /* 1632 * Called in case the task @p isn't fully descheduled from its runqueue, 1633 * in this case we must do a remote wakeup. Its a 'light' wakeup though, 1634 * since all we need to do is flip p->state to TASK_RUNNING, since 1635 * the task is still ->on_rq. 
1636 */ 1637 static int ttwu_remote(struct task_struct *p, int wake_flags) 1638 { 1639 struct rq *rq; 1640 int ret = 0; 1641 1642 rq = __task_rq_lock(p); 1643 if (task_on_rq_queued(p)) { 1644 /* check_preempt_curr() may use rq clock */ 1645 update_rq_clock(rq); 1646 ttwu_do_wakeup(rq, p, wake_flags); 1647 ret = 1; 1648 } 1649 __task_rq_unlock(rq); 1650 1651 return ret; 1652 } 1653 1654 #ifdef CONFIG_SMP 1655 void sched_ttwu_pending(void) 1656 { 1657 struct rq *rq = this_rq(); 1658 struct llist_node *llist = llist_del_all(&rq->wake_list); 1659 struct task_struct *p; 1660 unsigned long flags; 1661 1662 if (!llist) 1663 return; 1664 1665 raw_spin_lock_irqsave(&rq->lock, flags); 1666 lockdep_pin_lock(&rq->lock); 1667 1668 while (llist) { 1669 p = llist_entry(llist, struct task_struct, wake_entry); 1670 llist = llist_next(llist); 1671 ttwu_do_activate(rq, p, 0); 1672 } 1673 1674 lockdep_unpin_lock(&rq->lock); 1675 raw_spin_unlock_irqrestore(&rq->lock, flags); 1676 } 1677 1678 void scheduler_ipi(void) 1679 { 1680 /* 1681 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting 1682 * TIF_NEED_RESCHED remotely (for the first time) will also send 1683 * this IPI. 1684 */ 1685 preempt_fold_need_resched(); 1686 1687 if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick()) 1688 return; 1689 1690 /* 1691 * Not all reschedule IPI handlers call irq_enter/irq_exit, since 1692 * traditionally all their work was done from the interrupt return 1693 * path. Now that we actually do some work, we need to make sure 1694 * we do call them. 1695 * 1696 * Some archs already do call them, luckily irq_enter/exit nest 1697 * properly. 1698 * 1699 * Arguably we should visit all archs and update all handlers, 1700 * however a fair share of IPIs are still resched only so this would 1701 * somewhat pessimize the simple resched case. 1702 */ 1703 irq_enter(); 1704 sched_ttwu_pending(); 1705 1706 /* 1707 * Check if someone kicked us for doing the nohz idle load balance. 
1708 */ 1709 if (unlikely(got_nohz_idle_kick())) { 1710 this_rq()->idle_balance = 1; 1711 raise_softirq_irqoff(SCHED_SOFTIRQ); 1712 } 1713 irq_exit(); 1714 } 1715 1716 static void ttwu_queue_remote(struct task_struct *p, int cpu) 1717 { 1718 struct rq *rq = cpu_rq(cpu); 1719 1720 if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) { 1721 if (!set_nr_if_polling(rq->idle)) 1722 smp_send_reschedule(cpu); 1723 else 1724 trace_sched_wake_idle_without_ipi(cpu); 1725 } 1726 } 1727 1728 void wake_up_if_idle(int cpu) 1729 { 1730 struct rq *rq = cpu_rq(cpu); 1731 unsigned long flags; 1732 1733 rcu_read_lock(); 1734 1735 if (!is_idle_task(rcu_dereference(rq->curr))) 1736 goto out; 1737 1738 if (set_nr_if_polling(rq->idle)) { 1739 trace_sched_wake_idle_without_ipi(cpu); 1740 } else { 1741 raw_spin_lock_irqsave(&rq->lock, flags); 1742 if (is_idle_task(rq->curr)) 1743 smp_send_reschedule(cpu); 1744 /* Else cpu is not in idle, do nothing here */ 1745 raw_spin_unlock_irqrestore(&rq->lock, flags); 1746 } 1747 1748 out: 1749 rcu_read_unlock(); 1750 } 1751 1752 bool cpus_share_cache(int this_cpu, int that_cpu) 1753 { 1754 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu); 1755 } 1756 #endif /* CONFIG_SMP */ 1757 1758 static void ttwu_queue(struct task_struct *p, int cpu) 1759 { 1760 struct rq *rq = cpu_rq(cpu); 1761 1762 #if defined(CONFIG_SMP) 1763 if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) { 1764 sched_clock_cpu(cpu); /* sync clocks x-cpu */ 1765 ttwu_queue_remote(p, cpu); 1766 return; 1767 } 1768 #endif 1769 1770 raw_spin_lock(&rq->lock); 1771 lockdep_pin_lock(&rq->lock); 1772 ttwu_do_activate(rq, p, 0); 1773 lockdep_unpin_lock(&rq->lock); 1774 raw_spin_unlock(&rq->lock); 1775 } 1776 1777 /* 1778 * Notes on Program-Order guarantees on SMP systems. 1779 * 1780 * MIGRATION 1781 * 1782 * The basic program-order guarantee on SMP systems is that when a task [t] 1783 * migrates, all its activity on its old cpu [c0] happens-before any subsequent 1784 * execution on its new cpu [c1]. 1785 * 1786 * For migration (of runnable tasks) this is provided by the following means: 1787 * 1788 * A) UNLOCK of the rq(c0)->lock scheduling out task t 1789 * B) migration for t is required to synchronize *both* rq(c0)->lock and 1790 * rq(c1)->lock (if not at the same time, then in that order). 1791 * C) LOCK of the rq(c1)->lock scheduling in task 1792 * 1793 * Transitivity guarantees that B happens after A and C after B. 1794 * Note: we only require RCpc transitivity. 1795 * Note: the cpu doing B need not be c0 or c1 1796 * 1797 * Example: 1798 * 1799 * CPU0 CPU1 CPU2 1800 * 1801 * LOCK rq(0)->lock 1802 * sched-out X 1803 * sched-in Y 1804 * UNLOCK rq(0)->lock 1805 * 1806 * LOCK rq(0)->lock // orders against CPU0 1807 * dequeue X 1808 * UNLOCK rq(0)->lock 1809 * 1810 * LOCK rq(1)->lock 1811 * enqueue X 1812 * UNLOCK rq(1)->lock 1813 * 1814 * LOCK rq(1)->lock // orders against CPU2 1815 * sched-out Z 1816 * sched-in X 1817 * UNLOCK rq(1)->lock 1818 * 1819 * 1820 * BLOCKING -- aka. SLEEP + WAKEUP 1821 * 1822 * For blocking we (obviously) need to provide the same guarantee as for 1823 * migration. However the means are completely different as there is no lock 1824 * chain to provide order. 
Instead we do: 1825 * 1826 * 1) smp_store_release(X->on_cpu, 0) 1827 * 2) smp_cond_acquire(!X->on_cpu) 1828 * 1829 * Example: 1830 * 1831 * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule) 1832 * 1833 * LOCK rq(0)->lock LOCK X->pi_lock 1834 * dequeue X 1835 * sched-out X 1836 * smp_store_release(X->on_cpu, 0); 1837 * 1838 * smp_cond_acquire(!X->on_cpu); 1839 * X->state = WAKING 1840 * set_task_cpu(X,2) 1841 * 1842 * LOCK rq(2)->lock 1843 * enqueue X 1844 * X->state = RUNNING 1845 * UNLOCK rq(2)->lock 1846 * 1847 * LOCK rq(2)->lock // orders against CPU1 1848 * sched-out Z 1849 * sched-in X 1850 * UNLOCK rq(2)->lock 1851 * 1852 * UNLOCK X->pi_lock 1853 * UNLOCK rq(0)->lock 1854 * 1855 * 1856 * However; for wakeups there is a second guarantee we must provide, namely we 1857 * must observe the state that lead to our wakeup. That is, not only must our 1858 * task observe its own prior state, it must also observe the stores prior to 1859 * its wakeup. 1860 * 1861 * This means that any means of doing remote wakeups must order the CPU doing 1862 * the wakeup against the CPU the task is going to end up running on. This, 1863 * however, is already required for the regular Program-Order guarantee above, 1864 * since the waking CPU is the one issueing the ACQUIRE (smp_cond_acquire). 1865 * 1866 */ 1867 1868 /** 1869 * try_to_wake_up - wake up a thread 1870 * @p: the thread to be awakened 1871 * @state: the mask of task states that can be woken 1872 * @wake_flags: wake modifier flags (WF_*) 1873 * 1874 * Put it on the run-queue if it's not already there. The "current" 1875 * thread is always on the run-queue (except when the actual 1876 * re-schedule is in progress), and as such you're allowed to do 1877 * the simpler "current->state = TASK_RUNNING" to mark yourself 1878 * runnable without the overhead of this. 1879 * 1880 * Return: %true if @p was woken up, %false if it was already running. 1881 * or @state didn't match @p's state. 1882 */ 1883 static int 1884 try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) 1885 { 1886 unsigned long flags; 1887 int cpu, success = 0; 1888 1889 /* 1890 * If we are going to wake up a thread waiting for CONDITION we 1891 * need to ensure that CONDITION=1 done by the caller can not be 1892 * reordered with p->state check below. This pairs with mb() in 1893 * set_current_state() the waiting thread does. 1894 */ 1895 smp_mb__before_spinlock(); 1896 raw_spin_lock_irqsave(&p->pi_lock, flags); 1897 if (!(p->state & state)) 1898 goto out; 1899 1900 trace_sched_waking(p); 1901 1902 success = 1; /* we're going to change ->state */ 1903 cpu = task_cpu(p); 1904 1905 if (p->on_rq && ttwu_remote(p, wake_flags)) 1906 goto stat; 1907 1908 #ifdef CONFIG_SMP 1909 /* 1910 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be 1911 * possible to, falsely, observe p->on_cpu == 0. 1912 * 1913 * One must be running (->on_cpu == 1) in order to remove oneself 1914 * from the runqueue. 1915 * 1916 * [S] ->on_cpu = 1; [L] ->on_rq 1917 * UNLOCK rq->lock 1918 * RMB 1919 * LOCK rq->lock 1920 * [S] ->on_rq = 0; [L] ->on_cpu 1921 * 1922 * Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock 1923 * from the consecutive calls to schedule(); the first switching to our 1924 * task, the second putting it to sleep. 1925 */ 1926 smp_rmb(); 1927 1928 /* 1929 * If the owning (remote) cpu is still in the middle of schedule() with 1930 * this task as prev, wait until its done referencing the task. 
1931 * 1932 * Pairs with the smp_store_release() in finish_lock_switch(). 1933 * 1934 * This ensures that tasks getting woken will be fully ordered against 1935 * their previous state and preserve Program Order. 1936 */ 1937 smp_cond_acquire(!p->on_cpu); 1938 1939 p->sched_contributes_to_load = !!task_contributes_to_load(p); 1940 p->state = TASK_WAKING; 1941 1942 if (p->sched_class->task_waking) 1943 p->sched_class->task_waking(p); 1944 1945 cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags); 1946 if (task_cpu(p) != cpu) { 1947 wake_flags |= WF_MIGRATED; 1948 set_task_cpu(p, cpu); 1949 } 1950 #endif /* CONFIG_SMP */ 1951 1952 ttwu_queue(p, cpu); 1953 stat: 1954 if (schedstat_enabled()) 1955 ttwu_stat(p, cpu, wake_flags); 1956 out: 1957 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 1958 1959 return success; 1960 } 1961 1962 /** 1963 * try_to_wake_up_local - try to wake up a local task with rq lock held 1964 * @p: the thread to be awakened 1965 * 1966 * Put @p on the run-queue if it's not already there. The caller must 1967 * ensure that this_rq() is locked, @p is bound to this_rq() and not 1968 * the current task. 1969 */ 1970 static void try_to_wake_up_local(struct task_struct *p) 1971 { 1972 struct rq *rq = task_rq(p); 1973 1974 if (WARN_ON_ONCE(rq != this_rq()) || 1975 WARN_ON_ONCE(p == current)) 1976 return; 1977 1978 lockdep_assert_held(&rq->lock); 1979 1980 if (!raw_spin_trylock(&p->pi_lock)) { 1981 /* 1982 * This is OK, because current is on_cpu, which avoids it being 1983 * picked for load-balance and preemption/IRQs are still 1984 * disabled avoiding further scheduler activity on it and we've 1985 * not yet picked a replacement task. 1986 */ 1987 lockdep_unpin_lock(&rq->lock); 1988 raw_spin_unlock(&rq->lock); 1989 raw_spin_lock(&p->pi_lock); 1990 raw_spin_lock(&rq->lock); 1991 lockdep_pin_lock(&rq->lock); 1992 } 1993 1994 if (!(p->state & TASK_NORMAL)) 1995 goto out; 1996 1997 trace_sched_waking(p); 1998 1999 if (!task_on_rq_queued(p)) 2000 ttwu_activate(rq, p, ENQUEUE_WAKEUP); 2001 2002 ttwu_do_wakeup(rq, p, 0); 2003 if (schedstat_enabled()) 2004 ttwu_stat(p, smp_processor_id(), 0); 2005 out: 2006 raw_spin_unlock(&p->pi_lock); 2007 } 2008 2009 /** 2010 * wake_up_process - Wake up a specific process 2011 * @p: The process to be woken up. 2012 * 2013 * Attempt to wake up the nominated process and move it to the set of runnable 2014 * processes. 2015 * 2016 * Return: 1 if the process was woken up, 0 if it was already running. 2017 * 2018 * It may be assumed that this function implies a write memory barrier before 2019 * changing the task state if and only if any tasks are woken up. 2020 */ 2021 int wake_up_process(struct task_struct *p) 2022 { 2023 return try_to_wake_up(p, TASK_NORMAL, 0); 2024 } 2025 EXPORT_SYMBOL(wake_up_process); 2026 2027 int wake_up_state(struct task_struct *p, unsigned int state) 2028 { 2029 return try_to_wake_up(p, state, 0); 2030 } 2031 2032 /* 2033 * This function clears the sched_dl_entity static params. 2034 */ 2035 void __dl_clear_params(struct task_struct *p) 2036 { 2037 struct sched_dl_entity *dl_se = &p->dl; 2038 2039 dl_se->dl_runtime = 0; 2040 dl_se->dl_deadline = 0; 2041 dl_se->dl_period = 0; 2042 dl_se->flags = 0; 2043 dl_se->dl_bw = 0; 2044 2045 dl_se->dl_throttled = 0; 2046 dl_se->dl_yielded = 0; 2047 } 2048 2049 /* 2050 * Perform scheduler related setup for a newly forked process p. 2051 * p is forked by current. 
2052 * 2053 * __sched_fork() is basic setup used by init_idle() too: 2054 */ 2055 static void __sched_fork(unsigned long clone_flags, struct task_struct *p) 2056 { 2057 p->on_rq = 0; 2058 2059 p->se.on_rq = 0; 2060 p->se.exec_start = 0; 2061 p->se.sum_exec_runtime = 0; 2062 p->se.prev_sum_exec_runtime = 0; 2063 p->se.nr_migrations = 0; 2064 p->se.vruntime = 0; 2065 INIT_LIST_HEAD(&p->se.group_node); 2066 2067 #ifdef CONFIG_FAIR_GROUP_SCHED 2068 p->se.cfs_rq = NULL; 2069 #endif 2070 2071 #ifdef CONFIG_SCHEDSTATS 2072 /* Even if schedstat is disabled, there should not be garbage */ 2073 memset(&p->se.statistics, 0, sizeof(p->se.statistics)); 2074 #endif 2075 2076 RB_CLEAR_NODE(&p->dl.rb_node); 2077 init_dl_task_timer(&p->dl); 2078 __dl_clear_params(p); 2079 2080 INIT_LIST_HEAD(&p->rt.run_list); 2081 p->rt.timeout = 0; 2082 p->rt.time_slice = sched_rr_timeslice; 2083 p->rt.on_rq = 0; 2084 p->rt.on_list = 0; 2085 2086 #ifdef CONFIG_PREEMPT_NOTIFIERS 2087 INIT_HLIST_HEAD(&p->preempt_notifiers); 2088 #endif 2089 2090 #ifdef CONFIG_NUMA_BALANCING 2091 if (p->mm && atomic_read(&p->mm->mm_users) == 1) { 2092 p->mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay); 2093 p->mm->numa_scan_seq = 0; 2094 } 2095 2096 if (clone_flags & CLONE_VM) 2097 p->numa_preferred_nid = current->numa_preferred_nid; 2098 else 2099 p->numa_preferred_nid = -1; 2100 2101 p->node_stamp = 0ULL; 2102 p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0; 2103 p->numa_scan_period = sysctl_numa_balancing_scan_delay; 2104 p->numa_work.next = &p->numa_work; 2105 p->numa_faults = NULL; 2106 p->last_task_numa_placement = 0; 2107 p->last_sum_exec_runtime = 0; 2108 2109 p->numa_group = NULL; 2110 #endif /* CONFIG_NUMA_BALANCING */ 2111 } 2112 2113 DEFINE_STATIC_KEY_FALSE(sched_numa_balancing); 2114 2115 #ifdef CONFIG_NUMA_BALANCING 2116 2117 void set_numabalancing_state(bool enabled) 2118 { 2119 if (enabled) 2120 static_branch_enable(&sched_numa_balancing); 2121 else 2122 static_branch_disable(&sched_numa_balancing); 2123 } 2124 2125 #ifdef CONFIG_PROC_SYSCTL 2126 int sysctl_numa_balancing(struct ctl_table *table, int write, 2127 void __user *buffer, size_t *lenp, loff_t *ppos) 2128 { 2129 struct ctl_table t; 2130 int err; 2131 int state = static_branch_likely(&sched_numa_balancing); 2132 2133 if (write && !capable(CAP_SYS_ADMIN)) 2134 return -EPERM; 2135 2136 t = *table; 2137 t.data = &state; 2138 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 2139 if (err < 0) 2140 return err; 2141 if (write) 2142 set_numabalancing_state(state); 2143 return err; 2144 } 2145 #endif 2146 #endif 2147 2148 DEFINE_STATIC_KEY_FALSE(sched_schedstats); 2149 2150 #ifdef CONFIG_SCHEDSTATS 2151 static void set_schedstats(bool enabled) 2152 { 2153 if (enabled) 2154 static_branch_enable(&sched_schedstats); 2155 else 2156 static_branch_disable(&sched_schedstats); 2157 } 2158 2159 void force_schedstat_enabled(void) 2160 { 2161 if (!schedstat_enabled()) { 2162 pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n"); 2163 static_branch_enable(&sched_schedstats); 2164 } 2165 } 2166 2167 static int __init setup_schedstats(char *str) 2168 { 2169 int ret = 0; 2170 if (!str) 2171 goto out; 2172 2173 if (!strcmp(str, "enable")) { 2174 set_schedstats(true); 2175 ret = 1; 2176 } else if (!strcmp(str, "disable")) { 2177 set_schedstats(false); 2178 ret = 1; 2179 } 2180 out: 2181 if (!ret) 2182 pr_warn("Unable to parse schedstats=\n"); 2183 2184 return ret; 2185 } 2186 __setup("schedstats=", 
setup_schedstats); 2187 2188 #ifdef CONFIG_PROC_SYSCTL 2189 int sysctl_schedstats(struct ctl_table *table, int write, 2190 void __user *buffer, size_t *lenp, loff_t *ppos) 2191 { 2192 struct ctl_table t; 2193 int err; 2194 int state = static_branch_likely(&sched_schedstats); 2195 2196 if (write && !capable(CAP_SYS_ADMIN)) 2197 return -EPERM; 2198 2199 t = *table; 2200 t.data = &state; 2201 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 2202 if (err < 0) 2203 return err; 2204 if (write) 2205 set_schedstats(state); 2206 return err; 2207 } 2208 #endif 2209 #endif 2210 2211 /* 2212 * fork()/clone()-time setup: 2213 */ 2214 int sched_fork(unsigned long clone_flags, struct task_struct *p) 2215 { 2216 unsigned long flags; 2217 int cpu = get_cpu(); 2218 2219 __sched_fork(clone_flags, p); 2220 /* 2221 * We mark the process as running here. This guarantees that 2222 * nobody will actually run it, and a signal or other external 2223 * event cannot wake it up and insert it on the runqueue either. 2224 */ 2225 p->state = TASK_RUNNING; 2226 2227 /* 2228 * Make sure we do not leak PI boosting priority to the child. 2229 */ 2230 p->prio = current->normal_prio; 2231 2232 /* 2233 * Revert to default priority/policy on fork if requested. 2234 */ 2235 if (unlikely(p->sched_reset_on_fork)) { 2236 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { 2237 p->policy = SCHED_NORMAL; 2238 p->static_prio = NICE_TO_PRIO(0); 2239 p->rt_priority = 0; 2240 } else if (PRIO_TO_NICE(p->static_prio) < 0) 2241 p->static_prio = NICE_TO_PRIO(0); 2242 2243 p->prio = p->normal_prio = __normal_prio(p); 2244 set_load_weight(p); 2245 2246 /* 2247 * We don't need the reset flag anymore after the fork. It has 2248 * fulfilled its duty: 2249 */ 2250 p->sched_reset_on_fork = 0; 2251 } 2252 2253 if (dl_prio(p->prio)) { 2254 put_cpu(); 2255 return -EAGAIN; 2256 } else if (rt_prio(p->prio)) { 2257 p->sched_class = &rt_sched_class; 2258 } else { 2259 p->sched_class = &fair_sched_class; 2260 } 2261 2262 if (p->sched_class->task_fork) 2263 p->sched_class->task_fork(p); 2264 2265 /* 2266 * The child is not yet in the pid-hash so no cgroup attach races, 2267 * and the cgroup is pinned to this child due to cgroup_fork() 2268 * is ran before sched_fork(). 2269 * 2270 * Silence PROVE_RCU. 2271 */ 2272 raw_spin_lock_irqsave(&p->pi_lock, flags); 2273 set_task_cpu(p, cpu); 2274 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 2275 2276 #ifdef CONFIG_SCHED_INFO 2277 if (likely(sched_info_on())) 2278 memset(&p->sched_info, 0, sizeof(p->sched_info)); 2279 #endif 2280 #if defined(CONFIG_SMP) 2281 p->on_cpu = 0; 2282 #endif 2283 init_task_preempt_count(p); 2284 #ifdef CONFIG_SMP 2285 plist_node_init(&p->pushable_tasks, MAX_PRIO); 2286 RB_CLEAR_NODE(&p->pushable_dl_tasks); 2287 #endif 2288 2289 put_cpu(); 2290 return 0; 2291 } 2292 2293 unsigned long to_ratio(u64 period, u64 runtime) 2294 { 2295 if (runtime == RUNTIME_INF) 2296 return 1ULL << 20; 2297 2298 /* 2299 * Doing this here saves a lot of checks in all 2300 * the calling paths, and returning zero seems 2301 * safe for them anyway. 
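 *
 * As a quick worked sketch of the fixed-point result: runtime = 10ms and
 * period = 100ms gives (10000000ULL << 20) / 100000000 ~= 104857, i.e.
 * roughly 0.1 * (1 << 20), so the returned bandwidth is a utilization
 * scaled by 2^20.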
2302 */
2303 if (period == 0)
2304 return 0;
2305
2306 return div64_u64(runtime << 20, period);
2307 }
2308
2309 #ifdef CONFIG_SMP
2310 inline struct dl_bw *dl_bw_of(int i)
2311 {
2312 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
2313 "sched RCU must be held");
2314 return &cpu_rq(i)->rd->dl_bw;
2315 }
2316
2317 static inline int dl_bw_cpus(int i)
2318 {
2319 struct root_domain *rd = cpu_rq(i)->rd;
2320 int cpus = 0;
2321
2322 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
2323 "sched RCU must be held");
2324 for_each_cpu_and(i, rd->span, cpu_active_mask)
2325 cpus++;
2326
2327 return cpus;
2328 }
2329 #else
2330 inline struct dl_bw *dl_bw_of(int i)
2331 {
2332 return &cpu_rq(i)->dl.dl_bw;
2333 }
2334
2335 static inline int dl_bw_cpus(int i)
2336 {
2337 return 1;
2338 }
2339 #endif
2340
2341 /*
2342 * We must be sure that accepting a new task (or allowing changing the
2343 * parameters of an existing one) is consistent with the bandwidth
2344 * constraints. If yes, this function also accordingly updates the currently
2345 * allocated bandwidth to reflect the new situation.
2346 *
2347 * This function is called while holding p's rq->lock.
2348 *
2349 * XXX we should delay bw change until the task's 0-lag point, see
2350 * __setparam_dl().
2351 */
2352 static int dl_overflow(struct task_struct *p, int policy,
2353 const struct sched_attr *attr)
2354 {
2355
2356 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
2357 u64 period = attr->sched_period ?: attr->sched_deadline;
2358 u64 runtime = attr->sched_runtime;
2359 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
2360 int cpus, err = -1;
2361
2362 if (new_bw == p->dl.dl_bw)
2363 return 0;
2364
2365 /*
2366 * Whether a task enters, leaves, or stays -deadline but changes
2367 * its parameters, we may need to update the total allocated
2368 * bandwidth of the container accordingly.
2369 */
2370 raw_spin_lock(&dl_b->lock);
2371 cpus = dl_bw_cpus(task_cpu(p));
2372 if (dl_policy(policy) && !task_has_dl_policy(p) &&
2373 !__dl_overflow(dl_b, cpus, 0, new_bw)) {
2374 __dl_add(dl_b, new_bw);
2375 err = 0;
2376 } else if (dl_policy(policy) && task_has_dl_policy(p) &&
2377 !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
2378 __dl_clear(dl_b, p->dl.dl_bw);
2379 __dl_add(dl_b, new_bw);
2380 err = 0;
2381 } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
2382 __dl_clear(dl_b, p->dl.dl_bw);
2383 err = 0;
2384 }
2385 raw_spin_unlock(&dl_b->lock);
2386
2387 return err;
2388 }
2389
2390 extern void init_dl_bw(struct dl_bw *dl_b);
2391
2392 /*
2393 * wake_up_new_task - wake up a newly created task for the first time.
2394 *
2395 * This function will do some initial scheduler statistics housekeeping
2396 * that must be done for every newly created context, then put the task
2397 * on the runqueue and wake it.
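 *
 * As a rough sketch of the surrounding fork path (illustrative only, the
 * callers live in kernel/fork.c):
 *
 *	copy_process()
 *	  sched_fork()		// child set up, TASK_RUNNING, not on any rq
 *	_do_fork()
 *	  wake_up_new_task()	// fork-balance, activate, maybe preempt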
2398 */ 2399 void wake_up_new_task(struct task_struct *p) 2400 { 2401 unsigned long flags; 2402 struct rq *rq; 2403 2404 raw_spin_lock_irqsave(&p->pi_lock, flags); 2405 /* Initialize new task's runnable average */ 2406 init_entity_runnable_average(&p->se); 2407 #ifdef CONFIG_SMP 2408 /* 2409 * Fork balancing, do it here and not earlier because: 2410 * - cpus_allowed can change in the fork path 2411 * - any previously selected cpu might disappear through hotplug 2412 */ 2413 set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0)); 2414 #endif 2415 2416 rq = __task_rq_lock(p); 2417 activate_task(rq, p, 0); 2418 p->on_rq = TASK_ON_RQ_QUEUED; 2419 trace_sched_wakeup_new(p); 2420 check_preempt_curr(rq, p, WF_FORK); 2421 #ifdef CONFIG_SMP 2422 if (p->sched_class->task_woken) { 2423 /* 2424 * Nothing relies on rq->lock after this, so its fine to 2425 * drop it. 2426 */ 2427 lockdep_unpin_lock(&rq->lock); 2428 p->sched_class->task_woken(rq, p); 2429 lockdep_pin_lock(&rq->lock); 2430 } 2431 #endif 2432 task_rq_unlock(rq, p, &flags); 2433 } 2434 2435 #ifdef CONFIG_PREEMPT_NOTIFIERS 2436 2437 static struct static_key preempt_notifier_key = STATIC_KEY_INIT_FALSE; 2438 2439 void preempt_notifier_inc(void) 2440 { 2441 static_key_slow_inc(&preempt_notifier_key); 2442 } 2443 EXPORT_SYMBOL_GPL(preempt_notifier_inc); 2444 2445 void preempt_notifier_dec(void) 2446 { 2447 static_key_slow_dec(&preempt_notifier_key); 2448 } 2449 EXPORT_SYMBOL_GPL(preempt_notifier_dec); 2450 2451 /** 2452 * preempt_notifier_register - tell me when current is being preempted & rescheduled 2453 * @notifier: notifier struct to register 2454 */ 2455 void preempt_notifier_register(struct preempt_notifier *notifier) 2456 { 2457 if (!static_key_false(&preempt_notifier_key)) 2458 WARN(1, "registering preempt_notifier while notifiers disabled\n"); 2459 2460 hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); 2461 } 2462 EXPORT_SYMBOL_GPL(preempt_notifier_register); 2463 2464 /** 2465 * preempt_notifier_unregister - no longer interested in preemption notifications 2466 * @notifier: notifier struct to unregister 2467 * 2468 * This is *not* safe to call from within a preemption notifier. 
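 *
 * A minimal usage sketch (illustrative; the "my_*" names are made up, and
 * preempt_ops / preempt_notifier_init() are defined in <linux/preempt.h>):
 *
 *	static void my_sched_in(struct preempt_notifier *pn, int cpu) { }
 *	static void my_sched_out(struct preempt_notifier *pn,
 *				 struct task_struct *next) { }
 *	static struct preempt_ops my_ops = {
 *		.sched_in	= my_sched_in,
 *		.sched_out	= my_sched_out,
 *	};
 *
 *	preempt_notifier_inc();
 *	preempt_notifier_init(&my_notifier, &my_ops);
 *	preempt_notifier_register(&my_notifier);	// registers for current
 *	...
 *	preempt_notifier_unregister(&my_notifier);
 *	preempt_notifier_dec();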
2469 */ 2470 void preempt_notifier_unregister(struct preempt_notifier *notifier) 2471 { 2472 hlist_del(¬ifier->link); 2473 } 2474 EXPORT_SYMBOL_GPL(preempt_notifier_unregister); 2475 2476 static void __fire_sched_in_preempt_notifiers(struct task_struct *curr) 2477 { 2478 struct preempt_notifier *notifier; 2479 2480 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) 2481 notifier->ops->sched_in(notifier, raw_smp_processor_id()); 2482 } 2483 2484 static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) 2485 { 2486 if (static_key_false(&preempt_notifier_key)) 2487 __fire_sched_in_preempt_notifiers(curr); 2488 } 2489 2490 static void 2491 __fire_sched_out_preempt_notifiers(struct task_struct *curr, 2492 struct task_struct *next) 2493 { 2494 struct preempt_notifier *notifier; 2495 2496 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) 2497 notifier->ops->sched_out(notifier, next); 2498 } 2499 2500 static __always_inline void 2501 fire_sched_out_preempt_notifiers(struct task_struct *curr, 2502 struct task_struct *next) 2503 { 2504 if (static_key_false(&preempt_notifier_key)) 2505 __fire_sched_out_preempt_notifiers(curr, next); 2506 } 2507 2508 #else /* !CONFIG_PREEMPT_NOTIFIERS */ 2509 2510 static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) 2511 { 2512 } 2513 2514 static inline void 2515 fire_sched_out_preempt_notifiers(struct task_struct *curr, 2516 struct task_struct *next) 2517 { 2518 } 2519 2520 #endif /* CONFIG_PREEMPT_NOTIFIERS */ 2521 2522 /** 2523 * prepare_task_switch - prepare to switch tasks 2524 * @rq: the runqueue preparing to switch 2525 * @prev: the current task that is being switched out 2526 * @next: the task we are going to switch to. 2527 * 2528 * This is called with the rq lock held and interrupts off. It must 2529 * be paired with a subsequent finish_task_switch after the context 2530 * switch. 2531 * 2532 * prepare_task_switch sets up locking and calls architecture specific 2533 * hooks. 2534 */ 2535 static inline void 2536 prepare_task_switch(struct rq *rq, struct task_struct *prev, 2537 struct task_struct *next) 2538 { 2539 sched_info_switch(rq, prev, next); 2540 perf_event_task_sched_out(prev, next); 2541 fire_sched_out_preempt_notifiers(prev, next); 2542 prepare_lock_switch(rq, next); 2543 prepare_arch_switch(next); 2544 } 2545 2546 /** 2547 * finish_task_switch - clean up after a task-switch 2548 * @prev: the thread we just switched away from. 2549 * 2550 * finish_task_switch must be called after the context switch, paired 2551 * with a prepare_task_switch call before the context switch. 2552 * finish_task_switch will reconcile locking set up by prepare_task_switch, 2553 * and do any other architecture-specific cleanup actions. 2554 * 2555 * Note that we may have delayed dropping an mm in context_switch(). If 2556 * so, we finish that here outside of the runqueue lock. (Doing it 2557 * with the lock held can cause deadlocks; see schedule() for 2558 * details.) 2559 * 2560 * The context switch have flipped the stack from under us and restored the 2561 * local variables which were saved when this task called schedule() in the 2562 * past. prev == current is still correct but we need to recalculate this_rq 2563 * because prev may have moved to another CPU. 
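 *
 * A rough sketch of the pairing described above (not a literal trace):
 *
 *	context_switch(rq, prev, next)
 *	  prepare_task_switch(rq, prev, next)
 *	  switch_to(prev, next, prev)	// stack and registers flip here
 *	  finish_task_switch(prev)	// now running as next, on next's stack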
2564 */
2565 static struct rq *finish_task_switch(struct task_struct *prev)
2566 __releases(rq->lock)
2567 {
2568 struct rq *rq = this_rq();
2569 struct mm_struct *mm = rq->prev_mm;
2570 long prev_state;
2571
2572 /*
2573 * The previous task will have left us with a preempt_count of 2
2574 * because it left us after:
2575 *
2576 * schedule()
2577 * preempt_disable(); // 1
2578 * __schedule()
2579 * raw_spin_lock_irq(&rq->lock) // 2
2580 *
2581 * Also, see FORK_PREEMPT_COUNT.
2582 */
2583 if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
2584 "corrupted preempt_count: %s/%d/0x%x\n",
2585 current->comm, current->pid, preempt_count()))
2586 preempt_count_set(FORK_PREEMPT_COUNT);
2587
2588 rq->prev_mm = NULL;
2589
2590 /*
2591 * A task struct has one reference for its use as "current".
2592 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
2593 * schedule one last time. The schedule call will never return, and
2594 * the scheduled task must drop that reference.
2595 *
2596 * We must observe prev->state before clearing prev->on_cpu (in
2597 * finish_lock_switch), otherwise a concurrent wakeup can get prev
2598 * running on another CPU and we could race with its RUNNING -> DEAD
2599 * transition, resulting in a double drop.
2600 */
2601 prev_state = prev->state;
2602 vtime_task_switch(prev);
2603 perf_event_task_sched_in(prev, current);
2604 finish_lock_switch(rq, prev);
2605 finish_arch_post_lock_switch();
2606
2607 fire_sched_in_preempt_notifiers(current);
2608 if (mm)
2609 mmdrop(mm);
2610 if (unlikely(prev_state == TASK_DEAD)) {
2611 if (prev->sched_class->task_dead)
2612 prev->sched_class->task_dead(prev);
2613
2614 /*
2615 * Remove function-return probe instances associated with this
2616 * task and put them back on the free list.
2617 */
2618 kprobe_flush_task(prev);
2619 put_task_struct(prev);
2620 }
2621
2622 tick_nohz_task_switch();
2623 return rq;
2624 }
2625
2626 #ifdef CONFIG_SMP
2627
2628 /* rq->lock is NOT held, but preemption is disabled */
2629 static void __balance_callback(struct rq *rq)
2630 {
2631 struct callback_head *head, *next;
2632 void (*func)(struct rq *rq);
2633 unsigned long flags;
2634
2635 raw_spin_lock_irqsave(&rq->lock, flags);
2636 head = rq->balance_callback;
2637 rq->balance_callback = NULL;
2638 while (head) {
2639 func = (void (*)(struct rq *))head->func;
2640 next = head->next;
2641 head->next = NULL;
2642 head = next;
2643
2644 func(rq);
2645 }
2646 raw_spin_unlock_irqrestore(&rq->lock, flags);
2647 }
2648
2649 static inline void balance_callback(struct rq *rq)
2650 {
2651 if (unlikely(rq->balance_callback))
2652 __balance_callback(rq);
2653 }
2654
2655 #else
2656
2657 static inline void balance_callback(struct rq *rq)
2658 {
2659 }
2660
2661 #endif
2662
2663 /**
2664 * schedule_tail - first thing a freshly forked thread must call.
2665 * @prev: the thread we just switched away from.
2666 */
2667 asmlinkage __visible void schedule_tail(struct task_struct *prev)
2668 __releases(rq->lock)
2669 {
2670 struct rq *rq;
2671
2672 /*
2673 * New tasks start with FORK_PREEMPT_COUNT, see there and
2674 * finish_task_switch() for details.
2675 *
2676 * finish_task_switch() will drop rq->lock() and lower preempt_count
2677 * and the preempt_enable() will end up enabling preemption (on
2678 * PREEMPT_COUNT kernels).
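 *
 * (Sketch of the call site, which is architecture specific: the new task
 * first runs the arch's ret_from_fork path, which calls schedule_tail(prev)
 * before jumping to the new thread function or returning to user space.)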
2679 */ 2680 2681 rq = finish_task_switch(prev); 2682 balance_callback(rq); 2683 preempt_enable(); 2684 2685 if (current->set_child_tid) 2686 put_user(task_pid_vnr(current), current->set_child_tid); 2687 } 2688 2689 /* 2690 * context_switch - switch to the new MM and the new thread's register state. 2691 */ 2692 static inline struct rq * 2693 context_switch(struct rq *rq, struct task_struct *prev, 2694 struct task_struct *next) 2695 { 2696 struct mm_struct *mm, *oldmm; 2697 2698 prepare_task_switch(rq, prev, next); 2699 2700 mm = next->mm; 2701 oldmm = prev->active_mm; 2702 /* 2703 * For paravirt, this is coupled with an exit in switch_to to 2704 * combine the page table reload and the switch backend into 2705 * one hypercall. 2706 */ 2707 arch_start_context_switch(prev); 2708 2709 if (!mm) { 2710 next->active_mm = oldmm; 2711 atomic_inc(&oldmm->mm_count); 2712 enter_lazy_tlb(oldmm, next); 2713 } else 2714 switch_mm(oldmm, mm, next); 2715 2716 if (!prev->mm) { 2717 prev->active_mm = NULL; 2718 rq->prev_mm = oldmm; 2719 } 2720 /* 2721 * Since the runqueue lock will be released by the next 2722 * task (which is an invalid locking op but in the case 2723 * of the scheduler it's an obvious special-case), so we 2724 * do an early lockdep release here: 2725 */ 2726 lockdep_unpin_lock(&rq->lock); 2727 spin_release(&rq->lock.dep_map, 1, _THIS_IP_); 2728 2729 /* Here we just switch the register state and the stack. */ 2730 switch_to(prev, next, prev); 2731 barrier(); 2732 2733 return finish_task_switch(prev); 2734 } 2735 2736 /* 2737 * nr_running and nr_context_switches: 2738 * 2739 * externally visible scheduler statistics: current number of runnable 2740 * threads, total number of context switches performed since bootup. 2741 */ 2742 unsigned long nr_running(void) 2743 { 2744 unsigned long i, sum = 0; 2745 2746 for_each_online_cpu(i) 2747 sum += cpu_rq(i)->nr_running; 2748 2749 return sum; 2750 } 2751 2752 /* 2753 * Check if only the current task is running on the cpu. 2754 * 2755 * Caution: this function does not check that the caller has disabled 2756 * preemption, thus the result might have a time-of-check-to-time-of-use 2757 * race. The caller is responsible to use it correctly, for example: 2758 * 2759 * - from a non-preemptable section (of course) 2760 * 2761 * - from a thread that is bound to a single CPU 2762 * 2763 * - in a loop with very short iterations (e.g. a polling loop) 2764 */ 2765 bool single_task_running(void) 2766 { 2767 return raw_rq()->nr_running == 1; 2768 } 2769 EXPORT_SYMBOL(single_task_running); 2770 2771 unsigned long long nr_context_switches(void) 2772 { 2773 int i; 2774 unsigned long long sum = 0; 2775 2776 for_each_possible_cpu(i) 2777 sum += cpu_rq(i)->nr_switches; 2778 2779 return sum; 2780 } 2781 2782 unsigned long nr_iowait(void) 2783 { 2784 unsigned long i, sum = 0; 2785 2786 for_each_possible_cpu(i) 2787 sum += atomic_read(&cpu_rq(i)->nr_iowait); 2788 2789 return sum; 2790 } 2791 2792 unsigned long nr_iowait_cpu(int cpu) 2793 { 2794 struct rq *this = cpu_rq(cpu); 2795 return atomic_read(&this->nr_iowait); 2796 } 2797 2798 void get_iowait_load(unsigned long *nr_waiters, unsigned long *load) 2799 { 2800 struct rq *rq = this_rq(); 2801 *nr_waiters = atomic_read(&rq->nr_iowait); 2802 *load = rq->load.weight; 2803 } 2804 2805 #ifdef CONFIG_SMP 2806 2807 /* 2808 * sched_exec - execve() is a valuable balancing opportunity, because at 2809 * this point the task has the smallest effective memory and cache footprint. 
2810 */ 2811 void sched_exec(void) 2812 { 2813 struct task_struct *p = current; 2814 unsigned long flags; 2815 int dest_cpu; 2816 2817 raw_spin_lock_irqsave(&p->pi_lock, flags); 2818 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0); 2819 if (dest_cpu == smp_processor_id()) 2820 goto unlock; 2821 2822 if (likely(cpu_active(dest_cpu))) { 2823 struct migration_arg arg = { p, dest_cpu }; 2824 2825 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 2826 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); 2827 return; 2828 } 2829 unlock: 2830 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 2831 } 2832 2833 #endif 2834 2835 DEFINE_PER_CPU(struct kernel_stat, kstat); 2836 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat); 2837 2838 EXPORT_PER_CPU_SYMBOL(kstat); 2839 EXPORT_PER_CPU_SYMBOL(kernel_cpustat); 2840 2841 /* 2842 * Return accounted runtime for the task. 2843 * In case the task is currently running, return the runtime plus current's 2844 * pending runtime that have not been accounted yet. 2845 */ 2846 unsigned long long task_sched_runtime(struct task_struct *p) 2847 { 2848 unsigned long flags; 2849 struct rq *rq; 2850 u64 ns; 2851 2852 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP) 2853 /* 2854 * 64-bit doesn't need locks to atomically read a 64bit value. 2855 * So we have a optimization chance when the task's delta_exec is 0. 2856 * Reading ->on_cpu is racy, but this is ok. 2857 * 2858 * If we race with it leaving cpu, we'll take a lock. So we're correct. 2859 * If we race with it entering cpu, unaccounted time is 0. This is 2860 * indistinguishable from the read occurring a few cycles earlier. 2861 * If we see ->on_cpu without ->on_rq, the task is leaving, and has 2862 * been accounted, so we're correct here as well. 2863 */ 2864 if (!p->on_cpu || !task_on_rq_queued(p)) 2865 return p->se.sum_exec_runtime; 2866 #endif 2867 2868 rq = task_rq_lock(p, &flags); 2869 /* 2870 * Must be ->curr _and_ ->on_rq. If dequeued, we would 2871 * project cycles that may never be accounted to this 2872 * thread, breaking clock_gettime(). 2873 */ 2874 if (task_current(rq, p) && task_on_rq_queued(p)) { 2875 update_rq_clock(rq); 2876 p->sched_class->update_curr(rq); 2877 } 2878 ns = p->se.sum_exec_runtime; 2879 task_rq_unlock(rq, p, &flags); 2880 2881 return ns; 2882 } 2883 2884 /* 2885 * This function gets called by the timer code, with HZ frequency. 2886 * We call it with interrupts disabled. 2887 */ 2888 void scheduler_tick(void) 2889 { 2890 int cpu = smp_processor_id(); 2891 struct rq *rq = cpu_rq(cpu); 2892 struct task_struct *curr = rq->curr; 2893 2894 sched_clock_tick(); 2895 2896 raw_spin_lock(&rq->lock); 2897 update_rq_clock(rq); 2898 curr->sched_class->task_tick(rq, curr, 0); 2899 update_cpu_load_active(rq); 2900 calc_global_load_tick(rq); 2901 raw_spin_unlock(&rq->lock); 2902 2903 perf_event_task_tick(); 2904 2905 #ifdef CONFIG_SMP 2906 rq->idle_balance = idle_cpu(cpu); 2907 trigger_load_balance(rq); 2908 #endif 2909 rq_last_tick_reset(rq); 2910 } 2911 2912 #ifdef CONFIG_NO_HZ_FULL 2913 /** 2914 * scheduler_tick_max_deferment 2915 * 2916 * Keep at least one tick per second when a single 2917 * active task is running because the scheduler doesn't 2918 * yet completely support full dynticks environment. 2919 * 2920 * This makes sure that uptime, CFS vruntime, load 2921 * balancing, etc... continue to move forward, even 2922 * with a very low granularity. 2923 * 2924 * Return: Maximum deferment in nanoseconds. 
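 *
 * Worked sketch, assuming HZ=1000: if the last tick on this runqueue was
 * 400 jiffies ago, the next forced tick is due in 1000 - 400 = 600 jiffies,
 * so this returns jiffies_to_nsecs(600) ~= 600ms; once a full second has
 * passed it returns 0 to demand an immediate tick.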
2925 */ 2926 u64 scheduler_tick_max_deferment(void) 2927 { 2928 struct rq *rq = this_rq(); 2929 unsigned long next, now = READ_ONCE(jiffies); 2930 2931 next = rq->last_sched_tick + HZ; 2932 2933 if (time_before_eq(next, now)) 2934 return 0; 2935 2936 return jiffies_to_nsecs(next - now); 2937 } 2938 #endif 2939 2940 #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ 2941 defined(CONFIG_PREEMPT_TRACER)) 2942 2943 void preempt_count_add(int val) 2944 { 2945 #ifdef CONFIG_DEBUG_PREEMPT 2946 /* 2947 * Underflow? 2948 */ 2949 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) 2950 return; 2951 #endif 2952 __preempt_count_add(val); 2953 #ifdef CONFIG_DEBUG_PREEMPT 2954 /* 2955 * Spinlock count overflowing soon? 2956 */ 2957 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= 2958 PREEMPT_MASK - 10); 2959 #endif 2960 if (preempt_count() == val) { 2961 unsigned long ip = get_lock_parent_ip(); 2962 #ifdef CONFIG_DEBUG_PREEMPT 2963 current->preempt_disable_ip = ip; 2964 #endif 2965 trace_preempt_off(CALLER_ADDR0, ip); 2966 } 2967 } 2968 EXPORT_SYMBOL(preempt_count_add); 2969 NOKPROBE_SYMBOL(preempt_count_add); 2970 2971 void preempt_count_sub(int val) 2972 { 2973 #ifdef CONFIG_DEBUG_PREEMPT 2974 /* 2975 * Underflow? 2976 */ 2977 if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) 2978 return; 2979 /* 2980 * Is the spinlock portion underflowing? 2981 */ 2982 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && 2983 !(preempt_count() & PREEMPT_MASK))) 2984 return; 2985 #endif 2986 2987 if (preempt_count() == val) 2988 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip()); 2989 __preempt_count_sub(val); 2990 } 2991 EXPORT_SYMBOL(preempt_count_sub); 2992 NOKPROBE_SYMBOL(preempt_count_sub); 2993 2994 #endif 2995 2996 /* 2997 * Print scheduling while atomic bug: 2998 */ 2999 static noinline void __schedule_bug(struct task_struct *prev) 3000 { 3001 if (oops_in_progress) 3002 return; 3003 3004 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", 3005 prev->comm, prev->pid, preempt_count()); 3006 3007 debug_show_held_locks(prev); 3008 print_modules(); 3009 if (irqs_disabled()) 3010 print_irqtrace_events(prev); 3011 #ifdef CONFIG_DEBUG_PREEMPT 3012 if (in_atomic_preempt_off()) { 3013 pr_err("Preemption disabled at:"); 3014 print_ip_sym(current->preempt_disable_ip); 3015 pr_cont("\n"); 3016 } 3017 #endif 3018 dump_stack(); 3019 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 3020 } 3021 3022 /* 3023 * Various schedule()-time debugging checks and statistics: 3024 */ 3025 static inline void schedule_debug(struct task_struct *prev) 3026 { 3027 #ifdef CONFIG_SCHED_STACK_END_CHECK 3028 BUG_ON(task_stack_end_corrupted(prev)); 3029 #endif 3030 3031 if (unlikely(in_atomic_preempt_off())) { 3032 __schedule_bug(prev); 3033 preempt_count_set(PREEMPT_DISABLED); 3034 } 3035 rcu_sleep_check(); 3036 3037 profile_hit(SCHED_PROFILING, __builtin_return_address(0)); 3038 3039 schedstat_inc(this_rq(), sched_count); 3040 } 3041 3042 /* 3043 * Pick up the highest-prio task: 3044 */ 3045 static inline struct task_struct * 3046 pick_next_task(struct rq *rq, struct task_struct *prev) 3047 { 3048 const struct sched_class *class = &fair_sched_class; 3049 struct task_struct *p; 3050 3051 /* 3052 * Optimization: we know that if all tasks are in 3053 * the fair class we can call that function directly: 3054 */ 3055 if (likely(prev->sched_class == class && 3056 rq->nr_running == rq->cfs.h_nr_running)) { 3057 p = fair_sched_class.pick_next_task(rq, prev); 3058 if (unlikely(p == RETRY_TASK)) 3059 goto again; 3060 3061 /* assumes 
fair_sched_class->next == idle_sched_class */ 3062 if (unlikely(!p)) 3063 p = idle_sched_class.pick_next_task(rq, prev); 3064 3065 return p; 3066 } 3067 3068 again: 3069 for_each_class(class) { 3070 p = class->pick_next_task(rq, prev); 3071 if (p) { 3072 if (unlikely(p == RETRY_TASK)) 3073 goto again; 3074 return p; 3075 } 3076 } 3077 3078 BUG(); /* the idle class will always have a runnable task */ 3079 } 3080 3081 /* 3082 * __schedule() is the main scheduler function. 3083 * 3084 * The main means of driving the scheduler and thus entering this function are: 3085 * 3086 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc. 3087 * 3088 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return 3089 * paths. For example, see arch/x86/entry_64.S. 3090 * 3091 * To drive preemption between tasks, the scheduler sets the flag in timer 3092 * interrupt handler scheduler_tick(). 3093 * 3094 * 3. Wakeups don't really cause entry into schedule(). They add a 3095 * task to the run-queue and that's it. 3096 * 3097 * Now, if the new task added to the run-queue preempts the current 3098 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets 3099 * called on the nearest possible occasion: 3100 * 3101 * - If the kernel is preemptible (CONFIG_PREEMPT=y): 3102 * 3103 * - in syscall or exception context, at the next outmost 3104 * preempt_enable(). (this might be as soon as the wake_up()'s 3105 * spin_unlock()!) 3106 * 3107 * - in IRQ context, return from interrupt-handler to 3108 * preemptible context 3109 * 3110 * - If the kernel is not preemptible (CONFIG_PREEMPT is not set) 3111 * then at the next: 3112 * 3113 * - cond_resched() call 3114 * - explicit schedule() call 3115 * - return from syscall or exception to user-space 3116 * - return from interrupt-handler to user-space 3117 * 3118 * WARNING: must be called with preemption disabled! 3119 */ 3120 static void __sched notrace __schedule(bool preempt) 3121 { 3122 struct task_struct *prev, *next; 3123 unsigned long *switch_count; 3124 struct rq *rq; 3125 int cpu; 3126 3127 cpu = smp_processor_id(); 3128 rq = cpu_rq(cpu); 3129 prev = rq->curr; 3130 3131 /* 3132 * do_exit() calls schedule() with preemption disabled as an exception; 3133 * however we must fix that up, otherwise the next task will see an 3134 * inconsistent (higher) preempt count. 3135 * 3136 * It also avoids the below schedule_debug() test from complaining 3137 * about this. 3138 */ 3139 if (unlikely(prev->state == TASK_DEAD)) 3140 preempt_enable_no_resched_notrace(); 3141 3142 schedule_debug(prev); 3143 3144 if (sched_feat(HRTICK)) 3145 hrtick_clear(rq); 3146 3147 local_irq_disable(); 3148 rcu_note_context_switch(); 3149 3150 /* 3151 * Make sure that signal_pending_state()->signal_pending() below 3152 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) 3153 * done by the caller to avoid the race with signal_wake_up(). 3154 */ 3155 smp_mb__before_spinlock(); 3156 raw_spin_lock(&rq->lock); 3157 lockdep_pin_lock(&rq->lock); 3158 3159 rq->clock_skip_update <<= 1; /* promote REQ to ACT */ 3160 3161 switch_count = &prev->nivcsw; 3162 if (!preempt && prev->state) { 3163 if (unlikely(signal_pending_state(prev->state, prev))) { 3164 prev->state = TASK_RUNNING; 3165 } else { 3166 deactivate_task(rq, prev, DEQUEUE_SLEEP); 3167 prev->on_rq = 0; 3168 3169 /* 3170 * If a worker went to sleep, notify and ask workqueue 3171 * whether it wants to wake up a task to maintain 3172 * concurrency. 
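 *
 * (Roughly: wq_worker_sleeping() may hand back an idle worker of the
 * same pool; waking it keeps the pool making forward progress while
 * this worker blocks. See ../workqueue_internal.h.)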
3173 */ 3174 if (prev->flags & PF_WQ_WORKER) { 3175 struct task_struct *to_wakeup; 3176 3177 to_wakeup = wq_worker_sleeping(prev, cpu); 3178 if (to_wakeup) 3179 try_to_wake_up_local(to_wakeup); 3180 } 3181 } 3182 switch_count = &prev->nvcsw; 3183 } 3184 3185 if (task_on_rq_queued(prev)) 3186 update_rq_clock(rq); 3187 3188 next = pick_next_task(rq, prev); 3189 clear_tsk_need_resched(prev); 3190 clear_preempt_need_resched(); 3191 rq->clock_skip_update = 0; 3192 3193 if (likely(prev != next)) { 3194 rq->nr_switches++; 3195 rq->curr = next; 3196 ++*switch_count; 3197 3198 trace_sched_switch(preempt, prev, next); 3199 rq = context_switch(rq, prev, next); /* unlocks the rq */ 3200 } else { 3201 lockdep_unpin_lock(&rq->lock); 3202 raw_spin_unlock_irq(&rq->lock); 3203 } 3204 3205 balance_callback(rq); 3206 } 3207 3208 static inline void sched_submit_work(struct task_struct *tsk) 3209 { 3210 if (!tsk->state || tsk_is_pi_blocked(tsk)) 3211 return; 3212 /* 3213 * If we are going to sleep and we have plugged IO queued, 3214 * make sure to submit it to avoid deadlocks. 3215 */ 3216 if (blk_needs_flush_plug(tsk)) 3217 blk_schedule_flush_plug(tsk); 3218 } 3219 3220 asmlinkage __visible void __sched schedule(void) 3221 { 3222 struct task_struct *tsk = current; 3223 3224 sched_submit_work(tsk); 3225 do { 3226 preempt_disable(); 3227 __schedule(false); 3228 sched_preempt_enable_no_resched(); 3229 } while (need_resched()); 3230 } 3231 EXPORT_SYMBOL(schedule); 3232 3233 #ifdef CONFIG_CONTEXT_TRACKING 3234 asmlinkage __visible void __sched schedule_user(void) 3235 { 3236 /* 3237 * If we come here after a random call to set_need_resched(), 3238 * or we have been woken up remotely but the IPI has not yet arrived, 3239 * we haven't yet exited the RCU idle mode. Do it here manually until 3240 * we find a better solution. 3241 * 3242 * NB: There are buggy callers of this function. Ideally we 3243 * should warn if prev_state != CONTEXT_USER, but that will trigger 3244 * too frequently to make sense yet. 3245 */ 3246 enum ctx_state prev_state = exception_enter(); 3247 schedule(); 3248 exception_exit(prev_state); 3249 } 3250 #endif 3251 3252 /** 3253 * schedule_preempt_disabled - called with preemption disabled 3254 * 3255 * Returns with preemption disabled. Note: preempt_count must be 1 3256 */ 3257 void __sched schedule_preempt_disabled(void) 3258 { 3259 sched_preempt_enable_no_resched(); 3260 schedule(); 3261 preempt_disable(); 3262 } 3263 3264 static void __sched notrace preempt_schedule_common(void) 3265 { 3266 do { 3267 preempt_disable_notrace(); 3268 __schedule(true); 3269 preempt_enable_no_resched_notrace(); 3270 3271 /* 3272 * Check again in case we missed a preemption opportunity 3273 * between schedule and now. 3274 */ 3275 } while (need_resched()); 3276 } 3277 3278 #ifdef CONFIG_PREEMPT 3279 /* 3280 * this is the entry point to schedule() from in-kernel preemption 3281 * off of preempt_enable. Kernel preemptions off return from interrupt 3282 * occur there and call schedule directly. 3283 */ 3284 asmlinkage __visible void __sched notrace preempt_schedule(void) 3285 { 3286 /* 3287 * If there is a non-zero preempt_count or interrupts are disabled, 3288 * we do not want to preempt the current task. Just return.. 
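 *
 * (As a sketch: on PREEMPT_COUNT kernels preemptible() expands to
 * "preempt_count() == 0 && !irqs_disabled()", which is exactly the
 * condition described above.)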
3289 */ 3290 if (likely(!preemptible())) 3291 return; 3292 3293 preempt_schedule_common(); 3294 } 3295 NOKPROBE_SYMBOL(preempt_schedule); 3296 EXPORT_SYMBOL(preempt_schedule); 3297 3298 /** 3299 * preempt_schedule_notrace - preempt_schedule called by tracing 3300 * 3301 * The tracing infrastructure uses preempt_enable_notrace to prevent 3302 * recursion and tracing preempt enabling caused by the tracing 3303 * infrastructure itself. But as tracing can happen in areas coming 3304 * from userspace or just about to enter userspace, a preempt enable 3305 * can occur before user_exit() is called. This will cause the scheduler 3306 * to be called when the system is still in usermode. 3307 * 3308 * To prevent this, the preempt_enable_notrace will use this function 3309 * instead of preempt_schedule() to exit user context if needed before 3310 * calling the scheduler. 3311 */ 3312 asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) 3313 { 3314 enum ctx_state prev_ctx; 3315 3316 if (likely(!preemptible())) 3317 return; 3318 3319 do { 3320 preempt_disable_notrace(); 3321 /* 3322 * Needs preempt disabled in case user_exit() is traced 3323 * and the tracer calls preempt_enable_notrace() causing 3324 * an infinite recursion. 3325 */ 3326 prev_ctx = exception_enter(); 3327 __schedule(true); 3328 exception_exit(prev_ctx); 3329 3330 preempt_enable_no_resched_notrace(); 3331 } while (need_resched()); 3332 } 3333 EXPORT_SYMBOL_GPL(preempt_schedule_notrace); 3334 3335 #endif /* CONFIG_PREEMPT */ 3336 3337 /* 3338 * this is the entry point to schedule() from kernel preemption 3339 * off of irq context. 3340 * Note, that this is called and return with irqs disabled. This will 3341 * protect us against recursive calling from irq. 3342 */ 3343 asmlinkage __visible void __sched preempt_schedule_irq(void) 3344 { 3345 enum ctx_state prev_state; 3346 3347 /* Catch callers which need to be fixed */ 3348 BUG_ON(preempt_count() || !irqs_disabled()); 3349 3350 prev_state = exception_enter(); 3351 3352 do { 3353 preempt_disable(); 3354 local_irq_enable(); 3355 __schedule(true); 3356 local_irq_disable(); 3357 sched_preempt_enable_no_resched(); 3358 } while (need_resched()); 3359 3360 exception_exit(prev_state); 3361 } 3362 3363 int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags, 3364 void *key) 3365 { 3366 return try_to_wake_up(curr->private, mode, wake_flags); 3367 } 3368 EXPORT_SYMBOL(default_wake_function); 3369 3370 #ifdef CONFIG_RT_MUTEXES 3371 3372 /* 3373 * rt_mutex_setprio - set the current priority of a task 3374 * @p: task 3375 * @prio: prio value (kernel-internal form) 3376 * 3377 * This function changes the 'effective' priority of a task. It does 3378 * not touch ->normal_prio like __setscheduler(). 3379 * 3380 * Used by the rt_mutex code to implement priority inheritance 3381 * logic. Call site only calls if the priority of the task changed. 3382 */ 3383 void rt_mutex_setprio(struct task_struct *p, int prio) 3384 { 3385 int oldprio, queued, running, queue_flag = DEQUEUE_SAVE | DEQUEUE_MOVE; 3386 struct rq *rq; 3387 const struct sched_class *prev_class; 3388 3389 BUG_ON(prio > MAX_PRIO); 3390 3391 rq = __task_rq_lock(p); 3392 3393 /* 3394 * Idle task boosting is a nono in general. There is one 3395 * exception, when PREEMPT_RT and NOHZ is active: 3396 * 3397 * The idle task calls get_next_timer_interrupt() and holds 3398 * the timer wheel base->lock on the CPU and another CPU wants 3399 * to access the timer (probably to cancel it). 
We can safely 3400 * ignore the boosting request, as the idle CPU runs this code 3401 * with interrupts disabled and will complete the lock 3402 * protected section without being interrupted. So there is no 3403 * real need to boost. 3404 */ 3405 if (unlikely(p == rq->idle)) { 3406 WARN_ON(p != rq->curr); 3407 WARN_ON(p->pi_blocked_on); 3408 goto out_unlock; 3409 } 3410 3411 trace_sched_pi_setprio(p, prio); 3412 oldprio = p->prio; 3413 3414 if (oldprio == prio) 3415 queue_flag &= ~DEQUEUE_MOVE; 3416 3417 prev_class = p->sched_class; 3418 queued = task_on_rq_queued(p); 3419 running = task_current(rq, p); 3420 if (queued) 3421 dequeue_task(rq, p, queue_flag); 3422 if (running) 3423 put_prev_task(rq, p); 3424 3425 /* 3426 * Boosting condition are: 3427 * 1. -rt task is running and holds mutex A 3428 * --> -dl task blocks on mutex A 3429 * 3430 * 2. -dl task is running and holds mutex A 3431 * --> -dl task blocks on mutex A and could preempt the 3432 * running task 3433 */ 3434 if (dl_prio(prio)) { 3435 struct task_struct *pi_task = rt_mutex_get_top_task(p); 3436 if (!dl_prio(p->normal_prio) || 3437 (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) { 3438 p->dl.dl_boosted = 1; 3439 queue_flag |= ENQUEUE_REPLENISH; 3440 } else 3441 p->dl.dl_boosted = 0; 3442 p->sched_class = &dl_sched_class; 3443 } else if (rt_prio(prio)) { 3444 if (dl_prio(oldprio)) 3445 p->dl.dl_boosted = 0; 3446 if (oldprio < prio) 3447 queue_flag |= ENQUEUE_HEAD; 3448 p->sched_class = &rt_sched_class; 3449 } else { 3450 if (dl_prio(oldprio)) 3451 p->dl.dl_boosted = 0; 3452 if (rt_prio(oldprio)) 3453 p->rt.timeout = 0; 3454 p->sched_class = &fair_sched_class; 3455 } 3456 3457 p->prio = prio; 3458 3459 if (running) 3460 p->sched_class->set_curr_task(rq); 3461 if (queued) 3462 enqueue_task(rq, p, queue_flag); 3463 3464 check_class_changed(rq, p, prev_class, oldprio); 3465 out_unlock: 3466 preempt_disable(); /* avoid rq from going away on us */ 3467 __task_rq_unlock(rq); 3468 3469 balance_callback(rq); 3470 preempt_enable(); 3471 } 3472 #endif 3473 3474 void set_user_nice(struct task_struct *p, long nice) 3475 { 3476 int old_prio, delta, queued; 3477 unsigned long flags; 3478 struct rq *rq; 3479 3480 if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE) 3481 return; 3482 /* 3483 * We have to be careful, if called from sys_setpriority(), 3484 * the task might be in the middle of scheduling on another CPU. 
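 *
 * (For reference, a sketch of the mapping used below: NICE_TO_PRIO(nice)
 * is MAX_RT_PRIO + 20 + nice, so nice -20 -> 100, nice 0 -> 120 and
 * nice +19 -> 139; the resulting delta in ->prio then decides whether
 * resched_curr() is needed.)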
3485 */ 3486 rq = task_rq_lock(p, &flags); 3487 /* 3488 * The RT priorities are set via sched_setscheduler(), but we still 3489 * allow the 'normal' nice value to be set - but as expected 3490 * it wont have any effect on scheduling until the task is 3491 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR: 3492 */ 3493 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { 3494 p->static_prio = NICE_TO_PRIO(nice); 3495 goto out_unlock; 3496 } 3497 queued = task_on_rq_queued(p); 3498 if (queued) 3499 dequeue_task(rq, p, DEQUEUE_SAVE); 3500 3501 p->static_prio = NICE_TO_PRIO(nice); 3502 set_load_weight(p); 3503 old_prio = p->prio; 3504 p->prio = effective_prio(p); 3505 delta = p->prio - old_prio; 3506 3507 if (queued) { 3508 enqueue_task(rq, p, ENQUEUE_RESTORE); 3509 /* 3510 * If the task increased its priority or is running and 3511 * lowered its priority, then reschedule its CPU: 3512 */ 3513 if (delta < 0 || (delta > 0 && task_running(rq, p))) 3514 resched_curr(rq); 3515 } 3516 out_unlock: 3517 task_rq_unlock(rq, p, &flags); 3518 } 3519 EXPORT_SYMBOL(set_user_nice); 3520 3521 /* 3522 * can_nice - check if a task can reduce its nice value 3523 * @p: task 3524 * @nice: nice value 3525 */ 3526 int can_nice(const struct task_struct *p, const int nice) 3527 { 3528 /* convert nice value [19,-20] to rlimit style value [1,40] */ 3529 int nice_rlim = nice_to_rlimit(nice); 3530 3531 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || 3532 capable(CAP_SYS_NICE)); 3533 } 3534 3535 #ifdef __ARCH_WANT_SYS_NICE 3536 3537 /* 3538 * sys_nice - change the priority of the current process. 3539 * @increment: priority increment 3540 * 3541 * sys_setpriority is a more generic, but much slower function that 3542 * does similar things. 3543 */ 3544 SYSCALL_DEFINE1(nice, int, increment) 3545 { 3546 long nice, retval; 3547 3548 /* 3549 * Setpriority might change our priority at the same moment. 3550 * We don't have to worry. Conceptually one call occurs first 3551 * and we have a single winner. 3552 */ 3553 increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH); 3554 nice = task_nice(current) + increment; 3555 3556 nice = clamp_val(nice, MIN_NICE, MAX_NICE); 3557 if (increment < 0 && !can_nice(current, nice)) 3558 return -EPERM; 3559 3560 retval = security_task_setnice(current, nice); 3561 if (retval) 3562 return retval; 3563 3564 set_user_nice(current, nice); 3565 return 0; 3566 } 3567 3568 #endif 3569 3570 /** 3571 * task_prio - return the priority value of a given task. 3572 * @p: the task in question. 3573 * 3574 * Return: The priority value as seen by users in /proc. 3575 * RT tasks are offset by -200. Normal tasks are centered 3576 * around 0, value goes from -16 to +15. 3577 */ 3578 int task_prio(const struct task_struct *p) 3579 { 3580 return p->prio - MAX_RT_PRIO; 3581 } 3582 3583 /** 3584 * idle_cpu - is a given cpu idle currently? 3585 * @cpu: the processor in question. 3586 * 3587 * Return: 1 if the CPU is currently idle. 0 otherwise. 3588 */ 3589 int idle_cpu(int cpu) 3590 { 3591 struct rq *rq = cpu_rq(cpu); 3592 3593 if (rq->curr != rq->idle) 3594 return 0; 3595 3596 if (rq->nr_running) 3597 return 0; 3598 3599 #ifdef CONFIG_SMP 3600 if (!llist_empty(&rq->wake_list)) 3601 return 0; 3602 #endif 3603 3604 return 1; 3605 } 3606 3607 /** 3608 * idle_task - return the idle task for a given cpu. 3609 * @cpu: the processor in question. 3610 * 3611 * Return: The idle task for the cpu @cpu. 
3612 */ 3613 struct task_struct *idle_task(int cpu) 3614 { 3615 return cpu_rq(cpu)->idle; 3616 } 3617 3618 /** 3619 * find_process_by_pid - find a process with a matching PID value. 3620 * @pid: the pid in question. 3621 * 3622 * The task of @pid, if found. %NULL otherwise. 3623 */ 3624 static struct task_struct *find_process_by_pid(pid_t pid) 3625 { 3626 return pid ? find_task_by_vpid(pid) : current; 3627 } 3628 3629 /* 3630 * This function initializes the sched_dl_entity of a newly becoming 3631 * SCHED_DEADLINE task. 3632 * 3633 * Only the static values are considered here, the actual runtime and the 3634 * absolute deadline will be properly calculated when the task is enqueued 3635 * for the first time with its new policy. 3636 */ 3637 static void 3638 __setparam_dl(struct task_struct *p, const struct sched_attr *attr) 3639 { 3640 struct sched_dl_entity *dl_se = &p->dl; 3641 3642 dl_se->dl_runtime = attr->sched_runtime; 3643 dl_se->dl_deadline = attr->sched_deadline; 3644 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline; 3645 dl_se->flags = attr->sched_flags; 3646 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime); 3647 3648 /* 3649 * Changing the parameters of a task is 'tricky' and we're not doing 3650 * the correct thing -- also see task_dead_dl() and switched_from_dl(). 3651 * 3652 * What we SHOULD do is delay the bandwidth release until the 0-lag 3653 * point. This would include retaining the task_struct until that time 3654 * and change dl_overflow() to not immediately decrement the current 3655 * amount. 3656 * 3657 * Instead we retain the current runtime/deadline and let the new 3658 * parameters take effect after the current reservation period lapses. 3659 * This is safe (albeit pessimistic) because the 0-lag point is always 3660 * before the current scheduling deadline. 3661 * 3662 * We can still have temporary overloads because we do not delay the 3663 * change in bandwidth until that time; so admission control is 3664 * not on the safe side. It does however guarantee tasks will never 3665 * consume more than promised. 3666 */ 3667 } 3668 3669 /* 3670 * sched_setparam() passes in -1 for its policy, to let the functions 3671 * it calls know not to change it. 3672 */ 3673 #define SETPARAM_POLICY -1 3674 3675 static void __setscheduler_params(struct task_struct *p, 3676 const struct sched_attr *attr) 3677 { 3678 int policy = attr->sched_policy; 3679 3680 if (policy == SETPARAM_POLICY) 3681 policy = p->policy; 3682 3683 p->policy = policy; 3684 3685 if (dl_policy(policy)) 3686 __setparam_dl(p, attr); 3687 else if (fair_policy(policy)) 3688 p->static_prio = NICE_TO_PRIO(attr->sched_nice); 3689 3690 /* 3691 * __sched_setscheduler() ensures attr->sched_priority == 0 when 3692 * !rt_policy. Always setting this ensures that things like 3693 * getparam()/getattr() don't report silly values for !rt tasks. 3694 */ 3695 p->rt_priority = attr->sched_priority; 3696 p->normal_prio = normal_prio(p); 3697 set_load_weight(p); 3698 } 3699 3700 /* Actually do priority change: must hold pi & rq lock. */ 3701 static void __setscheduler(struct rq *rq, struct task_struct *p, 3702 const struct sched_attr *attr, bool keep_boost) 3703 { 3704 __setscheduler_params(p, attr); 3705 3706 /* 3707 * Keep a potential priority boosting if called from 3708 * sched_setscheduler(). 
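 *
 * (Sketch: with keep_boost, rt_mutex_get_effective_prio() returns the
 * more boosted of the rt_mutex top-waiter priority and the new
 * normal_prio(), so an active PI boost is not dropped here.)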
3709 */ 3710 if (keep_boost) 3711 p->prio = rt_mutex_get_effective_prio(p, normal_prio(p)); 3712 else 3713 p->prio = normal_prio(p); 3714 3715 if (dl_prio(p->prio)) 3716 p->sched_class = &dl_sched_class; 3717 else if (rt_prio(p->prio)) 3718 p->sched_class = &rt_sched_class; 3719 else 3720 p->sched_class = &fair_sched_class; 3721 } 3722 3723 static void 3724 __getparam_dl(struct task_struct *p, struct sched_attr *attr) 3725 { 3726 struct sched_dl_entity *dl_se = &p->dl; 3727 3728 attr->sched_priority = p->rt_priority; 3729 attr->sched_runtime = dl_se->dl_runtime; 3730 attr->sched_deadline = dl_se->dl_deadline; 3731 attr->sched_period = dl_se->dl_period; 3732 attr->sched_flags = dl_se->flags; 3733 } 3734 3735 /* 3736 * This function validates the new parameters of a -deadline task. 3737 * We ask for the deadline not being zero, and greater or equal 3738 * than the runtime, as well as the period of being zero or 3739 * greater than deadline. Furthermore, we have to be sure that 3740 * user parameters are above the internal resolution of 1us (we 3741 * check sched_runtime only since it is always the smaller one) and 3742 * below 2^63 ns (we have to check both sched_deadline and 3743 * sched_period, as the latter can be zero). 3744 */ 3745 static bool 3746 __checkparam_dl(const struct sched_attr *attr) 3747 { 3748 /* deadline != 0 */ 3749 if (attr->sched_deadline == 0) 3750 return false; 3751 3752 /* 3753 * Since we truncate DL_SCALE bits, make sure we're at least 3754 * that big. 3755 */ 3756 if (attr->sched_runtime < (1ULL << DL_SCALE)) 3757 return false; 3758 3759 /* 3760 * Since we use the MSB for wrap-around and sign issues, make 3761 * sure it's not set (mind that period can be equal to zero). 3762 */ 3763 if (attr->sched_deadline & (1ULL << 63) || 3764 attr->sched_period & (1ULL << 63)) 3765 return false; 3766 3767 /* runtime <= deadline <= period (if period != 0) */ 3768 if ((attr->sched_period != 0 && 3769 attr->sched_period < attr->sched_deadline) || 3770 attr->sched_deadline < attr->sched_runtime) 3771 return false; 3772 3773 return true; 3774 } 3775 3776 /* 3777 * check the target process has a UID that matches the current process's 3778 */ 3779 static bool check_same_owner(struct task_struct *p) 3780 { 3781 const struct cred *cred = current_cred(), *pcred; 3782 bool match; 3783 3784 rcu_read_lock(); 3785 pcred = __task_cred(p); 3786 match = (uid_eq(cred->euid, pcred->euid) || 3787 uid_eq(cred->euid, pcred->uid)); 3788 rcu_read_unlock(); 3789 return match; 3790 } 3791 3792 static bool dl_param_changed(struct task_struct *p, 3793 const struct sched_attr *attr) 3794 { 3795 struct sched_dl_entity *dl_se = &p->dl; 3796 3797 if (dl_se->dl_runtime != attr->sched_runtime || 3798 dl_se->dl_deadline != attr->sched_deadline || 3799 dl_se->dl_period != attr->sched_period || 3800 dl_se->flags != attr->sched_flags) 3801 return true; 3802 3803 return false; 3804 } 3805 3806 static int __sched_setscheduler(struct task_struct *p, 3807 const struct sched_attr *attr, 3808 bool user, bool pi) 3809 { 3810 int newprio = dl_policy(attr->sched_policy) ? 
MAX_DL_PRIO - 1 : 3811 MAX_RT_PRIO - 1 - attr->sched_priority; 3812 int retval, oldprio, oldpolicy = -1, queued, running; 3813 int new_effective_prio, policy = attr->sched_policy; 3814 unsigned long flags; 3815 const struct sched_class *prev_class; 3816 struct rq *rq; 3817 int reset_on_fork; 3818 int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE; 3819 3820 /* may grab non-irq protected spin_locks */ 3821 BUG_ON(in_interrupt()); 3822 recheck: 3823 /* double check policy once rq lock held */ 3824 if (policy < 0) { 3825 reset_on_fork = p->sched_reset_on_fork; 3826 policy = oldpolicy = p->policy; 3827 } else { 3828 reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK); 3829 3830 if (!valid_policy(policy)) 3831 return -EINVAL; 3832 } 3833 3834 if (attr->sched_flags & ~(SCHED_FLAG_RESET_ON_FORK)) 3835 return -EINVAL; 3836 3837 /* 3838 * Valid priorities for SCHED_FIFO and SCHED_RR are 3839 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL, 3840 * SCHED_BATCH and SCHED_IDLE is 0. 3841 */ 3842 if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) || 3843 (!p->mm && attr->sched_priority > MAX_RT_PRIO-1)) 3844 return -EINVAL; 3845 if ((dl_policy(policy) && !__checkparam_dl(attr)) || 3846 (rt_policy(policy) != (attr->sched_priority != 0))) 3847 return -EINVAL; 3848 3849 /* 3850 * Allow unprivileged RT tasks to decrease priority: 3851 */ 3852 if (user && !capable(CAP_SYS_NICE)) { 3853 if (fair_policy(policy)) { 3854 if (attr->sched_nice < task_nice(p) && 3855 !can_nice(p, attr->sched_nice)) 3856 return -EPERM; 3857 } 3858 3859 if (rt_policy(policy)) { 3860 unsigned long rlim_rtprio = 3861 task_rlimit(p, RLIMIT_RTPRIO); 3862 3863 /* can't set/change the rt policy */ 3864 if (policy != p->policy && !rlim_rtprio) 3865 return -EPERM; 3866 3867 /* can't increase priority */ 3868 if (attr->sched_priority > p->rt_priority && 3869 attr->sched_priority > rlim_rtprio) 3870 return -EPERM; 3871 } 3872 3873 /* 3874 * Can't set/change SCHED_DEADLINE policy at all for now 3875 * (safest behavior); in the future we would like to allow 3876 * unprivileged DL tasks to increase their relative deadline 3877 * or reduce their runtime (both ways reducing utilization) 3878 */ 3879 if (dl_policy(policy)) 3880 return -EPERM; 3881 3882 /* 3883 * Treat SCHED_IDLE as nice 20. Only allow a switch to 3884 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it. 3885 */ 3886 if (idle_policy(p->policy) && !idle_policy(policy)) { 3887 if (!can_nice(p, task_nice(p))) 3888 return -EPERM; 3889 } 3890 3891 /* can't change other user's priorities */ 3892 if (!check_same_owner(p)) 3893 return -EPERM; 3894 3895 /* Normal users shall not reset the sched_reset_on_fork flag */ 3896 if (p->sched_reset_on_fork && !reset_on_fork) 3897 return -EPERM; 3898 } 3899 3900 if (user) { 3901 retval = security_task_setscheduler(p); 3902 if (retval) 3903 return retval; 3904 } 3905 3906 /* 3907 * make sure no PI-waiters arrive (or leave) while we are 3908 * changing the priority of the task: 3909 * 3910 * To be able to change p->policy safely, the appropriate 3911 * runqueue lock must be held. 3912 */ 3913 rq = task_rq_lock(p, &flags); 3914 3915 /* 3916 * Changing the policy of the stop threads its a very bad idea 3917 */ 3918 if (p == rq->stop) { 3919 task_rq_unlock(rq, p, &flags); 3920 return -EINVAL; 3921 } 3922 3923 /* 3924 * If not changing anything there's no need to proceed further, 3925 * but store a possible modification of reset_on_fork. 
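 *
 * (Example of the fast path below: calling sched_setscheduler() again with
 * the task's current SCHED_FIFO policy and an unchanged sched_priority only
 * records reset_on_fork and returns 0, without dequeueing the task.)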
3926 */ 3927 if (unlikely(policy == p->policy)) { 3928 if (fair_policy(policy) && attr->sched_nice != task_nice(p)) 3929 goto change; 3930 if (rt_policy(policy) && attr->sched_priority != p->rt_priority) 3931 goto change; 3932 if (dl_policy(policy) && dl_param_changed(p, attr)) 3933 goto change; 3934 3935 p->sched_reset_on_fork = reset_on_fork; 3936 task_rq_unlock(rq, p, &flags); 3937 return 0; 3938 } 3939 change: 3940 3941 if (user) { 3942 #ifdef CONFIG_RT_GROUP_SCHED 3943 /* 3944 * Do not allow realtime tasks into groups that have no runtime 3945 * assigned. 3946 */ 3947 if (rt_bandwidth_enabled() && rt_policy(policy) && 3948 task_group(p)->rt_bandwidth.rt_runtime == 0 && 3949 !task_group_is_autogroup(task_group(p))) { 3950 task_rq_unlock(rq, p, &flags); 3951 return -EPERM; 3952 } 3953 #endif 3954 #ifdef CONFIG_SMP 3955 if (dl_bandwidth_enabled() && dl_policy(policy)) { 3956 cpumask_t *span = rq->rd->span; 3957 3958 /* 3959 * Don't allow tasks with an affinity mask smaller than 3960 * the entire root_domain to become SCHED_DEADLINE. We 3961 * will also fail if there's no bandwidth available. 3962 */ 3963 if (!cpumask_subset(span, &p->cpus_allowed) || 3964 rq->rd->dl_bw.bw == 0) { 3965 task_rq_unlock(rq, p, &flags); 3966 return -EPERM; 3967 } 3968 } 3969 #endif 3970 } 3971 3972 /* recheck policy now with rq lock held */ 3973 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { 3974 policy = oldpolicy = -1; 3975 task_rq_unlock(rq, p, &flags); 3976 goto recheck; 3977 } 3978 3979 /* 3980 * If setscheduling to SCHED_DEADLINE (or changing the parameters 3981 * of a SCHED_DEADLINE task) we need to check if enough bandwidth 3982 * is available. 3983 */ 3984 if ((dl_policy(policy) || dl_task(p)) && dl_overflow(p, policy, attr)) { 3985 task_rq_unlock(rq, p, &flags); 3986 return -EBUSY; 3987 } 3988 3989 p->sched_reset_on_fork = reset_on_fork; 3990 oldprio = p->prio; 3991 3992 if (pi) { 3993 /* 3994 * Take priority boosted tasks into account. If the new 3995 * effective priority is unchanged, we just store the new 3996 * normal parameters and do not touch the scheduler class and 3997 * the runqueue. This will be done when the task deboost 3998 * itself. 3999 */ 4000 new_effective_prio = rt_mutex_get_effective_prio(p, newprio); 4001 if (new_effective_prio == oldprio) 4002 queue_flags &= ~DEQUEUE_MOVE; 4003 } 4004 4005 queued = task_on_rq_queued(p); 4006 running = task_current(rq, p); 4007 if (queued) 4008 dequeue_task(rq, p, queue_flags); 4009 if (running) 4010 put_prev_task(rq, p); 4011 4012 prev_class = p->sched_class; 4013 __setscheduler(rq, p, attr, pi); 4014 4015 if (running) 4016 p->sched_class->set_curr_task(rq); 4017 if (queued) { 4018 /* 4019 * We enqueue to tail when the priority of a task is 4020 * increased (user space view). 4021 */ 4022 if (oldprio < p->prio) 4023 queue_flags |= ENQUEUE_HEAD; 4024 4025 enqueue_task(rq, p, queue_flags); 4026 } 4027 4028 check_class_changed(rq, p, prev_class, oldprio); 4029 preempt_disable(); /* avoid rq from going away on us */ 4030 task_rq_unlock(rq, p, &flags); 4031 4032 if (pi) 4033 rt_mutex_adjust_pi(p); 4034 4035 /* 4036 * Run balance callbacks after we've adjusted the PI chain. 
4037 */ 4038 balance_callback(rq); 4039 preempt_enable(); 4040 4041 return 0; 4042 } 4043 4044 static int _sched_setscheduler(struct task_struct *p, int policy, 4045 const struct sched_param *param, bool check) 4046 { 4047 struct sched_attr attr = { 4048 .sched_policy = policy, 4049 .sched_priority = param->sched_priority, 4050 .sched_nice = PRIO_TO_NICE(p->static_prio), 4051 }; 4052 4053 /* Fixup the legacy SCHED_RESET_ON_FORK hack. */ 4054 if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) { 4055 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; 4056 policy &= ~SCHED_RESET_ON_FORK; 4057 attr.sched_policy = policy; 4058 } 4059 4060 return __sched_setscheduler(p, &attr, check, true); 4061 } 4062 /** 4063 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread. 4064 * @p: the task in question. 4065 * @policy: new policy. 4066 * @param: structure containing the new RT priority. 4067 * 4068 * Return: 0 on success. An error code otherwise. 4069 * 4070 * NOTE that the task may be already dead. 4071 */ 4072 int sched_setscheduler(struct task_struct *p, int policy, 4073 const struct sched_param *param) 4074 { 4075 return _sched_setscheduler(p, policy, param, true); 4076 } 4077 EXPORT_SYMBOL_GPL(sched_setscheduler); 4078 4079 int sched_setattr(struct task_struct *p, const struct sched_attr *attr) 4080 { 4081 return __sched_setscheduler(p, attr, true, true); 4082 } 4083 EXPORT_SYMBOL_GPL(sched_setattr); 4084 4085 /** 4086 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace. 4087 * @p: the task in question. 4088 * @policy: new policy. 4089 * @param: structure containing the new RT priority. 4090 * 4091 * Just like sched_setscheduler, only don't bother checking if the 4092 * current context has permission. For example, this is needed in 4093 * stop_machine(): we create temporary high priority worker threads, 4094 * but our caller might not have that capability. 4095 * 4096 * Return: 0 on success. An error code otherwise. 4097 */ 4098 int sched_setscheduler_nocheck(struct task_struct *p, int policy, 4099 const struct sched_param *param) 4100 { 4101 return _sched_setscheduler(p, policy, param, false); 4102 } 4103 EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck); 4104 4105 static int 4106 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) 4107 { 4108 struct sched_param lparam; 4109 struct task_struct *p; 4110 int retval; 4111 4112 if (!param || pid < 0) 4113 return -EINVAL; 4114 if (copy_from_user(&lparam, param, sizeof(struct sched_param))) 4115 return -EFAULT; 4116 4117 rcu_read_lock(); 4118 retval = -ESRCH; 4119 p = find_process_by_pid(pid); 4120 if (p != NULL) 4121 retval = sched_setscheduler(p, policy, &lparam); 4122 rcu_read_unlock(); 4123 4124 return retval; 4125 } 4126 4127 /* 4128 * Mimics kernel/events/core.c perf_copy_attr(). 4129 */ 4130 static int sched_copy_attr(struct sched_attr __user *uattr, 4131 struct sched_attr *attr) 4132 { 4133 u32 size; 4134 int ret; 4135 4136 if (!access_ok(VERIFY_WRITE, uattr, SCHED_ATTR_SIZE_VER0)) 4137 return -EFAULT; 4138 4139 /* 4140 * zero the full structure, so that a short copy will be nice. 
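 *
 * Illustrative consequences of the versioned-size scheme: an old binary that
 * passes size == SCHED_ATTR_SIZE_VER0 has only those bytes copied in, and the
 * zeroed tail of *attr supplies defaults for fields it does not know about;
 * a newer binary whose struct is larger than the kernel's must pass all
 * trailing bytes as zero, otherwise we return -E2BIG and report the size we
 * do understand back through uattr->size.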
4141 */ 4142 memset(attr, 0, sizeof(*attr)); 4143 4144 ret = get_user(size, &uattr->size); 4145 if (ret) 4146 return ret; 4147 4148 if (size > PAGE_SIZE) /* silly large */ 4149 goto err_size; 4150 4151 if (!size) /* abi compat */ 4152 size = SCHED_ATTR_SIZE_VER0; 4153 4154 if (size < SCHED_ATTR_SIZE_VER0) 4155 goto err_size; 4156 4157 /* 4158 * If we're handed a bigger struct than we know of, 4159 * ensure all the unknown bits are 0 - i.e. new 4160 * user-space does not rely on any kernel feature 4161 * extensions we dont know about yet. 4162 */ 4163 if (size > sizeof(*attr)) { 4164 unsigned char __user *addr; 4165 unsigned char __user *end; 4166 unsigned char val; 4167 4168 addr = (void __user *)uattr + sizeof(*attr); 4169 end = (void __user *)uattr + size; 4170 4171 for (; addr < end; addr++) { 4172 ret = get_user(val, addr); 4173 if (ret) 4174 return ret; 4175 if (val) 4176 goto err_size; 4177 } 4178 size = sizeof(*attr); 4179 } 4180 4181 ret = copy_from_user(attr, uattr, size); 4182 if (ret) 4183 return -EFAULT; 4184 4185 /* 4186 * XXX: do we want to be lenient like existing syscalls; or do we want 4187 * to be strict and return an error on out-of-bounds values? 4188 */ 4189 attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE); 4190 4191 return 0; 4192 4193 err_size: 4194 put_user(sizeof(*attr), &uattr->size); 4195 return -E2BIG; 4196 } 4197 4198 /** 4199 * sys_sched_setscheduler - set/change the scheduler policy and RT priority 4200 * @pid: the pid in question. 4201 * @policy: new policy. 4202 * @param: structure containing the new RT priority. 4203 * 4204 * Return: 0 on success. An error code otherwise. 4205 */ 4206 SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, 4207 struct sched_param __user *, param) 4208 { 4209 /* negative values for policy are not valid */ 4210 if (policy < 0) 4211 return -EINVAL; 4212 4213 return do_sched_setscheduler(pid, policy, param); 4214 } 4215 4216 /** 4217 * sys_sched_setparam - set/change the RT priority of a thread 4218 * @pid: the pid in question. 4219 * @param: structure containing the new RT priority. 4220 * 4221 * Return: 0 on success. An error code otherwise. 4222 */ 4223 SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) 4224 { 4225 return do_sched_setscheduler(pid, SETPARAM_POLICY, param); 4226 } 4227 4228 /** 4229 * sys_sched_setattr - same as above, but with extended sched_attr 4230 * @pid: the pid in question. 4231 * @uattr: structure containing the extended parameters. 4232 * @flags: for future extension. 4233 */ 4234 SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr, 4235 unsigned int, flags) 4236 { 4237 struct sched_attr attr; 4238 struct task_struct *p; 4239 int retval; 4240 4241 if (!uattr || pid < 0 || flags) 4242 return -EINVAL; 4243 4244 retval = sched_copy_attr(uattr, &attr); 4245 if (retval) 4246 return retval; 4247 4248 if ((int)attr.sched_policy < 0) 4249 return -EINVAL; 4250 4251 rcu_read_lock(); 4252 retval = -ESRCH; 4253 p = find_process_by_pid(pid); 4254 if (p != NULL) 4255 retval = sched_setattr(p, &attr); 4256 rcu_read_unlock(); 4257 4258 return retval; 4259 } 4260 4261 /** 4262 * sys_sched_getscheduler - get the policy (scheduling class) of a thread 4263 * @pid: the pid in question. 4264 * 4265 * Return: On success, the policy of the thread. Otherwise, a negative error 4266 * code. 
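 *
 * Illustrative user-space sketch (assumes the glibc wrapper from <sched.h>
 * and SCHED_RESET_ON_FORK from the UAPI headers; pid 0 means the caller):
 *
 *	int pol = sched_getscheduler(0);
 *
 *	if (pol >= 0)
 *		pol &= ~SCHED_RESET_ON_FORK;	// may be OR-ed into the result
 *
 * Callers comparing the result against SCHED_FIFO and friends should mask
 * the reset-on-fork bit off first, as done above.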
4267 */ 4268 SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) 4269 { 4270 struct task_struct *p; 4271 int retval; 4272 4273 if (pid < 0) 4274 return -EINVAL; 4275 4276 retval = -ESRCH; 4277 rcu_read_lock(); 4278 p = find_process_by_pid(pid); 4279 if (p) { 4280 retval = security_task_getscheduler(p); 4281 if (!retval) 4282 retval = p->policy 4283 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0); 4284 } 4285 rcu_read_unlock(); 4286 return retval; 4287 } 4288 4289 /** 4290 * sys_sched_getparam - get the RT priority of a thread 4291 * @pid: the pid in question. 4292 * @param: structure containing the RT priority. 4293 * 4294 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error 4295 * code. 4296 */ 4297 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) 4298 { 4299 struct sched_param lp = { .sched_priority = 0 }; 4300 struct task_struct *p; 4301 int retval; 4302 4303 if (!param || pid < 0) 4304 return -EINVAL; 4305 4306 rcu_read_lock(); 4307 p = find_process_by_pid(pid); 4308 retval = -ESRCH; 4309 if (!p) 4310 goto out_unlock; 4311 4312 retval = security_task_getscheduler(p); 4313 if (retval) 4314 goto out_unlock; 4315 4316 if (task_has_rt_policy(p)) 4317 lp.sched_priority = p->rt_priority; 4318 rcu_read_unlock(); 4319 4320 /* 4321 * This one might sleep, we cannot do it with a spinlock held ... 4322 */ 4323 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0; 4324 4325 return retval; 4326 4327 out_unlock: 4328 rcu_read_unlock(); 4329 return retval; 4330 } 4331 4332 static int sched_read_attr(struct sched_attr __user *uattr, 4333 struct sched_attr *attr, 4334 unsigned int usize) 4335 { 4336 int ret; 4337 4338 if (!access_ok(VERIFY_WRITE, uattr, usize)) 4339 return -EFAULT; 4340 4341 /* 4342 * If we're handed a smaller struct than we know of, 4343 * ensure all the unknown bits are 0 - i.e. old 4344 * user-space does not get uncomplete information. 4345 */ 4346 if (usize < sizeof(*attr)) { 4347 unsigned char *addr; 4348 unsigned char *end; 4349 4350 addr = (void *)attr + usize; 4351 end = (void *)attr + sizeof(*attr); 4352 4353 for (; addr < end; addr++) { 4354 if (*addr) 4355 return -EFBIG; 4356 } 4357 4358 attr->size = usize; 4359 } 4360 4361 ret = copy_to_user(uattr, attr, attr->size); 4362 if (ret) 4363 return -EFAULT; 4364 4365 return 0; 4366 } 4367 4368 /** 4369 * sys_sched_getattr - similar to sched_getparam, but with sched_attr 4370 * @pid: the pid in question. 4371 * @uattr: structure containing the extended parameters. 4372 * @size: sizeof(attr) for fwd/bwd comp. 4373 * @flags: for future extension. 
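 *
 * Return: 0 on success. An error code otherwise.
 *
 * Illustrative user-space sketch (assumption: glibc does not wrap this
 * syscall, so it is invoked via syscall() from <unistd.h>/<sys/syscall.h>
 * with a locally defined struct sched_attr matching the UAPI layout):
 *
 *	struct sched_attr attr;
 *
 *	if (syscall(SYS_sched_getattr, 0, &attr, sizeof(attr), 0) == 0)
 *		printf("policy=%u nice=%d\n", attr.sched_policy, attr.sched_nice);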
4374 */ 4375 SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, 4376 unsigned int, size, unsigned int, flags) 4377 { 4378 struct sched_attr attr = { 4379 .size = sizeof(struct sched_attr), 4380 }; 4381 struct task_struct *p; 4382 int retval; 4383 4384 if (!uattr || pid < 0 || size > PAGE_SIZE || 4385 size < SCHED_ATTR_SIZE_VER0 || flags) 4386 return -EINVAL; 4387 4388 rcu_read_lock(); 4389 p = find_process_by_pid(pid); 4390 retval = -ESRCH; 4391 if (!p) 4392 goto out_unlock; 4393 4394 retval = security_task_getscheduler(p); 4395 if (retval) 4396 goto out_unlock; 4397 4398 attr.sched_policy = p->policy; 4399 if (p->sched_reset_on_fork) 4400 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; 4401 if (task_has_dl_policy(p)) 4402 __getparam_dl(p, &attr); 4403 else if (task_has_rt_policy(p)) 4404 attr.sched_priority = p->rt_priority; 4405 else 4406 attr.sched_nice = task_nice(p); 4407 4408 rcu_read_unlock(); 4409 4410 retval = sched_read_attr(uattr, &attr, size); 4411 return retval; 4412 4413 out_unlock: 4414 rcu_read_unlock(); 4415 return retval; 4416 } 4417 4418 long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) 4419 { 4420 cpumask_var_t cpus_allowed, new_mask; 4421 struct task_struct *p; 4422 int retval; 4423 4424 rcu_read_lock(); 4425 4426 p = find_process_by_pid(pid); 4427 if (!p) { 4428 rcu_read_unlock(); 4429 return -ESRCH; 4430 } 4431 4432 /* Prevent p going away */ 4433 get_task_struct(p); 4434 rcu_read_unlock(); 4435 4436 if (p->flags & PF_NO_SETAFFINITY) { 4437 retval = -EINVAL; 4438 goto out_put_task; 4439 } 4440 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { 4441 retval = -ENOMEM; 4442 goto out_put_task; 4443 } 4444 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { 4445 retval = -ENOMEM; 4446 goto out_free_cpus_allowed; 4447 } 4448 retval = -EPERM; 4449 if (!check_same_owner(p)) { 4450 rcu_read_lock(); 4451 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) { 4452 rcu_read_unlock(); 4453 goto out_free_new_mask; 4454 } 4455 rcu_read_unlock(); 4456 } 4457 4458 retval = security_task_setscheduler(p); 4459 if (retval) 4460 goto out_free_new_mask; 4461 4462 4463 cpuset_cpus_allowed(p, cpus_allowed); 4464 cpumask_and(new_mask, in_mask, cpus_allowed); 4465 4466 /* 4467 * Since bandwidth control happens on root_domain basis, 4468 * if admission test is enabled, we only admit -deadline 4469 * tasks allowed to run on all the CPUs in the task's 4470 * root_domain. 4471 */ 4472 #ifdef CONFIG_SMP 4473 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) { 4474 rcu_read_lock(); 4475 if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) { 4476 retval = -EBUSY; 4477 rcu_read_unlock(); 4478 goto out_free_new_mask; 4479 } 4480 rcu_read_unlock(); 4481 } 4482 #endif 4483 again: 4484 retval = __set_cpus_allowed_ptr(p, new_mask, true); 4485 4486 if (!retval) { 4487 cpuset_cpus_allowed(p, cpus_allowed); 4488 if (!cpumask_subset(new_mask, cpus_allowed)) { 4489 /* 4490 * We must have raced with a concurrent cpuset 4491 * update. 
Just reset the cpus_allowed to the 4492 * cpuset's cpus_allowed 4493 */ 4494 cpumask_copy(new_mask, cpus_allowed); 4495 goto again; 4496 } 4497 } 4498 out_free_new_mask: 4499 free_cpumask_var(new_mask); 4500 out_free_cpus_allowed: 4501 free_cpumask_var(cpus_allowed); 4502 out_put_task: 4503 put_task_struct(p); 4504 return retval; 4505 } 4506 4507 static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, 4508 struct cpumask *new_mask) 4509 { 4510 if (len < cpumask_size()) 4511 cpumask_clear(new_mask); 4512 else if (len > cpumask_size()) 4513 len = cpumask_size(); 4514 4515 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; 4516 } 4517 4518 /** 4519 * sys_sched_setaffinity - set the cpu affinity of a process 4520 * @pid: pid of the process 4521 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 4522 * @user_mask_ptr: user-space pointer to the new cpu mask 4523 * 4524 * Return: 0 on success. An error code otherwise. 4525 */ 4526 SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, 4527 unsigned long __user *, user_mask_ptr) 4528 { 4529 cpumask_var_t new_mask; 4530 int retval; 4531 4532 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) 4533 return -ENOMEM; 4534 4535 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); 4536 if (retval == 0) 4537 retval = sched_setaffinity(pid, new_mask); 4538 free_cpumask_var(new_mask); 4539 return retval; 4540 } 4541 4542 long sched_getaffinity(pid_t pid, struct cpumask *mask) 4543 { 4544 struct task_struct *p; 4545 unsigned long flags; 4546 int retval; 4547 4548 rcu_read_lock(); 4549 4550 retval = -ESRCH; 4551 p = find_process_by_pid(pid); 4552 if (!p) 4553 goto out_unlock; 4554 4555 retval = security_task_getscheduler(p); 4556 if (retval) 4557 goto out_unlock; 4558 4559 raw_spin_lock_irqsave(&p->pi_lock, flags); 4560 cpumask_and(mask, &p->cpus_allowed, cpu_active_mask); 4561 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 4562 4563 out_unlock: 4564 rcu_read_unlock(); 4565 4566 return retval; 4567 } 4568 4569 /** 4570 * sys_sched_getaffinity - get the cpu affinity of a process 4571 * @pid: pid of the process 4572 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 4573 * @user_mask_ptr: user-space pointer to hold the current cpu mask 4574 * 4575 * Return: 0 on success. An error code otherwise. 4576 */ 4577 SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, 4578 unsigned long __user *, user_mask_ptr) 4579 { 4580 int ret; 4581 cpumask_var_t mask; 4582 4583 if ((len * BITS_PER_BYTE) < nr_cpu_ids) 4584 return -EINVAL; 4585 if (len & (sizeof(unsigned long)-1)) 4586 return -EINVAL; 4587 4588 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) 4589 return -ENOMEM; 4590 4591 ret = sched_getaffinity(pid, mask); 4592 if (ret == 0) { 4593 size_t retlen = min_t(size_t, len, cpumask_size()); 4594 4595 if (copy_to_user(user_mask_ptr, mask, retlen)) 4596 ret = -EFAULT; 4597 else 4598 ret = retlen; 4599 } 4600 free_cpumask_var(mask); 4601 4602 return ret; 4603 } 4604 4605 /** 4606 * sys_sched_yield - yield the current processor to other threads. 4607 * 4608 * This function yields the current CPU to other tasks. If there are no 4609 * other threads running on this CPU then this function will return. 4610 * 4611 * Return: 0. 
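 *
 * Illustrative note: this is mainly useful for SCHED_FIFO/SCHED_RR tasks,
 * where it moves the caller to the tail of the queue for its priority; for
 * normal (CFS) tasks it is rarely the right tool. See the comment above
 * yield() further down in this file before reaching for it.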
4612 */ 4613 SYSCALL_DEFINE0(sched_yield) 4614 { 4615 struct rq *rq = this_rq_lock(); 4616 4617 schedstat_inc(rq, yld_count); 4618 current->sched_class->yield_task(rq); 4619 4620 /* 4621 * Since we are going to call schedule() anyway, there's 4622 * no need to preempt or enable interrupts: 4623 */ 4624 __release(rq->lock); 4625 spin_release(&rq->lock.dep_map, 1, _THIS_IP_); 4626 do_raw_spin_unlock(&rq->lock); 4627 sched_preempt_enable_no_resched(); 4628 4629 schedule(); 4630 4631 return 0; 4632 } 4633 4634 int __sched _cond_resched(void) 4635 { 4636 if (should_resched(0)) { 4637 preempt_schedule_common(); 4638 return 1; 4639 } 4640 return 0; 4641 } 4642 EXPORT_SYMBOL(_cond_resched); 4643 4644 /* 4645 * __cond_resched_lock() - if a reschedule is pending, drop the given lock, 4646 * call schedule, and on return reacquire the lock. 4647 * 4648 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level 4649 * operations here to prevent schedule() from being called twice (once via 4650 * spin_unlock(), once by hand). 4651 */ 4652 int __cond_resched_lock(spinlock_t *lock) 4653 { 4654 int resched = should_resched(PREEMPT_LOCK_OFFSET); 4655 int ret = 0; 4656 4657 lockdep_assert_held(lock); 4658 4659 if (spin_needbreak(lock) || resched) { 4660 spin_unlock(lock); 4661 if (resched) 4662 preempt_schedule_common(); 4663 else 4664 cpu_relax(); 4665 ret = 1; 4666 spin_lock(lock); 4667 } 4668 return ret; 4669 } 4670 EXPORT_SYMBOL(__cond_resched_lock); 4671 4672 int __sched __cond_resched_softirq(void) 4673 { 4674 BUG_ON(!in_softirq()); 4675 4676 if (should_resched(SOFTIRQ_DISABLE_OFFSET)) { 4677 local_bh_enable(); 4678 preempt_schedule_common(); 4679 local_bh_disable(); 4680 return 1; 4681 } 4682 return 0; 4683 } 4684 EXPORT_SYMBOL(__cond_resched_softirq); 4685 4686 /** 4687 * yield - yield the current processor to other threads. 4688 * 4689 * Do not ever use this function, there's a 99% chance you're doing it wrong. 4690 * 4691 * The scheduler is at all times free to pick the calling task as the most 4692 * eligible task to run, if removing the yield() call from your code breaks 4693 * it, its already broken. 4694 * 4695 * Typical broken usage is: 4696 * 4697 * while (!event) 4698 * yield(); 4699 * 4700 * where one assumes that yield() will let 'the other' process run that will 4701 * make event true. If the current task is a SCHED_FIFO task that will never 4702 * happen. Never use yield() as a progress guarantee!! 4703 * 4704 * If you want to use yield() to wait for something, use wait_event(). 4705 * If you want to use yield() to be 'nice' for others, use cond_resched(). 4706 * If you still want to use yield(), do not! 4707 */ 4708 void __sched yield(void) 4709 { 4710 set_current_state(TASK_RUNNING); 4711 sys_sched_yield(); 4712 } 4713 EXPORT_SYMBOL(yield); 4714 4715 /** 4716 * yield_to - yield the current processor to another thread in 4717 * your thread group, or accelerate that thread toward the 4718 * processor it's on. 4719 * @p: target task 4720 * @preempt: whether task preemption is allowed or not 4721 * 4722 * It's the caller's job to ensure that the target task struct 4723 * can't go away on us before we can do any checks. 4724 * 4725 * Return: 4726 * true (>0) if we indeed boosted the target task. 4727 * false (0) if we failed to boost the target. 4728 * -ESRCH if there's no task to yield to. 
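 *
 * Illustrative in-kernel use (not part of this file): paravirtualized lock
 * holders, e.g. KVM's vcpu-on-spin handling, which calls yield_to() so that
 * a spinning vCPU donates its slice to the vCPU believed to hold the lock
 * instead of burning cycles.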
4729 */ 4730 int __sched yield_to(struct task_struct *p, bool preempt) 4731 { 4732 struct task_struct *curr = current; 4733 struct rq *rq, *p_rq; 4734 unsigned long flags; 4735 int yielded = 0; 4736 4737 local_irq_save(flags); 4738 rq = this_rq(); 4739 4740 again: 4741 p_rq = task_rq(p); 4742 /* 4743 * If we're the only runnable task on the rq and target rq also 4744 * has only one task, there's absolutely no point in yielding. 4745 */ 4746 if (rq->nr_running == 1 && p_rq->nr_running == 1) { 4747 yielded = -ESRCH; 4748 goto out_irq; 4749 } 4750 4751 double_rq_lock(rq, p_rq); 4752 if (task_rq(p) != p_rq) { 4753 double_rq_unlock(rq, p_rq); 4754 goto again; 4755 } 4756 4757 if (!curr->sched_class->yield_to_task) 4758 goto out_unlock; 4759 4760 if (curr->sched_class != p->sched_class) 4761 goto out_unlock; 4762 4763 if (task_running(p_rq, p) || p->state) 4764 goto out_unlock; 4765 4766 yielded = curr->sched_class->yield_to_task(rq, p, preempt); 4767 if (yielded) { 4768 schedstat_inc(rq, yld_count); 4769 /* 4770 * Make p's CPU reschedule; pick_next_entity takes care of 4771 * fairness. 4772 */ 4773 if (preempt && rq != p_rq) 4774 resched_curr(p_rq); 4775 } 4776 4777 out_unlock: 4778 double_rq_unlock(rq, p_rq); 4779 out_irq: 4780 local_irq_restore(flags); 4781 4782 if (yielded > 0) 4783 schedule(); 4784 4785 return yielded; 4786 } 4787 EXPORT_SYMBOL_GPL(yield_to); 4788 4789 /* 4790 * This task is about to go to sleep on IO. Increment rq->nr_iowait so 4791 * that process accounting knows that this is a task in IO wait state. 4792 */ 4793 long __sched io_schedule_timeout(long timeout) 4794 { 4795 int old_iowait = current->in_iowait; 4796 struct rq *rq; 4797 long ret; 4798 4799 current->in_iowait = 1; 4800 blk_schedule_flush_plug(current); 4801 4802 delayacct_blkio_start(); 4803 rq = raw_rq(); 4804 atomic_inc(&rq->nr_iowait); 4805 ret = schedule_timeout(timeout); 4806 current->in_iowait = old_iowait; 4807 atomic_dec(&rq->nr_iowait); 4808 delayacct_blkio_end(); 4809 4810 return ret; 4811 } 4812 EXPORT_SYMBOL(io_schedule_timeout); 4813 4814 /** 4815 * sys_sched_get_priority_max - return maximum RT priority. 4816 * @policy: scheduling class. 4817 * 4818 * Return: On success, this syscall returns the maximum 4819 * rt_priority that can be used by a given scheduling class. 4820 * On failure, a negative error code is returned. 4821 */ 4822 SYSCALL_DEFINE1(sched_get_priority_max, int, policy) 4823 { 4824 int ret = -EINVAL; 4825 4826 switch (policy) { 4827 case SCHED_FIFO: 4828 case SCHED_RR: 4829 ret = MAX_USER_RT_PRIO-1; 4830 break; 4831 case SCHED_DEADLINE: 4832 case SCHED_NORMAL: 4833 case SCHED_BATCH: 4834 case SCHED_IDLE: 4835 ret = 0; 4836 break; 4837 } 4838 return ret; 4839 } 4840 4841 /** 4842 * sys_sched_get_priority_min - return minimum RT priority. 4843 * @policy: scheduling class. 4844 * 4845 * Return: On success, this syscall returns the minimum 4846 * rt_priority that can be used by a given scheduling class. 4847 * On failure, a negative error code is returned. 4848 */ 4849 SYSCALL_DEFINE1(sched_get_priority_min, int, policy) 4850 { 4851 int ret = -EINVAL; 4852 4853 switch (policy) { 4854 case SCHED_FIFO: 4855 case SCHED_RR: 4856 ret = 1; 4857 break; 4858 case SCHED_DEADLINE: 4859 case SCHED_NORMAL: 4860 case SCHED_BATCH: 4861 case SCHED_IDLE: 4862 ret = 0; 4863 } 4864 return ret; 4865 } 4866 4867 /** 4868 * sys_sched_rr_get_interval - return the default timeslice of a process. 4869 * @pid: pid of the process. 4870 * @interval: userspace pointer to the timeslice value. 
4871 * 4872 * this syscall writes the default timeslice value of a given process 4873 * into the user-space timespec buffer. A value of '0' means infinity. 4874 * 4875 * Return: On success, 0 and the timeslice is in @interval. Otherwise, 4876 * an error code. 4877 */ 4878 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, 4879 struct timespec __user *, interval) 4880 { 4881 struct task_struct *p; 4882 unsigned int time_slice; 4883 unsigned long flags; 4884 struct rq *rq; 4885 int retval; 4886 struct timespec t; 4887 4888 if (pid < 0) 4889 return -EINVAL; 4890 4891 retval = -ESRCH; 4892 rcu_read_lock(); 4893 p = find_process_by_pid(pid); 4894 if (!p) 4895 goto out_unlock; 4896 4897 retval = security_task_getscheduler(p); 4898 if (retval) 4899 goto out_unlock; 4900 4901 rq = task_rq_lock(p, &flags); 4902 time_slice = 0; 4903 if (p->sched_class->get_rr_interval) 4904 time_slice = p->sched_class->get_rr_interval(rq, p); 4905 task_rq_unlock(rq, p, &flags); 4906 4907 rcu_read_unlock(); 4908 jiffies_to_timespec(time_slice, &t); 4909 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; 4910 return retval; 4911 4912 out_unlock: 4913 rcu_read_unlock(); 4914 return retval; 4915 } 4916 4917 static const char stat_nam[] = TASK_STATE_TO_CHAR_STR; 4918 4919 void sched_show_task(struct task_struct *p) 4920 { 4921 unsigned long free = 0; 4922 int ppid; 4923 unsigned long state = p->state; 4924 4925 if (state) 4926 state = __ffs(state) + 1; 4927 printk(KERN_INFO "%-15.15s %c", p->comm, 4928 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?'); 4929 #if BITS_PER_LONG == 32 4930 if (state == TASK_RUNNING) 4931 printk(KERN_CONT " running "); 4932 else 4933 printk(KERN_CONT " %08lx ", thread_saved_pc(p)); 4934 #else 4935 if (state == TASK_RUNNING) 4936 printk(KERN_CONT " running task "); 4937 else 4938 printk(KERN_CONT " %016lx ", thread_saved_pc(p)); 4939 #endif 4940 #ifdef CONFIG_DEBUG_STACK_USAGE 4941 free = stack_not_used(p); 4942 #endif 4943 ppid = 0; 4944 rcu_read_lock(); 4945 if (pid_alive(p)) 4946 ppid = task_pid_nr(rcu_dereference(p->real_parent)); 4947 rcu_read_unlock(); 4948 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free, 4949 task_pid_nr(p), ppid, 4950 (unsigned long)task_thread_info(p)->flags); 4951 4952 print_worker_info(KERN_INFO, p); 4953 show_stack(p, NULL); 4954 } 4955 4956 void show_state_filter(unsigned long state_filter) 4957 { 4958 struct task_struct *g, *p; 4959 4960 #if BITS_PER_LONG == 32 4961 printk(KERN_INFO 4962 " task PC stack pid father\n"); 4963 #else 4964 printk(KERN_INFO 4965 " task PC stack pid father\n"); 4966 #endif 4967 rcu_read_lock(); 4968 for_each_process_thread(g, p) { 4969 /* 4970 * reset the NMI-timeout, listing all files on a slow 4971 * console might take a lot of time: 4972 */ 4973 touch_nmi_watchdog(); 4974 if (!state_filter || (p->state & state_filter)) 4975 sched_show_task(p); 4976 } 4977 4978 touch_all_softlockup_watchdogs(); 4979 4980 #ifdef CONFIG_SCHED_DEBUG 4981 sysrq_sched_debug_show(); 4982 #endif 4983 rcu_read_unlock(); 4984 /* 4985 * Only show locks if all tasks are dumped: 4986 */ 4987 if (!state_filter) 4988 debug_show_all_locks(); 4989 } 4990 4991 void init_idle_bootup_task(struct task_struct *idle) 4992 { 4993 idle->sched_class = &idle_sched_class; 4994 } 4995 4996 /** 4997 * init_idle - set up an idle thread for a given CPU 4998 * @idle: task in question 4999 * @cpu: cpu the idle task belongs to 5000 * 5001 * NOTE: this function does not set the idle thread's NEED_RESCHED 5002 * flag, to make booting more robust. 
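 *
 * Typical callers (illustrative): sched_init() runs this on the current task
 * for the boot CPU, and fork_idle() uses it when the per-CPU idle threads
 * for secondary CPUs are created.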
5003 */ 5004 void init_idle(struct task_struct *idle, int cpu) 5005 { 5006 struct rq *rq = cpu_rq(cpu); 5007 unsigned long flags; 5008 5009 raw_spin_lock_irqsave(&idle->pi_lock, flags); 5010 raw_spin_lock(&rq->lock); 5011 5012 __sched_fork(0, idle); 5013 idle->state = TASK_RUNNING; 5014 idle->se.exec_start = sched_clock(); 5015 5016 kasan_unpoison_task_stack(idle); 5017 5018 #ifdef CONFIG_SMP 5019 /* 5020 * Its possible that init_idle() gets called multiple times on a task, 5021 * in that case do_set_cpus_allowed() will not do the right thing. 5022 * 5023 * And since this is boot we can forgo the serialization. 5024 */ 5025 set_cpus_allowed_common(idle, cpumask_of(cpu)); 5026 #endif 5027 /* 5028 * We're having a chicken and egg problem, even though we are 5029 * holding rq->lock, the cpu isn't yet set to this cpu so the 5030 * lockdep check in task_group() will fail. 5031 * 5032 * Similar case to sched_fork(). / Alternatively we could 5033 * use task_rq_lock() here and obtain the other rq->lock. 5034 * 5035 * Silence PROVE_RCU 5036 */ 5037 rcu_read_lock(); 5038 __set_task_cpu(idle, cpu); 5039 rcu_read_unlock(); 5040 5041 rq->curr = rq->idle = idle; 5042 idle->on_rq = TASK_ON_RQ_QUEUED; 5043 #ifdef CONFIG_SMP 5044 idle->on_cpu = 1; 5045 #endif 5046 raw_spin_unlock(&rq->lock); 5047 raw_spin_unlock_irqrestore(&idle->pi_lock, flags); 5048 5049 /* Set the preempt count _outside_ the spinlocks! */ 5050 init_idle_preempt_count(idle, cpu); 5051 5052 /* 5053 * The idle tasks have their own, simple scheduling class: 5054 */ 5055 idle->sched_class = &idle_sched_class; 5056 ftrace_graph_init_idle_task(idle, cpu); 5057 vtime_init_idle(idle, cpu); 5058 #ifdef CONFIG_SMP 5059 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); 5060 #endif 5061 } 5062 5063 int cpuset_cpumask_can_shrink(const struct cpumask *cur, 5064 const struct cpumask *trial) 5065 { 5066 int ret = 1, trial_cpus; 5067 struct dl_bw *cur_dl_b; 5068 unsigned long flags; 5069 5070 if (!cpumask_weight(cur)) 5071 return ret; 5072 5073 rcu_read_lock_sched(); 5074 cur_dl_b = dl_bw_of(cpumask_any(cur)); 5075 trial_cpus = cpumask_weight(trial); 5076 5077 raw_spin_lock_irqsave(&cur_dl_b->lock, flags); 5078 if (cur_dl_b->bw != -1 && 5079 cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw) 5080 ret = 0; 5081 raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags); 5082 rcu_read_unlock_sched(); 5083 5084 return ret; 5085 } 5086 5087 int task_can_attach(struct task_struct *p, 5088 const struct cpumask *cs_cpus_allowed) 5089 { 5090 int ret = 0; 5091 5092 /* 5093 * Kthreads which disallow setaffinity shouldn't be moved 5094 * to a new cpuset; we don't want to change their cpu 5095 * affinity and isolating such threads by their set of 5096 * allowed nodes is unnecessary. Thus, cpusets are not 5097 * applicable for such threads. This prevents checking for 5098 * success of set_cpus_allowed_ptr() on all attached tasks 5099 * before cpus_allowed may be changed. 
5100 */ 5101 if (p->flags & PF_NO_SETAFFINITY) { 5102 ret = -EINVAL; 5103 goto out; 5104 } 5105 5106 #ifdef CONFIG_SMP 5107 if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span, 5108 cs_cpus_allowed)) { 5109 unsigned int dest_cpu = cpumask_any_and(cpu_active_mask, 5110 cs_cpus_allowed); 5111 struct dl_bw *dl_b; 5112 bool overflow; 5113 int cpus; 5114 unsigned long flags; 5115 5116 rcu_read_lock_sched(); 5117 dl_b = dl_bw_of(dest_cpu); 5118 raw_spin_lock_irqsave(&dl_b->lock, flags); 5119 cpus = dl_bw_cpus(dest_cpu); 5120 overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw); 5121 if (overflow) 5122 ret = -EBUSY; 5123 else { 5124 /* 5125 * We reserve space for this task in the destination 5126 * root_domain, as we can't fail after this point. 5127 * We will free resources in the source root_domain 5128 * later on (see set_cpus_allowed_dl()). 5129 */ 5130 __dl_add(dl_b, p->dl.dl_bw); 5131 } 5132 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 5133 rcu_read_unlock_sched(); 5134 5135 } 5136 #endif 5137 out: 5138 return ret; 5139 } 5140 5141 #ifdef CONFIG_SMP 5142 5143 #ifdef CONFIG_NUMA_BALANCING 5144 /* Migrate current task p to target_cpu */ 5145 int migrate_task_to(struct task_struct *p, int target_cpu) 5146 { 5147 struct migration_arg arg = { p, target_cpu }; 5148 int curr_cpu = task_cpu(p); 5149 5150 if (curr_cpu == target_cpu) 5151 return 0; 5152 5153 if (!cpumask_test_cpu(target_cpu, tsk_cpus_allowed(p))) 5154 return -EINVAL; 5155 5156 /* TODO: This is not properly updating schedstats */ 5157 5158 trace_sched_move_numa(p, curr_cpu, target_cpu); 5159 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg); 5160 } 5161 5162 /* 5163 * Requeue a task on a given node and accurately track the number of NUMA 5164 * tasks on the runqueues 5165 */ 5166 void sched_setnuma(struct task_struct *p, int nid) 5167 { 5168 struct rq *rq; 5169 unsigned long flags; 5170 bool queued, running; 5171 5172 rq = task_rq_lock(p, &flags); 5173 queued = task_on_rq_queued(p); 5174 running = task_current(rq, p); 5175 5176 if (queued) 5177 dequeue_task(rq, p, DEQUEUE_SAVE); 5178 if (running) 5179 put_prev_task(rq, p); 5180 5181 p->numa_preferred_nid = nid; 5182 5183 if (running) 5184 p->sched_class->set_curr_task(rq); 5185 if (queued) 5186 enqueue_task(rq, p, ENQUEUE_RESTORE); 5187 task_rq_unlock(rq, p, &flags); 5188 } 5189 #endif /* CONFIG_NUMA_BALANCING */ 5190 5191 #ifdef CONFIG_HOTPLUG_CPU 5192 /* 5193 * Ensures that the idle task is using init_mm right before its cpu goes 5194 * offline. 5195 */ 5196 void idle_task_exit(void) 5197 { 5198 struct mm_struct *mm = current->active_mm; 5199 5200 BUG_ON(cpu_online(smp_processor_id())); 5201 5202 if (mm != &init_mm) { 5203 switch_mm(mm, &init_mm, current); 5204 finish_arch_post_lock_switch(); 5205 } 5206 mmdrop(mm); 5207 } 5208 5209 /* 5210 * Since this CPU is going 'away' for a while, fold any nr_active delta 5211 * we might have. Assumes we're called after migrate_tasks() so that the 5212 * nr_active count is stable. 5213 * 5214 * Also see the comment "Global load-average calculations". 
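 *
 * Illustrative example: if rq->calc_load_active still carries the count from
 * the last sampling fold but the now-emptied runqueue accounts for fewer (or
 * more) active tasks, calc_load_fold_active() returns that remaining delta
 * and we apply it to the global calc_load_tasks below, so the load average
 * does not go stale just because this CPU is disappearing.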
5215 */ 5216 static void calc_load_migrate(struct rq *rq) 5217 { 5218 long delta = calc_load_fold_active(rq); 5219 if (delta) 5220 atomic_long_add(delta, &calc_load_tasks); 5221 } 5222 5223 static void put_prev_task_fake(struct rq *rq, struct task_struct *prev) 5224 { 5225 } 5226 5227 static const struct sched_class fake_sched_class = { 5228 .put_prev_task = put_prev_task_fake, 5229 }; 5230 5231 static struct task_struct fake_task = { 5232 /* 5233 * Avoid pull_{rt,dl}_task() 5234 */ 5235 .prio = MAX_PRIO + 1, 5236 .sched_class = &fake_sched_class, 5237 }; 5238 5239 /* 5240 * Migrate all tasks from the rq, sleeping tasks will be migrated by 5241 * try_to_wake_up()->select_task_rq(). 5242 * 5243 * Called with rq->lock held even though we'er in stop_machine() and 5244 * there's no concurrency possible, we hold the required locks anyway 5245 * because of lock validation efforts. 5246 */ 5247 static void migrate_tasks(struct rq *dead_rq) 5248 { 5249 struct rq *rq = dead_rq; 5250 struct task_struct *next, *stop = rq->stop; 5251 int dest_cpu; 5252 5253 /* 5254 * Fudge the rq selection such that the below task selection loop 5255 * doesn't get stuck on the currently eligible stop task. 5256 * 5257 * We're currently inside stop_machine() and the rq is either stuck 5258 * in the stop_machine_cpu_stop() loop, or we're executing this code, 5259 * either way we should never end up calling schedule() until we're 5260 * done here. 5261 */ 5262 rq->stop = NULL; 5263 5264 /* 5265 * put_prev_task() and pick_next_task() sched 5266 * class method both need to have an up-to-date 5267 * value of rq->clock[_task] 5268 */ 5269 update_rq_clock(rq); 5270 5271 for (;;) { 5272 /* 5273 * There's this thread running, bail when that's the only 5274 * remaining thread. 5275 */ 5276 if (rq->nr_running == 1) 5277 break; 5278 5279 /* 5280 * pick_next_task assumes pinned rq->lock. 5281 */ 5282 lockdep_pin_lock(&rq->lock); 5283 next = pick_next_task(rq, &fake_task); 5284 BUG_ON(!next); 5285 next->sched_class->put_prev_task(rq, next); 5286 5287 /* 5288 * Rules for changing task_struct::cpus_allowed are holding 5289 * both pi_lock and rq->lock, such that holding either 5290 * stabilizes the mask. 5291 * 5292 * Drop rq->lock is not quite as disastrous as it usually is 5293 * because !cpu_active at this point, which means load-balance 5294 * will not interfere. Also, stop-machine. 5295 */ 5296 lockdep_unpin_lock(&rq->lock); 5297 raw_spin_unlock(&rq->lock); 5298 raw_spin_lock(&next->pi_lock); 5299 raw_spin_lock(&rq->lock); 5300 5301 /* 5302 * Since we're inside stop-machine, _nothing_ should have 5303 * changed the task, WARN if weird stuff happened, because in 5304 * that case the above rq->lock drop is a fail too. 5305 */ 5306 if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) { 5307 raw_spin_unlock(&next->pi_lock); 5308 continue; 5309 } 5310 5311 /* Find suitable destination for @next, with force if needed. 
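 * (Illustrative summary: select_fallback_rq() first tries the task's
 * remaining allowed and active CPUs and, if none are left, progressively
 * widens the choice, ultimately breaking affinity and warning that it had
 * to do so; see its definition earlier in this file.)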
*/ 5312 dest_cpu = select_fallback_rq(dead_rq->cpu, next); 5313 5314 rq = __migrate_task(rq, next, dest_cpu); 5315 if (rq != dead_rq) { 5316 raw_spin_unlock(&rq->lock); 5317 rq = dead_rq; 5318 raw_spin_lock(&rq->lock); 5319 } 5320 raw_spin_unlock(&next->pi_lock); 5321 } 5322 5323 rq->stop = stop; 5324 } 5325 #endif /* CONFIG_HOTPLUG_CPU */ 5326 5327 static void set_rq_online(struct rq *rq) 5328 { 5329 if (!rq->online) { 5330 const struct sched_class *class; 5331 5332 cpumask_set_cpu(rq->cpu, rq->rd->online); 5333 rq->online = 1; 5334 5335 for_each_class(class) { 5336 if (class->rq_online) 5337 class->rq_online(rq); 5338 } 5339 } 5340 } 5341 5342 static void set_rq_offline(struct rq *rq) 5343 { 5344 if (rq->online) { 5345 const struct sched_class *class; 5346 5347 for_each_class(class) { 5348 if (class->rq_offline) 5349 class->rq_offline(rq); 5350 } 5351 5352 cpumask_clear_cpu(rq->cpu, rq->rd->online); 5353 rq->online = 0; 5354 } 5355 } 5356 5357 /* 5358 * migration_call - callback that gets triggered when a CPU is added. 5359 * Here we can start up the necessary migration thread for the new CPU. 5360 */ 5361 static int 5362 migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) 5363 { 5364 int cpu = (long)hcpu; 5365 unsigned long flags; 5366 struct rq *rq = cpu_rq(cpu); 5367 5368 switch (action & ~CPU_TASKS_FROZEN) { 5369 5370 case CPU_UP_PREPARE: 5371 rq->calc_load_update = calc_load_update; 5372 break; 5373 5374 case CPU_ONLINE: 5375 /* Update our root-domain */ 5376 raw_spin_lock_irqsave(&rq->lock, flags); 5377 if (rq->rd) { 5378 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 5379 5380 set_rq_online(rq); 5381 } 5382 raw_spin_unlock_irqrestore(&rq->lock, flags); 5383 break; 5384 5385 #ifdef CONFIG_HOTPLUG_CPU 5386 case CPU_DYING: 5387 sched_ttwu_pending(); 5388 /* Update our root-domain */ 5389 raw_spin_lock_irqsave(&rq->lock, flags); 5390 if (rq->rd) { 5391 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 5392 set_rq_offline(rq); 5393 } 5394 migrate_tasks(rq); 5395 BUG_ON(rq->nr_running != 1); /* the migration thread */ 5396 raw_spin_unlock_irqrestore(&rq->lock, flags); 5397 break; 5398 5399 case CPU_DEAD: 5400 calc_load_migrate(rq); 5401 break; 5402 #endif 5403 } 5404 5405 update_max_interval(); 5406 5407 return NOTIFY_OK; 5408 } 5409 5410 /* 5411 * Register at high priority so that task migration (migrate_all_tasks) 5412 * happens before everything else. This has to be lower priority than 5413 * the notifier in the perf_event subsystem, though. 
5414 */ 5415 static struct notifier_block migration_notifier = { 5416 .notifier_call = migration_call, 5417 .priority = CPU_PRI_MIGRATION, 5418 }; 5419 5420 static void set_cpu_rq_start_time(void) 5421 { 5422 int cpu = smp_processor_id(); 5423 struct rq *rq = cpu_rq(cpu); 5424 rq->age_stamp = sched_clock_cpu(cpu); 5425 } 5426 5427 static int sched_cpu_active(struct notifier_block *nfb, 5428 unsigned long action, void *hcpu) 5429 { 5430 int cpu = (long)hcpu; 5431 5432 switch (action & ~CPU_TASKS_FROZEN) { 5433 case CPU_STARTING: 5434 set_cpu_rq_start_time(); 5435 return NOTIFY_OK; 5436 5437 case CPU_DOWN_FAILED: 5438 set_cpu_active(cpu, true); 5439 return NOTIFY_OK; 5440 5441 default: 5442 return NOTIFY_DONE; 5443 } 5444 } 5445 5446 static int sched_cpu_inactive(struct notifier_block *nfb, 5447 unsigned long action, void *hcpu) 5448 { 5449 switch (action & ~CPU_TASKS_FROZEN) { 5450 case CPU_DOWN_PREPARE: 5451 set_cpu_active((long)hcpu, false); 5452 return NOTIFY_OK; 5453 default: 5454 return NOTIFY_DONE; 5455 } 5456 } 5457 5458 static int __init migration_init(void) 5459 { 5460 void *cpu = (void *)(long)smp_processor_id(); 5461 int err; 5462 5463 /* Initialize migration for the boot CPU */ 5464 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu); 5465 BUG_ON(err == NOTIFY_BAD); 5466 migration_call(&migration_notifier, CPU_ONLINE, cpu); 5467 register_cpu_notifier(&migration_notifier); 5468 5469 /* Register cpu active notifiers */ 5470 cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE); 5471 cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE); 5472 5473 return 0; 5474 } 5475 early_initcall(migration_init); 5476 5477 static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */ 5478 5479 #ifdef CONFIG_SCHED_DEBUG 5480 5481 static __read_mostly int sched_debug_enabled; 5482 5483 static int __init sched_debug_setup(char *str) 5484 { 5485 sched_debug_enabled = 1; 5486 5487 return 0; 5488 } 5489 early_param("sched_debug", sched_debug_setup); 5490 5491 static inline bool sched_debug(void) 5492 { 5493 return sched_debug_enabled; 5494 } 5495 5496 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, 5497 struct cpumask *groupmask) 5498 { 5499 struct sched_group *group = sd->groups; 5500 5501 cpumask_clear(groupmask); 5502 5503 printk(KERN_DEBUG "%*s domain %d: ", level, "", level); 5504 5505 if (!(sd->flags & SD_LOAD_BALANCE)) { 5506 printk("does not load-balance\n"); 5507 if (sd->parent) 5508 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain" 5509 " has parent"); 5510 return -1; 5511 } 5512 5513 printk(KERN_CONT "span %*pbl level %s\n", 5514 cpumask_pr_args(sched_domain_span(sd)), sd->name); 5515 5516 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { 5517 printk(KERN_ERR "ERROR: domain->span does not contain " 5518 "CPU%d\n", cpu); 5519 } 5520 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) { 5521 printk(KERN_ERR "ERROR: domain->groups does not contain" 5522 " CPU%d\n", cpu); 5523 } 5524 5525 printk(KERN_DEBUG "%*s groups:", level + 1, ""); 5526 do { 5527 if (!group) { 5528 printk("\n"); 5529 printk(KERN_ERR "ERROR: group is NULL\n"); 5530 break; 5531 } 5532 5533 if (!cpumask_weight(sched_group_cpus(group))) { 5534 printk(KERN_CONT "\n"); 5535 printk(KERN_ERR "ERROR: empty group\n"); 5536 break; 5537 } 5538 5539 if (!(sd->flags & SD_OVERLAP) && 5540 cpumask_intersects(groupmask, sched_group_cpus(group))) { 5541 printk(KERN_CONT "\n"); 5542 printk(KERN_ERR "ERROR: repeated CPUs\n"); 5543 break; 5544 } 5545 5546 cpumask_or(groupmask, groupmask, 
sched_group_cpus(group)); 5547 5548 printk(KERN_CONT " %*pbl", 5549 cpumask_pr_args(sched_group_cpus(group))); 5550 if (group->sgc->capacity != SCHED_CAPACITY_SCALE) { 5551 printk(KERN_CONT " (cpu_capacity = %d)", 5552 group->sgc->capacity); 5553 } 5554 5555 group = group->next; 5556 } while (group != sd->groups); 5557 printk(KERN_CONT "\n"); 5558 5559 if (!cpumask_equal(sched_domain_span(sd), groupmask)) 5560 printk(KERN_ERR "ERROR: groups don't span domain->span\n"); 5561 5562 if (sd->parent && 5563 !cpumask_subset(groupmask, sched_domain_span(sd->parent))) 5564 printk(KERN_ERR "ERROR: parent span is not a superset " 5565 "of domain->span\n"); 5566 return 0; 5567 } 5568 5569 static void sched_domain_debug(struct sched_domain *sd, int cpu) 5570 { 5571 int level = 0; 5572 5573 if (!sched_debug_enabled) 5574 return; 5575 5576 if (!sd) { 5577 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); 5578 return; 5579 } 5580 5581 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); 5582 5583 for (;;) { 5584 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask)) 5585 break; 5586 level++; 5587 sd = sd->parent; 5588 if (!sd) 5589 break; 5590 } 5591 } 5592 #else /* !CONFIG_SCHED_DEBUG */ 5593 # define sched_domain_debug(sd, cpu) do { } while (0) 5594 static inline bool sched_debug(void) 5595 { 5596 return false; 5597 } 5598 #endif /* CONFIG_SCHED_DEBUG */ 5599 5600 static int sd_degenerate(struct sched_domain *sd) 5601 { 5602 if (cpumask_weight(sched_domain_span(sd)) == 1) 5603 return 1; 5604 5605 /* Following flags need at least 2 groups */ 5606 if (sd->flags & (SD_LOAD_BALANCE | 5607 SD_BALANCE_NEWIDLE | 5608 SD_BALANCE_FORK | 5609 SD_BALANCE_EXEC | 5610 SD_SHARE_CPUCAPACITY | 5611 SD_SHARE_PKG_RESOURCES | 5612 SD_SHARE_POWERDOMAIN)) { 5613 if (sd->groups != sd->groups->next) 5614 return 0; 5615 } 5616 5617 /* Following flags don't use groups */ 5618 if (sd->flags & (SD_WAKE_AFFINE)) 5619 return 0; 5620 5621 return 1; 5622 } 5623 5624 static int 5625 sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) 5626 { 5627 unsigned long cflags = sd->flags, pflags = parent->flags; 5628 5629 if (sd_degenerate(parent)) 5630 return 1; 5631 5632 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent))) 5633 return 0; 5634 5635 /* Flags needing groups don't count if only 1 group in parent */ 5636 if (parent->groups == parent->groups->next) { 5637 pflags &= ~(SD_LOAD_BALANCE | 5638 SD_BALANCE_NEWIDLE | 5639 SD_BALANCE_FORK | 5640 SD_BALANCE_EXEC | 5641 SD_SHARE_CPUCAPACITY | 5642 SD_SHARE_PKG_RESOURCES | 5643 SD_PREFER_SIBLING | 5644 SD_SHARE_POWERDOMAIN); 5645 if (nr_node_ids == 1) 5646 pflags &= ~SD_SERIALIZE; 5647 } 5648 if (~cflags & pflags) 5649 return 0; 5650 5651 return 1; 5652 } 5653 5654 static void free_rootdomain(struct rcu_head *rcu) 5655 { 5656 struct root_domain *rd = container_of(rcu, struct root_domain, rcu); 5657 5658 cpupri_cleanup(&rd->cpupri); 5659 cpudl_cleanup(&rd->cpudl); 5660 free_cpumask_var(rd->dlo_mask); 5661 free_cpumask_var(rd->rto_mask); 5662 free_cpumask_var(rd->online); 5663 free_cpumask_var(rd->span); 5664 kfree(rd); 5665 } 5666 5667 static void rq_attach_root(struct rq *rq, struct root_domain *rd) 5668 { 5669 struct root_domain *old_rd = NULL; 5670 unsigned long flags; 5671 5672 raw_spin_lock_irqsave(&rq->lock, flags); 5673 5674 if (rq->rd) { 5675 old_rd = rq->rd; 5676 5677 if (cpumask_test_cpu(rq->cpu, old_rd->online)) 5678 set_rq_offline(rq); 5679 5680 cpumask_clear_cpu(rq->cpu, old_rd->span); 5681 5682 /* 5683 * If 
we dont want to free the old_rd yet then 5684 * set old_rd to NULL to skip the freeing later 5685 * in this function: 5686 */ 5687 if (!atomic_dec_and_test(&old_rd->refcount)) 5688 old_rd = NULL; 5689 } 5690 5691 atomic_inc(&rd->refcount); 5692 rq->rd = rd; 5693 5694 cpumask_set_cpu(rq->cpu, rd->span); 5695 if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) 5696 set_rq_online(rq); 5697 5698 raw_spin_unlock_irqrestore(&rq->lock, flags); 5699 5700 if (old_rd) 5701 call_rcu_sched(&old_rd->rcu, free_rootdomain); 5702 } 5703 5704 static int init_rootdomain(struct root_domain *rd) 5705 { 5706 memset(rd, 0, sizeof(*rd)); 5707 5708 if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL)) 5709 goto out; 5710 if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL)) 5711 goto free_span; 5712 if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL)) 5713 goto free_online; 5714 if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) 5715 goto free_dlo_mask; 5716 5717 init_dl_bw(&rd->dl_bw); 5718 if (cpudl_init(&rd->cpudl) != 0) 5719 goto free_dlo_mask; 5720 5721 if (cpupri_init(&rd->cpupri) != 0) 5722 goto free_rto_mask; 5723 return 0; 5724 5725 free_rto_mask: 5726 free_cpumask_var(rd->rto_mask); 5727 free_dlo_mask: 5728 free_cpumask_var(rd->dlo_mask); 5729 free_online: 5730 free_cpumask_var(rd->online); 5731 free_span: 5732 free_cpumask_var(rd->span); 5733 out: 5734 return -ENOMEM; 5735 } 5736 5737 /* 5738 * By default the system creates a single root-domain with all cpus as 5739 * members (mimicking the global state we have today). 5740 */ 5741 struct root_domain def_root_domain; 5742 5743 static void init_defrootdomain(void) 5744 { 5745 init_rootdomain(&def_root_domain); 5746 5747 atomic_set(&def_root_domain.refcount, 1); 5748 } 5749 5750 static struct root_domain *alloc_rootdomain(void) 5751 { 5752 struct root_domain *rd; 5753 5754 rd = kmalloc(sizeof(*rd), GFP_KERNEL); 5755 if (!rd) 5756 return NULL; 5757 5758 if (init_rootdomain(rd) != 0) { 5759 kfree(rd); 5760 return NULL; 5761 } 5762 5763 return rd; 5764 } 5765 5766 static void free_sched_groups(struct sched_group *sg, int free_sgc) 5767 { 5768 struct sched_group *tmp, *first; 5769 5770 if (!sg) 5771 return; 5772 5773 first = sg; 5774 do { 5775 tmp = sg->next; 5776 5777 if (free_sgc && atomic_dec_and_test(&sg->sgc->ref)) 5778 kfree(sg->sgc); 5779 5780 kfree(sg); 5781 sg = tmp; 5782 } while (sg != first); 5783 } 5784 5785 static void free_sched_domain(struct rcu_head *rcu) 5786 { 5787 struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu); 5788 5789 /* 5790 * If its an overlapping domain it has private groups, iterate and 5791 * nuke them all. 5792 */ 5793 if (sd->flags & SD_OVERLAP) { 5794 free_sched_groups(sd->groups, 1); 5795 } else if (atomic_dec_and_test(&sd->groups->ref)) { 5796 kfree(sd->groups->sgc); 5797 kfree(sd->groups); 5798 } 5799 kfree(sd); 5800 } 5801 5802 static void destroy_sched_domain(struct sched_domain *sd, int cpu) 5803 { 5804 call_rcu(&sd->rcu, free_sched_domain); 5805 } 5806 5807 static void destroy_sched_domains(struct sched_domain *sd, int cpu) 5808 { 5809 for (; sd; sd = sd->parent) 5810 destroy_sched_domain(sd, cpu); 5811 } 5812 5813 /* 5814 * Keep a special pointer to the highest sched_domain that has 5815 * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this 5816 * allows us to avoid some pointer chasing select_idle_sibling(). 
5817 * 5818 * Also keep a unique ID per domain (we use the first cpu number in 5819 * the cpumask of the domain), this allows us to quickly tell if 5820 * two cpus are in the same cache domain, see cpus_share_cache(). 5821 */ 5822 DEFINE_PER_CPU(struct sched_domain *, sd_llc); 5823 DEFINE_PER_CPU(int, sd_llc_size); 5824 DEFINE_PER_CPU(int, sd_llc_id); 5825 DEFINE_PER_CPU(struct sched_domain *, sd_numa); 5826 DEFINE_PER_CPU(struct sched_domain *, sd_busy); 5827 DEFINE_PER_CPU(struct sched_domain *, sd_asym); 5828 5829 static void update_top_cache_domain(int cpu) 5830 { 5831 struct sched_domain *sd; 5832 struct sched_domain *busy_sd = NULL; 5833 int id = cpu; 5834 int size = 1; 5835 5836 sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES); 5837 if (sd) { 5838 id = cpumask_first(sched_domain_span(sd)); 5839 size = cpumask_weight(sched_domain_span(sd)); 5840 busy_sd = sd->parent; /* sd_busy */ 5841 } 5842 rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd); 5843 5844 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); 5845 per_cpu(sd_llc_size, cpu) = size; 5846 per_cpu(sd_llc_id, cpu) = id; 5847 5848 sd = lowest_flag_domain(cpu, SD_NUMA); 5849 rcu_assign_pointer(per_cpu(sd_numa, cpu), sd); 5850 5851 sd = highest_flag_domain(cpu, SD_ASYM_PACKING); 5852 rcu_assign_pointer(per_cpu(sd_asym, cpu), sd); 5853 } 5854 5855 /* 5856 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must 5857 * hold the hotplug lock. 5858 */ 5859 static void 5860 cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) 5861 { 5862 struct rq *rq = cpu_rq(cpu); 5863 struct sched_domain *tmp; 5864 5865 /* Remove the sched domains which do not contribute to scheduling. */ 5866 for (tmp = sd; tmp; ) { 5867 struct sched_domain *parent = tmp->parent; 5868 if (!parent) 5869 break; 5870 5871 if (sd_parent_degenerate(tmp, parent)) { 5872 tmp->parent = parent->parent; 5873 if (parent->parent) 5874 parent->parent->child = tmp; 5875 /* 5876 * Transfer SD_PREFER_SIBLING down in case of a 5877 * degenerate parent; the spans match for this 5878 * so the property transfers. 5879 */ 5880 if (parent->flags & SD_PREFER_SIBLING) 5881 tmp->flags |= SD_PREFER_SIBLING; 5882 destroy_sched_domain(parent, cpu); 5883 } else 5884 tmp = tmp->parent; 5885 } 5886 5887 if (sd && sd_degenerate(sd)) { 5888 tmp = sd; 5889 sd = sd->parent; 5890 destroy_sched_domain(tmp, cpu); 5891 if (sd) 5892 sd->child = NULL; 5893 } 5894 5895 sched_domain_debug(sd, cpu); 5896 5897 rq_attach_root(rq, rd); 5898 tmp = rq->sd; 5899 rcu_assign_pointer(rq->sd, sd); 5900 destroy_sched_domains(tmp, cpu); 5901 5902 update_top_cache_domain(cpu); 5903 } 5904 5905 /* Setup the mask of cpus configured for isolated domains */ 5906 static int __init isolated_cpu_setup(char *str) 5907 { 5908 int ret; 5909 5910 alloc_bootmem_cpumask_var(&cpu_isolated_map); 5911 ret = cpulist_parse(str, cpu_isolated_map); 5912 if (ret) { 5913 pr_err("sched: Error, all isolcpus= values must be between 0 and %d\n", nr_cpu_ids); 5914 return 0; 5915 } 5916 return 1; 5917 } 5918 __setup("isolcpus=", isolated_cpu_setup); 5919 5920 struct s_data { 5921 struct sched_domain ** __percpu sd; 5922 struct root_domain *rd; 5923 }; 5924 5925 enum s_alloc { 5926 sa_rootdomain, 5927 sa_sd, 5928 sa_sd_storage, 5929 sa_none, 5930 }; 5931 5932 /* 5933 * Build an iteration mask that can exclude certain CPUs from the upwards 5934 * domain traversal. 
5935 * 5936 * Asymmetric node setups can result in situations where the domain tree is of 5937 * unequal depth, make sure to skip domains that already cover the entire 5938 * range. 5939 * 5940 * In that case build_sched_domains() will have terminated the iteration early 5941 * and our sibling sd spans will be empty. Domains should always include the 5942 * cpu they're built on, so check that. 5943 * 5944 */ 5945 static void build_group_mask(struct sched_domain *sd, struct sched_group *sg) 5946 { 5947 const struct cpumask *span = sched_domain_span(sd); 5948 struct sd_data *sdd = sd->private; 5949 struct sched_domain *sibling; 5950 int i; 5951 5952 for_each_cpu(i, span) { 5953 sibling = *per_cpu_ptr(sdd->sd, i); 5954 if (!cpumask_test_cpu(i, sched_domain_span(sibling))) 5955 continue; 5956 5957 cpumask_set_cpu(i, sched_group_mask(sg)); 5958 } 5959 } 5960 5961 /* 5962 * Return the canonical balance cpu for this group, this is the first cpu 5963 * of this group that's also in the iteration mask. 5964 */ 5965 int group_balance_cpu(struct sched_group *sg) 5966 { 5967 return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg)); 5968 } 5969 5970 static int 5971 build_overlap_sched_groups(struct sched_domain *sd, int cpu) 5972 { 5973 struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg; 5974 const struct cpumask *span = sched_domain_span(sd); 5975 struct cpumask *covered = sched_domains_tmpmask; 5976 struct sd_data *sdd = sd->private; 5977 struct sched_domain *sibling; 5978 int i; 5979 5980 cpumask_clear(covered); 5981 5982 for_each_cpu(i, span) { 5983 struct cpumask *sg_span; 5984 5985 if (cpumask_test_cpu(i, covered)) 5986 continue; 5987 5988 sibling = *per_cpu_ptr(sdd->sd, i); 5989 5990 /* See the comment near build_group_mask(). */ 5991 if (!cpumask_test_cpu(i, sched_domain_span(sibling))) 5992 continue; 5993 5994 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), 5995 GFP_KERNEL, cpu_to_node(cpu)); 5996 5997 if (!sg) 5998 goto fail; 5999 6000 sg_span = sched_group_cpus(sg); 6001 if (sibling->child) 6002 cpumask_copy(sg_span, sched_domain_span(sibling->child)); 6003 else 6004 cpumask_set_cpu(i, sg_span); 6005 6006 cpumask_or(covered, covered, sg_span); 6007 6008 sg->sgc = *per_cpu_ptr(sdd->sgc, i); 6009 if (atomic_inc_return(&sg->sgc->ref) == 1) 6010 build_group_mask(sd, sg); 6011 6012 /* 6013 * Initialize sgc->capacity such that even if we mess up the 6014 * domains and no possible iteration will get us here, we won't 6015 * die on a /0 trap. 6016 */ 6017 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span); 6018 6019 /* 6020 * Make sure the first group of this domain contains the 6021 * canonical balance cpu. Otherwise the sched_domain iteration 6022 * breaks. See update_sg_lb_stats(). 
6023 */ 6024 if ((!groups && cpumask_test_cpu(cpu, sg_span)) || 6025 group_balance_cpu(sg) == cpu) 6026 groups = sg; 6027 6028 if (!first) 6029 first = sg; 6030 if (last) 6031 last->next = sg; 6032 last = sg; 6033 last->next = first; 6034 } 6035 sd->groups = groups; 6036 6037 return 0; 6038 6039 fail: 6040 free_sched_groups(first, 0); 6041 6042 return -ENOMEM; 6043 } 6044 6045 static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg) 6046 { 6047 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); 6048 struct sched_domain *child = sd->child; 6049 6050 if (child) 6051 cpu = cpumask_first(sched_domain_span(child)); 6052 6053 if (sg) { 6054 *sg = *per_cpu_ptr(sdd->sg, cpu); 6055 (*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu); 6056 atomic_set(&(*sg)->sgc->ref, 1); /* for claim_allocations */ 6057 } 6058 6059 return cpu; 6060 } 6061 6062 /* 6063 * build_sched_groups will build a circular linked list of the groups 6064 * covered by the given span, and will set each group's ->cpumask correctly, 6065 * and ->cpu_capacity to 0. 6066 * 6067 * Assumes the sched_domain tree is fully constructed 6068 */ 6069 static int 6070 build_sched_groups(struct sched_domain *sd, int cpu) 6071 { 6072 struct sched_group *first = NULL, *last = NULL; 6073 struct sd_data *sdd = sd->private; 6074 const struct cpumask *span = sched_domain_span(sd); 6075 struct cpumask *covered; 6076 int i; 6077 6078 get_group(cpu, sdd, &sd->groups); 6079 atomic_inc(&sd->groups->ref); 6080 6081 if (cpu != cpumask_first(span)) 6082 return 0; 6083 6084 lockdep_assert_held(&sched_domains_mutex); 6085 covered = sched_domains_tmpmask; 6086 6087 cpumask_clear(covered); 6088 6089 for_each_cpu(i, span) { 6090 struct sched_group *sg; 6091 int group, j; 6092 6093 if (cpumask_test_cpu(i, covered)) 6094 continue; 6095 6096 group = get_group(i, sdd, &sg); 6097 cpumask_setall(sched_group_mask(sg)); 6098 6099 for_each_cpu(j, span) { 6100 if (get_group(j, sdd, NULL) != group) 6101 continue; 6102 6103 cpumask_set_cpu(j, covered); 6104 cpumask_set_cpu(j, sched_group_cpus(sg)); 6105 } 6106 6107 if (!first) 6108 first = sg; 6109 if (last) 6110 last->next = sg; 6111 last = sg; 6112 } 6113 last->next = first; 6114 6115 return 0; 6116 } 6117 6118 /* 6119 * Initialize sched groups cpu_capacity. 6120 * 6121 * cpu_capacity indicates the capacity of sched group, which is used while 6122 * distributing the load between different sched groups in a sched domain. 6123 * Typically cpu_capacity for all the groups in a sched domain will be same 6124 * unless there are asymmetries in the topology. If there are asymmetries, 6125 * group having more cpu_capacity will pickup more load compared to the 6126 * group having less cpu_capacity. 
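 *
 * Worked example (illustrative): in a domain with two groups whose
 * cpu_capacity values are 2048 and 1024, the balancer aims to leave roughly
 * twice as much load on the first group as on the second, i.e. load is
 * spread in proportion to group capacity rather than split evenly.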
6127 */ 6128 static void init_sched_groups_capacity(int cpu, struct sched_domain *sd) 6129 { 6130 struct sched_group *sg = sd->groups; 6131 6132 WARN_ON(!sg); 6133 6134 do { 6135 sg->group_weight = cpumask_weight(sched_group_cpus(sg)); 6136 sg = sg->next; 6137 } while (sg != sd->groups); 6138 6139 if (cpu != group_balance_cpu(sg)) 6140 return; 6141 6142 update_group_capacity(sd, cpu); 6143 atomic_set(&sg->sgc->nr_busy_cpus, sg->group_weight); 6144 } 6145 6146 /* 6147 * Initializers for schedule domains 6148 * Non-inlined to reduce accumulated stack pressure in build_sched_domains() 6149 */ 6150 6151 static int default_relax_domain_level = -1; 6152 int sched_domain_level_max; 6153 6154 static int __init setup_relax_domain_level(char *str) 6155 { 6156 if (kstrtoint(str, 0, &default_relax_domain_level)) 6157 pr_warn("Unable to set relax_domain_level\n"); 6158 6159 return 1; 6160 } 6161 __setup("relax_domain_level=", setup_relax_domain_level); 6162 6163 static void set_domain_attribute(struct sched_domain *sd, 6164 struct sched_domain_attr *attr) 6165 { 6166 int request; 6167 6168 if (!attr || attr->relax_domain_level < 0) { 6169 if (default_relax_domain_level < 0) 6170 return; 6171 else 6172 request = default_relax_domain_level; 6173 } else 6174 request = attr->relax_domain_level; 6175 if (request < sd->level) { 6176 /* turn off idle balance on this domain */ 6177 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); 6178 } else { 6179 /* turn on idle balance on this domain */ 6180 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); 6181 } 6182 } 6183 6184 static void __sdt_free(const struct cpumask *cpu_map); 6185 static int __sdt_alloc(const struct cpumask *cpu_map); 6186 6187 static void __free_domain_allocs(struct s_data *d, enum s_alloc what, 6188 const struct cpumask *cpu_map) 6189 { 6190 switch (what) { 6191 case sa_rootdomain: 6192 if (!atomic_read(&d->rd->refcount)) 6193 free_rootdomain(&d->rd->rcu); /* fall through */ 6194 case sa_sd: 6195 free_percpu(d->sd); /* fall through */ 6196 case sa_sd_storage: 6197 __sdt_free(cpu_map); /* fall through */ 6198 case sa_none: 6199 break; 6200 } 6201 } 6202 6203 static enum s_alloc __visit_domain_allocation_hell(struct s_data *d, 6204 const struct cpumask *cpu_map) 6205 { 6206 memset(d, 0, sizeof(*d)); 6207 6208 if (__sdt_alloc(cpu_map)) 6209 return sa_sd_storage; 6210 d->sd = alloc_percpu(struct sched_domain *); 6211 if (!d->sd) 6212 return sa_sd_storage; 6213 d->rd = alloc_rootdomain(); 6214 if (!d->rd) 6215 return sa_sd; 6216 return sa_rootdomain; 6217 } 6218 6219 /* 6220 * NULL the sd_data elements we've used to build the sched_domain and 6221 * sched_group structure so that the subsequent __free_domain_allocs() 6222 * will not free the data we're using. 
6223 */ 6224 static void claim_allocations(int cpu, struct sched_domain *sd) 6225 { 6226 struct sd_data *sdd = sd->private; 6227 6228 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); 6229 *per_cpu_ptr(sdd->sd, cpu) = NULL; 6230 6231 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref)) 6232 *per_cpu_ptr(sdd->sg, cpu) = NULL; 6233 6234 if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref)) 6235 *per_cpu_ptr(sdd->sgc, cpu) = NULL; 6236 } 6237 6238 #ifdef CONFIG_NUMA 6239 static int sched_domains_numa_levels; 6240 enum numa_topology_type sched_numa_topology_type; 6241 static int *sched_domains_numa_distance; 6242 int sched_max_numa_distance; 6243 static struct cpumask ***sched_domains_numa_masks; 6244 static int sched_domains_curr_level; 6245 #endif 6246 6247 /* 6248 * SD_flags allowed in topology descriptions. 6249 * 6250 * SD_SHARE_CPUCAPACITY - describes SMT topologies 6251 * SD_SHARE_PKG_RESOURCES - describes shared caches 6252 * SD_NUMA - describes NUMA topologies 6253 * SD_SHARE_POWERDOMAIN - describes shared power domain 6254 * 6255 * Odd one out: 6256 * SD_ASYM_PACKING - describes SMT quirks 6257 */ 6258 #define TOPOLOGY_SD_FLAGS \ 6259 (SD_SHARE_CPUCAPACITY | \ 6260 SD_SHARE_PKG_RESOURCES | \ 6261 SD_NUMA | \ 6262 SD_ASYM_PACKING | \ 6263 SD_SHARE_POWERDOMAIN) 6264 6265 static struct sched_domain * 6266 sd_init(struct sched_domain_topology_level *tl, int cpu) 6267 { 6268 struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu); 6269 int sd_weight, sd_flags = 0; 6270 6271 #ifdef CONFIG_NUMA 6272 /* 6273 * Ugly hack to pass state to sd_numa_mask()... 6274 */ 6275 sched_domains_curr_level = tl->numa_level; 6276 #endif 6277 6278 sd_weight = cpumask_weight(tl->mask(cpu)); 6279 6280 if (tl->sd_flags) 6281 sd_flags = (*tl->sd_flags)(); 6282 if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS, 6283 "wrong sd_flags in topology description\n")) 6284 sd_flags &= ~TOPOLOGY_SD_FLAGS; 6285 6286 *sd = (struct sched_domain){ 6287 .min_interval = sd_weight, 6288 .max_interval = 2*sd_weight, 6289 .busy_factor = 32, 6290 .imbalance_pct = 125, 6291 6292 .cache_nice_tries = 0, 6293 .busy_idx = 0, 6294 .idle_idx = 0, 6295 .newidle_idx = 0, 6296 .wake_idx = 0, 6297 .forkexec_idx = 0, 6298 6299 .flags = 1*SD_LOAD_BALANCE 6300 | 1*SD_BALANCE_NEWIDLE 6301 | 1*SD_BALANCE_EXEC 6302 | 1*SD_BALANCE_FORK 6303 | 0*SD_BALANCE_WAKE 6304 | 1*SD_WAKE_AFFINE 6305 | 0*SD_SHARE_CPUCAPACITY 6306 | 0*SD_SHARE_PKG_RESOURCES 6307 | 0*SD_SERIALIZE 6308 | 0*SD_PREFER_SIBLING 6309 | 0*SD_NUMA 6310 | sd_flags 6311 , 6312 6313 .last_balance = jiffies, 6314 .balance_interval = sd_weight, 6315 .smt_gain = 0, 6316 .max_newidle_lb_cost = 0, 6317 .next_decay_max_lb_cost = jiffies, 6318 #ifdef CONFIG_SCHED_DEBUG 6319 .name = tl->name, 6320 #endif 6321 }; 6322 6323 /* 6324 * Convert topological properties into behaviour. 
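 *
 * (As a rough worked example of the main knob tuned below: an
 * imbalance_pct of 110 means a sibling group has to look about 10%
 * busier before load is pulled over, e.g. average loads of 1100 vs.
 * 1000 are just over the threshold, whereas the default 125 demands a
 * 25% gap. The exact comparison lives in the fair-class load balancer,
 * not here.)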
6325 */ 6326 6327 if (sd->flags & SD_SHARE_CPUCAPACITY) { 6328 sd->flags |= SD_PREFER_SIBLING; 6329 sd->imbalance_pct = 110; 6330 sd->smt_gain = 1178; /* ~15% */ 6331 6332 } else if (sd->flags & SD_SHARE_PKG_RESOURCES) { 6333 sd->imbalance_pct = 117; 6334 sd->cache_nice_tries = 1; 6335 sd->busy_idx = 2; 6336 6337 #ifdef CONFIG_NUMA 6338 } else if (sd->flags & SD_NUMA) { 6339 sd->cache_nice_tries = 2; 6340 sd->busy_idx = 3; 6341 sd->idle_idx = 2; 6342 6343 sd->flags |= SD_SERIALIZE; 6344 if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) { 6345 sd->flags &= ~(SD_BALANCE_EXEC | 6346 SD_BALANCE_FORK | 6347 SD_WAKE_AFFINE); 6348 } 6349 6350 #endif 6351 } else { 6352 sd->flags |= SD_PREFER_SIBLING; 6353 sd->cache_nice_tries = 1; 6354 sd->busy_idx = 2; 6355 sd->idle_idx = 1; 6356 } 6357 6358 sd->private = &tl->data; 6359 6360 return sd; 6361 } 6362 6363 /* 6364 * Topology list, bottom-up. 6365 */ 6366 static struct sched_domain_topology_level default_topology[] = { 6367 #ifdef CONFIG_SCHED_SMT 6368 { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) }, 6369 #endif 6370 #ifdef CONFIG_SCHED_MC 6371 { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) }, 6372 #endif 6373 { cpu_cpu_mask, SD_INIT_NAME(DIE) }, 6374 { NULL, }, 6375 }; 6376 6377 static struct sched_domain_topology_level *sched_domain_topology = 6378 default_topology; 6379 6380 #define for_each_sd_topology(tl) \ 6381 for (tl = sched_domain_topology; tl->mask; tl++) 6382 6383 void set_sched_topology(struct sched_domain_topology_level *tl) 6384 { 6385 sched_domain_topology = tl; 6386 } 6387 6388 #ifdef CONFIG_NUMA 6389 6390 static const struct cpumask *sd_numa_mask(int cpu) 6391 { 6392 return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)]; 6393 } 6394 6395 static void sched_numa_warn(const char *str) 6396 { 6397 static int done = false; 6398 int i,j; 6399 6400 if (done) 6401 return; 6402 6403 done = true; 6404 6405 printk(KERN_WARNING "ERROR: %s\n\n", str); 6406 6407 for (i = 0; i < nr_node_ids; i++) { 6408 printk(KERN_WARNING " "); 6409 for (j = 0; j < nr_node_ids; j++) 6410 printk(KERN_CONT "%02d ", node_distance(i,j)); 6411 printk(KERN_CONT "\n"); 6412 } 6413 printk(KERN_WARNING "\n"); 6414 } 6415 6416 bool find_numa_distance(int distance) 6417 { 6418 int i; 6419 6420 if (distance == node_distance(0, 0)) 6421 return true; 6422 6423 for (i = 0; i < sched_domains_numa_levels; i++) { 6424 if (sched_domains_numa_distance[i] == distance) 6425 return true; 6426 } 6427 6428 return false; 6429 } 6430 6431 /* 6432 * A system can have three types of NUMA topology: 6433 * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system 6434 * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes 6435 * NUMA_BACKPLANE: nodes can reach other nodes through a backplane 6436 * 6437 * The difference between a glueless mesh topology and a backplane 6438 * topology lies in whether communication between not directly 6439 * connected nodes goes through intermediary nodes (where programs 6440 * could run), or through backplane controllers. This affects 6441 * placement of programs. 6442 * 6443 * The type of topology can be discerned with the following tests: 6444 * - If the maximum distance between any nodes is 1 hop, the system 6445 * is directly connected. 6446 * - If for two nodes A and B, located N > 1 hops away from each other, 6447 * there is an intermediary node C, which is < N hops away from both 6448 * nodes A and B, the system is a glueless mesh. 
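 *
 * A small hypothetical distance table makes the distinction concrete:
 *
 *	node   0   1   2   3
 *	  0:  10  20  30  20
 *	  1:  20  10  20  30
 *	  2:  30  20  10  20
 *	  3:  20  30  20  10
 *
 * The maximum distance is 30 (e.g. nodes 0 and 2), but node 1 is closer
 * than 30 to both of them, so this is classified as a glueless mesh; if
 * no such intermediary node existed, the fallback classification below
 * would be NUMA_BACKPLANE.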
6449 */ 6450 static void init_numa_topology_type(void) 6451 { 6452 int a, b, c, n; 6453 6454 n = sched_max_numa_distance; 6455 6456 if (sched_domains_numa_levels <= 1) { 6457 sched_numa_topology_type = NUMA_DIRECT; 6458 return; 6459 } 6460 6461 for_each_online_node(a) { 6462 for_each_online_node(b) { 6463 /* Find two nodes furthest removed from each other. */ 6464 if (node_distance(a, b) < n) 6465 continue; 6466 6467 /* Is there an intermediary node between a and b? */ 6468 for_each_online_node(c) { 6469 if (node_distance(a, c) < n && 6470 node_distance(b, c) < n) { 6471 sched_numa_topology_type = 6472 NUMA_GLUELESS_MESH; 6473 return; 6474 } 6475 } 6476 6477 sched_numa_topology_type = NUMA_BACKPLANE; 6478 return; 6479 } 6480 } 6481 } 6482 6483 static void sched_init_numa(void) 6484 { 6485 int next_distance, curr_distance = node_distance(0, 0); 6486 struct sched_domain_topology_level *tl; 6487 int level = 0; 6488 int i, j, k; 6489 6490 sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL); 6491 if (!sched_domains_numa_distance) 6492 return; 6493 6494 /* 6495 * O(nr_nodes^2) deduplicating selection sort -- in order to find the 6496 * unique distances in the node_distance() table. 6497 * 6498 * Assumes node_distance(0,j) includes all distances in 6499 * node_distance(i,j) in order to avoid cubic time. 6500 */ 6501 next_distance = curr_distance; 6502 for (i = 0; i < nr_node_ids; i++) { 6503 for (j = 0; j < nr_node_ids; j++) { 6504 for (k = 0; k < nr_node_ids; k++) { 6505 int distance = node_distance(i, k); 6506 6507 if (distance > curr_distance && 6508 (distance < next_distance || 6509 next_distance == curr_distance)) 6510 next_distance = distance; 6511 6512 /* 6513 * While not a strong assumption it would be nice to know 6514 * about cases where if node A is connected to B, B is not 6515 * equally connected to A. 6516 */ 6517 if (sched_debug() && node_distance(k, i) != distance) 6518 sched_numa_warn("Node-distance not symmetric"); 6519 6520 if (sched_debug() && i && !find_numa_distance(distance)) 6521 sched_numa_warn("Node-0 not representative"); 6522 } 6523 if (next_distance != curr_distance) { 6524 sched_domains_numa_distance[level++] = next_distance; 6525 sched_domains_numa_levels = level; 6526 curr_distance = next_distance; 6527 } else break; 6528 } 6529 6530 /* 6531 * In case of sched_debug() we verify the above assumption. 6532 */ 6533 if (!sched_debug()) 6534 break; 6535 } 6536 6537 if (!level) 6538 return; 6539 6540 /* 6541 * 'level' contains the number of unique distances, excluding the 6542 * identity distance node_distance(i,i). 6543 * 6544 * The sched_domains_numa_distance[] array includes the actual distance 6545 * numbers. 6546 */ 6547 6548 /* 6549 * Here, we should temporarily reset sched_domains_numa_levels to 0. 6550 * If it fails to allocate memory for array sched_domains_numa_masks[][], 6551 * the array will contain less then 'level' members. This could be 6552 * dangerous when we use it to iterate array sched_domains_numa_masks[][] 6553 * in other functions. 6554 * 6555 * We reset it to 'level' at the end of this function. 6556 */ 6557 sched_domains_numa_levels = 0; 6558 6559 sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL); 6560 if (!sched_domains_numa_masks) 6561 return; 6562 6563 /* 6564 * Now for each level, construct a mask per node which contains all 6565 * cpus of nodes that are that many hops away from us. 
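 *
 * For example, if the distinct non-local distances recorded above were
 * 20 and 30, then at level 0 a node's mask covers the cpus of every
 * node within distance 20 of it (itself plus its nearest neighbours),
 * and at level 1 (distance 30) it covers the cpus of every node in the
 * system.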
6566 */ 6567 for (i = 0; i < level; i++) { 6568 sched_domains_numa_masks[i] = 6569 kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL); 6570 if (!sched_domains_numa_masks[i]) 6571 return; 6572 6573 for (j = 0; j < nr_node_ids; j++) { 6574 struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL); 6575 if (!mask) 6576 return; 6577 6578 sched_domains_numa_masks[i][j] = mask; 6579 6580 for_each_node(k) { 6581 if (node_distance(j, k) > sched_domains_numa_distance[i]) 6582 continue; 6583 6584 cpumask_or(mask, mask, cpumask_of_node(k)); 6585 } 6586 } 6587 } 6588 6589 /* Compute default topology size */ 6590 for (i = 0; sched_domain_topology[i].mask; i++); 6591 6592 tl = kzalloc((i + level + 1) * 6593 sizeof(struct sched_domain_topology_level), GFP_KERNEL); 6594 if (!tl) 6595 return; 6596 6597 /* 6598 * Copy the default topology bits.. 6599 */ 6600 for (i = 0; sched_domain_topology[i].mask; i++) 6601 tl[i] = sched_domain_topology[i]; 6602 6603 /* 6604 * .. and append 'j' levels of NUMA goodness. 6605 */ 6606 for (j = 0; j < level; i++, j++) { 6607 tl[i] = (struct sched_domain_topology_level){ 6608 .mask = sd_numa_mask, 6609 .sd_flags = cpu_numa_flags, 6610 .flags = SDTL_OVERLAP, 6611 .numa_level = j, 6612 SD_INIT_NAME(NUMA) 6613 }; 6614 } 6615 6616 sched_domain_topology = tl; 6617 6618 sched_domains_numa_levels = level; 6619 sched_max_numa_distance = sched_domains_numa_distance[level - 1]; 6620 6621 init_numa_topology_type(); 6622 } 6623 6624 static void sched_domains_numa_masks_set(int cpu) 6625 { 6626 int i, j; 6627 int node = cpu_to_node(cpu); 6628 6629 for (i = 0; i < sched_domains_numa_levels; i++) { 6630 for (j = 0; j < nr_node_ids; j++) { 6631 if (node_distance(j, node) <= sched_domains_numa_distance[i]) 6632 cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]); 6633 } 6634 } 6635 } 6636 6637 static void sched_domains_numa_masks_clear(int cpu) 6638 { 6639 int i, j; 6640 for (i = 0; i < sched_domains_numa_levels; i++) { 6641 for (j = 0; j < nr_node_ids; j++) 6642 cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]); 6643 } 6644 } 6645 6646 /* 6647 * Update sched_domains_numa_masks[level][node] array when new cpus 6648 * are onlined. 
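 *
 * For example, onlining CPU 4 sitting on node 1 sets bit 4 in
 * sched_domains_numa_masks[l][j] for every level l and every node j
 * whose distance to node 1 is within sched_domains_numa_distance[l];
 * CPU_DEAD clears the same bits again.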
6649 */ 6650 static int sched_domains_numa_masks_update(struct notifier_block *nfb, 6651 unsigned long action, 6652 void *hcpu) 6653 { 6654 int cpu = (long)hcpu; 6655 6656 switch (action & ~CPU_TASKS_FROZEN) { 6657 case CPU_ONLINE: 6658 sched_domains_numa_masks_set(cpu); 6659 break; 6660 6661 case CPU_DEAD: 6662 sched_domains_numa_masks_clear(cpu); 6663 break; 6664 6665 default: 6666 return NOTIFY_DONE; 6667 } 6668 6669 return NOTIFY_OK; 6670 } 6671 #else 6672 static inline void sched_init_numa(void) 6673 { 6674 } 6675 6676 static int sched_domains_numa_masks_update(struct notifier_block *nfb, 6677 unsigned long action, 6678 void *hcpu) 6679 { 6680 return 0; 6681 } 6682 #endif /* CONFIG_NUMA */ 6683 6684 static int __sdt_alloc(const struct cpumask *cpu_map) 6685 { 6686 struct sched_domain_topology_level *tl; 6687 int j; 6688 6689 for_each_sd_topology(tl) { 6690 struct sd_data *sdd = &tl->data; 6691 6692 sdd->sd = alloc_percpu(struct sched_domain *); 6693 if (!sdd->sd) 6694 return -ENOMEM; 6695 6696 sdd->sg = alloc_percpu(struct sched_group *); 6697 if (!sdd->sg) 6698 return -ENOMEM; 6699 6700 sdd->sgc = alloc_percpu(struct sched_group_capacity *); 6701 if (!sdd->sgc) 6702 return -ENOMEM; 6703 6704 for_each_cpu(j, cpu_map) { 6705 struct sched_domain *sd; 6706 struct sched_group *sg; 6707 struct sched_group_capacity *sgc; 6708 6709 sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(), 6710 GFP_KERNEL, cpu_to_node(j)); 6711 if (!sd) 6712 return -ENOMEM; 6713 6714 *per_cpu_ptr(sdd->sd, j) = sd; 6715 6716 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), 6717 GFP_KERNEL, cpu_to_node(j)); 6718 if (!sg) 6719 return -ENOMEM; 6720 6721 sg->next = sg; 6722 6723 *per_cpu_ptr(sdd->sg, j) = sg; 6724 6725 sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(), 6726 GFP_KERNEL, cpu_to_node(j)); 6727 if (!sgc) 6728 return -ENOMEM; 6729 6730 *per_cpu_ptr(sdd->sgc, j) = sgc; 6731 } 6732 } 6733 6734 return 0; 6735 } 6736 6737 static void __sdt_free(const struct cpumask *cpu_map) 6738 { 6739 struct sched_domain_topology_level *tl; 6740 int j; 6741 6742 for_each_sd_topology(tl) { 6743 struct sd_data *sdd = &tl->data; 6744 6745 for_each_cpu(j, cpu_map) { 6746 struct sched_domain *sd; 6747 6748 if (sdd->sd) { 6749 sd = *per_cpu_ptr(sdd->sd, j); 6750 if (sd && (sd->flags & SD_OVERLAP)) 6751 free_sched_groups(sd->groups, 0); 6752 kfree(*per_cpu_ptr(sdd->sd, j)); 6753 } 6754 6755 if (sdd->sg) 6756 kfree(*per_cpu_ptr(sdd->sg, j)); 6757 if (sdd->sgc) 6758 kfree(*per_cpu_ptr(sdd->sgc, j)); 6759 } 6760 free_percpu(sdd->sd); 6761 sdd->sd = NULL; 6762 free_percpu(sdd->sg); 6763 sdd->sg = NULL; 6764 free_percpu(sdd->sgc); 6765 sdd->sgc = NULL; 6766 } 6767 } 6768 6769 struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, 6770 const struct cpumask *cpu_map, struct sched_domain_attr *attr, 6771 struct sched_domain *child, int cpu) 6772 { 6773 struct sched_domain *sd = sd_init(tl, cpu); 6774 if (!sd) 6775 return child; 6776 6777 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu)); 6778 if (child) { 6779 sd->level = child->level + 1; 6780 sched_domain_level_max = max(sched_domain_level_max, sd->level); 6781 child->parent = sd; 6782 sd->child = child; 6783 6784 if (!cpumask_subset(sched_domain_span(child), 6785 sched_domain_span(sd))) { 6786 pr_err("BUG: arch topology borken\n"); 6787 #ifdef CONFIG_SCHED_DEBUG 6788 pr_err(" the %s domain not a subset of the %s domain\n", 6789 child->name, sd->name); 6790 #endif 6791 /* Fixup, ensure @sd has at least @child 
cpus. */ 6792 cpumask_or(sched_domain_span(sd), 6793 sched_domain_span(sd), 6794 sched_domain_span(child)); 6795 } 6796 6797 } 6798 set_domain_attribute(sd, attr); 6799 6800 return sd; 6801 } 6802 6803 /* 6804 * Build sched domains for a given set of cpus and attach the sched domains 6805 * to the individual cpus 6806 */ 6807 static int build_sched_domains(const struct cpumask *cpu_map, 6808 struct sched_domain_attr *attr) 6809 { 6810 enum s_alloc alloc_state; 6811 struct sched_domain *sd; 6812 struct s_data d; 6813 int i, ret = -ENOMEM; 6814 6815 alloc_state = __visit_domain_allocation_hell(&d, cpu_map); 6816 if (alloc_state != sa_rootdomain) 6817 goto error; 6818 6819 /* Set up domains for cpus specified by the cpu_map. */ 6820 for_each_cpu(i, cpu_map) { 6821 struct sched_domain_topology_level *tl; 6822 6823 sd = NULL; 6824 for_each_sd_topology(tl) { 6825 sd = build_sched_domain(tl, cpu_map, attr, sd, i); 6826 if (tl == sched_domain_topology) 6827 *per_cpu_ptr(d.sd, i) = sd; 6828 if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP)) 6829 sd->flags |= SD_OVERLAP; 6830 if (cpumask_equal(cpu_map, sched_domain_span(sd))) 6831 break; 6832 } 6833 } 6834 6835 /* Build the groups for the domains */ 6836 for_each_cpu(i, cpu_map) { 6837 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { 6838 sd->span_weight = cpumask_weight(sched_domain_span(sd)); 6839 if (sd->flags & SD_OVERLAP) { 6840 if (build_overlap_sched_groups(sd, i)) 6841 goto error; 6842 } else { 6843 if (build_sched_groups(sd, i)) 6844 goto error; 6845 } 6846 } 6847 } 6848 6849 /* Calculate CPU capacity for physical packages and nodes */ 6850 for (i = nr_cpumask_bits-1; i >= 0; i--) { 6851 if (!cpumask_test_cpu(i, cpu_map)) 6852 continue; 6853 6854 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { 6855 claim_allocations(i, sd); 6856 init_sched_groups_capacity(i, sd); 6857 } 6858 } 6859 6860 /* Attach the domains */ 6861 rcu_read_lock(); 6862 for_each_cpu(i, cpu_map) { 6863 sd = *per_cpu_ptr(d.sd, i); 6864 cpu_attach_domain(sd, d.rd, i); 6865 } 6866 rcu_read_unlock(); 6867 6868 ret = 0; 6869 error: 6870 __free_domain_allocs(&d, alloc_state, cpu_map); 6871 return ret; 6872 } 6873 6874 static cpumask_var_t *doms_cur; /* current sched domains */ 6875 static int ndoms_cur; /* number of sched domains in 'doms_cur' */ 6876 static struct sched_domain_attr *dattr_cur; 6877 /* attribues of custom domains in 'doms_cur' */ 6878 6879 /* 6880 * Special case: If a kmalloc of a doms_cur partition (array of 6881 * cpumask) fails, then fallback to a single sched domain, 6882 * as determined by the single cpumask fallback_doms. 6883 */ 6884 static cpumask_var_t fallback_doms; 6885 6886 /* 6887 * arch_update_cpu_topology lets virtualized architectures update the 6888 * cpu core maps. It is supposed to return 1 if the topology changed 6889 * or 0 if it stayed the same. 
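 *
 * A hypothetical architecture override simply shadows the __weak stub
 * below, for instance:
 *
 *	int arch_update_cpu_topology(void)
 *	{
 *		return refresh_core_maps_from_firmware() ? 1 : 0;
 *	}
 *
 * where refresh_core_maps_from_firmware() stands in for whatever
 * platform-specific mechanism rereads the core masks (s390 and powerpc
 * provide real implementations along these lines).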
6890 */ 6891 int __weak arch_update_cpu_topology(void) 6892 { 6893 return 0; 6894 } 6895 6896 cpumask_var_t *alloc_sched_domains(unsigned int ndoms) 6897 { 6898 int i; 6899 cpumask_var_t *doms; 6900 6901 doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL); 6902 if (!doms) 6903 return NULL; 6904 for (i = 0; i < ndoms; i++) { 6905 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) { 6906 free_sched_domains(doms, i); 6907 return NULL; 6908 } 6909 } 6910 return doms; 6911 } 6912 6913 void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms) 6914 { 6915 unsigned int i; 6916 for (i = 0; i < ndoms; i++) 6917 free_cpumask_var(doms[i]); 6918 kfree(doms); 6919 } 6920 6921 /* 6922 * Set up scheduler domains and groups. Callers must hold the hotplug lock. 6923 * For now this just excludes isolated cpus, but could be used to 6924 * exclude other special cases in the future. 6925 */ 6926 static int init_sched_domains(const struct cpumask *cpu_map) 6927 { 6928 int err; 6929 6930 arch_update_cpu_topology(); 6931 ndoms_cur = 1; 6932 doms_cur = alloc_sched_domains(ndoms_cur); 6933 if (!doms_cur) 6934 doms_cur = &fallback_doms; 6935 cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map); 6936 err = build_sched_domains(doms_cur[0], NULL); 6937 register_sched_domain_sysctl(); 6938 6939 return err; 6940 } 6941 6942 /* 6943 * Detach sched domains from a group of cpus specified in cpu_map 6944 * These cpus will now be attached to the NULL domain 6945 */ 6946 static void detach_destroy_domains(const struct cpumask *cpu_map) 6947 { 6948 int i; 6949 6950 rcu_read_lock(); 6951 for_each_cpu(i, cpu_map) 6952 cpu_attach_domain(NULL, &def_root_domain, i); 6953 rcu_read_unlock(); 6954 } 6955 6956 /* handle null as "default" */ 6957 static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, 6958 struct sched_domain_attr *new, int idx_new) 6959 { 6960 struct sched_domain_attr tmp; 6961 6962 /* fast path */ 6963 if (!new && !cur) 6964 return 1; 6965 6966 tmp = SD_ATTR_INIT; 6967 return !memcmp(cur ? (cur + idx_cur) : &tmp, 6968 new ? (new + idx_new) : &tmp, 6969 sizeof(struct sched_domain_attr)); 6970 } 6971 6972 /* 6973 * Partition sched domains as specified by the 'ndoms_new' 6974 * cpumasks in the array doms_new[] of cpumasks. This compares 6975 * doms_new[] to the current sched domain partitioning, doms_cur[]. 6976 * It destroys each deleted domain and builds each new domain. 6977 * 6978 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'. 6979 * The masks don't intersect (don't overlap.) We should setup one 6980 * sched domain for each mask. CPUs not in any of the cpumasks will 6981 * not be load balanced. If the same cpumask appears both in the 6982 * current 'doms_cur' domains and in the new 'doms_new', we can leave 6983 * it as it is. 6984 * 6985 * The passed in 'doms_new' should be allocated using 6986 * alloc_sched_domains. This routine takes ownership of it and will 6987 * free_sched_domains it when done with it. If the caller failed the 6988 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1, 6989 * and partition_sched_domains() will fallback to the single partition 6990 * 'fallback_doms', it also forces the domains to be rebuilt. 6991 * 6992 * If doms_new == NULL it will be replaced with cpu_online_mask. 6993 * ndoms_new == 0 is a special case for destroying existing domains, 6994 * and it will not create the default domain. 
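 *
 * A minimal usage sketch (illustrative only; cpuset is the in-tree
 * caller and builds its masks from the cgroup hierarchy): splitting an
 * 8-CPU machine into two independent balancing partitions, CPUs 0-3 and
 * CPUs 4-7, could look like
 *
 *	cpumask_var_t *doms = alloc_sched_domains(2);
 *
 *	if (doms) {
 *		cpulist_parse("0-3", doms[0]);
 *		cpulist_parse("4-7", doms[1]);
 *	}
 *	get_online_cpus();
 *	partition_sched_domains(doms ? 2 : 1, doms, NULL);
 *	put_online_cpus();
 *
 * Note how a failed allocation degrades to the single fallback
 * partition, as described above, and how ownership of 'doms' passes to
 * partition_sched_domains().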
6995 * 6996 * Call with hotplug lock held 6997 */ 6998 void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], 6999 struct sched_domain_attr *dattr_new) 7000 { 7001 int i, j, n; 7002 int new_topology; 7003 7004 mutex_lock(&sched_domains_mutex); 7005 7006 /* always unregister in case we don't destroy any domains */ 7007 unregister_sched_domain_sysctl(); 7008 7009 /* Let architecture update cpu core mappings. */ 7010 new_topology = arch_update_cpu_topology(); 7011 7012 n = doms_new ? ndoms_new : 0; 7013 7014 /* Destroy deleted domains */ 7015 for (i = 0; i < ndoms_cur; i++) { 7016 for (j = 0; j < n && !new_topology; j++) { 7017 if (cpumask_equal(doms_cur[i], doms_new[j]) 7018 && dattrs_equal(dattr_cur, i, dattr_new, j)) 7019 goto match1; 7020 } 7021 /* no match - a current sched domain not in new doms_new[] */ 7022 detach_destroy_domains(doms_cur[i]); 7023 match1: 7024 ; 7025 } 7026 7027 n = ndoms_cur; 7028 if (doms_new == NULL) { 7029 n = 0; 7030 doms_new = &fallback_doms; 7031 cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map); 7032 WARN_ON_ONCE(dattr_new); 7033 } 7034 7035 /* Build new domains */ 7036 for (i = 0; i < ndoms_new; i++) { 7037 for (j = 0; j < n && !new_topology; j++) { 7038 if (cpumask_equal(doms_new[i], doms_cur[j]) 7039 && dattrs_equal(dattr_new, i, dattr_cur, j)) 7040 goto match2; 7041 } 7042 /* no match - add a new doms_new */ 7043 build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL); 7044 match2: 7045 ; 7046 } 7047 7048 /* Remember the new sched domains */ 7049 if (doms_cur != &fallback_doms) 7050 free_sched_domains(doms_cur, ndoms_cur); 7051 kfree(dattr_cur); /* kfree(NULL) is safe */ 7052 doms_cur = doms_new; 7053 dattr_cur = dattr_new; 7054 ndoms_cur = ndoms_new; 7055 7056 register_sched_domain_sysctl(); 7057 7058 mutex_unlock(&sched_domains_mutex); 7059 } 7060 7061 static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */ 7062 7063 /* 7064 * Update cpusets according to cpu_active mask. If cpusets are 7065 * disabled, cpuset_update_active_cpus() becomes a simple wrapper 7066 * around partition_sched_domains(). 7067 * 7068 * If we come here as part of a suspend/resume, don't touch cpusets because we 7069 * want to restore it back to its original state upon resume anyway. 7070 */ 7071 static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action, 7072 void *hcpu) 7073 { 7074 switch (action) { 7075 case CPU_ONLINE_FROZEN: 7076 case CPU_DOWN_FAILED_FROZEN: 7077 7078 /* 7079 * num_cpus_frozen tracks how many CPUs are involved in suspend 7080 * resume sequence. As long as this is not the last online 7081 * operation in the resume sequence, just build a single sched 7082 * domain, ignoring cpusets. 7083 */ 7084 num_cpus_frozen--; 7085 if (likely(num_cpus_frozen)) { 7086 partition_sched_domains(1, NULL, NULL); 7087 break; 7088 } 7089 7090 /* 7091 * This is the last CPU online operation. So fall through and 7092 * restore the original sched domains by considering the 7093 * cpuset configurations. 
7094 */ 7095 7096 case CPU_ONLINE: 7097 cpuset_update_active_cpus(true); 7098 break; 7099 default: 7100 return NOTIFY_DONE; 7101 } 7102 return NOTIFY_OK; 7103 } 7104 7105 static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action, 7106 void *hcpu) 7107 { 7108 unsigned long flags; 7109 long cpu = (long)hcpu; 7110 struct dl_bw *dl_b; 7111 bool overflow; 7112 int cpus; 7113 7114 switch (action) { 7115 case CPU_DOWN_PREPARE: 7116 rcu_read_lock_sched(); 7117 dl_b = dl_bw_of(cpu); 7118 7119 raw_spin_lock_irqsave(&dl_b->lock, flags); 7120 cpus = dl_bw_cpus(cpu); 7121 overflow = __dl_overflow(dl_b, cpus, 0, 0); 7122 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 7123 7124 rcu_read_unlock_sched(); 7125 7126 if (overflow) 7127 return notifier_from_errno(-EBUSY); 7128 cpuset_update_active_cpus(false); 7129 break; 7130 case CPU_DOWN_PREPARE_FROZEN: 7131 num_cpus_frozen++; 7132 partition_sched_domains(1, NULL, NULL); 7133 break; 7134 default: 7135 return NOTIFY_DONE; 7136 } 7137 return NOTIFY_OK; 7138 } 7139 7140 void __init sched_init_smp(void) 7141 { 7142 cpumask_var_t non_isolated_cpus; 7143 7144 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL); 7145 alloc_cpumask_var(&fallback_doms, GFP_KERNEL); 7146 7147 sched_init_numa(); 7148 7149 /* 7150 * There's no userspace yet to cause hotplug operations; hence all the 7151 * cpu masks are stable and all blatant races in the below code cannot 7152 * happen. 7153 */ 7154 mutex_lock(&sched_domains_mutex); 7155 init_sched_domains(cpu_active_mask); 7156 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); 7157 if (cpumask_empty(non_isolated_cpus)) 7158 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); 7159 mutex_unlock(&sched_domains_mutex); 7160 7161 hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE); 7162 hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE); 7163 hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE); 7164 7165 init_hrtick(); 7166 7167 /* Move init over to a non-isolated CPU */ 7168 if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0) 7169 BUG(); 7170 sched_init_granularity(); 7171 free_cpumask_var(non_isolated_cpus); 7172 7173 init_sched_rt_class(); 7174 init_sched_dl_class(); 7175 } 7176 #else 7177 void __init sched_init_smp(void) 7178 { 7179 sched_init_granularity(); 7180 } 7181 #endif /* CONFIG_SMP */ 7182 7183 int in_sched_functions(unsigned long addr) 7184 { 7185 return in_lock_functions(addr) || 7186 (addr >= (unsigned long)__sched_text_start 7187 && addr < (unsigned long)__sched_text_end); 7188 } 7189 7190 #ifdef CONFIG_CGROUP_SCHED 7191 /* 7192 * Default task group. 7193 * Every task in system belongs to this group at bootup. 
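 * It also backs the root of the cpu cgroup hierarchy:
 * cpu_cgroup_css_alloc() further down returns &root_task_group.css when
 * asked for a css with no parent.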
7194 */ 7195 struct task_group root_task_group; 7196 LIST_HEAD(task_groups); 7197 7198 /* Cacheline aligned slab cache for task_group */ 7199 static struct kmem_cache *task_group_cache __read_mostly; 7200 #endif 7201 7202 DECLARE_PER_CPU(cpumask_var_t, load_balance_mask); 7203 7204 void __init sched_init(void) 7205 { 7206 int i, j; 7207 unsigned long alloc_size = 0, ptr; 7208 7209 #ifdef CONFIG_FAIR_GROUP_SCHED 7210 alloc_size += 2 * nr_cpu_ids * sizeof(void **); 7211 #endif 7212 #ifdef CONFIG_RT_GROUP_SCHED 7213 alloc_size += 2 * nr_cpu_ids * sizeof(void **); 7214 #endif 7215 if (alloc_size) { 7216 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT); 7217 7218 #ifdef CONFIG_FAIR_GROUP_SCHED 7219 root_task_group.se = (struct sched_entity **)ptr; 7220 ptr += nr_cpu_ids * sizeof(void **); 7221 7222 root_task_group.cfs_rq = (struct cfs_rq **)ptr; 7223 ptr += nr_cpu_ids * sizeof(void **); 7224 7225 #endif /* CONFIG_FAIR_GROUP_SCHED */ 7226 #ifdef CONFIG_RT_GROUP_SCHED 7227 root_task_group.rt_se = (struct sched_rt_entity **)ptr; 7228 ptr += nr_cpu_ids * sizeof(void **); 7229 7230 root_task_group.rt_rq = (struct rt_rq **)ptr; 7231 ptr += nr_cpu_ids * sizeof(void **); 7232 7233 #endif /* CONFIG_RT_GROUP_SCHED */ 7234 } 7235 #ifdef CONFIG_CPUMASK_OFFSTACK 7236 for_each_possible_cpu(i) { 7237 per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node( 7238 cpumask_size(), GFP_KERNEL, cpu_to_node(i)); 7239 } 7240 #endif /* CONFIG_CPUMASK_OFFSTACK */ 7241 7242 init_rt_bandwidth(&def_rt_bandwidth, 7243 global_rt_period(), global_rt_runtime()); 7244 init_dl_bandwidth(&def_dl_bandwidth, 7245 global_rt_period(), global_rt_runtime()); 7246 7247 #ifdef CONFIG_SMP 7248 init_defrootdomain(); 7249 #endif 7250 7251 #ifdef CONFIG_RT_GROUP_SCHED 7252 init_rt_bandwidth(&root_task_group.rt_bandwidth, 7253 global_rt_period(), global_rt_runtime()); 7254 #endif /* CONFIG_RT_GROUP_SCHED */ 7255 7256 #ifdef CONFIG_CGROUP_SCHED 7257 task_group_cache = KMEM_CACHE(task_group, 0); 7258 7259 list_add(&root_task_group.list, &task_groups); 7260 INIT_LIST_HEAD(&root_task_group.children); 7261 INIT_LIST_HEAD(&root_task_group.siblings); 7262 autogroup_init(&init_task); 7263 #endif /* CONFIG_CGROUP_SCHED */ 7264 7265 for_each_possible_cpu(i) { 7266 struct rq *rq; 7267 7268 rq = cpu_rq(i); 7269 raw_spin_lock_init(&rq->lock); 7270 rq->nr_running = 0; 7271 rq->calc_load_active = 0; 7272 rq->calc_load_update = jiffies + LOAD_FREQ; 7273 init_cfs_rq(&rq->cfs); 7274 init_rt_rq(&rq->rt); 7275 init_dl_rq(&rq->dl); 7276 #ifdef CONFIG_FAIR_GROUP_SCHED 7277 root_task_group.shares = ROOT_TASK_GROUP_LOAD; 7278 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); 7279 /* 7280 * How much cpu bandwidth does root_task_group get? 7281 * 7282 * In case of task-groups formed thr' the cgroup filesystem, it 7283 * gets 100% of the cpu resources in the system. This overall 7284 * system cpu resource is divided among the tasks of 7285 * root_task_group and its child task-groups in a fair manner, 7286 * based on each entity's (task or task-group's) weight 7287 * (se->load.weight). 7288 * 7289 * In other words, if root_task_group has 10 tasks of weight 7290 * 1024) and two child groups A0 and A1 (of weight 1024 each), 7291 * then A0's share of the cpu resource is: 7292 * 7293 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33% 7294 * 7295 * We achieve this by letting root_task_group's tasks sit 7296 * directly in rq->cfs (i.e root_task_group->se[] = NULL). 
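 *
 * (By the same arithmetic, each of those ten root-level tasks also gets
 * 1024 / 12288, i.e. the same 8.33%, since at this level tasks and
 * groups compete purely by weight.)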
7297 */ 7298 init_cfs_bandwidth(&root_task_group.cfs_bandwidth); 7299 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); 7300 #endif /* CONFIG_FAIR_GROUP_SCHED */ 7301 7302 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; 7303 #ifdef CONFIG_RT_GROUP_SCHED 7304 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); 7305 #endif 7306 7307 for (j = 0; j < CPU_LOAD_IDX_MAX; j++) 7308 rq->cpu_load[j] = 0; 7309 7310 rq->last_load_update_tick = jiffies; 7311 7312 #ifdef CONFIG_SMP 7313 rq->sd = NULL; 7314 rq->rd = NULL; 7315 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE; 7316 rq->balance_callback = NULL; 7317 rq->active_balance = 0; 7318 rq->next_balance = jiffies; 7319 rq->push_cpu = 0; 7320 rq->cpu = i; 7321 rq->online = 0; 7322 rq->idle_stamp = 0; 7323 rq->avg_idle = 2*sysctl_sched_migration_cost; 7324 rq->max_idle_balance_cost = sysctl_sched_migration_cost; 7325 7326 INIT_LIST_HEAD(&rq->cfs_tasks); 7327 7328 rq_attach_root(rq, &def_root_domain); 7329 #ifdef CONFIG_NO_HZ_COMMON 7330 rq->nohz_flags = 0; 7331 #endif 7332 #ifdef CONFIG_NO_HZ_FULL 7333 rq->last_sched_tick = 0; 7334 #endif 7335 #endif 7336 init_rq_hrtick(rq); 7337 atomic_set(&rq->nr_iowait, 0); 7338 } 7339 7340 set_load_weight(&init_task); 7341 7342 #ifdef CONFIG_PREEMPT_NOTIFIERS 7343 INIT_HLIST_HEAD(&init_task.preempt_notifiers); 7344 #endif 7345 7346 /* 7347 * The boot idle thread does lazy MMU switching as well: 7348 */ 7349 atomic_inc(&init_mm.mm_count); 7350 enter_lazy_tlb(&init_mm, current); 7351 7352 /* 7353 * During early bootup we pretend to be a normal task: 7354 */ 7355 current->sched_class = &fair_sched_class; 7356 7357 /* 7358 * Make us the idle thread. Technically, schedule() should not be 7359 * called from this thread, however somewhere below it might be, 7360 * but because we are the idle thread, we just pick up running again 7361 * when this runqueue becomes "idle". 7362 */ 7363 init_idle(current, smp_processor_id()); 7364 7365 calc_load_update = jiffies + LOAD_FREQ; 7366 7367 #ifdef CONFIG_SMP 7368 zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT); 7369 /* May be allocated at isolcpus cmdline parse time */ 7370 if (cpu_isolated_map == NULL) 7371 zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); 7372 idle_thread_set_boot_cpu(); 7373 set_cpu_rq_start_time(); 7374 #endif 7375 init_sched_fair_class(); 7376 7377 scheduler_running = 1; 7378 } 7379 7380 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 7381 static inline int preempt_count_equals(int preempt_offset) 7382 { 7383 int nested = preempt_count() + rcu_preempt_depth(); 7384 7385 return (nested == preempt_offset); 7386 } 7387 7388 void __might_sleep(const char *file, int line, int preempt_offset) 7389 { 7390 /* 7391 * Blocking primitives will set (and therefore destroy) current->state, 7392 * since we will exit with TASK_RUNNING make sure we enter with it, 7393 * otherwise we will destroy state. 7394 */ 7395 WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change, 7396 "do not call blocking ops when !TASK_RUNNING; " 7397 "state=%lx set at [<%p>] %pS\n", 7398 current->state, 7399 (void *)current->task_state_change, 7400 (void *)current->task_state_change); 7401 7402 ___might_sleep(file, line, preempt_offset); 7403 } 7404 EXPORT_SYMBOL(__might_sleep); 7405 7406 void ___might_sleep(const char *file, int line, int preempt_offset) 7407 { 7408 static unsigned long prev_jiffy; /* ratelimiting */ 7409 7410 rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. 
*/ 7411 if ((preempt_count_equals(preempt_offset) && !irqs_disabled() && 7412 !is_idle_task(current)) || 7413 system_state != SYSTEM_RUNNING || oops_in_progress) 7414 return; 7415 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 7416 return; 7417 prev_jiffy = jiffies; 7418 7419 printk(KERN_ERR 7420 "BUG: sleeping function called from invalid context at %s:%d\n", 7421 file, line); 7422 printk(KERN_ERR 7423 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", 7424 in_atomic(), irqs_disabled(), 7425 current->pid, current->comm); 7426 7427 if (task_stack_end_corrupted(current)) 7428 printk(KERN_EMERG "Thread overran stack, or stack corrupted\n"); 7429 7430 debug_show_held_locks(current); 7431 if (irqs_disabled()) 7432 print_irqtrace_events(current); 7433 #ifdef CONFIG_DEBUG_PREEMPT 7434 if (!preempt_count_equals(preempt_offset)) { 7435 pr_err("Preemption disabled at:"); 7436 print_ip_sym(current->preempt_disable_ip); 7437 pr_cont("\n"); 7438 } 7439 #endif 7440 dump_stack(); 7441 } 7442 EXPORT_SYMBOL(___might_sleep); 7443 #endif 7444 7445 #ifdef CONFIG_MAGIC_SYSRQ 7446 void normalize_rt_tasks(void) 7447 { 7448 struct task_struct *g, *p; 7449 struct sched_attr attr = { 7450 .sched_policy = SCHED_NORMAL, 7451 }; 7452 7453 read_lock(&tasklist_lock); 7454 for_each_process_thread(g, p) { 7455 /* 7456 * Only normalize user tasks: 7457 */ 7458 if (p->flags & PF_KTHREAD) 7459 continue; 7460 7461 p->se.exec_start = 0; 7462 #ifdef CONFIG_SCHEDSTATS 7463 p->se.statistics.wait_start = 0; 7464 p->se.statistics.sleep_start = 0; 7465 p->se.statistics.block_start = 0; 7466 #endif 7467 7468 if (!dl_task(p) && !rt_task(p)) { 7469 /* 7470 * Renice negative nice level userspace 7471 * tasks back to 0: 7472 */ 7473 if (task_nice(p) < 0) 7474 set_user_nice(p, 0); 7475 continue; 7476 } 7477 7478 __sched_setscheduler(p, &attr, false, false); 7479 } 7480 read_unlock(&tasklist_lock); 7481 } 7482 7483 #endif /* CONFIG_MAGIC_SYSRQ */ 7484 7485 #if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) 7486 /* 7487 * These functions are only useful for the IA64 MCA handling, or kdb. 7488 * 7489 * They can only be called when the whole system has been 7490 * stopped - every CPU needs to be quiescent, and no scheduling 7491 * activity can take place. Using them for anything else would 7492 * be a serious bug, and as a result, they aren't even visible 7493 * under any other configuration. 7494 */ 7495 7496 /** 7497 * curr_task - return the current task for a given cpu. 7498 * @cpu: the processor in question. 7499 * 7500 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 7501 * 7502 * Return: The current task for @cpu. 7503 */ 7504 struct task_struct *curr_task(int cpu) 7505 { 7506 return cpu_curr(cpu); 7507 } 7508 7509 #endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */ 7510 7511 #ifdef CONFIG_IA64 7512 /** 7513 * set_curr_task - set the current task for a given cpu. 7514 * @cpu: the processor in question. 7515 * @p: the task pointer to set. 7516 * 7517 * Description: This function must only be used when non-maskable interrupts 7518 * are serviced on a separate stack. It allows the architecture to switch the 7519 * notion of the current task on a cpu in a non-blocking manner. This function 7520 * must be called with all CPU's synchronized, and interrupts disabled, the 7521 * and caller must save the original value of the current task (see 7522 * curr_task() above) and restore that value before reenabling interrupts and 7523 * re-starting the system. 7524 * 7525 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 
7526 */ 7527 void set_curr_task(int cpu, struct task_struct *p) 7528 { 7529 cpu_curr(cpu) = p; 7530 } 7531 7532 #endif 7533 7534 #ifdef CONFIG_CGROUP_SCHED 7535 /* task_group_lock serializes the addition/removal of task groups */ 7536 static DEFINE_SPINLOCK(task_group_lock); 7537 7538 static void free_sched_group(struct task_group *tg) 7539 { 7540 free_fair_sched_group(tg); 7541 free_rt_sched_group(tg); 7542 autogroup_free(tg); 7543 kmem_cache_free(task_group_cache, tg); 7544 } 7545 7546 /* allocate runqueue etc for a new task group */ 7547 struct task_group *sched_create_group(struct task_group *parent) 7548 { 7549 struct task_group *tg; 7550 7551 tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO); 7552 if (!tg) 7553 return ERR_PTR(-ENOMEM); 7554 7555 if (!alloc_fair_sched_group(tg, parent)) 7556 goto err; 7557 7558 if (!alloc_rt_sched_group(tg, parent)) 7559 goto err; 7560 7561 return tg; 7562 7563 err: 7564 free_sched_group(tg); 7565 return ERR_PTR(-ENOMEM); 7566 } 7567 7568 void sched_online_group(struct task_group *tg, struct task_group *parent) 7569 { 7570 unsigned long flags; 7571 7572 spin_lock_irqsave(&task_group_lock, flags); 7573 list_add_rcu(&tg->list, &task_groups); 7574 7575 WARN_ON(!parent); /* root should already exist */ 7576 7577 tg->parent = parent; 7578 INIT_LIST_HEAD(&tg->children); 7579 list_add_rcu(&tg->siblings, &parent->children); 7580 spin_unlock_irqrestore(&task_group_lock, flags); 7581 } 7582 7583 /* rcu callback to free various structures associated with a task group */ 7584 static void free_sched_group_rcu(struct rcu_head *rhp) 7585 { 7586 /* now it should be safe to free those cfs_rqs */ 7587 free_sched_group(container_of(rhp, struct task_group, rcu)); 7588 } 7589 7590 /* Destroy runqueue etc associated with a task group */ 7591 void sched_destroy_group(struct task_group *tg) 7592 { 7593 /* wait for possible concurrent references to cfs_rqs complete */ 7594 call_rcu(&tg->rcu, free_sched_group_rcu); 7595 } 7596 7597 void sched_offline_group(struct task_group *tg) 7598 { 7599 unsigned long flags; 7600 7601 /* end participation in shares distribution */ 7602 unregister_fair_sched_group(tg); 7603 7604 spin_lock_irqsave(&task_group_lock, flags); 7605 list_del_rcu(&tg->list); 7606 list_del_rcu(&tg->siblings); 7607 spin_unlock_irqrestore(&task_group_lock, flags); 7608 } 7609 7610 /* change task's runqueue when it moves between groups. 7611 * The caller of this function should have put the task in its new group 7612 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to 7613 * reflect its new group. 7614 */ 7615 void sched_move_task(struct task_struct *tsk) 7616 { 7617 struct task_group *tg; 7618 int queued, running; 7619 unsigned long flags; 7620 struct rq *rq; 7621 7622 rq = task_rq_lock(tsk, &flags); 7623 7624 running = task_current(rq, tsk); 7625 queued = task_on_rq_queued(tsk); 7626 7627 if (queued) 7628 dequeue_task(rq, tsk, DEQUEUE_SAVE | DEQUEUE_MOVE); 7629 if (unlikely(running)) 7630 put_prev_task(rq, tsk); 7631 7632 /* 7633 * All callers are synchronized by task_rq_lock(); we do not use RCU 7634 * which is pointless here. Thus, we pass "true" to task_css_check() 7635 * to prevent lockdep warnings. 
7636 */ 7637 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true), 7638 struct task_group, css); 7639 tg = autogroup_task_group(tsk, tg); 7640 tsk->sched_task_group = tg; 7641 7642 #ifdef CONFIG_FAIR_GROUP_SCHED 7643 if (tsk->sched_class->task_move_group) 7644 tsk->sched_class->task_move_group(tsk); 7645 else 7646 #endif 7647 set_task_rq(tsk, task_cpu(tsk)); 7648 7649 if (unlikely(running)) 7650 tsk->sched_class->set_curr_task(rq); 7651 if (queued) 7652 enqueue_task(rq, tsk, ENQUEUE_RESTORE | ENQUEUE_MOVE); 7653 7654 task_rq_unlock(rq, tsk, &flags); 7655 } 7656 #endif /* CONFIG_CGROUP_SCHED */ 7657 7658 #ifdef CONFIG_RT_GROUP_SCHED 7659 /* 7660 * Ensure that the real time constraints are schedulable. 7661 */ 7662 static DEFINE_MUTEX(rt_constraints_mutex); 7663 7664 /* Must be called with tasklist_lock held */ 7665 static inline int tg_has_rt_tasks(struct task_group *tg) 7666 { 7667 struct task_struct *g, *p; 7668 7669 /* 7670 * Autogroups do not have RT tasks; see autogroup_create(). 7671 */ 7672 if (task_group_is_autogroup(tg)) 7673 return 0; 7674 7675 for_each_process_thread(g, p) { 7676 if (rt_task(p) && task_group(p) == tg) 7677 return 1; 7678 } 7679 7680 return 0; 7681 } 7682 7683 struct rt_schedulable_data { 7684 struct task_group *tg; 7685 u64 rt_period; 7686 u64 rt_runtime; 7687 }; 7688 7689 static int tg_rt_schedulable(struct task_group *tg, void *data) 7690 { 7691 struct rt_schedulable_data *d = data; 7692 struct task_group *child; 7693 unsigned long total, sum = 0; 7694 u64 period, runtime; 7695 7696 period = ktime_to_ns(tg->rt_bandwidth.rt_period); 7697 runtime = tg->rt_bandwidth.rt_runtime; 7698 7699 if (tg == d->tg) { 7700 period = d->rt_period; 7701 runtime = d->rt_runtime; 7702 } 7703 7704 /* 7705 * Cannot have more runtime than the period. 7706 */ 7707 if (runtime > period && runtime != RUNTIME_INF) 7708 return -EINVAL; 7709 7710 /* 7711 * Ensure we don't starve existing RT tasks. 7712 */ 7713 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg)) 7714 return -EBUSY; 7715 7716 total = to_ratio(period, runtime); 7717 7718 /* 7719 * Nobody can have more than the global setting allows. 7720 */ 7721 if (total > to_ratio(global_rt_period(), global_rt_runtime())) 7722 return -EINVAL; 7723 7724 /* 7725 * The sum of our children's runtime should not exceed our own. 7726 */ 7727 list_for_each_entry_rcu(child, &tg->children, siblings) { 7728 period = ktime_to_ns(child->rt_bandwidth.rt_period); 7729 runtime = child->rt_bandwidth.rt_runtime; 7730 7731 if (child == d->tg) { 7732 period = d->rt_period; 7733 runtime = d->rt_runtime; 7734 } 7735 7736 sum += to_ratio(period, runtime); 7737 } 7738 7739 if (sum > total) 7740 return -EINVAL; 7741 7742 return 0; 7743 } 7744 7745 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) 7746 { 7747 int ret; 7748 7749 struct rt_schedulable_data data = { 7750 .tg = tg, 7751 .rt_period = period, 7752 .rt_runtime = runtime, 7753 }; 7754 7755 rcu_read_lock(); 7756 ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data); 7757 rcu_read_unlock(); 7758 7759 return ret; 7760 } 7761 7762 static int tg_set_rt_bandwidth(struct task_group *tg, 7763 u64 rt_period, u64 rt_runtime) 7764 { 7765 int i, err = 0; 7766 7767 /* 7768 * Disallowing the root group RT runtime is BAD, it would disallow the 7769 * kernel creating (and or operating) RT threads. 7770 */ 7771 if (tg == &root_task_group && rt_runtime == 0) 7772 return -EINVAL; 7773 7774 /* No period doesn't make any sense. 
*/ 7775 if (rt_period == 0) 7776 return -EINVAL; 7777 7778 mutex_lock(&rt_constraints_mutex); 7779 read_lock(&tasklist_lock); 7780 err = __rt_schedulable(tg, rt_period, rt_runtime); 7781 if (err) 7782 goto unlock; 7783 7784 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); 7785 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); 7786 tg->rt_bandwidth.rt_runtime = rt_runtime; 7787 7788 for_each_possible_cpu(i) { 7789 struct rt_rq *rt_rq = tg->rt_rq[i]; 7790 7791 raw_spin_lock(&rt_rq->rt_runtime_lock); 7792 rt_rq->rt_runtime = rt_runtime; 7793 raw_spin_unlock(&rt_rq->rt_runtime_lock); 7794 } 7795 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); 7796 unlock: 7797 read_unlock(&tasklist_lock); 7798 mutex_unlock(&rt_constraints_mutex); 7799 7800 return err; 7801 } 7802 7803 static int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us) 7804 { 7805 u64 rt_runtime, rt_period; 7806 7807 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period); 7808 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC; 7809 if (rt_runtime_us < 0) 7810 rt_runtime = RUNTIME_INF; 7811 7812 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); 7813 } 7814 7815 static long sched_group_rt_runtime(struct task_group *tg) 7816 { 7817 u64 rt_runtime_us; 7818 7819 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF) 7820 return -1; 7821 7822 rt_runtime_us = tg->rt_bandwidth.rt_runtime; 7823 do_div(rt_runtime_us, NSEC_PER_USEC); 7824 return rt_runtime_us; 7825 } 7826 7827 static int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us) 7828 { 7829 u64 rt_runtime, rt_period; 7830 7831 rt_period = rt_period_us * NSEC_PER_USEC; 7832 rt_runtime = tg->rt_bandwidth.rt_runtime; 7833 7834 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); 7835 } 7836 7837 static long sched_group_rt_period(struct task_group *tg) 7838 { 7839 u64 rt_period_us; 7840 7841 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period); 7842 do_div(rt_period_us, NSEC_PER_USEC); 7843 return rt_period_us; 7844 } 7845 #endif /* CONFIG_RT_GROUP_SCHED */ 7846 7847 #ifdef CONFIG_RT_GROUP_SCHED 7848 static int sched_rt_global_constraints(void) 7849 { 7850 int ret = 0; 7851 7852 mutex_lock(&rt_constraints_mutex); 7853 read_lock(&tasklist_lock); 7854 ret = __rt_schedulable(NULL, 0, 0); 7855 read_unlock(&tasklist_lock); 7856 mutex_unlock(&rt_constraints_mutex); 7857 7858 return ret; 7859 } 7860 7861 static int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk) 7862 { 7863 /* Don't accept realtime tasks when there is no way for them to run */ 7864 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0) 7865 return 0; 7866 7867 return 1; 7868 } 7869 7870 #else /* !CONFIG_RT_GROUP_SCHED */ 7871 static int sched_rt_global_constraints(void) 7872 { 7873 unsigned long flags; 7874 int i, ret = 0; 7875 7876 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); 7877 for_each_possible_cpu(i) { 7878 struct rt_rq *rt_rq = &cpu_rq(i)->rt; 7879 7880 raw_spin_lock(&rt_rq->rt_runtime_lock); 7881 rt_rq->rt_runtime = global_rt_runtime(); 7882 raw_spin_unlock(&rt_rq->rt_runtime_lock); 7883 } 7884 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); 7885 7886 return ret; 7887 } 7888 #endif /* CONFIG_RT_GROUP_SCHED */ 7889 7890 static int sched_dl_global_validate(void) 7891 { 7892 u64 runtime = global_rt_runtime(); 7893 u64 period = global_rt_period(); 7894 u64 new_bw = to_ratio(period, runtime); 7895 struct dl_bw *dl_b; 7896 int cpu, ret = 0; 7897 unsigned long flags; 7898 7899 /* 7900 * Here we want to check the bandwidth 
not being set to some 7901 * value smaller than the currently allocated bandwidth in 7902 * any of the root_domains. 7903 * 7904 * FIXME: Cycling on all the CPUs is overdoing, but simpler than 7905 * cycling on root_domains... Discussion on different/better 7906 * solutions is welcome! 7907 */ 7908 for_each_possible_cpu(cpu) { 7909 rcu_read_lock_sched(); 7910 dl_b = dl_bw_of(cpu); 7911 7912 raw_spin_lock_irqsave(&dl_b->lock, flags); 7913 if (new_bw < dl_b->total_bw) 7914 ret = -EBUSY; 7915 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 7916 7917 rcu_read_unlock_sched(); 7918 7919 if (ret) 7920 break; 7921 } 7922 7923 return ret; 7924 } 7925 7926 static void sched_dl_do_global(void) 7927 { 7928 u64 new_bw = -1; 7929 struct dl_bw *dl_b; 7930 int cpu; 7931 unsigned long flags; 7932 7933 def_dl_bandwidth.dl_period = global_rt_period(); 7934 def_dl_bandwidth.dl_runtime = global_rt_runtime(); 7935 7936 if (global_rt_runtime() != RUNTIME_INF) 7937 new_bw = to_ratio(global_rt_period(), global_rt_runtime()); 7938 7939 /* 7940 * FIXME: As above... 7941 */ 7942 for_each_possible_cpu(cpu) { 7943 rcu_read_lock_sched(); 7944 dl_b = dl_bw_of(cpu); 7945 7946 raw_spin_lock_irqsave(&dl_b->lock, flags); 7947 dl_b->bw = new_bw; 7948 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 7949 7950 rcu_read_unlock_sched(); 7951 } 7952 } 7953 7954 static int sched_rt_global_validate(void) 7955 { 7956 if (sysctl_sched_rt_period <= 0) 7957 return -EINVAL; 7958 7959 if ((sysctl_sched_rt_runtime != RUNTIME_INF) && 7960 (sysctl_sched_rt_runtime > sysctl_sched_rt_period)) 7961 return -EINVAL; 7962 7963 return 0; 7964 } 7965 7966 static void sched_rt_do_global(void) 7967 { 7968 def_rt_bandwidth.rt_runtime = global_rt_runtime(); 7969 def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period()); 7970 } 7971 7972 int sched_rt_handler(struct ctl_table *table, int write, 7973 void __user *buffer, size_t *lenp, 7974 loff_t *ppos) 7975 { 7976 int old_period, old_runtime; 7977 static DEFINE_MUTEX(mutex); 7978 int ret; 7979 7980 mutex_lock(&mutex); 7981 old_period = sysctl_sched_rt_period; 7982 old_runtime = sysctl_sched_rt_runtime; 7983 7984 ret = proc_dointvec(table, write, buffer, lenp, ppos); 7985 7986 if (!ret && write) { 7987 ret = sched_rt_global_validate(); 7988 if (ret) 7989 goto undo; 7990 7991 ret = sched_dl_global_validate(); 7992 if (ret) 7993 goto undo; 7994 7995 ret = sched_rt_global_constraints(); 7996 if (ret) 7997 goto undo; 7998 7999 sched_rt_do_global(); 8000 sched_dl_do_global(); 8001 } 8002 if (0) { 8003 undo: 8004 sysctl_sched_rt_period = old_period; 8005 sysctl_sched_rt_runtime = old_runtime; 8006 } 8007 mutex_unlock(&mutex); 8008 8009 return ret; 8010 } 8011 8012 int sched_rr_handler(struct ctl_table *table, int write, 8013 void __user *buffer, size_t *lenp, 8014 loff_t *ppos) 8015 { 8016 int ret; 8017 static DEFINE_MUTEX(mutex); 8018 8019 mutex_lock(&mutex); 8020 ret = proc_dointvec(table, write, buffer, lenp, ppos); 8021 /* make sure that internally we keep jiffies */ 8022 /* also, writing zero resets timeslice to default */ 8023 if (!ret && write) { 8024 sched_rr_timeslice = sched_rr_timeslice <= 0 ? 8025 RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice); 8026 } 8027 mutex_unlock(&mutex); 8028 return ret; 8029 } 8030 8031 #ifdef CONFIG_CGROUP_SCHED 8032 8033 static inline struct task_group *css_tg(struct cgroup_subsys_state *css) 8034 { 8035 return css ? 
container_of(css, struct task_group, css) : NULL; 8036 } 8037 8038 static struct cgroup_subsys_state * 8039 cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 8040 { 8041 struct task_group *parent = css_tg(parent_css); 8042 struct task_group *tg; 8043 8044 if (!parent) { 8045 /* This is early initialization for the top cgroup */ 8046 return &root_task_group.css; 8047 } 8048 8049 tg = sched_create_group(parent); 8050 if (IS_ERR(tg)) 8051 return ERR_PTR(-ENOMEM); 8052 8053 return &tg->css; 8054 } 8055 8056 static int cpu_cgroup_css_online(struct cgroup_subsys_state *css) 8057 { 8058 struct task_group *tg = css_tg(css); 8059 struct task_group *parent = css_tg(css->parent); 8060 8061 if (parent) 8062 sched_online_group(tg, parent); 8063 return 0; 8064 } 8065 8066 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css) 8067 { 8068 struct task_group *tg = css_tg(css); 8069 8070 sched_destroy_group(tg); 8071 } 8072 8073 static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css) 8074 { 8075 struct task_group *tg = css_tg(css); 8076 8077 sched_offline_group(tg); 8078 } 8079 8080 static void cpu_cgroup_fork(struct task_struct *task) 8081 { 8082 sched_move_task(task); 8083 } 8084 8085 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset) 8086 { 8087 struct task_struct *task; 8088 struct cgroup_subsys_state *css; 8089 8090 cgroup_taskset_for_each(task, css, tset) { 8091 #ifdef CONFIG_RT_GROUP_SCHED 8092 if (!sched_rt_can_attach(css_tg(css), task)) 8093 return -EINVAL; 8094 #else 8095 /* We don't support RT-tasks being in separate groups */ 8096 if (task->sched_class != &fair_sched_class) 8097 return -EINVAL; 8098 #endif 8099 } 8100 return 0; 8101 } 8102 8103 static void cpu_cgroup_attach(struct cgroup_taskset *tset) 8104 { 8105 struct task_struct *task; 8106 struct cgroup_subsys_state *css; 8107 8108 cgroup_taskset_for_each(task, css, tset) 8109 sched_move_task(task); 8110 } 8111 8112 #ifdef CONFIG_FAIR_GROUP_SCHED 8113 static int cpu_shares_write_u64(struct cgroup_subsys_state *css, 8114 struct cftype *cftype, u64 shareval) 8115 { 8116 return sched_group_set_shares(css_tg(css), scale_load(shareval)); 8117 } 8118 8119 static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css, 8120 struct cftype *cft) 8121 { 8122 struct task_group *tg = css_tg(css); 8123 8124 return (u64) scale_load_down(tg->shares); 8125 } 8126 8127 #ifdef CONFIG_CFS_BANDWIDTH 8128 static DEFINE_MUTEX(cfs_constraints_mutex); 8129 8130 const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */ 8131 const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */ 8132 8133 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); 8134 8135 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) 8136 { 8137 int i, ret = 0, runtime_enabled, runtime_was_enabled; 8138 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 8139 8140 if (tg == &root_task_group) 8141 return -EINVAL; 8142 8143 /* 8144 * Ensure we have at some amount of bandwidth every period. This is 8145 * to prevent reaching a state of large arrears when throttled via 8146 * entity_tick() resulting in prolonged exit starvation. 8147 */ 8148 if (quota < min_cfs_quota_period || period < min_cfs_quota_period) 8149 return -EINVAL; 8150 8151 /* 8152 * Likewise, bound things on the otherside by preventing insane quota 8153 * periods. This also allows us to normalize in computing quota 8154 * feasibility. 
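 *
 * In cgroup terms (units are microseconds at that interface): writing
 * cpu.cfs_period_us = 100000 and cpu.cfs_quota_us = 25000 caps the
 * group at roughly 25% of one CPU per period, while a period above 1s
 * or a quota/period below 1ms is rejected with -EINVAL by the checks
 * around this comment.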
8155 */ 8156 if (period > max_cfs_quota_period) 8157 return -EINVAL; 8158 8159 /* 8160 * Prevent race between setting of cfs_rq->runtime_enabled and 8161 * unthrottle_offline_cfs_rqs(). 8162 */ 8163 get_online_cpus(); 8164 mutex_lock(&cfs_constraints_mutex); 8165 ret = __cfs_schedulable(tg, period, quota); 8166 if (ret) 8167 goto out_unlock; 8168 8169 runtime_enabled = quota != RUNTIME_INF; 8170 runtime_was_enabled = cfs_b->quota != RUNTIME_INF; 8171 /* 8172 * If we need to toggle cfs_bandwidth_used, off->on must occur 8173 * before making related changes, and on->off must occur afterwards 8174 */ 8175 if (runtime_enabled && !runtime_was_enabled) 8176 cfs_bandwidth_usage_inc(); 8177 raw_spin_lock_irq(&cfs_b->lock); 8178 cfs_b->period = ns_to_ktime(period); 8179 cfs_b->quota = quota; 8180 8181 __refill_cfs_bandwidth_runtime(cfs_b); 8182 /* restart the period timer (if active) to handle new period expiry */ 8183 if (runtime_enabled) 8184 start_cfs_bandwidth(cfs_b); 8185 raw_spin_unlock_irq(&cfs_b->lock); 8186 8187 for_each_online_cpu(i) { 8188 struct cfs_rq *cfs_rq = tg->cfs_rq[i]; 8189 struct rq *rq = cfs_rq->rq; 8190 8191 raw_spin_lock_irq(&rq->lock); 8192 cfs_rq->runtime_enabled = runtime_enabled; 8193 cfs_rq->runtime_remaining = 0; 8194 8195 if (cfs_rq->throttled) 8196 unthrottle_cfs_rq(cfs_rq); 8197 raw_spin_unlock_irq(&rq->lock); 8198 } 8199 if (runtime_was_enabled && !runtime_enabled) 8200 cfs_bandwidth_usage_dec(); 8201 out_unlock: 8202 mutex_unlock(&cfs_constraints_mutex); 8203 put_online_cpus(); 8204 8205 return ret; 8206 } 8207 8208 int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) 8209 { 8210 u64 quota, period; 8211 8212 period = ktime_to_ns(tg->cfs_bandwidth.period); 8213 if (cfs_quota_us < 0) 8214 quota = RUNTIME_INF; 8215 else 8216 quota = (u64)cfs_quota_us * NSEC_PER_USEC; 8217 8218 return tg_set_cfs_bandwidth(tg, period, quota); 8219 } 8220 8221 long tg_get_cfs_quota(struct task_group *tg) 8222 { 8223 u64 quota_us; 8224 8225 if (tg->cfs_bandwidth.quota == RUNTIME_INF) 8226 return -1; 8227 8228 quota_us = tg->cfs_bandwidth.quota; 8229 do_div(quota_us, NSEC_PER_USEC); 8230 8231 return quota_us; 8232 } 8233 8234 int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) 8235 { 8236 u64 quota, period; 8237 8238 period = (u64)cfs_period_us * NSEC_PER_USEC; 8239 quota = tg->cfs_bandwidth.quota; 8240 8241 return tg_set_cfs_bandwidth(tg, period, quota); 8242 } 8243 8244 long tg_get_cfs_period(struct task_group *tg) 8245 { 8246 u64 cfs_period_us; 8247 8248 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period); 8249 do_div(cfs_period_us, NSEC_PER_USEC); 8250 8251 return cfs_period_us; 8252 } 8253 8254 static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css, 8255 struct cftype *cft) 8256 { 8257 return tg_get_cfs_quota(css_tg(css)); 8258 } 8259 8260 static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css, 8261 struct cftype *cftype, s64 cfs_quota_us) 8262 { 8263 return tg_set_cfs_quota(css_tg(css), cfs_quota_us); 8264 } 8265 8266 static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css, 8267 struct cftype *cft) 8268 { 8269 return tg_get_cfs_period(css_tg(css)); 8270 } 8271 8272 static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css, 8273 struct cftype *cftype, u64 cfs_period_us) 8274 { 8275 return tg_set_cfs_period(css_tg(css), cfs_period_us); 8276 } 8277 8278 struct cfs_schedulable_data { 8279 struct task_group *tg; 8280 u64 period, quota; 8281 }; 8282 8283 /* 8284 * normalize group quota/period to be quota/max_period 8285 
/*
 * normalize group quota/period to be quota/max_period
 * note: units are usecs
 */
static u64 normalize_cfs_quota(struct task_group *tg,
			       struct cfs_schedulable_data *d)
{
	u64 quota, period;

	if (tg == d->tg) {
		period = d->period;
		quota = d->quota;
	} else {
		period = tg_get_cfs_period(tg);
		quota = tg_get_cfs_quota(tg);
	}

	/* note: these should typically be equivalent */
	if (quota == RUNTIME_INF || quota == -1)
		return RUNTIME_INF;

	return to_ratio(period, quota);
}

static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
{
	struct cfs_schedulable_data *d = data;
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
	s64 quota = 0, parent_quota = -1;

	if (!tg->parent) {
		quota = RUNTIME_INF;
	} else {
		struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;

		quota = normalize_cfs_quota(tg, d);
		parent_quota = parent_b->hierarchical_quota;

		/*
		 * ensure max(child_quota) <= parent_quota, inherit when no
		 * limit is set
		 */
		if (quota == RUNTIME_INF)
			quota = parent_quota;
		else if (parent_quota != RUNTIME_INF && quota > parent_quota)
			return -EINVAL;
	}
	cfs_b->hierarchical_quota = quota;

	return 0;
}

static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
{
	int ret;
	struct cfs_schedulable_data data = {
		.tg = tg,
		.period = period,
		.quota = quota,
	};

	if (quota != RUNTIME_INF) {
		do_div(data.period, NSEC_PER_USEC);
		do_div(data.quota, NSEC_PER_USEC);
	}

	rcu_read_lock();
	ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
	rcu_read_unlock();

	return ret;
}

static int cpu_stats_show(struct seq_file *sf, void *v)
{
	struct task_group *tg = css_tg(seq_css(sf));
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;

	seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
	seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
	seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);

	return 0;
}
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED
static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
				struct cftype *cft, s64 val)
{
	return sched_group_set_rt_runtime(css_tg(css), val);
}

static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	return sched_group_rt_runtime(css_tg(css));
}

static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
				    struct cftype *cftype, u64 rt_period_us)
{
	return sched_group_set_rt_period(css_tg(css), rt_period_us);
}

static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	return sched_group_rt_period(css_tg(css));
}
#endif /* CONFIG_RT_GROUP_SCHED */
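/*
 * Illustrative sketch (not part of the kernel): the invariant enforced by
 * tg_cfs_schedulable_down() above -- a child's quota/period ratio may not
 * exceed its parent's, and an unlimited child inherits the parent's
 * hierarchical quota.  The 20-bit fixed-point scaling below is an assumed
 * stand-in for to_ratio(); the block is guarded by "#if 0" and never built.
 */
#if 0	/* example only */
#include <stdio.h>
#include <stdint.h>

/* assumed stand-in for to_ratio(): quota/period in 20-bit fixed point */
static uint64_t ex_to_ratio(uint64_t period_us, uint64_t quota_us)
{
	return (quota_us << 20) / period_us;
}

int main(void)
{
	uint64_t parent = ex_to_ratio(100000, 100000);	/* 1.0 CPU  */
	uint64_t child  = ex_to_ratio(100000, 150000);	/* 1.5 CPUs */

	/* mirrors the -EINVAL case: max(child_quota) must stay <= parent_quota */
	printf("child exceeds parent: %s\n",
	       child > parent ? "yes (-EINVAL)" : "no");
	return 0;
}
#endif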
8416 .name = "stat", 8417 .seq_show = cpu_stats_show, 8418 }, 8419 #endif 8420 #ifdef CONFIG_RT_GROUP_SCHED 8421 { 8422 .name = "rt_runtime_us", 8423 .read_s64 = cpu_rt_runtime_read, 8424 .write_s64 = cpu_rt_runtime_write, 8425 }, 8426 { 8427 .name = "rt_period_us", 8428 .read_u64 = cpu_rt_period_read_uint, 8429 .write_u64 = cpu_rt_period_write_uint, 8430 }, 8431 #endif 8432 { } /* terminate */ 8433 }; 8434 8435 struct cgroup_subsys cpu_cgrp_subsys = { 8436 .css_alloc = cpu_cgroup_css_alloc, 8437 .css_free = cpu_cgroup_css_free, 8438 .css_online = cpu_cgroup_css_online, 8439 .css_offline = cpu_cgroup_css_offline, 8440 .fork = cpu_cgroup_fork, 8441 .can_attach = cpu_cgroup_can_attach, 8442 .attach = cpu_cgroup_attach, 8443 .legacy_cftypes = cpu_files, 8444 .early_init = 1, 8445 }; 8446 8447 #endif /* CONFIG_CGROUP_SCHED */ 8448 8449 void dump_cpu_task(int cpu) 8450 { 8451 pr_info("Task dump for CPU %d:\n", cpu); 8452 sched_show_task(cpu_curr(cpu)); 8453 } 8454 8455 /* 8456 * Nice levels are multiplicative, with a gentle 10% change for every 8457 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to 8458 * nice 1, it will get ~10% less CPU time than another CPU-bound task 8459 * that remained on nice 0. 8460 * 8461 * The "10% effect" is relative and cumulative: from _any_ nice level, 8462 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level 8463 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25. 8464 * If a task goes up by ~10% and another task goes down by ~10% then 8465 * the relative distance between them is ~25%.) 8466 */ 8467 const int sched_prio_to_weight[40] = { 8468 /* -20 */ 88761, 71755, 56483, 46273, 36291, 8469 /* -15 */ 29154, 23254, 18705, 14949, 11916, 8470 /* -10 */ 9548, 7620, 6100, 4904, 3906, 8471 /* -5 */ 3121, 2501, 1991, 1586, 1277, 8472 /* 0 */ 1024, 820, 655, 526, 423, 8473 /* 5 */ 335, 272, 215, 172, 137, 8474 /* 10 */ 110, 87, 70, 56, 45, 8475 /* 15 */ 36, 29, 23, 18, 15, 8476 }; 8477 8478 /* 8479 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated. 8480 * 8481 * In cases where the weight does not change often, we can use the 8482 * precalculated inverse to speed up arithmetics by turning divisions 8483 * into multiplications: 8484 */ 8485 const u32 sched_prio_to_wmult[40] = { 8486 /* -20 */ 48388, 59856, 76040, 92818, 118348, 8487 /* -15 */ 147320, 184698, 229616, 287308, 360437, 8488 /* -10 */ 449829, 563644, 704093, 875809, 1099582, 8489 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326, 8490 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587, 8491 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126, 8492 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717, 8493 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153, 8494 }; 8495