Lines Matching +full:de +full:- +full:serialized
1 // SPDX-License-Identifier: GPL-2.0-only
7 * Copyright (C) 1991-2002 Linus Torvalds
8 * Copyright (C) 1998-2024 Ingo Molnar, Red Hat
72 # include <linux/entry-common.h>
97 #include "../../io_uring/io-wq.h"
163 if (p->sched_class == &stop_sched_class) /* trumps deadline */ in __task_prio()
164 return -2; in __task_prio()
166 if (p->dl_server) in __task_prio()
167 return -1; /* deadline */ in __task_prio()
169 if (rt_or_dl_prio(p->prio)) in __task_prio()
170 return p->prio; /* [-1, 99] */ in __task_prio()
172 if (p->sched_class == &idle_sched_class) in __task_prio()
195 if (-pa < -pb) in prio_less()
198 if (-pb < -pa) in prio_less()
201 if (pa == -1) { /* dl_prio() doesn't work because of stop_class above */ in prio_less()
204 a_dl = &a->dl; in prio_less()
207 * __task_prio() can return -1 (for DL) even for those. In that in prio_less()
210 if (a->dl_server) in prio_less()
211 a_dl = a->dl_server; in prio_less()
213 b_dl = &b->dl; in prio_less()
214 if (b->dl_server) in prio_less()
215 b_dl = b->dl_server; in prio_less()
217 return !dl_time_before(a_dl->deadline, b_dl->deadline); in prio_less()
234 if (a->core_cookie < b->core_cookie) in __sched_core_less()
237 if (a->core_cookie > b->core_cookie) in __sched_core_less()
241 if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count)) in __sched_core_less()
259 if (cookie < p->core_cookie) in rb_sched_core_cmp()
260 return -1; in rb_sched_core_cmp()
262 if (cookie > p->core_cookie) in rb_sched_core_cmp()
270 if (p->se.sched_delayed) in sched_core_enqueue()
273 rq->core->core_task_seq++; in sched_core_enqueue()
275 if (!p->core_cookie) in sched_core_enqueue()
278 rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less); in sched_core_enqueue()
283 if (p->se.sched_delayed) in sched_core_dequeue()
286 rq->core->core_task_seq++; in sched_core_dequeue()
289 rb_erase(&p->core_node, &rq->core_tree); in sched_core_dequeue()
290 RB_CLEAR_NODE(&p->core_node); in sched_core_dequeue()
296 * and re-examine whether the core is still in forced idle state. in sched_core_dequeue()
298 if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 && in sched_core_dequeue()
299 rq->core->core_forceidle_count && rq->curr == rq->idle) in sched_core_dequeue()
305 if (p->sched_class->task_is_throttled) in sched_task_is_throttled()
306 return p->sched_class->task_is_throttled(p, cpu); in sched_task_is_throttled()
313 struct rb_node *node = &p->core_node; in sched_core_next()
322 if (p->core_cookie != cookie) in sched_core_next()
331 * Find left-most (aka, highest priority) and unthrottled task matching @cookie.
339 node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp); in sched_core_find()
344 if (!sched_task_is_throttled(p, rq->cpu)) in sched_core_find()
374 raw_spin_lock_nested(&cpu_rq(t)->__lock, i++); in sched_core_lock()
383 raw_spin_unlock(&cpu_rq(t)->__lock); in sched_core_unlock()
404 cpu_rq(t)->core_enabled = enabled; in __sched_core_flip()
406 cpu_rq(cpu)->core->core_forceidle_start = 0; in __sched_core_flip()
417 cpu_rq(cpu)->core_enabled = enabled; in __sched_core_flip()
427 WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree)); in sched_core_assert_empty()
482 if (!atomic_add_unless(&sched_core_count, -1, 1)) in sched_core_put()
499 * p->pi_lock
500 * rq->lock
501 * hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
503 * rq1->lock
504 * rq2->lock where: rq1 < rq2
508 * Normal scheduling state is serialized by rq->lock. __schedule() takes the
509 * local CPU's rq->lock, it optionally removes the task from the runqueue and
513 * Task enqueue is also under rq->lock, possibly taken from another CPU.
519 * complicated to avoid having to take two rq->locks.
523 * System-calls and anything external will use task_rq_lock() which acquires
524 * both p->pi_lock and rq->lock. As a consequence the state they change is
527 * - sched_setaffinity()/
528 * set_cpus_allowed_ptr(): p->cpus_ptr, p->nr_cpus_allowed
529 * - set_user_nice(): p->se.load, p->*prio
530 * - __sched_setscheduler(): p->sched_class, p->policy, p->*prio,
531 * p->se.load, p->rt_priority,
532 * p->dl.dl_{runtime, deadline, period, flags, bw, density}
533 * - sched_setnuma(): p->numa_preferred_nid
534 * - sched_move_task(): p->sched_task_group
535 * - uclamp_update_active() p->uclamp*
537 * p->state <- TASK_*:
541 * try_to_wake_up(). This latter uses p->pi_lock to serialize against
544 * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
547 * rq->lock. Non-zero indicates the task is runnable, the special
549 * rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
551 * Additionally it is possible to be ->on_rq but still be considered not
552 * runnable when p->se.sched_delayed is true. These tasks are on the runqueue
556 * p->on_cpu <- { 0, 1 }:
559 * set before p is scheduled-in and cleared after p is scheduled-out, both
560 * under rq->lock. Non-zero indicates the task is running on its CPU.
563 * CPU to have ->on_cpu = 1 at the same time. ]
567 * - Don't call set_task_cpu() on a blocked task:
572 * - for try_to_wake_up(), called under p->pi_lock:
574 * This allows try_to_wake_up() to only take one rq->lock, see its comment.
576 * - for migration called under rq->lock:
582 * - for migration called under double_rq_lock():
598 raw_spin_lock_nested(&rq->__lock, subclass); in raw_spin_rq_lock_nested()
624 ret = raw_spin_trylock(&rq->__lock); in raw_spin_rq_trylock()
647 * double_rq_lock - safely lock two runqueues
665 * __task_rq_lock - lock the rq @p resides on.
668 __acquires(rq->lock) in __task_rq_lock()
672 lockdep_assert_held(&p->pi_lock); in __task_rq_lock()
689 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
692 __acquires(p->pi_lock) in task_rq_lock()
693 __acquires(rq->lock) in task_rq_lock()
698 raw_spin_lock_irqsave(&p->pi_lock, rf->flags); in task_rq_lock()
704 * ACQUIRE (rq->lock) in task_rq_lock()
705 * [S] ->on_rq = MIGRATING [L] rq = task_rq() in task_rq_lock()
706 * WMB (__set_task_cpu()) ACQUIRE (rq->lock); in task_rq_lock()
707 * [S] ->cpu = new_cpu [L] task_rq() in task_rq_lock()
708 * [L] ->on_rq in task_rq_lock()
709 * RELEASE (rq->lock) in task_rq_lock()
712 * the old rq->lock will fully serialize against the stores. in task_rq_lock()
723 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); in task_rq_lock()
731 * RQ-clock updating methods:
744 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; in update_rq_clock_task()
751 * When this happens, we stop ->clock_task and only update the in update_rq_clock_task()
753 * update will consume the rest. This ensures ->clock_task is in update_rq_clock_task()
756 * It does however cause some slight miss-attribution of {soft,}IRQ in update_rq_clock_task()
758 * the current rq->clock timestamp, except that would require using in update_rq_clock_task()
764 rq->prev_irq_time += irq_delta; in update_rq_clock_task()
765 delta -= irq_delta; in update_rq_clock_task()
766 delayacct_irq(rq->curr, irq_delta); in update_rq_clock_task()
774 steal -= rq->prev_steal_time_rq; in update_rq_clock_task()
779 rq->prev_steal_time_rq = prev_steal; in update_rq_clock_task()
780 delta -= steal; in update_rq_clock_task()
784 rq->clock_task += delta; in update_rq_clock_task()
800 if (rq->clock_update_flags & RQCF_ACT_SKIP) in update_rq_clock()
805 SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED); in update_rq_clock()
806 rq->clock_update_flags |= RQCF_UPDATED; in update_rq_clock()
811 delta = clock - rq->clock; in update_rq_clock()
814 rq->clock += delta; in update_rq_clock()
821 * Use HR-timers to deliver accurate preemption points.
826 if (hrtimer_active(&rq->hrtick_timer)) in hrtick_clear()
827 hrtimer_cancel(&rq->hrtick_timer); in hrtick_clear()
831 * High-resolution timer tick.
843 rq->donor->sched_class->task_tick(rq, rq->curr, 1); in hrtick()
853 struct hrtimer *timer = &rq->hrtick_timer; in __hrtick_restart()
854 ktime_t time = rq->hrtick_time; in __hrtick_restart()
875 * called with rq->lock held and IRQs disabled
879 struct hrtimer *timer = &rq->hrtick_timer; in hrtick_start()
887 rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta); in hrtick_start()
892 smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd); in hrtick_start()
899 * called with rq->lock held and IRQs disabled
908 hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), in hrtick_start()
917 INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq); in hrtick_rq_init()
919 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); in hrtick_rq_init()
920 rq->hrtick_timer.function = hrtick; in hrtick_rq_init()
954 return !(fetch_or(&ti->flags, 1 << tif) & _TIF_POLLING_NRFLAG); in set_nr_and_not_polling()
966 typeof(ti->flags) val = READ_ONCE(ti->flags); in set_nr_if_polling()
973 } while (!try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED)); in set_nr_if_polling()
995 struct wake_q_node *node = &task->wake_q; in __wake_q_add()
998 * Atomically grab the task, if ->wake_q is !nil already it means in __wake_q_add()
1006 if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL))) in __wake_q_add()
1012 *head->lastp = node; in __wake_q_add()
1013 head->lastp = &node->next; in __wake_q_add()
1018 * wake_q_add() - queue a wakeup for 'later' waking.
1026 * This function must be used as-if it were wake_up_process(); IOW the task
1036 * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
1044 * This function must be used as-if it were wake_up_process(); IOW the task
1047 * This function is essentially a task-safe equivalent to wake_q_add(). Callers
1060 struct wake_q_node *node = head->first; in wake_up_q()
1066 node = node->next; in wake_up_q()
1068 WRITE_ONCE(task->wake_q.next, NULL); in wake_up_q()
1069 /* Task can safely be re-inserted now. */ in wake_up_q()
1081 * resched_curr - mark rq's current task 'to be rescheduled now'.
1084 * might also involve a cross-CPU call to trigger the scheduler on
1089 struct task_struct *curr = rq->curr; in __resched_curr()
1102 if (cti->flags & ((1 << tif) | _TIF_NEED_RESCHED)) in __resched_curr()
1168 * from an idle CPU. This is good for power-savings.
1176 int i, cpu = smp_processor_id(), default_cpu = -1; in get_nohz_timer_target()
1200 if (default_cpu == -1) in get_nohz_timer_target()
1224 * Set TIF_NEED_RESCHED and send an IPI if in the non-polling in wake_up_idle_cpu()
1228 * re-evaluate the next tick. Provided some re-ordering of tick in wake_up_idle_cpu()
1232 * - On most architectures, a simple fetch_or on ti::flags with a in wake_up_idle_cpu()
1235 * - x86 needs to perform a last need_resched() check between in wake_up_idle_cpu()
1245 if (set_nr_and_not_polling(task_thread_info(rq->idle), TIF_NEED_RESCHED)) in wake_up_idle_cpu()
1254 * We just need the target to call irq_exit() and re-evaluate in wake_up_full_nohz_cpu()
1294 rq->idle_balance = idle_cpu(cpu); in nohz_csd_func()
1295 if (rq->idle_balance) { in nohz_csd_func()
1296 rq->nohz_idle_balance = flags; in nohz_csd_func()
1306 if (rq->nr_running != 1) in __need_bw_check()
1309 if (p->sched_class != &fair_sched_class) in __need_bw_check()
1323 if (rq->dl.dl_nr_running) in sched_can_stop_tick()
1330 if (rq->rt.rr_nr_running) { in sched_can_stop_tick()
1331 if (rq->rt.rr_nr_running == 1) in sched_can_stop_tick()
1341 fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running; in sched_can_stop_tick()
1353 if (rq->cfs.h_nr_queued > 1) in sched_can_stop_tick()
1361 * E.g. going from 2->1 without going through pick_next_task(). in sched_can_stop_tick()
1363 if (__need_bw_check(rq, rq->curr)) { in sched_can_stop_tick()
1364 if (cfs_task_bw_constrained(rq->curr)) in sched_can_stop_tick()
1393 list_for_each_entry_rcu(child, &parent->children, siblings) { in walk_tg_tree_from()
1405 parent = parent->parent; in walk_tg_tree_from()
1420 int prio = p->static_prio - MAX_RT_PRIO; in set_load_weight()
1435 if (update_load && p->sched_class->reweight_task) in set_load_weight()
1436 p->sched_class->reweight_task(task_rq(p), p, &lw); in set_load_weight()
1438 p->se.load = lw; in set_load_weight()
1445 * The (slow-path) user-space triggers utilization clamp value updates which
1446 * can require updates on (fast-path) scheduler's data structures used to
1448 * While the per-CPU rq lock protects fast-path update operations, user-space
1449 * requests are serialized using a mutex to reduce the risk of conflicting
1470 * This knob only affects RT tasks that their uclamp_se->user_defined == false.
1506 * idle (which drops the max-clamp) by retaining the last known in uclamp_idle_value()
1507 * max-clamp. in uclamp_idle_value()
1510 rq->uclamp_flags |= UCLAMP_FLAG_IDLE; in uclamp_idle_value()
1520 /* Reset max-clamp retention only on idle exit */ in uclamp_idle_reset()
1521 if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE)) in uclamp_idle_reset()
1531 struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket; in uclamp_rq_max_value()
1532 int bucket_id = UCLAMP_BUCKETS - 1; in uclamp_rq_max_value()
1538 for ( ; bucket_id >= 0; bucket_id--) { in uclamp_rq_max_value()
1544 /* No tasks -- default clamp values */ in uclamp_rq_max_value()
1553 lockdep_assert_held(&p->pi_lock); in __uclamp_update_util_min_rt_default()
1555 uc_se = &p->uclamp_req[UCLAMP_MIN]; in __uclamp_update_util_min_rt_default()
1558 if (uc_se->user_defined) in __uclamp_update_util_min_rt_default()
1570 /* Protect updates to p->uclamp_* */ in uclamp_update_util_min_rt_default()
1579 struct uclamp_se uc_req = p->uclamp_req[clamp_id]; in uclamp_tg_restrict()
1592 tg_min = task_group(p)->uclamp[UCLAMP_MIN].value; in uclamp_tg_restrict()
1593 tg_max = task_group(p)->uclamp[UCLAMP_MAX].value; in uclamp_tg_restrict()
1605 * - the task specific clamp value, when explicitly requested from userspace
1606 * - the task group effective clamp value, for tasks not either in the root
1608 * - the system default clamp value, defined by the sysadmin
1627 /* Task currently refcounted: use back-annotated (effective) value */ in uclamp_eff_value()
1628 if (p->uclamp[clamp_id].active) in uclamp_eff_value()
1629 return (unsigned long)p->uclamp[clamp_id].value; in uclamp_eff_value()
1641 * Tasks can have a task-specific value requested from user-space, track
1649 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id]; in uclamp_rq_inc_id()
1650 struct uclamp_se *uc_se = &p->uclamp[clamp_id]; in uclamp_rq_inc_id()
1656 p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id); in uclamp_rq_inc_id()
1658 bucket = &uc_rq->bucket[uc_se->bucket_id]; in uclamp_rq_inc_id()
1659 bucket->tasks++; in uclamp_rq_inc_id()
1660 uc_se->active = true; in uclamp_rq_inc_id()
1662 uclamp_idle_reset(rq, clamp_id, uc_se->value); in uclamp_rq_inc_id()
1668 if (bucket->tasks == 1 || uc_se->value > bucket->value) in uclamp_rq_inc_id()
1669 bucket->value = uc_se->value; in uclamp_rq_inc_id()
1671 if (uc_se->value > uclamp_rq_get(rq, clamp_id)) in uclamp_rq_inc_id()
1672 uclamp_rq_set(rq, clamp_id, uc_se->value); in uclamp_rq_inc_id()
1687 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id]; in uclamp_rq_dec_id()
1688 struct uclamp_se *uc_se = &p->uclamp[clamp_id]; in uclamp_rq_dec_id()
1699 * In this case the uc_se->active flag should be false since no uclamp in uclamp_rq_dec_id()
1710 * // Must not decrement bucket->tasks here in uclamp_rq_dec_id()
1714 * bucket[uc_se->bucket_id]. in uclamp_rq_dec_id()
1718 if (unlikely(!uc_se->active)) in uclamp_rq_dec_id()
1721 bucket = &uc_rq->bucket[uc_se->bucket_id]; in uclamp_rq_dec_id()
1723 SCHED_WARN_ON(!bucket->tasks); in uclamp_rq_dec_id()
1724 if (likely(bucket->tasks)) in uclamp_rq_dec_id()
1725 bucket->tasks--; in uclamp_rq_dec_id()
1727 uc_se->active = false; in uclamp_rq_dec_id()
1735 if (likely(bucket->tasks)) in uclamp_rq_dec_id()
1743 SCHED_WARN_ON(bucket->value > rq_clamp); in uclamp_rq_dec_id()
1744 if (bucket->value >= rq_clamp) { in uclamp_rq_dec_id()
1745 bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value); in uclamp_rq_dec_id()
1763 if (unlikely(!p->sched_class->uclamp_enabled)) in uclamp_rq_inc()
1766 if (p->se.sched_delayed) in uclamp_rq_inc()
1773 if (rq->uclamp_flags & UCLAMP_FLAG_IDLE) in uclamp_rq_inc()
1774 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE; in uclamp_rq_inc()
1790 if (unlikely(!p->sched_class->uclamp_enabled)) in uclamp_rq_dec()
1793 if (p->se.sched_delayed) in uclamp_rq_dec()
1803 if (!p->uclamp[clamp_id].active) in uclamp_rq_reinc_id()
1813 if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE)) in uclamp_rq_reinc_id()
1814 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE; in uclamp_rq_reinc_id()
1835 * Setting the clamp bucket is serialized by task_rq_lock(). in uclamp_update_active()
1868 uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN], in uclamp_update_root_tg()
1870 uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX], in uclamp_update_root_tg()
1929 result = -EINVAL; in sysctl_sched_uclamp_handler()
1974 * We don't need to hold task_rq_lock() when updating p->uclamp_* here in uclamp_fork()
1978 p->uclamp[clamp_id].active = false; in uclamp_fork()
1980 if (likely(!p->sched_reset_on_fork)) in uclamp_fork()
1984 uclamp_se_set(&p->uclamp_req[clamp_id], in uclamp_fork()
1997 struct uclamp_rq *uc_rq = rq->uclamp; in init_uclamp_rq()
2005 rq->uclamp_flags = UCLAMP_FLAG_IDLE; in init_uclamp_rq()
2055 raw_spin_lock_irq(&p->pi_lock); in get_wchan()
2056 state = READ_ONCE(p->__state); in get_wchan()
2058 if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq) in get_wchan()
2060 raw_spin_unlock_irq(&p->pi_lock); in get_wchan()
2070 p->sched_class->enqueue_task(rq, p, flags); in enqueue_task()
2072 * Must be after ->enqueue_task() because ENQUEUE_DELAYED can clear in enqueue_task()
2073 * ->sched_delayed. in enqueue_task()
2103 * Must be before ->dequeue_task() because ->dequeue_task() can 'fail' in dequeue_task()
2104 * and mark the task ->sched_delayed. in dequeue_task()
2107 return p->sched_class->dequeue_task(rq, p, flags); in dequeue_task()
2119 WRITE_ONCE(p->on_rq, TASK_ON_RQ_QUEUED); in activate_task()
2120 ASSERT_EXCLUSIVE_WRITER(p->on_rq); in activate_task()
2127 WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING); in deactivate_task()
2128 ASSERT_EXCLUSIVE_WRITER(p->on_rq); in deactivate_task()
2145 * task_curr - is this task currently executing on a CPU?
2156 * ->switching_to() is called with the pi_lock and rq_lock held and must not
2162 if (prev_class != p->sched_class && p->sched_class->switching_to) in check_class_changing()
2163 p->sched_class->switching_to(rq, p); in check_class_changing()
2167 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
2177 if (prev_class != p->sched_class) { in check_class_changed()
2178 if (prev_class->switched_from) in check_class_changed()
2179 prev_class->switched_from(rq, p); in check_class_changed()
2181 p->sched_class->switched_to(rq, p); in check_class_changed()
2182 } else if (oldprio != p->prio || dl_task(p)) in check_class_changed()
2183 p->sched_class->prio_changed(rq, p, oldprio); in check_class_changed()
2188 struct task_struct *donor = rq->donor; in wakeup_preempt()
2190 if (p->sched_class == donor->sched_class) in wakeup_preempt()
2191 donor->sched_class->wakeup_preempt(rq, p, flags); in wakeup_preempt()
2192 else if (sched_class_above(p->sched_class, donor->sched_class)) in wakeup_preempt()
2199 if (task_on_rq_queued(donor) && test_tsk_need_resched(rq->curr)) in wakeup_preempt()
2206 if (READ_ONCE(p->__state) & state) in __task_state_match()
2209 if (READ_ONCE(p->saved_state) & state) in __task_state_match()
2210 return -1; in __task_state_match()
2222 guard(raw_spinlock_irq)(&p->pi_lock); in task_state_match()
2227 * wait_task_inactive - wait for a thread to unschedule.
2252 * any task-queue locks at all. We'll only try to get in wait_task_inactive()
2260 * still, just relax and busy-wait without holding in wait_task_inactive()
2287 * When matching on p->saved_state, consider this task in wait_task_inactive()
2292 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ in wait_task_inactive()
2320 * yield - it could be a while. in wait_task_inactive()
2349 .new_mask = cpumask_of(rq->cpu), in migrate_disable_switch()
2353 if (likely(!p->migration_disabled)) in migrate_disable_switch()
2356 if (p->cpus_ptr != &p->cpus_mask) in migrate_disable_switch()
2369 if (p->migration_disabled) { in migrate_disable()
2372 *Warn about overflow half-way through the range. in migrate_disable()
2374 WARN_ON_ONCE((s16)p->migration_disabled < 0); in migrate_disable()
2376 p->migration_disabled++; in migrate_disable()
2381 this_rq()->nr_pinned++; in migrate_disable()
2382 p->migration_disabled = 1; in migrate_disable()
2390 .new_mask = &p->cpus_mask, in migrate_enable()
2399 if (WARN_ON_ONCE((s16)p->migration_disabled <= 0)) in migrate_enable()
2403 if (p->migration_disabled > 1) { in migrate_enable()
2404 p->migration_disabled--; in migrate_enable()
2413 if (p->cpus_ptr != &p->cpus_mask) in migrate_enable()
2421 p->migration_disabled = 0; in migrate_enable()
2422 this_rq()->nr_pinned--; in migrate_enable()
2428 return rq->nr_pinned; in rq_has_pinned_tasks()
2432 * Per-CPU kthreads are allowed to run on !active && online CPUs, see
2446 if (!(p->flags & PF_KTHREAD)) in is_cpu_allowed()
2476 * move_queued_task - move a queued task to new rq.
2539 * migration_cpu_stop - this will be executed by a high-prio stopper thread
2546 struct set_affinity_pending *pending = arg->pending; in migration_cpu_stop()
2547 struct task_struct *p = arg->task; in migration_cpu_stop()
2564 raw_spin_lock(&p->pi_lock); in migration_cpu_stop()
2568 * If we were passed a pending, then ->stop_pending was set, thus in migration_cpu_stop()
2569 * p->migration_pending must have remained stable. in migration_cpu_stop()
2571 WARN_ON_ONCE(pending && pending != p->migration_pending); in migration_cpu_stop()
2575 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because in migration_cpu_stop()
2576 * we're holding p->pi_lock. in migration_cpu_stop()
2583 p->migration_pending = NULL; in migration_cpu_stop()
2586 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) in migration_cpu_stop()
2592 rq = __migrate_task(rq, &rf, p, arg->dest_cpu); in migration_cpu_stop()
2594 p->wake_cpu = arg->dest_cpu; in migration_cpu_stop()
2616 * ->pi_lock, so the allowed mask is stable - if it got in migration_cpu_stop()
2619 if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) { in migration_cpu_stop()
2620 p->migration_pending = NULL; in migration_cpu_stop()
2626 * When migrate_enable() hits a rq mis-match we can't reliably in migration_cpu_stop()
2630 WARN_ON_ONCE(!pending->stop_pending); in migration_cpu_stop()
2634 &pending->arg, &pending->stop_work); in migration_cpu_stop()
2640 pending->stop_pending = false; in migration_cpu_stop()
2644 complete_all(&pending->done); in migration_cpu_stop()
2654 raw_spin_lock_irq(&p->pi_lock); in push_cpu_stop()
2661 p->migration_flags |= MDF_PUSH; in push_cpu_stop()
2665 p->migration_flags &= ~MDF_PUSH; in push_cpu_stop()
2667 if (p->sched_class->find_lock_rq) in push_cpu_stop()
2668 lowest_rq = p->sched_class->find_lock_rq(p, rq); in push_cpu_stop()
2682 rq->push_busy = false; in push_cpu_stop()
2684 raw_spin_unlock_irq(&p->pi_lock); in push_cpu_stop()
2696 if (ctx->flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) { in set_cpus_allowed_common()
2697 p->cpus_ptr = ctx->new_mask; in set_cpus_allowed_common()
2701 cpumask_copy(&p->cpus_mask, ctx->new_mask); in set_cpus_allowed_common()
2702 p->nr_cpus_allowed = cpumask_weight(ctx->new_mask); in set_cpus_allowed_common()
2707 if (ctx->flags & SCA_USER) in set_cpus_allowed_common()
2708 swap(p->user_cpus_ptr, ctx->user_mask); in set_cpus_allowed_common()
2719 * supposed to change these variables while holding both rq->lock and in __do_set_cpus_allowed()
2720 * p->pi_lock. in __do_set_cpus_allowed()
2723 * accesses these variables under p->pi_lock and only does so after in __do_set_cpus_allowed()
2724 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule() in __do_set_cpus_allowed()
2729 if (ctx->flags & SCA_MIGRATE_DISABLE) in __do_set_cpus_allowed()
2730 SCHED_WARN_ON(!p->on_cpu); in __do_set_cpus_allowed()
2732 lockdep_assert_held(&p->pi_lock); in __do_set_cpus_allowed()
2740 * holding rq->lock. in __do_set_cpus_allowed()
2748 p->sched_class->set_cpus_allowed(p, ctx); in __do_set_cpus_allowed()
2749 mm_set_cpus_allowed(p->mm, ctx->new_mask); in __do_set_cpus_allowed()
2776 * Because this is called with p->pi_lock held, it is not possible in do_set_cpus_allowed()
2790 * Always clear dst->user_cpus_ptr first as their user_cpus_ptr's in dup_user_cpus_ptr()
2793 dst->user_cpus_ptr = NULL; in dup_user_cpus_ptr()
2800 if (data_race(!src->user_cpus_ptr)) in dup_user_cpus_ptr()
2805 return -ENOMEM; in dup_user_cpus_ptr()
2813 raw_spin_lock_irqsave(&src->pi_lock, flags); in dup_user_cpus_ptr()
2814 if (src->user_cpus_ptr) { in dup_user_cpus_ptr()
2815 swap(dst->user_cpus_ptr, user_mask); in dup_user_cpus_ptr()
2816 cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr); in dup_user_cpus_ptr()
2818 raw_spin_unlock_irqrestore(&src->pi_lock, flags); in dup_user_cpus_ptr()
2830 swap(p->user_cpus_ptr, user_mask); in clear_user_cpus_ptr()
2848 * Migrate-Disable comes along and tramples all over our nice sandcastle.
2851 * Initial conditions: P0->cpus_mask = [0, 1]
2860 * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region).
2873 * `--> <woken on migration completion>
2875 * Now the fun stuff: there may be several P1-like tasks, i.e. multiple
2877 * task p are serialized by p->pi_lock, which we can leverage: the one that
2878 * should come into effect at the end of the Migrate-Disable region is the last
2879 * one. This means we only need to track a single cpumask (i.e. p->cpus_mask),
2884 * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will
2888 * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()).
2894 * Migrate-Disable. Consider:
2896 * Initial conditions: P0->cpus_mask = [0, 1]
2914 * p->migration_pending done with p->pi_lock held.
2918 __releases(rq->lock) in affine_move_task()
2919 __releases(p->pi_lock) in affine_move_task()
2925 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) { in affine_move_task()
2929 (p->migration_flags & MDF_PUSH) && !rq->push_busy) { in affine_move_task()
2930 rq->push_busy = true; in affine_move_task()
2938 pending = p->migration_pending; in affine_move_task()
2939 if (pending && !pending->stop_pending) { in affine_move_task()
2940 p->migration_pending = NULL; in affine_move_task()
2947 stop_one_cpu_nowait(rq->cpu, push_cpu_stop, in affine_move_task()
2948 p, &rq->push_work); in affine_move_task()
2953 complete_all(&pending->done); in affine_move_task()
2959 /* serialized by p->pi_lock */ in affine_move_task()
2960 if (!p->migration_pending) { in affine_move_task()
2970 p->migration_pending = &my_pending; in affine_move_task()
2972 pending = p->migration_pending; in affine_move_task()
2973 refcount_inc(&pending->refs); in affine_move_task()
2980 * Serialized by p->pi_lock, so this is safe. in affine_move_task()
2982 pending->arg.dest_cpu = dest_cpu; in affine_move_task()
2985 pending = p->migration_pending; in affine_move_task()
2987 * - !MIGRATE_ENABLE: in affine_move_task()
2990 * - MIGRATE_ENABLE: in affine_move_task()
3000 return -EINVAL; in affine_move_task()
3003 if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) { in affine_move_task()
3007 * and have the stopper function handle it all race-free. in affine_move_task()
3009 stop_pending = pending->stop_pending; in affine_move_task()
3011 pending->stop_pending = true; in affine_move_task()
3014 p->migration_flags &= ~MDF_PUSH; in affine_move_task()
3020 &pending->arg, &pending->stop_work); in affine_move_task()
3032 if (!pending->stop_pending) { in affine_move_task()
3033 p->migration_pending = NULL; in affine_move_task()
3040 complete_all(&pending->done); in affine_move_task()
3043 wait_for_completion(&pending->done); in affine_move_task()
3045 if (refcount_dec_and_test(&pending->refs)) in affine_move_task()
3046 wake_up_var(&pending->refs); /* No UaF, just an address */ in affine_move_task()
3061 * Called with both p->pi_lock and rq->lock held; drops both before returning.
3067 __releases(rq->lock) in __set_cpus_allowed_ptr_locked()
3068 __releases(p->pi_lock) in __set_cpus_allowed_ptr_locked()
3072 bool kthread = p->flags & PF_KTHREAD; in __set_cpus_allowed_ptr_locked()
3081 * however, during cpu-hot-unplug, even these might get pushed in __set_cpus_allowed_ptr_locked()
3087 * set_cpus_allowed_common() and actually reset p->cpus_ptr. in __set_cpus_allowed_ptr_locked()
3092 if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) { in __set_cpus_allowed_ptr_locked()
3093 ret = -EINVAL; in __set_cpus_allowed_ptr_locked()
3098 * Must re-check here, to close a race against __kthread_bind(), in __set_cpus_allowed_ptr_locked()
3101 if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) { in __set_cpus_allowed_ptr_locked()
3102 ret = -EINVAL; in __set_cpus_allowed_ptr_locked()
3106 if (!(ctx->flags & SCA_MIGRATE_ENABLE)) { in __set_cpus_allowed_ptr_locked()
3107 if (cpumask_equal(&p->cpus_mask, ctx->new_mask)) { in __set_cpus_allowed_ptr_locked()
3108 if (ctx->flags & SCA_USER) in __set_cpus_allowed_ptr_locked()
3109 swap(p->user_cpus_ptr, ctx->user_mask); in __set_cpus_allowed_ptr_locked()
3115 !cpumask_test_cpu(task_cpu(p), ctx->new_mask))) { in __set_cpus_allowed_ptr_locked()
3116 ret = -EBUSY; in __set_cpus_allowed_ptr_locked()
3126 dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, ctx->new_mask); in __set_cpus_allowed_ptr_locked()
3128 ret = -EINVAL; in __set_cpus_allowed_ptr_locked()
3134 return affine_move_task(rq, p, rf, dest_cpu, ctx->flags); in __set_cpus_allowed_ptr_locked()
3161 if (p->user_cpus_ptr && in __set_cpus_allowed_ptr()
3162 !(ctx->flags & (SCA_USER | SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) && in __set_cpus_allowed_ptr()
3163 cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr)) in __set_cpus_allowed_ptr()
3164 ctx->new_mask = rq->scratch_mask; in __set_cpus_allowed_ptr()
3187 * -EINVAL.
3209 err = -EPERM; in restrict_cpus_allowed_ptr()
3214 err = -EINVAL; in restrict_cpus_allowed_ptr()
3227 * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
3260 task_pid_nr(p), p->comm, in force_compatible_cpus_allowed_ptr()
3296 unsigned int state = READ_ONCE(p->__state); in set_task_cpu()
3302 WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq); in set_task_cpu()
3305 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING, in set_task_cpu()
3307 * time relying on p->on_rq. in set_task_cpu()
3310 p->sched_class == &fair_sched_class && in set_task_cpu()
3311 (p->on_rq && !task_on_rq_migrating(p))); in set_task_cpu()
3315 * The caller should hold either p->pi_lock or rq->lock, when changing in set_task_cpu()
3316 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks. in set_task_cpu()
3324 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || in set_task_cpu()
3338 if (p->sched_class->migrate_task_rq) in set_task_cpu()
3339 p->sched_class->migrate_task_rq(p, new_cpu); in set_task_cpu()
3340 p->se.nr_migrations++; in set_task_cpu()
3374 p->wake_cpu = cpu; in __migrate_swap_task()
3388 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu)) in migrate_swap_stop()
3389 return -EAGAIN; in migrate_swap_stop()
3391 src_rq = cpu_rq(arg->src_cpu); in migrate_swap_stop()
3392 dst_rq = cpu_rq(arg->dst_cpu); in migrate_swap_stop()
3394 guard(double_raw_spinlock)(&arg->src_task->pi_lock, &arg->dst_task->pi_lock); in migrate_swap_stop()
3397 if (task_cpu(arg->dst_task) != arg->dst_cpu) in migrate_swap_stop()
3398 return -EAGAIN; in migrate_swap_stop()
3400 if (task_cpu(arg->src_task) != arg->src_cpu) in migrate_swap_stop()
3401 return -EAGAIN; in migrate_swap_stop()
3403 if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr)) in migrate_swap_stop()
3404 return -EAGAIN; in migrate_swap_stop()
3406 if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr)) in migrate_swap_stop()
3407 return -EAGAIN; in migrate_swap_stop()
3409 __migrate_swap_task(arg->src_task, arg->dst_cpu); in migrate_swap_stop()
3410 __migrate_swap_task(arg->dst_task, arg->src_cpu); in migrate_swap_stop()
3422 int ret = -EINVAL; in migrate_swap()
3436 * will be re-checked with proper locks held further down the line. in migrate_swap()
3441 if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr)) in migrate_swap()
3444 if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr)) in migrate_swap()
3456 * kick_process - kick a running thread to enter/exit the kernel
3457 * @p: the to-be-kicked thread
3460 * kernel-mode, without any delay. (to get signals handled.)
3479 * ->cpus_ptr is protected by both rq->lock and p->pi_lock
3483 * - cpu_active must be a subset of cpu_online
3485 * - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
3490 * - on CPU-down we clear cpu_active() to mask the sched domains and
3509 * will return -1. There is no CPU on the node, and we should in select_fallback_rq()
3512 if (nid != -1) { in select_fallback_rq()
3524 for_each_cpu(dest_cpu, p->cpus_ptr) { in select_fallback_rq()
3542 * hold p->pi_lock and again violate locking order. in select_fallback_rq()
3562 if (p->mm && printk_ratelimit()) { in select_fallback_rq()
3564 task_pid_nr(p), p->comm, cpu); in select_fallback_rq()
3572 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
3577 lockdep_assert_held(&p->pi_lock); in select_task_rq()
3579 if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p)) { in select_task_rq()
3580 cpu = p->sched_class->select_task_rq(p, cpu, *wake_flags); in select_task_rq()
3583 cpu = cpumask_any(p->cpus_ptr); in select_task_rq()
3588 * to rely on ttwu() to place the task on a valid ->cpus_ptr in select_task_rq()
3593 * [ this allows ->select_task() to simply return task_cpu(p) and in select_task_rq()
3605 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; in sched_set_stop_task()
3606 struct task_struct *old_stop = cpu_rq(cpu)->stop; in sched_set_stop_task()
3614 * much confusion -- but then, stop work should not in sched_set_stop_task()
3619 stop->sched_class = &stop_sched_class; in sched_set_stop_task()
3622 * The PI code calls rt_mutex_setprio() with ->pi_lock held to in sched_set_stop_task()
3628 * The stop task itself will never be part of the PI-chain, it in sched_set_stop_task()
3629 * never blocks, therefore that ->pi_lock recursion is safe. in sched_set_stop_task()
3630 * Tell lockdep about this by placing the stop->pi_lock in its in sched_set_stop_task()
3633 lockdep_set_class(&stop->pi_lock, &stop_pi_lock); in sched_set_stop_task()
3636 cpu_rq(cpu)->stop = stop; in sched_set_stop_task()
3643 old_stop->sched_class = &rt_sched_class; in sched_set_stop_task()
3669 if (cpu == rq->cpu) { in ttwu_stat()
3670 __schedstat_inc(rq->ttwu_local); in ttwu_stat()
3671 __schedstat_inc(p->stats.nr_wakeups_local); in ttwu_stat()
3675 __schedstat_inc(p->stats.nr_wakeups_remote); in ttwu_stat()
3678 for_each_domain(rq->cpu, sd) { in ttwu_stat()
3680 __schedstat_inc(sd->ttwu_wake_remote); in ttwu_stat()
3687 __schedstat_inc(p->stats.nr_wakeups_migrate); in ttwu_stat()
3690 __schedstat_inc(rq->ttwu_count); in ttwu_stat()
3691 __schedstat_inc(p->stats.nr_wakeups); in ttwu_stat()
3694 __schedstat_inc(p->stats.nr_wakeups_sync); in ttwu_stat()
3702 WRITE_ONCE(p->__state, TASK_RUNNING); in ttwu_do_wakeup()
3714 if (p->sched_contributes_to_load) in ttwu_do_activate()
3715 rq->nr_uninterruptible--; in ttwu_do_activate()
3724 if (p->in_iowait) { in ttwu_do_activate()
3726 atomic_dec(&task_rq(p)->nr_iowait); in ttwu_do_activate()
3735 if (p->sched_class->task_woken) { in ttwu_do_activate()
3738 * drop the rq->lock, hereafter rq is only used for statistics. in ttwu_do_activate()
3741 p->sched_class->task_woken(rq, p); in ttwu_do_activate()
3745 if (rq->idle_stamp) { in ttwu_do_activate()
3746 u64 delta = rq_clock(rq) - rq->idle_stamp; in ttwu_do_activate()
3747 u64 max = 2*rq->max_idle_balance_cost; in ttwu_do_activate()
3749 update_avg(&rq->avg_idle, delta); in ttwu_do_activate()
3751 if (rq->avg_idle > max) in ttwu_do_activate()
3752 rq->avg_idle = max; in ttwu_do_activate()
3754 rq->idle_stamp = 0; in ttwu_do_activate()
3773 * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
3776 * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
3777 * then schedule() must still happen and p->state can be changed to
3793 if (p->se.sched_delayed) in ttwu_runnable()
3825 if (WARN_ON_ONCE(p->on_cpu)) in sched_ttwu_pending()
3826 smp_cond_load_acquire(&p->on_cpu, !VAL); in sched_ttwu_pending()
3831 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf); in sched_ttwu_pending()
3836 * idle_cpu() does not observe a false-negative -- if it does, in sched_ttwu_pending()
3844 WRITE_ONCE(rq->ttwu_pending, 0); in sched_ttwu_pending()
3856 if (set_nr_if_polling(cpu_rq(cpu)->idle)) { in call_function_single_prep_ipi()
3874 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED); in __ttwu_queue_wakelist()
3876 WRITE_ONCE(rq->ttwu_pending, 1); in __ttwu_queue_wakelist()
3877 __smp_call_single_queue(cpu, &p->wake_entry.llist); in __ttwu_queue_wakelist()
3885 if (is_idle_task(rcu_dereference(rq->curr))) { in wake_up_if_idle()
3887 if (is_idle_task(rq->curr)) in wake_up_if_idle()
3912 * Whether CPUs share cache resources, which means LLC on non-cluster
3942 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) in ttwu_queue_cond()
3958 * the task activation to the idle (or soon-to-be-idle) CPU as in ttwu_queue_cond()
3962 * Note that we can only get here with (wakee) p->on_rq=0, in ttwu_queue_cond()
3963 * p->on_cpu can be whatever, we've done the dequeue, so in ttwu_queue_cond()
3964 * the wakee has been accounted out of ->nr_running. in ttwu_queue_cond()
3966 if (!cpu_rq(cpu)->nr_running) in ttwu_queue_cond()
4015 * p::saved_state, which means the code is fully serialized in both cases.
4051 p->saved_state = TASK_RUNNING; in ttwu_state_match()
4057 * Notes on Program-Order guarantees on SMP systems.
4061 * The basic program-order guarantee on SMP systems is that when a task [t]
4062 * migrates, all its activity on its old CPU [c0] happens-before any subsequent
4067 * A) UNLOCK of the rq(c0)->lock scheduling out task t
4068 * B) migration for t is required to synchronize *both* rq(c0)->lock and
4069 * rq(c1)->lock (if not at the same time, then in that order).
4070 * C) LOCK of the rq(c1)->lock scheduling in task
4079 * LOCK rq(0)->lock
4080 * sched-out X
4081 * sched-in Y
4082 * UNLOCK rq(0)->lock
4084 * LOCK rq(0)->lock // orders against CPU0
4086 * UNLOCK rq(0)->lock
4088 * LOCK rq(1)->lock
4090 * UNLOCK rq(1)->lock
4092 * LOCK rq(1)->lock // orders against CPU2
4093 * sched-out Z
4094 * sched-in X
4095 * UNLOCK rq(1)->lock
4098 * BLOCKING -- aka. SLEEP + WAKEUP
4104 * 1) smp_store_release(X->on_cpu, 0) -- finish_task()
4105 * 2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
4111 * LOCK rq(0)->lock LOCK X->pi_lock
4113 * sched-out X
4114 * smp_store_release(X->on_cpu, 0);
4116 * smp_cond_load_acquire(&X->on_cpu, !VAL);
4117 * X->state = WAKING
4120 * LOCK rq(2)->lock
4122 * X->state = RUNNING
4123 * UNLOCK rq(2)->lock
4125 * LOCK rq(2)->lock // orders against CPU1
4126 * sched-out Z
4127 * sched-in X
4128 * UNLOCK rq(2)->lock
4130 * UNLOCK X->pi_lock
4131 * UNLOCK rq(0)->lock
4140 * try_to_wake_up - wake up a thread
4147 * If (@state & @p->state) @p->state = TASK_RUNNING.
4153 * It issues a full memory barrier before accessing @p->state, see the comment
4156 * Uses p->pi_lock to serialize against concurrent wake-ups.
4158 * Relies on p->pi_lock stabilizing:
4159 * - p->sched_class
4160 * - p->cpus_ptr
4161 * - p->sched_task_group
4164 * Tries really hard to only take one task_rq(p)->lock for performance.
4165 * Takes rq->lock in:
4166 * - ttwu_runnable() -- old rq, unavoidable, see comment there;
4167 * - ttwu_queue() -- new rq, for enqueue of the task;
4168 * - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
4173 * Return: %true if @p->state changes (an actual wakeup was done),
4185 * We're waking current, this means 'p->on_rq' and 'task_cpu(p) in try_to_wake_up()
4187 * case the whole 'p->on_rq && ttwu_runnable()' case below in try_to_wake_up()
4195 * - we rely on Program-Order guarantees for all the ordering, in try_to_wake_up()
4196 * - we're serialized against set_special_state() by virtue of in try_to_wake_up()
4197 * it disabling IRQs (this allows not taking ->pi_lock). in try_to_wake_up()
4199 SCHED_WARN_ON(p->se.sched_delayed); in try_to_wake_up()
4211 * reordered with p->state check below. This pairs with smp_store_mb() in try_to_wake_up()
4214 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) { in try_to_wake_up()
4222 * Ensure we load p->on_rq _after_ p->state, otherwise it would in try_to_wake_up()
4223 * be possible to, falsely, observe p->on_rq == 0 and get stuck in try_to_wake_up()
4227 * STORE p->on_rq = 1 LOAD p->state in try_to_wake_up()
4228 * UNLOCK rq->lock in try_to_wake_up()
4231 * LOCK rq->lock smp_rmb(); in try_to_wake_up()
4233 * UNLOCK rq->lock in try_to_wake_up()
4236 * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq in try_to_wake_up()
4238 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in in try_to_wake_up()
4244 if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags)) in try_to_wake_up()
4249 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be in try_to_wake_up()
4250 * possible to, falsely, observe p->on_cpu == 0. in try_to_wake_up()
4252 * One must be running (->on_cpu == 1) in order to remove oneself in try_to_wake_up()
4256 * STORE p->on_cpu = 1 LOAD p->on_rq in try_to_wake_up()
4257 * UNLOCK rq->lock in try_to_wake_up()
4260 * LOCK rq->lock smp_rmb(); in try_to_wake_up()
4262 * STORE p->on_rq = 0 LOAD p->on_cpu in try_to_wake_up()
4264 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in in try_to_wake_up()
4267 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure in try_to_wake_up()
4269 * care about it's own p->state. See the comment in __schedule(). in try_to_wake_up()
4274 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq in try_to_wake_up()
4275 * == 0), which means we need to do an enqueue, change p->state to in try_to_wake_up()
4276 * TASK_WAKING such that we can unlock p->pi_lock before doing the in try_to_wake_up()
4279 WRITE_ONCE(p->__state, TASK_WAKING); in try_to_wake_up()
4284 * which potentially sends an IPI instead of spinning on p->on_cpu to in try_to_wake_up()
4288 * Ensure we load task_cpu(p) after p->on_cpu: in try_to_wake_up()
4291 * STORE p->cpu = @cpu in try_to_wake_up()
4293 * LOCK rq->lock in try_to_wake_up()
4294 * smp_mb__after_spin_lock() smp_cond_load_acquire(&p->on_cpu) in try_to_wake_up()
4295 * STORE p->on_cpu = 1 LOAD p->cpu in try_to_wake_up()
4300 if (smp_load_acquire(&p->on_cpu) && in try_to_wake_up()
4313 smp_cond_load_acquire(&p->on_cpu, !VAL); in try_to_wake_up()
4315 cpu = select_task_rq(p, p->wake_cpu, &wake_flags); in try_to_wake_up()
4317 if (p->in_iowait) { in try_to_wake_up()
4319 atomic_dec(&task_rq(p)->nr_iowait); in try_to_wake_up()
4341 unsigned int state = READ_ONCE(p->__state); in __task_needs_rq_lock()
4344 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when in __task_needs_rq_lock()
4352 * Ensure we load p->on_rq after p->__state, otherwise it would be in __task_needs_rq_lock()
4353 * possible to, falsely, observe p->on_rq == 0. in __task_needs_rq_lock()
4358 if (p->on_rq) in __task_needs_rq_lock()
4367 smp_cond_load_acquire(&p->on_cpu, !VAL); in __task_needs_rq_lock()
4374 * task_call_func - Invoke a function on task in fixed state
4394 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); in task_call_func()
4401 * - blocked and we're holding off wakeups (pi->lock) in task_call_func()
4402 * - woken, and we're holding off enqueue (rq->lock) in task_call_func()
4403 * - queued, and we're holding off schedule (rq->lock) in task_call_func()
4404 * - running, and we're holding off de-schedule (rq->lock) in task_call_func()
4406 * The called function (@func) can use: task_curr(), p->on_rq and in task_call_func()
4407 * p->__state to differentiate between these states. in task_call_func()
4414 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags); in task_call_func()
4419 * cpu_curr_snapshot - Return a snapshot of the currently running task
4451 * wake_up_process - Wake up a specific process
4481 p->on_rq = 0; in __sched_fork()
4483 p->se.on_rq = 0; in __sched_fork()
4484 p->se.exec_start = 0; in __sched_fork()
4485 p->se.sum_exec_runtime = 0; in __sched_fork()
4486 p->se.prev_sum_exec_runtime = 0; in __sched_fork()
4487 p->se.nr_migrations = 0; in __sched_fork()
4488 p->se.vruntime = 0; in __sched_fork()
4489 p->se.vlag = 0; in __sched_fork()
4490 INIT_LIST_HEAD(&p->se.group_node); in __sched_fork()
4493 SCHED_WARN_ON(p->se.sched_delayed); in __sched_fork()
4496 p->se.cfs_rq = NULL; in __sched_fork()
4501 memset(&p->stats, 0, sizeof(p->stats)); in __sched_fork()
4504 init_dl_entity(&p->dl); in __sched_fork()
4506 INIT_LIST_HEAD(&p->rt.run_list); in __sched_fork()
4507 p->rt.timeout = 0; in __sched_fork()
4508 p->rt.time_slice = sched_rr_timeslice; in __sched_fork()
4509 p->rt.on_rq = 0; in __sched_fork()
4510 p->rt.on_list = 0; in __sched_fork()
4513 init_scx_entity(&p->scx); in __sched_fork()
4517 INIT_HLIST_HEAD(&p->preempt_notifiers); in __sched_fork()
4521 p->capture_control = NULL; in __sched_fork()
4525 p->wake_entry.u_flags = CSD_TYPE_TTWU; in __sched_fork()
4526 p->migration_pending = NULL; in __sched_fork()
4560 pgdat->nbp_threshold = 0; in reset_memory_tiering()
4561 pgdat->nbp_th_nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE); in reset_memory_tiering()
4562 pgdat->nbp_th_start = jiffies_to_msecs(jiffies); in reset_memory_tiering()
4574 return -EPERM; in sysctl_numa_balancing()
4643 return -EPERM; in sysctl_schedstats()
4714 * fork()/clone()-time setup:
4724 p->__state = TASK_NEW; in sched_fork()
4729 p->prio = current->normal_prio; in sched_fork()
4736 if (unlikely(p->sched_reset_on_fork)) { in sched_fork()
4738 p->policy = SCHED_NORMAL; in sched_fork()
4739 p->static_prio = NICE_TO_PRIO(0); in sched_fork()
4740 p->rt_priority = 0; in sched_fork()
4741 } else if (PRIO_TO_NICE(p->static_prio) < 0) in sched_fork()
4742 p->static_prio = NICE_TO_PRIO(0); in sched_fork()
4744 p->prio = p->normal_prio = p->static_prio; in sched_fork()
4746 p->se.custom_slice = 0; in sched_fork()
4747 p->se.slice = sysctl_sched_base_slice; in sched_fork()
4753 p->sched_reset_on_fork = 0; in sched_fork()
4756 if (dl_prio(p->prio)) in sched_fork()
4757 return -EAGAIN; in sched_fork()
4761 if (rt_prio(p->prio)) { in sched_fork()
4762 p->sched_class = &rt_sched_class; in sched_fork()
4764 } else if (task_should_scx(p->policy)) { in sched_fork()
4765 p->sched_class = &ext_sched_class; in sched_fork()
4768 p->sched_class = &fair_sched_class; in sched_fork()
4771 init_entity_runnable_average(&p->se); in sched_fork()
4776 memset(&p->sched_info, 0, sizeof(p->sched_info)); in sched_fork()
4779 p->on_cpu = 0; in sched_fork()
4783 plist_node_init(&p->pushable_tasks, MAX_PRIO); in sched_fork()
4784 RB_CLEAR_NODE(&p->pushable_dl_tasks); in sched_fork()
4794 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly in sched_cgroup_fork()
4797 raw_spin_lock_irqsave(&p->pi_lock, flags); in sched_cgroup_fork()
4801 tg = container_of(kargs->cset->subsys[cpu_cgrp_id], in sched_cgroup_fork()
4804 p->sched_task_group = tg; in sched_cgroup_fork()
4813 if (p->sched_class->task_fork) in sched_cgroup_fork()
4814 p->sched_class->task_fork(p); in sched_cgroup_fork()
4815 raw_spin_unlock_irqrestore(&p->pi_lock, flags); in sched_cgroup_fork()
4848 * wake_up_new_task - wake up a newly created task for the first time.
4860 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); in wake_up_new_task()
4861 WRITE_ONCE(p->__state, TASK_RUNNING); in wake_up_new_task()
4865 * - cpus_ptr can change in the fork path in wake_up_new_task()
4866 * - any previously selected CPU might disappear through hotplug in wake_up_new_task()
4869 * as we're not fully set-up yet. in wake_up_new_task()
4871 p->recent_used_cpu = task_cpu(p); in wake_up_new_task()
4883 if (p->sched_class->task_woken) { in wake_up_new_task()
4885 * Nothing relies on rq->lock after this, so it's fine to in wake_up_new_task()
4889 p->sched_class->task_woken(rq, p); in wake_up_new_task()
4913 * preempt_notifier_register - tell me when current is being preempted & rescheduled
4921 hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); in preempt_notifier_register()
4926 * preempt_notifier_unregister - no longer interested in preemption notifications
4933 hlist_del(¬ifier->link); in preempt_notifier_unregister()
4941 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) in __fire_sched_in_preempt_notifiers()
4942 notifier->ops->sched_in(notifier, raw_smp_processor_id()); in __fire_sched_in_preempt_notifiers()
4957 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) in __fire_sched_out_preempt_notifiers()
4958 notifier->ops->sched_out(notifier, next); in __fire_sched_out_preempt_notifiers()
4990 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and in prepare_task()
4993 WRITE_ONCE(next->on_cpu, 1); in prepare_task()
5002 * p->on_cpu is cleared, the task can be moved to a different CPU. We in finish_task()
5006 * In particular, the load of prev->state in finish_task_switch() must in finish_task()
5011 smp_store_release(&prev->on_cpu, 0); in finish_task()
5025 func = (void (*)(struct rq *))head->func; in do_balance_callbacks()
5026 next = head->next; in do_balance_callbacks()
5027 head->next = NULL; in do_balance_callbacks()
5041 * that queued it (only later, when it's safe to drop rq->lock again),
5045 * a single test, namely: rq->balance_callback == NULL.
5055 struct balance_callback *head = rq->balance_callback; in __splice_balance_callbacks()
5064 * in the same rq->lock section. in __splice_balance_callbacks()
5072 rq->balance_callback = NULL; in __splice_balance_callbacks()
5112 * of the scheduler it's an obvious special-case), so we in prepare_lock_switch()
5116 spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_); in prepare_lock_switch()
5119 rq_lockp(rq)->owner = next; in prepare_lock_switch()
5127 * fix up the runqueue lock - which gets 'carried over' from in finish_lock_switch()
5130 spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_); in finish_lock_switch()
5150 if (unlikely(current->kmap_ctrl.idx)) in kmap_local_sched_out()
5158 if (unlikely(current->kmap_ctrl.idx)) in kmap_local_sched_in()
5164 * prepare_task_switch - prepare to switch tasks
5191 * finish_task_switch - clean up after a task-switch
5197 * and do any other architecture-specific cleanup actions.
5210 __releases(rq->lock) in finish_task_switch()
5213 struct mm_struct *mm = rq->prev_mm; in finish_task_switch()
5223 * raw_spin_lock_irq(&rq->lock) // 2 in finish_task_switch()
5229 current->comm, current->pid, preempt_count())) in finish_task_switch()
5232 rq->prev_mm = NULL; in finish_task_switch()
5236 * If a task dies, then it sets TASK_DEAD in tsk->state and calls in finish_task_switch()
5240 * We must observe prev->state before clearing prev->on_cpu (in in finish_task_switch()
5242 * running on another CPU and we could race with its RUNNING -> DEAD in finish_task_switch()
5245 prev_state = READ_ONCE(prev->__state); in finish_task_switch()
5267 * schedule between user->kernel->user threads without passing though in finish_task_switch()
5269 * rq->curr, before returning to userspace, so provide them here: in finish_task_switch()
5271 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly in finish_task_switch()
5273 * - a sync_core for SYNC_CORE. in finish_task_switch()
5281 if (prev->sched_class->task_dead) in finish_task_switch()
5282 prev->sched_class->task_dead(prev); in finish_task_switch()
5294 * schedule_tail - first thing a freshly forked thread must call.
5298 __releases(rq->lock) in schedule_tail()
5304 * finish_task_switch() will drop rq->lock() and lower preempt_count in schedule_tail()
5312 if (current->set_child_tid) in schedule_tail()
5313 put_user(task_pid_vnr(current), current->set_child_tid); in schedule_tail()
5319 * context_switch - switch to the new MM and the new thread's register state.
5335 * kernel -> kernel lazy + transfer active in context_switch()
5336 * user -> kernel lazy + mmgrab_lazy_tlb() active in context_switch()
5338 * kernel -> user switch + mmdrop_lazy_tlb() active in context_switch()
5339 * user -> user switch in context_switch()
5344 if (!next->mm) { // to kernel in context_switch()
5345 enter_lazy_tlb(prev->active_mm, next); in context_switch()
5347 next->active_mm = prev->active_mm; in context_switch()
5348 if (prev->mm) // from user in context_switch()
5349 mmgrab_lazy_tlb(prev->active_mm); in context_switch()
5351 prev->active_mm = NULL; in context_switch()
5353 membarrier_switch_mm(rq, prev->active_mm, next->mm); in context_switch()
5356 * rq->curr / membarrier_switch_mm() and returning to userspace. in context_switch()
5359 * case 'prev->active_mm == next->mm' through in context_switch()
5362 switch_mm_irqs_off(prev->active_mm, next->mm, next); in context_switch()
5363 lru_gen_use_mm(next->mm); in context_switch()
5365 if (!prev->mm) { // from kernel in context_switch()
5367 rq->prev_mm = prev->active_mm; in context_switch()
5368 prev->active_mm = NULL; in context_switch()
5395 sum += cpu_rq(i)->nr_running; in nr_running()
5404 * preemption, thus the result might have a time-of-check-to-time-of-use
5407 * - from a non-preemptible section (of course)
5409 * - from a thread that is bound to a single CPU
5411 * - in a loop with very short iterations (e.g. a polling loop)
5415 return raw_rq()->nr_running == 1; in single_task_running()
5421 return cpu_rq(cpu)->nr_switches; in nr_context_switches_cpu()
5430 sum += cpu_rq(i)->nr_switches; in nr_context_switches()
5438 * for a CPU that has IO-wait which might not even end up running the task when
5444 return atomic_read(&cpu_rq(cpu)->nr_iowait); in nr_iowait_cpu()
5448 * IO-wait accounting, and how it's mostly bollocks (on SMP).
5450 * The idea behind IO-wait account is to account the idle time that we could
5452 * storage performance, we'd have a proportional reduction in IO-wait time.
5455 * idle time as IO-wait, because if the storage were faster, it could've been
5462 * CPU will have IO-wait accounted, while the other has regular idle. Even
5466 * This means, that when looking globally, the current IO-wait accounting on
5472 * blocked on. This means the per CPU IO-wait number is meaningless.
5490 * sched_exec - execve() is a valuable balancing opportunity, because at
5499 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) { in sched_exec()
5500 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC); in sched_exec()
5522 * and its field curr->exec_start; when called from task_sched_runtime(),
5529 struct sched_entity *curr = p->se.cfs_rq->curr; in prefetch_curr_exec_start()
5531 struct sched_entity *curr = task_rq(p)->cfs.curr; in prefetch_curr_exec_start()
5534 prefetch(&curr->exec_start); in prefetch_curr_exec_start()
5550 * 64-bit doesn't need locks to atomically read a 64-bit value. in task_sched_runtime()
5552 * Reading ->on_cpu is racy, but this is OK. in task_sched_runtime()
5557 * If we see ->on_cpu without ->on_rq, the task is leaving, and has in task_sched_runtime()
5560 if (!p->on_cpu || !task_on_rq_queued(p)) in task_sched_runtime()
5561 return p->se.sum_exec_runtime; in task_sched_runtime()
5566 * Must be ->curr _and_ ->on_rq. If dequeued, we would in task_sched_runtime()
5573 p->sched_class->update_curr(rq); in task_sched_runtime()
5575 ns = p->se.sum_exec_runtime; in task_sched_runtime()
5597 if (!rq->last_seen_need_resched_ns) { in cpu_resched_latency()
5598 rq->last_seen_need_resched_ns = now; in cpu_resched_latency()
5599 rq->ticks_without_resched = 0; in cpu_resched_latency()
5603 rq->ticks_without_resched++; in cpu_resched_latency()
5604 resched_latency = now - rq->last_seen_need_resched_ns; in cpu_resched_latency()
5650 donor = rq->donor; in sched_tick()
5661 donor->sched_class->task_tick(rq, donor, 0); in sched_tick()
5676 if (donor->flags & PF_WQ_WORKER) in sched_tick()
5681 rq->idle_balance = idle_cpu(cpu); in sched_tick()
5694 /* Values for ->state, see diagram below. */
5700 * State diagram for ->state:
5709 * +--TICK_SCHED_REMOTE_OFFLINING
5728 int cpu = twork->cpu; in sched_tick_remote()
5736 * statistics and checks timeslices in a time-independent way, regardless in sched_tick_remote()
5741 struct task_struct *curr = rq->curr; in sched_tick_remote()
5749 SCHED_WARN_ON(rq->curr != rq->donor); in sched_tick_remote()
5757 u64 delta = rq_clock_task(rq) - curr->se.exec_start; in sched_tick_remote()
5760 curr->sched_class->task_tick(rq, curr, 0); in sched_tick_remote()
5772 os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING); in sched_tick_remote()
5789 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING); in sched_tick_start()
5792 twork->cpu = cpu; in sched_tick_start()
5793 INIT_DELAYED_WORK(&twork->work, sched_tick_remote); in sched_tick_start()
5794 queue_delayed_work(system_unbound_wq, &twork->work, HZ); in sched_tick_start()
5810 /* There cannot be competing actions, but don't rely on stop-machine. */ in sched_tick_stop()
5811 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING); in sched_tick_stop()
5840 current->preempt_disable_ip = ip; in preempt_latency_start()
5861 PREEMPT_MASK - 10); in preempt_count_add()
5908 return p->preempt_disable_ip; in get_preempt_disable_ip()
5926 prev->comm, prev->pid, preempt_count()); in __schedule_bug()
5943 * Various schedule()-time debugging checks and statistics:
5956 if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) { in schedule_debug()
5957 printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n", in schedule_debug()
5958 prev->comm, prev->pid, prev->non_block_count); in schedule_debug()
5973 schedstat_inc(this_rq()->sched_count); in schedule_debug()
5979 const struct sched_class *start_class = prev->sched_class; in prev_balance()
5989 rq->scx.flags |= SCX_RQ_BAL_PENDING; in prev_balance()
5997 * that when we release the rq->lock the task is in the same in prev_balance()
5998 * state as before we took rq->lock. in prev_balance()
6004 if (class->balance && class->balance(rq, prev, rf)) in prev_balance()
6010 * Pick up the highest-prio task:
6018 rq->dl_server = NULL; in __pick_next_task()
6029 if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) && in __pick_next_task()
6030 rq->nr_running == rq->cfs.h_nr_queued)) { in __pick_next_task()
6049 if (class->pick_next_task) { in __pick_next_task()
6050 p = class->pick_next_task(rq, prev); in __pick_next_task()
6054 p = class->pick_task(rq); in __pick_next_task()
6068 return (task_rq(t)->idle == t); in is_task_rq_idle()
6073 return is_task_rq_idle(a) || (a->core_cookie == cookie); in cookie_equals()
6081 return a->core_cookie == b->core_cookie; in cookie_match()
6089 rq->dl_server = NULL; in pick_task()
6092 p = class->pick_task(rq); in pick_task()
6110 bool core_clock_updated = (rq == rq->core); in pick_next_task()
6121 /* Stopper task is switching into idle, no need core-wide selection. */ in pick_next_task()
6128 rq->core_pick = NULL; in pick_next_task()
6129 rq->core_dl_server = NULL; in pick_next_task()
6134 * If there were no {en,de}queues since we picked (IOW, the task in pick_next_task()
6138 * rq->core_pick can be NULL if no selection was made for a CPU because in pick_next_task()
6139 * it was either offline or went offline during a sibling's core-wide in pick_next_task()
6140 * selection. In this case, do a core-wide selection. in pick_next_task()
6142 if (rq->core->core_pick_seq == rq->core->core_task_seq && in pick_next_task()
6143 rq->core->core_pick_seq != rq->core_sched_seq && in pick_next_task()
6144 rq->core_pick) { in pick_next_task()
6145 WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq); in pick_next_task()
6147 next = rq->core_pick; in pick_next_task()
6148 rq->dl_server = rq->core_dl_server; in pick_next_task()
6149 rq->core_pick = NULL; in pick_next_task()
6150 rq->core_dl_server = NULL; in pick_next_task()
6157 need_sync = !!rq->core->core_cookie; in pick_next_task()
6160 rq->core->core_cookie = 0UL; in pick_next_task()
6161 if (rq->core->core_forceidle_count) { in pick_next_task()
6163 update_rq_clock(rq->core); in pick_next_task()
6168 rq->core->core_forceidle_start = 0; in pick_next_task()
6169 rq->core->core_forceidle_count = 0; in pick_next_task()
6170 rq->core->core_forceidle_occupation = 0; in pick_next_task()
6176 * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq in pick_next_task()
6178 * @task_seq guards the task state ({en,de}queues) in pick_next_task()
6185 rq->core->core_task_seq++; in pick_next_task()
6193 if (!next->core_cookie) { in pick_next_task()
6194 rq->core_pick = NULL; in pick_next_task()
6195 rq->core_dl_server = NULL; in pick_next_task()
6210 * Tie-break prio towards the current CPU in pick_next_task()
6220 if (i != cpu && (rq_i != rq->core || !core_clock_updated)) in pick_next_task()
6223 rq_i->core_pick = p = pick_task(rq_i); in pick_next_task()
6224 rq_i->core_dl_server = rq_i->dl_server; in pick_next_task()
6230 cookie = rq->core->core_cookie = max->core_cookie; in pick_next_task()
6238 p = rq_i->core_pick; in pick_next_task()
6248 rq_i->core_pick = p; in pick_next_task()
6249 rq_i->core_dl_server = NULL; in pick_next_task()
6251 if (p == rq_i->idle) { in pick_next_task()
6252 if (rq_i->nr_running) { in pick_next_task()
6253 rq->core->core_forceidle_count++; in pick_next_task()
6255 rq->core->core_forceidle_seq++; in pick_next_task()
6262 if (schedstat_enabled() && rq->core->core_forceidle_count) { in pick_next_task()
6263 rq->core->core_forceidle_start = rq_clock(rq->core); in pick_next_task()
6264 rq->core->core_forceidle_occupation = occ; in pick_next_task()
6267 rq->core->core_pick_seq = rq->core->core_task_seq; in pick_next_task()
6268 next = rq->core_pick; in pick_next_task()
6269 rq->core_sched_seq = rq->core->core_pick_seq; in pick_next_task()
6277 * NOTE: L1TF -- at this point we're no longer running the old task and in pick_next_task()
6279 * their task. This ensures there is no inter-sibling overlap between in pick_next_task()
6280 * non-matching user state. in pick_next_task()
6289 * picked for it. That's Ok - it will pick tasks for itself, in pick_next_task()
6292 if (!rq_i->core_pick) in pick_next_task()
6296 * Update for new !FI->FI transitions, or if continuing to be in !FI: in pick_next_task()
6303 if (!(fi_before && rq->core->core_forceidle_count)) in pick_next_task()
6304 task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count); in pick_next_task()
6306 rq_i->core_pick->core_occupation = occ; in pick_next_task()
6309 rq_i->core_pick = NULL; in pick_next_task()
6310 rq_i->core_dl_server = NULL; in pick_next_task()
6315 WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick)); in pick_next_task()
6317 if (rq_i->curr == rq_i->core_pick) { in pick_next_task()
6318 rq_i->core_pick = NULL; in pick_next_task()
6319 rq_i->core_dl_server = NULL; in pick_next_task()
6328 if (rq->core->core_forceidle_count && next == rq->idle) in pick_next_task()
6344 cookie = dst->core->core_cookie; in try_steal_cookie()
6348 if (dst->curr != dst->idle) in try_steal_cookie()
6356 if (p == src->core_pick || p == src->curr) in try_steal_cookie()
6362 if (p->core_occupation > dst->idle->core_occupation) in try_steal_cookie()
6430 if (!rq->core->core_cookie) in queue_core_balance()
6433 if (!rq->nr_running) /* not forced idle */ in queue_core_balance()
6436 queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance); in queue_core_balance()
6440 sched_core_lock(*_T->lock, &_T->flags),
6441 sched_core_unlock(*_T->lock, &_T->flags),
6452 WARN_ON_ONCE(rq->core != rq); in sched_core_cpu_starting()
6463 if (rq->core == rq) { in sched_core_cpu_starting()
6477 rq->core = core_rq; in sched_core_cpu_starting()
6479 WARN_ON_ONCE(rq->core != core_rq); in sched_core_cpu_starting()
6493 WARN_ON_ONCE(rq->core != rq); in sched_core_cpu_deactivate()
6498 if (rq->core != rq) in sched_core_cpu_deactivate()
6513 core_rq->core_task_seq = rq->core_task_seq; in sched_core_cpu_deactivate()
6514 core_rq->core_pick_seq = rq->core_pick_seq; in sched_core_cpu_deactivate()
6515 core_rq->core_cookie = rq->core_cookie; in sched_core_cpu_deactivate()
6516 core_rq->core_forceidle_count = rq->core_forceidle_count; in sched_core_cpu_deactivate()
6517 core_rq->core_forceidle_seq = rq->core_forceidle_seq; in sched_core_cpu_deactivate()
6518 core_rq->core_forceidle_occupation = rq->core_forceidle_occupation; in sched_core_cpu_deactivate()
6525 core_rq->core_forceidle_start = 0; in sched_core_cpu_deactivate()
6530 rq->core = core_rq; in sched_core_cpu_deactivate()
6538 if (rq->core != rq) in sched_core_cpu_dying()
6539 rq->core = rq; in sched_core_cpu_dying()
6562 #define SM_IDLE (-1)
6579 WRITE_ONCE(p->__state, TASK_RUNNING); in try_to_block_task()
6583 p->sched_contributes_to_load = in try_to_block_task()
6593 * prev_state = prev->state; if (p->on_rq && ...) in try_to_block_task()
6595 * p->on_rq = 0; smp_acquire__after_ctrl_dep(); in try_to_block_task()
6596 * p->state = TASK_WAKING in try_to_block_task()
6600 * After this, schedule() must not care about p->state any more. in try_to_block_task()
6620 * task to the run-queue and that's it.
6622 * Now, if the new task added to the run-queue preempts the current
6626 * - If the kernel is preemptible (CONFIG_PREEMPTION=y):
6628 * - in syscall or exception context, at the next outmost
6632 * - in IRQ context, return from interrupt-handler to
6635 * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
6638 * - cond_resched() call
6639 * - explicit schedule() call
6640 * - return from syscall or exception to user-space
6641 * - return from interrupt-handler to user-space
6661 prev = rq->curr; in __schedule()
6672 * Make sure that signal_pending_state()->signal_pending() below in __schedule()
6679 * LOCK rq->lock LOCK p->pi_state in __schedule()
6681 * if (signal_pending_state()) if (p->state & @state) in __schedule()
6684 * after coming from user-space, before storing to rq->curr; this in __schedule()
6692 rq->clock_update_flags <<= 1; in __schedule()
6694 rq->clock_update_flags = RQCF_UPDATED; in __schedule()
6696 switch_count = &prev->nivcsw; in __schedule()
6702 * We must load prev->state once (task_struct::state is volatile), such in __schedule()
6705 prev_state = READ_ONCE(prev->__state); in __schedule()
6708 if (!rq->nr_running && !scx_enabled()) { in __schedule()
6714 switch_count = &prev->nvcsw; in __schedule()
6723 rq->last_seen_need_resched_ns = 0; in __schedule()
6727 rq->nr_switches++; in __schedule()
6729 * RCU users of rcu_dereference(rq->curr) may not see in __schedule()
6732 RCU_INIT_POINTER(rq->curr, next); in __schedule()
6736 * rq->curr, before returning to user-space. in __schedule()
6740 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC, in __schedule()
6741 * RISC-V. switch_mm() relies on membarrier_arch_switch_mm() in __schedule()
6742 * on PowerPC and on RISC-V. in __schedule()
6743 * - finish_lock_switch() for weakly-ordered in __schedule()
6745 * - switch_to() for arm64 (weakly-ordered, spin_unlock in __schedule()
6751 * On RISC-V, this barrier pairing is also needed for the in __schedule()
6760 prev->se.sched_delayed); in __schedule()
6779 current->flags |= PF_NOFREEZE; in do_task_dead()
6784 /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */ in do_task_dead()
6796 * will use a blocking primitive -- which would lead to recursion. in sched_submit_work()
6800 task_flags = tsk->flags; in sched_submit_work()
6815 SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT); in sched_submit_work()
6821 blk_flush_plug(tsk->plug, true); in sched_submit_work()
6828 if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER | PF_BLOCK_TS)) { in sched_update_worker()
6829 if (tsk->flags & PF_BLOCK_TS) in sched_update_worker()
6831 if (tsk->flags & PF_WQ_WORKER) in sched_update_worker()
6833 else if (tsk->flags & PF_IO_WORKER) in sched_update_worker()
6852 lockdep_assert(!tsk->sched_rt_mutex); in schedule()
6864 * state (have scheduled out non-voluntarily) by making sure that all
6867 * (schedule out non-voluntarily).
6881 WARN_ON_ONCE(current->__state); in schedule_idle()
6907 * schedule_preempt_disabled - called with preemption disabled
6957 * This is the entry point to schedule() from in-kernel preemption
6963 * If there is a non-zero preempt_count or interrupts are disabled, in preempt_schedule()
6995 * preempt_schedule_notrace - preempt_schedule called by tracing
7099 return try_to_wake_up(curr->private, mode, wake_flags); in default_wake_function()
7123 * bit-fields. Since it's a local thing, use int. Keep the generic sounding
7131 lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1)); in rt_mutex_pre_schedule()
7137 lockdep_assert(current->sched_rt_mutex); in rt_mutex_schedule()
7144 lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0)); in rt_mutex_post_schedule()
7148 * rt_mutex_setprio - set the current priority of a task
7153 * not touch ->normal_prio like __setscheduler().
7166 /* XXX used to be waiter->prio, not waiter->task->prio */ in rt_mutex_setprio()
7167 prio = __rt_effective_prio(pi_task, p->normal_prio); in rt_mutex_setprio()
7172 if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio)) in rt_mutex_setprio()
7178 * Set under pi_lock && rq->lock, such that the value can be used under in rt_mutex_setprio()
7183 * ensure a task is de-boosted (pi_task is set to NULL) before the in rt_mutex_setprio()
7185 * points to a blocked task -- which guarantees the task is present. in rt_mutex_setprio()
7187 p->pi_top_task = pi_task; in rt_mutex_setprio()
7192 if (prio == p->prio && !dl_prio(prio)) in rt_mutex_setprio()
7196 * Idle task boosting is a no-no in general. There is one in rt_mutex_setprio()
7200 * the timer wheel base->lock on the CPU and another CPU wants in rt_mutex_setprio()
7207 if (unlikely(p == rq->idle)) { in rt_mutex_setprio()
7208 WARN_ON(p != rq->curr); in rt_mutex_setprio()
7209 WARN_ON(p->pi_blocked_on); in rt_mutex_setprio()
7214 oldprio = p->prio; in rt_mutex_setprio()
7219 prev_class = p->sched_class; in rt_mutex_setprio()
7220 next_class = __setscheduler_class(p->policy, prio); in rt_mutex_setprio()
7222 if (prev_class != next_class && p->se.sched_delayed) in rt_mutex_setprio()
7234 * 1. -rt task is running and holds mutex A in rt_mutex_setprio()
7235 * --> -dl task blocks on mutex A in rt_mutex_setprio()
7237 * 2. -dl task is running and holds mutex A in rt_mutex_setprio()
7238 * --> -dl task blocks on mutex A and could preempt the in rt_mutex_setprio()
7242 if (!dl_prio(p->normal_prio) || in rt_mutex_setprio()
7243 (pi_task && dl_prio(pi_task->prio) && in rt_mutex_setprio()
7244 dl_entity_preempt(&pi_task->dl, &p->dl))) { in rt_mutex_setprio()
7245 p->dl.pi_se = pi_task->dl.pi_se; in rt_mutex_setprio()
7248 p->dl.pi_se = &p->dl; in rt_mutex_setprio()
7252 p->dl.pi_se = &p->dl; in rt_mutex_setprio()
7257 p->dl.pi_se = &p->dl; in rt_mutex_setprio()
7259 p->rt.timeout = 0; in rt_mutex_setprio()
7262 p->sched_class = next_class; in rt_mutex_setprio()
7263 p->prio = prio; in rt_mutex_setprio()
7293 * In preemptible kernels, ->rcu_read_lock_nesting tells the tick in __cond_resched()
7294 * whether the current CPU is in an RCU read-side critical section, in __cond_resched()
7296 * in kernel context. In contrast, in non-preemptible kernels, in __cond_resched()
7297 * RCU readers leave no in-memory hints, which means that CPU-bound in __cond_resched()
7345 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
7348 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
7409 #include <linux/entry-common.h>
7421 * cond_resched <- __cond_resched
7422 * might_resched <- RET0
7423 * preempt_schedule <- NOP
7424 * preempt_schedule_notrace <- NOP
7425 * irqentry_exit_cond_resched <- NOP
7426 * dynamic_preempt_lazy <- false
7429 * cond_resched <- __cond_resched
7430 * might_resched <- __cond_resched
7431 * preempt_schedule <- NOP
7432 * preempt_schedule_notrace <- NOP
7433 * irqentry_exit_cond_resched <- NOP
7434 * dynamic_preempt_lazy <- false
7437 * cond_resched <- RET0
7438 * might_resched <- RET0
7439 * preempt_schedule <- preempt_schedule
7440 * preempt_schedule_notrace <- preempt_schedule_notrace
7441 * irqentry_exit_cond_resched <- irqentry_exit_cond_resched
7442 * dynamic_preempt_lazy <- false
7445 * cond_resched <- RET0
7446 * might_resched <- RET0
7447 * preempt_schedule <- preempt_schedule
7448 * preempt_schedule_notrace <- preempt_schedule_notrace
7449 * irqentry_exit_cond_resched <- irqentry_exit_cond_resched
7450 * dynamic_preempt_lazy <- true
7454 preempt_dynamic_undefined = -1,
7481 return -EINVAL; in sched_dynamic_mode()
7503 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in in __sched_dynamic_update()
7656 int old_iowait = current->in_iowait; in io_schedule_prepare()
7658 current->in_iowait = 1; in io_schedule_prepare()
7659 blk_flush_plug(current->plug, true); in io_schedule_prepare()
7665 current->in_iowait = token; in io_schedule_finish()
7669 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
7703 pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p)); in sched_show_task()
7711 ppid = task_pid_nr(rcu_dereference(p->real_parent)); in sched_show_task()
7713 pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d ppid:%-6d task_flags:0x%04x flags:0x%08lx\n", in sched_show_task()
7715 ppid, p->flags, read_task_thread_flags(p)); in sched_show_task()
7728 unsigned int state = READ_ONCE(p->__state); in state_filter_match()
7756 * reset the NMI-timeout, listing all files on a slow in show_state_filter()
7781 * init_idle - set up an idle thread for a given CPU
7799 raw_spin_lock_irqsave(&idle->pi_lock, flags); in init_idle()
7802 idle->__state = TASK_RUNNING; in init_idle()
7803 idle->se.exec_start = sched_clock(); in init_idle()
7806 * look like a proper per-CPU kthread. in init_idle()
7808 idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY; in init_idle()
7820 * holding rq->lock, the CPU isn't yet set to this CPU so the in init_idle()
7824 * use task_rq_lock() here and obtain the other rq->lock. in init_idle()
7832 rq->idle = idle; in init_idle()
7834 rcu_assign_pointer(rq->curr, idle); in init_idle()
7835 idle->on_rq = TASK_ON_RQ_QUEUED; in init_idle()
7837 idle->on_cpu = 1; in init_idle()
7840 raw_spin_unlock_irqrestore(&idle->pi_lock, flags); in init_idle()
7848 idle->sched_class = &idle_sched_class; in init_idle()
7852 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); in init_idle()
7884 if (p->flags & PF_NO_SETAFFINITY) in task_can_attach()
7885 ret = -EINVAL; in task_can_attach()
7902 if (!cpumask_test_cpu(target_cpu, p->cpus_ptr)) in migrate_task_to()
7903 return -EINVAL; in migrate_task_to()
7930 p->numa_preferred_nid = nid; in sched_setnuma()
7952 struct mm_struct *mm = current->active_mm; in sched_force_init_mm()
7957 current->active_mm = &init_mm; in sched_force_init_mm()
7974 raw_spin_lock_irq(&p->pi_lock); in __balance_push_cpu_stop()
7980 cpu = select_fallback_rq(rq->cpu, p); in __balance_push_cpu_stop()
7985 raw_spin_unlock_irq(&p->pi_lock); in __balance_push_cpu_stop()
7995 * Ensure we only run per-cpu kthreads once the CPU goes !active.
8002 struct task_struct *push_task = rq->curr; in balance_push()
8009 rq->balance_callback = &balance_push_callback; in balance_push()
8015 if (!cpu_dying(rq->cpu) || rq != this_rq()) in balance_push()
8019 * Both the cpu-hotplug and stop task are in this case and are in balance_push()
8036 if (!rq->nr_running && !rq_has_pinned_tasks(rq) && in balance_push()
8037 rcuwait_active(&rq->hotplug_wait)) { in balance_push()
8039 rcuwait_wake_up(&rq->hotplug_wait); in balance_push()
8047 * Temporarily drop rq->lock such that we can wake-up the stop task. in balance_push()
8052 stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task, in balance_push()
8070 WARN_ON_ONCE(rq->balance_callback); in balance_push_set()
8071 rq->balance_callback = &balance_push_callback; in balance_push_set()
8072 } else if (rq->balance_callback == &balance_push_callback) { in balance_push_set()
8073 rq->balance_callback = NULL; in balance_push_set()
8088 rcuwait_wait_event(&rq->hotplug_wait, in balance_hotplug_wait()
8089 rq->nr_running == 1 && !rq_has_pinned_tasks(rq), in balance_hotplug_wait()
8111 if (!rq->online) { in set_rq_online()
8114 cpumask_set_cpu(rq->cpu, rq->rd->online); in set_rq_online()
8115 rq->online = 1; in set_rq_online()
8118 if (class->rq_online) in set_rq_online()
8119 class->rq_online(rq); in set_rq_online()
8126 if (rq->online) { in set_rq_offline()
8131 if (class->rq_offline) in set_rq_offline()
8132 class->rq_offline(rq); in set_rq_offline()
8135 cpumask_clear_cpu(rq->cpu, rq->rd->online); in set_rq_offline()
8136 rq->online = 0; in set_rq_offline()
8145 if (rq->rd) { in sched_set_rq_online()
8146 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); in sched_set_rq_online()
8157 if (rq->rd) { in sched_set_rq_offline()
8158 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); in sched_set_rq_offline()
8187 if (--num_cpus_frozen) in cpuset_cpu_active()
8291 * preempt-disabled and RCU users of this state to go away such that in sched_cpu_deactivate()
8327 rq->calc_load_update = calc_load_update; in sched_rq_cpu_starting()
8363 * stable. We need to take the tear-down thread which is calling this into
8366 * Also see the comment "Global load-average calculations".
8383 printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running); in dump_rq_tasks()
8391 printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm); in dump_rq_tasks()
8404 if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) { in sched_cpu_dying()
8431 /* Move init over to a non-isolated CPU */ in sched_init_smp()
8434 current->flags &= ~PF_NO_SETAFFINITY; in sched_init_smp()
8549 raw_spin_lock_init(&rq->__lock); in sched_init()
8550 rq->nr_running = 0; in sched_init()
8551 rq->calc_load_active = 0; in sched_init()
8552 rq->calc_load_update = jiffies + LOAD_FREQ; in sched_init()
8553 init_cfs_rq(&rq->cfs); in sched_init()
8554 init_rt_rq(&rq->rt); in sched_init()
8555 init_dl_rq(&rq->dl); in sched_init()
8557 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); in sched_init()
8558 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; in sched_init()
8562 * In case of task-groups formed through the cgroup filesystem, it in sched_init()
8565 * root_task_group and its child task-groups in a fair manner, in sched_init()
8566 * based on each entity's (task or task-group's) weight in sched_init()
8567 * (se->load.weight). in sched_init()
8576 * directly in rq->cfs (i.e root_task_group->se[] = NULL). in sched_init()
8578 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); in sched_init()
8587 rq->rt.rt_runtime = global_rt_runtime(); in sched_init()
8588 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); in sched_init()
8591 rq->sd = NULL; in sched_init()
8592 rq->rd = NULL; in sched_init()
8593 rq->cpu_capacity = SCHED_CAPACITY_SCALE; in sched_init()
8594 rq->balance_callback = &balance_push_callback; in sched_init()
8595 rq->active_balance = 0; in sched_init()
8596 rq->next_balance = jiffies; in sched_init()
8597 rq->push_cpu = 0; in sched_init()
8598 rq->cpu = i; in sched_init()
8599 rq->online = 0; in sched_init()
8600 rq->idle_stamp = 0; in sched_init()
8601 rq->avg_idle = 2*sysctl_sched_migration_cost; in sched_init()
8602 rq->max_idle_balance_cost = sysctl_sched_migration_cost; in sched_init()
8604 INIT_LIST_HEAD(&rq->cfs_tasks); in sched_init()
8608 rq->last_blocked_load_update_tick = jiffies; in sched_init()
8609 atomic_set(&rq->nohz_flags, 0); in sched_init()
8611 INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq); in sched_init()
8614 rcuwait_init(&rq->hotplug_wait); in sched_init()
8618 atomic_set(&rq->nr_iowait, 0); in sched_init()
8622 rq->core = rq; in sched_init()
8623 rq->core_pick = NULL; in sched_init()
8624 rq->core_dl_server = NULL; in sched_init()
8625 rq->core_enabled = 0; in sched_init()
8626 rq->core_tree = RB_ROOT; in sched_init()
8627 rq->core_forceidle_count = 0; in sched_init()
8628 rq->core_forceidle_occupation = 0; in sched_init()
8629 rq->core_forceidle_start = 0; in sched_init()
8631 rq->core_cookie = 0UL; in sched_init()
8633 zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i)); in sched_init()
8647 * is dressed up as a per-CPU kthread and thus needs to play the part in sched_init()
8648 * if we want to avoid special-casing it in code that deals with per-CPU in sched_init()
8686 * Blocking primitives will set (and therefore destroy) current->state, in __might_sleep()
8690 WARN_ONCE(state != TASK_RUNNING && current->task_state_change, in __might_sleep()
8693 (void *)current->task_state_change, in __might_sleep()
8694 (void *)current->task_state_change); in __might_sleep()
8732 !is_idle_task(current) && !current->non_block_count) || in __might_resched()
8747 in_atomic(), irqs_disabled(), current->non_block_count, in __might_resched()
8748 current->pid, current->comm); in __might_resched()
8792 current->pid, current->comm); in __cant_sleep()
8824 current->pid, current->comm); in __cant_migrate()
8847 if (p->flags & PF_KTHREAD) in normalize_rt_tasks()
8850 p->se.exec_start = 0; in normalize_rt_tasks()
8851 schedstat_set(p->stats.wait_start, 0); in normalize_rt_tasks()
8852 schedstat_set(p->stats.sleep_start, 0); in normalize_rt_tasks()
8853 schedstat_set(p->stats.block_start, 0); in normalize_rt_tasks()
8877 * stopped - every CPU needs to be quiescent, and no scheduling
8884 * curr_task - return the current task for a given CPU.
8909 uclamp_se_set(&tg->uclamp_req[clamp_id], in alloc_uclamp_sched_group()
8911 tg->uclamp[clamp_id] = parent->uclamp[clamp_id]; in alloc_uclamp_sched_group()
8937 call_rcu(&tg->rcu, sched_free_group_rcu); in sched_unregister_group()
8947 return ERR_PTR(-ENOMEM); in sched_create_group()
8962 return ERR_PTR(-ENOMEM); in sched_create_group()
8970 list_add_rcu(&tg->list, &task_groups); in sched_online_group()
8975 tg->parent = parent; in sched_online_group()
8976 INIT_LIST_HEAD(&tg->children); in sched_online_group()
8977 list_add_rcu(&tg->siblings, &parent->children); in sched_online_group()
8993 call_rcu(&tg->rcu, sched_unregister_group_rcu); in sched_destroy_group()
9014 list_del_rcu(&tg->list); in sched_release_group()
9015 list_del_rcu(&tg->siblings); in sched_release_group()
9037 tsk->sched_task_group = group; in sched_change_group()
9040 if (tsk->sched_class->task_change_group) in sched_change_group()
9041 tsk->sched_class->task_change_group(tsk); in sched_change_group()
9051 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
9069 if (group == tsk->sched_task_group) in sched_move_task()
9112 return ERR_PTR(-ENOMEM); in cpu_cgroup_css_alloc()
9114 return &tg->css; in cpu_cgroup_css_alloc()
9121 struct task_group *parent = css_tg(css->parent); in cpu_cgroup_css_online()
9173 return -EINVAL; in cpu_cgroup_can_attach()
9209 uc_parent = css_tg(css)->parent in cpu_util_update_eff()
9210 ? css_tg(css)->parent->uclamp : NULL; in cpu_util_update_eff()
9214 eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value; in cpu_util_update_eff()
9226 uc_se = css_tg(css)->uclamp; in cpu_util_update_eff()
9276 req.ret = -ERANGE; in capacity_from_percent()
9304 if (tg->uclamp_req[clamp_id].value != req.util) in cpu_uclamp_write()
9305 uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false); in cpu_uclamp_write()
9311 tg->uclamp_pct[clamp_id] = req.percent; in cpu_uclamp_write()
9343 util_clamp = tg->uclamp_req[clamp_id].value; in cpu_uclamp_print()
9351 percent = tg->uclamp_pct[clamp_id]; in cpu_uclamp_print()
9373 return scale_load_down(tg->shares); in tg_weight()
9375 return sched_weight_from_cgroup(tg->scx_weight); in tg_weight()
9414 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in tg_set_cfs_bandwidth()
9417 return -EINVAL; in tg_set_cfs_bandwidth()
9425 return -EINVAL; in tg_set_cfs_bandwidth()
9433 return -EINVAL; in tg_set_cfs_bandwidth()
9439 return -EINVAL; in tg_set_cfs_bandwidth()
9443 return -EINVAL; in tg_set_cfs_bandwidth()
9446 * Prevent race between setting of cfs_rq->runtime_enabled and in tg_set_cfs_bandwidth()
9457 runtime_was_enabled = cfs_b->quota != RUNTIME_INF; in tg_set_cfs_bandwidth()
9459 * If we need to toggle cfs_bandwidth_used, off->on must occur in tg_set_cfs_bandwidth()
9460 * before making related changes, and on->off must occur afterwards in tg_set_cfs_bandwidth()
9465 scoped_guard (raw_spinlock_irq, &cfs_b->lock) { in tg_set_cfs_bandwidth()
9466 cfs_b->period = ns_to_ktime(period); in tg_set_cfs_bandwidth()
9467 cfs_b->quota = quota; in tg_set_cfs_bandwidth()
9468 cfs_b->burst = burst; in tg_set_cfs_bandwidth()
9481 struct cfs_rq *cfs_rq = tg->cfs_rq[i]; in tg_set_cfs_bandwidth()
9482 struct rq *rq = cfs_rq->rq; in tg_set_cfs_bandwidth()
9485 cfs_rq->runtime_enabled = runtime_enabled; in tg_set_cfs_bandwidth()
9486 cfs_rq->runtime_remaining = 0; in tg_set_cfs_bandwidth()
9488 if (cfs_rq->throttled) in tg_set_cfs_bandwidth()
9502 period = ktime_to_ns(tg->cfs_bandwidth.period); in tg_set_cfs_quota()
9503 burst = tg->cfs_bandwidth.burst; in tg_set_cfs_quota()
9509 return -EINVAL; in tg_set_cfs_quota()
9518 if (tg->cfs_bandwidth.quota == RUNTIME_INF) in tg_get_cfs_quota()
9519 return -1; in tg_get_cfs_quota()
9521 quota_us = tg->cfs_bandwidth.quota; in tg_get_cfs_quota()
9532 return -EINVAL; in tg_set_cfs_period()
9535 quota = tg->cfs_bandwidth.quota; in tg_set_cfs_period()
9536 burst = tg->cfs_bandwidth.burst; in tg_set_cfs_period()
9545 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period); in tg_get_cfs_period()
9556 return -EINVAL; in tg_set_cfs_burst()
9559 period = ktime_to_ns(tg->cfs_bandwidth.period); in tg_set_cfs_burst()
9560 quota = tg->cfs_bandwidth.quota; in tg_set_cfs_burst()
9569 burst_us = tg->cfs_bandwidth.burst; in tg_get_cfs_burst()
9625 if (tg == d->tg) { in normalize_cfs_quota()
9626 period = d->period; in normalize_cfs_quota()
9627 quota = d->quota; in normalize_cfs_quota()
9634 if (quota == RUNTIME_INF || quota == -1) in normalize_cfs_quota()
9643 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in tg_cfs_schedulable_down()
9644 s64 quota = 0, parent_quota = -1; in tg_cfs_schedulable_down()
9646 if (!tg->parent) { in tg_cfs_schedulable_down()
9649 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth; in tg_cfs_schedulable_down()
9652 parent_quota = parent_b->hierarchical_quota; in tg_cfs_schedulable_down()
9656 * always take the non-RUNTIME_INF min. On cgroup1, only in tg_cfs_schedulable_down()
9670 return -EINVAL; in tg_cfs_schedulable_down()
9673 cfs_b->hierarchical_quota = quota; in tg_cfs_schedulable_down()
9698 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in cpu_cfs_stat_show()
9700 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods); in cpu_cfs_stat_show()
9701 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled); in cpu_cfs_stat_show()
9702 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time); in cpu_cfs_stat_show()
9710 stats = __schedstats_from_se(tg->se[i]); in cpu_cfs_stat_show()
9711 ws += schedstat_val(stats->wait_sum); in cpu_cfs_stat_show()
9717 seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst); in cpu_cfs_stat_show()
9718 seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time); in cpu_cfs_stat_show()
9729 total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time); in throttled_time_self()
9775 return css_tg(css)->idle; in cpu_idle_read_s64()
9863 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in cpu_extra_stat_show()
9866 throttled_usec = cfs_b->throttled_time; in cpu_extra_stat_show()
9868 burst_usec = cfs_b->burst_time; in cpu_extra_stat_show()
9876 cfs_b->nr_periods, cfs_b->nr_throttled, in cpu_extra_stat_show()
9877 throttled_usec, cfs_b->nr_burst, burst_usec); in cpu_extra_stat_show()
9916 return -ERANGE; in cpu_weight_write_u64()
9935 delta = abs(sched_prio_to_weight[prio] - weight); in cpu_weight_nice_read_s64()
9941 return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO); in cpu_weight_nice_read_s64()
9951 return -ERANGE; in cpu_weight_nice_write_s64()
9953 idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO; in cpu_weight_nice_write_s64()
9983 return -EINVAL; in cpu_period_quota_parse()
9992 return -EINVAL; in cpu_period_quota_parse()
10011 u64 burst = tg->cfs_bandwidth.burst; in cpu_max_write()
10114 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
10115 * nice 1, it will get ~10% less CPU time than another CPU-bound task
10119 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
10125 /* -20 */ 88761, 71755, 56483, 46273, 36291,
10126 /* -15 */ 29154, 23254, 18705, 14949, 11916,
10127 /* -10 */ 9548, 7620, 6100, 4904, 3906,
10128 /* -5 */ 3121, 2501, 1991, 1586, 1277,
10136 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, pre-calculated.
10139 * pre-calculated inverse to speed up arithmetics by turning divisions
10143 /* -20 */ 48388, 59856, 76040, 92818, 118348,
10144 /* -15 */ 147320, 184698, 229616, 287308, 360437,
10145 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
10146 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
10161 * @cid_lock: Guarantee forward-progress of cid allocation.
10163 * Concurrency ID allocation within a bitmap is mostly lock-free. The cid_lock
10164 * is only used when contention is detected by the lock-free allocation so
10170 * @use_cid_lock: Select cid allocation behavior: lock-free vs spinlock.
10172 * When @use_cid_lock is 0, the cid allocation is lock-free. When contention is
10174 * serialized by @cid_lock until the allocation which detected contention
10181 * mm_cid remote-clear implements a lock-free algorithm to clear per-mm/cpu cid
10187 * (1) Remote-clear should _never_ mark a per-cpu cid UNSET when it is actively
10204 * per-mm/cpu cid value.
10206 * Let's introduce task (Y) which has task->mm == mm and task (N) which has
10207 * task->mm != mm for the rest of the discussion. There are two scheduler state
10210 * (TSA) Store to rq->curr with transition from (N) to (Y)
10212 * (TSB) Store to rq->curr with transition from (Y) to (N)
10214 * On the remote-clear side, there is one transition we care about:
10219 * sides (scheduler, remote-clear). It is always performed with a cmpxchg which
10233 * Context switch CS-1 Remote-clear
10234 * - store to rq->curr: (N)->(Y) (TSA) - cmpxchg to *pcpu_id to LAZY (TMA)
10236 * - switch_mm_cid()
10237 * - memory barrier (see switch_mm_cid()
10241 * - mm_cid_get (next)
10242 * - READ_ONCE(*pcpu_cid) - rcu_dereference(src_rq->curr)
10249 * still an active task on the cpu. Remote-clear will therefore not transition
10270 t->migrate_from_cpu = task_cpu(t); in sched_mm_cid_migrate_from()
10278 struct mm_struct *mm = t->mm; in __sched_mm_cid_migrate_from_fetch_cid()
10283 return -1; in __sched_mm_cid_migrate_from_fetch_cid()
10285 last_mm_cid = t->last_mm_cid; in __sched_mm_cid_migrate_from_fetch_cid()
10291 if (last_mm_cid == -1) in __sched_mm_cid_migrate_from_fetch_cid()
10292 return -1; in __sched_mm_cid_migrate_from_fetch_cid()
10293 src_cid = READ_ONCE(src_pcpu_cid->cid); in __sched_mm_cid_migrate_from_fetch_cid()
10295 return -1; in __sched_mm_cid_migrate_from_fetch_cid()
10303 src_task = rcu_dereference(src_rq->curr); in __sched_mm_cid_migrate_from_fetch_cid()
10304 if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) { in __sched_mm_cid_migrate_from_fetch_cid()
10305 t->last_mm_cid = -1; in __sched_mm_cid_migrate_from_fetch_cid()
10306 return -1; in __sched_mm_cid_migrate_from_fetch_cid()
10319 struct mm_struct *mm = t->mm; in __sched_mm_cid_migrate_from_try_steal_cid()
10322 if (src_cid == -1) in __sched_mm_cid_migrate_from_try_steal_cid()
10323 return -1; in __sched_mm_cid_migrate_from_try_steal_cid()
10330 if (!try_cmpxchg(&src_pcpu_cid->cid, &src_cid, lazy_cid)) in __sched_mm_cid_migrate_from_try_steal_cid()
10331 return -1; in __sched_mm_cid_migrate_from_try_steal_cid()
10334 * The implicit barrier after cmpxchg per-mm/cpu cid before loading in __sched_mm_cid_migrate_from_try_steal_cid()
10335 * rq->curr->mm matches the scheduler barrier in context_switch() in __sched_mm_cid_migrate_from_try_steal_cid()
10336 * between store to rq->curr and load of prev and next task's in __sched_mm_cid_migrate_from_try_steal_cid()
10337 * per-mm/cpu cid. in __sched_mm_cid_migrate_from_try_steal_cid()
10339 * The implicit barrier after cmpxchg per-mm/cpu cid before loading in __sched_mm_cid_migrate_from_try_steal_cid()
10340 * rq->curr->mm_cid_active matches the barrier in in __sched_mm_cid_migrate_from_try_steal_cid()
10342 * sched_mm_cid_after_execve() between store to t->mm_cid_active and in __sched_mm_cid_migrate_from_try_steal_cid()
10343 * load of per-mm/cpu cid. in __sched_mm_cid_migrate_from_try_steal_cid()
10348 * the lazy-put flag, this task will be responsible for transitioning in __sched_mm_cid_migrate_from_try_steal_cid()
10349 * from lazy-put flag set to MM_CID_UNSET. in __sched_mm_cid_migrate_from_try_steal_cid()
10352 src_task = rcu_dereference(src_rq->curr); in __sched_mm_cid_migrate_from_try_steal_cid()
10353 if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) { in __sched_mm_cid_migrate_from_try_steal_cid()
10358 t->last_mm_cid = -1; in __sched_mm_cid_migrate_from_try_steal_cid()
10359 return -1; in __sched_mm_cid_migrate_from_try_steal_cid()
10366 if (!try_cmpxchg(&src_pcpu_cid->cid, &lazy_cid, MM_CID_UNSET)) in __sched_mm_cid_migrate_from_try_steal_cid()
10367 return -1; in __sched_mm_cid_migrate_from_try_steal_cid()
10368 WRITE_ONCE(src_pcpu_cid->recent_cid, MM_CID_UNSET); in __sched_mm_cid_migrate_from_try_steal_cid()
10380 struct mm_struct *mm = t->mm; in sched_mm_cid_migrate_to()
10389 src_cpu = t->migrate_from_cpu; in sched_mm_cid_migrate_to()
10390 if (src_cpu == -1) { in sched_mm_cid_migrate_to()
10391 t->last_mm_cid = -1; in sched_mm_cid_migrate_to()
10404 * greater or equal to the number of allowed CPUs, because user-space in sched_mm_cid_migrate_to()
10408 dst_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(dst_rq)); in sched_mm_cid_migrate_to()
10409 dst_cid_is_set = !mm_cid_is_unset(READ_ONCE(dst_pcpu_cid->cid)) || in sched_mm_cid_migrate_to()
10410 !mm_cid_is_unset(READ_ONCE(dst_pcpu_cid->recent_cid)); in sched_mm_cid_migrate_to()
10411 if (dst_cid_is_set && atomic_read(&mm->mm_users) >= READ_ONCE(mm->nr_cpus_allowed)) in sched_mm_cid_migrate_to()
10413 src_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, src_cpu); in sched_mm_cid_migrate_to()
10416 if (src_cid == -1) in sched_mm_cid_migrate_to()
10420 if (src_cid == -1) in sched_mm_cid_migrate_to()
10428 WRITE_ONCE(dst_pcpu_cid->cid, src_cid); in sched_mm_cid_migrate_to()
10429 WRITE_ONCE(dst_pcpu_cid->recent_cid, src_cid); in sched_mm_cid_migrate_to()
10439 cid = READ_ONCE(pcpu_cid->cid); in sched_mm_cid_remote_clear()
10450 if (!try_cmpxchg(&pcpu_cid->cid, &cid, lazy_cid)) in sched_mm_cid_remote_clear()
10454 * The implicit barrier after cmpxchg per-mm/cpu cid before loading in sched_mm_cid_remote_clear()
10455 * rq->curr->mm matches the scheduler barrier in context_switch() in sched_mm_cid_remote_clear()
10456 * between store to rq->curr and load of prev and next task's in sched_mm_cid_remote_clear()
10457 * per-mm/cpu cid. in sched_mm_cid_remote_clear()
10459 * The implicit barrier after cmpxchg per-mm/cpu cid before loading in sched_mm_cid_remote_clear()
10460 * rq->curr->mm_cid_active matches the barrier in in sched_mm_cid_remote_clear()
10462 * sched_mm_cid_after_execve() between store to t->mm_cid_active and in sched_mm_cid_remote_clear()
10463 * load of per-mm/cpu cid. in sched_mm_cid_remote_clear()
10468 * the lazy-put flag, that task will be responsible for transitioning in sched_mm_cid_remote_clear()
10469 * from lazy-put flag set to MM_CID_UNSET. in sched_mm_cid_remote_clear()
10472 t = rcu_dereference(rq->curr); in sched_mm_cid_remote_clear()
10473 if (READ_ONCE(t->mm_cid_active) && t->mm == mm) in sched_mm_cid_remote_clear()
10483 if (try_cmpxchg(&pcpu_cid->cid, &lazy_cid, MM_CID_UNSET)) in sched_mm_cid_remote_clear()
10496 * rq->clock load is racy on 32-bit but one spurious clear once in a in sched_mm_cid_remote_clear_old()
10499 rq_clock = READ_ONCE(rq->clock); in sched_mm_cid_remote_clear_old()
10500 pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu); in sched_mm_cid_remote_clear_old()
10508 curr = rcu_dereference(rq->curr); in sched_mm_cid_remote_clear_old()
10509 if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) { in sched_mm_cid_remote_clear_old()
10510 WRITE_ONCE(pcpu_cid->time, rq_clock); in sched_mm_cid_remote_clear_old()
10515 if (rq_clock < pcpu_cid->time + SCHED_MM_CID_PERIOD_NS) in sched_mm_cid_remote_clear_old()
10526 pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu); in sched_mm_cid_remote_clear_weight()
10527 cid = READ_ONCE(pcpu_cid->cid); in sched_mm_cid_remote_clear_weight()
10543 work->next = work; /* Prevent double-add */ in task_mm_cid_work()
10544 if (t->flags & PF_EXITING) in task_mm_cid_work()
10546 mm = t->mm; in task_mm_cid_work()
10549 old_scan = READ_ONCE(mm->mm_cid_next_scan); in task_mm_cid_work()
10554 res = cmpxchg(&mm->mm_cid_next_scan, old_scan, next_scan); in task_mm_cid_work()
10562 if (!try_cmpxchg(&mm->mm_cid_next_scan, &old_scan, next_scan)) in task_mm_cid_work()
10579 struct mm_struct *mm = t->mm; in init_sched_mm_cid()
10583 mm_users = atomic_read(&mm->mm_users); in init_sched_mm_cid()
10585 mm->mm_cid_next_scan = jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY); in init_sched_mm_cid()
10587 t->cid_work.next = &t->cid_work; /* Protect against double add */ in init_sched_mm_cid()
10588 init_task_work(&t->cid_work, task_mm_cid_work); in init_sched_mm_cid()
10593 struct callback_head *work = &curr->cid_work; in task_tick_mm_cid()
10596 if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) || in task_tick_mm_cid()
10597 work->next != work) in task_tick_mm_cid()
10599 if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan))) in task_tick_mm_cid()
10608 struct mm_struct *mm = t->mm; in sched_mm_cid_exit_signals()
10618 WRITE_ONCE(t->mm_cid_active, 0); in sched_mm_cid_exit_signals()
10620 * Store t->mm_cid_active before loading per-mm/cpu cid. in sched_mm_cid_exit_signals()
10625 t->last_mm_cid = t->mm_cid = -1; in sched_mm_cid_exit_signals()
10630 struct mm_struct *mm = t->mm; in sched_mm_cid_before_execve()
10640 WRITE_ONCE(t->mm_cid_active, 0); in sched_mm_cid_before_execve()
10642 * Store t->mm_cid_active before loading per-mm/cpu cid. in sched_mm_cid_before_execve()
10647 t->last_mm_cid = t->mm_cid = -1; in sched_mm_cid_before_execve()
10652 struct mm_struct *mm = t->mm; in sched_mm_cid_after_execve()
10662 WRITE_ONCE(t->mm_cid_active, 1); in sched_mm_cid_after_execve()
10664 * Store t->mm_cid_active before loading per-mm/cpu cid. in sched_mm_cid_after_execve()
10668 t->last_mm_cid = t->mm_cid = mm_cid_get(rq, t, mm); in sched_mm_cid_after_execve()
10675 WARN_ON_ONCE(!t->mm || t->mm_cid != -1); in sched_mm_cid_fork()
10676 t->mm_cid_active = 1; in sched_mm_cid_fork()
10696 if (ctx->queued) in sched_deq_and_put_task()
10698 if (ctx->running) in sched_deq_and_put_task()
10704 struct rq *rq = task_rq(ctx->p); in sched_enq_and_set_task()
10708 if (ctx->queued) in sched_enq_and_set_task()
10709 enqueue_task(rq, ctx->p, ctx->queue_flags | ENQUEUE_NOCLOCK); in sched_enq_and_set_task()
10710 if (ctx->running) in sched_enq_and_set_task()
10711 set_next_task(rq, ctx->p); in sched_enq_and_set_task()