Lines Matching +full:powered +full:- +full:remotely

1 // SPDX-License-Identifier: GPL-2.0-only
7 * Copyright (C) 1991-2002 Linus Torvalds
8 * Copyright (C) 1998-2024 Ingo Molnar, Red Hat
75 # include <linux/irq-entry-common.h>
99 #include "../../io_uring/io-wq.h"
193 if (p->sched_class == &stop_sched_class) /* trumps deadline */ in __task_prio()
194 return -2; in __task_prio()
196 if (p->dl_server) in __task_prio()
197 return -1; /* deadline */ in __task_prio()
199 if (rt_or_dl_prio(p->prio)) in __task_prio()
200 return p->prio; /* [-1, 99] */ in __task_prio()
202 if (p->sched_class == &idle_sched_class) in __task_prio()
225 if (-pa < -pb) in prio_less()
228 if (-pb < -pa) in prio_less()
231 if (pa == -1) { /* dl_prio() doesn't work because of stop_class above */ in prio_less()
234 a_dl = &a->dl; in prio_less()
237 * __task_prio() can return -1 (for DL) even for those. In that in prio_less()
240 if (a->dl_server) in prio_less()
241 a_dl = a->dl_server; in prio_less()
243 b_dl = &b->dl; in prio_less()
244 if (b->dl_server) in prio_less()
245 b_dl = b->dl_server; in prio_less()
247 return !dl_time_before(a_dl->deadline, b_dl->deadline); in prio_less()
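
To make the encoding above concrete: __task_prio() maps the stop class to -2, deadline to -1 and RT to the task's prio, so a smaller value means a more important task, and prio_less() compares the negated values before tie-breaking two deadline tasks on the earlier absolute deadline. Below is a reduced user-space sketch of that comparison; the struct and names are invented for the example, and it deliberately omits the dl_server and force-idle handling as well as dl_time_before()'s wraparound-safe comparison.

#include <stdbool.h>
#include <stdint.h>

/* Simplified task model, for illustration only. */
struct xtask {
        int prio;               /* -2 stop, -1 deadline, [0..99] RT prio */
        uint64_t dl_deadline;   /* absolute deadline, meaningful for -1 only */
};

/* True if @a ranks below @b: a larger encoded prio value loses, and two
 * deadline tasks tie-break on the earlier absolute deadline. */
static bool xprio_less(const struct xtask *a, const struct xtask *b)
{
        if (-a->prio < -b->prio)        /* a's encoded prio is larger: a loses */
                return true;
        if (-b->prio < -a->prio)
                return false;
        if (a->prio == -1)              /* both deadline class */
                return a->dl_deadline >= b->dl_deadline;
        return false;                   /* equal prio: no further ordering here */
}
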
264 if (a->core_cookie < b->core_cookie) in __sched_core_less()
267 if (a->core_cookie > b->core_cookie) in __sched_core_less()
271 if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count)) in __sched_core_less()
289 if (cookie < p->core_cookie) in rb_sched_core_cmp()
290 return -1; in rb_sched_core_cmp()
292 if (cookie > p->core_cookie) in rb_sched_core_cmp()
300 if (p->se.sched_delayed) in sched_core_enqueue()
303 rq->core->core_task_seq++; in sched_core_enqueue()
305 if (!p->core_cookie) in sched_core_enqueue()
308 rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less); in sched_core_enqueue()
313 if (p->se.sched_delayed) in sched_core_dequeue()
316 rq->core->core_task_seq++; in sched_core_dequeue()
319 rb_erase(&p->core_node, &rq->core_tree); in sched_core_dequeue()
320 RB_CLEAR_NODE(&p->core_node); in sched_core_dequeue()
326 * and re-examine whether the core is still in forced idle state. in sched_core_dequeue()
328 if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 && in sched_core_dequeue()
329 rq->core->core_forceidle_count && rq->curr == rq->idle) in sched_core_dequeue()
335 if (p->sched_class->task_is_throttled) in sched_task_is_throttled()
336 return p->sched_class->task_is_throttled(p, cpu); in sched_task_is_throttled()
343 struct rb_node *node = &p->core_node; in sched_core_next()
352 if (p->core_cookie != cookie) in sched_core_next()
361 * Find left-most (aka, highest priority) and unthrottled task matching @cookie.
369 node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp); in sched_core_find()
374 if (!sched_task_is_throttled(p, rq->cpu)) in sched_core_find()
404 raw_spin_lock_nested(&cpu_rq(t)->__lock, i++); in sched_core_lock()
413 raw_spin_unlock(&cpu_rq(t)->__lock); in sched_core_unlock()
434 cpu_rq(t)->core_enabled = enabled; in __sched_core_flip()
436 cpu_rq(cpu)->core->core_forceidle_start = 0; in __sched_core_flip()
447 cpu_rq(cpu)->core_enabled = enabled; in __sched_core_flip()
457 WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree)); in sched_core_assert_empty()
512 if (!atomic_add_unless(&sched_core_count, -1, 1)) in sched_core_put()
539 * p->pi_lock
540 * rq->lock
541 * hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
543 * rq1->lock
544 * rq2->lock where: rq1 < rq2
548 * Normal scheduling state is serialized by rq->lock. __schedule() takes the
549 * local CPU's rq->lock, it optionally removes the task from the runqueue and
553 * Task enqueue is also under rq->lock, possibly taken from another CPU.
559 * complicated to avoid having to take two rq->locks.
563 * System-calls and anything external will use task_rq_lock() which acquires
564 * both p->pi_lock and rq->lock. As a consequence the state they change is
567 * - sched_setaffinity()/
568 * set_cpus_allowed_ptr(): p->cpus_ptr, p->nr_cpus_allowed
569 * - set_user_nice(): p->se.load, p->*prio
570 * - __sched_setscheduler(): p->sched_class, p->policy, p->*prio,
571 * p->se.load, p->rt_priority,
572 * p->dl.dl_{runtime, deadline, period, flags, bw, density}
573 * - sched_setnuma(): p->numa_preferred_nid
574 * - sched_move_task(): p->sched_task_group
575 * - uclamp_update_active() p->uclamp*
577 * p->state <- TASK_*:
581 * try_to_wake_up(). This latter uses p->pi_lock to serialize against
584 * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
587 * rq->lock. Non-zero indicates the task is runnable, the special
589 * rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
591 * Additionally it is possible to be ->on_rq but still be considered not
592 * runnable when p->se.sched_delayed is true. These tasks are on the runqueue
596 * p->on_cpu <- { 0, 1 }:
599 * set before p is scheduled-in and cleared after p is scheduled-out, both
600 * under rq->lock. Non-zero indicates the task is running on its CPU.
603 * CPU to have ->on_cpu = 1 at the same time. ]
607 * - Don't call set_task_cpu() on a blocked task:
612 * - for try_to_wake_up(), called under p->pi_lock:
614 * This allows try_to_wake_up() to only take one rq->lock, see its comment.
616 * - for migration called under rq->lock:
622 * - for migration called under double_rq_lock():
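
The ordering rules above (p->pi_lock before rq->lock, and rq1->lock before rq2->lock when rq1 < rq2) are what keep the wakeup, migration and balancing paths free of ABBA deadlocks. Here is a minimal user-space illustration of the double-runqueue rule, with pthread mutexes standing in for the raw rq spinlocks and invented names; the kernel's double_rq_lock() additionally deals with IRQs and lockdep annotations.

#include <pthread.h>

struct rq {
        int cpu;
        pthread_mutex_t lock;
};

/* Lock two runqueues in a globally consistent order: the rq with the
 * smaller CPU number is always taken first, so two contexts locking the
 * same pair can never end up in an ABBA deadlock. */
static void double_rq_lock_sketch(struct rq *rq1, struct rq *rq2)
{
        if (rq1 == rq2) {
                pthread_mutex_lock(&rq1->lock);
                return;
        }
        if (rq2->cpu < rq1->cpu) {
                struct rq *tmp = rq1;
                rq1 = rq2;
                rq2 = tmp;
        }
        pthread_mutex_lock(&rq1->lock);
        pthread_mutex_lock(&rq2->lock);
}

static void double_rq_unlock_sketch(struct rq *rq1, struct rq *rq2)
{
        pthread_mutex_unlock(&rq1->lock);
        if (rq1 != rq2)
                pthread_mutex_unlock(&rq2->lock);
}

The same fixed-order rule is what the comment above expresses for p->pi_lock versus rq->lock: every path that needs both takes pi_lock first.
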
638 raw_spin_lock_nested(&rq->__lock, subclass); in raw_spin_rq_lock_nested()
664 ret = raw_spin_trylock(&rq->__lock); in raw_spin_rq_trylock()
686 * double_rq_lock - safely lock two runqueues
703 * __task_rq_lock - lock the rq @p resides on.
706 __acquires(rq->lock) in __task_rq_lock()
710 lockdep_assert_held(&p->pi_lock); in __task_rq_lock()
727 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
730 __acquires(p->pi_lock) in task_rq_lock()
731 __acquires(rq->lock) in task_rq_lock()
736 raw_spin_lock_irqsave(&p->pi_lock, rf->flags); in task_rq_lock()
742 * ACQUIRE (rq->lock) in task_rq_lock()
743 * [S] ->on_rq = MIGRATING [L] rq = task_rq() in task_rq_lock()
744 * WMB (__set_task_cpu()) ACQUIRE (rq->lock); in task_rq_lock()
745 * [S] ->cpu = new_cpu [L] task_rq() in task_rq_lock()
746 * [L] ->on_rq in task_rq_lock()
747 * RELEASE (rq->lock) in task_rq_lock()
750 * the old rq->lock will fully serialize against the stores. in task_rq_lock()
761 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); in task_rq_lock()
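
The task_rq_lock() fragment above is the canonical retry pattern: take p->pi_lock, look up the task's runqueue, lock it, then re-check that the task is still on that runqueue; if it migrated in the meantime, drop the rq lock and try again. A simplified user-space sketch of the same loop follows (names are made up; the real code also rechecks the migrating state, disables IRQs and inserts the memory barriers described in the comment).

#include <pthread.h>

struct rq { pthread_mutex_t lock; };

struct task {
        pthread_mutex_t pi_lock;
        struct rq *rq;          /* stands in for task_rq(p); updated by migration */
};

/* Returns with both p->pi_lock and the task's current rq lock held. */
static struct rq *task_rq_lock_sketch(struct task *p)
{
        struct rq *rq;

        pthread_mutex_lock(&p->pi_lock);
        for (;;) {
                rq = p->rq;                     /* snapshot task_rq(p) */
                pthread_mutex_lock(&rq->lock);
                if (rq == p->rq)                /* still on the same rq? */
                        return rq;
                /* The task migrated while we were acquiring: retry. */
                pthread_mutex_unlock(&rq->lock);
        }
}
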
769 * RQ-clock updating methods:
782 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; in update_rq_clock_task()
789 * When this happens, we stop ->clock_task and only update the in update_rq_clock_task()
791 * update will consume the rest. This ensures ->clock_task is in update_rq_clock_task()
794 * It does however cause some slight mis-attribution of {soft,}IRQ in update_rq_clock_task()
796 * the current rq->clock timestamp, except that would require using in update_rq_clock_task()
802 rq->prev_irq_time += irq_delta; in update_rq_clock_task()
803 delta -= irq_delta; in update_rq_clock_task()
804 delayacct_irq(rq->curr, irq_delta); in update_rq_clock_task()
812 steal -= rq->prev_steal_time_rq; in update_rq_clock_task()
817 rq->prev_steal_time_rq = prev_steal; in update_rq_clock_task()
818 delta -= steal; in update_rq_clock_task()
822 rq->clock_task += delta; in update_rq_clock_task()
838 if (rq->clock_update_flags & RQCF_ACT_SKIP) in update_rq_clock()
842 WARN_ON_ONCE(rq->clock_update_flags & RQCF_UPDATED); in update_rq_clock()
843 rq->clock_update_flags |= RQCF_UPDATED; in update_rq_clock()
848 delta = clock - rq->clock; in update_rq_clock()
851 rq->clock += delta; in update_rq_clock()
858 * Use HR-timers to deliver accurate preemption points.
863 if (hrtimer_active(&rq->hrtick_timer)) in hrtick_clear()
864 hrtimer_cancel(&rq->hrtick_timer); in hrtick_clear()
868 * High-resolution timer tick.
880 rq->donor->sched_class->task_tick(rq, rq->curr, 1); in hrtick()
888 struct hrtimer *timer = &rq->hrtick_timer; in __hrtick_restart()
889 ktime_t time = rq->hrtick_time; in __hrtick_restart()
910 * called with rq->lock held and IRQs disabled
914 struct hrtimer *timer = &rq->hrtick_timer; in hrtick_start()
922 rq->hrtick_time = ktime_add_ns(hrtimer_cb_get_time(timer), delta); in hrtick_start()
927 smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd); in hrtick_start()
932 INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq); in hrtick_rq_init()
933 hrtimer_setup(&rq->hrtick_timer, hrtick, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); in hrtick_rq_init()
967 return !(fetch_or(&ti->flags, 1 << tif) & _TIF_POLLING_NRFLAG); in set_nr_and_not_polling()
979 typeof(ti->flags) val = READ_ONCE(ti->flags); in set_nr_if_polling()
986 } while (!try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED)); in set_nr_if_polling()
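
set_nr_if_polling() above only sets the resched flag when the remote CPU is polling its flags word, which lets the caller skip the IPI because the poller will notice the bit on its own. A rough C11 analogue of that compare-and-swap loop follows; the flag names are placeholders rather than the kernel's _TIF_* values.

#include <stdatomic.h>
#include <stdbool.h>

#define FLAG_NEED_RESCHED  (1u << 0)    /* placeholder for TIF_NEED_RESCHED   */
#define FLAG_POLLING       (1u << 1)    /* placeholder for TIF_POLLING_NRFLAG */

/* Set NEED_RESCHED only if the remote context is polling its flags word;
 * in that case the poller will notice the bit and no IPI is required.
 * Returns true if NEED_RESCHED is (now) set and the target was polling. */
static bool set_need_resched_if_polling(_Atomic unsigned int *flags)
{
        unsigned int val = atomic_load_explicit(flags, memory_order_relaxed);

        do {
                if (!(val & FLAG_POLLING))
                        return false;           /* must send an IPI instead */
                if (val & FLAG_NEED_RESCHED)
                        return true;            /* already set by someone else */
        } while (!atomic_compare_exchange_weak(flags, &val,
                                               val | FLAG_NEED_RESCHED));

        return true;
}
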
1006 struct wake_q_node *node = &task->wake_q; in __wake_q_add()
1009 * Atomically grab the task, if ->wake_q is !nil already it means in __wake_q_add()
1017 if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL))) in __wake_q_add()
1023 *head->lastp = node; in __wake_q_add()
1024 head->lastp = &node->next; in __wake_q_add()
1029 * wake_q_add() - queue a wakeup for 'later' waking.
1037 * This function must be used as-if it were wake_up_process(); IOW the task
1047 * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
1055 * This function must be used as-if it were wake_up_process(); IOW the task
1058 * This function is essentially a task-safe equivalent to wake_q_add(). Callers
1071 struct wake_q_node *node = head->first; in wake_up_q()
1077 node = node->next; in wake_up_q()
1079 WRITE_ONCE(task->wake_q.next, NULL); in wake_up_q()
1080 /* Task can safely be re-inserted now. */ in wake_up_q()
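
The wake_q code above lets a lock-holding context defer wakeups: __wake_q_add() claims a task by atomically swinging its embedded node's next pointer from NULL to a tail sentinel (so a task can be queued at most once), the head itself is context-local, and wake_up_q() later walks the chain, re-arms each node and issues the wakeup. A condensed C11 sketch of that pattern follows; wake_task() is a stand-in for wake_up_process(), and the memory-barrier subtleties noted in the kernel comment are not reproduced.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct wq_node { _Atomic(struct wq_node *) next; };
struct xtask   { struct wq_node wake_q; };      /* ->next starts out NULL */

#define WQ_TAIL ((struct wq_node *)0x1)         /* sentinel, like WAKE_Q_TAIL */

struct wq_head {
        _Atomic(struct wq_node *) first;
        _Atomic(struct wq_node *) *lastp;
};

static void wake_task(struct xtask *p)          /* wake_up_process() stand-in */
{
        (void)p;
}

static void wq_init(struct wq_head *h)
{
        h->first = WQ_TAIL;
        h->lastp = &h->first;
}

/* Claim @p for this queue; false means it is already queued somewhere. */
static bool wq_add(struct wq_head *h, struct xtask *p)
{
        struct wq_node *expected = NULL;

        if (!atomic_compare_exchange_strong(&p->wake_q.next, &expected, WQ_TAIL))
                return false;

        /* The head is private to this context, so plain appends suffice. */
        *h->lastp = &p->wake_q;
        h->lastp = &p->wake_q.next;
        return true;
}

static void wq_wake_all(struct wq_head *h)
{
        struct wq_node *node = h->first;

        while (node != WQ_TAIL) {
                struct xtask *p = container_of(node, struct xtask, wake_q);

                node = p->wake_q.next;
                p->wake_q.next = NULL;  /* re-arm: the task may be queued again */
                wake_task(p);
        }
}
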
1092 * resched_curr - mark rq's current task 'to be rescheduled now'.
1095 * might also involve a cross-CPU call to trigger the scheduler on
1100 struct task_struct *curr = rq->curr; in __resched_curr()
1113 if (cti->flags & ((1 << tif) | _TIF_NEED_RESCHED)) in __resched_curr()
1184 * from an idle CPU. This is good for power-savings.
1192 int i, cpu = smp_processor_id(), default_cpu = -1; in get_nohz_timer_target()
1216 if (default_cpu == -1) in get_nohz_timer_target()
1240 * Set TIF_NEED_RESCHED and send an IPI if in the non-polling in wake_up_idle_cpu()
1244 * re-evaluate the next tick. Provided some re-ordering of tick in wake_up_idle_cpu()
1248 * - On most architectures, a simple fetch_or on ti::flags with a in wake_up_idle_cpu()
1251 * - x86 needs to perform a last need_resched() check between in wake_up_idle_cpu()
1261 if (set_nr_and_not_polling(task_thread_info(rq->idle), TIF_NEED_RESCHED)) in wake_up_idle_cpu()
1270 * We just need the target to call irq_exit() and re-evaluate in wake_up_full_nohz_cpu()
1310 rq->idle_balance = idle_cpu(cpu); in nohz_csd_func()
1311 if (rq->idle_balance) { in nohz_csd_func()
1312 rq->nohz_idle_balance = flags; in nohz_csd_func()
1322 if (rq->nr_running != 1) in __need_bw_check()
1325 if (p->sched_class != &fair_sched_class) in __need_bw_check()
1339 if (rq->dl.dl_nr_running) in sched_can_stop_tick()
1346 if (rq->rt.rr_nr_running) { in sched_can_stop_tick()
1347 if (rq->rt.rr_nr_running == 1) in sched_can_stop_tick()
1357 fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running; in sched_can_stop_tick()
1369 if (rq->cfs.h_nr_queued > 1) in sched_can_stop_tick()
1377 * E.g. going from 2->1 without going through pick_next_task(). in sched_can_stop_tick()
1379 if (__need_bw_check(rq, rq->curr)) { in sched_can_stop_tick()
1380 if (cfs_task_bw_constrained(rq->curr)) in sched_can_stop_tick()
1407 list_for_each_entry_rcu(child, &parent->children, siblings) { in walk_tg_tree_from()
1419 parent = parent->parent; in walk_tg_tree_from()
1434 int prio = p->static_prio - MAX_RT_PRIO; in set_load_weight()
1449 if (update_load && p->sched_class->reweight_task) in set_load_weight()
1450 p->sched_class->reweight_task(task_rq(p), p, &lw); in set_load_weight()
1452 p->se.load = lw; in set_load_weight()
1459 * The (slow-path) user-space triggers utilization clamp value updates which
1460 * can require updates on (fast-path) scheduler's data structures used to
1462 * While the per-CPU rq lock protects fast-path update operations, user-space
1480 * used. In battery powered devices, particularly, running at the maximum
1484 * This knob only affects RT tasks whose uclamp_se->user_defined == false.
1520 * idle (which drops the max-clamp) by retaining the last known in uclamp_idle_value()
1521 * max-clamp. in uclamp_idle_value()
1524 rq->uclamp_flags |= UCLAMP_FLAG_IDLE; in uclamp_idle_value()
1534 /* Reset max-clamp retention only on idle exit */ in uclamp_idle_reset()
1535 if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE)) in uclamp_idle_reset()
1545 struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket; in uclamp_rq_max_value()
1546 int bucket_id = UCLAMP_BUCKETS - 1; in uclamp_rq_max_value()
1552 for ( ; bucket_id >= 0; bucket_id--) { in uclamp_rq_max_value()
1558 /* No tasks -- default clamp values */ in uclamp_rq_max_value()
1567 lockdep_assert_held(&p->pi_lock); in __uclamp_update_util_min_rt_default()
1569 uc_se = &p->uclamp_req[UCLAMP_MIN]; in __uclamp_update_util_min_rt_default()
1572 if (uc_se->user_defined) in __uclamp_update_util_min_rt_default()
1584 /* Protect updates to p->uclamp_* */ in uclamp_update_util_min_rt_default()
1593 struct uclamp_se uc_req = p->uclamp_req[clamp_id]; in uclamp_tg_restrict()
1606 tg_min = task_group(p)->uclamp[UCLAMP_MIN].value; in uclamp_tg_restrict()
1607 tg_max = task_group(p)->uclamp[UCLAMP_MAX].value; in uclamp_tg_restrict()
1619 * - the task specific clamp value, when explicitly requested from userspace
1620 * - the task group effective clamp value, for tasks not either in the root
1622 * - the system default clamp value, defined by the sysadmin
1641 /* Task currently refcounted: use back-annotated (effective) value */ in uclamp_eff_value()
1642 if (p->uclamp[clamp_id].active) in uclamp_eff_value()
1643 return (unsigned long)p->uclamp[clamp_id].value; in uclamp_eff_value()
1655 * Tasks can have a task-specific value requested from user-space, track
1663 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id]; in uclamp_rq_inc_id()
1664 struct uclamp_se *uc_se = &p->uclamp[clamp_id]; in uclamp_rq_inc_id()
1670 p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id); in uclamp_rq_inc_id()
1672 bucket = &uc_rq->bucket[uc_se->bucket_id]; in uclamp_rq_inc_id()
1673 bucket->tasks++; in uclamp_rq_inc_id()
1674 uc_se->active = true; in uclamp_rq_inc_id()
1676 uclamp_idle_reset(rq, clamp_id, uc_se->value); in uclamp_rq_inc_id()
1682 if (bucket->tasks == 1 || uc_se->value > bucket->value) in uclamp_rq_inc_id()
1683 bucket->value = uc_se->value; in uclamp_rq_inc_id()
1685 if (uc_se->value > uclamp_rq_get(rq, clamp_id)) in uclamp_rq_inc_id()
1686 uclamp_rq_set(rq, clamp_id, uc_se->value); in uclamp_rq_inc_id()
1701 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id]; in uclamp_rq_dec_id()
1702 struct uclamp_se *uc_se = &p->uclamp[clamp_id]; in uclamp_rq_dec_id()
1713 * In this case the uc_se->active flag should be false since no uclamp in uclamp_rq_dec_id()
1724 * // Must not decrement bucket->tasks here in uclamp_rq_dec_id()
1728 * bucket[uc_se->bucket_id]. in uclamp_rq_dec_id()
1732 if (unlikely(!uc_se->active)) in uclamp_rq_dec_id()
1735 bucket = &uc_rq->bucket[uc_se->bucket_id]; in uclamp_rq_dec_id()
1737 WARN_ON_ONCE(!bucket->tasks); in uclamp_rq_dec_id()
1738 if (likely(bucket->tasks)) in uclamp_rq_dec_id()
1739 bucket->tasks--; in uclamp_rq_dec_id()
1741 uc_se->active = false; in uclamp_rq_dec_id()
1749 if (likely(bucket->tasks)) in uclamp_rq_dec_id()
1757 WARN_ON_ONCE(bucket->value > rq_clamp); in uclamp_rq_dec_id()
1758 if (bucket->value >= rq_clamp) { in uclamp_rq_dec_id()
1759 bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value); in uclamp_rq_dec_id()
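
uclamp_rq_inc_id() and uclamp_rq_dec_id() above maintain, per runqueue and clamp index, a small array of buckets: each bucket counts its tasks and back-annotates the largest clamp value it holds, so the effective rq clamp only needs a top-down rescan when the bucket defining the current maximum drains. Below is a reduced single-threaded sketch of that bookkeeping; the bucket count, value range and the zero default are simplifications of what the kernel derives from UCLAMP_BUCKETS, SCHED_CAPACITY_SCALE and the idle-value handling.

#define NBUCKETS 5
#define MAXVAL   1024

struct bucket { unsigned int tasks, value; };

struct rq_clamp {
        struct bucket bucket[NBUCKETS];
        unsigned int value;             /* current effective clamp */
};

static unsigned int bucket_id(unsigned int v)
{
        unsigned int id = v / (MAXVAL / NBUCKETS);

        return id < NBUCKETS ? id : NBUCKETS - 1;
}

/* Task with clamp @v becomes runnable: constant-time update. */
static void clamp_inc(struct rq_clamp *rc, unsigned int v)
{
        struct bucket *b = &rc->bucket[bucket_id(v)];

        b->tasks++;
        if (b->tasks == 1 || v > b->value)      /* back-annotate the bucket max */
                b->value = v;
        if (v > rc->value)
                rc->value = v;
}

/* Task with clamp @v leaves: rescan only if the max-defining bucket drained. */
static void clamp_dec(struct rq_clamp *rc, unsigned int v)
{
        struct bucket *b = &rc->bucket[bucket_id(v)];
        int id;

        if (b->tasks)
                b->tasks--;
        if (b->tasks || b->value < rc->value)
                return;

        for (id = NBUCKETS - 1; id >= 0; id--) {
                if (rc->bucket[id].tasks) {
                        rc->value = rc->bucket[id].value;
                        return;
                }
        }
        rc->value = 0;          /* no tasks left: simplified default */
}

Enqueue stays O(1); the rescan only runs when the bucket that defined the maximum empties, which is what keeps the fast path cheap.
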
1777 if (unlikely(!p->sched_class->uclamp_enabled)) in uclamp_rq_inc()
1781 if (p->se.sched_delayed && !(flags & ENQUEUE_DELAYED)) in uclamp_rq_inc()
1788 if (rq->uclamp_flags & UCLAMP_FLAG_IDLE) in uclamp_rq_inc()
1789 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE; in uclamp_rq_inc()
1805 if (unlikely(!p->sched_class->uclamp_enabled)) in uclamp_rq_dec()
1808 if (p->se.sched_delayed) in uclamp_rq_dec()
1818 if (!p->uclamp[clamp_id].active) in uclamp_rq_reinc_id()
1828 if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE)) in uclamp_rq_reinc_id()
1829 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE; in uclamp_rq_reinc_id()
1883 uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN], in uclamp_update_root_tg()
1885 uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX], in uclamp_update_root_tg()
1944 result = -EINVAL; in sysctl_sched_uclamp_handler()
1989 * We don't need to hold task_rq_lock() when updating p->uclamp_* here in uclamp_fork()
1993 p->uclamp[clamp_id].active = false; in uclamp_fork()
1995 if (likely(!p->sched_reset_on_fork)) in uclamp_fork()
1999 uclamp_se_set(&p->uclamp_req[clamp_id], in uclamp_fork()
2012 struct uclamp_rq *uc_rq = rq->uclamp; in init_uclamp_rq()
2020 rq->uclamp_flags = UCLAMP_FLAG_IDLE; in init_uclamp_rq()
2070 raw_spin_lock_irq(&p->pi_lock); in get_wchan()
2071 state = READ_ONCE(p->__state); in get_wchan()
2073 if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq) in get_wchan()
2075 raw_spin_unlock_irq(&p->pi_lock); in get_wchan()
2086 * Can be before ->enqueue_task() because uclamp considers the in enqueue_task()
2087 * ENQUEUE_DELAYED task before its ->sched_delayed gets cleared in enqueue_task()
2088 * in ->enqueue_task(). in enqueue_task()
2092 p->sched_class->enqueue_task(rq, p, flags); in enqueue_task()
2120 * Must be before ->dequeue_task() because ->dequeue_task() can 'fail' in dequeue_task()
2121 * and mark the task ->sched_delayed. in dequeue_task()
2124 return p->sched_class->dequeue_task(rq, p, flags); in dequeue_task()
2136 WRITE_ONCE(p->on_rq, TASK_ON_RQ_QUEUED); in activate_task()
2137 ASSERT_EXCLUSIVE_WRITER(p->on_rq); in activate_task()
2144 WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING); in deactivate_task()
2145 ASSERT_EXCLUSIVE_WRITER(p->on_rq); in deactivate_task()
2162 * task_curr - is this task currently executing on a CPU?
2173 * ->switching_to() is called with the pi_lock and rq_lock held and must not
2179 if (prev_class != p->sched_class && p->sched_class->switching_to) in check_class_changing()
2180 p->sched_class->switching_to(rq, p); in check_class_changing()
2184 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
2194 if (prev_class != p->sched_class) { in check_class_changed()
2195 if (prev_class->switched_from) in check_class_changed()
2196 prev_class->switched_from(rq, p); in check_class_changed()
2198 p->sched_class->switched_to(rq, p); in check_class_changed()
2199 } else if (oldprio != p->prio || dl_task(p)) in check_class_changed()
2200 p->sched_class->prio_changed(rq, p, oldprio); in check_class_changed()
2205 struct task_struct *donor = rq->donor; in wakeup_preempt()
2207 if (p->sched_class == donor->sched_class) in wakeup_preempt()
2208 donor->sched_class->wakeup_preempt(rq, p, flags); in wakeup_preempt()
2209 else if (sched_class_above(p->sched_class, donor->sched_class)) in wakeup_preempt()
2216 if (task_on_rq_queued(donor) && test_tsk_need_resched(rq->curr)) in wakeup_preempt()
2223 if (READ_ONCE(p->__state) & state) in __task_state_match()
2226 if (READ_ONCE(p->saved_state) & state) in __task_state_match()
2227 return -1; in __task_state_match()
2239 guard(raw_spinlock_irq)(&p->pi_lock); in task_state_match()
2244 * wait_task_inactive - wait for a thread to unschedule.
2269 * any task-queue locks at all. We'll only try to get in wait_task_inactive()
2277 * still, just relax and busy-wait without holding in wait_task_inactive()
2302 if (p->se.sched_delayed) in wait_task_inactive()
2310 * When matching on p->saved_state, consider this task in wait_task_inactive()
2315 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ in wait_task_inactive()
2343 * yield - it could be a while. in wait_task_inactive()
2370 .new_mask = cpumask_of(rq->cpu), in migrate_disable_switch()
2374 if (likely(!p->migration_disabled)) in migrate_disable_switch()
2377 if (p->cpus_ptr != &p->cpus_mask) in migrate_disable_switch()
2390 .new_mask = &p->cpus_mask, in ___migrate_enable()
2412 return rq->nr_pinned; in rq_has_pinned_tasks()
2416 * Per-CPU kthreads are allowed to run on !active && online CPUs, see
2430 if (!(p->flags & PF_KTHREAD)) in is_cpu_allowed()
2460 * move_queued_task - move a queued task to new rq.
2523 * migration_cpu_stop - this will be executed by a high-prio stopper thread
2530 struct set_affinity_pending *pending = arg->pending; in migration_cpu_stop()
2531 struct task_struct *p = arg->task; in migration_cpu_stop()
2548 raw_spin_lock(&p->pi_lock); in migration_cpu_stop()
2552 * If we were passed a pending, then ->stop_pending was set, thus in migration_cpu_stop()
2553 * p->migration_pending must have remained stable. in migration_cpu_stop()
2555 WARN_ON_ONCE(pending && pending != p->migration_pending); in migration_cpu_stop()
2559 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because in migration_cpu_stop()
2560 * we're holding p->pi_lock. in migration_cpu_stop()
2567 p->migration_pending = NULL; in migration_cpu_stop()
2570 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) in migration_cpu_stop()
2576 rq = __migrate_task(rq, &rf, p, arg->dest_cpu); in migration_cpu_stop()
2578 p->wake_cpu = arg->dest_cpu; in migration_cpu_stop()
2600 * ->pi_lock, so the allowed mask is stable - if it got in migration_cpu_stop()
2603 if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) { in migration_cpu_stop()
2604 p->migration_pending = NULL; in migration_cpu_stop()
2610 * When migrate_enable() hits a rq mis-match we can't reliably in migration_cpu_stop()
2614 WARN_ON_ONCE(!pending->stop_pending); in migration_cpu_stop()
2618 &pending->arg, &pending->stop_work); in migration_cpu_stop()
2624 pending->stop_pending = false; in migration_cpu_stop()
2628 complete_all(&pending->done); in migration_cpu_stop()
2638 raw_spin_lock_irq(&p->pi_lock); in push_cpu_stop()
2645 p->migration_flags |= MDF_PUSH; in push_cpu_stop()
2649 p->migration_flags &= ~MDF_PUSH; in push_cpu_stop()
2651 if (p->sched_class->find_lock_rq) in push_cpu_stop()
2652 lowest_rq = p->sched_class->find_lock_rq(p, rq); in push_cpu_stop()
2666 rq->push_busy = false; in push_cpu_stop()
2668 raw_spin_unlock_irq(&p->pi_lock); in push_cpu_stop()
2680 if (ctx->flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) { in set_cpus_allowed_common()
2681 p->cpus_ptr = ctx->new_mask; in set_cpus_allowed_common()
2685 cpumask_copy(&p->cpus_mask, ctx->new_mask); in set_cpus_allowed_common()
2686 p->nr_cpus_allowed = cpumask_weight(ctx->new_mask); in set_cpus_allowed_common()
2691 if (ctx->flags & SCA_USER) in set_cpus_allowed_common()
2692 swap(p->user_cpus_ptr, ctx->user_mask); in set_cpus_allowed_common()
2703 * supposed to change these variables while holding both rq->lock and in __do_set_cpus_allowed()
2704 * p->pi_lock. in __do_set_cpus_allowed()
2707 * accesses these variables under p->pi_lock and only does so after in __do_set_cpus_allowed()
2708 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule() in __do_set_cpus_allowed()
2713 if (ctx->flags & SCA_MIGRATE_DISABLE) in __do_set_cpus_allowed()
2714 WARN_ON_ONCE(!p->on_cpu); in __do_set_cpus_allowed()
2716 lockdep_assert_held(&p->pi_lock); in __do_set_cpus_allowed()
2724 * holding rq->lock. in __do_set_cpus_allowed()
2732 p->sched_class->set_cpus_allowed(p, ctx); in __do_set_cpus_allowed()
2733 mm_set_cpus_allowed(p->mm, ctx->new_mask); in __do_set_cpus_allowed()
2760 * Because this is called with p->pi_lock held, it is not possible in do_set_cpus_allowed()
2774 * Always clear dst->user_cpus_ptr first as their user_cpus_ptr's in dup_user_cpus_ptr()
2777 dst->user_cpus_ptr = NULL; in dup_user_cpus_ptr()
2784 if (data_race(!src->user_cpus_ptr)) in dup_user_cpus_ptr()
2789 return -ENOMEM; in dup_user_cpus_ptr()
2797 raw_spin_lock_irqsave(&src->pi_lock, flags); in dup_user_cpus_ptr()
2798 if (src->user_cpus_ptr) { in dup_user_cpus_ptr()
2799 swap(dst->user_cpus_ptr, user_mask); in dup_user_cpus_ptr()
2800 cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr); in dup_user_cpus_ptr()
2802 raw_spin_unlock_irqrestore(&src->pi_lock, flags); in dup_user_cpus_ptr()
2814 swap(p->user_cpus_ptr, user_mask); in clear_user_cpus_ptr()
2832 * Migrate-Disable comes along and tramples all over our nice sandcastle.
2835 * Initial conditions: P0->cpus_mask = [0, 1]
2844 * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region).
2857 * `--> <woken on migration completion>
2859 * Now the fun stuff: there may be several P1-like tasks, i.e. multiple
2861 * task p are serialized by p->pi_lock, which we can leverage: the one that
2862 * should come into effect at the end of the Migrate-Disable region is the last
2863 * one. This means we only need to track a single cpumask (i.e. p->cpus_mask),
2868 * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will
2872 * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()).
2878 * Migrate-Disable. Consider:
2880 * Initial conditions: P0->cpus_mask = [0, 1]
2898 * p->migration_pending done with p->pi_lock held.
2902 __releases(rq->lock) in affine_move_task()
2903 __releases(p->pi_lock) in affine_move_task()
2911 * We are also done if the task is the current donor, boosting a lock- in affine_move_task()
2915 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask) || in affine_move_task()
2920 (p->migration_flags & MDF_PUSH) && !rq->push_busy) { in affine_move_task()
2921 rq->push_busy = true; in affine_move_task()
2929 pending = p->migration_pending; in affine_move_task()
2930 if (pending && !pending->stop_pending) { in affine_move_task()
2931 p->migration_pending = NULL; in affine_move_task()
2938 stop_one_cpu_nowait(rq->cpu, push_cpu_stop, in affine_move_task()
2939 p, &rq->push_work); in affine_move_task()
2944 complete_all(&pending->done); in affine_move_task()
2950 /* serialized by p->pi_lock */ in affine_move_task()
2951 if (!p->migration_pending) { in affine_move_task()
2961 p->migration_pending = &my_pending; in affine_move_task()
2963 pending = p->migration_pending; in affine_move_task()
2964 refcount_inc(&pending->refs); in affine_move_task()
2971 * Serialized by p->pi_lock, so this is safe. in affine_move_task()
2973 pending->arg.dest_cpu = dest_cpu; in affine_move_task()
2976 pending = p->migration_pending; in affine_move_task()
2978 * - !MIGRATE_ENABLE: in affine_move_task()
2981 * - MIGRATE_ENABLE: in affine_move_task()
2991 return -EINVAL; in affine_move_task()
2994 if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) { in affine_move_task()
2998 * and have the stopper function handle it all race-free. in affine_move_task()
3000 stop_pending = pending->stop_pending; in affine_move_task()
3002 pending->stop_pending = true; in affine_move_task()
3005 p->migration_flags &= ~MDF_PUSH; in affine_move_task()
3011 &pending->arg, &pending->stop_work); in affine_move_task()
3023 if (!pending->stop_pending) { in affine_move_task()
3024 p->migration_pending = NULL; in affine_move_task()
3031 complete_all(&pending->done); in affine_move_task()
3034 wait_for_completion(&pending->done); in affine_move_task()
3036 if (refcount_dec_and_test(&pending->refs)) in affine_move_task()
3037 wake_up_var(&pending->refs); /* No UaF, just an address */ in affine_move_task()
3052 * Called with both p->pi_lock and rq->lock held; drops both before returning.
3058 __releases(rq->lock) in __set_cpus_allowed_ptr_locked()
3059 __releases(p->pi_lock) in __set_cpus_allowed_ptr_locked()
3063 bool kthread = p->flags & PF_KTHREAD; in __set_cpus_allowed_ptr_locked()
3072 * however, during cpu-hot-unplug, even these might get pushed in __set_cpus_allowed_ptr_locked()
3078 * set_cpus_allowed_common() and actually reset p->cpus_ptr. in __set_cpus_allowed_ptr_locked()
3083 if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) { in __set_cpus_allowed_ptr_locked()
3084 ret = -EINVAL; in __set_cpus_allowed_ptr_locked()
3089 * Must re-check here, to close a race against __kthread_bind(), in __set_cpus_allowed_ptr_locked()
3092 if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) { in __set_cpus_allowed_ptr_locked()
3093 ret = -EINVAL; in __set_cpus_allowed_ptr_locked()
3097 if (!(ctx->flags & SCA_MIGRATE_ENABLE)) { in __set_cpus_allowed_ptr_locked()
3098 if (cpumask_equal(&p->cpus_mask, ctx->new_mask)) { in __set_cpus_allowed_ptr_locked()
3099 if (ctx->flags & SCA_USER) in __set_cpus_allowed_ptr_locked()
3100 swap(p->user_cpus_ptr, ctx->user_mask); in __set_cpus_allowed_ptr_locked()
3106 !cpumask_test_cpu(task_cpu(p), ctx->new_mask))) { in __set_cpus_allowed_ptr_locked()
3107 ret = -EBUSY; in __set_cpus_allowed_ptr_locked()
3117 dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, ctx->new_mask); in __set_cpus_allowed_ptr_locked()
3119 ret = -EINVAL; in __set_cpus_allowed_ptr_locked()
3125 return affine_move_task(rq, p, rf, dest_cpu, ctx->flags); in __set_cpus_allowed_ptr_locked()
3152 if (p->user_cpus_ptr && in __set_cpus_allowed_ptr()
3153 !(ctx->flags & (SCA_USER | SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) && in __set_cpus_allowed_ptr()
3154 cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr)) in __set_cpus_allowed_ptr()
3155 ctx->new_mask = rq->scratch_mask; in __set_cpus_allowed_ptr()
3178 * -EINVAL.
3200 err = -EPERM; in restrict_cpus_allowed_ptr()
3205 err = -EINVAL; in restrict_cpus_allowed_ptr()
3218 * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
3251 task_pid_nr(p), p->comm, in force_compatible_cpus_allowed_ptr()
3288 unsigned int state = READ_ONCE(p->__state); in set_task_cpu()
3294 WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq); in set_task_cpu()
3297 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING, in set_task_cpu()
3299 * time relying on p->on_rq. in set_task_cpu()
3302 p->sched_class == &fair_sched_class && in set_task_cpu()
3303 (p->on_rq && !task_on_rq_migrating(p))); in set_task_cpu()
3307 * The caller should hold either p->pi_lock or rq->lock, when changing in set_task_cpu()
3308 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks. in set_task_cpu()
3316 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || in set_task_cpu()
3329 if (p->sched_class->migrate_task_rq) in set_task_cpu()
3330 p->sched_class->migrate_task_rq(p, new_cpu); in set_task_cpu()
3331 p->se.nr_migrations++; in set_task_cpu()
3366 p->wake_cpu = cpu; in __migrate_swap_task()
3380 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu)) in migrate_swap_stop()
3381 return -EAGAIN; in migrate_swap_stop()
3383 src_rq = cpu_rq(arg->src_cpu); in migrate_swap_stop()
3384 dst_rq = cpu_rq(arg->dst_cpu); in migrate_swap_stop()
3386 guard(double_raw_spinlock)(&arg->src_task->pi_lock, &arg->dst_task->pi_lock); in migrate_swap_stop()
3389 if (task_cpu(arg->dst_task) != arg->dst_cpu) in migrate_swap_stop()
3390 return -EAGAIN; in migrate_swap_stop()
3392 if (task_cpu(arg->src_task) != arg->src_cpu) in migrate_swap_stop()
3393 return -EAGAIN; in migrate_swap_stop()
3395 if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr)) in migrate_swap_stop()
3396 return -EAGAIN; in migrate_swap_stop()
3398 if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr)) in migrate_swap_stop()
3399 return -EAGAIN; in migrate_swap_stop()
3401 __migrate_swap_task(arg->src_task, arg->dst_cpu); in migrate_swap_stop()
3402 __migrate_swap_task(arg->dst_task, arg->src_cpu); in migrate_swap_stop()
3414 int ret = -EINVAL; in migrate_swap()
3428 * will be re-checked with proper locks held further down the line. in migrate_swap()
3433 if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr)) in migrate_swap()
3436 if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr)) in migrate_swap()
3448 * kick_process - kick a running thread to enter/exit the kernel
3449 * @p: the to-be-kicked thread
3452 * kernel-mode, without any delay. (to get signals handled.)
3471 * ->cpus_ptr is protected by both rq->lock and p->pi_lock
3475 * - cpu_active must be a subset of cpu_online
3477 * - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
3482 * - on CPU-down we clear cpu_active() to mask the sched domains and
3501 * will return -1. There is no CPU on the node, and we should in select_fallback_rq()
3504 if (nid != -1) { in select_fallback_rq()
3516 for_each_cpu(dest_cpu, p->cpus_ptr) { in select_fallback_rq()
3534 * hold p->pi_lock and again violate locking order. in select_fallback_rq()
3554 if (p->mm && printk_ratelimit()) { in select_fallback_rq()
3556 task_pid_nr(p), p->comm, cpu); in select_fallback_rq()
3564 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
3569 lockdep_assert_held(&p->pi_lock); in select_task_rq()
3571 if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p)) { in select_task_rq()
3572 cpu = p->sched_class->select_task_rq(p, cpu, *wake_flags); in select_task_rq()
3575 cpu = cpumask_any(p->cpus_ptr); in select_task_rq()
3580 * to rely on ttwu() to place the task on a valid ->cpus_ptr in select_task_rq()
3585 * [ this allows ->select_task() to simply return task_cpu(p) and in select_task_rq()
3597 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; in sched_set_stop_task()
3598 struct task_struct *old_stop = cpu_rq(cpu)->stop; in sched_set_stop_task()
3606 * much confusion -- but then, stop work should not in sched_set_stop_task()
3611 stop->sched_class = &stop_sched_class; in sched_set_stop_task()
3614 * The PI code calls rt_mutex_setprio() with ->pi_lock held to in sched_set_stop_task()
3620 * The stop task itself will never be part of the PI-chain, it in sched_set_stop_task()
3621 * never blocks, therefore that ->pi_lock recursion is safe. in sched_set_stop_task()
3622 * Tell lockdep about this by placing the stop->pi_lock in its in sched_set_stop_task()
3625 lockdep_set_class(&stop->pi_lock, &stop_pi_lock); in sched_set_stop_task()
3628 cpu_rq(cpu)->stop = stop; in sched_set_stop_task()
3635 old_stop->sched_class = &rt_sched_class; in sched_set_stop_task()
3649 if (cpu == rq->cpu) { in ttwu_stat()
3650 __schedstat_inc(rq->ttwu_local); in ttwu_stat()
3651 __schedstat_inc(p->stats.nr_wakeups_local); in ttwu_stat()
3655 __schedstat_inc(p->stats.nr_wakeups_remote); in ttwu_stat()
3658 for_each_domain(rq->cpu, sd) { in ttwu_stat()
3660 __schedstat_inc(sd->ttwu_wake_remote); in ttwu_stat()
3667 __schedstat_inc(p->stats.nr_wakeups_migrate); in ttwu_stat()
3669 __schedstat_inc(rq->ttwu_count); in ttwu_stat()
3670 __schedstat_inc(p->stats.nr_wakeups); in ttwu_stat()
3673 __schedstat_inc(p->stats.nr_wakeups_sync); in ttwu_stat()
3681 WRITE_ONCE(p->__state, TASK_RUNNING); in ttwu_do_wakeup()
3693 if (p->sched_contributes_to_load) in ttwu_do_activate()
3694 rq->nr_uninterruptible--; in ttwu_do_activate()
3701 if (p->in_iowait) { in ttwu_do_activate()
3703 atomic_dec(&task_rq(p)->nr_iowait); in ttwu_do_activate()
3711 if (p->sched_class->task_woken) { in ttwu_do_activate()
3714 * drop the rq->lock, hereafter rq is only used for statistics. in ttwu_do_activate()
3717 p->sched_class->task_woken(rq, p); in ttwu_do_activate()
3721 if (rq->idle_stamp) { in ttwu_do_activate()
3722 u64 delta = rq_clock(rq) - rq->idle_stamp; in ttwu_do_activate()
3723 u64 max = 2*rq->max_idle_balance_cost; in ttwu_do_activate()
3725 update_avg(&rq->avg_idle, delta); in ttwu_do_activate()
3727 if (rq->avg_idle > max) in ttwu_do_activate()
3728 rq->avg_idle = max; in ttwu_do_activate()
3730 rq->idle_stamp = 0; in ttwu_do_activate()
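
The idle-stamp block above folds each observed idle period into rq->avg_idle with update_avg() and clamps the result to twice max_idle_balance_cost so a single long idle spell cannot dominate the estimate. A hedged sketch of that style of exponentially weighted averaging follows; the 1/8 weight and the helper names here are illustrative.

#include <stdint.h>

/* Exponentially weighted moving average: move 1/8 of the way towards
 * each new sample. */
static void update_avg_sketch(uint64_t *avg, uint64_t sample)
{
        int64_t diff = (int64_t)(sample - *avg);

        *avg += diff / 8;
}

/* Usage, mirroring the ttwu_do_activate() fragment above. */
static void account_idle(uint64_t *avg_idle, uint64_t idle_delta,
                         uint64_t max_idle_balance_cost)
{
        uint64_t max = 2 * max_idle_balance_cost;

        update_avg_sketch(avg_idle, idle_delta);
        if (*avg_idle > max)
                *avg_idle = max;
}
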
3748 * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
3751 * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
3752 * then schedule() must still happen and p->state can be changed to
3768 if (p->se.sched_delayed) in ttwu_runnable()
3799 if (WARN_ON_ONCE(p->on_cpu)) in sched_ttwu_pending()
3800 smp_cond_load_acquire(&p->on_cpu, !VAL); in sched_ttwu_pending()
3805 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf); in sched_ttwu_pending()
3810 * idle_cpu() does not observe a false-negative -- if it does, in sched_ttwu_pending()
3818 WRITE_ONCE(rq->ttwu_pending, 0); in sched_ttwu_pending()
3830 if (set_nr_if_polling(cpu_rq(cpu)->idle)) { in call_function_single_prep_ipi()
3848 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED); in __ttwu_queue_wakelist()
3850 WRITE_ONCE(rq->ttwu_pending, 1); in __ttwu_queue_wakelist()
3852 __smp_call_single_queue(cpu, &p->wake_entry.llist); in __ttwu_queue_wakelist()
3861 if (is_idle_task(rcu_dereference(rq->curr))) { in wake_up_if_idle()
3863 if (is_idle_task(rq->curr)) in wake_up_if_idle()
3888 * Whether CPUs share cache resources, which means LLC on non-cluster
3906 if (p->sched_class == &stop_sched_class) in ttwu_queue_cond()
3918 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) in ttwu_queue_cond()
3934 * the task activation to the idle (or soon-to-be-idle) CPU as in ttwu_queue_cond()
3938 * Note that we can only get here with (wakee) p->on_rq=0, in ttwu_queue_cond()
3939 * p->on_cpu can be whatever, we've done the dequeue, so in ttwu_queue_cond()
3940 * the wakee has been accounted out of ->nr_running. in ttwu_queue_cond()
3942 if (!cpu_rq(cpu)->nr_running) in ttwu_queue_cond()
4018 p->saved_state = TASK_RUNNING; in ttwu_state_match()
4024 * Notes on Program-Order guarantees on SMP systems.
4028 * The basic program-order guarantee on SMP systems is that when a task [t]
4029 * migrates, all its activity on its old CPU [c0] happens-before any subsequent
4034 * A) UNLOCK of the rq(c0)->lock scheduling out task t
4035 * B) migration for t is required to synchronize *both* rq(c0)->lock and
4036 * rq(c1)->lock (if not at the same time, then in that order).
4037 * C) LOCK of the rq(c1)->lock scheduling in task
4046 * LOCK rq(0)->lock
4047 * sched-out X
4048 * sched-in Y
4049 * UNLOCK rq(0)->lock
4051 * LOCK rq(0)->lock // orders against CPU0
4053 * UNLOCK rq(0)->lock
4055 * LOCK rq(1)->lock
4057 * UNLOCK rq(1)->lock
4059 * LOCK rq(1)->lock // orders against CPU2
4060 * sched-out Z
4061 * sched-in X
4062 * UNLOCK rq(1)->lock
4065 * BLOCKING -- aka. SLEEP + WAKEUP
4071 * 1) smp_store_release(X->on_cpu, 0) -- finish_task()
4072 * 2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
4078 * LOCK rq(0)->lock LOCK X->pi_lock
4080 * sched-out X
4081 * smp_store_release(X->on_cpu, 0);
4083 * smp_cond_load_acquire(&X->on_cpu, !VAL);
4084 * X->state = WAKING
4087 * LOCK rq(2)->lock
4089 * X->state = RUNNING
4090 * UNLOCK rq(2)->lock
4092 * LOCK rq(2)->lock // orders against CPU1
4093 * sched-out Z
4094 * sched-in X
4095 * UNLOCK rq(2)->lock
4097 * UNLOCK X->pi_lock
4098 * UNLOCK rq(0)->lock
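
The pairing spelled out above, smp_store_release(X->on_cpu, 0) in finish_task() against smp_cond_load_acquire(&X->on_cpu, !VAL) in try_to_wake_up(), is what guarantees the waking CPU observes everything the scheduling CPU wrote before it released the task. Here is a small C11 analogue of that handshake, with a spinning acquire load standing in for smp_cond_load_acquire() and invented names.

#include <stdatomic.h>

struct xtask {
        _Atomic int on_cpu;
        int saved_context;      /* stands in for everything written at sched-out */
};

/* Scheduling side: publish the sched-out state, then release on_cpu.
 * The release store orders all earlier writes before the flag clear. */
static void finish_task_sketch(struct xtask *prev, int ctx)
{
        prev->saved_context = ctx;
        atomic_store_explicit(&prev->on_cpu, 0, memory_order_release);
}

/* Wakeup side: spin until on_cpu drops to 0 with acquire semantics, so
 * every write done before the release above is visible afterwards. */
static int ttwu_wait_sketch(struct xtask *p)
{
        while (atomic_load_explicit(&p->on_cpu, memory_order_acquire))
                ;               /* cpu_relax() in the kernel */
        return p->saved_context;        /* guaranteed to see the final value */
}
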
4107 * try_to_wake_up - wake up a thread
4114 * If (@state & @p->state) @p->state = TASK_RUNNING.
4120 * It issues a full memory barrier before accessing @p->state, see the comment
4123 * Uses p->pi_lock to serialize against concurrent wake-ups.
4125 * Relies on p->pi_lock stabilizing:
4126 * - p->sched_class
4127 * - p->cpus_ptr
4128 * - p->sched_task_group
4131 * Tries really hard to only take one task_rq(p)->lock for performance.
4132 * Takes rq->lock in:
4133 * - ttwu_runnable() -- old rq, unavoidable, see comment there;
4134 * - ttwu_queue() -- new rq, for enqueue of the task;
4135 * - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
4140 * Return: %true if @p->state changes (an actual wakeup was done),
4152 * We're waking current, this means 'p->on_rq' and 'task_cpu(p) in try_to_wake_up()
4154 * case the whole 'p->on_rq && ttwu_runnable()' case below in try_to_wake_up()
4162 * - we rely on Program-Order guarantees for all the ordering, in try_to_wake_up()
4163 * - we're serialized against set_special_state() by virtue of in try_to_wake_up()
4164 * it disabling IRQs (this allows not taking ->pi_lock). in try_to_wake_up()
4166 WARN_ON_ONCE(p->se.sched_delayed); in try_to_wake_up()
4178 * reordered with p->state check below. This pairs with smp_store_mb() in try_to_wake_up()
4181 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) { in try_to_wake_up()
4189 * Ensure we load p->on_rq _after_ p->state, otherwise it would in try_to_wake_up()
4190 * be possible to, falsely, observe p->on_rq == 0 and get stuck in try_to_wake_up()
4194 * STORE p->on_rq = 1 LOAD p->state in try_to_wake_up()
4195 * UNLOCK rq->lock in try_to_wake_up()
4198 * LOCK rq->lock smp_rmb(); in try_to_wake_up()
4200 * UNLOCK rq->lock in try_to_wake_up()
4203 * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq in try_to_wake_up()
4205 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in in try_to_wake_up()
4211 if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags)) in try_to_wake_up()
4215 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be in try_to_wake_up()
4216 * possible to, falsely, observe p->on_cpu == 0. in try_to_wake_up()
4218 * One must be running (->on_cpu == 1) in order to remove oneself in try_to_wake_up()
4222 * STORE p->on_cpu = 1 LOAD p->on_rq in try_to_wake_up()
4223 * UNLOCK rq->lock in try_to_wake_up()
4226 * LOCK rq->lock smp_rmb(); in try_to_wake_up()
4228 * STORE p->on_rq = 0 LOAD p->on_cpu in try_to_wake_up()
4230 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in in try_to_wake_up()
4233 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure in try_to_wake_up()
4235 * care about its own p->state. See the comment in __schedule(). in try_to_wake_up()
4240 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq in try_to_wake_up()
4241 * == 0), which means we need to do an enqueue, change p->state to in try_to_wake_up()
4242 * TASK_WAKING such that we can unlock p->pi_lock before doing the in try_to_wake_up()
4245 WRITE_ONCE(p->__state, TASK_WAKING); in try_to_wake_up()
4250 * which potentially sends an IPI instead of spinning on p->on_cpu to in try_to_wake_up()
4254 * Ensure we load task_cpu(p) after p->on_cpu: in try_to_wake_up()
4257 * STORE p->cpu = @cpu in try_to_wake_up()
4259 * LOCK rq->lock in try_to_wake_up()
4260 * smp_mb__after_spin_lock() smp_cond_load_acquire(&p->on_cpu) in try_to_wake_up()
4261 * STORE p->on_cpu = 1 LOAD p->cpu in try_to_wake_up()
4266 if (smp_load_acquire(&p->on_cpu) && in try_to_wake_up()
4279 smp_cond_load_acquire(&p->on_cpu, !VAL); in try_to_wake_up()
4281 cpu = select_task_rq(p, p->wake_cpu, &wake_flags); in try_to_wake_up()
4283 if (p->in_iowait) { in try_to_wake_up()
4285 atomic_dec(&task_rq(p)->nr_iowait); in try_to_wake_up()
4304 unsigned int state = READ_ONCE(p->__state); in __task_needs_rq_lock()
4307 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when in __task_needs_rq_lock()
4315 * Ensure we load p->on_rq after p->__state, otherwise it would be in __task_needs_rq_lock()
4316 * possible to, falsely, observe p->on_rq == 0. in __task_needs_rq_lock()
4321 if (p->on_rq) in __task_needs_rq_lock()
4329 smp_cond_load_acquire(&p->on_cpu, !VAL); in __task_needs_rq_lock()
4335 * task_call_func - Invoke a function on task in fixed state
4355 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); in task_call_func()
4362 * - blocked and we're holding off wakeups (pi->lock) in task_call_func()
4363 * - woken, and we're holding off enqueue (rq->lock) in task_call_func()
4364 * - queued, and we're holding off schedule (rq->lock) in task_call_func()
4365 * - running, and we're holding off de-schedule (rq->lock) in task_call_func()
4367 * The called function (@func) can use: task_curr(), p->on_rq and in task_call_func()
4368 * p->__state to differentiate between these states. in task_call_func()
4375 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags); in task_call_func()
4380 * cpu_curr_snapshot - Return a snapshot of the currently running task
4412 * wake_up_process - Wake up a specific process
4442 p->on_rq = 0; in __sched_fork()
4444 p->se.on_rq = 0; in __sched_fork()
4445 p->se.exec_start = 0; in __sched_fork()
4446 p->se.sum_exec_runtime = 0; in __sched_fork()
4447 p->se.prev_sum_exec_runtime = 0; in __sched_fork()
4448 p->se.nr_migrations = 0; in __sched_fork()
4449 p->se.vruntime = 0; in __sched_fork()
4450 p->se.vlag = 0; in __sched_fork()
4451 INIT_LIST_HEAD(&p->se.group_node); in __sched_fork()
4454 WARN_ON_ONCE(p->se.sched_delayed); in __sched_fork()
4457 p->se.cfs_rq = NULL; in __sched_fork()
4465 memset(&p->stats, 0, sizeof(p->stats)); in __sched_fork()
4468 init_dl_entity(&p->dl); in __sched_fork()
4470 INIT_LIST_HEAD(&p->rt.run_list); in __sched_fork()
4471 p->rt.timeout = 0; in __sched_fork()
4472 p->rt.time_slice = sched_rr_timeslice; in __sched_fork()
4473 p->rt.on_rq = 0; in __sched_fork()
4474 p->rt.on_list = 0; in __sched_fork()
4477 init_scx_entity(&p->scx); in __sched_fork()
4481 INIT_HLIST_HEAD(&p->preempt_notifiers); in __sched_fork()
4485 p->capture_control = NULL; in __sched_fork()
4488 p->wake_entry.u_flags = CSD_TYPE_TTWU; in __sched_fork()
4489 p->migration_pending = NULL; in __sched_fork()
4522 pgdat->nbp_threshold = 0; in reset_memory_tiering()
4523 pgdat->nbp_th_nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE); in reset_memory_tiering()
4524 pgdat->nbp_th_start = jiffies_to_msecs(jiffies); in reset_memory_tiering()
4536 return -EPERM; in sysctl_numa_balancing()
4605 return -EPERM; in sysctl_schedstats()
4676 * fork()/clone()-time setup:
4686 p->__state = TASK_NEW; in sched_fork()
4691 p->prio = current->normal_prio; in sched_fork()
4698 if (unlikely(p->sched_reset_on_fork)) { in sched_fork()
4700 p->policy = SCHED_NORMAL; in sched_fork()
4701 p->static_prio = NICE_TO_PRIO(0); in sched_fork()
4702 p->rt_priority = 0; in sched_fork()
4703 } else if (PRIO_TO_NICE(p->static_prio) < 0) in sched_fork()
4704 p->static_prio = NICE_TO_PRIO(0); in sched_fork()
4706 p->prio = p->normal_prio = p->static_prio; in sched_fork()
4708 p->se.custom_slice = 0; in sched_fork()
4709 p->se.slice = sysctl_sched_base_slice; in sched_fork()
4715 p->sched_reset_on_fork = 0; in sched_fork()
4718 if (dl_prio(p->prio)) in sched_fork()
4719 return -EAGAIN; in sched_fork()
4723 if (rt_prio(p->prio)) { in sched_fork()
4724 p->sched_class = &rt_sched_class; in sched_fork()
4726 } else if (task_should_scx(p->policy)) { in sched_fork()
4727 p->sched_class = &ext_sched_class; in sched_fork()
4730 p->sched_class = &fair_sched_class; in sched_fork()
4733 init_entity_runnable_average(&p->se); in sched_fork()
4738 memset(&p->sched_info, 0, sizeof(p->sched_info)); in sched_fork()
4740 p->on_cpu = 0; in sched_fork()
4742 plist_node_init(&p->pushable_tasks, MAX_PRIO); in sched_fork()
4743 RB_CLEAR_NODE(&p->pushable_dl_tasks); in sched_fork()
4753 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly in sched_cgroup_fork()
4756 raw_spin_lock_irqsave(&p->pi_lock, flags); in sched_cgroup_fork()
4760 tg = container_of(kargs->cset->subsys[cpu_cgrp_id], in sched_cgroup_fork()
4763 p->sched_task_group = tg; in sched_cgroup_fork()
4772 if (p->sched_class->task_fork) in sched_cgroup_fork()
4773 p->sched_class->task_fork(p); in sched_cgroup_fork()
4774 raw_spin_unlock_irqrestore(&p->pi_lock, flags); in sched_cgroup_fork()
4807 * wake_up_new_task - wake up a newly created task for the first time.
4819 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); in wake_up_new_task()
4820 WRITE_ONCE(p->__state, TASK_RUNNING); in wake_up_new_task()
4823 * - cpus_ptr can change in the fork path in wake_up_new_task()
4824 * - any previously selected CPU might disappear through hotplug in wake_up_new_task()
4827 * as we're not fully set-up yet. in wake_up_new_task()
4829 p->recent_used_cpu = task_cpu(p); in wake_up_new_task()
4839 if (p->sched_class->task_woken) { in wake_up_new_task()
4841 * Nothing relies on rq->lock after this, so it's fine to in wake_up_new_task()
4845 p->sched_class->task_woken(rq, p); in wake_up_new_task()
4868 * preempt_notifier_register - tell me when current is being preempted & rescheduled
4876 hlist_add_head(&notifier->link, &current->preempt_notifiers); in preempt_notifier_register()
4881 * preempt_notifier_unregister - no longer interested in preemption notifications
4888 hlist_del(&notifier->link); in preempt_notifier_unregister()
4896 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) in __fire_sched_in_preempt_notifiers()
4897 notifier->ops->sched_in(notifier, raw_smp_processor_id()); in __fire_sched_in_preempt_notifiers()
4912 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) in __fire_sched_out_preempt_notifiers()
4913 notifier->ops->sched_out(notifier, next); in __fire_sched_out_preempt_notifiers()
4944 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and in prepare_task()
4947 WRITE_ONCE(next->on_cpu, 1); in prepare_task()
4954 * p->on_cpu is cleared, the task can be moved to a different CPU. We in finish_task()
4958 * In particular, the load of prev->state in finish_task_switch() must in finish_task()
4963 smp_store_release(&prev->on_cpu, 0); in finish_task()
4974 func = (void (*)(struct rq *))head->func; in do_balance_callbacks()
4975 next = head->next; in do_balance_callbacks()
4976 head->next = NULL; in do_balance_callbacks()
4990 * that queued it (only later, when it's safe to drop rq->lock again),
4994 * a single test, namely: rq->balance_callback == NULL.
5004 struct balance_callback *head = rq->balance_callback; in __splice_balance_callbacks()
5013 * in the same rq->lock section. in __splice_balance_callbacks()
5021 rq->balance_callback = NULL; in __splice_balance_callbacks()
5053 * of the scheduler it's an obvious special-case), so we in prepare_lock_switch()
5057 spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_); in prepare_lock_switch()
5060 rq_lockp(rq)->owner = next; in prepare_lock_switch()
5068 * fix up the runqueue lock - which gets 'carried over' from in finish_lock_switch()
5071 spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_); in finish_lock_switch()
5091 if (unlikely(current->kmap_ctrl.idx)) in kmap_local_sched_out()
5099 if (unlikely(current->kmap_ctrl.idx)) in kmap_local_sched_in()
5105 * prepare_task_switch - prepare to switch tasks
5132 * finish_task_switch - clean up after a task-switch
5138 * and do any other architecture-specific cleanup actions.
5151 __releases(rq->lock) in finish_task_switch()
5154 struct mm_struct *mm = rq->prev_mm; in finish_task_switch()
5164 * raw_spin_lock_irq(&rq->lock) // 2 in finish_task_switch()
5170 current->comm, current->pid, preempt_count())) in finish_task_switch()
5173 rq->prev_mm = NULL; in finish_task_switch()
5177 * If a task dies, then it sets TASK_DEAD in tsk->state and calls in finish_task_switch()
5181 * We must observe prev->state before clearing prev->on_cpu (in in finish_task_switch()
5183 * running on another CPU and we could race with its RUNNING -> DEAD in finish_task_switch()
5186 prev_state = READ_ONCE(prev->__state); in finish_task_switch()
5208 * schedule between user->kernel->user threads without passing through in finish_task_switch()
5210 * rq->curr, before returning to userspace, so provide them here: in finish_task_switch()
5212 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly in finish_task_switch()
5214 * - a sync_core for SYNC_CORE. in finish_task_switch()
5222 if (prev->sched_class->task_dead) in finish_task_switch()
5223 prev->sched_class->task_dead(prev); in finish_task_switch()
5235 * schedule_tail - first thing a freshly forked thread must call.
5239 __releases(rq->lock) in schedule_tail()
5245 * finish_task_switch() will drop rq->lock() and lower preempt_count in schedule_tail()
5259 if (current->set_child_tid) in schedule_tail()
5260 put_user(task_pid_vnr(current), current->set_child_tid); in schedule_tail()
5266 * context_switch - switch to the new MM and the new thread's register state.
5282 * kernel -> kernel lazy + transfer active in context_switch()
5283 * user -> kernel lazy + mmgrab_lazy_tlb() active in context_switch()
5285 * kernel -> user switch + mmdrop_lazy_tlb() active in context_switch()
5286 * user -> user switch in context_switch()
5291 if (!next->mm) { // to kernel in context_switch()
5292 enter_lazy_tlb(prev->active_mm, next); in context_switch()
5294 next->active_mm = prev->active_mm; in context_switch()
5295 if (prev->mm) // from user in context_switch()
5296 mmgrab_lazy_tlb(prev->active_mm); in context_switch()
5298 prev->active_mm = NULL; in context_switch()
5300 membarrier_switch_mm(rq, prev->active_mm, next->mm); in context_switch()
5303 * rq->curr / membarrier_switch_mm() and returning to userspace. in context_switch()
5306 * case 'prev->active_mm == next->mm' through in context_switch()
5309 switch_mm_irqs_off(prev->active_mm, next->mm, next); in context_switch()
5310 lru_gen_use_mm(next->mm); in context_switch()
5312 if (!prev->mm) { // from kernel in context_switch()
5314 rq->prev_mm = prev->active_mm; in context_switch()
5315 prev->active_mm = NULL; in context_switch()
5342 sum += cpu_rq(i)->nr_running; in nr_running()
5351 * preemption, thus the result might have a time-of-check-to-time-of-use
5354 * - from a non-preemptible section (of course)
5356 * - from a thread that is bound to a single CPU
5358 * - in a loop with very short iterations (e.g. a polling loop)
5362 return raw_rq()->nr_running == 1; in single_task_running()
5368 return cpu_rq(cpu)->nr_switches; in nr_context_switches_cpu()
5377 sum += cpu_rq(i)->nr_switches; in nr_context_switches()
5385 * for a CPU that has IO-wait which might not even end up running the task when
5391 return atomic_read(&cpu_rq(cpu)->nr_iowait); in nr_iowait_cpu()
5395 * IO-wait accounting, and how it's mostly bollocks (on SMP).
5397 * The idea behind IO-wait accounting is to account the idle time that we could
5399 * storage performance, we'd have a proportional reduction in IO-wait time.
5402 * idle time as IO-wait, because if the storage were faster, it could've been
5409 * CPU will have IO-wait accounted, while the other has regular idle. Even
5413 * This means, that when looking globally, the current IO-wait accounting on
5419 * blocked on. This means the per CPU IO-wait number is meaningless.
5435 * sched_exec - execve() is a valuable balancing opportunity, because at
5444 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) { in sched_exec()
5445 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC); in sched_exec()
5465 * and its field curr->exec_start; when called from task_sched_runtime(),
5472 struct sched_entity *curr = p->se.cfs_rq->curr; in prefetch_curr_exec_start()
5474 struct sched_entity *curr = task_rq(p)->cfs.curr; in prefetch_curr_exec_start()
5477 prefetch(&curr->exec_start); in prefetch_curr_exec_start()
5493 * 64-bit doesn't need locks to atomically read a 64-bit value. in task_sched_runtime()
5495 * Reading ->on_cpu is racy, but this is OK. in task_sched_runtime()
5500 * If we see ->on_cpu without ->on_rq, the task is leaving, and has in task_sched_runtime()
5503 if (!p->on_cpu || !task_on_rq_queued(p)) in task_sched_runtime()
5504 return p->se.sum_exec_runtime; in task_sched_runtime()
5509 * Must be ->curr _and_ ->on_rq. If dequeued, we would in task_sched_runtime()
5516 p->sched_class->update_curr(rq); in task_sched_runtime()
5518 ns = p->se.sum_exec_runtime; in task_sched_runtime()
5539 if (!rq->last_seen_need_resched_ns) { in cpu_resched_latency()
5540 rq->last_seen_need_resched_ns = now; in cpu_resched_latency()
5541 rq->ticks_without_resched = 0; in cpu_resched_latency()
5545 rq->ticks_without_resched++; in cpu_resched_latency()
5546 resched_latency = now - rq->last_seen_need_resched_ns; in cpu_resched_latency()
5589 donor = rq->donor; in sched_tick()
5600 donor->sched_class->task_tick(rq, donor, 0); in sched_tick()
5615 if (donor->flags & PF_WQ_WORKER) in sched_tick()
5619 rq->idle_balance = idle_cpu(cpu); in sched_tick()
5631 /* Values for ->state, see diagram below. */
5637 * State diagram for ->state:
5646 * +--TICK_SCHED_REMOTE_OFFLINING
5665 int cpu = twork->cpu; in sched_tick_remote()
5673 * statistics and checks timeslices in a time-independent way, regardless in sched_tick_remote()
5678 struct task_struct *curr = rq->curr; in sched_tick_remote()
5686 WARN_ON_ONCE(rq->curr != rq->donor); in sched_tick_remote()
5694 u64 delta = rq_clock_task(rq) - curr->se.exec_start; in sched_tick_remote()
5697 curr->sched_class->task_tick(rq, curr, 0); in sched_tick_remote()
5709 os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING); in sched_tick_remote()
5726 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING); in sched_tick_start()
5729 twork->cpu = cpu; in sched_tick_start()
5730 INIT_DELAYED_WORK(&twork->work, sched_tick_remote); in sched_tick_start()
5731 queue_delayed_work(system_unbound_wq, &twork->work, HZ); in sched_tick_start()
5747 /* There cannot be competing actions, but don't rely on stop-machine. */ in sched_tick_stop()
5748 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING); in sched_tick_stop()
5777 current->preempt_disable_ip = ip; in preempt_latency_start()
5798 PREEMPT_MASK - 10); in preempt_count_add()
5845 return p->preempt_disable_ip; in get_preempt_disable_ip()
5863 prev->comm, prev->pid, preempt_count()); in __schedule_bug()
5880 * Various schedule()-time debugging checks and statistics:
5893 if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) { in schedule_debug()
5894 printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n", in schedule_debug()
5895 prev->comm, prev->pid, prev->non_block_count); in schedule_debug()
5910 schedstat_inc(this_rq()->sched_count); in schedule_debug()
5916 const struct sched_class *start_class = prev->sched_class; in prev_balance()
5926 rq->scx.flags |= SCX_RQ_BAL_PENDING; in prev_balance()
5934 * that when we release the rq->lock the task is in the same in prev_balance()
5935 * state as before we took rq->lock. in prev_balance()
5941 if (class->balance && class->balance(rq, prev, rf)) in prev_balance()
5947 * Pick up the highest-prio task:
5955 rq->dl_server = NULL; in __pick_next_task()
5966 if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) && in __pick_next_task()
5967 rq->nr_running == rq->cfs.h_nr_queued)) { in __pick_next_task()
5986 if (class->pick_next_task) { in __pick_next_task()
5987 p = class->pick_next_task(rq, prev); in __pick_next_task()
5991 p = class->pick_task(rq); in __pick_next_task()
6005 return (task_rq(t)->idle == t); in is_task_rq_idle()
6010 return is_task_rq_idle(a) || (a->core_cookie == cookie); in cookie_equals()
6018 return a->core_cookie == b->core_cookie; in cookie_match()
6026 rq->dl_server = NULL; in pick_task()
6029 p = class->pick_task(rq); in pick_task()
6047 bool core_clock_updated = (rq == rq->core); in pick_next_task()
6058 /* Stopper task is switching into idle, no need for core-wide selection. */ in pick_next_task()
6065 rq->core_pick = NULL; in pick_next_task()
6066 rq->core_dl_server = NULL; in pick_next_task()
6075 * rq->core_pick can be NULL if no selection was made for a CPU because in pick_next_task()
6076 * it was either offline or went offline during a sibling's core-wide in pick_next_task()
6077 * selection. In this case, do a core-wide selection. in pick_next_task()
6079 if (rq->core->core_pick_seq == rq->core->core_task_seq && in pick_next_task()
6080 rq->core->core_pick_seq != rq->core_sched_seq && in pick_next_task()
6081 rq->core_pick) { in pick_next_task()
6082 WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq); in pick_next_task()
6084 next = rq->core_pick; in pick_next_task()
6085 rq->dl_server = rq->core_dl_server; in pick_next_task()
6086 rq->core_pick = NULL; in pick_next_task()
6087 rq->core_dl_server = NULL; in pick_next_task()
6094 need_sync = !!rq->core->core_cookie; in pick_next_task()
6097 rq->core->core_cookie = 0UL; in pick_next_task()
6098 if (rq->core->core_forceidle_count) { in pick_next_task()
6100 update_rq_clock(rq->core); in pick_next_task()
6105 rq->core->core_forceidle_start = 0; in pick_next_task()
6106 rq->core->core_forceidle_count = 0; in pick_next_task()
6107 rq->core->core_forceidle_occupation = 0; in pick_next_task()
6113 * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq in pick_next_task()
6122 rq->core->core_task_seq++; in pick_next_task()
6130 if (!next->core_cookie) { in pick_next_task()
6131 rq->core_pick = NULL; in pick_next_task()
6132 rq->core_dl_server = NULL; in pick_next_task()
6147 * Tie-break prio towards the current CPU in pick_next_task()
6157 if (i != cpu && (rq_i != rq->core || !core_clock_updated)) in pick_next_task()
6160 rq_i->core_pick = p = pick_task(rq_i); in pick_next_task()
6161 rq_i->core_dl_server = rq_i->dl_server; in pick_next_task()
6167 cookie = rq->core->core_cookie = max->core_cookie; in pick_next_task()
6175 p = rq_i->core_pick; in pick_next_task()
6185 rq_i->core_pick = p; in pick_next_task()
6186 rq_i->core_dl_server = NULL; in pick_next_task()
6188 if (p == rq_i->idle) { in pick_next_task()
6189 if (rq_i->nr_running) { in pick_next_task()
6190 rq->core->core_forceidle_count++; in pick_next_task()
6192 rq->core->core_forceidle_seq++; in pick_next_task()
6199 if (schedstat_enabled() && rq->core->core_forceidle_count) { in pick_next_task()
6200 rq->core->core_forceidle_start = rq_clock(rq->core); in pick_next_task()
6201 rq->core->core_forceidle_occupation = occ; in pick_next_task()
6204 rq->core->core_pick_seq = rq->core->core_task_seq; in pick_next_task()
6205 next = rq->core_pick; in pick_next_task()
6206 rq->core_sched_seq = rq->core->core_pick_seq; in pick_next_task()
6214 * NOTE: L1TF -- at this point we're no longer running the old task and in pick_next_task()
6216 * their task. This ensures there is no inter-sibling overlap between in pick_next_task()
6217 * non-matching user state. in pick_next_task()
6226 * picked for it. That's Ok - it will pick tasks for itself, in pick_next_task()
6229 if (!rq_i->core_pick) in pick_next_task()
6233 * Update for new !FI->FI transitions, or if continuing to be in !FI: in pick_next_task()
6240 if (!(fi_before && rq->core->core_forceidle_count)) in pick_next_task()
6241 task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count); in pick_next_task()
6243 rq_i->core_pick->core_occupation = occ; in pick_next_task()
6246 rq_i->core_pick = NULL; in pick_next_task()
6247 rq_i->core_dl_server = NULL; in pick_next_task()
6252 WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick)); in pick_next_task()
6254 if (rq_i->curr == rq_i->core_pick) { in pick_next_task()
6255 rq_i->core_pick = NULL; in pick_next_task()
6256 rq_i->core_dl_server = NULL; in pick_next_task()
6265 if (rq->core->core_forceidle_count && next == rq->idle) in pick_next_task()
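/*
 * Editor's sketch of the core-scheduling selection excerpted above, stripped
 * of sched classes, locking and the pick/task sequence counters: the
 * highest-priority task on the whole core decides the cookie, and each SMT
 * sibling then runs its best cookie-matching task or is forced idle (the
 * core_forceidle_count case). Userspace model, not kernel code.
 */
#include <stdio.h>
#include <stddef.h>

struct mtask { int prio; unsigned long cookie; const char *name; };

/* Best cookie-matching task on one sibling's runqueue; NULL == forced idle. */
static const struct mtask *pick_cookie(const struct mtask *rq, size_t n,
				       unsigned long cookie)
{
	const struct mtask *best = NULL;

	for (size_t i = 0; i < n; i++) {
		if (rq[i].cookie != cookie)
			continue;
		if (!best || rq[i].prio > best->prio)
			best = &rq[i];
	}
	return best;
}

int main(void)
{
	/* Two SMT siblings; larger prio means more important in this model. */
	const struct mtask cpu0[] = { { 10, 0xA, "A0" }, { 5, 0xB, "B0" } };
	const struct mtask cpu1[] = { {  7, 0xB, "B1" } };
	const struct mtask *rqs[] = { cpu0, cpu1 };
	const size_t nr[] = { 2, 1 };

	/* Step 1: the highest-priority task on the core sets the cookie. */
	const struct mtask *max = NULL;
	for (size_t c = 0; c < 2; c++)
		for (size_t i = 0; i < nr[c]; i++)
			if (!max || rqs[c][i].prio > max->prio)
				max = &rqs[c][i];

	/* Step 2: every sibling runs its best task with that cookie, or idles. */
	for (size_t c = 0; c < 2; c++) {
		const struct mtask *p = pick_cookie(rqs[c], nr[c], max->cookie);
		printf("cpu%zu runs %s\n", c, p ? p->name : "idle (forced idle)");
	}
	return 0;
}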
6281 cookie = dst->core->core_cookie; in try_steal_cookie()
6285 if (dst->curr != dst->idle) in try_steal_cookie()
6293 if (p == src->core_pick || p == src->curr) in try_steal_cookie()
6299 if (p->core_occupation > dst->idle->core_occupation) in try_steal_cookie()
6367 if (!rq->core->core_cookie) in queue_core_balance()
6370 if (!rq->nr_running) /* not forced idle */ in queue_core_balance()
6373 queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance); in queue_core_balance()
6377 sched_core_lock(*_T->lock, &_T->flags),
6378 sched_core_unlock(*_T->lock, &_T->flags),
6389 WARN_ON_ONCE(rq->core != rq); in sched_core_cpu_starting()
6400 if (rq->core == rq) { in sched_core_cpu_starting()
6414 rq->core = core_rq; in sched_core_cpu_starting()
6416 WARN_ON_ONCE(rq->core != core_rq); in sched_core_cpu_starting()
6430 WARN_ON_ONCE(rq->core != rq); in sched_core_cpu_deactivate()
6435 if (rq->core != rq) in sched_core_cpu_deactivate()
6450 core_rq->core_task_seq = rq->core_task_seq; in sched_core_cpu_deactivate()
6451 core_rq->core_pick_seq = rq->core_pick_seq; in sched_core_cpu_deactivate()
6452 core_rq->core_cookie = rq->core_cookie; in sched_core_cpu_deactivate()
6453 core_rq->core_forceidle_count = rq->core_forceidle_count; in sched_core_cpu_deactivate()
6454 core_rq->core_forceidle_seq = rq->core_forceidle_seq; in sched_core_cpu_deactivate()
6455 core_rq->core_forceidle_occupation = rq->core_forceidle_occupation; in sched_core_cpu_deactivate()
6462 core_rq->core_forceidle_start = 0; in sched_core_cpu_deactivate()
6467 rq->core = core_rq; in sched_core_cpu_deactivate()
6475 if (rq->core != rq) in sched_core_cpu_dying()
6476 rq->core = rq; in sched_core_cpu_dying()
6499 #define SM_IDLE (-1)
6519 WRITE_ONCE(p->__state, TASK_RUNNING); in try_to_block_task()
6529 * to be selectable for proxy-execution. in try_to_block_task()
6534 p->sched_contributes_to_load = in try_to_block_task()
6544 * prev_state = prev->state; if (p->on_rq && ...) in try_to_block_task()
6546 * p->on_rq = 0; smp_acquire__after_ctrl_dep(); in try_to_block_task()
6547 * p->state = TASK_WAKING in try_to_block_task()
6551 * After this, schedule() must not care about p->state any more. in try_to_block_task()
6560 put_prev_set_next_task(rq, rq->donor, rq->idle); in proxy_resched_idle()
6561 rq_set_donor(rq, rq->idle); in proxy_resched_idle()
6562 set_tsk_need_resched(rq->idle); in proxy_resched_idle()
6563 return rq->idle; in proxy_resched_idle()
6568 unsigned long state = READ_ONCE(donor->__state); in __proxy_deactivate()
6576 * As once we deactivate donor, donor->on_rq is set to zero, in __proxy_deactivate()
6579 * after that point. So things like cfs_rq->curr or rq->donor in __proxy_deactivate()
6591 * as unblocked, as we aren't doing proxy-migrations in proxy_deactivate()
6594 donor->blocked_on = NULL; in proxy_deactivate()
6602 * Follow the blocked-on relation:
6603 * task->blocked_on -> mutex->owner -> task...
6607 * p->pi_lock
6608 * rq->lock
6609 * mutex->wait_lock
6624 mutex = p->blocked_on; in find_proxy_task()
6629 * By taking mutex->wait_lock we hold off concurrent mutex_unlock() in find_proxy_task()
6632 guard(raw_spinlock)(&mutex->wait_lock); in find_proxy_task()
6651 if (!READ_ONCE(owner->on_rq) || owner->se.sched_delayed) { in find_proxy_task()
6665 * rq lock. As a simple solution, just schedule rq->idle to give in find_proxy_task()
6674 * It's possible to race where after we check owner->on_rq in find_proxy_task()
6688 * lock(&rq->lock); in find_proxy_task()
6692 * donor(owner) = current->blocked_donor; in find_proxy_task()
6704 * So schedule rq->idle so that ttwu_runnable() can get the rq in find_proxy_task()
6711 * rq, therefore holding @rq->lock is sufficient to in find_proxy_task()
6716 WARN_ON_ONCE(owner && !owner->on_rq); in find_proxy_task()
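/*
 * Editor's sketch of the blocked-on chain walk described above: starting from
 * the selected (donor) task, follow task->blocked_on -> mutex->owner until we
 * reach a task that is not blocked, and run that one on the donor's behalf.
 * All of the pi_lock/rq->lock/wait_lock ordering, migration and sched_delayed
 * handling of the real find_proxy_task() is deliberately omitted.
 */
#include <stdio.h>

struct mmutex;

struct mtask {
	const char *comm;
	struct mmutex *blocked_on;	/* mutex this task waits for, or NULL */
};

struct mmutex {
	struct mtask *owner;
};

static struct mtask *find_proxy(struct mtask *donor)
{
	struct mtask *p = donor;

	while (p->blocked_on && p->blocked_on->owner)
		p = p->blocked_on->owner;
	return p;	/* runnable end of the chain */
}

int main(void)
{
	struct mtask C = { "C", NULL };		/* owns B's mutex, runnable */
	struct mmutex m2 = { &C };
	struct mtask B = { "B", &m2 };		/* owns A's mutex, blocked on m2 */
	struct mmutex m1 = { &B };
	struct mtask A = { "A", &m1 };		/* the scheduler's pick (donor) */

	printf("donor %s, proxy runs %s\n", A.comm, find_proxy(&A)->comm);
	return 0;
}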
6739 * only deal with that by means of a dequeue/enqueue cycle. :-/ in proxy_tag_curr()
6759 * task to the run-queue and that's it.
6761 * Now, if the new task added to the run-queue preempts the current
6765 * - If the kernel is preemptible (CONFIG_PREEMPTION=y):
6767 * - in syscall or exception context, at the next outermost
6771 * - in IRQ context, return from interrupt-handler to
6774 * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
6777 * - cond_resched() call
6778 * - explicit schedule() call
6779 * - return from syscall or exception to user-space
6780 * - return from interrupt-handler to user-space
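/*
 * Editor's illustration of the "explicit cond_resched()/schedule() call" case
 * in the list above: without kernel preemption, a long-running loop has to
 * offer voluntary preemption points itself. This is a userspace analogue;
 * the flag and sched_yield() are stand-ins for TIF_NEED_RESCHED and the real
 * reschedule, not the kernel API.
 */
#include <stdio.h>
#include <stdbool.h>
#include <sched.h>

static volatile bool need_resched;	/* set "remotely", e.g. on a wakeup */

static void cond_resched_model(void)
{
	if (need_resched) {
		need_resched = false;
		sched_yield();		/* give the CPU away voluntarily */
	}
}

int main(void)
{
	for (long i = 0; i < 10000000; i++) {
		/* ... long-running work ... */
		if (i == 5000000)
			need_resched = true;	/* simulate a pending wakeup */
		cond_resched_model();		/* voluntary preemption point */
	}
	puts("done");
	return 0;
}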
6804 prev = rq->curr; in __schedule()
6817 * Make sure that signal_pending_state()->signal_pending() below in __schedule()
6824 * LOCK rq->lock LOCK p->pi_state in __schedule()
6826 * if (signal_pending_state()) if (p->state & @state) in __schedule()
6829 * after coming from user-space, before storing to rq->curr; this in __schedule()
6837 rq->clock_update_flags <<= 1; in __schedule()
6839 rq->clock_update_flags = RQCF_UPDATED; in __schedule()
6841 switch_count = &prev->nivcsw; in __schedule()
6847 * We must load prev->state once (task_struct::state is volatile), such in __schedule()
6850 prev_state = READ_ONCE(prev->__state); in __schedule()
6853 if (!rq->nr_running && !scx_enabled()) { in __schedule()
6860 * in order to keep mutex-blocked tasks on the runqueue in __schedule()
6861 * for selection with proxy-exec (without proxy-exec in __schedule()
6866 switch_count = &prev->nvcsw; in __schedule()
6870 next = pick_next_task(rq, rq->donor, &rf); in __schedule()
6876 if (next == rq->idle) in __schedule()
6883 rq->last_seen_need_resched_ns = 0; in __schedule()
6887 rq->nr_switches++; in __schedule()
6889 * RCU users of rcu_dereference(rq->curr) may not see in __schedule()
6892 RCU_INIT_POINTER(rq->curr, next); in __schedule()
6900 * rq->curr, before returning to user-space. in __schedule()
6904 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC, in __schedule()
6905 * RISC-V. switch_mm() relies on membarrier_arch_switch_mm() in __schedule()
6906 * on PowerPC and on RISC-V. in __schedule()
6907 * - finish_lock_switch() for weakly-ordered in __schedule()
6909 * - switch_to() for arm64 (weakly-ordered, spin_unlock in __schedule()
6915 * On RISC-V, this barrier pairing is also needed for the in __schedule()
6924 prev->se.sched_delayed); in __schedule()
6948 current->flags |= PF_NOFREEZE; in do_task_dead()
6953 /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */ in do_task_dead()
6965 * will use a blocking primitive -- which would lead to recursion. in sched_submit_work()
6969 task_flags = tsk->flags; in sched_submit_work()
6984 WARN_ON_ONCE(current->__state & TASK_RTLOCK_WAIT); in sched_submit_work()
6990 blk_flush_plug(tsk->plug, true); in sched_submit_work()
6997 if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER | PF_BLOCK_TS)) { in sched_update_worker()
6998 if (tsk->flags & PF_BLOCK_TS) in sched_update_worker()
7000 if (tsk->flags & PF_WQ_WORKER) in sched_update_worker()
7002 else if (tsk->flags & PF_IO_WORKER) in sched_update_worker()
7021 lockdep_assert(!tsk->sched_rt_mutex); in schedule()
7033 * state (have scheduled out non-voluntarily) by making sure that all
7036 * (schedule out non-voluntarily).
7050 WARN_ON_ONCE(current->__state); in schedule_idle()
7061 * or we have been woken up remotely but the IPI has not yet arrived, in schedule_user()
7076 * schedule_preempt_disabled - called with preemption disabled
7126 * This is the entry point to schedule() from in-kernel preemption
7132 * If there is a non-zero preempt_count or interrupts are disabled, in preempt_schedule()
7164 * preempt_schedule_notrace - preempt_schedule called by tracing
7268 return try_to_wake_up(curr->private, mode, wake_flags); in default_wake_function()
7292 * bit-fields. Since it's a local thing, use int. Keep the generic sounding
7300 lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1)); in rt_mutex_pre_schedule()
7306 lockdep_assert(current->sched_rt_mutex); in rt_mutex_schedule()
7313 lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0)); in rt_mutex_post_schedule()
7317 * rt_mutex_setprio - set the current priority of a task
7322 * not touch ->normal_prio like __setscheduler().
7335 /* XXX used to be waiter->prio, not waiter->task->prio */ in rt_mutex_setprio()
7336 prio = __rt_effective_prio(pi_task, p->normal_prio); in rt_mutex_setprio()
7341 if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio)) in rt_mutex_setprio()
7347 * Set under pi_lock && rq->lock, such that the value can be used under in rt_mutex_setprio()
7352 * ensure a task is de-boosted (pi_task is set to NULL) before the in rt_mutex_setprio()
7354 * points to a blocked task -- which guarantees the task is present. in rt_mutex_setprio()
7356 p->pi_top_task = pi_task; in rt_mutex_setprio()
7361 if (prio == p->prio && !dl_prio(prio)) in rt_mutex_setprio()
7365 * Idle task boosting is a no-no in general. There is one in rt_mutex_setprio()
7369 * the timer wheel base->lock on the CPU and another CPU wants in rt_mutex_setprio()
7376 if (unlikely(p == rq->idle)) { in rt_mutex_setprio()
7377 WARN_ON(p != rq->curr); in rt_mutex_setprio()
7378 WARN_ON(p->pi_blocked_on); in rt_mutex_setprio()
7383 oldprio = p->prio; in rt_mutex_setprio()
7388 prev_class = p->sched_class; in rt_mutex_setprio()
7389 next_class = __setscheduler_class(p->policy, prio); in rt_mutex_setprio()
7391 if (prev_class != next_class && p->se.sched_delayed) in rt_mutex_setprio()
7403 * 1. -rt task is running and holds mutex A in rt_mutex_setprio()
7404 * --> -dl task blocks on mutex A in rt_mutex_setprio()
7406 * 2. -dl task is running and holds mutex A in rt_mutex_setprio()
7407 * --> -dl task blocks on mutex A and could preempt the in rt_mutex_setprio()
7411 if (!dl_prio(p->normal_prio) || in rt_mutex_setprio()
7412 (pi_task && dl_prio(pi_task->prio) && in rt_mutex_setprio()
7413 dl_entity_preempt(&pi_task->dl, &p->dl))) { in rt_mutex_setprio()
7414 p->dl.pi_se = pi_task->dl.pi_se; in rt_mutex_setprio()
7417 p->dl.pi_se = &p->dl; in rt_mutex_setprio()
7421 p->dl.pi_se = &p->dl; in rt_mutex_setprio()
7426 p->dl.pi_se = &p->dl; in rt_mutex_setprio()
7428 p->rt.timeout = 0; in rt_mutex_setprio()
7431 p->sched_class = next_class; in rt_mutex_setprio()
7432 p->prio = prio; in rt_mutex_setprio()
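/*
 * Editor's toy model of the PI boosting that rt_mutex_setprio() applies
 * above: the effective priority is the more urgent of the task's own priority
 * and the top pi-waiter's priority (lower number == higher priority on the
 * kernel's prio scale). Deadline inheritance and the sched-class switch are
 * ignored here.
 */
#include <stdio.h>

static int effective_prio_model(int normal_prio, int top_pi_waiter_prio,
				int has_waiter)
{
	if (has_waiter && top_pi_waiter_prio < normal_prio)
		return top_pi_waiter_prio;	/* boosted */
	return normal_prio;			/* no boost / deboost back */
}

int main(void)
{
	/* A prio-120 (normal) task boosted by a blocked RT-prio-10 waiter. */
	printf("boosted to %d\n", effective_prio_model(120, 10, 1));
	/* Waiter goes away: deboost back to the task's own priority. */
	printf("deboosted to %d\n", effective_prio_model(120, 10, 0));
	return 0;
}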
7462 * In PREEMPT_RCU kernels, ->rcu_read_lock_nesting tells the tick in __cond_resched()
7463 * whether the current CPU is in an RCU read-side critical section, in __cond_resched()
7465 * in kernel context. In contrast, in non-preemptible kernels, in __cond_resched()
7466 * RCU readers leave no in-memory hints, which means that CPU-bound in __cond_resched()
7471 * A third case, preemptible, but non-PREEMPT_RCU provides for in __cond_resched()
7515 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
7518 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
7579 # include <linux/irq-entry-common.h>
7591 * cond_resched <- __cond_resched
7592 * might_resched <- RET0
7593 * preempt_schedule <- NOP
7594 * preempt_schedule_notrace <- NOP
7595 * irqentry_exit_cond_resched <- NOP
7596 * dynamic_preempt_lazy <- false
7599 * cond_resched <- __cond_resched
7600 * might_resched <- __cond_resched
7601 * preempt_schedule <- NOP
7602 * preempt_schedule_notrace <- NOP
7603 * irqentry_exit_cond_resched <- NOP
7604 * dynamic_preempt_lazy <- false
7607 * cond_resched <- RET0
7608 * might_resched <- RET0
7609 * preempt_schedule <- preempt_schedule
7610 * preempt_schedule_notrace <- preempt_schedule_notrace
7611 * irqentry_exit_cond_resched <- irqentry_exit_cond_resched
7612 * dynamic_preempt_lazy <- false
7615 * cond_resched <- RET0
7616 * might_resched <- RET0
7617 * preempt_schedule <- preempt_schedule
7618 * preempt_schedule_notrace <- preempt_schedule_notrace
7619 * irqentry_exit_cond_resched <- irqentry_exit_cond_resched
7620 * dynamic_preempt_lazy <- true
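/*
 * Editor's analogy for the mode table above (the kernel uses static calls and
 * static keys, not plain function pointers): dynamic preemption re-points a
 * handful of entry points at either a real implementation or a stub,
 * depending on the selected model. Names below are model-only.
 */
#include <stdio.h>

static int real_cond_resched(void) { puts("reschedule point"); return 1; }
static int ret0(void) { return 0; }		/* the "RET0" stub */

/* One switchable entry point, standing in for the static calls above. */
static int (*cond_resched_fn)(void) = real_cond_resched;

enum preempt_mode { MODE_NONE, MODE_VOLUNTARY, MODE_FULL };

static void sched_dynamic_update_model(enum preempt_mode m)
{
	/* full preemption doesn't need explicit cond_resched() points */
	cond_resched_fn = (m == MODE_FULL) ? ret0 : real_cond_resched;
}

int main(void)
{
	sched_dynamic_update_model(MODE_VOLUNTARY);
	cond_resched_fn();		/* prints: reschedule point */
	sched_dynamic_update_model(MODE_FULL);
	cond_resched_fn();		/* no-op, preemption is implicit */
	return 0;
}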
7624 preempt_dynamic_undefined = -1,
7651 return -EINVAL; in sched_dynamic_mode()
7672 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in in __sched_dynamic_update()
7784 #define preempt_dynamic_mode -1
7837 int old_iowait = current->in_iowait; in io_schedule_prepare()
7839 current->in_iowait = 1; in io_schedule_prepare()
7840 blk_flush_plug(current->plug, true); in io_schedule_prepare()
7846 current->in_iowait = token; in io_schedule_finish()
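/*
 * Editor's sketch of the save/set/restore token pattern used by
 * io_schedule_prepare()/io_schedule_finish() above: the old in_iowait value
 * is handed back as a token so nested users restore whatever they found,
 * instead of blindly clearing the flag. Userspace model, not kernel code.
 */
#include <stdio.h>

static int in_iowait;

static int iowait_prepare(void)
{
	int token = in_iowait;	/* remember what the outer context had */
	in_iowait = 1;
	return token;
}

static void iowait_finish(int token)
{
	in_iowait = token;	/* restore, don't just clear */
}

static void inner_wait(void)
{
	int t = iowait_prepare();
	/* ... block for IO ... */
	iowait_finish(t);
}

int main(void)
{
	int t = iowait_prepare();	/* outer IO wait */
	inner_wait();			/* nested IO wait */
	printf("still marked iowait inside outer section: %d\n", in_iowait);
	iowait_finish(t);
	printf("after outer finish: %d\n", in_iowait);
	return 0;
}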
7850 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
7884 pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p)); in sched_show_task()
7892 ppid = task_pid_nr(rcu_dereference(p->real_parent)); in sched_show_task()
7894 pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d ppid:%-6d task_flags:0x%04x flags:0x%08lx\n", in sched_show_task()
7896 ppid, p->flags, read_task_thread_flags(p)); in sched_show_task()
7909 unsigned int state = READ_ONCE(p->__state); in state_filter_match()
7937 * reset the NMI-timeout, listing all files on a slow in show_state_filter()
7961 * init_idle - set up an idle thread for a given CPU
7977 raw_spin_lock_irqsave(&idle->pi_lock, flags); in init_idle()
7980 idle->__state = TASK_RUNNING; in init_idle()
7981 idle->se.exec_start = sched_clock(); in init_idle()
7984 * look like a proper per-CPU kthread. in init_idle()
7986 idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY; in init_idle()
7996 * holding rq->lock, the CPU isn't yet set to this CPU so the in init_idle()
8000 * use task_rq_lock() here and obtain the other rq->lock. in init_idle()
8008 rq->idle = idle; in init_idle()
8010 rcu_assign_pointer(rq->curr, idle); in init_idle()
8011 idle->on_rq = TASK_ON_RQ_QUEUED; in init_idle()
8012 idle->on_cpu = 1; in init_idle()
8014 raw_spin_unlock_irqrestore(&idle->pi_lock, flags); in init_idle()
8022 idle->sched_class = &idle_sched_class; in init_idle()
8025 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); in init_idle()
8054 if (p->flags & PF_NO_SETAFFINITY) in task_can_attach()
8055 ret = -EINVAL; in task_can_attach()
8072 if (!cpumask_test_cpu(target_cpu, p->cpus_ptr)) in migrate_task_to()
8073 return -EINVAL; in migrate_task_to()
8100 p->numa_preferred_nid = nid; in sched_setnuma()
8122 struct mm_struct *mm = current->active_mm; in sched_force_init_mm()
8127 current->active_mm = &init_mm; in sched_force_init_mm()
8144 raw_spin_lock_irq(&p->pi_lock); in __balance_push_cpu_stop()
8150 cpu = select_fallback_rq(rq->cpu, p); in __balance_push_cpu_stop()
8155 raw_spin_unlock_irq(&p->pi_lock); in __balance_push_cpu_stop()
8165 * Ensure we only run per-cpu kthreads once the CPU goes !active.
8172 struct task_struct *push_task = rq->curr; in balance_push()
8179 rq->balance_callback = &balance_push_callback; in balance_push()
8185 if (!cpu_dying(rq->cpu) || rq != this_rq()) in balance_push()
8189 * Both the cpu-hotplug and stop task are in this case and are in balance_push()
8206 if (!rq->nr_running && !rq_has_pinned_tasks(rq) && in balance_push()
8207 rcuwait_active(&rq->hotplug_wait)) { in balance_push()
8209 rcuwait_wake_up(&rq->hotplug_wait); in balance_push()
8217 * Temporarily drop rq->lock such that we can wake up the stop task. in balance_push()
8222 stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task, in balance_push()
8240 WARN_ON_ONCE(rq->balance_callback); in balance_push_set()
8241 rq->balance_callback = &balance_push_callback; in balance_push_set()
8242 } else if (rq->balance_callback == &balance_push_callback) { in balance_push_set()
8243 rq->balance_callback = NULL; in balance_push_set()
8258 rcuwait_wait_event(&rq->hotplug_wait, in balance_hotplug_wait()
8259 rq->nr_running == 1 && !rq_has_pinned_tasks(rq), in balance_hotplug_wait()
8281 if (!rq->online) { in set_rq_online()
8284 cpumask_set_cpu(rq->cpu, rq->rd->online); in set_rq_online()
8285 rq->online = 1; in set_rq_online()
8288 if (class->rq_online) in set_rq_online()
8289 class->rq_online(rq); in set_rq_online()
8296 if (rq->online) { in set_rq_offline()
8301 if (class->rq_offline) in set_rq_offline()
8302 class->rq_offline(rq); in set_rq_offline()
8305 cpumask_clear_cpu(rq->cpu, rq->rd->online); in set_rq_offline()
8306 rq->online = 0; in set_rq_offline()
8315 if (rq->rd) { in sched_set_rq_online()
8316 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); in sched_set_rq_online()
8327 if (rq->rd) { in sched_set_rq_offline()
8328 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); in sched_set_rq_offline()
8357 if (--num_cpus_frozen) in cpuset_cpu_active()
8461 * preempt-disabled and RCU users of this state to go away such that in sched_cpu_deactivate()
8497 rq->calc_load_update = calc_load_update; in sched_rq_cpu_starting()
8533 * stable. We need to take the tear-down thread which is calling this into
8536 * Also see the comment "Global load-average calculations".
8553 printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running); in dump_rq_tasks()
8561 printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm); in dump_rq_tasks()
8575 if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) { in sched_cpu_dying()
8579 dl_server_stop(&rq->fair_server); in sched_cpu_dying()
8603 /* Move init over to a non-isolated CPU */ in sched_init_smp()
8606 current->flags &= ~PF_NO_SETAFFINITY; in sched_init_smp()
8712 raw_spin_lock_init(&rq->__lock); in sched_init()
8713 rq->nr_running = 0; in sched_init()
8714 rq->calc_load_active = 0; in sched_init()
8715 rq->calc_load_update = jiffies + LOAD_FREQ; in sched_init()
8716 init_cfs_rq(&rq->cfs); in sched_init()
8717 init_rt_rq(&rq->rt); in sched_init()
8718 init_dl_rq(&rq->dl); in sched_init()
8720 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); in sched_init()
8721 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; in sched_init()
8725 * In case of task-groups formed through the cgroup filesystem, it in sched_init()
8728 * root_task_group and its child task-groups in a fair manner, in sched_init()
8729 * based on each entity's (task or task-group's) weight in sched_init()
8730 * (se->load.weight). in sched_init()
8739 * directly in rq->cfs (i.e root_task_group->se[] = NULL). in sched_init()
8741 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); in sched_init()
8750 rq->rt.rt_runtime = global_rt_runtime(); in sched_init()
8751 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); in sched_init()
8753 rq->sd = NULL; in sched_init()
8754 rq->rd = NULL; in sched_init()
8755 rq->cpu_capacity = SCHED_CAPACITY_SCALE; in sched_init()
8756 rq->balance_callback = &balance_push_callback; in sched_init()
8757 rq->active_balance = 0; in sched_init()
8758 rq->next_balance = jiffies; in sched_init()
8759 rq->push_cpu = 0; in sched_init()
8760 rq->cpu = i; in sched_init()
8761 rq->online = 0; in sched_init()
8762 rq->idle_stamp = 0; in sched_init()
8763 rq->avg_idle = 2*sysctl_sched_migration_cost; in sched_init()
8764 rq->max_idle_balance_cost = sysctl_sched_migration_cost; in sched_init()
8766 INIT_LIST_HEAD(&rq->cfs_tasks); in sched_init()
8770 rq->last_blocked_load_update_tick = jiffies; in sched_init()
8771 atomic_set(&rq->nohz_flags, 0); in sched_init()
8773 INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq); in sched_init()
8776 rcuwait_init(&rq->hotplug_wait); in sched_init()
8779 atomic_set(&rq->nr_iowait, 0); in sched_init()
8783 rq->core = rq; in sched_init()
8784 rq->core_pick = NULL; in sched_init()
8785 rq->core_dl_server = NULL; in sched_init()
8786 rq->core_enabled = 0; in sched_init()
8787 rq->core_tree = RB_ROOT; in sched_init()
8788 rq->core_forceidle_count = 0; in sched_init()
8789 rq->core_forceidle_occupation = 0; in sched_init()
8790 rq->core_forceidle_start = 0; in sched_init()
8792 rq->core_cookie = 0UL; in sched_init()
8794 zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i)); in sched_init()
8808 * is dressed up as a per-CPU kthread and thus needs to play the part in sched_init()
8809 * if we want to avoid special-casing it in code that deals with per-CPU in sched_init()
8846 * Blocking primitives will set (and therefore destroy) current->state, in __might_sleep()
8850 WARN_ONCE(state != TASK_RUNNING && current->task_state_change, in __might_sleep()
8853 (void *)current->task_state_change, in __might_sleep()
8854 (void *)current->task_state_change); in __might_sleep()
8892 !is_idle_task(current) && !current->non_block_count) || in __might_resched()
8907 in_atomic(), irqs_disabled(), current->non_block_count, in __might_resched()
8908 current->pid, current->comm); in __might_resched()
8952 current->pid, current->comm); in __cant_sleep()
8984 current->pid, current->comm); in __cant_migrate()
9007 if (p->flags & PF_KTHREAD) in normalize_rt_tasks()
9010 p->se.exec_start = 0; in normalize_rt_tasks()
9011 schedstat_set(p->stats.wait_start, 0); in normalize_rt_tasks()
9012 schedstat_set(p->stats.sleep_start, 0); in normalize_rt_tasks()
9013 schedstat_set(p->stats.block_start, 0); in normalize_rt_tasks()
9037 * stopped - every CPU needs to be quiescent, and no scheduling
9044 * curr_task - return the current task for a given CPU.
9069 uclamp_se_set(&tg->uclamp_req[clamp_id], in alloc_uclamp_sched_group()
9071 tg->uclamp[clamp_id] = parent->uclamp[clamp_id]; in alloc_uclamp_sched_group()
9097 call_rcu(&tg->rcu, sched_free_group_rcu); in sched_unregister_group()
9107 return ERR_PTR(-ENOMEM); in sched_create_group()
9122 return ERR_PTR(-ENOMEM); in sched_create_group()
9130 list_add_tail_rcu(&tg->list, &task_groups); in sched_online_group()
9135 tg->parent = parent; in sched_online_group()
9136 INIT_LIST_HEAD(&tg->children); in sched_online_group()
9137 list_add_rcu(&tg->siblings, &parent->children); in sched_online_group()
9153 call_rcu(&tg->rcu, sched_unregister_group_rcu); in sched_destroy_group()
9174 list_del_rcu(&tg->list); in sched_release_group()
9175 list_del_rcu(&tg->siblings); in sched_release_group()
9191 tsk->sched_task_group = tg; in sched_change_group()
9194 if (tsk->sched_class->task_change_group) in sched_change_group()
9195 tsk->sched_class->task_change_group(tsk); in sched_change_group()
9205 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
9257 return ERR_PTR(-ENOMEM); in cpu_cgroup_css_alloc()
9259 return &tg->css; in cpu_cgroup_css_alloc()
9266 struct task_group *parent = css_tg(css->parent); in cpu_cgroup_css_online()
9321 return -EINVAL; in cpu_cgroup_can_attach()
9356 uc_parent = css_tg(css)->parent in cpu_util_update_eff()
9357 ? css_tg(css)->parent->uclamp : NULL; in cpu_util_update_eff()
9361 eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value; in cpu_util_update_eff()
9373 uc_se = css_tg(css)->uclamp; in cpu_util_update_eff()
9423 req.ret = -ERANGE; in capacity_from_percent()
9451 if (tg->uclamp_req[clamp_id].value != req.util) in cpu_uclamp_write()
9452 uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false); in cpu_uclamp_write()
9458 tg->uclamp_pct[clamp_id] = req.percent; in cpu_uclamp_write()
9490 util_clamp = tg->uclamp_req[clamp_id].value; in cpu_uclamp_print()
9498 percent = tg->uclamp_pct[clamp_id]; in cpu_uclamp_print()
9520 return scale_load_down(tg->shares); in tg_weight()
9522 return sched_weight_from_cgroup(tg->scx.weight); in tg_weight()
9556 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in tg_set_cfs_bandwidth()
9569 * Prevent race between setting of cfs_rq->runtime_enabled and in tg_set_cfs_bandwidth()
9580 runtime_was_enabled = cfs_b->quota != RUNTIME_INF; in tg_set_cfs_bandwidth()
9582 * If we need to toggle cfs_bandwidth_used, off->on must occur in tg_set_cfs_bandwidth()
9583 * before making related changes, and on->off must occur afterwards in tg_set_cfs_bandwidth()
9588 scoped_guard (raw_spinlock_irq, &cfs_b->lock) { in tg_set_cfs_bandwidth()
9589 cfs_b->period = ns_to_ktime(period); in tg_set_cfs_bandwidth()
9590 cfs_b->quota = quota; in tg_set_cfs_bandwidth()
9591 cfs_b->burst = burst; in tg_set_cfs_bandwidth()
9604 struct cfs_rq *cfs_rq = tg->cfs_rq[i]; in tg_set_cfs_bandwidth()
9605 struct rq *rq = cfs_rq->rq; in tg_set_cfs_bandwidth()
9608 cfs_rq->runtime_enabled = runtime_enabled; in tg_set_cfs_bandwidth()
9609 cfs_rq->runtime_remaining = 0; in tg_set_cfs_bandwidth()
9611 if (cfs_rq->throttled) in tg_set_cfs_bandwidth()
9625 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period); in tg_get_cfs_period()
9635 if (tg->cfs_bandwidth.quota == RUNTIME_INF) in tg_get_cfs_quota()
9638 quota_us = tg->cfs_bandwidth.quota; in tg_get_cfs_quota()
9648 burst_us = tg->cfs_bandwidth.burst; in tg_get_cfs_burst()
9668 if (tg == d->tg) { in normalize_cfs_quota()
9669 period = d->period; in normalize_cfs_quota()
9670 quota = d->quota; in normalize_cfs_quota()
9677 if (quota == RUNTIME_INF || quota == -1) in normalize_cfs_quota()
9686 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in tg_cfs_schedulable_down()
9687 s64 quota = 0, parent_quota = -1; in tg_cfs_schedulable_down()
9689 if (!tg->parent) { in tg_cfs_schedulable_down()
9692 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth; in tg_cfs_schedulable_down()
9695 parent_quota = parent_b->hierarchical_quota; in tg_cfs_schedulable_down()
9699 * always take the non-RUNTIME_INF min. On cgroup1, only in tg_cfs_schedulable_down()
9713 return -EINVAL; in tg_cfs_schedulable_down()
9716 cfs_b->hierarchical_quota = quota; in tg_cfs_schedulable_down()
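/*
 * Editor's toy model of the "take the non-RUNTIME_INF min" rule mentioned
 * above for the cgroup2 hierarchy walk: a group's effective quota is capped
 * by every ancestor's. Periods, bursts and the runtime encoding of the real
 * tg_cfs_schedulable_down() are simplified away.
 */
#include <stdio.h>

#define RUNTIME_INF (~0ULL)

static unsigned long long effective_quota(const unsigned long long *quota,
					  int depth)
{
	unsigned long long eff = RUNTIME_INF;

	for (int i = 0; i < depth; i++) {
		if (quota[i] != RUNTIME_INF &&
		    (eff == RUNTIME_INF || quota[i] < eff))
			eff = quota[i];
	}
	return eff;
}

int main(void)
{
	/* root (no limit) -> parent (50ms) -> child (asks for 200ms) */
	unsigned long long path[] = { RUNTIME_INF, 50000000ULL, 200000000ULL };

	printf("child effectively gets %llu ns per period\n",
	       effective_quota(path, 3));	/* capped by the parent */
	return 0;
}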
9741 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in cpu_cfs_stat_show()
9743 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods); in cpu_cfs_stat_show()
9744 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled); in cpu_cfs_stat_show()
9745 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time); in cpu_cfs_stat_show()
9753 stats = __schedstats_from_se(tg->se[i]); in cpu_cfs_stat_show()
9754 ws += schedstat_val(stats->wait_sum); in cpu_cfs_stat_show()
9760 seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst); in cpu_cfs_stat_show()
9761 seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time); in cpu_cfs_stat_show()
9772 total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time); in throttled_time_self()
9806 *period_us_p = tg->scx.bw_period_us; in tg_bandwidth()
9808 *quota_us_p = tg->scx.bw_quota_us; in tg_bandwidth()
9810 *burst_us_p = tg->scx.bw_burst_us; in tg_bandwidth()
9830 return -EINVAL; in tg_set_bandwidth()
9836 return -EINVAL; in tg_set_bandwidth()
9845 return -EINVAL; in tg_set_bandwidth()
9853 return -EINVAL; in tg_set_bandwidth()
9859 return -EINVAL; in tg_set_bandwidth()
9863 return -EINVAL; in tg_set_bandwidth()
9879 return quota_us; /* (s64)RUNTIME_INF becomes -1 */ in cpu_quota_read_s64()
9955 return css_tg(css)->idle; in cpu_idle_read_s64()
10082 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in cpu_extra_stat_show()
10085 throttled_usec = cfs_b->throttled_time; in cpu_extra_stat_show()
10087 burst_usec = cfs_b->burst_time; in cpu_extra_stat_show()
10095 cfs_b->nr_periods, cfs_b->nr_throttled, in cpu_extra_stat_show()
10096 throttled_usec, cfs_b->nr_burst, burst_usec); in cpu_extra_stat_show()
10135 return -ERANGE; in cpu_weight_write_u64()
10154 delta = abs(sched_prio_to_weight[prio] - weight); in cpu_weight_nice_read_s64()
10160 return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO); in cpu_weight_nice_read_s64()
10170 return -ERANGE; in cpu_weight_nice_write_s64()
10172 idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO; in cpu_weight_nice_write_s64()
10202 return -EINVAL; in cpu_period_quota_parse()
10208 return -EINVAL; in cpu_period_quota_parse()
10332 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
10333 * nice 1, it will get ~10% less CPU time than another CPU-bound task
10337 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
10343 /* -20 */ 88761, 71755, 56483, 46273, 36291,
10344 /* -15 */ 29154, 23254, 18705, 14949, 11916,
10345 /* -10 */ 9548, 7620, 6100, 4904, 3906,
10346 /* -5 */ 3121, 2501, 1991, 1586, 1277,
10354 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, pre-calculated.
10357 * pre-calculated inverse to speed up arithmetics by turning divisions
10361 /* -20 */ 48388, 59856, 76040, 92818, 118348,
10362 /* -15 */ 147320, 184698, 229616, 287308, 360437,
10363 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
10364 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
10379 * @cid_lock: Guarantee forward-progress of cid allocation.
10381 * Concurrency ID allocation within a bitmap is mostly lock-free. The cid_lock
10382 * is only used when contention is detected by the lock-free allocation so
10388 * @use_cid_lock: Select cid allocation behavior: lock-free vs spinlock.
10390 * When @use_cid_lock is 0, the cid allocation is lock-free. When contention is
10399 * mm_cid remote-clear implements a lock-free algorithm to clear per-mm/cpu cid
10405 * (1) Remote-clear should _never_ mark a per-cpu cid UNSET when it is actively
10422 * per-mm/cpu cid value.
10424 * Let's introduce task (Y) which has task->mm == mm and task (N) which has
10425 * task->mm != mm for the rest of the discussion. There are two scheduler state
10428 * (TSA) Store to rq->curr with transition from (N) to (Y)
10430 * (TSB) Store to rq->curr with transition from (Y) to (N)
10432 * On the remote-clear side, there is one transition we care about:
10437 * sides (scheduler, remote-clear). It is always performed with a cmpxchg which
10451 * Context switch CS-1 Remote-clear
10452 * - store to rq->curr: (N)->(Y) (TSA) - cmpxchg to *pcpu_id to LAZY (TMA)
10454 * - switch_mm_cid()
10455 * - memory barrier (see switch_mm_cid()
10459 * - mm_cid_get (next)
10460 * - READ_ONCE(*pcpu_cid) - rcu_dereference(src_rq->curr)
10467 * still an active task on the cpu. Remote-clear will therefore not transition
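/*
 * Editor's sketch (userspace, C11 atomics) of the lazy-put protocol described
 * above: a remote clearer first cmpxchg-marks the per-cpu cid slot lazy, then
 * re-checks whether a task of that mm is still running there; only if not
 * does it cmpxchg the slot to UNSET and release the cid. The CID_LAZY flag
 * and the "active user" check are stand-ins for the kernel's lazy-put flag
 * and its rcu_dereference(rq->curr) checks; memory ordering is left to the
 * default seq_cst atomics.
 */
#include <stdatomic.h>
#include <stdio.h>

#define CID_UNSET	(-1)
#define CID_LAZY	0x40000000	/* model-only lazy-put marker */

static _Atomic int pcpu_cid = 3;		/* cid currently cached on this CPU */
static _Atomic int cpu_runs_this_mm = 0;	/* is rq->curr using this mm? */

static void remote_clear(void)
{
	int cid = atomic_load(&pcpu_cid);

	if (cid == CID_UNSET || (cid & CID_LAZY))
		return;
	/* (1) publish the lazy-put intention */
	if (!atomic_compare_exchange_strong(&pcpu_cid, &cid, cid | CID_LAZY))
		return;				/* raced with someone else */
	/* (2) an active user keeps the cid; it will clear the lazy mark */
	if (atomic_load(&cpu_runs_this_mm))
		return;
	/* (3) nobody is using it: take it back to UNSET and free the cid bit */
	int lazy = cid | CID_LAZY;
	if (atomic_compare_exchange_strong(&pcpu_cid, &lazy, CID_UNSET))
		printf("cid %d reclaimed\n", cid);
}

int main(void)
{
	remote_clear();
	printf("slot: %d\n", atomic_load(&pcpu_cid));	/* -1 == UNSET */
	return 0;
}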
10488 t->migrate_from_cpu = task_cpu(t); in sched_mm_cid_migrate_from()
10496 struct mm_struct *mm = t->mm; in __sched_mm_cid_migrate_from_fetch_cid()
10501 return -1; in __sched_mm_cid_migrate_from_fetch_cid()
10503 last_mm_cid = t->last_mm_cid; in __sched_mm_cid_migrate_from_fetch_cid()
10509 if (last_mm_cid == -1) in __sched_mm_cid_migrate_from_fetch_cid()
10510 return -1; in __sched_mm_cid_migrate_from_fetch_cid()
10511 src_cid = READ_ONCE(src_pcpu_cid->cid); in __sched_mm_cid_migrate_from_fetch_cid()
10513 return -1; in __sched_mm_cid_migrate_from_fetch_cid()
10521 src_task = rcu_dereference(src_rq->curr); in __sched_mm_cid_migrate_from_fetch_cid()
10522 if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) { in __sched_mm_cid_migrate_from_fetch_cid()
10523 t->last_mm_cid = -1; in __sched_mm_cid_migrate_from_fetch_cid()
10524 return -1; in __sched_mm_cid_migrate_from_fetch_cid()
10537 struct mm_struct *mm = t->mm; in __sched_mm_cid_migrate_from_try_steal_cid()
10540 if (src_cid == -1) in __sched_mm_cid_migrate_from_try_steal_cid()
10541 return -1; in __sched_mm_cid_migrate_from_try_steal_cid()
10548 if (!try_cmpxchg(&src_pcpu_cid->cid, &src_cid, lazy_cid)) in __sched_mm_cid_migrate_from_try_steal_cid()
10549 return -1; in __sched_mm_cid_migrate_from_try_steal_cid()
10552 * The implicit barrier after cmpxchg per-mm/cpu cid before loading in __sched_mm_cid_migrate_from_try_steal_cid()
10553 * rq->curr->mm matches the scheduler barrier in context_switch() in __sched_mm_cid_migrate_from_try_steal_cid()
10554 * between store to rq->curr and load of prev and next task's in __sched_mm_cid_migrate_from_try_steal_cid()
10555 * per-mm/cpu cid. in __sched_mm_cid_migrate_from_try_steal_cid()
10557 * The implicit barrier after cmpxchg per-mm/cpu cid before loading in __sched_mm_cid_migrate_from_try_steal_cid()
10558 * rq->curr->mm_cid_active matches the barrier in in __sched_mm_cid_migrate_from_try_steal_cid()
10560 * sched_mm_cid_after_execve() between store to t->mm_cid_active and in __sched_mm_cid_migrate_from_try_steal_cid()
10561 * load of per-mm/cpu cid. in __sched_mm_cid_migrate_from_try_steal_cid()
10566 * the lazy-put flag, this task will be responsible for transitioning in __sched_mm_cid_migrate_from_try_steal_cid()
10567 * from lazy-put flag set to MM_CID_UNSET. in __sched_mm_cid_migrate_from_try_steal_cid()
10570 src_task = rcu_dereference(src_rq->curr); in __sched_mm_cid_migrate_from_try_steal_cid()
10571 if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) { in __sched_mm_cid_migrate_from_try_steal_cid()
10576 t->last_mm_cid = -1; in __sched_mm_cid_migrate_from_try_steal_cid()
10577 return -1; in __sched_mm_cid_migrate_from_try_steal_cid()
10584 if (!try_cmpxchg(&src_pcpu_cid->cid, &lazy_cid, MM_CID_UNSET)) in __sched_mm_cid_migrate_from_try_steal_cid()
10585 return -1; in __sched_mm_cid_migrate_from_try_steal_cid()
10586 WRITE_ONCE(src_pcpu_cid->recent_cid, MM_CID_UNSET); in __sched_mm_cid_migrate_from_try_steal_cid()
10598 struct mm_struct *mm = t->mm; in sched_mm_cid_migrate_to()
10607 src_cpu = t->migrate_from_cpu; in sched_mm_cid_migrate_to()
10608 if (src_cpu == -1) { in sched_mm_cid_migrate_to()
10609 t->last_mm_cid = -1; in sched_mm_cid_migrate_to()
10622 * greater or equal to the number of allowed CPUs, because user-space in sched_mm_cid_migrate_to()
10626 dst_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(dst_rq)); in sched_mm_cid_migrate_to()
10627 dst_cid_is_set = !mm_cid_is_unset(READ_ONCE(dst_pcpu_cid->cid)) || in sched_mm_cid_migrate_to()
10628 !mm_cid_is_unset(READ_ONCE(dst_pcpu_cid->recent_cid)); in sched_mm_cid_migrate_to()
10629 if (dst_cid_is_set && atomic_read(&mm->mm_users) >= READ_ONCE(mm->nr_cpus_allowed)) in sched_mm_cid_migrate_to()
10631 src_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, src_cpu); in sched_mm_cid_migrate_to()
10634 if (src_cid == -1) in sched_mm_cid_migrate_to()
10638 if (src_cid == -1) in sched_mm_cid_migrate_to()
10646 WRITE_ONCE(dst_pcpu_cid->cid, src_cid); in sched_mm_cid_migrate_to()
10647 WRITE_ONCE(dst_pcpu_cid->recent_cid, src_cid); in sched_mm_cid_migrate_to()
10657 cid = READ_ONCE(pcpu_cid->cid); in sched_mm_cid_remote_clear()
10668 if (!try_cmpxchg(&pcpu_cid->cid, &cid, lazy_cid)) in sched_mm_cid_remote_clear()
10672 * The implicit barrier after cmpxchg per-mm/cpu cid before loading in sched_mm_cid_remote_clear()
10673 * rq->curr->mm matches the scheduler barrier in context_switch() in sched_mm_cid_remote_clear()
10674 * between store to rq->curr and load of prev and next task's in sched_mm_cid_remote_clear()
10675 * per-mm/cpu cid. in sched_mm_cid_remote_clear()
10677 * The implicit barrier after cmpxchg per-mm/cpu cid before loading in sched_mm_cid_remote_clear()
10678 * rq->curr->mm_cid_active matches the barrier in in sched_mm_cid_remote_clear()
10680 * sched_mm_cid_after_execve() between store to t->mm_cid_active and in sched_mm_cid_remote_clear()
10681 * load of per-mm/cpu cid. in sched_mm_cid_remote_clear()
10686 * the lazy-put flag, that task will be responsible for transitioning in sched_mm_cid_remote_clear()
10687 * from lazy-put flag set to MM_CID_UNSET. in sched_mm_cid_remote_clear()
10690 t = rcu_dereference(rq->curr); in sched_mm_cid_remote_clear()
10691 if (READ_ONCE(t->mm_cid_active) && t->mm == mm) in sched_mm_cid_remote_clear()
10701 if (try_cmpxchg(&pcpu_cid->cid, &lazy_cid, MM_CID_UNSET)) in sched_mm_cid_remote_clear()
10714 * rq->clock load is racy on 32-bit but one spurious clear once in a in sched_mm_cid_remote_clear_old()
10717 rq_clock = READ_ONCE(rq->clock); in sched_mm_cid_remote_clear_old()
10718 pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu); in sched_mm_cid_remote_clear_old()
10726 curr = rcu_dereference(rq->curr); in sched_mm_cid_remote_clear_old()
10727 if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) { in sched_mm_cid_remote_clear_old()
10728 WRITE_ONCE(pcpu_cid->time, rq_clock); in sched_mm_cid_remote_clear_old()
10733 if (rq_clock < pcpu_cid->time + SCHED_MM_CID_PERIOD_NS) in sched_mm_cid_remote_clear_old()
10744 pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu); in sched_mm_cid_remote_clear_weight()
10745 cid = READ_ONCE(pcpu_cid->cid); in sched_mm_cid_remote_clear_weight()
10761 work->next = work; /* Prevent double-add */ in task_mm_cid_work()
10762 if (t->flags & PF_EXITING) in task_mm_cid_work()
10764 mm = t->mm; in task_mm_cid_work()
10767 old_scan = READ_ONCE(mm->mm_cid_next_scan); in task_mm_cid_work()
10772 res = cmpxchg(&mm->mm_cid_next_scan, old_scan, next_scan); in task_mm_cid_work()
10780 if (!try_cmpxchg(&mm->mm_cid_next_scan, &old_scan, next_scan)) in task_mm_cid_work()
10797 struct mm_struct *mm = t->mm; in init_sched_mm_cid()
10801 mm_users = atomic_read(&mm->mm_users); in init_sched_mm_cid()
10803 mm->mm_cid_next_scan = jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY); in init_sched_mm_cid()
10805 t->cid_work.next = &t->cid_work; /* Protect against double add */ in init_sched_mm_cid()
10806 init_task_work(&t->cid_work, task_mm_cid_work); in init_sched_mm_cid()
10811 struct callback_head *work = &curr->cid_work; in task_tick_mm_cid()
10814 if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) || in task_tick_mm_cid()
10815 work->next != work) in task_tick_mm_cid()
10817 if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan))) in task_tick_mm_cid()
10826 struct mm_struct *mm = t->mm; in sched_mm_cid_exit_signals()
10836 WRITE_ONCE(t->mm_cid_active, 0); in sched_mm_cid_exit_signals()
10838 * Store t->mm_cid_active before loading per-mm/cpu cid. in sched_mm_cid_exit_signals()
10843 t->last_mm_cid = t->mm_cid = -1; in sched_mm_cid_exit_signals()
10848 struct mm_struct *mm = t->mm; in sched_mm_cid_before_execve()
10858 WRITE_ONCE(t->mm_cid_active, 0); in sched_mm_cid_before_execve()
10860 * Store t->mm_cid_active before loading per-mm/cpu cid. in sched_mm_cid_before_execve()
10865 t->last_mm_cid = t->mm_cid = -1; in sched_mm_cid_before_execve()
10870 struct mm_struct *mm = t->mm; in sched_mm_cid_after_execve()
10880 WRITE_ONCE(t->mm_cid_active, 1); in sched_mm_cid_after_execve()
10882 * Store t->mm_cid_active before loading per-mm/cpu cid. in sched_mm_cid_after_execve()
10886 t->last_mm_cid = t->mm_cid = mm_cid_get(rq, t, mm); in sched_mm_cid_after_execve()
10892 WARN_ON_ONCE(!t->mm || t->mm_cid != -1); in sched_mm_cid_fork()
10893 t->mm_cid_active = 1; in sched_mm_cid_fork()
10913 if (ctx->queued) in sched_deq_and_put_task()
10915 if (ctx->running) in sched_deq_and_put_task()
10921 struct rq *rq = task_rq(ctx->p); in sched_enq_and_set_task()
10925 if (ctx->queued) in sched_enq_and_set_task()
10926 enqueue_task(rq, ctx->p, ctx->queue_flags | ENQUEUE_NOCLOCK); in sched_enq_and_set_task()
10927 if (ctx->running) in sched_enq_and_set_task()
10928 set_next_task(rq, ctx->p); in sched_enq_and_set_task()