Lines matching "suspend-to-idle" (excerpts from kernel/sched/idle.c)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic entry points for the idle threads and
 * implementation of the idle task scheduling class.
 *
 * (NOTE: these are not related to SCHED_IDLE batch scheduled
 *        tasks, which are handled in sched/fair.c)
 */
#include <linux/suspend.h>
/**
 * sched_idle_set_state - Record idle state for the current CPU.
 * @idle_state: State to record.
 */
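/*
 * A minimal sketch of the helper documented above (assumed from mainline
 * conventions, not quoted verbatim from this excerpt): it only stashes the
 * cpuidle state pointer in this CPU's runqueue so the scheduler can see
 * which state the CPU is idling in. idle_set_state() and this_rq() are
 * scheduler-internal helpers assumed here.
 */
void sched_idle_set_state(struct cpuidle_state *idle_state)
{
	idle_set_state(this_rq(), idle_state);
}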
	/* cpu_idle_poll_ctrl(): */
	cpu_idle_force_poll--;
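/*
 * Sketch of the surrounding helper, assuming the usual reference-count
 * pattern: callers enable and disable forced polling in pairs, and the
 * count must never go negative.
 */
void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}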
/**
 * default_idle_call - Default CPU idle routine.
 *
 * To use when the cpuidle framework cannot be used.
 */
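/*
 * Sketch of the fallback path (simplified; the real routine also carries
 * tracing, lockdep and instrumentation bookkeeping): if a reschedule became
 * pending while we were marked polling, just re-enable interrupts and
 * return, otherwise hand off to the architecture's low-power wait.
 */
static void default_idle_call(void)
{
	if (current_clr_polling_and_test())
		local_irq_enable();
	else
		arch_cpu_idle();
}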
	/* call_cpuidle_s2idle(): */
		return -EBUSY;
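/*
 * Sketch of the suspend-to-idle entry helper (assumed shape): bail out with
 * -EBUSY if a reschedule is already pending, otherwise enter the s2idle
 * state through the cpuidle core.
 */
static int call_cpuidle_s2idle(struct cpuidle_driver *drv,
			       struct cpuidle_device *dev)
{
	if (current_clr_polling_and_test())
		return -EBUSY;

	return cpuidle_enter_s2idle(drv, dev);
}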
	/* call_cpuidle(): */
	/*
	 * The idle task must be rescheduled; it is pointless to go idle, so
	 * just record a zero idle residency and return.
	 */
		dev->last_residency_ns = 0;
		return -EBUSY;

	/*
	 * Enter the idle state previously returned by the governor decision.
	 * This call blocks until an interrupt occurs and takes care of
	 * re-enabling the local interrupts.
	 */
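/*
 * Sketch of call_cpuidle() putting the fragments above together (assumed
 * structure; cpuidle_enter() returns the index of the state actually
 * entered, or a negative error):
 */
static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
			int next_state)
{
	/* A reschedule is pending: going idle now would be pointless. */
	if (current_clr_polling_and_test()) {
		dev->last_residency_ns = 0;
		local_irq_enable();
		return -EBUSY;
	}

	/* Enter the state the governor picked; IRQs come back on exit. */
	return cpuidle_enter(drv, dev, next_state);
}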
/**
 * cpuidle_idle_call - the main idle function
 */
	/*
	 * Check if the idle task must be rescheduled. If it is the
	 * case, exit the function after re-enabling the local IRQ.
	 */

	/*
	 * Suspend-to-idle ("s2idle") is a system state in which all user space
	 * has been frozen, all I/O devices have been suspended and the only
	 * activity happens here and in interrupts (if any). In that case bypass
	 * the cpuidle governor and go straight for the deepest idle state
	 * available. Possibly also suspend the local tick and the entire
	 * timekeeping to prevent timer interrupts from kicking us out of idle
	 * until a proper wakeup interrupt happens.
	 */
	if (idle_should_enter_s2idle() || dev->forced_idle_latency_limit_ns) {
		...
		max_latency_ns = dev->forced_idle_latency_limit_ns;
		...
	}

	/*
	 * Ask the cpuidle framework to choose a convenient idle state.
	 */

	/*
	 * Give the governor an opportunity to reflect on the outcome.
	 */

	/*
	 * It is up to the idle functions to re-enable local interrupts.
	 */
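/*
 * Simplified sketch of the flow described above (assumed from mainline
 * conventions, not verbatim; error handling, RCU and tracing details are
 * omitted):
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = cpuidle_get_device();
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int next_state, entered_state;

	/* A reschedule is pending: re-enable IRQs and let the idle loop run. */
	if (need_resched()) {
		local_irq_enable();
		return;
	}

	/* No usable cpuidle driver/device: fall back to the default routine. */
	if (cpuidle_not_available(drv, dev)) {
		default_idle_call();
		return;
	}

	if (idle_should_enter_s2idle() || dev->forced_idle_latency_limit_ns) {
		u64 max_latency_ns;

		if (idle_should_enter_s2idle()) {
			/* Try the dedicated s2idle path first. */
			entered_state = call_cpuidle_s2idle(drv, dev);
			if (entered_state > 0)
				goto exit_idle;

			max_latency_ns = U64_MAX;
		} else {
			max_latency_ns = dev->forced_idle_latency_limit_ns;
		}

		/* Bypass the governor: deepest state within the latency limit. */
		tick_nohz_idle_stop_tick();
		next_state = cpuidle_find_deepest_state(drv, dev, max_latency_ns);
		call_cpuidle(drv, dev, next_state);
	} else {
		bool stop_tick = true;

		/* Ask the cpuidle framework to choose a convenient idle state. */
		next_state = cpuidle_select(drv, dev, &stop_tick);

		if (stop_tick)
			tick_nohz_idle_stop_tick();
		else
			tick_nohz_idle_retain_tick();

		entered_state = call_cpuidle(drv, dev, next_state);

		/* Give the governor an opportunity to reflect on the outcome. */
		cpuidle_reflect(dev, entered_state);
	}

exit_idle:
	__current_set_polling();

	/* It is up to the idle functions to re-enable local interrupts. */
	if (WARN_ON_ONCE(irqs_disabled()))
		local_irq_enable();
}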
/*
 * Generic idle loop implementation
 */
	/*
	 * Check if we need to update blocked load
	 */

	/*
	 * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
	 * rq->idle). This means that, if rq->idle has the polling bit set,
	 * then setting need_resched is guaranteed to cause the CPU to
	 * reschedule.
	 */

	/*
	 * Interrupts shouldn't be re-enabled from that point on until
	 * the CPU sleeping instruction is reached.
	 *
	 * Several cases to consider:
	 *
	 * - SLEEP-UNTIL-PENDING-INTERRUPT based instructions such as
	 *   [...]
	 *
	 * - sti;mwait() couple is fine because the interrupts are
	 *   re-enabled only upon the execution of mwait, leaving no gap
	 *   in-between.
	 *
	 * - ROLLBACK based idle handlers with the sleeping instruction
	 *   [...]: when the interrupt detects it has interrupted an idle
	 *   handler, it rolls back to its beginning, which performs the
	 *   need_resched() check before re-executing the sleeping
	 *   instruction.
	 *
	 *   If such a scheme is really mandatory due to the lack of an
	 *   appropriate CPU sleeping instruction, then a FAST-FORWARD must be
	 *   applied instead: when the interrupt detects it has interrupted an
	 *   idle handler, it must resume to the end of this idle handler so
	 *   that the generic idle loop is iterated again to reprogram the
	 *   tick.
	 */

	/*
	 * In poll mode we re-enable interrupts and spin. Also if we
	 * detected in the wakeup from idle path that the tick
	 * broadcast device expired for us, we don't want to go deep
	 * idle as we know that the IPI is going to arrive right away.
	 */

	/*
	 * Since we fell out of the loop above, TIF_NEED_RESCHED must be set;
	 * propagate it into PREEMPT_NEED_RESCHED.
	 *
	 * This is required because for polling idle loops we will not have had
	 * an IPI to fold the state for us.
	 */

	/*
	 * We promise to call sched_ttwu_pending() and reschedule if
	 * need_resched() is set while polling is set. That means that clearing
	 * polling needs to be visible before doing these things.
	 */

	/*
	 * RCU relies on this call to be done outside of an RCU read-side
	 * critical section.
	 */
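/*
 * Simplified sketch of the loop these comments describe (assumed structure;
 * CPU-offline handling, instrumentation and livepatch hooks are omitted, and
 * flush_smp_call_function_queue() has had different names across kernel
 * versions):
 */
static void do_idle(void)
{
	__current_set_polling();
	tick_nohz_idle_enter();

	while (!need_resched()) {
		rmb();

		/* IRQs stay off from here until the CPU actually sleeps. */
		local_irq_disable();

		arch_cpu_idle_enter();

		/*
		 * Spin with IRQs enabled when forced polling is active or a
		 * tick-broadcast IPI is about to arrive; otherwise let
		 * cpuidle pick and enter a sleep state.
		 */
		if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
			tick_nohz_idle_restart_tick();
			cpu_idle_poll();
		} else {
			cpuidle_idle_call();
		}

		arch_cpu_idle_exit();
	}

	/* We fell out of the loop: need_resched() is set, so fold and switch. */
	preempt_set_need_resched();
	tick_nohz_idle_exit();
	__current_clr_polling();

	/* Clearing polling must be visible before pending-wakeup processing. */
	smp_mb__after_atomic();

	flush_smp_call_function_queue();
	schedule_idle();
}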
	/* idle_inject_timer_fn(): */
	WRITE_ONCE(it->done, 1);

	/* play_idle_precise(): */
	WARN_ON_ONCE(current->policy != SCHED_FIFO);
	WARN_ON_ONCE(current->nr_cpus_allowed != 1);
	WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
	WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
	WARN_ON_ONCE(current->mm);

	current->flags |= PF_IDLE;
	...
	current->flags &= ~PF_IDLE;
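/*
 * Sketch of the idle-injection pair the lines above come from (assumed
 * shape; struct idle_timer and the hrtimer flags follow the usual on-stack
 * hrtimer pattern): the timer callback marks the injection done and forces
 * a reschedule, while play_idle_precise() runs do_idle() in a loop for
 * duration_ns on the current CPU.
 */
struct idle_timer {
	struct hrtimer timer;
	int done;
};

static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
{
	struct idle_timer *it = container_of(timer, struct idle_timer, timer);

	WRITE_ONCE(it->done, 1);
	set_tsk_need_resched(current);

	return HRTIMER_NORESTART;
}

void play_idle_precise(u64 duration_ns, u64 latency_ns)
{
	struct idle_timer it;

	/* Only a pinned FIFO kthread without an mm may inject idle time. */
	WARN_ON_ONCE(current->policy != SCHED_FIFO);
	WARN_ON_ONCE(current->nr_cpus_allowed != 1);
	WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
	WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
	WARN_ON_ONCE(current->mm);

	preempt_disable();
	current->flags |= PF_IDLE;
	cpuidle_use_deepest_state(latency_ns);

	it.done = 0;
	hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	it.timer.function = idle_inject_timer_fn;
	hrtimer_start(&it.timer, ns_to_ktime(duration_ns),
		      HRTIMER_MODE_REL_PINNED_HARD);

	while (!READ_ONCE(it.done))
		do_idle();

	cpuidle_use_deepest_state(0);
	current->flags &= ~PF_IDLE;

	preempt_fold_need_resched();
	preempt_enable();
}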
	/* cpu_startup_entry(): */
	current->flags |= PF_IDLE;
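/*
 * Sketch of the entry point every CPU's idle thread ends up in (assumed;
 * the cpuhp state argument and the arch prepare hook follow the usual
 * hotplug pattern): mark the task as idle, then loop in do_idle() forever.
 */
void cpu_startup_entry(enum cpuhp_state state)
{
	current->flags |= PF_IDLE;
	arch_cpu_idle_prepare();
	cpuhp_online_idle(state);
	while (1)
		do_idle();
}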
/*
 * idle-task scheduling class.
 */

	/* select_task_rq_idle(): */
	return task_cpu(p);	/* IDLE tasks are never migrated */

/*
 * Idle tasks are unconditionally rescheduled:
 */

	/* set_next_task_idle(): */
	schedstat_inc(rq->sched_goidle);
	next->se.exec_start = rq_clock_task(rq);

	/* pick_task_idle(): */
	return rq->idle;

/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
	/* dequeue_task_idle(): */
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");

/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 */
DEFINE_SCHED_CLASS(idle) = {

	/* no enqueue/yield_task for idle tasks */
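/*
 * Sketch of the small callbacks wired into the class above (assumed bodies;
 * signatures follow recent mainline, and a name like wakeup_preempt_idle was
 * called check_preempt_curr_idle in older kernels):
 */
static int select_task_rq_idle(struct task_struct *p, int cpu, int flags)
{
	return task_cpu(p);	/* IDLE tasks are never migrated */
}

/* Idle tasks are unconditionally rescheduled: */
static void wakeup_preempt_idle(struct rq *rq, struct task_struct *p, int flags)
{
	resched_curr(rq);
}

static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
{
	schedstat_inc(rq->sched_goidle);
	next->se.exec_start = rq_clock_task(rq);
}

static struct task_struct *pick_task_idle(struct rq *rq)
{
	return rq->idle;
}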