// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic entry points for the idle threads and
 * implementation of the idle task scheduling class.
 *
 * (NOTE: these are not related to SCHED_IDLE batch scheduled
 *        tasks which are handled in sched/fair.c)
 */

/* Linker adds these: start and end of __cpuidle functions */
extern char __cpuidle_text_start[], __cpuidle_text_end[];

/**
 * sched_idle_set_state - Record idle state for the current CPU.
 * @idle_state: State to record.
 */
void sched_idle_set_state(struct cpuidle_state *idle_state)
{
	idle_set_state(this_rq(), idle_state);
}

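/*
 * Nesting count for forced-poll idle: while non-zero, the idle loop spins in
 * cpu_idle_poll() instead of entering a low-power state. Modified via
 * cpu_idle_poll_ctrl() and the "nohlt"/"hlt" boot options below.
 */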
static int __read_mostly cpu_idle_force_poll;

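/*
 * cpu_idle_poll_ctrl - reference-counted control of forced-poll idle.
 * @enable: true to add a forced-poll request, false to drop one.
 *
 * Calls nest, so each enable must be paired with a disable. Illustrative use
 * only (not taken from this file): a driver that briefly needs CPUs to stay
 * out of deep idle states could do
 *
 *	cpu_idle_poll_ctrl(true);
 *	... latency-sensitive window ...
 *	cpu_idle_poll_ctrl(false);
 */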
void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}

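/*
 * Boot-time control: "nohlt" forces the polling idle loop, "hlt" restores
 * the default behaviour (use the architecture's idle/sleep instruction).
 */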
#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;

	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;

	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif

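/*
 * Busy-poll with interrupts enabled until a reschedule is needed or the
 * forced-poll / broadcast-expiry condition clears. Marked __cpuidle so the
 * polling PC is covered by cpu_in_idle(); noinline keeps the function out of
 * line so that section placement actually takes effect.
 */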
static noinline int __cpuidle cpu_idle_poll(void)
{
	instrumentation_begin();
	trace_cpu_idle(0, smp_processor_id());
	stop_critical_timings();
	ct_cpuidle_enter();

	raw_local_irq_enable();
	while (!tif_need_resched() &&
	       (cpu_idle_force_poll || tick_check_broadcast_expired()))
		cpu_relax();
	raw_local_irq_disable();

	ct_cpuidle_exit();
	start_critical_timings();
	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
	local_irq_enable();
	instrumentation_end();

	return 1;
}

/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak __noreturn arch_cpu_idle_dead(void) { while (1); }
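/*
 * Default arch_cpu_idle(): an architecture that provides no idle routine
 * simply forces polling, so the generic loop falls back to cpu_idle_poll().
 */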
void __weak arch_cpu_idle(void)
{
	cpu_idle_force_poll = 1;
}

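/*
 * On platforms whose local timer may stop while idle, arch code enables this
 * static key so that idle entry/exit is bracketed by tick broadcast
 * enter/exit, handing wakeups over to a broadcast clockevent device.
 */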
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST_IDLE
DEFINE_STATIC_KEY_FALSE(arch_needs_tick_broadcast);

static inline void cond_tick_broadcast_enter(void)
{
	if (static_branch_unlikely(&arch_needs_tick_broadcast))
		tick_broadcast_enter();
}

static inline void cond_tick_broadcast_exit(void)
{
	if (static_branch_unlikely(&arch_needs_tick_broadcast))
		tick_broadcast_exit();
}
#else
static inline void cond_tick_broadcast_enter(void) { }
static inline void cond_tick_broadcast_exit(void) { }
#endif

/**
 * default_idle_call - Default CPU idle routine.
 *
 * Used when the cpuidle framework cannot be used.
 */
void __cpuidle default_idle_call(void)
{
	instrumentation_begin();
	if (!current_clr_polling_and_test()) {
		cond_tick_broadcast_enter();
		trace_cpu_idle(1, smp_processor_id());
		stop_critical_timings();

		ct_cpuidle_enter();
		arch_cpu_idle();
		ct_cpuidle_exit();

		start_critical_timings();
		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
		cond_tick_broadcast_exit();
	}
	local_irq_enable();
	instrumentation_end();
}

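/*
 * Enter the deepest s2idle-capable state. Returns the state index from
 * cpuidle_enter_s2idle() (the caller treats > 0 as success), or -EBUSY if a
 * reschedule was already pending when polling was cleared.
 */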
static int call_cpuidle_s2idle(struct cpuidle_driver *drv,
			       struct cpuidle_device *dev)
{
	if (current_clr_polling_and_test())
		return -EBUSY;

	return cpuidle_enter_s2idle(drv, dev);
}

static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
			int next_state)
{
	/*
	 * If the idle task must be rescheduled, it is pointless to enter an
	 * idle state; just record a zero idle residency and return.
	 */
	if (current_clr_polling_and_test()) {
		dev->last_residency_ns = 0;
		local_irq_enable();
		return -EBUSY;
	}

	/*
	 * Enter the idle state previously returned by the governor decision.
	 * This function will block until an interrupt occurs and will take
	 * care of re-enabling the local interrupts.
	 */
	return cpuidle_enter(drv, dev, next_state);
}

/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 *
 * On architectures that support TIF_POLLING_NRFLAG, this function is called
 * with polling set, and it returns with polling set. If it ever stops
 * polling, it must clear the polling bit.
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = cpuidle_get_device();
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int next_state, entered_state;

	/*
	 * Check if the idle task must be rescheduled. If it is the
	 * case, exit the function after re-enabling the local IRQ.
	 */
	if (need_resched()) {
		local_irq_enable();
		return;
	}

	if (cpuidle_not_available(drv, dev)) {
		tick_nohz_idle_stop_tick();

		default_idle_call();
		goto exit_idle;
	}

	/*
	 * Suspend-to-idle ("s2idle") is a system state in which all user space
	 * has been frozen, all I/O devices have been suspended and the only
	 * activity happens here and in interrupts (if any). In that case bypass
	 * the cpuidle governor and go straight for the deepest idle state
	 * available. Possibly also suspend the local tick and the entire
	 * timekeeping to prevent timer interrupts from kicking us out of idle
	 * until a proper wakeup interrupt happens.
	 */

	if (idle_should_enter_s2idle() || dev->forced_idle_latency_limit_ns) {
		u64 max_latency_ns;

		if (idle_should_enter_s2idle()) {

			entered_state = call_cpuidle_s2idle(drv, dev);
			if (entered_state > 0)
				goto exit_idle;

			max_latency_ns = U64_MAX;
		} else {
			max_latency_ns = dev->forced_idle_latency_limit_ns;
		}

		tick_nohz_idle_stop_tick();

		next_state = cpuidle_find_deepest_state(drv, dev, max_latency_ns);
		call_cpuidle(drv, dev, next_state);
	} else {
		bool stop_tick = true;

		/*
		 * Ask the cpuidle framework to choose a convenient idle state.
		 */
		next_state = cpuidle_select(drv, dev, &stop_tick);

		if (stop_tick || tick_nohz_tick_stopped())
			tick_nohz_idle_stop_tick();
		else
			tick_nohz_idle_retain_tick();

		entered_state = call_cpuidle(drv, dev, next_state);
		/*
		 * Give the governor an opportunity to reflect on the outcome.
		 */
		cpuidle_reflect(dev, entered_state);
	}

exit_idle:
	__current_set_polling();

	/*
	 * It is up to the idle functions to re-enable local interrupts.
	 */
	if (WARN_ON_ONCE(irqs_disabled()))
		local_irq_enable();
}

/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void do_idle(void)
{
	int cpu = smp_processor_id();

	/*
	 * Check if we need to update blocked load.
	 */
	nohz_run_idle_balance(cpu);

	/*
	 * If the arch has a polling bit, we maintain an invariant:
	 *
	 * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
	 * rq->idle). This means that, if rq->idle has the polling bit set,
	 * then setting need_resched is guaranteed to cause the CPU to
	 * reschedule.
	 */

	__current_set_polling();
	tick_nohz_idle_enter();

	while (!need_resched()) {
		rmb();

		/*
		 * Interrupts shouldn't be re-enabled from that point on until
		 * the CPU sleeping instruction is reached. Otherwise an interrupt
		 * may fire and queue a timer that would be ignored until the CPU
		 * wakes from the sleeping instruction. And testing need_resched()
		 * doesn't tell us whether a timer reprogram is pending.
		 *
		 * Several cases to consider:
		 *
		 * - SLEEP-UNTIL-PENDING-INTERRUPT based instructions such as
		 *   "wfi" or "mwait" are fine because they can be entered with
		 *   interrupts disabled.
		 *
		 * - sti;mwait() couple is fine because the interrupts are
		 *   re-enabled only upon the execution of mwait, leaving no gap
		 *   in-between.
		 *
		 * - ROLLBACK based idle handlers with the sleeping instruction
		 *   called with interrupts enabled are NOT fine. In this scheme
		 *   when the interrupt detects it has interrupted an idle handler,
		 *   it rolls back to its beginning which performs the
		 *   need_resched() check before re-executing the sleeping
		 *   instruction. This can miss a pending timer reprogram.
		 *   If such a scheme is really mandatory due to the lack of an
		 *   appropriate CPU sleeping instruction, then a FAST-FORWARD
		 *   must instead be applied: when the interrupt detects it has
		 *   interrupted an idle handler, it must resume to the end of
		 *   this idle handler so that the generic idle loop is iterated
		 *   again to reprogram the tick.
		 */
		local_irq_disable();

		if (cpu_is_offline(cpu)) {
			cpuhp_report_idle_dead();
			arch_cpu_idle_dead();
		}

		arch_cpu_idle_enter();
		rcu_nocb_flush_deferred_wakeup();

		/*
		 * In poll mode we re-enable interrupts and spin. Also if we
		 * detected in the wakeup from idle path that the tick
		 * broadcast device expired for us, we don't want to enter
		 * deep idle as we know that the IPI is going to arrive right
		 * away.
		 */
		if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
			tick_nohz_idle_restart_tick();
			cpu_idle_poll();
		} else {
			cpuidle_idle_call();
		}
		arch_cpu_idle_exit();
	}

	/*
	 * Since we fell out of the loop above, we know TIF_NEED_RESCHED must
	 * be set, propagate it into PREEMPT_NEED_RESCHED.
	 *
	 * This is required because for polling idle loops we will not have had
	 * an IPI to fold the state for us.
	 */
	preempt_set_need_resched();
	tick_nohz_idle_exit();
	__current_clr_polling();

	/*
	 * We promise to call sched_ttwu_pending() and reschedule if
	 * need_resched() is set while polling is set. That means that clearing
	 * polling needs to be visible before doing these things.
	 */
	smp_mb__after_atomic();

	/*
	 * RCU relies on this call to be done outside of an RCU read-side
	 * critical section.
	 */
	flush_smp_call_function_queue();
	schedule_idle();

	if (unlikely(klp_patch_pending(current)))
		klp_update_patch_state(current);
}

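/*
 * Return true if @pc lies within the __cpuidle text section, i.e. the CPU was
 * executing idle code when the program counter was sampled (used e.g. by the
 * NMI backtrace code).
 */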
bool cpu_in_idle(unsigned long pc)
{
	return pc >= (unsigned long)__cpuidle_text_start &&
		pc < (unsigned long)__cpuidle_text_end;
}

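/*
 * Per-call state for play_idle_precise(): the hard hrtimer bounds the
 * injected idle period, and its callback flags completion and forces a
 * reschedule so do_idle() returns.
 */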
struct idle_timer {
	struct hrtimer timer;
	int done;
};

static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
{
	struct idle_timer *it = container_of(timer, struct idle_timer, timer);

	WRITE_ONCE(it->done, 1);
	set_tsk_need_resched(current);

	return HRTIMER_NORESTART;
}

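/*
 * Inject @duration_ns of forced idle on the local CPU, using idle states with
 * an exit latency no greater than @latency_ns. Illustrative caller sketch
 * (not taken from this file): a per-CPU SCHED_FIFO idle-injection kthread
 * might run
 *
 *	play_idle_precise(50 * NSEC_PER_MSEC, U64_MAX);
 *
 * to force 50ms of idle with no latency constraint.
 */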
void play_idle_precise(u64 duration_ns, u64 latency_ns)
{
	struct idle_timer it;

	/*
	 * Only FIFO tasks can disable the tick since they don't need the forced
	 * preemption.
	 */
	WARN_ON_ONCE(current->policy != SCHED_FIFO);
	WARN_ON_ONCE(current->nr_cpus_allowed != 1);
	WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
	WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
	WARN_ON_ONCE(!duration_ns);
	WARN_ON_ONCE(current->mm);

	rcu_sleep_check();
	preempt_disable();
	current->flags |= PF_IDLE;
	cpuidle_use_deepest_state(latency_ns);

	it.done = 0;
	hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	it.timer.function = idle_inject_timer_fn;
	hrtimer_start(&it.timer, ns_to_ktime(duration_ns),
		      HRTIMER_MODE_REL_PINNED_HARD);

	while (!READ_ONCE(it.done))
		do_idle();

	cpuidle_use_deepest_state(0);
	current->flags &= ~PF_IDLE;

	preempt_fold_need_resched();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(play_idle_precise);

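/*
 * Entry point for a CPU's idle thread: mark the task PF_IDLE, let the
 * architecture prepare, report the idle thread online to the hotplug
 * machinery and then run do_idle() forever.
 */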
void cpu_startup_entry(enum cpuhp_state state)
{
	current->flags |= PF_IDLE;
	arch_cpu_idle_prepare();
	cpuhp_online_idle(state);
	while (1)
		do_idle();
}

/*
 * idle-task scheduling class.
 */

#ifdef CONFIG_SMP
static int
select_task_rq_idle(struct task_struct *p, int cpu, int flags)
{
	return task_cpu(p); /* IDLE tasks are never migrated */
}

static int
balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	return WARN_ON_ONCE(1);
}
#endif

/*
 * Idle tasks are unconditionally rescheduled:
 */
static void wakeup_preempt_idle(struct rq *rq, struct task_struct *p, int flags)
{
	resched_curr(rq);
}

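/*
 * When switching the idle task out or in, keep the deadline-server idle time
 * accounting, the sched_ext idle tracking and the SMT core idleness state up
 * to date.
 */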
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
	dl_server_update_idle_time(rq, prev);
	scx_update_idle(rq, false);
}

static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
{
	update_idle_core(rq);
	scx_update_idle(rq, true);
	schedstat_inc(rq->sched_goidle);
	next->se.exec_start = rq_clock_task(rq);
}

struct task_struct *pick_task_idle(struct rq *rq)
{
	return rq->idle;
}

/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static bool
dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
	raw_spin_rq_unlock_irq(rq);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
	dump_stack();
	raw_spin_rq_lock_irq(rq);
	return true;
}

/*
 * scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * goes along with full dynticks. Therefore no local assumption can be
 * made and everything must be accessed through the @rq and @curr passed-in
 * parameters.
 */
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}

static void switched_to_idle(struct rq *rq, struct task_struct *p)
{
	BUG();
}

static void
prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG();
}

static void update_curr_idle(struct rq *rq)
{
}

/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 */
DEFINE_SCHED_CLASS(idle) = {

	/* no enqueue/yield_task for idle tasks */

	/* dequeue is not valid, we print a debug message there: */
	.dequeue_task		= dequeue_task_idle,

	.wakeup_preempt		= wakeup_preempt_idle,

	.pick_task		= pick_task_idle,
	.put_prev_task		= put_prev_task_idle,
	.set_next_task		= set_next_task_idle,

#ifdef CONFIG_SMP
	.balance		= balance_idle,
	.select_task_rq		= select_task_rq_idle,
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif

	.task_tick		= task_tick_idle,

	.prio_changed		= prio_changed_idle,
	.switched_to		= switched_to_idle,
	.update_curr		= update_curr_idle,
};