xref: /linux/kernel/sched/idle.c (revision a01353cf1896ea5b8a7bbc5e2b2d38feed8b7aaa)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic entry points for the idle threads and
 * implementation of the idle task scheduling class.
 *
 * (NOTE: these are not related to SCHED_IDLE batch scheduled
 *        tasks which are handled in sched/fair.c )
 */

/* Linker adds these: start and end of __cpuidle functions */
extern char __cpuidle_text_start[], __cpuidle_text_end[];

/**
 * sched_idle_set_state - Record idle state for the current CPU.
 * @idle_state: State to record.
 */
void sched_idle_set_state(struct cpuidle_state *idle_state)
{
	idle_set_state(this_rq(), idle_state);
}

static int __read_mostly cpu_idle_force_poll;

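/*
 * cpu_idle_poll_ctrl - nestable enable/disable of forced idle polling.
 *
 * Each enable call bumps cpu_idle_force_poll and must be paired with a
 * disable call; while the count is non-zero, do_idle() uses cpu_idle_poll()
 * instead of entering a low-power state through cpuidle.
 */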
void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}

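/*
 * Boot-time control of the above: "nohlt" on the kernel command line forces
 * the idle loop to poll, "hlt" restores the default cpuidle behaviour.
 */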
#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;

	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;

	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif

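/*
 * Polling idle loop: spin with interrupts enabled until a reschedule is
 * needed or the reason for polling (forced poll or an expired tick
 * broadcast device) goes away.
 */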
static noinline int __cpuidle cpu_idle_poll(void)
{
	instrumentation_begin();
	trace_cpu_idle(0, smp_processor_id());
	stop_critical_timings();
	ct_cpuidle_enter();

	raw_local_irq_enable();
	while (!tif_need_resched() &&
	       (cpu_idle_force_poll || tick_check_broadcast_expired()))
		cpu_relax();
	raw_local_irq_disable();

	ct_cpuidle_exit();
	start_critical_timings();
	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
	local_irq_enable();
	instrumentation_end();

	return 1;
}

/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }
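/*
 * Default arch_cpu_idle(): an architecture that does not provide its own
 * low-power idle routine simply falls back to polling with interrupts
 * enabled.
 */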
void __weak arch_cpu_idle(void)
{
	cpu_idle_force_poll = 1;
	raw_local_irq_enable();
}

/**
 * default_idle_call - Default CPU idle routine.
 *
 * To use when the cpuidle framework cannot be used.
 */
void __cpuidle default_idle_call(void)
{
	instrumentation_begin();
	if (!current_clr_polling_and_test()) {
		trace_cpu_idle(1, smp_processor_id());
		stop_critical_timings();

		ct_cpuidle_enter();
		arch_cpu_idle();
		raw_local_irq_disable();
		ct_cpuidle_exit();

		start_critical_timings();
		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
	}
	local_irq_enable();
	instrumentation_end();
}

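/*
 * Suspend-to-idle variant of call_cpuidle(): enter suspend-to-idle via
 * cpuidle_enter_s2idle() unless a reschedule is already pending.
 */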
static int call_cpuidle_s2idle(struct cpuidle_driver *drv,
			       struct cpuidle_device *dev)
{
	if (current_clr_polling_and_test())
		return -EBUSY;

	return cpuidle_enter_s2idle(drv, dev);
}

static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
			int next_state)
{
	/*
	 * A reschedule is pending, so going idle is pointless; record a zero
	 * idle residency and return.
	 */
	if (current_clr_polling_and_test()) {
		dev->last_residency_ns = 0;
		local_irq_enable();
		return -EBUSY;
	}

	/*
	 * Enter the idle state previously selected by the governor. This call
	 * will block until an interrupt occurs and will take care of
	 * re-enabling the local interrupts.
	 */
	return cpuidle_enter(drv, dev, next_state);
}

/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 *
 * On architectures that support TIF_POLLING_NRFLAG, this function is called
 * with polling set, and it returns with polling set.  If it ever stops
 * polling, it must clear the polling bit.
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = cpuidle_get_device();
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int next_state, entered_state;

	/*
	 * Check if the idle task must be rescheduled. If it is the
	 * case, exit the function after re-enabling the local irq.
	 */
	if (need_resched()) {
		local_irq_enable();
		return;
	}

	/*
	 * The RCU framework needs to be told that we are entering an idle
	 * section, so no more RCU read-side critical sections, and one more
	 * step toward the grace period.
	 */

	if (cpuidle_not_available(drv, dev)) {
		tick_nohz_idle_stop_tick();

		default_idle_call();
		goto exit_idle;
	}

	/*
	 * Suspend-to-idle ("s2idle") is a system state in which all user space
	 * has been frozen, all I/O devices have been suspended and the only
	 * activity happens here and in interrupts (if any). In that case bypass
	 * the cpuidle governor and go straight for the deepest idle state
	 * available.  Possibly also suspend the local tick and the entire
	 * timekeeping to prevent timer interrupts from kicking us out of idle
	 * until a proper wakeup interrupt happens.
	 */

	if (idle_should_enter_s2idle() || dev->forced_idle_latency_limit_ns) {
		u64 max_latency_ns;

		if (idle_should_enter_s2idle()) {

			entered_state = call_cpuidle_s2idle(drv, dev);
			if (entered_state > 0)
				goto exit_idle;

			max_latency_ns = U64_MAX;
		} else {
			max_latency_ns = dev->forced_idle_latency_limit_ns;
		}

		tick_nohz_idle_stop_tick();

		next_state = cpuidle_find_deepest_state(drv, dev, max_latency_ns);
		call_cpuidle(drv, dev, next_state);
	} else {
		bool stop_tick = true;

		/*
		 * Ask the cpuidle framework to choose a convenient idle state.
		 */
		next_state = cpuidle_select(drv, dev, &stop_tick);

		if (stop_tick || tick_nohz_tick_stopped())
			tick_nohz_idle_stop_tick();
		else
			tick_nohz_idle_retain_tick();

		entered_state = call_cpuidle(drv, dev, next_state);
		/*
		 * Give the governor an opportunity to reflect on the outcome
		 */
		cpuidle_reflect(dev, entered_state);
	}

exit_idle:
	__current_set_polling();

	/*
	 * It is up to the idle functions to reenable local interrupts
	 */
	if (WARN_ON_ONCE(irqs_disabled()))
		local_irq_enable();
}

/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void do_idle(void)
{
	int cpu = smp_processor_id();

	/*
	 * Check if we need to update blocked load
	 */
	nohz_run_idle_balance(cpu);

	/*
	 * If the arch has a polling bit, we maintain an invariant:
	 *
	 * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
	 * rq->idle). This means that, if rq->idle has the polling bit set,
	 * then setting need_resched is guaranteed to cause the CPU to
	 * reschedule.
	 */

	__current_set_polling();
	tick_nohz_idle_enter();

	while (!need_resched()) {
		rmb();

		local_irq_disable();

		if (cpu_is_offline(cpu)) {
			tick_nohz_idle_stop_tick();
			cpuhp_report_idle_dead();
			arch_cpu_idle_dead();
		}

		arch_cpu_idle_enter();
		rcu_nocb_flush_deferred_wakeup();

		/*
		 * In poll mode we reenable interrupts and spin. Also if we
		 * detected in the wakeup from idle path that the tick
		 * broadcast device expired for us, we don't want to go deep
		 * idle as we know that the IPI is going to arrive right away.
		 */
		if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
			tick_nohz_idle_restart_tick();
			cpu_idle_poll();
		} else {
			cpuidle_idle_call();
		}
		arch_cpu_idle_exit();
	}

	/*
	 * Since we fell out of the loop above, we know TIF_NEED_RESCHED must
	 * be set, propagate it into PREEMPT_NEED_RESCHED.
	 *
	 * This is required because for polling idle loops we will not have had
	 * an IPI to fold the state for us.
	 */
	preempt_set_need_resched();
	tick_nohz_idle_exit();
	__current_clr_polling();

	/*
	 * We promise to call sched_ttwu_pending() and reschedule if
	 * need_resched() is set while polling is set. That means that clearing
	 * polling needs to be visible before doing these things.
	 */
	smp_mb__after_atomic();

	/*
	 * RCU relies on this call to be done outside of an RCU read-side
	 * critical section.
	 */
	flush_smp_call_function_queue();
	schedule_idle();

	if (unlikely(klp_patch_pending(current)))
		klp_update_patch_state(current);
}

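/*
 * Report whether an instruction pointer lies in the __cpuidle text section,
 * i.e. whether the CPU was executing idle code when it was sampled.
 */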
bool cpu_in_idle(unsigned long pc)
{
	return pc >= (unsigned long)__cpuidle_text_start &&
		pc < (unsigned long)__cpuidle_text_end;
}

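/*
 * Helper for play_idle_precise(): a pinned hrtimer whose expiry marks the
 * end of the injected idle period and kicks the CPU out of do_idle().
 */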
struct idle_timer {
	struct hrtimer timer;
	int done;
};

static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
{
	struct idle_timer *it = container_of(timer, struct idle_timer, timer);

	WRITE_ONCE(it->done, 1);
	set_tsk_need_resched(current);

	return HRTIMER_NORESTART;
}

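/**
 * play_idle_precise - Inject an idle period on the current CPU.
 * @duration_ns: Length of the forced idle period.
 * @latency_ns: Maximum exit latency allowed for the idle states used.
 *
 * The caller is expected to be a per-CPU FIFO kernel thread (see the
 * WARN_ON_ONCE() checks below); it loops in do_idle() until the hrtimer
 * armed here fires and sets TIF_NEED_RESCHED.
 */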
void play_idle_precise(u64 duration_ns, u64 latency_ns)
{
	struct idle_timer it;

	/*
	 * Only FIFO tasks can disable the tick since they don't need the forced
	 * preemption.
	 */
	WARN_ON_ONCE(current->policy != SCHED_FIFO);
	WARN_ON_ONCE(current->nr_cpus_allowed != 1);
	WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
	WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
	WARN_ON_ONCE(!duration_ns);
	WARN_ON_ONCE(current->mm);

	rcu_sleep_check();
	preempt_disable();
	current->flags |= PF_IDLE;
	cpuidle_use_deepest_state(latency_ns);

	it.done = 0;
	hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	it.timer.function = idle_inject_timer_fn;
	hrtimer_start(&it.timer, ns_to_ktime(duration_ns),
		      HRTIMER_MODE_REL_PINNED_HARD);

	while (!READ_ONCE(it.done))
		do_idle();

	cpuidle_use_deepest_state(0);
	current->flags &= ~PF_IDLE;

	preempt_fold_need_resched();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(play_idle_precise);

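/*
 * Entry point into the idle loop once a CPU has finished coming up: report
 * the hotplug state and then run do_idle() forever.
 */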
void cpu_startup_entry(enum cpuhp_state state)
{
	arch_cpu_idle_prepare();
	cpuhp_online_idle(state);
	while (1)
		do_idle();
}

/*
 * idle-task scheduling class.
 */

#ifdef CONFIG_SMP
static int
select_task_rq_idle(struct task_struct *p, int cpu, int flags)
{
	return task_cpu(p); /* IDLE tasks are never migrated */
}

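/*
 * The idle class is the lowest one, so there is nothing below it to pull
 * tasks from and its balance callback is not expected to be invoked; warn
 * if that ever happens.
 */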
static int
balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	return WARN_ON_ONCE(1);
}
#endif

/*
 * Idle tasks are unconditionally rescheduled:
 */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
{
	resched_curr(rq);
}

static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}

static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
{
	update_idle_core(rq);
	schedstat_inc(rq->sched_goidle);
}

#ifdef CONFIG_SMP
static struct task_struct *pick_task_idle(struct rq *rq)
{
	return rq->idle;
}
#endif

struct task_struct *pick_next_task_idle(struct rq *rq)
{
	struct task_struct *next = rq->idle;

	set_next_task_idle(rq, next, true);

	return next;
}

/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
	raw_spin_rq_unlock_irq(rq);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
	dump_stack();
	raw_spin_rq_lock_irq(rq);
}

/*
 * scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * goes along full dynticks. Therefore no local assumption can be made
 * and everything must be accessed through the @rq and @curr passed in
 * parameters.
 */
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}

static void switched_to_idle(struct rq *rq, struct task_struct *p)
{
	BUG();
}

static void
prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG();
}

static void update_curr_idle(struct rq *rq)
{
}

/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 */
DEFINE_SCHED_CLASS(idle) = {

	/* no enqueue/yield_task for idle tasks */

	/* dequeue is not valid, we print a debug message there: */
	.dequeue_task		= dequeue_task_idle,

	.check_preempt_curr	= check_preempt_curr_idle,

	.pick_next_task		= pick_next_task_idle,
	.put_prev_task		= put_prev_task_idle,
	.set_next_task          = set_next_task_idle,

#ifdef CONFIG_SMP
	.balance		= balance_idle,
	.pick_task		= pick_task_idle,
	.select_task_rq		= select_task_rq_idle,
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif

	.task_tick		= task_tick_idle,

	.prio_changed		= prio_changed_idle,
	.switched_to		= switched_to_idle,
	.update_curr		= update_curr_idle,
};