xref: /linux/kernel/softirq.c (revision 3b2074c77d25f453247163300d5638adfab4e4fa)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *	linux/kernel/softirq.c
4  *
5  *	Copyright (C) 1992 Linus Torvalds
6  *
7  *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
8  */
9 
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11 
12 #include <linux/export.h>
13 #include <linux/kernel_stat.h>
14 #include <linux/interrupt.h>
15 #include <linux/init.h>
16 #include <linux/local_lock.h>
17 #include <linux/mm.h>
18 #include <linux/notifier.h>
19 #include <linux/percpu.h>
20 #include <linux/cpu.h>
21 #include <linux/freezer.h>
22 #include <linux/kthread.h>
23 #include <linux/rcupdate.h>
24 #include <linux/ftrace.h>
25 #include <linux/smp.h>
26 #include <linux/smpboot.h>
27 #include <linux/tick.h>
28 #include <linux/irq.h>
29 #include <linux/wait_bit.h>
30 #include <linux/workqueue.h>
31 
32 #include <asm/softirq_stack.h>
33 
34 #define CREATE_TRACE_POINTS
35 #include <trace/events/irq.h>
36 
37 /*
38    - No shared variables, all the data are CPU local.
39    - If a softirq needs serialization, let it serialize itself
40      by its own spinlocks.
41    - Even if a softirq is serialized, only the local CPU is marked for
42      execution. Hence, we get a sort of weak CPU binding. Though it is
43      still not clear whether this will result in better locality
44      or not.
45 
46    Examples:
47    - NET RX softirq. It is multithreaded and does not require
48      any global serialization.
49    - NET TX softirq. It kicks software netdevice queues, hence
50      it is logically serialized per device, but this serialization
51      is invisible to common code.
52    - Tasklets: each tasklet is serialized with respect to itself.
53  */
54 
55 #ifndef __ARCH_IRQ_STAT
56 DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
57 EXPORT_PER_CPU_SYMBOL(irq_stat);
58 #endif
59 
60 static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
61 
62 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
63 
64 const char * const softirq_to_name[NR_SOFTIRQS] = {
65 	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
66 	"TASKLET", "SCHED", "HRTIMER", "RCU"
67 };
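/*
 * A minimal illustrative sketch (not part of the original file): the table
 * above provides the row labels shown in /proc/softirqs. This assumes
 * kstat_softirqs_cpu() from <linux/kernel_stat.h> (already included above);
 * example_dump_softirq_names() is a hypothetical helper.
 */
static void __maybe_unused example_dump_softirq_names(void)
{
	int i;

	/* Print each softirq name with its count on CPU 0. */
	for (i = 0; i < NR_SOFTIRQS; i++)
		pr_info("%8s: %u\n", softirq_to_name[i],
			kstat_softirqs_cpu(i, 0));
}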
68 
69 /*
70  * We cannot loop indefinitely here to avoid userspace starvation,
71  * but we also don't want to introduce a worst-case 1/HZ latency
72  * to the pending events, so let the scheduler balance
73  * the softirq load for us.
74  */
75 static void wakeup_softirqd(void)
76 {
77 	/* Interrupts are disabled: no need to stop preemption */
78 	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
79 
80 	if (tsk)
81 		wake_up_process(tsk);
82 }
83 
84 #ifdef CONFIG_TRACE_IRQFLAGS
85 DEFINE_PER_CPU(int, hardirqs_enabled);
86 DEFINE_PER_CPU(int, hardirq_context);
87 EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
88 EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
89 #endif
90 
91 /*
92  * SOFTIRQ_OFFSET usage:
93  *
94  * On !RT kernels 'count' is the preempt counter; on RT kernels this applies
95  * to a per CPU counter and to task::softirq_disable_cnt.
96  *
97  * - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq
98  *   processing.
99  *
100  * - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
101  *   on local_bh_disable or local_bh_enable.
102  *
103  * This lets us distinguish between whether we are currently processing
104  * softirq and whether we just have bh disabled.
105  */
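/*
 * A minimal illustrative sketch of that distinction on a !RT kernel:
 * check_bh_context() is a hypothetical helper showing how the offsets
 * surface through in_softirq() and in_serving_softirq().
 */
static void __maybe_unused check_bh_context(void)
{
	local_bh_disable();		/* count += SOFTIRQ_DISABLE_OFFSET */
	WARN_ON(!in_softirq());		/* softirq_count() is non-zero     */
	WARN_ON(in_serving_softirq());	/* but SOFTIRQ_OFFSET bit is clear */
	local_bh_enable();		/* count -= SOFTIRQ_DISABLE_OFFSET */
}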
106 #ifdef CONFIG_PREEMPT_RT
107 
108 /*
109  * RT accounts for BH disabled sections in task::softirq_disable_cnt and
110  * also in per CPU softirq_ctrl::cnt. This is necessary to allow tasks in a
111  * softirq disabled section to be preempted.
112  *
113  * The per task counter is used for softirq_count(), in_softirq() and
114  * in_serving_softirq() because these counts are only valid when the task
115  * holding softirq_ctrl::lock is running.
116  *
117  * The per CPU counter prevents pointless wakeups of ksoftirqd in case that
118  * the task which is in a softirq disabled section is preempted or blocks.
119  */
120 struct softirq_ctrl {
121 	local_lock_t	lock;
122 	int		cnt;
123 };
124 
125 static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
126 	.lock	= INIT_LOCAL_LOCK(softirq_ctrl.lock),
127 };
128 
129 #ifdef CONFIG_DEBUG_LOCK_ALLOC
130 static struct lock_class_key bh_lock_key;
131 struct lockdep_map bh_lock_map = {
132 	.name			= "local_bh",
133 	.key			= &bh_lock_key,
134 	.wait_type_outer	= LD_WAIT_FREE,
135 	.wait_type_inner	= LD_WAIT_CONFIG, /* PREEMPT_RT makes BH preemptible. */
136 	.lock_type		= LD_LOCK_PERCPU,
137 };
138 EXPORT_SYMBOL_GPL(bh_lock_map);
139 #endif
140 
141 /**
142  * local_bh_blocked() - Check from idle whether BH processing is blocked
143  *
144  * Returns false if the per CPU softirq_ctrl::cnt is 0, otherwise true.
145  *
146  * This is invoked from the idle task to guard against false positive
147  * softirq pending warnings, which would happen when the task which holds
148  * softirq_ctrl::lock was the only running task on the CPU and blocks on
149  * some other lock.
150  */
151 bool local_bh_blocked(void)
152 {
153 	return __this_cpu_read(softirq_ctrl.cnt) != 0;
154 }
155 
156 void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
157 {
158 	unsigned long flags;
159 	int newcnt;
160 
161 	WARN_ON_ONCE(in_hardirq());
162 
163 	lock_map_acquire_read(&bh_lock_map);
164 
165 	/* First entry of a task into a BH disabled section? */
166 	if (!current->softirq_disable_cnt) {
167 		if (preemptible()) {
168 			if (IS_ENABLED(CONFIG_PREEMPT_RT_NEEDS_BH_LOCK))
169 				local_lock(&softirq_ctrl.lock);
170 			else
171 				migrate_disable();
172 
173 			/* Required to meet the RCU bottomhalf requirements. */
174 			rcu_read_lock();
175 		} else {
176 			DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt));
177 		}
178 	}
179 
180 	/*
181 	 * Track the per CPU softirq disabled state. On RT this is per CPU
182 	 * state to allow preemption of bottom half disabled sections.
183 	 */
184 	if (IS_ENABLED(CONFIG_PREEMPT_RT_NEEDS_BH_LOCK)) {
185 		newcnt = this_cpu_add_return(softirq_ctrl.cnt, cnt);
186 		/*
187 		 * Reflect the result in the task state to prevent recursion on the
188 		 * local lock and to make softirq_count() et al. work.
189 		 */
190 		current->softirq_disable_cnt = newcnt;
191 
192 		if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && newcnt == cnt) {
193 			raw_local_irq_save(flags);
194 			lockdep_softirqs_off(ip);
195 			raw_local_irq_restore(flags);
196 		}
197 	} else {
198 		bool sirq_dis = false;
199 
200 		if (!current->softirq_disable_cnt)
201 			sirq_dis = true;
202 
203 		this_cpu_add(softirq_ctrl.cnt, cnt);
204 		current->softirq_disable_cnt += cnt;
205 		WARN_ON_ONCE(current->softirq_disable_cnt < 0);
206 
207 		if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && sirq_dis) {
208 			raw_local_irq_save(flags);
209 			lockdep_softirqs_off(ip);
210 			raw_local_irq_restore(flags);
211 		}
212 	}
213 }
214 EXPORT_SYMBOL(__local_bh_disable_ip);
215 
216 static void __local_bh_enable(unsigned int cnt, bool unlock)
217 {
218 	unsigned long flags;
219 	bool sirq_en = false;
220 	int newcnt;
221 
222 	if (IS_ENABLED(CONFIG_PREEMPT_RT_NEEDS_BH_LOCK)) {
223 		DEBUG_LOCKS_WARN_ON(current->softirq_disable_cnt !=
224 				    this_cpu_read(softirq_ctrl.cnt));
225 		if (softirq_count() == cnt)
226 			sirq_en = true;
227 	} else {
228 		if (current->softirq_disable_cnt == cnt)
229 			sirq_en = true;
230 	}
231 
232 	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && sirq_en) {
233 		raw_local_irq_save(flags);
234 		lockdep_softirqs_on(_RET_IP_);
235 		raw_local_irq_restore(flags);
236 	}
237 
238 	if (IS_ENABLED(CONFIG_PREEMPT_RT_NEEDS_BH_LOCK)) {
239 		newcnt = this_cpu_sub_return(softirq_ctrl.cnt, cnt);
240 		current->softirq_disable_cnt = newcnt;
241 
242 		if (!newcnt && unlock) {
243 			rcu_read_unlock();
244 			local_unlock(&softirq_ctrl.lock);
245 		}
246 	} else {
247 		current->softirq_disable_cnt -= cnt;
248 		this_cpu_sub(softirq_ctrl.cnt, cnt);
249 		if (unlock && !current->softirq_disable_cnt) {
250 			migrate_enable();
251 			rcu_read_unlock();
252 		} else {
253 			WARN_ON_ONCE(current->softirq_disable_cnt < 0);
254 		}
255 	}
256 }
257 
258 void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
259 {
260 	bool preempt_on = preemptible();
261 	unsigned long flags;
262 	u32 pending;
263 	int curcnt;
264 
265 	WARN_ON_ONCE(in_hardirq());
266 	lockdep_assert_irqs_enabled();
267 
268 	lock_map_release(&bh_lock_map);
269 
270 	local_irq_save(flags);
271 	if (IS_ENABLED(CONFIG_PREEMPT_RT_NEEDS_BH_LOCK))
272 		curcnt = this_cpu_read(softirq_ctrl.cnt);
273 	else
274 		curcnt = current->softirq_disable_cnt;
275 
276 	/*
277 	 * If this is not reenabling soft interrupts, no point in trying to
278 	 * run pending ones.
279 	 */
280 	if (curcnt != cnt)
281 		goto out;
282 
283 	pending = local_softirq_pending();
284 	if (!pending)
285 		goto out;
286 
287 	/*
288 	 * If this was called from a non-preemptible context, wake up the
289 	 * softirq daemon.
290 	 */
291 	if (!preempt_on) {
292 		wakeup_softirqd();
293 		goto out;
294 	}
295 
296 	/*
297 	 * Adjust softirq count to SOFTIRQ_OFFSET which makes
298 	 * in_serving_softirq() become true.
299 	 */
300 	cnt = SOFTIRQ_OFFSET;
301 	__local_bh_enable(cnt, false);
302 	__do_softirq();
303 
304 out:
305 	__local_bh_enable(cnt, preempt_on);
306 	local_irq_restore(flags);
307 }
308 EXPORT_SYMBOL(__local_bh_enable_ip);
309 
310 /*
311  * Invoked from run_ksoftirqd() outside of the interrupt disabled section
312  * to acquire the per CPU local lock for reentrancy protection.
313  */
314 static inline void ksoftirqd_run_begin(void)
315 {
316 	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
317 	local_irq_disable();
318 }
319 
320 /* Counterpart to ksoftirqd_run_begin() */
321 static inline void ksoftirqd_run_end(void)
322 {
323 	/* pairs with the lock_map_acquire_read() in ksoftirqd_run_begin() */
324 	lock_map_release(&bh_lock_map);
325 	__local_bh_enable(SOFTIRQ_OFFSET, true);
326 	WARN_ON_ONCE(in_interrupt());
327 	local_irq_enable();
328 }
329 
330 static inline void softirq_handle_begin(void) { }
331 static inline void softirq_handle_end(void) { }
332 
333 static inline bool should_wake_ksoftirqd(void)
334 {
335 	return !this_cpu_read(softirq_ctrl.cnt);
336 }
337 
338 static inline void invoke_softirq(void)
339 {
340 	if (should_wake_ksoftirqd())
341 		wakeup_softirqd();
342 }
343 
344 #define SCHED_SOFTIRQ_MASK	BIT(SCHED_SOFTIRQ)
345 
346 /*
347  * flush_smp_call_function_queue() can raise a soft interrupt in a function
348  * call. On RT kernels this is undesired; the only known users are in the
349  * block layer (which is disabled on RT) and in the scheduler for idle
350  * load balancing. If soft interrupts get raised which haven't been
351  * raised before the flush, warn if it is not SCHED_SOFTIRQ so it can be
352  * investigated.
353  */
354 void do_softirq_post_smp_call_flush(unsigned int was_pending)
355 {
356 	unsigned int is_pending = local_softirq_pending();
357 
358 	if (unlikely(was_pending != is_pending)) {
359 		WARN_ON_ONCE(was_pending != (is_pending & ~SCHED_SOFTIRQ_MASK));
360 		invoke_softirq();
361 	}
362 }
363 
364 #else /* CONFIG_PREEMPT_RT */
365 
366 /*
367  * This one is for softirq.c-internal use, where hardirqs are disabled
368  * legitimately:
369  */
370 #ifdef CONFIG_TRACE_IRQFLAGS
371 void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
372 {
373 	unsigned long flags;
374 
375 	WARN_ON_ONCE(in_hardirq());
376 
377 	raw_local_irq_save(flags);
378 	/*
379 	 * The preempt tracer hooks into preempt_count_add and will break
380 	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
381 	 * is set and before current->softirq_enabled is cleared.
382 	 * We must manually increment preempt_count here and manually
383 	 * call the trace_preempt_off later.
384 	 */
385 	__preempt_count_add(cnt);
386 	/*
387 	 * Were softirqs turned off above:
388 	 */
389 	if (softirq_count() == (cnt & SOFTIRQ_MASK))
390 		lockdep_softirqs_off(ip);
391 	raw_local_irq_restore(flags);
392 
393 	if (preempt_count() == cnt) {
394 #ifdef CONFIG_DEBUG_PREEMPT
395 		current->preempt_disable_ip = get_lock_parent_ip();
396 #endif
397 		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
398 	}
399 }
400 EXPORT_SYMBOL(__local_bh_disable_ip);
401 #endif /* CONFIG_TRACE_IRQFLAGS */
402 
403 static void __local_bh_enable(unsigned int cnt)
404 {
405 	lockdep_assert_irqs_disabled();
406 
407 	if (preempt_count() == cnt)
408 		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
409 
410 	if (softirq_count() == (cnt & SOFTIRQ_MASK))
411 		lockdep_softirqs_on(_RET_IP_);
412 
413 	__preempt_count_sub(cnt);
414 }
415 
416 /*
417  * Special-case - softirqs can safely be enabled by __do_softirq(),
418  * without processing still-pending softirqs:
419  */
420 void _local_bh_enable(void)
421 {
422 	WARN_ON_ONCE(in_hardirq());
423 	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
424 }
425 EXPORT_SYMBOL(_local_bh_enable);
426 
427 void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
428 {
429 	WARN_ON_ONCE(in_hardirq());
430 	lockdep_assert_irqs_enabled();
431 #ifdef CONFIG_TRACE_IRQFLAGS
432 	local_irq_disable();
433 #endif
434 	/*
435 	 * Are softirqs going to be turned on now:
436 	 */
437 	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
438 		lockdep_softirqs_on(ip);
439 	/*
440 	 * Keep preemption disabled until we are done with
441 	 * softirq processing:
442 	 */
443 	__preempt_count_sub(cnt - 1);
444 
445 	if (unlikely(!in_interrupt() && local_softirq_pending())) {
446 		/*
447 		 * Run softirq if any pending. And do it in its own stack
448 		 * as we may be calling this deep in a task call stack already.
449 		 */
450 		do_softirq();
451 	}
452 
453 	preempt_count_dec();
454 #ifdef CONFIG_TRACE_IRQFLAGS
455 	local_irq_enable();
456 #endif
457 	preempt_check_resched();
458 }
459 EXPORT_SYMBOL(__local_bh_enable_ip);
460 
461 static inline void softirq_handle_begin(void)
462 {
463 	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
464 }
465 
466 static inline void softirq_handle_end(void)
467 {
468 	__local_bh_enable(SOFTIRQ_OFFSET);
469 	WARN_ON_ONCE(in_interrupt());
470 }
471 
472 static inline void ksoftirqd_run_begin(void)
473 {
474 	local_irq_disable();
475 }
476 
477 static inline void ksoftirqd_run_end(void)
478 {
479 	local_irq_enable();
480 }
481 
482 static inline bool should_wake_ksoftirqd(void)
483 {
484 	return true;
485 }
486 
487 static inline void invoke_softirq(void)
488 {
489 	if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
490 #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
491 		/*
492 		 * We can safely execute softirq on the current stack if
493 		 * it is the irq stack, because it should be near empty
494 		 * at this stage.
495 		 */
496 		__do_softirq();
497 #else
498 		/*
499 		 * Otherwise, irq_exit() is called on the task stack, which may
500 		 * already be deep. So run softirqs on their own stack
501 		 * to prevent any overrun.
502 		 */
503 		do_softirq_own_stack();
504 #endif
505 	} else {
506 		wakeup_softirqd();
507 	}
508 }
509 
510 asmlinkage __visible void do_softirq(void)
511 {
512 	__u32 pending;
513 	unsigned long flags;
514 
515 	if (in_interrupt())
516 		return;
517 
518 	local_irq_save(flags);
519 
520 	pending = local_softirq_pending();
521 
522 	if (pending)
523 		do_softirq_own_stack();
524 
525 	local_irq_restore(flags);
526 }
527 
528 #endif /* !CONFIG_PREEMPT_RT */
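/*
 * A minimal illustrative sketch of the caller-side pattern backed by the
 * helpers above: local_bh_disable()/local_bh_enable() from
 * <linux/bottom_half.h> expand to the __local_bh_*_ip() variants. The per-CPU
 * counter and bh_example_update() are hypothetical, used only to show how
 * per-CPU data is protected against softirq handlers.
 */
static DEFINE_PER_CPU(u64, bh_example_counter);

static void __maybe_unused bh_example_update(void)
{
	local_bh_disable();			/* softirqs stay off this CPU */
	__this_cpu_add(bh_example_counter, 1);	/* safe vs. softirq handlers  */
	local_bh_enable();			/* may run pending softirqs   */
}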
529 
530 /*
531  * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
532  * but break the loop if need_resched() is set or after 2 ms.
533  * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
534  * certain cases, such as stop_machine(), jiffies may cease to
535  * increment and so we need the MAX_SOFTIRQ_RESTART limit as
536  * well to make sure we eventually return from this method.
537  *
538  * These limits have been established via experimentation.
539  * The two things to balance are latency and fairness -
540  * we want to handle softirqs as soon as possible, but they
541  * should not be able to lock up the box.
542  */
543 #define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
544 #define MAX_SOFTIRQ_RESTART 10
545 
546 #ifdef CONFIG_TRACE_IRQFLAGS
547 /*
548  * When we run softirqs from irq_exit() and thus on the hardirq stack we need
549  * to keep the lockdep irq context tracking as tight as possible in order to
550  * not mis-qualify lock contexts and miss possible deadlocks.
551  */
552 
553 static inline bool lockdep_softirq_start(void)
554 {
555 	bool in_hardirq = false;
556 
557 	if (lockdep_hardirq_context()) {
558 		in_hardirq = true;
559 		lockdep_hardirq_exit();
560 	}
561 
562 	lockdep_softirq_enter();
563 
564 	return in_hardirq;
565 }
566 
567 static inline void lockdep_softirq_end(bool in_hardirq)
568 {
569 	lockdep_softirq_exit();
570 
571 	if (in_hardirq)
572 		lockdep_hardirq_enter();
573 }
574 #else
575 static inline bool lockdep_softirq_start(void) { return false; }
576 static inline void lockdep_softirq_end(bool in_hardirq) { }
577 #endif
578 
579 static void handle_softirqs(bool ksirqd)
580 {
581 	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
582 	unsigned long old_flags = current->flags;
583 	int max_restart = MAX_SOFTIRQ_RESTART;
584 	struct softirq_action *h;
585 	bool in_hardirq;
586 	__u32 pending;
587 	int softirq_bit;
588 
589 	/*
590 	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
591  * softirq. A softirq handler, such as network RX, might set PF_MEMALLOC
592 	 * again if the socket is related to swapping.
593 	 */
594 	current->flags &= ~PF_MEMALLOC;
595 
596 	pending = local_softirq_pending();
597 
598 	softirq_handle_begin();
599 	in_hardirq = lockdep_softirq_start();
600 	account_softirq_enter(current);
601 
602 restart:
603 	/* Reset the pending bitmask before enabling irqs */
604 	set_softirq_pending(0);
605 
606 	local_irq_enable();
607 
608 	h = softirq_vec;
609 
610 	while ((softirq_bit = ffs(pending))) {
611 		unsigned int vec_nr;
612 		int prev_count;
613 
614 		h += softirq_bit - 1;
615 
616 		vec_nr = h - softirq_vec;
617 		prev_count = preempt_count();
618 
619 		kstat_incr_softirqs_this_cpu(vec_nr);
620 
621 		trace_softirq_entry(vec_nr);
622 		h->action();
623 		trace_softirq_exit(vec_nr);
624 		if (unlikely(prev_count != preempt_count())) {
625 			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
626 			       vec_nr, softirq_to_name[vec_nr], h->action,
627 			       prev_count, preempt_count());
628 			preempt_count_set(prev_count);
629 		}
630 		h++;
631 		pending >>= softirq_bit;
632 	}
633 
634 	if (!IS_ENABLED(CONFIG_PREEMPT_RT) && ksirqd)
635 		rcu_softirq_qs();
636 
637 	local_irq_disable();
638 
639 	pending = local_softirq_pending();
640 	if (pending) {
641 		if (time_before(jiffies, end) && !need_resched() &&
642 		    --max_restart)
643 			goto restart;
644 
645 		wakeup_softirqd();
646 	}
647 
648 	account_softirq_exit(current);
649 	lockdep_softirq_end(in_hardirq);
650 	softirq_handle_end();
651 	current_restore_flags(old_flags, PF_MEMALLOC);
652 }
653 
654 asmlinkage __visible void __softirq_entry __do_softirq(void)
655 {
656 	handle_softirqs(false);
657 }
658 
659 /**
660  * irq_enter_rcu - Enter an interrupt context with RCU watching
661  */
662 void irq_enter_rcu(void)
663 {
664 	__irq_enter_raw();
665 
666 	if (tick_nohz_full_cpu(smp_processor_id()) ||
667 	    (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET)))
668 		tick_irq_enter();
669 
670 	account_hardirq_enter(current);
671 }
672 
673 /**
674  * irq_enter - Enter an interrupt context including RCU update
675  */
676 void irq_enter(void)
677 {
678 	ct_irq_enter();
679 	irq_enter_rcu();
680 }
681 
682 static inline void tick_irq_exit(void)
683 {
684 #ifdef CONFIG_NO_HZ_COMMON
685 	int cpu = smp_processor_id();
686 
687 	/* Make sure that timer wheel updates are propagated */
688 	if ((sched_core_idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
689 		if (!in_hardirq())
690 			tick_nohz_irq_exit();
691 	}
692 #endif
693 }
694 
695 #ifdef CONFIG_IRQ_FORCED_THREADING
696 DEFINE_PER_CPU(struct task_struct *, ktimerd);
697 DEFINE_PER_CPU(unsigned long, pending_timer_softirq);
698 
699 static void wake_timersd(void)
700 {
701 	struct task_struct *tsk = __this_cpu_read(ktimerd);
702 
703 	if (tsk)
704 		wake_up_process(tsk);
705 }
706 
707 #else
708 
709 static inline void wake_timersd(void) { }
710 
711 #endif
712 
713 static inline void __irq_exit_rcu(void)
714 {
715 #ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
716 	local_irq_disable();
717 #else
718 	lockdep_assert_irqs_disabled();
719 #endif
720 	account_hardirq_exit(current);
721 	preempt_count_sub(HARDIRQ_OFFSET);
722 	if (!in_interrupt() && local_softirq_pending())
723 		invoke_softirq();
724 
725 	if (IS_ENABLED(CONFIG_IRQ_FORCED_THREADING) && force_irqthreads() &&
726 	    local_timers_pending_force_th() && !(in_nmi() | in_hardirq()))
727 		wake_timersd();
728 
729 	tick_irq_exit();
730 }
731 
732 /**
733  * irq_exit_rcu() - Exit an interrupt context without updating RCU
734  *
735  * Also processes softirqs if needed and possible.
736  */
737 void irq_exit_rcu(void)
738 {
739 	__irq_exit_rcu();
740 	 /* must be last! */
741 	lockdep_hardirq_exit();
742 }
743 
744 /**
745  * irq_exit - Exit an interrupt context, update RCU and lockdep
746  *
747  * Also processes softirqs if needed and possible.
748  */
749 void irq_exit(void)
750 {
751 	__irq_exit_rcu();
752 	ct_irq_exit();
753 	 /* must be last! */
754 	lockdep_hardirq_exit();
755 }
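/*
 * A minimal illustrative sketch of how architecture interrupt entry code
 * pairs the helpers above; example_arch_irq_entry() is hypothetical and the
 * actual handler dispatch step is left as a comment.
 */
static void __maybe_unused example_arch_irq_entry(void)
{
	irq_enter();	/* ct_irq_enter() + irq_enter_rcu() */
	/* ... dispatch the hardware interrupt handler(s) here ... */
	irq_exit();	/* may run softirqs via invoke_softirq() */
}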
756 
757 /*
758  * This function must run with irqs disabled!
759  */
760 inline void raise_softirq_irqoff(unsigned int nr)
761 {
762 	__raise_softirq_irqoff(nr);
763 
764 	/*
765 	 * If we're in an interrupt or softirq, we're done
766 	 * (this also catches softirq-disabled code). We will
767 	 * actually run the softirq once we return from
768 	 * the irq or softirq.
769 	 *
770 	 * Otherwise we wake up ksoftirqd to make sure we
771 	 * schedule the softirq soon.
772 	 */
773 	if (!in_interrupt() && should_wake_ksoftirqd())
774 		wakeup_softirqd();
775 }
776 
777 void raise_softirq(unsigned int nr)
778 {
779 	unsigned long flags;
780 
781 	local_irq_save(flags);
782 	raise_softirq_irqoff(nr);
783 	local_irq_restore(flags);
784 }
785 
786 void __raise_softirq_irqoff(unsigned int nr)
787 {
788 	lockdep_assert_irqs_disabled();
789 	trace_softirq_raise(nr);
790 	or_softirq_pending(1UL << nr);
791 }
792 
793 void open_softirq(int nr, void (*action)(void))
794 {
795 	softirq_vec[nr].action = action;
796 }
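/*
 * A minimal illustrative sketch of the open_softirq()/raise_softirq()
 * pairing. my_softirq_action() and my_softirq_setup_example() are
 * hypothetical; 'nr' must be one of the fixed enum entries in
 * <linux/interrupt.h> (see softirq_init() below for the in-tree tasklet
 * registration).
 */
static void my_softirq_action(void)
{
	/* Runs in softirq context with hardirqs enabled. */
}

static void __maybe_unused my_softirq_setup_example(unsigned int nr)
{
	open_softirq(nr, my_softirq_action);	/* install the handler */
	raise_softirq(nr);			/* mark it pending     */
}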
797 
798 /*
799  * Tasklets
800  */
801 struct tasklet_head {
802 	struct tasklet_struct *head;
803 	struct tasklet_struct **tail;
804 };
805 
806 static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
807 static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
808 
809 static void __tasklet_schedule_common(struct tasklet_struct *t,
810 				      struct tasklet_head __percpu *headp,
811 				      unsigned int softirq_nr)
812 {
813 	struct tasklet_head *head;
814 	unsigned long flags;
815 
816 	local_irq_save(flags);
817 	head = this_cpu_ptr(headp);
818 	t->next = NULL;
819 	*head->tail = t;
820 	head->tail = &(t->next);
821 	raise_softirq_irqoff(softirq_nr);
822 	local_irq_restore(flags);
823 }
824 
825 void __tasklet_schedule(struct tasklet_struct *t)
826 {
827 	__tasklet_schedule_common(t, &tasklet_vec,
828 				  TASKLET_SOFTIRQ);
829 }
830 EXPORT_SYMBOL(__tasklet_schedule);
831 
832 void __tasklet_hi_schedule(struct tasklet_struct *t)
833 {
834 	__tasklet_schedule_common(t, &tasklet_hi_vec,
835 				  HI_SOFTIRQ);
836 }
837 EXPORT_SYMBOL(__tasklet_hi_schedule);
838 
839 static bool tasklet_clear_sched(struct tasklet_struct *t)
840 {
841 	if (test_and_clear_wake_up_bit(TASKLET_STATE_SCHED, &t->state))
842 		return true;
843 
844 	WARN_ONCE(1, "tasklet SCHED state not set: %s %pS\n",
845 		  t->use_callback ? "callback" : "func",
846 		  t->use_callback ? (void *)t->callback : (void *)t->func);
847 
848 	return false;
849 }
850 
851 #ifdef CONFIG_PREEMPT_RT
852 struct tasklet_sync_callback {
853 	spinlock_t	cb_lock;
854 	atomic_t	cb_waiters;
855 };
856 
857 static DEFINE_PER_CPU(struct tasklet_sync_callback, tasklet_sync_callback) = {
858 	.cb_lock	= __SPIN_LOCK_UNLOCKED(tasklet_sync_callback.cb_lock),
859 	.cb_waiters	= ATOMIC_INIT(0),
860 };
861 
862 static void tasklet_lock_callback(void)
863 {
864 	spin_lock(this_cpu_ptr(&tasklet_sync_callback.cb_lock));
865 }
866 
867 static void tasklet_unlock_callback(void)
868 {
869 	spin_unlock(this_cpu_ptr(&tasklet_sync_callback.cb_lock));
870 }
871 
872 static void tasklet_callback_cancel_wait_running(void)
873 {
874 	struct tasklet_sync_callback *sync_cb = this_cpu_ptr(&tasklet_sync_callback);
875 
876 	atomic_inc(&sync_cb->cb_waiters);
877 	spin_lock(&sync_cb->cb_lock);
878 	atomic_dec(&sync_cb->cb_waiters);
879 	spin_unlock(&sync_cb->cb_lock);
880 }
881 
882 static void tasklet_callback_sync_wait_running(void)
883 {
884 	struct tasklet_sync_callback *sync_cb = this_cpu_ptr(&tasklet_sync_callback);
885 
886 	if (atomic_read(&sync_cb->cb_waiters)) {
887 		spin_unlock(&sync_cb->cb_lock);
888 		spin_lock(&sync_cb->cb_lock);
889 	}
890 }
891 
892 #else /* !CONFIG_PREEMPT_RT: */
893 
894 static void tasklet_lock_callback(void) { }
895 static void tasklet_unlock_callback(void) { }
896 static void tasklet_callback_sync_wait_running(void) { }
897 
898 #ifdef CONFIG_SMP
899 static void tasklet_callback_cancel_wait_running(void) { }
900 #endif
901 #endif /* !CONFIG_PREEMPT_RT */
902 
903 static void tasklet_action_common(struct tasklet_head *tl_head,
904 				  unsigned int softirq_nr)
905 {
906 	struct tasklet_struct *list;
907 
908 	local_irq_disable();
909 	list = tl_head->head;
910 	tl_head->head = NULL;
911 	tl_head->tail = &tl_head->head;
912 	local_irq_enable();
913 
914 	tasklet_lock_callback();
915 	while (list) {
916 		struct tasklet_struct *t = list;
917 
918 		list = list->next;
919 
920 		if (tasklet_trylock(t)) {
921 			if (!atomic_read(&t->count)) {
922 				if (tasklet_clear_sched(t)) {
923 					if (t->use_callback) {
924 						trace_tasklet_entry(t, t->callback);
925 						t->callback(t);
926 						trace_tasklet_exit(t, t->callback);
927 					} else {
928 						trace_tasklet_entry(t, t->func);
929 						t->func(t->data);
930 						trace_tasklet_exit(t, t->func);
931 					}
932 				}
933 				tasklet_unlock(t);
934 				tasklet_callback_sync_wait_running();
935 				continue;
936 			}
937 			tasklet_unlock(t);
938 		}
939 
940 		local_irq_disable();
941 		t->next = NULL;
942 		*tl_head->tail = t;
943 		tl_head->tail = &t->next;
944 		__raise_softirq_irqoff(softirq_nr);
945 		local_irq_enable();
946 	}
947 	tasklet_unlock_callback();
948 }
949 
950 static __latent_entropy void tasklet_action(void)
951 {
952 	workqueue_softirq_action(false);
953 	tasklet_action_common(this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
954 }
955 
956 static __latent_entropy void tasklet_hi_action(void)
957 {
958 	workqueue_softirq_action(true);
959 	tasklet_action_common(this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
960 }
961 
962 void tasklet_setup(struct tasklet_struct *t,
963 		   void (*callback)(struct tasklet_struct *))
964 {
965 	t->next = NULL;
966 	t->state = 0;
967 	atomic_set(&t->count, 0);
968 	t->callback = callback;
969 	t->use_callback = true;
970 	t->data = 0;
971 }
972 EXPORT_SYMBOL(tasklet_setup);
973 
974 void tasklet_init(struct tasklet_struct *t,
975 		  void (*func)(unsigned long), unsigned long data)
976 {
977 	t->next = NULL;
978 	t->state = 0;
979 	atomic_set(&t->count, 0);
980 	t->func = func;
981 	t->use_callback = false;
982 	t->data = data;
983 }
984 EXPORT_SYMBOL(tasklet_init);
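/*
 * A minimal illustrative sketch of the modern tasklet API usage: set up with
 * tasklet_setup(), queue with tasklet_schedule(), and tear down with
 * tasklet_kill() before the tasklet memory goes away. my_tasklet and
 * my_tasklet_fn() are hypothetical driver-side symbols.
 */
static void my_tasklet_fn(struct tasklet_struct *t)
{
	/* Deferred work; runs from TASKLET_SOFTIRQ on the scheduling CPU. */
}

static struct tasklet_struct my_tasklet;

static void __maybe_unused my_tasklet_example(void)
{
	tasklet_setup(&my_tasklet, my_tasklet_fn);
	tasklet_schedule(&my_tasklet);	/* typically from a hardirq handler */
	tasklet_kill(&my_tasklet);	/* must not be called from IRQ context */
}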
985 
986 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
987 /*
988  * Do not use in new code. Waiting for tasklets from atomic contexts is
989  * error prone and should be avoided.
990  */
991 void tasklet_unlock_spin_wait(struct tasklet_struct *t)
992 {
993 	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
994 		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
995 			/*
996 			 * Prevent a live lock when current has preempted soft
997 			 * interrupt processing or is preventing ksoftirqd from
998 			 * running.
999 			 */
1000 			tasklet_callback_cancel_wait_running();
1001 		} else {
1002 			cpu_relax();
1003 		}
1004 	}
1005 }
1006 EXPORT_SYMBOL(tasklet_unlock_spin_wait);
1007 #endif
1008 
1009 void tasklet_kill(struct tasklet_struct *t)
1010 {
1011 	if (in_interrupt())
1012 		pr_notice("Attempt to kill tasklet from interrupt\n");
1013 
1014 	wait_on_bit_lock(&t->state, TASKLET_STATE_SCHED, TASK_UNINTERRUPTIBLE);
1015 
1016 	tasklet_unlock_wait(t);
1017 	tasklet_clear_sched(t);
1018 }
1019 EXPORT_SYMBOL(tasklet_kill);
1020 
1021 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
1022 void tasklet_unlock(struct tasklet_struct *t)
1023 {
1024 	clear_and_wake_up_bit(TASKLET_STATE_RUN, &t->state);
1025 }
1026 EXPORT_SYMBOL_GPL(tasklet_unlock);
1027 
1028 void tasklet_unlock_wait(struct tasklet_struct *t)
1029 {
1030 	wait_on_bit(&t->state, TASKLET_STATE_RUN, TASK_UNINTERRUPTIBLE);
1031 }
1032 EXPORT_SYMBOL_GPL(tasklet_unlock_wait);
1033 #endif
1034 
1035 void __init softirq_init(void)
1036 {
1037 	int cpu;
1038 
1039 	for_each_possible_cpu(cpu) {
1040 		per_cpu(tasklet_vec, cpu).tail =
1041 			&per_cpu(tasklet_vec, cpu).head;
1042 		per_cpu(tasklet_hi_vec, cpu).tail =
1043 			&per_cpu(tasklet_hi_vec, cpu).head;
1044 	}
1045 
1046 	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
1047 	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
1048 }
1049 
1050 static int ksoftirqd_should_run(unsigned int cpu)
1051 {
1052 	return local_softirq_pending();
1053 }
1054 
1055 static void run_ksoftirqd(unsigned int cpu)
1056 {
1057 	ksoftirqd_run_begin();
1058 	if (local_softirq_pending()) {
1059 		/*
1060 		 * We can safely run softirqs on the current stack, as we are not deep
1061 		 * in the task stack here.
1062 		 */
1063 		handle_softirqs(true);
1064 		ksoftirqd_run_end();
1065 		cond_resched();
1066 		return;
1067 	}
1068 	ksoftirqd_run_end();
1069 }
1070 
1071 #ifdef CONFIG_HOTPLUG_CPU
1072 static int takeover_tasklets(unsigned int cpu)
1073 {
1074 	workqueue_softirq_dead(cpu);
1075 
1076 	/* CPU is dead, so no lock needed. */
1077 	local_irq_disable();
1078 
1079 	/* Find end, append list for that CPU. */
1080 	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
1081 		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
1082 		__this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
1083 		per_cpu(tasklet_vec, cpu).head = NULL;
1084 		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
1085 	}
1086 	raise_softirq_irqoff(TASKLET_SOFTIRQ);
1087 
1088 	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
1089 		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
1090 		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
1091 		per_cpu(tasklet_hi_vec, cpu).head = NULL;
1092 		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
1093 	}
1094 	raise_softirq_irqoff(HI_SOFTIRQ);
1095 
1096 	local_irq_enable();
1097 	return 0;
1098 }
1099 #else
1100 #define takeover_tasklets	NULL
1101 #endif /* CONFIG_HOTPLUG_CPU */
1102 
1103 static struct smp_hotplug_thread softirq_threads = {
1104 	.store			= &ksoftirqd,
1105 	.thread_should_run	= ksoftirqd_should_run,
1106 	.thread_fn		= run_ksoftirqd,
1107 	.thread_comm		= "ksoftirqd/%u",
1108 };
1109 
1110 #ifdef CONFIG_IRQ_FORCED_THREADING
1111 static void ktimerd_setup(unsigned int cpu)
1112 {
1113 	/* Above SCHED_NORMAL to handle timers before regular tasks. */
1114 	sched_set_fifo_low(current);
1115 }
1116 
1117 static int ktimerd_should_run(unsigned int cpu)
1118 {
1119 	return local_timers_pending_force_th();
1120 }
1121 
1122 void raise_ktimers_thread(unsigned int nr)
1123 {
1124 	trace_softirq_raise(nr);
1125 	__this_cpu_or(pending_timer_softirq, BIT(nr));
1126 }
1127 
1128 static void run_ktimerd(unsigned int cpu)
1129 {
1130 	unsigned int timer_si;
1131 
1132 	ksoftirqd_run_begin();
1133 
1134 	timer_si = local_timers_pending_force_th();
1135 	__this_cpu_write(pending_timer_softirq, 0);
1136 	or_softirq_pending(timer_si);
1137 
1138 	__do_softirq();
1139 
1140 	ksoftirqd_run_end();
1141 }
1142 
1143 static struct smp_hotplug_thread timer_thread = {
1144 	.store			= &ktimerd,
1145 	.setup			= ktimerd_setup,
1146 	.thread_should_run	= ktimerd_should_run,
1147 	.thread_fn		= run_ktimerd,
1148 	.thread_comm		= "ktimers/%u",
1149 };
1150 #endif
1151 
1152 static __init int spawn_ksoftirqd(void)
1153 {
1154 	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
1155 				  takeover_tasklets);
1156 	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
1157 #ifdef CONFIG_IRQ_FORCED_THREADING
1158 	if (force_irqthreads())
1159 		BUG_ON(smpboot_register_percpu_thread(&timer_thread));
1160 #endif
1161 	return 0;
1162 }
1163 early_initcall(spawn_ksoftirqd);
1164 
1165 /*
1166  * [ These __weak aliases are kept in a separate compilation unit, so that
1167  *   GCC does not inline them incorrectly. ]
1168  */
1169 
1170 int __init __weak early_irq_init(void)
1171 {
1172 	return 0;
1173 }
1174 
1175 int __init __weak arch_probe_nr_irqs(void)
1176 {
1177 	return NR_IRQS_LEGACY;
1178 }
1179 
1180 int __init __weak arch_early_irq_init(void)
1181 {
1182 	return 0;
1183 }
1184 
1185 unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
1186 {
1187 	return from;
1188 }
1189