xref: /linux/kernel/softirq.c (revision 80d443e8876602be2c130f79c4de81e12e2a700d)
/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only the local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether this results in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized with respect to itself.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst-case 1/HZ latency
 * for the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 * right now. Let ksoftirqd handle this at its own rate, to get fairness.
 */
static bool ksoftirqd_running(void)
{
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	return tsk && (tsk->state == TASK_RUNNING);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
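
/*
 * Worked example (added commentary, values from <linux/preempt.h>): with
 * SOFTIRQ_OFFSET == 0x100 and SOFTIRQ_DISABLE_OFFSET == 0x200, a task
 * that has called local_bh_disable() once sees softirq_count() == 0x200,
 * while a CPU actually running handlers inside __do_softirq() sees
 * softirq_count() == 0x100; in_serving_softirq() tests exactly the
 * SOFTIRQ_OFFSET bit, so it is true only in the second case.
 */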

/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_on(_RET_IP_);
	preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any pending. And do it in its own stack
		 * as we may be calling this deep in a task call stack already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);
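
/*
 * Usage sketch (added commentary, not from the original file): the
 * local_bh_disable()/local_bh_enable() wrappers in <linux/bottom_half.h>
 * funnel into the _ip() variants above with SOFTIRQ_DISABLE_OFFSET.
 * Code protecting data it shares with a softirq handler on this CPU
 * typically looks like:
 *
 *	local_bh_disable();
 *	...touch data also used by the softirq handler...
 *	local_bh_enable();	(pending softirqs may run right here)
 */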

/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME limit provides a nice upper bound in most cases, but
 * in certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this function.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency against fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10
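
/*
 * Illustrative numbers (added commentary): at HZ=1000 the 2 ms budget is
 * 2 jiffies; at HZ=100, msecs_to_jiffies(2) rounds up to a single jiffy,
 * i.e. the time check then operates at a coarser 10 ms granularity.
 */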

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (trace_hardirq_context(current)) {
		in_hardirq = true;
		trace_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		trace_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
	 * softirq. A softirq handler such as network RX might set PF_MEMALLOC
	 * again if the socket is related to swap.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	account_irq_enter_time(current);

	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	in_hardirq = lockdep_softirq_start();

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	rcu_bh_qs();
	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	lockdep_softirq_end(in_hardirq);
	account_irq_exit_time(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}

asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending && !ksoftirqd_running())
		do_softirq_own_stack();

	local_irq_restore(flags);
}

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	rcu_irq_enter();
	if (is_idle_task(current) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq from needlessly waking up ksoftirqd
		 * here, as softirq will be serviced on return from interrupt.
		 */
		local_bh_disable();
		tick_irq_enter();
		_local_bh_enable();
	}

	__irq_enter();
}

static inline void invoke_softirq(void)
{
	if (ksoftirqd_running())
		return;

	if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack, which
		 * can already be potentially deep. So run softirq on its own
		 * stack to prevent any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_interrupt())
			tick_nohz_irq_exit();
	}
#endif
}

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	WARN_ON_ONCE(!irqs_disabled());
#endif

	account_irq_exit_time(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
	rcu_irq_exit();
	trace_hardirq_exit(); /* must be last! */
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
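
/*
 * Usage sketch (added commentary; the softirq vectors are a fixed enum in
 * <linux/interrupt.h>, so handlers are registered only by core subsystems
 * at boot). For example, net/core/dev.c does, in essence:
 *
 *	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
 *	...
 *	raise_softirq(NET_TX_SOFTIRQ);	(e.g. when a queue needs kicking)
 */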

/*
 * Tasklets
 */
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};
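
/*
 * Added note: tail points at the last ->next slot (the location holding
 * NULL), so an empty list has tail == &head and __tasklet_schedule() can
 * append with a single store through *tail, with no empty-list special case.
 */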

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_vec.tail) = t;
	__this_cpu_write(tasklet_vec.tail, &(t->next));
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_hi_vec.tail) = t;
	__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	BUG_ON(!irqs_disabled());

	t->next = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, t);
	__raise_softirq_irqoff(HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule_first);

static __latent_entropy void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_vec.head);
	__this_cpu_write(tasklet_vec.head, NULL);
	__this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_vec.tail) = t;
		__this_cpu_write(tasklet_vec.tail, &(t->next));
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}

static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, NULL);
	__this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_hi_vec.tail) = t;
		__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);
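
/*
 * Usage sketch (added commentary; my_dev/my_tasklet_fn are hypothetical
 * names, not from this file). A driver typically initializes a tasklet
 * once, schedules it from its hardirq handler, and kills it on teardown:
 *
 *	static void my_tasklet_fn(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *		...
 *	}
 *
 *	tasklet_init(&dev->t, my_tasklet_fn, (unsigned long)dev);
 *	tasklet_schedule(&dev->t);	(from the irq handler)
 *	tasklet_kill(&dev->t);		(before freeing dev)
 */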

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);

/*
 * tasklet_hrtimer
 */

/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
	struct tasklet_hrtimer *ttimer =
		container_of(timer, struct tasklet_hrtimer, timer);

	tasklet_hi_schedule(&ttimer->tasklet);
	return HRTIMER_NORESTART;
}

/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
	struct tasklet_hrtimer *ttimer = (void *)data;
	enum hrtimer_restart restart;

	restart = ttimer->function(&ttimer->timer);
	if (restart != HRTIMER_NORESTART)
		hrtimer_restart(&ttimer->timer);
}

/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:	 tasklet_hrtimer which is initialized
 * @function:	 hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
			  enum hrtimer_restart (*function)(struct hrtimer *),
			  clockid_t which_clock, enum hrtimer_mode mode)
{
	hrtimer_init(&ttimer->timer, which_clock, mode);
	ttimer->timer.function = __hrtimer_tasklet_trampoline;
	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
		     (unsigned long)ttimer);
	ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
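
/*
 * Usage sketch (added commentary; my_cb and th are hypothetical names).
 * tasklet_hrtimer_start() from <linux/interrupt.h> arms the underlying
 * hrtimer; when it fires, my_cb runs from HI_SOFTIRQ tasklet context
 * instead of hard interrupt context:
 *
 *	static enum hrtimer_restart my_cb(struct hrtimer *t)
 *	{
 *		...
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	tasklet_hrtimer_init(&th, my_cb, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&th, ms_to_ktime(10), HRTIMER_MODE_REL);
 */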

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirq on the inline stack, as we are
		 * not deep in the task stack here.
		 */
		__do_softirq();
		local_irq_enable();
		cond_resched_rcu_qs();
		return;
	}
	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static int takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
	return 0;
}
#else
#define takeover_tasklets	NULL
#endif /* CONFIG_HOTPLUG_CPU */

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}