/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 *
 *	Remote softirq infrastructure is by Jens Axboe.
 */

#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/tick.h>

#include <asm/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     with its own spinlocks.
   - Even if a softirq is serialized, only the local cpu is marked for
     execution.  Hence we get a sort of weak cpu binding, though it is
     still not clear whether this results in better locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt themselves.
 */

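/*
 * Typical life cycle of a softirq, as an illustrative sketch (NET_RX_SOFTIRQ
 * and net_rx_action() are only used as a familiar example here):
 *
 *	open_softirq(NET_RX_SOFTIRQ, net_rx_action);	registration, boot time
 *	raise_softirq(NET_RX_SOFTIRQ);			mark pending, any context
 *	__do_softirq() -> net_rx_action()		on irq exit or in ksoftirqd
 */
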
#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst-case 1/HZ latency
 * for the pending events, so we let the scheduler balance
 * the softirq load for us.
 */
static inline void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __get_cpu_var(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	add_preempt_count(SOFTIRQ_OFFSET);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip)
{
	add_preempt_count(SOFTIRQ_OFFSET);
	barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */

void local_bh_disable(void)
{
	__local_bh_disable((unsigned long)__builtin_return_address(0));
}

EXPORT_SYMBOL(local_bh_disable);

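/*
 * Usage sketch (hypothetical caller): code that touches data also used from
 * softirq context on this cpu brackets the access with
 *
 *	local_bh_disable();
 *	... touch the data shared with the softirq handler ...
 *	local_bh_enable();
 *
 * local_bh_enable() then runs any softirqs that became pending meanwhile.
 */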
/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(SOFTIRQ_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);

static inline void _local_bh_enable_ip(unsigned long ip)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}

void local_bh_enable(void)
{
	_local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
	_local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);

/*
 * We restart softirq processing at most MAX_SOFTIRQ_RESTART times,
 * and fall back to ksoftirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency and fairness:
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10

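/*
 * __do_softirq() runs with bottom halves accounted as disabled and hardirqs
 * enabled while the handlers execute: it snapshots and clears the pending
 * mask, calls each pending handler in bit order, and restarts up to
 * MAX_SOFTIRQ_RESTART times if new softirqs were raised meanwhile before
 * deferring whatever is still pending to ksoftirqd.
 */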
asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;
	int cpu;

	pending = local_softirq_pending();
	account_system_vtime(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0));
	trace_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			int prev_count = preempt_count();

			h->action(h);

			if (unlikely(prev_count != preempt_count())) {
				printk(KERN_ERR "huh, entered softirq %td %p "
				       "with preempt_count %08x,"
				       " exited with %08x?\n", h - softirq_vec,
				       h->action, prev_count, preempt_count());
				preempt_count() = prev_count;
			}

			rcu_bh_qsctr_inc(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending && --max_restart)
		goto restart;

	if (pending)
		wakeup_softirqd();

	trace_softirq_exit();

	account_system_vtime(current);
	_local_bh_enable();
}

#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		__do_softirq();

	local_irq_restore(flags);
}

#endif

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	int cpu = smp_processor_id();

	rcu_irq_enter();
	if (idle_cpu(cpu) && !in_interrupt()) {
		__irq_enter();
		tick_check_idle(cpu);
	} else
		__irq_enter();
}

#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()	__do_softirq()
#else
# define invoke_softirq()	do_softirq()
#endif
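/*
 * If the architecture guarantees that irqs are still disabled when irq_exit()
 * runs, we can call __do_softirq() directly and skip do_softirq()'s redundant
 * in_interrupt() check and irq save/restore.
 */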

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
	account_system_vtime(current);
	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

#ifdef CONFIG_NO_HZ
	/* Make sure that timer wheel updates are propagated */
	rcu_irq_exit();
	if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
		tick_nohz_stop_sched_tick(0);
#endif
	preempt_enable_no_resched();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

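/*
 * open_softirq - register @action as the handler for softirq @nr.  The
 * softirq numbers are the fixed enum entries from <linux/interrupt.h>
 * (HI_SOFTIRQ ... NR_SOFTIRQS); handlers are installed at init time and
 * never unregistered.
 */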
void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}

/* Tasklets */
struct tasklet_head
{
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_vec).tail = t;
	__get_cpu_var(tasklet_vec).tail = &(t->next);
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_hi_vec).tail = t;
	__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);

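/*
 * tasklet_action() splices the whole per-cpu list off with irqs disabled and
 * then walks it with irqs enabled.  A tasklet that is disabled (t->count is
 * non-zero) or already running on another cpu (tasklet_trylock() fails) is
 * simply requeued and the softirq is raised again, so it gets retried on a
 * later pass.
 */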
static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_vec).head;
	__get_cpu_var(tasklet_vec).head = NULL;
	__get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_vec).tail = t;
		__get_cpu_var(tasklet_vec).tail = &(t->next);
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}

static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_hi_vec).head;
	__get_cpu_var(tasklet_hi_vec).head = NULL;
	__get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_hi_vec).tail = t;
		__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}


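/*
 * Typical tasklet usage (illustrative sketch, hypothetical driver names):
 *
 *	static void foo_do_work(unsigned long data);
 *	static DECLARE_TASKLET(foo_tasklet, foo_do_work, 0);
 *
 *	in the interrupt handler:	tasklet_schedule(&foo_tasklet);
 *	on driver teardown:		tasklet_kill(&foo_tasklet);
 *
 * tasklet_init() below is the run-time equivalent of DECLARE_TASKLET().
 */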
void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do
			yield();
		while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);

DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
EXPORT_PER_CPU_SYMBOL(softirq_work_list);

static void __local_trigger(struct call_single_data *cp, int softirq)
{
	struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);

	list_add_tail(&cp->list, head);

	/* Trigger the softirq only if the list was previously empty.  */
	if (head->next == &cp->list)
		raise_softirq_irqoff(softirq);
}

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static void remote_softirq_receive(void *data)
{
	struct call_single_data *cp = data;
	unsigned long flags;
	int softirq;

	softirq = cp->priv;

	local_irq_save(flags);
	__local_trigger(cp, softirq);
	local_irq_restore(flags);
}

static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	if (cpu_online(cpu)) {
		cp->func = remote_softirq_receive;
		cp->info = cp;
		cp->flags = 0;
		cp->priv = softirq;

		__smp_call_function_single(cpu, cp);
		return 0;
	}
	return 1;
}
#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	return 1;
}
#endif

/**
 * __send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @this_cpu: the currently executing cpu
 * @softirq: the softirq for the work
 *
 * Attempt to schedule softirq work on a remote cpu.  If this cannot be
 * done, the work is instead queued up on the local cpu.
 *
 * Interrupts must be disabled.
 */
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
	if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
		__local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);

/**
 * send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @softirq: the softirq for the work
 *
 * Like __send_remote_softirq except that disabling interrupts and
 * computing the current cpu is done for the caller.
 */
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	unsigned long flags;
	int this_cpu;

	local_irq_save(flags);
	this_cpu = smp_processor_id();
	__send_remote_softirq(cp, cpu, this_cpu, softirq);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(send_remote_softirq);
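/*
 * Usage sketch (hypothetical caller): the call_single_data must stay valid
 * until the softirq handler on the target cpu has consumed it, so it is
 * normally embedded in the object being handed over, e.g.
 *
 *	send_remote_softirq(&obj->csd, target_cpu, BLOCK_SOFTIRQ);
 *
 * The BLOCK_SOFTIRQ handler on target_cpu is then expected to pick the entry
 * off its softirq_work_list[BLOCK_SOFTIRQ] and complete the object there.
 */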

static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
					       unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;
		int i;

		local_irq_disable();
		for (i = 0; i < NR_SOFTIRQS; i++) {
			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
			struct list_head *local_head;

			if (list_empty(head))
				continue;

			local_head = &__get_cpu_var(softirq_work_list[i]);
			list_splice_init(head, local_head);
			raise_softirq_irqoff(i);
		}
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
	.notifier_call	= remote_softirq_cpu_notify,
};

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int i;

		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
		for (i = 0; i < NR_SOFTIRQS; i++)
			INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
	}

	register_hotcpu_notifier(&remote_softirq_cpu_notifier);

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

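/*
 * ksoftirqd - per-cpu thread that runs softirqs which could not be handled
 * on irq exit (too many __do_softirq() restarts, or a raise from process
 * context).  The thread is bound to its cpu; if that cpu goes offline under
 * it, it jumps to wait_to_die and waits for kthread_stop().
 */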
static int ksoftirqd(void * __bind_cpu)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		preempt_disable();
		if (!local_softirq_pending()) {
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/* Preempt disable stops cpu going offline.
			   If already offline, we'll be on wrong CPU:
			   don't process */
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			do_softirq();
			preempt_enable_no_resched();
			cond_resched();
			preempt_disable();
			rcu_qsctr_inc((long)__bind_cpu);
		}
		preempt_enable();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

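/*
 * takeover_tasklets - a cpu has gone down: append its leftover normal and
 * high-priority tasklet lists to the current cpu's lists and raise the
 * corresponding softirqs so they get run here.
 */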
static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
		__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
		__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

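/*
 * cpu_callback - hotplug notifier that creates, wakes, rebinds and finally
 * stops the per-cpu ksoftirqd thread as its cpu comes and goes, and hands a
 * dead cpu's remaining tasklets to the current cpu via takeover_tasklets().
 */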
static int __cpuinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
		if (IS_ERR(p)) {
			printk("ksoftirqd for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		kthread_bind(p, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = p;
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		wake_up_process(per_cpu(ksoftirqd, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!per_cpu(ksoftirqd, hotcpu))
			break;
		/* Unbind so it can run.  Fall thru. */
		kthread_bind(per_cpu(ksoftirqd, hotcpu),
			     cpumask_any(cpu_online_mask));
	case CPU_DEAD:
	case CPU_DEAD_FROZEN: {
		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

		p = per_cpu(ksoftirqd, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = NULL;
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
		kthread_stop(p);
		takeover_tasklets(hotcpu);
		break;
	}
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

static __init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

	BUG_ON(err == NOTIFY_BAD);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);
	return 0;
}
early_initcall(spawn_ksoftirqd);

#ifdef CONFIG_SMP
/*
 * Call a function on all processors: @func runs on the local cpu with
 * interrupts disabled and on all other online cpus via smp_call_function().
 * If @wait is nonzero, wait until it has completed everywhere.
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_disable();
	func(info);
	local_irq_enable();
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
#endif

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return 0;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

int __weak arch_init_chip_data(struct irq_desc *desc, int cpu)
{
	return 0;
}