xref: /linux/kernel/irq/manage.c (revision b43ab901d671e3e3cad425ea5e9a3c74e266dcdd)
1 /*
2  * linux/kernel/irq/manage.c
3  *
4  * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
5  * Copyright (C) 2005-2006 Thomas Gleixner
6  *
7  * This file contains driver APIs to the irq subsystem.
8  */
9 
10 #include <linux/irq.h>
11 #include <linux/kthread.h>
12 #include <linux/module.h>
13 #include <linux/random.h>
14 #include <linux/interrupt.h>
15 #include <linux/slab.h>
16 #include <linux/sched.h>
17 
18 #include "internals.h"
19 
20 #ifdef CONFIG_IRQ_FORCED_THREADING
21 __read_mostly bool force_irqthreads;
22 
23 static int __init setup_forced_irqthreads(char *arg)
24 {
25 	force_irqthreads = true;
26 	return 0;
27 }
28 early_param("threadirqs", setup_forced_irqthreads);
29 #endif
30 
31 /**
32  *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
33  *	@irq: interrupt number to wait for
34  *
35  *	This function waits for any pending IRQ handlers for this interrupt
36  *	to complete before returning. If you use this function while
37  *	holding a resource the IRQ handler may need, you will deadlock.
38  *
39  *	This function may be called - with care - from IRQ context.
40  */
41 void synchronize_irq(unsigned int irq)
42 {
43 	struct irq_desc *desc = irq_to_desc(irq);
44 	bool inprogress;
45 
46 	if (!desc)
47 		return;
48 
49 	do {
50 		unsigned long flags;
51 
52 		/*
53 		 * Wait until we're out of the critical section.  This might
54 		 * give the wrong answer due to the lack of memory barriers.
55 		 */
56 		while (irqd_irq_inprogress(&desc->irq_data))
57 			cpu_relax();
58 
59 		/* Ok, that indicated we're done: double-check carefully. */
60 		raw_spin_lock_irqsave(&desc->lock, flags);
61 		inprogress = irqd_irq_inprogress(&desc->irq_data);
62 		raw_spin_unlock_irqrestore(&desc->lock, flags);
63 
64 		/* Oops, that failed? */
65 	} while (inprogress);
66 
67 	/*
68 	 * We made sure that no hardirq handler is running. Now verify
69 	 * that no threaded handlers are active.
70 	 */
71 	wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
72 }
73 EXPORT_SYMBOL(synchronize_irq);
74 
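/*
 * Usage sketch (illustrative, not part of this file): a hypothetical
 * driver quiescing its interrupt before tearing down data that the
 * handler dereferences. Per the warning above, the caller must not
 * hold any resource the handler itself may need.
 */
static void my_dev_quiesce(unsigned int irq)
{
	/* first stop the (hypothetical) hardware raising new interrupts */
	synchronize_irq(irq);	/* wait out handlers already in flight */
	/* now it is safe to free data the handler uses */
}
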
75 #ifdef CONFIG_SMP
76 cpumask_var_t irq_default_affinity;
77 
78 /**
79  *	irq_can_set_affinity - Check if the affinity of a given irq can be set
80  *	@irq:		Interrupt to check
81  *
82  */
83 int irq_can_set_affinity(unsigned int irq)
84 {
85 	struct irq_desc *desc = irq_to_desc(irq);
86 
87 	if (!desc || !irqd_can_balance(&desc->irq_data) ||
88 	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
89 		return 0;
90 
91 	return 1;
92 }
93 
94 /**
95  *	irq_set_thread_affinity - Notify irq threads to adjust affinity
96  *	@desc:		irq descriptor whose affinity changed
97  *
98  *	We just set IRQTF_AFFINITY and delegate the affinity setting
99  *	to the interrupt thread itself. We can not call
100  *	set_cpus_allowed_ptr() here as we hold desc->lock and this
101  *	code can be called from hard interrupt context.
102  */
103 void irq_set_thread_affinity(struct irq_desc *desc)
104 {
105 	struct irqaction *action = desc->action;
106 
107 	while (action) {
108 		if (action->thread)
109 			set_bit(IRQTF_AFFINITY, &action->thread_flags);
110 		action = action->next;
111 	}
112 }
113 
114 #ifdef CONFIG_GENERIC_PENDING_IRQ
115 static inline bool irq_can_move_pcntxt(struct irq_data *data)
116 {
117 	return irqd_can_move_in_process_context(data);
118 }
119 static inline bool irq_move_pending(struct irq_data *data)
120 {
121 	return irqd_is_setaffinity_pending(data);
122 }
123 static inline void
124 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
125 {
126 	cpumask_copy(desc->pending_mask, mask);
127 }
128 static inline void
129 irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
130 {
131 	cpumask_copy(mask, desc->pending_mask);
132 }
133 #else
134 static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
135 static inline bool irq_move_pending(struct irq_data *data) { return false; }
136 static inline void
137 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
138 static inline void
139 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
140 #endif
141 
142 int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
143 {
144 	struct irq_chip *chip = irq_data_get_irq_chip(data);
145 	struct irq_desc *desc = irq_data_to_desc(data);
146 	int ret = 0;
147 
148 	if (!chip || !chip->irq_set_affinity)
149 		return -EINVAL;
150 
151 	if (irq_can_move_pcntxt(data)) {
152 		ret = chip->irq_set_affinity(data, mask, false);
153 		switch (ret) {
154 		case IRQ_SET_MASK_OK:
155 			cpumask_copy(data->affinity, mask);
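			/* fall-through */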
156 		case IRQ_SET_MASK_OK_NOCOPY:
157 			irq_set_thread_affinity(desc);
158 			ret = 0;
159 		}
160 	} else {
161 		irqd_set_move_pending(data);
162 		irq_copy_pending(desc, mask);
163 	}
164 
165 	if (desc->affinity_notify) {
166 		kref_get(&desc->affinity_notify->kref);
167 		schedule_work(&desc->affinity_notify->work);
168 	}
169 	irqd_set(data, IRQD_AFFINITY_SET);
170 
171 	return ret;
172 }
173 
174 /**
175  *	irq_set_affinity - Set the irq affinity of a given irq
176  *	@irq:		Interrupt to set affinity
177  *	@mask:		cpumask to set as the new affinity
178  *
179  */
180 int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
181 {
182 	struct irq_desc *desc = irq_to_desc(irq);
183 	unsigned long flags;
184 	int ret;
185 
186 	if (!desc)
187 		return -EINVAL;
188 
189 	raw_spin_lock_irqsave(&desc->lock, flags);
190 	ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
191 	raw_spin_unlock_irqrestore(&desc->lock, flags);
192 	return ret;
193 }
194 
195 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
196 {
197 	unsigned long flags;
198 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
199 
200 	if (!desc)
201 		return -EINVAL;
202 	desc->affinity_hint = m;
203 	irq_put_desc_unlock(desc, flags);
204 	return 0;
205 }
206 EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
207 
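/*
 * Usage sketch (illustrative, assuming a built-in caller): steer an
 * interrupt to one CPU and publish the same mask as a hint, which shows
 * up in /proc/irq/<n>/affinity_hint for tools like irqbalance. The hint
 * must be reset to NULL before free_irq(), as the WARN_ON_ONCE in
 * __free_irq() below enforces. The function name is made up.
 */
static int my_pin_irq(unsigned int irq, int cpu)
{
	int ret = irq_set_affinity(irq, cpumask_of(cpu));

	if (!ret)
		ret = irq_set_affinity_hint(irq, cpumask_of(cpu));
	return ret;
}
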
208 static void irq_affinity_notify(struct work_struct *work)
209 {
210 	struct irq_affinity_notify *notify =
211 		container_of(work, struct irq_affinity_notify, work);
212 	struct irq_desc *desc = irq_to_desc(notify->irq);
213 	cpumask_var_t cpumask;
214 	unsigned long flags;
215 
216 	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
217 		goto out;
218 
219 	raw_spin_lock_irqsave(&desc->lock, flags);
220 	if (irq_move_pending(&desc->irq_data))
221 		irq_get_pending(cpumask, desc);
222 	else
223 		cpumask_copy(cpumask, desc->irq_data.affinity);
224 	raw_spin_unlock_irqrestore(&desc->lock, flags);
225 
226 	notify->notify(notify, cpumask);
227 
228 	free_cpumask_var(cpumask);
229 out:
230 	kref_put(&notify->kref, notify->release);
231 }
232 
233 /**
234  *	irq_set_affinity_notifier - control notification of IRQ affinity changes
235  *	@irq:		Interrupt for which to enable/disable notification
236  *	@notify:	Context for notification, or %NULL to disable
237  *			notification.  Function pointers must be initialised;
238  *			the other fields will be initialised by this function.
239  *
240  *	Must be called in process context.  Notification may only be enabled
241  *	after the IRQ is allocated and must be disabled before the IRQ is
242  *	freed using free_irq().
243  */
244 int
245 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
246 {
247 	struct irq_desc *desc = irq_to_desc(irq);
248 	struct irq_affinity_notify *old_notify;
249 	unsigned long flags;
250 
251 	/* The release function is promised process context */
252 	might_sleep();
253 
254 	if (!desc)
255 		return -EINVAL;
256 
257 	/* Complete initialisation of *notify */
258 	if (notify) {
259 		notify->irq = irq;
260 		kref_init(&notify->kref);
261 		INIT_WORK(&notify->work, irq_affinity_notify);
262 	}
263 
264 	raw_spin_lock_irqsave(&desc->lock, flags);
265 	old_notify = desc->affinity_notify;
266 	desc->affinity_notify = notify;
267 	raw_spin_unlock_irqrestore(&desc->lock, flags);
268 
269 	if (old_notify)
270 		kref_put(&old_notify->kref, old_notify->release);
271 
272 	return 0;
273 }
274 EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
275 
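/*
 * Usage sketch (illustrative): registering an affinity notifier. Only
 * ->notify() and ->release() need to be filled in; irq, kref and work
 * are initialised by irq_set_affinity_notifier() itself. The callback
 * names are hypothetical.
 */
static void my_affinity_changed(struct irq_affinity_notify *notify,
				const cpumask_t *mask)
{
	/* e.g. re-steer per-queue DMA resources to the new CPU set */
}

static void my_affinity_release(struct kref *ref)
{
	kfree(container_of(ref, struct irq_affinity_notify, kref));
}

static int my_register_affinity_notifier(unsigned int irq)
{
	struct irq_affinity_notify *notify;
	int ret;

	notify = kzalloc(sizeof(*notify), GFP_KERNEL);
	if (!notify)
		return -ENOMEM;
	notify->notify = my_affinity_changed;
	notify->release = my_affinity_release;
	ret = irq_set_affinity_notifier(irq, notify);
	if (ret)
		kfree(notify);
	return ret;
}
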
276 #ifndef CONFIG_AUTO_IRQ_AFFINITY
277 /*
278  * Generic version of the affinity autoselector.
279  */
280 static int
281 setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
282 {
283 	struct irq_chip *chip = irq_desc_get_chip(desc);
284 	struct cpumask *set = irq_default_affinity;
285 	int ret;
286 
287 	/* Excludes PER_CPU and NO_BALANCE interrupts */
288 	if (!irq_can_set_affinity(irq))
289 		return 0;
290 
291 	/*
292 	 * Preserve a userspace affinity setup, but make sure that
293 	 * one of the targets is online.
294 	 */
295 	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
296 		if (cpumask_intersects(desc->irq_data.affinity,
297 				       cpu_online_mask))
298 			set = desc->irq_data.affinity;
299 		else
300 			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
301 	}
302 
303 	cpumask_and(mask, cpu_online_mask, set);
304 	ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
305 	switch (ret) {
306 	case IRQ_SET_MASK_OK:
307 		cpumask_copy(desc->irq_data.affinity, mask);
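		/* fall-through */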
308 	case IRQ_SET_MASK_OK_NOCOPY:
309 		irq_set_thread_affinity(desc);
310 	}
311 	return 0;
312 }
313 #else
314 static inline int
315 setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
316 {
317 	return irq_select_affinity(irq);
318 }
319 #endif
320 
321 /*
322  * Called when affinity is set via /proc/irq
323  */
324 int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
325 {
326 	struct irq_desc *desc = irq_to_desc(irq);
327 	unsigned long flags;
328 	int ret;
329 
330 	raw_spin_lock_irqsave(&desc->lock, flags);
331 	ret = setup_affinity(irq, desc, mask);
332 	raw_spin_unlock_irqrestore(&desc->lock, flags);
333 	return ret;
334 }
335 
336 #else
337 static inline int
338 setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
339 {
340 	return 0;
341 }
342 #endif
343 
344 void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
345 {
346 	if (suspend) {
347 		if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
348 			return;
349 		desc->istate |= IRQS_SUSPENDED;
350 	}
351 
352 	if (!desc->depth++)
353 		irq_disable(desc);
354 }
355 
356 static int __disable_irq_nosync(unsigned int irq)
357 {
358 	unsigned long flags;
359 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
360 
361 	if (!desc)
362 		return -EINVAL;
363 	__disable_irq(desc, irq, false);
364 	irq_put_desc_busunlock(desc, flags);
365 	return 0;
366 }
367 
368 /**
369  *	disable_irq_nosync - disable an irq without waiting
370  *	@irq: Interrupt to disable
371  *
372  *	Disable the selected interrupt line.  Disables and Enables are
373  *	nested.
374  *	Unlike disable_irq(), this function does not ensure existing
375  *	instances of the IRQ handler have completed before returning.
376  *
377  *	This function may be called from IRQ context.
378  */
379 void disable_irq_nosync(unsigned int irq)
380 {
381 	__disable_irq_nosync(irq);
382 }
383 EXPORT_SYMBOL(disable_irq_nosync);
384 
385 /**
386  *	disable_irq - disable an irq and wait for completion
387  *	@irq: Interrupt to disable
388  *
389  *	Disable the selected interrupt line.  Enables and Disables are
390  *	nested.
391  *	This function waits for any pending IRQ handlers for this interrupt
392  *	to complete before returning. If you use this function while
393  *	holding a resource the IRQ handler may need, you will deadlock.
394  *
395  *	This function may be called - with care - from IRQ context.
396  */
397 void disable_irq(unsigned int irq)
398 {
399 	if (!__disable_irq_nosync(irq))
400 		synchronize_irq(irq);
401 }
402 EXPORT_SYMBOL(disable_irq);
403 
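/*
 * Usage sketch (illustrative): the usual pairing. disable_irq() may
 * sleep while it waits for a running handler, so it must not be used
 * from the handler of the same interrupt; disable_irq_nosync() is the
 * variant that is safe there. The function name is made up.
 */
static void my_reconfigure(unsigned int irq)
{
	disable_irq(irq);	/* returns only when no handler runs */
	/* ... touch hardware state the handler also uses ... */
	enable_irq(irq);	/* undoes exactly one disable */
}
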
404 void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
405 {
406 	if (resume) {
407 		if (!(desc->istate & IRQS_SUSPENDED)) {
408 			if (!desc->action)
409 				return;
410 			if (!(desc->action->flags & IRQF_FORCE_RESUME))
411 				return;
412 			/* Pretend that it got disabled ! */
413 			desc->depth++;
414 		}
415 		desc->istate &= ~IRQS_SUSPENDED;
416 	}
417 
418 	switch (desc->depth) {
419 	case 0:
420  err_out:
421 		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
422 		break;
423 	case 1: {
424 		if (desc->istate & IRQS_SUSPENDED)
425 			goto err_out;
426 		/* Prevent probing on this irq: */
427 		irq_settings_set_noprobe(desc);
428 		irq_enable(desc);
429 		check_irq_resend(desc, irq);
430 		/* fall-through */
431 	}
432 	default:
433 		desc->depth--;
434 	}
435 }
436 
437 /**
438  *	enable_irq - enable handling of an irq
439  *	@irq: Interrupt to enable
440  *
441  *	Undoes the effect of one call to disable_irq().  If this
442  *	matches the last disable, processing of interrupts on this
443  *	IRQ line is re-enabled.
444  *
445  *	This function may be called from IRQ context only when
446  *	desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
447  */
448 void enable_irq(unsigned int irq)
449 {
450 	unsigned long flags;
451 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
452 
453 	if (!desc)
454 		return;
455 	if (WARN(!desc->irq_data.chip,
456 		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
457 		goto out;
458 
459 	__enable_irq(desc, irq, false);
460 out:
461 	irq_put_desc_busunlock(desc, flags);
462 }
463 EXPORT_SYMBOL(enable_irq);
464 
465 static int set_irq_wake_real(unsigned int irq, unsigned int on)
466 {
467 	struct irq_desc *desc = irq_to_desc(irq);
468 	int ret = -ENXIO;
469 
470 	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
471 		return 0;
472 
473 	if (desc->irq_data.chip->irq_set_wake)
474 		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
475 
476 	return ret;
477 }
478 
479 /**
480  *	irq_set_irq_wake - control irq power management wakeup
481  *	@irq:	interrupt to control
482  *	@on:	enable/disable power management wakeup
483  *
484  *	Enable/disable power management wakeup mode, which is
485  *	disabled by default.  Enables and disables must match,
486  *	just as they match for non-wakeup mode support.
487  *
488  *	Wakeup mode lets this IRQ wake the system from sleep
489  *	states like "suspend to RAM".
490  */
491 int irq_set_irq_wake(unsigned int irq, unsigned int on)
492 {
493 	unsigned long flags;
494 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
495 	int ret = 0;
496 
497 	if (!desc)
498 		return -EINVAL;
499 
500 	/* wakeup-capable irqs can be shared between drivers that
501 	 * don't need to have the same sleep mode behaviors.
502 	 */
503 	if (on) {
504 		if (desc->wake_depth++ == 0) {
505 			ret = set_irq_wake_real(irq, on);
506 			if (ret)
507 				desc->wake_depth = 0;
508 			else
509 				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
510 		}
511 	} else {
512 		if (desc->wake_depth == 0) {
513 			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
514 		} else if (--desc->wake_depth == 0) {
515 			ret = set_irq_wake_real(irq, on);
516 			if (ret)
517 				desc->wake_depth = 1;
518 			else
519 				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
520 		}
521 	}
522 	irq_put_desc_busunlock(desc, flags);
523 	return ret;
524 }
525 EXPORT_SYMBOL(irq_set_irq_wake);
526 
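/*
 * Usage sketch (illustrative): arming an interrupt as a system wakeup
 * source across suspend. enable_irq_wake()/disable_irq_wake() are the
 * conventional wrappers around irq_set_irq_wake(irq, 1/0); the calls
 * must be balanced, as the wake_depth accounting above shows. The
 * suspend/resume hooks here are hypothetical.
 */
static int my_suspend(unsigned int irq, bool wakeup)
{
	return wakeup ? enable_irq_wake(irq) : 0;
}

static int my_resume(unsigned int irq, bool wakeup)
{
	return wakeup ? disable_irq_wake(irq) : 0;
}
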
527 /*
528  * Internal function that tells the architecture code whether a
529  * particular irq has been exclusively allocated or is available
530  * for driver use.
531  */
532 int can_request_irq(unsigned int irq, unsigned long irqflags)
533 {
534 	unsigned long flags;
535 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
536 	int canrequest = 0;
537 
538 	if (!desc)
539 		return 0;
540 
541 	if (irq_settings_can_request(desc)) {
542 		if (desc->action)
543 			if (irqflags & desc->action->flags & IRQF_SHARED)
544 				canrequest = 1;
545 	}
546 	irq_put_desc_unlock(desc, flags);
547 	return canrequest;
548 }
549 
550 int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
551 		      unsigned long flags)
552 {
553 	struct irq_chip *chip = desc->irq_data.chip;
554 	int ret, unmask = 0;
555 
556 	if (!chip || !chip->irq_set_type) {
557 		/*
558 		 * IRQF_TRIGGER_* but the PIC does not support multiple
559 		 * flow-types?
560 		 */
561 		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
562 				chip ? (chip->name ? : "unknown") : "unknown");
563 		return 0;
564 	}
565 
566 	flags &= IRQ_TYPE_SENSE_MASK;
567 
568 	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
569 		if (!irqd_irq_masked(&desc->irq_data))
570 			mask_irq(desc);
571 		if (!irqd_irq_disabled(&desc->irq_data))
572 			unmask = 1;
573 	}
574 
575 	/* caller masked out all except trigger mode flags */
576 	ret = chip->irq_set_type(&desc->irq_data, flags);
577 
578 	switch (ret) {
579 	case IRQ_SET_MASK_OK:
580 		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
581 		irqd_set(&desc->irq_data, flags);
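		/* fall-through */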
582 
583 	case IRQ_SET_MASK_OK_NOCOPY:
584 		flags = irqd_get_trigger_type(&desc->irq_data);
585 		irq_settings_set_trigger_mask(desc, flags);
586 		irqd_clear(&desc->irq_data, IRQD_LEVEL);
587 		irq_settings_clr_level(desc);
588 		if (flags & IRQ_TYPE_LEVEL_MASK) {
589 			irq_settings_set_level(desc);
590 			irqd_set(&desc->irq_data, IRQD_LEVEL);
591 		}
592 
593 		ret = 0;
594 		break;
595 	default:
596 		pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
597 		       flags, irq, chip->irq_set_type);
598 	}
599 	if (unmask)
600 		unmask_irq(desc);
601 	return ret;
602 }
603 
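/*
 * Sketch of the other side of the contract (illustrative): a
 * hypothetical irqchip's ->irq_set_type() callback. Returning
 * IRQ_SET_MASK_OK tells __irq_set_trigger() above to update the
 * trigger bits in irq_data; IRQ_SET_MASK_OK_NOCOPY means the chip
 * updated them itself.
 */
static int my_chip_set_type(struct irq_data *d, unsigned int flow_type)
{
	if (flow_type & ~(IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
		return -EINVAL;	/* this chip only does edges */
	/* ... program the edge selection into the hardware ... */
	return IRQ_SET_MASK_OK;
}
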
604 /*
605  * Default primary interrupt handler for threaded interrupts. Is
606  * assigned as primary handler when request_threaded_irq is called
607  * with handler == NULL. Useful for oneshot interrupts.
608  */
609 static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
610 {
611 	return IRQ_WAKE_THREAD;
612 }
613 
614 /*
615  * Primary handler for nested threaded interrupts. Should never be
616  * called.
617  */
618 static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
619 {
620 	WARN(1, "Primary handler called for nested irq %d\n", irq);
621 	return IRQ_NONE;
622 }
623 
624 static int irq_wait_for_interrupt(struct irqaction *action)
625 {
626 	set_current_state(TASK_INTERRUPTIBLE);
627 
628 	while (!kthread_should_stop()) {
629 
630 		if (test_and_clear_bit(IRQTF_RUNTHREAD,
631 				       &action->thread_flags)) {
632 			__set_current_state(TASK_RUNNING);
633 			return 0;
634 		}
635 		schedule();
636 		set_current_state(TASK_INTERRUPTIBLE);
637 	}
638 	__set_current_state(TASK_RUNNING);
639 	return -1;
640 }
641 
642 /*
643  * Oneshot interrupts keep the irq line masked until the threaded
644  * handler has finished. Unmask the line if the interrupt has not been
645  * disabled and is marked MASKED.
646  */
647 static void irq_finalize_oneshot(struct irq_desc *desc,
648 				 struct irqaction *action, bool force)
649 {
650 	if (!(desc->istate & IRQS_ONESHOT))
651 		return;
652 again:
653 	chip_bus_lock(desc);
654 	raw_spin_lock_irq(&desc->lock);
655 
656 	/*
657 	 * Implausible though it may be, we need to protect ourselves
658 	 * against the following scenario:
659 	 *
660 	 * The thread finishes faster than the hard interrupt handler
661 	 * on the other CPU. If we unmask the irq line now, the
662 	 * interrupt can come in again, mask the line, and bail out due
663 	 * to IRQS_INPROGRESS, leaving the irq line masked forever.
664 	 *
665 	 * This also serializes the state of shared oneshot handlers
666 	 * versus "desc->threads_oneshot |= action->thread_mask;" in
667 	 * irq_wake_thread(). See the comment there which explains the
668 	 * serialization.
669 	 */
670 	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
671 		raw_spin_unlock_irq(&desc->lock);
672 		chip_bus_sync_unlock(desc);
673 		cpu_relax();
674 		goto again;
675 	}
676 
677 	/*
678 	 * Now check again whether the thread should run. Otherwise
679 	 * we would clear the threads_oneshot bit of this thread which
680 	 * was just set.
681 	 */
682 	if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
683 		goto out_unlock;
684 
685 	desc->threads_oneshot &= ~action->thread_mask;
686 
687 	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
688 	    irqd_irq_masked(&desc->irq_data))
689 		unmask_irq(desc);
690 
691 out_unlock:
692 	raw_spin_unlock_irq(&desc->lock);
693 	chip_bus_sync_unlock(desc);
694 }
695 
696 #ifdef CONFIG_SMP
697 /*
698  * Check whether we need to change the affinity of the interrupt thread.
699  */
700 static void
701 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
702 {
703 	cpumask_var_t mask;
704 
705 	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
706 		return;
707 
708 	/*
709 	 * In case we are out of memory, we set IRQTF_AFFINITY again and
710 	 * try again next time.
711 	 */
712 	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
713 		set_bit(IRQTF_AFFINITY, &action->thread_flags);
714 		return;
715 	}
716 
717 	raw_spin_lock_irq(&desc->lock);
718 	cpumask_copy(mask, desc->irq_data.affinity);
719 	raw_spin_unlock_irq(&desc->lock);
720 
721 	set_cpus_allowed_ptr(current, mask);
722 	free_cpumask_var(mask);
723 }
724 #else
725 static inline void
726 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
727 #endif
728 
729 /*
730  * Interrupts which are not explicitly requested as threaded
731  * interrupts rely on the implicit bh/preempt disable of the hard irq
732  * context. So we need to disable bh here to avoid deadlocks and other
733  * side effects.
734  */
735 static irqreturn_t
736 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
737 {
738 	irqreturn_t ret;
739 
740 	local_bh_disable();
741 	ret = action->thread_fn(action->irq, action->dev_id);
742 	irq_finalize_oneshot(desc, action, false);
743 	local_bh_enable();
744 	return ret;
745 }
746 
747 /*
748  * Interrupts explicitly requested as threaded interrupts want to be
749  * preemptible - many of them need to sleep and wait for slow buses to
750  * complete.
751  */
752 static irqreturn_t irq_thread_fn(struct irq_desc *desc,
753 		struct irqaction *action)
754 {
755 	irqreturn_t ret;
756 
757 	ret = action->thread_fn(action->irq, action->dev_id);
758 	irq_finalize_oneshot(desc, action, false);
759 	return ret;
760 }
761 
762 /*
763  * Interrupt handler thread
764  */
765 static int irq_thread(void *data)
766 {
767 	static const struct sched_param param = {
768 		.sched_priority = MAX_USER_RT_PRIO/2,
769 	};
770 	struct irqaction *action = data;
771 	struct irq_desc *desc = irq_to_desc(action->irq);
772 	irqreturn_t (*handler_fn)(struct irq_desc *desc,
773 			struct irqaction *action);
774 	int wake;
775 
776 	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
777 					&action->thread_flags))
778 		handler_fn = irq_forced_thread_fn;
779 	else
780 		handler_fn = irq_thread_fn;
781 
782 	sched_setscheduler(current, SCHED_FIFO, &param);
783 	current->irqaction = action;
784 
785 	while (!irq_wait_for_interrupt(action)) {
786 
787 		irq_thread_check_affinity(desc, action);
788 
789 		atomic_inc(&desc->threads_active);
790 
791 		raw_spin_lock_irq(&desc->lock);
792 		if (unlikely(irqd_irq_disabled(&desc->irq_data))) {
793 			/*
794 			 * CHECKME: We might need a dedicated
795 			 * IRQ_THREAD_PENDING flag here, which
796 			 * retriggers the thread in check_irq_resend()
797 			 * but AFAICT IRQS_PENDING should be fine as it
798 			 * retriggers the interrupt itself --- tglx
799 			 */
800 			desc->istate |= IRQS_PENDING;
801 			raw_spin_unlock_irq(&desc->lock);
802 		} else {
803 			irqreturn_t action_ret;
804 
805 			raw_spin_unlock_irq(&desc->lock);
806 			action_ret = handler_fn(desc, action);
807 			if (!noirqdebug)
808 				note_interrupt(action->irq, desc, action_ret);
809 		}
810 
811 		wake = atomic_dec_and_test(&desc->threads_active);
812 
813 		if (wake && waitqueue_active(&desc->wait_for_threads))
814 			wake_up(&desc->wait_for_threads);
815 	}
816 
817 	/* Prevent a stale desc->threads_oneshot */
818 	irq_finalize_oneshot(desc, action, true);
819 
820 	/*
821 	 * Clear irqaction. Otherwise exit_irq_thread() would make
822 	 * a fuss about an active irq thread going into nirvana.
823 	 */
824 	current->irqaction = NULL;
825 	return 0;
826 }
827 
828 /*
829  * Called from do_exit()
830  */
831 void exit_irq_thread(void)
832 {
833 	struct task_struct *tsk = current;
834 	struct irq_desc *desc;
835 
836 	if (!tsk->irqaction)
837 		return;
838 
839 	printk(KERN_ERR
840 	       "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
841 	       tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);
842 
843 	desc = irq_to_desc(tsk->irqaction->irq);
844 
845 	/*
846 	 * Prevent a stale desc->threads_oneshot. Must be called
847 	 * before setting the IRQTF_DIED flag.
848 	 */
849 	irq_finalize_oneshot(desc, tsk->irqaction, true);
850 
851 	/*
852 	 * Set the THREAD DIED flag to prevent further wakeups of the
853 	 * soon to be gone threaded handler.
854 	 */
855 	set_bit(IRQTF_DIED, &tsk->irqaction->thread_flags);
856 }
857 
858 static void irq_setup_forced_threading(struct irqaction *new)
859 {
860 	if (!force_irqthreads)
861 		return;
862 	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
863 		return;
864 
865 	new->flags |= IRQF_ONESHOT;
866 
867 	if (!new->thread_fn) {
868 		set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
869 		new->thread_fn = new->handler;
870 		new->handler = irq_default_primary_handler;
871 	}
872 }
873 
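/*
 * Usage sketch (illustrative): handlers that must stay in hard
 * interrupt context even when "threadirqs" is on (e.g. timekeeping)
 * opt out with IRQF_NO_THREAD, which makes irq_setup_forced_threading()
 * above return early. Names are hypothetical.
 */
static int my_request_critical_irq(unsigned int irq, irq_handler_t handler)
{
	return request_irq(irq, handler, IRQF_NO_THREAD, "my-critical", NULL);
}
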
874 /*
875  * Internal function to register an irqaction - typically used to
876  * allocate special interrupts that are part of the architecture.
877  */
878 static int
879 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
880 {
881 	struct irqaction *old, **old_ptr;
882 	const char *old_name = NULL;
883 	unsigned long flags, thread_mask = 0;
884 	int ret, nested, shared = 0;
885 	cpumask_var_t mask;
886 
887 	if (!desc)
888 		return -EINVAL;
889 
890 	if (desc->irq_data.chip == &no_irq_chip)
891 		return -ENOSYS;
892 	if (!try_module_get(desc->owner))
893 		return -ENODEV;
894 	/*
895 	 * Some drivers like serial.c use request_irq() heavily,
896 	 * so we have to be careful not to interfere with a
897 	 * running system.
898 	 */
899 	if (new->flags & IRQF_SAMPLE_RANDOM) {
900 		/*
901 		 * This function might sleep, we want to call it first,
902 		 * outside of the atomic block.
903 		 * Yes, this might clear the entropy pool if the wrong
904 		 * driver is attempted to be loaded, without actually
905 		 * installing a new handler, but is this really a problem?
906 		 * Only the sysadmin is able to do this.
907 		 */
908 		rand_initialize_irq(irq);
909 	}
910 
911 	/*
912 	 * Check whether the interrupt nests into another interrupt
913 	 * thread.
914 	 */
915 	nested = irq_settings_is_nested_thread(desc);
916 	if (nested) {
917 		if (!new->thread_fn) {
918 			ret = -EINVAL;
919 			goto out_mput;
920 		}
921 		/*
922 		 * Replace the primary handler which was provided from
923 		 * the driver for non nested interrupt handling by the
924 		 * dummy function which warns when called.
925 		 */
926 		new->handler = irq_nested_primary_handler;
927 	} else {
928 		if (irq_settings_can_thread(desc))
929 			irq_setup_forced_threading(new);
930 	}
931 
932 	/*
933 	 * Create a handler thread when a thread function is supplied
934 	 * and the interrupt does not nest into another interrupt
935 	 * thread.
936 	 */
937 	if (new->thread_fn && !nested) {
938 		struct task_struct *t;
939 
940 		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
941 				   new->name);
942 		if (IS_ERR(t)) {
943 			ret = PTR_ERR(t);
944 			goto out_mput;
945 		}
946 		/*
947 		 * We keep the reference to the task struct even if
948 		 * the thread dies, so that the interrupt code cannot
949 		 * reference an already freed task_struct.
950 		 */
951 		get_task_struct(t);
952 		new->thread = t;
953 	}
954 
955 	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
956 		ret = -ENOMEM;
957 		goto out_thread;
958 	}
959 
960 	/*
961 	 * The following block of code has to be executed atomically
962 	 */
963 	raw_spin_lock_irqsave(&desc->lock, flags);
964 	old_ptr = &desc->action;
965 	old = *old_ptr;
966 	if (old) {
967 		/*
968 		 * Can't share interrupts unless both agree to and are
969 		 * the same type (level, edge, polarity). So both flag
970 		 * fields must have IRQF_SHARED set and the bits which
971 		 * set the trigger type must match. Also all must
972 		 * agree on ONESHOT.
973 		 */
974 		if (!((old->flags & new->flags) & IRQF_SHARED) ||
975 		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
976 		    ((old->flags ^ new->flags) & IRQF_ONESHOT)) {
977 			old_name = old->name;
978 			goto mismatch;
979 		}
980 
981 		/* All handlers must agree on per-cpuness */
982 		if ((old->flags & IRQF_PERCPU) !=
983 		    (new->flags & IRQF_PERCPU))
984 			goto mismatch;
985 
986 		/* add new interrupt at end of irq queue */
987 		do {
988 			thread_mask |= old->thread_mask;
989 			old_ptr = &old->next;
990 			old = *old_ptr;
991 		} while (old);
992 		shared = 1;
993 	}
994 
995 	/*
996 	 * Setup the thread mask for this irqaction. Unlikely to have
997 	 * 32 (resp. 64 on 64-bit) irqs sharing one line, but who knows.
998 	 */
999 	if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) {
1000 		ret = -EBUSY;
1001 		goto out_mask;
1002 	}
1003 	new->thread_mask = 1 << ffz(thread_mask);
1004 
1005 	if (!shared) {
1006 		init_waitqueue_head(&desc->wait_for_threads);
1007 
1008 		/* Set up the trigger type (level, edge, polarity) if configured: */
1009 		if (new->flags & IRQF_TRIGGER_MASK) {
1010 			ret = __irq_set_trigger(desc, irq,
1011 					new->flags & IRQF_TRIGGER_MASK);
1012 
1013 			if (ret)
1014 				goto out_mask;
1015 		}
1016 
1017 		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1018 				  IRQS_ONESHOT | IRQS_WAITING);
1019 		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1020 
1021 		if (new->flags & IRQF_PERCPU) {
1022 			irqd_set(&desc->irq_data, IRQD_PER_CPU);
1023 			irq_settings_set_per_cpu(desc);
1024 		}
1025 
1026 		if (new->flags & IRQF_ONESHOT)
1027 			desc->istate |= IRQS_ONESHOT;
1028 
1029 		if (irq_settings_can_autoenable(desc))
1030 			irq_startup(desc);
1031 		else
1032 			/* Undo nested disables: */
1033 			desc->depth = 1;
1034 
1035 		/* Exclude IRQ from balancing if requested */
1036 		if (new->flags & IRQF_NOBALANCING) {
1037 			irq_settings_set_no_balancing(desc);
1038 			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1039 		}
1040 
1041 		/* Set default affinity mask once everything is setup */
1042 		setup_affinity(irq, desc, mask);
1043 
1044 	} else if (new->flags & IRQF_TRIGGER_MASK) {
1045 		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1046 		unsigned int omsk = irq_settings_get_trigger_mask(desc);
1047 
1048 		if (nmsk != omsk)
1049 			/* hope the handler works with the current trigger mode */
1050 			pr_warning("IRQ %d uses trigger mode %u; requested %u\n",
1051 				   irq, omsk, nmsk);
1052 	}
1053 
1054 	new->irq = irq;
1055 	*old_ptr = new;
1056 
1057 	/* Reset broken irq detection when installing new handler */
1058 	desc->irq_count = 0;
1059 	desc->irqs_unhandled = 0;
1060 
1061 	/*
1062 	 * Check whether we disabled the irq via the spurious handler
1063 	 * before. Reenable it and give it another chance.
1064 	 */
1065 	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1066 		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1067 		__enable_irq(desc, irq, false);
1068 	}
1069 
1070 	raw_spin_unlock_irqrestore(&desc->lock, flags);
1071 
1072 	/*
1073 	 * Strictly no need to wake it up, but hung_task complains
1074 	 * when no hard interrupt wakes the thread up.
1075 	 */
1076 	if (new->thread)
1077 		wake_up_process(new->thread);
1078 
1079 	register_irq_proc(irq, desc);
1080 	new->dir = NULL;
1081 	register_handler_proc(irq, new);
1082 	free_cpumask_var(mask);
1083 
1084 	return 0;
1085 
1086 mismatch:
1087 #ifdef CONFIG_DEBUG_SHIRQ
1088 	if (!(new->flags & IRQF_PROBE_SHARED)) {
1089 		printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
1090 		if (old_name)
1091 			printk(KERN_ERR "current handler: %s\n", old_name);
1092 		dump_stack();
1093 	}
1094 #endif
1095 	ret = -EBUSY;
1096 
1097 out_mask:
1098 	raw_spin_unlock_irqrestore(&desc->lock, flags);
1099 	free_cpumask_var(mask);
1100 
1101 out_thread:
1102 	if (new->thread) {
1103 		struct task_struct *t = new->thread;
1104 
1105 		new->thread = NULL;
1106 		if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
1107 			kthread_stop(t);
1108 		put_task_struct(t);
1109 	}
1110 out_mput:
1111 	module_put(desc->owner);
1112 	return ret;
1113 }
1114 
1115 /**
1116  *	setup_irq - setup an interrupt
1117  *	@irq: Interrupt line to setup
1118  *	@act: irqaction for the interrupt
1119  *
1120  * Used to statically set up interrupts in the early boot process.
1121  */
1122 int setup_irq(unsigned int irq, struct irqaction *act)
1123 {
1124 	int retval;
1125 	struct irq_desc *desc = irq_to_desc(irq);
1126 
1127 	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1128 		return -EINVAL;
1129 	chip_bus_lock(desc);
1130 	retval = __setup_irq(irq, desc, act);
1131 	chip_bus_sync_unlock(desc);
1132 
1133 	return retval;
1134 }
1135 EXPORT_SYMBOL_GPL(setup_irq);
1136 
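/*
 * Usage sketch (illustrative): the classic early-boot use of setup_irq()
 * from architecture code, before the allocators needed by request_irq()
 * are up. MY_TIMER_IRQ and the handler are hypothetical.
 */
static irqreturn_t my_timer_interrupt(int irq, void *dev_id)
{
	/* ... ack the timer, update jiffies ... */
	return IRQ_HANDLED;
}

static struct irqaction my_timer_irqaction = {
	.handler	= my_timer_interrupt,
	.flags		= IRQF_TIMER,
	.name		= "my-timer",
};

static void __init my_time_init(void)
{
	setup_irq(MY_TIMER_IRQ, &my_timer_irqaction);
}
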
1137 /*
1138  * Internal function to unregister an irqaction - used to free
1139  * regular and special interrupts that are part of the architecture.
1140  */
1141 static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
1142 {
1143 	struct irq_desc *desc = irq_to_desc(irq);
1144 	struct irqaction *action, **action_ptr;
1145 	unsigned long flags;
1146 
1147 	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1148 
1149 	if (!desc)
1150 		return NULL;
1151 
1152 	raw_spin_lock_irqsave(&desc->lock, flags);
1153 
1154 	/*
1155 	 * There can be multiple actions per IRQ descriptor, find the right
1156 	 * one based on the dev_id:
1157 	 */
1158 	action_ptr = &desc->action;
1159 	for (;;) {
1160 		action = *action_ptr;
1161 
1162 		if (!action) {
1163 			WARN(1, "Trying to free already-free IRQ %d\n", irq);
1164 			raw_spin_unlock_irqrestore(&desc->lock, flags);
1165 
1166 			return NULL;
1167 		}
1168 
1169 		if (action->dev_id == dev_id)
1170 			break;
1171 		action_ptr = &action->next;
1172 	}
1173 
1174 	/* Found it - now remove it from the list of entries: */
1175 	*action_ptr = action->next;
1176 
1177 	/* Currently used only by UML, might disappear one day: */
1178 #ifdef CONFIG_IRQ_RELEASE_METHOD
1179 	if (desc->irq_data.chip->release)
1180 		desc->irq_data.chip->release(irq, dev_id);
1181 #endif
1182 
1183 	/* If this was the last handler, shut down the IRQ line: */
1184 	if (!desc->action)
1185 		irq_shutdown(desc);
1186 
1187 #ifdef CONFIG_SMP
1188 	/* make sure affinity_hint is cleaned up */
1189 	if (WARN_ON_ONCE(desc->affinity_hint))
1190 		desc->affinity_hint = NULL;
1191 #endif
1192 
1193 	raw_spin_unlock_irqrestore(&desc->lock, flags);
1194 
1195 	unregister_handler_proc(irq, action);
1196 
1197 	/* Make sure it's not being used on another CPU: */
1198 	synchronize_irq(irq);
1199 
1200 #ifdef CONFIG_DEBUG_SHIRQ
1201 	/*
1202 	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1203 	 * event to happen even while it is being freed, so let's make sure
1204 	 * that is so by doing an extra call to the handler ....
1205 	 *
1206 	 * ( We do this after actually deregistering it, to make sure that a
1207 	 *   'real' IRQ doesn't run in parallel with our fake. )
1208 	 */
1209 	if (action->flags & IRQF_SHARED) {
1210 		local_irq_save(flags);
1211 		action->handler(irq, dev_id);
1212 		local_irq_restore(flags);
1213 	}
1214 #endif
1215 
1216 	if (action->thread) {
1217 		if (!test_bit(IRQTF_DIED, &action->thread_flags))
1218 			kthread_stop(action->thread);
1219 		put_task_struct(action->thread);
1220 	}
1221 
1222 	module_put(desc->owner);
1223 	return action;
1224 }
1225 
1226 /**
1227  *	remove_irq - free an interrupt
1228  *	@irq: Interrupt line to free
1229  *	@act: irqaction for the interrupt
1230  *
1231  * Used to remove interrupts statically set up by the early boot process.
1232  */
1233 void remove_irq(unsigned int irq, struct irqaction *act)
1234 {
1235 	struct irq_desc *desc = irq_to_desc(irq);
1236 
1237 	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1238 		__free_irq(irq, act->dev_id);
1239 }
1240 EXPORT_SYMBOL_GPL(remove_irq);
1241 
1242 /**
1243  *	free_irq - free an interrupt allocated with request_irq
1244  *	@irq: Interrupt line to free
1245  *	@dev_id: Device identity to free
1246  *
1247  *	Remove an interrupt handler. The handler is removed and if the
1248  *	interrupt line is no longer in use by any driver it is disabled.
1249  *	On a shared IRQ the caller must ensure the interrupt is disabled
1250  *	on the card it drives before calling this function. The function
1251  *	does not return until any executing interrupts for this IRQ
1252  *	have completed.
1253  *
1254  *	This function must not be called from interrupt context.
1255  */
1256 void free_irq(unsigned int irq, void *dev_id)
1257 {
1258 	struct irq_desc *desc = irq_to_desc(irq);
1259 
1260 	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1261 		return;
1262 
1263 #ifdef CONFIG_SMP
1264 	if (WARN_ON(desc->affinity_notify))
1265 		desc->affinity_notify = NULL;
1266 #endif
1267 
1268 	chip_bus_lock(desc);
1269 	kfree(__free_irq(irq, dev_id));
1270 	chip_bus_sync_unlock(desc);
1271 }
1272 EXPORT_SYMBOL(free_irq);
1273 
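/*
 * Usage sketch (illustrative): teardown order for a shared interrupt,
 * following the rule in the comment above: silence the device first,
 * then free the handler. free_irq() itself waits for handlers that are
 * still running. The function name is made up.
 */
static void my_dev_remove(unsigned int irq, void *dev_id)
{
	/* 1. mask the interrupt source in the (hypothetical) device */
	/* 2. release our handler; safe even while other sharers run */
	free_irq(irq, dev_id);
}
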
1274 /**
1275  *	request_threaded_irq - allocate an interrupt line
1276  *	@irq: Interrupt line to allocate
1277  *	@handler: Function to be called when the IRQ occurs.
1278  *		  Primary handler for threaded interrupts
1279  *		  If NULL and thread_fn != NULL the default
1280  *		  primary handler is installed
1281  *	@thread_fn: Function called from the irq handler thread
1282  *		    If NULL, no irq thread is created
1283  *	@irqflags: Interrupt type flags
1284  *	@devname: An ascii name for the claiming device
1285  *	@dev_id: A cookie passed back to the handler function
1286  *
1287  *	This call allocates interrupt resources and enables the
1288  *	interrupt line and IRQ handling. From the point this
1289  *	call is made your handler function may be invoked. Since
1290  *	your handler function must clear any interrupt the board
1291  *	raises, you must take care both to initialise your hardware
1292  *	and to set up the interrupt handler in the right order.
1293  *
1294  *	If you want to set up a threaded irq handler for your device
1295  *	then you need to supply @handler and @thread_fn. @handler is
1296  *	still called in hard interrupt context and has to check
1297  *	whether the interrupt originates from the device. If yes it
1298  *	needs to disable the interrupt on the device and return
1299  *	IRQ_WAKE_THREAD which will wake up the handler thread and run
1300  *	@thread_fn. This split handler design is necessary to support
1301  *	shared interrupts.
1302  *
1303  *	Dev_id must be globally unique. Normally the address of the
1304  *	device data structure is used as the cookie. Since the handler
1305  *	receives this value it makes sense to use it.
1306  *
1307  *	If your interrupt is shared you must pass a non NULL dev_id
1308  *	as this is required when freeing the interrupt.
1309  *
1310  *	Flags:
1311  *
1312  *	IRQF_SHARED		Interrupt is shared
1313  *	IRQF_SAMPLE_RANDOM	The interrupt can be used for entropy
1314  *	IRQF_TRIGGER_*		Specify active edge(s) or level
1315  *
1316  */
1317 int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1318 			 irq_handler_t thread_fn, unsigned long irqflags,
1319 			 const char *devname, void *dev_id)
1320 {
1321 	struct irqaction *action;
1322 	struct irq_desc *desc;
1323 	int retval;
1324 
1325 	/*
1326 	 * Sanity-check: shared interrupts must pass in a real dev-ID,
1327 	 * otherwise we'll have trouble later trying to figure out
1328 	 * which interrupt is which (messes up the interrupt freeing
1329 	 * logic etc).
1330 	 */
1331 	if ((irqflags & IRQF_SHARED) && !dev_id)
1332 		return -EINVAL;
1333 
1334 	desc = irq_to_desc(irq);
1335 	if (!desc)
1336 		return -EINVAL;
1337 
1338 	if (!irq_settings_can_request(desc) ||
1339 	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1340 		return -EINVAL;
1341 
1342 	if (!handler) {
1343 		if (!thread_fn)
1344 			return -EINVAL;
1345 		handler = irq_default_primary_handler;
1346 	}
1347 
1348 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1349 	if (!action)
1350 		return -ENOMEM;
1351 
1352 	action->handler = handler;
1353 	action->thread_fn = thread_fn;
1354 	action->flags = irqflags;
1355 	action->name = devname;
1356 	action->dev_id = dev_id;
1357 
1358 	chip_bus_lock(desc);
1359 	retval = __setup_irq(irq, desc, action);
1360 	chip_bus_sync_unlock(desc);
1361 
1362 	if (retval)
1363 		kfree(action);
1364 
1365 #ifdef CONFIG_DEBUG_SHIRQ_FIXME
1366 	if (!retval && (irqflags & IRQF_SHARED)) {
1367 		/*
1368 		 * It's a shared IRQ -- the driver ought to be prepared for it
1369 		 * to happen immediately, so let's make sure....
1370 		 * We disable the irq to make sure that a 'real' IRQ doesn't
1371 		 * run in parallel with our fake.
1372 		 */
1373 		unsigned long flags;
1374 
1375 		disable_irq(irq);
1376 		local_irq_save(flags);
1377 
1378 		handler(irq, dev_id);
1379 
1380 		local_irq_restore(flags);
1381 		enable_irq(irq);
1382 	}
1383 #endif
1384 	return retval;
1385 }
1386 EXPORT_SYMBOL(request_threaded_irq);
1387 
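/*
 * Usage sketch (illustrative) of the split handler design described
 * above. The hard handler only checks whether the device raised the
 * interrupt, quietens it, and defers the slow work to the thread. The
 * my_dev_* helpers are hypothetical.
 */
static irqreturn_t my_quick_check(int irq, void *dev_id)
{
	if (!my_dev_irq_pending(dev_id))
		return IRQ_NONE;	/* shared line: not ours */
	my_dev_mask_irq(dev_id);	/* stop the device interrupting */
	return IRQ_WAKE_THREAD;		/* schedule my_slow_work() */
}

static irqreturn_t my_slow_work(int irq, void *dev_id)
{
	/* may sleep: talk to the device over a slow bus, etc. */
	my_dev_unmask_irq(dev_id);
	return IRQ_HANDLED;
}

static int my_dev_setup_irq(unsigned int irq, void *dev)
{
	return request_threaded_irq(irq, my_quick_check, my_slow_work,
				    IRQF_SHARED, "my-dev", dev);
}
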
1388 /**
1389  *	request_any_context_irq - allocate an interrupt line
1390  *	@irq: Interrupt line to allocate
1391  *	@handler: Function to be called when the IRQ occurs.
1392  *		  Threaded handler for threaded interrupts.
1393  *	@flags: Interrupt type flags
1394  *	@name: An ascii name for the claiming device
1395  *	@dev_id: A cookie passed back to the handler function
1396  *
1397  *	This call allocates interrupt resources and enables the
1398  *	interrupt line and IRQ handling. It selects either a
1399  *	hardirq or threaded handling method depending on the
1400  *	context.
1401  *
1402  *	On failure, it returns a negative value. On success,
1403  *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
1404  */
1405 int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1406 			    unsigned long flags, const char *name, void *dev_id)
1407 {
1408 	struct irq_desc *desc = irq_to_desc(irq);
1409 	int ret;
1410 
1411 	if (!desc)
1412 		return -EINVAL;
1413 
1414 	if (irq_settings_is_nested_thread(desc)) {
1415 		ret = request_threaded_irq(irq, NULL, handler,
1416 					   flags, name, dev_id);
1417 		return !ret ? IRQC_IS_NESTED : ret;
1418 	}
1419 
1420 	ret = request_irq(irq, handler, flags, name, dev_id);
1421 	return !ret ? IRQC_IS_HARDIRQ : ret;
1422 }
1423 EXPORT_SYMBOL_GPL(request_any_context_irq);
1424 
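/*
 * Usage sketch (illustrative): a consumer of a GPIO-backed interrupt
 * that may be a real hardirq or a nested thread depending on the
 * controller. Only the sign of the return value matters to this
 * (hypothetical) caller.
 */
static int my_request_gpio_irq(unsigned int irq, irq_handler_t handler,
			       void *dev)
{
	int ret = request_any_context_irq(irq, handler,
					  IRQF_TRIGGER_FALLING,
					  "my-gpio", dev);

	return ret < 0 ? ret : 0;	/* HARDIRQ and NESTED both fine */
}
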
1425 void enable_percpu_irq(unsigned int irq, unsigned int type)
1426 {
1427 	unsigned int cpu = smp_processor_id();
1428 	unsigned long flags;
1429 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1430 
1431 	if (!desc)
1432 		return;
1433 
1434 	type &= IRQ_TYPE_SENSE_MASK;
1435 	if (type != IRQ_TYPE_NONE) {
1436 		int ret;
1437 
1438 		ret = __irq_set_trigger(desc, irq, type);
1439 
1440 		if (ret) {
1441 			WARN(1, "failed to set type for IRQ %d\n", irq);
1442 			goto out;
1443 		}
1444 	}
1445 
1446 	irq_percpu_enable(desc, cpu);
1447 out:
1448 	irq_put_desc_unlock(desc, flags);
1449 }
1450 
1451 void disable_percpu_irq(unsigned int irq)
1452 {
1453 	unsigned int cpu = smp_processor_id();
1454 	unsigned long flags;
1455 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1456 
1457 	if (!desc)
1458 		return;
1459 
1460 	irq_percpu_disable(desc, cpu);
1461 	irq_put_desc_unlock(desc, flags);
1462 }
1463 
1464 /*
1465  * Internal function to unregister a percpu irqaction.
1466  */
1467 static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1468 {
1469 	struct irq_desc *desc = irq_to_desc(irq);
1470 	struct irqaction *action;
1471 	unsigned long flags;
1472 
1473 	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1474 
1475 	if (!desc)
1476 		return NULL;
1477 
1478 	raw_spin_lock_irqsave(&desc->lock, flags);
1479 
1480 	action = desc->action;
1481 	if (!action || action->percpu_dev_id != dev_id) {
1482 		WARN(1, "Trying to free already-free IRQ %d\n", irq);
1483 		goto bad;
1484 	}
1485 
1486 	if (!cpumask_empty(desc->percpu_enabled)) {
1487 		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
1488 		     irq, cpumask_first(desc->percpu_enabled));
1489 		goto bad;
1490 	}
1491 
1492 	/* Found it - now remove it from the list of entries: */
1493 	desc->action = NULL;
1494 
1495 	raw_spin_unlock_irqrestore(&desc->lock, flags);
1496 
1497 	unregister_handler_proc(irq, action);
1498 
1499 	module_put(desc->owner);
1500 	return action;
1501 
1502 bad:
1503 	raw_spin_unlock_irqrestore(&desc->lock, flags);
1504 	return NULL;
1505 }
1506 
1507 /**
1508  *	remove_percpu_irq - free a per-cpu interrupt
1509  *	@irq: Interrupt line to free
1510  *	@act: irqaction for the interrupt
1511  *
1512  * Used to remove interrupts statically set up by the early boot process.
1513  */
1514 void remove_percpu_irq(unsigned int irq, struct irqaction *act)
1515 {
1516 	struct irq_desc *desc = irq_to_desc(irq);
1517 
1518 	if (desc && irq_settings_is_per_cpu_devid(desc))
1519 		__free_percpu_irq(irq, act->percpu_dev_id);
1520 }
1521 
1522 /**
1523  *	free_percpu_irq - free an interrupt allocated with request_percpu_irq
1524  *	@irq: Interrupt line to free
1525  *	@dev_id: Device identity to free
1526  *
1527  *	Remove a percpu interrupt handler. The handler is removed, but
1528  *	the interrupt line is not disabled. This must be done on each
1529  *	CPU before calling this function. The function does not return
1530  *	until any executing interrupts for this IRQ have completed.
1531  *
1532  *	This function must not be called from interrupt context.
1533  */
1534 void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1535 {
1536 	struct irq_desc *desc = irq_to_desc(irq);
1537 
1538 	if (!desc || !irq_settings_is_per_cpu_devid(desc))
1539 		return;
1540 
1541 	chip_bus_lock(desc);
1542 	kfree(__free_percpu_irq(irq, dev_id));
1543 	chip_bus_sync_unlock(desc);
1544 }
1545 
1546 /**
1547  *	setup_percpu_irq - setup a per-cpu interrupt
1548  *	@irq: Interrupt line to setup
1549  *	@act: irqaction for the interrupt
1550  *
1551  * Used to statically set up per-cpu interrupts in the early boot process.
1552  */
1553 int setup_percpu_irq(unsigned int irq, struct irqaction *act)
1554 {
1555 	struct irq_desc *desc = irq_to_desc(irq);
1556 	int retval;
1557 
1558 	if (!desc || !irq_settings_is_per_cpu_devid(desc))
1559 		return -EINVAL;
1560 	chip_bus_lock(desc);
1561 	retval = __setup_irq(irq, desc, act);
1562 	chip_bus_sync_unlock(desc);
1563 
1564 	return retval;
1565 }
1566 
1567 /**
1568  *	request_percpu_irq - allocate a percpu interrupt line
1569  *	@irq: Interrupt line to allocate
1570  *	@handler: Function to be called when the IRQ occurs.
1571  *	@devname: An ascii name for the claiming device
1572  *	@dev_id: A percpu cookie passed back to the handler function
1573  *
1574  *	This call allocates interrupt resources, but doesn't
1575  *	automatically enable the interrupt. It has to be done on each
1576  *	CPU using enable_percpu_irq().
1577  *
1578  *	Dev_id must be globally unique. It is a per-cpu variable, and
1579  *	the handler gets called with the interrupted CPU's instance of
1580  *	that variable.
1581  */
1582 int request_percpu_irq(unsigned int irq, irq_handler_t handler,
1583 		       const char *devname, void __percpu *dev_id)
1584 {
1585 	struct irqaction *action;
1586 	struct irq_desc *desc;
1587 	int retval;
1588 
1589 	if (!dev_id)
1590 		return -EINVAL;
1591 
1592 	desc = irq_to_desc(irq);
1593 	if (!desc || !irq_settings_can_request(desc) ||
1594 	    !irq_settings_is_per_cpu_devid(desc))
1595 		return -EINVAL;
1596 
1597 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1598 	if (!action)
1599 		return -ENOMEM;
1600 
1601 	action->handler = handler;
1602 	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
1603 	action->name = devname;
1604 	action->percpu_dev_id = dev_id;
1605 
1606 	chip_bus_lock(desc);
1607 	retval = __setup_irq(irq, desc, action);
1608 	chip_bus_sync_unlock(desc);
1609 
1610 	if (retval)
1611 		kfree(action);
1612 
1613 	return retval;
1614 }
1615
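/*
 * Usage sketch (illustrative): the percpu pattern used by local-timer
 * style interrupts. The cookie is a per-cpu variable and each CPU has
 * to enable its own copy of the line. All names are hypothetical.
 */
static DEFINE_PER_CPU(unsigned long, my_percpu_count);

static irqreturn_t my_percpu_handler(int irq, void *dev_id)
{
	unsigned long *count = dev_id;	/* this CPU's instance */

	(*count)++;
	return IRQ_HANDLED;
}

static int my_percpu_init(unsigned int irq)
{
	int ret = request_percpu_irq(irq, my_percpu_handler, "my-percpu",
				     &my_percpu_count);

	if (!ret)
		enable_percpu_irq(irq, IRQ_TYPE_NONE);	/* this CPU only */
	return ret;
}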