1 /*
2  * linux/kernel/irq/manage.c
3  *
4  * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
5  * Copyright (C) 2005-2006 Thomas Gleixner
6  *
7  * This file contains driver APIs to the irq subsystem.
8  */
9 
10 #define pr_fmt(fmt) "genirq: " fmt
11 
12 #include <linux/irq.h>
13 #include <linux/kthread.h>
14 #include <linux/module.h>
15 #include <linux/random.h>
16 #include <linux/interrupt.h>
17 #include <linux/slab.h>
18 #include <linux/sched.h>
19 #include <linux/task_work.h>
20 
21 #include "internals.h"
22 
23 #ifdef CONFIG_IRQ_FORCED_THREADING
24 __read_mostly bool force_irqthreads;
25 
26 static int __init setup_forced_irqthreads(char *arg)
27 {
28 	force_irqthreads = true;
29 	return 0;
30 }
31 early_param("threadirqs", setup_forced_irqthreads);
32 #endif
33 
34 /**
35  *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
36  *	@irq: interrupt number to wait for
37  *
38  *	This function waits for any pending IRQ handlers for this interrupt
39  *	to complete before returning. If you use this function while
40  *	to complete before returning. Do not call it while holding a
41  *	resource the IRQ handler may need, or you will deadlock.
42  *	This function may be called - with care - from IRQ context.
43  */
44 void synchronize_irq(unsigned int irq)
45 {
46 	struct irq_desc *desc = irq_to_desc(irq);
47 	bool inprogress;
48 
49 	if (!desc)
50 		return;
51 
52 	do {
53 		unsigned long flags;
54 
55 		/*
56 		 * Wait until we're out of the critical section.  This might
57 		 * give the wrong answer due to the lack of memory barriers.
58 		 */
59 		while (irqd_irq_inprogress(&desc->irq_data))
60 			cpu_relax();
61 
62 		/* Ok, that indicated we're done: double-check carefully. */
63 		raw_spin_lock_irqsave(&desc->lock, flags);
64 		inprogress = irqd_irq_inprogress(&desc->irq_data);
65 		raw_spin_unlock_irqrestore(&desc->lock, flags);
66 
67 		/* Oops, that failed? */
68 	} while (inprogress);
69 
70 	/*
71 	 * We made sure that no hardirq handler is running. Now verify
72 	 * that no threaded handlers are active.
73 	 */
74 	wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
75 }
76 EXPORT_SYMBOL(synchronize_irq);
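
/*
 * Usage sketch (hypothetical driver, not part of this file): a typical
 * teardown path quiesces the device first, then calls synchronize_irq()
 * to wait out a handler that may still be running on another CPU before
 * freeing the data that handler uses. Per the deadlock warning above, no
 * lock the handler takes may be held across the call.
 *
 *	static void foo_stop(struct foo_device *foo)
 *	{
 *		foo_hw_mask_irqs(foo);		// hypothetical device op
 *		synchronize_irq(foo->irq);	// handlers are done after this
 *		foo_free_rings(foo);		// now safe to tear down
 *	}
 */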
77 
78 #ifdef CONFIG_SMP
79 cpumask_var_t irq_default_affinity;
80 
81 /**
82  *	irq_can_set_affinity - Check if the affinity of a given irq can be set
83  *	@irq:		Interrupt to check
84  *	Returns 1 if the affinity can be set, 0 if not.
85  */
86 int irq_can_set_affinity(unsigned int irq)
87 {
88 	struct irq_desc *desc = irq_to_desc(irq);
89 
90 	if (!desc || !irqd_can_balance(&desc->irq_data) ||
91 	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
92 		return 0;
93 
94 	return 1;
95 }
96 
97 /**
98  *	irq_set_thread_affinity - Notify irq threads to adjust affinity
99  *	@desc:		irq descriptor whose affinity has changed
100  *
101  *	We just set IRQTF_AFFINITY and delegate the affinity setting
102  *	to the interrupt thread itself. We cannot call
103  *	set_cpus_allowed_ptr() here as we hold desc->lock and this
104  *	code can be called from hard interrupt context.
105  */
106 void irq_set_thread_affinity(struct irq_desc *desc)
107 {
108 	struct irqaction *action = desc->action;
109 
110 	while (action) {
111 		if (action->thread)
112 			set_bit(IRQTF_AFFINITY, &action->thread_flags);
113 		action = action->next;
114 	}
115 }
116 
117 #ifdef CONFIG_GENERIC_PENDING_IRQ
118 static inline bool irq_can_move_pcntxt(struct irq_data *data)
119 {
120 	return irqd_can_move_in_process_context(data);
121 }
122 static inline bool irq_move_pending(struct irq_data *data)
123 {
124 	return irqd_is_setaffinity_pending(data);
125 }
126 static inline void
127 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
128 {
129 	cpumask_copy(desc->pending_mask, mask);
130 }
131 static inline void
132 irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
133 {
134 	cpumask_copy(mask, desc->pending_mask);
135 }
136 #else
137 static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
138 static inline bool irq_move_pending(struct irq_data *data) { return false; }
139 static inline void
140 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
141 static inline void
142 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
143 #endif
144 
145 int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
146 {
147 	struct irq_chip *chip = irq_data_get_irq_chip(data);
148 	struct irq_desc *desc = irq_data_to_desc(data);
149 	int ret = 0;
150 
151 	if (!chip || !chip->irq_set_affinity)
152 		return -EINVAL;
153 
154 	if (irq_can_move_pcntxt(data)) {
155 		ret = chip->irq_set_affinity(data, mask, false);
156 		switch (ret) {
157 		case IRQ_SET_MASK_OK:
158 			cpumask_copy(data->affinity, mask);
159 		case IRQ_SET_MASK_OK_NOCOPY:
160 			irq_set_thread_affinity(desc);
161 			ret = 0;
162 		}
163 	} else {
164 		irqd_set_move_pending(data);
165 		irq_copy_pending(desc, mask);
166 	}
167 
168 	if (desc->affinity_notify) {
169 		kref_get(&desc->affinity_notify->kref);
170 		schedule_work(&desc->affinity_notify->work);
171 	}
172 	irqd_set(data, IRQD_AFFINITY_SET);
173 
174 	return ret;
175 }
176 
177 /**
178  *	irq_set_affinity - Set the irq affinity of a given irq
179  *	@irq:		Interrupt to set affinity
180  *	@mask:		cpumask of CPUs to which the interrupt may be routed
181  *
182  */
183 int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
184 {
185 	struct irq_desc *desc = irq_to_desc(irq);
186 	unsigned long flags;
187 	int ret;
188 
189 	if (!desc)
190 		return -EINVAL;
191 
192 	raw_spin_lock_irqsave(&desc->lock, flags);
193 	ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
194 	raw_spin_unlock_irqrestore(&desc->lock, flags);
195 	return ret;
196 }
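
/*
 * Usage sketch (hypothetical, not part of this file): pinning an interrupt
 * to a single CPU from process context. cpumask_of() yields a mask with
 * exactly one bit set; irq_set_affinity() returns 0 on success or a
 * negative errno (e.g. -EINVAL if the line has no descriptor or the chip
 * cannot reroute it).
 *
 *	static int foo_pin_irq(unsigned int irq, int cpu)
 *	{
 *		if (!irq_can_set_affinity(irq))
 *			return -ENOSYS;
 *		return irq_set_affinity(irq, cpumask_of(cpu));
 *	}
 */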
197 
198 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
199 {
200 	unsigned long flags;
201 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
202 
203 	if (!desc)
204 		return -EINVAL;
205 	desc->affinity_hint = m;
206 	irq_put_desc_unlock(desc, flags);
207 	return 0;
208 }
209 EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
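
/*
 * Usage sketch (hypothetical): publishing a per-queue affinity hint that
 * userspace balancers can read back via /proc/irq/<n>/affinity_hint. The
 * hint must be reset to NULL before the irq is freed; __free_irq() below
 * warns if it is still set at that point.
 *
 *	irq_set_affinity_hint(foo->queue_irq, cpumask_of(queue_cpu));
 *	...
 *	irq_set_affinity_hint(foo->queue_irq, NULL);
 *	free_irq(foo->queue_irq, foo);
 */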
210 
211 static void irq_affinity_notify(struct work_struct *work)
212 {
213 	struct irq_affinity_notify *notify =
214 		container_of(work, struct irq_affinity_notify, work);
215 	struct irq_desc *desc = irq_to_desc(notify->irq);
216 	cpumask_var_t cpumask;
217 	unsigned long flags;
218 
219 	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
220 		goto out;
221 
222 	raw_spin_lock_irqsave(&desc->lock, flags);
223 	if (irq_move_pending(&desc->irq_data))
224 		irq_get_pending(cpumask, desc);
225 	else
226 		cpumask_copy(cpumask, desc->irq_data.affinity);
227 	raw_spin_unlock_irqrestore(&desc->lock, flags);
228 
229 	notify->notify(notify, cpumask);
230 
231 	free_cpumask_var(cpumask);
232 out:
233 	kref_put(&notify->kref, notify->release);
234 }
235 
236 /**
237  *	irq_set_affinity_notifier - control notification of IRQ affinity changes
238  *	@irq:		Interrupt for which to enable/disable notification
239  *	@notify:	Context for notification, or %NULL to disable
240  *			notification.  Function pointers must be initialised;
241  *			the other fields will be initialised by this function.
242  *
243  *	Must be called in process context.  Notification may only be enabled
244  *	after the IRQ is allocated and must be disabled before the IRQ is
245  *	freed using free_irq().
246  */
247 int
248 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
249 {
250 	struct irq_desc *desc = irq_to_desc(irq);
251 	struct irq_affinity_notify *old_notify;
252 	unsigned long flags;
253 
254 	/* The release function is promised process context */
255 	might_sleep();
256 
257 	if (!desc)
258 		return -EINVAL;
259 
260 	/* Complete initialisation of *notify */
261 	if (notify) {
262 		notify->irq = irq;
263 		kref_init(&notify->kref);
264 		INIT_WORK(&notify->work, irq_affinity_notify);
265 	}
266 
267 	raw_spin_lock_irqsave(&desc->lock, flags);
268 	old_notify = desc->affinity_notify;
269 	desc->affinity_notify = notify;
270 	raw_spin_unlock_irqrestore(&desc->lock, flags);
271 
272 	if (old_notify)
273 		kref_put(&old_notify->kref, old_notify->release);
274 
275 	return 0;
276 }
277 EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
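
/*
 * Usage sketch (hypothetical): the caller embeds struct irq_affinity_notify
 * in its own context object and supplies the two methods; the kref and work
 * members are filled in by irq_set_affinity_notifier() itself, as
 * documented above.
 *
 *	struct foo_ctx {
 *		struct irq_affinity_notify notify;
 *		// ...
 *	};
 *
 *	static void foo_notify(struct irq_affinity_notify *notify,
 *			       const cpumask_t *mask)
 *	{
 *		struct foo_ctx *ctx = container_of(notify, struct foo_ctx,
 *						   notify);
 *		// re-steer per-CPU resources according to *mask
 *	}
 *
 *	static void foo_release(struct kref *ref)
 *	{
 *		// drop the reference the notifier held on the context
 *	}
 *
 *	ctx->notify.notify = foo_notify;
 *	ctx->notify.release = foo_release;
 *	err = irq_set_affinity_notifier(irq, &ctx->notify);
 *	...
 *	irq_set_affinity_notifier(irq, NULL);	// before free_irq()
 */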
278 
279 #ifndef CONFIG_AUTO_IRQ_AFFINITY
280 /*
281  * Generic version of the affinity autoselector.
282  */
283 static int
284 setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
285 {
286 	struct irq_chip *chip = irq_desc_get_chip(desc);
287 	struct cpumask *set = irq_default_affinity;
288 	int ret, node = desc->irq_data.node;
289 
290 	/* Excludes PER_CPU and NO_BALANCE interrupts */
291 	if (!irq_can_set_affinity(irq))
292 		return 0;
293 
294 	/*
295 	 * Preserve a userspace affinity setup, but make sure that
296 	 * one of the targets is online.
297 	 */
298 	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
299 		if (cpumask_intersects(desc->irq_data.affinity,
300 				       cpu_online_mask))
301 			set = desc->irq_data.affinity;
302 		else
303 			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
304 	}
305 
306 	cpumask_and(mask, cpu_online_mask, set);
307 	if (node != NUMA_NO_NODE) {
308 		const struct cpumask *nodemask = cpumask_of_node(node);
309 
310 		/* make sure at least one of the cpus in nodemask is online */
311 		if (cpumask_intersects(mask, nodemask))
312 			cpumask_and(mask, mask, nodemask);
313 	}
314 	ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
315 	switch (ret) {
316 	case IRQ_SET_MASK_OK:
317 		cpumask_copy(desc->irq_data.affinity, mask);
318 	case IRQ_SET_MASK_OK_NOCOPY:
319 		irq_set_thread_affinity(desc);
320 	}
321 	return 0;
322 }
323 #else
324 static inline int
325 setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
326 {
327 	return irq_select_affinity(irq);
328 }
329 #endif
330 
331 /*
332  * Called when affinity is set via /proc/irq
333  */
334 int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
335 {
336 	struct irq_desc *desc = irq_to_desc(irq);
337 	unsigned long flags;
338 	int ret;
339 
340 	raw_spin_lock_irqsave(&desc->lock, flags);
341 	ret = setup_affinity(irq, desc, mask);
342 	raw_spin_unlock_irqrestore(&desc->lock, flags);
343 	return ret;
344 }
345 
346 #else
347 static inline int
348 setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
349 {
350 	return 0;
351 }
352 #endif
353 
354 void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
355 {
356 	if (suspend) {
357 		if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
358 			return;
359 		desc->istate |= IRQS_SUSPENDED;
360 	}
361 
362 	if (!desc->depth++)
363 		irq_disable(desc);
364 }
365 
366 static int __disable_irq_nosync(unsigned int irq)
367 {
368 	unsigned long flags;
369 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
370 
371 	if (!desc)
372 		return -EINVAL;
373 	__disable_irq(desc, irq, false);
374 	irq_put_desc_busunlock(desc, flags);
375 	return 0;
376 }
377 
378 /**
379  *	disable_irq_nosync - disable an irq without waiting
380  *	@irq: Interrupt to disable
381  *
382  *	Disable the selected interrupt line.  Disables and Enables are
383  *	nested.
384  *	Unlike disable_irq(), this function does not ensure existing
385  *	instances of the IRQ handler have completed before returning.
386  *
387  *	This function may be called from IRQ context.
388  */
389 void disable_irq_nosync(unsigned int irq)
390 {
391 	__disable_irq_nosync(irq);
392 }
393 EXPORT_SYMBOL(disable_irq_nosync);
394 
395 /**
396  *	disable_irq - disable an irq and wait for completion
397  *	@irq: Interrupt to disable
398  *
399  *	Disable the selected interrupt line.  Enables and Disables are
400  *	nested.
401  *	This function waits for any pending IRQ handlers for this interrupt
402  *	to complete before returning. Do not call it while holding a
403  *	resource the IRQ handler may need, or you will deadlock.
404  *
405  *	This function may be called - with care - from IRQ context.
406  */
407 void disable_irq(unsigned int irq)
408 {
409 	if (!__disable_irq_nosync(irq))
410 		synchronize_irq(irq);
411 }
412 EXPORT_SYMBOL(disable_irq);
413 
414 void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
415 {
416 	if (resume) {
417 		if (!(desc->istate & IRQS_SUSPENDED)) {
418 			if (!desc->action)
419 				return;
420 			if (!(desc->action->flags & IRQF_FORCE_RESUME))
421 				return;
422 			/* Pretend that it got disabled! */
423 			desc->depth++;
424 		}
425 		desc->istate &= ~IRQS_SUSPENDED;
426 	}
427 
428 	switch (desc->depth) {
429 	case 0:
430  err_out:
431 		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
432 		break;
433 	case 1: {
434 		if (desc->istate & IRQS_SUSPENDED)
435 			goto err_out;
436 		/* Prevent probing on this irq: */
437 		irq_settings_set_noprobe(desc);
438 		irq_enable(desc);
439 		check_irq_resend(desc, irq);
440 		/* fall-through */
441 	}
442 	default:
443 		desc->depth--;
444 	}
445 }
446 
447 /**
448  *	enable_irq - enable handling of an irq
449  *	@irq: Interrupt to enable
450  *
451  *	Undoes the effect of one call to disable_irq().  If this
452  *	matches the last disable, processing of interrupts on this
453  *	IRQ line is re-enabled.
454  *
455  *	This function may be called from IRQ context only when
456  *	desc->irq_data.chip->irq_bus_lock and ->irq_bus_sync_unlock are NULL!
457  */
458 void enable_irq(unsigned int irq)
459 {
460 	unsigned long flags;
461 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
462 
463 	if (!desc)
464 		return;
465 	if (WARN(!desc->irq_data.chip,
466 		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
467 		goto out;
468 
469 	__enable_irq(desc, irq, false);
470 out:
471 	irq_put_desc_busunlock(desc, flags);
472 }
473 EXPORT_SYMBOL(enable_irq);
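
/*
 * Sketch of the nesting semantics (values show desc->depth): disable and
 * enable calls pair up, and the line is actually re-enabled only when the
 * depth drops back to zero. An extra enable_irq() trips the "Unbalanced
 * enable" warning above.
 *
 *	disable_irq(irq);		// 0 -> 1, line disabled, handlers drained
 *	disable_irq_nosync(irq);	// 1 -> 2
 *	enable_irq(irq);		// 2 -> 1, still disabled
 *	enable_irq(irq);		// 1 -> 0, line enabled again
 */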
474 
475 static int set_irq_wake_real(unsigned int irq, unsigned int on)
476 {
477 	struct irq_desc *desc = irq_to_desc(irq);
478 	int ret = -ENXIO;
479 
480 	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
481 		return 0;
482 
483 	if (desc->irq_data.chip->irq_set_wake)
484 		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
485 
486 	return ret;
487 }
488 
489 /**
490  *	irq_set_irq_wake - control irq power management wakeup
491  *	@irq:	interrupt to control
492  *	@on:	enable/disable power management wakeup
493  *
494  *	Enable/disable power management wakeup mode, which is
495  *	disabled by default.  Enables and disables must match,
496  *	just as they match for non-wakeup mode support.
497  *
498  *	Wakeup mode lets this IRQ wake the system from sleep
499  *	states like "suspend to RAM".
500  */
501 int irq_set_irq_wake(unsigned int irq, unsigned int on)
502 {
503 	unsigned long flags;
504 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
505 	int ret = 0;
506 
507 	if (!desc)
508 		return -EINVAL;
509 
510 	/* wakeup-capable irqs can be shared between drivers that
511 	 * don't need to have the same sleep mode behaviors.
512 	 */
513 	if (on) {
514 		if (desc->wake_depth++ == 0) {
515 			ret = set_irq_wake_real(irq, on);
516 			if (ret)
517 				desc->wake_depth = 0;
518 			else
519 				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
520 		}
521 	} else {
522 		if (desc->wake_depth == 0) {
523 			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
524 		} else if (--desc->wake_depth == 0) {
525 			ret = set_irq_wake_real(irq, on);
526 			if (ret)
527 				desc->wake_depth = 1;
528 			else
529 				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
530 		}
531 	}
532 	irq_put_desc_busunlock(desc, flags);
533 	return ret;
534 }
535 EXPORT_SYMBOL(irq_set_irq_wake);
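
/*
 * Usage sketch (hypothetical): arming an interrupt as a system wakeup
 * source across suspend. enable_irq_wake()/disable_irq_wake() from
 * <linux/interrupt.h> are thin wrappers around irq_set_irq_wake(), and
 * must balance just like the wake_depth accounting above.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(foo->irq);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(foo->irq);
 *		return 0;
 *	}
 */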
536 
537 /*
538  * Internal function that tells the architecture code whether a
539  * particular irq has been exclusively allocated or is available
540  * for driver use.
541  */
542 int can_request_irq(unsigned int irq, unsigned long irqflags)
543 {
544 	unsigned long flags;
545 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
546 	int canrequest = 0;
547 
548 	if (!desc)
549 		return 0;
550 
551 	if (irq_settings_can_request(desc)) {
552 		if (desc->action)
553 			if (irqflags & desc->action->flags & IRQF_SHARED)
554 				canrequest = 1;
555 	}
556 	irq_put_desc_unlock(desc, flags);
557 	return canrequest;
558 }
559 
560 int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
561 		      unsigned long flags)
562 {
563 	struct irq_chip *chip = desc->irq_data.chip;
564 	int ret, unmask = 0;
565 
566 	if (!chip || !chip->irq_set_type) {
567 		/*
568 		 * IRQF_TRIGGER_* but the PIC does not support multiple
569 		 * flow-types?
570 		 */
571 		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
572 			 chip ? (chip->name ? : "unknown") : "unknown");
573 		return 0;
574 	}
575 
576 	flags &= IRQ_TYPE_SENSE_MASK;
577 
578 	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
579 		if (!irqd_irq_masked(&desc->irq_data))
580 			mask_irq(desc);
581 		if (!irqd_irq_disabled(&desc->irq_data))
582 			unmask = 1;
583 	}
584 
585 	/* caller masked out all except trigger mode flags */
586 	ret = chip->irq_set_type(&desc->irq_data, flags);
587 
588 	switch (ret) {
589 	case IRQ_SET_MASK_OK:
590 		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
591 		irqd_set(&desc->irq_data, flags);
592 
593 	case IRQ_SET_MASK_OK_NOCOPY:
594 		flags = irqd_get_trigger_type(&desc->irq_data);
595 		irq_settings_set_trigger_mask(desc, flags);
596 		irqd_clear(&desc->irq_data, IRQD_LEVEL);
597 		irq_settings_clr_level(desc);
598 		if (flags & IRQ_TYPE_LEVEL_MASK) {
599 			irq_settings_set_level(desc);
600 			irqd_set(&desc->irq_data, IRQD_LEVEL);
601 		}
602 
603 		ret = 0;
604 		break;
605 	default:
606 		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
607 		       flags, irq, chip->irq_set_type);
608 	}
609 	if (unmask)
610 		unmask_irq(desc);
611 	return ret;
612 }
613 
614 /*
615  * Default primary interrupt handler for threaded interrupts. Is
616  * assigned as primary handler when request_threaded_irq is called
617  * with handler == NULL. Useful for oneshot interrupts.
618  */
619 static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
620 {
621 	return IRQ_WAKE_THREAD;
622 }
623 
624 /*
625  * Primary handler for nested threaded interrupts. Should never be
626  * called.
627  */
628 static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
629 {
630 	WARN(1, "Primary handler called for nested irq %d\n", irq);
631 	return IRQ_NONE;
632 }
633 
634 static int irq_wait_for_interrupt(struct irqaction *action)
635 {
636 	set_current_state(TASK_INTERRUPTIBLE);
637 
638 	while (!kthread_should_stop()) {
639 
640 		if (test_and_clear_bit(IRQTF_RUNTHREAD,
641 				       &action->thread_flags)) {
642 			__set_current_state(TASK_RUNNING);
643 			return 0;
644 		}
645 		schedule();
646 		set_current_state(TASK_INTERRUPTIBLE);
647 	}
648 	__set_current_state(TASK_RUNNING);
649 	return -1;
650 }
651 
652 /*
653  * Oneshot interrupts keep the irq line masked until the threaded
654  * handler has finished. Unmask the line if the interrupt has not been
655  * disabled and is marked MASKED.
656  */
657 static void irq_finalize_oneshot(struct irq_desc *desc,
658 				 struct irqaction *action)
659 {
660 	if (!(desc->istate & IRQS_ONESHOT))
661 		return;
662 again:
663 	chip_bus_lock(desc);
664 	raw_spin_lock_irq(&desc->lock);
665 
666 	/*
667 	 * Implausible though it may be, we need to protect ourselves
668 	 * against the following scenario:
669 	 *
670 	 * The thread finishes before the hard interrupt handler on
671 	 * the other CPU. If we unmask the irq line then the interrupt
672 	 * can come in again, mask the line, and bail out due to
673 	 * IRQS_INPROGRESS, leaving the irq line masked forever.
674 	 *
675 	 * This also serializes the state of shared oneshot handlers
676 	 * versus "desc->threads_oneshot |= action->thread_mask;" in
677 	 * irq_wake_thread(). See the comment there which explains the
678 	 * serialization.
679 	 */
680 	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
681 		raw_spin_unlock_irq(&desc->lock);
682 		chip_bus_sync_unlock(desc);
683 		cpu_relax();
684 		goto again;
685 	}
686 
687 	/*
688 	 * Now check again whether the thread should run. Otherwise
689 	 * we would clear the threads_oneshot bit of this thread which
690 	 * was just set.
691 	 */
692 	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
693 		goto out_unlock;
694 
695 	desc->threads_oneshot &= ~action->thread_mask;
696 
697 	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
698 	    irqd_irq_masked(&desc->irq_data))
699 		unmask_irq(desc);
700 
701 out_unlock:
702 	raw_spin_unlock_irq(&desc->lock);
703 	chip_bus_sync_unlock(desc);
704 }
705 
706 #ifdef CONFIG_SMP
707 /*
708  * Check whether we need to change the affinity of the interrupt thread.
709  */
710 static void
711 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
712 {
713 	cpumask_var_t mask;
714 
715 	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
716 		return;
717 
718 	/*
719 	 * In case we are out of memory we set IRQTF_AFFINITY again and
720 	 * try again next time
721 	 */
722 	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
723 		set_bit(IRQTF_AFFINITY, &action->thread_flags);
724 		return;
725 	}
726 
727 	raw_spin_lock_irq(&desc->lock);
728 	cpumask_copy(mask, desc->irq_data.affinity);
729 	raw_spin_unlock_irq(&desc->lock);
730 
731 	set_cpus_allowed_ptr(current, mask);
732 	free_cpumask_var(mask);
733 }
734 #else
735 static inline void
736 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
737 #endif
738 
739 /*
740  * Interrupts which are not explicitly requested as threaded
741  * interrupts rely on the implicit bh/preempt disable of the hard irq
742  * context. So we need to disable bh here to avoid deadlocks and other
743  * side effects.
744  */
745 static irqreturn_t
746 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
747 {
748 	irqreturn_t ret;
749 
750 	local_bh_disable();
751 	ret = action->thread_fn(action->irq, action->dev_id);
752 	irq_finalize_oneshot(desc, action);
753 	local_bh_enable();
754 	return ret;
755 }
756 
757 /*
758  * Interrupts explicitly requested as threaded interrupts want to be
759  * preemptible - many of them need to sleep and wait for slow buses to
760  * complete.
761  */
762 static irqreturn_t irq_thread_fn(struct irq_desc *desc,
763 		struct irqaction *action)
764 {
765 	irqreturn_t ret;
766 
767 	ret = action->thread_fn(action->irq, action->dev_id);
768 	irq_finalize_oneshot(desc, action);
769 	return ret;
770 }
771 
772 static void wake_threads_waitq(struct irq_desc *desc)
773 {
774 	if (atomic_dec_and_test(&desc->threads_active) &&
775 	    waitqueue_active(&desc->wait_for_threads))
776 		wake_up(&desc->wait_for_threads);
777 }
778 
779 static void irq_thread_dtor(struct task_work *unused)
780 {
781 	struct task_struct *tsk = current;
782 	struct irq_desc *desc;
783 	struct irqaction *action;
784 
785 	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
786 		return;
787 
788 	action = kthread_data(tsk);
789 
790 	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
791 	       tsk->comm ? tsk->comm : "", tsk->pid, action->irq);
792 
793 
794 	desc = irq_to_desc(action->irq);
795 	/*
796 	 * If IRQTF_RUNTHREAD is set, we need to decrement
797 	 * desc->threads_active and wake possible waiters.
798 	 */
799 	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
800 		wake_threads_waitq(desc);
801 
802 	/* Prevent a stale desc->threads_oneshot */
803 	irq_finalize_oneshot(desc, action);
804 }
805 
806 /*
807  * Interrupt handler thread
808  */
809 static int irq_thread(void *data)
810 {
811 	struct task_work on_exit_work;
812 	static const struct sched_param param = {
813 		.sched_priority = MAX_USER_RT_PRIO/2,
814 	};
815 	struct irqaction *action = data;
816 	struct irq_desc *desc = irq_to_desc(action->irq);
817 	irqreturn_t (*handler_fn)(struct irq_desc *desc,
818 			struct irqaction *action);
819 
820 	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
821 					&action->thread_flags))
822 		handler_fn = irq_forced_thread_fn;
823 	else
824 		handler_fn = irq_thread_fn;
825 
826 	sched_setscheduler(current, SCHED_FIFO, &param);
827 
828 	init_task_work(&on_exit_work, irq_thread_dtor, NULL);
829 	task_work_add(current, &on_exit_work, false);
830 
831 	while (!irq_wait_for_interrupt(action)) {
832 		irqreturn_t action_ret;
833 
834 		irq_thread_check_affinity(desc, action);
835 
836 		action_ret = handler_fn(desc, action);
837 		if (!noirqdebug)
838 			note_interrupt(action->irq, desc, action_ret);
839 
840 		wake_threads_waitq(desc);
841 	}
842 
843 	/*
844 	 * This is the regular exit path. __free_irq() is stopping the
845 	 * thread via kthread_stop() after calling
846 	 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
847 	 * oneshot mask bit can be set. We cannot verify that as we
848 	 * cannot touch the oneshot mask at this point anymore as
849 	 * __setup_irq() might have given out current's thread_mask
850 	 * again.
851 	 */
852 	task_work_cancel(current, irq_thread_dtor);
853 	return 0;
854 }
855 
856 static void irq_setup_forced_threading(struct irqaction *new)
857 {
858 	if (!force_irqthreads)
859 		return;
860 	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
861 		return;
862 
863 	new->flags |= IRQF_ONESHOT;
864 
865 	if (!new->thread_fn) {
866 		set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
867 		new->thread_fn = new->handler;
868 		new->handler = irq_default_primary_handler;
869 	}
870 }
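
/*
 * Effect sketch: with "threadirqs" on the kernel command line, a plain
 *
 *	request_irq(irq, foo_handler, 0, "foo", foo);
 *
 * is transparently turned into the equivalent of
 *
 *	request_threaded_irq(irq, irq_default_primary_handler, foo_handler,
 *			     IRQF_ONESHOT, "foo", foo);
 *
 * unless the action is marked IRQF_NO_THREAD, IRQF_PERCPU or IRQF_ONESHOT.
 */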
871 
872 /*
873  * Internal function to register an irqaction - typically used to
874  * allocate special interrupts that are part of the architecture.
875  */
876 static int
877 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
878 {
879 	struct irqaction *old, **old_ptr;
880 	unsigned long flags, thread_mask = 0;
881 	int ret, nested, shared = 0;
882 	cpumask_var_t mask;
883 
884 	if (!desc)
885 		return -EINVAL;
886 
887 	if (desc->irq_data.chip == &no_irq_chip)
888 		return -ENOSYS;
889 	if (!try_module_get(desc->owner))
890 		return -ENODEV;
891 	/*
892 	 * Some drivers like serial.c use request_irq() heavily,
893 	 * so we have to be careful not to interfere with a
894 	 * running system.
895 	 */
896 	if (new->flags & IRQF_SAMPLE_RANDOM) {
897 		/*
898 		 * This function might sleep, so we want to call it first,
899 		 * outside of the atomic block.
900 		 * Yes, this might clear the entropy pool if the wrong
901 		 * driver is attempted to be loaded, without actually
902 		 * installing a new handler, but that is hardly a problem:
903 		 * only the sysadmin is able to do this.
904 		 */
905 		rand_initialize_irq(irq);
906 	}
907 
908 	/*
909 	 * Check whether the interrupt nests into another interrupt
910 	 * thread.
911 	 */
912 	nested = irq_settings_is_nested_thread(desc);
913 	if (nested) {
914 		if (!new->thread_fn) {
915 			ret = -EINVAL;
916 			goto out_mput;
917 		}
918 		/*
919 		 * Replace the primary handler, which the driver provided
920 		 * for non-nested interrupt handling, with the dummy
921 		 * function which warns when called.
922 		 */
923 		new->handler = irq_nested_primary_handler;
924 	} else {
925 		if (irq_settings_can_thread(desc))
926 			irq_setup_forced_threading(new);
927 	}
928 
929 	/*
930 	 * Create a handler thread when a thread function is supplied
931 	 * and the interrupt does not nest into another interrupt
932 	 * thread.
933 	 */
934 	if (new->thread_fn && !nested) {
935 		struct task_struct *t;
936 
937 		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
938 				   new->name);
939 		if (IS_ERR(t)) {
940 			ret = PTR_ERR(t);
941 			goto out_mput;
942 		}
943 		/*
944 		 * We keep the reference to the task struct even if
945 		 * the thread dies, so that the interrupt code cannot
946 		 * reference an already freed task_struct.
947 		 */
948 		get_task_struct(t);
949 		new->thread = t;
950 	}
951 
952 	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
953 		ret = -ENOMEM;
954 		goto out_thread;
955 	}
956 
957 	/*
958 	 * The following block of code has to be executed atomically
959 	 */
960 	raw_spin_lock_irqsave(&desc->lock, flags);
961 	old_ptr = &desc->action;
962 	old = *old_ptr;
963 	if (old) {
964 		/*
965 		 * Can't share interrupts unless both agree to and are
966 		 * the same type (level, edge, polarity). So both flag
967 		 * fields must have IRQF_SHARED set and the bits which
968 		 * set the trigger type must match. Also all must
969 		 * agree on ONESHOT.
970 		 */
971 		if (!((old->flags & new->flags) & IRQF_SHARED) ||
972 		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
973 		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
974 			goto mismatch;
975 
976 		/* All handlers must agree on per-cpuness */
977 		if ((old->flags & IRQF_PERCPU) !=
978 		    (new->flags & IRQF_PERCPU))
979 			goto mismatch;
980 
981 		/* add new interrupt at end of irq queue */
982 		do {
983 			/*
984 			 * Or all existing action->thread_mask bits,
985 			 * so we can find the next zero bit for this
986 			 * new action.
987 			 */
988 			thread_mask |= old->thread_mask;
989 			old_ptr = &old->next;
990 			old = *old_ptr;
991 		} while (old);
992 		shared = 1;
993 	}
994 
995 	/*
996 	 * Setup the thread mask for this irqaction for ONESHOT. For
997 	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
998 	 * conditional in irq_wake_thread().
999 	 */
1000 	if (new->flags & IRQF_ONESHOT) {
1001 		/*
1002 		 * Unlikely to have 32 (or 64 on 64-bit) irqs sharing one line,
1003 		 * but who knows.
1004 		 */
1005 		if (thread_mask == ~0UL) {
1006 			ret = -EBUSY;
1007 			goto out_mask;
1008 		}
1009 		/*
1010 		 * The thread_mask for the action is or'ed into
1011 		 * desc->threads_oneshot to indicate that the
1012 		 * IRQF_ONESHOT thread handler has been woken, but not
1013 		 * yet finished. The bit is cleared when a thread
1014 		 * completes. When all threads of a shared interrupt
1015 		 * line have completed desc->threads_oneshot becomes
1016 		 * zero and the interrupt line is unmasked. See
1017 		 * handle.c:irq_wake_thread() for further information.
1018 		 *
1019 		 * If no thread is woken by primary (hard irq context)
1020 		 * interrupt handlers, then desc->threads_active is
1021 		 * also checked for zero to unmask the irq line in the
1022 		 * affected hard irq flow handlers
1023 		 * (handle_[fasteoi|level]_irq).
1024 		 *
1025 		 * The new action gets the first zero bit of
1026 		 * thread_mask assigned. See the loop above which or's
1027 		 * all existing action->thread_mask bits.
1028 		 */
1029 		new->thread_mask = 1 << ffz(thread_mask);
1030 
1031 	} else if (new->handler == irq_default_primary_handler) {
1032 		/*
1033 		 * The interrupt was requested with handler = NULL, so
1034 		 * we use the default primary handler for it. But it
1035 		 * does not have the oneshot flag set. In combination
1036 		 * with level interrupts this is deadly, because the
1037 		 * default primary handler just wakes the thread, then
1038 		 * the irq line is re-enabled, but the device still
1039 		 * has the level irq asserted. Rinse and repeat....
1040 		 *
1041 		 * While this works for edge type interrupts, we play
1042 		 * it safe and reject unconditionally because we can't
1043 		 * say for sure which type this interrupt really
1044 		 * has. The type flags are unreliable as the
1045 		 * underlying chip implementation can override them.
1046 		 */
1047 		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
1048 		       irq);
1049 		ret = -EINVAL;
1050 		goto out_mask;
1051 	}
1052 
1053 	if (!shared) {
1054 		init_waitqueue_head(&desc->wait_for_threads);
1055 
1056 		/* Setup the type (level, edge, polarity) if configured: */
1057 		if (new->flags & IRQF_TRIGGER_MASK) {
1058 			ret = __irq_set_trigger(desc, irq,
1059 					new->flags & IRQF_TRIGGER_MASK);
1060 
1061 			if (ret)
1062 				goto out_mask;
1063 		}
1064 
1065 		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1066 				  IRQS_ONESHOT | IRQS_WAITING);
1067 		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1068 
1069 		if (new->flags & IRQF_PERCPU) {
1070 			irqd_set(&desc->irq_data, IRQD_PER_CPU);
1071 			irq_settings_set_per_cpu(desc);
1072 		}
1073 
1074 		if (new->flags & IRQF_ONESHOT)
1075 			desc->istate |= IRQS_ONESHOT;
1076 
1077 		if (irq_settings_can_autoenable(desc))
1078 			irq_startup(desc, true);
1079 		else
1080 			/* Undo nested disables: */
1081 			desc->depth = 1;
1082 
1083 		/* Exclude IRQ from balancing if requested */
1084 		if (new->flags & IRQF_NOBALANCING) {
1085 			irq_settings_set_no_balancing(desc);
1086 			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1087 		}
1088 
1089 		/* Set default affinity mask once everything is setup */
1090 		setup_affinity(irq, desc, mask);
1091 
1092 	} else if (new->flags & IRQF_TRIGGER_MASK) {
1093 		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1094 		unsigned int omsk = irq_settings_get_trigger_mask(desc);
1095 
1096 		if (nmsk != omsk)
1097 			/* hope the handler works with the current trigger mode */
1098 			pr_warning("irq %d uses trigger mode %u; requested %u\n",
1099 				   irq, omsk, nmsk);
1100 	}
1101 
1102 	new->irq = irq;
1103 	*old_ptr = new;
1104 
1105 	/* Reset broken irq detection when installing new handler */
1106 	desc->irq_count = 0;
1107 	desc->irqs_unhandled = 0;
1108 
1109 	/*
1110 	 * Check whether we disabled the irq via the spurious handler
1111 	 * before. Reenable it and give it another chance.
1112 	 */
1113 	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1114 		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1115 		__enable_irq(desc, irq, false);
1116 	}
1117 
1118 	raw_spin_unlock_irqrestore(&desc->lock, flags);
1119 
1120 	/*
1121 	 * Strictly no need to wake it up, but hung_task complains
1122 	 * when no hard interrupt wakes the thread up.
1123 	 */
1124 	if (new->thread)
1125 		wake_up_process(new->thread);
1126 
1127 	register_irq_proc(irq, desc);
1128 	new->dir = NULL;
1129 	register_handler_proc(irq, new);
1130 	free_cpumask_var(mask);
1131 
1132 	return 0;
1133 
1134 mismatch:
1135 	if (!(new->flags & IRQF_PROBE_SHARED)) {
1136 		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1137 		       irq, new->flags, new->name, old->flags, old->name);
1138 #ifdef CONFIG_DEBUG_SHIRQ
1139 		dump_stack();
1140 #endif
1141 	}
1142 	ret = -EBUSY;
1143 
1144 out_mask:
1145 	raw_spin_unlock_irqrestore(&desc->lock, flags);
1146 	free_cpumask_var(mask);
1147 
1148 out_thread:
1149 	if (new->thread) {
1150 		struct task_struct *t = new->thread;
1151 
1152 		new->thread = NULL;
1153 		kthread_stop(t);
1154 		put_task_struct(t);
1155 	}
1156 out_mput:
1157 	module_put(desc->owner);
1158 	return ret;
1159 }
1160 
1161 /**
1162  *	setup_irq - setup an interrupt
1163  *	@irq: Interrupt line to setup
1164  *	@act: irqaction for the interrupt
1165  *
1166  * Used to statically setup interrupts in the early boot process.
1167  */
1168 int setup_irq(unsigned int irq, struct irqaction *act)
1169 {
1170 	int retval;
1171 	struct irq_desc *desc = irq_to_desc(irq);
1172 
1173 	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1174 		return -EINVAL;
1175 	chip_bus_lock(desc);
1176 	retval = __setup_irq(irq, desc, act);
1177 	chip_bus_sync_unlock(desc);
1178 
1179 	return retval;
1180 }
1181 EXPORT_SYMBOL_GPL(setup_irq);
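
/*
 * Usage sketch, in the style of arch timer code (names hypothetical): a
 * statically allocated irqaction installed before the allocators are up,
 * which is the point of setup_irq() versus request_irq().
 *
 *	static irqreturn_t foo_timer_interrupt(int irq, void *dev_id)
 *	{
 *		foo_clock_event_tick();
 *		return IRQ_HANDLED;
 *	}
 *
 *	static struct irqaction foo_timer_irqaction = {
 *		.handler = foo_timer_interrupt,
 *		.flags	 = IRQF_TIMER,
 *		.name	 = "foo-timer",
 *	};
 *
 *	void __init foo_time_init(void)
 *	{
 *		setup_irq(FOO_TIMER_IRQ, &foo_timer_irqaction);
 *	}
 */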
1182 
1183 /*
1184  * Internal function to unregister an irqaction - used to free
1185  * regular and special interrupts that are part of the architecture.
1186  */
1187 static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
1188 {
1189 	struct irq_desc *desc = irq_to_desc(irq);
1190 	struct irqaction *action, **action_ptr;
1191 	unsigned long flags;
1192 
1193 	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1194 
1195 	if (!desc)
1196 		return NULL;
1197 
1198 	raw_spin_lock_irqsave(&desc->lock, flags);
1199 
1200 	/*
1201 	 * There can be multiple actions per IRQ descriptor, find the right
1202 	 * one based on the dev_id:
1203 	 */
1204 	action_ptr = &desc->action;
1205 	for (;;) {
1206 		action = *action_ptr;
1207 
1208 		if (!action) {
1209 			WARN(1, "Trying to free already-free IRQ %d\n", irq);
1210 			raw_spin_unlock_irqrestore(&desc->lock, flags);
1211 
1212 			return NULL;
1213 		}
1214 
1215 		if (action->dev_id == dev_id)
1216 			break;
1217 		action_ptr = &action->next;
1218 	}
1219 
1220 	/* Found it - now remove it from the list of entries: */
1221 	*action_ptr = action->next;
1222 
1223 	/* If this was the last handler, shut down the IRQ line: */
1224 	if (!desc->action)
1225 		irq_shutdown(desc);
1226 
1227 #ifdef CONFIG_SMP
1228 	/* make sure affinity_hint is cleaned up */
1229 	if (WARN_ON_ONCE(desc->affinity_hint))
1230 		desc->affinity_hint = NULL;
1231 #endif
1232 
1233 	raw_spin_unlock_irqrestore(&desc->lock, flags);
1234 
1235 	unregister_handler_proc(irq, action);
1236 
1237 	/* Make sure it's not being used on another CPU: */
1238 	synchronize_irq(irq);
1239 
1240 #ifdef CONFIG_DEBUG_SHIRQ
1241 	/*
1242 	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1243 	 * event to happen even now that it's being freed, so let's make sure
1244 	 * that is so by doing an extra call to the handler ....
1245 	 *
1246 	 * ( We do this after actually deregistering it, to make sure that a
1247 	 *   'real' IRQ doesn't run in parallel with our fake. )
1248 	 */
1249 	if (action->flags & IRQF_SHARED) {
1250 		local_irq_save(flags);
1251 		action->handler(irq, dev_id);
1252 		local_irq_restore(flags);
1253 	}
1254 #endif
1255 
1256 	if (action->thread) {
1257 		kthread_stop(action->thread);
1258 		put_task_struct(action->thread);
1259 	}
1260 
1261 	module_put(desc->owner);
1262 	return action;
1263 }
1264 
1265 /**
1266  *	remove_irq - free an interrupt
1267  *	@irq: Interrupt line to free
1268  *	@act: irqaction for the interrupt
1269  *
1270  * Used to remove interrupts statically setup by the early boot process.
1271  */
1272 void remove_irq(unsigned int irq, struct irqaction *act)
1273 {
1274 	struct irq_desc *desc = irq_to_desc(irq);
1275 
1276 	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1277 	    __free_irq(irq, act->dev_id);
1278 }
1279 EXPORT_SYMBOL_GPL(remove_irq);
1280 
1281 /**
1282  *	free_irq - free an interrupt allocated with request_irq
1283  *	@irq: Interrupt line to free
1284  *	@dev_id: Device identity to free
1285  *
1286  *	Remove an interrupt handler. The handler is removed and if the
1287  *	interrupt line is no longer in use by any driver it is disabled.
1288  *	On a shared IRQ the caller must ensure the interrupt is disabled
1289  *	on the card it drives before calling this function. The function
1290  *	does not return until any executing interrupts for this IRQ
1291  *	have completed.
1292  *
1293  *	This function must not be called from interrupt context.
1294  */
1295 void free_irq(unsigned int irq, void *dev_id)
1296 {
1297 	struct irq_desc *desc = irq_to_desc(irq);
1298 
1299 	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1300 		return;
1301 
1302 #ifdef CONFIG_SMP
1303 	if (WARN_ON(desc->affinity_notify))
1304 		desc->affinity_notify = NULL;
1305 #endif
1306 
1307 	chip_bus_lock(desc);
1308 	kfree(__free_irq(irq, dev_id));
1309 	chip_bus_sync_unlock(desc);
1310 }
1311 EXPORT_SYMBOL(free_irq);
1312 
1313 /**
1314  *	request_threaded_irq - allocate an interrupt line
1315  *	@irq: Interrupt line to allocate
1316  *	@handler: Function to be called when the IRQ occurs.
1317  *		  Primary handler for threaded interrupts
1318  *		  If NULL and thread_fn != NULL the default
1319  *		  primary handler is installed
1320  *	@thread_fn: Function called from the irq handler thread
1321  *		    If NULL, no irq thread is created
1322  *	@irqflags: Interrupt type flags
1323  *	@devname: An ascii name for the claiming device
1324  *	@dev_id: A cookie passed back to the handler function
1325  *
1326  *	This call allocates interrupt resources and enables the
1327  *	interrupt line and IRQ handling. From the point this
1328  *	call is made your handler function may be invoked. Since
1329  *	your handler function must clear any interrupt the board
1330  *	raises, you must take care both to initialise your hardware
1331  *	and to set up the interrupt handler in the right order.
1332  *
1333  *	If you want to set up a threaded irq handler for your device
1334  *	then you need to supply @handler and @thread_fn. @handler is
1335  *	still called in hard interrupt context and has to check
1336  *	whether the interrupt originates from the device. If yes it
1337  *	needs to disable the interrupt on the device and return
1338  *	IRQ_WAKE_THREAD which will wake up the handler thread and run
1339  *	@thread_fn. This split handler design is necessary to support
1340  *	shared interrupts.
1341  *
1342  *	Dev_id must be globally unique. Normally the address of the
1343  *	device data structure is used as the cookie. Since the handler
1344  *	receives this value it makes sense to use it.
1345  *
1346  *	If your interrupt is shared you must pass a non NULL dev_id
1347  *	as this is required when freeing the interrupt.
1348  *
1349  *	Flags:
1350  *
1351  *	IRQF_SHARED		Interrupt is shared
1352  *	IRQF_SAMPLE_RANDOM	The interrupt can be used for entropy
1353  *	IRQF_TRIGGER_*		Specify active edge(s) or level
1354  *
1355  */
1356 int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1357 			 irq_handler_t thread_fn, unsigned long irqflags,
1358 			 const char *devname, void *dev_id)
1359 {
1360 	struct irqaction *action;
1361 	struct irq_desc *desc;
1362 	int retval;
1363 
1364 	/*
1365 	 * Sanity-check: shared interrupts must pass in a real dev-ID,
1366 	 * otherwise we'll have trouble later trying to figure out
1367 	 * which interrupt is which (messes up the interrupt freeing
1368 	 * logic etc).
1369 	 */
1370 	if ((irqflags & IRQF_SHARED) && !dev_id)
1371 		return -EINVAL;
1372 
1373 	desc = irq_to_desc(irq);
1374 	if (!desc)
1375 		return -EINVAL;
1376 
1377 	if (!irq_settings_can_request(desc) ||
1378 	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1379 		return -EINVAL;
1380 
1381 	if (!handler) {
1382 		if (!thread_fn)
1383 			return -EINVAL;
1384 		handler = irq_default_primary_handler;
1385 	}
1386 
1387 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1388 	if (!action)
1389 		return -ENOMEM;
1390 
1391 	action->handler = handler;
1392 	action->thread_fn = thread_fn;
1393 	action->flags = irqflags;
1394 	action->name = devname;
1395 	action->dev_id = dev_id;
1396 
1397 	chip_bus_lock(desc);
1398 	retval = __setup_irq(irq, desc, action);
1399 	chip_bus_sync_unlock(desc);
1400 
1401 	if (retval)
1402 		kfree(action);
1403 
1404 #ifdef CONFIG_DEBUG_SHIRQ_FIXME
1405 	if (!retval && (irqflags & IRQF_SHARED)) {
1406 		/*
1407 		 * It's a shared IRQ -- the driver ought to be prepared for it
1408 		 * to happen immediately, so let's make sure....
1409 		 * We disable the irq to make sure that a 'real' IRQ doesn't
1410 		 * run in parallel with our fake.
1411 		 */
1412 		unsigned long flags;
1413 
1414 		disable_irq(irq);
1415 		local_irq_save(flags);
1416 
1417 		handler(irq, dev_id);
1418 
1419 		local_irq_restore(flags);
1420 		enable_irq(irq);
1421 	}
1422 #endif
1423 	return retval;
1424 }
1425 EXPORT_SYMBOL(request_threaded_irq);
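
/*
 * Usage sketch (hypothetical driver): the split primary/threaded pair the
 * comment above describes. The primary handler runs in hard irq context,
 * checks whether the (shared) line fired for this device, silences it and
 * returns IRQ_WAKE_THREAD; work that may sleep runs in the thread.
 *
 *	static irqreturn_t foo_hardirq(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		if (!foo_hw_irq_pending(foo))	// shared line, not ours
 *			return IRQ_NONE;
 *		foo_hw_mask_irqs(foo);		// quiet the device
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t foo_thread(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		foo_process_events(foo);	// may sleep, e.g. bus I/O
 *		foo_hw_unmask_irqs(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_threaded_irq(foo->irq, foo_hardirq, foo_thread,
 *				   IRQF_SHARED, "foo", foo);
 */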
1426 
1427 /**
1428  *	request_any_context_irq - allocate an interrupt line
1429  *	@irq: Interrupt line to allocate
1430  *	@handler: Function to be called when the IRQ occurs.
1431  *		  Threaded handler for threaded interrupts.
1432  *	@flags: Interrupt type flags
1433  *	@name: An ascii name for the claiming device
1434  *	@dev_id: A cookie passed back to the handler function
1435  *
1436  *	This call allocates interrupt resources and enables the
1437  *	interrupt line and IRQ handling. It selects either a
1438  *	hardirq or threaded handling method depending on the
1439  *	context.
1440  *
1441  *	On failure, it returns a negative value. On success,
1442  *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
1443  */
1444 int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1445 			    unsigned long flags, const char *name, void *dev_id)
1446 {
1447 	struct irq_desc *desc = irq_to_desc(irq);
1448 	int ret;
1449 
1450 	if (!desc)
1451 		return -EINVAL;
1452 
1453 	if (irq_settings_is_nested_thread(desc)) {
1454 		ret = request_threaded_irq(irq, NULL, handler,
1455 					   flags, name, dev_id);
1456 		return !ret ? IRQC_IS_NESTED : ret;
1457 	}
1458 
1459 	ret = request_irq(irq, handler, flags, name, dev_id);
1460 	return !ret ? IRQC_IS_HARDIRQ : ret;
1461 }
1462 EXPORT_SYMBOL_GPL(request_any_context_irq);
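
/*
 * Usage sketch (hypothetical): handy when the irq may come from a GPIO
 * expander behind a slow bus, in which case the line is nested-threaded
 * and the handler ends up running in thread context. The positive return
 * value tells the caller which variant it got.
 *
 *	ret = request_any_context_irq(gpio_to_irq(foo->gpio), foo_handler,
 *				      IRQF_TRIGGER_FALLING, "foo", foo);
 *	if (ret < 0)
 *		return ret;
 *	foo->nested = (ret == IRQC_IS_NESTED);
 */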
1463 
1464 void enable_percpu_irq(unsigned int irq, unsigned int type)
1465 {
1466 	unsigned int cpu = smp_processor_id();
1467 	unsigned long flags;
1468 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1469 
1470 	if (!desc)
1471 		return;
1472 
1473 	type &= IRQ_TYPE_SENSE_MASK;
1474 	if (type != IRQ_TYPE_NONE) {
1475 		int ret;
1476 
1477 		ret = __irq_set_trigger(desc, irq, type);
1478 
1479 		if (ret) {
1480 			WARN(1, "failed to set type for IRQ%d\n", irq);
1481 			goto out;
1482 		}
1483 	}
1484 
1485 	irq_percpu_enable(desc, cpu);
1486 out:
1487 	irq_put_desc_unlock(desc, flags);
1488 }
1489 
1490 void disable_percpu_irq(unsigned int irq)
1491 {
1492 	unsigned int cpu = smp_processor_id();
1493 	unsigned long flags;
1494 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1495 
1496 	if (!desc)
1497 		return;
1498 
1499 	irq_percpu_disable(desc, cpu);
1500 	irq_put_desc_unlock(desc, flags);
1501 }
1502 
1503 /*
1504  * Internal function to unregister a percpu irqaction.
1505  */
1506 static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1507 {
1508 	struct irq_desc *desc = irq_to_desc(irq);
1509 	struct irqaction *action;
1510 	unsigned long flags;
1511 
1512 	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1513 
1514 	if (!desc)
1515 		return NULL;
1516 
1517 	raw_spin_lock_irqsave(&desc->lock, flags);
1518 
1519 	action = desc->action;
1520 	if (!action || action->percpu_dev_id != dev_id) {
1521 		WARN(1, "Trying to free already-free IRQ %d\n", irq);
1522 		goto bad;
1523 	}
1524 
1525 	if (!cpumask_empty(desc->percpu_enabled)) {
1526 		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
1527 		     irq, cpumask_first(desc->percpu_enabled));
1528 		goto bad;
1529 	}
1530 
1531 	/* Found it - now remove it from the list of entries: */
1532 	desc->action = NULL;
1533 
1534 	raw_spin_unlock_irqrestore(&desc->lock, flags);
1535 
1536 	unregister_handler_proc(irq, action);
1537 
1538 	module_put(desc->owner);
1539 	return action;
1540 
1541 bad:
1542 	raw_spin_unlock_irqrestore(&desc->lock, flags);
1543 	return NULL;
1544 }
1545 
1546 /**
1547  *	remove_percpu_irq - free a per-cpu interrupt
1548  *	@irq: Interrupt line to free
1549  *	@act: irqaction for the interrupt
1550  *
1551  * Used to remove interrupts statically setup by the early boot process.
1552  */
1553 void remove_percpu_irq(unsigned int irq, struct irqaction *act)
1554 {
1555 	struct irq_desc *desc = irq_to_desc(irq);
1556 
1557 	if (desc && irq_settings_is_per_cpu_devid(desc))
1558 	    __free_percpu_irq(irq, act->percpu_dev_id);
1559 }
1560 
1561 /**
1562  *	free_percpu_irq - free an interrupt allocated with request_percpu_irq
1563  *	@irq: Interrupt line to free
1564  *	@dev_id: Device identity to free
1565  *
1566  *	Remove a percpu interrupt handler. The handler is removed, but
1567  *	the interrupt line is not disabled; disabling it must be done on
1568  *	each CPU before calling this function. The function does not return
1569  *	until any executing interrupts for this IRQ have completed.
1570  *
1571  *	This function must not be called from interrupt context.
1572  */
1573 void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1574 {
1575 	struct irq_desc *desc = irq_to_desc(irq);
1576 
1577 	if (!desc || !irq_settings_is_per_cpu_devid(desc))
1578 		return;
1579 
1580 	chip_bus_lock(desc);
1581 	kfree(__free_percpu_irq(irq, dev_id));
1582 	chip_bus_sync_unlock(desc);
1583 }
1584 
1585 /**
1586  *	setup_percpu_irq - setup a per-cpu interrupt
1587  *	@irq: Interrupt line to setup
1588  *	@act: irqaction for the interrupt
1589  *
1590  * Used to statically setup per-cpu interrupts in the early boot process.
1591  */
1592 int setup_percpu_irq(unsigned int irq, struct irqaction *act)
1593 {
1594 	struct irq_desc *desc = irq_to_desc(irq);
1595 	int retval;
1596 
1597 	if (!desc || !irq_settings_is_per_cpu_devid(desc))
1598 		return -EINVAL;
1599 	chip_bus_lock(desc);
1600 	retval = __setup_irq(irq, desc, act);
1601 	chip_bus_sync_unlock(desc);
1602 
1603 	return retval;
1604 }
1605 
1606 /**
1607  *	request_percpu_irq - allocate a percpu interrupt line
1608  *	@irq: Interrupt line to allocate
1609  *	@handler: Function to be called when the IRQ occurs.
1610  *	@devname: An ascii name for the claiming device
1611  *	@dev_id: A percpu cookie passed back to the handler function
1612  *
1613  *	This call allocates interrupt resources, but doesn't
1614  *	automatically enable the interrupt. It has to be done on each
1615  *	CPU using enable_percpu_irq().
1616  *
1617  *	Dev_id must be globally unique. It is a per-cpu variable, and
1618  *	the handler gets called with the interrupted CPU's instance of
1619  *	that variable.
1620  */
1621 int request_percpu_irq(unsigned int irq, irq_handler_t handler,
1622 		       const char *devname, void __percpu *dev_id)
1623 {
1624 	struct irqaction *action;
1625 	struct irq_desc *desc;
1626 	int retval;
1627 
1628 	if (!dev_id)
1629 		return -EINVAL;
1630 
1631 	desc = irq_to_desc(irq);
1632 	if (!desc || !irq_settings_can_request(desc) ||
1633 	    !irq_settings_is_per_cpu_devid(desc))
1634 		return -EINVAL;
1635 
1636 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1637 	if (!action)
1638 		return -ENOMEM;
1639 
1640 	action->handler = handler;
1641 	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
1642 	action->name = devname;
1643 	action->percpu_dev_id = dev_id;
1644 
1645 	chip_bus_lock(desc);
1646 	retval = __setup_irq(irq, desc, action);
1647 	chip_bus_sync_unlock(desc);
1648 
1649 	if (retval)
1650 		kfree(action);
1651 
1652 	return retval;
1653 }
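
/*
 * Usage sketch, modelled on local timer drivers (names hypothetical): the
 * cookie is a per-cpu variable and the handler receives this CPU's copy.
 * Since request_percpu_irq() does not auto-enable anything, each CPU must
 * enable its own instance of the line from its own context.
 *
 *	static DEFINE_PER_CPU(struct foo_clock, foo_clocks);
 *
 *	static irqreturn_t foo_tick(int irq, void *dev_id)
 *	{
 *		struct foo_clock *clk = dev_id;	// this CPU's instance
 *
 *		foo_clock_event(clk);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_percpu_irq(foo_irq, foo_tick, "foo-tick", &foo_clocks);
 *
 *	// later, on each CPU (e.g. from CPU-up code):
 *	enable_percpu_irq(foo_irq, IRQ_TYPE_NONE);
 */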
1654