xref: /linux/kernel/irq/chip.c (revision 606d099cdd1080bbb50ea50dc52d98252f8f10a1)
1 /*
2  * linux/kernel/irq/chip.c
3  *
4  * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
5  * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
6  *
7  * This file contains the core interrupt handling code for irq-chip
8  * based architectures.
9  *
10  * Detailed information is available in Documentation/DocBook/genericirq
11  */
12 
13 #include <linux/irq.h>
14 #include <linux/module.h>
15 #include <linux/interrupt.h>
16 #include <linux/kernel_stat.h>
17 
18 #include "internals.h"
19 
20 /**
21  *	dynamic_irq_init - initialize a dynamically allocated irq
22  *	@irq:	irq number to initialize
23  */
24 void dynamic_irq_init(unsigned int irq)
25 {
26 	struct irq_desc *desc;
27 	unsigned long flags;
28 
29 	if (irq >= NR_IRQS) {
30 		printk(KERN_ERR "Trying to initialize invalid IRQ%d\n", irq);
31 		WARN_ON(1);
32 		return;
33 	}
34 
35 	/* Ensure we don't have leftover values from a previous use of this irq */
36 	desc = irq_desc + irq;
37 	spin_lock_irqsave(&desc->lock, flags);
38 	desc->status = IRQ_DISABLED;
39 	desc->chip = &no_irq_chip;
40 	desc->handle_irq = handle_bad_irq;
41 	desc->depth = 1;
42 	desc->handler_data = NULL;
43 	desc->chip_data = NULL;
44 	desc->action = NULL;
45 	desc->irq_count = 0;
46 	desc->irqs_unhandled = 0;
47 #ifdef CONFIG_SMP
48 	desc->affinity = CPU_MASK_ALL;
49 #endif
50 	spin_unlock_irqrestore(&desc->lock, flags);
51 }
52 
53 /**
54  *	dynamic_irq_cleanup - cleanup a dynamically allocated irq
55  *	@irq:	irq number to clean up
56  */
57 void dynamic_irq_cleanup(unsigned int irq)
58 {
59 	struct irq_desc *desc;
60 	unsigned long flags;
61 
62 	if (irq >= NR_IRQS) {
63 		printk(KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq);
64 		WARN_ON(1);
65 		return;
66 	}
67 
68 	desc = irq_desc + irq;
69 	spin_lock_irqsave(&desc->lock, flags);
70 	if (desc->action) {
71 		spin_unlock_irqrestore(&desc->lock, flags);
72 		printk(KERN_ERR "Destroying IRQ%d without calling free_irq\n",
73 			irq);
74 		WARN_ON(1);
75 		return;
76 	}
77 	desc->handle_irq = handle_bad_irq;
78 	desc->chip = &no_irq_chip;
79 	spin_unlock_irqrestore(&desc->lock, flags);
80 }
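
/*
 * Illustrative sketch (not part of this file): arch code that hands out
 * interrupt vectors dynamically (e.g. an MSI vector allocator) is expected
 * to bracket the lifetime of such an irq with these two helpers. The
 * function and vector names below are hypothetical.
 *
 *	unsigned int my_alloc_irq(void)
 *	{
 *		unsigned int irq = my_find_free_vector();
 *
 *		dynamic_irq_init(irq);
 *		return irq;
 *	}
 *
 *	void my_free_irq(unsigned int irq)
 *	{
 *		dynamic_irq_cleanup(irq);
 *		my_release_vector(irq);
 *	}
 */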
81 
82 
83 /**
84  *	set_irq_chip - set the irq chip for an irq
85  *	@irq:	irq number
86  *	@chip:	pointer to irq chip description structure
87  */
88 int set_irq_chip(unsigned int irq, struct irq_chip *chip)
89 {
90 	struct irq_desc *desc;
91 	unsigned long flags;
92 
93 	if (irq >= NR_IRQS) {
94 		printk(KERN_ERR "Trying to install chip for IRQ%d\n", irq);
95 		WARN_ON(1);
96 		return -EINVAL;
97 	}
98 
99 	if (!chip)
100 		chip = &no_irq_chip;
101 
102 	desc = irq_desc + irq;
103 	spin_lock_irqsave(&desc->lock, flags);
104 	irq_chip_set_defaults(chip);
105 	desc->chip = chip;
106 	spin_unlock_irqrestore(&desc->lock, flags);
107 
108 	return 0;
109 }
110 EXPORT_SYMBOL(set_irq_chip);
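
/*
 * Illustrative sketch (not part of this file): an interrupt controller
 * driver attaches its low-level callbacks with set_irq_chip(); passing
 * NULL falls back to no_irq_chip. my_pic_chip is a hypothetical
 * struct irq_chip (see the sketch after irq_chip_set_defaults() below).
 *
 *	set_irq_chip(irq, &my_pic_chip);
 *	...
 *	set_irq_chip(irq, NULL);
 */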
111 
112 /**
113  *	set_irq_type - set the irq type for an irq
114  *	@irq:	irq number
115  *	@type:	interrupt type - see include/linux/interrupt.h
116  */
117 int set_irq_type(unsigned int irq, unsigned int type)
118 {
119 	struct irq_desc *desc;
120 	unsigned long flags;
121 	int ret = -ENXIO;
122 
123 	if (irq >= NR_IRQS) {
124 		printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
125 		return -ENODEV;
126 	}
127 
128 	desc = irq_desc + irq;
129 	if (desc->chip->set_type) {
130 		spin_lock_irqsave(&desc->lock, flags);
131 		ret = desc->chip->set_type(irq, type);
132 		spin_unlock_irqrestore(&desc->lock, flags);
133 	}
134 	return ret;
135 }
136 EXPORT_SYMBOL(set_irq_type);
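
/*
 * Illustrative sketch (not part of this file): board or driver setup code
 * can ask the chip to reprogram the trigger mode, provided the chip
 * implements ->set_type(). The trigger constant name depends on this
 * tree's headers; IRQ_TYPE_EDGE_RISING is assumed here.
 *
 *	if (set_irq_type(irq, IRQ_TYPE_EDGE_RISING))
 *		printk(KERN_WARNING "IRQ%d: cannot set edge trigger\n", irq);
 */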
137 
138 /**
139  *	set_irq_data - set irq handler data for an irq
140  *	@irq:	Interrupt number
141  *	@data:	Pointer to interrupt specific data
142  *
143  *	Set the hardware irq controller data for an irq
144  */
145 int set_irq_data(unsigned int irq, void *data)
146 {
147 	struct irq_desc *desc;
148 	unsigned long flags;
149 
150 	if (irq >= NR_IRQS) {
151 		printk(KERN_ERR
152 		       "Trying to install controller data for IRQ%d\n", irq);
153 		return -EINVAL;
154 	}
155 
156 	desc = irq_desc + irq;
157 	spin_lock_irqsave(&desc->lock, flags);
158 	desc->handler_data = data;
159 	spin_unlock_irqrestore(&desc->lock, flags);
160 	return 0;
161 }
162 EXPORT_SYMBOL(set_irq_data);
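
/*
 * Illustrative sketch (not part of this file): handler_data is a cookie
 * for the flow handler / demultiplexer rather than for the chip. A
 * hypothetical cascade setup could stash its per-bank state here and
 * fetch it back through desc->handler_data in its chained handler.
 *
 *	set_irq_data(parent_irq, &my_bank_state);
 *
 *	and later, inside the chained flow handler:
 *
 *	struct my_bank_state *st = desc->handler_data;
 */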
163 
164 /**
165  *	set_irq_chip_data - set irq chip data for an irq
166  *	@irq:	Interrupt number
167  *	@data:	Pointer to chip specific data
168  *
169  *	Set the hardware irq chip data for an irq
170  */
171 int set_irq_chip_data(unsigned int irq, void *data)
172 {
173 	struct irq_desc *desc = irq_desc + irq;
174 	unsigned long flags;
175 
176 	if (irq >= NR_IRQS || !desc->chip) {
177 		printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq);
178 		return -EINVAL;
179 	}
180 
181 	spin_lock_irqsave(&desc->lock, flags);
182 	desc->chip_data = data;
183 	spin_unlock_irqrestore(&desc->lock, flags);
184 
185 	return 0;
186 }
187 EXPORT_SYMBOL(set_irq_chip_data);
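
/*
 * Illustrative sketch (not part of this file): chip_data is the chip
 * driver's own per-irq cookie, e.g. a register base or an MSI descriptor.
 * The names and registers below are hypothetical.
 *
 *	set_irq_chip_data(irq, my_regs_base);
 *
 *	static void my_pic_mask(unsigned int irq)
 *	{
 *		void __iomem *base = irq_desc[irq].chip_data;
 *
 *		writel(1 << (irq - MY_IRQ_BASE), base + MY_MASK_REG);
 *	}
 */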
188 
189 /*
190  * default enable function
191  */
192 static void default_enable(unsigned int irq)
193 {
194 	struct irq_desc *desc = irq_desc + irq;
195 
196 	desc->chip->unmask(irq);
197 	desc->status &= ~IRQ_MASKED;
198 }
199 
200 /*
201  * default disable function
202  */
203 static void default_disable(unsigned int irq)
204 {
205 	struct irq_desc *desc = irq_desc + irq;
206 
207 	if (!(desc->status & IRQ_DELAYED_DISABLE))
208 		desc->chip->mask(irq);
209 }
210 
211 /*
212  * default startup function
213  */
214 static unsigned int default_startup(unsigned int irq)
215 {
216 	irq_desc[irq].chip->enable(irq);
217 
218 	return 0;
219 }
220 
221 /*
222  * Fixup enable/disable function pointers
223  */
224 void irq_chip_set_defaults(struct irq_chip *chip)
225 {
226 	if (!chip->enable)
227 		chip->enable = default_enable;
228 	if (!chip->disable)
229 		chip->disable = default_disable;
230 	if (!chip->startup)
231 		chip->startup = default_startup;
232 	if (!chip->shutdown)
233 		chip->shutdown = chip->disable;
234 	if (!chip->name)
235 		chip->name = chip->typename;
236 	if (!chip->end)
237 		chip->end = dummy_irq_chip.end;
238 }
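
/*
 * Illustrative sketch (not part of this file): because of the defaults
 * above, a minimal chip only has to provide the callbacks its hardware
 * really needs; enable/disable/startup/shutdown/end are filled in when
 * set_irq_chip() calls irq_chip_set_defaults(). The chip below is
 * hypothetical.
 *
 *	static struct irq_chip my_pic_chip = {
 *		.name	= "MY-PIC",
 *		.ack	= my_pic_ack,
 *		.mask	= my_pic_mask,
 *		.unmask	= my_pic_unmask,
 *	};
 */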
239 
240 static inline void mask_ack_irq(struct irq_desc *desc, int irq)
241 {
242 	if (desc->chip->mask_ack)
243 		desc->chip->mask_ack(irq);
244 	else {
245 		desc->chip->mask(irq);
246 		desc->chip->ack(irq);
247 	}
248 }
249 
250 /**
251  *	handle_simple_irq - Simple and software-decoded IRQs.
252  *	@irq:	the interrupt number
253  *	@desc:	the interrupt description structure for this irq
254  *
255  *	Simple interrupts are either sent from a demultiplexing interrupt
256  *	handler or come from hardware, where no interrupt hardware control
257  *	is necessary.
258  *
259  *	Note: The caller is expected to handle the ack, clear, mask and
260  *	unmask issues if necessary.
261  */
262 void fastcall
263 handle_simple_irq(unsigned int irq, struct irq_desc *desc)
264 {
265 	struct irqaction *action;
266 	irqreturn_t action_ret;
267 	const unsigned int cpu = smp_processor_id();
268 
269 	spin_lock(&desc->lock);
270 
271 	if (unlikely(desc->status & IRQ_INPROGRESS))
272 		goto out_unlock;
273 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
274 	kstat_cpu(cpu).irqs[irq]++;
275 
276 	action = desc->action;
277 	if (unlikely(!action || (desc->status & IRQ_DISABLED)))
278 		goto out_unlock;
279 
280 	desc->status |= IRQ_INPROGRESS;
281 	spin_unlock(&desc->lock);
282 
283 	action_ret = handle_IRQ_event(irq, action);
284 	if (!noirqdebug)
285 		note_interrupt(irq, desc, action_ret);
286 
287 	spin_lock(&desc->lock);
288 	desc->status &= ~IRQ_INPROGRESS;
289 out_unlock:
290 	spin_unlock(&desc->lock);
291 }
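
/*
 * Illustrative sketch (not part of this file): a demultiplexing parent
 * flow handler typically decodes its status register and kicks the
 * per-bit sub-interrupts, whose descriptors use handle_simple_irq because
 * the parent already did the hardware work (any ack/eoi the parent chip
 * needs is omitted here). Names and registers are hypothetical.
 *
 *	static void my_demux_handler(unsigned int irq, struct irq_desc *desc)
 *	{
 *		unsigned int pending = readl(MY_SUBIRQ_STATUS);
 *
 *		while (pending) {
 *			unsigned int sub = MY_SUBIRQ_BASE + __ffs(pending);
 *			struct irq_desc *d = irq_desc + sub;
 *
 *			d->handle_irq(sub, d);
 *			pending &= pending - 1;
 *		}
 *	}
 */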
292 
293 /**
294  *	handle_level_irq - Level type irq handler
295  *	@irq:	the interrupt number
296  *	@desc:	the interrupt description structure for this irq
297  *
298  *	Level type interrupts are active as long as the hardware line has
299  *	the active level. This may require masking the interrupt and unmasking
300  *	it after the associated handler has acknowledged the device, so that
301  *	the interrupt line goes back to inactive.
302  */
303 void fastcall
304 handle_level_irq(unsigned int irq, struct irq_desc *desc)
305 {
306 	unsigned int cpu = smp_processor_id();
307 	struct irqaction *action;
308 	irqreturn_t action_ret;
309 
310 	spin_lock(&desc->lock);
311 	mask_ack_irq(desc, irq);
312 
313 	if (unlikely(desc->status & IRQ_INPROGRESS))
314 		goto out_unlock;
315 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
316 	kstat_cpu(cpu).irqs[irq]++;
317 
318 	/*
319 	 * If it's disabled or no action is available,
320 	 * keep it masked and get out of here.
321 	 */
322 	action = desc->action;
323 	if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
324 		desc->status |= IRQ_PENDING;
325 		goto out_unlock;
326 	}
327 
328 	desc->status |= IRQ_INPROGRESS;
329 	desc->status &= ~IRQ_PENDING;
330 	spin_unlock(&desc->lock);
331 
332 	action_ret = handle_IRQ_event(irq, action);
333 	if (!noirqdebug)
334 		note_interrupt(irq, desc, action_ret);
335 
336 	spin_lock(&desc->lock);
337 	desc->status &= ~IRQ_INPROGRESS;
338 	if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
339 		desc->chip->unmask(irq);
340 out_unlock:
341 	spin_unlock(&desc->lock);
342 }
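
/*
 * Illustrative sketch (not part of this file): a level-triggered line is
 * normally wired up with this flow handler at chip setup time; the chip
 * must supply at least ->mask()/->unmask() (or ->mask_ack()) for the flow
 * above to work. my_pic_chip is hypothetical.
 *
 *	set_irq_chip_and_handler(irq, &my_pic_chip, handle_level_irq);
 */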
343 
344 /**
345  *	handle_fasteoi_irq - irq handler for transparent controllers
346  *	@irq:	the interrupt number
347  *	@desc:	the interrupt description structure for this irq
348  *
349  *	Only a single callback will be issued to the chip: an ->eoi()
350  *	call when the interrupt has been serviced. This enables support
351  *	for modern forms of interrupt handlers, which handle the flow
352  *	details in hardware, transparently.
353  */
354 void fastcall
355 handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
356 {
357 	unsigned int cpu = smp_processor_id();
358 	struct irqaction *action;
359 	irqreturn_t action_ret;
360 
361 	spin_lock(&desc->lock);
362 
363 	if (unlikely(desc->status & IRQ_INPROGRESS))
364 		goto out;
365 
366 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
367 	kstat_cpu(cpu).irqs[irq]++;
368 
369 	/*
370 	 * If it's disabled or no action is available,
371 	 * keep it masked and get out of here.
372 	 */
373 	action = desc->action;
374 	if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
375 		desc->status |= IRQ_PENDING;
376 		goto out;
377 	}
378 
379 	desc->status |= IRQ_INPROGRESS;
380 	desc->status &= ~IRQ_PENDING;
381 	spin_unlock(&desc->lock);
382 
383 	action_ret = handle_IRQ_event(irq, action);
384 	if (!noirqdebug)
385 		note_interrupt(irq, desc, action_ret);
386 
387 	spin_lock(&desc->lock);
388 	desc->status &= ~IRQ_INPROGRESS;
389 out:
390 	desc->chip->eoi(irq);
391 
392 	spin_unlock(&desc->lock);
393 }
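
/*
 * Illustrative sketch (not part of this file): fasteoi suits controllers
 * that manage the flow in hardware and only want a single end-of-interrupt
 * notification, so the chip provides ->eoi() (plus mask/unmask for the
 * disabled case). The chip and its callbacks are hypothetical.
 *
 *	static struct irq_chip my_eoi_chip = {
 *		.name	= "MY-EOI",
 *		.mask	= my_mask,
 *		.unmask	= my_unmask,
 *		.eoi	= my_eoi,
 *	};
 *
 *	set_irq_chip_and_handler(irq, &my_eoi_chip, handle_fasteoi_irq);
 */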
394 
395 /**
396  *	handle_edge_irq - edge type IRQ handler
397  *	@irq:	the interrupt number
398  *	@desc:	the interrupt description structure for this irq
399  *
400  *	The interrupt occurs on the falling and/or rising edge of a hardware
401  *	signal. The occurrence is latched into the irq controller hardware
402  *	and must be acked in order to be re-enabled. After the ack another
403  *	interrupt can happen on the same source even before the first one
404  *	is handled by the associated event handler. If this happens it
405  *	might be necessary to disable (mask) the interrupt, depending on the
406  *	controller hardware. This requires re-enabling the interrupt inside
407  *	the loop which handles the interrupts which have arrived while
408  *	the handler was running. If all pending interrupts are handled, the
409  *	loop is left.
410  */
411 void fastcall
412 handle_edge_irq(unsigned int irq, struct irq_desc *desc)
413 {
414 	const unsigned int cpu = smp_processor_id();
415 
416 	spin_lock(&desc->lock);
417 
418 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
419 
420 	/*
421 	 * If we're currently running this IRQ, or it's disabled,
422 	 * we shouldn't process the IRQ. Mark it pending, handle
423 	 * the necessary masking and bail out.
424 	 */
425 	if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
426 		    !desc->action)) {
427 		desc->status |= (IRQ_PENDING | IRQ_MASKED);
428 		mask_ack_irq(desc, irq);
429 		goto out_unlock;
430 	}
431 
432 	kstat_cpu(cpu).irqs[irq]++;
433 
434 	/* Start handling the irq */
435 	desc->chip->ack(irq);
436 
437 	/* Mark the IRQ currently in progress. */
438 	desc->status |= IRQ_INPROGRESS;
439 
440 	do {
441 		struct irqaction *action = desc->action;
442 		irqreturn_t action_ret;
443 
444 		if (unlikely(!action)) {
445 			desc->chip->mask(irq);
446 			goto out_unlock;
447 		}
448 
449 		/*
450 		 * When another irq arrived while we were handling
451 		 * one, we could have masked the irq.
452 		 * Re-enable it, if it was not disabled in the meantime.
453 		 */
454 		if (unlikely((desc->status &
455 			       (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
456 			      (IRQ_PENDING | IRQ_MASKED))) {
457 			desc->chip->unmask(irq);
458 			desc->status &= ~IRQ_MASKED;
459 		}
460 
461 		desc->status &= ~IRQ_PENDING;
462 		spin_unlock(&desc->lock);
463 		action_ret = handle_IRQ_event(irq, action);
464 		if (!noirqdebug)
465 			note_interrupt(irq, desc, action_ret);
466 		spin_lock(&desc->lock);
467 
468 	} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
469 
470 	desc->status &= ~IRQ_INPROGRESS;
471 out_unlock:
472 	spin_unlock(&desc->lock);
473 }
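
/*
 * Illustrative sketch (not part of this file): edge handling requires the
 * chip to latch and acknowledge events, so ->ack() (and ->mask()/->unmask()
 * for the replay path) must be provided. my_edge_chip is hypothetical and
 * the trigger constant name depends on this tree's headers.
 *
 *	set_irq_chip_and_handler(irq, &my_edge_chip, handle_edge_irq);
 *	set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
 */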
474 
475 #ifdef CONFIG_SMP
476 /**
477  *	handle_percpu_irq - Per CPU local irq handler
478  *	@irq:	the interrupt number
479  *	@desc:	the interrupt description structure for this irq
480  *
481  *	Per CPU interrupts on SMP machines without locking requirements
482  */
483 void fastcall
484 handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
485 {
486 	irqreturn_t action_ret;
487 
488 	kstat_this_cpu.irqs[irq]++;
489 
490 	if (desc->chip->ack)
491 		desc->chip->ack(irq);
492 
493 	action_ret = handle_IRQ_event(irq, desc->action);
494 	if (!noirqdebug)
495 		note_interrupt(irq, desc, action_ret);
496 
497 	if (desc->chip->eoi)
498 		desc->chip->eoi(irq);
499 }
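
/*
 * Illustrative sketch (not part of this file): per-CPU sources such as
 * local timers or IPIs, which never migrate and need no descriptor
 * locking, use this flow handler. The irq number and chip below are
 * hypothetical.
 *
 *	set_irq_chip_and_handler(MY_LOCAL_TIMER_IRQ, &my_local_chip,
 *				 handle_percpu_irq);
 */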
500 
501 #endif /* CONFIG_SMP */
502 
503 void
504 __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
505 		  const char *name)
506 {
507 	struct irq_desc *desc;
508 	unsigned long flags;
509 
510 	if (irq >= NR_IRQS) {
511 		printk(KERN_ERR
512 		       "Trying to install type control for IRQ%d\n", irq);
513 		return;
514 	}
515 
516 	desc = irq_desc + irq;
517 
518 	if (!handle)
519 		handle = handle_bad_irq;
520 
521 	if (desc->chip == &no_irq_chip) {
522 		printk(KERN_WARNING "Trying to install %sinterrupt handler "
523 		       "for IRQ%d\n", is_chained ? "chained " : " ", irq);
524 		/*
525 		 * Some ARM implementations install a handler for really dumb
526 		 * interrupt hardware without setting an irq_chip. This worked
527 		 * with the ARM no_irq_chip but the check in setup_irq would
528 		 * prevent us from setting up the interrupt at all. Switch it to
529 		 * dummy_irq_chip for easy transition.
530 		 */
531 		desc->chip = &dummy_irq_chip;
532 	}
533 
534 	spin_lock_irqsave(&desc->lock, flags);
535 
536 	/* Uninstall? */
537 	if (handle == handle_bad_irq) {
538 		if (desc->chip != &no_irq_chip) {
539 			desc->chip->mask(irq);
540 			desc->chip->ack(irq);
541 		}
542 		desc->status |= IRQ_DISABLED;
543 		desc->depth = 1;
544 	}
545 	desc->handle_irq = handle;
546 	desc->name = name;
547 
548 	if (handle != handle_bad_irq && is_chained) {
549 		desc->status &= ~IRQ_DISABLED;
550 		desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
551 		desc->depth = 0;
552 		desc->chip->unmask(irq);
553 	}
554 	spin_unlock_irqrestore(&desc->lock, flags);
555 }
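
/*
 * Illustrative sketch (not part of this file): is_chained is used for
 * demux/cascade interrupts that are serviced by a flow handler only and
 * are never requested via request_irq(); such lines are usually installed
 * through the set_irq_chained_handler() wrapper in <linux/irq.h>, which
 * passes is_chained = 1. my_demux_handler is the hypothetical handler
 * sketched after handle_simple_irq() above.
 *
 *	set_irq_chained_handler(parent_irq, my_demux_handler);
 */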
556 
557 void
558 set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
559 			 irq_flow_handler_t handle)
560 {
561 	set_irq_chip(irq, chip);
562 	__set_irq_handler(irq, handle, 0, NULL);
563 }
564 
565 void
566 set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
567 			      irq_flow_handler_t handle, const char *name)
568 {
569 	set_irq_chip(irq, chip);
570 	__set_irq_handler(irq, handle, 0, name);
571 }
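
/*
 * Illustrative sketch (not part of this file): the _name variant
 * additionally records a name for the flow handler in desc->name, which
 * some architectures include in their /proc/interrupts output. A
 * hypothetical driver probe could do:
 *
 *	set_irq_chip_and_handler_name(irq, &my_pic_chip, handle_level_irq,
 *				      "level");
 *	if (request_irq(irq, my_driver_isr, IRQF_SHARED, "mydev", dev))
 *		goto fail;
 *
 * (my_driver_isr is hypothetical; IRQF_SHARED and the two-argument
 * irqreturn_t handler signature are assumed to match this kernel
 * generation.)
 */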
572