xref: /linux/kernel/irq/chip.c (revision d39d0ed196aa1685bb24771e92f78633c66ac9cb)
1 /*
2  * linux/kernel/irq/chip.c
3  *
4  * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
5  * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
6  *
7  * This file contains the core interrupt handling code, for irq-chip
8  * based architectures.
9  *
10  * Detailed information is available in Documentation/DocBook/genericirq
11  */
12 
13 #include <linux/irq.h>
14 #include <linux/msi.h>
15 #include <linux/module.h>
16 #include <linux/interrupt.h>
17 #include <linux/kernel_stat.h>
18 
19 #include "internals.h"
20 
21 static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data)
22 {
23 	struct irq_desc *desc;
24 	unsigned long flags;
25 
26 	desc = irq_to_desc(irq);
27 	if (!desc) {
28 		WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq);
29 		return;
30 	}
31 
32 	/* Ensure we don't have left over values from a previous use of this irq */
33 	raw_spin_lock_irqsave(&desc->lock, flags);
34 	desc->status = IRQ_DISABLED;
35 	desc->chip = &no_irq_chip;
36 	desc->handle_irq = handle_bad_irq;
37 	desc->depth = 1;
38 	desc->msi_desc = NULL;
39 	desc->handler_data = NULL;
40 	if (!keep_chip_data)
41 		desc->chip_data = NULL;
42 	desc->action = NULL;
43 	desc->irq_count = 0;
44 	desc->irqs_unhandled = 0;
45 #ifdef CONFIG_SMP
46 	cpumask_setall(desc->affinity);
47 #ifdef CONFIG_GENERIC_PENDING_IRQ
48 	cpumask_clear(desc->pending_mask);
49 #endif
50 #endif
51 	raw_spin_unlock_irqrestore(&desc->lock, flags);
52 }
53 
54 /**
55  *	dynamic_irq_init - initialize a dynamically allocated irq
56  *	@irq:	irq number to initialize
57  */
58 void dynamic_irq_init(unsigned int irq)
59 {
60 	dynamic_irq_init_x(irq, false);
61 }
62 
63 /**
64  *	dynamic_irq_init_keep_chip_data - initialize a dynamically allocated irq
65  *	@irq:	irq number to initialize
66  *
67  *	does not set irq_to_desc(irq)->chip_data to NULL
68  */
69 void dynamic_irq_init_keep_chip_data(unsigned int irq)
70 {
71 	dynamic_irq_init_x(irq, true);
72 }
73 
74 static void dynamic_irq_cleanup_x(unsigned int irq, bool keep_chip_data)
75 {
76 	struct irq_desc *desc = irq_to_desc(irq);
77 	unsigned long flags;
78 
79 	if (!desc) {
80 		WARN(1, KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq);
81 		return;
82 	}
83 
84 	raw_spin_lock_irqsave(&desc->lock, flags);
85 	if (desc->action) {
86 		raw_spin_unlock_irqrestore(&desc->lock, flags);
87 		WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n",
88 			irq);
89 		return;
90 	}
91 	desc->msi_desc = NULL;
92 	desc->handler_data = NULL;
93 	if (!keep_chip_data)
94 		desc->chip_data = NULL;
95 	desc->handle_irq = handle_bad_irq;
96 	desc->chip = &no_irq_chip;
97 	desc->name = NULL;
98 	clear_kstat_irqs(desc);
99 	raw_spin_unlock_irqrestore(&desc->lock, flags);
100 }
101 
102 /**
103  *	dynamic_irq_cleanup - cleanup a dynamically allocated irq
104  *	@irq:	irq number to clean up
105  */
106 void dynamic_irq_cleanup(unsigned int irq)
107 {
108 	dynamic_irq_cleanup_x(irq, false);
109 }
110 
111 /**
112  *	dynamic_irq_cleanup_keep_chip_data - cleanup a dynamically allocated irq
113  *	@irq:	irq number to clean up
114  *
115  *	does not set irq_to_desc(irq)->chip_data to NULL
116  */
117 void dynamic_irq_cleanup_keep_chip_data(unsigned int irq)
118 {
119 	dynamic_irq_cleanup_x(irq, true);
120 }
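
/*
 * Example (illustrative sketch, not taken from this file): how a dynamic
 * irq allocator typically pairs these helpers.  my_alloc_irq(),
 * my_free_irq() and find_free_irq_number() are hypothetical; on x86 the
 * real callers are create_irq() and destroy_irq().
 */
static int my_alloc_irq(void)
{
	int irq = find_free_irq_number();	/* hypothetical allocator */

	if (irq < 0)
		return -ENOSPC;
	/* wipe any stale descriptor state before handing the irq out */
	dynamic_irq_init(irq);
	return irq;
}

static void my_free_irq(unsigned int irq)
{
	/* reverse of the above, after free_irq() has been called */
	dynamic_irq_cleanup(irq);
}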
121 
122 
123 /**
124  *	set_irq_chip - set the irq chip for an irq
125  *	@irq:	irq number
126  *	@chip:	pointer to irq chip description structure
127  */
128 int set_irq_chip(unsigned int irq, struct irq_chip *chip)
129 {
130 	struct irq_desc *desc = irq_to_desc(irq);
131 	unsigned long flags;
132 
133 	if (!desc) {
134 		WARN(1, KERN_ERR "Trying to install chip for IRQ%d\n", irq);
135 		return -EINVAL;
136 	}
137 
138 	if (!chip)
139 		chip = &no_irq_chip;
140 
141 	raw_spin_lock_irqsave(&desc->lock, flags);
142 	irq_chip_set_defaults(chip);
143 	desc->chip = chip;
144 	raw_spin_unlock_irqrestore(&desc->lock, flags);
145 
146 	return 0;
147 }
148 EXPORT_SYMBOL(set_irq_chip);
149 
150 /**
151  *	set_irq_type - set the irq trigger type for an irq
152  *	@irq:	irq number
153  *	@type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
154  */
155 int set_irq_type(unsigned int irq, unsigned int type)
156 {
157 	struct irq_desc *desc = irq_to_desc(irq);
158 	unsigned long flags;
159 	int ret = -ENXIO;
160 
161 	if (!desc) {
162 		printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
163 		return -ENODEV;
164 	}
165 
166 	type &= IRQ_TYPE_SENSE_MASK;
167 	if (type == IRQ_TYPE_NONE)
168 		return 0;
169 
170 	raw_spin_lock_irqsave(&desc->lock, flags);
171 	ret = __irq_set_trigger(desc, irq, type);
172 	raw_spin_unlock_irqrestore(&desc->lock, flags);
173 	return ret;
174 }
175 EXPORT_SYMBOL(set_irq_type);
176 
177 /**
178  *	set_irq_data - set irq handler data for an irq
179  *	@irq:	Interrupt number
180  *	@data:	Pointer to interrupt specific data
181  *
182  *	Set the per-irq handler data for an irq
183  */
184 int set_irq_data(unsigned int irq, void *data)
185 {
186 	struct irq_desc *desc = irq_to_desc(irq);
187 	unsigned long flags;
188 
189 	if (!desc) {
190 		printk(KERN_ERR
191 		       "Trying to install controller data for IRQ%d\n", irq);
192 		return -EINVAL;
193 	}
194 
195 	raw_spin_lock_irqsave(&desc->lock, flags);
196 	desc->handler_data = data;
197 	raw_spin_unlock_irqrestore(&desc->lock, flags);
198 	return 0;
199 }
200 EXPORT_SYMBOL(set_irq_data);
201 
202 /**
203  *	set_irq_msi - set MSI descriptor data for an irq
204  *	@irq:	Interrupt number
205  *	@entry:	Pointer to MSI descriptor data
206  *
207  *	Set the MSI descriptor entry for an irq
208  */
209 int set_irq_msi(unsigned int irq, struct msi_desc *entry)
210 {
211 	struct irq_desc *desc = irq_to_desc(irq);
212 	unsigned long flags;
213 
214 	if (!desc) {
215 		printk(KERN_ERR
216 		       "Trying to install msi data for IRQ%d\n", irq);
217 		return -EINVAL;
218 	}
219 
220 	raw_spin_lock_irqsave(&desc->lock, flags);
221 	desc->msi_desc = entry;
222 	if (entry)
223 		entry->irq = irq;
224 	raw_spin_unlock_irqrestore(&desc->lock, flags);
225 	return 0;
226 }
227 
228 /**
229  *	set_irq_chip_data - set irq chip data for an irq
230  *	@irq:	Interrupt number
231  *	@data:	Pointer to chip specific data
232  *
233  *	Set the hardware irq chip data for an irq
234  */
235 int set_irq_chip_data(unsigned int irq, void *data)
236 {
237 	struct irq_desc *desc = irq_to_desc(irq);
238 	unsigned long flags;
239 
240 	if (!desc) {
241 		printk(KERN_ERR
242 		       "Trying to install chip data for IRQ%d\n", irq);
243 		return -EINVAL;
244 	}
245 
246 	if (!desc->chip) {
247 		printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq);
248 		return -EINVAL;
249 	}
250 
251 	raw_spin_lock_irqsave(&desc->lock, flags);
252 	desc->chip_data = data;
253 	raw_spin_unlock_irqrestore(&desc->lock, flags);
254 
255 	return 0;
256 }
257 EXPORT_SYMBOL(set_irq_chip_data);
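
/*
 * Example (illustrative sketch, not taken from this file): chip_data is
 * meant for the irq_chip implementation itself.  A chip callback usually
 * reads it back with get_irq_chip_data() from <linux/irq.h>.  struct
 * my_ctrl and the register offset are hypothetical.
 */
struct my_ctrl {
	void __iomem	*regs;
	unsigned int	hwirq;
};

static void my_ctrl_mask_irq(unsigned int irq)
{
	struct my_ctrl *ctrl = get_irq_chip_data(irq);

	writel(1 << ctrl->hwirq, ctrl->regs + 0x10);	/* hypothetical mask register */
}

/* At setup time, before the line can fire:
 *	set_irq_chip_data(irq, ctrl);
 */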
258 
259 /**
260  *	set_irq_nested_thread - Set/Reset the IRQ_NESTED_THREAD flag of an irq
261  *
262  *	@irq:	Interrupt number
263  *	@nest:	0 to clear / 1 to set the IRQ_NESTED_THREAD flag
264  *
265  *	The IRQ_NESTED_THREAD flag indicates that on
266  *	request_threaded_irq() no separate interrupt thread should be
267  *	created for the irq as the handler are called nested in the
268  *	created for the irq as the handlers are called nested in the
269  */
270 void set_irq_nested_thread(unsigned int irq, int nest)
271 {
272 	struct irq_desc *desc = irq_to_desc(irq);
273 	unsigned long flags;
274 
275 	if (!desc)
276 		return;
277 
278 	raw_spin_lock_irqsave(&desc->lock, flags);
279 	if (nest)
280 		desc->status |= IRQ_NESTED_THREAD;
281 	else
282 		desc->status &= ~IRQ_NESTED_THREAD;
283 	raw_spin_unlock_irqrestore(&desc->lock, flags);
284 }
285 EXPORT_SYMBOL_GPL(set_irq_nested_thread);
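
/*
 * Example (illustrative sketch, not taken from this file): the typical
 * user is an interrupt controller behind a slow bus, e.g. an I2C GPIO
 * expander.  Its own demultiplexer must run in a thread, so the child
 * interrupts get no thread of their own and are flagged as nested.
 * my_expander_irq_chip, irq_base and NR_EXPANDER_IRQS are hypothetical.
 */
static void my_expander_setup_irqs(unsigned int irq_base)
{
	unsigned int i;

	for (i = 0; i < NR_EXPANDER_IRQS; i++) {
		set_irq_chip_and_handler(irq_base + i, &my_expander_irq_chip,
					 handle_simple_irq);
		set_irq_nested_thread(irq_base + i, 1);
	}
}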
286 
287 /*
288  * default enable function
289  */
290 static void default_enable(unsigned int irq)
291 {
292 	struct irq_desc *desc = irq_to_desc(irq);
293 
294 	desc->chip->unmask(irq);
295 	desc->status &= ~IRQ_MASKED;
296 }
297 
298 /*
299  * default disable function
300  */
301 static void default_disable(unsigned int irq)
302 {
303 }
304 
305 /*
306  * default startup function
307  */
308 static unsigned int default_startup(unsigned int irq)
309 {
310 	struct irq_desc *desc = irq_to_desc(irq);
311 
312 	desc->chip->enable(irq);
313 	return 0;
314 }
315 
316 /*
317  * default shutdown function
318  */
319 static void default_shutdown(unsigned int irq)
320 {
321 	struct irq_desc *desc = irq_to_desc(irq);
322 
323 	desc->chip->mask(irq);
324 	desc->status |= IRQ_MASKED;
325 }
326 
327 /*
328  * Fixup enable/disable function pointers
329  */
330 void irq_chip_set_defaults(struct irq_chip *chip)
331 {
332 	if (!chip->enable)
333 		chip->enable = default_enable;
334 	if (!chip->disable)
335 		chip->disable = default_disable;
336 	if (!chip->startup)
337 		chip->startup = default_startup;
338 	/*
339 	 * Use chip->disable as the shutdown function when the user
340 	 * provided one. If chip->disable is still default_disable we
341 	 * need to fall back to default_shutdown, otherwise the irq
342 	 * line is not disabled on free_irq():
343 	 */
344 	if (!chip->shutdown)
345 		chip->shutdown = chip->disable != default_disable ?
346 			chip->disable : default_shutdown;
347 	if (!chip->name)
348 		chip->name = chip->typename;
349 	if (!chip->end)
350 		chip->end = dummy_irq_chip.end;
351 }
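
/*
 * Example (illustrative sketch, not taken from this file): a minimal chip
 * that relies on the defaults above.  After set_irq_chip(irq, &my_minimal_chip)
 * the fixup fills in:
 *	enable   -> default_enable   (unmask)
 *	disable  -> default_disable  (no-op)
 *	startup  -> default_startup  (calls ->enable)
 *	shutdown -> default_shutdown (mask), since no ->disable was provided
 * The my_hw_*() stubs stand in for hypothetical register accesses.
 */
static void my_hw_mask(unsigned int irq)	{ /* set hypothetical mask bit */ }
static void my_hw_unmask(unsigned int irq)	{ /* clear hypothetical mask bit */ }
static void my_hw_ack(unsigned int irq)		{ /* clear hypothetical latch bit */ }

static struct irq_chip my_minimal_chip = {
	.name	= "my-minimal",
	.mask	= my_hw_mask,
	.unmask	= my_hw_unmask,
	.ack	= my_hw_ack,
};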
352 
353 static inline void mask_ack_irq(struct irq_desc *desc, int irq)
354 {
355 	if (desc->chip->mask_ack)
356 		desc->chip->mask_ack(irq);
357 	else {
358 		desc->chip->mask(irq);
359 		if (desc->chip->ack)
360 			desc->chip->ack(irq);
361 	}
362 	desc->status |= IRQ_MASKED;
363 }
364 
365 static inline void mask_irq(struct irq_desc *desc, int irq)
366 {
367 	if (desc->chip->mask) {
368 		desc->chip->mask(irq);
369 		desc->status |= IRQ_MASKED;
370 	}
371 }
372 
373 static inline void unmask_irq(struct irq_desc *desc, int irq)
374 {
375 	if (desc->chip->unmask) {
376 		desc->chip->unmask(irq);
377 		desc->status &= ~IRQ_MASKED;
378 	}
379 }
380 
381 /*
382  *	handle_nested_irq - Handle a nested irq from a irq thread
383  *	@irq:	the interrupt number
384  *
385  *	Handle interrupts which are nested into a threaded interrupt
386  *	handler. The handler function is called inside the calling
387  *	thread's context.
388  */
389 void handle_nested_irq(unsigned int irq)
390 {
391 	struct irq_desc *desc = irq_to_desc(irq);
392 	struct irqaction *action;
393 	irqreturn_t action_ret;
394 
395 	might_sleep();
396 
397 	raw_spin_lock_irq(&desc->lock);
398 
399 	kstat_incr_irqs_this_cpu(irq, desc);
400 
401 	action = desc->action;
402 	if (unlikely(!action || (desc->status & IRQ_DISABLED)))
403 		goto out_unlock;
404 
405 	desc->status |= IRQ_INPROGRESS;
406 	raw_spin_unlock_irq(&desc->lock);
407 
408 	action_ret = action->thread_fn(action->irq, action->dev_id);
409 	if (!noirqdebug)
410 		note_interrupt(irq, desc, action_ret);
411 
412 	raw_spin_lock_irq(&desc->lock);
413 	desc->status &= ~IRQ_INPROGRESS;
414 
415 out_unlock:
416 	raw_spin_unlock_irq(&desc->lock);
417 }
418 EXPORT_SYMBOL_GPL(handle_nested_irq);
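
/*
 * Example (illustrative sketch, not taken from this file): the counterpart
 * to set_irq_nested_thread().  The parent's threaded handler reads the
 * expander's status over the slow bus (sleeping is fine here, hence the
 * might_sleep() above) and calls handle_nested_irq() for each pending
 * child.  struct my_expander and my_expander_read_pending() are
 * hypothetical.
 */
struct my_expander {
	unsigned int	irq_base;
	/* bus handle, register cache, ... */
};

static irqreturn_t my_expander_thread_fn(int irq, void *dev_id)
{
	struct my_expander *chip = dev_id;
	unsigned long pending = my_expander_read_pending(chip);	/* may sleep */
	int bit;

	for_each_set_bit(bit, &pending, NR_EXPANDER_IRQS)
		handle_nested_irq(chip->irq_base + bit);

	return IRQ_HANDLED;
}

/* Registered with:
 *	request_threaded_irq(parent_irq, NULL, my_expander_thread_fn,
 *			     IRQF_ONESHOT, "my-expander", chip);
 */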
419 
420 /**
421  *	handle_simple_irq - Simple and software-decoded IRQs.
422  *	@irq:	the interrupt number
423  *	@desc:	the interrupt description structure for this irq
424  *
425  *	Simple interrupts are either sent from a demultiplexing interrupt
426  *	handler or come from hardware, where no interrupt hardware control
427  *	is necessary.
428  *
429  *	Note: The caller is expected to handle the ack, clear, mask and
430  *	unmask issues if necessary.
431  */
432 void
433 handle_simple_irq(unsigned int irq, struct irq_desc *desc)
434 {
435 	struct irqaction *action;
436 	irqreturn_t action_ret;
437 
438 	raw_spin_lock(&desc->lock);
439 
440 	if (unlikely(desc->status & IRQ_INPROGRESS))
441 		goto out_unlock;
442 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
443 	kstat_incr_irqs_this_cpu(irq, desc);
444 
445 	action = desc->action;
446 	if (unlikely(!action || (desc->status & IRQ_DISABLED)))
447 		goto out_unlock;
448 
449 	desc->status |= IRQ_INPROGRESS;
450 	raw_spin_unlock(&desc->lock);
451 
452 	action_ret = handle_IRQ_event(irq, action);
453 	if (!noirqdebug)
454 		note_interrupt(irq, desc, action_ret);
455 
456 	raw_spin_lock(&desc->lock);
457 	desc->status &= ~IRQ_INPROGRESS;
458 out_unlock:
459 	raw_spin_unlock(&desc->lock);
460 }
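
/*
 * Example (illustrative sketch, not taken from this file): handle_simple_irq
 * suits sub-interrupts that a parent handler decodes purely in software.
 * Such children have no mask/ack hardware of their own, so dummy_irq_chip
 * (no-op hardware hooks) is good enough.  child_irq_base and NR_CHILD_IRQS
 * are hypothetical.
 */
static void my_setup_sub_irqs(unsigned int child_irq_base)
{
	unsigned int i;

	for (i = 0; i < NR_CHILD_IRQS; i++)
		set_irq_chip_and_handler(child_irq_base + i, &dummy_irq_chip,
					 handle_simple_irq);
}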
461 
462 /**
463  *	handle_level_irq - Level type irq handler
464  *	@irq:	the interrupt number
465  *	@desc:	the interrupt description structure for this irq
466  *
467  *	Level type interrupts are active as long as the hardware line has
468  *	the active level. This may require masking the interrupt and
469  *	unmasking it after the associated handler has acknowledged the
470  *	device, so that the interrupt line goes back to inactive.
471  */
472 void
473 handle_level_irq(unsigned int irq, struct irq_desc *desc)
474 {
475 	struct irqaction *action;
476 	irqreturn_t action_ret;
477 
478 	raw_spin_lock(&desc->lock);
479 	mask_ack_irq(desc, irq);
480 
481 	if (unlikely(desc->status & IRQ_INPROGRESS))
482 		goto out_unlock;
483 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
484 	kstat_incr_irqs_this_cpu(irq, desc);
485 
486 	/*
487 	 * If it's disabled or no action is available,
488 	 * keep it masked and get out of here
489 	 */
490 	action = desc->action;
491 	if (unlikely(!action || (desc->status & IRQ_DISABLED)))
492 		goto out_unlock;
493 
494 	desc->status |= IRQ_INPROGRESS;
495 	raw_spin_unlock(&desc->lock);
496 
497 	action_ret = handle_IRQ_event(irq, action);
498 	if (!noirqdebug)
499 		note_interrupt(irq, desc, action_ret);
500 
501 	raw_spin_lock(&desc->lock);
502 	desc->status &= ~IRQ_INPROGRESS;
503 
504 	if (!(desc->status & (IRQ_DISABLED | IRQ_ONESHOT)))
505 		unmask_irq(desc, irq);
506 out_unlock:
507 	raw_spin_unlock(&desc->lock);
508 }
509 EXPORT_SYMBOL_GPL(handle_level_irq);
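
/*
 * Example (illustrative sketch, not taken from this file): wiring up a
 * level-triggered line.  The flow handler masks and acks on entry and only
 * unmasks after the device handler ran, so a still-asserted line cannot
 * storm.  MY_DEVICE_IRQ, my_chip and my_device_isr() are hypothetical.
 */
static void __init my_board_init_level_irq(void)
{
	set_irq_chip_and_handler_name(MY_DEVICE_IRQ, &my_chip,
				      handle_level_irq, "level");
	set_irq_type(MY_DEVICE_IRQ, IRQ_TYPE_LEVEL_HIGH);
}

/* The driver then simply does:
 *	request_irq(MY_DEVICE_IRQ, my_device_isr, 0, "my-device", dev);
 */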
510 
511 /**
512  *	handle_fasteoi_irq - irq handler for transparent controllers
513  *	@irq:	the interrupt number
514  *	@desc:	the interrupt description structure for this irq
515  *
516  *	Only a single callback will be issued to the chip: an ->eoi()
517  *	call when the interrupt has been serviced. This enables support
518  *	for modern forms of interrupt handlers, which handle the flow
519  *	details in hardware, transparently.
520  */
521 void
522 handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
523 {
524 	struct irqaction *action;
525 	irqreturn_t action_ret;
526 
527 	raw_spin_lock(&desc->lock);
528 
529 	if (unlikely(desc->status & IRQ_INPROGRESS))
530 		goto out;
531 
532 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
533 	kstat_incr_irqs_this_cpu(irq, desc);
534 
535 	/*
536 	 * If it's disabled or no action is available,
537 	 * then mask it and get out of here:
538 	 */
539 	action = desc->action;
540 	if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
541 		desc->status |= IRQ_PENDING;
542 		mask_irq(desc, irq);
543 		goto out;
544 	}
545 
546 	desc->status |= IRQ_INPROGRESS;
547 	desc->status &= ~IRQ_PENDING;
548 	raw_spin_unlock(&desc->lock);
549 
550 	action_ret = handle_IRQ_event(irq, action);
551 	if (!noirqdebug)
552 		note_interrupt(irq, desc, action_ret);
553 
554 	raw_spin_lock(&desc->lock);
555 	desc->status &= ~IRQ_INPROGRESS;
556 out:
557 	desc->chip->eoi(irq);
558 
559 	raw_spin_unlock(&desc->lock);
560 }
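
/*
 * Example (illustrative sketch, not taken from this file): a chip for an
 * EOI-driven controller paired with handle_fasteoi_irq.  Only ->eoi() is
 * required by this flow; mask/unmask are still used when the line is
 * disabled.  The my_hw_*() hooks are hypothetical register accesses.
 */
static struct irq_chip my_eoi_chip = {
	.name	= "my-eoi",
	.mask	= my_hw_mask,
	.unmask	= my_hw_unmask,
	.eoi	= my_hw_eoi,
};

/*	set_irq_chip_and_handler(irq, &my_eoi_chip, handle_fasteoi_irq); */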
561 
562 /**
563  *	handle_edge_irq - edge type IRQ handler
564  *	@irq:	the interrupt number
565  *	@desc:	the interrupt description structure for this irq
566  *
567  *	The interrupt occurs on the falling and/or rising edge of a hardware
568  *	signal. The occurrence is latched into the irq controller hardware
569  *	and must be acked in order to be re-enabled. After the ack another
570  *	interrupt can happen on the same source even before the first one
571  *	is handled by the associated event handler. If this happens it
572  *	might be necessary to disable (mask) the interrupt, depending on the
573  *	controller hardware. This requires re-enabling the interrupt inside
574  *	the loop which handles the interrupts that have arrived while
575  *	the handler was running. When all pending interrupts are handled,
576  *	the loop is left.
577  */
578 void
579 handle_edge_irq(unsigned int irq, struct irq_desc *desc)
580 {
581 	raw_spin_lock(&desc->lock);
582 
583 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
584 
585 	/*
586 	 * If we're currently running this IRQ, or it's disabled,
587 	 * we shouldn't process the IRQ. Mark it pending, handle
588 	 * the necessary masking and go out
589 	 */
590 	if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
591 		    !desc->action)) {
592 		desc->status |= (IRQ_PENDING | IRQ_MASKED);
593 		mask_ack_irq(desc, irq);
594 		goto out_unlock;
595 	}
596 	kstat_incr_irqs_this_cpu(irq, desc);
597 
598 	/* Start handling the irq */
599 	if (desc->chip->ack)
600 		desc->chip->ack(irq);
601 
602 	/* Mark the IRQ currently in progress. */
603 	desc->status |= IRQ_INPROGRESS;
604 
605 	do {
606 		struct irqaction *action = desc->action;
607 		irqreturn_t action_ret;
608 
609 		if (unlikely(!action)) {
610 			mask_irq(desc, irq);
611 			goto out_unlock;
612 		}
613 
614 		/*
615 		 * When another irq arrived while we were handling
616 		 * one, we could have masked the irq.
617 		 * Re-enable it, if it was not disabled in the meantime.
618 		 */
619 		if (unlikely((desc->status &
620 			       (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
621 			      (IRQ_PENDING | IRQ_MASKED))) {
622 			unmask_irq(desc, irq);
623 		}
624 
625 		desc->status &= ~IRQ_PENDING;
626 		raw_spin_unlock(&desc->lock);
627 		action_ret = handle_IRQ_event(irq, action);
628 		if (!noirqdebug)
629 			note_interrupt(irq, desc, action_ret);
630 		raw_spin_lock(&desc->lock);
631 
632 	} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
633 
634 	desc->status &= ~IRQ_INPROGRESS;
635 out_unlock:
636 	raw_spin_unlock(&desc->lock);
637 }
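
/*
 * Example (illustrative sketch, not taken from this file): wiring up an
 * edge-triggered line.  The chip must provide ->ack() so the latch in the
 * controller is rearmed before the handler runs (see the flow above).
 * MY_EDGE_IRQ and my_edge_chip are hypothetical.
 */
static void __init my_board_init_edge_irq(void)
{
	set_irq_chip_and_handler(MY_EDGE_IRQ, &my_edge_chip, handle_edge_irq);
	set_irq_type(MY_EDGE_IRQ, IRQ_TYPE_EDGE_RISING);
}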
638 
639 /**
640  *	handle_percpu_irq - Per CPU local irq handler
641  *	@irq:	the interrupt number
642  *	@desc:	the interrupt description structure for this irq
643  *
644  *	Per CPU interrupts on SMP machines without locking requirements
645  */
646 void
647 handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
648 {
649 	irqreturn_t action_ret;
650 
651 	kstat_incr_irqs_this_cpu(irq, desc);
652 
653 	if (desc->chip->ack)
654 		desc->chip->ack(irq);
655 
656 	action_ret = handle_IRQ_event(irq, desc->action);
657 	if (!noirqdebug)
658 		note_interrupt(irq, desc, action_ret);
659 
660 	if (desc->chip->eoi)
661 		desc->chip->eoi(irq);
662 }
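
/*
 * Example (illustrative sketch, not taken from this file): a per-CPU
 * source such as a local timer.  This flow takes no descriptor lock, so
 * the line must only ever fire on the CPU it belongs to.
 * MY_LOCALTIMER_IRQ and my_percpu_chip are hypothetical.
 */
static void __init my_init_localtimer_irq(void)
{
	set_irq_chip_and_handler(MY_LOCALTIMER_IRQ, &my_percpu_chip,
				 handle_percpu_irq);
}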
663 
664 void
665 __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
666 		  const char *name)
667 {
668 	struct irq_desc *desc = irq_to_desc(irq);
669 	unsigned long flags;
670 
671 	if (!desc) {
672 		printk(KERN_ERR
673 		       "Trying to install type control for IRQ%d\n", irq);
674 		return;
675 	}
676 
677 	if (!handle)
678 		handle = handle_bad_irq;
679 	else if (desc->chip == &no_irq_chip) {
680 		printk(KERN_WARNING "Trying to install %sinterrupt handler "
681 		       "for IRQ%d\n", is_chained ? "chained " : "", irq);
682 		/*
683 		 * Some ARM implementations install a handler for really dumb
684 		 * interrupt hardware without setting an irq_chip. This worked
685 		 * with the ARM no_irq_chip but the check in setup_irq would
686 		 * prevent us from setting up the interrupt at all. Switch it to
687 		 * dummy_irq_chip for easy transition.
688 		 */
689 		desc->chip = &dummy_irq_chip;
690 	}
691 
692 	chip_bus_lock(irq, desc);
693 	raw_spin_lock_irqsave(&desc->lock, flags);
694 
695 	/* Uninstall? */
696 	if (handle == handle_bad_irq) {
697 		if (desc->chip != &no_irq_chip)
698 			mask_ack_irq(desc, irq);
699 		desc->status |= IRQ_DISABLED;
700 		desc->depth = 1;
701 	}
702 	desc->handle_irq = handle;
703 	desc->name = name;
704 
705 	if (handle != handle_bad_irq && is_chained) {
706 		desc->status &= ~IRQ_DISABLED;
707 		desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
708 		desc->depth = 0;
709 		desc->chip->startup(irq);
710 	}
711 	raw_spin_unlock_irqrestore(&desc->lock, flags);
712 	chip_bus_sync_unlock(irq, desc);
713 }
714 EXPORT_SYMBOL_GPL(__set_irq_handler);
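
/*
 * Example (illustrative sketch, not taken from this file): a chained
 * (is_chained = 1) demultiplexer for a GPIO bank, installed via
 * set_irq_chained_handler() from <linux/irq.h>, which wraps
 * __set_irq_handler(irq, handle, 1, NULL).  The bank pointer is parked in
 * handler_data with set_irq_data().  struct my_bank, my_bank_pending()
 * and the ack/unmask choice for the parent are hypothetical and depend on
 * the parent controller.
 */
struct my_bank {
	unsigned int	child_irq_base;
	/* register base, ... */
};

static void my_gpio_demux(unsigned int irq, struct irq_desc *desc)
{
	struct my_bank *bank = get_irq_data(irq);
	unsigned long pending;
	int bit;

	if (desc->chip->ack)
		desc->chip->ack(irq);		/* rearm the parent latch */

	pending = my_bank_pending(bank);	/* hypothetical register read */
	for_each_set_bit(bit, &pending, 32)
		generic_handle_irq(bank->child_irq_base + bit);

	if (desc->chip->unmask)
		desc->chip->unmask(irq);
}

static void my_gpio_bank_init(struct my_bank *bank, unsigned int parent_irq)
{
	set_irq_data(parent_irq, bank);
	set_irq_chained_handler(parent_irq, my_gpio_demux);
}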
715 
716 void
717 set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
718 			 irq_flow_handler_t handle)
719 {
720 	set_irq_chip(irq, chip);
721 	__set_irq_handler(irq, handle, 0, NULL);
722 }
723 
724 void
725 set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
726 			      irq_flow_handler_t handle, const char *name)
727 {
728 	set_irq_chip(irq, chip);
729 	__set_irq_handler(irq, handle, 0, name);
730 }
731 
732 void set_irq_noprobe(unsigned int irq)
733 {
734 	struct irq_desc *desc = irq_to_desc(irq);
735 	unsigned long flags;
736 
737 	if (!desc) {
738 		printk(KERN_ERR "Trying to mark IRQ%d non-probeable\n", irq);
739 		return;
740 	}
741 
742 	raw_spin_lock_irqsave(&desc->lock, flags);
743 	desc->status |= IRQ_NOPROBE;
744 	raw_spin_unlock_irqrestore(&desc->lock, flags);
745 }
746 
747 void set_irq_probe(unsigned int irq)
748 {
749 	struct irq_desc *desc = irq_to_desc(irq);
750 	unsigned long flags;
751 
752 	if (!desc) {
753 		printk(KERN_ERR "Trying to mark IRQ%d probeable\n", irq);
754 		return;
755 	}
756 
757 	raw_spin_lock_irqsave(&desc->lock, flags);
758 	desc->status &= ~IRQ_NOPROBE;
759 	raw_spin_unlock_irqrestore(&desc->lock, flags);
760 }
761