xref: /linux/arch/powerpc/kernel/irq.c (revision 89c81797d4a0779a957f4ea1f0c676cda203615b)
1 /*
2  *  Derived from arch/i386/kernel/irq.c
3  *    Copyright (C) 1992 Linus Torvalds
4  *  Adapted from arch/i386 by Gary Thomas
5  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6  *  Updated and modified by Cort Dougan <cort@fsmlabs.com>
7  *    Copyright (C) 1996-2001 Cort Dougan
8  *  Adapted for Power Macintosh by Paul Mackerras
9  *    Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
10  *
11  * This program is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU General Public License
13  * as published by the Free Software Foundation; either version
14  * 2 of the License, or (at your option) any later version.
15  *
16  * This file contains the code used by various IRQ handling routines:
17  * asking for different IRQs should be done through these routines
18  * instead of just grabbing them. Thus setups with different IRQ numbers
19  * shouldn't result in any weird surprises, and installing new handlers
20  * should be easier.
21  *
22  * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
23  * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
24  * mask register (of which only 16 are defined), hence the weird shifting
25  * and complement of the cached_irq_mask.  I want to be able to stuff
26  * this right into the SIU SMASK register.
27  * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
28  * to reduce code space and undefined function references.
29  */
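
/*
 * For illustration only (hypothetical names; the real 8xx PIC code lives
 * elsewhere in the tree, not in this file), masking an interrupt under
 * that convention amounts to roughly:
 *
 *	cached_irq_mask &= ~(1UL << (31 - irq));
 *	out_be32(siu_mask_reg, cached_irq_mask);
 *
 * i.e. the cached mask is kept in "enabled" polarity with IRQ0 in the
 * most significant bit, so it can be written straight into the SIU mask
 * register.
 */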
30 
31 #undef DEBUG
32 
33 #include <linux/module.h>
34 #include <linux/threads.h>
35 #include <linux/kernel_stat.h>
36 #include <linux/signal.h>
37 #include <linux/sched.h>
38 #include <linux/ptrace.h>
39 #include <linux/ioport.h>
40 #include <linux/interrupt.h>
41 #include <linux/timex.h>
42 #include <linux/init.h>
43 #include <linux/slab.h>
44 #include <linux/delay.h>
45 #include <linux/irq.h>
46 #include <linux/seq_file.h>
47 #include <linux/cpumask.h>
48 #include <linux/profile.h>
49 #include <linux/bitops.h>
50 #include <linux/list.h>
51 #include <linux/radix-tree.h>
52 #include <linux/mutex.h>
53 #include <linux/bootmem.h>
54 #include <linux/pci.h>
55 #include <linux/debugfs.h>
56 
57 #include <asm/uaccess.h>
58 #include <asm/system.h>
59 #include <asm/io.h>
60 #include <asm/pgtable.h>
61 #include <asm/irq.h>
62 #include <asm/cache.h>
63 #include <asm/prom.h>
64 #include <asm/ptrace.h>
65 #include <asm/machdep.h>
66 #include <asm/udbg.h>
67 #include <asm/dbell.h>
68 
69 #ifdef CONFIG_PPC64
70 #include <asm/paca.h>
71 #include <asm/firmware.h>
72 #include <asm/lv1call.h>
73 #endif
74 #define CREATE_TRACE_POINTS
75 #include <asm/trace.h>
76 
77 DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
78 EXPORT_PER_CPU_SYMBOL(irq_stat);
79 
80 int __irq_offset_value;
81 
82 #ifdef CONFIG_PPC32
83 EXPORT_SYMBOL(__irq_offset_value);
84 atomic_t ppc_n_lost_interrupts;
85 
86 #ifdef CONFIG_TAU_INT
87 extern int tau_initialized;
88 extern int tau_interrupts(int);
89 #endif
90 #endif /* CONFIG_PPC32 */
91 
92 #ifdef CONFIG_PPC64
93 
94 #ifndef CONFIG_SPARSE_IRQ
95 EXPORT_SYMBOL(irq_desc);
96 #endif
97 
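/*
 * Consulted by platform interrupt controller code (e.g. XICS/MPIC) when
 * choosing how interrupts are distributed across CPUs by default; cleared
 * by the "noirqdistrib" command line option (see setup_noirqdistrib() at
 * the bottom of this file).
 */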
98 int distribute_irqs = 1;
99 
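/*
 * get_hard_enabled()/set_soft_enabled() access the PACA fields with a
 * single load/store relative to r13 (the PACA pointer), so the access
 * cannot be split by preemption migrating us to another CPU; see the
 * comments in raw_local_irq_restore() below.
 */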
100 static inline notrace unsigned long get_hard_enabled(void)
101 {
102 	unsigned long enabled;
103 
104 	__asm__ __volatile__("lbz %0,%1(13)"
105 	: "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled)));
106 
107 	return enabled;
108 }
109 
110 static inline notrace void set_soft_enabled(unsigned long enable)
111 {
112 	__asm__ __volatile__("stb %0,%1(13)"
113 	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
114 }
115 
116 notrace void raw_local_irq_restore(unsigned long en)
117 {
118 	/*
119 	 * get_paca()->soft_enabled = en;
120 	 * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1?
121 	 * That was allowed before, and in such a case we do need to take care
122 	 * that gcc will set soft_enabled directly via r13, not choose to use
123 	 * an intermediate register, lest we're preempted to a different cpu.
124 	 */
125 	set_soft_enabled(en);
126 	if (!en)
127 		return;
128 
129 #ifdef CONFIG_PPC_STD_MMU_64
130 	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
131 		/*
132 		 * Do we need to disable preemption here?  Not really: in the
133 		 * unlikely event that we're preempted to a different cpu in
134 		 * between getting r13, loading its lppaca_ptr, and loading
135 		 * its any_int, we might call iseries_handle_interrupts without
136 		 * an interrupt pending on the new cpu, but that's no disaster,
137 		 * is it?  And the business of preempting us off the old cpu
138 		 * would itself involve a local_irq_restore which handles the
139 		 * interrupt to that cpu.
140 		 *
141 		 * But use "local_paca->lppaca_ptr" instead of "get_lppaca()"
142 		 * to avoid any preemption checking added into get_paca().
143 		 */
144 		if (local_paca->lppaca_ptr->int_dword.any_int)
145 			iseries_handle_interrupts();
146 	}
147 #endif /* CONFIG_PPC_STD_MMU_64 */
148 
149 	/*
150 	 * if (get_paca()->hard_enabled) return;
151 	 * But again we need to take care that gcc gets hard_enabled directly
152 	 * via r13, not choose to use an intermediate register, lest we're
153 	 * preempted to a different cpu in between the two instructions.
154 	 */
155 	if (get_hard_enabled())
156 		return;
157 
158 #if defined(CONFIG_BOOKE) && defined(CONFIG_SMP)
159 	/* Check for pending doorbell interrupts on SMP */
160 	doorbell_exception(NULL);
161 #endif
162 
163 	/*
164 	 * Need to hard-enable interrupts here.  Since currently disabled,
165 	 * no need to take further asm precautions against preemption; but
166 	 * use local_paca instead of get_paca() to avoid preemption checking.
167 	 */
168 	local_paca->hard_enabled = en;
169 
170 #ifndef CONFIG_BOOKE
171 	/* On server, re-trigger the decrementer if it went negative since
172 	 * some processors only trigger on edge transitions of the sign bit.
173 	 *
174 	 * BookE has a level sensitive decrementer (latches in TSR) so we
175 	 * don't need that
176 	 */
177 	if ((int)mfspr(SPRN_DEC) < 0)
178 		mtspr(SPRN_DEC, 1);
179 #endif /* !CONFIG_BOOKE */
180 
181 	/*
182 	 * Force the delivery of pending soft-disabled interrupts on PS3.
183 	 * Any HV call will have this side effect.
184 	 */
185 	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
186 		u64 tmp;
187 		lv1_get_version_info(&tmp);
188 	}
189 
190 	__hard_irq_enable();
191 }
192 EXPORT_SYMBOL(raw_local_irq_restore);
193 #endif /* CONFIG_PPC64 */
194 
195 static int show_other_interrupts(struct seq_file *p, int prec)
196 {
197 	int j;
198 
199 #if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
200 	if (tau_initialized) {
201 		seq_printf(p, "%*s: ", prec, "TAU");
202 		for_each_online_cpu(j)
203 			seq_printf(p, "%10u ", tau_interrupts(j));
204 		seq_puts(p, "  PowerPC             Thermal Assist (cpu temp)\n");
205 	}
206 #endif /* CONFIG_PPC32 && CONFIG_TAU_INT */
207 
208 	seq_printf(p, "%*s: ", prec, "LOC");
209 	for_each_online_cpu(j)
210 		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs);
211 	seq_printf(p, "  Local timer interrupts\n");
212 
213 	seq_printf(p, "%*s: ", prec, "SPU");
214 	for_each_online_cpu(j)
215 		seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
216 	seq_printf(p, "  Spurious interrupts\n");
217 
218 	seq_printf(p, "%*s: ", prec, "CNT");
219 	for_each_online_cpu(j)
220 		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
221 	seq_printf(p, "  Performance monitoring interrupts\n");
222 
223 	seq_printf(p, "%*s: ", prec, "MCE");
224 	for_each_online_cpu(j)
225 		seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
226 	seq_printf(p, "  Machine check exceptions\n");
227 
228 	return 0;
229 }
230 
231 int show_interrupts(struct seq_file *p, void *v)
232 {
233 	unsigned long flags, any_count = 0;
234 	int i = *(loff_t *) v, j, prec;
235 	struct irqaction *action;
236 	struct irq_desc *desc;
237 
238 	if (i > nr_irqs)
239 		return 0;
240 
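	/* Compute the field width: number of digits in nr_irqs, minimum 3 */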
241 	for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
242 		j *= 10;
243 
244 	if (i == nr_irqs)
245 		return show_other_interrupts(p, prec);
246 
247 	/* print header */
248 	if (i == 0) {
249 		seq_printf(p, "%*s", prec + 8, "");
250 		for_each_online_cpu(j)
251 			seq_printf(p, "CPU%-8d", j);
252 		seq_putc(p, '\n');
253 	}
254 
255 	desc = irq_to_desc(i);
256 	if (!desc)
257 		return 0;
258 
259 	raw_spin_lock_irqsave(&desc->lock, flags);
260 	for_each_online_cpu(j)
261 		any_count |= kstat_irqs_cpu(i, j);
262 	action = desc->action;
263 	if (!action && !any_count)
264 		goto out;
265 
266 	seq_printf(p, "%*d: ", prec, i);
267 	for_each_online_cpu(j)
268 		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
269 
270 	if (desc->chip)
271 		seq_printf(p, "  %-16s", desc->chip->name);
272 	else
273 		seq_printf(p, "  %-16s", "None");
274 	seq_printf(p, " %-8s", (desc->status & IRQ_LEVEL) ? "Level" : "Edge");
275 
276 	if (action) {
277 		seq_printf(p, "     %s", action->name);
278 		while ((action = action->next) != NULL)
279 			seq_printf(p, ", %s", action->name);
280 	}
281 
282 	seq_putc(p, '\n');
283 out:
284 	raw_spin_unlock_irqrestore(&desc->lock, flags);
285 	return 0;
286 }
287 
288 /*
289  * /proc/stat helpers
290  */
291 u64 arch_irq_stat_cpu(unsigned int cpu)
292 {
293 	u64 sum = per_cpu(irq_stat, cpu).timer_irqs;
294 
295 	sum += per_cpu(irq_stat, cpu).pmu_irqs;
296 	sum += per_cpu(irq_stat, cpu).mce_exceptions;
297 	sum += per_cpu(irq_stat, cpu).spurious_irqs;
298 
299 	return sum;
300 }
301 
302 #ifdef CONFIG_HOTPLUG_CPU
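/*
 * Used during CPU hot-unplug: restrict every interrupt's affinity to the
 * given cpumask (the CPUs staying online); affinities that no longer
 * intersect it are broken and redirected to the surviving CPUs.
 */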
303 void fixup_irqs(const struct cpumask *map)
304 {
305 	struct irq_desc *desc;
306 	unsigned int irq;
307 	static int warned;
308 	cpumask_var_t mask;
309 
310 	alloc_cpumask_var(&mask, GFP_KERNEL);
311 
312 	for_each_irq(irq) {
313 		desc = irq_to_desc(irq);
314 		if (!desc)
315 			continue;
316 
317 		if (desc->status & IRQ_PER_CPU)
318 			continue;
319 
320 		cpumask_and(mask, desc->affinity, map);
321 		if (cpumask_any(mask) >= nr_cpu_ids) {
322 			printk("Breaking affinity for irq %i\n", irq);
323 			cpumask_copy(mask, map);
324 		}
325 		if (desc->chip->set_affinity)
326 			desc->chip->set_affinity(irq, mask);
327 		else if (desc->action && !(warned++))
328 			printk("Cannot set affinity for irq %i\n", irq);
329 	}
330 
331 	free_cpumask_var(mask);
332 
333 	local_irq_enable();
334 	mdelay(1);
335 	local_irq_disable();
336 }
337 #endif
338 
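/*
 * Handle a single interrupt, switching to this CPU's dedicated hard-IRQ
 * stack (and moving thread.ksp_limit, used by the stack overflow check,
 * onto it) unless we are already running on that stack.
 */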
339 static inline void handle_one_irq(unsigned int irq)
340 {
341 	struct thread_info *curtp, *irqtp;
342 	unsigned long saved_sp_limit;
343 	struct irq_desc *desc;
344 
345 	/* Switch to the irq stack to handle this */
346 	curtp = current_thread_info();
347 	irqtp = hardirq_ctx[smp_processor_id()];
348 
349 	if (curtp == irqtp) {
350 		/* We're already on the irq stack, just handle it */
351 		generic_handle_irq(irq);
352 		return;
353 	}
354 
355 	desc = irq_to_desc(irq);
356 	saved_sp_limit = current->thread.ksp_limit;
357 
358 	irqtp->task = curtp->task;
359 	irqtp->flags = 0;
360 
361 	/* Copy the softirq bits in preempt_count so that the
362 	 * softirq checks work in the hardirq context. */
363 	irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
364 			       (curtp->preempt_count & SOFTIRQ_MASK);
365 
366 	current->thread.ksp_limit = (unsigned long)irqtp +
367 		_ALIGN_UP(sizeof(struct thread_info), 16);
368 
369 	call_handle_irq(irq, desc, irqtp, desc->handle_irq);
370 	current->thread.ksp_limit = saved_sp_limit;
371 	irqtp->task = NULL;
372 
373 	/* Set any flag that may have been set on the
374 	 * alternate stack
375 	 */
376 	if (irqtp->flags)
377 		set_bits(irqtp->flags, &curtp->flags);
378 }
379 
380 static inline void check_stack_overflow(void)
381 {
382 #ifdef CONFIG_DEBUG_STACKOVERFLOW
383 	long sp;
384 
385 	sp = __get_SP() & (THREAD_SIZE-1);
386 
387 	/* check for stack overflow: is there less than 2KB free? */
388 	if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
389 		printk("do_IRQ: stack overflow: %ld\n",
390 			sp - sizeof(struct thread_info));
391 		dump_stack();
392 	}
393 #endif
394 }
395 
396 void do_IRQ(struct pt_regs *regs)
397 {
398 	struct pt_regs *old_regs = set_irq_regs(regs);
399 	unsigned int irq;
400 
401 	trace_irq_entry(regs);
402 
403 	irq_enter();
404 
405 	check_stack_overflow();
406 
407 	irq = ppc_md.get_irq();
408 
409 	if (irq != NO_IRQ && irq != NO_IRQ_IGNORE)
410 		handle_one_irq(irq);
411 	else if (irq != NO_IRQ_IGNORE)
412 		__get_cpu_var(irq_stat).spurious_irqs++;
413 
414 	irq_exit();
415 	set_irq_regs(old_regs);
416 
417 #ifdef CONFIG_PPC_ISERIES
418 	if (firmware_has_feature(FW_FEATURE_ISERIES) &&
419 			get_lppaca()->int_dword.fields.decr_int) {
420 		get_lppaca()->int_dword.fields.decr_int = 0;
421 		/* Signal a fake decrementer interrupt */
422 		timer_interrupt(regs);
423 	}
424 #endif
425 
426 	trace_irq_exit(regs);
427 }
428 
429 void __init init_IRQ(void)
430 {
431 	if (ppc_md.init_IRQ)
432 		ppc_md.init_IRQ();
433 
434 	exc_lvl_ctx_init();
435 
436 	irq_ctx_init();
437 }
438 
439 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
440 struct thread_info   *critirq_ctx[NR_CPUS] __read_mostly;
441 struct thread_info    *dbgirq_ctx[NR_CPUS] __read_mostly;
442 struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;
443 
444 void exc_lvl_ctx_init(void)
445 {
446 	struct thread_info *tp;
447 	int i;
448 
449 	for_each_possible_cpu(i) {
450 		memset((void *)critirq_ctx[i], 0, THREAD_SIZE);
451 		tp = critirq_ctx[i];
452 		tp->cpu = i;
453 		tp->preempt_count = 0;
454 
455 #ifdef CONFIG_BOOKE
456 		memset((void *)dbgirq_ctx[i], 0, THREAD_SIZE);
457 		tp = dbgirq_ctx[i];
458 		tp->cpu = i;
459 		tp->preempt_count = 0;
460 
461 		memset((void *)mcheckirq_ctx[i], 0, THREAD_SIZE);
462 		tp = mcheckirq_ctx[i];
463 		tp->cpu = i;
464 		tp->preempt_count = HARDIRQ_OFFSET;
465 #endif
466 	}
467 }
468 #endif
469 
470 struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
471 struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;
472 
473 void irq_ctx_init(void)
474 {
475 	struct thread_info *tp;
476 	int i;
477 
478 	for_each_possible_cpu(i) {
479 		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
480 		tp = softirq_ctx[i];
481 		tp->cpu = i;
482 		tp->preempt_count = 0;
483 
484 		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
485 		tp = hardirq_ctx[i];
486 		tp->cpu = i;
487 		tp->preempt_count = HARDIRQ_OFFSET;
488 	}
489 }
490 
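/* Run the pending softirqs on this CPU's dedicated softirq stack */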
491 static inline void do_softirq_onstack(void)
492 {
493 	struct thread_info *curtp, *irqtp;
494 	unsigned long saved_sp_limit = current->thread.ksp_limit;
495 
496 	curtp = current_thread_info();
497 	irqtp = softirq_ctx[smp_processor_id()];
498 	irqtp->task = curtp->task;
499 	current->thread.ksp_limit = (unsigned long)irqtp +
500 				    _ALIGN_UP(sizeof(struct thread_info), 16);
501 	call_do_softirq(irqtp);
502 	current->thread.ksp_limit = saved_sp_limit;
503 	irqtp->task = NULL;
504 }
505 
506 void do_softirq(void)
507 {
508 	unsigned long flags;
509 
510 	if (in_interrupt())
511 		return;
512 
513 	local_irq_save(flags);
514 
515 	if (local_softirq_pending())
516 		do_softirq_onstack();
517 
518 	local_irq_restore(flags);
519 }
520 
521 
522 /*
523  * IRQ controller and virtual interrupts
524  */
525 
526 static LIST_HEAD(irq_hosts);
527 static DEFINE_RAW_SPINLOCK(irq_big_lock);
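/*
 * revmap_trees_allocated: 0 until irq_late_init() has initialized the
 * radix trees (insertions are deferred); 1 once inserts/removals go
 * directly to the trees; 2 once the trees have been populated from
 * irq_map[] and fast lookups may be used.
 */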
528 static unsigned int revmap_trees_allocated;
529 static DEFINE_MUTEX(revmap_trees_mutex);
530 struct irq_map_entry irq_map[NR_IRQS];
531 static unsigned int irq_virq_count = NR_IRQS;
532 static struct irq_host *irq_default_host;
533 
534 irq_hw_number_t virq_to_hw(unsigned int virq)
535 {
536 	return irq_map[virq].hwirq;
537 }
538 EXPORT_SYMBOL_GPL(virq_to_hw);
539 
540 static int default_irq_host_match(struct irq_host *h, struct device_node *np)
541 {
542 	return h->of_node != NULL && h->of_node == np;
543 }
544 
545 struct irq_host *irq_alloc_host(struct device_node *of_node,
546 				unsigned int revmap_type,
547 				unsigned int revmap_arg,
548 				struct irq_host_ops *ops,
549 				irq_hw_number_t inval_irq)
550 {
551 	struct irq_host *host;
552 	unsigned int size = sizeof(struct irq_host);
553 	unsigned int i;
554 	unsigned int *rmap;
555 	unsigned long flags;
556 
557 	/* Allocate structure and revmap table if using linear mapping */
558 	if (revmap_type == IRQ_HOST_MAP_LINEAR)
559 		size += revmap_arg * sizeof(unsigned int);
560 	host = zalloc_maybe_bootmem(size, GFP_KERNEL);
561 	if (host == NULL)
562 		return NULL;
563 
564 	/* Fill structure */
565 	host->revmap_type = revmap_type;
566 	host->inval_irq = inval_irq;
567 	host->ops = ops;
568 	host->of_node = of_node_get(of_node);
569 
570 	if (host->ops->match == NULL)
571 		host->ops->match = default_irq_host_match;
572 
573 	raw_spin_lock_irqsave(&irq_big_lock, flags);
574 
575 	/* If it's a legacy controller, check for duplicates and
576 	 * mark it as allocated (we use irq 0 host pointer for that)
577 	 */
578 	if (revmap_type == IRQ_HOST_MAP_LEGACY) {
579 		if (irq_map[0].host != NULL) {
580 			raw_spin_unlock_irqrestore(&irq_big_lock, flags);
581 			/* If we are in early boot, we can't free the structure,
582 			 * too bad...
583 			 * this will be fixed once slab is made available early
584 			 * instead of the current cruft
585 			 */
586 			if (mem_init_done)
587 				kfree(host);
588 			return NULL;
589 		}
590 		irq_map[0].host = host;
591 	}
592 
593 	list_add(&host->link, &irq_hosts);
594 	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
595 
596 	/* Additional setup per revmap type */
597 	switch(revmap_type) {
598 	case IRQ_HOST_MAP_LEGACY:
599 		/* 0 is always the invalid number for legacy */
600 		host->inval_irq = 0;
601 		/* set ourselves up as the host for all legacy interrupts */
602 		for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
603 			irq_map[i].hwirq = i;
604 			smp_wmb();
605 			irq_map[i].host = host;
606 			smp_wmb();
607 
608 			/* Clear norequest flags */
609 			irq_to_desc(i)->status &= ~IRQ_NOREQUEST;
610 
611 			/* Legacy flags are left to default at this point,
612 			 * one can then use irq_create_mapping() to
613 			 * explicitly change them
614 			 */
615 			ops->map(host, i, i);
616 		}
617 		break;
618 	case IRQ_HOST_MAP_LINEAR:
619 		rmap = (unsigned int *)(host + 1);
620 		for (i = 0; i < revmap_arg; i++)
621 			rmap[i] = NO_IRQ;
622 		host->revmap_data.linear.size = revmap_arg;
623 		smp_wmb();
624 		host->revmap_data.linear.revmap = rmap;
625 		break;
626 	default:
627 		break;
628 	}
629 
630 	pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);
631 
632 	return host;
633 }
634 
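/*
 * Illustrative usage sketch (hypothetical "mypic" names, not part of this
 * file): a platform interrupt controller driver typically registers a host
 * roughly like this:
 *
 *	static struct irq_host_ops mypic_host_ops = {
 *		.map	= mypic_host_map,
 *		.xlate	= mypic_host_xlate,
 *	};
 *
 *	host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 64,
 *			      &mypic_host_ops, 0);
 *
 * See the in-tree PIC drivers (e.g. arch/powerpc/sysdev/mpic.c) for real
 * examples.
 */
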
635 struct irq_host *irq_find_host(struct device_node *node)
636 {
637 	struct irq_host *h, *found = NULL;
638 	unsigned long flags;
639 
640 	/* We might want to match the legacy controller last since
641 	 * it might potentially be set to match all interrupts in
642 	 * the absence of a device node. This isn't a problem yet
643 	 * though...
644 	 */
645 	raw_spin_lock_irqsave(&irq_big_lock, flags);
646 	list_for_each_entry(h, &irq_hosts, link)
647 		if (h->ops->match(h, node)) {
648 			found = h;
649 			break;
650 		}
651 	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
652 	return found;
653 }
654 EXPORT_SYMBOL_GPL(irq_find_host);
655 
656 void irq_set_default_host(struct irq_host *host)
657 {
658 	pr_debug("irq: Default host set to @0x%p\n", host);
659 
660 	irq_default_host = host;
661 }
662 
663 void irq_set_virq_count(unsigned int count)
664 {
665 	pr_debug("irq: Trying to set virq count to %d\n", count);
666 
667 	BUG_ON(count < NUM_ISA_INTERRUPTS);
668 	if (count < NR_IRQS)
669 		irq_virq_count = count;
670 }
671 
672 static int irq_setup_virq(struct irq_host *host, unsigned int virq,
673 			    irq_hw_number_t hwirq)
674 {
675 	struct irq_desc *desc;
676 
677 	desc = irq_to_desc_alloc_node(virq, 0);
678 	if (!desc) {
679 		pr_debug("irq: -> allocating desc failed\n");
680 		goto error;
681 	}
682 
683 	/* Clear IRQ_NOREQUEST flag */
684 	desc->status &= ~IRQ_NOREQUEST;
685 
686 	/* map it */
687 	smp_wmb();
688 	irq_map[virq].hwirq = hwirq;
689 	smp_mb();
690 
691 	if (host->ops->map(host, virq, hwirq)) {
692 		pr_debug("irq: -> mapping failed, freeing\n");
693 		goto error;
694 	}
695 
696 	return 0;
697 
698 error:
699 	irq_free_virt(virq, 1);
700 	return -1;
701 }
702 
703 unsigned int irq_create_direct_mapping(struct irq_host *host)
704 {
705 	unsigned int virq;
706 
707 	if (host == NULL)
708 		host = irq_default_host;
709 
710 	BUG_ON(host == NULL);
711 	WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);
712 
713 	virq = irq_alloc_virt(host, 1, 0);
714 	if (virq == NO_IRQ) {
715 		pr_debug("irq: create_direct virq allocation failed\n");
716 		return NO_IRQ;
717 	}
718 
719 	pr_debug("irq: create_direct obtained virq %d\n", virq);
720 
721 	if (irq_setup_virq(host, virq, virq))
722 		return NO_IRQ;
723 
724 	return virq;
725 }
726 
727 unsigned int irq_create_mapping(struct irq_host *host,
728 				irq_hw_number_t hwirq)
729 {
730 	unsigned int virq, hint;
731 
732 	pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);
733 
734 	/* Look for default host if necessary */
735 	if (host == NULL)
736 		host = irq_default_host;
737 	if (host == NULL) {
738 		printk(KERN_WARNING "irq_create_mapping called for"
739 		       " NULL host, hwirq=%lx\n", hwirq);
740 		WARN_ON(1);
741 		return NO_IRQ;
742 	}
743 	pr_debug("irq: -> using host @%p\n", host);
744 
745 	/* Check if a mapping already exists; if it does, call
746 	 * host->ops->map() to update the flags
747 	 */
748 	virq = irq_find_mapping(host, hwirq);
749 	if (virq != NO_IRQ) {
750 		if (host->ops->remap)
751 			host->ops->remap(host, virq, hwirq);
752 		pr_debug("irq: -> existing mapping on virq %d\n", virq);
753 		return virq;
754 	}
755 
756 	/* Get a virtual interrupt number */
757 	if (host->revmap_type == IRQ_HOST_MAP_LEGACY) {
758 		/* Handle legacy */
759 		virq = (unsigned int)hwirq;
760 		if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
761 			return NO_IRQ;
762 		return virq;
763 	} else {
764 		/* Allocate a virtual interrupt number */
765 		hint = hwirq % irq_virq_count;
766 		virq = irq_alloc_virt(host, 1, hint);
767 		if (virq == NO_IRQ) {
768 			pr_debug("irq: -> virq allocation failed\n");
769 			return NO_IRQ;
770 		}
771 	}
772 
773 	if (irq_setup_virq(host, virq, hwirq))
774 		return NO_IRQ;
775 
776 	printk(KERN_DEBUG "irq: irq %lu on host %s mapped to virtual irq %u\n",
777 		hwirq, host->of_node ? host->of_node->full_name : "null", virq);
778 
779 	return virq;
780 }
781 EXPORT_SYMBOL_GPL(irq_create_mapping);
782 
783 unsigned int irq_create_of_mapping(struct device_node *controller,
784 				   const u32 *intspec, unsigned int intsize)
785 {
786 	struct irq_host *host;
787 	irq_hw_number_t hwirq;
788 	unsigned int type = IRQ_TYPE_NONE;
789 	unsigned int virq;
790 
791 	if (controller == NULL)
792 		host = irq_default_host;
793 	else
794 		host = irq_find_host(controller);
795 	if (host == NULL) {
796 		printk(KERN_WARNING "irq: no irq host found for %s !\n",
797 		       controller->full_name);
798 		return NO_IRQ;
799 	}
800 
801 	/* If host has no translation, then we assume interrupt line */
802 	if (host->ops->xlate == NULL)
803 		hwirq = intspec[0];
804 	else {
805 		if (host->ops->xlate(host, controller, intspec, intsize,
806 				     &hwirq, &type))
807 			return NO_IRQ;
808 	}
809 
810 	/* Create mapping */
811 	virq = irq_create_mapping(host, hwirq);
812 	if (virq == NO_IRQ)
813 		return virq;
814 
815 	/* Set type if specified and different than the current one */
816 	if (type != IRQ_TYPE_NONE &&
817 	    type != (irq_to_desc(virq)->status & IRQF_TRIGGER_MASK))
818 		set_irq_type(virq, type);
819 	return virq;
820 }
821 EXPORT_SYMBOL_GPL(irq_create_of_mapping);
822 
823 unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
824 {
825 	struct of_irq oirq;
826 
827 	if (of_irq_map_one(dev, index, &oirq))
828 		return NO_IRQ;
829 
830 	return irq_create_of_mapping(oirq.controller, oirq.specifier,
831 				     oirq.size);
832 }
833 EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
834 
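/*
 * Illustrative usage sketch (hypothetical names): a device driver would
 * normally obtain and wire up its interrupt roughly like this:
 *
 *	virq = irq_of_parse_and_map(np, 0);
 *	if (virq == NO_IRQ)
 *		return -EINVAL;
 *	rc = request_irq(virq, mydev_interrupt, 0, "mydev", mydev);
 *
 * and undo the mapping with irq_dispose_mapping(virq) on teardown.
 */
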
835 void irq_dispose_mapping(unsigned int virq)
836 {
837 	struct irq_host *host;
838 	irq_hw_number_t hwirq;
839 
840 	if (virq == NO_IRQ)
841 		return;
842 
843 	host = irq_map[virq].host;
844 	WARN_ON(host == NULL);
845 	if (host == NULL)
846 		return;
847 
848 	/* Never unmap legacy interrupts */
849 	if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
850 		return;
851 
852 	/* remove chip and handler */
853 	set_irq_chip_and_handler(virq, NULL, NULL);
854 
855 	/* Make sure it's completed */
856 	synchronize_irq(virq);
857 
858 	/* Tell the PIC about it */
859 	if (host->ops->unmap)
860 		host->ops->unmap(host, virq);
861 	smp_mb();
862 
863 	/* Clear reverse map */
864 	hwirq = irq_map[virq].hwirq;
865 	switch(host->revmap_type) {
866 	case IRQ_HOST_MAP_LINEAR:
867 		if (hwirq < host->revmap_data.linear.size)
868 			host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
869 		break;
870 	case IRQ_HOST_MAP_TREE:
871 		/*
872 		 * Check if radix tree allocated yet, if not then nothing to
873 		 * remove.
874 		 */
875 		smp_rmb();
876 		if (revmap_trees_allocated < 1)
877 			break;
878 		mutex_lock(&revmap_trees_mutex);
879 		radix_tree_delete(&host->revmap_data.tree, hwirq);
880 		mutex_unlock(&revmap_trees_mutex);
881 		break;
882 	}
883 
884 	/* Destroy map */
885 	smp_mb();
886 	irq_map[virq].hwirq = host->inval_irq;
887 
888 	/* Set some flags */
889 	irq_to_desc(virq)->status |= IRQ_NOREQUEST;
890 
891 	/* Free it */
892 	irq_free_virt(virq, 1);
893 }
894 EXPORT_SYMBOL_GPL(irq_dispose_mapping);
895 
896 unsigned int irq_find_mapping(struct irq_host *host,
897 			      irq_hw_number_t hwirq)
898 {
899 	unsigned int i;
900 	unsigned int hint = hwirq % irq_virq_count;
901 
902 	/* Look for default host if necessary */
903 	if (host == NULL)
904 		host = irq_default_host;
905 	if (host == NULL)
906 		return NO_IRQ;
907 
908 	/* legacy -> bail early */
909 	if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
910 		return hwirq;
911 
912 	/* Slow path does a linear search of the map */
913 	if (hint < NUM_ISA_INTERRUPTS)
914 		hint = NUM_ISA_INTERRUPTS;
915 	i = hint;
916 	do  {
917 		if (irq_map[i].host == host &&
918 		    irq_map[i].hwirq == hwirq)
919 			return i;
920 		i++;
921 		if (i >= irq_virq_count)
922 			i = NUM_ISA_INTERRUPTS;
923 	} while(i != hint);
924 	return NO_IRQ;
925 }
926 EXPORT_SYMBOL_GPL(irq_find_mapping);
927 
928 
929 unsigned int irq_radix_revmap_lookup(struct irq_host *host,
930 				     irq_hw_number_t hwirq)
931 {
932 	struct irq_map_entry *ptr;
933 	unsigned int virq;
934 
935 	WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);
936 
937 	/*
938 	 * Check if the radix tree exists and has been initialized.
939 	 * If not, fall back to the slow path.
940 	 */
941 	if (revmap_trees_allocated < 2)
942 		return irq_find_mapping(host, hwirq);
943 
944 	/* Now try to resolve */
945 	/*
946 	 * No rcu_read_lock(ing) needed, the ptr returned can't go under us
947 	 * as it's referencing an entry in the static irq_map table.
948 	 */
949 	ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);
950 
951 	/*
952 	 * If found in radix tree, then fine.
953 	 * Else fall back to a linear lookup - this should not happen in practice
954 	 * as it means that we failed to insert the node in the radix tree.
955 	 */
956 	if (ptr)
957 		virq = ptr - irq_map;
958 	else
959 		virq = irq_find_mapping(host, hwirq);
960 
961 	return virq;
962 }
963 
964 void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
965 			     irq_hw_number_t hwirq)
966 {
967 
968 	WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);
969 
970 	/*
971 	 * Check if the radix tree exists yet.
972 	 * If not, then the irq will be inserted into the tree when it gets
973 	 * initialized.
974 	 */
975 	smp_rmb();
976 	if (revmap_trees_allocated < 1)
977 		return;
978 
979 	if (virq != NO_IRQ) {
980 		mutex_lock(&revmap_trees_mutex);
981 		radix_tree_insert(&host->revmap_data.tree, hwirq,
982 				  &irq_map[virq]);
983 		mutex_unlock(&revmap_trees_mutex);
984 	}
985 }
986 
987 unsigned int irq_linear_revmap(struct irq_host *host,
988 			       irq_hw_number_t hwirq)
989 {
990 	unsigned int *revmap;
991 
992 	WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR);
993 
994 	/* Check revmap bounds */
995 	if (unlikely(hwirq >= host->revmap_data.linear.size))
996 		return irq_find_mapping(host, hwirq);
997 
998 	/* Check if revmap was allocated */
999 	revmap = host->revmap_data.linear.revmap;
1000 	if (unlikely(revmap == NULL))
1001 		return irq_find_mapping(host, hwirq);
1002 
1003 	/* Fill up revmap with slow path if no mapping found */
1004 	if (unlikely(revmap[hwirq] == NO_IRQ))
1005 		revmap[hwirq] = irq_find_mapping(host, hwirq);
1006 
1007 	return revmap[hwirq];
1008 }
1009 
1010 unsigned int irq_alloc_virt(struct irq_host *host,
1011 			    unsigned int count,
1012 			    unsigned int hint)
1013 {
1014 	unsigned long flags;
1015 	unsigned int i, j, found = NO_IRQ;
1016 
1017 	if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
1018 		return NO_IRQ;
1019 
1020 	raw_spin_lock_irqsave(&irq_big_lock, flags);
1021 
1022 	/* Use hint for 1 interrupt if any */
1023 	if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
1024 	    hint < irq_virq_count && irq_map[hint].host == NULL) {
1025 		found = hint;
1026 		goto hint_found;
1027 	}
1028 
1029 	/* Look for count consecutive numbers in the allocatable
1030 	 * (non-legacy) space
1031 	 */
1032 	for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) {
1033 		if (irq_map[i].host != NULL)
1034 			j = 0;
1035 		else
1036 			j++;
1037 
1038 		if (j == count) {
1039 			found = i - count + 1;
1040 			break;
1041 		}
1042 	}
1043 	if (found == NO_IRQ) {
1044 		raw_spin_unlock_irqrestore(&irq_big_lock, flags);
1045 		return NO_IRQ;
1046 	}
1047  hint_found:
1048 	for (i = found; i < (found + count); i++) {
1049 		irq_map[i].hwirq = host->inval_irq;
1050 		smp_wmb();
1051 		irq_map[i].host = host;
1052 	}
1053 	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
1054 	return found;
1055 }
1056 
1057 void irq_free_virt(unsigned int virq, unsigned int count)
1058 {
1059 	unsigned long flags;
1060 	unsigned int i;
1061 
1062 	WARN_ON(virq < NUM_ISA_INTERRUPTS);
1063 	WARN_ON(count == 0 || (virq + count) > irq_virq_count);
1064 
1065 	raw_spin_lock_irqsave(&irq_big_lock, flags);
1066 	for (i = virq; i < (virq + count); i++) {
1067 		struct irq_host *host;
1068 
1069 		if (i < NUM_ISA_INTERRUPTS ||
1070 		    (virq + count) > irq_virq_count)
1071 			continue;
1072 
1073 		host = irq_map[i].host;
1074 		irq_map[i].hwirq = host->inval_irq;
1075 		smp_wmb();
1076 		irq_map[i].host = NULL;
1077 	}
1078 	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
1079 }
1080 
1081 int arch_early_irq_init(void)
1082 {
1083 	struct irq_desc *desc;
1084 	int i;
1085 
1086 	for (i = 0; i < NR_IRQS; i++) {
1087 		desc = irq_to_desc(i);
1088 		if (desc)
1089 			desc->status |= IRQ_NOREQUEST;
1090 	}
1091 
1092 	return 0;
1093 }
1094 
1095 int arch_init_chip_data(struct irq_desc *desc, int node)
1096 {
1097 	desc->status |= IRQ_NOREQUEST;
1098 	return 0;
1099 }
1100 
1101 /* We need to create the radix trees late */
1102 static int irq_late_init(void)
1103 {
1104 	struct irq_host *h;
1105 	unsigned int i;
1106 
1107 	/*
1108 	 * No mutual exclusion with respect to accessors of the tree is needed
1109 	 * here as the synchronization is done via the state variable
1110 	 * revmap_trees_allocated.
1111 	 */
1112 	list_for_each_entry(h, &irq_hosts, link) {
1113 		if (h->revmap_type == IRQ_HOST_MAP_TREE)
1114 			INIT_RADIX_TREE(&h->revmap_data.tree, GFP_KERNEL);
1115 	}
1116 
1117 	/*
1118 	 * Make sure the radix tree initializations are visible before
1119 	 * setting the flag
1120 	 */
1121 	smp_wmb();
1122 	revmap_trees_allocated = 1;
1123 
1124 	/*
1125 	 * Insert the reverse mapping for those interrupts already present
1126 	 * in irq_map[].
1127 	 */
1128 	mutex_lock(&revmap_trees_mutex);
1129 	for (i = 0; i < irq_virq_count; i++) {
1130 		if (irq_map[i].host &&
1131 		    (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE))
1132 			radix_tree_insert(&irq_map[i].host->revmap_data.tree,
1133 					  irq_map[i].hwirq, &irq_map[i]);
1134 	}
1135 	mutex_unlock(&revmap_trees_mutex);
1136 
1137 	/*
1138 	 * Make sure the radix tree insertions are visible before setting
1139 	 * the flag
1140 	 */
1141 	smp_wmb();
1142 	revmap_trees_allocated = 2;
1143 
1144 	return 0;
1145 }
1146 arch_initcall(irq_late_init);
1147 
1148 #ifdef CONFIG_VIRQ_DEBUG
1149 static int virq_debug_show(struct seq_file *m, void *private)
1150 {
1151 	unsigned long flags;
1152 	struct irq_desc *desc;
1153 	const char *p;
1154 	char none[] = "none";
1155 	int i;
1156 
1157 	seq_printf(m, "%-5s  %-7s  %-15s  %s\n", "virq", "hwirq",
1158 		      "chip name", "host name");
1159 
1160 	for (i = 1; i < nr_irqs; i++) {
1161 		desc = irq_to_desc(i);
1162 		if (!desc)
1163 			continue;
1164 
1165 		raw_spin_lock_irqsave(&desc->lock, flags);
1166 
1167 		if (desc->action && desc->action->handler) {
1168 			seq_printf(m, "%5d  ", i);
1169 			seq_printf(m, "0x%05lx  ", virq_to_hw(i));
1170 
1171 			if (desc->chip && desc->chip->name)
1172 				p = desc->chip->name;
1173 			else
1174 				p = none;
1175 			seq_printf(m, "%-15s  ", p);
1176 
1177 			if (irq_map[i].host && irq_map[i].host->of_node)
1178 				p = irq_map[i].host->of_node->full_name;
1179 			else
1180 				p = none;
1181 			seq_printf(m, "%s\n", p);
1182 		}
1183 
1184 		raw_spin_unlock_irqrestore(&desc->lock, flags);
1185 	}
1186 
1187 	return 0;
1188 }
1189 
1190 static int virq_debug_open(struct inode *inode, struct file *file)
1191 {
1192 	return single_open(file, virq_debug_show, inode->i_private);
1193 }
1194 
1195 static const struct file_operations virq_debug_fops = {
1196 	.open = virq_debug_open,
1197 	.read = seq_read,
1198 	.llseek = seq_lseek,
1199 	.release = single_release,
1200 };
1201 
1202 static int __init irq_debugfs_init(void)
1203 {
1204 	if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
1205 				 NULL, &virq_debug_fops) == NULL)
1206 		return -ENOMEM;
1207 
1208 	return 0;
1209 }
1210 __initcall(irq_debugfs_init);
1211 #endif /* CONFIG_VIRQ_DEBUG */
1212 
1213 #ifdef CONFIG_PPC64
1214 static int __init setup_noirqdistrib(char *str)
1215 {
1216 	distribute_irqs = 0;
1217 	return 1;
1218 }
1219 
1220 __setup("noirqdistrib", setup_noirqdistrib);
1221 #endif /* CONFIG_PPC64 */
1222