/*
 * ip27-irq.c: High-level interrupt handling for IP27 architecture.
 *
 * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 1999 - 2001 Kanoj Sarcar
 */

#undef DEBUG

#include <linux/init.h>
#include <linux/irq.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/timex.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/smp_lock.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/bitops.h>

#include <asm/bootinfo.h>
#include <asm/io.h>
#include <asm/mipsregs.h>
#include <asm/system.h>

#include <asm/processor.h>
#include <asm/pci/bridge.h>
#include <asm/sn/addrs.h>
#include <asm/sn/agent.h>
#include <asm/sn/arch.h>
#include <asm/sn/hub.h>
#include <asm/sn/intr.h>

/*
 * Linux has a controller-independent interrupt architecture.
 * Every controller has a 'controller-template', that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
 * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
 * (IO-APICs assumed to be messaging to Pentium local-APICs)
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic.
 */

extern asmlinkage void ip27_irq(void);

extern struct bridge_controller *irq_to_bridge[];
extern int irq_to_slot[];

/*
 * use these macros to get the encoded nasid and widget id
 * from the irq value
 */
#define IRQ_TO_BRIDGE(i)		irq_to_bridge[(i)]
#define	SLOT_FROM_PCI_IRQ(i)		irq_to_slot[i]

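/*
 * Allocate the first free software interrupt level on the hub that owns
 * @cpu, mark it in the hub's irq_alloc_mask and record the level-to-irq
 * mapping in the CPU's slice data so the dispatch code can translate a
 * pending level back to a Linux irq number.
 */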
static inline int alloc_level(int cpu, int irq)
{
	struct hub_data *hub = hub_data(cpu_to_node(cpu));
	struct slice_data *si = cpu_data[cpu].data;
	int level;

	level = find_first_zero_bit(hub->irq_alloc_mask, LEVELS_PER_SLICE);
	if (level >= LEVELS_PER_SLICE)
		panic("Cpu %d flooded with devices\n", cpu);

	__set_bit(level, hub->irq_alloc_mask);
	si->level_to_irq[level] = irq;

	return level;
}

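/*
 * Walk all online CPUs looking for the slice that has @irq mapped to one
 * of its PCI interrupt levels; return that level and report the CPU via
 * @cpunum.  Panics if the irq is not mapped anywhere.
 */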
static inline int find_level(cpuid_t *cpunum, int irq)
{
	int cpu, i;

	for_each_online_cpu(cpu) {
		struct slice_data *si = cpu_data[cpu].data;

		for (i = BASE_PCI_IRQ; i < LEVELS_PER_SLICE; i++)
			if (si->level_to_irq[i] == irq) {
				*cpunum = cpu;

				return i;
			}
	}

	panic("Could not identify cpu/level for irq %d\n", irq);
}

/*
 * Find the most significant bit set: returns the index of the highest
 * set bit in x (callers only pass a non-zero pending mask).
 */
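/*
 * Illustration (assumed values, not from the original code): for a
 * pending mask of 0x00480000, bits 19 and 22 are set and ms1bit()
 * returns 22, so the highest pending level gets serviced first.
 */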
static int ms1bit(unsigned long x)
{
	int b = 0, s;

	s = 16; if (x >> 16 == 0) s = 0; b += s; x >>= s;
	s =  8; if (x >>  8 == 0) s = 0; b += s; x >>= s;
	s =  4; if (x >>  4 == 0) s = 0; b += s; x >>= s;
	s =  2; if (x >>  2 == 0) s = 0; b += s; x >>= s;
	s =  1; if (x >>  1 == 0) s = 0; b += s;

	return b;
}

/*
 * This code is unnecessarily complex, because we do IRQF_DISABLED
 * intr enabling. Basically, once we grab the set of intrs we need
 * to service, we must mask _all_ of these interrupts; first, to make
 * sure the same intr does not fire again, causing recursion that
 * can lead to stack overflow. Second, we cannot just mask the one
 * intr we are servicing in do_IRQ(), because the non-masked intrs in
 * the first set might fire again, causing multiple servicings of the
 * same intr. This effect is mostly seen for intercpu intrs.
 * Kanoj 05.13.00
 */

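/*
 * Dispatch interrupts pending in the local hub's PI_INT_PEND0 register.
 * The low levels carry the SMP reschedule/call IPIs for both slices;
 * everything else is looked up through the slice's level_to_irq[] table
 * and handed to do_IRQ().
 */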
static void ip27_do_irq_mask0(void)
{
	int irq, swlevel;
	hubreg_t pend0, mask0;
	cpuid_t cpu = smp_processor_id();
	int pi_int_mask0 =
		(cputoslice(cpu) == 0) ?  PI_INT_MASK0_A : PI_INT_MASK0_B;

	/* copied from Irix intpend0() */
	pend0 = LOCAL_HUB_L(PI_INT_PEND0);
	mask0 = LOCAL_HUB_L(pi_int_mask0);

	pend0 &= mask0;		/* Pick intrs we should look at */
	if (!pend0)
		return;

	swlevel = ms1bit(pend0);
#ifdef CONFIG_SMP
	if (pend0 & (1UL << CPU_RESCHED_A_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ);
	} else if (pend0 & (1UL << CPU_RESCHED_B_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_RESCHED_B_IRQ);
	} else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
		smp_call_function_interrupt();
	} else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
		smp_call_function_interrupt();
	} else
#endif
	{
		/* "map" swlevel to irq */
		struct slice_data *si = cpu_data[cpu].data;

		irq = si->level_to_irq[swlevel];
		do_IRQ(irq);
	}

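	/*
	 * Read the pending register back; this presumably pushes the
	 * interrupt-clear writes above out to the hub before we return.
	 */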
	LOCAL_HUB_L(PI_INT_PEND0);
}

static void ip27_do_irq_mask1(void)
{
	int irq, swlevel;
	hubreg_t pend1, mask1;
	cpuid_t cpu = smp_processor_id();
	int pi_int_mask1 = (cputoslice(cpu) == 0) ?  PI_INT_MASK1_A : PI_INT_MASK1_B;
	struct slice_data *si = cpu_data[cpu].data;

	/* copied from Irix intpend0() */
	pend1 = LOCAL_HUB_L(PI_INT_PEND1);
	mask1 = LOCAL_HUB_L(pi_int_mask1);

	pend1 &= mask1;		/* Pick intrs we should look at */
	if (!pend1)
		return;

	swlevel = ms1bit(pend1);
	/* "map" swlevel to irq */
	irq = si->level_to_irq[swlevel];
	LOCAL_HUB_CLR_INTR(swlevel);
	do_IRQ(irq);

	LOCAL_HUB_L(PI_INT_PEND1);
}

static void ip27_prof_timer(void)
{
	panic("CPU %d got a profiling interrupt", smp_processor_id());
}

static void ip27_hub_error(void)
{
	panic("CPU %d got a hub error interrupt", smp_processor_id());
}

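/*
 * Enable the given software level for @cpu by setting its bit in the
 * slice's cached enable mask and writing both halves of the mask back
 * to the hub's PI_INT_MASK registers for the A or B slice as
 * appropriate.
 */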
static int intr_connect_level(int cpu, int bit)
{
	nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
	struct slice_data *si = cpu_data[cpu].data;
	unsigned long flags;

	set_bit(bit, si->irq_enable_mask);

	local_irq_save(flags);
	if (!cputoslice(cpu)) {
		REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]);
		REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]);
	} else {
		REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]);
		REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]);
	}
	local_irq_restore(flags);

	return 0;
}

static int intr_disconnect_level(int cpu, int bit)
{
	nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
	struct slice_data *si = cpu_data[cpu].data;

	clear_bit(bit, si->irq_enable_mask);

	if (!cputoslice(cpu)) {
		REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]);
		REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]);
	} else {
		REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]);
		REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]);
	}

	return 0;
}

/* Startup one of the (PCI ...) IRQs routed over a bridge.  */
static unsigned int startup_bridge_irq(unsigned int irq)
{
	struct bridge_controller *bc;
	bridgereg_t device;
	bridge_t *bridge;
	int pin, swlevel;
	cpuid_t cpu;

	pin = SLOT_FROM_PCI_IRQ(irq);
	bc = IRQ_TO_BRIDGE(irq);
	bridge = bc->base;

	pr_debug("bridge_startup(): irq= 0x%x  pin=%d\n", irq, pin);
	/*
	 * "map" irq to a swlevel greater than 6 since the first 6 bits
	 * of INT_PEND0 are taken
	 */
	swlevel = find_level(&cpu, irq);
	bridge->b_int_addr[pin].addr = (0x20000 | swlevel | (bc->nasid << 8));
	bridge->b_int_enable |= (1 << pin);
	bridge->b_int_enable |= 0x7ffffe00;	/* more stuff in int_enable */

	/*
	 * Enable sending of an interrupt clear packet to the hub on a high to
	 * low transition of the interrupt pin.
	 *
	 * IRIX sets additional bits in the address which are documented as
	 * reserved in the bridge docs.
	 */
	bridge->b_int_mode |= (1UL << pin);

	/*
	 * We assume the bridge to have a 1:1 mapping between devices
	 * (slots) and intr pins.
	 */
	device = bridge->b_int_device;
	device &= ~(7 << (pin*3));
	device |= (pin << (pin*3));
	bridge->b_int_device = device;

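	/*
	 * Reading the widget's target flush register presumably stalls
	 * until the Bridge has drained the writes above, so the interrupt
	 * routing is in place before we return.
	 */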
	bridge->b_wid_tflush;

	return 0;	/* Never anything pending.  */
}

/* Shutdown one of the (PCI ...) IRQs routed over a bridge.  */
static void shutdown_bridge_irq(unsigned int irq)
{
	struct bridge_controller *bc = IRQ_TO_BRIDGE(irq);
	struct hub_data *hub = hub_data(cpu_to_node(bc->irq_cpu));
	bridge_t *bridge = bc->base;
	int pin, swlevel;
	cpuid_t cpu;

	pr_debug("bridge_shutdown: irq 0x%x\n", irq);
	pin = SLOT_FROM_PCI_IRQ(irq);

	/*
	 * map irq to a swlevel greater than 6 since the first 6 bits
	 * of INT_PEND0 are taken
	 */
	swlevel = find_level(&cpu, irq);
	intr_disconnect_level(cpu, swlevel);

	__clear_bit(swlevel, hub->irq_alloc_mask);

	bridge->b_int_enable &= ~(1 << pin);
	bridge->b_wid_tflush;
}

static inline void enable_bridge_irq(unsigned int irq)
{
	cpuid_t cpu;
	int swlevel;

	swlevel = find_level(&cpu, irq);	/* Criminal offence */
	intr_connect_level(cpu, swlevel);
}

static inline void disable_bridge_irq(unsigned int irq)
{
	cpuid_t cpu;
	int swlevel;

	swlevel = find_level(&cpu, irq);	/* Criminal offence */
	intr_disconnect_level(cpu, swlevel);
}

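/*
 * Hook the bridge interrupts into the generic irq layer.  With
 * handle_level_irq, mask/mask_ack and unmask are what actually gate the
 * interrupt, by disconnecting or reconnecting the software level at the
 * hub; startup/shutdown additionally program the Bridge's interrupt
 * routing registers.
 */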
static struct irq_chip bridge_irq_type = {
	.name		= "bridge",
	.startup	= startup_bridge_irq,
	.shutdown	= shutdown_bridge_irq,
	.ack		= disable_bridge_irq,
	.mask		= disable_bridge_irq,
	.mask_ack	= disable_bridge_irq,
	.unmask		= enable_bridge_irq,
};

void __devinit register_bridge_irq(unsigned int irq)
{
	set_irq_chip_and_handler(irq, &bridge_irq_type, handle_level_irq);
}

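/*
 * Allocate a Linux irq number for a bridge, bind it to a free software
 * level on the bridge's interrupt CPU, make sure nothing is pending for
 * that level, unmask it at the hub and register the bridge irq_chip.
 * Returns the irq number, or a negative errno on failure.
 */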
int __devinit request_bridge_irq(struct bridge_controller *bc)
{
	int irq = allocate_irqno();
	int swlevel, cpu;
	nasid_t nasid;

	if (irq < 0)
		return irq;

	/*
	 * "map" irq to a swlevel greater than 6 since the first 6 bits
	 * of INT_PEND0 are taken
	 */
	cpu = bc->irq_cpu;
	swlevel = alloc_level(cpu, irq);
	if (unlikely(swlevel < 0)) {
		free_irqno(irq);

		return -EAGAIN;
	}

	/* Make sure it's not already pending when we connect it. */
	nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
	REMOTE_HUB_CLR_INTR(nasid, swlevel);

	intr_connect_level(cpu, swlevel);

	register_bridge_irq(irq);

	return irq;
}

extern void ip27_rt_timer_interrupt(void);

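/*
 * Top-level MIPS interrupt dispatch: the hub signals its pending levels
 * on the CPU's CP0 interrupt lines, so IP2/IP3 mean "scan
 * PI_INT_PEND0/1", while IP4, IP5 and IP6 are the RT timer, the
 * profiling timer and hub error interrupts respectively.
 */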
asmlinkage void plat_irq_dispatch(void)
{
	unsigned long pending = read_c0_cause() & read_c0_status();

	if (pending & CAUSEF_IP4)
		ip27_rt_timer_interrupt();
	else if (pending & CAUSEF_IP2)	/* PI_INT_PEND_0 or CC_PEND_{A|B} */
		ip27_do_irq_mask0();
	else if (pending & CAUSEF_IP3)	/* PI_INT_PEND_1 */
		ip27_do_irq_mask1();
	else if (pending & CAUSEF_IP5)
		ip27_prof_timer();
	else if (pending & CAUSEF_IP6)
		ip27_hub_error();
}

void __init arch_init_irq(void)
{
}

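/*
 * Set up the per-CPU IPI levels.  Presumably called on each CPU as it is
 * brought up: the reschedule and call-function levels for this slice are
 * reserved in the hub's allocation mask, cleared at the hub and enabled
 * in the slice's PI_INT_MASK registers.
 */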
void install_ipi(void)
{
	int slice = LOCAL_HUB_L(PI_CPU_NUM);
	int cpu = smp_processor_id();
	struct slice_data *si = cpu_data[cpu].data;
	struct hub_data *hub = hub_data(cpu_to_node(cpu));
	int resched, call;

	resched = CPU_RESCHED_A_IRQ + slice;
	__set_bit(resched, hub->irq_alloc_mask);
	__set_bit(resched, si->irq_enable_mask);
	LOCAL_HUB_CLR_INTR(resched);

	call = CPU_CALL_A_IRQ + slice;
	__set_bit(call, hub->irq_alloc_mask);
	__set_bit(call, si->irq_enable_mask);
	LOCAL_HUB_CLR_INTR(call);

	if (slice == 0) {
		LOCAL_HUB_S(PI_INT_MASK0_A, si->irq_enable_mask[0]);
		LOCAL_HUB_S(PI_INT_MASK1_A, si->irq_enable_mask[1]);
	} else {
		LOCAL_HUB_S(PI_INT_MASK0_B, si->irq_enable_mask[0]);
		LOCAL_HUB_S(PI_INT_MASK1_B, si->irq_enable_mask[1]);
	}
}