/* xref: /linux/arch/powerpc/platforms/44x/uic.c (revision 2bd1bea5fa6aa79bc563a57919730eb809651b28) */
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * arch/powerpc/sysdev/uic.c
4  *
5  * IBM PowerPC 4xx Universal Interrupt Controller
6  *
7  * Copyright 2007 David Gibson <dwg@au1.ibm.com>, IBM Corporation.
8  */
9 #include <linux/kernel.h>
10 #include <linux/init.h>
11 #include <linux/errno.h>
12 #include <linux/reboot.h>
13 #include <linux/slab.h>
14 #include <linux/stddef.h>
15 #include <linux/sched.h>
16 #include <linux/signal.h>
17 #include <linux/device.h>
18 #include <linux/spinlock.h>
19 #include <linux/irq.h>
20 #include <linux/interrupt.h>
21 #include <linux/kernel_stat.h>
22 #include <linux/of.h>
23 #include <linux/of_irq.h>
24 #include <asm/irq.h>
25 #include <asm/io.h>
26 #include <asm/dcr.h>
27 #include <asm/uic.h>
28 
#define NR_UIC_INTS	32	/* interrupt sources per UIC */

/* DCR offsets of the UIC registers, relative to the "dcr-reg" base */
#define UIC_SR		0x0	/* Status: write 1 to a bit to ack that source */
#define UIC_ER		0x2	/* Enable: 1 = source unmasked */
#define UIC_CR		0x3	/* Critical: cleared at init (all non-critical) */
#define UIC_PR		0x4	/* Polarity: 1 = active high / rising */
#define UIC_TR		0x5	/* Triggering: 1 = edge, 0 = level */
#define UIC_MSR		0x6	/* Masked status (pending sources; see cascade) */
#define UIC_VR		0x7	/* unused in this file */
#define UIC_VCR		0x8	/* unused in this file */
39 
/* The top-level UIC, located and set up by uic_init_tree() */
static struct uic *primary_uic;

/* Per-controller state for one UIC instance */
struct uic {
	int index;		/* controller number, from the "cell-index" DT property */
	int dcrbase;		/* base DCR number, from the "dcr-reg" DT property */

	/* serializes read-modify-write sequences on the UIC DCRs */
	raw_spinlock_t lock;

	/* The remapper for this UIC */
	struct irq_domain	*irqhost;
};
51 
uic_unmask_irq(struct irq_data * d)52 static void uic_unmask_irq(struct irq_data *d)
53 {
54 	struct uic *uic = irq_data_get_irq_chip_data(d);
55 	unsigned int src = irqd_to_hwirq(d);
56 	unsigned long flags;
57 	u32 er, sr;
58 
59 	sr = 1 << (31-src);
60 	raw_spin_lock_irqsave(&uic->lock, flags);
61 	/* ack level-triggered interrupts here */
62 	if (irqd_is_level_type(d))
63 		mtdcr(uic->dcrbase + UIC_SR, sr);
64 	er = mfdcr(uic->dcrbase + UIC_ER);
65 	er |= sr;
66 	mtdcr(uic->dcrbase + UIC_ER, er);
67 	raw_spin_unlock_irqrestore(&uic->lock, flags);
68 }
69 
uic_mask_irq(struct irq_data * d)70 static void uic_mask_irq(struct irq_data *d)
71 {
72 	struct uic *uic = irq_data_get_irq_chip_data(d);
73 	unsigned int src = irqd_to_hwirq(d);
74 	unsigned long flags;
75 	u32 er;
76 
77 	raw_spin_lock_irqsave(&uic->lock, flags);
78 	er = mfdcr(uic->dcrbase + UIC_ER);
79 	er &= ~(1 << (31 - src));
80 	mtdcr(uic->dcrbase + UIC_ER, er);
81 	raw_spin_unlock_irqrestore(&uic->lock, flags);
82 }
83 
uic_ack_irq(struct irq_data * d)84 static void uic_ack_irq(struct irq_data *d)
85 {
86 	struct uic *uic = irq_data_get_irq_chip_data(d);
87 	unsigned int src = irqd_to_hwirq(d);
88 	unsigned long flags;
89 
90 	raw_spin_lock_irqsave(&uic->lock, flags);
91 	mtdcr(uic->dcrbase + UIC_SR, 1 << (31-src));
92 	raw_spin_unlock_irqrestore(&uic->lock, flags);
93 }
94 
/* Mask a source and, for edge interrupts only, ack it as well. */
static void uic_mask_ack_irq(struct irq_data *d)
{
	struct uic *uic = irq_data_get_irq_chip_data(d);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	u32 er, sr;

	sr = 1 << (31-src);	/* IBM bit numbering: MSB is source 0 */
	raw_spin_lock_irqsave(&uic->lock, flags);
	er = mfdcr(uic->dcrbase + UIC_ER);
	er &= ~sr;
	mtdcr(uic->dcrbase + UIC_ER, er);
	/* On the UIC, acking (i.e. clearing the SR bit)
	 * a level irq will have no effect if the interrupt
	 * is still asserted by the device, even if
	 * the interrupt is already masked. Therefore
	 * we only ack the edge interrupts here, while
	 * level interrupts are ack'ed after the actual
	 * isr call in the uic_unmask_irq()
	 */
	if (!irqd_is_level_type(d))
		mtdcr(uic->dcrbase + UIC_SR, sr);
	raw_spin_unlock_irqrestore(&uic->lock, flags);
}
119 
uic_set_irq_type(struct irq_data * d,unsigned int flow_type)120 static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type)
121 {
122 	struct uic *uic = irq_data_get_irq_chip_data(d);
123 	unsigned int src = irqd_to_hwirq(d);
124 	unsigned long flags;
125 	int trigger, polarity;
126 	u32 tr, pr, mask;
127 
128 	switch (flow_type & IRQ_TYPE_SENSE_MASK) {
129 	case IRQ_TYPE_NONE:
130 		uic_mask_irq(d);
131 		return 0;
132 
133 	case IRQ_TYPE_EDGE_RISING:
134 		trigger = 1; polarity = 1;
135 		break;
136 	case IRQ_TYPE_EDGE_FALLING:
137 		trigger = 1; polarity = 0;
138 		break;
139 	case IRQ_TYPE_LEVEL_HIGH:
140 		trigger = 0; polarity = 1;
141 		break;
142 	case IRQ_TYPE_LEVEL_LOW:
143 		trigger = 0; polarity = 0;
144 		break;
145 	default:
146 		return -EINVAL;
147 	}
148 
149 	mask = ~(1 << (31 - src));
150 
151 	raw_spin_lock_irqsave(&uic->lock, flags);
152 	tr = mfdcr(uic->dcrbase + UIC_TR);
153 	pr = mfdcr(uic->dcrbase + UIC_PR);
154 	tr = (tr & mask) | (trigger << (31-src));
155 	pr = (pr & mask) | (polarity << (31-src));
156 
157 	mtdcr(uic->dcrbase + UIC_PR, pr);
158 	mtdcr(uic->dcrbase + UIC_TR, tr);
159 	mtdcr(uic->dcrbase + UIC_SR, ~mask);
160 
161 	raw_spin_unlock_irqrestore(&uic->lock, flags);
162 
163 	return 0;
164 }
165 
/* irq_chip operations shared by every UIC instance; the per-instance
 * state is carried in the irq chip data (struct uic *). */
static struct irq_chip uic_irq_chip = {
	.name		= "UIC",
	.irq_unmask	= uic_unmask_irq,
	.irq_mask	= uic_mask_irq,
	.irq_mask_ack	= uic_mask_ack_irq,
	.irq_ack	= uic_ack_irq,
	.irq_set_type	= uic_set_irq_type,
};
174 
/* irq_domain map callback: bind a freshly created virq to this UIC. */
static int uic_host_map(struct irq_domain *h, unsigned int virq,
			irq_hw_number_t hw)
{
	struct uic *priv = h->host_data;

	irq_set_chip_data(virq, priv);
	/* handle_level_irq() copes with both level and edge sources on
	 * the UIC, despite its name.  FIXME: check this is correct */
	irq_set_chip_and_handler(virq, &uic_irq_chip, handle_level_irq);

	/* Default to no configured sense */
	irq_set_irq_type(virq, IRQ_TYPE_NONE);

	return 0;
}
190 
/* Domain ops: standard two-cell DT interrupt specifiers (source, sense) */
static const struct irq_domain_ops uic_host_ops = {
	.map	= uic_host_map,
	.xlate	= irq_domain_xlate_twocell,
};
195 
/*
 * Chained handler for a secondary (cascaded) UIC: runs when the parent
 * UIC's cascade input fires.  Quiesces the cascade input, dispatches
 * the pending source on the child UIC, then re-arms the cascade input.
 */
static void uic_irq_cascade(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irq_data *idata = irq_desc_get_irq_data(desc);
	struct uic *uic = irq_desc_get_handler_data(desc);
	u32 msr;
	int src;

	/* Level cascade inputs are only masked here (acked below, after
	 * the child source has been handled); edge inputs are masked
	 * and acked up front. */
	raw_spin_lock(&desc->lock);
	if (irqd_is_level_type(idata))
		chip->irq_mask(idata);
	else
		chip->irq_mask_ack(idata);
	raw_spin_unlock(&desc->lock);

	msr = mfdcr(uic->dcrbase + UIC_MSR);
	if (!msr) /* spurious interrupt */
		goto uic_irq_ret;

	/* Convert the lowest set bit to a source number — IBM bit
	 * numbering, MSB is source 0 */
	src = 32 - ffs(msr);

	generic_handle_domain_irq(uic->irqhost, src);

uic_irq_ret:
	/* Re-arm the cascade input on the parent */
	raw_spin_lock(&desc->lock);
	if (irqd_is_level_type(idata))
		chip->irq_ack(idata);
	if (!irqd_irq_disabled(idata) && chip->irq_unmask)
		chip->irq_unmask(idata);
	raw_spin_unlock(&desc->lock);
}
227 
uic_init_one(struct device_node * node)228 static struct uic * __init uic_init_one(struct device_node *node)
229 {
230 	struct uic *uic;
231 	const u32 *indexp, *dcrreg;
232 	int len;
233 
234 	BUG_ON(! of_device_is_compatible(node, "ibm,uic"));
235 
236 	uic = kzalloc(sizeof(*uic), GFP_KERNEL);
237 	if (! uic)
238 		return NULL; /* FIXME: panic? */
239 
240 	raw_spin_lock_init(&uic->lock);
241 	indexp = of_get_property(node, "cell-index", &len);
242 	if (!indexp || (len != sizeof(u32))) {
243 		printk(KERN_ERR "uic: Device node %pOF has missing or invalid "
244 		       "cell-index property\n", node);
245 		return NULL;
246 	}
247 	uic->index = *indexp;
248 
249 	dcrreg = of_get_property(node, "dcr-reg", &len);
250 	if (!dcrreg || (len != 2*sizeof(u32))) {
251 		printk(KERN_ERR "uic: Device node %pOF has missing or invalid "
252 		       "dcr-reg property\n", node);
253 		return NULL;
254 	}
255 	uic->dcrbase = *dcrreg;
256 
257 	uic->irqhost = irq_domain_create_linear(of_fwnode_handle(node),
258 						NR_UIC_INTS, &uic_host_ops,
259 						uic);
260 	if (! uic->irqhost)
261 		return NULL; /* FIXME: panic? */
262 
263 	/* Start with all interrupts disabled, level and non-critical */
264 	mtdcr(uic->dcrbase + UIC_ER, 0);
265 	mtdcr(uic->dcrbase + UIC_CR, 0);
266 	mtdcr(uic->dcrbase + UIC_TR, 0);
267 	/* Clear any pending interrupts, in case the firmware left some */
268 	mtdcr(uic->dcrbase + UIC_SR, 0xffffffff);
269 
270 	printk ("UIC%d (%d IRQ sources) at DCR 0x%x\n", uic->index,
271 		NR_UIC_INTS, uic->dcrbase);
272 
273 	return uic;
274 }
275 
/*
 * Find and initialize all UICs in the device tree.  The UIC with no
 * "interrupts" property is the top-level (primary) controller; every
 * other UIC cascades into a parent input.
 */
void __init uic_init_tree(void)
{
	struct device_node *np;
	struct uic *uic;
	const u32 *interrupts;

	/* First locate and initialize the top-level UIC */
	for_each_compatible_node(np, NULL, "ibm,uic") {
		interrupts = of_get_property(np, "interrupts", NULL);
		if (!interrupts)
			break;	/* no parent input: this is the root UIC */
	}

	BUG_ON(!np); /* uic_init_tree() assumes there's a UIC as the
		      * top-level interrupt controller */
	primary_uic = uic_init_one(np);
	if (!primary_uic)
		panic("Unable to initialize primary UIC %pOF\n", np);

	irq_set_default_domain(primary_uic->irqhost);
	/* drop the node reference still held from the early break above */
	of_node_put(np);

	/* Then scan again for cascaded UICs */
	for_each_compatible_node(np, NULL, "ibm,uic") {
		interrupts = of_get_property(np, "interrupts", NULL);
		if (interrupts) {
			/* Secondary UIC: chain it off its parent input */
			int cascade_virq;

			uic = uic_init_one(np);
			if (! uic)
				panic("Unable to initialize a secondary UIC %pOF\n",
				      np);

			cascade_virq = irq_of_parse_and_map(np, 0);

			irq_set_handler_data(cascade_virq, uic);
			irq_set_chained_handler(cascade_virq, uic_irq_cascade);

			/* FIXME: setup critical cascade?? */
		}
	}
}
319 
/* Return an interrupt vector or 0 if no interrupt is pending. */
unsigned int uic_get_irq(void)
{
	u32 msr;
	int src;

	BUG_ON(! primary_uic);

	msr = mfdcr(primary_uic->dcrbase + UIC_MSR);
	/* IBM bit numbering: MSB is source 0.  When msr is 0, ffs()
	 * returns 0, so src is 32 — outside the 32-entry domain — and
	 * irq_find_mapping() yields 0, satisfying the contract above. */
	src = 32 - ffs(msr);

	return irq_find_mapping(primary_uic->irqhost, src);
}
333