xref: /linux/drivers/soc/fsl/qe/qe_ic.c (revision 297d9111e9fcf47dd1dcc6f79bba915f35378d01)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * arch/powerpc/sysdev/qe_lib/qe_ic.c
4  *
5  * Copyright (C) 2006 Freescale Semiconductor, Inc.  All rights reserved.
6  *
7  * Author: Li Yang <leoli@freescale.com>
8  * Based on code from Shlomi Gridish <gridish@freescale.com>
9  *
10  * QUICC ENGINE Interrupt Controller
11  */
12 
13 #include <linux/of_irq.h>
14 #include <linux/of_address.h>
15 #include <linux/kernel.h>
16 #include <linux/init.h>
17 #include <linux/errno.h>
18 #include <linux/irq.h>
19 #include <linux/reboot.h>
20 #include <linux/slab.h>
21 #include <linux/stddef.h>
22 #include <linux/sched.h>
23 #include <linux/signal.h>
24 #include <linux/device.h>
25 #include <linux/spinlock.h>
26 #include <linux/platform_device.h>
27 #include <asm/irq.h>
28 #include <asm/io.h>
29 #include <soc/fsl/qe/qe.h>
30 
31 #define NR_QE_IC_INTS		64
32 
33 /* QE IC registers offset */
34 #define QEIC_CICR		0x00
35 #define QEIC_CIVEC		0x04
36 #define QEIC_CIPXCC		0x10
37 #define QEIC_CIPYCC		0x14
38 #define QEIC_CIPWCC		0x18
39 #define QEIC_CIPZCC		0x1c
40 #define QEIC_CIMR		0x20
41 #define QEIC_CRIMR		0x24
42 #define QEIC_CIPRTA		0x30
43 #define QEIC_CIPRTB		0x34
44 #define QEIC_CHIVEC		0x60
45 
/* Per-controller state for one QUICC Engine interrupt controller instance. */
struct qe_ic {
	/* Base of the memory-mapped QEIC register block (big-endian regs) */
	__be32 __iomem *regs;

	/* The remapper (irq domain) translating QEIC hw irq numbers to VIRQs */
	struct irq_domain *irqhost;

	/* The "linux" controller struct; copied from qe_ic_irq_chip at init */
	struct irq_chip hc_irq;

	/* VIRQ numbers of the parent QE high/low cascade interrupts */
	int virq_high;
	int virq_low;
};
60 
/*
 * Per-source description of one QE interrupt: where its mask bit lives
 * and where its group priority code is programmed.
 */
struct qe_ic_info {
	/* Location of this source at the QIMR register (single mask bit) */
	u32	mask;

	/* Mask register offset (QEIC_CIMR or QEIC_CRIMR) */
	u32	mask_reg;

	/*
	 * For grouped interrupts sources - the interrupt code as
	 * appears at the group priority register
	 */
	u8	pri_code;

	/* Group priority register offset (one of the QEIC_CIP*CC regs) */
	u32	pri_reg;
};
80 
81 static DEFINE_RAW_SPINLOCK(qe_ic_lock);
82 
/*
 * Table indexed by QEIC hardware interrupt number (0..NR_QE_IC_INTS-1,
 * via designated initializers).  Holes in the index range are reserved
 * sources: their .mask stays 0 and qe_ic_host_map() refuses to map them.
 */
static struct qe_ic_info qe_ic_info[] = {
	[1] = {
	       .mask = 0x00008000,
	       .mask_reg = QEIC_CIMR,
	       .pri_code = 0,
	       .pri_reg = QEIC_CIPWCC,
	       },
	[2] = {
	       .mask = 0x00004000,
	       .mask_reg = QEIC_CIMR,
	       .pri_code = 1,
	       .pri_reg = QEIC_CIPWCC,
	       },
	[3] = {
	       .mask = 0x00002000,
	       .mask_reg = QEIC_CIMR,
	       .pri_code = 2,
	       .pri_reg = QEIC_CIPWCC,
	       },
	[10] = {
		.mask = 0x00000040,
		.mask_reg = QEIC_CIMR,
		.pri_code = 1,
		.pri_reg = QEIC_CIPZCC,
		},
	[11] = {
		.mask = 0x00000020,
		.mask_reg = QEIC_CIMR,
		.pri_code = 2,
		.pri_reg = QEIC_CIPZCC,
		},
	[12] = {
		.mask = 0x00000010,
		.mask_reg = QEIC_CIMR,
		.pri_code = 3,
		.pri_reg = QEIC_CIPZCC,
		},
	[13] = {
		.mask = 0x00000008,
		.mask_reg = QEIC_CIMR,
		.pri_code = 4,
		.pri_reg = QEIC_CIPZCC,
		},
	[14] = {
		.mask = 0x00000004,
		.mask_reg = QEIC_CIMR,
		.pri_code = 5,
		.pri_reg = QEIC_CIPZCC,
		},
	[15] = {
		.mask = 0x00000002,
		.mask_reg = QEIC_CIMR,
		.pri_code = 6,
		.pri_reg = QEIC_CIPZCC,
		},
	[20] = {
		.mask = 0x10000000,
		.mask_reg = QEIC_CRIMR,
		.pri_code = 3,
		.pri_reg = QEIC_CIPRTA,
		},
	[25] = {
		.mask = 0x00800000,
		.mask_reg = QEIC_CRIMR,
		.pri_code = 0,
		.pri_reg = QEIC_CIPRTB,
		},
	[26] = {
		.mask = 0x00400000,
		.mask_reg = QEIC_CRIMR,
		.pri_code = 1,
		.pri_reg = QEIC_CIPRTB,
		},
	[27] = {
		.mask = 0x00200000,
		.mask_reg = QEIC_CRIMR,
		.pri_code = 2,
		.pri_reg = QEIC_CIPRTB,
		},
	[28] = {
		.mask = 0x00100000,
		.mask_reg = QEIC_CRIMR,
		.pri_code = 3,
		.pri_reg = QEIC_CIPRTB,
		},
	[32] = {
		.mask = 0x80000000,
		.mask_reg = QEIC_CIMR,
		.pri_code = 0,
		.pri_reg = QEIC_CIPXCC,
		},
	[33] = {
		.mask = 0x40000000,
		.mask_reg = QEIC_CIMR,
		.pri_code = 1,
		.pri_reg = QEIC_CIPXCC,
		},
	[34] = {
		.mask = 0x20000000,
		.mask_reg = QEIC_CIMR,
		.pri_code = 2,
		.pri_reg = QEIC_CIPXCC,
		},
	[35] = {
		.mask = 0x10000000,
		.mask_reg = QEIC_CIMR,
		.pri_code = 3,
		.pri_reg = QEIC_CIPXCC,
		},
	[36] = {
		.mask = 0x08000000,
		.mask_reg = QEIC_CIMR,
		.pri_code = 4,
		.pri_reg = QEIC_CIPXCC,
		},
	[40] = {
		.mask = 0x00800000,
		.mask_reg = QEIC_CIMR,
		.pri_code = 0,
		.pri_reg = QEIC_CIPYCC,
		},
	[41] = {
		.mask = 0x00400000,
		.mask_reg = QEIC_CIMR,
		.pri_code = 1,
		.pri_reg = QEIC_CIPYCC,
		},
	[42] = {
		.mask = 0x00200000,
		.mask_reg = QEIC_CIMR,
		.pri_code = 2,
		.pri_reg = QEIC_CIPYCC,
		},
	[43] = {
		.mask = 0x00100000,
		.mask_reg = QEIC_CIMR,
		.pri_code = 3,
		.pri_reg = QEIC_CIPYCC,
		},
};
223 
qe_ic_read(__be32 __iomem * base,unsigned int reg)224 static inline u32 qe_ic_read(__be32  __iomem *base, unsigned int reg)
225 {
226 	return ioread32be(base + (reg >> 2));
227 }
228 
/* Write a big-endian QEIC register; reg is a byte offset into the block. */
static inline void qe_ic_write(__be32 __iomem *base, unsigned int reg,
			       u32 value)
{
	/* base is a __be32 pointer, so scale the byte offset to words. */
	__be32 __iomem *addr = base + (reg >> 2);

	iowrite32be(value, addr);
}
234 
/* Recover the owning qe_ic; chip_data is set in qe_ic_host_map(). */
static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d)
{
	void *chip_data = irq_data_get_irq_chip_data(d);

	return chip_data;
}
239 
qe_ic_unmask_irq(struct irq_data * d)240 static void qe_ic_unmask_irq(struct irq_data *d)
241 {
242 	struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
243 	unsigned int src = irqd_to_hwirq(d);
244 	unsigned long flags;
245 	u32 temp;
246 
247 	raw_spin_lock_irqsave(&qe_ic_lock, flags);
248 
249 	temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
250 	qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
251 		    temp | qe_ic_info[src].mask);
252 
253 	raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
254 }
255 
qe_ic_mask_irq(struct irq_data * d)256 static void qe_ic_mask_irq(struct irq_data *d)
257 {
258 	struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
259 	unsigned int src = irqd_to_hwirq(d);
260 	unsigned long flags;
261 	u32 temp;
262 
263 	raw_spin_lock_irqsave(&qe_ic_lock, flags);
264 
265 	temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
266 	qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
267 		    temp & ~qe_ic_info[src].mask);
268 
269 	/* Flush the above write before enabling interrupts; otherwise,
270 	 * spurious interrupts will sometimes happen.  To be 100% sure
271 	 * that the write has reached the device before interrupts are
272 	 * enabled, the mask register would have to be read back; however,
273 	 * this is not required for correctness, only to avoid wasting
274 	 * time on a large number of spurious interrupts.  In testing,
275 	 * a sync reduced the observed spurious interrupts to zero.
276 	 */
277 	mb();
278 
279 	raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
280 }
281 
/*
 * irq_chip for all QEIC sources.  Level-triggered sources are "acked"
 * simply by masking, so irq_mask_ack reuses qe_ic_mask_irq.
 */
static struct irq_chip qe_ic_irq_chip = {
	.name = "QEIC",
	.irq_unmask = qe_ic_unmask_irq,
	.irq_mask = qe_ic_mask_irq,
	.irq_mask_ack = qe_ic_mask_irq,
};
288 
/* irq_domain .match: exact OF-node match, or match-all if we have none. */
static int qe_ic_host_match(struct irq_domain *h, struct device_node *node,
			    enum irq_domain_bus_token bus_token)
{
	struct device_node *of_node = irq_domain_get_of_node(h);

	/* A domain without a device node matches any requester. */
	if (!of_node)
		return 1;

	return of_node == node;
}
296 
/*
 * irq_domain .map: wire up a newly-mapped VIRQ for QEIC hardware irq @hw.
 * Rejects out-of-range numbers and reserved sources (mask == 0 holes in
 * qe_ic_info[]), then installs the level-flow handler and chip data.
 *
 * Returns 0 on success, -EINVAL for invalid/reserved sources.
 */
static int qe_ic_host_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct qe_ic *qe_ic = h->host_data;
	struct irq_chip *chip;

	if (hw >= ARRAY_SIZE(qe_ic_info)) {
		pr_err("%s: Invalid hw irq number for QEIC\n", __func__);
		return -EINVAL;
	}

	if (qe_ic_info[hw].mask == 0) {
		/* Use pr_err for consistency with the check above. */
		pr_err("Can't map reserved IRQ\n");
		return -EINVAL;
	}
	/* Default chip */
	chip = &qe_ic->hc_irq;

	irq_set_chip_data(virq, qe_ic);
	irq_set_status_flags(virq, IRQ_LEVEL);

	irq_set_chip_and_handler(virq, chip, handle_level_irq);

	return 0;
}
322 
/*
 * Domain ops: onetwocell xlate accepts both one-cell and two-cell
 * interrupt specifiers from the device tree.
 */
static const struct irq_domain_ops qe_ic_host_ops = {
	.match = qe_ic_host_match,
	.map = qe_ic_host_map,
	.xlate = irq_domain_xlate_onetwocell,
};
328 
329 /* Return an interrupt vector or 0 if no interrupt is pending. */
qe_ic_get_low_irq(struct qe_ic * qe_ic)330 static unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
331 {
332 	int irq;
333 
334 	BUG_ON(qe_ic == NULL);
335 
336 	/* get the interrupt source vector. */
337 	irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26;
338 
339 	if (irq == 0)
340 		return 0;
341 
342 	return irq_find_mapping(qe_ic->irqhost, irq);
343 }
344 
345 /* Return an interrupt vector or 0 if no interrupt is pending. */
qe_ic_get_high_irq(struct qe_ic * qe_ic)346 static unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
347 {
348 	int irq;
349 
350 	BUG_ON(qe_ic == NULL);
351 
352 	/* get the interrupt source vector. */
353 	irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26;
354 
355 	if (irq == 0)
356 		return 0;
357 
358 	return irq_find_mapping(qe_ic->irqhost, irq);
359 }
360 
qe_ic_cascade_low(struct irq_desc * desc)361 static void qe_ic_cascade_low(struct irq_desc *desc)
362 {
363 	struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
364 	unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
365 	struct irq_chip *chip = irq_desc_get_chip(desc);
366 
367 	if (cascade_irq != 0)
368 		generic_handle_irq(cascade_irq);
369 
370 	if (chip->irq_eoi)
371 		chip->irq_eoi(&desc->irq_data);
372 }
373 
qe_ic_cascade_high(struct irq_desc * desc)374 static void qe_ic_cascade_high(struct irq_desc *desc)
375 {
376 	struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
377 	unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
378 	struct irq_chip *chip = irq_desc_get_chip(desc);
379 
380 	if (cascade_irq != 0)
381 		generic_handle_irq(cascade_irq);
382 
383 	if (chip->irq_eoi)
384 		chip->irq_eoi(&desc->irq_data);
385 }
386 
qe_ic_cascade_muxed_mpic(struct irq_desc * desc)387 static void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
388 {
389 	struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
390 	unsigned int cascade_irq;
391 	struct irq_chip *chip = irq_desc_get_chip(desc);
392 
393 	cascade_irq = qe_ic_get_high_irq(qe_ic);
394 	if (cascade_irq == 0)
395 		cascade_irq = qe_ic_get_low_irq(qe_ic);
396 
397 	if (cascade_irq != 0)
398 		generic_handle_irq(cascade_irq);
399 
400 	chip->irq_eoi(&desc->irq_data);
401 }
402 
/*
 * Probe one QEIC instance: map its registers, create the irq domain,
 * and chain it off the parent (MPIC) interrupt(s).
 *
 * Two wirings are supported:
 *  - distinct high and low parent IRQs -> separate cascade handlers;
 *  - a single (or missing/identical high) parent IRQ -> the muxed
 *    handler that polls the high group first, then the low group.
 *
 * All allocations/mappings are devm-managed, so error paths simply
 * return. Returns 0 on success or a negative errno.
 */
static int qe_ic_init(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	void (*low_handler)(struct irq_desc *desc);
	void (*high_handler)(struct irq_desc *desc);
	struct qe_ic *qe_ic;
	struct resource *res;
	struct device_node *node = pdev->dev.of_node;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(dev, "no memory resource defined\n");
		return -ENODEV;
	}

	qe_ic = devm_kzalloc(dev, sizeof(*qe_ic), GFP_KERNEL);
	if (qe_ic == NULL)
		return -ENOMEM;

	qe_ic->regs = devm_ioremap(dev, res->start, resource_size(res));
	if (qe_ic->regs == NULL) {
		dev_err(dev, "failed to ioremap() registers\n");
		return -ENODEV;
	}

	qe_ic->hc_irq = qe_ic_irq_chip;

	qe_ic->virq_high = platform_get_irq(pdev, 0);
	qe_ic->virq_low = platform_get_irq(pdev, 1);

	/* The low parent IRQ is mandatory; the high one is optional. */
	if (qe_ic->virq_low <= 0)
		return -ENODEV;

	if (qe_ic->virq_high > 0 && qe_ic->virq_high != qe_ic->virq_low) {
		low_handler = qe_ic_cascade_low;
		high_handler = qe_ic_cascade_high;
	} else {
		/* Single shared parent: one handler polls both groups. */
		low_handler = qe_ic_cascade_muxed_mpic;
		high_handler = NULL;
	}

	qe_ic->irqhost = irq_domain_create_linear(of_fwnode_handle(node), NR_QE_IC_INTS,
						  &qe_ic_host_ops, qe_ic);
	if (qe_ic->irqhost == NULL) {
		dev_err(dev, "failed to add irq domain\n");
		return -ENODEV;
	}

	/* Clear CICR before chaining so no stale configuration is live. */
	qe_ic_write(qe_ic->regs, QEIC_CICR, 0);

	irq_set_chained_handler_and_data(qe_ic->virq_low, low_handler, qe_ic);

	if (high_handler)
		irq_set_chained_handler_and_data(qe_ic->virq_high,
						 high_handler, qe_ic);
	return 0;
}
/* OF match table: modern "fsl,qe-ic" compatible plus legacy "qeic" type. */
static const struct of_device_id qe_ic_ids[] = {
	{ .compatible = "fsl,qe-ic"},
	{ .type = "qeic"},
	{},
};
465 
/* Platform driver glue; probing is done by qe_ic_init(). */
static struct platform_driver qe_ic_driver =
{
	.driver	= {
		.name		= "qe-ic",
		.of_match_table	= qe_ic_ids,
	},
	.probe	= qe_ic_init,
};
474 
qe_ic_of_init(void)475 static int __init qe_ic_of_init(void)
476 {
477 	platform_driver_register(&qe_ic_driver);
478 	return 0;
479 }
480 subsys_initcall(qe_ic_of_init);
481