xref: /linux/drivers/irqchip/spear-shirq.c (revision e3c871ab232ccc5fd82f76b21b9cae0113f01dc0)
1 /*
2  * SPEAr platform shared irq layer source file
3  *
4  * Copyright (C) 2009-2012 ST Microelectronics
5  * Viresh Kumar <viresh.linux@gmail.com>
6  *
7  * Copyright (C) 2012 ST Microelectronics
8  * Shiraz Hashim <shiraz.linux.kernel@gmail.com>
9  *
10  * This file is licensed under the terms of the GNU General Public
11  * License version 2. This program is licensed "as is" without any
12  * warranty of any kind, whether express or implied.
13  */
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 
16 #include <linux/err.h>
17 #include <linux/export.h>
18 #include <linux/interrupt.h>
19 #include <linux/io.h>
20 #include <linux/irq.h>
21 #include <linux/irqdomain.h>
22 #include <linux/of.h>
23 #include <linux/of_address.h>
24 #include <linux/of_irq.h>
25 #include <linux/spinlock.h>
26 
27 #include "irqchip.h"
28 
/*
 * struct shirq_regs: shared irq register configuration
 *
 * enb_reg: enable register offset (-1: block has no enable register,
 *	    mask/unmask become no-ops)
 * reset_to_enb: val 1 indicates, we need to clear bit for enabling interrupt
 * status_reg: status register offset
 * clear_reg: clear register offset (-1: block has no clear register, the
 *	      handler skips the clear step)
 * reset_to_clear: val 1 indicates, we need to clear bit for clearing interrupt
 */
struct shirq_regs {
	u32 enb_reg;
	u32 reset_to_enb;
	u32 status_reg;
	u32 clear_reg;
	u32 reset_to_clear;
};
46 
/*
 * struct spear_shirq: shared irq structure
 *
 * base:	Base register address (filled in by shirq_init() from the
 *		device node's first reg property; shared by all blocks of
 *		one variant)
 * regs:	Register configuration for shared irq block
 * virq_base:	Base virtual interrupt number (first linux irq of the block)
 * nr_irqs:	Number of interrupts handled by this block
 * offset:	Bit offset of the first interrupt in the shared registers
 * disabled:	Group is disabled, but accounted (keeps its descriptor/hwirq
 *		slots; spear_shirq_register() skips it)
 */
struct spear_shirq {
	void __iomem		*base;
	struct shirq_regs	regs;
	u32			virq_base;
	u32			nr_irqs;
	u32			offset;
	bool			disabled;
};
65 
66 static DEFINE_SPINLOCK(lock);
67 
/* spear300 shared irq registers offsets and masks */
#define SPEAR300_INT_ENB_MASK_REG	0x54
#define SPEAR300_INT_STS_MASK_REG	0x58

/* Single group: 9 sub-interrupts at status bits 0..8, maskable via ENB */
static struct spear_shirq spear300_shirq_ras1 = {
	.offset		= 0,
	.nr_irqs	= 9,
	.regs = {
		.enb_reg = SPEAR300_INT_ENB_MASK_REG,
		.status_reg = SPEAR300_INT_STS_MASK_REG,
		.clear_reg = -1,	/* no clear register on spear300 */
	},
};

static struct spear_shirq *spear300_shirq_blocks[] = {
	&spear300_shirq_ras1,
};
85 
/* spear310 shared irq registers offsets and masks */
#define SPEAR310_INT_STS_MASK_REG	0x04

/*
 * All spear310 groups share one status register and have neither an
 * enable nor a clear register (-1 turns mask/unmask and the handler's
 * clear step into no-ops).
 */
static struct spear_shirq spear310_shirq_ras1 = {
	.offset		= 0,	/* status bits 0..7 */
	.nr_irqs	= 8,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR310_INT_STS_MASK_REG,
		.clear_reg = -1,
	},
};

static struct spear_shirq spear310_shirq_ras2 = {
	.offset		= 8,	/* status bits 8..12 */
	.nr_irqs	= 5,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR310_INT_STS_MASK_REG,
		.clear_reg = -1,
	},
};

static struct spear_shirq spear310_shirq_ras3 = {
	.offset		= 13,	/* status bit 13 */
	.nr_irqs	= 1,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR310_INT_STS_MASK_REG,
		.clear_reg = -1,
	},
};

static struct spear_shirq spear310_shirq_intrcomm_ras = {
	.offset		= 14,	/* status bits 14..16 */
	.nr_irqs	= 3,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR310_INT_STS_MASK_REG,
		.clear_reg = -1,
	},
};

/* Order defines hwirq numbering and the parent-irq index in the DT node */
static struct spear_shirq *spear310_shirq_blocks[] = {
	&spear310_shirq_ras1,
	&spear310_shirq_ras2,
	&spear310_shirq_ras3,
	&spear310_shirq_intrcomm_ras,
};
135 
/* spear320 shared irq registers offsets and masks */
/* Note: status and clear live at the same offset on spear320 */
#define SPEAR320_INT_STS_MASK_REG		0x04
#define SPEAR320_INT_CLR_MASK_REG		0x04
#define SPEAR320_INT_ENB_MASK_REG		0x08

/*
 * Disabled group: keeps its 7 descriptor/hwirq slots in the linear range
 * (shirq_init() still counts nr_irqs), but spear_shirq_register() never
 * wires it up.
 */
static struct spear_shirq spear320_shirq_ras3 = {
	.offset		= 0,
	.nr_irqs	= 7,
	.disabled	= 1,
	.regs = {
		.enb_reg = SPEAR320_INT_ENB_MASK_REG,
		.reset_to_enb = 1,	/* enable by clearing the bit */
		.status_reg = SPEAR320_INT_STS_MASK_REG,
		.clear_reg = SPEAR320_INT_CLR_MASK_REG,
		.reset_to_clear = 1,	/* clear by clearing the bit */
	},
};

static struct spear_shirq spear320_shirq_ras1 = {
	.offset		= 7,	/* status bits 7..9 */
	.nr_irqs	= 3,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR320_INT_STS_MASK_REG,
		.clear_reg = SPEAR320_INT_CLR_MASK_REG,
		.reset_to_clear = 1,
	},
};

static struct spear_shirq spear320_shirq_ras2 = {
	.offset		= 10,	/* status bit 10 */
	.nr_irqs	= 1,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR320_INT_STS_MASK_REG,
		.clear_reg = SPEAR320_INT_CLR_MASK_REG,
		.reset_to_clear = 1,
	},
};

static struct spear_shirq spear320_shirq_intrcomm_ras = {
	.offset		= 11,	/* status bits 11..21 */
	.nr_irqs	= 11,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR320_INT_STS_MASK_REG,
		.clear_reg = SPEAR320_INT_CLR_MASK_REG,
		.reset_to_clear = 1,
	},
};

/* Order defines hwirq numbering and the parent-irq index in the DT node */
static struct spear_shirq *spear320_shirq_blocks[] = {
	&spear320_shirq_ras3,
	&spear320_shirq_ras1,
	&spear320_shirq_ras2,
	&spear320_shirq_intrcomm_ras,
};
193 
194 static void shirq_irq_mask_unmask(struct irq_data *d, bool mask)
195 {
196 	struct spear_shirq *shirq = irq_data_get_irq_chip_data(d);
197 	u32 val, offset = d->irq - shirq->virq_base;
198 	unsigned long flags;
199 
200 	if (shirq->regs.enb_reg == -1)
201 		return;
202 
203 	spin_lock_irqsave(&lock, flags);
204 	val = readl(shirq->base + shirq->regs.enb_reg);
205 
206 	if (mask ^ shirq->regs.reset_to_enb)
207 		val &= ~(0x1 << shirq->offset << offset);
208 	else
209 		val |= 0x1 << shirq->offset << offset;
210 
211 	writel(val, shirq->base + shirq->regs.enb_reg);
212 	spin_unlock_irqrestore(&lock, flags);
213 
214 }
215 
216 static void shirq_irq_mask(struct irq_data *d)
217 {
218 	shirq_irq_mask_unmask(d, 1);
219 }
220 
221 static void shirq_irq_unmask(struct irq_data *d)
222 {
223 	shirq_irq_mask_unmask(d, 0);
224 }
225 
/* irq_chip for the demultiplexed interrupts; ack is implemented as mask */
static struct irq_chip shirq_chip = {
	.name		= "spear-shirq",
	.irq_ack	= shirq_irq_mask,
	.irq_mask	= shirq_irq_mask,
	.irq_unmask	= shirq_irq_unmask,
};
232 
/*
 * Chained handler for one shared irq block: the parent interrupt fans out
 * to shirq->nr_irqs sub-interrupts, reported as bits [offset, offset +
 * nr_irqs) of the block's status register.
 */
static void shirq_handler(unsigned irq, struct irq_desc *desc)
{
	struct spear_shirq *shirq = irq_get_handler_data(irq);
	struct irq_data *idata = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(idata);
	u32 i, j, val, mask, tmp;

	/* Ack the parent interrupt before demultiplexing */
	chip->irq_ack(idata);

	/* Status bits belonging to this block */
	mask = ((0x1 << shirq->nr_irqs) - 1) << shirq->offset;
	/* Keep re-reading status until no bit of this block is pending */
	while ((val = readl(shirq->base + shirq->regs.status_reg) &
				mask)) {

		val >>= shirq->offset;
		for (i = 0, j = 1; i < shirq->nr_irqs; i++, j <<= 1) {

			if (!(j & val))
				continue;

			generic_handle_irq(shirq->virq_base + i);

			/* clear interrupt */
			if (shirq->regs.clear_reg == -1)
				continue;

			/* RMW of clear reg; polarity depends on reset_to_clear */
			tmp = readl(shirq->base + shirq->regs.clear_reg);
			if (shirq->regs.reset_to_clear)
				tmp &= ~(j << shirq->offset);
			else
				tmp |= (j << shirq->offset);
			writel(tmp, shirq->base + shirq->regs.clear_reg);
		}
	}
	/* Re-enable the parent line once all sub-interrupts are handled */
	chip->irq_unmask(idata);
}
268 
269 static void __init spear_shirq_register(struct spear_shirq *shirq,
270 					int parent_irq)
271 {
272 	int i;
273 
274 	if (shirq->disabled)
275 		return;
276 
277 	irq_set_chained_handler(parent_irq, shirq_handler);
278 	irq_set_handler_data(parent_irq, shirq);
279 
280 	for (i = 0; i < shirq->nr_irqs; i++) {
281 		irq_set_chip_and_handler(shirq->virq_base + i,
282 					 &shirq_chip, handle_simple_irq);
283 		set_irq_flags(shirq->virq_base + i, IRQF_VALID);
284 		irq_set_chip_data(shirq->virq_base + i, shirq);
285 	}
286 }
287 
288 static int __init shirq_init(struct spear_shirq **shirq_blocks, int block_nr,
289 		struct device_node *np)
290 {
291 	int i, parent_irq, virq_base, hwirq = 0, nr_irqs = 0;
292 	struct irq_domain *shirq_domain;
293 	void __iomem *base;
294 
295 	base = of_iomap(np, 0);
296 	if (!base) {
297 		pr_err("%s: failed to map shirq registers\n", __func__);
298 		return -ENXIO;
299 	}
300 
301 	for (i = 0; i < block_nr; i++)
302 		nr_irqs += shirq_blocks[i]->nr_irqs;
303 
304 	virq_base = irq_alloc_descs(-1, 0, nr_irqs, 0);
305 	if (IS_ERR_VALUE(virq_base)) {
306 		pr_err("%s: irq desc alloc failed\n", __func__);
307 		goto err_unmap;
308 	}
309 
310 	shirq_domain = irq_domain_add_legacy(np, nr_irqs, virq_base, 0,
311 			&irq_domain_simple_ops, NULL);
312 	if (WARN_ON(!shirq_domain)) {
313 		pr_warn("%s: irq domain init failed\n", __func__);
314 		goto err_free_desc;
315 	}
316 
317 	for (i = 0; i < block_nr; i++) {
318 		shirq_blocks[i]->base = base;
319 		shirq_blocks[i]->virq_base = irq_find_mapping(shirq_domain,
320 				hwirq);
321 
322 		parent_irq = irq_of_parse_and_map(np, i);
323 		spear_shirq_register(shirq_blocks[i], parent_irq);
324 		hwirq += shirq_blocks[i]->nr_irqs;
325 	}
326 
327 	return 0;
328 
329 err_free_desc:
330 	irq_free_descs(virq_base, nr_irqs);
331 err_unmap:
332 	iounmap(base);
333 	return -ENXIO;
334 }
335 
/* DT init hook for "st,spear300-shirq" */
static int __init spear300_shirq_of_init(struct device_node *np,
					 struct device_node *parent)
{
	return shirq_init(spear300_shirq_blocks,
			ARRAY_SIZE(spear300_shirq_blocks), np);
}
IRQCHIP_DECLARE(spear300_shirq, "st,spear300-shirq", spear300_shirq_of_init);
343 
/* DT init hook for "st,spear310-shirq" */
static int __init spear310_shirq_of_init(struct device_node *np,
					 struct device_node *parent)
{
	return shirq_init(spear310_shirq_blocks,
			ARRAY_SIZE(spear310_shirq_blocks), np);
}
IRQCHIP_DECLARE(spear310_shirq, "st,spear310-shirq", spear310_shirq_of_init);
351 
/* DT init hook for "st,spear320-shirq" */
static int __init spear320_shirq_of_init(struct device_node *np,
					 struct device_node *parent)
{
	return shirq_init(spear320_shirq_blocks,
			ARRAY_SIZE(spear320_shirq_blocks), np);
}
IRQCHIP_DECLARE(spear320_shirq, "st,spear320-shirq", spear320_shirq_of_init);
359