// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for Socionext External Interrupt Unit (EXIU)
 *
 * Copyright (c) 2017-2019 Linaro, Ltd. <ard.biesheuvel@linaro.org>
 *
 * Based on irq-tegra.c:
 *   Copyright (C) 2011 Google, Inc.
 *   Copyright (C) 2010,2013, NVIDIA Corporation
 */

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>

#include <dt-bindings/interrupt-controller/arm-gic.h>

#define NUM_IRQS	32

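/*
 * EXIU register offsets. Only EIMASK (interrupt mask), EIREQCLR (clear
 * latched requests), EILVL (polarity select) and EIEDG (edge/level select)
 * are programmed by this driver; the remaining offsets are listed for
 * reference only.
 */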
#define EIMASK		0x00
#define EISRCSEL	0x04
#define EIREQSTA	0x08
#define EIRAWREQSTA	0x0C
#define EIREQCLR	0x10
#define EILVL		0x14
#define EIEDG		0x18
#define EISIR		0x1C

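/*
 * Per-instance data: the mapped EXIU register window and the GIC SPI number
 * of the first of the 32 EXIU lines ("socionext,spi-base").
 */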
struct exiu_irq_data {
	void __iomem	*base;
	u32		spi_base;
};

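/*
 * Ack by clearing the latched request; used via handle_fasteoi_ack_irq for
 * edge-triggered lines.
 */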
static void exiu_irq_ack(struct irq_data *d)
{
	struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);

	writel(BIT(d->hwirq), data->base + EIREQCLR);
}

static void exiu_irq_eoi(struct irq_data *d)
{
	struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);

	/*
	 * Level triggered interrupts are latched and must be cleared during
	 * EOI or the interrupt will be jammed on. Of course if a level
	 * triggered interrupt is still asserted then the write will not clear
	 * the interrupt.
	 */
	if (irqd_is_level_type(d))
		writel(BIT(d->hwirq), data->base + EIREQCLR);

	irq_chip_eoi_parent(d);
}

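/*
 * A set bit in EIMASK masks the corresponding EXIU line. Mask, unmask and
 * enable are applied locally and then propagated to the parent GIC.
 */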
static void exiu_irq_mask(struct irq_data *d)
{
	struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
	u32 val;

	val = readl_relaxed(data->base + EIMASK) | BIT(d->hwirq);
	writel_relaxed(val, data->base + EIMASK);
	irq_chip_mask_parent(d);
}

static void exiu_irq_unmask(struct irq_data *d)
{
	struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
	u32 val;

	val = readl_relaxed(data->base + EIMASK) & ~BIT(d->hwirq);
	writel_relaxed(val, data->base + EIMASK);
	irq_chip_unmask_parent(d);
}

static void exiu_irq_enable(struct irq_data *d)
{
	struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
	u32 val;

	/* clear interrupts that were latched while disabled */
	writel_relaxed(BIT(d->hwirq), data->base + EIREQCLR);

	val = readl_relaxed(data->base + EIMASK) & ~BIT(d->hwirq);
	writel_relaxed(val, data->base + EIMASK);
	irq_chip_enable_parent(d);
}

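/*
 * EILVL selects the active polarity (bit set: rising edge / high level) and
 * EIEDG selects edge detection (bit set: edge, clear: level). Edge types use
 * the ack-based fasteoi flow, level types the plain fasteoi flow. The type
 * requested from the parent GIC is always IRQ_TYPE_LEVEL_HIGH.
 */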
static int exiu_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
	u32 val;

	val = readl_relaxed(data->base + EILVL);
	if (type == IRQ_TYPE_EDGE_RISING || type == IRQ_TYPE_LEVEL_HIGH)
		val |= BIT(d->hwirq);
	else
		val &= ~BIT(d->hwirq);
	writel_relaxed(val, data->base + EILVL);

	val = readl_relaxed(data->base + EIEDG);
	if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_LEVEL_HIGH) {
		val &= ~BIT(d->hwirq);
		irq_set_handler_locked(d, handle_fasteoi_irq);
	} else {
		val |= BIT(d->hwirq);
		irq_set_handler_locked(d, handle_fasteoi_ack_irq);
	}
	writel_relaxed(val, data->base + EIEDG);

	writel_relaxed(BIT(d->hwirq), data->base + EIREQCLR);

	return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
}

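/*
 * The EXIU irqchip is stacked on top of the parent GIC: mask, unmask, enable
 * and type changes are applied to the EXIU registers and then forwarded to
 * the parent. Type changes happen with the interrupt masked
 * (IRQCHIP_SET_TYPE_MASKED) and the EOI is deferred to the end of threaded
 * handlers (IRQCHIP_EOI_THREADED).
 */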
static struct irq_chip exiu_irq_chip = {
	.name			= "EXIU",
	.irq_ack		= exiu_irq_ack,
	.irq_eoi		= exiu_irq_eoi,
	.irq_enable		= exiu_irq_enable,
	.irq_mask		= exiu_irq_mask,
	.irq_unmask		= exiu_irq_unmask,
	.irq_set_type		= exiu_irq_set_type,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_EOI_THREADED |
				  IRQCHIP_MASK_ON_SUSPEND,
};

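/*
 * Two fwspec layouts are accepted: DT uses the three-cell GIC format
 * (GIC_SPI, SPI number, trigger flags), with the SPI number made relative to
 * spi_base; the non-DT (ACPI) case uses two cells (EXIU line, trigger flags).
 */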
static int exiu_domain_translate(struct irq_domain *domain,
				 struct irq_fwspec *fwspec,
				 unsigned long *hwirq,
				 unsigned int *type)
{
	struct exiu_irq_data *info = domain->host_data;

	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count != 3)
			return -EINVAL;

		if (fwspec->param[0] != GIC_SPI)
			return -EINVAL; /* No PPI should point to this domain */

		*hwirq = fwspec->param[1] - info->spi_base;
		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
	} else {
		if (fwspec->param_count != 2)
			return -EINVAL;
		*hwirq = fwspec->param[0];
		*type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
	}
	return 0;
}

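/*
 * Allocate the EXIU hwirq and the corresponding SPI in the parent domain.
 * In the non-DT case the parent fwspec carries a GSI, i.e. the SPI number
 * plus the 32-ID offset at which SPIs start in the GIC interrupt space.
 */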
static int exiu_domain_alloc(struct irq_domain *dom, unsigned int virq,
			     unsigned int nr_irqs, void *data)
{
	struct irq_fwspec *fwspec = data;
	struct irq_fwspec parent_fwspec;
	struct exiu_irq_data *info = dom->host_data;
	irq_hw_number_t hwirq;

	parent_fwspec = *fwspec;
	if (is_of_node(dom->parent->fwnode)) {
		if (fwspec->param_count != 3)
			return -EINVAL;	/* Not GIC compliant */
		if (fwspec->param[0] != GIC_SPI)
			return -EINVAL;	/* No PPI should point to this domain */

		hwirq = fwspec->param[1] - info->spi_base;
	} else {
		hwirq = fwspec->param[0];
		parent_fwspec.param[0] = hwirq + info->spi_base + 32;
	}
	WARN_ON(nr_irqs != 1);
	irq_domain_set_hwirq_and_chip(dom, virq, hwirq, &exiu_irq_chip, info);

	parent_fwspec.fwnode = dom->parent->fwnode;
	return irq_domain_alloc_irqs_parent(dom, virq, nr_irqs, &parent_fwspec);
}

static const struct irq_domain_ops exiu_domain_ops = {
	.translate	= exiu_domain_translate,
	.alloc		= exiu_domain_alloc,
	.free		= irq_domain_free_irqs_common,
};

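/*
 * Common init for the DT and ACPI paths: read the "socionext,spi-base"
 * property, map the register window and start with every line cleared and
 * masked.
 */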
static struct exiu_irq_data *exiu_init(const struct fwnode_handle *fwnode,
				       struct resource *res)
{
	struct exiu_irq_data *data;
	int err;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (fwnode_property_read_u32_array(fwnode, "socionext,spi-base",
					   &data->spi_base, 1)) {
		err = -ENODEV;
		goto out_free;
	}

	data->base = ioremap(res->start, resource_size(res));
	if (!data->base) {
		err = -ENODEV;
		goto out_free;
	}

	/* clear and mask all interrupts */
	writel_relaxed(0xFFFFFFFF, data->base + EIREQCLR);
	writel_relaxed(0xFFFFFFFF, data->base + EIMASK);

	return data;

out_free:
	kfree(data);
	return ERR_PTR(err);
}

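/*
 * DT probe path. An EXIU node is expected to look roughly like the example
 * below (illustrative values only; see the binding document for the exact
 * format):
 *
 *	exiu: interrupt-controller@510c0000 {
 *		compatible = "socionext,synquacer-exiu";
 *		reg = <0x0 0x510c0000 0x0 0x20>;
 *		interrupt-controller;
 *		interrupt-parent = <&gic>;
 *		#interrupt-cells = <3>;
 *		socionext,spi-base = <112>;
 *	};
 */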
static int __init exiu_dt_init(struct device_node *node,
			       struct device_node *parent)
{
	struct irq_domain *parent_domain, *domain;
	struct exiu_irq_data *data;
	struct resource res;

	if (!parent) {
		pr_err("%pOF: no parent, giving up\n", node);
		return -ENODEV;
	}

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		pr_err("%pOF: unable to obtain parent domain\n", node);
		return -ENXIO;
	}

	if (of_address_to_resource(node, 0, &res)) {
		pr_err("%pOF: failed to parse memory resource\n", node);
		return -ENXIO;
	}

	data = exiu_init(of_node_to_fwnode(node), &res);
	if (IS_ERR(data))
		return PTR_ERR(data);

	domain = irq_domain_add_hierarchy(parent_domain, 0, NUM_IRQS, node,
					  &exiu_domain_ops, data);
	if (!domain) {
		pr_err("%pOF: failed to allocate domain\n", node);
		goto out_unmap;
	}

	pr_info("%pOF: %d interrupts forwarded to %pOF\n", node, NUM_IRQS,
		parent);

	return 0;

out_unmap:
	iounmap(data->base);
	kfree(data);
	return -ENOMEM;
}
IRQCHIP_DECLARE(exiu, "socionext,synquacer-exiu", exiu_dt_init);

#ifdef CONFIG_ACPI
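/*
 * ACPI probe path: the EXIU is described as a platform device (SCX0008)
 * whose device properties provide "socionext,spi-base"; interrupts are
 * described with the two-cell fwspec handled above.
 */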
static int exiu_acpi_probe(struct platform_device *pdev)
{
	struct irq_domain *domain;
	struct exiu_irq_data *data;
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "failed to parse memory resource\n");
		return -ENXIO;
	}

	data = exiu_init(dev_fwnode(&pdev->dev), res);
	if (IS_ERR(data))
		return PTR_ERR(data);

	domain = acpi_irq_create_hierarchy(0, NUM_IRQS, dev_fwnode(&pdev->dev),
					   &exiu_domain_ops, data);
	if (!domain) {
		dev_err(&pdev->dev, "failed to create IRQ domain\n");
		goto out_unmap;
	}

	dev_info(&pdev->dev, "%d interrupts forwarded\n", NUM_IRQS);

	return 0;

out_unmap:
	iounmap(data->base);
	kfree(data);
	return -ENOMEM;
}

static const struct acpi_device_id exiu_acpi_ids[] = {
	{ "SCX0008" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(acpi, exiu_acpi_ids);

static struct platform_driver exiu_driver = {
	.driver = {
		.name = "exiu",
		.acpi_match_table = exiu_acpi_ids,
	},
	.probe = exiu_acpi_probe,
};
builtin_platform_driver(exiu_driver);
#endif