// SPDX-License-Identifier: GPL-2.0+
/*
 * Actions Semi Owl SoCs SIRQ interrupt controller driver
 *
 * Copyright (C) 2014 Actions Semi Inc.
 * David Liu <liuwei@actions-semi.com>
 *
 * Author: Parthiban Nallathambi <pn@denx.de>
 * Author: Saravanan Sekar <sravanhome@gmail.com>
 * Author: Cristian Ciocaltea <cristian.ciocaltea@gmail.com>
 */

#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/irqchip.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <dt-bindings/interrupt-controller/arm-gic.h>

#define NUM_SIRQ			3

#define INTC_EXTCTL_PENDING		BIT(0)
#define INTC_EXTCTL_CLK_SEL		BIT(4)
#define INTC_EXTCTL_EN			BIT(5)
#define INTC_EXTCTL_TYPE_MASK		GENMASK(7, 6)
#define INTC_EXTCTL_TYPE_HIGH		0
#define INTC_EXTCTL_TYPE_LOW		BIT(6)
#define INTC_EXTCTL_TYPE_RISING		BIT(7)
#define INTC_EXTCTL_TYPE_FALLING	(BIT(6) | BIT(7))

/* S500 & S700 SIRQ control register masks */
#define INTC_EXTCTL_SIRQ0_MASK		GENMASK(23, 16)
#define INTC_EXTCTL_SIRQ1_MASK		GENMASK(15, 8)
#define INTC_EXTCTL_SIRQ2_MASK		GENMASK(7, 0)

/* S900 SIRQ control register offsets, relative to controller base address */
#define INTC_EXTCTL0			0x0000
#define INTC_EXTCTL1			0x0328
#define INTC_EXTCTL2			0x032c

struct owl_sirq_params {
	/* INTC_EXTCTL reg shared for all three SIRQ lines */
	bool reg_shared;
	/* INTC_EXTCTL reg offsets relative to controller base address */
	u16 reg_offset[NUM_SIRQ];
};

struct owl_sirq_chip_data {
	const struct owl_sirq_params	*params;
	void __iomem			*base;
	raw_spinlock_t			lock;
	u32				ext_irqs[NUM_SIRQ];
};

/* S500 & S700 SoCs */
static const struct owl_sirq_params owl_sirq_s500_params = {
	.reg_shared = true,
	.reg_offset = { 0, 0, 0 },
};

/* S900 SoC */
static const struct owl_sirq_params owl_sirq_s900_params = {
	.reg_shared = false,
	.reg_offset = { INTC_EXTCTL0, INTC_EXTCTL1, INTC_EXTCTL2 },
};

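/*
 * Extract the EXTCTL field of the given SIRQ line from a shared register
 * value (SIRQ0 lives in bits 23:16, SIRQ1 in bits 15:8, SIRQ2 in bits 7:0).
 */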
static u32 owl_field_get(u32 val, u32 index)
{
	switch (index) {
	case 0:
		return FIELD_GET(INTC_EXTCTL_SIRQ0_MASK, val);
	case 1:
		return FIELD_GET(INTC_EXTCTL_SIRQ1_MASK, val);
	case 2:
	default:
		return FIELD_GET(INTC_EXTCTL_SIRQ2_MASK, val);
	}
}

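/* Place an EXTCTL value into the field position of the given SIRQ line */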
static u32 owl_field_prep(u32 val, u32 index)
{
	switch (index) {
	case 0:
		return FIELD_PREP(INTC_EXTCTL_SIRQ0_MASK, val);
	case 1:
		return FIELD_PREP(INTC_EXTCTL_SIRQ1_MASK, val);
	case 2:
	default:
		return FIELD_PREP(INTC_EXTCTL_SIRQ2_MASK, val);
	}
}

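/*
 * Read the EXTCTL settings of a SIRQ line; on SoCs that share a single
 * register for all lines (S500/S700), return only the per-line field.
 */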
static u32 owl_sirq_read_extctl(struct owl_sirq_chip_data *data, u32 index)
{
	u32 val;

	val = readl_relaxed(data->base + data->params->reg_offset[index]);
	if (data->params->reg_shared)
		val = owl_field_get(val, index);

	return val;
}

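/*
 * Write the EXTCTL settings of a SIRQ line; on SoCs that share a single
 * register, do a read-modify-write so only that line's field is updated.
 * Callers serialize via the owl_sirq_chip_data lock.
 */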
static void owl_sirq_write_extctl(struct owl_sirq_chip_data *data,
				  u32 extctl, u32 index)
{
	u32 val;

	if (data->params->reg_shared) {
		val = readl_relaxed(data->base + data->params->reg_offset[index]);
		val &= ~owl_field_prep(0xff, index);
		extctl = owl_field_prep(extctl, index) | val;
	}

	writel_relaxed(extctl, data->base + data->params->reg_offset[index]);
}

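/* Update a SIRQ line's EXTCTL bits (clear, then set) under the spinlock */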
static void owl_sirq_clear_set_extctl(struct owl_sirq_chip_data *d,
				      u32 clear, u32 set, u32 index)
{
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&d->lock, flags);
	val = owl_sirq_read_extctl(d, index);
	val &= ~clear;
	val |= set;
	owl_sirq_write_extctl(d, val, index);
	raw_spin_unlock_irqrestore(&d->lock, flags);
}

static void owl_sirq_eoi(struct irq_data *data)
{
	struct owl_sirq_chip_data *chip_data = irq_data_get_irq_chip_data(data);

	/*
	 * Software must clear the external interrupt pending bit when the
	 * interrupt type is edge triggered, so we need per-SIRQ clearing.
	 */
	if (!irqd_is_level_type(data))
		owl_sirq_clear_set_extctl(chip_data, 0, INTC_EXTCTL_PENDING,
					  data->hwirq);

	irq_chip_eoi_parent(data);
}

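/* Disable the SIRQ line in EXTCTL, then mask the parent GIC interrupt */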
static void owl_sirq_mask(struct irq_data *data)
{
	struct owl_sirq_chip_data *chip_data = irq_data_get_irq_chip_data(data);

	owl_sirq_clear_set_extctl(chip_data, INTC_EXTCTL_EN, 0, data->hwirq);
	irq_chip_mask_parent(data);
}

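/* Enable the SIRQ line in EXTCTL, then unmask the parent GIC interrupt */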
static void owl_sirq_unmask(struct irq_data *data)
{
	struct owl_sirq_chip_data *chip_data = irq_data_get_irq_chip_data(data);

	owl_sirq_clear_set_extctl(chip_data, 0, INTC_EXTCTL_EN, data->hwirq);
	irq_chip_unmask_parent(data);
}

/*
 * The GIC does not handle falling-edge or active-low triggers, hence the
 * SIRQ block shall be programmed to convert a falling-edge signal to a
 * rising-edge one and an active-low signal to an active-high one.
 */
static int owl_sirq_set_type(struct irq_data *data, unsigned int type)
{
	struct owl_sirq_chip_data *chip_data = irq_data_get_irq_chip_data(data);
	u32 sirq_type;

	switch (type) {
	case IRQ_TYPE_LEVEL_LOW:
		sirq_type = INTC_EXTCTL_TYPE_LOW;
		type = IRQ_TYPE_LEVEL_HIGH;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		sirq_type = INTC_EXTCTL_TYPE_HIGH;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		sirq_type = INTC_EXTCTL_TYPE_FALLING;
		type = IRQ_TYPE_EDGE_RISING;
		break;
	case IRQ_TYPE_EDGE_RISING:
		sirq_type = INTC_EXTCTL_TYPE_RISING;
		break;
	default:
		return -EINVAL;
	}

	owl_sirq_clear_set_extctl(chip_data, INTC_EXTCTL_TYPE_MASK, sirq_type,
				  data->hwirq);

	return irq_chip_set_type_parent(data, type);
}

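/* SIRQ operations are applied locally, then forwarded to the parent GIC */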
static struct irq_chip owl_sirq_chip = {
	.name		= "owl-sirq",
	.irq_mask	= owl_sirq_mask,
	.irq_unmask	= owl_sirq_unmask,
	.irq_eoi	= owl_sirq_eoi,
	.irq_set_type	= owl_sirq_set_type,
	.irq_retrigger	= irq_chip_retrigger_hierarchy,
#ifdef CONFIG_SMP
	.irq_set_affinity = irq_chip_set_affinity_parent,
#endif
};

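/* Translate a two-cell DT specifier into a SIRQ line number and trigger type */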
static int owl_sirq_domain_translate(struct irq_domain *d,
				     struct irq_fwspec *fwspec,
				     unsigned long *hwirq,
				     unsigned int *type)
{
	if (!is_of_node(fwspec->fwnode))
		return -EINVAL;

	if (fwspec->param_count != 2 || fwspec->param[0] >= NUM_SIRQ)
		return -EINVAL;

	*hwirq = fwspec->param[0];
	*type = fwspec->param[1];

	return 0;
}

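/*
 * Allocate a single SIRQ and route it to the corresponding parent GIC SPI.
 * Falling-edge and active-low types are handed to the parent as rising-edge
 * and active-high, since the SIRQ block performs the conversion in hardware.
 */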
static int owl_sirq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *data)
{
	struct owl_sirq_chip_data *chip_data = domain->host_data;
	struct irq_fwspec *fwspec = data;
	struct irq_fwspec parent_fwspec;
	irq_hw_number_t hwirq;
	unsigned int type;
	int ret;

	if (WARN_ON(nr_irqs != 1))
		return -EINVAL;

	ret = owl_sirq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
	case IRQ_TYPE_LEVEL_HIGH:
		break;
	case IRQ_TYPE_EDGE_FALLING:
		type = IRQ_TYPE_EDGE_RISING;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		type = IRQ_TYPE_LEVEL_HIGH;
		break;
	default:
		return -EINVAL;
	}

	irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &owl_sirq_chip,
				      chip_data);

	parent_fwspec.fwnode = domain->parent->fwnode;
	parent_fwspec.param_count = 3;
	parent_fwspec.param[0] = GIC_SPI;
	parent_fwspec.param[1] = chip_data->ext_irqs[hwirq];
	parent_fwspec.param[2] = type;

	return irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec);
}

static const struct irq_domain_ops owl_sirq_domain_ops = {
	.translate	= owl_sirq_domain_translate,
	.alloc		= owl_sirq_domain_alloc,
	.free		= irq_domain_free_irqs_common,
};

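/*
 * Common init: map the SIRQ registers, record the parent GIC SPI number of
 * each of the three SIRQ lines, select the 24MHz external interrupt clock
 * and register a hierarchical IRQ domain on top of the GIC.
 */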
static int __init owl_sirq_init(const struct owl_sirq_params *params,
				struct device_node *node,
				struct device_node *parent)
{
	struct irq_domain *domain, *parent_domain;
	struct owl_sirq_chip_data *chip_data;
	int ret, i;

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		pr_err("%pOF: failed to find sirq parent domain\n", node);
		return -ENXIO;
	}

	chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL);
	if (!chip_data)
		return -ENOMEM;

	raw_spin_lock_init(&chip_data->lock);

	chip_data->params = params;

	chip_data->base = of_iomap(node, 0);
	if (!chip_data->base) {
		pr_err("%pOF: failed to map sirq registers\n", node);
		ret = -ENXIO;
		goto out_free;
	}

	for (i = 0; i < NUM_SIRQ; i++) {
		struct of_phandle_args irq;

		ret = of_irq_parse_one(node, i, &irq);
		if (ret) {
			pr_err("%pOF: failed to parse interrupt %d\n", node, i);
			goto out_unmap;
		}

		if (WARN_ON(irq.args_count != 3)) {
			ret = -EINVAL;
			goto out_unmap;
		}

		chip_data->ext_irqs[i] = irq.args[1];

		/* Set 24MHz external interrupt clock freq */
		owl_sirq_clear_set_extctl(chip_data, 0, INTC_EXTCTL_CLK_SEL, i);
	}

	domain = irq_domain_add_hierarchy(parent_domain, 0, NUM_SIRQ, node,
					  &owl_sirq_domain_ops, chip_data);
	if (!domain) {
		pr_err("%pOF: failed to add domain\n", node);
		ret = -ENOMEM;
		goto out_unmap;
	}

	return 0;

out_unmap:
	iounmap(chip_data->base);
out_free:
	kfree(chip_data);

	return ret;
}

static int __init owl_sirq_s500_of_init(struct device_node *node,
					struct device_node *parent)
{
	return owl_sirq_init(&owl_sirq_s500_params, node, parent);
}

IRQCHIP_DECLARE(owl_sirq_s500, "actions,s500-sirq", owl_sirq_s500_of_init);
IRQCHIP_DECLARE(owl_sirq_s700, "actions,s700-sirq", owl_sirq_s500_of_init);

static int __init owl_sirq_s900_of_init(struct device_node *node,
					struct device_node *parent)
{
	return owl_sirq_init(&owl_sirq_s900_params, node, parent);
}

IRQCHIP_DECLARE(owl_sirq_s900, "actions,s900-sirq", owl_sirq_s900_of_init);