xref: /linux/drivers/irqchip/irq-mvebu-sei.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #define pr_fmt(fmt) "mvebu-sei: " fmt
4 
5 #include <linux/interrupt.h>
6 #include <linux/irq.h>
7 #include <linux/irqchip.h>
8 #include <linux/irqchip/chained_irq.h>
9 #include <linux/irqdomain.h>
10 #include <linux/kernel.h>
11 #include <linux/msi.h>
12 #include <linux/platform_device.h>
13 #include <linux/of_address.h>
14 #include <linux/of_irq.h>
15 #include <linux/of_platform.h>
16 
17 #include "irq-msi-lib.h"
18 
/* Cause register: pending bits; acked by writing the bit back (see ack/reset) */
#define GICP_SECR(idx)		(0x0  + ((idx) * 0x4))
/* Mask register: a set bit disables the corresponding interrupt */
#define GICP_SEMR(idx)		(0x20 + ((idx) * 0x4))
/* Doorbell used as the MSI target address for CP interrupts */
#define GICP_SET_SEI_OFFSET	0x30

#define SEI_IRQ_COUNT_PER_REG	32
#define SEI_IRQ_REG_COUNT	2
#define SEI_IRQ_COUNT		(SEI_IRQ_COUNT_PER_REG * SEI_IRQ_REG_COUNT)
/* Map a hwirq (0..SEI_IRQ_COUNT-1) to its register index and bit position */
#define SEI_IRQ_REG_IDX(irq_id)	((irq_id) / SEI_IRQ_COUNT_PER_REG)
#define SEI_IRQ_REG_BIT(irq_id)	((irq_id) % SEI_IRQ_COUNT_PER_REG)
30 
/* A contiguous range of SEI hwirq numbers: [first, first + size) */
struct mvebu_sei_interrupt_range {
	u32 first;
	u32 size;
};

/*
 * Per-SoC split of the SEI hwirq space between wired AP events and
 * MSI-based CP events.
 */
struct mvebu_sei_caps {
	struct mvebu_sei_interrupt_range ap_range;
	struct mvebu_sei_interrupt_range cp_range;
};
40 
/* Driver state for one SEI (System Error Interrupt) controller instance */
struct mvebu_sei {
	struct device *dev;
	void __iomem *base;		/* Mapped SEI register bank */
	struct resource *res;		/* Physical range; start is the MSI doorbell base */
	struct irq_domain *sei_domain;	/* Root domain covering ap_range + cp_range */
	struct irq_domain *ap_domain;	/* Child domain for wired AP events */
	struct irq_domain *cp_domain;	/* Child domain for MSI-based CP events */
	const struct mvebu_sei_caps *caps;

	/* Lock on MSI allocations/releases */
	struct mutex cp_msi_lock;
	DECLARE_BITMAP(cp_msi_bitmap, SEI_IRQ_COUNT);

	/* Lock on IRQ masking register */
	raw_spinlock_t mask_lock;
};
57 
mvebu_sei_ack_irq(struct irq_data * d)58 static void mvebu_sei_ack_irq(struct irq_data *d)
59 {
60 	struct mvebu_sei *sei = irq_data_get_irq_chip_data(d);
61 	u32 reg_idx = SEI_IRQ_REG_IDX(d->hwirq);
62 
63 	writel_relaxed(BIT(SEI_IRQ_REG_BIT(d->hwirq)),
64 		       sei->base + GICP_SECR(reg_idx));
65 }
66 
mvebu_sei_mask_irq(struct irq_data * d)67 static void mvebu_sei_mask_irq(struct irq_data *d)
68 {
69 	struct mvebu_sei *sei = irq_data_get_irq_chip_data(d);
70 	u32 reg, reg_idx = SEI_IRQ_REG_IDX(d->hwirq);
71 	unsigned long flags;
72 
73 	/* 1 disables the interrupt */
74 	raw_spin_lock_irqsave(&sei->mask_lock, flags);
75 	reg = readl_relaxed(sei->base + GICP_SEMR(reg_idx));
76 	reg |= BIT(SEI_IRQ_REG_BIT(d->hwirq));
77 	writel_relaxed(reg, sei->base + GICP_SEMR(reg_idx));
78 	raw_spin_unlock_irqrestore(&sei->mask_lock, flags);
79 }
80 
mvebu_sei_unmask_irq(struct irq_data * d)81 static void mvebu_sei_unmask_irq(struct irq_data *d)
82 {
83 	struct mvebu_sei *sei = irq_data_get_irq_chip_data(d);
84 	u32 reg, reg_idx = SEI_IRQ_REG_IDX(d->hwirq);
85 	unsigned long flags;
86 
87 	/* 0 enables the interrupt */
88 	raw_spin_lock_irqsave(&sei->mask_lock, flags);
89 	reg = readl_relaxed(sei->base + GICP_SEMR(reg_idx));
90 	reg &= ~BIT(SEI_IRQ_REG_BIT(d->hwirq));
91 	writel_relaxed(reg, sei->base + GICP_SEMR(reg_idx));
92 	raw_spin_unlock_irqrestore(&sei->mask_lock, flags);
93 }
94 
/*
 * All SEIs are funneled through a single parent SPI (see probe), so there
 * is no per-interrupt affinity to program at this level.
 */
static int mvebu_sei_set_affinity(struct irq_data *d,
				  const struct cpumask *mask_val,
				  bool force)
{
	return -EINVAL;
}
101 
/* The only supported state change is clearing PENDING, done by acking */
static int mvebu_sei_set_irqchip_state(struct irq_data *d,
				       enum irqchip_irq_state which,
				       bool state)
{
	if (which == IRQCHIP_STATE_PENDING && !state) {
		mvebu_sei_ack_irq(d);
		return 0;
	}

	return -EINVAL;
}
113 
/* irq_chip for the root SEI domain: drives the SECR/SEMR registers directly */
static struct irq_chip mvebu_sei_irq_chip = {
	.name			= "SEI",
	.irq_ack		= mvebu_sei_ack_irq,
	.irq_mask		= mvebu_sei_mask_irq,
	.irq_unmask		= mvebu_sei_unmask_irq,
	.irq_set_affinity       = mvebu_sei_set_affinity,
	.irq_set_irqchip_state	= mvebu_sei_set_irqchip_state,
};
122 
mvebu_sei_ap_set_type(struct irq_data * data,unsigned int type)123 static int mvebu_sei_ap_set_type(struct irq_data *data, unsigned int type)
124 {
125 	if ((type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_LEVEL_HIGH)
126 		return -EINVAL;
127 
128 	return 0;
129 }
130 
/* irq_chip for AP events: delegates everything but the trigger check */
static struct irq_chip mvebu_sei_ap_irq_chip = {
	.name			= "AP SEI",
	.irq_ack		= irq_chip_ack_parent,
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_set_affinity       = irq_chip_set_affinity_parent,
	.irq_set_type		= mvebu_sei_ap_set_type,
};
139 
mvebu_sei_cp_compose_msi_msg(struct irq_data * data,struct msi_msg * msg)140 static void mvebu_sei_cp_compose_msi_msg(struct irq_data *data,
141 					 struct msi_msg *msg)
142 {
143 	struct mvebu_sei *sei = data->chip_data;
144 	phys_addr_t set = sei->res->start + GICP_SET_SEI_OFFSET;
145 
146 	msg->data = data->hwirq + sei->caps->cp_range.first;
147 	msg->address_lo = lower_32_bits(set);
148 	msg->address_hi = upper_32_bits(set);
149 }
150 
mvebu_sei_cp_set_type(struct irq_data * data,unsigned int type)151 static int mvebu_sei_cp_set_type(struct irq_data *data, unsigned int type)
152 {
153 	if ((type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_EDGE_RISING)
154 		return -EINVAL;
155 
156 	return 0;
157 }
158 
/* irq_chip for CP events: parent delegation plus MSI message composition */
static struct irq_chip mvebu_sei_cp_irq_chip = {
	.name			= "CP SEI",
	.irq_ack		= irq_chip_ack_parent,
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_set_affinity       = irq_chip_set_affinity_parent,
	.irq_set_type		= mvebu_sei_cp_set_type,
	.irq_compose_msi_msg	= mvebu_sei_cp_compose_msi_msg,
};
168 
mvebu_sei_domain_alloc(struct irq_domain * domain,unsigned int virq,unsigned int nr_irqs,void * arg)169 static int mvebu_sei_domain_alloc(struct irq_domain *domain, unsigned int virq,
170 				  unsigned int nr_irqs, void *arg)
171 {
172 	struct mvebu_sei *sei = domain->host_data;
173 	struct irq_fwspec *fwspec = arg;
174 
175 	/* Not much to do, just setup the irqdata */
176 	irq_domain_set_hwirq_and_chip(domain, virq, fwspec->param[0],
177 				      &mvebu_sei_irq_chip, sei);
178 
179 	return 0;
180 }
181 
mvebu_sei_domain_free(struct irq_domain * domain,unsigned int virq,unsigned int nr_irqs)182 static void mvebu_sei_domain_free(struct irq_domain *domain, unsigned int virq,
183 				  unsigned int nr_irqs)
184 {
185 	int i;
186 
187 	for (i = 0; i < nr_irqs; i++) {
188 		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
189 		irq_set_handler(virq + i, NULL);
190 		irq_domain_reset_irq_data(d);
191 	}
192 }
193 
/* Ops for the root SEI domain; .select lets the MSI core find it */
static const struct irq_domain_ops mvebu_sei_domain_ops = {
	.select	= msi_lib_irq_domain_select,
	.alloc	= mvebu_sei_domain_alloc,
	.free	= mvebu_sei_domain_free,
};
199 
mvebu_sei_ap_translate(struct irq_domain * domain,struct irq_fwspec * fwspec,unsigned long * hwirq,unsigned int * type)200 static int mvebu_sei_ap_translate(struct irq_domain *domain,
201 				  struct irq_fwspec *fwspec,
202 				  unsigned long *hwirq,
203 				  unsigned int *type)
204 {
205 	*hwirq = fwspec->param[0];
206 	*type  = IRQ_TYPE_LEVEL_HIGH;
207 
208 	return 0;
209 }
210 
mvebu_sei_ap_alloc(struct irq_domain * domain,unsigned int virq,unsigned int nr_irqs,void * arg)211 static int mvebu_sei_ap_alloc(struct irq_domain *domain, unsigned int virq,
212 			      unsigned int nr_irqs, void *arg)
213 {
214 	struct mvebu_sei *sei = domain->host_data;
215 	struct irq_fwspec fwspec;
216 	unsigned long hwirq;
217 	unsigned int type;
218 	int err;
219 
220 	mvebu_sei_ap_translate(domain, arg, &hwirq, &type);
221 
222 	fwspec.fwnode = domain->parent->fwnode;
223 	fwspec.param_count = 1;
224 	fwspec.param[0] = hwirq + sei->caps->ap_range.first;
225 
226 	err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
227 	if (err)
228 		return err;
229 
230 	irq_domain_set_info(domain, virq, hwirq,
231 			    &mvebu_sei_ap_irq_chip, sei,
232 			    handle_level_irq, NULL, NULL);
233 	irq_set_probe(virq);
234 
235 	return 0;
236 }
237 
/* Ops for the wired AP child domain */
static const struct irq_domain_ops mvebu_sei_ap_domain_ops = {
	.translate	= mvebu_sei_ap_translate,
	.alloc		= mvebu_sei_ap_alloc,
	.free		= irq_domain_free_irqs_parent,
};
243 
/* Return a CP MSI hwirq to the allocation bitmap */
static void mvebu_sei_cp_release_irq(struct mvebu_sei *sei, unsigned long hwirq)
{
	mutex_lock(&sei->cp_msi_lock);
	clear_bit(hwirq, sei->cp_msi_bitmap);
	mutex_unlock(&sei->cp_msi_lock);
}
250 
/*
 * Allocate one CP (MSI) SEI: claim a free bit in the bitmap, then allocate
 * the matching absolute hwirq in the parent (root SEI) domain.
 */
static int mvebu_sei_cp_domain_alloc(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs,
				     void *args)
{
	struct mvebu_sei *sei = domain->host_data;
	struct irq_fwspec fwspec;
	unsigned long hwirq;
	int ret;

	/* The software only supports single allocations for now */
	if (nr_irqs != 1)
		return -ENOTSUPP;

	mutex_lock(&sei->cp_msi_lock);
	hwirq = find_first_zero_bit(sei->cp_msi_bitmap,
				    sei->caps->cp_range.size);
	if (hwirq < sei->caps->cp_range.size)
		set_bit(hwirq, sei->cp_msi_bitmap);
	mutex_unlock(&sei->cp_msi_lock);

	/* find_first_zero_bit() returns the bitmap size when no bit is free */
	if (hwirq == sei->caps->cp_range.size)
		return -ENOSPC;

	/* CP events are offset by cp_range.first in the root hwirq space */
	fwspec.fwnode = domain->parent->fwnode;
	fwspec.param_count = 1;
	fwspec.param[0] = hwirq + sei->caps->cp_range.first;

	ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
	if (ret)
		goto free_irq;

	irq_domain_set_info(domain, virq, hwirq,
			    &mvebu_sei_cp_irq_chip, sei,
			    handle_edge_irq, NULL, NULL);

	return 0;

free_irq:
	/* Hand the bitmap bit back on parent allocation failure */
	mvebu_sei_cp_release_irq(sei, hwirq);
	return ret;
}
292 
/* Free one CP SEI: validate, return the bitmap bit, then free in the parent */
static void mvebu_sei_cp_domain_free(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	struct mvebu_sei *sei = domain->host_data;
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);

	/* Only single allocations are handed out, so hwirq must be in range */
	if (nr_irqs != 1 || d->hwirq >= sei->caps->cp_range.size) {
		dev_err(sei->dev, "Invalid hwirq %lu\n", d->hwirq);
		return;
	}

	mvebu_sei_cp_release_irq(sei, d->hwirq);
	irq_domain_free_irqs_parent(domain, virq, 1);
}
307 
/* Ops for the MSI-based CP child domain */
static const struct irq_domain_ops mvebu_sei_cp_domain_ops = {
	.alloc	= mvebu_sei_cp_domain_alloc,
	.free	= mvebu_sei_cp_domain_free,
};
312 
/*
 * Chained handler for the single parent SPI: scan both cause registers and
 * dispatch every pending SEI into the root domain.
 */
static void mvebu_sei_handle_cascade_irq(struct irq_desc *desc)
{
	struct mvebu_sei *sei = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	u32 idx;

	chained_irq_enter(chip, desc);

	for (idx = 0; idx < SEI_IRQ_REG_COUNT; idx++) {
		unsigned long irqmap;
		int bit;

		irqmap = readl_relaxed(sei->base + GICP_SECR(idx));
		for_each_set_bit(bit, &irqmap, SEI_IRQ_COUNT_PER_REG) {
			unsigned long hwirq;
			int err;

			/* Translate (register, bit) back to a root-domain hwirq */
			hwirq = idx * SEI_IRQ_COUNT_PER_REG + bit;
			err = generic_handle_domain_irq(sei->sei_domain, hwirq);
			if (unlikely(err))
				dev_warn(sei->dev, "Spurious IRQ detected (hwirq %lu)\n", hwirq);
		}
	}

	chained_irq_exit(chip, desc);
}
339 
mvebu_sei_reset(struct mvebu_sei * sei)340 static void mvebu_sei_reset(struct mvebu_sei *sei)
341 {
342 	u32 reg_idx;
343 
344 	/* Clear IRQ cause registers, mask all interrupts */
345 	for (reg_idx = 0; reg_idx < SEI_IRQ_REG_COUNT; reg_idx++) {
346 		writel_relaxed(0xFFFFFFFF, sei->base + GICP_SECR(reg_idx));
347 		writel_relaxed(0xFFFFFFFF, sei->base + GICP_SEMR(reg_idx));
348 	}
349 }
350 
/* MSI flags imposed on / supported for child device MSI domains */
#define SEI_MSI_FLAGS_REQUIRED	(MSI_FLAG_USE_DEF_DOM_OPS |	\
				 MSI_FLAG_USE_DEF_CHIP_OPS)

#define SEI_MSI_FLAGS_SUPPORTED	(MSI_GENERIC_FLAGS_MASK)

/* Exposes the CP domain as a platform-MSI parent via irq-msi-lib */
static const struct msi_parent_ops sei_msi_parent_ops = {
	.supported_flags	= SEI_MSI_FLAGS_SUPPORTED,
	.required_flags		= SEI_MSI_FLAGS_REQUIRED,
	.bus_select_mask	= MATCH_PLATFORM_MSI,
	.bus_select_token	= DOMAIN_BUS_GENERIC_MSI,
	.prefix			= "SEI-",
	.init_dev_msi_info	= msi_lib_init_dev_msi_info,
};
364 
mvebu_sei_probe(struct platform_device * pdev)365 static int mvebu_sei_probe(struct platform_device *pdev)
366 {
367 	struct device_node *node = pdev->dev.of_node;
368 	struct mvebu_sei *sei;
369 	u32 parent_irq;
370 	int ret;
371 
372 	sei = devm_kzalloc(&pdev->dev, sizeof(*sei), GFP_KERNEL);
373 	if (!sei)
374 		return -ENOMEM;
375 
376 	sei->dev = &pdev->dev;
377 
378 	mutex_init(&sei->cp_msi_lock);
379 	raw_spin_lock_init(&sei->mask_lock);
380 
381 	sei->base = devm_platform_get_and_ioremap_resource(pdev, 0, &sei->res);
382 	if (IS_ERR(sei->base))
383 		return PTR_ERR(sei->base);
384 
385 	/* Retrieve the SEI capabilities with the interrupt ranges */
386 	sei->caps = of_device_get_match_data(&pdev->dev);
387 	if (!sei->caps) {
388 		dev_err(sei->dev,
389 			"Could not retrieve controller capabilities\n");
390 		return -EINVAL;
391 	}
392 
393 	/*
394 	 * Reserve the single (top-level) parent SPI IRQ from which all the
395 	 * interrupts handled by this driver will be signaled.
396 	 */
397 	parent_irq = irq_of_parse_and_map(node, 0);
398 	if (parent_irq <= 0) {
399 		dev_err(sei->dev, "Failed to retrieve top-level SPI IRQ\n");
400 		return -ENODEV;
401 	}
402 
403 	/* Create the root SEI domain */
404 	sei->sei_domain = irq_domain_create_linear(of_node_to_fwnode(node),
405 						   (sei->caps->ap_range.size +
406 						    sei->caps->cp_range.size),
407 						   &mvebu_sei_domain_ops,
408 						   sei);
409 	if (!sei->sei_domain) {
410 		dev_err(sei->dev, "Failed to create SEI IRQ domain\n");
411 		ret = -ENOMEM;
412 		goto dispose_irq;
413 	}
414 
415 	irq_domain_update_bus_token(sei->sei_domain, DOMAIN_BUS_NEXUS);
416 
417 	/* Create the 'wired' domain */
418 	sei->ap_domain = irq_domain_create_hierarchy(sei->sei_domain, 0,
419 						     sei->caps->ap_range.size,
420 						     of_node_to_fwnode(node),
421 						     &mvebu_sei_ap_domain_ops,
422 						     sei);
423 	if (!sei->ap_domain) {
424 		dev_err(sei->dev, "Failed to create AP IRQ domain\n");
425 		ret = -ENOMEM;
426 		goto remove_sei_domain;
427 	}
428 
429 	irq_domain_update_bus_token(sei->ap_domain, DOMAIN_BUS_WIRED);
430 
431 	/* Create the 'MSI' domain */
432 	sei->cp_domain = irq_domain_create_hierarchy(sei->sei_domain, 0,
433 						     sei->caps->cp_range.size,
434 						     of_node_to_fwnode(node),
435 						     &mvebu_sei_cp_domain_ops,
436 						     sei);
437 	if (!sei->cp_domain) {
438 		pr_err("Failed to create CPs IRQ domain\n");
439 		ret = -ENOMEM;
440 		goto remove_ap_domain;
441 	}
442 
443 	irq_domain_update_bus_token(sei->cp_domain, DOMAIN_BUS_GENERIC_MSI);
444 	sei->cp_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
445 	sei->cp_domain->msi_parent_ops = &sei_msi_parent_ops;
446 
447 	mvebu_sei_reset(sei);
448 
449 	irq_set_chained_handler_and_data(parent_irq, mvebu_sei_handle_cascade_irq, sei);
450 	return 0;
451 
452 remove_ap_domain:
453 	irq_domain_remove(sei->ap_domain);
454 remove_sei_domain:
455 	irq_domain_remove(sei->sei_domain);
456 dispose_irq:
457 	irq_dispose_mapping(parent_irq);
458 	return ret;
459 }
460 
461 static struct mvebu_sei_caps mvebu_sei_ap806_caps = {
462 	.ap_range = {
463 		.first = 0,
464 		.size = 21,
465 	},
466 	.cp_range = {
467 		.first = 21,
468 		.size = 43,
469 	},
470 };
471 
/* DT match table; .data carries the per-SoC interrupt ranges */
static const struct of_device_id mvebu_sei_of_match[] = {
	{
		.compatible = "marvell,ap806-sei",
		.data = &mvebu_sei_ap806_caps,
	},
	{},
};
479 
static struct platform_driver mvebu_sei_driver = {
	.probe  = mvebu_sei_probe,
	.driver = {
		.name = "mvebu-sei",
		.of_match_table = mvebu_sei_of_match,
	},
};
/* Built-in only: registered at device_initcall time, no module unload path */
builtin_platform_driver(mvebu_sei_driver);
488