xref: /linux/drivers/irqchip/irq-gic-v2m.c (revision 31d166642c7c601c65eccf0ff2e0afe9a0538be2)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARM GIC v2m MSI(-X) support
 * Support for Message Signaled Interrupts for systems that
 * implement ARM Generic Interrupt Controller: GICv2m.
 *
 * Copyright (C) 2014 Advanced Micro Devices, Inc.
 * Authors: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 *	    Harish Kasiviswanathan <harish.kasiviswanathan@amd.com>
 *	    Brandon Anderson <brandon.anderson@amd.com>
 */

#define pr_fmt(fmt) "GICv2m: " fmt

#include <linux/acpi.h>
#include <linux/dma-iommu.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/irqchip/arm-gic.h>

/*
 * MSI_TYPER:
 *     [31:26] Reserved
 *     [25:16] lowest SPI assigned to MSI
 *     [15:10] Reserved
 *     [9:0]   Number of SPIs assigned to MSI
 */
#define V2M_MSI_TYPER		       0x008
#define V2M_MSI_TYPER_BASE_SHIFT       16
#define V2M_MSI_TYPER_BASE_MASK	       0x3FF
#define V2M_MSI_TYPER_NUM_MASK	       0x3FF
#define V2M_MSI_SETSPI_NS	       0x040
#define V2M_MIN_SPI		       32
#define V2M_MAX_SPI		       1019
#define V2M_MSI_IIDR		       0xFCC

#define V2M_MSI_TYPER_BASE_SPI(x)      \
	       (((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK)

#define V2M_MSI_TYPER_NUM_SPI(x)       ((x) & V2M_MSI_TYPER_NUM_MASK)

/* APM X-Gene with GICv2m MSI_IIDR register value */
#define XGENE_GICV2M_MSI_IIDR		0x06000170

/* Broadcom NS2 GICv2m MSI_IIDR register value */
#define BCM_NS2_GICV2M_MSI_IIDR		0x0000013f

/* List of flags for specific v2m implementation */
#define GICV2M_NEEDS_SPI_OFFSET		0x00000001

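/*
 * All registered v2m frames are kept on v2m_nodes; v2m_lock serializes
 * allocation and release of MSI vectors in the per-frame bitmaps.
 */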
static LIST_HEAD(v2m_nodes);
static DEFINE_SPINLOCK(v2m_lock);

struct v2m_data {
	struct list_head entry;
	struct fwnode_handle *fwnode;
	struct resource res;	/* GICv2m resource */
	void __iomem *base;	/* GICv2m virt address */
	u32 spi_start;		/* The SPI number that MSIs start at */
	u32 nr_spis;		/* The number of SPIs for MSIs */
	u32 spi_offset;		/* offset to be subtracted from SPI number */
	unsigned long *bm;	/* MSI vector bitmap */
	u32 flags;		/* v2m flags for specific implementation */
};

static void gicv2m_mask_msi_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void gicv2m_unmask_msi_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

static struct irq_chip gicv2m_msi_irq_chip = {
	.name			= "MSI",
	.irq_mask		= gicv2m_mask_msi_irq,
	.irq_unmask		= gicv2m_unmask_msi_irq,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_write_msi_msg	= pci_msi_domain_write_msg,
};

static struct msi_domain_info gicv2m_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip	= &gicv2m_msi_irq_chip,
};

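/*
 * The doorbell is the frame's MSI_SETSPI_NS register; the payload is the
 * SPI number backing this MSI. Quirky frames (see GICV2M_NEEDS_SPI_OFFSET)
 * expect the payload relative to spi_offset rather than the absolute SPI.
 */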
static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct v2m_data *v2m = irq_data_get_irq_chip_data(data);
	phys_addr_t addr = v2m->res.start + V2M_MSI_SETSPI_NS;

	msg->address_hi = upper_32_bits(addr);
	msg->address_lo = lower_32_bits(addr);
	msg->data = data->hwirq;

	if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET)
		msg->data -= v2m->spi_offset;

	iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
}

static struct irq_chip gicv2m_irq_chip = {
	.name			= "GICv2m",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_compose_msi_msg	= gicv2m_compose_msi_msg,
};

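/*
 * Allocate the SPI backing @hwirq in the parent GIC domain. A DT parent
 * takes a 3-cell GIC binding (SPI number relative to 32), whereas an
 * ACPI/fwnode parent takes the absolute hwirq plus the trigger type.
 */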
static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain,
				       unsigned int virq,
				       irq_hw_number_t hwirq)
{
	struct irq_fwspec fwspec;
	struct irq_data *d;
	int err;

	if (is_of_node(domain->parent->fwnode)) {
		fwspec.fwnode = domain->parent->fwnode;
		fwspec.param_count = 3;
		fwspec.param[0] = 0;
		fwspec.param[1] = hwirq - 32;
		fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
	} else if (is_fwnode_irqchip(domain->parent->fwnode)) {
		fwspec.fwnode = domain->parent->fwnode;
		fwspec.param_count = 2;
		fwspec.param[0] = hwirq;
		fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
	} else {
		return -EINVAL;
	}

	err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
	if (err)
		return err;

	/* Configure the interrupt line to be edge */
	d = irq_domain_get_irq_data(domain->parent, virq);
	d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
	return 0;
}

static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq,
			       int nr_irqs)
{
	spin_lock(&v2m_lock);
	bitmap_release_region(v2m->bm, hwirq - v2m->spi_start,
			      get_count_order(nr_irqs));
	spin_unlock(&v2m_lock);
}

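/*
 * Find a v2m frame with a free, power-of-two aligned block of nr_irqs SPIs,
 * reserve it in that frame's bitmap, then allocate the backing SPIs in the
 * parent GIC domain and bind them to gicv2m_irq_chip.
 */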
static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				   unsigned int nr_irqs, void *args)
{
	msi_alloc_info_t *info = args;
	struct v2m_data *v2m = NULL, *tmp;
	int hwirq, offset, i, err = 0;

	spin_lock(&v2m_lock);
	list_for_each_entry(tmp, &v2m_nodes, entry) {
		offset = bitmap_find_free_region(tmp->bm, tmp->nr_spis,
						 get_count_order(nr_irqs));
		if (offset >= 0) {
			v2m = tmp;
			break;
		}
	}
	spin_unlock(&v2m_lock);

	if (!v2m)
		return -ENOSPC;

	hwirq = v2m->spi_start + offset;

	err = iommu_dma_prepare_msi(info->desc,
				    v2m->res.start + V2M_MSI_SETSPI_NS);
	if (err)
		return err;

	for (i = 0; i < nr_irqs; i++) {
		err = gicv2m_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
		if (err)
			goto fail;

		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
					      &gicv2m_irq_chip, v2m);
	}

	return 0;

fail:
	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
	gicv2m_unalloc_msi(v2m, hwirq, nr_irqs);
	return err;
}

static void gicv2m_irq_domain_free(struct irq_domain *domain,
				   unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct v2m_data *v2m = irq_data_get_irq_chip_data(d);

	gicv2m_unalloc_msi(v2m, d->hwirq, nr_irqs);
	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}

static const struct irq_domain_ops gicv2m_domain_ops = {
	.alloc			= gicv2m_irq_domain_alloc,
	.free			= gicv2m_irq_domain_free,
};

static bool is_msi_spi_valid(u32 base, u32 num)
{
	if (base < V2M_MIN_SPI) {
		pr_err("Invalid MSI base SPI (base:%u)\n", base);
		return false;
	}

	if ((num == 0) || (base + num > V2M_MAX_SPI)) {
		pr_err("Number of SPIs (%u) exceeds maximum (%u)\n",
		       num, V2M_MAX_SPI - V2M_MIN_SPI + 1);
		return false;
	}

	return true;
}

static struct irq_chip gicv2m_pmsi_irq_chip = {
	.name			= "pMSI",
};

static struct msi_domain_ops gicv2m_pmsi_ops = {
};

static struct msi_domain_info gicv2m_pmsi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
	.ops	= &gicv2m_pmsi_ops,
	.chip	= &gicv2m_pmsi_irq_chip,
};

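/* Undo gicv2m_init_one() for every frame registered so far. */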
static void gicv2m_teardown(void)
{
	struct v2m_data *v2m, *tmp;

	list_for_each_entry_safe(v2m, tmp, &v2m_nodes, entry) {
		list_del(&v2m->entry);
		kfree(v2m->bm);
		iounmap(v2m->base);
		of_node_put(to_of_node(v2m->fwnode));
		if (is_fwnode_irqchip(v2m->fwnode))
			irq_domain_free_fwnode(v2m->fwnode);
		kfree(v2m);
	}
}

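/*
 * Build the irqdomain hierarchy: a nexus domain stacked on the GIC, with
 * PCI MSI and platform MSI domains on top. The first registered frame's
 * fwnode identifies all three domains.
 */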
static int gicv2m_allocate_domains(struct irq_domain *parent)
{
	struct irq_domain *inner_domain, *pci_domain, *plat_domain;
	struct v2m_data *v2m;

	v2m = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
	if (!v2m)
		return 0;

	inner_domain = irq_domain_create_tree(v2m->fwnode,
					      &gicv2m_domain_ops, v2m);
	if (!inner_domain) {
		pr_err("Failed to create GICv2m domain\n");
		return -ENOMEM;
	}

	irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
	inner_domain->parent = parent;
	pci_domain = pci_msi_create_irq_domain(v2m->fwnode,
					       &gicv2m_msi_domain_info,
					       inner_domain);
	plat_domain = platform_msi_create_irq_domain(v2m->fwnode,
						     &gicv2m_pmsi_domain_info,
						     inner_domain);
	if (!pci_domain || !plat_domain) {
		pr_err("Failed to create MSI domains\n");
		if (plat_domain)
			irq_domain_remove(plat_domain);
		if (pci_domain)
			irq_domain_remove(pci_domain);
		irq_domain_remove(inner_domain);
		return -ENOMEM;
	}

	return 0;
}

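/*
 * Register a single v2m frame: map its registers, take the SPI range from
 * the firmware override if one was given or from MSI_TYPER otherwise,
 * apply implementation quirks keyed on MSI_IIDR, and set up the
 * allocation bitmap.
 */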
static int __init gicv2m_init_one(struct fwnode_handle *fwnode,
				  u32 spi_start, u32 nr_spis,
				  struct resource *res)
{
	int ret;
	struct v2m_data *v2m;

	v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL);
	if (!v2m) {
		pr_err("Failed to allocate struct v2m_data.\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&v2m->entry);
	v2m->fwnode = fwnode;

	memcpy(&v2m->res, res, sizeof(struct resource));

	v2m->base = ioremap(v2m->res.start, resource_size(&v2m->res));
	if (!v2m->base) {
		pr_err("Failed to map GICv2m resource\n");
		ret = -ENOMEM;
		goto err_free_v2m;
	}

	if (spi_start && nr_spis) {
		v2m->spi_start = spi_start;
		v2m->nr_spis = nr_spis;
	} else {
		u32 typer = readl_relaxed(v2m->base + V2M_MSI_TYPER);

		v2m->spi_start = V2M_MSI_TYPER_BASE_SPI(typer);
		v2m->nr_spis = V2M_MSI_TYPER_NUM_SPI(typer);
	}

	if (!is_msi_spi_valid(v2m->spi_start, v2m->nr_spis)) {
		ret = -EINVAL;
		goto err_iounmap;
	}

	/*
	 * APM X-Gene GICv2m implementation has an erratum where
	 * the MSI data needs to be the offset from the spi_start
	 * in order to trigger the correct MSI interrupt. This is
	 * different from the standard GICv2m implementation where
	 * the MSI data is the absolute value within the range from
	 * spi_start to (spi_start + num_spis).
	 *
	 * Broadcom NS2 GICv2m implementation has an erratum where the MSI data
	 * is 'spi_number - 32'
	 */
	switch (readl_relaxed(v2m->base + V2M_MSI_IIDR)) {
	case XGENE_GICV2M_MSI_IIDR:
		v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
		v2m->spi_offset = v2m->spi_start;
		break;
	case BCM_NS2_GICV2M_MSI_IIDR:
		v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
		v2m->spi_offset = 32;
		break;
	}

	v2m->bm = kcalloc(BITS_TO_LONGS(v2m->nr_spis), sizeof(long),
			  GFP_KERNEL);
	if (!v2m->bm) {
		ret = -ENOMEM;
		goto err_iounmap;
	}

	list_add_tail(&v2m->entry, &v2m_nodes);

	pr_info("range%pR, SPI[%d:%d]\n", res,
		v2m->spi_start, (v2m->spi_start + v2m->nr_spis - 1));
	return 0;

err_iounmap:
	iounmap(v2m->base);
err_free_v2m:
	kfree(v2m);
	return ret;
}

static struct of_device_id gicv2m_device_id[] = {
	{	.compatible	= "arm,gic-v2m-frame",	},
	{},
};

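/*
 * Scan for "arm,gic-v2m-frame" nodes (normally subnodes of the GIC) and
 * register each one flagged as an msi-controller. The optional
 * "arm,msi-base-spi"/"arm,msi-num-spis" properties override MSI_TYPER.
 */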
static int __init gicv2m_of_init(struct fwnode_handle *parent_handle,
				 struct irq_domain *parent)
{
	int ret = 0;
	struct device_node *node = to_of_node(parent_handle);
	struct device_node *child;

	for (child = of_find_matching_node(node, gicv2m_device_id); child;
	     child = of_find_matching_node(child, gicv2m_device_id)) {
		u32 spi_start = 0, nr_spis = 0;
		struct resource res;

		if (!of_find_property(child, "msi-controller", NULL))
			continue;

		ret = of_address_to_resource(child, 0, &res);
		if (ret) {
			pr_err("Failed to allocate v2m resource.\n");
			break;
		}

		if (!of_property_read_u32(child, "arm,msi-base-spi",
					  &spi_start) &&
		    !of_property_read_u32(child, "arm,msi-num-spis", &nr_spis))
			pr_info("DT overriding V2M MSI_TYPER (base:%u, num:%u)\n",
				spi_start, nr_spis);

		ret = gicv2m_init_one(&child->fwnode, spi_start, nr_spis, &res);
		if (ret) {
			of_node_put(child);
			break;
		}
	}

	if (!ret)
		ret = gicv2m_allocate_domains(parent);
	if (ret)
		gicv2m_teardown();
	return ret;
}

#ifdef CONFIG_ACPI
static int acpi_num_msi;

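/*
 * Registered via pci_msi_register_fwnode_provider() so that PCI host
 * bridges described in ACPI can locate the MSI domain to attach to.
 */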
static struct fwnode_handle *gicv2m_get_fwnode(struct device *dev)
{
	struct v2m_data *data;

	if (WARN_ON(acpi_num_msi <= 0))
		return NULL;

	/* We only return the fwnode of the first MSI frame. */
	data = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
	if (!data)
		return NULL;

	return data->fwnode;
}

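/*
 * Register one GICv2m frame described by a MADT GENERIC_MSI_FRAME entry.
 * The SPI range comes from the entry when ACPI_MADT_OVERRIDE_SPI_VALUES is
 * set, otherwise it is read back from the frame's MSI_TYPER register.
 */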
static int __init
acpi_parse_madt_msi(union acpi_subtable_headers *header,
		    const unsigned long end)
{
	int ret;
	struct resource res;
	u32 spi_start = 0, nr_spis = 0;
	struct acpi_madt_generic_msi_frame *m;
	struct fwnode_handle *fwnode;

	m = (struct acpi_madt_generic_msi_frame *)header;
	if (BAD_MADT_ENTRY(m, end))
		return -EINVAL;

	res.start = m->base_address;
	res.end = m->base_address + SZ_4K - 1;
	res.flags = IORESOURCE_MEM;

	if (m->flags & ACPI_MADT_OVERRIDE_SPI_VALUES) {
		spi_start = m->spi_base;
		nr_spis = m->spi_count;

		pr_info("ACPI overriding V2M MSI_TYPER (base:%u, num:%u)\n",
			spi_start, nr_spis);
	}

	fwnode = irq_domain_alloc_fwnode((void *)m->base_address);
	if (!fwnode) {
		pr_err("Unable to allocate GICv2m domain token\n");
		return -EINVAL;
	}

	ret = gicv2m_init_one(fwnode, spi_start, nr_spis, &res);
	if (ret)
		irq_domain_free_fwnode(fwnode);

	return ret;
}

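/*
 * Parse all GENERIC_MSI_FRAME entries from the MADT, then create the MSI
 * domains on top of @parent. Any failure tears down the frames registered
 * so far.
 */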
static int __init gicv2m_acpi_init(struct irq_domain *parent)
{
	int ret;

	if (acpi_num_msi > 0)
		return 0;

	acpi_num_msi = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_MSI_FRAME,
				      acpi_parse_madt_msi, 0);

	if (acpi_num_msi <= 0)
		goto err_out;

	ret = gicv2m_allocate_domains(parent);
	if (ret)
		goto err_out;

	pci_msi_register_fwnode_provider(&gicv2m_get_fwnode);

	return 0;

err_out:
	gicv2m_teardown();
	return -EINVAL;
}
#else /* CONFIG_ACPI */
static int __init gicv2m_acpi_init(struct irq_domain *parent)
{
	return -EINVAL;
}
#endif /* CONFIG_ACPI */

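/*
 * Entry point used by the GIC driver once its irqdomain is up: probe v2m
 * frames from the device tree when the parent is a DT node, otherwise from
 * the ACPI MADT.
 */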
int __init gicv2m_init(struct fwnode_handle *parent_handle,
		       struct irq_domain *parent)
{
	if (is_of_node(parent_handle))
		return gicv2m_of_init(parent_handle, parent);

	return gicv2m_acpi_init(parent);
}