xref: /linux/arch/powerpc/sysdev/fsl_msi.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2007-2011 Freescale Semiconductor, Inc.
4  *
5  * Author: Tony Li <tony.li@freescale.com>
6  *	   Jason Jin <Jason.jin@freescale.com>
7  *
8  * The hwirq allocation and free code is reused from sysdev/mpic_msi.c
9  */
10 #include <linux/irq.h>
11 #include <linux/msi.h>
12 #include <linux/pci.h>
13 #include <linux/slab.h>
14 #include <linux/of.h>
15 #include <linux/of_address.h>
16 #include <linux/of_irq.h>
17 #include <linux/platform_device.h>
18 #include <linux/property.h>
19 #include <linux/interrupt.h>
20 #include <linux/irqdomain.h>
21 #include <linux/seq_file.h>
22 #include <sysdev/fsl_soc.h>
23 #include <asm/hw_irq.h>
24 #include <asm/ppc-pci.h>
25 #include <asm/mpic.h>
26 #include <asm/fsl_hcalls.h>
27 
28 #include "fsl_msi.h"
29 #include "fsl_pci.h"
30 
31 #define MSIIR_OFFSET_MASK	0xfffff
32 #define MSIIR_IBS_SHIFT		0
33 #define MSIIR_SRS_SHIFT		5
34 #define MSIIR1_IBS_SHIFT	4
35 #define MSIIR1_SRS_SHIFT	0
36 #define MSI_SRS_MASK		0xf
37 #define MSI_IBS_MASK		0x1f
38 
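/*
 * A hwirq packs the MSIR register index (SRS field) and the interrupt bit
 * within that register (IBS field), using the per-controller shift values
 * so that both the MSIIR and MSIIR1 layouts are covered.
 */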
39 #define msi_hwirq(msi, msir_index, intr_index) \
40 		((msir_index) << (msi)->srs_shift | \
41 		 ((intr_index) << (msi)->ibs_shift))
42 
43 static LIST_HEAD(msi_head);
44 
45 struct fsl_msi_feature {
46 	u32 fsl_pic_ip;
47 	u32 msiir_offset; /* Offset of MSIIR, relative to start of MSIR bank */
48 };
49 
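/*
 * Per-cascade-interrupt bookkeeping: the owning MSI controller, the index
 * of the MSIR register this cascade serves, and the Linux interrupt number
 * of the cascade itself.
 */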
50 struct fsl_msi_cascade_data {
51 	struct fsl_msi *msi_data;
52 	int index;
53 	int virq;
54 };
55 
56 static inline u32 fsl_msi_read(u32 __iomem *base, unsigned int reg)
57 {
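	/* 'reg' is a byte offset; 'base' is a u32 pointer, hence the >> 2 */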
58 	return in_be32(base + (reg >> 2));
59 }
60 
61 /*
62  * Nothing to do here: the MSIR register has already been read in the
63  * cascade interrupt handler, so this MSI interrupt has already been acked.
64  */
65 static void fsl_msi_end_irq(struct irq_data *d)
66 {
67 }
68 
69 static void fsl_msi_print_chip(struct irq_data *irqd, struct seq_file *p)
70 {
71 	struct fsl_msi *msi_data = irqd->domain->host_data;
72 	irq_hw_number_t hwirq = irqd_to_hwirq(irqd);
73 	int cascade_virq, srs;
74 
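	/* The SRS field of the hwirq selects the MSIR register and thus the cascade */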
75 	srs = (hwirq >> msi_data->srs_shift) & MSI_SRS_MASK;
76 	cascade_virq = msi_data->cascade_array[srs]->virq;
77 
78 	seq_printf(p, " fsl-msi-%d", cascade_virq);
79 }
80 
81 
82 static struct irq_chip fsl_msi_chip = {
83 	.irq_mask	= pci_msi_mask_irq,
84 	.irq_unmask	= pci_msi_unmask_irq,
85 	.irq_ack	= fsl_msi_end_irq,
86 	.irq_print_chip = fsl_msi_print_chip,
87 };
88 
89 static int fsl_msi_host_map(struct irq_domain *h, unsigned int virq,
90 				irq_hw_number_t hw)
91 {
92 	struct fsl_msi *msi_data = h->host_data;
93 	struct irq_chip *chip = &fsl_msi_chip;
94 
95 	irq_set_status_flags(virq, IRQ_TYPE_EDGE_FALLING);
96 
97 	irq_set_chip_data(virq, msi_data);
98 	irq_set_chip_and_handler(virq, chip, handle_edge_irq);
99 
100 	return 0;
101 }
102 
103 static const struct irq_domain_ops fsl_msi_host_ops = {
104 	.map = fsl_msi_host_map,
105 };
106 
107 static int fsl_msi_init_allocator(struct fsl_msi *msi_data)
108 {
109 	int rc, hwirq;
110 
111 	rc = msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS_MAX,
112 			      irq_domain_get_of_node(msi_data->irqhost));
113 	if (rc)
114 		return rc;
115 
116 	/*
117 	 * Reserve all the hwirqs; those that are actually available will be
118 	 * released again in fsl_msi_setup_hwirq().
119 	 */
120 	for (hwirq = 0; hwirq < NR_MSI_IRQS_MAX; hwirq++)
121 		msi_bitmap_reserve_hwirq(&msi_data->bitmap, hwirq);
122 
123 	return 0;
124 }
125 
126 static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
127 {
128 	struct msi_desc *entry;
129 	struct fsl_msi *msi_data;
130 	irq_hw_number_t hwirq;
131 
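	/*
	 * For each descriptor that still has an interrupt assigned, detach
	 * the descriptor, dispose of the virq mapping and hand the hwirq
	 * back to the bitmap allocator.
	 */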
132 	msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
133 		hwirq = virq_to_hw(entry->irq);
134 		msi_data = irq_get_chip_data(entry->irq);
135 		irq_set_msi_desc(entry->irq, NULL);
136 		irq_dispose_mapping(entry->irq);
137 		entry->irq = 0;
138 		msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
139 	}
140 }
141 
142 static void fsl_compose_msi_msg(struct pci_dev *pdev, int hwirq,
143 				struct msi_msg *msg,
144 				struct fsl_msi *fsl_msi_data)
145 {
146 	struct fsl_msi *msi_data = fsl_msi_data;
147 	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
148 	u64 address; /* Physical address of the MSIIR */
149 	int len;
150 	const __be64 *reg;
151 
152 	/* If the msi-address-64 property exists, then use it */
153 	reg = of_get_property(hose->dn, "msi-address-64", &len);
154 	if (reg && (len == sizeof(u64)))
155 		address = be64_to_cpup(reg);
156 	else
157 		address = fsl_pci_immrbar_base(hose) + msi_data->msiir_offset;
158 
159 	msg->address_lo = lower_32_bits(address);
160 	msg->address_hi = upper_32_bits(address);
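	/*
	 * The message data is simply the hwirq; its SRS/IBS fields tell the
	 * MSI block which MSIR register and bit to set when the device
	 * writes this value to MSIIR (decoded again in fsl_msi_cascade()).
	 */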
161 
162 	/*
163 	 * MPIC version 2.0 has erratum PIC1, which breaks both MSI
164 	 * and MSI-X.
165 	 * Byte-swapping the data is a workaround that lets MSI-X
166 	 * function properly. It does not help MSI, so MSI is refused
167 	 * on affected chips in fsl_setup_msi_irqs().
168 	 */
169 	if (msi_data->feature & MSI_HW_ERRATA_ENDIAN)
170 		msg->data = __swab32(hwirq);
171 	else
172 		msg->data = hwirq;
173 
174 	pr_debug("%s: allocated srs: %d, ibs: %d\n", __func__,
175 		 (hwirq >> msi_data->srs_shift) & MSI_SRS_MASK,
176 		 (hwirq >> msi_data->ibs_shift) & MSI_IBS_MASK);
177 }
178 
179 static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
180 {
181 	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
182 	struct device_node *np;
183 	phandle phandle = 0;
184 	int rc, hwirq = -ENOMEM;
185 	unsigned int virq;
186 	struct msi_desc *entry;
187 	struct msi_msg msg;
188 	struct fsl_msi *msi_data;
189 
190 	if (type == PCI_CAP_ID_MSI) {
191 		/*
192 		 * MPIC version 2.0 has erratum PIC1, which makes plain MSI
193 		 * unusable. Check for it and refuse MSI on boards
194 		 * with this erratum.
195 		 */
196 		list_for_each_entry(msi_data, &msi_head, list)
197 			if (msi_data->feature & MSI_HW_ERRATA_ENDIAN)
198 				return -EINVAL;
199 	}
200 
201 	/*
202 	 * If the PCI node has an fsl,msi property, then we need to use it
203 	 * to find the specific MSI.
204 	 */
205 	np = of_parse_phandle(hose->dn, "fsl,msi", 0);
206 	if (np) {
207 		if (of_device_is_compatible(np, "fsl,mpic-msi") ||
208 		    of_device_is_compatible(np, "fsl,vmpic-msi") ||
209 		    of_device_is_compatible(np, "fsl,vmpic-msi-v4.3"))
210 			phandle = np->phandle;
211 		else {
212 			dev_err(&pdev->dev,
213 				"node %pOF has an invalid fsl,msi phandle %u\n",
214 				hose->dn, np->phandle);
215 			of_node_put(np);
216 			return -EINVAL;
217 		}
218 		of_node_put(np);
219 	}
220 
221 	msi_for_each_desc(entry, &pdev->dev, MSI_DESC_NOTASSOCIATED) {
222 		/*
223 		 * Loop over all the MSI devices until we find one that has an
224 		 * available interrupt.
225 		 */
226 		list_for_each_entry(msi_data, &msi_head, list) {
227 			/*
228 			 * If the PCI node has an fsl,msi property, then we
229 			 * restrict our search to the corresponding MSI node.
230 			 * The simplest way is to skip over MSI nodes with the
231 			 * wrong phandle. Under the Freescale hypervisor, this
232 			 * has the additional benefit of skipping over MSI
233 			 * nodes that are not mapped in the PAMU.
234 			 */
235 			if (phandle && (phandle != msi_data->phandle))
236 				continue;
237 
238 			hwirq = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1);
239 			if (hwirq >= 0)
240 				break;
241 		}
242 
243 		if (hwirq < 0) {
244 			rc = hwirq;
245 			dev_err(&pdev->dev, "could not allocate MSI interrupt\n");
246 			goto out_free;
247 		}
248 
249 		virq = irq_create_mapping(msi_data->irqhost, hwirq);
250 
251 		if (!virq) {
252 		dev_err(&pdev->dev, "failed to map hwirq %i\n", hwirq);
253 			msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
254 			rc = -ENOSPC;
255 			goto out_free;
256 		}
257 		/* chip_data is set to msi_data via host->host_data in fsl_msi_host_map() */
258 		irq_set_msi_desc(virq, entry);
259 
260 		fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data);
261 		pci_write_msi_msg(virq, &msg);
262 	}
263 	return 0;
264 
265 out_free:
266 	/* free by the caller of this function */
267 	return rc;
268 }
269 
270 static irqreturn_t fsl_msi_cascade(int irq, void *data)
271 {
272 	struct fsl_msi *msi_data;
273 	int msir_index = -1;
274 	u32 msir_value = 0;
275 	u32 intr_index;
276 	u32 have_shift = 0;
277 	struct fsl_msi_cascade_data *cascade_data = data;
278 	irqreturn_t ret = IRQ_NONE;
279 
280 	msi_data = cascade_data->msi_data;
281 
282 	msir_index = cascade_data->index;
283 
284 	switch (msi_data->feature & FSL_PIC_IP_MASK) {
285 	case FSL_PIC_IP_MPIC:
286 		msir_value = fsl_msi_read(msi_data->msi_regs,
287 			msir_index * 0x10);
288 		break;
289 	case FSL_PIC_IP_IPIC:
290 		msir_value = fsl_msi_read(msi_data->msi_regs, msir_index * 0x4);
291 		break;
292 #ifdef CONFIG_EPAPR_PARAVIRT
293 	case FSL_PIC_IP_VMPIC: {
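		/*
		 * Under the Freescale hypervisor the MSIR registers are not
		 * memory-mapped; read them via the fh_vmpic_get_msir()
		 * hypercall instead.
		 */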
294 		unsigned int ret;
295 		ret = fh_vmpic_get_msir(virq_to_hw(irq), &msir_value);
296 		if (ret) {
297 			pr_err("fsl-msi: fh_vmpic_get_msir() failed for "
298 			       "irq %u (ret=%u)\n", irq, ret);
299 			msir_value = 0;
300 		}
301 		break;
302 	}
303 #endif
304 	}
305 
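	/*
	 * Walk the set bits of the MSIR snapshot, lowest first. 'have_shift'
	 * tracks how many bits have already been consumed so that intr_index,
	 * taken from the shifted value, still yields the bit position in the
	 * original register.
	 */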
306 	while (msir_value) {
307 		int err;
308 		intr_index = ffs(msir_value) - 1;
309 
310 		err = generic_handle_domain_irq(msi_data->irqhost,
311 				msi_hwirq(msi_data, msir_index,
312 					  intr_index + have_shift));
313 		if (!err)
314 			ret = IRQ_HANDLED;
315 
316 		have_shift += intr_index + 1;
317 		msir_value = msir_value >> (intr_index + 1);
318 	}
319 
320 	return ret;
321 }
322 
323 static void fsl_of_msi_remove(struct platform_device *ofdev)
324 {
325 	struct fsl_msi *msi = platform_get_drvdata(ofdev);
326 	int virq, i;
327 
328 	if (msi->list.prev != NULL)
329 		list_del(&msi->list);
330 	for (i = 0; i < NR_MSI_REG_MAX; i++) {
331 		if (msi->cascade_array[i]) {
332 			virq = msi->cascade_array[i]->virq;
333 
334 			BUG_ON(!virq);
335 
336 			free_irq(virq, msi->cascade_array[i]);
337 			kfree(msi->cascade_array[i]);
338 			irq_dispose_mapping(virq);
339 		}
340 	}
341 	if (msi->bitmap.bitmap)
342 		msi_bitmap_free(&msi->bitmap);
343 	if ((msi->feature & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC)
344 		iounmap(msi->msi_regs);
345 	kfree(msi);
346 }
347 
348 static struct lock_class_key fsl_msi_irq_class;
349 static struct lock_class_key fsl_msi_irq_request_class;
350 
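/*
 * Wire up one MSIR bank: map and request the cascade interrupt named by
 * 'irq_index' in the device tree, record which MSIR register ('offset')
 * it serves, and release that bank's hwirqs to the allocator.
 */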
351 static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
352 			       int offset, int irq_index)
353 {
354 	struct fsl_msi_cascade_data *cascade_data = NULL;
355 	int virt_msir, i, ret;
356 
357 	virt_msir = irq_of_parse_and_map(dev->dev.of_node, irq_index);
358 	if (!virt_msir) {
359 		dev_err(&dev->dev, "%s: Cannot translate IRQ index %d\n",
360 			__func__, irq_index);
361 		return 0;
362 	}
363 
364 	cascade_data = kzalloc(sizeof(struct fsl_msi_cascade_data), GFP_KERNEL);
365 	if (!cascade_data) {
366 		dev_err(&dev->dev, "No memory for MSI cascade data\n");
367 		return -ENOMEM;
368 	}
369 	irq_set_lockdep_class(virt_msir, &fsl_msi_irq_class,
370 			      &fsl_msi_irq_request_class);
371 	cascade_data->index = offset;
372 	cascade_data->msi_data = msi;
373 	cascade_data->virq = virt_msir;
374 	msi->cascade_array[irq_index] = cascade_data;
375 
376 	ret = request_irq(virt_msir, fsl_msi_cascade, IRQF_NO_THREAD,
377 			  "fsl-msi-cascade", cascade_data);
378 	if (ret) {
379 		dev_err(&dev->dev, "failed to request_irq(%d), ret = %d\n",
380 			virt_msir, ret);
381 		return ret;
382 	}
383 
384 	/* Release the hwirqs corresponding to this MSI register */
385 	for (i = 0; i < IRQS_PER_MSI_REG; i++)
386 		msi_bitmap_free_hwirqs(&msi->bitmap,
387 				       msi_hwirq(msi, offset, i), 1);
388 
389 	return 0;
390 }
391 
392 static const struct of_device_id fsl_of_msi_ids[];
393 static int fsl_of_msi_probe(struct platform_device *dev)
394 {
395 	struct fsl_msi *msi;
396 	struct resource res, msiir;
397 	int err, i, j, irq_index, count;
398 	const u32 *p;
399 	const struct fsl_msi_feature *features;
400 	int len;
401 	u32 offset;
402 	struct pci_controller *phb;
403 
404 	features = device_get_match_data(&dev->dev);
405 
406 	printk(KERN_DEBUG "Setting up Freescale MSI support\n");
407 
408 	msi = kzalloc(sizeof(struct fsl_msi), GFP_KERNEL);
409 	if (!msi) {
410 		dev_err(&dev->dev, "No memory for MSI structure\n");
411 		return -ENOMEM;
412 	}
413 	platform_set_drvdata(dev, msi);
414 
415 	msi->irqhost = irq_domain_add_linear(dev->dev.of_node,
416 				      NR_MSI_IRQS_MAX, &fsl_msi_host_ops, msi);
417 
418 	if (msi->irqhost == NULL) {
419 		dev_err(&dev->dev, "No memory for MSI irqhost\n");
420 		err = -ENOMEM;
421 		goto error_out;
422 	}
423 
424 	/*
425 	 * Under the Freescale hypervisor, the msi nodes don't have a 'reg'
426 	 * property.  Instead, we use hypercalls to access the MSI.
427 	 */
428 	if ((features->fsl_pic_ip & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC) {
429 		err = of_address_to_resource(dev->dev.of_node, 0, &res);
430 		if (err) {
431 			dev_err(&dev->dev, "invalid resource for node %pOF\n",
432 				dev->dev.of_node);
433 			goto error_out;
434 		}
435 
436 		msi->msi_regs = ioremap(res.start, resource_size(&res));
437 		if (!msi->msi_regs) {
438 			err = -ENOMEM;
439 			dev_err(&dev->dev, "could not map node %pOF\n",
440 				dev->dev.of_node);
441 			goto error_out;
442 		}
443 		msi->msiir_offset =
444 			features->msiir_offset + (res.start & 0xfffff);
445 
446 		/*
447 		 * First read the MSIIR/MSIIR1 offset from the device tree;
448 		 * on failure fall back to the hardcoded MSIIR offset.
449 		 */
450 		if (of_address_to_resource(dev->dev.of_node, 1, &msiir))
451 			msi->msiir_offset = features->msiir_offset +
452 					    (res.start & MSIIR_OFFSET_MASK);
453 		else
454 			msi->msiir_offset = msiir.start & MSIIR_OFFSET_MASK;
455 	}
456 
457 	msi->feature = features->fsl_pic_ip;
458 
459 	/* For erratum PIC1 on MPIC version 2.0 */
460 	if ((features->fsl_pic_ip & FSL_PIC_IP_MASK) == FSL_PIC_IP_MPIC
461 			&& (fsl_mpic_primary_get_version() == 0x0200))
462 		msi->feature |= MSI_HW_ERRATA_ENDIAN;
463 
464 	/*
465 	 * Remember the phandle, so that we can match with any PCI nodes
466 	 * that have an "fsl,msi" property.
467 	 */
468 	msi->phandle = dev->dev.of_node->phandle;
469 
470 	err = fsl_msi_init_allocator(msi);
471 	if (err) {
472 		dev_err(&dev->dev, "Error allocating MSI bitmap\n");
473 		goto error_out;
474 	}
475 
476 	p = of_get_property(dev->dev.of_node, "msi-available-ranges", &len);
477 
478 	if (of_device_is_compatible(dev->dev.of_node, "fsl,mpic-msi-v4.3") ||
479 	    of_device_is_compatible(dev->dev.of_node, "fsl,vmpic-msi-v4.3")) {
480 		msi->srs_shift = MSIIR1_SRS_SHIFT;
481 		msi->ibs_shift = MSIIR1_IBS_SHIFT;
482 		if (p)
483 			dev_warn(&dev->dev, "%s: does not support msi-available-ranges property\n",
484 				__func__);
485 
486 		for (irq_index = 0; irq_index < NR_MSI_REG_MSIIR1;
487 		     irq_index++) {
488 			err = fsl_msi_setup_hwirq(msi, dev,
489 						  irq_index, irq_index);
490 			if (err)
491 				goto error_out;
492 		}
493 	} else {
494 		static const u32 all_avail[] =
495 			{ 0, NR_MSI_REG_MSIIR * IRQS_PER_MSI_REG };
496 
497 		msi->srs_shift = MSIIR_SRS_SHIFT;
498 		msi->ibs_shift = MSIIR_IBS_SHIFT;
499 
500 		if (p && len % (2 * sizeof(u32)) != 0) {
501 			dev_err(&dev->dev, "%s: Malformed msi-available-ranges property\n",
502 				__func__);
503 			err = -EINVAL;
504 			goto error_out;
505 		}
506 
507 		if (!p) {
508 			p = all_avail;
509 			len = sizeof(all_avail);
510 		}
511 
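		/*
		 * Each msi-available-ranges entry is a <start count> pair in
		 * interrupt numbers; both must be multiples of IRQS_PER_MSI_REG
		 * so they can be converted to whole MSIR registers below.
		 */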
512 		for (irq_index = 0, i = 0; i < len / (2 * sizeof(u32)); i++) {
513 			if (p[i * 2] % IRQS_PER_MSI_REG ||
514 			    p[i * 2 + 1] % IRQS_PER_MSI_REG) {
515 				pr_warn("%s: %pOF: msi available range of %u at %u is not IRQ-aligned\n",
516 				       __func__, dev->dev.of_node,
517 				       p[i * 2 + 1], p[i * 2]);
518 				err = -EINVAL;
519 				goto error_out;
520 			}
521 
522 			offset = p[i * 2] / IRQS_PER_MSI_REG;
523 			count = p[i * 2 + 1] / IRQS_PER_MSI_REG;
524 
525 			for (j = 0; j < count; j++, irq_index++) {
526 				err = fsl_msi_setup_hwirq(msi, dev, offset + j,
527 							  irq_index);
528 				if (err)
529 					goto error_out;
530 			}
531 		}
532 	}
533 
534 	list_add_tail(&msi->list, &msi_head);
535 
536 	/*
537 	 * Apply the MSI ops to all the controllers.
538 	 * It doesn't hurt to reassign the same ops,
539 	 * but bail out if we find another MSI driver.
540 	 */
541 	list_for_each_entry(phb, &hose_list, list_node) {
542 		if (!phb->controller_ops.setup_msi_irqs) {
543 			phb->controller_ops.setup_msi_irqs = fsl_setup_msi_irqs;
544 			phb->controller_ops.teardown_msi_irqs = fsl_teardown_msi_irqs;
545 		} else if (phb->controller_ops.setup_msi_irqs != fsl_setup_msi_irqs) {
546 			dev_err(&dev->dev, "Different MSI driver already installed!\n");
547 			err = -ENODEV;
548 			goto error_out;
549 		}
550 	}
551 	return 0;
552 error_out:
553 	fsl_of_msi_remove(dev);
554 	return err;
555 }
556 
557 static const struct fsl_msi_feature mpic_msi_feature = {
558 	.fsl_pic_ip = FSL_PIC_IP_MPIC,
559 	.msiir_offset = 0x140,
560 };
561 
562 static const struct fsl_msi_feature ipic_msi_feature = {
563 	.fsl_pic_ip = FSL_PIC_IP_IPIC,
564 	.msiir_offset = 0x38,
565 };
566 
567 #ifdef CONFIG_EPAPR_PARAVIRT
568 static const struct fsl_msi_feature vmpic_msi_feature = {
569 	.fsl_pic_ip = FSL_PIC_IP_VMPIC,
570 	.msiir_offset = 0,
571 };
572 #endif
573 
574 static const struct of_device_id fsl_of_msi_ids[] = {
575 	{
576 		.compatible = "fsl,mpic-msi",
577 		.data = &mpic_msi_feature,
578 	},
579 	{
580 		.compatible = "fsl,mpic-msi-v4.3",
581 		.data = &mpic_msi_feature,
582 	},
583 	{
584 		.compatible = "fsl,ipic-msi",
585 		.data = &ipic_msi_feature,
586 	},
587 #ifdef CONFIG_EPAPR_PARAVIRT
588 	{
589 		.compatible = "fsl,vmpic-msi",
590 		.data = &vmpic_msi_feature,
591 	},
592 	{
593 		.compatible = "fsl,vmpic-msi-v4.3",
594 		.data = &vmpic_msi_feature,
595 	},
596 #endif
597 	{}
598 };
599 
600 static struct platform_driver fsl_of_msi_driver = {
601 	.driver = {
602 		.name = "fsl-msi",
603 		.of_match_table = fsl_of_msi_ids,
604 	},
605 	.probe = fsl_of_msi_probe,
606 	.remove = fsl_of_msi_remove,
607 };
608 
609 static __init int fsl_of_msi_init(void)
610 {
611 	return platform_driver_register(&fsl_of_msi_driver);
612 }
613 
614 subsys_initcall(fsl_of_msi_init);
615