xref: /linux/arch/powerpc/sysdev/fsl_msi.c (revision 69bfec7548f4c1595bac0e3ddfc0458a5af31f4c)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2007-2011 Freescale Semiconductor, Inc.
4  *
5  * Author: Tony Li <tony.li@freescale.com>
6  *	   Jason Jin <Jason.jin@freescale.com>
7  *
8  * The hwirq allocation and free code is reused from sysdev/mpic_msi.c
9  */
10 #include <linux/irq.h>
11 #include <linux/msi.h>
12 #include <linux/pci.h>
13 #include <linux/slab.h>
14 #include <linux/of_address.h>
15 #include <linux/of_irq.h>
16 #include <linux/of_platform.h>
17 #include <linux/interrupt.h>
18 #include <linux/irqdomain.h>
19 #include <linux/seq_file.h>
20 #include <sysdev/fsl_soc.h>
21 #include <asm/hw_irq.h>
22 #include <asm/ppc-pci.h>
23 #include <asm/mpic.h>
24 #include <asm/fsl_hcalls.h>
25 
26 #include "fsl_msi.h"
27 #include "fsl_pci.h"
28 
29 #define MSIIR_OFFSET_MASK	0xfffff
30 #define MSIIR_IBS_SHIFT		0
31 #define MSIIR_SRS_SHIFT		5
32 #define MSIIR1_IBS_SHIFT	4
33 #define MSIIR1_SRS_SHIFT	0
34 #define MSI_SRS_MASK		0xf
35 #define MSI_IBS_MASK		0x1f
36 
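/*
 * Compose a hardware IRQ number from the MSIR register index (SRS field)
 * and the interrupt bit index within that register (IBS field), using the
 * per-controller shift values set at probe time.
 */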
37 #define msi_hwirq(msi, msir_index, intr_index) \
38 		((msir_index) << (msi)->srs_shift | \
39 		 ((intr_index) << (msi)->ibs_shift))
40 
41 static LIST_HEAD(msi_head);
42 
43 struct fsl_msi_feature {
44 	u32 fsl_pic_ip;
45 	u32 msiir_offset; /* Offset of MSIIR, relative to start of MSIR bank */
46 };
47 
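/* Per-cascade-interrupt data handed to fsl_msi_cascade() as its dev_id. */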
48 struct fsl_msi_cascade_data {
49 	struct fsl_msi *msi_data;
50 	int index;
51 	int virq;
52 };
53 
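/* Read the 32-bit big-endian MSI register at byte offset 'reg' from 'base'. */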
54 static inline u32 fsl_msi_read(u32 __iomem *base, unsigned int reg)
55 {
56 	return in_be32(base + (reg >> 2));
57 }
58 
59 /*
60  * Nothing to do here: the MSIR register has already been read in the
61  * cascade handler, so this MSI interrupt has already been acked.
62  */
63 static void fsl_msi_end_irq(struct irq_data *d)
64 {
65 }
66 
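/*
 * Report the cascade IRQ that delivers this MSI; used when the chip name
 * is printed, e.g. in /proc/interrupts.
 */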
67 static void fsl_msi_print_chip(struct irq_data *irqd, struct seq_file *p)
68 {
69 	struct fsl_msi *msi_data = irqd->domain->host_data;
70 	irq_hw_number_t hwirq = irqd_to_hwirq(irqd);
71 	int cascade_virq, srs;
72 
73 	srs = (hwirq >> msi_data->srs_shift) & MSI_SRS_MASK;
74 	cascade_virq = msi_data->cascade_array[srs]->virq;
75 
76 	seq_printf(p, " fsl-msi-%d", cascade_virq);
77 }
78 
79 
80 static struct irq_chip fsl_msi_chip = {
81 	.irq_mask	= pci_msi_mask_irq,
82 	.irq_unmask	= pci_msi_unmask_irq,
83 	.irq_ack	= fsl_msi_end_irq,
84 	.irq_print_chip = fsl_msi_print_chip,
85 };
86 
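/*
 * Wire up a newly mapped MSI virq: edge-triggered handling with the
 * fsl_msi_chip, and the owning fsl_msi controller as chip data.
 */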
87 static int fsl_msi_host_map(struct irq_domain *h, unsigned int virq,
88 				irq_hw_number_t hw)
89 {
90 	struct fsl_msi *msi_data = h->host_data;
91 	struct irq_chip *chip = &fsl_msi_chip;
92 
93 	irq_set_status_flags(virq, IRQ_TYPE_EDGE_FALLING);
94 
95 	irq_set_chip_data(virq, msi_data);
96 	irq_set_chip_and_handler(virq, chip, handle_edge_irq);
97 
98 	return 0;
99 }
100 
101 static const struct irq_domain_ops fsl_msi_host_ops = {
102 	.map = fsl_msi_host_map,
103 };
104 
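/*
 * Create the hwirq bitmap allocator. All hwirqs start out reserved; the
 * usable ones are released per MSIR register in fsl_msi_setup_hwirq().
 */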
105 static int fsl_msi_init_allocator(struct fsl_msi *msi_data)
106 {
107 	int rc, hwirq;
108 
109 	rc = msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS_MAX,
110 			      irq_domain_get_of_node(msi_data->irqhost));
111 	if (rc)
112 		return rc;
113 
114 	/*
115 	 * Reserve all the hwirqs.
116 	 * The available hwirqs will be released in fsl_msi_setup_hwirq().
117 	 */
118 	for (hwirq = 0; hwirq < NR_MSI_IRQS_MAX; hwirq++)
119 		msi_bitmap_reserve_hwirq(&msi_data->bitmap, hwirq);
120 
121 	return 0;
122 }
123 
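/*
 * Undo fsl_setup_msi_irqs(): for each associated MSI descriptor, drop the
 * virq mapping and return its hwirq to the bitmap allocator.
 */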
124 static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
125 {
126 	struct msi_desc *entry;
127 	struct fsl_msi *msi_data;
128 	irq_hw_number_t hwirq;
129 
130 	msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
131 		hwirq = virq_to_hw(entry->irq);
132 		msi_data = irq_get_chip_data(entry->irq);
133 		irq_set_msi_desc(entry->irq, NULL);
134 		irq_dispose_mapping(entry->irq);
135 		entry->irq = 0;
136 		msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
137 	}
138 }
139 
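/*
 * Build the MSI message for 'hwirq': the address targets the controller's
 * MSIIR register (or the address given by "msi-address-64") and the data
 * carries the hwirq, byte-swapped on chips with the PIC1 endian erratum.
 */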
140 static void fsl_compose_msi_msg(struct pci_dev *pdev, int hwirq,
141 				struct msi_msg *msg,
142 				struct fsl_msi *fsl_msi_data)
143 {
144 	struct fsl_msi *msi_data = fsl_msi_data;
145 	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
146 	u64 address; /* Physical address of the MSIIR */
147 	int len;
148 	const __be64 *reg;
149 
150 	/* If the msi-address-64 property exists, then use it */
151 	reg = of_get_property(hose->dn, "msi-address-64", &len);
152 	if (reg && (len == sizeof(u64)))
153 		address = be64_to_cpup(reg);
154 	else
155 		address = fsl_pci_immrbar_base(hose) + msi_data->msiir_offset;
156 
157 	msg->address_lo = lower_32_bits(address);
158 	msg->address_hi = upper_32_bits(address);
159 
160 	/*
161 	 * MPIC version 2.0 has erratum PIC1, which prevents both
162 	 * MSI and MSI-X from working correctly. This is a workaround
163 	 * that allows MSI-X to function properly. Since it only
164 	 * helps MSI-X, MSI is disabled on the affected chips in
165 	 * fsl_setup_msi_irqs().
166 	 */
167 	if (msi_data->feature & MSI_HW_ERRATA_ENDIAN)
168 		msg->data = __swab32(hwirq);
169 	else
170 		msg->data = hwirq;
171 
172 	pr_debug("%s: allocated srs: %d, ibs: %d\n", __func__,
173 		 (hwirq >> msi_data->srs_shift) & MSI_SRS_MASK,
174 		 (hwirq >> msi_data->ibs_shift) & MSI_IBS_MASK);
175 }
176 
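/*
 * Allocate and map one hwirq for every unassociated MSI descriptor on
 * 'pdev', picking an MSI controller that matches the PCI node's optional
 * "fsl,msi" phandle, then program the resulting message into the device.
 */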
177 static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
178 {
179 	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
180 	struct device_node *np;
181 	phandle phandle = 0;
182 	int rc, hwirq = -ENOMEM;
183 	unsigned int virq;
184 	struct msi_desc *entry;
185 	struct msi_msg msg;
186 	struct fsl_msi *msi_data;
187 
188 	if (type == PCI_CAP_ID_MSI) {
189 		/*
190 		 * MPIC version 2.0 has erratum PIC1, which makes MSI
191 		 * unusable. Check for it and refuse MSI on boards
192 		 * with this erratum.
193 		 */
194 		list_for_each_entry(msi_data, &msi_head, list)
195 			if (msi_data->feature & MSI_HW_ERRATA_ENDIAN)
196 				return -EINVAL;
197 	}
198 
199 	/*
200 	 * If the PCI node has an fsl,msi property, then we need to use it
201 	 * to find the specific MSI.
202 	 */
203 	np = of_parse_phandle(hose->dn, "fsl,msi", 0);
204 	if (np) {
205 		if (of_device_is_compatible(np, "fsl,mpic-msi") ||
206 		    of_device_is_compatible(np, "fsl,vmpic-msi") ||
207 		    of_device_is_compatible(np, "fsl,vmpic-msi-v4.3"))
208 			phandle = np->phandle;
209 		else {
210 			dev_err(&pdev->dev,
211 				"node %pOF has an invalid fsl,msi phandle %u\n",
212 				hose->dn, np->phandle);
213 			of_node_put(np);
214 			return -EINVAL;
215 		}
216 		of_node_put(np);
217 	}
218 
219 	msi_for_each_desc(entry, &pdev->dev, MSI_DESC_NOTASSOCIATED) {
220 		/*
221 		 * Loop over all the MSI devices until we find one that has an
222 		 * available interrupt.
223 		 */
224 		list_for_each_entry(msi_data, &msi_head, list) {
225 			/*
226 			 * If the PCI node has an fsl,msi property, then we
227 			 * restrict our search to the corresponding MSI node.
228 			 * The simplest way is to skip over MSI nodes with the
229 			 * wrong phandle. Under the Freescale hypervisor, this
230 			 * has the additional benefit of skipping over MSI
231 			 * nodes that are not mapped in the PAMU.
232 			 */
233 			if (phandle && (phandle != msi_data->phandle))
234 				continue;
235 
236 			hwirq = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1);
237 			if (hwirq >= 0)
238 				break;
239 		}
240 
241 		if (hwirq < 0) {
242 			rc = hwirq;
243 			dev_err(&pdev->dev, "could not allocate MSI interrupt\n");
244 			goto out_free;
245 		}
246 
247 		virq = irq_create_mapping(msi_data->irqhost, hwirq);
248 
249 		if (!virq) {
250 			dev_err(&pdev->dev, "failed to map hwirq %i\n", hwirq);
251 			msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
252 			rc = -ENOSPC;
253 			goto out_free;
254 		}
255 		/* chip_data is msi_data via host->hostdata in host->map() */
256 		irq_set_msi_desc(virq, entry);
257 
258 		fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data);
259 		pci_write_msi_msg(virq, &msg);
260 	}
261 	return 0;
262 
263 out_free:
264 	/* the allocated IRQs are freed by the caller of this function */
265 	return rc;
266 }
267 
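/*
 * Cascade handler: read the MSIR register backing this cascade interrupt
 * and dispatch the handler of every MSI whose bit is set in it.
 */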
268 static irqreturn_t fsl_msi_cascade(int irq, void *data)
269 {
270 	struct fsl_msi *msi_data;
271 	int msir_index = -1;
272 	u32 msir_value = 0;
273 	u32 intr_index;
274 	u32 have_shift = 0;
275 	struct fsl_msi_cascade_data *cascade_data = data;
276 	irqreturn_t ret = IRQ_NONE;
277 
278 	msi_data = cascade_data->msi_data;
279 
280 	msir_index = cascade_data->index;
281 
282 	switch (msi_data->feature & FSL_PIC_IP_MASK) {
283 	case FSL_PIC_IP_MPIC:
284 		msir_value = fsl_msi_read(msi_data->msi_regs,
285 			msir_index * 0x10);
286 		break;
287 	case FSL_PIC_IP_IPIC:
288 		msir_value = fsl_msi_read(msi_data->msi_regs, msir_index * 0x4);
289 		break;
290 #ifdef CONFIG_EPAPR_PARAVIRT
291 	case FSL_PIC_IP_VMPIC: {
292 		unsigned int ret;
293 		ret = fh_vmpic_get_msir(virq_to_hw(irq), &msir_value);
294 		if (ret) {
295 			pr_err("fsl-msi: fh_vmpic_get_msir() failed for irq %u (ret=%u)\n",
296 			       irq, ret);
297 			msir_value = 0;
298 		}
299 		break;
300 	}
301 #endif
302 	}
303 
304 	while (msir_value) {
305 		int err;
306 		intr_index = ffs(msir_value) - 1;
307 
308 		err = generic_handle_domain_irq(msi_data->irqhost,
309 				msi_hwirq(msi_data, msir_index,
310 					  intr_index + have_shift));
311 		if (!err)
312 			ret = IRQ_HANDLED;
313 
314 		have_shift += intr_index + 1;
315 		msir_value = msir_value >> (intr_index + 1);
316 	}
317 
318 	return ret;
319 }
320 
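/*
 * Tear down one MSI controller: free the cascade IRQs, the hwirq bitmap
 * and the register mapping. Also used to unwind a partially failed probe.
 */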
321 static int fsl_of_msi_remove(struct platform_device *ofdev)
322 {
323 	struct fsl_msi *msi = platform_get_drvdata(ofdev);
324 	int virq, i;
325 
326 	if (msi->list.prev != NULL)
327 		list_del(&msi->list);
328 	for (i = 0; i < NR_MSI_REG_MAX; i++) {
329 		if (msi->cascade_array[i]) {
330 			virq = msi->cascade_array[i]->virq;
331 
332 			BUG_ON(!virq);
333 
334 			free_irq(virq, msi->cascade_array[i]);
335 			kfree(msi->cascade_array[i]);
336 			irq_dispose_mapping(virq);
337 		}
338 	}
339 	if (msi->bitmap.bitmap)
340 		msi_bitmap_free(&msi->bitmap);
341 	if ((msi->feature & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC)
342 		iounmap(msi->msi_regs);
343 	kfree(msi);
344 
345 	return 0;
346 }
347 
348 static struct lock_class_key fsl_msi_irq_class;
349 static struct lock_class_key fsl_msi_irq_request_class;
350 
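/*
 * Hook up one MSIR register: map and request its cascade interrupt, then
 * release the hwirqs it serves into the bitmap allocator.
 */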
351 static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
352 			       int offset, int irq_index)
353 {
354 	struct fsl_msi_cascade_data *cascade_data = NULL;
355 	int virt_msir, i, ret;
356 
357 	virt_msir = irq_of_parse_and_map(dev->dev.of_node, irq_index);
358 	if (!virt_msir) {
359 		dev_err(&dev->dev, "%s: Cannot translate IRQ index %d\n",
360 			__func__, irq_index);
361 		return 0;
362 	}
363 
364 	cascade_data = kzalloc(sizeof(struct fsl_msi_cascade_data), GFP_KERNEL);
365 	if (!cascade_data) {
366 		dev_err(&dev->dev, "No memory for MSI cascade data\n");
367 		return -ENOMEM;
368 	}
369 	irq_set_lockdep_class(virt_msir, &fsl_msi_irq_class,
370 			      &fsl_msi_irq_request_class);
371 	cascade_data->index = offset;
372 	cascade_data->msi_data = msi;
373 	cascade_data->virq = virt_msir;
374 	msi->cascade_array[irq_index] = cascade_data;
375 
376 	ret = request_irq(virt_msir, fsl_msi_cascade, IRQF_NO_THREAD,
377 			  "fsl-msi-cascade", cascade_data);
378 	if (ret) {
379 		dev_err(&dev->dev, "failed to request_irq(%d), ret = %d\n",
380 			virt_msir, ret);
381 		return ret;
382 	}
383 
384 	/* Release the hwirqs corresponding to this MSI register */
385 	for (i = 0; i < IRQS_PER_MSI_REG; i++)
386 		msi_bitmap_free_hwirqs(&msi->bitmap,
387 				       msi_hwirq(msi, offset, i), 1);
388 
389 	return 0;
390 }
391 
392 static const struct of_device_id fsl_of_msi_ids[];
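/*
 * Probe one MSI controller node: map its registers, create the irq domain
 * and hwirq allocator, set up the cascade interrupts described by the node,
 * and install the Freescale MSI ops on every PCI controller.
 */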
393 static int fsl_of_msi_probe(struct platform_device *dev)
394 {
395 	const struct of_device_id *match;
396 	struct fsl_msi *msi;
397 	struct resource res, msiir;
398 	int err, i, j, irq_index, count;
399 	const u32 *p;
400 	const struct fsl_msi_feature *features;
401 	int len;
402 	u32 offset;
403 	struct pci_controller *phb;
404 
405 	match = of_match_device(fsl_of_msi_ids, &dev->dev);
406 	if (!match)
407 		return -EINVAL;
408 	features = match->data;
409 
410 	printk(KERN_DEBUG "Setting up Freescale MSI support\n");
411 
412 	msi = kzalloc(sizeof(struct fsl_msi), GFP_KERNEL);
413 	if (!msi) {
414 		dev_err(&dev->dev, "No memory for MSI structure\n");
415 		return -ENOMEM;
416 	}
417 	platform_set_drvdata(dev, msi);
418 
419 	msi->irqhost = irq_domain_add_linear(dev->dev.of_node,
420 				      NR_MSI_IRQS_MAX, &fsl_msi_host_ops, msi);
421 
422 	if (msi->irqhost == NULL) {
423 		dev_err(&dev->dev, "No memory for MSI irqhost\n");
424 		err = -ENOMEM;
425 		goto error_out;
426 	}
427 
428 	/*
429 	 * Under the Freescale hypervisor, the msi nodes don't have a 'reg'
430 	 * property.  Instead, we use hypercalls to access the MSI.
431 	 */
432 	if ((features->fsl_pic_ip & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC) {
433 		err = of_address_to_resource(dev->dev.of_node, 0, &res);
434 		if (err) {
435 			dev_err(&dev->dev, "invalid resource for node %pOF\n",
436 				dev->dev.of_node);
437 			goto error_out;
438 		}
439 
440 		msi->msi_regs = ioremap(res.start, resource_size(&res));
441 		if (!msi->msi_regs) {
442 			err = -ENOMEM;
443 			dev_err(&dev->dev, "could not map node %pOF\n",
444 				dev->dev.of_node);
445 			goto error_out;
446 		}
447 		msi->msiir_offset =
448 			features->msiir_offset + (res.start & MSIIR_OFFSET_MASK);
449 
450 		/*
451 		 * First try to read the MSIIR/MSIIR1 offset from the device
452 		 * tree; on failure, use the hardcoded MSIIR offset.
453 		 */
454 		if (of_address_to_resource(dev->dev.of_node, 1, &msiir))
455 			msi->msiir_offset = features->msiir_offset +
456 					    (res.start & MSIIR_OFFSET_MASK);
457 		else
458 			msi->msiir_offset = msiir.start & MSIIR_OFFSET_MASK;
459 	}
460 
461 	msi->feature = features->fsl_pic_ip;
462 
463 	/* For erratum PIC1 on MPIC version 2.0 */
464 	if ((features->fsl_pic_ip & FSL_PIC_IP_MASK) == FSL_PIC_IP_MPIC
465 			&& (fsl_mpic_primary_get_version() == 0x0200))
466 		msi->feature |= MSI_HW_ERRATA_ENDIAN;
467 
468 	/*
469 	 * Remember the phandle, so that we can match with any PCI nodes
470 	 * that have an "fsl,msi" property.
471 	 */
472 	msi->phandle = dev->dev.of_node->phandle;
473 
474 	err = fsl_msi_init_allocator(msi);
475 	if (err) {
476 		dev_err(&dev->dev, "Error allocating MSI bitmap\n");
477 		goto error_out;
478 	}
479 
480 	p = of_get_property(dev->dev.of_node, "msi-available-ranges", &len);
481 
482 	if (of_device_is_compatible(dev->dev.of_node, "fsl,mpic-msi-v4.3") ||
483 	    of_device_is_compatible(dev->dev.of_node, "fsl,vmpic-msi-v4.3")) {
484 		msi->srs_shift = MSIIR1_SRS_SHIFT;
485 		msi->ibs_shift = MSIIR1_IBS_SHIFT;
486 		if (p)
487 			dev_warn(&dev->dev, "%s: does not support the msi-available-ranges property\n",
488 				__func__);
489 
490 		for (irq_index = 0; irq_index < NR_MSI_REG_MSIIR1;
491 		     irq_index++) {
492 			err = fsl_msi_setup_hwirq(msi, dev,
493 						  irq_index, irq_index);
494 			if (err)
495 				goto error_out;
496 		}
497 	} else {
498 		static const u32 all_avail[] =
499 			{ 0, NR_MSI_REG_MSIIR * IRQS_PER_MSI_REG };
500 
501 		msi->srs_shift = MSIIR_SRS_SHIFT;
502 		msi->ibs_shift = MSIIR_IBS_SHIFT;
503 
504 		if (p && len % (2 * sizeof(u32)) != 0) {
505 			dev_err(&dev->dev, "%s: Malformed msi-available-ranges property\n",
506 				__func__);
507 			err = -EINVAL;
508 			goto error_out;
509 		}
510 
511 		if (!p) {
512 			p = all_avail;
513 			len = sizeof(all_avail);
514 		}
515 
516 		for (irq_index = 0, i = 0; i < len / (2 * sizeof(u32)); i++) {
517 			if (p[i * 2] % IRQS_PER_MSI_REG ||
518 			    p[i * 2 + 1] % IRQS_PER_MSI_REG) {
519 				pr_warn("%s: %pOF: msi available range of %u at %u is not IRQ-aligned\n",
520 				       __func__, dev->dev.of_node,
521 				       p[i * 2 + 1], p[i * 2]);
522 				err = -EINVAL;
523 				goto error_out;
524 			}
525 
526 			offset = p[i * 2] / IRQS_PER_MSI_REG;
527 			count = p[i * 2 + 1] / IRQS_PER_MSI_REG;
528 
529 			for (j = 0; j < count; j++, irq_index++) {
530 				err = fsl_msi_setup_hwirq(msi, dev, offset + j,
531 							  irq_index);
532 				if (err)
533 					goto error_out;
534 			}
535 		}
536 	}
537 
538 	list_add_tail(&msi->list, &msi_head);
539 
540 	/*
541 	 * Apply the MSI ops to all the controllers.
542 	 * It doesn't hurt to reassign the same ops,
543 	 * but bail out if we find another MSI driver.
544 	 */
545 	list_for_each_entry(phb, &hose_list, list_node) {
546 		if (!phb->controller_ops.setup_msi_irqs) {
547 			phb->controller_ops.setup_msi_irqs = fsl_setup_msi_irqs;
548 			phb->controller_ops.teardown_msi_irqs = fsl_teardown_msi_irqs;
549 		} else if (phb->controller_ops.setup_msi_irqs != fsl_setup_msi_irqs) {
550 			dev_err(&dev->dev, "Different MSI driver already installed!\n");
551 			err = -ENODEV;
552 			goto error_out;
553 		}
554 	}
555 	return 0;
556 error_out:
557 	fsl_of_msi_remove(dev);
558 	return err;
559 }
560 
561 static const struct fsl_msi_feature mpic_msi_feature = {
562 	.fsl_pic_ip = FSL_PIC_IP_MPIC,
563 	.msiir_offset = 0x140,
564 };
565 
566 static const struct fsl_msi_feature ipic_msi_feature = {
567 	.fsl_pic_ip = FSL_PIC_IP_IPIC,
568 	.msiir_offset = 0x38,
569 };
570 
571 static const struct fsl_msi_feature vmpic_msi_feature = {
572 	.fsl_pic_ip = FSL_PIC_IP_VMPIC,
573 	.msiir_offset = 0,
574 };
575 
576 static const struct of_device_id fsl_of_msi_ids[] = {
577 	{
578 		.compatible = "fsl,mpic-msi",
579 		.data = &mpic_msi_feature,
580 	},
581 	{
582 		.compatible = "fsl,mpic-msi-v4.3",
583 		.data = &mpic_msi_feature,
584 	},
585 	{
586 		.compatible = "fsl,ipic-msi",
587 		.data = &ipic_msi_feature,
588 	},
589 #ifdef CONFIG_EPAPR_PARAVIRT
590 	{
591 		.compatible = "fsl,vmpic-msi",
592 		.data = &vmpic_msi_feature,
593 	},
594 	{
595 		.compatible = "fsl,vmpic-msi-v4.3",
596 		.data = &vmpic_msi_feature,
597 	},
598 #endif
599 	{}
600 };
601 
602 static struct platform_driver fsl_of_msi_driver = {
603 	.driver = {
604 		.name = "fsl-msi",
605 		.of_match_table = fsl_of_msi_ids,
606 	},
607 	.probe = fsl_of_msi_probe,
608 	.remove = fsl_of_msi_remove,
609 };
610 
611 static __init int fsl_of_msi_init(void)
612 {
613 	return platform_driver_register(&fsl_of_msi_driver);
614 }
615 
616 subsys_initcall(fsl_of_msi_init);
617