xref: /freebsd/sys/dev/xilinx/xlnx_pcib.c (revision 22cf89c938886d14f5796fc49f9f020c23ea8eaf)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2020 Ruslan Bukin <br@bsdpad.com>
5  *
6  * This software was developed by SRI International and the University of
7  * Cambridge Computer Laboratory (Department of Computer Science and
8  * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
9  * DARPA SSITH research programme.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 #include "opt_platform.h"
34 
35 #include <sys/cdefs.h>
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/malloc.h>
39 #include <sys/types.h>
40 #include <sys/sysctl.h>
41 #include <sys/kernel.h>
42 #include <sys/rman.h>
43 #include <sys/module.h>
44 #include <sys/bus.h>
45 #include <sys/endian.h>
46 #include <sys/cpuset.h>
47 #include <sys/mutex.h>
48 #include <sys/proc.h>
49 
50 #include <machine/intr.h>
51 #include <machine/bus.h>
52 
53 #include <vm/vm.h>
54 #include <vm/vm_extern.h>
55 #include <vm/vm_kern.h>
56 #include <vm/pmap.h>
57 
58 #include <dev/ofw/openfirm.h>
59 #include <dev/ofw/ofw_bus.h>
60 #include <dev/ofw/ofw_bus_subr.h>
61 
62 #include <dev/pci/pcireg.h>
63 #include <dev/pci/pcivar.h>
64 #include <dev/pci/pci_host_generic.h>
65 #include <dev/pci/pci_host_generic_fdt.h>
66 #include <dev/pci/pcib_private.h>
67 
68 #include "xlnx_pcib.h"
69 
70 #include "ofw_bus_if.h"
71 #include "msi_if.h"
72 #include "pcib_if.h"
73 #include "pic_if.h"
74 
75 #define	XLNX_PCIB_MAX_MSI	64
76 
77 static int xlnx_pcib_fdt_attach(device_t);
78 static int xlnx_pcib_fdt_probe(device_t);
79 static int xlnx_pcib_fdt_get_id(device_t, device_t, enum pci_id_type,
80     uintptr_t *);
81 static void xlnx_pcib_msi_mask(device_t dev, struct intr_irqsrc *isrc,
82     bool mask);
83 
/*
 * Per-instance driver state.  The generic FDT PCIe host softc is embedded
 * first so the pci_host_generic code can treat this softc as its own.
 */
struct xlnx_pcib_softc {
	struct generic_pcie_fdt_softc	fdt_sc;	/* must stay first */
	struct resource			*res[4];	/* see xlnx_pcib_spec[] */
	struct mtx			mtx;	/* guards isrcs[].flags */
	vm_offset_t			msi_page;	/* KVA of MSI doorbell page */
	struct xlnx_pcib_irqsrc		*isrcs;	/* XLNX_PCIB_MAX_MSI entries */
	device_t			dev;
	void				*intr_cookie[3];	/* MISC, MSI0, MSI1 */
};
93 
/*
 * Attach-time resources: the bridge register window plus the three
 * interrupt lines wired up in xlnx_pcib_fdt_attach(); the order here
 * defines the sc->res[] indices.
 */
static struct resource_spec xlnx_pcib_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },	/* MISC */
	{ SYS_RES_IRQ,		1,	RF_ACTIVE },	/* MSI0 */
	{ SYS_RES_IRQ,		2,	RF_ACTIVE },	/* MSI1 */
	{ -1, 0 }
};
101 
/*
 * Per-MSI-vector interrupt source.  The embedded intr_irqsrc must stay
 * first: the MSI/PIC methods cast between the two types.
 */
struct xlnx_pcib_irqsrc {
	struct intr_irqsrc	isrc;
	u_int			irq;	/* vector number, also the MSI data payload */
#define	XLNX_IRQ_FLAG_USED	(1 << 0)	/* vector is allocated */
	u_int			flags;
};
108 
109 static void
110 xlnx_pcib_clear_err_interrupts(struct generic_pcie_core_softc *sc)
111 {
112 	uint32_t reg;
113 
114 	reg = bus_read_4(sc->res, XLNX_PCIE_RPERRFRR);
115 
116 	if (reg & RPERRFRR_VALID) {
117 		device_printf(sc->dev, "Requested ID: %x\n",
118 		    reg & RPERRFRR_REQ_ID_M);
119 		bus_write_4(sc->res, XLNX_PCIE_RPERRFRR, ~0U);
120 	}
121 }
122 
123 static int
124 xlnx_pcib_intr(void *arg)
125 {
126 	struct generic_pcie_fdt_softc *fdt_sc;
127 	struct generic_pcie_core_softc *sc;
128 	struct xlnx_pcib_softc *xlnx_sc;
129 	uint32_t val, mask, status;
130 
131 	xlnx_sc = arg;
132 	fdt_sc = &xlnx_sc->fdt_sc;
133 	sc = &fdt_sc->base;
134 
135 	val = bus_read_4(sc->res, XLNX_PCIE_IDR);
136 	mask = bus_read_4(sc->res, XLNX_PCIE_IMR);
137 
138 	status = val & mask;
139 	if (!status)
140 		return (FILTER_HANDLED);
141 
142 	if (status & IMR_LINK_DOWN)
143 		device_printf(sc->dev, "Link down");
144 
145 	if (status & IMR_HOT_RESET)
146 		device_printf(sc->dev, "Hot reset");
147 
148 	if (status & IMR_CORRECTABLE)
149 		xlnx_pcib_clear_err_interrupts(sc);
150 
151 	if (status & IMR_FATAL)
152 		xlnx_pcib_clear_err_interrupts(sc);
153 
154 	if (status & IMR_NON_FATAL)
155 		xlnx_pcib_clear_err_interrupts(sc);
156 
157 	if (status & IMR_MSI) {
158 		device_printf(sc->dev, "MSI interrupt");
159 
160 		/* FIFO mode MSI not implemented. */
161 	}
162 
163 	if (status & IMR_INTX) {
164 		device_printf(sc->dev, "INTx received");
165 
166 		/* Not implemented. */
167 	}
168 
169 	if (status & IMR_SLAVE_UNSUPP_REQ)
170 		device_printf(sc->dev, "Slave unsupported request");
171 
172 	if (status & IMR_SLAVE_UNEXP_COMPL)
173 		device_printf(sc->dev, "Slave unexpected completion");
174 
175 	if (status & IMR_SLAVE_COMPL_TIMOUT)
176 		device_printf(sc->dev, "Slave completion timeout");
177 
178 	if (status & IMR_SLAVE_ERROR_POISON)
179 		device_printf(sc->dev, "Slave error poison");
180 
181 	if (status & IMR_SLAVE_COMPL_ABORT)
182 		device_printf(sc->dev, "Slave completion abort");
183 
184 	if (status & IMR_SLAVE_ILLEG_BURST)
185 		device_printf(sc->dev, "Slave illegal burst");
186 
187 	if (status & IMR_MASTER_DECERR)
188 		device_printf(sc->dev, "Master decode error");
189 
190 	if (status & IMR_MASTER_SLVERR)
191 		device_printf(sc->dev, "Master slave error");
192 
193 	bus_write_4(sc->res, XLNX_PCIE_IDR, val);
194 
195 	return (FILTER_HANDLED);
196 }
197 
198 static void
199 xlnx_pcib_handle_msi_intr(void *arg, int msireg)
200 {
201 	struct generic_pcie_fdt_softc *fdt_sc;
202 	struct generic_pcie_core_softc *sc;
203 	struct xlnx_pcib_softc *xlnx_sc;
204 	struct xlnx_pcib_irqsrc *xi;
205 	struct trapframe *tf;
206 	int irq;
207 	int reg;
208 	int i;
209 
210 	xlnx_sc = arg;
211 	fdt_sc = &xlnx_sc->fdt_sc;
212 	sc = &fdt_sc->base;
213 	tf = curthread->td_intr_frame;
214 
215 	do {
216 		reg = bus_read_4(sc->res, msireg);
217 
218 		for (i = 0; i < sizeof(uint32_t) * 8; i++) {
219 			if (reg & (1 << i)) {
220 				bus_write_4(sc->res, msireg, (1 << i));
221 
222 				irq = i;
223 				if (msireg == XLNX_PCIE_RPMSIID2)
224 					irq += 32;
225 
226 				xi = &xlnx_sc->isrcs[irq];
227 				if (intr_isrc_dispatch(&xi->isrc, tf) != 0) {
228 					/* Disable stray. */
229 					xlnx_pcib_msi_mask(sc->dev,
230 					    &xi->isrc, 1);
231 					device_printf(sc->dev,
232 					    "Stray irq %u disabled\n", irq);
233 				}
234 			}
235 		}
236 	} while (reg != 0);
237 }
238 
/*
 * Filter for the MSI0 interrupt line: services vectors 0-31 via the
 * RPMSIID1 decode register.
 */
static int
xlnx_pcib_msi0_intr(void *arg)
{

	xlnx_pcib_handle_msi_intr(arg, XLNX_PCIE_RPMSIID1);

	return (FILTER_HANDLED);
}
247 
/*
 * Filter for the MSI1 interrupt line: services vectors 32-63 via the
 * RPMSIID2 decode register.
 */
static int
xlnx_pcib_msi1_intr(void *arg)
{

	xlnx_pcib_handle_msi_intr(arg, XLNX_PCIE_RPMSIID2);

	return (FILTER_HANDLED);
}
256 
257 static int
258 xlnx_pcib_register_msi(struct xlnx_pcib_softc *sc)
259 {
260 	const char *name;
261 	int error;
262 	int irq;
263 
264 	sc->isrcs = malloc(sizeof(*sc->isrcs) * XLNX_PCIB_MAX_MSI, M_DEVBUF,
265 	    M_WAITOK | M_ZERO);
266 
267 	name = device_get_nameunit(sc->dev);
268 
269 	for (irq = 0; irq < XLNX_PCIB_MAX_MSI; irq++) {
270 		sc->isrcs[irq].irq = irq;
271 		error = intr_isrc_register(&sc->isrcs[irq].isrc,
272 		    sc->dev, 0, "%s,%u", name, irq);
273 		if (error != 0)
274 			return (error); /* XXX deregister ISRCs */
275 	}
276 
277 	if (intr_msi_register(sc->dev,
278 	    OF_xref_from_node(ofw_bus_get_node(sc->dev))) != 0)
279 		return (ENXIO);
280 
281 	return (0);
282 }
283 
/*
 * One-time bridge initialisation: quiesce and acknowledge stale
 * interrupts, program the MSI doorbell page, enable the bridge and
 * unmask the conditions serviced by xlnx_pcib_intr().
 */
static void
xlnx_pcib_init(struct xlnx_pcib_softc *sc)
{
	bus_addr_t addr;
	int reg;

	/* Disable interrupts. */
	bus_write_4(sc->res[0], XLNX_PCIE_IMR, 0);

	/*
	 * Clear pending interrupts.  Presumably IDR is write-one-to-clear:
	 * the pending bits are written straight back — TODO confirm
	 * against the XDMA bridge register reference.
	 */
	reg = bus_read_4(sc->res[0], XLNX_PCIE_IDR);
	bus_write_4(sc->res[0], XLNX_PCIE_IDR, reg);

	/*
	 * Setup an MSI page.  Its physical address is split across the
	 * two base registers; devices target it with MSI writes.
	 */
	sc->msi_page = kmem_alloc_contig(PAGE_SIZE, M_WAITOK, 0,
	    BUS_SPACE_MAXADDR, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
	addr = vtophys(sc->msi_page);
	bus_write_4(sc->res[0], XLNX_PCIE_RPMSIBR1, (addr >> 32));
	bus_write_4(sc->res[0], XLNX_PCIE_RPMSIBR2, (addr >>  0));

	/* Enable the bridge. */
	reg = bus_read_4(sc->res[0], XLNX_PCIE_RPSCR);
	reg |= RPSCR_BE;
	bus_write_4(sc->res[0], XLNX_PCIE_RPSCR, reg);

	/* Enable interrupts. */
	reg = IMR_LINK_DOWN
		| IMR_HOT_RESET
		| IMR_CFG_COMPL_STATUS_M
		| IMR_CFG_TIMEOUT
		| IMR_CORRECTABLE
		| IMR_NON_FATAL
		| IMR_FATAL
		| IMR_INTX
		| IMR_MSI
		| IMR_SLAVE_UNSUPP_REQ
		| IMR_SLAVE_UNEXP_COMPL
		| IMR_SLAVE_COMPL_TIMOUT
		| IMR_SLAVE_ERROR_POISON
		| IMR_SLAVE_COMPL_ABORT
		| IMR_SLAVE_ILLEG_BURST
		| IMR_MASTER_DECERR
		| IMR_MASTER_SLVERR;
	bus_write_4(sc->res[0], XLNX_PCIE_IMR, reg);
}
329 
330 static int
331 xlnx_pcib_fdt_probe(device_t dev)
332 {
333 
334 	if (!ofw_bus_status_okay(dev))
335 		return (ENXIO);
336 
337 	if (ofw_bus_is_compatible(dev, "xlnx,xdma-host-3.00")) {
338 		device_set_desc(dev, "Xilinx XDMA PCIe Controller");
339 		return (BUS_PROBE_DEFAULT);
340 	}
341 
342 	return (ENXIO);
343 }
344 
345 static int
346 xlnx_pcib_fdt_attach(device_t dev)
347 {
348 	struct xlnx_pcib_softc *sc;
349 	int error;
350 
351 	sc = device_get_softc(dev);
352 	sc->dev = dev;
353 
354 	mtx_init(&sc->mtx, "msi_mtx", NULL, MTX_DEF);
355 
356 	if (bus_alloc_resources(dev, xlnx_pcib_spec, sc->res)) {
357 		device_printf(dev, "could not allocate resources\n");
358 		return (ENXIO);
359 	}
360 
361 	/* Setup MISC interrupt handler. */
362 	error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC | INTR_MPSAFE,
363 	    xlnx_pcib_intr, NULL, sc, &sc->intr_cookie[0]);
364 	if (error != 0) {
365 		device_printf(dev, "could not setup interrupt handler.\n");
366 		return (ENXIO);
367 	}
368 
369 	/* Setup MSI0 interrupt handler. */
370 	error = bus_setup_intr(dev, sc->res[2], INTR_TYPE_MISC | INTR_MPSAFE,
371 	    xlnx_pcib_msi0_intr, NULL, sc, &sc->intr_cookie[1]);
372 	if (error != 0) {
373 		device_printf(dev, "could not setup interrupt handler.\n");
374 		return (ENXIO);
375 	}
376 
377 	/* Setup MSI1 interrupt handler. */
378 	error = bus_setup_intr(dev, sc->res[3], INTR_TYPE_MISC | INTR_MPSAFE,
379 	    xlnx_pcib_msi1_intr, NULL, sc, &sc->intr_cookie[2]);
380 	if (error != 0) {
381 		device_printf(dev, "could not setup interrupt handler.\n");
382 		return (ENXIO);
383 	}
384 
385 	xlnx_pcib_init(sc);
386 
387 	/*
388 	 * Allow the core driver to map registers.
389 	 * We will be accessing the device memory using core_softc.
390 	 */
391 	bus_release_resources(dev, xlnx_pcib_spec, sc->res);
392 
393 	error = xlnx_pcib_register_msi(sc);
394 	if (error)
395 		return (error);
396 
397 	return (pci_host_generic_fdt_attach(dev));
398 }
399 
400 static int
401 xlnx_pcib_fdt_get_id(device_t pci, device_t child, enum pci_id_type type,
402     uintptr_t *id)
403 {
404 	phandle_t node;
405 	int bsf;
406 
407 	if (type != PCI_ID_MSI)
408 		return (pcib_get_id(pci, child, type, id));
409 
410 	node = ofw_bus_get_node(pci);
411 	if (OF_hasprop(node, "msi-map"))
412 		return (generic_pcie_get_id(pci, child, type, id));
413 
414 	bsf = pci_get_rid(child);
415 	*id = (pci_get_domain(child) << PCI_RID_DOMAIN_SHIFT) | bsf;
416 
417 	return (0);
418 }
419 
420 static int
421 xlnx_pcib_req_valid(struct generic_pcie_core_softc *sc,
422     u_int bus, u_int slot, u_int func, u_int reg)
423 {
424 	bus_space_handle_t h;
425 	bus_space_tag_t t;
426 	uint32_t val;
427 
428 	t = sc->bst;
429 	h = sc->bsh;
430 
431 	if ((bus < sc->bus_start) || (bus > sc->bus_end))
432 		return (0);
433 	if ((slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) ||
434 	    (reg > PCIE_REGMAX))
435 		return (0);
436 
437 	if (bus == 0 && slot > 0)
438 		return (0);
439 
440 	val = bus_space_read_4(t, h, XLNX_PCIE_PHYSCR);
441 	if ((val & PHYSCR_LINK_UP) == 0) {
442 		/* Link is down */
443 		return (0);
444 	}
445 
446 	/* Valid */
447 
448 	return (1);
449 }
450 
/*
 * pcib_read_config method: read a config-space register of width
 * "bytes" (1, 2 or 4).  Returns all-ones when the target is not
 * reachable or the width is invalid, mimicking a master abort.
 */
static uint32_t
xlnx_pcib_read_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, int bytes)
{
	struct generic_pcie_fdt_softc *fdt_sc;
	struct xlnx_pcib_softc *xlnx_sc;
	struct generic_pcie_core_softc *sc;
	bus_space_handle_t h;
	bus_space_tag_t t;
	uint64_t offset;
	uint32_t data;

	xlnx_sc = device_get_softc(dev);
	fdt_sc = &xlnx_sc->fdt_sc;
	sc = &fdt_sc->base;

	/* Unreachable targets read as all-ones. */
	if (!xlnx_pcib_req_valid(sc, bus, slot, func, reg))
		return (~0U);

	offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg);
	t = sc->bst;
	h = sc->bsh;

	/* Always read the naturally-aligned 32-bit word... */
	data = bus_space_read_4(t, h, offset & ~3);

	switch (bytes) {
	case 1:
		/* ...then extract the addressed byte. */
		data >>= (offset & 3) * 8;
		data &= 0xff;
		break;
	case 2:
		/* le16toh's uint16_t conversion also truncates to 16 bits. */
		data >>= (offset & 3) * 8;
		data = le16toh(data);
		break;
	case 4:
		data = le32toh(data);
		break;
	default:
		return (~0U);
	}

	return (data);
}
494 
/*
 * pcib_write_config method: write a config-space register of width
 * "bytes" (1, 2 or 4).  Sub-word writes are implemented as a 32-bit
 * read-modify-write.  Invalid requests are silently dropped.
 */
static void
xlnx_pcib_write_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, uint32_t val, int bytes)
{
	struct generic_pcie_fdt_softc *fdt_sc;
	struct xlnx_pcib_softc *xlnx_sc;
	struct generic_pcie_core_softc *sc;
	bus_space_handle_t h;
	bus_space_tag_t t;
	uint64_t offset;
	uint32_t data;

	xlnx_sc = device_get_softc(dev);
	fdt_sc = &xlnx_sc->fdt_sc;
	sc = &fdt_sc->base;

	if (!xlnx_pcib_req_valid(sc, bus, slot, func, reg))
		return;

	offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg);

	t = sc->bst;
	h = sc->bsh;

	/*
	 * 32-bit access used due to a bug in the Xilinx bridge that
	 * requires to write primary and secondary buses in one blast.
	 *
	 * TODO: This is probably wrong on big-endian.
	 */
	switch (bytes) {
	case 1:
		/* Merge the byte into the containing aligned word. */
		data = bus_space_read_4(t, h, offset & ~3);
		data &= ~(0xff << ((offset & 3) * 8));
		data |= (val & 0xff) << ((offset & 3) * 8);
		bus_space_write_4(t, h, offset & ~3, htole32(data));
		break;
	case 2:
		/* Merge the half-word into the containing aligned word. */
		data = bus_space_read_4(t, h, offset & ~3);
		data &= ~(0xffff << ((offset & 3) * 8));
		data |= (val & 0xffff) << ((offset & 3) * 8);
		bus_space_write_4(t, h, offset & ~3, htole32(data));
		break;
	case 4:
		bus_space_write_4(t, h, offset, htole32(val));
		break;
	default:
		return;
	}
}
545 
546 static int
547 xlnx_pcib_alloc_msi(device_t pci, device_t child, int count, int maxcount,
548     int *irqs)
549 {
550 	phandle_t msi_parent;
551 
552 	ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent,
553 	    NULL);
554 	msi_parent = OF_xref_from_node(ofw_bus_get_node(pci));
555 	return (intr_alloc_msi(pci, child, msi_parent, count, maxcount,
556 	    irqs));
557 }
558 
559 static int
560 xlnx_pcib_release_msi(device_t pci, device_t child, int count, int *irqs)
561 {
562 	phandle_t msi_parent;
563 
564 	ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent,
565 	    NULL);
566 	msi_parent = OF_xref_from_node(ofw_bus_get_node(pci));
567 	return (intr_release_msi(pci, child, msi_parent, count, irqs));
568 }
569 
570 static int
571 xlnx_pcib_map_msi(device_t pci, device_t child, int irq, uint64_t *addr,
572     uint32_t *data)
573 {
574 	phandle_t msi_parent;
575 
576 	ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent,
577 	    NULL);
578 	msi_parent = OF_xref_from_node(ofw_bus_get_node(pci));
579 	return (intr_map_msi(pci, child, msi_parent, irq, addr, data));
580 }
581 
582 static int
583 xlnx_pcib_msi_alloc_msi(device_t dev, device_t child, int count, int maxcount,
584     device_t *pic, struct intr_irqsrc **srcs)
585 {
586 	struct xlnx_pcib_softc *sc;
587 	int irq, end_irq, i;
588 	bool found;
589 
590 	sc = device_get_softc(dev);
591 
592 	mtx_lock(&sc->mtx);
593 
594 	found = false;
595 
596 	for (irq = 0; (irq + count - 1) < XLNX_PCIB_MAX_MSI; irq++) {
597 		/* Assume the range is valid. */
598 		found = true;
599 
600 		/* Check this range is valid. */
601 		for (end_irq = irq; end_irq < irq + count; end_irq++) {
602 			if (sc->isrcs[end_irq].flags & XLNX_IRQ_FLAG_USED) {
603 				/* This is already used. */
604 				found = false;
605 				break;
606 			}
607 		}
608 
609 		if (found)
610 			break;
611 	}
612 
613 	if (!found || irq == (XLNX_PCIB_MAX_MSI - 1)) {
614 		/* Not enough interrupts were found. */
615 		mtx_unlock(&sc->mtx);
616 		return (ENXIO);
617 	}
618 
619 	/* Mark the interrupt as used. */
620 	for (i = 0; i < count; i++)
621 		sc->isrcs[irq + i].flags |= XLNX_IRQ_FLAG_USED;
622 
623 	mtx_unlock(&sc->mtx);
624 
625 	for (i = 0; i < count; i++)
626 		srcs[i] = (struct intr_irqsrc *)&sc->isrcs[irq + i];
627 
628 	*pic = device_get_parent(dev);
629 
630 	return (0);
631 }
632 
633 static int
634 xlnx_pcib_msi_release_msi(device_t dev, device_t child, int count,
635     struct intr_irqsrc **isrc)
636 {
637 	struct xlnx_pcib_softc *sc;
638 	struct xlnx_pcib_irqsrc *xi;
639 	int i;
640 
641 	sc = device_get_softc(dev);
642 	mtx_lock(&sc->mtx);
643 	for (i = 0; i < count; i++) {
644 		xi = (struct xlnx_pcib_irqsrc *)isrc[i];
645 
646 		KASSERT(xi->flags & XLNX_IRQ_FLAG_USED,
647 		    ("%s: Releasing an unused MSI interrupt", __func__));
648 
649 		xi->flags &= ~XLNX_IRQ_FLAG_USED;
650 	}
651 
652 	mtx_unlock(&sc->mtx);
653 	return (0);
654 }
655 
656 static int
657 xlnx_pcib_msi_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
658     uint64_t *addr, uint32_t *data)
659 {
660 	struct xlnx_pcib_softc *sc;
661 	struct xlnx_pcib_irqsrc *xi;
662 
663 	sc = device_get_softc(dev);
664 	xi = (struct xlnx_pcib_irqsrc *)isrc;
665 
666 	*addr = vtophys(sc->msi_page);
667 	*data = xi->irq;
668 
669 	return (0);
670 }
671 
672 static void
673 xlnx_pcib_msi_mask(device_t dev, struct intr_irqsrc *isrc, bool mask)
674 {
675 	struct generic_pcie_fdt_softc *fdt_sc;
676 	struct generic_pcie_core_softc *sc;
677 	struct xlnx_pcib_softc *xlnx_sc;
678 	struct xlnx_pcib_irqsrc *xi;
679 	uint32_t msireg, irq;
680 	uint32_t reg;
681 
682 	xlnx_sc = device_get_softc(dev);
683 	fdt_sc = &xlnx_sc->fdt_sc;
684 	sc = &fdt_sc->base;
685 
686 	xi = (struct xlnx_pcib_irqsrc *)isrc;
687 
688 	irq = xi->irq;
689 	if (irq < 32)
690 		msireg = XLNX_PCIE_RPMSIID1_MASK;
691 	else
692 		msireg = XLNX_PCIE_RPMSIID2_MASK;
693 
694 	reg = bus_read_4(sc->res, msireg);
695 	if (mask)
696 		reg &= ~(1 << irq);
697 	else
698 		reg |= (1 << irq);
699 	bus_write_4(sc->res, msireg, reg);
700 }
701 
/*
 * pic_disable_intr method: mask delivery of the vector.
 */
static void
xlnx_pcib_msi_disable_intr(device_t dev, struct intr_irqsrc *isrc)
{

	xlnx_pcib_msi_mask(dev, isrc, true);
}
708 
/*
 * pic_enable_intr method: unmask delivery of the vector.
 */
static void
xlnx_pcib_msi_enable_intr(device_t dev, struct intr_irqsrc *isrc)
{

	xlnx_pcib_msi_mask(dev, isrc, false);
}
715 
/*
 * pic_post_filter method: nothing to do — each vector is acknowledged
 * in the MSI decode loop before dispatch.
 */
static void
xlnx_pcib_msi_post_filter(device_t dev, struct intr_irqsrc *isrc)
{

}
721 
/*
 * pic_post_ithread method: re-enable the vector after its ithread has
 * finished (it was masked in pre_ithread).
 */
static void
xlnx_pcib_msi_post_ithread(device_t dev, struct intr_irqsrc *isrc)
{

	xlnx_pcib_msi_mask(dev, isrc, false);
}
728 
/*
 * pic_pre_ithread method: mask the vector while its ithread runs.
 */
static void
xlnx_pcib_msi_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
{

	xlnx_pcib_msi_mask(dev, isrc, true);
}
735 
/*
 * pic_setup_intr method: no per-interrupt configuration is needed;
 * accept unconditionally.
 */
static int
xlnx_pcib_msi_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{

	return (0);
}
743 
/*
 * pic_teardown_intr method: nothing was configured in setup_intr, so
 * nothing to undo.
 */
static int
xlnx_pcib_msi_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{

	return (0);
}
751 
/*
 * Method table.  Besides the device and pcib interfaces, this driver
 * also exposes msi/pic interfaces: the bridge acts as its own MSI
 * interrupt controller.  Unlisted methods are inherited from
 * generic_pcie_fdt_driver (see DEFINE_CLASS_1 below).
 */
static device_method_t xlnx_pcib_fdt_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		xlnx_pcib_fdt_probe),
	DEVMETHOD(device_attach,	xlnx_pcib_fdt_attach),

	/* pcib interface */
	DEVMETHOD(pcib_get_id,		xlnx_pcib_fdt_get_id),
	DEVMETHOD(pcib_read_config,	xlnx_pcib_read_config),
	DEVMETHOD(pcib_write_config,	xlnx_pcib_write_config),
	DEVMETHOD(pcib_alloc_msi,	xlnx_pcib_alloc_msi),
	DEVMETHOD(pcib_release_msi,	xlnx_pcib_release_msi),
	DEVMETHOD(pcib_map_msi,		xlnx_pcib_map_msi),

	/* MSI interface */
	DEVMETHOD(msi_alloc_msi,		xlnx_pcib_msi_alloc_msi),
	DEVMETHOD(msi_release_msi,		xlnx_pcib_msi_release_msi),
	DEVMETHOD(msi_map_msi,			xlnx_pcib_msi_map_msi),

	/* Interrupt controller interface */
	DEVMETHOD(pic_disable_intr,		xlnx_pcib_msi_disable_intr),
	DEVMETHOD(pic_enable_intr,		xlnx_pcib_msi_enable_intr),
	DEVMETHOD(pic_setup_intr,		xlnx_pcib_msi_setup_intr),
	DEVMETHOD(pic_teardown_intr,		xlnx_pcib_msi_teardown_intr),
	DEVMETHOD(pic_post_filter,		xlnx_pcib_msi_post_filter),
	DEVMETHOD(pic_post_ithread,		xlnx_pcib_msi_post_ithread),
	DEVMETHOD(pic_pre_ithread,		xlnx_pcib_msi_pre_ithread),

	/* End */
	DEVMETHOD_END
};
782 
/* Subclass of the generic FDT PCIe host bridge driver. */
DEFINE_CLASS_1(pcib, xlnx_pcib_fdt_driver, xlnx_pcib_fdt_methods,
    sizeof(struct xlnx_pcib_softc), generic_pcie_fdt_driver);

/* Attach under either simplebus or ofwbus, as the FDT dictates. */
DRIVER_MODULE(xlnx_pcib, simplebus, xlnx_pcib_fdt_driver, 0, 0);
DRIVER_MODULE(xlnx_pcib, ofwbus, xlnx_pcib_fdt_driver, 0, 0);
788