xref: /freebsd/sys/dev/xilinx/xlnx_pcib.c (revision fdafd315ad0d0f28a11b9fb4476a9ab059c62b92)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2020 Ruslan Bukin <br@bsdpad.com>
5  *
6  * This software was developed by SRI International and the University of
7  * Cambridge Computer Laboratory (Department of Computer Science and
8  * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
9  * DARPA SSITH research programme.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 #include "opt_platform.h"
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/malloc.h>
38 #include <sys/types.h>
39 #include <sys/sysctl.h>
40 #include <sys/kernel.h>
41 #include <sys/rman.h>
42 #include <sys/module.h>
43 #include <sys/bus.h>
44 #include <sys/endian.h>
45 #include <sys/cpuset.h>
46 #include <sys/mutex.h>
47 #include <sys/proc.h>
48 
49 #include <machine/intr.h>
50 #include <machine/bus.h>
51 
52 #include <vm/vm.h>
53 #include <vm/vm_extern.h>
54 #include <vm/vm_kern.h>
55 #include <vm/pmap.h>
56 
57 #include <dev/ofw/openfirm.h>
58 #include <dev/ofw/ofw_bus.h>
59 #include <dev/ofw/ofw_bus_subr.h>
60 
61 #include <dev/pci/pcireg.h>
62 #include <dev/pci/pcivar.h>
63 #include <dev/pci/pci_host_generic.h>
64 #include <dev/pci/pci_host_generic_fdt.h>
65 #include <dev/pci/pcib_private.h>
66 
67 #include "xlnx_pcib.h"
68 
69 #include "ofw_bus_if.h"
70 #include "msi_if.h"
71 #include "pcib_if.h"
72 #include "pic_if.h"
73 
74 #define	XLNX_PCIB_MAX_MSI	64
75 
76 static int xlnx_pcib_fdt_attach(device_t);
77 static int xlnx_pcib_fdt_probe(device_t);
78 static int xlnx_pcib_fdt_get_id(device_t, device_t, enum pci_id_type,
79     uintptr_t *);
80 static void xlnx_pcib_msi_mask(device_t dev, struct intr_irqsrc *isrc,
81     bool mask);
82 
/*
 * Per-device state.  Embeds the generic FDT PCIe softc so the
 * pci_host_generic core can operate on this bridge.
 */
struct xlnx_pcib_softc {
	struct generic_pcie_fdt_softc	fdt_sc;
	struct resource			*res[4];	/* reg window + 3 IRQs, see xlnx_pcib_spec */
	struct mtx			mtx;		/* serializes MSI vector allocation */
	vm_offset_t			msi_page;	/* KVA of the MSI doorbell page */
	struct xlnx_pcib_irqsrc		*isrcs;		/* XLNX_PCIB_MAX_MSI entries */
	device_t			dev;
	void				*intr_cookie[3];	/* misc, MSI0, MSI1 handlers */
};
92 
/*
 * Resources used during attach: the register window plus the misc-event
 * and two MSI interrupt lines (wired up in xlnx_pcib_fdt_attach).
 */
static struct resource_spec xlnx_pcib_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },	/* bridge registers */
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },	/* misc events */
	{ SYS_RES_IRQ,		1,	RF_ACTIVE },	/* MSI vectors 0-31 */
	{ SYS_RES_IRQ,		2,	RF_ACTIVE },	/* MSI vectors 32-63 */
	{ -1, 0 }
};
100 
/* One software interrupt source per MSI vector. */
struct xlnx_pcib_irqsrc {
	struct intr_irqsrc	isrc;
	u_int			irq;	/* vector number, 0 .. XLNX_PCIB_MAX_MSI - 1 */
#define	XLNX_IRQ_FLAG_USED	(1 << 0)
	u_int			flags;
};
107 
108 static void
xlnx_pcib_clear_err_interrupts(struct generic_pcie_core_softc * sc)109 xlnx_pcib_clear_err_interrupts(struct generic_pcie_core_softc *sc)
110 {
111 	uint32_t reg;
112 
113 	reg = bus_read_4(sc->res, XLNX_PCIE_RPERRFRR);
114 
115 	if (reg & RPERRFRR_VALID) {
116 		device_printf(sc->dev, "Requested ID: %x\n",
117 		    reg & RPERRFRR_REQ_ID_M);
118 		bus_write_4(sc->res, XLNX_PCIE_RPERRFRR, ~0U);
119 	}
120 }
121 
122 static int
xlnx_pcib_intr(void * arg)123 xlnx_pcib_intr(void *arg)
124 {
125 	struct generic_pcie_fdt_softc *fdt_sc;
126 	struct generic_pcie_core_softc *sc;
127 	struct xlnx_pcib_softc *xlnx_sc;
128 	uint32_t val, mask, status;
129 
130 	xlnx_sc = arg;
131 	fdt_sc = &xlnx_sc->fdt_sc;
132 	sc = &fdt_sc->base;
133 
134 	val = bus_read_4(sc->res, XLNX_PCIE_IDR);
135 	mask = bus_read_4(sc->res, XLNX_PCIE_IMR);
136 
137 	status = val & mask;
138 	if (!status)
139 		return (FILTER_HANDLED);
140 
141 	if (status & IMR_LINK_DOWN)
142 		device_printf(sc->dev, "Link down");
143 
144 	if (status & IMR_HOT_RESET)
145 		device_printf(sc->dev, "Hot reset");
146 
147 	if (status & IMR_CORRECTABLE)
148 		xlnx_pcib_clear_err_interrupts(sc);
149 
150 	if (status & IMR_FATAL)
151 		xlnx_pcib_clear_err_interrupts(sc);
152 
153 	if (status & IMR_NON_FATAL)
154 		xlnx_pcib_clear_err_interrupts(sc);
155 
156 	if (status & IMR_MSI) {
157 		device_printf(sc->dev, "MSI interrupt");
158 
159 		/* FIFO mode MSI not implemented. */
160 	}
161 
162 	if (status & IMR_INTX) {
163 		device_printf(sc->dev, "INTx received");
164 
165 		/* Not implemented. */
166 	}
167 
168 	if (status & IMR_SLAVE_UNSUPP_REQ)
169 		device_printf(sc->dev, "Slave unsupported request");
170 
171 	if (status & IMR_SLAVE_UNEXP_COMPL)
172 		device_printf(sc->dev, "Slave unexpected completion");
173 
174 	if (status & IMR_SLAVE_COMPL_TIMOUT)
175 		device_printf(sc->dev, "Slave completion timeout");
176 
177 	if (status & IMR_SLAVE_ERROR_POISON)
178 		device_printf(sc->dev, "Slave error poison");
179 
180 	if (status & IMR_SLAVE_COMPL_ABORT)
181 		device_printf(sc->dev, "Slave completion abort");
182 
183 	if (status & IMR_SLAVE_ILLEG_BURST)
184 		device_printf(sc->dev, "Slave illegal burst");
185 
186 	if (status & IMR_MASTER_DECERR)
187 		device_printf(sc->dev, "Master decode error");
188 
189 	if (status & IMR_MASTER_SLVERR)
190 		device_printf(sc->dev, "Master slave error");
191 
192 	bus_write_4(sc->res, XLNX_PCIE_IDR, val);
193 
194 	return (FILTER_HANDLED);
195 }
196 
197 static void
xlnx_pcib_handle_msi_intr(void * arg,int msireg)198 xlnx_pcib_handle_msi_intr(void *arg, int msireg)
199 {
200 	struct generic_pcie_fdt_softc *fdt_sc;
201 	struct generic_pcie_core_softc *sc;
202 	struct xlnx_pcib_softc *xlnx_sc;
203 	struct xlnx_pcib_irqsrc *xi;
204 	struct trapframe *tf;
205 	int irq;
206 	int reg;
207 	int i;
208 
209 	xlnx_sc = arg;
210 	fdt_sc = &xlnx_sc->fdt_sc;
211 	sc = &fdt_sc->base;
212 	tf = curthread->td_intr_frame;
213 
214 	do {
215 		reg = bus_read_4(sc->res, msireg);
216 
217 		for (i = 0; i < sizeof(uint32_t) * 8; i++) {
218 			if (reg & (1 << i)) {
219 				bus_write_4(sc->res, msireg, (1 << i));
220 
221 				irq = i;
222 				if (msireg == XLNX_PCIE_RPMSIID2)
223 					irq += 32;
224 
225 				xi = &xlnx_sc->isrcs[irq];
226 				if (intr_isrc_dispatch(&xi->isrc, tf) != 0) {
227 					/* Disable stray. */
228 					xlnx_pcib_msi_mask(sc->dev,
229 					    &xi->isrc, 1);
230 					device_printf(sc->dev,
231 					    "Stray irq %u disabled\n", irq);
232 				}
233 			}
234 		}
235 	} while (reg != 0);
236 }
237 
238 static int
xlnx_pcib_msi0_intr(void * arg)239 xlnx_pcib_msi0_intr(void *arg)
240 {
241 
242 	xlnx_pcib_handle_msi_intr(arg, XLNX_PCIE_RPMSIID1);
243 
244 	return (FILTER_HANDLED);
245 }
246 
247 static int
xlnx_pcib_msi1_intr(void * arg)248 xlnx_pcib_msi1_intr(void *arg)
249 {
250 
251 	xlnx_pcib_handle_msi_intr(arg, XLNX_PCIE_RPMSIID2);
252 
253 	return (FILTER_HANDLED);
254 }
255 
256 static int
xlnx_pcib_register_msi(struct xlnx_pcib_softc * sc)257 xlnx_pcib_register_msi(struct xlnx_pcib_softc *sc)
258 {
259 	const char *name;
260 	int error;
261 	int irq;
262 
263 	sc->isrcs = malloc(sizeof(*sc->isrcs) * XLNX_PCIB_MAX_MSI, M_DEVBUF,
264 	    M_WAITOK | M_ZERO);
265 
266 	name = device_get_nameunit(sc->dev);
267 
268 	for (irq = 0; irq < XLNX_PCIB_MAX_MSI; irq++) {
269 		sc->isrcs[irq].irq = irq;
270 		error = intr_isrc_register(&sc->isrcs[irq].isrc,
271 		    sc->dev, 0, "%s,%u", name, irq);
272 		if (error != 0)
273 			return (error); /* XXX deregister ISRCs */
274 	}
275 
276 	if (intr_msi_register(sc->dev,
277 	    OF_xref_from_node(ofw_bus_get_node(sc->dev))) != 0)
278 		return (ENXIO);
279 
280 	return (0);
281 }
282 
283 static void
xlnx_pcib_init(struct xlnx_pcib_softc * sc)284 xlnx_pcib_init(struct xlnx_pcib_softc *sc)
285 {
286 	bus_addr_t addr;
287 	int reg;
288 
289 	/* Disable interrupts. */
290 	bus_write_4(sc->res[0], XLNX_PCIE_IMR, 0);
291 
292 	/* Clear pending interrupts.*/
293 	reg = bus_read_4(sc->res[0], XLNX_PCIE_IDR);
294 	bus_write_4(sc->res[0], XLNX_PCIE_IDR, reg);
295 
296 	/* Setup an MSI page. */
297 	sc->msi_page = kmem_alloc_contig(PAGE_SIZE, M_WAITOK, 0,
298 	    BUS_SPACE_MAXADDR, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
299 	addr = vtophys(sc->msi_page);
300 	bus_write_4(sc->res[0], XLNX_PCIE_RPMSIBR1, (addr >> 32));
301 	bus_write_4(sc->res[0], XLNX_PCIE_RPMSIBR2, (addr >>  0));
302 
303 	/* Enable the bridge. */
304 	reg = bus_read_4(sc->res[0], XLNX_PCIE_RPSCR);
305 	reg |= RPSCR_BE;
306 	bus_write_4(sc->res[0], XLNX_PCIE_RPSCR, reg);
307 
308 	/* Enable interrupts. */
309 	reg = IMR_LINK_DOWN
310 		| IMR_HOT_RESET
311 		| IMR_CFG_COMPL_STATUS_M
312 		| IMR_CFG_TIMEOUT
313 		| IMR_CORRECTABLE
314 		| IMR_NON_FATAL
315 		| IMR_FATAL
316 		| IMR_INTX
317 		| IMR_MSI
318 		| IMR_SLAVE_UNSUPP_REQ
319 		| IMR_SLAVE_UNEXP_COMPL
320 		| IMR_SLAVE_COMPL_TIMOUT
321 		| IMR_SLAVE_ERROR_POISON
322 		| IMR_SLAVE_COMPL_ABORT
323 		| IMR_SLAVE_ILLEG_BURST
324 		| IMR_MASTER_DECERR
325 		| IMR_MASTER_SLVERR;
326 	bus_write_4(sc->res[0], XLNX_PCIE_IMR, reg);
327 }
328 
329 static int
xlnx_pcib_fdt_probe(device_t dev)330 xlnx_pcib_fdt_probe(device_t dev)
331 {
332 
333 	if (!ofw_bus_status_okay(dev))
334 		return (ENXIO);
335 
336 	if (ofw_bus_is_compatible(dev, "xlnx,xdma-host-3.00")) {
337 		device_set_desc(dev, "Xilinx XDMA PCIe Controller");
338 		return (BUS_PROBE_DEFAULT);
339 	}
340 
341 	return (ENXIO);
342 }
343 
344 static int
xlnx_pcib_fdt_attach(device_t dev)345 xlnx_pcib_fdt_attach(device_t dev)
346 {
347 	struct xlnx_pcib_softc *sc;
348 	int error;
349 
350 	sc = device_get_softc(dev);
351 	sc->dev = dev;
352 
353 	mtx_init(&sc->mtx, "msi_mtx", NULL, MTX_DEF);
354 
355 	if (bus_alloc_resources(dev, xlnx_pcib_spec, sc->res)) {
356 		device_printf(dev, "could not allocate resources\n");
357 		return (ENXIO);
358 	}
359 
360 	/* Setup MISC interrupt handler. */
361 	error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC | INTR_MPSAFE,
362 	    xlnx_pcib_intr, NULL, sc, &sc->intr_cookie[0]);
363 	if (error != 0) {
364 		device_printf(dev, "could not setup interrupt handler.\n");
365 		return (ENXIO);
366 	}
367 
368 	/* Setup MSI0 interrupt handler. */
369 	error = bus_setup_intr(dev, sc->res[2], INTR_TYPE_MISC | INTR_MPSAFE,
370 	    xlnx_pcib_msi0_intr, NULL, sc, &sc->intr_cookie[1]);
371 	if (error != 0) {
372 		device_printf(dev, "could not setup interrupt handler.\n");
373 		return (ENXIO);
374 	}
375 
376 	/* Setup MSI1 interrupt handler. */
377 	error = bus_setup_intr(dev, sc->res[3], INTR_TYPE_MISC | INTR_MPSAFE,
378 	    xlnx_pcib_msi1_intr, NULL, sc, &sc->intr_cookie[2]);
379 	if (error != 0) {
380 		device_printf(dev, "could not setup interrupt handler.\n");
381 		return (ENXIO);
382 	}
383 
384 	xlnx_pcib_init(sc);
385 
386 	/*
387 	 * Allow the core driver to map registers.
388 	 * We will be accessing the device memory using core_softc.
389 	 */
390 	bus_release_resources(dev, xlnx_pcib_spec, sc->res);
391 
392 	error = xlnx_pcib_register_msi(sc);
393 	if (error)
394 		return (error);
395 
396 	return (pci_host_generic_fdt_attach(dev));
397 }
398 
399 static int
xlnx_pcib_fdt_get_id(device_t pci,device_t child,enum pci_id_type type,uintptr_t * id)400 xlnx_pcib_fdt_get_id(device_t pci, device_t child, enum pci_id_type type,
401     uintptr_t *id)
402 {
403 	phandle_t node;
404 	int bsf;
405 
406 	if (type != PCI_ID_MSI)
407 		return (pcib_get_id(pci, child, type, id));
408 
409 	node = ofw_bus_get_node(pci);
410 	if (OF_hasprop(node, "msi-map"))
411 		return (generic_pcie_get_id(pci, child, type, id));
412 
413 	bsf = pci_get_rid(child);
414 	*id = (pci_get_domain(child) << PCI_RID_DOMAIN_SHIFT) | bsf;
415 
416 	return (0);
417 }
418 
419 static int
xlnx_pcib_req_valid(struct generic_pcie_core_softc * sc,u_int bus,u_int slot,u_int func,u_int reg)420 xlnx_pcib_req_valid(struct generic_pcie_core_softc *sc,
421     u_int bus, u_int slot, u_int func, u_int reg)
422 {
423 	bus_space_handle_t h;
424 	bus_space_tag_t t;
425 	uint32_t val;
426 
427 	t = sc->bst;
428 	h = sc->bsh;
429 
430 	if ((bus < sc->bus_start) || (bus > sc->bus_end))
431 		return (0);
432 	if ((slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) ||
433 	    (reg > PCIE_REGMAX))
434 		return (0);
435 
436 	if (bus == 0 && slot > 0)
437 		return (0);
438 
439 	val = bus_space_read_4(t, h, XLNX_PCIE_PHYSCR);
440 	if ((val & PHYSCR_LINK_UP) == 0) {
441 		/* Link is down */
442 		return (0);
443 	}
444 
445 	/* Valid */
446 
447 	return (1);
448 }
449 
450 static uint32_t
xlnx_pcib_read_config(device_t dev,u_int bus,u_int slot,u_int func,u_int reg,int bytes)451 xlnx_pcib_read_config(device_t dev, u_int bus, u_int slot,
452     u_int func, u_int reg, int bytes)
453 {
454 	struct generic_pcie_fdt_softc *fdt_sc;
455 	struct xlnx_pcib_softc *xlnx_sc;
456 	struct generic_pcie_core_softc *sc;
457 	bus_space_handle_t h;
458 	bus_space_tag_t t;
459 	uint64_t offset;
460 	uint32_t data;
461 
462 	xlnx_sc = device_get_softc(dev);
463 	fdt_sc = &xlnx_sc->fdt_sc;
464 	sc = &fdt_sc->base;
465 
466 	if (!xlnx_pcib_req_valid(sc, bus, slot, func, reg))
467 		return (~0U);
468 
469 	offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg);
470 	t = sc->bst;
471 	h = sc->bsh;
472 
473 	data = bus_space_read_4(t, h, offset & ~3);
474 
475 	switch (bytes) {
476 	case 1:
477 		data >>= (offset & 3) * 8;
478 		data &= 0xff;
479 		break;
480 	case 2:
481 		data >>= (offset & 3) * 8;
482 		data = le16toh(data);
483 		break;
484 	case 4:
485 		data = le32toh(data);
486 		break;
487 	default:
488 		return (~0U);
489 	}
490 
491 	return (data);
492 }
493 
494 static void
xlnx_pcib_write_config(device_t dev,u_int bus,u_int slot,u_int func,u_int reg,uint32_t val,int bytes)495 xlnx_pcib_write_config(device_t dev, u_int bus, u_int slot,
496     u_int func, u_int reg, uint32_t val, int bytes)
497 {
498 	struct generic_pcie_fdt_softc *fdt_sc;
499 	struct xlnx_pcib_softc *xlnx_sc;
500 	struct generic_pcie_core_softc *sc;
501 	bus_space_handle_t h;
502 	bus_space_tag_t t;
503 	uint64_t offset;
504 	uint32_t data;
505 
506 	xlnx_sc = device_get_softc(dev);
507 	fdt_sc = &xlnx_sc->fdt_sc;
508 	sc = &fdt_sc->base;
509 
510 	if (!xlnx_pcib_req_valid(sc, bus, slot, func, reg))
511 		return;
512 
513 	offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg);
514 
515 	t = sc->bst;
516 	h = sc->bsh;
517 
518 	/*
519 	 * 32-bit access used due to a bug in the Xilinx bridge that
520 	 * requires to write primary and secondary buses in one blast.
521 	 *
522 	 * TODO: This is probably wrong on big-endian.
523 	 */
524 	switch (bytes) {
525 	case 1:
526 		data = bus_space_read_4(t, h, offset & ~3);
527 		data &= ~(0xff << ((offset & 3) * 8));
528 		data |= (val & 0xff) << ((offset & 3) * 8);
529 		bus_space_write_4(t, h, offset & ~3, htole32(data));
530 		break;
531 	case 2:
532 		data = bus_space_read_4(t, h, offset & ~3);
533 		data &= ~(0xffff << ((offset & 3) * 8));
534 		data |= (val & 0xffff) << ((offset & 3) * 8);
535 		bus_space_write_4(t, h, offset & ~3, htole32(data));
536 		break;
537 	case 4:
538 		bus_space_write_4(t, h, offset, htole32(val));
539 		break;
540 	default:
541 		return;
542 	}
543 }
544 
545 static int
xlnx_pcib_alloc_msi(device_t pci,device_t child,int count,int maxcount,int * irqs)546 xlnx_pcib_alloc_msi(device_t pci, device_t child, int count, int maxcount,
547     int *irqs)
548 {
549 	phandle_t msi_parent;
550 
551 	ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent,
552 	    NULL);
553 	msi_parent = OF_xref_from_node(ofw_bus_get_node(pci));
554 	return (intr_alloc_msi(pci, child, msi_parent, count, maxcount,
555 	    irqs));
556 }
557 
558 static int
xlnx_pcib_release_msi(device_t pci,device_t child,int count,int * irqs)559 xlnx_pcib_release_msi(device_t pci, device_t child, int count, int *irqs)
560 {
561 	phandle_t msi_parent;
562 
563 	ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent,
564 	    NULL);
565 	msi_parent = OF_xref_from_node(ofw_bus_get_node(pci));
566 	return (intr_release_msi(pci, child, msi_parent, count, irqs));
567 }
568 
569 static int
xlnx_pcib_map_msi(device_t pci,device_t child,int irq,uint64_t * addr,uint32_t * data)570 xlnx_pcib_map_msi(device_t pci, device_t child, int irq, uint64_t *addr,
571     uint32_t *data)
572 {
573 	phandle_t msi_parent;
574 
575 	ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent,
576 	    NULL);
577 	msi_parent = OF_xref_from_node(ofw_bus_get_node(pci));
578 	return (intr_map_msi(pci, child, msi_parent, irq, addr, data));
579 }
580 
581 static int
xlnx_pcib_msi_alloc_msi(device_t dev,device_t child,int count,int maxcount,device_t * pic,struct intr_irqsrc ** srcs)582 xlnx_pcib_msi_alloc_msi(device_t dev, device_t child, int count, int maxcount,
583     device_t *pic, struct intr_irqsrc **srcs)
584 {
585 	struct xlnx_pcib_softc *sc;
586 	int irq, end_irq, i;
587 	bool found;
588 
589 	sc = device_get_softc(dev);
590 
591 	mtx_lock(&sc->mtx);
592 
593 	found = false;
594 
595 	for (irq = 0; (irq + count - 1) < XLNX_PCIB_MAX_MSI; irq++) {
596 		/* Assume the range is valid. */
597 		found = true;
598 
599 		/* Check this range is valid. */
600 		for (end_irq = irq; end_irq < irq + count; end_irq++) {
601 			if (sc->isrcs[end_irq].flags & XLNX_IRQ_FLAG_USED) {
602 				/* This is already used. */
603 				found = false;
604 				break;
605 			}
606 		}
607 
608 		if (found)
609 			break;
610 	}
611 
612 	if (!found || irq == (XLNX_PCIB_MAX_MSI - 1)) {
613 		/* Not enough interrupts were found. */
614 		mtx_unlock(&sc->mtx);
615 		return (ENXIO);
616 	}
617 
618 	/* Mark the interrupt as used. */
619 	for (i = 0; i < count; i++)
620 		sc->isrcs[irq + i].flags |= XLNX_IRQ_FLAG_USED;
621 
622 	mtx_unlock(&sc->mtx);
623 
624 	for (i = 0; i < count; i++)
625 		srcs[i] = (struct intr_irqsrc *)&sc->isrcs[irq + i];
626 
627 	*pic = device_get_parent(dev);
628 
629 	return (0);
630 }
631 
632 static int
xlnx_pcib_msi_release_msi(device_t dev,device_t child,int count,struct intr_irqsrc ** isrc)633 xlnx_pcib_msi_release_msi(device_t dev, device_t child, int count,
634     struct intr_irqsrc **isrc)
635 {
636 	struct xlnx_pcib_softc *sc;
637 	struct xlnx_pcib_irqsrc *xi;
638 	int i;
639 
640 	sc = device_get_softc(dev);
641 	mtx_lock(&sc->mtx);
642 	for (i = 0; i < count; i++) {
643 		xi = (struct xlnx_pcib_irqsrc *)isrc[i];
644 
645 		KASSERT(xi->flags & XLNX_IRQ_FLAG_USED,
646 		    ("%s: Releasing an unused MSI interrupt", __func__));
647 
648 		xi->flags &= ~XLNX_IRQ_FLAG_USED;
649 	}
650 
651 	mtx_unlock(&sc->mtx);
652 	return (0);
653 }
654 
655 static int
xlnx_pcib_msi_map_msi(device_t dev,device_t child,struct intr_irqsrc * isrc,uint64_t * addr,uint32_t * data)656 xlnx_pcib_msi_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
657     uint64_t *addr, uint32_t *data)
658 {
659 	struct xlnx_pcib_softc *sc;
660 	struct xlnx_pcib_irqsrc *xi;
661 
662 	sc = device_get_softc(dev);
663 	xi = (struct xlnx_pcib_irqsrc *)isrc;
664 
665 	*addr = vtophys(sc->msi_page);
666 	*data = xi->irq;
667 
668 	return (0);
669 }
670 
671 static void
xlnx_pcib_msi_mask(device_t dev,struct intr_irqsrc * isrc,bool mask)672 xlnx_pcib_msi_mask(device_t dev, struct intr_irqsrc *isrc, bool mask)
673 {
674 	struct generic_pcie_fdt_softc *fdt_sc;
675 	struct generic_pcie_core_softc *sc;
676 	struct xlnx_pcib_softc *xlnx_sc;
677 	struct xlnx_pcib_irqsrc *xi;
678 	uint32_t msireg, irq;
679 	uint32_t reg;
680 
681 	xlnx_sc = device_get_softc(dev);
682 	fdt_sc = &xlnx_sc->fdt_sc;
683 	sc = &fdt_sc->base;
684 
685 	xi = (struct xlnx_pcib_irqsrc *)isrc;
686 
687 	irq = xi->irq;
688 	if (irq < 32)
689 		msireg = XLNX_PCIE_RPMSIID1_MASK;
690 	else
691 		msireg = XLNX_PCIE_RPMSIID2_MASK;
692 
693 	reg = bus_read_4(sc->res, msireg);
694 	if (mask)
695 		reg &= ~(1 << irq);
696 	else
697 		reg |= (1 << irq);
698 	bus_write_4(sc->res, msireg, reg);
699 }
700 
701 static void
xlnx_pcib_msi_disable_intr(device_t dev,struct intr_irqsrc * isrc)702 xlnx_pcib_msi_disable_intr(device_t dev, struct intr_irqsrc *isrc)
703 {
704 
705 	xlnx_pcib_msi_mask(dev, isrc, true);
706 }
707 
708 static void
xlnx_pcib_msi_enable_intr(device_t dev,struct intr_irqsrc * isrc)709 xlnx_pcib_msi_enable_intr(device_t dev, struct intr_irqsrc *isrc)
710 {
711 
712 	xlnx_pcib_msi_mask(dev, isrc, false);
713 }
714 
/*
 * pic interface: nothing to do after a filter — pending vectors are
 * already acked in xlnx_pcib_handle_msi_intr().
 */
static void
xlnx_pcib_msi_post_filter(device_t dev, struct intr_irqsrc *isrc)
{

}
720 
721 static void
xlnx_pcib_msi_post_ithread(device_t dev,struct intr_irqsrc * isrc)722 xlnx_pcib_msi_post_ithread(device_t dev, struct intr_irqsrc *isrc)
723 {
724 
725 	xlnx_pcib_msi_mask(dev, isrc, false);
726 }
727 
728 static void
xlnx_pcib_msi_pre_ithread(device_t dev,struct intr_irqsrc * isrc)729 xlnx_pcib_msi_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
730 {
731 
732 	xlnx_pcib_msi_mask(dev, isrc, true);
733 }
734 
/* pic interface: no per-consumer setup is needed for these MSI sources. */
static int
xlnx_pcib_msi_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{

	return (0);
}
742 
/* pic interface: no per-consumer teardown is needed either. */
static int
xlnx_pcib_msi_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{

	return (0);
}
750 
/*
 * Method table: device, pcib, MSI-controller and interrupt-controller
 * interfaces; everything not listed is inherited from the generic FDT
 * PCIe host driver (see DEFINE_CLASS_1 below the table).
 */
static device_method_t xlnx_pcib_fdt_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		xlnx_pcib_fdt_probe),
	DEVMETHOD(device_attach,	xlnx_pcib_fdt_attach),

	/* pcib interface */
	DEVMETHOD(pcib_get_id,		xlnx_pcib_fdt_get_id),
	DEVMETHOD(pcib_read_config,	xlnx_pcib_read_config),
	DEVMETHOD(pcib_write_config,	xlnx_pcib_write_config),
	DEVMETHOD(pcib_alloc_msi,	xlnx_pcib_alloc_msi),
	DEVMETHOD(pcib_release_msi,	xlnx_pcib_release_msi),
	DEVMETHOD(pcib_map_msi,		xlnx_pcib_map_msi),

	/* MSI interface */
	DEVMETHOD(msi_alloc_msi,		xlnx_pcib_msi_alloc_msi),
	DEVMETHOD(msi_release_msi,		xlnx_pcib_msi_release_msi),
	DEVMETHOD(msi_map_msi,			xlnx_pcib_msi_map_msi),

	/* Interrupt controller interface */
	DEVMETHOD(pic_disable_intr,		xlnx_pcib_msi_disable_intr),
	DEVMETHOD(pic_enable_intr,		xlnx_pcib_msi_enable_intr),
	DEVMETHOD(pic_setup_intr,		xlnx_pcib_msi_setup_intr),
	DEVMETHOD(pic_teardown_intr,		xlnx_pcib_msi_teardown_intr),
	DEVMETHOD(pic_post_filter,		xlnx_pcib_msi_post_filter),
	DEVMETHOD(pic_post_ithread,		xlnx_pcib_msi_post_ithread),
	DEVMETHOD(pic_pre_ithread,		xlnx_pcib_msi_pre_ithread),

	/* End */
	DEVMETHOD_END
};
781 
/* Subclass of the generic FDT PCIe host driver; attach on both FDT buses. */
DEFINE_CLASS_1(pcib, xlnx_pcib_fdt_driver, xlnx_pcib_fdt_methods,
    sizeof(struct xlnx_pcib_softc), generic_pcie_fdt_driver);

DRIVER_MODULE(xlnx_pcib, simplebus, xlnx_pcib_fdt_driver, 0, 0);
DRIVER_MODULE(xlnx_pcib, ofwbus, xlnx_pcib_fdt_driver, 0, 0);
787