1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2020-2025 Ruslan Bukin <br@bsdpad.com>
5 *
6 * This software was developed by SRI International and the University of
7 * Cambridge Computer Laboratory (Department of Computer Science and
8 * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
9 * DARPA SSITH research programme.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 */
32
33 #include "opt_platform.h"
34
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/malloc.h>
38 #include <sys/types.h>
39 #include <sys/sysctl.h>
40 #include <sys/kernel.h>
41 #include <sys/rman.h>
42 #include <sys/module.h>
43 #include <sys/bus.h>
44 #include <sys/endian.h>
45 #include <sys/cpuset.h>
46 #include <sys/mutex.h>
47 #include <sys/proc.h>
48
49 #include <machine/intr.h>
50 #include <machine/bus.h>
51
52 #include <vm/vm.h>
53 #include <vm/vm_extern.h>
54 #include <vm/vm_kern.h>
55 #include <vm/pmap.h>
56
57 #include <dev/ofw/openfirm.h>
58 #include <dev/ofw/ofw_bus.h>
59 #include <dev/ofw/ofw_bus_subr.h>
60
61 #include <dev/pci/pcireg.h>
62 #include <dev/pci/pcivar.h>
63 #include <dev/pci/pci_host_generic.h>
64 #include <dev/pci/pci_host_generic_fdt.h>
65 #include <dev/pci/pcib_private.h>
66
67 #include "xlnx_pcib.h"
68
69 #include "ofw_bus_if.h"
70 #include "msi_if.h"
71 #include "pcib_if.h"
72 #include "pic_if.h"
73
74 #define XLNX_PCIB_MAX_MSI 64
75
76 static int xlnx_pcib_fdt_attach(device_t);
77 static int xlnx_pcib_fdt_probe(device_t);
78 static int xlnx_pcib_fdt_get_id(device_t, device_t, enum pci_id_type,
79 uintptr_t *);
80 static void xlnx_pcib_msi_mask(device_t dev, struct intr_irqsrc *isrc,
81 bool mask);
82
/*
 * Per-device software context.  Embeds the generic FDT PCIe host bridge
 * softc (accessed by the pci_host_generic code) plus the Xilinx-specific
 * MSI state.
 */
struct xlnx_pcib_softc {
	struct generic_pcie_fdt_softc	fdt_sc;	/* generic host softc; code casts between the two, so keep first */
	struct resource		*res[4];	/* memory window + 3 IRQs, see xlnx_pcib_spec */
	struct mtx		mtx;		/* protects isrcs[].flags allocation state */
	void			*msi_page;	/* page whose physical address is the MSI doorbell */
	struct xlnx_pcib_irqsrc	*isrcs;		/* array of XLNX_PCIB_MAX_MSI vectors */
	device_t		dev;
	void			*intr_cookie[3];	/* misc/error, MSI bank 0, MSI bank 1 */
};
92
/*
 * Bridge resources: one register window and three interrupt lines
 * (misc/error, MSI vectors 0-31, MSI vectors 32-63); wired up in
 * xlnx_pcib_fdt_attach().
 */
static struct resource_spec xlnx_pcib_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		1,	RF_ACTIVE },
	{ SYS_RES_IRQ,		2,	RF_ACTIVE },
	{ -1, 0 }
};
100
/* Per-MSI-vector interrupt source. */
struct xlnx_pcib_irqsrc {
	struct intr_irqsrc	isrc;	/* generic interrupt source; cast target, keep first */
	u_int			irq;	/* vector number, 0 .. XLNX_PCIB_MAX_MSI - 1 */
#define	XLNX_IRQ_FLAG_USED	(1 << 0)	/* vector currently allocated */
	u_int			flags;
};
107
/* Device-tree "compatible" strings matched by this driver. */
static struct ofw_compat_data compat_data[] = {
	{ "xlnx,xdma-host-3.00",	1 },
	{ "xlnx,axi-pcie-host-1.00.a",	1 },
	{ NULL,				0 },
};
113
114 static void
xlnx_pcib_clear_err_interrupts(struct generic_pcie_core_softc * sc)115 xlnx_pcib_clear_err_interrupts(struct generic_pcie_core_softc *sc)
116 {
117 uint32_t reg;
118
119 reg = bus_read_4(sc->res, XLNX_PCIE_RPERRFRR);
120
121 if (reg & RPERRFRR_VALID) {
122 device_printf(sc->dev, "Requested ID: %x\n",
123 reg & RPERRFRR_REQ_ID_M);
124 bus_write_4(sc->res, XLNX_PCIE_RPERRFRR, ~0U);
125 }
126 }
127
128 static int
xlnx_pcib_intr(void * arg)129 xlnx_pcib_intr(void *arg)
130 {
131 struct generic_pcie_fdt_softc *fdt_sc;
132 struct generic_pcie_core_softc *sc;
133 struct xlnx_pcib_softc *xlnx_sc;
134 uint32_t val, mask, status;
135
136 xlnx_sc = arg;
137 fdt_sc = &xlnx_sc->fdt_sc;
138 sc = &fdt_sc->base;
139
140 val = bus_read_4(sc->res, XLNX_PCIE_IDR);
141 mask = bus_read_4(sc->res, XLNX_PCIE_IMR);
142
143 status = val & mask;
144 if (!status)
145 return (FILTER_HANDLED);
146
147 if (status & IMR_LINK_DOWN)
148 device_printf(sc->dev, "Link down");
149
150 if (status & IMR_HOT_RESET)
151 device_printf(sc->dev, "Hot reset");
152
153 if (status & IMR_CORRECTABLE)
154 xlnx_pcib_clear_err_interrupts(sc);
155
156 if (status & IMR_FATAL)
157 xlnx_pcib_clear_err_interrupts(sc);
158
159 if (status & IMR_NON_FATAL)
160 xlnx_pcib_clear_err_interrupts(sc);
161
162 if (status & IMR_MSI) {
163 device_printf(sc->dev, "MSI interrupt");
164
165 /* FIFO mode MSI not implemented. */
166 }
167
168 if (status & IMR_INTX) {
169 device_printf(sc->dev, "INTx received");
170
171 /* Not implemented. */
172 }
173
174 if (status & IMR_SLAVE_UNSUPP_REQ)
175 device_printf(sc->dev, "Slave unsupported request");
176
177 if (status & IMR_SLAVE_UNEXP_COMPL)
178 device_printf(sc->dev, "Slave unexpected completion");
179
180 if (status & IMR_SLAVE_COMPL_TIMOUT)
181 device_printf(sc->dev, "Slave completion timeout");
182
183 if (status & IMR_SLAVE_ERROR_POISON)
184 device_printf(sc->dev, "Slave error poison");
185
186 if (status & IMR_SLAVE_COMPL_ABORT)
187 device_printf(sc->dev, "Slave completion abort");
188
189 if (status & IMR_SLAVE_ILLEG_BURST)
190 device_printf(sc->dev, "Slave illegal burst");
191
192 if (status & IMR_MASTER_DECERR)
193 device_printf(sc->dev, "Master decode error");
194
195 if (status & IMR_MASTER_SLVERR)
196 device_printf(sc->dev, "Master slave error");
197
198 bus_write_4(sc->res, XLNX_PCIE_IDR, val);
199
200 return (FILTER_HANDLED);
201 }
202
203 static void
xlnx_pcib_handle_msi_intr(void * arg,int msireg)204 xlnx_pcib_handle_msi_intr(void *arg, int msireg)
205 {
206 struct generic_pcie_fdt_softc *fdt_sc;
207 struct generic_pcie_core_softc *sc;
208 struct xlnx_pcib_softc *xlnx_sc;
209 struct xlnx_pcib_irqsrc *xi;
210 struct trapframe *tf;
211 int irq;
212 int reg;
213 int i;
214
215 xlnx_sc = arg;
216 fdt_sc = &xlnx_sc->fdt_sc;
217 sc = &fdt_sc->base;
218 tf = curthread->td_intr_frame;
219
220 do {
221 reg = bus_read_4(sc->res, msireg);
222
223 for (i = 0; i < sizeof(uint32_t) * 8; i++) {
224 if (reg & (1 << i)) {
225 bus_write_4(sc->res, msireg, (1 << i));
226
227 irq = i;
228 if (msireg == XLNX_PCIE_RPMSIID2)
229 irq += 32;
230
231 xi = &xlnx_sc->isrcs[irq];
232 if (intr_isrc_dispatch(&xi->isrc, tf) != 0) {
233 /* Disable stray. */
234 xlnx_pcib_msi_mask(sc->dev,
235 &xi->isrc, 1);
236 device_printf(sc->dev,
237 "Stray irq %u disabled\n", irq);
238 }
239 }
240 }
241 } while (reg != 0);
242 }
243
/* Filter for the MSI bank 0 interrupt line (vectors 0-31). */
static int
xlnx_pcib_msi0_intr(void *arg)
{

	xlnx_pcib_handle_msi_intr(arg, XLNX_PCIE_RPMSIID1);

	return (FILTER_HANDLED);
}
252
/* Filter for the MSI bank 1 interrupt line (vectors 32-63). */
static int
xlnx_pcib_msi1_intr(void *arg)
{

	xlnx_pcib_handle_msi_intr(arg, XLNX_PCIE_RPMSIID2);

	return (FILTER_HANDLED);
}
261
262 static int
xlnx_pcib_register_msi(struct xlnx_pcib_softc * sc)263 xlnx_pcib_register_msi(struct xlnx_pcib_softc *sc)
264 {
265 const char *name;
266 int error;
267 int irq;
268
269 sc->isrcs = malloc(sizeof(*sc->isrcs) * XLNX_PCIB_MAX_MSI, M_DEVBUF,
270 M_WAITOK | M_ZERO);
271
272 name = device_get_nameunit(sc->dev);
273
274 for (irq = 0; irq < XLNX_PCIB_MAX_MSI; irq++) {
275 sc->isrcs[irq].irq = irq;
276 error = intr_isrc_register(&sc->isrcs[irq].isrc,
277 sc->dev, 0, "%s,%u", name, irq);
278 if (error != 0)
279 return (error); /* XXX deregister ISRCs */
280 }
281
282 if (intr_msi_register(sc->dev,
283 OF_xref_from_node(ofw_bus_get_node(sc->dev))) != 0)
284 return (ENXIO);
285
286 return (0);
287 }
288
/*
 * One-time bridge initialization: quiesce and acknowledge stale
 * interrupts, program the MSI doorbell address, enable the bridge and
 * unmask the interrupt sources this driver handles.
 */
static void
xlnx_pcib_init(struct xlnx_pcib_softc *sc)
{
	bus_addr_t addr;
	int reg;

	/* Disable interrupts. */
	bus_write_4(sc->res[0], XLNX_PCIE_IMR, 0);

	/* Clear pending interrupts (presumably write-one-to-clear — TODO confirm). */
	reg = bus_read_4(sc->res[0], XLNX_PCIE_IDR);
	bus_write_4(sc->res[0], XLNX_PCIE_IDR, reg);

	/*
	 * Setup an MSI page: a wired, physically contiguous page whose
	 * physical address is programmed into the bridge as the MSI
	 * target (high word in RPMSIBR1, low word in RPMSIBR2).
	 */
	sc->msi_page = kmem_alloc_contig(PAGE_SIZE, M_WAITOK, 0,
	    BUS_SPACE_MAXADDR, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
	addr = vtophys(sc->msi_page);
	bus_write_4(sc->res[0], XLNX_PCIE_RPMSIBR1, (addr >> 32));
	bus_write_4(sc->res[0], XLNX_PCIE_RPMSIBR2, (addr >> 0));

	/* Enable the bridge. */
	reg = bus_read_4(sc->res[0], XLNX_PCIE_RPSCR);
	reg |= RPSCR_BE;
	bus_write_4(sc->res[0], XLNX_PCIE_RPSCR, reg);

	/* Enable (unmask) the sources handled in xlnx_pcib_intr(). */
	reg = IMR_LINK_DOWN
	    | IMR_HOT_RESET
	    | IMR_CFG_COMPL_STATUS_M
	    | IMR_CFG_TIMEOUT
	    | IMR_CORRECTABLE
	    | IMR_NON_FATAL
	    | IMR_FATAL
	    | IMR_INTX
	    | IMR_MSI
	    | IMR_SLAVE_UNSUPP_REQ
	    | IMR_SLAVE_UNEXP_COMPL
	    | IMR_SLAVE_COMPL_TIMOUT
	    | IMR_SLAVE_ERROR_POISON
	    | IMR_SLAVE_COMPL_ABORT
	    | IMR_SLAVE_ILLEG_BURST
	    | IMR_MASTER_DECERR
	    | IMR_MASTER_SLVERR;
	bus_write_4(sc->res[0], XLNX_PCIE_IMR, reg);
}
334
335 static int
xlnx_pcib_fdt_probe(device_t dev)336 xlnx_pcib_fdt_probe(device_t dev)
337 {
338
339 if (!ofw_bus_status_okay(dev))
340 return (ENXIO);
341
342 if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
343 return (ENXIO);
344
345 device_set_desc(dev, "Xilinx XDMA PCIe Controller");
346
347 return (BUS_PROBE_DEFAULT);
348 }
349
350 static int
xlnx_pcib_fdt_attach(device_t dev)351 xlnx_pcib_fdt_attach(device_t dev)
352 {
353 struct xlnx_pcib_softc *sc;
354 int error;
355
356 sc = device_get_softc(dev);
357 sc->dev = dev;
358
359 mtx_init(&sc->mtx, "msi_mtx", NULL, MTX_DEF);
360
361 if (bus_alloc_resources(dev, xlnx_pcib_spec, sc->res)) {
362 device_printf(dev, "could not allocate resources\n");
363 return (ENXIO);
364 }
365
366 /* Setup MISC interrupt handler. */
367 error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC | INTR_MPSAFE,
368 xlnx_pcib_intr, NULL, sc, &sc->intr_cookie[0]);
369 if (error != 0) {
370 device_printf(dev, "could not setup interrupt handler.\n");
371 return (ENXIO);
372 }
373
374 /* Setup MSI0 interrupt handler. */
375 error = bus_setup_intr(dev, sc->res[2], INTR_TYPE_MISC | INTR_MPSAFE,
376 xlnx_pcib_msi0_intr, NULL, sc, &sc->intr_cookie[1]);
377 if (error != 0) {
378 device_printf(dev, "could not setup interrupt handler.\n");
379 return (ENXIO);
380 }
381
382 /* Setup MSI1 interrupt handler. */
383 error = bus_setup_intr(dev, sc->res[3], INTR_TYPE_MISC | INTR_MPSAFE,
384 xlnx_pcib_msi1_intr, NULL, sc, &sc->intr_cookie[2]);
385 if (error != 0) {
386 device_printf(dev, "could not setup interrupt handler.\n");
387 return (ENXIO);
388 }
389
390 xlnx_pcib_init(sc);
391
392 /*
393 * Allow the core driver to map registers.
394 * We will be accessing the device memory using core_softc.
395 */
396 bus_release_resources(dev, xlnx_pcib_spec, sc->res);
397
398 error = xlnx_pcib_register_msi(sc);
399 if (error)
400 return (error);
401
402 return (pci_host_generic_fdt_attach(dev));
403 }
404
405 static int
xlnx_pcib_fdt_get_id(device_t pci,device_t child,enum pci_id_type type,uintptr_t * id)406 xlnx_pcib_fdt_get_id(device_t pci, device_t child, enum pci_id_type type,
407 uintptr_t *id)
408 {
409 phandle_t node;
410 int bsf;
411
412 if (type != PCI_ID_MSI)
413 return (pcib_get_id(pci, child, type, id));
414
415 node = ofw_bus_get_node(pci);
416 if (OF_hasprop(node, "msi-map"))
417 return (generic_pcie_get_id(pci, child, type, id));
418
419 bsf = pci_get_rid(child);
420 *id = (pci_get_domain(child) << PCI_RID_DOMAIN_SHIFT) | bsf;
421
422 return (0);
423 }
424
425 static int
xlnx_pcib_req_valid(struct generic_pcie_core_softc * sc,u_int bus,u_int slot,u_int func,u_int reg)426 xlnx_pcib_req_valid(struct generic_pcie_core_softc *sc,
427 u_int bus, u_int slot, u_int func, u_int reg)
428 {
429 bus_space_handle_t h;
430 bus_space_tag_t t;
431 uint32_t val;
432
433 t = rman_get_bustag(sc->res);
434 h = rman_get_bushandle(sc->res);
435
436 if ((bus < sc->bus_start) || (bus > sc->bus_end))
437 return (0);
438 if ((slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) ||
439 (reg > PCIE_REGMAX))
440 return (0);
441
442 if (bus == 0 && slot > 0)
443 return (0);
444
445 val = bus_space_read_4(t, h, XLNX_PCIE_PHYSCR);
446 if ((val & PHYSCR_LINK_UP) == 0) {
447 /* Link is down */
448 return (0);
449 }
450
451 /* Valid */
452
453 return (1);
454 }
455
/*
 * Read configuration space for the given bus/slot/function/register.
 * All accesses are performed as aligned 32-bit reads, with the
 * requested byte lanes extracted afterwards.  Invalid requests (out of
 * range, link down) return ~0, which callers treat as "no device".
 */
static uint32_t
xlnx_pcib_read_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, int bytes)
{
	struct generic_pcie_fdt_softc *fdt_sc;
	struct xlnx_pcib_softc *xlnx_sc;
	struct generic_pcie_core_softc *sc;
	bus_space_handle_t h;
	bus_space_tag_t t;
	uint64_t offset;
	uint32_t data;

	xlnx_sc = device_get_softc(dev);
	fdt_sc = &xlnx_sc->fdt_sc;
	sc = &fdt_sc->base;

	if (!xlnx_pcib_req_valid(sc, bus, slot, func, reg))
		return (~0U);

	offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg);
	t = rman_get_bustag(sc->res);
	h = rman_get_bushandle(sc->res);

	/* Aligned 32-bit read; narrower widths extracted below. */
	data = bus_space_read_4(t, h, offset & ~3);

	switch (bytes) {
	case 1:
		data >>= (offset & 3) * 8;
		data &= 0xff;
		break;
	case 2:
		data >>= (offset & 3) * 8;
		/*
		 * NOTE(review): le16toh after the shift looks suspect on
		 * big-endian hosts (see the endianness TODO in
		 * xlnx_pcib_write_config); it is a no-op on little-endian.
		 */
		data = le16toh(data);
		break;
	case 4:
		data = le32toh(data);
		break;
	default:
		return (~0U);
	}

	return (data);
}
499
/*
 * Write configuration space for the given bus/slot/function/register.
 * Sub-word writes are implemented as read-modify-write of the aligned
 * 32-bit word.  Invalid requests are silently dropped.
 */
static void
xlnx_pcib_write_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, uint32_t val, int bytes)
{
	struct generic_pcie_fdt_softc *fdt_sc;
	struct xlnx_pcib_softc *xlnx_sc;
	struct generic_pcie_core_softc *sc;
	bus_space_handle_t h;
	bus_space_tag_t t;
	uint64_t offset;
	uint32_t data;

	xlnx_sc = device_get_softc(dev);
	fdt_sc = &xlnx_sc->fdt_sc;
	sc = &fdt_sc->base;

	if (!xlnx_pcib_req_valid(sc, bus, slot, func, reg))
		return;

	offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg);

	t = rman_get_bustag(sc->res);
	h = rman_get_bushandle(sc->res);

	/*
	 * 32-bit access used due to a bug in the Xilinx bridge that
	 * requires to write primary and secondary buses in one blast.
	 *
	 * TODO: This is probably wrong on big-endian.
	 */
	switch (bytes) {
	case 1:
		/* Merge one byte into the containing 32-bit word. */
		data = bus_space_read_4(t, h, offset & ~3);
		data &= ~(0xff << ((offset & 3) * 8));
		data |= (val & 0xff) << ((offset & 3) * 8);
		bus_space_write_4(t, h, offset & ~3, htole32(data));
		break;
	case 2:
		/* Merge one 16-bit half into the containing word. */
		data = bus_space_read_4(t, h, offset & ~3);
		data &= ~(0xffff << ((offset & 3) * 8));
		data |= (val & 0xffff) << ((offset & 3) * 8);
		bus_space_write_4(t, h, offset & ~3, htole32(data));
		break;
	case 4:
		bus_space_write_4(t, h, offset, htole32(val));
		break;
	default:
		return;
	}
}
550
/* pcib interface: allocate MSI vectors for a child device. */
static int
xlnx_pcib_alloc_msi(device_t pci, device_t child, int count, int maxcount,
    int *irqs)
{
	phandle_t msi_parent;

	/*
	 * NOTE(review): the result of this msimap lookup is immediately
	 * overwritten below, so the bridge always acts as its own MSI
	 * parent — confirm whether msi-map routing was intended here.
	 */
	ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent,
	    NULL);
	msi_parent = OF_xref_from_node(ofw_bus_get_node(pci));
	return (intr_alloc_msi(pci, child, msi_parent, count, maxcount,
	    irqs));
}
563
/* pcib interface: release previously allocated MSI vectors. */
static int
xlnx_pcib_release_msi(device_t pci, device_t child, int count, int *irqs)
{
	phandle_t msi_parent;

	/* NOTE(review): lookup result overwritten below, as in alloc_msi. */
	ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent,
	    NULL);
	msi_parent = OF_xref_from_node(ofw_bus_get_node(pci));
	return (intr_release_msi(pci, child, msi_parent, count, irqs));
}
574
/* pcib interface: obtain the doorbell address/data pair for a vector. */
static int
xlnx_pcib_map_msi(device_t pci, device_t child, int irq, uint64_t *addr,
    uint32_t *data)
{
	phandle_t msi_parent;

	/* NOTE(review): lookup result overwritten below, as in alloc_msi. */
	ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent,
	    NULL);
	msi_parent = OF_xref_from_node(ofw_bus_get_node(pci));
	return (intr_map_msi(pci, child, msi_parent, irq, addr, data));
}
586
587 static int
xlnx_pcib_msi_alloc_msi(device_t dev,device_t child,int count,int maxcount,device_t * pic,struct intr_irqsrc ** srcs)588 xlnx_pcib_msi_alloc_msi(device_t dev, device_t child, int count, int maxcount,
589 device_t *pic, struct intr_irqsrc **srcs)
590 {
591 struct xlnx_pcib_softc *sc;
592 int irq, end_irq, i;
593 bool found;
594
595 sc = device_get_softc(dev);
596
597 mtx_lock(&sc->mtx);
598
599 found = false;
600
601 for (irq = 0; (irq + count - 1) < XLNX_PCIB_MAX_MSI; irq++) {
602 /* Assume the range is valid. */
603 found = true;
604
605 /* Check this range is valid. */
606 for (end_irq = irq; end_irq < irq + count; end_irq++) {
607 if (sc->isrcs[end_irq].flags & XLNX_IRQ_FLAG_USED) {
608 /* This is already used. */
609 found = false;
610 break;
611 }
612 }
613
614 if (found)
615 break;
616 }
617
618 if (!found || irq == (XLNX_PCIB_MAX_MSI - 1)) {
619 /* Not enough interrupts were found. */
620 mtx_unlock(&sc->mtx);
621 return (ENXIO);
622 }
623
624 /* Mark the interrupt as used. */
625 for (i = 0; i < count; i++)
626 sc->isrcs[irq + i].flags |= XLNX_IRQ_FLAG_USED;
627
628 mtx_unlock(&sc->mtx);
629
630 for (i = 0; i < count; i++)
631 srcs[i] = (struct intr_irqsrc *)&sc->isrcs[irq + i];
632
633 *pic = device_get_parent(dev);
634
635 return (0);
636 }
637
638 static int
xlnx_pcib_msi_release_msi(device_t dev,device_t child,int count,struct intr_irqsrc ** isrc)639 xlnx_pcib_msi_release_msi(device_t dev, device_t child, int count,
640 struct intr_irqsrc **isrc)
641 {
642 struct xlnx_pcib_softc *sc;
643 struct xlnx_pcib_irqsrc *xi;
644 int i;
645
646 sc = device_get_softc(dev);
647 mtx_lock(&sc->mtx);
648 for (i = 0; i < count; i++) {
649 xi = (struct xlnx_pcib_irqsrc *)isrc[i];
650
651 KASSERT(xi->flags & XLNX_IRQ_FLAG_USED,
652 ("%s: Releasing an unused MSI interrupt", __func__));
653
654 xi->flags &= ~XLNX_IRQ_FLAG_USED;
655 }
656
657 mtx_unlock(&sc->mtx);
658 return (0);
659 }
660
661 static int
xlnx_pcib_msi_map_msi(device_t dev,device_t child,struct intr_irqsrc * isrc,uint64_t * addr,uint32_t * data)662 xlnx_pcib_msi_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
663 uint64_t *addr, uint32_t *data)
664 {
665 struct xlnx_pcib_softc *sc;
666 struct xlnx_pcib_irqsrc *xi;
667
668 sc = device_get_softc(dev);
669 xi = (struct xlnx_pcib_irqsrc *)isrc;
670
671 *addr = vtophys(sc->msi_page);
672 *data = xi->irq;
673
674 return (0);
675 }
676
677 static void
xlnx_pcib_msi_mask(device_t dev,struct intr_irqsrc * isrc,bool mask)678 xlnx_pcib_msi_mask(device_t dev, struct intr_irqsrc *isrc, bool mask)
679 {
680 struct generic_pcie_fdt_softc *fdt_sc;
681 struct generic_pcie_core_softc *sc;
682 struct xlnx_pcib_softc *xlnx_sc;
683 struct xlnx_pcib_irqsrc *xi;
684 uint32_t msireg, irq;
685 uint32_t reg;
686
687 xlnx_sc = device_get_softc(dev);
688 fdt_sc = &xlnx_sc->fdt_sc;
689 sc = &fdt_sc->base;
690
691 xi = (struct xlnx_pcib_irqsrc *)isrc;
692
693 irq = xi->irq;
694 if (irq < 32)
695 msireg = XLNX_PCIE_RPMSIID1_MASK;
696 else
697 msireg = XLNX_PCIE_RPMSIID2_MASK;
698
699 reg = bus_read_4(sc->res, msireg);
700 if (mask)
701 reg &= ~(1 << irq);
702 else
703 reg |= (1 << irq);
704 bus_write_4(sc->res, msireg, reg);
705 }
706
/* PIC interface: disable a vector by masking it in the bridge. */
static void
xlnx_pcib_msi_disable_intr(device_t dev, struct intr_irqsrc *isrc)
{

	xlnx_pcib_msi_mask(dev, isrc, true);
}
713
/* PIC interface: enable a vector by unmasking it in the bridge. */
static void
xlnx_pcib_msi_enable_intr(device_t dev, struct intr_irqsrc *isrc)
{

	xlnx_pcib_msi_mask(dev, isrc, false);
}
720
/*
 * PIC interface: nothing to do after a filter — vectors are already
 * acknowledged bit-by-bit in xlnx_pcib_handle_msi_intr().
 */
static void
xlnx_pcib_msi_post_filter(device_t dev, struct intr_irqsrc *isrc)
{

}
726
/* PIC interface: re-enable the vector after its ithread has run. */
static void
xlnx_pcib_msi_post_ithread(device_t dev, struct intr_irqsrc *isrc)
{

	xlnx_pcib_msi_mask(dev, isrc, false);
}
733
/* PIC interface: mask the vector while its ithread is scheduled. */
static void
xlnx_pcib_msi_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
{

	xlnx_pcib_msi_mask(dev, isrc, true);
}
740
/* PIC interface: no per-vector setup is required for this bridge. */
static int
xlnx_pcib_msi_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{

	return (0);
}
748
/* PIC interface: no per-vector teardown is required for this bridge. */
static int
xlnx_pcib_msi_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{

	return (0);
}
756
/*
 * Method table.  Methods not listed here (bus, ofw_bus, remaining pcib
 * methods) are inherited from generic_pcie_fdt_driver below.
 */
static device_method_t xlnx_pcib_fdt_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		xlnx_pcib_fdt_probe),
	DEVMETHOD(device_attach,	xlnx_pcib_fdt_attach),

	/* pcib interface */
	DEVMETHOD(pcib_get_id,		xlnx_pcib_fdt_get_id),
	DEVMETHOD(pcib_read_config,	xlnx_pcib_read_config),
	DEVMETHOD(pcib_write_config,	xlnx_pcib_write_config),
	DEVMETHOD(pcib_alloc_msi,	xlnx_pcib_alloc_msi),
	DEVMETHOD(pcib_release_msi,	xlnx_pcib_release_msi),
	DEVMETHOD(pcib_map_msi,		xlnx_pcib_map_msi),

	/* MSI interface */
	DEVMETHOD(msi_alloc_msi,	xlnx_pcib_msi_alloc_msi),
	DEVMETHOD(msi_release_msi,	xlnx_pcib_msi_release_msi),
	DEVMETHOD(msi_map_msi,		xlnx_pcib_msi_map_msi),

	/* Interrupt controller interface */
	DEVMETHOD(pic_disable_intr,	xlnx_pcib_msi_disable_intr),
	DEVMETHOD(pic_enable_intr,	xlnx_pcib_msi_enable_intr),
	DEVMETHOD(pic_setup_intr,	xlnx_pcib_msi_setup_intr),
	DEVMETHOD(pic_teardown_intr,	xlnx_pcib_msi_teardown_intr),
	DEVMETHOD(pic_post_filter,	xlnx_pcib_msi_post_filter),
	DEVMETHOD(pic_post_ithread,	xlnx_pcib_msi_post_ithread),
	DEVMETHOD(pic_pre_ithread,	xlnx_pcib_msi_pre_ithread),

	/* End */
	DEVMETHOD_END
};
787
/*
 * Subclass of the generic FDT PCIe host bridge driver; register on both
 * simplebus and ofwbus so either enumeration path can attach us.
 */
DEFINE_CLASS_1(pcib, xlnx_pcib_fdt_driver, xlnx_pcib_fdt_methods,
    sizeof(struct xlnx_pcib_softc), generic_pcie_fdt_driver);

DRIVER_MODULE(xlnx_pcib, simplebus, xlnx_pcib_fdt_driver, 0, 0);
DRIVER_MODULE(xlnx_pcib, ofwbus, xlnx_pcib_fdt_driver, 0, 0);
793