/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory (Department of Computer Science and
 * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
 * DARPA SSITH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/kernel.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/cpuset.h>
#include <sys/mutex.h>
#include <sys/proc.h>

#include <machine/intr.h>
#include <machine/bus.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_host_generic.h>
#include <dev/pci/pci_host_generic_fdt.h>
#include <dev/pci/pcib_private.h>

#include "xlnx_pcib.h"

#include "ofw_bus_if.h"
#include "msi_if.h"
#include "pcib_if.h"
#include "pic_if.h"

#define	XLNX_PCIB_MAX_MSI	64

static int xlnx_pcib_fdt_attach(device_t);
static int xlnx_pcib_fdt_probe(device_t);
static int xlnx_pcib_fdt_get_id(device_t, device_t, enum pci_id_type,
    uintptr_t *);
static void xlnx_pcib_msi_mask(device_t dev, struct intr_irqsrc *isrc,
    bool mask);

struct xlnx_pcib_softc {
	struct generic_pcie_fdt_softc	fdt_sc;
	struct resource			*res[4];
	struct mtx			mtx;
	vm_offset_t			msi_page;
	struct xlnx_pcib_irqsrc		*isrcs;
	device_t			dev;
	void				*intr_cookie[3];
};

static struct resource_spec xlnx_pcib_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		1,	RF_ACTIVE },
	{ SYS_RES_IRQ,		2,	RF_ACTIVE },
	{ -1, 0 }
};

struct xlnx_pcib_irqsrc {
	struct intr_irqsrc	isrc;
	u_int			irq;
#define	XLNX_IRQ_FLAG_USED	(1 << 0)
	u_int			flags;
};
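
/*
 * Acknowledge an error interrupt: if the root port error FIFO read register
 * (RPERRFRR) holds a valid entry, report the requester ID it recorded and
 * write all ones back to clear the entry.
 */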
static void
xlnx_pcib_clear_err_interrupts(struct generic_pcie_core_softc *sc)
{
	uint32_t reg;

	reg = bus_read_4(sc->res, XLNX_PCIE_RPERRFRR);

	if (reg & RPERRFRR_VALID) {
		device_printf(sc->dev, "Requested ID: %x\n",
		    reg & RPERRFRR_REQ_ID_M);
		bus_write_4(sc->res, XLNX_PCIE_RPERRFRR, ~0U);
	}
}

static int
xlnx_pcib_intr(void *arg)
{
	struct generic_pcie_fdt_softc *fdt_sc;
	struct generic_pcie_core_softc *sc;
	struct xlnx_pcib_softc *xlnx_sc;
	uint32_t val, mask, status;

	xlnx_sc = arg;
	fdt_sc = &xlnx_sc->fdt_sc;
	sc = &fdt_sc->base;

	val = bus_read_4(sc->res, XLNX_PCIE_IDR);
	mask = bus_read_4(sc->res, XLNX_PCIE_IMR);

	status = val & mask;
	if (!status)
		return (FILTER_HANDLED);

	if (status & IMR_LINK_DOWN)
		device_printf(sc->dev, "Link down");

	if (status & IMR_HOT_RESET)
		device_printf(sc->dev, "Hot reset");

	if (status & IMR_CORRECTABLE)
		xlnx_pcib_clear_err_interrupts(sc);

	if (status & IMR_FATAL)
		xlnx_pcib_clear_err_interrupts(sc);

	if (status & IMR_NON_FATAL)
		xlnx_pcib_clear_err_interrupts(sc);

	if (status & IMR_MSI) {
		device_printf(sc->dev, "MSI interrupt");

		/* FIFO mode MSI not implemented. */
	}

	if (status & IMR_INTX) {
		device_printf(sc->dev, "INTx received");

		/* Not implemented. */
	}

	if (status & IMR_SLAVE_UNSUPP_REQ)
		device_printf(sc->dev, "Slave unsupported request");

	if (status & IMR_SLAVE_UNEXP_COMPL)
		device_printf(sc->dev, "Slave unexpected completion");

	if (status & IMR_SLAVE_COMPL_TIMOUT)
		device_printf(sc->dev, "Slave completion timeout");

	if (status & IMR_SLAVE_ERROR_POISON)
		device_printf(sc->dev, "Slave error poison");

	if (status & IMR_SLAVE_COMPL_ABORT)
		device_printf(sc->dev, "Slave completion abort");

	if (status & IMR_SLAVE_ILLEG_BURST)
		device_printf(sc->dev, "Slave illegal burst");

	if (status & IMR_MASTER_DECERR)
		device_printf(sc->dev, "Master decode error");

	if (status & IMR_MASTER_SLVERR)
		device_printf(sc->dev, "Master slave error");

	bus_write_4(sc->res, XLNX_PCIE_IDR, val);

	return (FILTER_HANDLED);
}
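
/*
 * Decode and dispatch MSI interrupts from one of the two 32-bit MSI decode
 * registers.  Each set bit is acknowledged by writing it back and translated
 * to an MSI vector number (bits of RPMSIID2 map to vectors 32-63); the
 * corresponding interrupt source is dispatched, and vectors with no handler
 * are masked as stray.
 */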
static void
xlnx_pcib_handle_msi_intr(void *arg, int msireg)
{
	struct generic_pcie_fdt_softc *fdt_sc;
	struct generic_pcie_core_softc *sc;
	struct xlnx_pcib_softc *xlnx_sc;
	struct xlnx_pcib_irqsrc *xi;
	struct trapframe *tf;
	int irq;
	int reg;
	int i;

	xlnx_sc = arg;
	fdt_sc = &xlnx_sc->fdt_sc;
	sc = &fdt_sc->base;
	tf = curthread->td_intr_frame;

	do {
		reg = bus_read_4(sc->res, msireg);

		for (i = 0; i < sizeof(uint32_t) * 8; i++) {
			if (reg & (1 << i)) {
				bus_write_4(sc->res, msireg, (1 << i));

				irq = i;
				if (msireg == XLNX_PCIE_RPMSIID2)
					irq += 32;

				xi = &xlnx_sc->isrcs[irq];
				if (intr_isrc_dispatch(&xi->isrc, tf) != 0) {
					/* Disable stray. */
					xlnx_pcib_msi_mask(sc->dev,
					    &xi->isrc, 1);
					device_printf(sc->dev,
					    "Stray irq %u disabled\n", irq);
				}
			}
		}
	} while (reg != 0);
}

static int
xlnx_pcib_msi0_intr(void *arg)
{

	xlnx_pcib_handle_msi_intr(arg, XLNX_PCIE_RPMSIID1);

	return (FILTER_HANDLED);
}

static int
xlnx_pcib_msi1_intr(void *arg)
{

	xlnx_pcib_handle_msi_intr(arg, XLNX_PCIE_RPMSIID2);

	return (FILTER_HANDLED);
}

static int
xlnx_pcib_register_msi(struct xlnx_pcib_softc *sc)
{
	const char *name;
	int error;
	int irq;

	sc->isrcs = malloc(sizeof(*sc->isrcs) * XLNX_PCIB_MAX_MSI, M_DEVBUF,
	    M_WAITOK | M_ZERO);

	name = device_get_nameunit(sc->dev);

	for (irq = 0; irq < XLNX_PCIB_MAX_MSI; irq++) {
		sc->isrcs[irq].irq = irq;
		error = intr_isrc_register(&sc->isrcs[irq].isrc,
		    sc->dev, 0, "%s,%u", name, irq);
		if (error != 0)
			return (error); /* XXX deregister ISRCs */
	}

	if (intr_msi_register(sc->dev,
	    OF_xref_from_node(ofw_bus_get_node(sc->dev))) != 0)
		return (ENXIO);

	return (0);
}
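
/*
 * Bring up the bridge core: mask and acknowledge all interrupts, allocate a
 * dedicated page whose physical address is programmed as the MSI target
 * address (RPMSIBR1/RPMSIBR2), enable the root port bridge, and finally
 * unmask the interrupt sources handled by xlnx_pcib_intr().
 */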
static void
xlnx_pcib_init(struct xlnx_pcib_softc *sc)
{
	bus_addr_t addr;
	int reg;

	/* Disable interrupts. */
	bus_write_4(sc->res[0], XLNX_PCIE_IMR, 0);

	/* Clear pending interrupts. */
	reg = bus_read_4(sc->res[0], XLNX_PCIE_IDR);
	bus_write_4(sc->res[0], XLNX_PCIE_IDR, reg);

	/* Setup an MSI page. */
	sc->msi_page = kmem_alloc_contig(PAGE_SIZE, M_WAITOK, 0,
	    BUS_SPACE_MAXADDR, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
	addr = vtophys(sc->msi_page);
	bus_write_4(sc->res[0], XLNX_PCIE_RPMSIBR1, (addr >> 32));
	bus_write_4(sc->res[0], XLNX_PCIE_RPMSIBR2, (addr >> 0));

	/* Enable the bridge. */
	reg = bus_read_4(sc->res[0], XLNX_PCIE_RPSCR);
	reg |= RPSCR_BE;
	bus_write_4(sc->res[0], XLNX_PCIE_RPSCR, reg);

	/* Enable interrupts. */
	reg = IMR_LINK_DOWN
		| IMR_HOT_RESET
		| IMR_CFG_COMPL_STATUS_M
		| IMR_CFG_TIMEOUT
		| IMR_CORRECTABLE
		| IMR_NON_FATAL
		| IMR_FATAL
		| IMR_INTX
		| IMR_MSI
		| IMR_SLAVE_UNSUPP_REQ
		| IMR_SLAVE_UNEXP_COMPL
		| IMR_SLAVE_COMPL_TIMOUT
		| IMR_SLAVE_ERROR_POISON
		| IMR_SLAVE_COMPL_ABORT
		| IMR_SLAVE_ILLEG_BURST
		| IMR_MASTER_DECERR
		| IMR_MASTER_SLVERR;
	bus_write_4(sc->res[0], XLNX_PCIE_IMR, reg);
}

static int
xlnx_pcib_fdt_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_is_compatible(dev, "xlnx,xdma-host-3.00")) {
		device_set_desc(dev, "Xilinx XDMA PCIe Controller");
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static int
xlnx_pcib_fdt_attach(device_t dev)
{
	struct xlnx_pcib_softc *sc;
	int error;

	sc = device_get_softc(dev);
	sc->dev = dev;

	mtx_init(&sc->mtx, "msi_mtx", NULL, MTX_DEF);

	if (bus_alloc_resources(dev, xlnx_pcib_spec, sc->res)) {
		device_printf(dev, "could not allocate resources\n");
		return (ENXIO);
	}

	/* Setup MISC interrupt handler. */
	error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC | INTR_MPSAFE,
	    xlnx_pcib_intr, NULL, sc, &sc->intr_cookie[0]);
	if (error != 0) {
		device_printf(dev, "could not setup interrupt handler.\n");
		return (ENXIO);
	}

	/* Setup MSI0 interrupt handler. */
	error = bus_setup_intr(dev, sc->res[2], INTR_TYPE_MISC | INTR_MPSAFE,
	    xlnx_pcib_msi0_intr, NULL, sc, &sc->intr_cookie[1]);
	if (error != 0) {
		device_printf(dev, "could not setup interrupt handler.\n");
		return (ENXIO);
	}

	/* Setup MSI1 interrupt handler. */
	error = bus_setup_intr(dev, sc->res[3], INTR_TYPE_MISC | INTR_MPSAFE,
	    xlnx_pcib_msi1_intr, NULL, sc, &sc->intr_cookie[2]);
	if (error != 0) {
		device_printf(dev, "could not setup interrupt handler.\n");
		return (ENXIO);
	}

	xlnx_pcib_init(sc);

	/*
	 * Allow the core driver to map registers.
	 * We will be accessing the device memory using core_softc.
	 */
	bus_release_resources(dev, xlnx_pcib_spec, sc->res);

	error = xlnx_pcib_register_msi(sc);
	if (error)
		return (error);

	return (pci_host_generic_attach(dev));
}

static int
xlnx_pcib_fdt_get_id(device_t pci, device_t child, enum pci_id_type type,
    uintptr_t *id)
{
	phandle_t node;
	int bsf;

	if (type != PCI_ID_MSI)
		return (pcib_get_id(pci, child, type, id));

	node = ofw_bus_get_node(pci);
	if (OF_hasprop(node, "msi-map"))
		return (generic_pcie_get_id(pci, child, type, id));

	bsf = pci_get_rid(child);
	*id = (pci_get_domain(child) << PCI_RID_DOMAIN_SHIFT) | bsf;

	return (0);
}
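
/*
 * Reject configuration requests the bridge cannot complete: out-of-range
 * bus/slot/function/register numbers, any device other than 0 on the root
 * bus, and any access while the PCIe link is down (PHYSCR_LINK_UP clear).
 */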
static int
xlnx_pcib_req_valid(struct generic_pcie_core_softc *sc,
    u_int bus, u_int slot, u_int func, u_int reg)
{
	bus_space_handle_t h;
	bus_space_tag_t t;
	uint32_t val;

	t = sc->bst;
	h = sc->bsh;

	if ((bus < sc->bus_start) || (bus > sc->bus_end))
		return (0);
	if ((slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) ||
	    (reg > PCIE_REGMAX))
		return (0);

	if (bus == 0 && slot > 0)
		return (0);

	val = bus_space_read_4(t, h, XLNX_PCIE_PHYSCR);
	if ((val & PHYSCR_LINK_UP) == 0) {
		/* Link is down */
		return (0);
	}

	/* Valid */

	return (1);
}
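
/*
 * Configuration space is always accessed as aligned 32-bit words; for 1- and
 * 2-byte reads the requested bytes are extracted from the word.  Invalid
 * requests return all ones, as an aborted config read would.
 */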
static uint32_t
xlnx_pcib_read_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, int bytes)
{
	struct generic_pcie_fdt_softc *fdt_sc;
	struct xlnx_pcib_softc *xlnx_sc;
	struct generic_pcie_core_softc *sc;
	bus_space_handle_t h;
	bus_space_tag_t t;
	uint64_t offset;
	uint32_t data;

	xlnx_sc = device_get_softc(dev);
	fdt_sc = &xlnx_sc->fdt_sc;
	sc = &fdt_sc->base;

	if (!xlnx_pcib_req_valid(sc, bus, slot, func, reg))
		return (~0U);

	offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg);
	t = sc->bst;
	h = sc->bsh;

	data = bus_space_read_4(t, h, offset & ~3);

	switch (bytes) {
	case 1:
		data >>= (offset & 3) * 8;
		data &= 0xff;
		break;
	case 2:
		data >>= (offset & 3) * 8;
		data = le16toh(data);
		break;
	case 4:
		data = le32toh(data);
		break;
	default:
		return (~0U);
	}

	return (data);
}

static void
xlnx_pcib_write_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, uint32_t val, int bytes)
{
	struct generic_pcie_fdt_softc *fdt_sc;
	struct xlnx_pcib_softc *xlnx_sc;
	struct generic_pcie_core_softc *sc;
	bus_space_handle_t h;
	bus_space_tag_t t;
	uint64_t offset;
	uint32_t data;

	xlnx_sc = device_get_softc(dev);
	fdt_sc = &xlnx_sc->fdt_sc;
	sc = &fdt_sc->base;

	if (!xlnx_pcib_req_valid(sc, bus, slot, func, reg))
		return;

	offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg);

	t = sc->bst;
	h = sc->bsh;

	/*
	 * 32-bit access used due to a bug in the Xilinx bridge that
	 * requires to write primary and secondary buses in one blast.
	 *
	 * TODO: This is probably wrong on big-endian.
	 */
	switch (bytes) {
	case 1:
		data = bus_space_read_4(t, h, offset & ~3);
		data &= ~(0xff << ((offset & 3) * 8));
		data |= (val & 0xff) << ((offset & 3) * 8);
		bus_space_write_4(t, h, offset & ~3, htole32(data));
		break;
	case 2:
		data = bus_space_read_4(t, h, offset & ~3);
		data &= ~(0xffff << ((offset & 3) * 8));
		data |= (val & 0xffff) << ((offset & 3) * 8);
		bus_space_write_4(t, h, offset & ~3, htole32(data));
		break;
	case 4:
		bus_space_write_4(t, h, offset, htole32(val));
		break;
	default:
		return;
	}
}

static int
xlnx_pcib_alloc_msi(device_t pci, device_t child, int count, int maxcount,
    int *irqs)
{
	phandle_t msi_parent;

	ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent,
	    NULL);
	msi_parent = OF_xref_from_node(ofw_bus_get_node(pci));
	return (intr_alloc_msi(pci, child, msi_parent, count, maxcount,
	    irqs));
}

static int
xlnx_pcib_release_msi(device_t pci, device_t child, int count, int *irqs)
{
	phandle_t msi_parent;

	ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent,
	    NULL);
	msi_parent = OF_xref_from_node(ofw_bus_get_node(pci));
	return (intr_release_msi(pci, child, msi_parent, count, irqs));
}

static int
xlnx_pcib_map_msi(device_t pci, device_t child, int irq, uint64_t *addr,
    uint32_t *data)
{
	phandle_t msi_parent;

	ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent,
	    NULL);
	msi_parent = OF_xref_from_node(ofw_bus_get_node(pci));
	return (intr_map_msi(pci, child, msi_parent, irq, addr, data));
}
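
/*
 * MSI allocation for the bridge's own interrupt controller: a first-fit
 * search for 'count' contiguous unused vectors in the 64-entry isrcs array,
 * performed under the softc mutex.
 */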
static int
xlnx_pcib_msi_alloc_msi(device_t dev, device_t child, int count, int maxcount,
    device_t *pic, struct intr_irqsrc **srcs)
{
	struct xlnx_pcib_softc *sc;
	int irq, end_irq, i;
	bool found;

	sc = device_get_softc(dev);

	mtx_lock(&sc->mtx);

	found = false;

	for (irq = 0; (irq + count - 1) < XLNX_PCIB_MAX_MSI; irq++) {

		/* Assume the range is valid. */
		found = true;

		/* Check this range is valid. */
		for (end_irq = irq; end_irq < irq + count; end_irq++) {
			if (sc->isrcs[end_irq].flags & XLNX_IRQ_FLAG_USED) {
				/* This is already used. */
				found = false;
				break;
			}
		}

		if (found)
			break;
	}

	if (!found || irq == (XLNX_PCIB_MAX_MSI - 1)) {
		/* Not enough interrupts were found. */
		mtx_unlock(&sc->mtx);
		return (ENXIO);
	}

	/* Mark the interrupt as used. */
	for (i = 0; i < count; i++)
		sc->isrcs[irq + i].flags |= XLNX_IRQ_FLAG_USED;

	mtx_unlock(&sc->mtx);

	for (i = 0; i < count; i++)
		srcs[i] = (struct intr_irqsrc *)&sc->isrcs[irq + i];

	*pic = device_get_parent(dev);

	return (0);
}

static int
xlnx_pcib_msi_release_msi(device_t dev, device_t child, int count,
    struct intr_irqsrc **isrc)
{
	struct xlnx_pcib_softc *sc;
	struct xlnx_pcib_irqsrc *xi;
	int i;

	sc = device_get_softc(dev);
	mtx_lock(&sc->mtx);
	for (i = 0; i < count; i++) {
		xi = (struct xlnx_pcib_irqsrc *)isrc[i];

		KASSERT(xi->flags & XLNX_IRQ_FLAG_USED,
		    ("%s: Releasing an unused MSI interrupt", __func__));

		xi->flags &= ~XLNX_IRQ_FLAG_USED;
	}

	mtx_unlock(&sc->mtx);
	return (0);
}

static int
xlnx_pcib_msi_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
    uint64_t *addr, uint32_t *data)
{
	struct xlnx_pcib_softc *sc;
	struct xlnx_pcib_irqsrc *xi;

	sc = device_get_softc(dev);
	xi = (struct xlnx_pcib_irqsrc *)isrc;

	*addr = vtophys(sc->msi_page);
	*data = xi->irq;

	return (0);
}
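
/*
 * Mask or unmask a single MSI vector.  Vectors 0-31 live in the
 * XLNX_PCIE_RPMSIID1_MASK register and vectors 32-63 in
 * XLNX_PCIE_RPMSIID2_MASK; the bit index within the selected 32-bit
 * register is the vector number modulo 32.
 */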
static void
xlnx_pcib_msi_mask(device_t dev, struct intr_irqsrc *isrc, bool mask)
{
	struct generic_pcie_fdt_softc *fdt_sc;
	struct generic_pcie_core_softc *sc;
	struct xlnx_pcib_softc *xlnx_sc;
	struct xlnx_pcib_irqsrc *xi;
	uint32_t msireg, irq;
	uint32_t reg;

	xlnx_sc = device_get_softc(dev);
	fdt_sc = &xlnx_sc->fdt_sc;
	sc = &fdt_sc->base;

	xi = (struct xlnx_pcib_irqsrc *)isrc;

	irq = xi->irq;
	if (irq < 32)
		msireg = XLNX_PCIE_RPMSIID1_MASK;
	else
		msireg = XLNX_PCIE_RPMSIID2_MASK;

	reg = bus_read_4(sc->res, msireg);
	if (mask)
		reg &= ~(1 << (irq % 32));
	else
		reg |= (1 << (irq % 32));
	bus_write_4(sc->res, msireg, reg);
}

static void
xlnx_pcib_msi_disable_intr(device_t dev, struct intr_irqsrc *isrc)
{

	xlnx_pcib_msi_mask(dev, isrc, true);
}

static void
xlnx_pcib_msi_enable_intr(device_t dev, struct intr_irqsrc *isrc)
{

	xlnx_pcib_msi_mask(dev, isrc, false);
}

static void
xlnx_pcib_msi_post_filter(device_t dev, struct intr_irqsrc *isrc)
{

}

static void
xlnx_pcib_msi_post_ithread(device_t dev, struct intr_irqsrc *isrc)
{

	xlnx_pcib_msi_mask(dev, isrc, false);
}

static void
xlnx_pcib_msi_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
{

	xlnx_pcib_msi_mask(dev, isrc, true);
}

static int
xlnx_pcib_msi_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{

	return (0);
}

static int
xlnx_pcib_msi_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{

	return (0);
}
static device_method_t xlnx_pcib_fdt_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		xlnx_pcib_fdt_probe),
	DEVMETHOD(device_attach,	xlnx_pcib_fdt_attach),

	/* pcib interface */
	DEVMETHOD(pcib_get_id,		xlnx_pcib_fdt_get_id),
	DEVMETHOD(pcib_read_config,	xlnx_pcib_read_config),
	DEVMETHOD(pcib_write_config,	xlnx_pcib_write_config),
	DEVMETHOD(pcib_alloc_msi,	xlnx_pcib_alloc_msi),
	DEVMETHOD(pcib_release_msi,	xlnx_pcib_release_msi),
	DEVMETHOD(pcib_map_msi,		xlnx_pcib_map_msi),

	/* MSI interface */
	DEVMETHOD(msi_alloc_msi,	xlnx_pcib_msi_alloc_msi),
	DEVMETHOD(msi_release_msi,	xlnx_pcib_msi_release_msi),
	DEVMETHOD(msi_map_msi,		xlnx_pcib_msi_map_msi),

	/* Interrupt controller interface */
	DEVMETHOD(pic_disable_intr,	xlnx_pcib_msi_disable_intr),
	DEVMETHOD(pic_enable_intr,	xlnx_pcib_msi_enable_intr),
	DEVMETHOD(pic_setup_intr,	xlnx_pcib_msi_setup_intr),
	DEVMETHOD(pic_teardown_intr,	xlnx_pcib_msi_teardown_intr),
	DEVMETHOD(pic_post_filter,	xlnx_pcib_msi_post_filter),
	DEVMETHOD(pic_post_ithread,	xlnx_pcib_msi_post_ithread),
	DEVMETHOD(pic_pre_ithread,	xlnx_pcib_msi_pre_ithread),

	/* End */
	DEVMETHOD_END
};

DEFINE_CLASS_1(pcib, xlnx_pcib_fdt_driver, xlnx_pcib_fdt_methods,
    sizeof(struct xlnx_pcib_softc), generic_pcie_fdt_driver);

static devclass_t xlnx_pcib_fdt_devclass;

DRIVER_MODULE(xlnx_pcib, simplebus, xlnx_pcib_fdt_driver,
    xlnx_pcib_fdt_devclass, 0, 0);
DRIVER_MODULE(xlnx_pcib, ofwbus, xlnx_pcib_fdt_driver,
    xlnx_pcib_fdt_devclass, 0, 0);