/*-
 * Copyright (c) 2015 Ruslan Bukin <br@bsdpad.com>
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Semihalf under
 * the sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Generic ECAM PCIe driver */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_platform.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/cpuset.h>
#include <sys/rwlock.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/ofw_pci.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcib_private.h>
#include <dev/pci/pci_host_generic.h>

#include <machine/cpu.h>
#include <machine/bus.h>
#include <machine/intr.h>
#include <vm/vm_page.h>

#include "pcib_if.h"

/* Assembling ECAM Configuration Address */
#define	PCIE_BUS_SHIFT		20
#define	PCIE_SLOT_SHIFT		15
#define	PCIE_FUNC_SHIFT		12
#define	PCIE_BUS_MASK		0xFF
#define	PCIE_SLOT_MASK		0x1F
#define	PCIE_FUNC_MASK		0x07
#define	PCIE_REG_MASK		0xFFF

#define	PCIE_ADDR_OFFSET(bus, slot, func, reg)			\
	((((bus) & PCIE_BUS_MASK) << PCIE_BUS_SHIFT)	|	\
	(((slot) & PCIE_SLOT_MASK) << PCIE_SLOT_SHIFT)	|	\
	(((func) & PCIE_FUNC_MASK) << PCIE_FUNC_SHIFT)	|	\
	((reg) & PCIE_REG_MASK))
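
/*
 * Worked example (illustrative note, not part of the original code):
 * for bus 2, slot 3, function 1, register 0x10 the macro above yields
 *
 *	(2 << 20) | (3 << 15) | (1 << 12) | 0x10 = 0x219010
 *
 * i.e. each function gets a 4 KB configuration window, each bus spans
 * 1 MB (32 slots x 8 functions), and 256 buses cover the standard
 * 256 MB ECAM region that these shifts and masks assume.
 */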

#define	PCI_IO_WINDOW_OFFSET	0x1000

#define	SPACE_CODE_SHIFT	24
#define	SPACE_CODE_MASK		0x3
#define	SPACE_CODE_IO_SPACE	0x1
#define	PROPS_CELL_SIZE		1
#define	PCI_ADDR_CELL_SIZE	2

/* OFW bus interface */
struct generic_pcie_ofw_devinfo {
	struct ofw_bus_devinfo	di_dinfo;
	struct resource_list	di_rl;
};

/* Forward prototypes */

static int generic_pcie_probe(device_t dev);
static int parse_pci_mem_ranges(struct generic_pcie_softc *sc);
static uint32_t generic_pcie_read_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, int bytes);
static void generic_pcie_write_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, uint32_t val, int bytes);
static int generic_pcie_maxslots(device_t dev);
static int generic_pcie_read_ivar(device_t dev, device_t child, int index,
    uintptr_t *result);
static int generic_pcie_write_ivar(device_t dev, device_t child, int index,
    uintptr_t value);
static struct resource *generic_pcie_alloc_resource_ofw(device_t, device_t,
    int, int *, rman_res_t, rman_res_t, rman_res_t, u_int);
static struct resource *generic_pcie_alloc_resource_pcie(device_t dev,
    device_t child, int type, int *rid, rman_res_t start, rman_res_t end,
    rman_res_t count, u_int flags);
static int generic_pcie_release_resource(device_t dev, device_t child,
    int type, int rid, struct resource *res);
static int generic_pcie_release_resource_ofw(device_t, device_t, int, int,
    struct resource *);
static int generic_pcie_release_resource_pcie(device_t, device_t, int, int,
    struct resource *);
static int generic_pcie_ofw_bus_attach(device_t);
static const struct ofw_bus_devinfo *generic_pcie_ofw_get_devinfo(device_t,
    device_t);

static __inline void
get_addr_size_cells(phandle_t node, pcell_t *addr_cells, pcell_t *size_cells)
{

	*addr_cells = 2;
	/* Find address cells if present */
	OF_getencprop(node, "#address-cells", addr_cells, sizeof(*addr_cells));

	*size_cells = 2;
	/* Find size cells if present */
	OF_getencprop(node, "#size-cells", size_cells, sizeof(*size_cells));
}

static int
generic_pcie_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_is_compatible(dev, "pci-host-ecam-generic")) {
		device_set_desc(dev, "Generic PCI host controller");
		return (BUS_PROBE_GENERIC);
	}
	if (ofw_bus_is_compatible(dev, "arm,gem5_pcie")) {
		device_set_desc(dev, "GEM5 PCIe host controller");
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

int
pci_host_generic_attach(device_t dev)
{
	struct generic_pcie_softc *sc;
	uint64_t phys_base;
	uint64_t pci_base;
	uint64_t size;
	int error;
	int tuple;
	int rid;

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* Retrieve 'ranges' property from FDT */
	if (bootverbose)
		device_printf(dev, "parsing FDT for ECAM%d:\n",
		    sc->ecam);
	if (parse_pci_mem_ranges(sc))
		return (ENXIO);

	/* Attach OFW bus */
	if (generic_pcie_ofw_bus_attach(dev) != 0)
		return (ENXIO);

	rid = 0;
	sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (sc->res == NULL) {
		device_printf(dev, "could not map memory.\n");
		return (ENXIO);
	}

	sc->bst = rman_get_bustag(sc->res);
	sc->bsh = rman_get_bushandle(sc->res);

	sc->mem_rman.rm_type = RMAN_ARRAY;
	sc->mem_rman.rm_descr = "PCIe Memory";
	sc->io_rman.rm_type = RMAN_ARRAY;
	sc->io_rman.rm_descr = "PCIe IO window";

	/* Initialize rman and allocate memory regions */
	error = rman_init(&sc->mem_rman);
	if (error) {
		device_printf(dev, "rman_init() failed. error = %d\n", error);
		return (error);
	}

	error = rman_init(&sc->io_rman);
	if (error) {
		device_printf(dev, "rman_init() failed. error = %d\n", error);
		return (error);
	}

	for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) {
		phys_base = sc->ranges[tuple].phys_base;
		pci_base = sc->ranges[tuple].pci_base;
		size = sc->ranges[tuple].size;
		if (phys_base == 0 || size == 0)
			continue; /* empty range element */
		if (sc->ranges[tuple].flags & FLAG_MEM) {
			error = rman_manage_region(&sc->mem_rman,
			    phys_base, phys_base + size - 1);
		} else if (sc->ranges[tuple].flags & FLAG_IO) {
			error = rman_manage_region(&sc->io_rman,
			    pci_base + PCI_IO_WINDOW_OFFSET,
			    pci_base + PCI_IO_WINDOW_OFFSET + size - 1);
		} else
			continue;
		if (error) {
			device_printf(dev, "rman_manage_region() failed. "
			    "error = %d\n", error);
			rman_fini(&sc->mem_rman);
			return (error);
		}
	}

	ofw_bus_setup_iinfo(ofw_bus_get_node(dev), &sc->pci_iinfo,
	    sizeof(cell_t));

	device_add_child(dev, "pci", -1);
	return (bus_generic_attach(dev));
}
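
/*
 * Note (added summary, not part of the original code): with the cell
 * counts checked below (#address-cells = <3> and #size-cells = <2> on
 * the PCI node, #address-cells = <2> on the parent), every "ranges"
 * entry is seven 32-bit cells wide:
 *
 *	<phys.hi phys.mid phys.lo  cpu.hi cpu.lo  size.hi size.lo>
 *
 * Bits [25:24] of phys.hi carry the space code; 0x1 selects I/O space
 * (FLAG_IO), anything else is treated as memory space (FLAG_MEM).
 */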
static int
parse_pci_mem_ranges(struct generic_pcie_softc *sc)
{
	pcell_t pci_addr_cells, parent_addr_cells;
	pcell_t attributes, size_cells;
	cell_t *base_ranges;
	int nbase_ranges;
	phandle_t node;
	int i, j, k;
	int tuple;

	node = ofw_bus_get_node(sc->dev);

	OF_getencprop(node, "#address-cells", &pci_addr_cells,
	    sizeof(pci_addr_cells));
	OF_getencprop(node, "#size-cells", &size_cells,
	    sizeof(size_cells));
	OF_getencprop(OF_parent(node), "#address-cells", &parent_addr_cells,
	    sizeof(parent_addr_cells));

	if (parent_addr_cells != 2 || pci_addr_cells != 3 || size_cells != 2) {
		device_printf(sc->dev,
		    "Unexpected number of address or size cells in FDT\n");
		return (ENXIO);
	}

	nbase_ranges = OF_getproplen(node, "ranges");
	sc->nranges = nbase_ranges / sizeof(cell_t) /
	    (parent_addr_cells + pci_addr_cells + size_cells);
	base_ranges = malloc(nbase_ranges, M_DEVBUF, M_WAITOK);
	OF_getencprop(node, "ranges", base_ranges, nbase_ranges);

	for (i = 0, j = 0; i < sc->nranges; i++) {
		attributes = (base_ranges[j++] >> SPACE_CODE_SHIFT) &
		    SPACE_CODE_MASK;
		if (attributes == SPACE_CODE_IO_SPACE) {
			sc->ranges[i].flags |= FLAG_IO;
		} else {
			sc->ranges[i].flags |= FLAG_MEM;
		}

		sc->ranges[i].pci_base = 0;
		for (k = 0; k < (pci_addr_cells - 1); k++) {
			sc->ranges[i].pci_base <<= 32;
			sc->ranges[i].pci_base |= base_ranges[j++];
		}
		sc->ranges[i].phys_base = 0;
		for (k = 0; k < parent_addr_cells; k++) {
			sc->ranges[i].phys_base <<= 32;
			sc->ranges[i].phys_base |= base_ranges[j++];
		}
		sc->ranges[i].size = 0;
		for (k = 0; k < size_cells; k++) {
			sc->ranges[i].size <<= 32;
			sc->ranges[i].size |= base_ranges[j++];
		}
	}

	for (; i < MAX_RANGES_TUPLES; i++) {
		/* zero-fill remaining tuples to mark empty elements in array */
		sc->ranges[i].pci_base = 0;
		sc->ranges[i].phys_base = 0;
		sc->ranges[i].size = 0;
	}

	if (bootverbose) {
		for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) {
			device_printf(sc->dev,
			    "\tPCI addr: 0x%jx, CPU addr: 0x%jx, Size: 0x%jx\n",
			    sc->ranges[tuple].pci_base,
			    sc->ranges[tuple].phys_base,
			    sc->ranges[tuple].size);
		}
	}

	free(base_ranges, M_DEVBUF);
	return (0);
}

static uint32_t
generic_pcie_read_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, int bytes)
{
	struct generic_pcie_softc *sc;
	bus_space_handle_t h;
	bus_space_tag_t t;
	uint64_t offset;
	uint32_t data;

	if ((bus > PCI_BUSMAX) || (slot > PCI_SLOTMAX) ||
	    (func > PCI_FUNCMAX) || (reg > PCIE_REGMAX))
		return (~0U);

	sc = device_get_softc(dev);

	offset = PCIE_ADDR_OFFSET(bus, slot, func, reg);
	t = sc->bst;
	h = sc->bsh;

	switch (bytes) {
	case 1:
		data = bus_space_read_1(t, h, offset);
		break;
	case 2:
		data = le16toh(bus_space_read_2(t, h, offset));
		break;
	case 4:
		data = le32toh(bus_space_read_4(t, h, offset));
		break;
	default:
		return (~0U);
	}

	return (data);
}

static void
generic_pcie_write_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, uint32_t val, int bytes)
{
	struct generic_pcie_softc *sc;
	bus_space_handle_t h;
	bus_space_tag_t t;
	uint64_t offset;

	if ((bus > PCI_BUSMAX) || (slot > PCI_SLOTMAX) ||
	    (func > PCI_FUNCMAX) || (reg > PCIE_REGMAX))
		return;

	sc = device_get_softc(dev);

	offset = PCIE_ADDR_OFFSET(bus, slot, func, reg);

	t = sc->bst;
	h = sc->bsh;

	switch (bytes) {
	case 1:
		bus_space_write_1(t, h, offset, val);
		break;
	case 2:
		bus_space_write_2(t, h, offset, htole16(val));
		break;
	case 4:
		bus_space_write_4(t, h, offset, htole32(val));
		break;
	default:
		return;
	}
}

static int
generic_pcie_maxslots(device_t dev)
{

	return (31); /* max slots per bus acc. to standard */
}

static int
generic_pcie_route_interrupt(device_t bus, device_t dev, int pin)
{
	struct generic_pcie_softc *sc;
	struct ofw_pci_register reg;
	uint32_t pintr, mintr[2];
	phandle_t iparent;
	int intrcells;

	sc = device_get_softc(bus);
	pintr = pin;

	bzero(&reg, sizeof(reg));
	reg.phys_hi = (pci_get_bus(dev) << OFW_PCI_PHYS_HI_BUSSHIFT) |
	    (pci_get_slot(dev) << OFW_PCI_PHYS_HI_DEVICESHIFT) |
	    (pci_get_function(dev) << OFW_PCI_PHYS_HI_FUNCTIONSHIFT);

	intrcells = ofw_bus_lookup_imap(ofw_bus_get_node(dev),
	    &sc->pci_iinfo, &reg, sizeof(reg), &pintr, sizeof(pintr),
	    mintr, sizeof(mintr), &iparent);
	if (intrcells) {
		pintr = ofw_bus_map_intr(dev, iparent, intrcells, mintr);
		return (pintr);
	}

	device_printf(bus, "could not route pin %d for device %d.%d\n",
	    pin, pci_get_slot(dev), pci_get_function(dev));
	return (PCI_INVALID_IRQ);
}
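
/*
 * Note (added summary, not part of the original code): INTx routing
 * above packs the child's bus/slot/function into an OFW PCI unit
 * address (reg.phys_hi), looks up that address plus the pin number in
 * the bridge's "interrupt-map" via ofw_bus_lookup_imap(), and maps the
 * resulting interrupt specifier to an IRQ number with
 * ofw_bus_map_intr().  MSI/MSI-X allocation, by contrast, is delegated
 * to the arm_*_msi pcib methods registered below on arm64.
 */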

static int
generic_pcie_read_ivar(device_t dev, device_t child, int index,
    uintptr_t *result)
{
	struct generic_pcie_softc *sc;
	int secondary_bus;

	sc = device_get_softc(dev);

	if (index == PCIB_IVAR_BUS) {
		/* this pcib adds only pci bus 0 as child */
		secondary_bus = 0;
		*result = secondary_bus;
		return (0);
	}

	if (index == PCIB_IVAR_DOMAIN) {
		*result = sc->ecam;
		return (0);
	}

	if (bootverbose)
		device_printf(dev, "ERROR: Unknown index %d.\n", index);
	return (ENOENT);
}

static int
generic_pcie_write_ivar(device_t dev, device_t child, int index,
    uintptr_t value)
{

	return (ENOENT);
}

static struct rman *
generic_pcie_rman(struct generic_pcie_softc *sc, int type)
{

	switch (type) {
	case SYS_RES_IOPORT:
		return (&sc->io_rman);
	case SYS_RES_MEMORY:
		return (&sc->mem_rman);
	default:
		break;
	}

	return (NULL);
}

static int
generic_pcie_release_resource_pcie(device_t dev, device_t child, int type,
    int rid, struct resource *res)
{
	struct generic_pcie_softc *sc;
	struct rman *rm;

	sc = device_get_softc(dev);

	rm = generic_pcie_rman(sc, type);
	if (rm != NULL) {
		KASSERT(rman_is_region_manager(res, rm), ("rman mismatch"));
		rman_release_resource(res);
	}

	return (bus_generic_release_resource(dev, child, type, rid, res));
}

static int
generic_pcie_release_resource(device_t dev, device_t child, int type,
    int rid, struct resource *res)
{

	/* For PCIe devices that do not have FDT nodes, use PCIB method */
	if ((int)ofw_bus_get_node(child) <= 0) {
		return (generic_pcie_release_resource_pcie(dev,
		    child, type, rid, res));
	}

	/* For other devices use OFW method */
	return (generic_pcie_release_resource_ofw(dev,
	    child, type, rid, res));
}

struct resource *
pci_host_generic_alloc_resource(device_t dev, device_t child, int type,
    int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{

	/* For PCIe devices that do not have FDT nodes, use PCIB method */
	if ((int)ofw_bus_get_node(child) <= 0)
		return (generic_pcie_alloc_resource_pcie(dev, child, type, rid,
		    start, end, count, flags));

	/* For other devices use OFW method */
	return (generic_pcie_alloc_resource_ofw(dev, child, type, rid,
	    start, end, count, flags));
}

static struct resource *
generic_pcie_alloc_resource_pcie(device_t dev, device_t child, int type,
    int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct generic_pcie_softc *sc;
	struct resource *res;
	struct rman *rm;

	sc = device_get_softc(dev);

	rm = generic_pcie_rman(sc, type);
	if (rm == NULL)
		return (BUS_ALLOC_RESOURCE(device_get_parent(dev), dev,
		    type, rid, start, end, count, flags));

	if (bootverbose) {
		device_printf(dev,
		    "rman_reserve_resource: start=%#jx, end=%#jx, count=%#jx\n",
		    start, end, count);
	}

	res = rman_reserve_resource(rm, start, end, count, flags, child);
	if (res == NULL)
		goto fail;

	rman_set_rid(res, *rid);

	if (flags & RF_ACTIVE)
		if (bus_activate_resource(child, type, *rid, res)) {
			rman_release_resource(res);
			goto fail;
		}

	return (res);

fail:
	device_printf(dev, "%s FAIL: type=%d, rid=%d, "
	    "start=%016jx, end=%016jx, count=%016jx, flags=%x\n",
	    __func__, type, *rid, start, end, count, flags);

	return (NULL);
}

static int
generic_pcie_adjust_resource(device_t dev, device_t child, int type,
    struct resource *res, rman_res_t start, rman_res_t end)
{
	struct generic_pcie_softc *sc;
	struct rman *rm;

	sc = device_get_softc(dev);

	rm = generic_pcie_rman(sc, type);
	if (rm != NULL)
		return (rman_adjust_resource(res, start, end));
	return (bus_generic_adjust_resource(dev, child, type, res, start, end));
}

static int
generic_pcie_activate_resource(device_t dev, device_t child, int type, int rid,
    struct resource *r)
{
	struct generic_pcie_softc *sc;
	uint64_t phys_base;
	uint64_t pci_base;
	uint64_t size;
	int found;
	int res;
	int i;

	sc = device_get_softc(dev);

	if ((res = rman_activate_resource(r)) != 0)
		return (res);

	switch (type) {
	case SYS_RES_IOPORT:
		found = 0;
		for (i = 0; i < MAX_RANGES_TUPLES; i++) {
			pci_base = sc->ranges[i].pci_base;
			phys_base = sc->ranges[i].phys_base;
			size = sc->ranges[i].size;

			if ((rid > pci_base) && (rid < (pci_base + size))) {
				found = 1;
				break;
			}
		}
		if (found) {
			rman_set_start(r, rman_get_start(r) + phys_base);
			BUS_ACTIVATE_RESOURCE(device_get_parent(dev), child,
			    type, rid, r);
		} else {
			device_printf(dev,
			    "Failed to activate IOPORT resource\n");
			res = 0;
		}
		break;
	case SYS_RES_MEMORY:
		BUS_ACTIVATE_RESOURCE(device_get_parent(dev), child, type,
		    rid, r);
		break;
	default:
		break;
	}

	return (res);
}

static int
generic_pcie_deactivate_resource(device_t dev, device_t child, int type,
    int rid, struct resource *r)
{
	struct generic_pcie_softc *sc;
	vm_offset_t vaddr;
	int res;

	sc = device_get_softc(dev);

	if ((res = rman_deactivate_resource(r)) != 0)
		return (res);

	switch (type) {
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		vaddr = (vm_offset_t)rman_get_virtual(r);
		pmap_unmapdev(vaddr, rman_get_size(r));
		break;
	default:
		break;
	}

	return (res);
}

static device_method_t generic_pcie_methods[] = {
	DEVMETHOD(device_probe,			generic_pcie_probe),
	DEVMETHOD(device_attach,		pci_host_generic_attach),
	DEVMETHOD(bus_read_ivar,		generic_pcie_read_ivar),
	DEVMETHOD(bus_write_ivar,		generic_pcie_write_ivar),
	DEVMETHOD(bus_alloc_resource,		pci_host_generic_alloc_resource),
	DEVMETHOD(bus_adjust_resource,		generic_pcie_adjust_resource),
	DEVMETHOD(bus_release_resource,		generic_pcie_release_resource),
	DEVMETHOD(bus_activate_resource,	generic_pcie_activate_resource),
	DEVMETHOD(bus_deactivate_resource,	generic_pcie_deactivate_resource),
	DEVMETHOD(bus_setup_intr,		bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr,		bus_generic_teardown_intr),

	/* pcib interface */
	DEVMETHOD(pcib_maxslots,		generic_pcie_maxslots),
	DEVMETHOD(pcib_route_interrupt,		generic_pcie_route_interrupt),
	DEVMETHOD(pcib_read_config,		generic_pcie_read_config),
	DEVMETHOD(pcib_write_config,		generic_pcie_write_config),
#if defined(__aarch64__)
	DEVMETHOD(pcib_alloc_msi,		arm_alloc_msi),
	DEVMETHOD(pcib_release_msi,		arm_release_msi),
	DEVMETHOD(pcib_alloc_msix,		arm_alloc_msix),
	DEVMETHOD(pcib_release_msix,		arm_release_msix),
	DEVMETHOD(pcib_map_msi,			arm_map_msi),
#endif

	/* ofw_bus interface */
	DEVMETHOD(ofw_bus_get_devinfo,		generic_pcie_ofw_get_devinfo),
	DEVMETHOD(ofw_bus_get_compat,		ofw_bus_gen_get_compat),
	DEVMETHOD(ofw_bus_get_model,		ofw_bus_gen_get_model),
	DEVMETHOD(ofw_bus_get_name,		ofw_bus_gen_get_name),
	DEVMETHOD(ofw_bus_get_node,		ofw_bus_gen_get_node),
	DEVMETHOD(ofw_bus_get_type,		ofw_bus_gen_get_type),

	DEVMETHOD_END
};

static const struct ofw_bus_devinfo *
generic_pcie_ofw_get_devinfo(device_t bus __unused, device_t child)
{
	struct generic_pcie_ofw_devinfo *di;

	di = device_get_ivars(child);
	return (&di->di_dinfo);
}
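
/*
 * Note (added summary, not part of the original code): children that do
 * have FDT nodes take their default resource windows from the resource
 * lists built in generic_pcie_ofw_bus_attach() below.  For memory-type
 * requests the address is then translated through the cached "ranges"
 * table (subtracting the CPU physical base and adding the PCI base)
 * before the request is forwarded with bus_generic_alloc_resource().
 */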

static struct resource *
generic_pcie_alloc_resource_ofw(device_t bus, device_t child, int type,
    int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct generic_pcie_softc *sc;
	struct generic_pcie_ofw_devinfo *di;
	struct resource_list_entry *rle;
	int i;

	sc = device_get_softc(bus);

	if (RMAN_IS_DEFAULT_RANGE(start, end)) {
		if ((di = device_get_ivars(child)) == NULL)
			return (NULL);
		if (type == SYS_RES_IOPORT)
			type = SYS_RES_MEMORY;

		/* Find defaults for this rid */
		rle = resource_list_find(&di->di_rl, type, *rid);
		if (rle == NULL)
			return (NULL);

		start = rle->start;
		end = rle->end;
		count = rle->count;
	}

	if (type == SYS_RES_MEMORY) {
		/* Remap through ranges property */
		for (i = 0; i < MAX_RANGES_TUPLES; i++) {
			if (start >= sc->ranges[i].phys_base && end <
			    sc->ranges[i].pci_base + sc->ranges[i].size) {
				start -= sc->ranges[i].phys_base;
				start += sc->ranges[i].pci_base;
				end -= sc->ranges[i].phys_base;
				end += sc->ranges[i].pci_base;
				break;
			}
		}

		if (i == MAX_RANGES_TUPLES) {
			device_printf(bus, "Could not map resource "
			    "%#jx-%#jx\n", start, end);
			return (NULL);
		}
	}

	return (bus_generic_alloc_resource(bus, child, type, rid, start, end,
	    count, flags));
}

static int
generic_pcie_release_resource_ofw(device_t bus, device_t child, int type,
    int rid, struct resource *res)
{

	return (bus_generic_release_resource(bus, child, type, rid, res));
}

/* Helper functions */

static int
generic_pcie_ofw_bus_attach(device_t dev)
{
	struct generic_pcie_ofw_devinfo *di;
	device_t child;
	phandle_t parent, node;
	pcell_t addr_cells, size_cells;

	parent = ofw_bus_get_node(dev);
	if (parent > 0) {
		get_addr_size_cells(parent, &addr_cells, &size_cells);
		/* Iterate through all bus subordinates */
		for (node = OF_child(parent); node > 0; node = OF_peer(node)) {

			/* Allocate and populate devinfo. */
			di = malloc(sizeof(*di), M_DEVBUF, M_WAITOK | M_ZERO);
			if (ofw_bus_gen_setup_devinfo(&di->di_dinfo, node) != 0) {
				free(di, M_DEVBUF);
				continue;
			}

			/* Initialize and populate resource list. */
			resource_list_init(&di->di_rl);
			ofw_bus_reg_to_rl(dev, node, addr_cells, size_cells,
			    &di->di_rl);
			ofw_bus_intr_to_rl(dev, node, &di->di_rl, NULL);

			/* Add newbus device for this FDT node */
			child = device_add_child(dev, NULL, -1);
			if (child == NULL) {
				resource_list_free(&di->di_rl);
				ofw_bus_gen_destroy_devinfo(&di->di_dinfo);
				free(di, M_DEVBUF);
				continue;
			}

			device_set_ivars(child, di);
		}
	}

	return (0);
}

DEFINE_CLASS_0(pcib, generic_pcie_driver,
    generic_pcie_methods, sizeof(struct generic_pcie_softc));

devclass_t generic_pcie_devclass;

DRIVER_MODULE(pcib, simplebus, generic_pcie_driver,
    generic_pcie_devclass, 0, 0);
DRIVER_MODULE(pcib, ofwbus, generic_pcie_driver,
    generic_pcie_devclass, 0, 0);