/*-
 * Copyright (c) 2015 Ruslan Bukin <br@bsdpad.com>
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Semihalf under
 * the sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Generic ECAM PCIe driver */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_platform.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/cpuset.h>
#include <sys/rwlock.h>

#if defined(INTRNG)
#include <machine/intr.h>
#endif

#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/ofw_pci.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcib_private.h>
#include <dev/pci/pci_host_generic.h>

#include <machine/cpu.h>
#include <machine/bus.h>
#include <machine/intr.h>
#include <vm/vm_page.h>

#include "pcib_if.h"

/* Assembling ECAM Configuration Address */
#define	PCIE_BUS_SHIFT		20
#define	PCIE_SLOT_SHIFT		15
#define	PCIE_FUNC_SHIFT		12
#define	PCIE_BUS_MASK		0xFF
#define	PCIE_SLOT_MASK		0x1F
#define	PCIE_FUNC_MASK		0x07
#define	PCIE_REG_MASK		0xFFF

#define	PCIE_ADDR_OFFSET(bus, slot, func, reg)			\
	((((bus) & PCIE_BUS_MASK) << PCIE_BUS_SHIFT)	|	\
	(((slot) & PCIE_SLOT_MASK) << PCIE_SLOT_SHIFT)	|	\
	(((func) & PCIE_FUNC_MASK) << PCIE_FUNC_SHIFT)	|	\
	((reg) & PCIE_REG_MASK))
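
/*
 * Example (illustrative): for bus 2, slot 1, function 0 and register 0x10,
 * PCIE_ADDR_OFFSET() yields (2 << 20) | (1 << 15) | (0 << 12) | 0x10 =
 * 0x208010, the byte offset of that function's configuration register
 * within the ECAM window.
 */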

#define	PCI_IO_WINDOW_OFFSET	0x1000

#define	SPACE_CODE_SHIFT	24
#define	SPACE_CODE_MASK		0x3
#define	SPACE_CODE_IO_SPACE	0x1
#define	PROPS_CELL_SIZE		1
#define	PCI_ADDR_CELL_SIZE	2

/* OFW bus interface */
struct generic_pcie_ofw_devinfo {
	struct ofw_bus_devinfo	di_dinfo;
	struct resource_list	di_rl;
};

/* Forward prototypes */

static int generic_pcie_probe(device_t dev);
static int parse_pci_mem_ranges(struct generic_pcie_softc *sc);
static uint32_t generic_pcie_read_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, int bytes);
static void generic_pcie_write_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, uint32_t val, int bytes);
static int generic_pcie_maxslots(device_t dev);
static int generic_pcie_read_ivar(device_t dev, device_t child, int index,
    uintptr_t *result);
static int generic_pcie_write_ivar(device_t dev, device_t child, int index,
    uintptr_t value);
static struct resource *generic_pcie_alloc_resource_ofw(device_t, device_t,
    int, int *, rman_res_t, rman_res_t, rman_res_t, u_int);
static struct resource *generic_pcie_alloc_resource_pcie(device_t dev,
    device_t child, int type, int *rid, rman_res_t start, rman_res_t end,
    rman_res_t count, u_int flags);
static int generic_pcie_release_resource(device_t dev, device_t child,
    int type, int rid, struct resource *res);
static int generic_pcie_release_resource_ofw(device_t, device_t, int, int,
    struct resource *);
static int generic_pcie_release_resource_pcie(device_t, device_t, int, int,
    struct resource *);
static int generic_pcie_ofw_bus_attach(device_t);
static const struct ofw_bus_devinfo *generic_pcie_ofw_get_devinfo(device_t,
    device_t);

static __inline void
get_addr_size_cells(phandle_t node, pcell_t *addr_cells, pcell_t *size_cells)
{

	*addr_cells = 2;
	/* Find address cells if present */
	OF_getencprop(node, "#address-cells", addr_cells, sizeof(*addr_cells));

	*size_cells = 2;
	/* Find size cells if present */
	OF_getencprop(node, "#size-cells", size_cells, sizeof(*size_cells));
}

static int
generic_pcie_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_is_compatible(dev, "pci-host-ecam-generic")) {
		device_set_desc(dev, "Generic PCI host controller");
		return (BUS_PROBE_GENERIC);
	}
	if (ofw_bus_is_compatible(dev, "arm,gem5_pcie")) {
		device_set_desc(dev, "GEM5 PCIe host controller");
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}
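
/*
 * A hypothetical FDT node this driver attaches to (illustrative values,
 * not taken from a real platform):
 *
 *	pcie@3f000000 {
 *		compatible = "pci-host-ecam-generic";
 *		device_type = "pci";
 *		#address-cells = <3>;
 *		#size-cells = <2>;
 *		reg = <0x0 0x3f000000 0x0 0x1000000>;	(ECAM window)
 *		ranges = < ... PCI I/O and memory windows ... >;
 *	};
 *
 * The 'reg' window is mapped for configuration accesses and 'ranges' is
 * decoded by parse_pci_mem_ranges() below.
 */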

int
pci_host_generic_attach(device_t dev)
{
	struct generic_pcie_softc *sc;
	uint64_t phys_base;
	uint64_t pci_base;
	uint64_t size;
	phandle_t node;
	int error;
	int tuple;
	int rid;

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* Retrieve 'ranges' property from FDT */
	if (bootverbose)
		device_printf(dev, "parsing FDT for ECAM%d:\n",
		    sc->ecam);
	if (parse_pci_mem_ranges(sc))
		return (ENXIO);

	/* Attach OFW bus */
	if (generic_pcie_ofw_bus_attach(dev) != 0)
		return (ENXIO);

	node = ofw_bus_get_node(dev);
	if (sc->coherent == 0) {
		sc->coherent = OF_hasprop(node, "dma-coherent");
	}
	if (bootverbose)
		device_printf(dev, "Bus is%s cache-coherent\n",
		    sc->coherent ? "" : " not");

	/* Create the parent DMA tag to pass down the coherent flag */
	error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
	    1, 0,				/* alignment, bounds */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    BUS_SPACE_MAXSIZE,			/* maxsize */
	    BUS_SPACE_UNRESTRICTED,		/* nsegments */
	    BUS_SPACE_MAXSIZE,			/* maxsegsize */
	    sc->coherent ? BUS_DMA_COHERENT : 0, /* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->dmat);
	if (error != 0)
		return (error);

	rid = 0;
	sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (sc->res == NULL) {
		device_printf(dev, "could not map memory.\n");
		return (ENXIO);
	}

	sc->bst = rman_get_bustag(sc->res);
	sc->bsh = rman_get_bushandle(sc->res);

	sc->mem_rman.rm_type = RMAN_ARRAY;
	sc->mem_rman.rm_descr = "PCIe Memory";
	sc->io_rman.rm_type = RMAN_ARRAY;
	sc->io_rman.rm_descr = "PCIe IO window";

	/* Initialize rman and allocate memory regions */
	error = rman_init(&sc->mem_rman);
	if (error) {
		device_printf(dev, "rman_init() failed. error = %d\n", error);
		return (error);
	}

	error = rman_init(&sc->io_rman);
	if (error) {
		device_printf(dev, "rman_init() failed. error = %d\n", error);
		return (error);
	}

	for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) {
		phys_base = sc->ranges[tuple].phys_base;
		pci_base = sc->ranges[tuple].pci_base;
		size = sc->ranges[tuple].size;
		if (phys_base == 0 || size == 0)
			continue; /* empty range element */
		if (sc->ranges[tuple].flags & FLAG_MEM) {
			error = rman_manage_region(&sc->mem_rman,
			    phys_base, phys_base + size - 1);
		} else if (sc->ranges[tuple].flags & FLAG_IO) {
			error = rman_manage_region(&sc->io_rman,
			    pci_base + PCI_IO_WINDOW_OFFSET,
			    pci_base + PCI_IO_WINDOW_OFFSET + size - 1);
		} else
			continue;
		if (error) {
			device_printf(dev, "rman_manage_region() failed. "
			    "error = %d\n", error);
			rman_fini(&sc->mem_rman);
			return (error);
		}
	}

	ofw_bus_setup_iinfo(node, &sc->pci_iinfo, sizeof(cell_t));

	device_add_child(dev, "pci", -1);
	return (bus_generic_attach(dev));
}
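
/*
 * Parse the 'ranges' property into sc->ranges.  Each entry is
 * (pci_addr_cells + parent_addr_cells + size_cells) cells wide and the
 * space code in bits 24-25 of the first PCI address cell selects I/O
 * versus memory space.  An illustrative entry (values are made up):
 *
 *	<0x01000000 0x0 0x0  0x0 0x3eff0000  0x0 0x10000>
 *
 * decodes to an I/O window with PCI base 0x0, CPU base 0x3eff0000 and
 * size 0x10000.
 */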
static int
parse_pci_mem_ranges(struct generic_pcie_softc *sc)
{
	pcell_t pci_addr_cells, parent_addr_cells;
	pcell_t attributes, size_cells;
	cell_t *base_ranges;
	int nbase_ranges;
	phandle_t node;
	int i, j, k;
	int tuple;

	node = ofw_bus_get_node(sc->dev);

	OF_getencprop(node, "#address-cells", &pci_addr_cells,
	    sizeof(pci_addr_cells));
	OF_getencprop(node, "#size-cells", &size_cells,
	    sizeof(size_cells));
	OF_getencprop(OF_parent(node), "#address-cells", &parent_addr_cells,
	    sizeof(parent_addr_cells));

	if (parent_addr_cells != 2 || pci_addr_cells != 3 || size_cells != 2) {
		device_printf(sc->dev,
		    "Unexpected number of address or size cells in FDT\n");
		return (ENXIO);
	}

	nbase_ranges = OF_getproplen(node, "ranges");
	sc->nranges = nbase_ranges / sizeof(cell_t) /
	    (parent_addr_cells + pci_addr_cells + size_cells);
	base_ranges = malloc(nbase_ranges, M_DEVBUF, M_WAITOK);
	OF_getencprop(node, "ranges", base_ranges, nbase_ranges);

	for (i = 0, j = 0; i < sc->nranges; i++) {
		attributes = (base_ranges[j++] >> SPACE_CODE_SHIFT) &
		    SPACE_CODE_MASK;
		if (attributes == SPACE_CODE_IO_SPACE) {
			sc->ranges[i].flags |= FLAG_IO;
		} else {
			sc->ranges[i].flags |= FLAG_MEM;
		}

		sc->ranges[i].pci_base = 0;
		for (k = 0; k < (pci_addr_cells - 1); k++) {
			sc->ranges[i].pci_base <<= 32;
			sc->ranges[i].pci_base |= base_ranges[j++];
		}
		sc->ranges[i].phys_base = 0;
		for (k = 0; k < parent_addr_cells; k++) {
			sc->ranges[i].phys_base <<= 32;
			sc->ranges[i].phys_base |= base_ranges[j++];
		}
		sc->ranges[i].size = 0;
		for (k = 0; k < size_cells; k++) {
			sc->ranges[i].size <<= 32;
			sc->ranges[i].size |= base_ranges[j++];
		}
	}

	for (; i < MAX_RANGES_TUPLES; i++) {
		/* zero-fill remaining tuples to mark empty elements in array */
		sc->ranges[i].pci_base = 0;
		sc->ranges[i].phys_base = 0;
		sc->ranges[i].size = 0;
	}

	if (bootverbose) {
		for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) {
			device_printf(sc->dev,
			    "\tPCI addr: 0x%jx, CPU addr: 0x%jx, Size: 0x%jx\n",
			    sc->ranges[tuple].pci_base,
			    sc->ranges[tuple].phys_base,
			    sc->ranges[tuple].size);
		}
	}

	free(base_ranges, M_DEVBUF);
	return (0);
}

static uint32_t
generic_pcie_read_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, int bytes)
{
	struct generic_pcie_softc *sc;
	bus_space_handle_t h;
	bus_space_tag_t t;
	uint64_t offset;
	uint32_t data;

	if ((bus > PCI_BUSMAX) || (slot > PCI_SLOTMAX) ||
	    (func > PCI_FUNCMAX) || (reg > PCIE_REGMAX))
		return (~0U);

	sc = device_get_softc(dev);

	offset = PCIE_ADDR_OFFSET(bus, slot, func, reg);
	t = sc->bst;
	h = sc->bsh;

	switch (bytes) {
	case 1:
		data = bus_space_read_1(t, h, offset);
		break;
	case 2:
		data = le16toh(bus_space_read_2(t, h, offset));
		break;
	case 4:
		data = le32toh(bus_space_read_4(t, h, offset));
		break;
	default:
		return (~0U);
	}

	return (data);
}

static void
generic_pcie_write_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, uint32_t val, int bytes)
{
	struct generic_pcie_softc *sc;
	bus_space_handle_t h;
	bus_space_tag_t t;
	uint64_t offset;

	if ((bus > PCI_BUSMAX) || (slot > PCI_SLOTMAX) ||
	    (func > PCI_FUNCMAX) || (reg > PCIE_REGMAX))
		return;

	sc = device_get_softc(dev);

	offset = PCIE_ADDR_OFFSET(bus, slot, func, reg);

	t = sc->bst;
	h = sc->bsh;

	switch (bytes) {
	case 1:
		bus_space_write_1(t, h, offset, val);
		break;
	case 2:
		bus_space_write_2(t, h, offset, htole16(val));
		break;
	case 4:
		bus_space_write_4(t, h, offset, htole32(val));
		break;
	default:
		return;
	}
}

static int
generic_pcie_maxslots(device_t dev)
{

	return (31); /* max slots per bus acc. to standard */
}
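
/*
 * Route a legacy INTx pin through the parent's OFW "interrupt-map": the
 * child's bus/slot/function and pin are looked up in the map and the
 * resulting interrupt specifier is translated into an IRQ number for the
 * interrupt parent.
 */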
static int
generic_pcie_route_interrupt(device_t bus, device_t dev, int pin)
{
	struct generic_pcie_softc *sc;
	struct ofw_pci_register reg;
	uint32_t pintr, mintr[2];
	phandle_t iparent;
	int intrcells;

	sc = device_get_softc(bus);
	pintr = pin;

	bzero(&reg, sizeof(reg));
	reg.phys_hi = (pci_get_bus(dev) << OFW_PCI_PHYS_HI_BUSSHIFT) |
	    (pci_get_slot(dev) << OFW_PCI_PHYS_HI_DEVICESHIFT) |
	    (pci_get_function(dev) << OFW_PCI_PHYS_HI_FUNCTIONSHIFT);

	intrcells = ofw_bus_lookup_imap(ofw_bus_get_node(dev),
	    &sc->pci_iinfo, &reg, sizeof(reg), &pintr, sizeof(pintr),
	    mintr, sizeof(mintr), &iparent);
	if (intrcells) {
		pintr = ofw_bus_map_intr(dev, iparent, intrcells, mintr);
		return (pintr);
	}

	device_printf(bus, "could not route pin %d for device %d.%d\n",
	    pin, pci_get_slot(dev), pci_get_function(dev));
	return (PCI_INVALID_IRQ);
}

static int
generic_pcie_read_ivar(device_t dev, device_t child, int index,
    uintptr_t *result)
{
	struct generic_pcie_softc *sc;
	int secondary_bus;

	sc = device_get_softc(dev);

	if (index == PCIB_IVAR_BUS) {
		/* this pcib adds only pci bus 0 as child */
		secondary_bus = 0;
		*result = secondary_bus;
		return (0);
	}

	if (index == PCIB_IVAR_DOMAIN) {
		*result = sc->ecam;
		return (0);
	}

	if (bootverbose)
		device_printf(dev, "ERROR: Unknown index %d.\n", index);
	return (ENOENT);
}

static int
generic_pcie_write_ivar(device_t dev, device_t child, int index,
    uintptr_t value)
{

	return (ENOENT);
}

static struct rman *
generic_pcie_rman(struct generic_pcie_softc *sc, int type)
{

	switch (type) {
	case SYS_RES_IOPORT:
		return (&sc->io_rman);
	case SYS_RES_MEMORY:
		return (&sc->mem_rman);
	default:
		break;
	}

	return (NULL);
}

static int
generic_pcie_release_resource_pcie(device_t dev, device_t child, int type,
    int rid, struct resource *res)
{
	struct generic_pcie_softc *sc;
	struct rman *rm;

	sc = device_get_softc(dev);

	rm = generic_pcie_rman(sc, type);
	if (rm != NULL) {
		KASSERT(rman_is_region_manager(res, rm), ("rman mismatch"));
		rman_release_resource(res);
	}

	return (bus_generic_release_resource(dev, child, type, rid, res));
}

static int
generic_pcie_release_resource(device_t dev, device_t child, int type,
    int rid, struct resource *res)
{
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	struct generic_pcie_softc *sc;

	if (type == PCI_RES_BUS) {
		sc = device_get_softc(dev);
		return (pci_domain_release_bus(sc->ecam, child, rid, res));
	}
#endif
	/* For PCIe devices that do not have FDT nodes, use PCIB method */
	if ((int)ofw_bus_get_node(child) <= 0) {
		return (generic_pcie_release_resource_pcie(dev,
		    child, type, rid, res));
	}

	/* For other devices use OFW method */
	return (generic_pcie_release_resource_ofw(dev,
	    child, type, rid, res));
}
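
/*
 * Resource allocation is dispatched three ways: PCI bus numbers go to the
 * pci_domain_*_bus() helpers (when NEW_PCIB/PCI_RES_BUS are configured),
 * children without an FDT node use the rman-backed PCIe path, and children
 * with an FDT node use the OFW path, which remaps CPU addresses through
 * the 'ranges' windows.
 */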
struct resource *
pci_host_generic_alloc_resource(device_t dev, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	struct generic_pcie_softc *sc;

	if (type == PCI_RES_BUS) {
		sc = device_get_softc(dev);
		return (pci_domain_alloc_bus(sc->ecam, child, rid, start, end,
		    count, flags));
	}
#endif
	/* For PCIe devices that do not have FDT nodes, use PCIB method */
	if ((int)ofw_bus_get_node(child) <= 0)
		return (generic_pcie_alloc_resource_pcie(dev, child, type, rid,
		    start, end, count, flags));

	/* For other devices use OFW method */
	return (generic_pcie_alloc_resource_ofw(dev, child, type, rid,
	    start, end, count, flags));
}

static struct resource *
generic_pcie_alloc_resource_pcie(device_t dev, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct generic_pcie_softc *sc;
	struct resource *res;
	struct rman *rm;

	sc = device_get_softc(dev);

	rm = generic_pcie_rman(sc, type);
	if (rm == NULL)
		return (BUS_ALLOC_RESOURCE(device_get_parent(dev), dev,
		    type, rid, start, end, count, flags));

	if (bootverbose) {
		device_printf(dev,
		    "rman_reserve_resource: start=%#jx, end=%#jx, count=%#jx\n",
		    start, end, count);
	}

	res = rman_reserve_resource(rm, start, end, count, flags, child);
	if (res == NULL)
		goto fail;

	rman_set_rid(res, *rid);

	if (flags & RF_ACTIVE)
		if (bus_activate_resource(child, type, *rid, res)) {
			rman_release_resource(res);
			goto fail;
		}

	return (res);

fail:
	device_printf(dev, "%s FAIL: type=%d, rid=%d, "
	    "start=%016jx, end=%016jx, count=%016jx, flags=%x\n",
	    __func__, type, *rid, start, end, count, flags);

	return (NULL);
}

static int
generic_pcie_adjust_resource(device_t dev, device_t child, int type,
    struct resource *res, rman_res_t start, rman_res_t end)
{
	struct generic_pcie_softc *sc;
	struct rman *rm;

	sc = device_get_softc(dev);
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	if (type == PCI_RES_BUS)
		return (pci_domain_adjust_bus(sc->ecam, child, res, start,
		    end));
#endif

	rm = generic_pcie_rman(sc, type);
	if (rm != NULL)
		return (rman_adjust_resource(res, start, end));
	return (bus_generic_adjust_resource(dev, child, type, res, start, end));
}

static int
generic_pcie_activate_resource(device_t dev, device_t child, int type, int rid,
    struct resource *r)
{
	struct generic_pcie_softc *sc;
	uint64_t phys_base;
	uint64_t pci_base;
	uint64_t size;
	int found;
	int res;
	int i;

	sc = device_get_softc(dev);

	if ((res = rman_activate_resource(r)) != 0)
		return (res);

	switch (type) {
	case SYS_RES_IOPORT:
		found = 0;
		for (i = 0; i < MAX_RANGES_TUPLES; i++) {
			pci_base = sc->ranges[i].pci_base;
			phys_base = sc->ranges[i].phys_base;
			size = sc->ranges[i].size;

			if ((rid > pci_base) && (rid < (pci_base + size))) {
				found = 1;
				break;
			}
		}
		if (found) {
			rman_set_start(r, rman_get_start(r) + phys_base);
			rman_set_end(r, rman_get_end(r) + phys_base);
			BUS_ACTIVATE_RESOURCE(device_get_parent(dev), child,
			    type, rid, r);
		} else {
			device_printf(dev, "Failed to activate IOPORT resource\n");
			res = 0;
		}
		break;
	case SYS_RES_MEMORY:
		BUS_ACTIVATE_RESOURCE(device_get_parent(dev), child, type, rid, r);
		break;
	default:
		break;
	}

	return (res);
}

static int
generic_pcie_deactivate_resource(device_t dev, device_t child, int type, int rid,
    struct resource *r)
{
	struct generic_pcie_softc *sc;
	vm_offset_t vaddr;
	int res;

	sc = device_get_softc(dev);

	if ((res = rman_deactivate_resource(r)) != 0)
		return (res);

	switch (type) {
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		vaddr = (vm_offset_t)rman_get_virtual(r);
		pmap_unmapdev(vaddr, rman_get_size(r));
		break;
	default:
		break;
	}

	return (res);
}

static bus_dma_tag_t
generic_pcie_get_dma_tag(device_t dev, device_t child)
{
	struct generic_pcie_softc *sc;

	sc = device_get_softc(dev);
	return (sc->dmat);
}
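
/*
 * MSI and MSI-X requests are forwarded to the MSI controller resolved from
 * the FDT with ofw_bus_msimap() and are handled through INTRNG; kernels
 * built without INTRNG get ENXIO.
 */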
static int
generic_pcie_alloc_msi(device_t pci, device_t child, int count, int maxcount,
    int *irqs)
{
#if defined(INTRNG)
	phandle_t msi_parent;

	ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent,
	    NULL);
	return (intr_alloc_msi(pci, child, msi_parent, count, maxcount,
	    irqs));
#else
	return (ENXIO);
#endif
}

static int
generic_pcie_release_msi(device_t pci, device_t child, int count, int *irqs)
{
#if defined(INTRNG)
	phandle_t msi_parent;

	ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent,
	    NULL);
	return (intr_release_msi(pci, child, msi_parent, count, irqs));
#else
	return (ENXIO);
#endif
}

static int
generic_pcie_map_msi(device_t pci, device_t child, int irq, uint64_t *addr,
    uint32_t *data)
{
#if defined(INTRNG)
	phandle_t msi_parent;

	ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent,
	    NULL);
	return (intr_map_msi(pci, child, msi_parent, irq, addr, data));
#else
	return (ENXIO);
#endif
}

static int
generic_pcie_alloc_msix(device_t pci, device_t child, int *irq)
{
#if defined(INTRNG)
	phandle_t msi_parent;

	ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent,
	    NULL);
	return (intr_alloc_msix(pci, child, msi_parent, irq));
#else
	return (ENXIO);
#endif
}

static int
generic_pcie_release_msix(device_t pci, device_t child, int irq)
{
#if defined(INTRNG)
	phandle_t msi_parent;

	ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent,
	    NULL);
	return (intr_release_msix(pci, child, msi_parent, irq));
#else
	return (ENXIO);
#endif
}

int
generic_pcie_get_id(device_t pci, device_t child, enum pci_id_type type,
    uintptr_t *id)
{
	phandle_t node;
	uint32_t rid;
	uint16_t pci_rid;

	if (type != PCI_ID_MSI)
		return (pcib_get_id(pci, child, type, id));

	node = ofw_bus_get_node(pci);
	pci_rid = pci_get_rid(child);

	ofw_bus_msimap(node, pci_rid, NULL, &rid);
	*id = rid;

	return (0);
}

static device_method_t generic_pcie_methods[] = {
	DEVMETHOD(device_probe,			generic_pcie_probe),
	DEVMETHOD(device_attach,		pci_host_generic_attach),
	DEVMETHOD(bus_read_ivar,		generic_pcie_read_ivar),
	DEVMETHOD(bus_write_ivar,		generic_pcie_write_ivar),
	DEVMETHOD(bus_alloc_resource,		pci_host_generic_alloc_resource),
	DEVMETHOD(bus_adjust_resource,		generic_pcie_adjust_resource),
	DEVMETHOD(bus_release_resource,		generic_pcie_release_resource),
	DEVMETHOD(bus_activate_resource,	generic_pcie_activate_resource),
	DEVMETHOD(bus_deactivate_resource,	generic_pcie_deactivate_resource),
	DEVMETHOD(bus_setup_intr,		bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr,		bus_generic_teardown_intr),

	DEVMETHOD(bus_get_dma_tag,		generic_pcie_get_dma_tag),

	/* pcib interface */
	DEVMETHOD(pcib_maxslots,		generic_pcie_maxslots),
	DEVMETHOD(pcib_route_interrupt,		generic_pcie_route_interrupt),
	DEVMETHOD(pcib_read_config,		generic_pcie_read_config),
	DEVMETHOD(pcib_write_config,		generic_pcie_write_config),
	DEVMETHOD(pcib_alloc_msi,		generic_pcie_alloc_msi),
	DEVMETHOD(pcib_release_msi,		generic_pcie_release_msi),
	DEVMETHOD(pcib_alloc_msix,		generic_pcie_alloc_msix),
	DEVMETHOD(pcib_release_msix,		generic_pcie_release_msix),
	DEVMETHOD(pcib_map_msi,			generic_pcie_map_msi),
	DEVMETHOD(pcib_get_id,			generic_pcie_get_id),

	/* ofw_bus interface */
	DEVMETHOD(ofw_bus_get_devinfo,		generic_pcie_ofw_get_devinfo),
	DEVMETHOD(ofw_bus_get_compat,		ofw_bus_gen_get_compat),
	DEVMETHOD(ofw_bus_get_model,		ofw_bus_gen_get_model),
	DEVMETHOD(ofw_bus_get_name,		ofw_bus_gen_get_name),
	DEVMETHOD(ofw_bus_get_node,		ofw_bus_gen_get_node),
	DEVMETHOD(ofw_bus_get_type,		ofw_bus_gen_get_type),

	DEVMETHOD_END
};

static const struct ofw_bus_devinfo *
generic_pcie_ofw_get_devinfo(device_t bus __unused, device_t child)
{
	struct generic_pcie_ofw_devinfo *di;

	di = device_get_ivars(child);
	return (&di->di_dinfo);
}

static struct resource *
generic_pcie_alloc_resource_ofw(device_t bus, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct generic_pcie_softc *sc;
	struct generic_pcie_ofw_devinfo *di;
	struct resource_list_entry *rle;
	int i;

	sc = device_get_softc(bus);

	if (RMAN_IS_DEFAULT_RANGE(start, end)) {
		if ((di = device_get_ivars(child)) == NULL)
			return (NULL);
		if (type == SYS_RES_IOPORT)
			type = SYS_RES_MEMORY;

		/* Find defaults for this rid */
		rle = resource_list_find(&di->di_rl, type, *rid);
		if (rle == NULL)
			return (NULL);

		start = rle->start;
		end = rle->end;
		count = rle->count;
	}

	if (type == SYS_RES_MEMORY) {
		/* Remap through ranges property */
		for (i = 0; i < MAX_RANGES_TUPLES; i++) {
			if (start >= sc->ranges[i].phys_base && end <
			    sc->ranges[i].pci_base + sc->ranges[i].size) {
				start -= sc->ranges[i].phys_base;
				start += sc->ranges[i].pci_base;
				end -= sc->ranges[i].phys_base;
				end += sc->ranges[i].pci_base;
				break;
			}
		}

		if (i == MAX_RANGES_TUPLES) {
			device_printf(bus, "Could not map resource "
			    "%#jx-%#jx\n", start, end);
			return (NULL);
		}
	}

	return (bus_generic_alloc_resource(bus, child, type, rid, start, end,
	    count, flags));
}

static int
generic_pcie_release_resource_ofw(device_t bus, device_t child, int type, int rid,
    struct resource *res)
{

	return (bus_generic_release_resource(bus, child, type, rid, res));
}

/* Helper functions */
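
/*
 * Enumerate the direct FDT children of the controller node and add a newbus
 * child for each, carrying its OFW devinfo and a resource list built from
 * the child's 'reg' (and, without INTRNG, 'interrupts') properties.
 */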
static int
generic_pcie_ofw_bus_attach(device_t dev)
{
	struct generic_pcie_ofw_devinfo *di;
	device_t child;
	phandle_t parent, node;
	pcell_t addr_cells, size_cells;

	parent = ofw_bus_get_node(dev);
	if (parent > 0) {
		get_addr_size_cells(parent, &addr_cells, &size_cells);
		/* Iterate through all bus subordinates */
		for (node = OF_child(parent); node > 0; node = OF_peer(node)) {

			/* Allocate and populate devinfo. */
			di = malloc(sizeof(*di), M_DEVBUF, M_WAITOK | M_ZERO);
			if (ofw_bus_gen_setup_devinfo(&di->di_dinfo, node) != 0) {
				free(di, M_DEVBUF);
				continue;
			}

			/* Initialize and populate resource list. */
			resource_list_init(&di->di_rl);
			ofw_bus_reg_to_rl(dev, node, addr_cells, size_cells,
			    &di->di_rl);
#ifndef INTRNG
			ofw_bus_intr_to_rl(dev, node, &di->di_rl, NULL);
#endif

			/* Add newbus device for this FDT node */
			child = device_add_child(dev, NULL, -1);
			if (child == NULL) {
				resource_list_free(&di->di_rl);
				ofw_bus_gen_destroy_devinfo(&di->di_dinfo);
				free(di, M_DEVBUF);
				continue;
			}

			device_set_ivars(child, di);
		}
	}

	return (0);
}

DEFINE_CLASS_0(pcib, generic_pcie_driver,
    generic_pcie_methods, sizeof(struct generic_pcie_softc));

devclass_t generic_pcie_devclass;

DRIVER_MODULE(pcib, simplebus, generic_pcie_driver,
    generic_pcie_devclass, 0, 0);
DRIVER_MODULE(pcib, ofwbus, generic_pcie_driver,
    generic_pcie_devclass, 0, 0);