/*-
 * Copyright (c) 2015, 2020 Ruslan Bukin <br@bsdpad.com>
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Semihalf under
 * the sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Generic ECAM PCIe driver */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_platform.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcib_private.h>
#include <dev/pci/pci_host_generic.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include "pcib_if.h"

/* Forward prototypes */

static uint32_t generic_pcie_read_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, int bytes);
static void generic_pcie_write_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, uint32_t val, int bytes);
static int generic_pcie_maxslots(device_t dev);
static int generic_pcie_read_ivar(device_t dev, device_t child, int index,
    uintptr_t *result);
static int generic_pcie_write_ivar(device_t dev, device_t child, int index,
    uintptr_t value);

int
pci_host_generic_core_attach(device_t dev)
{
	struct generic_pcie_core_softc *sc;
	uint64_t phys_base;
	uint64_t pci_base;
	uint64_t size;
	int error;
	int rid, tuple;

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* Create the parent DMA tag to pass down the coherent flag */
	error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
	    1, 0,				/* alignment, bounds */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    BUS_SPACE_MAXSIZE,			/* maxsize */
	    BUS_SPACE_UNRESTRICTED,		/* nsegments */
	    BUS_SPACE_MAXSIZE,			/* maxsegsize */
	    sc->coherent ? BUS_DMA_COHERENT : 0, /* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->dmat);
	if (error != 0)
		return (error);

	rid = 0;
	sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (sc->res == NULL) {
		device_printf(dev, "could not allocate memory.\n");
		error = ENXIO;
		goto err_resource;
	}

	sc->bst = rman_get_bustag(sc->res);
	sc->bsh = rman_get_bushandle(sc->res);

	sc->has_pmem = false;
	sc->pmem_rman.rm_type = RMAN_ARRAY;
	sc->pmem_rman.rm_descr = "PCIe Prefetch Memory";

	sc->mem_rman.rm_type = RMAN_ARRAY;
	sc->mem_rman.rm_descr = "PCIe Memory";

	sc->io_rman.rm_type = RMAN_ARRAY;
	sc->io_rman.rm_descr = "PCIe IO window";

	/* Initialize rman and allocate memory regions */
	error = rman_init(&sc->pmem_rman);
	if (error) {
		device_printf(dev, "rman_init() failed. error = %d\n", error);
		goto err_pmem_rman;
	}

	error = rman_init(&sc->mem_rman);
	if (error) {
		device_printf(dev, "rman_init() failed. error = %d\n", error);
		goto err_mem_rman;
	}

	error = rman_init(&sc->io_rman);
	if (error) {
		device_printf(dev, "rman_init() failed. error = %d\n", error);
		goto err_io_rman;
	}

	for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) {
		phys_base = sc->ranges[tuple].phys_base;
		pci_base = sc->ranges[tuple].pci_base;
		size = sc->ranges[tuple].size;
		if (phys_base == 0 || size == 0)
			continue; /* empty range element */
		switch (FLAG_TYPE(sc->ranges[tuple].flags)) {
		case FLAG_TYPE_PMEM:
			sc->has_pmem = true;
			error = rman_manage_region(&sc->pmem_rman,
			    pci_base, pci_base + size - 1);
			break;
		case FLAG_TYPE_MEM:
			error = rman_manage_region(&sc->mem_rman,
			    pci_base, pci_base + size - 1);
			break;
		case FLAG_TYPE_IO:
			error = rman_manage_region(&sc->io_rman,
			    pci_base, pci_base + size - 1);
			break;
		default:
			continue;
		}
		if (error) {
			device_printf(dev, "rman_manage_region() failed. "
			    "error = %d\n", error);
			goto err_rman_manage;
		}
	}

	return (0);

err_rman_manage:
	rman_fini(&sc->io_rman);
err_io_rman:
	rman_fini(&sc->mem_rman);
err_mem_rman:
	rman_fini(&sc->pmem_rman);
err_pmem_rman:
	bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->res);
err_resource:
	bus_dma_tag_destroy(sc->dmat);
	return (error);
}

int
pci_host_generic_core_detach(device_t dev)
{
	struct generic_pcie_core_softc *sc;
	int error;

	sc = device_get_softc(dev);

	error = bus_generic_detach(dev);
	if (error != 0)
		return (error);

	rman_fini(&sc->io_rman);
	rman_fini(&sc->mem_rman);
	rman_fini(&sc->pmem_rman);
	bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->res);
	bus_dma_tag_destroy(sc->dmat);

	return (0);
}

static uint32_t
generic_pcie_read_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, int bytes)
{
	struct generic_pcie_core_softc *sc;
	bus_space_handle_t h;
	bus_space_tag_t t;
	uint64_t offset;
	uint32_t data;

	sc = device_get_softc(dev);
	if ((bus < sc->bus_start) || (bus > sc->bus_end))
		return (~0U);
	if ((slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) ||
	    (reg > PCIE_REGMAX))
		return (~0U);
	if ((sc->quirks & PCIE_ECAM_DESIGNWARE_QUIRK) && bus == 0 && slot > 0)
		return (~0U);

	offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg);
	t = sc->bst;
	h = sc->bsh;

	switch (bytes) {
	case 1:
		data = bus_space_read_1(t, h, offset);
		break;
	case 2:
		data = le16toh(bus_space_read_2(t, h, offset));
		break;
	case 4:
		data = le32toh(bus_space_read_4(t, h, offset));
		break;
	default:
		return (~0U);
	}

	return (data);
}

static void
generic_pcie_write_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, uint32_t val, int bytes)
{
	struct generic_pcie_core_softc *sc;
	bus_space_handle_t h;
	bus_space_tag_t t;
	uint64_t offset;

	sc = device_get_softc(dev);
	if ((bus < sc->bus_start) || (bus > sc->bus_end))
		return;
	if ((slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) ||
	    (reg > PCIE_REGMAX))
		return;

	offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg);

	t = sc->bst;
	h = sc->bsh;

	switch (bytes) {
	case 1:
		bus_space_write_1(t, h, offset, val);
		break;
	case 2:
		bus_space_write_2(t, h, offset, htole16(val));
		break;
	case 4:
		bus_space_write_4(t, h, offset, htole32(val));
		break;
	default:
		return;
	}
}

static int
generic_pcie_maxslots(device_t dev)
{

	return (31); /* max slots per bus acc. to standard */
}

static int
generic_pcie_read_ivar(device_t dev, device_t child, int index,
    uintptr_t *result)
{
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);

	if (index == PCIB_IVAR_BUS) {
		*result = sc->bus_start;
		return (0);
	}

	if (index == PCIB_IVAR_DOMAIN) {
		*result = sc->ecam;
		return (0);
	}

	if (bootverbose)
		device_printf(dev, "ERROR: Unknown index %d.\n", index);
	return (ENOENT);
}

static int
generic_pcie_write_ivar(device_t dev, device_t child, int index,
    uintptr_t value)
{

	return (ENOENT);
}

static struct rman *
generic_pcie_rman(struct generic_pcie_core_softc *sc, int type, int flags)
{

	switch (type) {
	case SYS_RES_IOPORT:
		return (&sc->io_rman);
	case SYS_RES_MEMORY:
		if (sc->has_pmem && (flags & RF_PREFETCHABLE) != 0)
			return (&sc->pmem_rman);
		return (&sc->mem_rman);
	default:
		break;
	}

	return (NULL);
}

int
pci_host_generic_core_release_resource(device_t dev, device_t child, int type,
    int rid, struct resource *res)
{
	struct generic_pcie_core_softc *sc;
	struct rman *rm;
	int error;

	sc = device_get_softc(dev);

#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	if (type == PCI_RES_BUS) {
		return (pci_domain_release_bus(sc->ecam, child, rid, res));
	}
#endif

	rm = generic_pcie_rman(sc, type, rman_get_flags(res));
	if (rm != NULL) {
		KASSERT(rman_is_region_manager(res, rm), ("rman mismatch"));
		if (rman_get_flags(res) & RF_ACTIVE) {
			error = bus_deactivate_resource(child, type, rid, res);
			if (error)
				return (error);
		}
		return (rman_release_resource(res));
	}

	return (bus_generic_release_resource(dev, child, type, rid, res));
}

static int
generic_pcie_translate_resource_common(device_t dev, int type, rman_res_t start,
    rman_res_t end, rman_res_t *new_start, rman_res_t *new_end)
{
	struct generic_pcie_core_softc *sc;
	uint64_t phys_base;
	uint64_t pci_base;
	uint64_t size;
	int i, space;
	bool found;

	sc = device_get_softc(dev);
	/* Translate the address from a PCI address to a physical address */
	switch (type) {
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		found = false;
		for (i = 0; i < MAX_RANGES_TUPLES; i++) {
			pci_base = sc->ranges[i].pci_base;
			phys_base = sc->ranges[i].phys_base;
			size = sc->ranges[i].size;

			if (start < pci_base || start >= pci_base + size)
				continue;

			switch (FLAG_TYPE(sc->ranges[i].flags)) {
			case FLAG_TYPE_MEM:
			case FLAG_TYPE_PMEM:
				space = SYS_RES_MEMORY;
				break;
			case FLAG_TYPE_IO:
				space = SYS_RES_IOPORT;
				break;
			default:
				space = -1;
				continue;
			}

			if (type == space) {
				*new_start = start - pci_base + phys_base;
				*new_end = end - pci_base + phys_base;
				found = true;
				break;
			}
		}
		break;
	default:
		/* No translation for non-memory types */
		*new_start = start;
		*new_end = end;
		found = true;
		break;
	}

	return (found ? 0 : ENOENT);
}

static int
generic_pcie_translate_resource(device_t bus, int type,
    rman_res_t start, rman_res_t *newstart)
{
	rman_res_t newend; /* unused */

	return (generic_pcie_translate_resource_common(
	    bus, type, start, 0, newstart, &newend));
}

struct resource *
pci_host_generic_core_alloc_resource(device_t dev, device_t child, int type,
    int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct generic_pcie_core_softc *sc;
	struct resource *res;
	struct rman *rm;
	rman_res_t phys_start, phys_end;

	sc = device_get_softc(dev);

#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	if (type == PCI_RES_BUS) {
		return (pci_domain_alloc_bus(sc->ecam, child, rid, start, end,
		    count, flags));
	}
#endif

	rm = generic_pcie_rman(sc, type, flags);
	if (rm == NULL)
		return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child,
		    type, rid, start, end, count, flags));

	/* Translate the address from a PCI address to a physical address */
	if (generic_pcie_translate_resource_common(dev, type, start, end,
	    &phys_start, &phys_end) != 0) {
		device_printf(dev,
		    "Failed to translate resource %jx-%jx type %x for %s\n",
		    (uintmax_t)start, (uintmax_t)end, type,
		    device_get_nameunit(child));
		return (NULL);
	}

	if (bootverbose) {
		device_printf(dev,
		    "rman_reserve_resource: start=%#jx, end=%#jx, count=%#jx\n",
		    start, end, count);
	}

	res = rman_reserve_resource(rm, start, end, count, flags, child);
	if (res == NULL)
		goto fail;

	rman_set_rid(res, *rid);

	if (flags & RF_ACTIVE)
		if (bus_activate_resource(child, type, *rid, res)) {
			rman_release_resource(res);
			goto fail;
		}

	return (res);

fail:
	device_printf(dev, "%s FAIL: type=%d, rid=%d, "
	    "start=%016jx, end=%016jx, count=%016jx, flags=%x\n",
	    __func__, type, *rid, start, end, count, flags);

	return (NULL);
}

static int
generic_pcie_activate_resource(device_t dev, device_t child, int type,
    int rid, struct resource *r)
{
	rman_res_t start, end;
	int res;

	if ((res = rman_activate_resource(r)) != 0)
		return (res);

	start = rman_get_start(r);
	end = rman_get_end(r);
	res = generic_pcie_translate_resource_common(dev, type, start, end,
	    &start, &end);
	if (res != 0) {
		rman_deactivate_resource(r);
		return (res);
	}
	rman_set_start(r, start);
	rman_set_end(r, end);

	return (BUS_ACTIVATE_RESOURCE(device_get_parent(dev), child, type,
	    rid, r));
}

static int
generic_pcie_deactivate_resource(device_t dev, device_t child, int type,
    int rid, struct resource *r)
{
	int res;

	if ((res = rman_deactivate_resource(r)) != 0)
		return (res);

	switch (type) {
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
	case SYS_RES_IRQ:
		res = BUS_DEACTIVATE_RESOURCE(device_get_parent(dev), child,
		    type, rid, r);
		break;
	default:
		break;
	}

	return (res);
}

static int
generic_pcie_adjust_resource(device_t dev, device_t child, int type,
    struct resource *res, rman_res_t start, rman_res_t end)
{
	struct generic_pcie_core_softc *sc;
	struct rman *rm;

	sc = device_get_softc(dev);
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	if (type == PCI_RES_BUS)
		return (pci_domain_adjust_bus(sc->ecam, child, res, start,
		    end));
#endif

	rm = generic_pcie_rman(sc, type, rman_get_flags(res));
	if (rm != NULL)
		return (rman_adjust_resource(res, start, end));
	return (bus_generic_adjust_resource(dev, child, type, res, start, end));
}

static bus_dma_tag_t
generic_pcie_get_dma_tag(device_t dev, device_t child)
{
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);
	return (sc->dmat);
}

static device_method_t generic_pcie_methods[] = {
	DEVMETHOD(device_attach,		pci_host_generic_core_attach),
	DEVMETHOD(device_detach,		pci_host_generic_core_detach),

	DEVMETHOD(bus_read_ivar,		generic_pcie_read_ivar),
	DEVMETHOD(bus_write_ivar,		generic_pcie_write_ivar),
	DEVMETHOD(bus_alloc_resource,		pci_host_generic_core_alloc_resource),
	DEVMETHOD(bus_adjust_resource,		generic_pcie_adjust_resource),
	DEVMETHOD(bus_activate_resource,	generic_pcie_activate_resource),
	DEVMETHOD(bus_deactivate_resource,	generic_pcie_deactivate_resource),
	DEVMETHOD(bus_release_resource,		pci_host_generic_core_release_resource),
	DEVMETHOD(bus_translate_resource,	generic_pcie_translate_resource),
	DEVMETHOD(bus_setup_intr,		bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr,		bus_generic_teardown_intr),

	DEVMETHOD(bus_get_dma_tag,		generic_pcie_get_dma_tag),

	/* pcib interface */
	DEVMETHOD(pcib_maxslots,		generic_pcie_maxslots),
	DEVMETHOD(pcib_read_config,		generic_pcie_read_config),
	DEVMETHOD(pcib_write_config,		generic_pcie_write_config),

	DEVMETHOD_END
};

DEFINE_CLASS_0(pcib, generic_pcie_core_driver,
    generic_pcie_methods, sizeof(struct generic_pcie_core_softc));