/*-
 * Copyright (c) 2015, 2020 Ruslan Bukin <br@bsdpad.com>
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Semihalf under
 * the sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
29 */ 30 31 /* Generic ECAM PCIe driver */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 #include "opt_platform.h" 37 38 #include <sys/param.h> 39 #include <sys/systm.h> 40 #include <sys/malloc.h> 41 #include <sys/kernel.h> 42 #include <sys/rman.h> 43 #include <sys/module.h> 44 #include <sys/bus.h> 45 #include <sys/endian.h> 46 47 #include <dev/pci/pcivar.h> 48 #include <dev/pci/pcireg.h> 49 #include <dev/pci/pcib_private.h> 50 #include <dev/pci/pci_host_generic.h> 51 52 #include <machine/bus.h> 53 #include <machine/intr.h> 54 55 #include "pcib_if.h" 56 57 #if defined(VM_MEMATTR_DEVICE_NP) 58 #define PCI_UNMAPPED 59 #define PCI_RF_FLAGS RF_UNMAPPED 60 #else 61 #define PCI_RF_FLAGS 0 62 #endif 63 64 65 /* Forward prototypes */ 66 67 static uint32_t generic_pcie_read_config(device_t dev, u_int bus, u_int slot, 68 u_int func, u_int reg, int bytes); 69 static void generic_pcie_write_config(device_t dev, u_int bus, u_int slot, 70 u_int func, u_int reg, uint32_t val, int bytes); 71 static int generic_pcie_maxslots(device_t dev); 72 static int generic_pcie_read_ivar(device_t dev, device_t child, int index, 73 uintptr_t *result); 74 static int generic_pcie_write_ivar(device_t dev, device_t child, int index, 75 uintptr_t value); 76 77 int 78 pci_host_generic_core_attach(device_t dev) 79 { 80 #ifdef PCI_UNMAPPED 81 struct resource_map_request req; 82 struct resource_map map; 83 #endif 84 struct generic_pcie_core_softc *sc; 85 uint64_t phys_base; 86 uint64_t pci_base; 87 uint64_t size; 88 int error; 89 int rid, tuple; 90 91 sc = device_get_softc(dev); 92 sc->dev = dev; 93 94 /* Create the parent DMA tag to pass down the coherent flag */ 95 error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 96 1, 0, /* alignment, bounds */ 97 BUS_SPACE_MAXADDR, /* lowaddr */ 98 BUS_SPACE_MAXADDR, /* highaddr */ 99 NULL, NULL, /* filter, filterarg */ 100 BUS_SPACE_MAXSIZE, /* maxsize */ 101 BUS_SPACE_UNRESTRICTED, /* nsegments */ 102 BUS_SPACE_MAXSIZE, /* maxsegsize */ 
103 sc->coherent ? BUS_DMA_COHERENT : 0, /* flags */ 104 NULL, NULL, /* lockfunc, lockarg */ 105 &sc->dmat); 106 if (error != 0) 107 return (error); 108 109 rid = 0; 110 sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 111 PCI_RF_FLAGS | RF_ACTIVE); 112 if (sc->res == NULL) { 113 device_printf(dev, "could not allocate memory.\n"); 114 error = ENXIO; 115 goto err_resource; 116 } 117 #ifdef PCI_UNMAPPED 118 resource_init_map_request(&req); 119 req.memattr = VM_MEMATTR_DEVICE_NP; 120 error = bus_map_resource(dev, SYS_RES_MEMORY, sc->res, &req, &map); 121 if (error != 0) { 122 device_printf(dev, "could not map memory.\n"); 123 return (error); 124 } 125 rman_set_mapping(sc->res, &map); 126 #endif 127 128 sc->bst = rman_get_bustag(sc->res); 129 sc->bsh = rman_get_bushandle(sc->res); 130 131 sc->has_pmem = false; 132 sc->pmem_rman.rm_type = RMAN_ARRAY; 133 sc->pmem_rman.rm_descr = "PCIe Prefetch Memory"; 134 135 sc->mem_rman.rm_type = RMAN_ARRAY; 136 sc->mem_rman.rm_descr = "PCIe Memory"; 137 138 sc->io_rman.rm_type = RMAN_ARRAY; 139 sc->io_rman.rm_descr = "PCIe IO window"; 140 141 /* Initialize rman and allocate memory regions */ 142 error = rman_init(&sc->pmem_rman); 143 if (error) { 144 device_printf(dev, "rman_init() failed. error = %d\n", error); 145 goto err_pmem_rman; 146 } 147 148 error = rman_init(&sc->mem_rman); 149 if (error) { 150 device_printf(dev, "rman_init() failed. error = %d\n", error); 151 goto err_mem_rman; 152 } 153 154 error = rman_init(&sc->io_rman); 155 if (error) { 156 device_printf(dev, "rman_init() failed. 
error = %d\n", error); 157 goto err_io_rman; 158 } 159 160 for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) { 161 phys_base = sc->ranges[tuple].phys_base; 162 pci_base = sc->ranges[tuple].pci_base; 163 size = sc->ranges[tuple].size; 164 if (phys_base == 0 || size == 0) 165 continue; /* empty range element */ 166 switch (FLAG_TYPE(sc->ranges[tuple].flags)) { 167 case FLAG_TYPE_PMEM: 168 sc->has_pmem = true; 169 error = rman_manage_region(&sc->pmem_rman, 170 pci_base, pci_base + size - 1); 171 break; 172 case FLAG_TYPE_MEM: 173 error = rman_manage_region(&sc->mem_rman, 174 pci_base, pci_base + size - 1); 175 break; 176 case FLAG_TYPE_IO: 177 error = rman_manage_region(&sc->io_rman, 178 pci_base, pci_base + size - 1); 179 break; 180 default: 181 continue; 182 } 183 if (error) { 184 device_printf(dev, "rman_manage_region() failed." 185 "error = %d\n", error); 186 goto err_rman_manage; 187 } 188 } 189 190 return (0); 191 192 err_rman_manage: 193 rman_fini(&sc->io_rman); 194 err_io_rman: 195 rman_fini(&sc->mem_rman); 196 err_mem_rman: 197 rman_fini(&sc->pmem_rman); 198 err_pmem_rman: 199 bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->res); 200 err_resource: 201 bus_dma_tag_destroy(sc->dmat); 202 return (error); 203 } 204 205 int 206 pci_host_generic_core_detach(device_t dev) 207 { 208 struct generic_pcie_core_softc *sc; 209 int error; 210 211 sc = device_get_softc(dev); 212 213 error = bus_generic_detach(dev); 214 if (error != 0) 215 return (error); 216 217 rman_fini(&sc->io_rman); 218 rman_fini(&sc->mem_rman); 219 rman_fini(&sc->pmem_rman); 220 bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->res); 221 bus_dma_tag_destroy(sc->dmat); 222 223 return (0); 224 } 225 226 static uint32_t 227 generic_pcie_read_config(device_t dev, u_int bus, u_int slot, 228 u_int func, u_int reg, int bytes) 229 { 230 struct generic_pcie_core_softc *sc; 231 bus_space_handle_t h; 232 bus_space_tag_t t; 233 uint64_t offset; 234 uint32_t data; 235 236 sc = device_get_softc(dev); 237 if ((bus 
< sc->bus_start) || (bus > sc->bus_end)) 238 return (~0U); 239 if ((slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) || 240 (reg > PCIE_REGMAX)) 241 return (~0U); 242 if ((sc->quirks & PCIE_ECAM_DESIGNWARE_QUIRK) && bus == 0 && slot > 0) 243 return (~0U); 244 245 offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg); 246 t = sc->bst; 247 h = sc->bsh; 248 249 switch (bytes) { 250 case 1: 251 data = bus_space_read_1(t, h, offset); 252 break; 253 case 2: 254 data = le16toh(bus_space_read_2(t, h, offset)); 255 break; 256 case 4: 257 data = le32toh(bus_space_read_4(t, h, offset)); 258 break; 259 default: 260 return (~0U); 261 } 262 263 return (data); 264 } 265 266 static void 267 generic_pcie_write_config(device_t dev, u_int bus, u_int slot, 268 u_int func, u_int reg, uint32_t val, int bytes) 269 { 270 struct generic_pcie_core_softc *sc; 271 bus_space_handle_t h; 272 bus_space_tag_t t; 273 uint64_t offset; 274 275 sc = device_get_softc(dev); 276 if ((bus < sc->bus_start) || (bus > sc->bus_end)) 277 return; 278 if ((slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) || 279 (reg > PCIE_REGMAX)) 280 return; 281 282 offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg); 283 284 t = sc->bst; 285 h = sc->bsh; 286 287 switch (bytes) { 288 case 1: 289 bus_space_write_1(t, h, offset, val); 290 break; 291 case 2: 292 bus_space_write_2(t, h, offset, htole16(val)); 293 break; 294 case 4: 295 bus_space_write_4(t, h, offset, htole32(val)); 296 break; 297 default: 298 return; 299 } 300 } 301 302 static int 303 generic_pcie_maxslots(device_t dev) 304 { 305 306 return (31); /* max slots per bus acc. 
to standard */ 307 } 308 309 static int 310 generic_pcie_read_ivar(device_t dev, device_t child, int index, 311 uintptr_t *result) 312 { 313 struct generic_pcie_core_softc *sc; 314 315 sc = device_get_softc(dev); 316 317 if (index == PCIB_IVAR_BUS) { 318 *result = sc->bus_start; 319 return (0); 320 } 321 322 if (index == PCIB_IVAR_DOMAIN) { 323 *result = sc->ecam; 324 return (0); 325 } 326 327 if (bootverbose) 328 device_printf(dev, "ERROR: Unknown index %d.\n", index); 329 return (ENOENT); 330 } 331 332 static int 333 generic_pcie_write_ivar(device_t dev, device_t child, int index, 334 uintptr_t value) 335 { 336 337 return (ENOENT); 338 } 339 340 static struct rman * 341 generic_pcie_rman(struct generic_pcie_core_softc *sc, int type, int flags) 342 { 343 344 switch (type) { 345 case SYS_RES_IOPORT: 346 return (&sc->io_rman); 347 case SYS_RES_MEMORY: 348 if (sc->has_pmem && (flags & RF_PREFETCHABLE) != 0) 349 return (&sc->pmem_rman); 350 return (&sc->mem_rman); 351 default: 352 break; 353 } 354 355 return (NULL); 356 } 357 358 int 359 pci_host_generic_core_release_resource(device_t dev, device_t child, int type, 360 int rid, struct resource *res) 361 { 362 struct generic_pcie_core_softc *sc; 363 struct rman *rm; 364 int error; 365 366 sc = device_get_softc(dev); 367 368 #if defined(NEW_PCIB) && defined(PCI_RES_BUS) 369 if (type == PCI_RES_BUS) { 370 return (pci_domain_release_bus(sc->ecam, child, rid, res)); 371 } 372 #endif 373 374 rm = generic_pcie_rman(sc, type, rman_get_flags(res)); 375 if (rm != NULL) { 376 KASSERT(rman_is_region_manager(res, rm), ("rman mismatch")); 377 if (rman_get_flags(res) & RF_ACTIVE) { 378 error = bus_deactivate_resource(child, type, rid, res); 379 if (error) 380 return (error); 381 } 382 return (rman_release_resource(res)); 383 } 384 385 return (bus_generic_release_resource(dev, child, type, rid, res)); 386 } 387 388 static int 389 generic_pcie_translate_resource_common(device_t dev, int type, rman_res_t start, 390 rman_res_t end, 
rman_res_t *new_start, rman_res_t *new_end) 391 { 392 struct generic_pcie_core_softc *sc; 393 uint64_t phys_base; 394 uint64_t pci_base; 395 uint64_t size; 396 int i, space; 397 bool found; 398 399 sc = device_get_softc(dev); 400 /* Translate the address from a PCI address to a physical address */ 401 switch (type) { 402 case SYS_RES_IOPORT: 403 case SYS_RES_MEMORY: 404 found = false; 405 for (i = 0; i < MAX_RANGES_TUPLES; i++) { 406 pci_base = sc->ranges[i].pci_base; 407 phys_base = sc->ranges[i].phys_base; 408 size = sc->ranges[i].size; 409 410 if (start < pci_base || start >= pci_base + size) 411 continue; 412 413 switch (FLAG_TYPE(sc->ranges[i].flags)) { 414 case FLAG_TYPE_MEM: 415 case FLAG_TYPE_PMEM: 416 space = SYS_RES_MEMORY; 417 break; 418 case FLAG_TYPE_IO: 419 space = SYS_RES_IOPORT; 420 break; 421 default: 422 space = -1; 423 continue; 424 } 425 426 if (type == space) { 427 *new_start = start - pci_base + phys_base; 428 *new_end = end - pci_base + phys_base; 429 found = true; 430 break; 431 } 432 } 433 break; 434 default: 435 /* No translation for non-memory types */ 436 *new_start = start; 437 *new_end = end; 438 found = true; 439 break; 440 } 441 442 return (found ? 
0 : ENOENT); 443 } 444 445 static int 446 generic_pcie_translate_resource(device_t bus, int type, 447 rman_res_t start, rman_res_t *newstart) 448 { 449 rman_res_t newend; /* unused */ 450 451 return (generic_pcie_translate_resource_common( 452 bus, type, start, 0, newstart, &newend)); 453 } 454 455 struct resource * 456 pci_host_generic_core_alloc_resource(device_t dev, device_t child, int type, 457 int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) 458 { 459 struct generic_pcie_core_softc *sc; 460 struct resource *res; 461 struct rman *rm; 462 463 sc = device_get_softc(dev); 464 465 #if defined(NEW_PCIB) && defined(PCI_RES_BUS) 466 if (type == PCI_RES_BUS) { 467 return (pci_domain_alloc_bus(sc->ecam, child, rid, start, end, 468 count, flags)); 469 } 470 #endif 471 472 rm = generic_pcie_rman(sc, type, flags); 473 if (rm == NULL) 474 return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child, 475 type, rid, start, end, count, flags)); 476 477 if (bootverbose) { 478 device_printf(dev, 479 "rman_reserve_resource: start=%#jx, end=%#jx, count=%#jx\n", 480 start, end, count); 481 } 482 483 res = rman_reserve_resource(rm, start, end, count, flags, child); 484 if (res == NULL) 485 goto fail; 486 487 rman_set_rid(res, *rid); 488 489 if (flags & RF_ACTIVE) 490 if (bus_activate_resource(child, type, *rid, res)) { 491 rman_release_resource(res); 492 goto fail; 493 } 494 495 return (res); 496 497 fail: 498 device_printf(dev, "%s FAIL: type=%d, rid=%d, " 499 "start=%016jx, end=%016jx, count=%016jx, flags=%x\n", 500 __func__, type, *rid, start, end, count, flags); 501 502 return (NULL); 503 } 504 505 static int 506 generic_pcie_activate_resource(device_t dev, device_t child, int type, 507 int rid, struct resource *r) 508 { 509 rman_res_t start, end; 510 int res; 511 512 if ((res = rman_activate_resource(r)) != 0) 513 return (res); 514 515 start = rman_get_start(r); 516 end = rman_get_end(r); 517 res = generic_pcie_translate_resource_common(dev, type, start, 
end, 518 &start, &end); 519 if (res != 0) { 520 rman_deactivate_resource(r); 521 return (res); 522 } 523 rman_set_start(r, start); 524 rman_set_end(r, end); 525 526 return (BUS_ACTIVATE_RESOURCE(device_get_parent(dev), child, type, 527 rid, r)); 528 } 529 530 static int 531 generic_pcie_deactivate_resource(device_t dev, device_t child, int type, 532 int rid, struct resource *r) 533 { 534 int res; 535 536 if ((res = rman_deactivate_resource(r)) != 0) 537 return (res); 538 539 switch (type) { 540 case SYS_RES_IOPORT: 541 case SYS_RES_MEMORY: 542 case SYS_RES_IRQ: 543 res = BUS_DEACTIVATE_RESOURCE(device_get_parent(dev), child, 544 type, rid, r); 545 break; 546 default: 547 break; 548 } 549 550 return (res); 551 } 552 553 static int 554 generic_pcie_adjust_resource(device_t dev, device_t child, int type, 555 struct resource *res, rman_res_t start, rman_res_t end) 556 { 557 struct generic_pcie_core_softc *sc; 558 struct rman *rm; 559 560 sc = device_get_softc(dev); 561 #if defined(NEW_PCIB) && defined(PCI_RES_BUS) 562 if (type == PCI_RES_BUS) 563 return (pci_domain_adjust_bus(sc->ecam, child, res, start, 564 end)); 565 #endif 566 567 rm = generic_pcie_rman(sc, type, rman_get_flags(res)); 568 if (rm != NULL) 569 return (rman_adjust_resource(res, start, end)); 570 return (bus_generic_adjust_resource(dev, child, type, res, start, end)); 571 } 572 573 static bus_dma_tag_t 574 generic_pcie_get_dma_tag(device_t dev, device_t child) 575 { 576 struct generic_pcie_core_softc *sc; 577 578 sc = device_get_softc(dev); 579 return (sc->dmat); 580 } 581 582 static device_method_t generic_pcie_methods[] = { 583 DEVMETHOD(device_attach, pci_host_generic_core_attach), 584 DEVMETHOD(device_detach, pci_host_generic_core_detach), 585 586 DEVMETHOD(bus_read_ivar, generic_pcie_read_ivar), 587 DEVMETHOD(bus_write_ivar, generic_pcie_write_ivar), 588 DEVMETHOD(bus_alloc_resource, pci_host_generic_core_alloc_resource), 589 DEVMETHOD(bus_adjust_resource, generic_pcie_adjust_resource), 590 
DEVMETHOD(bus_activate_resource, generic_pcie_activate_resource), 591 DEVMETHOD(bus_deactivate_resource, generic_pcie_deactivate_resource), 592 DEVMETHOD(bus_release_resource, pci_host_generic_core_release_resource), 593 DEVMETHOD(bus_translate_resource, generic_pcie_translate_resource), 594 DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), 595 DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), 596 597 DEVMETHOD(bus_get_dma_tag, generic_pcie_get_dma_tag), 598 599 /* pcib interface */ 600 DEVMETHOD(pcib_maxslots, generic_pcie_maxslots), 601 DEVMETHOD(pcib_read_config, generic_pcie_read_config), 602 DEVMETHOD(pcib_write_config, generic_pcie_write_config), 603 604 DEVMETHOD_END 605 }; 606 607 DEFINE_CLASS_0(pcib, generic_pcie_core_driver, 608 generic_pcie_methods, sizeof(struct generic_pcie_core_softc)); 609