1 /*- 2 * Copyright (c) 2015, 2020 Ruslan Bukin <br@bsdpad.com> 3 * Copyright (c) 2014 The FreeBSD Foundation 4 * All rights reserved. 5 * 6 * This software was developed by Semihalf under 7 * the sponsorship of the FreeBSD Foundation. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 */ 30 31 /* Generic ECAM PCIe driver */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 #include "opt_platform.h" 37 38 #include <sys/param.h> 39 #include <sys/systm.h> 40 #include <sys/malloc.h> 41 #include <sys/kernel.h> 42 #include <sys/rman.h> 43 #include <sys/module.h> 44 #include <sys/bus.h> 45 #include <sys/endian.h> 46 47 #include <dev/pci/pcivar.h> 48 #include <dev/pci/pcireg.h> 49 #include <dev/pci/pcib_private.h> 50 #include <dev/pci/pci_host_generic.h> 51 52 #include <machine/bus.h> 53 #include <machine/intr.h> 54 55 #include "pcib_if.h" 56 57 /* Forward prototypes */ 58 59 static uint32_t generic_pcie_read_config(device_t dev, u_int bus, u_int slot, 60 u_int func, u_int reg, int bytes); 61 static void generic_pcie_write_config(device_t dev, u_int bus, u_int slot, 62 u_int func, u_int reg, uint32_t val, int bytes); 63 static int generic_pcie_maxslots(device_t dev); 64 static int generic_pcie_read_ivar(device_t dev, device_t child, int index, 65 uintptr_t *result); 66 static int generic_pcie_write_ivar(device_t dev, device_t child, int index, 67 uintptr_t value); 68 69 int 70 pci_host_generic_core_attach(device_t dev) 71 { 72 struct generic_pcie_core_softc *sc; 73 uint64_t phys_base; 74 uint64_t pci_base; 75 uint64_t size; 76 int error; 77 int rid, tuple; 78 79 sc = device_get_softc(dev); 80 sc->dev = dev; 81 82 /* Create the parent DMA tag to pass down the coherent flag */ 83 error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 84 1, 0, /* alignment, bounds */ 85 BUS_SPACE_MAXADDR, /* lowaddr */ 86 BUS_SPACE_MAXADDR, /* highaddr */ 87 NULL, NULL, /* filter, filterarg */ 88 BUS_SPACE_MAXSIZE, /* maxsize */ 89 BUS_SPACE_UNRESTRICTED, /* nsegments */ 90 BUS_SPACE_MAXSIZE, /* maxsegsize */ 91 sc->coherent ? BUS_DMA_COHERENT : 0, /* flags */ 92 NULL, NULL, /* lockfunc, lockarg */ 93 &sc->dmat); 94 if (error != 0) 95 return (error); 96 97 rid = 0; 98 sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); 99 if (sc->res == NULL) { 100 device_printf(dev, "could not map memory.\n"); 101 return (ENXIO); 102 } 103 104 sc->bst = rman_get_bustag(sc->res); 105 sc->bsh = rman_get_bushandle(sc->res); 106 107 sc->has_pmem = false; 108 sc->pmem_rman.rm_type = RMAN_ARRAY; 109 sc->pmem_rman.rm_descr = "PCIe Prefetch Memory"; 110 111 sc->mem_rman.rm_type = RMAN_ARRAY; 112 sc->mem_rman.rm_descr = "PCIe Memory"; 113 114 sc->io_rman.rm_type = RMAN_ARRAY; 115 sc->io_rman.rm_descr = "PCIe IO window"; 116 117 /* Initialize rman and allocate memory regions */ 118 error = rman_init(&sc->pmem_rman); 119 if (error) { 120 device_printf(dev, "rman_init() failed. error = %d\n", error); 121 return (error); 122 } 123 124 error = rman_init(&sc->mem_rman); 125 if (error) { 126 device_printf(dev, "rman_init() failed. error = %d\n", error); 127 return (error); 128 } 129 130 error = rman_init(&sc->io_rman); 131 if (error) { 132 device_printf(dev, "rman_init() failed. error = %d\n", error); 133 return (error); 134 } 135 136 for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) { 137 phys_base = sc->ranges[tuple].phys_base; 138 pci_base = sc->ranges[tuple].pci_base; 139 size = sc->ranges[tuple].size; 140 if (phys_base == 0 || size == 0) 141 continue; /* empty range element */ 142 switch (FLAG_TYPE(sc->ranges[tuple].flags)) { 143 case FLAG_TYPE_PMEM: 144 sc->has_pmem = true; 145 error = rman_manage_region(&sc->pmem_rman, 146 pci_base, pci_base + size - 1); 147 break; 148 case FLAG_TYPE_MEM: 149 error = rman_manage_region(&sc->mem_rman, 150 pci_base, pci_base + size - 1); 151 break; 152 case FLAG_TYPE_IO: 153 error = rman_manage_region(&sc->io_rman, 154 pci_base, pci_base + size - 1); 155 break; 156 default: 157 continue; 158 } 159 if (error) { 160 device_printf(dev, "rman_manage_region() failed." 161 "error = %d\n", error); 162 rman_fini(&sc->pmem_rman); 163 rman_fini(&sc->mem_rman); 164 rman_fini(&sc->io_rman); 165 return (error); 166 } 167 } 168 169 return (0); 170 } 171 172 static uint32_t 173 generic_pcie_read_config(device_t dev, u_int bus, u_int slot, 174 u_int func, u_int reg, int bytes) 175 { 176 struct generic_pcie_core_softc *sc; 177 bus_space_handle_t h; 178 bus_space_tag_t t; 179 uint64_t offset; 180 uint32_t data; 181 182 sc = device_get_softc(dev); 183 if ((bus < sc->bus_start) || (bus > sc->bus_end)) 184 return (~0U); 185 if ((slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) || 186 (reg > PCIE_REGMAX)) 187 return (~0U); 188 if ((sc->quirks & PCIE_ECAM_DESIGNWARE_QUIRK) && bus == 0 && slot > 0) 189 return (~0U); 190 191 offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg); 192 t = sc->bst; 193 h = sc->bsh; 194 195 switch (bytes) { 196 case 1: 197 data = bus_space_read_1(t, h, offset); 198 break; 199 case 2: 200 data = le16toh(bus_space_read_2(t, h, offset)); 201 break; 202 case 4: 203 data = le32toh(bus_space_read_4(t, h, offset)); 204 break; 205 default: 206 return (~0U); 207 } 208 209 return (data); 210 } 211 212 static void 213 generic_pcie_write_config(device_t dev, u_int bus, u_int slot, 214 u_int func, u_int reg, uint32_t val, int bytes) 215 { 216 struct generic_pcie_core_softc *sc; 217 bus_space_handle_t h; 218 bus_space_tag_t t; 219 uint64_t offset; 220 221 sc = device_get_softc(dev); 222 if ((bus < sc->bus_start) || (bus > sc->bus_end)) 223 return; 224 if ((slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) || 225 (reg > PCIE_REGMAX)) 226 return; 227 228 offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg); 229 230 t = sc->bst; 231 h = sc->bsh; 232 233 switch (bytes) { 234 case 1: 235 bus_space_write_1(t, h, offset, val); 236 break; 237 case 2: 238 bus_space_write_2(t, h, offset, htole16(val)); 239 break; 240 case 4: 241 bus_space_write_4(t, h, offset, htole32(val)); 242 break; 243 default: 244 return; 245 } 246 } 247 248 static int 249 generic_pcie_maxslots(device_t dev) 250 { 251 252 return (31); /* max slots per bus acc. to standard */ 253 } 254 255 static int 256 generic_pcie_read_ivar(device_t dev, device_t child, int index, 257 uintptr_t *result) 258 { 259 struct generic_pcie_core_softc *sc; 260 261 sc = device_get_softc(dev); 262 263 if (index == PCIB_IVAR_BUS) { 264 *result = sc->bus_start; 265 return (0); 266 } 267 268 if (index == PCIB_IVAR_DOMAIN) { 269 *result = sc->ecam; 270 return (0); 271 } 272 273 if (bootverbose) 274 device_printf(dev, "ERROR: Unknown index %d.\n", index); 275 return (ENOENT); 276 } 277 278 static int 279 generic_pcie_write_ivar(device_t dev, device_t child, int index, 280 uintptr_t value) 281 { 282 283 return (ENOENT); 284 } 285 286 static struct rman * 287 generic_pcie_rman(struct generic_pcie_core_softc *sc, int type, int flags) 288 { 289 290 switch (type) { 291 case SYS_RES_IOPORT: 292 return (&sc->io_rman); 293 case SYS_RES_MEMORY: 294 if (sc->has_pmem && (flags & RF_PREFETCHABLE) != 0) 295 return (&sc->pmem_rman); 296 return (&sc->mem_rman); 297 default: 298 break; 299 } 300 301 return (NULL); 302 } 303 304 int 305 pci_host_generic_core_release_resource(device_t dev, device_t child, int type, 306 int rid, struct resource *res) 307 { 308 struct generic_pcie_core_softc *sc; 309 struct rman *rm; 310 int error; 311 312 sc = device_get_softc(dev); 313 314 #if defined(NEW_PCIB) && defined(PCI_RES_BUS) 315 if (type == PCI_RES_BUS) { 316 return (pci_domain_release_bus(sc->ecam, child, rid, res)); 317 } 318 #endif 319 320 rm = generic_pcie_rman(sc, type, rman_get_flags(res)); 321 if (rm != NULL) { 322 KASSERT(rman_is_region_manager(res, rm), ("rman mismatch")); 323 if (rman_get_flags(res) & RF_ACTIVE) { 324 error = bus_deactivate_resource(child, type, rid, res); 325 if (error) 326 return (error); 327 } 328 return (rman_release_resource(res)); 329 } 330 331 return (bus_generic_release_resource(dev, child, type, rid, res)); 332 } 333 334 static int 335 generic_pcie_translate_resource_common(device_t dev, int type, rman_res_t start, 336 rman_res_t end, rman_res_t *new_start, rman_res_t *new_end) 337 { 338 struct generic_pcie_core_softc *sc; 339 uint64_t phys_base; 340 uint64_t pci_base; 341 uint64_t size; 342 int i, space; 343 bool found; 344 345 sc = device_get_softc(dev); 346 /* Translate the address from a PCI address to a physical address */ 347 switch (type) { 348 case SYS_RES_IOPORT: 349 case SYS_RES_MEMORY: 350 found = false; 351 for (i = 0; i < MAX_RANGES_TUPLES; i++) { 352 pci_base = sc->ranges[i].pci_base; 353 phys_base = sc->ranges[i].phys_base; 354 size = sc->ranges[i].size; 355 356 if (start < pci_base || start >= pci_base + size) 357 continue; 358 359 switch (FLAG_TYPE(sc->ranges[i].flags)) { 360 case FLAG_TYPE_MEM: 361 case FLAG_TYPE_PMEM: 362 space = SYS_RES_MEMORY; 363 break; 364 case FLAG_TYPE_IO: 365 space = SYS_RES_IOPORT; 366 break; 367 default: 368 space = -1; 369 continue; 370 } 371 372 if (type == space) { 373 *new_start = start - pci_base + phys_base; 374 *new_end = end - pci_base + phys_base; 375 found = true; 376 break; 377 } 378 } 379 break; 380 default: 381 /* No translation for non-memory types */ 382 *new_start = start; 383 *new_end = end; 384 found = true; 385 break; 386 } 387 388 return (found ? 0 : ENOENT); 389 } 390 391 static int 392 generic_pcie_translate_resource(device_t bus, int type, 393 rman_res_t start, rman_res_t *newstart) 394 { 395 rman_res_t newend; /* unused */ 396 397 return (generic_pcie_translate_resource_common( 398 bus, type, start, 0, newstart, &newend)); 399 } 400 401 struct resource * 402 pci_host_generic_core_alloc_resource(device_t dev, device_t child, int type, 403 int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) 404 { 405 struct generic_pcie_core_softc *sc; 406 struct resource *res; 407 struct rman *rm; 408 rman_res_t phys_start, phys_end; 409 410 sc = device_get_softc(dev); 411 412 #if defined(NEW_PCIB) && defined(PCI_RES_BUS) 413 if (type == PCI_RES_BUS) { 414 return (pci_domain_alloc_bus(sc->ecam, child, rid, start, end, 415 count, flags)); 416 } 417 #endif 418 419 rm = generic_pcie_rman(sc, type, flags); 420 if (rm == NULL) 421 return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child, 422 type, rid, start, end, count, flags)); 423 424 /* Translate the address from a PCI address to a physical address */ 425 if (generic_pcie_translate_resource_common(dev, type, start, end, 426 &phys_start, &phys_end) != 0) { 427 device_printf(dev, 428 "Failed to translate resource %jx-%jx type %x for %s\n", 429 (uintmax_t)start, (uintmax_t)end, type, 430 device_get_nameunit(child)); 431 return (NULL); 432 } 433 434 if (bootverbose) { 435 device_printf(dev, 436 "rman_reserve_resource: start=%#jx, end=%#jx, count=%#jx\n", 437 start, end, count); 438 } 439 440 res = rman_reserve_resource(rm, start, end, count, flags, child); 441 if (res == NULL) 442 goto fail; 443 444 rman_set_rid(res, *rid); 445 446 if (flags & RF_ACTIVE) 447 if (bus_activate_resource(child, type, *rid, res)) { 448 rman_release_resource(res); 449 goto fail; 450 } 451 452 return (res); 453 454 fail: 455 device_printf(dev, "%s FAIL: type=%d, rid=%d, " 456 "start=%016jx, end=%016jx, count=%016jx, flags=%x\n", 457 __func__, type, *rid, start, end, count, flags); 458 459 return (NULL); 460 } 461 462 static int 463 generic_pcie_activate_resource(device_t dev, device_t child, int type, 464 int rid, struct resource *r) 465 { 466 struct generic_pcie_core_softc *sc; 467 rman_res_t start, end; 468 int res; 469 470 sc = device_get_softc(dev); 471 472 if ((res = rman_activate_resource(r)) != 0) 473 return (res); 474 475 start = rman_get_start(r); 476 end = rman_get_end(r); 477 res = generic_pcie_translate_resource_common(dev, type, start, end, 478 &start, &end); 479 if (res != 0) { 480 rman_deactivate_resource(r); 481 return (res); 482 } 483 rman_set_start(r, start); 484 rman_set_end(r, end); 485 486 return (BUS_ACTIVATE_RESOURCE(device_get_parent(dev), child, type, 487 rid, r)); 488 } 489 490 static int 491 generic_pcie_deactivate_resource(device_t dev, device_t child, int type, 492 int rid, struct resource *r) 493 { 494 int res; 495 496 if ((res = rman_deactivate_resource(r)) != 0) 497 return (res); 498 499 switch (type) { 500 case SYS_RES_IOPORT: 501 case SYS_RES_MEMORY: 502 case SYS_RES_IRQ: 503 res = BUS_DEACTIVATE_RESOURCE(device_get_parent(dev), child, 504 type, rid, r); 505 break; 506 default: 507 break; 508 } 509 510 return (res); 511 } 512 513 static int 514 generic_pcie_adjust_resource(device_t dev, device_t child, int type, 515 struct resource *res, rman_res_t start, rman_res_t end) 516 { 517 struct generic_pcie_core_softc *sc; 518 struct rman *rm; 519 520 sc = device_get_softc(dev); 521 #if defined(NEW_PCIB) && defined(PCI_RES_BUS) 522 if (type == PCI_RES_BUS) 523 return (pci_domain_adjust_bus(sc->ecam, child, res, start, 524 end)); 525 #endif 526 527 rm = generic_pcie_rman(sc, type, rman_get_flags(res)); 528 if (rm != NULL) 529 return (rman_adjust_resource(res, start, end)); 530 return (bus_generic_adjust_resource(dev, child, type, res, start, end)); 531 } 532 533 static bus_dma_tag_t 534 generic_pcie_get_dma_tag(device_t dev, device_t child) 535 { 536 struct generic_pcie_core_softc *sc; 537 538 sc = device_get_softc(dev); 539 return (sc->dmat); 540 } 541 542 static device_method_t generic_pcie_methods[] = { 543 DEVMETHOD(device_attach, pci_host_generic_core_attach), 544 DEVMETHOD(bus_read_ivar, generic_pcie_read_ivar), 545 DEVMETHOD(bus_write_ivar, generic_pcie_write_ivar), 546 DEVMETHOD(bus_alloc_resource, pci_host_generic_core_alloc_resource), 547 DEVMETHOD(bus_adjust_resource, generic_pcie_adjust_resource), 548 DEVMETHOD(bus_activate_resource, generic_pcie_activate_resource), 549 DEVMETHOD(bus_deactivate_resource, generic_pcie_deactivate_resource), 550 DEVMETHOD(bus_release_resource, pci_host_generic_core_release_resource), 551 DEVMETHOD(bus_translate_resource, generic_pcie_translate_resource), 552 DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), 553 DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), 554 555 DEVMETHOD(bus_get_dma_tag, generic_pcie_get_dma_tag), 556 557 /* pcib interface */ 558 DEVMETHOD(pcib_maxslots, generic_pcie_maxslots), 559 DEVMETHOD(pcib_read_config, generic_pcie_read_config), 560 DEVMETHOD(pcib_write_config, generic_pcie_write_config), 561 562 DEVMETHOD_END 563 }; 564 565 DEFINE_CLASS_0(pcib, generic_pcie_core_driver, 566 generic_pcie_methods, sizeof(struct generic_pcie_core_softc)); 567