1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright © 2021-2022 Dmitry Salychev 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 /* 30 * The DPAA2 Management Complex (MC) bus driver. 31 * 32 * MC is a hardware resource manager which can be found in several NXP 33 * SoCs (LX2160A, for example) and provides an access to the specialized 34 * hardware objects used in network-oriented packet processing applications. 35 */ 36 37 #include "opt_acpi.h" 38 #include "opt_platform.h" 39 40 #include <sys/param.h> 41 #include <sys/kernel.h> 42 #include <sys/bus.h> 43 #include <sys/rman.h> 44 #include <sys/lock.h> 45 #include <sys/module.h> 46 #include <sys/malloc.h> 47 #include <sys/mutex.h> 48 #include <sys/queue.h> 49 50 #include <vm/vm.h> 51 52 #include <machine/bus.h> 53 #include <machine/resource.h> 54 55 #ifdef DEV_ACPI 56 #include <contrib/dev/acpica/include/acpi.h> 57 #include <dev/acpica/acpivar.h> 58 #endif 59 60 #ifdef FDT 61 #include <dev/ofw/openfirm.h> 62 #include <dev/ofw/ofw_bus.h> 63 #include <dev/ofw/ofw_bus_subr.h> 64 #include <dev/ofw/ofw_pci.h> 65 #endif 66 67 #include "pcib_if.h" 68 #include "pci_if.h" 69 70 #include "dpaa2_mc.h" 71 72 /* Macros to read/write MC registers */ 73 #define mcreg_read_4(_sc, _r) bus_read_4(&(_sc)->map[1], (_r)) 74 #define mcreg_write_4(_sc, _r, _v) bus_write_4(&(_sc)->map[1], (_r), (_v)) 75 76 #define IORT_DEVICE_NAME "MCE" 77 78 /* MC Registers */ 79 #define MC_REG_GCR1 0x0000u 80 #define MC_REG_GCR2 0x0004u /* TODO: Does it exist? */ 81 #define MC_REG_GSR 0x0008u 82 #define MC_REG_FAPR 0x0028u 83 84 /* General Control Register 1 (GCR1) */ 85 #define GCR1_P1_STOP 0x80000000u 86 #define GCR1_P2_STOP 0x40000000u 87 88 /* General Status Register (GSR) */ 89 #define GSR_HW_ERR(v) (((v) & 0x80000000u) >> 31) 90 #define GSR_CAT_ERR(v) (((v) & 0x40000000u) >> 30) 91 #define GSR_DPL_OFFSET(v) (((v) & 0x3FFFFF00u) >> 8) 92 #define GSR_MCS(v) (((v) & 0xFFu) >> 0) 93 94 /* Timeouts to wait for the MC status. */ 95 #define MC_STAT_TIMEOUT 1000u /* us */ 96 #define MC_STAT_ATTEMPTS 100u 97 98 /** 99 * @brief Structure to describe a DPAA2 device as a managed resource. 100 */ 101 struct dpaa2_mc_devinfo { 102 STAILQ_ENTRY(dpaa2_mc_devinfo) link; 103 device_t dpaa2_dev; 104 uint32_t flags; 105 uint32_t owners; 106 }; 107 108 MALLOC_DEFINE(M_DPAA2_MC, "dpaa2_mc", "DPAA2 Management Complex"); 109 110 static struct resource_spec dpaa2_mc_spec[] = { 111 { SYS_RES_MEMORY, 0, RF_ACTIVE | RF_UNMAPPED }, 112 { SYS_RES_MEMORY, 1, RF_ACTIVE | RF_UNMAPPED | RF_OPTIONAL }, 113 RESOURCE_SPEC_END 114 }; 115 116 static u_int dpaa2_mc_get_xref(device_t, device_t); 117 static u_int dpaa2_mc_map_id(device_t, device_t, uintptr_t *); 118 119 static int dpaa2_mc_alloc_msi_impl(device_t, device_t, int, int, int *); 120 static int dpaa2_mc_release_msi_impl(device_t, device_t, int, int *); 121 static int dpaa2_mc_map_msi_impl(device_t, device_t, int, uint64_t *, 122 uint32_t *); 123 124 /* 125 * For device interface. 126 */ 127 128 int 129 dpaa2_mc_attach(device_t dev) 130 { 131 struct dpaa2_mc_softc *sc; 132 struct resource_map_request req; 133 uint32_t val; 134 int error; 135 136 sc = device_get_softc(dev); 137 sc->dev = dev; 138 sc->msi_allocated = false; 139 sc->msi_owner = NULL; 140 141 error = bus_alloc_resources(sc->dev, dpaa2_mc_spec, sc->res); 142 if (error) { 143 device_printf(dev, "%s: failed to allocate resources\n", 144 __func__); 145 return (ENXIO); 146 } 147 148 if (sc->res[1]) { 149 resource_init_map_request(&req); 150 req.memattr = VM_MEMATTR_DEVICE; 151 error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[1], 152 &req, &sc->map[1]); 153 if (error) { 154 device_printf(dev, "%s: failed to map control " 155 "registers\n", __func__); 156 dpaa2_mc_detach(dev); 157 return (ENXIO); 158 } 159 160 if (bootverbose) 161 device_printf(dev, 162 "GCR1=0x%x, GCR2=0x%x, GSR=0x%x, FAPR=0x%x\n", 163 mcreg_read_4(sc, MC_REG_GCR1), 164 mcreg_read_4(sc, MC_REG_GCR2), 165 mcreg_read_4(sc, MC_REG_GSR), 166 mcreg_read_4(sc, MC_REG_FAPR)); 167 168 /* Reset P1_STOP and P2_STOP bits to resume MC processor. */ 169 val = mcreg_read_4(sc, MC_REG_GCR1) & 170 ~(GCR1_P1_STOP | GCR1_P2_STOP); 171 mcreg_write_4(sc, MC_REG_GCR1, val); 172 173 /* Poll MC status. */ 174 if (bootverbose) 175 device_printf(dev, "polling MC status...\n"); 176 for (int i = 0; i < MC_STAT_ATTEMPTS; i++) { 177 val = mcreg_read_4(sc, MC_REG_GSR); 178 if (GSR_MCS(val) != 0u) 179 break; 180 DELAY(MC_STAT_TIMEOUT); 181 } 182 183 if (bootverbose) 184 device_printf(dev, 185 "GCR1=0x%x, GCR2=0x%x, GSR=0x%x, FAPR=0x%x\n", 186 mcreg_read_4(sc, MC_REG_GCR1), 187 mcreg_read_4(sc, MC_REG_GCR2), 188 mcreg_read_4(sc, MC_REG_GSR), 189 mcreg_read_4(sc, MC_REG_FAPR)); 190 } 191 192 /* At least 64 bytes of the command portal should be available. */ 193 if (rman_get_size(sc->res[0]) < DPAA2_MCP_MEM_WIDTH) { 194 device_printf(dev, "%s: MC portal memory region too small: " 195 "%jd\n", __func__, rman_get_size(sc->res[0])); 196 dpaa2_mc_detach(dev); 197 return (ENXIO); 198 } 199 200 /* Map MC portal memory resource. */ 201 resource_init_map_request(&req); 202 req.memattr = VM_MEMATTR_DEVICE; 203 error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[0], 204 &req, &sc->map[0]); 205 if (error) { 206 device_printf(dev, "Failed to map MC portal memory\n"); 207 dpaa2_mc_detach(dev); 208 return (ENXIO); 209 } 210 211 /* Initialize a resource manager for the DPAA2 I/O objects. */ 212 sc->dpio_rman.rm_type = RMAN_ARRAY; 213 sc->dpio_rman.rm_descr = "DPAA2 DPIO objects"; 214 error = rman_init(&sc->dpio_rman); 215 if (error) { 216 device_printf(dev, "Failed to initialize a resource manager for " 217 "the DPAA2 I/O objects: error=%d\n", error); 218 dpaa2_mc_detach(dev); 219 return (ENXIO); 220 } 221 222 /* Initialize a resource manager for the DPAA2 buffer pools. */ 223 sc->dpbp_rman.rm_type = RMAN_ARRAY; 224 sc->dpbp_rman.rm_descr = "DPAA2 DPBP objects"; 225 error = rman_init(&sc->dpbp_rman); 226 if (error) { 227 device_printf(dev, "Failed to initialize a resource manager for " 228 "the DPAA2 buffer pools: error=%d\n", error); 229 dpaa2_mc_detach(dev); 230 return (ENXIO); 231 } 232 233 /* Initialize a resource manager for the DPAA2 concentrators. */ 234 sc->dpcon_rman.rm_type = RMAN_ARRAY; 235 sc->dpcon_rman.rm_descr = "DPAA2 DPCON objects"; 236 error = rman_init(&sc->dpcon_rman); 237 if (error) { 238 device_printf(dev, "Failed to initialize a resource manager for " 239 "the DPAA2 concentrators: error=%d\n", error); 240 dpaa2_mc_detach(dev); 241 return (ENXIO); 242 } 243 244 /* Initialize a resource manager for the DPAA2 MC portals. */ 245 sc->dpmcp_rman.rm_type = RMAN_ARRAY; 246 sc->dpmcp_rman.rm_descr = "DPAA2 DPMCP objects"; 247 error = rman_init(&sc->dpmcp_rman); 248 if (error) { 249 device_printf(dev, "Failed to initialize a resource manager for " 250 "the DPAA2 MC portals: error=%d\n", error); 251 dpaa2_mc_detach(dev); 252 return (ENXIO); 253 } 254 255 /* Initialize a list of non-allocatable DPAA2 devices. */ 256 mtx_init(&sc->mdev_lock, "MC portal mdev lock", NULL, MTX_DEF); 257 STAILQ_INIT(&sc->mdev_list); 258 259 mtx_init(&sc->msi_lock, "MC MSI lock", NULL, MTX_DEF); 260 261 /* 262 * Add a root resource container as the only child of the bus. All of 263 * the direct descendant containers will be attached to the root one 264 * instead of the MC device. 265 */ 266 sc->rcdev = device_add_child(dev, "dpaa2_rc", 0); 267 if (sc->rcdev == NULL) { 268 dpaa2_mc_detach(dev); 269 return (ENXIO); 270 } 271 bus_generic_probe(dev); 272 bus_generic_attach(dev); 273 274 return (0); 275 } 276 277 int 278 dpaa2_mc_detach(device_t dev) 279 { 280 struct dpaa2_mc_softc *sc; 281 struct dpaa2_devinfo *dinfo = NULL; 282 int error; 283 284 bus_generic_detach(dev); 285 286 sc = device_get_softc(dev); 287 if (sc->rcdev) 288 device_delete_child(dev, sc->rcdev); 289 bus_release_resources(dev, dpaa2_mc_spec, sc->res); 290 291 dinfo = device_get_ivars(dev); 292 if (dinfo) 293 free(dinfo, M_DPAA2_MC); 294 295 error = bus_generic_detach(dev); 296 if (error != 0) 297 return (error); 298 299 return (device_delete_children(dev)); 300 } 301 302 /* 303 * For bus interface. 304 */ 305 306 struct resource * 307 dpaa2_mc_alloc_resource(device_t mcdev, device_t child, int type, int *rid, 308 rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) 309 { 310 struct resource *res; 311 struct rman *rm; 312 int error; 313 314 rm = dpaa2_mc_rman(mcdev, type, flags); 315 if (rm == NULL) 316 return (bus_generic_alloc_resource(mcdev, child, type, rid, 317 start, end, count, flags)); 318 319 /* 320 * Skip managing DPAA2-specific resource. It must be provided to MC by 321 * calling DPAA2_MC_MANAGE_DEV() beforehand. 322 */ 323 if (type <= DPAA2_DEV_MC) { 324 error = rman_manage_region(rm, start, end); 325 if (error) { 326 device_printf(mcdev, "rman_manage_region() failed: " 327 "start=%#jx, end=%#jx, error=%d\n", start, end, 328 error); 329 goto fail; 330 } 331 } 332 333 res = bus_generic_rman_alloc_resource(mcdev, child, type, rid, start, 334 end, count, flags); 335 if (res == NULL) 336 goto fail; 337 return (res); 338 fail: 339 device_printf(mcdev, "%s() failed: type=%d, rid=%d, start=%#jx, " 340 "end=%#jx, count=%#jx, flags=%x\n", __func__, type, *rid, start, end, 341 count, flags); 342 return (NULL); 343 } 344 345 int 346 dpaa2_mc_adjust_resource(device_t mcdev, device_t child, 347 struct resource *r, rman_res_t start, rman_res_t end) 348 { 349 struct rman *rm; 350 351 rm = dpaa2_mc_rman(mcdev, rman_get_type(r), rman_get_flags(r)); 352 if (rm) 353 return (bus_generic_rman_adjust_resource(mcdev, child, r, 354 start, end)); 355 return (bus_generic_adjust_resource(mcdev, child, r, start, end)); 356 } 357 358 int 359 dpaa2_mc_release_resource(device_t mcdev, device_t child, struct resource *r) 360 { 361 struct rman *rm; 362 363 rm = dpaa2_mc_rman(mcdev, rman_get_type(r), rman_get_flags(r)); 364 if (rm) 365 return (bus_generic_rman_release_resource(mcdev, child, r)); 366 return (bus_generic_release_resource(mcdev, child, r)); 367 } 368 369 int 370 dpaa2_mc_activate_resource(device_t mcdev, device_t child, struct resource *r) 371 { 372 struct rman *rm; 373 374 rm = dpaa2_mc_rman(mcdev, rman_get_type(r), rman_get_flags(r)); 375 if (rm) 376 return (bus_generic_rman_activate_resource(mcdev, child, r)); 377 return (bus_generic_activate_resource(mcdev, child, r)); 378 } 379 380 int 381 dpaa2_mc_deactivate_resource(device_t mcdev, device_t child, struct resource *r) 382 { 383 struct rman *rm; 384 385 rm = dpaa2_mc_rman(mcdev, rman_get_type(r), rman_get_flags(r)); 386 if (rm) 387 return (bus_generic_rman_deactivate_resource(mcdev, child, r)); 388 return (bus_generic_deactivate_resource(mcdev, child, r)); 389 } 390 391 /* 392 * For pseudo-pcib interface. 393 */ 394 395 int 396 dpaa2_mc_alloc_msi(device_t mcdev, device_t child, int count, int maxcount, 397 int *irqs) 398 { 399 #if defined(INTRNG) 400 return (dpaa2_mc_alloc_msi_impl(mcdev, child, count, maxcount, irqs)); 401 #else 402 return (ENXIO); 403 #endif 404 } 405 406 int 407 dpaa2_mc_release_msi(device_t mcdev, device_t child, int count, int *irqs) 408 { 409 #if defined(INTRNG) 410 return (dpaa2_mc_release_msi_impl(mcdev, child, count, irqs)); 411 #else 412 return (ENXIO); 413 #endif 414 } 415 416 int 417 dpaa2_mc_map_msi(device_t mcdev, device_t child, int irq, uint64_t *addr, 418 uint32_t *data) 419 { 420 #if defined(INTRNG) 421 return (dpaa2_mc_map_msi_impl(mcdev, child, irq, addr, data)); 422 #else 423 return (ENXIO); 424 #endif 425 } 426 427 int 428 dpaa2_mc_get_id(device_t mcdev, device_t child, enum pci_id_type type, 429 uintptr_t *id) 430 { 431 struct dpaa2_devinfo *dinfo; 432 433 dinfo = device_get_ivars(child); 434 435 if (strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) 436 return (ENXIO); 437 438 if (type == PCI_ID_MSI) 439 return (dpaa2_mc_map_id(mcdev, child, id)); 440 441 *id = dinfo->icid; 442 return (0); 443 } 444 445 /* 446 * For DPAA2 Management Complex bus driver interface. 447 */ 448 449 int 450 dpaa2_mc_manage_dev(device_t mcdev, device_t dpaa2_dev, uint32_t flags) 451 { 452 struct dpaa2_mc_softc *sc; 453 struct dpaa2_devinfo *dinfo; 454 struct dpaa2_mc_devinfo *di; 455 struct rman *rm; 456 int error; 457 458 sc = device_get_softc(mcdev); 459 dinfo = device_get_ivars(dpaa2_dev); 460 461 if (!sc || !dinfo || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) 462 return (EINVAL); 463 464 di = malloc(sizeof(*di), M_DPAA2_MC, M_WAITOK | M_ZERO); 465 di->dpaa2_dev = dpaa2_dev; 466 di->flags = flags; 467 di->owners = 0; 468 469 /* Append a new managed DPAA2 device to the queue. */ 470 mtx_assert(&sc->mdev_lock, MA_NOTOWNED); 471 mtx_lock(&sc->mdev_lock); 472 STAILQ_INSERT_TAIL(&sc->mdev_list, di, link); 473 mtx_unlock(&sc->mdev_lock); 474 475 if (flags & DPAA2_MC_DEV_ALLOCATABLE) { 476 /* Select rman based on a type of the DPAA2 device. */ 477 rm = dpaa2_mc_rman(mcdev, dinfo->dtype, 0); 478 if (!rm) 479 return (ENOENT); 480 /* Manage DPAA2 device as an allocatable resource. */ 481 error = rman_manage_region(rm, (rman_res_t) dpaa2_dev, 482 (rman_res_t) dpaa2_dev); 483 if (error) 484 return (error); 485 } 486 487 return (0); 488 } 489 490 int 491 dpaa2_mc_get_free_dev(device_t mcdev, device_t *dpaa2_dev, 492 enum dpaa2_dev_type devtype) 493 { 494 struct rman *rm; 495 rman_res_t start, end; 496 int error; 497 498 if (strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) 499 return (EINVAL); 500 501 /* Select resource manager based on a type of the DPAA2 device. */ 502 rm = dpaa2_mc_rman(mcdev, devtype, 0); 503 if (!rm) 504 return (ENOENT); 505 /* Find first free DPAA2 device of the given type. */ 506 error = rman_first_free_region(rm, &start, &end); 507 if (error) 508 return (error); 509 510 KASSERT(start == end, ("start != end, but should be the same pointer " 511 "to the DPAA2 device: start=%jx, end=%jx", start, end)); 512 513 *dpaa2_dev = (device_t) start; 514 515 return (0); 516 } 517 518 int 519 dpaa2_mc_get_dev(device_t mcdev, device_t *dpaa2_dev, 520 enum dpaa2_dev_type devtype, uint32_t obj_id) 521 { 522 struct dpaa2_mc_softc *sc; 523 struct dpaa2_devinfo *dinfo; 524 struct dpaa2_mc_devinfo *di; 525 int error = ENOENT; 526 527 sc = device_get_softc(mcdev); 528 529 if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) 530 return (EINVAL); 531 532 mtx_assert(&sc->mdev_lock, MA_NOTOWNED); 533 mtx_lock(&sc->mdev_lock); 534 535 STAILQ_FOREACH(di, &sc->mdev_list, link) { 536 dinfo = device_get_ivars(di->dpaa2_dev); 537 if (dinfo->dtype == devtype && dinfo->id == obj_id) { 538 *dpaa2_dev = di->dpaa2_dev; 539 error = 0; 540 break; 541 } 542 } 543 544 mtx_unlock(&sc->mdev_lock); 545 546 return (error); 547 } 548 549 int 550 dpaa2_mc_get_shared_dev(device_t mcdev, device_t *dpaa2_dev, 551 enum dpaa2_dev_type devtype) 552 { 553 struct dpaa2_mc_softc *sc; 554 struct dpaa2_devinfo *dinfo; 555 struct dpaa2_mc_devinfo *di; 556 device_t dev = NULL; 557 uint32_t owners = UINT32_MAX; 558 int error = ENOENT; 559 560 sc = device_get_softc(mcdev); 561 562 if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) 563 return (EINVAL); 564 565 mtx_assert(&sc->mdev_lock, MA_NOTOWNED); 566 mtx_lock(&sc->mdev_lock); 567 568 STAILQ_FOREACH(di, &sc->mdev_list, link) { 569 dinfo = device_get_ivars(di->dpaa2_dev); 570 571 if ((dinfo->dtype == devtype) && 572 (di->flags & DPAA2_MC_DEV_SHAREABLE) && 573 (di->owners < owners)) { 574 dev = di->dpaa2_dev; 575 owners = di->owners; 576 } 577 } 578 if (dev) { 579 *dpaa2_dev = dev; 580 error = 0; 581 } 582 583 mtx_unlock(&sc->mdev_lock); 584 585 return (error); 586 } 587 588 int 589 dpaa2_mc_reserve_dev(device_t mcdev, device_t dpaa2_dev, 590 enum dpaa2_dev_type devtype) 591 { 592 struct dpaa2_mc_softc *sc; 593 struct dpaa2_mc_devinfo *di; 594 int error = ENOENT; 595 596 sc = device_get_softc(mcdev); 597 598 if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) 599 return (EINVAL); 600 601 mtx_assert(&sc->mdev_lock, MA_NOTOWNED); 602 mtx_lock(&sc->mdev_lock); 603 604 STAILQ_FOREACH(di, &sc->mdev_list, link) { 605 if (di->dpaa2_dev == dpaa2_dev && 606 (di->flags & DPAA2_MC_DEV_SHAREABLE)) { 607 di->owners++; 608 error = 0; 609 break; 610 } 611 } 612 613 mtx_unlock(&sc->mdev_lock); 614 615 return (error); 616 } 617 618 int 619 dpaa2_mc_release_dev(device_t mcdev, device_t dpaa2_dev, 620 enum dpaa2_dev_type devtype) 621 { 622 struct dpaa2_mc_softc *sc; 623 struct dpaa2_mc_devinfo *di; 624 int error = ENOENT; 625 626 sc = device_get_softc(mcdev); 627 628 if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) 629 return (EINVAL); 630 631 mtx_assert(&sc->mdev_lock, MA_NOTOWNED); 632 mtx_lock(&sc->mdev_lock); 633 634 STAILQ_FOREACH(di, &sc->mdev_list, link) { 635 if (di->dpaa2_dev == dpaa2_dev && 636 (di->flags & DPAA2_MC_DEV_SHAREABLE)) { 637 di->owners -= di->owners > 0 ? 1 : 0; 638 error = 0; 639 break; 640 } 641 } 642 643 mtx_unlock(&sc->mdev_lock); 644 645 return (error); 646 } 647 648 /** 649 * @internal 650 */ 651 static u_int 652 dpaa2_mc_get_xref(device_t mcdev, device_t child) 653 { 654 struct dpaa2_mc_softc *sc = device_get_softc(mcdev); 655 struct dpaa2_devinfo *dinfo = device_get_ivars(child); 656 #ifdef DEV_ACPI 657 u_int xref, devid; 658 #endif 659 #ifdef FDT 660 phandle_t msi_parent; 661 #endif 662 int error; 663 664 if (sc && dinfo) { 665 #ifdef DEV_ACPI 666 if (sc->acpi_based) { 667 /* 668 * NOTE: The first named component from the IORT table 669 * with the given name (as a substring) will be used. 670 */ 671 error = acpi_iort_map_named_msi(IORT_DEVICE_NAME, 672 dinfo->icid, &xref, &devid); 673 if (error) 674 return (0); 675 return (xref); 676 } 677 #endif 678 #ifdef FDT 679 if (!sc->acpi_based) { 680 /* FDT-based driver. */ 681 error = ofw_bus_msimap(sc->ofw_node, dinfo->icid, 682 &msi_parent, NULL); 683 if (error) 684 return (0); 685 return ((u_int) msi_parent); 686 } 687 #endif 688 } 689 return (0); 690 } 691 692 /** 693 * @internal 694 */ 695 static u_int 696 dpaa2_mc_map_id(device_t mcdev, device_t child, uintptr_t *id) 697 { 698 struct dpaa2_devinfo *dinfo; 699 #ifdef DEV_ACPI 700 u_int xref, devid; 701 int error; 702 #endif 703 704 dinfo = device_get_ivars(child); 705 if (dinfo) { 706 /* 707 * The first named components from IORT table with the given 708 * name (as a substring) will be used. 709 */ 710 #ifdef DEV_ACPI 711 error = acpi_iort_map_named_msi(IORT_DEVICE_NAME, dinfo->icid, 712 &xref, &devid); 713 if (error == 0) 714 *id = devid; 715 else 716 #endif 717 *id = dinfo->icid; /* RID not in IORT, likely FW bug */ 718 719 return (0); 720 } 721 return (ENXIO); 722 } 723 724 /** 725 * @internal 726 * @brief Obtain a resource manager based on the given type of the resource. 727 */ 728 struct rman * 729 dpaa2_mc_rman(device_t mcdev, int type, u_int flags) 730 { 731 struct dpaa2_mc_softc *sc; 732 733 sc = device_get_softc(mcdev); 734 735 switch (type) { 736 case DPAA2_DEV_IO: 737 return (&sc->dpio_rman); 738 case DPAA2_DEV_BP: 739 return (&sc->dpbp_rman); 740 case DPAA2_DEV_CON: 741 return (&sc->dpcon_rman); 742 case DPAA2_DEV_MCP: 743 return (&sc->dpmcp_rman); 744 default: 745 break; 746 } 747 748 return (NULL); 749 } 750 751 #if defined(INTRNG) && !defined(IOMMU) 752 753 /** 754 * @internal 755 * @brief Allocates requested number of MSIs. 756 * 757 * NOTE: This function is a part of fallback solution when IOMMU isn't available. 758 * Total number of IRQs is limited to 32. 759 */ 760 static int 761 dpaa2_mc_alloc_msi_impl(device_t mcdev, device_t child, int count, int maxcount, 762 int *irqs) 763 { 764 struct dpaa2_mc_softc *sc = device_get_softc(mcdev); 765 int msi_irqs[DPAA2_MC_MSI_COUNT]; 766 int error; 767 768 /* Pre-allocate a bunch of MSIs for MC to be used by its children. */ 769 if (!sc->msi_allocated) { 770 error = intr_alloc_msi(mcdev, child, dpaa2_mc_get_xref(mcdev, 771 child), DPAA2_MC_MSI_COUNT, DPAA2_MC_MSI_COUNT, msi_irqs); 772 if (error) { 773 device_printf(mcdev, "failed to pre-allocate %d MSIs: " 774 "error=%d\n", DPAA2_MC_MSI_COUNT, error); 775 return (error); 776 } 777 778 mtx_assert(&sc->msi_lock, MA_NOTOWNED); 779 mtx_lock(&sc->msi_lock); 780 for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { 781 sc->msi[i].child = NULL; 782 sc->msi[i].irq = msi_irqs[i]; 783 } 784 sc->msi_owner = child; 785 sc->msi_allocated = true; 786 mtx_unlock(&sc->msi_lock); 787 } 788 789 error = ENOENT; 790 791 /* Find the first free MSIs from the pre-allocated pool. */ 792 mtx_assert(&sc->msi_lock, MA_NOTOWNED); 793 mtx_lock(&sc->msi_lock); 794 for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { 795 if (sc->msi[i].child != NULL) 796 continue; 797 error = 0; 798 for (int j = 0; j < count; j++) { 799 if (i + j >= DPAA2_MC_MSI_COUNT) { 800 device_printf(mcdev, "requested %d MSIs exceed " 801 "limit of %d available\n", count, 802 DPAA2_MC_MSI_COUNT); 803 error = E2BIG; 804 break; 805 } 806 sc->msi[i + j].child = child; 807 irqs[j] = sc->msi[i + j].irq; 808 } 809 break; 810 } 811 mtx_unlock(&sc->msi_lock); 812 813 return (error); 814 } 815 816 /** 817 * @internal 818 * @brief Marks IRQs as free in the pre-allocated pool of MSIs. 819 * 820 * NOTE: This function is a part of fallback solution when IOMMU isn't available. 821 * Total number of IRQs is limited to 32. 822 * NOTE: MSIs are kept allocated in the kernel as a part of the pool. 823 */ 824 static int 825 dpaa2_mc_release_msi_impl(device_t mcdev, device_t child, int count, int *irqs) 826 { 827 struct dpaa2_mc_softc *sc = device_get_softc(mcdev); 828 829 mtx_assert(&sc->msi_lock, MA_NOTOWNED); 830 mtx_lock(&sc->msi_lock); 831 for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { 832 if (sc->msi[i].child != child) 833 continue; 834 for (int j = 0; j < count; j++) { 835 if (sc->msi[i].irq == irqs[j]) { 836 sc->msi[i].child = NULL; 837 break; 838 } 839 } 840 } 841 mtx_unlock(&sc->msi_lock); 842 843 return (0); 844 } 845 846 /** 847 * @internal 848 * @brief Provides address to write to and data according to the given MSI from 849 * the pre-allocated pool. 850 * 851 * NOTE: This function is a part of fallback solution when IOMMU isn't available. 852 * Total number of IRQs is limited to 32. 853 */ 854 static int 855 dpaa2_mc_map_msi_impl(device_t mcdev, device_t child, int irq, uint64_t *addr, 856 uint32_t *data) 857 { 858 struct dpaa2_mc_softc *sc = device_get_softc(mcdev); 859 int error = EINVAL; 860 861 mtx_assert(&sc->msi_lock, MA_NOTOWNED); 862 mtx_lock(&sc->msi_lock); 863 for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { 864 if (sc->msi[i].child == child && sc->msi[i].irq == irq) { 865 error = 0; 866 break; 867 } 868 } 869 mtx_unlock(&sc->msi_lock); 870 if (error) 871 return (error); 872 873 return (intr_map_msi(mcdev, sc->msi_owner, dpaa2_mc_get_xref(mcdev, 874 sc->msi_owner), irq, addr, data)); 875 } 876 877 #endif /* defined(INTRNG) && !defined(IOMMU) */ 878 879 static device_method_t dpaa2_mc_methods[] = { 880 DEVMETHOD_END 881 }; 882 883 DEFINE_CLASS_0(dpaa2_mc, dpaa2_mc_driver, dpaa2_mc_methods, 884 sizeof(struct dpaa2_mc_softc)); 885