1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright © 2021-2022 Dmitry Salychev 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 /* 30 * The DPAA2 Management Complex (MC) bus driver. 31 * 32 * MC is a hardware resource manager which can be found in several NXP 33 * SoCs (LX2160A, for example) and provides an access to the specialized 34 * hardware objects used in network-oriented packet processing applications. 35 */ 36 37 #include "opt_acpi.h" 38 #include "opt_platform.h" 39 40 #include <sys/param.h> 41 #include <sys/kernel.h> 42 #include <sys/bus.h> 43 #include <sys/rman.h> 44 #include <sys/lock.h> 45 #include <sys/module.h> 46 #include <sys/malloc.h> 47 #include <sys/mutex.h> 48 #include <sys/queue.h> 49 50 #include <vm/vm.h> 51 52 #include <machine/bus.h> 53 #include <machine/resource.h> 54 55 #ifdef DEV_ACPI 56 #include <contrib/dev/acpica/include/acpi.h> 57 #include <dev/acpica/acpivar.h> 58 #endif 59 60 #ifdef FDT 61 #include <dev/ofw/openfirm.h> 62 #include <dev/ofw/ofw_bus.h> 63 #include <dev/ofw/ofw_bus_subr.h> 64 #include <dev/ofw/ofw_pci.h> 65 #endif 66 67 #include "pcib_if.h" 68 #include "pci_if.h" 69 70 #include "dpaa2_mc.h" 71 72 /* Macros to read/write MC registers */ 73 #define mcreg_read_4(_sc, _r) bus_read_4(&(_sc)->map[1], (_r)) 74 #define mcreg_write_4(_sc, _r, _v) bus_write_4(&(_sc)->map[1], (_r), (_v)) 75 76 #define IORT_DEVICE_NAME "MCE" 77 78 /* MC Registers */ 79 #define MC_REG_GCR1 0x0000u 80 #define MC_REG_GCR2 0x0004u /* TODO: Does it exist? */ 81 #define MC_REG_GSR 0x0008u 82 #define MC_REG_FAPR 0x0028u 83 84 /* General Control Register 1 (GCR1) */ 85 #define GCR1_P1_STOP 0x80000000u 86 #define GCR1_P2_STOP 0x40000000u 87 88 /* General Status Register (GSR) */ 89 #define GSR_HW_ERR(v) (((v) & 0x80000000u) >> 31) 90 #define GSR_CAT_ERR(v) (((v) & 0x40000000u) >> 30) 91 #define GSR_DPL_OFFSET(v) (((v) & 0x3FFFFF00u) >> 8) 92 #define GSR_MCS(v) (((v) & 0xFFu) >> 0) 93 94 /* Timeouts to wait for the MC status. */ 95 #define MC_STAT_TIMEOUT 1000u /* us */ 96 #define MC_STAT_ATTEMPTS 100u 97 98 /** 99 * @brief Structure to describe a DPAA2 device as a managed resource. 100 */ 101 struct dpaa2_mc_devinfo { 102 STAILQ_ENTRY(dpaa2_mc_devinfo) link; 103 device_t dpaa2_dev; 104 uint32_t flags; 105 uint32_t owners; 106 }; 107 108 MALLOC_DEFINE(M_DPAA2_MC, "dpaa2_mc", "DPAA2 Management Complex"); 109 110 static struct resource_spec dpaa2_mc_spec[] = { 111 { SYS_RES_MEMORY, 0, RF_ACTIVE | RF_UNMAPPED }, 112 { SYS_RES_MEMORY, 1, RF_ACTIVE | RF_UNMAPPED | RF_OPTIONAL }, 113 RESOURCE_SPEC_END 114 }; 115 116 static u_int dpaa2_mc_get_xref(device_t, device_t); 117 static u_int dpaa2_mc_map_id(device_t, device_t, uintptr_t *); 118 119 static int dpaa2_mc_alloc_msi_impl(device_t, device_t, int, int, int *); 120 static int dpaa2_mc_release_msi_impl(device_t, device_t, int, int *); 121 static int dpaa2_mc_map_msi_impl(device_t, device_t, int, uint64_t *, 122 uint32_t *); 123 124 /* 125 * For device interface. 126 */ 127 128 int 129 dpaa2_mc_attach(device_t dev) 130 { 131 struct dpaa2_mc_softc *sc; 132 struct resource_map_request req; 133 uint32_t val; 134 int error; 135 136 sc = device_get_softc(dev); 137 sc->dev = dev; 138 sc->msi_allocated = false; 139 sc->msi_owner = NULL; 140 141 error = bus_alloc_resources(sc->dev, dpaa2_mc_spec, sc->res); 142 if (error) { 143 device_printf(dev, "%s: failed to allocate resources\n", 144 __func__); 145 return (ENXIO); 146 } 147 148 if (sc->res[1]) { 149 resource_init_map_request(&req); 150 req.memattr = VM_MEMATTR_DEVICE; 151 error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[1], 152 &req, &sc->map[1]); 153 if (error) { 154 device_printf(dev, "%s: failed to map control " 155 "registers\n", __func__); 156 dpaa2_mc_detach(dev); 157 return (ENXIO); 158 } 159 160 if (bootverbose) 161 device_printf(dev, 162 "GCR1=0x%x, GCR2=0x%x, GSR=0x%x, FAPR=0x%x\n", 163 mcreg_read_4(sc, MC_REG_GCR1), 164 mcreg_read_4(sc, MC_REG_GCR2), 165 mcreg_read_4(sc, MC_REG_GSR), 166 mcreg_read_4(sc, MC_REG_FAPR)); 167 168 /* Reset P1_STOP and P2_STOP bits to resume MC processor. */ 169 val = mcreg_read_4(sc, MC_REG_GCR1) & 170 ~(GCR1_P1_STOP | GCR1_P2_STOP); 171 mcreg_write_4(sc, MC_REG_GCR1, val); 172 173 /* Poll MC status. */ 174 if (bootverbose) 175 device_printf(dev, "polling MC status...\n"); 176 for (int i = 0; i < MC_STAT_ATTEMPTS; i++) { 177 val = mcreg_read_4(sc, MC_REG_GSR); 178 if (GSR_MCS(val) != 0u) 179 break; 180 DELAY(MC_STAT_TIMEOUT); 181 } 182 183 if (bootverbose) 184 device_printf(dev, 185 "GCR1=0x%x, GCR2=0x%x, GSR=0x%x, FAPR=0x%x\n", 186 mcreg_read_4(sc, MC_REG_GCR1), 187 mcreg_read_4(sc, MC_REG_GCR2), 188 mcreg_read_4(sc, MC_REG_GSR), 189 mcreg_read_4(sc, MC_REG_FAPR)); 190 } 191 192 /* At least 64 bytes of the command portal should be available. */ 193 if (rman_get_size(sc->res[0]) < DPAA2_MCP_MEM_WIDTH) { 194 device_printf(dev, "%s: MC portal memory region too small: " 195 "%jd\n", __func__, rman_get_size(sc->res[0])); 196 dpaa2_mc_detach(dev); 197 return (ENXIO); 198 } 199 200 /* Map MC portal memory resource. */ 201 resource_init_map_request(&req); 202 req.memattr = VM_MEMATTR_DEVICE; 203 error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[0], 204 &req, &sc->map[0]); 205 if (error) { 206 device_printf(dev, "Failed to map MC portal memory\n"); 207 dpaa2_mc_detach(dev); 208 return (ENXIO); 209 } 210 211 /* Initialize a resource manager for the DPAA2 I/O objects. */ 212 sc->dpio_rman.rm_type = RMAN_ARRAY; 213 sc->dpio_rman.rm_descr = "DPAA2 DPIO objects"; 214 error = rman_init(&sc->dpio_rman); 215 if (error) { 216 device_printf(dev, "Failed to initialize a resource manager for " 217 "the DPAA2 I/O objects: error=%d\n", error); 218 dpaa2_mc_detach(dev); 219 return (ENXIO); 220 } 221 222 /* Initialize a resource manager for the DPAA2 buffer pools. */ 223 sc->dpbp_rman.rm_type = RMAN_ARRAY; 224 sc->dpbp_rman.rm_descr = "DPAA2 DPBP objects"; 225 error = rman_init(&sc->dpbp_rman); 226 if (error) { 227 device_printf(dev, "Failed to initialize a resource manager for " 228 "the DPAA2 buffer pools: error=%d\n", error); 229 dpaa2_mc_detach(dev); 230 return (ENXIO); 231 } 232 233 /* Initialize a resource manager for the DPAA2 concentrators. */ 234 sc->dpcon_rman.rm_type = RMAN_ARRAY; 235 sc->dpcon_rman.rm_descr = "DPAA2 DPCON objects"; 236 error = rman_init(&sc->dpcon_rman); 237 if (error) { 238 device_printf(dev, "Failed to initialize a resource manager for " 239 "the DPAA2 concentrators: error=%d\n", error); 240 dpaa2_mc_detach(dev); 241 return (ENXIO); 242 } 243 244 /* Initialize a resource manager for the DPAA2 MC portals. */ 245 sc->dpmcp_rman.rm_type = RMAN_ARRAY; 246 sc->dpmcp_rman.rm_descr = "DPAA2 DPMCP objects"; 247 error = rman_init(&sc->dpmcp_rman); 248 if (error) { 249 device_printf(dev, "Failed to initialize a resource manager for " 250 "the DPAA2 MC portals: error=%d\n", error); 251 dpaa2_mc_detach(dev); 252 return (ENXIO); 253 } 254 255 /* Initialize a list of non-allocatable DPAA2 devices. */ 256 mtx_init(&sc->mdev_lock, "MC portal mdev lock", NULL, MTX_DEF); 257 STAILQ_INIT(&sc->mdev_list); 258 259 mtx_init(&sc->msi_lock, "MC MSI lock", NULL, MTX_DEF); 260 261 /* 262 * Add a root resource container as the only child of the bus. All of 263 * the direct descendant containers will be attached to the root one 264 * instead of the MC device. 265 */ 266 sc->rcdev = device_add_child(dev, "dpaa2_rc", 0); 267 if (sc->rcdev == NULL) { 268 dpaa2_mc_detach(dev); 269 return (ENXIO); 270 } 271 bus_generic_probe(dev); 272 bus_generic_attach(dev); 273 274 return (0); 275 } 276 277 int 278 dpaa2_mc_detach(device_t dev) 279 { 280 struct dpaa2_mc_softc *sc; 281 struct dpaa2_devinfo *dinfo = NULL; 282 int error; 283 284 bus_generic_detach(dev); 285 286 sc = device_get_softc(dev); 287 if (sc->rcdev) 288 device_delete_child(dev, sc->rcdev); 289 bus_release_resources(dev, dpaa2_mc_spec, sc->res); 290 291 dinfo = device_get_ivars(dev); 292 if (dinfo) 293 free(dinfo, M_DPAA2_MC); 294 295 error = bus_generic_detach(dev); 296 if (error != 0) 297 return (error); 298 299 return (device_delete_children(dev)); 300 } 301 302 /* 303 * For bus interface. 304 */ 305 306 struct resource * 307 dpaa2_mc_alloc_resource(device_t mcdev, device_t child, int type, int *rid, 308 rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) 309 { 310 struct resource *res; 311 struct rman *rm; 312 int error; 313 314 rm = dpaa2_mc_rman(mcdev, type, flags); 315 if (rm == NULL) 316 return (bus_generic_alloc_resource(mcdev, child, type, rid, 317 start, end, count, flags)); 318 319 /* 320 * Skip managing DPAA2-specific resource. It must be provided to MC by 321 * calling DPAA2_MC_MANAGE_DEV() beforehand. 322 */ 323 if (type <= DPAA2_DEV_MC) { 324 error = rman_manage_region(rm, start, end); 325 if (error) { 326 device_printf(mcdev, "rman_manage_region() failed: " 327 "start=%#jx, end=%#jx, error=%d\n", start, end, 328 error); 329 goto fail; 330 } 331 } 332 333 res = bus_generic_rman_alloc_resource(mcdev, child, type, rid, start, 334 end, count, flags); 335 if (res == NULL) 336 goto fail; 337 return (res); 338 fail: 339 device_printf(mcdev, "%s() failed: type=%d, rid=%d, start=%#jx, " 340 "end=%#jx, count=%#jx, flags=%x\n", __func__, type, *rid, start, end, 341 count, flags); 342 return (NULL); 343 } 344 345 int 346 dpaa2_mc_adjust_resource(device_t mcdev, device_t child, int type, 347 struct resource *r, rman_res_t start, rman_res_t end) 348 { 349 struct rman *rm; 350 351 rm = dpaa2_mc_rman(mcdev, type, rman_get_flags(r)); 352 if (rm) 353 return (bus_generic_rman_adjust_resource(mcdev, child, type, r, 354 start, end)); 355 return (bus_generic_adjust_resource(mcdev, child, type, r, start, end)); 356 } 357 358 int 359 dpaa2_mc_release_resource(device_t mcdev, device_t child, int type, int rid, 360 struct resource *r) 361 { 362 struct rman *rm; 363 364 rm = dpaa2_mc_rman(mcdev, type, rman_get_flags(r)); 365 if (rm) 366 return (bus_generic_rman_release_resource(mcdev, child, type, 367 rid, r)); 368 return (bus_generic_release_resource(mcdev, child, type, rid, r)); 369 } 370 371 int 372 dpaa2_mc_activate_resource(device_t mcdev, device_t child, int type, int rid, 373 struct resource *r) 374 { 375 struct rman *rm; 376 377 rm = dpaa2_mc_rman(mcdev, type, rman_get_flags(r)); 378 if (rm) 379 return (bus_generic_rman_activate_resource(mcdev, child, type, 380 rid, r)); 381 return (bus_generic_activate_resource(mcdev, child, type, rid, r)); 382 } 383 384 int 385 dpaa2_mc_deactivate_resource(device_t mcdev, device_t child, int type, int rid, 386 struct resource *r) 387 { 388 struct rman *rm; 389 390 rm = dpaa2_mc_rman(mcdev, type, rman_get_flags(r)); 391 if (rm) 392 return (bus_generic_rman_deactivate_resource(mcdev, child, type, 393 rid, r)); 394 return (bus_generic_deactivate_resource(mcdev, child, type, rid, r)); 395 } 396 397 /* 398 * For pseudo-pcib interface. 399 */ 400 401 int 402 dpaa2_mc_alloc_msi(device_t mcdev, device_t child, int count, int maxcount, 403 int *irqs) 404 { 405 #if defined(INTRNG) 406 return (dpaa2_mc_alloc_msi_impl(mcdev, child, count, maxcount, irqs)); 407 #else 408 return (ENXIO); 409 #endif 410 } 411 412 int 413 dpaa2_mc_release_msi(device_t mcdev, device_t child, int count, int *irqs) 414 { 415 #if defined(INTRNG) 416 return (dpaa2_mc_release_msi_impl(mcdev, child, count, irqs)); 417 #else 418 return (ENXIO); 419 #endif 420 } 421 422 int 423 dpaa2_mc_map_msi(device_t mcdev, device_t child, int irq, uint64_t *addr, 424 uint32_t *data) 425 { 426 #if defined(INTRNG) 427 return (dpaa2_mc_map_msi_impl(mcdev, child, irq, addr, data)); 428 #else 429 return (ENXIO); 430 #endif 431 } 432 433 int 434 dpaa2_mc_get_id(device_t mcdev, device_t child, enum pci_id_type type, 435 uintptr_t *id) 436 { 437 struct dpaa2_devinfo *dinfo; 438 439 dinfo = device_get_ivars(child); 440 441 if (strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) 442 return (ENXIO); 443 444 if (type == PCI_ID_MSI) 445 return (dpaa2_mc_map_id(mcdev, child, id)); 446 447 *id = dinfo->icid; 448 return (0); 449 } 450 451 /* 452 * For DPAA2 Management Complex bus driver interface. 453 */ 454 455 int 456 dpaa2_mc_manage_dev(device_t mcdev, device_t dpaa2_dev, uint32_t flags) 457 { 458 struct dpaa2_mc_softc *sc; 459 struct dpaa2_devinfo *dinfo; 460 struct dpaa2_mc_devinfo *di; 461 struct rman *rm; 462 int error; 463 464 sc = device_get_softc(mcdev); 465 dinfo = device_get_ivars(dpaa2_dev); 466 467 if (!sc || !dinfo || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) 468 return (EINVAL); 469 470 di = malloc(sizeof(*di), M_DPAA2_MC, M_WAITOK | M_ZERO); 471 if (!di) 472 return (ENOMEM); 473 di->dpaa2_dev = dpaa2_dev; 474 di->flags = flags; 475 di->owners = 0; 476 477 /* Append a new managed DPAA2 device to the queue. */ 478 mtx_assert(&sc->mdev_lock, MA_NOTOWNED); 479 mtx_lock(&sc->mdev_lock); 480 STAILQ_INSERT_TAIL(&sc->mdev_list, di, link); 481 mtx_unlock(&sc->mdev_lock); 482 483 if (flags & DPAA2_MC_DEV_ALLOCATABLE) { 484 /* Select rman based on a type of the DPAA2 device. */ 485 rm = dpaa2_mc_rman(mcdev, dinfo->dtype, 0); 486 if (!rm) 487 return (ENOENT); 488 /* Manage DPAA2 device as an allocatable resource. */ 489 error = rman_manage_region(rm, (rman_res_t) dpaa2_dev, 490 (rman_res_t) dpaa2_dev); 491 if (error) 492 return (error); 493 } 494 495 return (0); 496 } 497 498 int 499 dpaa2_mc_get_free_dev(device_t mcdev, device_t *dpaa2_dev, 500 enum dpaa2_dev_type devtype) 501 { 502 struct rman *rm; 503 rman_res_t start, end; 504 int error; 505 506 if (strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) 507 return (EINVAL); 508 509 /* Select resource manager based on a type of the DPAA2 device. */ 510 rm = dpaa2_mc_rman(mcdev, devtype, 0); 511 if (!rm) 512 return (ENOENT); 513 /* Find first free DPAA2 device of the given type. */ 514 error = rman_first_free_region(rm, &start, &end); 515 if (error) 516 return (error); 517 518 KASSERT(start == end, ("start != end, but should be the same pointer " 519 "to the DPAA2 device: start=%jx, end=%jx", start, end)); 520 521 *dpaa2_dev = (device_t) start; 522 523 return (0); 524 } 525 526 int 527 dpaa2_mc_get_dev(device_t mcdev, device_t *dpaa2_dev, 528 enum dpaa2_dev_type devtype, uint32_t obj_id) 529 { 530 struct dpaa2_mc_softc *sc; 531 struct dpaa2_devinfo *dinfo; 532 struct dpaa2_mc_devinfo *di; 533 int error = ENOENT; 534 535 sc = device_get_softc(mcdev); 536 537 if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) 538 return (EINVAL); 539 540 mtx_assert(&sc->mdev_lock, MA_NOTOWNED); 541 mtx_lock(&sc->mdev_lock); 542 543 STAILQ_FOREACH(di, &sc->mdev_list, link) { 544 dinfo = device_get_ivars(di->dpaa2_dev); 545 if (dinfo->dtype == devtype && dinfo->id == obj_id) { 546 *dpaa2_dev = di->dpaa2_dev; 547 error = 0; 548 break; 549 } 550 } 551 552 mtx_unlock(&sc->mdev_lock); 553 554 return (error); 555 } 556 557 int 558 dpaa2_mc_get_shared_dev(device_t mcdev, device_t *dpaa2_dev, 559 enum dpaa2_dev_type devtype) 560 { 561 struct dpaa2_mc_softc *sc; 562 struct dpaa2_devinfo *dinfo; 563 struct dpaa2_mc_devinfo *di; 564 device_t dev = NULL; 565 uint32_t owners = UINT32_MAX; 566 int error = ENOENT; 567 568 sc = device_get_softc(mcdev); 569 570 if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) 571 return (EINVAL); 572 573 mtx_assert(&sc->mdev_lock, MA_NOTOWNED); 574 mtx_lock(&sc->mdev_lock); 575 576 STAILQ_FOREACH(di, &sc->mdev_list, link) { 577 dinfo = device_get_ivars(di->dpaa2_dev); 578 579 if ((dinfo->dtype == devtype) && 580 (di->flags & DPAA2_MC_DEV_SHAREABLE) && 581 (di->owners < owners)) { 582 dev = di->dpaa2_dev; 583 owners = di->owners; 584 } 585 } 586 if (dev) { 587 *dpaa2_dev = dev; 588 error = 0; 589 } 590 591 mtx_unlock(&sc->mdev_lock); 592 593 return (error); 594 } 595 596 int 597 dpaa2_mc_reserve_dev(device_t mcdev, device_t dpaa2_dev, 598 enum dpaa2_dev_type devtype) 599 { 600 struct dpaa2_mc_softc *sc; 601 struct dpaa2_mc_devinfo *di; 602 int error = ENOENT; 603 604 sc = device_get_softc(mcdev); 605 606 if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) 607 return (EINVAL); 608 609 mtx_assert(&sc->mdev_lock, MA_NOTOWNED); 610 mtx_lock(&sc->mdev_lock); 611 612 STAILQ_FOREACH(di, &sc->mdev_list, link) { 613 if (di->dpaa2_dev == dpaa2_dev && 614 (di->flags & DPAA2_MC_DEV_SHAREABLE)) { 615 di->owners++; 616 error = 0; 617 break; 618 } 619 } 620 621 mtx_unlock(&sc->mdev_lock); 622 623 return (error); 624 } 625 626 int 627 dpaa2_mc_release_dev(device_t mcdev, device_t dpaa2_dev, 628 enum dpaa2_dev_type devtype) 629 { 630 struct dpaa2_mc_softc *sc; 631 struct dpaa2_mc_devinfo *di; 632 int error = ENOENT; 633 634 sc = device_get_softc(mcdev); 635 636 if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) 637 return (EINVAL); 638 639 mtx_assert(&sc->mdev_lock, MA_NOTOWNED); 640 mtx_lock(&sc->mdev_lock); 641 642 STAILQ_FOREACH(di, &sc->mdev_list, link) { 643 if (di->dpaa2_dev == dpaa2_dev && 644 (di->flags & DPAA2_MC_DEV_SHAREABLE)) { 645 di->owners -= di->owners > 0 ? 1 : 0; 646 error = 0; 647 break; 648 } 649 } 650 651 mtx_unlock(&sc->mdev_lock); 652 653 return (error); 654 } 655 656 /** 657 * @internal 658 */ 659 static u_int 660 dpaa2_mc_get_xref(device_t mcdev, device_t child) 661 { 662 struct dpaa2_mc_softc *sc = device_get_softc(mcdev); 663 struct dpaa2_devinfo *dinfo = device_get_ivars(child); 664 #ifdef DEV_ACPI 665 u_int xref, devid; 666 #endif 667 #ifdef FDT 668 phandle_t msi_parent; 669 #endif 670 int error; 671 672 if (sc && dinfo) { 673 #ifdef DEV_ACPI 674 if (sc->acpi_based) { 675 /* 676 * NOTE: The first named component from the IORT table 677 * with the given name (as a substring) will be used. 678 */ 679 error = acpi_iort_map_named_msi(IORT_DEVICE_NAME, 680 dinfo->icid, &xref, &devid); 681 if (error) 682 return (0); 683 return (xref); 684 } 685 #endif 686 #ifdef FDT 687 if (!sc->acpi_based) { 688 /* FDT-based driver. */ 689 error = ofw_bus_msimap(sc->ofw_node, dinfo->icid, 690 &msi_parent, NULL); 691 if (error) 692 return (0); 693 return ((u_int) msi_parent); 694 } 695 #endif 696 } 697 return (0); 698 } 699 700 /** 701 * @internal 702 */ 703 static u_int 704 dpaa2_mc_map_id(device_t mcdev, device_t child, uintptr_t *id) 705 { 706 struct dpaa2_devinfo *dinfo; 707 #ifdef DEV_ACPI 708 u_int xref, devid; 709 int error; 710 #endif 711 712 dinfo = device_get_ivars(child); 713 if (dinfo) { 714 /* 715 * The first named components from IORT table with the given 716 * name (as a substring) will be used. 717 */ 718 #ifdef DEV_ACPI 719 error = acpi_iort_map_named_msi(IORT_DEVICE_NAME, dinfo->icid, 720 &xref, &devid); 721 if (error == 0) 722 *id = devid; 723 else 724 #endif 725 *id = dinfo->icid; /* RID not in IORT, likely FW bug */ 726 727 return (0); 728 } 729 return (ENXIO); 730 } 731 732 /** 733 * @internal 734 * @brief Obtain a resource manager based on the given type of the resource. 735 */ 736 struct rman * 737 dpaa2_mc_rman(device_t mcdev, int type, u_int flags) 738 { 739 struct dpaa2_mc_softc *sc; 740 741 sc = device_get_softc(mcdev); 742 743 switch (type) { 744 case DPAA2_DEV_IO: 745 return (&sc->dpio_rman); 746 case DPAA2_DEV_BP: 747 return (&sc->dpbp_rman); 748 case DPAA2_DEV_CON: 749 return (&sc->dpcon_rman); 750 case DPAA2_DEV_MCP: 751 return (&sc->dpmcp_rman); 752 default: 753 break; 754 } 755 756 return (NULL); 757 } 758 759 #if defined(INTRNG) && !defined(IOMMU) 760 761 /** 762 * @internal 763 * @brief Allocates requested number of MSIs. 764 * 765 * NOTE: This function is a part of fallback solution when IOMMU isn't available. 766 * Total number of IRQs is limited to 32. 767 */ 768 static int 769 dpaa2_mc_alloc_msi_impl(device_t mcdev, device_t child, int count, int maxcount, 770 int *irqs) 771 { 772 struct dpaa2_mc_softc *sc = device_get_softc(mcdev); 773 int msi_irqs[DPAA2_MC_MSI_COUNT]; 774 int error; 775 776 /* Pre-allocate a bunch of MSIs for MC to be used by its children. */ 777 if (!sc->msi_allocated) { 778 error = intr_alloc_msi(mcdev, child, dpaa2_mc_get_xref(mcdev, 779 child), DPAA2_MC_MSI_COUNT, DPAA2_MC_MSI_COUNT, msi_irqs); 780 if (error) { 781 device_printf(mcdev, "failed to pre-allocate %d MSIs: " 782 "error=%d\n", DPAA2_MC_MSI_COUNT, error); 783 return (error); 784 } 785 786 mtx_assert(&sc->msi_lock, MA_NOTOWNED); 787 mtx_lock(&sc->msi_lock); 788 for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { 789 sc->msi[i].child = NULL; 790 sc->msi[i].irq = msi_irqs[i]; 791 } 792 sc->msi_owner = child; 793 sc->msi_allocated = true; 794 mtx_unlock(&sc->msi_lock); 795 } 796 797 error = ENOENT; 798 799 /* Find the first free MSIs from the pre-allocated pool. */ 800 mtx_assert(&sc->msi_lock, MA_NOTOWNED); 801 mtx_lock(&sc->msi_lock); 802 for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { 803 if (sc->msi[i].child != NULL) 804 continue; 805 error = 0; 806 for (int j = 0; j < count; j++) { 807 if (i + j >= DPAA2_MC_MSI_COUNT) { 808 device_printf(mcdev, "requested %d MSIs exceed " 809 "limit of %d available\n", count, 810 DPAA2_MC_MSI_COUNT); 811 error = E2BIG; 812 break; 813 } 814 sc->msi[i + j].child = child; 815 irqs[j] = sc->msi[i + j].irq; 816 } 817 break; 818 } 819 mtx_unlock(&sc->msi_lock); 820 821 return (error); 822 } 823 824 /** 825 * @internal 826 * @brief Marks IRQs as free in the pre-allocated pool of MSIs. 827 * 828 * NOTE: This function is a part of fallback solution when IOMMU isn't available. 829 * Total number of IRQs is limited to 32. 830 * NOTE: MSIs are kept allocated in the kernel as a part of the pool. 831 */ 832 static int 833 dpaa2_mc_release_msi_impl(device_t mcdev, device_t child, int count, int *irqs) 834 { 835 struct dpaa2_mc_softc *sc = device_get_softc(mcdev); 836 837 mtx_assert(&sc->msi_lock, MA_NOTOWNED); 838 mtx_lock(&sc->msi_lock); 839 for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { 840 if (sc->msi[i].child != child) 841 continue; 842 for (int j = 0; j < count; j++) { 843 if (sc->msi[i].irq == irqs[j]) { 844 sc->msi[i].child = NULL; 845 break; 846 } 847 } 848 } 849 mtx_unlock(&sc->msi_lock); 850 851 return (0); 852 } 853 854 /** 855 * @internal 856 * @brief Provides address to write to and data according to the given MSI from 857 * the pre-allocated pool. 858 * 859 * NOTE: This function is a part of fallback solution when IOMMU isn't available. 860 * Total number of IRQs is limited to 32. 861 */ 862 static int 863 dpaa2_mc_map_msi_impl(device_t mcdev, device_t child, int irq, uint64_t *addr, 864 uint32_t *data) 865 { 866 struct dpaa2_mc_softc *sc = device_get_softc(mcdev); 867 int error = EINVAL; 868 869 mtx_assert(&sc->msi_lock, MA_NOTOWNED); 870 mtx_lock(&sc->msi_lock); 871 for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { 872 if (sc->msi[i].child == child && sc->msi[i].irq == irq) { 873 error = 0; 874 break; 875 } 876 } 877 mtx_unlock(&sc->msi_lock); 878 if (error) 879 return (error); 880 881 return (intr_map_msi(mcdev, sc->msi_owner, dpaa2_mc_get_xref(mcdev, 882 sc->msi_owner), irq, addr, data)); 883 } 884 885 #endif /* defined(INTRNG) && !defined(IOMMU) */ 886 887 static device_method_t dpaa2_mc_methods[] = { 888 DEVMETHOD_END 889 }; 890 891 DEFINE_CLASS_0(dpaa2_mc, dpaa2_mc_driver, dpaa2_mc_methods, 892 sizeof(struct dpaa2_mc_softc)); 893