1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright © 2021-2022 Dmitry Salychev 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 /* 32 * The DPAA2 Management Complex (MC) bus driver. 33 * 34 * MC is a hardware resource manager which can be found in several NXP 35 * SoCs (LX2160A, for example) and provides an access to the specialized 36 * hardware objects used in network-oriented packet processing applications. 37 */ 38 39 #include "opt_acpi.h" 40 #include "opt_platform.h" 41 42 #include <sys/param.h> 43 #include <sys/kernel.h> 44 #include <sys/bus.h> 45 #include <sys/rman.h> 46 #include <sys/lock.h> 47 #include <sys/module.h> 48 #include <sys/malloc.h> 49 #include <sys/mutex.h> 50 #include <sys/queue.h> 51 52 #include <vm/vm.h> 53 54 #include <machine/bus.h> 55 #include <machine/resource.h> 56 57 #ifdef DEV_ACPI 58 #include <contrib/dev/acpica/include/acpi.h> 59 #include <dev/acpica/acpivar.h> 60 #endif 61 62 #ifdef FDT 63 #include <dev/ofw/openfirm.h> 64 #include <dev/ofw/ofw_bus.h> 65 #include <dev/ofw/ofw_bus_subr.h> 66 #include <dev/ofw/ofw_pci.h> 67 #endif 68 69 #include "pcib_if.h" 70 #include "pci_if.h" 71 72 #include "dpaa2_mc.h" 73 74 /* Macros to read/write MC registers */ 75 #define mcreg_read_4(_sc, _r) bus_read_4(&(_sc)->map[1], (_r)) 76 #define mcreg_write_4(_sc, _r, _v) bus_write_4(&(_sc)->map[1], (_r), (_v)) 77 78 #define COMPARE_TYPE(t, v) (strncmp((v), (t), strlen((v))) == 0) 79 80 #define IORT_DEVICE_NAME "MCE" 81 82 /* MC Registers */ 83 #define MC_REG_GCR1 0x0000u 84 #define MC_REG_GCR2 0x0004u /* TODO: Does it exist? */ 85 #define MC_REG_GSR 0x0008u 86 #define MC_REG_FAPR 0x0028u 87 88 /* General Control Register 1 (GCR1) */ 89 #define GCR1_P1_STOP 0x80000000u 90 #define GCR1_P2_STOP 0x40000000u 91 92 /* General Status Register (GSR) */ 93 #define GSR_HW_ERR(v) (((v) & 0x80000000u) >> 31) 94 #define GSR_CAT_ERR(v) (((v) & 0x40000000u) >> 30) 95 #define GSR_DPL_OFFSET(v) (((v) & 0x3FFFFF00u) >> 8) 96 #define GSR_MCS(v) (((v) & 0xFFu) >> 0) 97 98 /* Timeouts to wait for the MC status. */ 99 #define MC_STAT_TIMEOUT 1000u /* us */ 100 #define MC_STAT_ATTEMPTS 100u 101 102 /** 103 * @brief Structure to describe a DPAA2 device as a managed resource. 104 */ 105 struct dpaa2_mc_devinfo { 106 STAILQ_ENTRY(dpaa2_mc_devinfo) link; 107 device_t dpaa2_dev; 108 uint32_t flags; 109 uint32_t owners; 110 }; 111 112 MALLOC_DEFINE(M_DPAA2_MC, "dpaa2_mc", "DPAA2 Management Complex"); 113 114 static struct resource_spec dpaa2_mc_spec[] = { 115 { SYS_RES_MEMORY, 0, RF_ACTIVE | RF_UNMAPPED }, 116 { SYS_RES_MEMORY, 1, RF_ACTIVE | RF_UNMAPPED | RF_OPTIONAL }, 117 RESOURCE_SPEC_END 118 }; 119 120 static u_int dpaa2_mc_get_xref(device_t, device_t); 121 static u_int dpaa2_mc_map_id(device_t, device_t, uintptr_t *); 122 static struct rman *dpaa2_mc_rman(device_t, int); 123 124 static int dpaa2_mc_alloc_msi_impl(device_t, device_t, int, int, int *); 125 static int dpaa2_mc_release_msi_impl(device_t, device_t, int, int *); 126 static int dpaa2_mc_map_msi_impl(device_t, device_t, int, uint64_t *, 127 uint32_t *); 128 129 /* 130 * For device interface. 131 */ 132 133 int 134 dpaa2_mc_attach(device_t dev) 135 { 136 struct dpaa2_mc_softc *sc; 137 struct resource_map_request req; 138 uint32_t val; 139 int error; 140 141 sc = device_get_softc(dev); 142 sc->dev = dev; 143 sc->msi_allocated = false; 144 sc->msi_owner = NULL; 145 146 error = bus_alloc_resources(sc->dev, dpaa2_mc_spec, sc->res); 147 if (error) { 148 device_printf(dev, "%s: failed to allocate resources\n", 149 __func__); 150 return (ENXIO); 151 } 152 153 if (sc->res[1]) { 154 resource_init_map_request(&req); 155 req.memattr = VM_MEMATTR_DEVICE; 156 error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[1], 157 &req, &sc->map[1]); 158 if (error) { 159 device_printf(dev, "%s: failed to map control " 160 "registers\n", __func__); 161 dpaa2_mc_detach(dev); 162 return (ENXIO); 163 } 164 165 if (bootverbose) 166 device_printf(dev, 167 "GCR1=0x%x, GCR2=0x%x, GSR=0x%x, FAPR=0x%x\n", 168 mcreg_read_4(sc, MC_REG_GCR1), 169 mcreg_read_4(sc, MC_REG_GCR2), 170 mcreg_read_4(sc, MC_REG_GSR), 171 mcreg_read_4(sc, MC_REG_FAPR)); 172 173 /* Reset P1_STOP and P2_STOP bits to resume MC processor. */ 174 val = mcreg_read_4(sc, MC_REG_GCR1) & 175 ~(GCR1_P1_STOP | GCR1_P2_STOP); 176 mcreg_write_4(sc, MC_REG_GCR1, val); 177 178 /* Poll MC status. */ 179 if (bootverbose) 180 device_printf(dev, "polling MC status...\n"); 181 for (int i = 0; i < MC_STAT_ATTEMPTS; i++) { 182 val = mcreg_read_4(sc, MC_REG_GSR); 183 if (GSR_MCS(val) != 0u) 184 break; 185 DELAY(MC_STAT_TIMEOUT); 186 } 187 188 if (bootverbose) 189 device_printf(dev, 190 "GCR1=0x%x, GCR2=0x%x, GSR=0x%x, FAPR=0x%x\n", 191 mcreg_read_4(sc, MC_REG_GCR1), 192 mcreg_read_4(sc, MC_REG_GCR2), 193 mcreg_read_4(sc, MC_REG_GSR), 194 mcreg_read_4(sc, MC_REG_FAPR)); 195 } 196 197 /* At least 64 bytes of the command portal should be available. */ 198 if (rman_get_size(sc->res[0]) < DPAA2_MCP_MEM_WIDTH) { 199 device_printf(dev, "%s: MC portal memory region too small: " 200 "%jd\n", __func__, rman_get_size(sc->res[0])); 201 dpaa2_mc_detach(dev); 202 return (ENXIO); 203 } 204 205 /* Map MC portal memory resource. */ 206 resource_init_map_request(&req); 207 req.memattr = VM_MEMATTR_DEVICE; 208 error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[0], 209 &req, &sc->map[0]); 210 if (error) { 211 device_printf(dev, "Failed to map MC portal memory\n"); 212 dpaa2_mc_detach(dev); 213 return (ENXIO); 214 } 215 216 /* Initialize a resource manager for the DPAA2 I/O objects. */ 217 sc->dpio_rman.rm_type = RMAN_ARRAY; 218 sc->dpio_rman.rm_descr = "DPAA2 DPIO objects"; 219 error = rman_init(&sc->dpio_rman); 220 if (error) { 221 device_printf(dev, "Failed to initialize a resource manager for " 222 "the DPAA2 I/O objects: error=%d\n", error); 223 dpaa2_mc_detach(dev); 224 return (ENXIO); 225 } 226 227 /* Initialize a resource manager for the DPAA2 buffer pools. */ 228 sc->dpbp_rman.rm_type = RMAN_ARRAY; 229 sc->dpbp_rman.rm_descr = "DPAA2 DPBP objects"; 230 error = rman_init(&sc->dpbp_rman); 231 if (error) { 232 device_printf(dev, "Failed to initialize a resource manager for " 233 "the DPAA2 buffer pools: error=%d\n", error); 234 dpaa2_mc_detach(dev); 235 return (ENXIO); 236 } 237 238 /* Initialize a resource manager for the DPAA2 concentrators. */ 239 sc->dpcon_rman.rm_type = RMAN_ARRAY; 240 sc->dpcon_rman.rm_descr = "DPAA2 DPCON objects"; 241 error = rman_init(&sc->dpcon_rman); 242 if (error) { 243 device_printf(dev, "Failed to initialize a resource manager for " 244 "the DPAA2 concentrators: error=%d\n", error); 245 dpaa2_mc_detach(dev); 246 return (ENXIO); 247 } 248 249 /* Initialize a resource manager for the DPAA2 MC portals. */ 250 sc->dpmcp_rman.rm_type = RMAN_ARRAY; 251 sc->dpmcp_rman.rm_descr = "DPAA2 DPMCP objects"; 252 error = rman_init(&sc->dpmcp_rman); 253 if (error) { 254 device_printf(dev, "Failed to initialize a resource manager for " 255 "the DPAA2 MC portals: error=%d\n", error); 256 dpaa2_mc_detach(dev); 257 return (ENXIO); 258 } 259 260 /* Initialize a list of non-allocatable DPAA2 devices. */ 261 mtx_init(&sc->mdev_lock, "MC portal mdev lock", NULL, MTX_DEF); 262 STAILQ_INIT(&sc->mdev_list); 263 264 mtx_init(&sc->msi_lock, "MC MSI lock", NULL, MTX_DEF); 265 266 /* 267 * Add a root resource container as the only child of the bus. All of 268 * the direct descendant containers will be attached to the root one 269 * instead of the MC device. 270 */ 271 sc->rcdev = device_add_child(dev, "dpaa2_rc", 0); 272 if (sc->rcdev == NULL) { 273 dpaa2_mc_detach(dev); 274 return (ENXIO); 275 } 276 bus_generic_probe(dev); 277 bus_generic_attach(dev); 278 279 return (0); 280 } 281 282 int 283 dpaa2_mc_detach(device_t dev) 284 { 285 struct dpaa2_mc_softc *sc; 286 struct dpaa2_devinfo *dinfo = NULL; 287 int error; 288 289 bus_generic_detach(dev); 290 291 sc = device_get_softc(dev); 292 if (sc->rcdev) 293 device_delete_child(dev, sc->rcdev); 294 bus_release_resources(dev, dpaa2_mc_spec, sc->res); 295 296 dinfo = device_get_ivars(dev); 297 if (dinfo) 298 free(dinfo, M_DPAA2_MC); 299 300 error = bus_generic_detach(dev); 301 if (error != 0) 302 return (error); 303 304 return (device_delete_children(dev)); 305 } 306 307 /* 308 * For bus interface. 309 */ 310 311 struct resource * 312 dpaa2_mc_alloc_resource(device_t mcdev, device_t child, int type, int *rid, 313 rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) 314 { 315 struct resource *res; 316 struct rman *rm; 317 int error; 318 319 rm = dpaa2_mc_rman(mcdev, type); 320 if (!rm) 321 return (BUS_ALLOC_RESOURCE(device_get_parent(mcdev), child, 322 type, rid, start, end, count, flags)); 323 324 /* 325 * Skip managing DPAA2-specific resource. It must be provided to MC by 326 * calling DPAA2_MC_MANAGE_DEV() beforehand. 327 */ 328 if (type <= DPAA2_DEV_MC) { 329 error = rman_manage_region(rm, start, end); 330 if (error) { 331 device_printf(mcdev, "rman_manage_region() failed: " 332 "start=%#jx, end=%#jx, error=%d\n", start, end, 333 error); 334 goto fail; 335 } 336 } 337 338 res = rman_reserve_resource(rm, start, end, count, flags, child); 339 if (!res) { 340 device_printf(mcdev, "rman_reserve_resource() failed: " 341 "start=%#jx, end=%#jx, count=%#jx\n", start, end, count); 342 goto fail; 343 } 344 345 rman_set_rid(res, *rid); 346 347 if (flags & RF_ACTIVE) { 348 if (bus_activate_resource(child, type, *rid, res)) { 349 device_printf(mcdev, "bus_activate_resource() failed: " 350 "rid=%d, res=%#jx\n", *rid, (uintmax_t) res); 351 rman_release_resource(res); 352 goto fail; 353 } 354 } 355 356 return (res); 357 fail: 358 device_printf(mcdev, "%s() failed: type=%d, rid=%d, start=%#jx, " 359 "end=%#jx, count=%#jx, flags=%x\n", __func__, type, *rid, start, end, 360 count, flags); 361 return (NULL); 362 } 363 364 int 365 dpaa2_mc_adjust_resource(device_t mcdev, device_t child, int type, 366 struct resource *r, rman_res_t start, rman_res_t end) 367 { 368 struct rman *rm; 369 370 rm = dpaa2_mc_rman(mcdev, type); 371 if (rm) 372 return (rman_adjust_resource(r, start, end)); 373 return (bus_generic_adjust_resource(mcdev, child, type, r, start, end)); 374 } 375 376 int 377 dpaa2_mc_release_resource(device_t mcdev, device_t child, int type, int rid, 378 struct resource *r) 379 { 380 struct rman *rm; 381 382 rm = dpaa2_mc_rman(mcdev, type); 383 if (rm) { 384 KASSERT(rman_is_region_manager(r, rm), ("rman mismatch")); 385 rman_release_resource(r); 386 } 387 388 return (bus_generic_release_resource(mcdev, child, type, rid, r)); 389 } 390 391 int 392 dpaa2_mc_activate_resource(device_t mcdev, device_t child, int type, int rid, 393 struct resource *r) 394 { 395 int rc; 396 397 if ((rc = rman_activate_resource(r)) != 0) 398 return (rc); 399 400 return (BUS_ACTIVATE_RESOURCE(device_get_parent(mcdev), child, type, 401 rid, r)); 402 } 403 404 int 405 dpaa2_mc_deactivate_resource(device_t mcdev, device_t child, int type, int rid, 406 struct resource *r) 407 { 408 int rc; 409 410 if ((rc = rman_deactivate_resource(r)) != 0) 411 return (rc); 412 413 return (BUS_DEACTIVATE_RESOURCE(device_get_parent(mcdev), child, type, 414 rid, r)); 415 } 416 417 /* 418 * For pseudo-pcib interface. 419 */ 420 421 int 422 dpaa2_mc_alloc_msi(device_t mcdev, device_t child, int count, int maxcount, 423 int *irqs) 424 { 425 #if defined(INTRNG) 426 return (dpaa2_mc_alloc_msi_impl(mcdev, child, count, maxcount, irqs)); 427 #else 428 return (ENXIO); 429 #endif 430 } 431 432 int 433 dpaa2_mc_release_msi(device_t mcdev, device_t child, int count, int *irqs) 434 { 435 #if defined(INTRNG) 436 return (dpaa2_mc_release_msi_impl(mcdev, child, count, irqs)); 437 #else 438 return (ENXIO); 439 #endif 440 } 441 442 int 443 dpaa2_mc_map_msi(device_t mcdev, device_t child, int irq, uint64_t *addr, 444 uint32_t *data) 445 { 446 #if defined(INTRNG) 447 return (dpaa2_mc_map_msi_impl(mcdev, child, irq, addr, data)); 448 #else 449 return (ENXIO); 450 #endif 451 } 452 453 int 454 dpaa2_mc_get_id(device_t mcdev, device_t child, enum pci_id_type type, 455 uintptr_t *id) 456 { 457 struct dpaa2_devinfo *dinfo; 458 459 dinfo = device_get_ivars(child); 460 461 if (strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) 462 return (ENXIO); 463 464 if (type == PCI_ID_MSI) 465 return (dpaa2_mc_map_id(mcdev, child, id)); 466 467 *id = dinfo->icid; 468 return (0); 469 } 470 471 /* 472 * For DPAA2 Management Complex bus driver interface. 473 */ 474 475 int 476 dpaa2_mc_manage_dev(device_t mcdev, device_t dpaa2_dev, uint32_t flags) 477 { 478 struct dpaa2_mc_softc *sc; 479 struct dpaa2_devinfo *dinfo; 480 struct dpaa2_mc_devinfo *di; 481 struct rman *rm; 482 int error; 483 484 sc = device_get_softc(mcdev); 485 dinfo = device_get_ivars(dpaa2_dev); 486 487 if (!sc || !dinfo || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) 488 return (EINVAL); 489 490 di = malloc(sizeof(*di), M_DPAA2_MC, M_WAITOK | M_ZERO); 491 if (!di) 492 return (ENOMEM); 493 di->dpaa2_dev = dpaa2_dev; 494 di->flags = flags; 495 di->owners = 0; 496 497 /* Append a new managed DPAA2 device to the queue. */ 498 mtx_assert(&sc->mdev_lock, MA_NOTOWNED); 499 mtx_lock(&sc->mdev_lock); 500 STAILQ_INSERT_TAIL(&sc->mdev_list, di, link); 501 mtx_unlock(&sc->mdev_lock); 502 503 if (flags & DPAA2_MC_DEV_ALLOCATABLE) { 504 /* Select rman based on a type of the DPAA2 device. */ 505 rm = dpaa2_mc_rman(mcdev, dinfo->dtype); 506 if (!rm) 507 return (ENOENT); 508 /* Manage DPAA2 device as an allocatable resource. */ 509 error = rman_manage_region(rm, (rman_res_t) dpaa2_dev, 510 (rman_res_t) dpaa2_dev); 511 if (error) 512 return (error); 513 } 514 515 return (0); 516 } 517 518 int 519 dpaa2_mc_get_free_dev(device_t mcdev, device_t *dpaa2_dev, 520 enum dpaa2_dev_type devtype) 521 { 522 struct rman *rm; 523 rman_res_t start, end; 524 int error; 525 526 if (strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) 527 return (EINVAL); 528 529 /* Select resource manager based on a type of the DPAA2 device. */ 530 rm = dpaa2_mc_rman(mcdev, devtype); 531 if (!rm) 532 return (ENOENT); 533 /* Find first free DPAA2 device of the given type. */ 534 error = rman_first_free_region(rm, &start, &end); 535 if (error) 536 return (error); 537 538 KASSERT(start == end, ("start != end, but should be the same pointer " 539 "to the DPAA2 device: start=%jx, end=%jx", start, end)); 540 541 *dpaa2_dev = (device_t) start; 542 543 return (0); 544 } 545 546 int 547 dpaa2_mc_get_dev(device_t mcdev, device_t *dpaa2_dev, 548 enum dpaa2_dev_type devtype, uint32_t obj_id) 549 { 550 struct dpaa2_mc_softc *sc; 551 struct dpaa2_devinfo *dinfo; 552 struct dpaa2_mc_devinfo *di; 553 int error = ENOENT; 554 555 sc = device_get_softc(mcdev); 556 557 if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) 558 return (EINVAL); 559 560 mtx_assert(&sc->mdev_lock, MA_NOTOWNED); 561 mtx_lock(&sc->mdev_lock); 562 563 STAILQ_FOREACH(di, &sc->mdev_list, link) { 564 dinfo = device_get_ivars(di->dpaa2_dev); 565 if (dinfo->dtype == devtype && dinfo->id == obj_id) { 566 *dpaa2_dev = di->dpaa2_dev; 567 error = 0; 568 break; 569 } 570 } 571 572 mtx_unlock(&sc->mdev_lock); 573 574 return (error); 575 } 576 577 int 578 dpaa2_mc_get_shared_dev(device_t mcdev, device_t *dpaa2_dev, 579 enum dpaa2_dev_type devtype) 580 { 581 struct dpaa2_mc_softc *sc; 582 struct dpaa2_devinfo *dinfo; 583 struct dpaa2_mc_devinfo *di; 584 device_t dev = NULL; 585 uint32_t owners = UINT32_MAX; 586 int error = ENOENT; 587 588 sc = device_get_softc(mcdev); 589 590 if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) 591 return (EINVAL); 592 593 mtx_assert(&sc->mdev_lock, MA_NOTOWNED); 594 mtx_lock(&sc->mdev_lock); 595 596 STAILQ_FOREACH(di, &sc->mdev_list, link) { 597 dinfo = device_get_ivars(di->dpaa2_dev); 598 599 if ((dinfo->dtype == devtype) && 600 (di->flags & DPAA2_MC_DEV_SHAREABLE) && 601 (di->owners < owners)) { 602 dev = di->dpaa2_dev; 603 owners = di->owners; 604 } 605 } 606 if (dev) { 607 *dpaa2_dev = dev; 608 error = 0; 609 } 610 611 mtx_unlock(&sc->mdev_lock); 612 613 return (error); 614 } 615 616 int 617 dpaa2_mc_reserve_dev(device_t mcdev, device_t dpaa2_dev, 618 enum dpaa2_dev_type devtype) 619 { 620 struct dpaa2_mc_softc *sc; 621 struct dpaa2_mc_devinfo *di; 622 int error = ENOENT; 623 624 sc = device_get_softc(mcdev); 625 626 if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) 627 return (EINVAL); 628 629 mtx_assert(&sc->mdev_lock, MA_NOTOWNED); 630 mtx_lock(&sc->mdev_lock); 631 632 STAILQ_FOREACH(di, &sc->mdev_list, link) { 633 if (di->dpaa2_dev == dpaa2_dev && 634 (di->flags & DPAA2_MC_DEV_SHAREABLE)) { 635 di->owners++; 636 error = 0; 637 break; 638 } 639 } 640 641 mtx_unlock(&sc->mdev_lock); 642 643 return (error); 644 } 645 646 int 647 dpaa2_mc_release_dev(device_t mcdev, device_t dpaa2_dev, 648 enum dpaa2_dev_type devtype) 649 { 650 struct dpaa2_mc_softc *sc; 651 struct dpaa2_mc_devinfo *di; 652 int error = ENOENT; 653 654 sc = device_get_softc(mcdev); 655 656 if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) 657 return (EINVAL); 658 659 mtx_assert(&sc->mdev_lock, MA_NOTOWNED); 660 mtx_lock(&sc->mdev_lock); 661 662 STAILQ_FOREACH(di, &sc->mdev_list, link) { 663 if (di->dpaa2_dev == dpaa2_dev && 664 (di->flags & DPAA2_MC_DEV_SHAREABLE)) { 665 di->owners -= di->owners > 0 ? 1 : 0; 666 error = 0; 667 break; 668 } 669 } 670 671 mtx_unlock(&sc->mdev_lock); 672 673 return (error); 674 } 675 676 /** 677 * @brief Convert DPAA2 device type to string. 678 */ 679 const char * 680 dpaa2_ttos(enum dpaa2_dev_type type) 681 { 682 switch (type) { 683 case DPAA2_DEV_MC: 684 return ("mc"); /* NOTE: to print as information only. */ 685 case DPAA2_DEV_RC: 686 return ("dprc"); 687 case DPAA2_DEV_IO: 688 return ("dpio"); 689 case DPAA2_DEV_NI: 690 return ("dpni"); 691 case DPAA2_DEV_MCP: 692 return ("dpmcp"); 693 case DPAA2_DEV_BP: 694 return ("dpbp"); 695 case DPAA2_DEV_CON: 696 return ("dpcon"); 697 case DPAA2_DEV_MAC: 698 return ("dpmac"); 699 case DPAA2_DEV_MUX: 700 return ("dpdmux"); 701 case DPAA2_DEV_SW: 702 return ("dpsw"); 703 default: 704 break; 705 } 706 return ("notype"); 707 } 708 709 /** 710 * @brief Convert string to DPAA2 device type. 711 */ 712 enum dpaa2_dev_type 713 dpaa2_stot(const char *str) 714 { 715 if (COMPARE_TYPE(str, "dprc")) { 716 return (DPAA2_DEV_RC); 717 } else if (COMPARE_TYPE(str, "dpio")) { 718 return (DPAA2_DEV_IO); 719 } else if (COMPARE_TYPE(str, "dpni")) { 720 return (DPAA2_DEV_NI); 721 } else if (COMPARE_TYPE(str, "dpmcp")) { 722 return (DPAA2_DEV_MCP); 723 } else if (COMPARE_TYPE(str, "dpbp")) { 724 return (DPAA2_DEV_BP); 725 } else if (COMPARE_TYPE(str, "dpcon")) { 726 return (DPAA2_DEV_CON); 727 } else if (COMPARE_TYPE(str, "dpmac")) { 728 return (DPAA2_DEV_MAC); 729 } else if (COMPARE_TYPE(str, "dpdmux")) { 730 return (DPAA2_DEV_MUX); 731 } else if (COMPARE_TYPE(str, "dpsw")) { 732 return (DPAA2_DEV_SW); 733 } 734 735 return (DPAA2_DEV_NOTYPE); 736 } 737 738 /** 739 * @internal 740 */ 741 static u_int 742 dpaa2_mc_get_xref(device_t mcdev, device_t child) 743 { 744 struct dpaa2_mc_softc *sc = device_get_softc(mcdev); 745 struct dpaa2_devinfo *dinfo = device_get_ivars(child); 746 #ifdef DEV_ACPI 747 u_int xref, devid; 748 #endif 749 #ifdef FDT 750 phandle_t msi_parent; 751 #endif 752 int error; 753 754 if (sc && dinfo) { 755 #ifdef DEV_ACPI 756 if (sc->acpi_based) { 757 /* 758 * NOTE: The first named component from the IORT table 759 * with the given name (as a substring) will be used. 760 */ 761 error = acpi_iort_map_named_msi(IORT_DEVICE_NAME, 762 dinfo->icid, &xref, &devid); 763 if (error) 764 return (0); 765 return (xref); 766 } 767 #endif 768 #ifdef FDT 769 if (!sc->acpi_based) { 770 /* FDT-based driver. */ 771 error = ofw_bus_msimap(sc->ofw_node, dinfo->icid, 772 &msi_parent, NULL); 773 if (error) 774 return (0); 775 return ((u_int) msi_parent); 776 } 777 #endif 778 } 779 return (0); 780 } 781 782 /** 783 * @internal 784 */ 785 static u_int 786 dpaa2_mc_map_id(device_t mcdev, device_t child, uintptr_t *id) 787 { 788 struct dpaa2_devinfo *dinfo; 789 #ifdef DEV_ACPI 790 u_int xref, devid; 791 int error; 792 #endif 793 794 dinfo = device_get_ivars(child); 795 if (dinfo) { 796 /* 797 * The first named components from IORT table with the given 798 * name (as a substring) will be used. 799 */ 800 #ifdef DEV_ACPI 801 error = acpi_iort_map_named_msi(IORT_DEVICE_NAME, dinfo->icid, 802 &xref, &devid); 803 if (error == 0) 804 *id = devid; 805 else 806 #endif 807 *id = dinfo->icid; /* RID not in IORT, likely FW bug */ 808 809 return (0); 810 } 811 return (ENXIO); 812 } 813 814 /** 815 * @internal 816 * @brief Obtain a resource manager based on the given type of the resource. 817 */ 818 static struct rman * 819 dpaa2_mc_rman(device_t mcdev, int type) 820 { 821 struct dpaa2_mc_softc *sc; 822 823 sc = device_get_softc(mcdev); 824 825 switch (type) { 826 case DPAA2_DEV_IO: 827 return (&sc->dpio_rman); 828 case DPAA2_DEV_BP: 829 return (&sc->dpbp_rman); 830 case DPAA2_DEV_CON: 831 return (&sc->dpcon_rman); 832 case DPAA2_DEV_MCP: 833 return (&sc->dpmcp_rman); 834 default: 835 break; 836 } 837 838 return (NULL); 839 } 840 841 #if defined(INTRNG) && !defined(IOMMU) 842 843 /** 844 * @internal 845 * @brief Allocates requested number of MSIs. 846 * 847 * NOTE: This function is a part of fallback solution when IOMMU isn't available. 848 * Total number of IRQs is limited to 32. 849 */ 850 static int 851 dpaa2_mc_alloc_msi_impl(device_t mcdev, device_t child, int count, int maxcount, 852 int *irqs) 853 { 854 struct dpaa2_mc_softc *sc = device_get_softc(mcdev); 855 int msi_irqs[DPAA2_MC_MSI_COUNT]; 856 int error; 857 858 /* Pre-allocate a bunch of MSIs for MC to be used by its children. */ 859 if (!sc->msi_allocated) { 860 error = intr_alloc_msi(mcdev, child, dpaa2_mc_get_xref(mcdev, 861 child), DPAA2_MC_MSI_COUNT, DPAA2_MC_MSI_COUNT, msi_irqs); 862 if (error) { 863 device_printf(mcdev, "failed to pre-allocate %d MSIs: " 864 "error=%d\n", DPAA2_MC_MSI_COUNT, error); 865 return (error); 866 } 867 868 mtx_assert(&sc->msi_lock, MA_NOTOWNED); 869 mtx_lock(&sc->msi_lock); 870 for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { 871 sc->msi[i].child = NULL; 872 sc->msi[i].irq = msi_irqs[i]; 873 } 874 sc->msi_owner = child; 875 sc->msi_allocated = true; 876 mtx_unlock(&sc->msi_lock); 877 } 878 879 error = ENOENT; 880 881 /* Find the first free MSIs from the pre-allocated pool. */ 882 mtx_assert(&sc->msi_lock, MA_NOTOWNED); 883 mtx_lock(&sc->msi_lock); 884 for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { 885 if (sc->msi[i].child != NULL) 886 continue; 887 error = 0; 888 for (int j = 0; j < count; j++) { 889 if (i + j >= DPAA2_MC_MSI_COUNT) { 890 device_printf(mcdev, "requested %d MSIs exceed " 891 "limit of %d available\n", count, 892 DPAA2_MC_MSI_COUNT); 893 error = E2BIG; 894 break; 895 } 896 sc->msi[i + j].child = child; 897 irqs[j] = sc->msi[i + j].irq; 898 } 899 break; 900 } 901 mtx_unlock(&sc->msi_lock); 902 903 return (error); 904 } 905 906 /** 907 * @internal 908 * @brief Marks IRQs as free in the pre-allocated pool of MSIs. 909 * 910 * NOTE: This function is a part of fallback solution when IOMMU isn't available. 911 * Total number of IRQs is limited to 32. 912 * NOTE: MSIs are kept allocated in the kernel as a part of the pool. 913 */ 914 static int 915 dpaa2_mc_release_msi_impl(device_t mcdev, device_t child, int count, int *irqs) 916 { 917 struct dpaa2_mc_softc *sc = device_get_softc(mcdev); 918 919 mtx_assert(&sc->msi_lock, MA_NOTOWNED); 920 mtx_lock(&sc->msi_lock); 921 for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { 922 if (sc->msi[i].child != child) 923 continue; 924 for (int j = 0; j < count; j++) { 925 if (sc->msi[i].irq == irqs[j]) { 926 sc->msi[i].child = NULL; 927 break; 928 } 929 } 930 } 931 mtx_unlock(&sc->msi_lock); 932 933 return (0); 934 } 935 936 /** 937 * @internal 938 * @brief Provides address to write to and data according to the given MSI from 939 * the pre-allocated pool. 940 * 941 * NOTE: This function is a part of fallback solution when IOMMU isn't available. 942 * Total number of IRQs is limited to 32. 943 */ 944 static int 945 dpaa2_mc_map_msi_impl(device_t mcdev, device_t child, int irq, uint64_t *addr, 946 uint32_t *data) 947 { 948 struct dpaa2_mc_softc *sc = device_get_softc(mcdev); 949 int error = EINVAL; 950 951 mtx_assert(&sc->msi_lock, MA_NOTOWNED); 952 mtx_lock(&sc->msi_lock); 953 for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { 954 if (sc->msi[i].child == child && sc->msi[i].irq == irq) { 955 error = 0; 956 break; 957 } 958 } 959 mtx_unlock(&sc->msi_lock); 960 if (error) 961 return (error); 962 963 return (intr_map_msi(mcdev, sc->msi_owner, dpaa2_mc_get_xref(mcdev, 964 sc->msi_owner), irq, addr, data)); 965 } 966 967 #endif /* defined(INTRNG) && !defined(IOMMU) */ 968 969 static device_method_t dpaa2_mc_methods[] = { 970 DEVMETHOD_END 971 }; 972 973 DEFINE_CLASS_0(dpaa2_mc, dpaa2_mc_driver, dpaa2_mc_methods, 974 sizeof(struct dpaa2_mc_softc)); 975