1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright © 2021-2022 Dmitry Salychev 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 /* 30 * The DPAA2 Management Complex (MC) bus driver. 31 * 32 * MC is a hardware resource manager which can be found in several NXP 33 * SoCs (LX2160A, for example) and provides an access to the specialized 34 * hardware objects used in network-oriented packet processing applications. 35 */ 36 37 #include "opt_acpi.h" 38 #include "opt_platform.h" 39 40 #include <sys/param.h> 41 #include <sys/kernel.h> 42 #include <sys/bus.h> 43 #include <sys/rman.h> 44 #include <sys/lock.h> 45 #include <sys/module.h> 46 #include <sys/malloc.h> 47 #include <sys/mutex.h> 48 #include <sys/queue.h> 49 50 #include <vm/vm.h> 51 52 #include <machine/bus.h> 53 #include <machine/resource.h> 54 55 #ifdef DEV_ACPI 56 #include <contrib/dev/acpica/include/acpi.h> 57 #include <dev/acpica/acpivar.h> 58 #endif 59 60 #ifdef FDT 61 #include <dev/ofw/openfirm.h> 62 #include <dev/ofw/ofw_bus.h> 63 #include <dev/ofw/ofw_bus_subr.h> 64 #include <dev/ofw/ofw_pci.h> 65 #endif 66 67 #include "pcib_if.h" 68 #include "pci_if.h" 69 70 #include "dpaa2_mc.h" 71 72 /* Macros to read/write MC registers */ 73 #define mcreg_read_4(_sc, _r) bus_read_4(&(_sc)->map[1], (_r)) 74 #define mcreg_write_4(_sc, _r, _v) bus_write_4(&(_sc)->map[1], (_r), (_v)) 75 76 #define COMPARE_TYPE(t, v) (strncmp((v), (t), strlen((v))) == 0) 77 78 #define IORT_DEVICE_NAME "MCE" 79 80 /* MC Registers */ 81 #define MC_REG_GCR1 0x0000u 82 #define MC_REG_GCR2 0x0004u /* TODO: Does it exist? */ 83 #define MC_REG_GSR 0x0008u 84 #define MC_REG_FAPR 0x0028u 85 86 /* General Control Register 1 (GCR1) */ 87 #define GCR1_P1_STOP 0x80000000u 88 #define GCR1_P2_STOP 0x40000000u 89 90 /* General Status Register (GSR) */ 91 #define GSR_HW_ERR(v) (((v) & 0x80000000u) >> 31) 92 #define GSR_CAT_ERR(v) (((v) & 0x40000000u) >> 30) 93 #define GSR_DPL_OFFSET(v) (((v) & 0x3FFFFF00u) >> 8) 94 #define GSR_MCS(v) (((v) & 0xFFu) >> 0) 95 96 /* Timeouts to wait for the MC status. */ 97 #define MC_STAT_TIMEOUT 1000u /* us */ 98 #define MC_STAT_ATTEMPTS 100u 99 100 /** 101 * @brief Structure to describe a DPAA2 device as a managed resource. 102 */ 103 struct dpaa2_mc_devinfo { 104 STAILQ_ENTRY(dpaa2_mc_devinfo) link; 105 device_t dpaa2_dev; 106 uint32_t flags; 107 uint32_t owners; 108 }; 109 110 MALLOC_DEFINE(M_DPAA2_MC, "dpaa2_mc", "DPAA2 Management Complex"); 111 112 static struct resource_spec dpaa2_mc_spec[] = { 113 { SYS_RES_MEMORY, 0, RF_ACTIVE | RF_UNMAPPED }, 114 { SYS_RES_MEMORY, 1, RF_ACTIVE | RF_UNMAPPED | RF_OPTIONAL }, 115 RESOURCE_SPEC_END 116 }; 117 118 static u_int dpaa2_mc_get_xref(device_t, device_t); 119 static u_int dpaa2_mc_map_id(device_t, device_t, uintptr_t *); 120 static struct rman *dpaa2_mc_rman(device_t, int); 121 122 static int dpaa2_mc_alloc_msi_impl(device_t, device_t, int, int, int *); 123 static int dpaa2_mc_release_msi_impl(device_t, device_t, int, int *); 124 static int dpaa2_mc_map_msi_impl(device_t, device_t, int, uint64_t *, 125 uint32_t *); 126 127 /* 128 * For device interface. 129 */ 130 131 int 132 dpaa2_mc_attach(device_t dev) 133 { 134 struct dpaa2_mc_softc *sc; 135 struct resource_map_request req; 136 uint32_t val; 137 int error; 138 139 sc = device_get_softc(dev); 140 sc->dev = dev; 141 sc->msi_allocated = false; 142 sc->msi_owner = NULL; 143 144 error = bus_alloc_resources(sc->dev, dpaa2_mc_spec, sc->res); 145 if (error) { 146 device_printf(dev, "%s: failed to allocate resources\n", 147 __func__); 148 return (ENXIO); 149 } 150 151 if (sc->res[1]) { 152 resource_init_map_request(&req); 153 req.memattr = VM_MEMATTR_DEVICE; 154 error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[1], 155 &req, &sc->map[1]); 156 if (error) { 157 device_printf(dev, "%s: failed to map control " 158 "registers\n", __func__); 159 dpaa2_mc_detach(dev); 160 return (ENXIO); 161 } 162 163 if (bootverbose) 164 device_printf(dev, 165 "GCR1=0x%x, GCR2=0x%x, GSR=0x%x, FAPR=0x%x\n", 166 mcreg_read_4(sc, MC_REG_GCR1), 167 mcreg_read_4(sc, MC_REG_GCR2), 168 mcreg_read_4(sc, MC_REG_GSR), 169 mcreg_read_4(sc, MC_REG_FAPR)); 170 171 /* Reset P1_STOP and P2_STOP bits to resume MC processor. */ 172 val = mcreg_read_4(sc, MC_REG_GCR1) & 173 ~(GCR1_P1_STOP | GCR1_P2_STOP); 174 mcreg_write_4(sc, MC_REG_GCR1, val); 175 176 /* Poll MC status. */ 177 if (bootverbose) 178 device_printf(dev, "polling MC status...\n"); 179 for (int i = 0; i < MC_STAT_ATTEMPTS; i++) { 180 val = mcreg_read_4(sc, MC_REG_GSR); 181 if (GSR_MCS(val) != 0u) 182 break; 183 DELAY(MC_STAT_TIMEOUT); 184 } 185 186 if (bootverbose) 187 device_printf(dev, 188 "GCR1=0x%x, GCR2=0x%x, GSR=0x%x, FAPR=0x%x\n", 189 mcreg_read_4(sc, MC_REG_GCR1), 190 mcreg_read_4(sc, MC_REG_GCR2), 191 mcreg_read_4(sc, MC_REG_GSR), 192 mcreg_read_4(sc, MC_REG_FAPR)); 193 } 194 195 /* At least 64 bytes of the command portal should be available. */ 196 if (rman_get_size(sc->res[0]) < DPAA2_MCP_MEM_WIDTH) { 197 device_printf(dev, "%s: MC portal memory region too small: " 198 "%jd\n", __func__, rman_get_size(sc->res[0])); 199 dpaa2_mc_detach(dev); 200 return (ENXIO); 201 } 202 203 /* Map MC portal memory resource. */ 204 resource_init_map_request(&req); 205 req.memattr = VM_MEMATTR_DEVICE; 206 error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[0], 207 &req, &sc->map[0]); 208 if (error) { 209 device_printf(dev, "Failed to map MC portal memory\n"); 210 dpaa2_mc_detach(dev); 211 return (ENXIO); 212 } 213 214 /* Initialize a resource manager for the DPAA2 I/O objects. */ 215 sc->dpio_rman.rm_type = RMAN_ARRAY; 216 sc->dpio_rman.rm_descr = "DPAA2 DPIO objects"; 217 error = rman_init(&sc->dpio_rman); 218 if (error) { 219 device_printf(dev, "Failed to initialize a resource manager for " 220 "the DPAA2 I/O objects: error=%d\n", error); 221 dpaa2_mc_detach(dev); 222 return (ENXIO); 223 } 224 225 /* Initialize a resource manager for the DPAA2 buffer pools. */ 226 sc->dpbp_rman.rm_type = RMAN_ARRAY; 227 sc->dpbp_rman.rm_descr = "DPAA2 DPBP objects"; 228 error = rman_init(&sc->dpbp_rman); 229 if (error) { 230 device_printf(dev, "Failed to initialize a resource manager for " 231 "the DPAA2 buffer pools: error=%d\n", error); 232 dpaa2_mc_detach(dev); 233 return (ENXIO); 234 } 235 236 /* Initialize a resource manager for the DPAA2 concentrators. */ 237 sc->dpcon_rman.rm_type = RMAN_ARRAY; 238 sc->dpcon_rman.rm_descr = "DPAA2 DPCON objects"; 239 error = rman_init(&sc->dpcon_rman); 240 if (error) { 241 device_printf(dev, "Failed to initialize a resource manager for " 242 "the DPAA2 concentrators: error=%d\n", error); 243 dpaa2_mc_detach(dev); 244 return (ENXIO); 245 } 246 247 /* Initialize a resource manager for the DPAA2 MC portals. */ 248 sc->dpmcp_rman.rm_type = RMAN_ARRAY; 249 sc->dpmcp_rman.rm_descr = "DPAA2 DPMCP objects"; 250 error = rman_init(&sc->dpmcp_rman); 251 if (error) { 252 device_printf(dev, "Failed to initialize a resource manager for " 253 "the DPAA2 MC portals: error=%d\n", error); 254 dpaa2_mc_detach(dev); 255 return (ENXIO); 256 } 257 258 /* Initialize a list of non-allocatable DPAA2 devices. */ 259 mtx_init(&sc->mdev_lock, "MC portal mdev lock", NULL, MTX_DEF); 260 STAILQ_INIT(&sc->mdev_list); 261 262 mtx_init(&sc->msi_lock, "MC MSI lock", NULL, MTX_DEF); 263 264 /* 265 * Add a root resource container as the only child of the bus. All of 266 * the direct descendant containers will be attached to the root one 267 * instead of the MC device. 268 */ 269 sc->rcdev = device_add_child(dev, "dpaa2_rc", 0); 270 if (sc->rcdev == NULL) { 271 dpaa2_mc_detach(dev); 272 return (ENXIO); 273 } 274 bus_generic_probe(dev); 275 bus_generic_attach(dev); 276 277 return (0); 278 } 279 280 int 281 dpaa2_mc_detach(device_t dev) 282 { 283 struct dpaa2_mc_softc *sc; 284 struct dpaa2_devinfo *dinfo = NULL; 285 int error; 286 287 bus_generic_detach(dev); 288 289 sc = device_get_softc(dev); 290 if (sc->rcdev) 291 device_delete_child(dev, sc->rcdev); 292 bus_release_resources(dev, dpaa2_mc_spec, sc->res); 293 294 dinfo = device_get_ivars(dev); 295 if (dinfo) 296 free(dinfo, M_DPAA2_MC); 297 298 error = bus_generic_detach(dev); 299 if (error != 0) 300 return (error); 301 302 return (device_delete_children(dev)); 303 } 304 305 /* 306 * For bus interface. 307 */ 308 309 struct resource * 310 dpaa2_mc_alloc_resource(device_t mcdev, device_t child, int type, int *rid, 311 rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) 312 { 313 struct resource *res; 314 struct rman *rm; 315 int error; 316 317 rm = dpaa2_mc_rman(mcdev, type); 318 if (!rm) 319 return (BUS_ALLOC_RESOURCE(device_get_parent(mcdev), child, 320 type, rid, start, end, count, flags)); 321 322 /* 323 * Skip managing DPAA2-specific resource. It must be provided to MC by 324 * calling DPAA2_MC_MANAGE_DEV() beforehand. 325 */ 326 if (type <= DPAA2_DEV_MC) { 327 error = rman_manage_region(rm, start, end); 328 if (error) { 329 device_printf(mcdev, "rman_manage_region() failed: " 330 "start=%#jx, end=%#jx, error=%d\n", start, end, 331 error); 332 goto fail; 333 } 334 } 335 336 res = rman_reserve_resource(rm, start, end, count, flags, child); 337 if (!res) { 338 device_printf(mcdev, "rman_reserve_resource() failed: " 339 "start=%#jx, end=%#jx, count=%#jx\n", start, end, count); 340 goto fail; 341 } 342 343 rman_set_rid(res, *rid); 344 345 if (flags & RF_ACTIVE) { 346 if (bus_activate_resource(child, type, *rid, res)) { 347 device_printf(mcdev, "bus_activate_resource() failed: " 348 "rid=%d, res=%#jx\n", *rid, (uintmax_t) res); 349 rman_release_resource(res); 350 goto fail; 351 } 352 } 353 354 return (res); 355 fail: 356 device_printf(mcdev, "%s() failed: type=%d, rid=%d, start=%#jx, " 357 "end=%#jx, count=%#jx, flags=%x\n", __func__, type, *rid, start, end, 358 count, flags); 359 return (NULL); 360 } 361 362 int 363 dpaa2_mc_adjust_resource(device_t mcdev, device_t child, int type, 364 struct resource *r, rman_res_t start, rman_res_t end) 365 { 366 struct rman *rm; 367 368 rm = dpaa2_mc_rman(mcdev, type); 369 if (rm) 370 return (rman_adjust_resource(r, start, end)); 371 return (bus_generic_adjust_resource(mcdev, child, type, r, start, end)); 372 } 373 374 int 375 dpaa2_mc_release_resource(device_t mcdev, device_t child, int type, int rid, 376 struct resource *r) 377 { 378 struct rman *rm; 379 380 rm = dpaa2_mc_rman(mcdev, type); 381 if (rm) { 382 KASSERT(rman_is_region_manager(r, rm), ("rman mismatch")); 383 rman_release_resource(r); 384 } 385 386 return (bus_generic_release_resource(mcdev, child, type, rid, r)); 387 } 388 389 int 390 dpaa2_mc_activate_resource(device_t mcdev, device_t child, int type, int rid, 391 struct resource *r) 392 { 393 int rc; 394 395 if ((rc = rman_activate_resource(r)) != 0) 396 return (rc); 397 398 return (BUS_ACTIVATE_RESOURCE(device_get_parent(mcdev), child, type, 399 rid, r)); 400 } 401 402 int 403 dpaa2_mc_deactivate_resource(device_t mcdev, device_t child, int type, int rid, 404 struct resource *r) 405 { 406 int rc; 407 408 if ((rc = rman_deactivate_resource(r)) != 0) 409 return (rc); 410 411 return (BUS_DEACTIVATE_RESOURCE(device_get_parent(mcdev), child, type, 412 rid, r)); 413 } 414 415 /* 416 * For pseudo-pcib interface. 417 */ 418 419 int 420 dpaa2_mc_alloc_msi(device_t mcdev, device_t child, int count, int maxcount, 421 int *irqs) 422 { 423 #if defined(INTRNG) 424 return (dpaa2_mc_alloc_msi_impl(mcdev, child, count, maxcount, irqs)); 425 #else 426 return (ENXIO); 427 #endif 428 } 429 430 int 431 dpaa2_mc_release_msi(device_t mcdev, device_t child, int count, int *irqs) 432 { 433 #if defined(INTRNG) 434 return (dpaa2_mc_release_msi_impl(mcdev, child, count, irqs)); 435 #else 436 return (ENXIO); 437 #endif 438 } 439 440 int 441 dpaa2_mc_map_msi(device_t mcdev, device_t child, int irq, uint64_t *addr, 442 uint32_t *data) 443 { 444 #if defined(INTRNG) 445 return (dpaa2_mc_map_msi_impl(mcdev, child, irq, addr, data)); 446 #else 447 return (ENXIO); 448 #endif 449 } 450 451 int 452 dpaa2_mc_get_id(device_t mcdev, device_t child, enum pci_id_type type, 453 uintptr_t *id) 454 { 455 struct dpaa2_devinfo *dinfo; 456 457 dinfo = device_get_ivars(child); 458 459 if (strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) 460 return (ENXIO); 461 462 if (type == PCI_ID_MSI) 463 return (dpaa2_mc_map_id(mcdev, child, id)); 464 465 *id = dinfo->icid; 466 return (0); 467 } 468 469 /* 470 * For DPAA2 Management Complex bus driver interface. 471 */ 472 473 int 474 dpaa2_mc_manage_dev(device_t mcdev, device_t dpaa2_dev, uint32_t flags) 475 { 476 struct dpaa2_mc_softc *sc; 477 struct dpaa2_devinfo *dinfo; 478 struct dpaa2_mc_devinfo *di; 479 struct rman *rm; 480 int error; 481 482 sc = device_get_softc(mcdev); 483 dinfo = device_get_ivars(dpaa2_dev); 484 485 if (!sc || !dinfo || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) 486 return (EINVAL); 487 488 di = malloc(sizeof(*di), M_DPAA2_MC, M_WAITOK | M_ZERO); 489 if (!di) 490 return (ENOMEM); 491 di->dpaa2_dev = dpaa2_dev; 492 di->flags = flags; 493 di->owners = 0; 494 495 /* Append a new managed DPAA2 device to the queue. */ 496 mtx_assert(&sc->mdev_lock, MA_NOTOWNED); 497 mtx_lock(&sc->mdev_lock); 498 STAILQ_INSERT_TAIL(&sc->mdev_list, di, link); 499 mtx_unlock(&sc->mdev_lock); 500 501 if (flags & DPAA2_MC_DEV_ALLOCATABLE) { 502 /* Select rman based on a type of the DPAA2 device. */ 503 rm = dpaa2_mc_rman(mcdev, dinfo->dtype); 504 if (!rm) 505 return (ENOENT); 506 /* Manage DPAA2 device as an allocatable resource. */ 507 error = rman_manage_region(rm, (rman_res_t) dpaa2_dev, 508 (rman_res_t) dpaa2_dev); 509 if (error) 510 return (error); 511 } 512 513 return (0); 514 } 515 516 int 517 dpaa2_mc_get_free_dev(device_t mcdev, device_t *dpaa2_dev, 518 enum dpaa2_dev_type devtype) 519 { 520 struct rman *rm; 521 rman_res_t start, end; 522 int error; 523 524 if (strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) 525 return (EINVAL); 526 527 /* Select resource manager based on a type of the DPAA2 device. */ 528 rm = dpaa2_mc_rman(mcdev, devtype); 529 if (!rm) 530 return (ENOENT); 531 /* Find first free DPAA2 device of the given type. */ 532 error = rman_first_free_region(rm, &start, &end); 533 if (error) 534 return (error); 535 536 KASSERT(start == end, ("start != end, but should be the same pointer " 537 "to the DPAA2 device: start=%jx, end=%jx", start, end)); 538 539 *dpaa2_dev = (device_t) start; 540 541 return (0); 542 } 543 544 int 545 dpaa2_mc_get_dev(device_t mcdev, device_t *dpaa2_dev, 546 enum dpaa2_dev_type devtype, uint32_t obj_id) 547 { 548 struct dpaa2_mc_softc *sc; 549 struct dpaa2_devinfo *dinfo; 550 struct dpaa2_mc_devinfo *di; 551 int error = ENOENT; 552 553 sc = device_get_softc(mcdev); 554 555 if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) 556 return (EINVAL); 557 558 mtx_assert(&sc->mdev_lock, MA_NOTOWNED); 559 mtx_lock(&sc->mdev_lock); 560 561 STAILQ_FOREACH(di, &sc->mdev_list, link) { 562 dinfo = device_get_ivars(di->dpaa2_dev); 563 if (dinfo->dtype == devtype && dinfo->id == obj_id) { 564 *dpaa2_dev = di->dpaa2_dev; 565 error = 0; 566 break; 567 } 568 } 569 570 mtx_unlock(&sc->mdev_lock); 571 572 return (error); 573 } 574 575 int 576 dpaa2_mc_get_shared_dev(device_t mcdev, device_t *dpaa2_dev, 577 enum dpaa2_dev_type devtype) 578 { 579 struct dpaa2_mc_softc *sc; 580 struct dpaa2_devinfo *dinfo; 581 struct dpaa2_mc_devinfo *di; 582 device_t dev = NULL; 583 uint32_t owners = UINT32_MAX; 584 int error = ENOENT; 585 586 sc = device_get_softc(mcdev); 587 588 if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) 589 return (EINVAL); 590 591 mtx_assert(&sc->mdev_lock, MA_NOTOWNED); 592 mtx_lock(&sc->mdev_lock); 593 594 STAILQ_FOREACH(di, &sc->mdev_list, link) { 595 dinfo = device_get_ivars(di->dpaa2_dev); 596 597 if ((dinfo->dtype == devtype) && 598 (di->flags & DPAA2_MC_DEV_SHAREABLE) && 599 (di->owners < owners)) { 600 dev = di->dpaa2_dev; 601 owners = di->owners; 602 } 603 } 604 if (dev) { 605 *dpaa2_dev = dev; 606 error = 0; 607 } 608 609 mtx_unlock(&sc->mdev_lock); 610 611 return (error); 612 } 613 614 int 615 dpaa2_mc_reserve_dev(device_t mcdev, device_t dpaa2_dev, 616 enum dpaa2_dev_type devtype) 617 { 618 struct dpaa2_mc_softc *sc; 619 struct dpaa2_mc_devinfo *di; 620 int error = ENOENT; 621 622 sc = device_get_softc(mcdev); 623 624 if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) 625 return (EINVAL); 626 627 mtx_assert(&sc->mdev_lock, MA_NOTOWNED); 628 mtx_lock(&sc->mdev_lock); 629 630 STAILQ_FOREACH(di, &sc->mdev_list, link) { 631 if (di->dpaa2_dev == dpaa2_dev && 632 (di->flags & DPAA2_MC_DEV_SHAREABLE)) { 633 di->owners++; 634 error = 0; 635 break; 636 } 637 } 638 639 mtx_unlock(&sc->mdev_lock); 640 641 return (error); 642 } 643 644 int 645 dpaa2_mc_release_dev(device_t mcdev, device_t dpaa2_dev, 646 enum dpaa2_dev_type devtype) 647 { 648 struct dpaa2_mc_softc *sc; 649 struct dpaa2_mc_devinfo *di; 650 int error = ENOENT; 651 652 sc = device_get_softc(mcdev); 653 654 if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) 655 return (EINVAL); 656 657 mtx_assert(&sc->mdev_lock, MA_NOTOWNED); 658 mtx_lock(&sc->mdev_lock); 659 660 STAILQ_FOREACH(di, &sc->mdev_list, link) { 661 if (di->dpaa2_dev == dpaa2_dev && 662 (di->flags & DPAA2_MC_DEV_SHAREABLE)) { 663 di->owners -= di->owners > 0 ? 1 : 0; 664 error = 0; 665 break; 666 } 667 } 668 669 mtx_unlock(&sc->mdev_lock); 670 671 return (error); 672 } 673 674 /** 675 * @brief Convert DPAA2 device type to string. 676 */ 677 const char * 678 dpaa2_ttos(enum dpaa2_dev_type type) 679 { 680 switch (type) { 681 case DPAA2_DEV_MC: 682 return ("mc"); /* NOTE: to print as information only. */ 683 case DPAA2_DEV_RC: 684 return ("dprc"); 685 case DPAA2_DEV_IO: 686 return ("dpio"); 687 case DPAA2_DEV_NI: 688 return ("dpni"); 689 case DPAA2_DEV_MCP: 690 return ("dpmcp"); 691 case DPAA2_DEV_BP: 692 return ("dpbp"); 693 case DPAA2_DEV_CON: 694 return ("dpcon"); 695 case DPAA2_DEV_MAC: 696 return ("dpmac"); 697 case DPAA2_DEV_MUX: 698 return ("dpdmux"); 699 case DPAA2_DEV_SW: 700 return ("dpsw"); 701 default: 702 break; 703 } 704 return ("notype"); 705 } 706 707 /** 708 * @brief Convert string to DPAA2 device type. 709 */ 710 enum dpaa2_dev_type 711 dpaa2_stot(const char *str) 712 { 713 if (COMPARE_TYPE(str, "dprc")) { 714 return (DPAA2_DEV_RC); 715 } else if (COMPARE_TYPE(str, "dpio")) { 716 return (DPAA2_DEV_IO); 717 } else if (COMPARE_TYPE(str, "dpni")) { 718 return (DPAA2_DEV_NI); 719 } else if (COMPARE_TYPE(str, "dpmcp")) { 720 return (DPAA2_DEV_MCP); 721 } else if (COMPARE_TYPE(str, "dpbp")) { 722 return (DPAA2_DEV_BP); 723 } else if (COMPARE_TYPE(str, "dpcon")) { 724 return (DPAA2_DEV_CON); 725 } else if (COMPARE_TYPE(str, "dpmac")) { 726 return (DPAA2_DEV_MAC); 727 } else if (COMPARE_TYPE(str, "dpdmux")) { 728 return (DPAA2_DEV_MUX); 729 } else if (COMPARE_TYPE(str, "dpsw")) { 730 return (DPAA2_DEV_SW); 731 } 732 733 return (DPAA2_DEV_NOTYPE); 734 } 735 736 /** 737 * @internal 738 */ 739 static u_int 740 dpaa2_mc_get_xref(device_t mcdev, device_t child) 741 { 742 struct dpaa2_mc_softc *sc = device_get_softc(mcdev); 743 struct dpaa2_devinfo *dinfo = device_get_ivars(child); 744 #ifdef DEV_ACPI 745 u_int xref, devid; 746 #endif 747 #ifdef FDT 748 phandle_t msi_parent; 749 #endif 750 int error; 751 752 if (sc && dinfo) { 753 #ifdef DEV_ACPI 754 if (sc->acpi_based) { 755 /* 756 * NOTE: The first named component from the IORT table 757 * with the given name (as a substring) will be used. 758 */ 759 error = acpi_iort_map_named_msi(IORT_DEVICE_NAME, 760 dinfo->icid, &xref, &devid); 761 if (error) 762 return (0); 763 return (xref); 764 } 765 #endif 766 #ifdef FDT 767 if (!sc->acpi_based) { 768 /* FDT-based driver. */ 769 error = ofw_bus_msimap(sc->ofw_node, dinfo->icid, 770 &msi_parent, NULL); 771 if (error) 772 return (0); 773 return ((u_int) msi_parent); 774 } 775 #endif 776 } 777 return (0); 778 } 779 780 /** 781 * @internal 782 */ 783 static u_int 784 dpaa2_mc_map_id(device_t mcdev, device_t child, uintptr_t *id) 785 { 786 struct dpaa2_devinfo *dinfo; 787 #ifdef DEV_ACPI 788 u_int xref, devid; 789 int error; 790 #endif 791 792 dinfo = device_get_ivars(child); 793 if (dinfo) { 794 /* 795 * The first named components from IORT table with the given 796 * name (as a substring) will be used. 797 */ 798 #ifdef DEV_ACPI 799 error = acpi_iort_map_named_msi(IORT_DEVICE_NAME, dinfo->icid, 800 &xref, &devid); 801 if (error == 0) 802 *id = devid; 803 else 804 #endif 805 *id = dinfo->icid; /* RID not in IORT, likely FW bug */ 806 807 return (0); 808 } 809 return (ENXIO); 810 } 811 812 /** 813 * @internal 814 * @brief Obtain a resource manager based on the given type of the resource. 815 */ 816 static struct rman * 817 dpaa2_mc_rman(device_t mcdev, int type) 818 { 819 struct dpaa2_mc_softc *sc; 820 821 sc = device_get_softc(mcdev); 822 823 switch (type) { 824 case DPAA2_DEV_IO: 825 return (&sc->dpio_rman); 826 case DPAA2_DEV_BP: 827 return (&sc->dpbp_rman); 828 case DPAA2_DEV_CON: 829 return (&sc->dpcon_rman); 830 case DPAA2_DEV_MCP: 831 return (&sc->dpmcp_rman); 832 default: 833 break; 834 } 835 836 return (NULL); 837 } 838 839 #if defined(INTRNG) && !defined(IOMMU) 840 841 /** 842 * @internal 843 * @brief Allocates requested number of MSIs. 844 * 845 * NOTE: This function is a part of fallback solution when IOMMU isn't available. 846 * Total number of IRQs is limited to 32. 847 */ 848 static int 849 dpaa2_mc_alloc_msi_impl(device_t mcdev, device_t child, int count, int maxcount, 850 int *irqs) 851 { 852 struct dpaa2_mc_softc *sc = device_get_softc(mcdev); 853 int msi_irqs[DPAA2_MC_MSI_COUNT]; 854 int error; 855 856 /* Pre-allocate a bunch of MSIs for MC to be used by its children. */ 857 if (!sc->msi_allocated) { 858 error = intr_alloc_msi(mcdev, child, dpaa2_mc_get_xref(mcdev, 859 child), DPAA2_MC_MSI_COUNT, DPAA2_MC_MSI_COUNT, msi_irqs); 860 if (error) { 861 device_printf(mcdev, "failed to pre-allocate %d MSIs: " 862 "error=%d\n", DPAA2_MC_MSI_COUNT, error); 863 return (error); 864 } 865 866 mtx_assert(&sc->msi_lock, MA_NOTOWNED); 867 mtx_lock(&sc->msi_lock); 868 for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { 869 sc->msi[i].child = NULL; 870 sc->msi[i].irq = msi_irqs[i]; 871 } 872 sc->msi_owner = child; 873 sc->msi_allocated = true; 874 mtx_unlock(&sc->msi_lock); 875 } 876 877 error = ENOENT; 878 879 /* Find the first free MSIs from the pre-allocated pool. */ 880 mtx_assert(&sc->msi_lock, MA_NOTOWNED); 881 mtx_lock(&sc->msi_lock); 882 for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { 883 if (sc->msi[i].child != NULL) 884 continue; 885 error = 0; 886 for (int j = 0; j < count; j++) { 887 if (i + j >= DPAA2_MC_MSI_COUNT) { 888 device_printf(mcdev, "requested %d MSIs exceed " 889 "limit of %d available\n", count, 890 DPAA2_MC_MSI_COUNT); 891 error = E2BIG; 892 break; 893 } 894 sc->msi[i + j].child = child; 895 irqs[j] = sc->msi[i + j].irq; 896 } 897 break; 898 } 899 mtx_unlock(&sc->msi_lock); 900 901 return (error); 902 } 903 904 /** 905 * @internal 906 * @brief Marks IRQs as free in the pre-allocated pool of MSIs. 907 * 908 * NOTE: This function is a part of fallback solution when IOMMU isn't available. 909 * Total number of IRQs is limited to 32. 910 * NOTE: MSIs are kept allocated in the kernel as a part of the pool. 911 */ 912 static int 913 dpaa2_mc_release_msi_impl(device_t mcdev, device_t child, int count, int *irqs) 914 { 915 struct dpaa2_mc_softc *sc = device_get_softc(mcdev); 916 917 mtx_assert(&sc->msi_lock, MA_NOTOWNED); 918 mtx_lock(&sc->msi_lock); 919 for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { 920 if (sc->msi[i].child != child) 921 continue; 922 for (int j = 0; j < count; j++) { 923 if (sc->msi[i].irq == irqs[j]) { 924 sc->msi[i].child = NULL; 925 break; 926 } 927 } 928 } 929 mtx_unlock(&sc->msi_lock); 930 931 return (0); 932 } 933 934 /** 935 * @internal 936 * @brief Provides address to write to and data according to the given MSI from 937 * the pre-allocated pool. 938 * 939 * NOTE: This function is a part of fallback solution when IOMMU isn't available. 940 * Total number of IRQs is limited to 32. 941 */ 942 static int 943 dpaa2_mc_map_msi_impl(device_t mcdev, device_t child, int irq, uint64_t *addr, 944 uint32_t *data) 945 { 946 struct dpaa2_mc_softc *sc = device_get_softc(mcdev); 947 int error = EINVAL; 948 949 mtx_assert(&sc->msi_lock, MA_NOTOWNED); 950 mtx_lock(&sc->msi_lock); 951 for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { 952 if (sc->msi[i].child == child && sc->msi[i].irq == irq) { 953 error = 0; 954 break; 955 } 956 } 957 mtx_unlock(&sc->msi_lock); 958 if (error) 959 return (error); 960 961 return (intr_map_msi(mcdev, sc->msi_owner, dpaa2_mc_get_xref(mcdev, 962 sc->msi_owner), irq, addr, data)); 963 } 964 965 #endif /* defined(INTRNG) && !defined(IOMMU) */ 966 967 static device_method_t dpaa2_mc_methods[] = { 968 DEVMETHOD_END 969 }; 970 971 DEFINE_CLASS_0(dpaa2_mc, dpaa2_mc_driver, dpaa2_mc_methods, 972 sizeof(struct dpaa2_mc_softc)); 973