/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright © 2021-2022 Dmitry Salychev
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
/*
 * The DPAA2 Management Complex (MC) bus driver.
 *
 * MC is a hardware resource manager which can be found in several NXP
 * SoCs (LX2160A, for example) and provides access to the specialized
 * hardware objects used in network-oriented packet processing applications.
 */

#include "opt_acpi.h"
#include "opt_platform.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>

#include <vm/vm.h>

#include <machine/bus.h>
#include <machine/resource.h>

#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
#endif

#ifdef FDT
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/ofw_pci.h>
#endif

#include "pcib_if.h"
#include "pci_if.h"

#include "dpaa2_mc.h"

/* Macros to read/write MC registers */
#define	mcreg_read_4(_sc, _r)		bus_read_4(&(_sc)->map[1], (_r))
#define	mcreg_write_4(_sc, _r, _v)	bus_write_4(&(_sc)->map[1], (_r), (_v))

#define	IORT_DEVICE_NAME	"MCE"

/* MC Registers */
#define	MC_REG_GCR1		0x0000u
#define	MC_REG_GCR2		0x0004u	/* TODO: Does it exist? */
#define	MC_REG_GSR		0x0008u
#define	MC_REG_FAPR		0x0028u

/* General Control Register 1 (GCR1) */
#define	GCR1_P1_STOP		0x80000000u
#define	GCR1_P2_STOP		0x40000000u

/* General Status Register (GSR) */
#define	GSR_HW_ERR(v)		(((v) & 0x80000000u) >> 31)
#define	GSR_CAT_ERR(v)		(((v) & 0x40000000u) >> 30)
#define	GSR_DPL_OFFSET(v)	(((v) & 0x3FFFFF00u) >> 8)
#define	GSR_MCS(v)		(((v) & 0xFFu) >> 0)

/* Timeouts to wait for the MC status. */
#define	MC_STAT_TIMEOUT		1000u	/* us */
#define	MC_STAT_ATTEMPTS	100u

/**
 * @brief Structure to describe a DPAA2 device as a managed resource.
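 *
 * @details The @c flags field carries the DPAA2_MC_DEV_ALLOCATABLE and/or
 * DPAA2_MC_DEV_SHAREABLE bits, and @c owners counts how many children
 * currently hold a shareable device: it is incremented by
 * dpaa2_mc_reserve_dev() and decremented by dpaa2_mc_release_dev().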
100 */ 101 struct dpaa2_mc_devinfo { 102 STAILQ_ENTRY(dpaa2_mc_devinfo) link; 103 device_t dpaa2_dev; 104 uint32_t flags; 105 uint32_t owners; 106 }; 107 108 MALLOC_DEFINE(M_DPAA2_MC, "dpaa2_mc", "DPAA2 Management Complex"); 109 110 static struct resource_spec dpaa2_mc_spec[] = { 111 { SYS_RES_MEMORY, 0, RF_ACTIVE | RF_UNMAPPED }, 112 { SYS_RES_MEMORY, 1, RF_ACTIVE | RF_UNMAPPED | RF_OPTIONAL }, 113 RESOURCE_SPEC_END 114 }; 115 116 static u_int dpaa2_mc_get_xref(device_t, device_t); 117 static u_int dpaa2_mc_map_id(device_t, device_t, uintptr_t *); 118 119 static int dpaa2_mc_alloc_msi_impl(device_t, device_t, int, int, int *); 120 static int dpaa2_mc_release_msi_impl(device_t, device_t, int, int *); 121 static int dpaa2_mc_map_msi_impl(device_t, device_t, int, uint64_t *, 122 uint32_t *); 123 124 /* 125 * For device interface. 126 */ 127 128 int 129 dpaa2_mc_attach(device_t dev) 130 { 131 struct dpaa2_mc_softc *sc; 132 struct resource_map_request req; 133 uint32_t val; 134 int error; 135 136 sc = device_get_softc(dev); 137 sc->dev = dev; 138 sc->msi_allocated = false; 139 sc->msi_owner = NULL; 140 141 error = bus_alloc_resources(sc->dev, dpaa2_mc_spec, sc->res); 142 if (error) { 143 device_printf(dev, "%s: failed to allocate resources\n", 144 __func__); 145 return (ENXIO); 146 } 147 148 if (sc->res[1]) { 149 resource_init_map_request(&req); 150 req.memattr = VM_MEMATTR_DEVICE; 151 error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[1], 152 &req, &sc->map[1]); 153 if (error) { 154 device_printf(dev, "%s: failed to map control " 155 "registers\n", __func__); 156 dpaa2_mc_detach(dev); 157 return (ENXIO); 158 } 159 160 if (bootverbose) 161 device_printf(dev, 162 "GCR1=0x%x, GCR2=0x%x, GSR=0x%x, FAPR=0x%x\n", 163 mcreg_read_4(sc, MC_REG_GCR1), 164 mcreg_read_4(sc, MC_REG_GCR2), 165 mcreg_read_4(sc, MC_REG_GSR), 166 mcreg_read_4(sc, MC_REG_FAPR)); 167 168 /* Reset P1_STOP and P2_STOP bits to resume MC processor. */ 169 val = mcreg_read_4(sc, MC_REG_GCR1) & 170 ~(GCR1_P1_STOP | GCR1_P2_STOP); 171 mcreg_write_4(sc, MC_REG_GCR1, val); 172 173 /* Poll MC status. */ 174 if (bootverbose) 175 device_printf(dev, "polling MC status...\n"); 176 for (int i = 0; i < MC_STAT_ATTEMPTS; i++) { 177 val = mcreg_read_4(sc, MC_REG_GSR); 178 if (GSR_MCS(val) != 0u) 179 break; 180 DELAY(MC_STAT_TIMEOUT); 181 } 182 183 if (bootverbose) 184 device_printf(dev, 185 "GCR1=0x%x, GCR2=0x%x, GSR=0x%x, FAPR=0x%x\n", 186 mcreg_read_4(sc, MC_REG_GCR1), 187 mcreg_read_4(sc, MC_REG_GCR2), 188 mcreg_read_4(sc, MC_REG_GSR), 189 mcreg_read_4(sc, MC_REG_FAPR)); 190 } 191 192 /* At least 64 bytes of the command portal should be available. */ 193 if (rman_get_size(sc->res[0]) < DPAA2_MCP_MEM_WIDTH) { 194 device_printf(dev, "%s: MC portal memory region too small: " 195 "%jd\n", __func__, rman_get_size(sc->res[0])); 196 dpaa2_mc_detach(dev); 197 return (ENXIO); 198 } 199 200 /* Map MC portal memory resource. */ 201 resource_init_map_request(&req); 202 req.memattr = VM_MEMATTR_DEVICE; 203 error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[0], 204 &req, &sc->map[0]); 205 if (error) { 206 device_printf(dev, "Failed to map MC portal memory\n"); 207 dpaa2_mc_detach(dev); 208 return (ENXIO); 209 } 210 211 /* Initialize a resource manager for the DPAA2 I/O objects. 
	 */
	sc->dpio_rman.rm_type = RMAN_ARRAY;
	sc->dpio_rman.rm_descr = "DPAA2 DPIO objects";
	error = rman_init(&sc->dpio_rman);
	if (error) {
		device_printf(dev, "Failed to initialize a resource manager for "
		    "the DPAA2 I/O objects: error=%d\n", error);
		dpaa2_mc_detach(dev);
		return (ENXIO);
	}

	/* Initialize a resource manager for the DPAA2 buffer pools. */
	sc->dpbp_rman.rm_type = RMAN_ARRAY;
	sc->dpbp_rman.rm_descr = "DPAA2 DPBP objects";
	error = rman_init(&sc->dpbp_rman);
	if (error) {
		device_printf(dev, "Failed to initialize a resource manager for "
		    "the DPAA2 buffer pools: error=%d\n", error);
		dpaa2_mc_detach(dev);
		return (ENXIO);
	}

	/* Initialize a resource manager for the DPAA2 concentrators. */
	sc->dpcon_rman.rm_type = RMAN_ARRAY;
	sc->dpcon_rman.rm_descr = "DPAA2 DPCON objects";
	error = rman_init(&sc->dpcon_rman);
	if (error) {
		device_printf(dev, "Failed to initialize a resource manager for "
		    "the DPAA2 concentrators: error=%d\n", error);
		dpaa2_mc_detach(dev);
		return (ENXIO);
	}

	/* Initialize a resource manager for the DPAA2 MC portals. */
	sc->dpmcp_rman.rm_type = RMAN_ARRAY;
	sc->dpmcp_rman.rm_descr = "DPAA2 DPMCP objects";
	error = rman_init(&sc->dpmcp_rman);
	if (error) {
		device_printf(dev, "Failed to initialize a resource manager for "
		    "the DPAA2 MC portals: error=%d\n", error);
		dpaa2_mc_detach(dev);
		return (ENXIO);
	}

	/* Initialize a list of non-allocatable DPAA2 devices. */
	mtx_init(&sc->mdev_lock, "MC portal mdev lock", NULL, MTX_DEF);
	STAILQ_INIT(&sc->mdev_list);

	mtx_init(&sc->msi_lock, "MC MSI lock", NULL, MTX_DEF);

	/*
	 * Add a root resource container as the only child of the bus. All of
	 * the direct descendant containers will be attached to the root one
	 * instead of the MC device.
	 */
	sc->rcdev = device_add_child(dev, "dpaa2_rc", 0);
	if (sc->rcdev == NULL) {
		dpaa2_mc_detach(dev);
		return (ENXIO);
	}
	bus_generic_probe(dev);
	bus_generic_attach(dev);

	return (0);
}

int
dpaa2_mc_detach(device_t dev)
{
	struct dpaa2_mc_softc *sc;
	struct dpaa2_devinfo *dinfo = NULL;
	int error;

	error = bus_generic_detach(dev);
	if (error != 0)
		return (error);

	sc = device_get_softc(dev);
	if (sc->rcdev)
		device_delete_child(dev, sc->rcdev);
	bus_release_resources(dev, dpaa2_mc_spec, sc->res);

	dinfo = device_get_ivars(dev);
	if (dinfo)
		free(dinfo, M_DPAA2_MC);

	return (device_delete_children(dev));
}

/*
 * For bus interface.
 */

struct resource *
dpaa2_mc_alloc_resource(device_t mcdev, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct resource *res;
	struct rman *rm;
	int error;

	rm = dpaa2_mc_rman(mcdev, type, flags);
	if (rm == NULL)
		return (bus_generic_alloc_resource(mcdev, child, type, rid,
		    start, end, count, flags));

	/*
	 * Skip managing DPAA2-specific resource. It must be provided to MC by
	 * calling DPAA2_MC_MANAGE_DEV() beforehand.
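	 *
	 * Types up to and including DPAA2_DEV_MC are ordinary, address-based
	 * resources whose regions may still be managed here on first use;
	 * the DPAA2 object types above it are backed by device_t regions
	 * registered in dpaa2_mc_manage_dev() instead.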
322 */ 323 if (type <= DPAA2_DEV_MC) { 324 error = rman_manage_region(rm, start, end); 325 if (error) { 326 device_printf(mcdev, "rman_manage_region() failed: " 327 "start=%#jx, end=%#jx, error=%d\n", start, end, 328 error); 329 goto fail; 330 } 331 } 332 333 res = bus_generic_rman_alloc_resource(mcdev, child, type, rid, start, 334 end, count, flags); 335 if (res == NULL) 336 goto fail; 337 return (res); 338 fail: 339 device_printf(mcdev, "%s() failed: type=%d, rid=%d, start=%#jx, " 340 "end=%#jx, count=%#jx, flags=%x\n", __func__, type, *rid, start, end, 341 count, flags); 342 return (NULL); 343 } 344 345 int 346 dpaa2_mc_adjust_resource(device_t mcdev, device_t child, 347 struct resource *r, rman_res_t start, rman_res_t end) 348 { 349 struct rman *rm; 350 351 rm = dpaa2_mc_rman(mcdev, rman_get_type(r), rman_get_flags(r)); 352 if (rm) 353 return (bus_generic_rman_adjust_resource(mcdev, child, r, 354 start, end)); 355 return (bus_generic_adjust_resource(mcdev, child, r, start, end)); 356 } 357 358 int 359 dpaa2_mc_release_resource(device_t mcdev, device_t child, struct resource *r) 360 { 361 struct rman *rm; 362 363 rm = dpaa2_mc_rman(mcdev, rman_get_type(r), rman_get_flags(r)); 364 if (rm) 365 return (bus_generic_rman_release_resource(mcdev, child, r)); 366 return (bus_generic_release_resource(mcdev, child, r)); 367 } 368 369 int 370 dpaa2_mc_activate_resource(device_t mcdev, device_t child, struct resource *r) 371 { 372 struct rman *rm; 373 374 rm = dpaa2_mc_rman(mcdev, rman_get_type(r), rman_get_flags(r)); 375 if (rm) 376 return (bus_generic_rman_activate_resource(mcdev, child, r)); 377 return (bus_generic_activate_resource(mcdev, child, r)); 378 } 379 380 int 381 dpaa2_mc_deactivate_resource(device_t mcdev, device_t child, struct resource *r) 382 { 383 struct rman *rm; 384 385 rm = dpaa2_mc_rman(mcdev, rman_get_type(r), rman_get_flags(r)); 386 if (rm) 387 return (bus_generic_rman_deactivate_resource(mcdev, child, r)); 388 return (bus_generic_deactivate_resource(mcdev, child, r)); 389 } 390 391 /* 392 * For pseudo-pcib interface. 393 */ 394 395 int 396 dpaa2_mc_alloc_msi(device_t mcdev, device_t child, int count, int maxcount, 397 int *irqs) 398 { 399 #if defined(INTRNG) 400 return (dpaa2_mc_alloc_msi_impl(mcdev, child, count, maxcount, irqs)); 401 #else 402 return (ENXIO); 403 #endif 404 } 405 406 int 407 dpaa2_mc_release_msi(device_t mcdev, device_t child, int count, int *irqs) 408 { 409 #if defined(INTRNG) 410 return (dpaa2_mc_release_msi_impl(mcdev, child, count, irqs)); 411 #else 412 return (ENXIO); 413 #endif 414 } 415 416 int 417 dpaa2_mc_map_msi(device_t mcdev, device_t child, int irq, uint64_t *addr, 418 uint32_t *data) 419 { 420 #if defined(INTRNG) 421 return (dpaa2_mc_map_msi_impl(mcdev, child, irq, addr, data)); 422 #else 423 return (ENXIO); 424 #endif 425 } 426 427 int 428 dpaa2_mc_get_id(device_t mcdev, device_t child, enum pci_id_type type, 429 uintptr_t *id) 430 { 431 struct dpaa2_devinfo *dinfo; 432 433 dinfo = device_get_ivars(child); 434 435 if (strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) 436 return (ENXIO); 437 438 if (type == PCI_ID_MSI) 439 return (dpaa2_mc_map_id(mcdev, child, id)); 440 441 *id = dinfo->icid; 442 return (0); 443 } 444 445 /* 446 * For DPAA2 Management Complex bus driver interface. 
447 */ 448 449 int 450 dpaa2_mc_manage_dev(device_t mcdev, device_t dpaa2_dev, uint32_t flags) 451 { 452 struct dpaa2_mc_softc *sc; 453 struct dpaa2_devinfo *dinfo; 454 struct dpaa2_mc_devinfo *di; 455 struct rman *rm; 456 int error; 457 458 sc = device_get_softc(mcdev); 459 dinfo = device_get_ivars(dpaa2_dev); 460 461 if (!sc || !dinfo || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) 462 return (EINVAL); 463 464 di = malloc(sizeof(*di), M_DPAA2_MC, M_WAITOK | M_ZERO); 465 if (!di) 466 return (ENOMEM); 467 di->dpaa2_dev = dpaa2_dev; 468 di->flags = flags; 469 di->owners = 0; 470 471 /* Append a new managed DPAA2 device to the queue. */ 472 mtx_assert(&sc->mdev_lock, MA_NOTOWNED); 473 mtx_lock(&sc->mdev_lock); 474 STAILQ_INSERT_TAIL(&sc->mdev_list, di, link); 475 mtx_unlock(&sc->mdev_lock); 476 477 if (flags & DPAA2_MC_DEV_ALLOCATABLE) { 478 /* Select rman based on a type of the DPAA2 device. */ 479 rm = dpaa2_mc_rman(mcdev, dinfo->dtype, 0); 480 if (!rm) 481 return (ENOENT); 482 /* Manage DPAA2 device as an allocatable resource. */ 483 error = rman_manage_region(rm, (rman_res_t) dpaa2_dev, 484 (rman_res_t) dpaa2_dev); 485 if (error) 486 return (error); 487 } 488 489 return (0); 490 } 491 492 int 493 dpaa2_mc_get_free_dev(device_t mcdev, device_t *dpaa2_dev, 494 enum dpaa2_dev_type devtype) 495 { 496 struct rman *rm; 497 rman_res_t start, end; 498 int error; 499 500 if (strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) 501 return (EINVAL); 502 503 /* Select resource manager based on a type of the DPAA2 device. */ 504 rm = dpaa2_mc_rman(mcdev, devtype, 0); 505 if (!rm) 506 return (ENOENT); 507 /* Find first free DPAA2 device of the given type. */ 508 error = rman_first_free_region(rm, &start, &end); 509 if (error) 510 return (error); 511 512 KASSERT(start == end, ("start != end, but should be the same pointer " 513 "to the DPAA2 device: start=%jx, end=%jx", start, end)); 514 515 *dpaa2_dev = (device_t) start; 516 517 return (0); 518 } 519 520 int 521 dpaa2_mc_get_dev(device_t mcdev, device_t *dpaa2_dev, 522 enum dpaa2_dev_type devtype, uint32_t obj_id) 523 { 524 struct dpaa2_mc_softc *sc; 525 struct dpaa2_devinfo *dinfo; 526 struct dpaa2_mc_devinfo *di; 527 int error = ENOENT; 528 529 sc = device_get_softc(mcdev); 530 531 if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) 532 return (EINVAL); 533 534 mtx_assert(&sc->mdev_lock, MA_NOTOWNED); 535 mtx_lock(&sc->mdev_lock); 536 537 STAILQ_FOREACH(di, &sc->mdev_list, link) { 538 dinfo = device_get_ivars(di->dpaa2_dev); 539 if (dinfo->dtype == devtype && dinfo->id == obj_id) { 540 *dpaa2_dev = di->dpaa2_dev; 541 error = 0; 542 break; 543 } 544 } 545 546 mtx_unlock(&sc->mdev_lock); 547 548 return (error); 549 } 550 551 int 552 dpaa2_mc_get_shared_dev(device_t mcdev, device_t *dpaa2_dev, 553 enum dpaa2_dev_type devtype) 554 { 555 struct dpaa2_mc_softc *sc; 556 struct dpaa2_devinfo *dinfo; 557 struct dpaa2_mc_devinfo *di; 558 device_t dev = NULL; 559 uint32_t owners = UINT32_MAX; 560 int error = ENOENT; 561 562 sc = device_get_softc(mcdev); 563 564 if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) 565 return (EINVAL); 566 567 mtx_assert(&sc->mdev_lock, MA_NOTOWNED); 568 mtx_lock(&sc->mdev_lock); 569 570 STAILQ_FOREACH(di, &sc->mdev_list, link) { 571 dinfo = device_get_ivars(di->dpaa2_dev); 572 573 if ((dinfo->dtype == devtype) && 574 (di->flags & DPAA2_MC_DEV_SHAREABLE) && 575 (di->owners < owners)) { 576 dev = di->dpaa2_dev; 577 owners = di->owners; 578 } 579 } 580 if (dev) { 581 *dpaa2_dev = dev; 582 error = 0; 583 } 584 585 
	mtx_unlock(&sc->mdev_lock);

	return (error);
}

int
dpaa2_mc_reserve_dev(device_t mcdev, device_t dpaa2_dev,
    enum dpaa2_dev_type devtype)
{
	struct dpaa2_mc_softc *sc;
	struct dpaa2_mc_devinfo *di;
	int error = ENOENT;

	sc = device_get_softc(mcdev);

	if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
		return (EINVAL);

	mtx_assert(&sc->mdev_lock, MA_NOTOWNED);
	mtx_lock(&sc->mdev_lock);

	STAILQ_FOREACH(di, &sc->mdev_list, link) {
		if (di->dpaa2_dev == dpaa2_dev &&
		    (di->flags & DPAA2_MC_DEV_SHAREABLE)) {
			di->owners++;
			error = 0;
			break;
		}
	}

	mtx_unlock(&sc->mdev_lock);

	return (error);
}

int
dpaa2_mc_release_dev(device_t mcdev, device_t dpaa2_dev,
    enum dpaa2_dev_type devtype)
{
	struct dpaa2_mc_softc *sc;
	struct dpaa2_mc_devinfo *di;
	int error = ENOENT;

	sc = device_get_softc(mcdev);

	if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
		return (EINVAL);

	mtx_assert(&sc->mdev_lock, MA_NOTOWNED);
	mtx_lock(&sc->mdev_lock);

	STAILQ_FOREACH(di, &sc->mdev_list, link) {
		if (di->dpaa2_dev == dpaa2_dev &&
		    (di->flags & DPAA2_MC_DEV_SHAREABLE)) {
			di->owners -= di->owners > 0 ? 1 : 0;
			error = 0;
			break;
		}
	}

	mtx_unlock(&sc->mdev_lock);

	return (error);
}

/**
 * @internal
 */
static u_int
dpaa2_mc_get_xref(device_t mcdev, device_t child)
{
	struct dpaa2_mc_softc *sc = device_get_softc(mcdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(child);
#ifdef DEV_ACPI
	u_int xref, devid;
#endif
#ifdef FDT
	phandle_t msi_parent;
#endif
	int error;

	if (sc && dinfo) {
#ifdef DEV_ACPI
		if (sc->acpi_based) {
			/*
			 * NOTE: The first named component from the IORT table
			 * with the given name (as a substring) will be used.
			 */
			error = acpi_iort_map_named_msi(IORT_DEVICE_NAME,
			    dinfo->icid, &xref, &devid);
			if (error)
				return (0);
			return (xref);
		}
#endif
#ifdef FDT
		if (!sc->acpi_based) {
			/* FDT-based driver. */
			error = ofw_bus_msimap(sc->ofw_node, dinfo->icid,
			    &msi_parent, NULL);
			if (error)
				return (0);
			return ((u_int) msi_parent);
		}
#endif
	}
	return (0);
}

/**
 * @internal
 */
static u_int
dpaa2_mc_map_id(device_t mcdev, device_t child, uintptr_t *id)
{
	struct dpaa2_devinfo *dinfo;
#ifdef DEV_ACPI
	u_int xref, devid;
	int error;
#endif

	dinfo = device_get_ivars(child);
	if (dinfo) {
		/*
		 * The first named component from the IORT table with the
		 * given name (as a substring) will be used.
		 */
#ifdef DEV_ACPI
		error = acpi_iort_map_named_msi(IORT_DEVICE_NAME, dinfo->icid,
		    &xref, &devid);
		if (error == 0)
			*id = devid;
		else
#endif
			*id = dinfo->icid; /* RID not in IORT, likely FW bug */

		return (0);
	}
	return (ENXIO);
}

/**
 * @internal
 * @brief Obtain a resource manager based on the given type of the resource.
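 *
 * Returns NULL for resource types which are not backed by one of the DPAA2
 * rmans, so that callers can fall back to the generic bus_* implementation.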
729 */ 730 struct rman * 731 dpaa2_mc_rman(device_t mcdev, int type, u_int flags) 732 { 733 struct dpaa2_mc_softc *sc; 734 735 sc = device_get_softc(mcdev); 736 737 switch (type) { 738 case DPAA2_DEV_IO: 739 return (&sc->dpio_rman); 740 case DPAA2_DEV_BP: 741 return (&sc->dpbp_rman); 742 case DPAA2_DEV_CON: 743 return (&sc->dpcon_rman); 744 case DPAA2_DEV_MCP: 745 return (&sc->dpmcp_rman); 746 default: 747 break; 748 } 749 750 return (NULL); 751 } 752 753 #if defined(INTRNG) && !defined(IOMMU) 754 755 /** 756 * @internal 757 * @brief Allocates requested number of MSIs. 758 * 759 * NOTE: This function is a part of fallback solution when IOMMU isn't available. 760 * Total number of IRQs is limited to 32. 761 */ 762 static int 763 dpaa2_mc_alloc_msi_impl(device_t mcdev, device_t child, int count, int maxcount, 764 int *irqs) 765 { 766 struct dpaa2_mc_softc *sc = device_get_softc(mcdev); 767 int msi_irqs[DPAA2_MC_MSI_COUNT]; 768 int error; 769 770 /* Pre-allocate a bunch of MSIs for MC to be used by its children. */ 771 if (!sc->msi_allocated) { 772 error = intr_alloc_msi(mcdev, child, dpaa2_mc_get_xref(mcdev, 773 child), DPAA2_MC_MSI_COUNT, DPAA2_MC_MSI_COUNT, msi_irqs); 774 if (error) { 775 device_printf(mcdev, "failed to pre-allocate %d MSIs: " 776 "error=%d\n", DPAA2_MC_MSI_COUNT, error); 777 return (error); 778 } 779 780 mtx_assert(&sc->msi_lock, MA_NOTOWNED); 781 mtx_lock(&sc->msi_lock); 782 for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { 783 sc->msi[i].child = NULL; 784 sc->msi[i].irq = msi_irqs[i]; 785 } 786 sc->msi_owner = child; 787 sc->msi_allocated = true; 788 mtx_unlock(&sc->msi_lock); 789 } 790 791 error = ENOENT; 792 793 /* Find the first free MSIs from the pre-allocated pool. */ 794 mtx_assert(&sc->msi_lock, MA_NOTOWNED); 795 mtx_lock(&sc->msi_lock); 796 for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { 797 if (sc->msi[i].child != NULL) 798 continue; 799 error = 0; 800 for (int j = 0; j < count; j++) { 801 if (i + j >= DPAA2_MC_MSI_COUNT) { 802 device_printf(mcdev, "requested %d MSIs exceed " 803 "limit of %d available\n", count, 804 DPAA2_MC_MSI_COUNT); 805 error = E2BIG; 806 break; 807 } 808 sc->msi[i + j].child = child; 809 irqs[j] = sc->msi[i + j].irq; 810 } 811 break; 812 } 813 mtx_unlock(&sc->msi_lock); 814 815 return (error); 816 } 817 818 /** 819 * @internal 820 * @brief Marks IRQs as free in the pre-allocated pool of MSIs. 821 * 822 * NOTE: This function is a part of fallback solution when IOMMU isn't available. 823 * Total number of IRQs is limited to 32. 824 * NOTE: MSIs are kept allocated in the kernel as a part of the pool. 825 */ 826 static int 827 dpaa2_mc_release_msi_impl(device_t mcdev, device_t child, int count, int *irqs) 828 { 829 struct dpaa2_mc_softc *sc = device_get_softc(mcdev); 830 831 mtx_assert(&sc->msi_lock, MA_NOTOWNED); 832 mtx_lock(&sc->msi_lock); 833 for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { 834 if (sc->msi[i].child != child) 835 continue; 836 for (int j = 0; j < count; j++) { 837 if (sc->msi[i].irq == irqs[j]) { 838 sc->msi[i].child = NULL; 839 break; 840 } 841 } 842 } 843 mtx_unlock(&sc->msi_lock); 844 845 return (0); 846 } 847 848 /** 849 * @internal 850 * @brief Provides address to write to and data according to the given MSI from 851 * the pre-allocated pool. 852 * 853 * NOTE: This function is a part of fallback solution when IOMMU isn't available. 854 * Total number of IRQs is limited to 32. 
855 */ 856 static int 857 dpaa2_mc_map_msi_impl(device_t mcdev, device_t child, int irq, uint64_t *addr, 858 uint32_t *data) 859 { 860 struct dpaa2_mc_softc *sc = device_get_softc(mcdev); 861 int error = EINVAL; 862 863 mtx_assert(&sc->msi_lock, MA_NOTOWNED); 864 mtx_lock(&sc->msi_lock); 865 for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { 866 if (sc->msi[i].child == child && sc->msi[i].irq == irq) { 867 error = 0; 868 break; 869 } 870 } 871 mtx_unlock(&sc->msi_lock); 872 if (error) 873 return (error); 874 875 return (intr_map_msi(mcdev, sc->msi_owner, dpaa2_mc_get_xref(mcdev, 876 sc->msi_owner), irq, addr, data)); 877 } 878 879 #endif /* defined(INTRNG) && !defined(IOMMU) */ 880 881 static device_method_t dpaa2_mc_methods[] = { 882 DEVMETHOD_END 883 }; 884 885 DEFINE_CLASS_0(dpaa2_mc, dpaa2_mc_driver, dpaa2_mc_methods, 886 sizeof(struct dpaa2_mc_softc)); 887