Lines Matching +full:mc +full:- +full:bus

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright © 2021-2022 Dmitry Salychev
30 * The DPAA2 Management Complex (MC) bus driver.
32 * MC is a hardware resource manager which can be found in several NXP SoCs
34 * and provides access to the hardware objects used in network-oriented packet processing applications.
42 #include <sys/bus.h>
52 #include <machine/bus.h>
72 /* Macros to read/write MC registers */
73 #define mcreg_read_4(_sc, _r) bus_read_4(&(_sc)->map[1], (_r))
74 #define mcreg_write_4(_sc, _r, _v) bus_write_4(&(_sc)->map[1], (_r), (_v))
78 /* MC Registers */
94 /* Timeouts to wait for the MC status. */
137 sc->dev = dev; in dpaa2_mc_attach()
138 sc->msi_allocated = false; in dpaa2_mc_attach()
139 sc->msi_owner = NULL; in dpaa2_mc_attach()
141 error = bus_alloc_resources(sc->dev, dpaa2_mc_spec, sc->res); in dpaa2_mc_attach()
148 if (sc->res[1]) { in dpaa2_mc_attach()
151 error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[1], in dpaa2_mc_attach()
152 &req, &sc->map[1]); in dpaa2_mc_attach()
168 /* Reset P1_STOP and P2_STOP bits to resume MC processor. */ in dpaa2_mc_attach()
173 /* Poll MC status. */ in dpaa2_mc_attach()
175 device_printf(dev, "polling MC status...\n"); in dpaa2_mc_attach()
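
The two comments above describe a read-modify-write on an MC control register followed by a bounded status poll. A minimal sketch of that sequence, using the mcreg_read_4()/mcreg_write_4() macros defined earlier; the register offsets, bit masks and timeout constants (MC_REG_GCR1, GCR1_P1_STOP, GCR1_P2_STOP, MC_REG_GSR, GSR_MCS_READY, MC_STAT_TIMEOUT, MC_STAT_SLEEP) are placeholders here, not the driver's actual names from the "MC Registers" block:

/*
 * Sketch only: register, bit and timeout names are placeholders for the
 * definitions kept in the "MC Registers" and timeout blocks above.
 */
static int
dpaa2_mc_resume_and_poll(struct dpaa2_mc_softc *sc)
{
    uint32_t reg;
    int i;

    /* Clear P1_STOP and P2_STOP so both MC cores resume execution. */
    reg = mcreg_read_4(sc, MC_REG_GCR1);
    reg &= ~(GCR1_P1_STOP | GCR1_P2_STOP);
    mcreg_write_4(sc, MC_REG_GCR1, reg);

    /* Poll the general status register until the MC reports it is up. */
    for (i = 0; i < MC_STAT_TIMEOUT; i += MC_STAT_SLEEP) {
        reg = mcreg_read_4(sc, MC_REG_GSR);
        if (reg & GSR_MCS_READY)
            return (0);
        DELAY(MC_STAT_SLEEP);    /* busy-wait; attach context */
    }
    return (ETIMEDOUT);
}
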
193 if (rman_get_size(sc->res[0]) < DPAA2_MCP_MEM_WIDTH) { in dpaa2_mc_attach()
194 device_printf(dev, "%s: MC portal memory region too small: " in dpaa2_mc_attach()
195 "%jd\n", __func__, rman_get_size(sc->res[0])); in dpaa2_mc_attach()
200 /* Map MC portal memory resource. */ in dpaa2_mc_attach()
203 error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[0], in dpaa2_mc_attach()
204 &req, &sc->map[0]); in dpaa2_mc_attach()
206 device_printf(dev, "Failed to map MC portal memory\n"); in dpaa2_mc_attach()
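
bus_map_resource(9) takes an optional struct resource_map_request that controls how the window is mapped; portal memory is device memory, so the request would normally ask for a non-cacheable attribute. A sketch of the mapping step under that assumption (VM_MEMATTR_DEVICE is my choice for illustration, not confirmed by the listing):

static int
dpaa2_mc_map_portal(struct dpaa2_mc_softc *sc)
{
    struct resource_map_request req;
    int error;

    /* Request a device (non-cacheable) mapping of the portal window. */
    resource_init_map_request(&req);
    req.memattr = VM_MEMATTR_DEVICE;

    error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[0], &req,
        &sc->map[0]);
    if (error != 0)
        device_printf(sc->dev, "Failed to map MC portal memory\n");
    return (error);
}
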
212 sc->dpio_rman.rm_type = RMAN_ARRAY; in dpaa2_mc_attach()
213 sc->dpio_rman.rm_descr = "DPAA2 DPIO objects"; in dpaa2_mc_attach()
214 error = rman_init(&sc->dpio_rman); in dpaa2_mc_attach()
223 sc->dpbp_rman.rm_type = RMAN_ARRAY; in dpaa2_mc_attach()
224 sc->dpbp_rman.rm_descr = "DPAA2 DPBP objects"; in dpaa2_mc_attach()
225 error = rman_init(&sc->dpbp_rman); in dpaa2_mc_attach()
234 sc->dpcon_rman.rm_type = RMAN_ARRAY; in dpaa2_mc_attach()
235 sc->dpcon_rman.rm_descr = "DPAA2 DPCON objects"; in dpaa2_mc_attach()
236 error = rman_init(&sc->dpcon_rman); in dpaa2_mc_attach()
244 /* Initialize a resource manager for the DPAA2 MC portals. */ in dpaa2_mc_attach()
245 sc->dpmcp_rman.rm_type = RMAN_ARRAY; in dpaa2_mc_attach()
246 sc->dpmcp_rman.rm_descr = "DPAA2 DPMCP objects"; in dpaa2_mc_attach()
247 error = rman_init(&sc->dpmcp_rman); in dpaa2_mc_attach()
250 "the DPAA2 MC portals: error=%d\n", error); in dpaa2_mc_attach()
255 /* Initialize a list of non-allocatable DPAA2 devices. */ in dpaa2_mc_attach()
256 mtx_init(&sc->mdev_lock, "MC portal mdev lock", NULL, MTX_DEF); in dpaa2_mc_attach()
257 STAILQ_INIT(&sc->mdev_list); in dpaa2_mc_attach()
259 mtx_init(&sc->msi_lock, "MC MSI lock", NULL, MTX_DEF); in dpaa2_mc_attach()
262 * Add a root resource container as the only child of the bus. All of in dpaa2_mc_attach()
264	 * the direct children of the MC bus should be attached to the root container instead of the MC device. in dpaa2_mc_attach()
266 sc->rcdev = device_add_child(dev, "dpaa2_rc", 0); in dpaa2_mc_attach()
267 if (sc->rcdev == NULL) { in dpaa2_mc_attach()
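
Because every DPAA2 object attaches under the root container rather than under the MC itself, attach only has to create the "dpaa2_rc" child and let the usual newbus pass bring the hierarchy up. A sketch of that tail of attach; whether the driver uses bus_generic_probe()/bus_generic_attach() exactly as shown is an assumption:

static int
dpaa2_mc_add_root_container(device_t dev, struct dpaa2_mc_softc *sc)
{
    /* The root container is the MC bus's only direct child. */
    sc->rcdev = device_add_child(dev, "dpaa2_rc", 0);
    if (sc->rcdev == NULL)
        return (ENXIO);

    /* Standard newbus pass: probe and attach the container and its children. */
    bus_generic_probe(dev);
    return (bus_generic_attach(dev));
}
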
289 bus_release_resources(dev, dpaa2_mc_spec, sc->res); in dpaa2_mc_detach()
299 * For bus interface.
316	 * Skip managing DPAA2-specific resource. It must be provided to MC by calling DPAA2_MC_MANAGE_DEV() beforehand. in dpaa2_mc_alloc_resource()
388 * For pseudo-pcib interface.
437 *id = dinfo->icid; in dpaa2_mc_get_id()
442 * For DPAA2 Management Complex bus driver interface.
461 di->dpaa2_dev = dpaa2_dev; in dpaa2_mc_manage_dev()
462 di->flags = flags; in dpaa2_mc_manage_dev()
463 di->owners = 0; in dpaa2_mc_manage_dev()
466 mtx_assert(&sc->mdev_lock, MA_NOTOWNED); in dpaa2_mc_manage_dev()
467 mtx_lock(&sc->mdev_lock); in dpaa2_mc_manage_dev()
468 STAILQ_INSERT_TAIL(&sc->mdev_list, di, link); in dpaa2_mc_manage_dev()
469 mtx_unlock(&sc->mdev_lock); in dpaa2_mc_manage_dev()
473 rm = dpaa2_mc_rman(mcdev, dinfo->dtype, 0); in dpaa2_mc_manage_dev()
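
dpaa2_mc_manage_dev() records the object in mdev_list and then looks up the per-type rman so the object can also be handed out as an allocatable resource. One plausible continuation, assuming the same ivars structure the surrounding code uses; keying the region by the MC object ID is an illustration only, since the listing does not show how the real driver keys regions:

static int
dpaa2_mc_expose_dev(device_t mcdev, device_t dpaa2_dev)
{
    struct dpaa2_devinfo *dinfo = device_get_ivars(dpaa2_dev);
    struct rman *rm;

    rm = dpaa2_mc_rman(mcdev, dinfo->dtype, 0);
    if (rm == NULL)
        return (ENOENT);

    /* Register a one-unit region keyed by the MC object ID. */
    return (rman_manage_region(rm, dinfo->id, dinfo->id));
}
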
528 mtx_assert(&sc->mdev_lock, MA_NOTOWNED); in dpaa2_mc_get_dev()
529 mtx_lock(&sc->mdev_lock); in dpaa2_mc_get_dev()
531 STAILQ_FOREACH(di, &sc->mdev_list, link) { in dpaa2_mc_get_dev()
532 dinfo = device_get_ivars(di->dpaa2_dev); in dpaa2_mc_get_dev()
533 if (dinfo->dtype == devtype && dinfo->id == obj_id) { in dpaa2_mc_get_dev()
534 *dpaa2_dev = di->dpaa2_dev; in dpaa2_mc_get_dev()
540 mtx_unlock(&sc->mdev_lock); in dpaa2_mc_get_dev()
561 mtx_assert(&sc->mdev_lock, MA_NOTOWNED); in dpaa2_mc_get_shared_dev()
562 mtx_lock(&sc->mdev_lock); in dpaa2_mc_get_shared_dev()
564 STAILQ_FOREACH(di, &sc->mdev_list, link) { in dpaa2_mc_get_shared_dev()
565 dinfo = device_get_ivars(di->dpaa2_dev); in dpaa2_mc_get_shared_dev()
567 if ((dinfo->dtype == devtype) && in dpaa2_mc_get_shared_dev()
568 (di->flags & DPAA2_MC_DEV_SHAREABLE) && in dpaa2_mc_get_shared_dev()
569 (di->owners < owners)) { in dpaa2_mc_get_shared_dev()
570 dev = di->dpaa2_dev; in dpaa2_mc_get_shared_dev()
571 owners = di->owners; in dpaa2_mc_get_shared_dev()
579 mtx_unlock(&sc->mdev_lock); in dpaa2_mc_get_shared_dev()
597 mtx_assert(&sc->mdev_lock, MA_NOTOWNED); in dpaa2_mc_reserve_dev()
598 mtx_lock(&sc->mdev_lock); in dpaa2_mc_reserve_dev()
600 STAILQ_FOREACH(di, &sc->mdev_list, link) { in dpaa2_mc_reserve_dev()
601 if (di->dpaa2_dev == dpaa2_dev && in dpaa2_mc_reserve_dev()
602 (di->flags & DPAA2_MC_DEV_SHAREABLE)) { in dpaa2_mc_reserve_dev()
603 di->owners++; in dpaa2_mc_reserve_dev()
609 mtx_unlock(&sc->mdev_lock); in dpaa2_mc_reserve_dev()
627 mtx_assert(&sc->mdev_lock, MA_NOTOWNED); in dpaa2_mc_release_dev()
628 mtx_lock(&sc->mdev_lock); in dpaa2_mc_release_dev()
630 STAILQ_FOREACH(di, &sc->mdev_list, link) { in dpaa2_mc_release_dev()
631 if (di->dpaa2_dev == dpaa2_dev && in dpaa2_mc_release_dev()
632 (di->flags & DPAA2_MC_DEV_SHAREABLE)) { in dpaa2_mc_release_dev()
633 di->owners -= di->owners > 0 ? 1 : 0; in dpaa2_mc_release_dev()
639 mtx_unlock(&sc->mdev_lock); in dpaa2_mc_release_dev()
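
get_shared_dev(), reserve_dev() and release_dev() together implement reference-counted sharing: the first returns the shareable object of a given type with the fewest owners, the second bumps its owner count, the third drops it. A consumer-side sketch; the DPAA2_MC_* kobj macros, the DPAA2_DEV_IO constant and the parent chain are assumed to mirror the implementations above and are not confirmed by the listing:

static int
example_borrow_shared_dpio(device_t dev)
{
    /* Assumed hierarchy: consumer -> dpaa2_rc -> dpaa2_mc. */
    device_t mcdev = device_get_parent(device_get_parent(dev));
    device_t dpio_dev;
    int error;

    error = DPAA2_MC_GET_SHARED_DEV(mcdev, &dpio_dev, DPAA2_DEV_IO);
    if (error != 0)
        return (error);
    error = DPAA2_MC_RESERVE_DEV(mcdev, dpio_dev);
    if (error != 0)
        return (error);

    /* ... use dpio_dev; drop the reference when done ... */
    DPAA2_MC_RELEASE_DEV(mcdev, dpio_dev);
    return (0);
}
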
662 if (sc->acpi_based) { in dpaa2_mc_get_xref()
668 dinfo->icid, &xref, &devid); in dpaa2_mc_get_xref()
675 if (!sc->acpi_based) { in dpaa2_mc_get_xref()
676 /* FDT-based driver. */ in dpaa2_mc_get_xref()
677 error = ofw_bus_msimap(sc->ofw_node, dinfo->icid, in dpaa2_mc_get_xref()
707 error = acpi_iort_map_named_msi(IORT_DEVICE_NAME, dinfo->icid, in dpaa2_mc_map_id()
713 *id = dinfo->icid; /* RID not in IORT, likely FW bug */ in dpaa2_mc_map_id()
733 return (&sc->dpio_rman); in dpaa2_mc_rman()
735 return (&sc->dpbp_rman); in dpaa2_mc_rman()
737 return (&sc->dpcon_rman); in dpaa2_mc_rman()
739 return (&sc->dpmcp_rman); in dpaa2_mc_rman()
764	/* Pre-allocate a pool of MSIs for the MC to hand out to its children. */ in dpaa2_mc_alloc_msi_impl()
765 if (!sc->msi_allocated) { in dpaa2_mc_alloc_msi_impl()
769 device_printf(mcdev, "failed to pre-allocate %d MSIs: " in dpaa2_mc_alloc_msi_impl()
774 mtx_assert(&sc->msi_lock, MA_NOTOWNED); in dpaa2_mc_alloc_msi_impl()
775 mtx_lock(&sc->msi_lock); in dpaa2_mc_alloc_msi_impl()
777 sc->msi[i].child = NULL; in dpaa2_mc_alloc_msi_impl()
778 sc->msi[i].irq = msi_irqs[i]; in dpaa2_mc_alloc_msi_impl()
780 sc->msi_owner = child; in dpaa2_mc_alloc_msi_impl()
781 sc->msi_allocated = true; in dpaa2_mc_alloc_msi_impl()
782 mtx_unlock(&sc->msi_lock); in dpaa2_mc_alloc_msi_impl()
787 /* Find the first free MSIs from the pre-allocated pool. */ in dpaa2_mc_alloc_msi_impl()
788 mtx_assert(&sc->msi_lock, MA_NOTOWNED); in dpaa2_mc_alloc_msi_impl()
789 mtx_lock(&sc->msi_lock); in dpaa2_mc_alloc_msi_impl()
791 if (sc->msi[i].child != NULL) in dpaa2_mc_alloc_msi_impl()
802 sc->msi[i + j].child = child; in dpaa2_mc_alloc_msi_impl()
803 irqs[j] = sc->msi[i + j].irq; in dpaa2_mc_alloc_msi_impl()
807 mtx_unlock(&sc->msi_lock); in dpaa2_mc_alloc_msi_impl()
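
The allocation path above stamps `count` consecutive free slots of the pre-allocated pool with the requesting child; only fragments of that scan are visible in the listing. A condensed sketch of the first-fit search, to be called with sc->msi_lock held (the pool size constant is a placeholder):

static int
dpaa2_mc_msi_first_fit(struct dpaa2_mc_softc *sc, device_t child, int count,
    int *irqs)
{
    int i, j;

    /* Look for `count` consecutive unowned slots. */
    for (i = 0; i + count <= DPAA2_MC_MSI_POOL_SIZE; i++) {
        if (sc->msi[i].child != NULL)
            continue;

        for (j = 0; j < count; j++)
            if (sc->msi[i + j].child != NULL)
                break;
        if (j != count)
            continue;    /* free run too short, keep scanning */

        /* Claim the run for this child and hand back its IRQ numbers. */
        for (j = 0; j < count; j++) {
            sc->msi[i + j].child = child;
            irqs[j] = sc->msi[i + j].irq;
        }
        return (0);
    }
    return (ENOENT);    /* pool exhausted or too fragmented */
}
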
814 * @brief Marks IRQs as free in the pre-allocated pool of MSIs.
825 mtx_assert(&sc->msi_lock, MA_NOTOWNED); in dpaa2_mc_release_msi_impl()
826 mtx_lock(&sc->msi_lock); in dpaa2_mc_release_msi_impl()
828 if (sc->msi[i].child != child) in dpaa2_mc_release_msi_impl()
831 if (sc->msi[i].irq == irqs[j]) { in dpaa2_mc_release_msi_impl()
832 sc->msi[i].child = NULL; in dpaa2_mc_release_msi_impl()
837 mtx_unlock(&sc->msi_lock); in dpaa2_mc_release_msi_impl()
845 * the pre-allocated pool.
857 mtx_assert(&sc->msi_lock, MA_NOTOWNED); in dpaa2_mc_map_msi_impl()
858 mtx_lock(&sc->msi_lock); in dpaa2_mc_map_msi_impl()
860 if (sc->msi[i].child == child && sc->msi[i].irq == irq) { in dpaa2_mc_map_msi_impl()
865 mtx_unlock(&sc->msi_lock); in dpaa2_mc_map_msi_impl()
869 return (intr_map_msi(mcdev, sc->msi_owner, dpaa2_mc_get_xref(mcdev, in dpaa2_mc_map_msi_impl()
870 sc->msi_owner), irq, addr, data)); in dpaa2_mc_map_msi_impl()
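
From a child's perspective the MC acts as a pseudo-pcib for interrupts: MSIs come out of the pool above, and mapping is forwarded upstream via intr_map_msi() using the pool owner's xref, because the whole pool was allocated on behalf of a single ICID. A child-side sketch using the standard pcib kobj methods; whether DPAA2 children call these directly or go through bus_alloc_resource() is not shown in the listing:

static int
example_child_msi_setup(device_t dev, device_t pcib)
{
    uint64_t addr;
    uint32_t data;
    int irq, error;

    /* Take one MSI from the MC's pre-allocated pool... */
    error = PCIB_ALLOC_MSI(pcib, dev, 1, 1, &irq);
    if (error != 0)
        return (error);

    /* ...and resolve it to the address/data pair the hardware must write. */
    error = PCIB_MAP_MSI(pcib, dev, irq, &addr, &data);
    if (error != 0)
        return (error);

    /* addr/data would then be programmed into the DPAA2 object. */
    return (0);
}
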