185debf7fSRuslan Bukin /*-
2101869a8SRuslan Bukin * SPDX-License-Identifier: BSD-2-Clause
3101869a8SRuslan Bukin *
4101869a8SRuslan Bukin * Copyright (c) 2016-2019 Ruslan Bukin <br@bsdpad.com>
585debf7fSRuslan Bukin *
685debf7fSRuslan Bukin * This software was developed by SRI International and the University of
785debf7fSRuslan Bukin * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
885debf7fSRuslan Bukin * ("CTSRD"), as part of the DARPA CRASH research programme.
985debf7fSRuslan Bukin *
1085debf7fSRuslan Bukin * Redistribution and use in source and binary forms, with or without
1185debf7fSRuslan Bukin * modification, are permitted provided that the following conditions
1285debf7fSRuslan Bukin * are met:
1385debf7fSRuslan Bukin * 1. Redistributions of source code must retain the above copyright
1485debf7fSRuslan Bukin * notice, this list of conditions and the following disclaimer.
1585debf7fSRuslan Bukin * 2. Redistributions in binary form must reproduce the above copyright
1685debf7fSRuslan Bukin * notice, this list of conditions and the following disclaimer in the
1785debf7fSRuslan Bukin * documentation and/or other materials provided with the distribution.
1885debf7fSRuslan Bukin *
1985debf7fSRuslan Bukin * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
2085debf7fSRuslan Bukin * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
2185debf7fSRuslan Bukin * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2285debf7fSRuslan Bukin * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
2385debf7fSRuslan Bukin * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
2485debf7fSRuslan Bukin * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
2585debf7fSRuslan Bukin * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2685debf7fSRuslan Bukin * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2785debf7fSRuslan Bukin * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2885debf7fSRuslan Bukin * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2985debf7fSRuslan Bukin * SUCH DAMAGE.
3085debf7fSRuslan Bukin */
3185debf7fSRuslan Bukin
3285debf7fSRuslan Bukin #include <sys/cdefs.h>
3385debf7fSRuslan Bukin #include "opt_platform.h"
3485debf7fSRuslan Bukin #include <sys/param.h>
3585debf7fSRuslan Bukin #include <sys/conf.h>
3685debf7fSRuslan Bukin #include <sys/bus.h>
37d987842dSRuslan Bukin #include <sys/epoch.h>
3885debf7fSRuslan Bukin #include <sys/kernel.h>
3985debf7fSRuslan Bukin #include <sys/queue.h>
4085debf7fSRuslan Bukin #include <sys/kobj.h>
4185debf7fSRuslan Bukin #include <sys/malloc.h>
4285debf7fSRuslan Bukin #include <sys/limits.h>
4385debf7fSRuslan Bukin #include <sys/lock.h>
44e2e050c8SConrad Meyer #include <sys/mutex.h>
4585debf7fSRuslan Bukin #include <sys/sysctl.h>
4685debf7fSRuslan Bukin #include <sys/systm.h>
4785debf7fSRuslan Bukin
4885debf7fSRuslan Bukin #include <machine/bus.h>
4985debf7fSRuslan Bukin
5085debf7fSRuslan Bukin #ifdef FDT
5185debf7fSRuslan Bukin #include <dev/fdt/fdt_common.h>
5285debf7fSRuslan Bukin #include <dev/ofw/ofw_bus.h>
5385debf7fSRuslan Bukin #include <dev/ofw/ofw_bus_subr.h>
5485debf7fSRuslan Bukin #endif
5585debf7fSRuslan Bukin
5685debf7fSRuslan Bukin #include <dev/xdma/xdma.h>
5785debf7fSRuslan Bukin
5885debf7fSRuslan Bukin #include <xdma_if.h>
5985debf7fSRuslan Bukin
6085debf7fSRuslan Bukin /*
6185debf7fSRuslan Bukin * Multiple xDMA controllers may work with single DMA device,
6285debf7fSRuslan Bukin * so we have global lock for physical channel management.
6385debf7fSRuslan Bukin */
/* Serializes physical-channel management across all controllers. */
static struct mtx xdma_mtx;

#define XDMA_LOCK() mtx_lock(&xdma_mtx)
#define XDMA_UNLOCK() mtx_unlock(&xdma_mtx)
#define XDMA_ASSERT_LOCKED() mtx_assert(&xdma_mtx, MA_OWNED)

/* Cells per FDT "reg" tuple; presumably 2 address + 2 size cells max. */
#define FDT_REG_CELLS 4
7185debf7fSRuslan Bukin
72951e0584SRuslan Bukin #ifdef FDT
73951e0584SRuslan Bukin static int
xdma_get_iommu_fdt(xdma_controller_t * xdma,xdma_channel_t * xchan)74951e0584SRuslan Bukin xdma_get_iommu_fdt(xdma_controller_t *xdma, xdma_channel_t *xchan)
75951e0584SRuslan Bukin {
76951e0584SRuslan Bukin struct xdma_iommu *xio;
77951e0584SRuslan Bukin phandle_t node;
78951e0584SRuslan Bukin pcell_t prop;
79951e0584SRuslan Bukin size_t len;
80951e0584SRuslan Bukin
81951e0584SRuslan Bukin node = ofw_bus_get_node(xdma->dma_dev);
82951e0584SRuslan Bukin if (OF_getproplen(node, "xdma,iommu") <= 0)
83951e0584SRuslan Bukin return (0);
84951e0584SRuslan Bukin
85951e0584SRuslan Bukin len = OF_getencprop(node, "xdma,iommu", &prop, sizeof(prop));
86951e0584SRuslan Bukin if (len != sizeof(prop)) {
87951e0584SRuslan Bukin device_printf(xdma->dev,
88951e0584SRuslan Bukin "%s: Can't get iommu device node\n", __func__);
89951e0584SRuslan Bukin return (0);
90951e0584SRuslan Bukin }
91951e0584SRuslan Bukin
92951e0584SRuslan Bukin xio = &xchan->xio;
93951e0584SRuslan Bukin xio->dev = OF_device_from_xref(prop);
94951e0584SRuslan Bukin if (xio->dev == NULL) {
95951e0584SRuslan Bukin device_printf(xdma->dev,
96951e0584SRuslan Bukin "%s: Can't get iommu device\n", __func__);
97951e0584SRuslan Bukin return (0);
98951e0584SRuslan Bukin }
99951e0584SRuslan Bukin
100951e0584SRuslan Bukin /* Found */
101951e0584SRuslan Bukin return (1);
102951e0584SRuslan Bukin }
103951e0584SRuslan Bukin #endif
104951e0584SRuslan Bukin
/*
 * Allocate virtual xDMA channel.
 *
 * Returns the new channel, or NULL if the hardware driver cannot
 * provide a real channel.  May sleep (M_WAITOK allocation).
 */
xdma_channel_t *
xdma_channel_alloc(xdma_controller_t *xdma, uint32_t caps)
{
	xdma_channel_t *xchan;
	int ret;

	xchan = malloc(sizeof(xdma_channel_t), M_XDMA, M_WAITOK | M_ZERO);
	xchan->xdma = xdma;

#ifdef FDT
	/* Check if this DMA controller supports IOMMU. */
	if (xdma_get_iommu_fdt(xdma, xchan))
		caps |= XCHAN_CAP_IOMMU | XCHAN_CAP_NOSEG;
#endif

	xchan->caps = caps;

	XDMA_LOCK();

	/* Request a real channel from hardware driver. */
	ret = XDMA_CHANNEL_ALLOC(xdma->dma_dev, xchan);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't request hardware channel.\n", __func__);
		XDMA_UNLOCK();
		free(xchan, M_XDMA);

		return (NULL);
	}

	TAILQ_INIT(&xchan->ie_handlers);

	/* Per-channel locks for the handler list and request queues. */
	mtx_init(&xchan->mtx_lock, "xDMA chan", NULL, MTX_DEF);
	mtx_init(&xchan->mtx_qin_lock, "xDMA qin", NULL, MTX_DEF);
	mtx_init(&xchan->mtx_qout_lock, "xDMA qout", NULL, MTX_DEF);
	mtx_init(&xchan->mtx_bank_lock, "xDMA bank", NULL, MTX_DEF);
	mtx_init(&xchan->mtx_proc_lock, "xDMA proc", NULL, MTX_DEF);

	TAILQ_INIT(&xchan->bank);
	TAILQ_INIT(&xchan->queue_in);
	TAILQ_INIT(&xchan->queue_out);
	TAILQ_INIT(&xchan->processing);

	if (xchan->caps & XCHAN_CAP_IOMMU)
		xdma_iommu_init(&xchan->xio);

	/* Published under the global lock alongside the hw allocation. */
	TAILQ_INSERT_TAIL(&xdma->channels, xchan, xchan_next);

	XDMA_UNLOCK();

	return (xchan);
}
16085debf7fSRuslan Bukin
/*
 * Free a virtual xDMA channel allocated by xdma_channel_alloc().
 *
 * Returns 0 on success, -1 if the hardware driver refused to release
 * the underlying physical channel (the channel is left intact then).
 */
int
xdma_channel_free(xdma_channel_t *xchan)
{
	xdma_controller_t *xdma;
	int err;

	xdma = xchan->xdma;
	KASSERT(xdma != NULL, ("xdma is NULL"));

	XDMA_LOCK();

	/* Free the real DMA channel. */
	err = XDMA_CHANNEL_FREE(xdma->dma_dev, xchan);
	if (err != 0) {
		device_printf(xdma->dev,
		    "%s: Can't free real hw channel.\n", __func__);
		XDMA_UNLOCK();
		return (-1);
	}

	/* Release scatter-gather state for SG-configured channels. */
	if (xchan->flags & XCHAN_TYPE_SG)
		xdma_channel_free_sg(xchan);

	if (xchan->caps & XCHAN_CAP_IOMMU)
		xdma_iommu_release(&xchan->xio);

	/* Drop any interrupt handlers still registered on the channel. */
	xdma_teardown_all_intr(xchan);

	/* Tear down the per-channel locks created at allocation time. */
	mtx_destroy(&xchan->mtx_lock);
	mtx_destroy(&xchan->mtx_qin_lock);
	mtx_destroy(&xchan->mtx_qout_lock);
	mtx_destroy(&xchan->mtx_bank_lock);
	mtx_destroy(&xchan->mtx_proc_lock);

	TAILQ_REMOVE(&xdma->channels, xchan, xchan_next);

	free(xchan, M_XDMA);

	XDMA_UNLOCK();

	return (0);
}
20385debf7fSRuslan Bukin
20485debf7fSRuslan Bukin int
xdma_setup_intr(xdma_channel_t * xchan,int flags,int (* cb)(void *,xdma_transfer_status_t *),void * arg,void ** ihandler)205d987842dSRuslan Bukin xdma_setup_intr(xdma_channel_t *xchan, int flags,
2063d5b3b0aSRuslan Bukin int (*cb)(void *, xdma_transfer_status_t *),
2073d5b3b0aSRuslan Bukin void *arg, void **ihandler)
20885debf7fSRuslan Bukin {
20985debf7fSRuslan Bukin struct xdma_intr_handler *ih;
21085debf7fSRuslan Bukin xdma_controller_t *xdma;
21185debf7fSRuslan Bukin
21285debf7fSRuslan Bukin xdma = xchan->xdma;
21385debf7fSRuslan Bukin KASSERT(xdma != NULL, ("xdma is NULL"));
21485debf7fSRuslan Bukin
21585debf7fSRuslan Bukin /* Sanity check. */
21685debf7fSRuslan Bukin if (cb == NULL) {
21785debf7fSRuslan Bukin device_printf(xdma->dev,
21885debf7fSRuslan Bukin "%s: Can't setup interrupt handler.\n",
21985debf7fSRuslan Bukin __func__);
22085debf7fSRuslan Bukin
22185debf7fSRuslan Bukin return (-1);
22285debf7fSRuslan Bukin }
22385debf7fSRuslan Bukin
22485debf7fSRuslan Bukin ih = malloc(sizeof(struct xdma_intr_handler),
22585debf7fSRuslan Bukin M_XDMA, M_WAITOK | M_ZERO);
226d987842dSRuslan Bukin ih->flags = flags;
22785debf7fSRuslan Bukin ih->cb = cb;
22885debf7fSRuslan Bukin ih->cb_user = arg;
22985debf7fSRuslan Bukin
2303d5b3b0aSRuslan Bukin XCHAN_LOCK(xchan);
23185debf7fSRuslan Bukin TAILQ_INSERT_TAIL(&xchan->ie_handlers, ih, ih_next);
2323d5b3b0aSRuslan Bukin XCHAN_UNLOCK(xchan);
23385debf7fSRuslan Bukin
2343d5b3b0aSRuslan Bukin if (ihandler != NULL)
23585debf7fSRuslan Bukin *ihandler = ih;
23685debf7fSRuslan Bukin
23785debf7fSRuslan Bukin return (0);
23885debf7fSRuslan Bukin }
23985debf7fSRuslan Bukin
/*
 * Remove a single interrupt handler previously installed with
 * xdma_setup_intr() and free it.
 */
int
xdma_teardown_intr(xdma_channel_t *xchan, struct xdma_intr_handler *ih)
{
	xdma_controller_t *xdma;

	xdma = xchan->xdma;
	KASSERT(xdma != NULL, ("xdma is NULL"));

	/* Sanity check. */
	if (ih == NULL) {
		device_printf(xdma->dev,
		    "%s: Can't teardown interrupt.\n", __func__);
		return (-1);
	}

	/*
	 * NOTE(review): the handler list is modified here without taking
	 * XCHAN_LOCK(), unlike xdma_setup_intr() — confirm that callers
	 * serialize teardown against concurrent setup/callback delivery.
	 */
	TAILQ_REMOVE(&xchan->ie_handlers, ih, ih_next);
	free(ih, M_XDMA);

	return (0);
}
26085debf7fSRuslan Bukin
26185debf7fSRuslan Bukin int
xdma_teardown_all_intr(xdma_channel_t * xchan)26285debf7fSRuslan Bukin xdma_teardown_all_intr(xdma_channel_t *xchan)
26385debf7fSRuslan Bukin {
26485debf7fSRuslan Bukin struct xdma_intr_handler *ih_tmp;
26585debf7fSRuslan Bukin struct xdma_intr_handler *ih;
26685debf7fSRuslan Bukin
267*695d3e52SJohn Baldwin KASSERT(xchan->xdma != NULL, ("xdma is NULL"));
26885debf7fSRuslan Bukin
26985debf7fSRuslan Bukin TAILQ_FOREACH_SAFE(ih, &xchan->ie_handlers, ih_next, ih_tmp) {
27085debf7fSRuslan Bukin TAILQ_REMOVE(&xchan->ie_handlers, ih, ih_next);
27185debf7fSRuslan Bukin free(ih, M_XDMA);
27285debf7fSRuslan Bukin }
27385debf7fSRuslan Bukin
27485debf7fSRuslan Bukin return (0);
27585debf7fSRuslan Bukin }
27685debf7fSRuslan Bukin
27785debf7fSRuslan Bukin int
xdma_request(xdma_channel_t * xchan,struct xdma_request * req)2783d5b3b0aSRuslan Bukin xdma_request(xdma_channel_t *xchan, struct xdma_request *req)
27985debf7fSRuslan Bukin {
28085debf7fSRuslan Bukin xdma_controller_t *xdma;
28185debf7fSRuslan Bukin int ret;
28285debf7fSRuslan Bukin
28385debf7fSRuslan Bukin xdma = xchan->xdma;
28485debf7fSRuslan Bukin
2853d5b3b0aSRuslan Bukin KASSERT(xdma != NULL, ("xdma is NULL"));
28685debf7fSRuslan Bukin
28785debf7fSRuslan Bukin XCHAN_LOCK(xchan);
2883d5b3b0aSRuslan Bukin ret = XDMA_CHANNEL_REQUEST(xdma->dma_dev, xchan, req);
28985debf7fSRuslan Bukin if (ret != 0) {
29085debf7fSRuslan Bukin device_printf(xdma->dev,
2913d5b3b0aSRuslan Bukin "%s: Can't request a transfer.\n", __func__);
2923d5b3b0aSRuslan Bukin XCHAN_UNLOCK(xchan);
2933d5b3b0aSRuslan Bukin
29485debf7fSRuslan Bukin return (-1);
29585debf7fSRuslan Bukin }
2963d5b3b0aSRuslan Bukin XCHAN_UNLOCK(xchan);
29785debf7fSRuslan Bukin
29885debf7fSRuslan Bukin return (0);
29985debf7fSRuslan Bukin }
30085debf7fSRuslan Bukin
30185debf7fSRuslan Bukin int
xdma_control(xdma_channel_t * xchan,enum xdma_command cmd)3023d5b3b0aSRuslan Bukin xdma_control(xdma_channel_t *xchan, enum xdma_command cmd)
30385debf7fSRuslan Bukin {
30485debf7fSRuslan Bukin xdma_controller_t *xdma;
30585debf7fSRuslan Bukin int ret;
30685debf7fSRuslan Bukin
30785debf7fSRuslan Bukin xdma = xchan->xdma;
30885debf7fSRuslan Bukin KASSERT(xdma != NULL, ("xdma is NULL"));
30985debf7fSRuslan Bukin
3103d5b3b0aSRuslan Bukin ret = XDMA_CHANNEL_CONTROL(xdma->dma_dev, xchan, cmd);
31185debf7fSRuslan Bukin if (ret != 0) {
31285debf7fSRuslan Bukin device_printf(xdma->dev,
3133d5b3b0aSRuslan Bukin "%s: Can't process command.\n", __func__);
31485debf7fSRuslan Bukin return (-1);
31585debf7fSRuslan Bukin }
31685debf7fSRuslan Bukin
31785debf7fSRuslan Bukin return (0);
31885debf7fSRuslan Bukin }
31985debf7fSRuslan Bukin
/*
 * Deliver a transfer-completion notification to every interrupt
 * handler registered on the channel.  Called by hardware drivers.
 */
void
xdma_callback(xdma_channel_t *xchan, xdma_transfer_status_t *status)
{
	struct xdma_intr_handler *ih_tmp;
	struct xdma_intr_handler *ih;
	struct epoch_tracker et;

	KASSERT(xchan->xdma != NULL, ("xdma is NULL"));

	TAILQ_FOREACH_SAFE(ih, &xchan->ie_handlers, ih_next, ih_tmp) {
		if (ih->cb != NULL) {
			/*
			 * Handlers registered with XDMA_INTR_NET run
			 * inside a network epoch section.
			 */
			if (ih->flags & XDMA_INTR_NET)
				NET_EPOCH_ENTER(et);
			ih->cb(ih->cb_user, status);
			if (ih->flags & XDMA_INTR_NET)
				NET_EPOCH_EXIT(et);
		}
	}

	/* For SG channels, push any queued requests back to the hardware. */
	if (xchan->flags & XCHAN_TYPE_SG)
		xdma_queue_submit(xchan);
}
34285debf7fSRuslan Bukin
34385debf7fSRuslan Bukin #ifdef FDT
34485debf7fSRuslan Bukin /*
34585debf7fSRuslan Bukin * Notify the DMA driver we have machine-dependent data in FDT.
34685debf7fSRuslan Bukin */
34785debf7fSRuslan Bukin static int
xdma_ofw_md_data(xdma_controller_t * xdma,pcell_t * cells,int ncells)34885debf7fSRuslan Bukin xdma_ofw_md_data(xdma_controller_t *xdma, pcell_t *cells, int ncells)
34985debf7fSRuslan Bukin {
35085debf7fSRuslan Bukin uint32_t ret;
35185debf7fSRuslan Bukin
3523d5b3b0aSRuslan Bukin ret = XDMA_OFW_MD_DATA(xdma->dma_dev,
3533d5b3b0aSRuslan Bukin cells, ncells, (void **)&xdma->data);
35485debf7fSRuslan Bukin
35585debf7fSRuslan Bukin return (ret);
35685debf7fSRuslan Bukin }
35785debf7fSRuslan Bukin
358951e0584SRuslan Bukin int
xdma_handle_mem_node(vmem_t * vmem,phandle_t memory)359101869a8SRuslan Bukin xdma_handle_mem_node(vmem_t *vmem, phandle_t memory)
360101869a8SRuslan Bukin {
361101869a8SRuslan Bukin pcell_t reg[FDT_REG_CELLS * FDT_MEM_REGIONS];
362101869a8SRuslan Bukin pcell_t *regp;
363101869a8SRuslan Bukin int addr_cells, size_cells;
364101869a8SRuslan Bukin int i, reg_len, ret, tuple_size, tuples;
365f4ab98c5SConrad Meyer u_long mem_start, mem_size;
366101869a8SRuslan Bukin
367101869a8SRuslan Bukin if ((ret = fdt_addrsize_cells(OF_parent(memory), &addr_cells,
368101869a8SRuslan Bukin &size_cells)) != 0)
369101869a8SRuslan Bukin return (ret);
370101869a8SRuslan Bukin
371101869a8SRuslan Bukin if (addr_cells > 2)
372101869a8SRuslan Bukin return (ERANGE);
373101869a8SRuslan Bukin
374101869a8SRuslan Bukin tuple_size = sizeof(pcell_t) * (addr_cells + size_cells);
375101869a8SRuslan Bukin reg_len = OF_getproplen(memory, "reg");
376101869a8SRuslan Bukin if (reg_len <= 0 || reg_len > sizeof(reg))
377101869a8SRuslan Bukin return (ERANGE);
378101869a8SRuslan Bukin
379101869a8SRuslan Bukin if (OF_getprop(memory, "reg", reg, reg_len) <= 0)
380101869a8SRuslan Bukin return (ENXIO);
381101869a8SRuslan Bukin
382101869a8SRuslan Bukin tuples = reg_len / tuple_size;
383101869a8SRuslan Bukin regp = (pcell_t *)®
384101869a8SRuslan Bukin for (i = 0; i < tuples; i++) {
385101869a8SRuslan Bukin ret = fdt_data_to_res(regp, addr_cells, size_cells,
386101869a8SRuslan Bukin &mem_start, &mem_size);
387101869a8SRuslan Bukin if (ret != 0)
388101869a8SRuslan Bukin return (ret);
389101869a8SRuslan Bukin
390101869a8SRuslan Bukin vmem_add(vmem, mem_start, mem_size, 0);
391101869a8SRuslan Bukin regp += addr_cells + size_cells;
392101869a8SRuslan Bukin }
393101869a8SRuslan Bukin
394101869a8SRuslan Bukin return (0);
395101869a8SRuslan Bukin }
396101869a8SRuslan Bukin
/*
 * Create a vmem arena backed by the device's "memory-region" FDT
 * property.  Returns NULL when the device has no OFW node, no
 * memory-region, or the region cannot be parsed.
 */
vmem_t *
xdma_get_memory(device_t dev)
{
	phandle_t mem_node, node;
	pcell_t mem_handle;
	vmem_t *vmem;

	node = ofw_bus_get_node(dev);
	if (node <= 0) {
		device_printf(dev,
		    "%s called on not ofw based device.\n", __func__);
		return (NULL);
	}

	/* A missing "memory-region" is not an error: no dedicated memory. */
	if (!OF_hasprop(node, "memory-region"))
		return (NULL);

	if (OF_getencprop(node, "memory-region", (void *)&mem_handle,
	    sizeof(mem_handle)) <= 0)
		return (NULL);

	vmem = vmem_create("xDMA vmem", 0, 0, PAGE_SIZE,
	    PAGE_SIZE, M_BESTFIT | M_WAITOK);
	if (vmem == NULL)
		return (NULL);

	/* Populate the arena from the referenced memory node. */
	mem_node = OF_node_from_xref(mem_handle);
	if (xdma_handle_mem_node(vmem, mem_node) != 0) {
		vmem_destroy(vmem);
		return (NULL);
	}

	return (vmem);
}
431101869a8SRuslan Bukin
/* Release a vmem arena obtained from xdma_get_memory(). */
void
xdma_put_memory(vmem_t *vmem)
{

	vmem_destroy(vmem);
}
438101869a8SRuslan Bukin
/* Attach a vmem arena to the channel for buffer allocations. */
void
xchan_set_memory(xdma_channel_t *xchan, vmem_t *vmem)
{

	xchan->vmem = vmem;
}
445101869a8SRuslan Bukin
44685debf7fSRuslan Bukin /*
44785debf7fSRuslan Bukin * Allocate xdma controller.
44885debf7fSRuslan Bukin */
44985debf7fSRuslan Bukin xdma_controller_t *
xdma_ofw_get(device_t dev,const char * prop)45085debf7fSRuslan Bukin xdma_ofw_get(device_t dev, const char *prop)
45185debf7fSRuslan Bukin {
45285debf7fSRuslan Bukin phandle_t node, parent;
45385debf7fSRuslan Bukin xdma_controller_t *xdma;
45485debf7fSRuslan Bukin device_t dma_dev;
45585debf7fSRuslan Bukin pcell_t *cells;
45685debf7fSRuslan Bukin int ncells;
45785debf7fSRuslan Bukin int error;
45885debf7fSRuslan Bukin int ndmas;
45985debf7fSRuslan Bukin int idx;
46085debf7fSRuslan Bukin
46185debf7fSRuslan Bukin node = ofw_bus_get_node(dev);
4623d5b3b0aSRuslan Bukin if (node <= 0)
46385debf7fSRuslan Bukin device_printf(dev,
46485debf7fSRuslan Bukin "%s called on not ofw based device.\n", __func__);
46585debf7fSRuslan Bukin
46685debf7fSRuslan Bukin error = ofw_bus_parse_xref_list_get_length(node,
46785debf7fSRuslan Bukin "dmas", "#dma-cells", &ndmas);
46885debf7fSRuslan Bukin if (error) {
46985debf7fSRuslan Bukin device_printf(dev,
47085debf7fSRuslan Bukin "%s can't get dmas list.\n", __func__);
47185debf7fSRuslan Bukin return (NULL);
47285debf7fSRuslan Bukin }
47385debf7fSRuslan Bukin
47485debf7fSRuslan Bukin if (ndmas == 0) {
47585debf7fSRuslan Bukin device_printf(dev,
47685debf7fSRuslan Bukin "%s dmas list is empty.\n", __func__);
47785debf7fSRuslan Bukin return (NULL);
47885debf7fSRuslan Bukin }
47985debf7fSRuslan Bukin
48085debf7fSRuslan Bukin error = ofw_bus_find_string_index(node, "dma-names", prop, &idx);
48185debf7fSRuslan Bukin if (error != 0) {
48285debf7fSRuslan Bukin device_printf(dev,
48385debf7fSRuslan Bukin "%s can't find string index.\n", __func__);
48485debf7fSRuslan Bukin return (NULL);
48585debf7fSRuslan Bukin }
48685debf7fSRuslan Bukin
48785debf7fSRuslan Bukin error = ofw_bus_parse_xref_list_alloc(node, "dmas", "#dma-cells",
48885debf7fSRuslan Bukin idx, &parent, &ncells, &cells);
48985debf7fSRuslan Bukin if (error != 0) {
49085debf7fSRuslan Bukin device_printf(dev,
49185debf7fSRuslan Bukin "%s can't get dma device xref.\n", __func__);
49285debf7fSRuslan Bukin return (NULL);
49385debf7fSRuslan Bukin }
49485debf7fSRuslan Bukin
49585debf7fSRuslan Bukin dma_dev = OF_device_from_xref(parent);
49685debf7fSRuslan Bukin if (dma_dev == NULL) {
49785debf7fSRuslan Bukin device_printf(dev,
49885debf7fSRuslan Bukin "%s can't get dma device.\n", __func__);
49985debf7fSRuslan Bukin return (NULL);
50085debf7fSRuslan Bukin }
50185debf7fSRuslan Bukin
5023d5b3b0aSRuslan Bukin xdma = malloc(sizeof(struct xdma_controller),
5033d5b3b0aSRuslan Bukin M_XDMA, M_WAITOK | M_ZERO);
50485debf7fSRuslan Bukin xdma->dev = dev;
50585debf7fSRuslan Bukin xdma->dma_dev = dma_dev;
50685debf7fSRuslan Bukin
50785debf7fSRuslan Bukin TAILQ_INIT(&xdma->channels);
50885debf7fSRuslan Bukin
50985debf7fSRuslan Bukin xdma_ofw_md_data(xdma, cells, ncells);
51085debf7fSRuslan Bukin free(cells, M_OFWPROP);
51185debf7fSRuslan Bukin
51285debf7fSRuslan Bukin return (xdma);
51385debf7fSRuslan Bukin }
51485debf7fSRuslan Bukin #endif
51585debf7fSRuslan Bukin
51685debf7fSRuslan Bukin /*
517a8692c16SRuslan Bukin * Allocate xdma controller.
518a8692c16SRuslan Bukin */
519a8692c16SRuslan Bukin xdma_controller_t *
xdma_get(device_t dev,device_t dma_dev)520a8692c16SRuslan Bukin xdma_get(device_t dev, device_t dma_dev)
521a8692c16SRuslan Bukin {
522a8692c16SRuslan Bukin xdma_controller_t *xdma;
523a8692c16SRuslan Bukin
524a8692c16SRuslan Bukin xdma = malloc(sizeof(struct xdma_controller),
525a8692c16SRuslan Bukin M_XDMA, M_WAITOK | M_ZERO);
526a8692c16SRuslan Bukin xdma->dev = dev;
527a8692c16SRuslan Bukin xdma->dma_dev = dma_dev;
528a8692c16SRuslan Bukin
529a8692c16SRuslan Bukin TAILQ_INIT(&xdma->channels);
530a8692c16SRuslan Bukin
531a8692c16SRuslan Bukin return (xdma);
532a8692c16SRuslan Bukin }
533a8692c16SRuslan Bukin
534a8692c16SRuslan Bukin /*
53585debf7fSRuslan Bukin * Free xDMA controller object.
53685debf7fSRuslan Bukin */
53785debf7fSRuslan Bukin int
xdma_put(xdma_controller_t * xdma)53885debf7fSRuslan Bukin xdma_put(xdma_controller_t *xdma)
53985debf7fSRuslan Bukin {
54085debf7fSRuslan Bukin
54185debf7fSRuslan Bukin XDMA_LOCK();
54285debf7fSRuslan Bukin
54385debf7fSRuslan Bukin /* Ensure no channels allocated. */
54485debf7fSRuslan Bukin if (!TAILQ_EMPTY(&xdma->channels)) {
54585debf7fSRuslan Bukin device_printf(xdma->dev, "%s: Can't free xDMA\n", __func__);
54685debf7fSRuslan Bukin return (-1);
54785debf7fSRuslan Bukin }
54885debf7fSRuslan Bukin
54985debf7fSRuslan Bukin free(xdma->data, M_DEVBUF);
55085debf7fSRuslan Bukin free(xdma, M_XDMA);
55185debf7fSRuslan Bukin
55285debf7fSRuslan Bukin XDMA_UNLOCK();
55385debf7fSRuslan Bukin
55485debf7fSRuslan Bukin return (0);
55585debf7fSRuslan Bukin }
55685debf7fSRuslan Bukin
static void
xdma_init(void)
{

	/* Set up the global physical-channel management lock. */
	mtx_init(&xdma_mtx, "xDMA", NULL, MTX_DEF);
}

/* Initialize the xDMA framework during driver subsystem startup. */
SYSINIT(xdma, SI_SUB_DRIVERS, SI_ORDER_FIRST, xdma_init, NULL);
565