xref: /freebsd/sys/dev/xdma/xdma_sg.c (revision 101869a8f0e8e2546a8d0ffeaa93430a3a851216)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2018-2019 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_platform.h"
#include <sys/param.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/rwlock.h>

#include <machine/bus.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#include <dev/xdma/xdma.h>

#include <xdma_if.h>

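/*
 * Callback argument used to collect the segment list produced by
 * bus_dmamap_load() and its variants.
 */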
struct seg_load_request {
	struct bus_dma_segment *seg;
	uint32_t nsegs;
	uint32_t error;
};

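/*
 * Release the per-request buffers taken from the channel's reserved vmem
 * arena: unmap and free the KVA, then return the physical range.
 */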
static void
xchan_bufs_free_reserved(xdma_channel_t *xchan)
{
	struct xdma_request *xr;
	vm_size_t size;
	int i;

	for (i = 0; i < xchan->xr_num; i++) {
		xr = &xchan->xr_mem[i];
		size = xr->buf.size;
		if (xr->buf.vaddr) {
			pmap_kremove_device(xr->buf.vaddr, size);
			kva_free(xr->buf.vaddr, size);
			xr->buf.vaddr = 0;
		}
		if (xr->buf.paddr) {
			vmem_free(xchan->vmem, xr->buf.paddr, size);
			xr->buf.paddr = 0;
		}
		xr->buf.size = 0;
	}
}

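/*
 * Allocate one buffer per request from the channel's reserved vmem arena
 * and map it into KVA as device memory.
 */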
static int
xchan_bufs_alloc_reserved(xdma_channel_t *xchan)
{
	xdma_controller_t *xdma;
	struct xdma_request *xr;
	vmem_addr_t addr;
	vm_size_t size;
	int i;

	xdma = xchan->xdma;

	if (xchan->vmem == NULL)
		return (ENOBUFS);

	for (i = 0; i < xchan->xr_num; i++) {
		xr = &xchan->xr_mem[i];
		size = round_page(xchan->maxsegsize);
		if (vmem_alloc(xchan->vmem, size,
		    M_BESTFIT | M_NOWAIT, &addr)) {
			device_printf(xdma->dev,
			    "%s: Can't allocate memory\n", __func__);
			xchan_bufs_free_reserved(xchan);
			return (ENOMEM);
		}

		xr->buf.size = size;
		xr->buf.paddr = addr;
		xr->buf.vaddr = kva_alloc(size);
		if (xr->buf.vaddr == 0) {
			device_printf(xdma->dev,
			    "%s: Can't allocate KVA\n", __func__);
			xchan_bufs_free_reserved(xchan);
			return (ENOMEM);
		}
		pmap_kenter_device(xr->buf.vaddr, size, addr);
	}

	return (0);
}

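/*
 * Create the channel's bus_dma tag and a DMA map for each request.
 * The maps are loaded later, at submit time.
 */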
static int
xchan_bufs_alloc_busdma(xdma_channel_t *xchan)
{
	xdma_controller_t *xdma;
	struct xdma_request *xr;
	int err;
	int i;

	xdma = xchan->xdma;

	/* Create bus_dma tag. */
	err = bus_dma_tag_create(
	    bus_get_dma_tag(xdma->dev),	/* Parent tag. */
	    xchan->alignment,		/* alignment */
	    xchan->boundary,		/* boundary */
	    xchan->lowaddr,		/* lowaddr */
	    xchan->highaddr,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    xchan->maxsegsize * xchan->maxnsegs, /* maxsize */
	    xchan->maxnsegs,		/* nsegments */
	    xchan->maxsegsize,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &xchan->dma_tag_bufs);
	if (err != 0) {
		device_printf(xdma->dev,
		    "%s: Can't create bus_dma tag.\n", __func__);
		return (-1);
	}

	for (i = 0; i < xchan->xr_num; i++) {
		xr = &xchan->xr_mem[i];
		err = bus_dmamap_create(xchan->dma_tag_bufs, 0,
		    &xr->buf.map);
		if (err != 0) {
			device_printf(xdma->dev,
			    "%s: Can't create buf DMA map.\n", __func__);

			/* Cleanup: destroy the maps created so far. */
			while (--i >= 0)
				bus_dmamap_destroy(xchan->dma_tag_bufs,
				    xchan->xr_mem[i].buf.map);
			bus_dma_tag_destroy(xchan->dma_tag_bufs);

			return (-1);
		}
	}

	return (0);
}

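/*
 * Allocate per-request buffers using either busdma or the reserved vmem
 * arena, depending on the channel capabilities.
 */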
static int
xchan_bufs_alloc(xdma_channel_t *xchan)
{
	xdma_controller_t *xdma;
	int ret;

	xdma = xchan->xdma;

	if (xdma == NULL) {
		printf("%s: Channel was not allocated properly.\n", __func__);
		return (-1);
	}

	if (xchan->caps & XCHAN_CAP_BUSDMA)
		ret = xchan_bufs_alloc_busdma(xchan);
	else
		ret = xchan_bufs_alloc_reserved(xchan);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't allocate bufs.\n", __func__);
		return (-1);
	}

	xchan->flags |= XCHAN_BUFS_ALLOCATED;

	return (0);
}

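/*
 * Tear down whatever xchan_bufs_alloc() set up: the DMA maps and tag for
 * busdma channels, or the reserved buffers otherwise.
 */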
static int
xchan_bufs_free(xdma_channel_t *xchan)
{
	struct xdma_request *xr;
	struct xchan_buf *b;
	int i;

	if ((xchan->flags & XCHAN_BUFS_ALLOCATED) == 0)
		return (-1);

	if (xchan->caps & XCHAN_CAP_BUSDMA) {
		for (i = 0; i < xchan->xr_num; i++) {
			xr = &xchan->xr_mem[i];
			b = &xr->buf;
			bus_dmamap_destroy(xchan->dma_tag_bufs, b->map);
		}
		bus_dma_tag_destroy(xchan->dma_tag_bufs);
	} else
		xchan_bufs_free_reserved(xchan);

	xchan->flags &= ~XCHAN_BUFS_ALLOCATED;

	return (0);
}

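/*
 * Release all scatter-gather resources (buffers, sglist and the request
 * bank) associated with the channel.
 */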
void
xdma_channel_free_sg(xdma_channel_t *xchan)
{

	xchan_bufs_free(xchan);
	xchan_sglist_free(xchan);
	xchan_bank_free(xchan);
}

/*
 * Prepare xchan for a scatter-gather transfer.
 * xr_num - size of the xdma request queue,
 * maxsegsize - maximum size in bytes of a single scatter-gather list element,
 * maxnsegs - maximum number of scatter-gather list elements per request.
 */
int
xdma_prep_sg(xdma_channel_t *xchan, uint32_t xr_num,
    bus_size_t maxsegsize, bus_size_t maxnsegs,
    bus_size_t alignment, bus_addr_t boundary,
    bus_addr_t lowaddr, bus_addr_t highaddr)
{
	xdma_controller_t *xdma;
	int ret;

	xdma = xchan->xdma;

	KASSERT(xdma != NULL, ("xdma is NULL"));

	if (xchan->flags & XCHAN_CONFIGURED) {
		device_printf(xdma->dev,
		    "%s: Channel is already configured.\n", __func__);
		return (-1);
	}

	xchan->xr_num = xr_num;
	xchan->maxsegsize = maxsegsize;
	xchan->maxnsegs = maxnsegs;
	xchan->alignment = alignment;
	xchan->boundary = boundary;
	xchan->lowaddr = lowaddr;
	xchan->highaddr = highaddr;

	if (xchan->maxnsegs > XDMA_MAX_SEG) {
		device_printf(xdma->dev, "%s: maxnsegs is too big\n",
		    __func__);
		return (-1);
	}

	xchan_bank_init(xchan);

	/* Allocate sglist. */
	ret = xchan_sglist_alloc(xchan);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't allocate sglist.\n", __func__);
		return (-1);
	}

	/* Allocate buffers if required. */
	if ((xchan->caps & XCHAN_CAP_NOBUFS) == 0) {
		ret = xchan_bufs_alloc(xchan);
		if (ret != 0) {
			device_printf(xdma->dev,
			    "%s: Can't allocate bufs.\n", __func__);

			/* Cleanup. */
			xchan_sglist_free(xchan);
			xchan_bank_free(xchan);

			return (-1);
		}
	}

	xchan->flags |= (XCHAN_CONFIGURED | XCHAN_TYPE_SG);

	XCHAN_LOCK(xchan);
	ret = XDMA_CHANNEL_PREP_SG(xdma->dma_dev, xchan);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't prepare SG transfer.\n", __func__);
		XCHAN_UNLOCK(xchan);

		return (-1);
	}
	XCHAN_UNLOCK(xchan);

	return (0);
}

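/*
 * Called by the DMA engine driver when one segment of a request has
 * completed.  Once the last segment is done, sync and unload the DMA map
 * (or copy the data back into the mbuf for reserved buffers) and move the
 * request to the output queue.
 */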
void
xchan_seg_done(xdma_channel_t *xchan,
    struct xdma_transfer_status *st)
{
	struct xdma_request *xr;
	xdma_controller_t *xdma;
	struct xchan_buf *b;

	xdma = xchan->xdma;

	xr = TAILQ_FIRST(&xchan->processing);
	if (xr == NULL)
		panic("request not found\n");

	b = &xr->buf;

	atomic_subtract_int(&b->nsegs_left, 1);

	if (b->nsegs_left == 0) {
		if (xchan->caps & XCHAN_CAP_BUSDMA) {
			if (xr->direction == XDMA_MEM_TO_DEV)
				bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
				    BUS_DMASYNC_POSTWRITE);
			else
				bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
				    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(xchan->dma_tag_bufs, b->map);
		} else {
			if (xr->req_type == XR_TYPE_MBUF &&
			    xr->direction == XDMA_DEV_TO_MEM)
				m_copyback(xr->m, 0, st->transferred,
				    (void *)xr->buf.vaddr);
		}
		xr->status.error = st->error;
		xr->status.transferred = st->transferred;

		QUEUE_PROC_LOCK(xchan);
		TAILQ_REMOVE(&xchan->processing, xr, xr_next);
		QUEUE_PROC_UNLOCK(xchan);

		QUEUE_OUT_LOCK(xchan);
		TAILQ_INSERT_TAIL(&xchan->queue_out, xr, xr_next);
		QUEUE_OUT_UNLOCK(xchan);
	}
}

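/*
 * bus_dmamap_load() callback: copy the resulting segment list into the
 * caller-provided seg_load_request.
 */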
static void
xdma_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct seg_load_request *slr;
	struct bus_dma_segment *seg;
	int i;

	slr = arg;
	seg = slr->seg;

	if (error != 0) {
		slr->error = error;
		return;
	}

	slr->nsegs = nsegs;

	for (i = 0; i < nsegs; i++) {
		seg[i].ds_addr = segs[i].ds_addr;
		seg[i].ds_len = segs[i].ds_len;
	}
}

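/*
 * Load a request (mbuf, bio or virtual address range) into DMA segments
 * using busdma.  Returns the number of segments, or 0 on failure.
 */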
static int
_xdma_load_data_busdma(xdma_channel_t *xchan, struct xdma_request *xr,
    struct bus_dma_segment *seg)
{
	xdma_controller_t *xdma;
	struct seg_load_request slr;
	uint32_t nsegs;
	void *addr;
	int error;

	xdma = xchan->xdma;

	error = 0;
	nsegs = 0;

	switch (xr->req_type) {
	case XR_TYPE_MBUF:
		error = bus_dmamap_load_mbuf_sg(xchan->dma_tag_bufs,
		    xr->buf.map, xr->m, seg, &nsegs, BUS_DMA_NOWAIT);
		break;
	case XR_TYPE_BIO:
		slr.nsegs = 0;
		slr.error = 0;
		slr.seg = seg;
		error = bus_dmamap_load_bio(xchan->dma_tag_bufs,
		    xr->buf.map, xr->bp, xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
		if (slr.error != 0) {
			device_printf(xdma->dma_dev,
			    "%s: bus_dmamap_load failed, err %d\n",
			    __func__, slr.error);
			return (0);
		}
		nsegs = slr.nsegs;
		break;
	case XR_TYPE_VIRT:
		switch (xr->direction) {
		case XDMA_MEM_TO_DEV:
			addr = (void *)xr->src_addr;
			break;
		case XDMA_DEV_TO_MEM:
			addr = (void *)xr->dst_addr;
			break;
		default:
			device_printf(xdma->dma_dev,
			    "%s: Direction is not supported\n", __func__);
			return (0);
		}
		slr.nsegs = 0;
		slr.error = 0;
		slr.seg = seg;
		error = bus_dmamap_load(xchan->dma_tag_bufs, xr->buf.map,
		    addr, (xr->block_len * xr->block_num),
		    xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
		if (slr.error != 0) {
			device_printf(xdma->dma_dev,
			    "%s: bus_dmamap_load failed, err %d\n",
			    __func__, slr.error);
			return (0);
		}
		nsegs = slr.nsegs;
		break;
	default:
		break;
	}

	if (error != 0) {
		if (error == ENOMEM) {
			/*
			 * Out of memory. Try again later.
			 * TODO: count errors.
			 */
		} else
			device_printf(xdma->dma_dev,
			    "%s: bus_dmamap_load failed with err %d\n",
			    __func__, error);
		return (0);
	}

	if (xr->direction == XDMA_MEM_TO_DEV)
		bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
		    BUS_DMASYNC_PREWRITE);
	else
		bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
		    BUS_DMASYNC_PREREAD);

	return (nsegs);
}

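/*
 * Build a single-segment descriptor for a request on the non-busdma path,
 * copying mbuf data into the channel's reserved buffer when one is used.
 * Returns the number of segments.
 */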
static int
_xdma_load_data(xdma_channel_t *xchan, struct xdma_request *xr,
    struct bus_dma_segment *seg)
{
	xdma_controller_t *xdma;
	struct mbuf *m;
	uint32_t nsegs;

	xdma = xchan->xdma;

	m = xr->m;

	nsegs = 1;

	switch (xr->req_type) {
	case XR_TYPE_MBUF:
		if ((xchan->caps & XCHAN_CAP_NOBUFS) == 0) {
			if (xr->direction == XDMA_MEM_TO_DEV)
				m_copydata(m, 0, m->m_pkthdr.len,
				    (void *)xr->buf.vaddr);
			seg[0].ds_addr = (bus_addr_t)xr->buf.paddr;
		} else
			seg[0].ds_addr = mtod(m, bus_addr_t);
		seg[0].ds_len = m->m_pkthdr.len;
		break;
	case XR_TYPE_BIO:
	case XR_TYPE_VIRT:
	default:
		panic("implement me\n");
	}

	return (nsegs);
}

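/*
 * Convert a request into DMA segments and record the segment count in the
 * request's buffer.  Returns 0 if the request can't be loaded yet.
 */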
static int
xdma_load_data(xdma_channel_t *xchan,
    struct xdma_request *xr, struct bus_dma_segment *seg)
{
	xdma_controller_t *xdma;
	int error;
	int nsegs;

	xdma = xchan->xdma;

	error = 0;
	nsegs = 0;

	if (xchan->caps & XCHAN_CAP_BUSDMA)
		nsegs = _xdma_load_data_busdma(xchan, xr, seg);
	else
		nsegs = _xdma_load_data(xchan, xr, seg);
	if (nsegs == 0)
		return (0); /* Try again later. */

	xr->buf.nsegs = nsegs;
	xr->buf.nsegs_left = nsegs;

	return (nsegs);
}

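/*
 * Walk the input queue, load as many requests as the DMA engine capacity
 * and the sglist can hold, and move them to the processing queue.
 * Returns the number of sglist entries built.
 */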
static int
xdma_process(xdma_channel_t *xchan,
    struct xdma_sglist *sg)
{
	struct bus_dma_segment seg[XDMA_MAX_SEG];
	struct xdma_request *xr;
	struct xdma_request *xr_tmp;
	xdma_controller_t *xdma;
	uint32_t capacity;
	uint32_t n;
	uint32_t c;
	int nsegs;
	int ret;

	XCHAN_ASSERT_LOCKED(xchan);

	xdma = xchan->xdma;

	n = 0;
	c = 0;

	ret = XDMA_CHANNEL_CAPACITY(xdma->dma_dev, xchan, &capacity);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't get DMA controller capacity.\n", __func__);
		return (-1);
	}

	TAILQ_FOREACH_SAFE(xr, &xchan->queue_in, xr_next, xr_tmp) {
		switch (xr->req_type) {
		case XR_TYPE_MBUF:
			if ((xchan->caps & XCHAN_CAP_NOSEG) ||
			    (c > xchan->maxnsegs))
				c = xdma_mbuf_defrag(xchan, xr);
			break;
		case XR_TYPE_BIO:
		case XR_TYPE_VIRT:
		default:
			c = 1;
		}

		if (capacity <= (c + n)) {
			/*
			 * No space yet available for the entire
			 * request in the DMA engine.
			 */
			break;
		}

		if ((c + n + xchan->maxnsegs) >= XDMA_SGLIST_MAXLEN) {
			/* Sglist is full. */
			break;
		}

		nsegs = xdma_load_data(xchan, xr, seg);
		if (nsegs == 0)
			break;

		xdma_sglist_add(&sg[n], seg, nsegs, xr);
		n += nsegs;

		QUEUE_IN_LOCK(xchan);
		TAILQ_REMOVE(&xchan->queue_in, xr, xr_next);
		QUEUE_IN_UNLOCK(xchan);

		QUEUE_PROC_LOCK(xchan);
		TAILQ_INSERT_TAIL(&xchan->processing, xr, xr_next);
		QUEUE_PROC_UNLOCK(xchan);
	}

	return (n);
}

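/*
 * Build an sglist from the queued requests and hand it to the DMA engine
 * driver.
 */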
int
xdma_queue_submit_sg(xdma_channel_t *xchan)
{
	struct xdma_sglist *sg;
	xdma_controller_t *xdma;
	uint32_t sg_n;
	int ret;

	xdma = xchan->xdma;
	KASSERT(xdma != NULL, ("xdma is NULL"));

	XCHAN_ASSERT_LOCKED(xchan);

	sg = xchan->sg;

	if ((xchan->caps & XCHAN_CAP_NOBUFS) == 0 &&
	    (xchan->flags & XCHAN_BUFS_ALLOCATED) == 0) {
		device_printf(xdma->dev,
		    "%s: Can't submit a transfer: no bufs\n",
		    __func__);
		return (-1);
	}

	sg_n = xdma_process(xchan, sg);
	if (sg_n == 0)
		return (0); /* Nothing to submit */

	/* Now submit sglist to DMA engine driver. */
	ret = XDMA_CHANNEL_SUBMIT_SG(xdma->dma_dev, xchan, sg, sg_n);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't submit an sglist.\n", __func__);
		return (-1);
	}

	return (0);
}