/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2018-2019 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_platform.h"
#include <sys/param.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <machine/bus.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#include <dev/xdma/xdma.h>

#include <xdma_if.h>

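/*
 * Callback argument used when loading a bio or a virtual buffer through
 * busdma; the callback copies the resulting segments into 'seg'.
 */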
struct seg_load_request {
	struct bus_dma_segment *seg;
	uint32_t nsegs;
	uint32_t error;
};

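/*
 * Free the reserved-memory bounce buffers: unmap and release each KVA
 * window and return the backing memory to the channel's vmem arena.
 */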
static void
xchan_bufs_free_reserved(xdma_channel_t *xchan)
{
	struct xdma_request *xr;
	vm_size_t size;
	int i;

	for (i = 0; i < xchan->xr_num; i++) {
		xr = &xchan->xr_mem[i];
		size = xr->buf.size;
		if (xr->buf.vaddr) {
			pmap_kremove_device(xr->buf.vaddr, size);
			kva_free(xr->buf.vaddr, size);
			xr->buf.vaddr = 0;
		}
		if (xr->buf.paddr) {
			vmem_free(xchan->vmem, xr->buf.paddr, size);
			xr->buf.paddr = 0;
		}
		xr->buf.size = 0;
	}
}

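/*
 * Allocate a bounce buffer for each request slot from the channel's
 * reserved vmem arena and map it into KVA as device memory.
 */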
static int
xchan_bufs_alloc_reserved(xdma_channel_t *xchan)
{
	xdma_controller_t *xdma;
	struct xdma_request *xr;
	vmem_addr_t addr;
	vm_size_t size;
	int i;

	xdma = xchan->xdma;

	if (xchan->vmem == NULL)
		return (ENOBUFS);

	for (i = 0; i < xchan->xr_num; i++) {
		xr = &xchan->xr_mem[i];
		size = round_page(xchan->maxsegsize);
		if (vmem_alloc(xchan->vmem, size,
		    M_BESTFIT | M_NOWAIT, &addr)) {
			device_printf(xdma->dev,
			    "%s: Can't allocate memory\n", __func__);
			xchan_bufs_free_reserved(xchan);
			return (ENOMEM);
		}

		xr->buf.size = size;
		xr->buf.paddr = addr;
		xr->buf.vaddr = kva_alloc(size);
		if (xr->buf.vaddr == 0) {
			device_printf(xdma->dev,
			    "%s: Can't allocate KVA\n", __func__);
			xchan_bufs_free_reserved(xchan);
			return (ENOMEM);
		}
		pmap_kenter_device(xr->buf.vaddr, size, addr);
	}

	return (0);
}

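/*
 * Create the channel's bus_dma tag and one DMA map per request slot.
 */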
static int
xchan_bufs_alloc_busdma(xdma_channel_t *xchan)
{
	xdma_controller_t *xdma;
	struct xdma_request *xr;
	int err;
	int i;

	xdma = xchan->xdma;

	/* Create bus_dma tag */
	err = bus_dma_tag_create(
	    bus_get_dma_tag(xdma->dev),	/* Parent tag. */
	    xchan->alignment,		/* alignment */
	    xchan->boundary,		/* boundary */
	    xchan->lowaddr,		/* lowaddr */
	    xchan->highaddr,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    xchan->maxsegsize * xchan->maxnsegs, /* maxsize */
	    xchan->maxnsegs,		/* nsegments */
	    xchan->maxsegsize,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &xchan->dma_tag_bufs);
	if (err != 0) {
		device_printf(xdma->dev,
		    "%s: Can't create bus_dma tag.\n", __func__);
		return (-1);
	}

	for (i = 0; i < xchan->xr_num; i++) {
		xr = &xchan->xr_mem[i];
		err = bus_dmamap_create(xchan->dma_tag_bufs, 0,
		    &xr->buf.map);
		if (err != 0) {
			device_printf(xdma->dev,
			    "%s: Can't create buf DMA map.\n", __func__);

			/* Cleanup. */
			bus_dma_tag_destroy(xchan->dma_tag_bufs);

			return (-1);
		}
	}

	return (0);
}

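/*
 * Allocate per-request buffers using either busdma or the reserved
 * vmem arena, depending on the channel capabilities.
 */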
static int
xchan_bufs_alloc(xdma_channel_t *xchan)
{
	xdma_controller_t *xdma;
	int ret;

	xdma = xchan->xdma;

	if (xdma == NULL) {
		printf("%s: Channel was not allocated properly.\n", __func__);
		return (-1);
	}

	if (xchan->caps & XCHAN_CAP_BUSDMA)
		ret = xchan_bufs_alloc_busdma(xchan);
	else
		ret = xchan_bufs_alloc_reserved(xchan);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't allocate bufs.\n", __func__);
		return (-1);
	}

	xchan->flags |= XCHAN_BUFS_ALLOCATED;

	return (0);
}

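/*
 * Release whatever xchan_bufs_alloc() set up.
 */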
static int
xchan_bufs_free(xdma_channel_t *xchan)
{
	struct xdma_request *xr;
	struct xchan_buf *b;
	int i;

	if ((xchan->flags & XCHAN_BUFS_ALLOCATED) == 0)
		return (-1);

	if (xchan->caps & XCHAN_CAP_BUSDMA) {
		for (i = 0; i < xchan->xr_num; i++) {
			xr = &xchan->xr_mem[i];
			b = &xr->buf;
			bus_dmamap_destroy(xchan->dma_tag_bufs, b->map);
		}
		bus_dma_tag_destroy(xchan->dma_tag_bufs);
	} else
		xchan_bufs_free_reserved(xchan);

	xchan->flags &= ~XCHAN_BUFS_ALLOCATED;

	return (0);
}

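/*
 * Free all scatter-gather resources owned by the channel.
 */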
void
xdma_channel_free_sg(xdma_channel_t *xchan)
{

	xchan_bufs_free(xchan);
	xchan_sglist_free(xchan);
	xchan_bank_free(xchan);
}

/*
 * Prepare xchan for a scatter-gather transfer.
 * xr_num - size of the xdma request queue,
 * maxsegsize - maximum allowed size of a scatter-gather list element, in bytes,
 * maxnsegs - maximum number of scatter-gather list elements per request,
 * alignment, boundary, lowaddr, highaddr - bus_dma-style constraints for the
 * buffers backing this channel.
 */
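/*
 * For illustration only (the values below are made up and driver-specific):
 * a consumer that moves mbuf-sized packets might configure a freshly
 * allocated channel as
 *
 *	error = xdma_prep_sg(xchan,
 *	    64,				(xr_num)
 *	    MCLBYTES,			(maxsegsize)
 *	    8,				(maxnsegs)
 *	    16,				(alignment)
 *	    0,				(boundary)
 *	    BUS_SPACE_MAXADDR_32BIT,	(lowaddr)
 *	    BUS_SPACE_MAXADDR);		(highaddr)
 */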
int
xdma_prep_sg(xdma_channel_t *xchan, uint32_t xr_num,
    bus_size_t maxsegsize, bus_size_t maxnsegs,
    bus_size_t alignment, bus_addr_t boundary,
    bus_addr_t lowaddr, bus_addr_t highaddr)
{
	xdma_controller_t *xdma;
	int ret;

	xdma = xchan->xdma;

	KASSERT(xdma != NULL, ("xdma is NULL"));

	if (xchan->flags & XCHAN_CONFIGURED) {
		device_printf(xdma->dev,
		    "%s: Channel is already configured.\n", __func__);
		return (-1);
	}

	xchan->xr_num = xr_num;
	xchan->maxsegsize = maxsegsize;
	xchan->maxnsegs = maxnsegs;
	xchan->alignment = alignment;
	xchan->boundary = boundary;
	xchan->lowaddr = lowaddr;
	xchan->highaddr = highaddr;

	if (xchan->maxnsegs > XDMA_MAX_SEG) {
		device_printf(xdma->dev, "%s: maxnsegs is too big\n",
		    __func__);
		return (-1);
	}

	xchan_bank_init(xchan);

	/* Allocate sglist. */
	ret = xchan_sglist_alloc(xchan);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't allocate sglist.\n", __func__);
		return (-1);
	}

	/* Allocate buffers if required. */
	if (xchan->caps & (XCHAN_CAP_BUSDMA | XCHAN_CAP_BOUNCE)) {
		ret = xchan_bufs_alloc(xchan);
		if (ret != 0) {
			device_printf(xdma->dev,
			    "%s: Can't allocate bufs.\n", __func__);

			/* Cleanup */
			xchan_sglist_free(xchan);
			xchan_bank_free(xchan);

			return (-1);
		}
	}

	xchan->flags |= (XCHAN_CONFIGURED | XCHAN_TYPE_SG);

	XCHAN_LOCK(xchan);
	ret = XDMA_CHANNEL_PREP_SG(xdma->dma_dev, xchan);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't prepare SG transfer.\n", __func__);
		XCHAN_UNLOCK(xchan);

		return (-1);
	}
	XCHAN_UNLOCK(xchan);

	return (0);
}

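/*
 * Called when a segment of the request at the head of the processing
 * queue completes (typically from the DMA engine driver).  Once the last
 * segment is done, sync/unload the buffer and move the request to the
 * out queue.
 */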
void
xchan_seg_done(xdma_channel_t *xchan,
    struct xdma_transfer_status *st)
{
	struct xdma_request *xr;
	xdma_controller_t *xdma;
	struct xchan_buf *b;
	bus_addr_t addr;

	xdma = xchan->xdma;

	xr = TAILQ_FIRST(&xchan->processing);
	if (xr == NULL)
		panic("request not found\n");

	b = &xr->buf;

	atomic_subtract_int(&b->nsegs_left, 1);

	if (b->nsegs_left == 0) {
		if (xchan->caps & XCHAN_CAP_BUSDMA) {
			if (xr->direction == XDMA_MEM_TO_DEV)
				bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
				    BUS_DMASYNC_POSTWRITE);
			else
				bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
				    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(xchan->dma_tag_bufs, b->map);
		} else if (xchan->caps & XCHAN_CAP_BOUNCE) {
			if (xr->req_type == XR_TYPE_MBUF &&
			    xr->direction == XDMA_DEV_TO_MEM)
				m_copyback(xr->m, 0, st->transferred,
				    (void *)xr->buf.vaddr);
		} else if (xchan->caps & XCHAN_CAP_IOMMU) {
			if (xr->direction == XDMA_MEM_TO_DEV)
				addr = xr->src_addr;
			else
				addr = xr->dst_addr;
			xdma_iommu_remove_entry(xchan, addr);
		}
		xr->status.error = st->error;
		xr->status.transferred = st->transferred;

		QUEUE_PROC_LOCK(xchan);
		TAILQ_REMOVE(&xchan->processing, xr, xr_next);
		QUEUE_PROC_UNLOCK(xchan);

		QUEUE_OUT_LOCK(xchan);
		TAILQ_INSERT_TAIL(&xchan->queue_out, xr, xr_next);
		QUEUE_OUT_UNLOCK(xchan);
	}
}

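/*
 * busdma load callback: record the segment array and count (or the error)
 * in the caller-provided struct seg_load_request.
 */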
static void
xdma_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct seg_load_request *slr;
	struct bus_dma_segment *seg;
	int i;

	slr = arg;
	seg = slr->seg;

	if (error != 0) {
		slr->error = error;
		return;
	}

	slr->nsegs = nsegs;

	for (i = 0; i < nsegs; i++) {
		seg[i].ds_addr = segs[i].ds_addr;
		seg[i].ds_len = segs[i].ds_len;
	}
}

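/*
 * Load a request (mbuf, bio or virtual buffer) through busdma and fill
 * in the segment array.  Returns the number of segments, or 0 on error.
 */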
static int
_xdma_load_data_busdma(xdma_channel_t *xchan, struct xdma_request *xr,
    struct bus_dma_segment *seg)
{
	xdma_controller_t *xdma;
	struct seg_load_request slr;
	uint32_t nsegs;
	void *addr;
	int error;

	xdma = xchan->xdma;

	error = 0;
	nsegs = 0;

	switch (xr->req_type) {
	case XR_TYPE_MBUF:
		error = bus_dmamap_load_mbuf_sg(xchan->dma_tag_bufs,
		    xr->buf.map, xr->m, seg, &nsegs, BUS_DMA_NOWAIT);
		break;
	case XR_TYPE_BIO:
		slr.nsegs = 0;
		slr.error = 0;
		slr.seg = seg;
		error = bus_dmamap_load_bio(xchan->dma_tag_bufs,
		    xr->buf.map, xr->bp, xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
		if (slr.error != 0) {
			device_printf(xdma->dma_dev,
			    "%s: bus_dmamap_load failed, err %d\n",
			    __func__, slr.error);
			return (0);
		}
		nsegs = slr.nsegs;
		break;
	case XR_TYPE_VIRT:
		switch (xr->direction) {
		case XDMA_MEM_TO_DEV:
			addr = (void *)xr->src_addr;
			break;
		case XDMA_DEV_TO_MEM:
			addr = (void *)xr->dst_addr;
			break;
		default:
			device_printf(xdma->dma_dev,
			    "%s: Direction is not supported\n", __func__);
			return (0);
		}
		slr.nsegs = 0;
		slr.error = 0;
		slr.seg = seg;
		error = bus_dmamap_load(xchan->dma_tag_bufs, xr->buf.map,
		    addr, (xr->block_len * xr->block_num),
		    xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
		if (slr.error != 0) {
			device_printf(xdma->dma_dev,
			    "%s: bus_dmamap_load failed, err %d\n",
			    __func__, slr.error);
			return (0);
		}
		nsegs = slr.nsegs;
		break;
	default:
		break;
	}

	if (error != 0) {
		if (error == ENOMEM) {
			/*
			 * Out of memory. Try again later.
			 * TODO: count errors.
			 */
		} else
			device_printf(xdma->dma_dev,
			    "%s: bus_dmamap_load failed with err %d\n",
			    __func__, error);
		return (0);
	}

	if (xr->direction == XDMA_MEM_TO_DEV)
		bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
		    BUS_DMASYNC_PREWRITE);
	else
		bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
		    BUS_DMASYNC_PREREAD);

	return (nsegs);
}

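/*
 * Load a request without busdma: copy it into the bounce buffer, map it
 * through the IOMMU, or use the mbuf data address as-is.  Only
 * single-segment (XCHAN_CAP_NOSEG) transfers are handled here.
 */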
static int
_xdma_load_data(xdma_channel_t *xchan, struct xdma_request *xr,
    struct bus_dma_segment *seg)
{
	xdma_controller_t *xdma;
	struct mbuf *m;
	uint32_t nsegs;
	vm_offset_t va, addr;
	bus_addr_t pa;
	vm_prot_t prot;

	xdma = xchan->xdma;

	m = xr->m;

	KASSERT(xchan->caps & XCHAN_CAP_NOSEG,
	    ("Handling segmented data is not implemented here."));

	nsegs = 1;

	switch (xr->req_type) {
	case XR_TYPE_MBUF:
		if (xchan->caps & XCHAN_CAP_BOUNCE) {
			if (xr->direction == XDMA_MEM_TO_DEV)
				m_copydata(m, 0, m->m_pkthdr.len,
				    (void *)xr->buf.vaddr);
			seg[0].ds_addr = (bus_addr_t)xr->buf.paddr;
		} else if (xchan->caps & XCHAN_CAP_IOMMU) {
			addr = mtod(m, bus_addr_t);
			pa = vtophys(addr);

			if (xr->direction == XDMA_MEM_TO_DEV)
				prot = VM_PROT_READ;
			else
				prot = VM_PROT_WRITE;

			xdma_iommu_add_entry(xchan, &va,
			    pa, m->m_pkthdr.len, prot);

			/*
			 * Save VA so we can unload data later
			 * after completion of this transfer.
			 */
			if (xr->direction == XDMA_MEM_TO_DEV)
				xr->src_addr = va;
			else
				xr->dst_addr = va;
			seg[0].ds_addr = va;
		} else
			seg[0].ds_addr = mtod(m, bus_addr_t);
		seg[0].ds_len = m->m_pkthdr.len;
		break;
	case XR_TYPE_BIO:
	case XR_TYPE_VIRT:
	default:
		panic("implement me\n");
	}

	return (nsegs);
}

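/*
 * Build the segment list for a request using the appropriate backend and
 * record the segment count in the request buffer.
 */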
static int
xdma_load_data(xdma_channel_t *xchan,
    struct xdma_request *xr, struct bus_dma_segment *seg)
{
	xdma_controller_t *xdma;
	int error;
	int nsegs;

	xdma = xchan->xdma;

	error = 0;
	nsegs = 0;

	if (xchan->caps & XCHAN_CAP_BUSDMA)
		nsegs = _xdma_load_data_busdma(xchan, xr, seg);
	else
		nsegs = _xdma_load_data(xchan, xr, seg);
	if (nsegs == 0)
		return (0); /* Try again later. */

	xr->buf.nsegs = nsegs;
	xr->buf.nsegs_left = nsegs;

	return (nsegs);
}

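/*
 * Move requests from the incoming queue into the sglist, as far as the
 * DMA engine capacity and the sglist length allow, and place each loaded
 * request onto the processing queue.  Returns the number of sglist
 * entries filled.
 */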
static int
xdma_process(xdma_channel_t *xchan,
    struct xdma_sglist *sg)
{
	struct bus_dma_segment seg[XDMA_MAX_SEG];
	struct xdma_request *xr;
	struct xdma_request *xr_tmp;
	xdma_controller_t *xdma;
	uint32_t capacity;
	uint32_t n;
	uint32_t c;
	int nsegs;
	int ret;

	XCHAN_ASSERT_LOCKED(xchan);

	xdma = xchan->xdma;

	n = 0;
	c = 0;

	ret = XDMA_CHANNEL_CAPACITY(xdma->dma_dev, xchan, &capacity);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't get DMA controller capacity.\n", __func__);
		return (-1);
	}

	TAILQ_FOREACH_SAFE(xr, &xchan->queue_in, xr_next, xr_tmp) {
		switch (xr->req_type) {
		case XR_TYPE_MBUF:
			if ((xchan->caps & XCHAN_CAP_NOSEG) ||
			    (c > xchan->maxnsegs))
				c = xdma_mbuf_defrag(xchan, xr);
			break;
		case XR_TYPE_BIO:
		case XR_TYPE_VIRT:
		default:
			c = 1;
		}

		if (capacity <= (c + n)) {
			/*
			 * No space yet available for the entire
			 * request in the DMA engine.
			 */
			break;
		}

		if ((c + n + xchan->maxnsegs) >= XDMA_SGLIST_MAXLEN) {
			/* Sglist is full. */
			break;
		}

		nsegs = xdma_load_data(xchan, xr, seg);
		if (nsegs == 0)
			break;

		xdma_sglist_add(&sg[n], seg, nsegs, xr);
		n += nsegs;

		QUEUE_IN_LOCK(xchan);
		TAILQ_REMOVE(&xchan->queue_in, xr, xr_next);
		QUEUE_IN_UNLOCK(xchan);

		QUEUE_PROC_LOCK(xchan);
		TAILQ_INSERT_TAIL(&xchan->processing, xr, xr_next);
		QUEUE_PROC_UNLOCK(xchan);
	}

	return (n);
}

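/*
 * Build an sglist from the incoming queue and hand it over to the DMA
 * engine driver.
 */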
int
xdma_queue_submit_sg(xdma_channel_t *xchan)
{
	struct xdma_sglist *sg;
	xdma_controller_t *xdma;
	uint32_t sg_n;
	int ret;

	xdma = xchan->xdma;
	KASSERT(xdma != NULL, ("xdma is NULL"));

	XCHAN_ASSERT_LOCKED(xchan);

	sg = xchan->sg;

	if ((xchan->caps & (XCHAN_CAP_BOUNCE | XCHAN_CAP_BUSDMA)) &&
	    (xchan->flags & XCHAN_BUFS_ALLOCATED) == 0) {
		device_printf(xdma->dev,
		    "%s: Can't submit a transfer: no bufs\n",
		    __func__);
		return (-1);
	}

	sg_n = xdma_process(xchan, sg);
	if (sg_n == 0)
		return (0); /* Nothing to submit */

	/* Now submit sglist to DMA engine driver. */
	ret = XDMA_CHANNEL_SUBMIT_SG(xdma->dma_dev, xchan, sg, sg_n);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't submit an sglist.\n", __func__);
		return (-1);
	}

	return (0);
}