/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2018-2019 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_platform.h"
#include <sys/param.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <machine/bus.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#include <dev/xdma/xdma.h>

#include <xdma_if.h>

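/*
 * Carries the segment array between a bus_dmamap_load() call and
 * xdma_dmamap_cb(), which fills it in and reports the load status.
 */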
struct seg_load_request {
	struct bus_dma_segment *seg;
	uint32_t nsegs;
	uint32_t error;
};

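/*
 * Release the reserved (bounce) buffers: unmap and free each request's
 * KVA window and return the physical range to the channel's vmem arena.
 */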
static void
xchan_bufs_free_reserved(xdma_channel_t *xchan)
{
	struct xdma_request *xr;
	vm_size_t size;
	int i;

	for (i = 0; i < xchan->xr_num; i++) {
		xr = &xchan->xr_mem[i];
		size = xr->buf.size;
		if (xr->buf.vaddr) {
			pmap_kremove_device(xr->buf.vaddr, size);
			kva_free(xr->buf.vaddr, size);
			xr->buf.vaddr = 0;
		}
		if (xr->buf.paddr) {
			vmem_free(xchan->vmem, xr->buf.paddr, size);
			xr->buf.paddr = 0;
		}
		xr->buf.size = 0;
	}
}

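/*
 * Allocate one bounce buffer per request slot from the channel's vmem
 * arena and map it into KVA as device memory.  On failure everything
 * allocated so far is released.
 */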
static int
xchan_bufs_alloc_reserved(xdma_channel_t *xchan)
{
	xdma_controller_t *xdma;
	struct xdma_request *xr;
	vmem_addr_t addr;
	vm_size_t size;
	int i;

	xdma = xchan->xdma;

	if (xchan->vmem == NULL)
		return (ENOBUFS);

	for (i = 0; i < xchan->xr_num; i++) {
		xr = &xchan->xr_mem[i];
		size = round_page(xchan->maxsegsize);
		if (vmem_alloc(xchan->vmem, size,
		    M_BESTFIT | M_NOWAIT, &addr)) {
			device_printf(xdma->dev,
			    "%s: Can't allocate memory\n", __func__);
			xchan_bufs_free_reserved(xchan);
			return (ENOMEM);
		}

		xr->buf.size = size;
		xr->buf.paddr = addr;
		xr->buf.vaddr = kva_alloc(size);
		if (xr->buf.vaddr == 0) {
			device_printf(xdma->dev,
			    "%s: Can't allocate KVA\n", __func__);
			xchan_bufs_free_reserved(xchan);
			return (ENOMEM);
		}
		pmap_kenter_device(xr->buf.vaddr, size, addr);
	}

	return (0);
}

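/*
 * Create the channel's bus_dma tag, constrained by the parameters
 * recorded in xdma_prep_sg(), and one DMA map per request slot.
 */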
static int
xchan_bufs_alloc_busdma(xdma_channel_t *xchan)
{
	xdma_controller_t *xdma;
	struct xdma_request *xr;
	int err;
	int i;

	xdma = xchan->xdma;

	/* Create bus_dma tag. */
	err = bus_dma_tag_create(
	    bus_get_dma_tag(xdma->dev),	/* Parent tag. */
	    xchan->alignment,		/* alignment */
	    xchan->boundary,		/* boundary */
	    xchan->lowaddr,		/* lowaddr */
	    xchan->highaddr,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    xchan->maxsegsize * xchan->maxnsegs, /* maxsize */
	    xchan->maxnsegs,		/* nsegments */
	    xchan->maxsegsize,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &xchan->dma_tag_bufs);
	if (err != 0) {
		device_printf(xdma->dev,
		    "%s: Can't create bus_dma tag.\n", __func__);
		return (-1);
	}

	for (i = 0; i < xchan->xr_num; i++) {
		xr = &xchan->xr_mem[i];
		err = bus_dmamap_create(xchan->dma_tag_bufs, 0,
		    &xr->buf.map);
		if (err != 0) {
			device_printf(xdma->dev,
			    "%s: Can't create buf DMA map.\n", __func__);

			/* Cleanup. */
			bus_dma_tag_destroy(xchan->dma_tag_bufs);

			return (-1);
		}
	}

	return (0);
}

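/*
 * Allocate transfer buffers using the backend the channel advertises:
 * busdma maps, or reserved bounce memory.
 */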
static int
xchan_bufs_alloc(xdma_channel_t *xchan)
{
	xdma_controller_t *xdma;
	int ret;

	xdma = xchan->xdma;

	if (xdma == NULL) {
		/* Can't use device_printf() here: there is no device. */
		printf("%s: Channel was not allocated properly.\n", __func__);
		return (-1);
	}

	if (xchan->caps & XCHAN_CAP_BUSDMA)
		ret = xchan_bufs_alloc_busdma(xchan);
	else
		ret = xchan_bufs_alloc_reserved(xchan);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't allocate bufs.\n", __func__);
		return (-1);
	}

	xchan->flags |= XCHAN_BUFS_ALLOCATED;

	return (0);
}

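/*
 * Undo xchan_bufs_alloc(): destroy the per-request DMA maps and the
 * tag for busdma channels, or free the reserved bounce buffers.
 */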
static int
xchan_bufs_free(xdma_channel_t *xchan)
{
	struct xdma_request *xr;
	struct xchan_buf *b;
	int i;

	if ((xchan->flags & XCHAN_BUFS_ALLOCATED) == 0)
		return (-1);

	if (xchan->caps & XCHAN_CAP_BUSDMA) {
		for (i = 0; i < xchan->xr_num; i++) {
			xr = &xchan->xr_mem[i];
			b = &xr->buf;
			bus_dmamap_destroy(xchan->dma_tag_bufs, b->map);
		}
		bus_dma_tag_destroy(xchan->dma_tag_bufs);
	} else
		xchan_bufs_free_reserved(xchan);

	xchan->flags &= ~XCHAN_BUFS_ALLOCATED;

	return (0);
}

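/*
 * Release all scatter-gather state of a channel: transfer buffers,
 * the sglist and the request bank.
 */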
void
xdma_channel_free_sg(xdma_channel_t *xchan)
{

	xchan_bufs_free(xchan);
	xchan_sglist_free(xchan);
	xchan_bank_free(xchan);
}

/*
 * Prepare xchan for scatter-gather transfers.
 * xr_num - size of the xdma request queue,
 * maxsegsize - maximum allowed size, in bytes, of a scatter-gather
 * list element,
 * maxnsegs - maximum number of segments per request (at most
 * XDMA_MAX_SEG).
 */
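/*
 * A typical call might look as follows; the values are illustrative
 * only and depend on the DMA engine and the consumer:
 *
 *	err = xdma_prep_sg(xchan,
 *	    32,				(xr_num: 32 request slots)
 *	    MCLBYTES,			(maxsegsize)
 *	    8,				(maxnsegs)
 *	    16,				(alignment)
 *	    0,				(no boundary constraint)
 *	    BUS_SPACE_MAXADDR_32BIT,	(lowaddr)
 *	    BUS_SPACE_MAXADDR);		(highaddr)
 */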
int
xdma_prep_sg(xdma_channel_t *xchan, uint32_t xr_num,
    bus_size_t maxsegsize, bus_size_t maxnsegs,
    bus_size_t alignment, bus_addr_t boundary,
    bus_addr_t lowaddr, bus_addr_t highaddr)
{
	xdma_controller_t *xdma;
	int ret;

	xdma = xchan->xdma;

	KASSERT(xdma != NULL, ("xdma is NULL"));

	if (xchan->flags & XCHAN_CONFIGURED) {
		device_printf(xdma->dev,
		    "%s: Channel is already configured.\n", __func__);
		return (-1);
	}

	xchan->xr_num = xr_num;
	xchan->maxsegsize = maxsegsize;
	xchan->maxnsegs = maxnsegs;
	xchan->alignment = alignment;
	xchan->boundary = boundary;
	xchan->lowaddr = lowaddr;
	xchan->highaddr = highaddr;

	if (xchan->maxnsegs > XDMA_MAX_SEG) {
		device_printf(xdma->dev, "%s: maxnsegs is too big\n",
		    __func__);
		return (-1);
	}

	xchan_bank_init(xchan);

	/* Allocate sglist. */
	ret = xchan_sglist_alloc(xchan);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't allocate sglist.\n", __func__);
		return (-1);
	}

	/* Allocate buffers if required. */
	if (xchan->caps & (XCHAN_CAP_BUSDMA | XCHAN_CAP_BOUNCE)) {
		ret = xchan_bufs_alloc(xchan);
		if (ret != 0) {
			device_printf(xdma->dev,
			    "%s: Can't allocate bufs.\n", __func__);

			/* Cleanup. */
			xchan_sglist_free(xchan);
			xchan_bank_free(xchan);

			return (-1);
		}
	}

	xchan->flags |= (XCHAN_CONFIGURED | XCHAN_TYPE_SG);

	XCHAN_LOCK(xchan);
	ret = XDMA_CHANNEL_PREP_SG(xdma->dma_dev, xchan);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't prepare SG transfer.\n", __func__);
		XCHAN_UNLOCK(xchan);

		return (-1);
	}
	XCHAN_UNLOCK(xchan);

	return (0);
}

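/*
 * Called by the DMA engine driver as each segment completes.  When the
 * last segment of a request is done the buffer is synced and unloaded
 * (or bounced back / unmapped from the IOMMU), and the request moves
 * from the processing queue to queue_out.
 */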
void
xchan_seg_done(xdma_channel_t *xchan,
    struct xdma_transfer_status *st)
{
	struct xdma_request *xr;
	struct xchan_buf *b;
	bus_addr_t addr;

	xr = TAILQ_FIRST(&xchan->processing);
	if (xr == NULL)
		panic("%s: request not found", __func__);

	b = &xr->buf;

	/* Fetch-and-subtract so the last-segment check is race-free. */
	if (atomic_fetchadd_int(&b->nsegs_left, -1) == 1) {
		if (xchan->caps & XCHAN_CAP_BUSDMA) {
			if (xr->direction == XDMA_MEM_TO_DEV)
				bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
				    BUS_DMASYNC_POSTWRITE);
			else
				bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
				    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(xchan->dma_tag_bufs, b->map);
		} else if (xchan->caps & XCHAN_CAP_BOUNCE) {
			if (xr->req_type == XR_TYPE_MBUF &&
			    xr->direction == XDMA_DEV_TO_MEM)
				m_copyback(xr->m, 0, st->transferred,
				    (void *)xr->buf.vaddr);
		} else if (xchan->caps & XCHAN_CAP_IOMMU) {
			if (xr->direction == XDMA_MEM_TO_DEV)
				addr = xr->src_addr;
			else
				addr = xr->dst_addr;
			xdma_iommu_remove_entry(xchan, addr);
		}
		xr->status.error = st->error;
		xr->status.transferred = st->transferred;

		QUEUE_PROC_LOCK(xchan);
		TAILQ_REMOVE(&xchan->processing, xr, xr_next);
		QUEUE_PROC_UNLOCK(xchan);

		QUEUE_OUT_LOCK(xchan);
		TAILQ_INSERT_TAIL(&xchan->queue_out, xr, xr_next);
		QUEUE_OUT_UNLOCK(xchan);
	}
}

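/*
 * bus_dmamap_load() callback: record the error, then copy the segment
 * count and the segment array into the caller's seg_load_request.
 */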
static void
xdma_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct seg_load_request *slr;
	struct bus_dma_segment *seg;
	int i;

	slr = arg;
	seg = slr->seg;

	if (error != 0) {
		slr->error = error;
		return;
	}

	slr->nsegs = nsegs;

	for (i = 0; i < nsegs; i++) {
		seg[i].ds_addr = segs[i].ds_addr;
		seg[i].ds_len = segs[i].ds_len;
	}
}

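/*
 * Load an mbuf, bio or virtual-address request through busdma and
 * pre-sync the map.  Returns the number of segments loaded, or 0 to
 * make the caller retry later.
 */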
static int
_xdma_load_data_busdma(xdma_channel_t *xchan, struct xdma_request *xr,
    struct bus_dma_segment *seg)
{
	xdma_controller_t *xdma;
	struct seg_load_request slr;
	void *addr;
	int error;
	int nsegs;	/* bus_dmamap_load_mbuf_sg() takes an int pointer. */

	xdma = xchan->xdma;

	error = 0;
	nsegs = 0;

	switch (xr->req_type) {
	case XR_TYPE_MBUF:
		error = bus_dmamap_load_mbuf_sg(xchan->dma_tag_bufs,
		    xr->buf.map, xr->m, seg, &nsegs, BUS_DMA_NOWAIT);
		break;
	case XR_TYPE_BIO:
		slr.nsegs = 0;
		slr.error = 0;
		slr.seg = seg;
		error = bus_dmamap_load_bio(xchan->dma_tag_bufs,
		    xr->buf.map, xr->bp, xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
		if (slr.error != 0) {
			device_printf(xdma->dma_dev,
			    "%s: bus_dmamap_load failed, err %d\n",
			    __func__, slr.error);
			return (0);
		}
		nsegs = slr.nsegs;
		break;
	case XR_TYPE_VIRT:
		switch (xr->direction) {
		case XDMA_MEM_TO_DEV:
			addr = (void *)xr->src_addr;
			break;
		case XDMA_DEV_TO_MEM:
			addr = (void *)xr->dst_addr;
			break;
		default:
			device_printf(xdma->dma_dev,
			    "%s: Direction is not supported\n", __func__);
			return (0);
		}
		slr.nsegs = 0;
		slr.error = 0;
		slr.seg = seg;
		error = bus_dmamap_load(xchan->dma_tag_bufs, xr->buf.map,
		    addr, (xr->block_len * xr->block_num),
		    xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
		if (slr.error != 0) {
			device_printf(xdma->dma_dev,
			    "%s: bus_dmamap_load failed, err %d\n",
			    __func__, slr.error);
			return (0);
		}
		nsegs = slr.nsegs;
		break;
	default:
		break;
	}

	if (error != 0) {
		if (error == ENOMEM) {
			/*
			 * Out of memory. Try again later.
			 * TODO: count errors.
			 */
		} else
			device_printf(xdma->dma_dev,
			    "%s: bus_dmamap_load failed with err %d\n",
			    __func__, error);
		return (0);
	}

	if (xr->direction == XDMA_MEM_TO_DEV)
		bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
		    BUS_DMASYNC_PREWRITE);
	else
		bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
		    BUS_DMASYNC_PREREAD);

	return (nsegs);
}

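/*
 * Load a request without busdma: bounce the data through the reserved
 * buffer, or install an IOMMU mapping for it.  Only single-segment
 * (XCHAN_CAP_NOSEG) channels are handled here.
 */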
static int
_xdma_load_data(xdma_channel_t *xchan, struct xdma_request *xr,
    struct bus_dma_segment *seg)
{
	struct mbuf *m;
	vm_offset_t va, addr;
	bus_addr_t pa;
	vm_prot_t prot;
	int nsegs;

	m = xr->m;

	KASSERT(xchan->caps & XCHAN_CAP_NOSEG,
	    ("Handling segmented data is not implemented here."));

	nsegs = 1;

	switch (xr->req_type) {
	case XR_TYPE_MBUF:
		if (xchan->caps & XCHAN_CAP_BOUNCE) {
			if (xr->direction == XDMA_MEM_TO_DEV)
				m_copydata(m, 0, m->m_pkthdr.len,
				    (void *)xr->buf.vaddr);
			seg[0].ds_addr = (bus_addr_t)xr->buf.paddr;
		} else if (xchan->caps & XCHAN_CAP_IOMMU) {
			addr = mtod(m, vm_offset_t);
			pa = vtophys(addr);

			if (xr->direction == XDMA_MEM_TO_DEV)
				prot = VM_PROT_READ;
			else
				prot = VM_PROT_WRITE;

			xdma_iommu_add_entry(xchan, &va,
			    pa, m->m_pkthdr.len, prot);

			/*
			 * Save VA so we can unload data later
			 * after completion of this transfer.
			 */
			if (xr->direction == XDMA_MEM_TO_DEV)
				xr->src_addr = va;
			else
				xr->dst_addr = va;
			seg[0].ds_addr = va;
		} else
			seg[0].ds_addr = mtod(m, bus_addr_t);
		seg[0].ds_len = m->m_pkthdr.len;
		break;
	case XR_TYPE_BIO:
	case XR_TYPE_VIRT:
	default:
		panic("%s: request type %d not implemented", __func__,
		    xr->req_type);
	}

	return (nsegs);
}

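/*
 * Fill the segment array for a request using the appropriate backend
 * and record the segment count in the buffer descriptor.
 */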
static int
xdma_load_data(xdma_channel_t *xchan,
    struct xdma_request *xr, struct bus_dma_segment *seg)
{
	int nsegs;

	if (xchan->caps & XCHAN_CAP_BUSDMA)
		nsegs = _xdma_load_data_busdma(xchan, xr, seg);
	else
		nsegs = _xdma_load_data(xchan, xr, seg);
	if (nsegs == 0)
		return (0); /* Try again later. */

	xr->buf.nsegs = nsegs;
	xr->buf.nsegs_left = nsegs;

	return (nsegs);
}

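/*
 * Move requests from queue_in into the sglist, as far as the engine
 * capacity and XDMA_SGLIST_MAXLEN allow, placing each loaded request
 * on the processing queue.  Returns the number of sglist entries
 * built, or -1 if the engine capacity can't be queried.
 */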
static int
xdma_process(xdma_channel_t *xchan,
    struct xdma_sglist *sg)
{
	struct bus_dma_segment seg[XDMA_MAX_SEG];
	struct xdma_request *xr;
	struct xdma_request *xr_tmp;
	xdma_controller_t *xdma;
	uint32_t capacity;
	uint32_t n;
	uint32_t c;
	int nsegs;
	int ret;

	XCHAN_ASSERT_LOCKED(xchan);

	xdma = xchan->xdma;

	n = 0;
	c = 0;

	ret = XDMA_CHANNEL_CAPACITY(xdma->dma_dev, xchan, &capacity);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't get DMA controller capacity.\n", __func__);
		return (-1);
	}

	TAILQ_FOREACH_SAFE(xr, &xchan->queue_in, xr_next, xr_tmp) {
		switch (xr->req_type) {
		case XR_TYPE_MBUF:
			if ((xchan->caps & XCHAN_CAP_NOSEG) ||
			    (c > xchan->maxnsegs))
				c = xdma_mbuf_defrag(xchan, xr);
			break;
		case XR_TYPE_BIO:
		case XR_TYPE_VIRT:
		default:
			c = 1;
		}

		if (capacity <= (c + n)) {
			/*
			 * No space yet available for the entire
			 * request in the DMA engine.
			 */
			break;
		}

		if ((c + n + xchan->maxnsegs) >= XDMA_SGLIST_MAXLEN) {
			/* Sglist is full. */
			break;
		}

		nsegs = xdma_load_data(xchan, xr, seg);
		if (nsegs == 0)
			break;

		xdma_sglist_add(&sg[n], seg, nsegs, xr);
		n += nsegs;

		QUEUE_IN_LOCK(xchan);
		TAILQ_REMOVE(&xchan->queue_in, xr, xr_next);
		QUEUE_IN_UNLOCK(xchan);

		QUEUE_PROC_LOCK(xchan);
		TAILQ_INSERT_TAIL(&xchan->processing, xr, xr_next);
		QUEUE_PROC_UNLOCK(xchan);
	}

	return (n);
}

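/*
 * Build an sglist from the enqueued requests and submit it to the DMA
 * engine driver.  The channel lock must be held; consumers normally
 * reach this through xdma_queue_submit().
 */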
int
xdma_queue_submit_sg(xdma_channel_t *xchan)
{
	struct xdma_sglist *sg;
	xdma_controller_t *xdma;
	int sg_n;
	int ret;

	xdma = xchan->xdma;
	KASSERT(xdma != NULL, ("xdma is NULL"));

	XCHAN_ASSERT_LOCKED(xchan);

	sg = xchan->sg;

	if ((xchan->caps & (XCHAN_CAP_BOUNCE | XCHAN_CAP_BUSDMA)) &&
	    (xchan->flags & XCHAN_BUFS_ALLOCATED) == 0) {
		device_printf(xdma->dev,
		    "%s: Can't submit a transfer: no bufs\n",
		    __func__);
		return (-1);
	}

	/*
	 * sg_n must be signed: xdma_process() returns -1 when the
	 * engine capacity can't be queried.
	 */
	sg_n = xdma_process(xchan, sg);
	if (sg_n < 0)
		return (-1);
	if (sg_n == 0)
		return (0); /* Nothing to submit. */

	/* Now submit the sglist to the DMA engine driver. */
	ret = XDMA_CHANNEL_SUBMIT_SG(xdma->dma_dev, xchan, sg, sg_n);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't submit an sglist.\n", __func__);
		return (-1);
	}

	return (0);
}