xref: /freebsd/sys/dev/xdma/xdma_sg.c (revision 6b1e5fae2cfc205d8c2e27bd8badebd1ee61500c)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2018-2019 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_platform.h"
#include <sys/param.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <machine/bus.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#include <dev/xdma/xdma.h>

#include <xdma_if.h>

struct seg_load_request {
	struct bus_dma_segment *seg;
	uint32_t nsegs;
	uint32_t error;
};

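/*
 * Release the per-request bounce buffers backed by the channel's
 * reserved vmem arena: unmap and free the KVA, then return the
 * physical range to the arena.
 */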
static void
xchan_bufs_free_reserved(xdma_channel_t *xchan)
{
	struct xdma_request *xr;
	vm_size_t size;
	int i;

	for (i = 0; i < xchan->xr_num; i++) {
		xr = &xchan->xr_mem[i];
		size = xr->buf.size;
		if (xr->buf.vaddr) {
			pmap_kremove_device(xr->buf.vaddr, size);
			kva_free(xr->buf.vaddr, size);
			xr->buf.vaddr = 0;
		}
		if (xr->buf.paddr) {
			vmem_free(xchan->vmem, xr->buf.paddr, size);
			xr->buf.paddr = 0;
		}
		xr->buf.size = 0;
	}
}

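/*
 * Carve one bounce buffer per request out of the channel's reserved
 * vmem arena: allocate a page-rounded physical range, back it with KVA
 * and map it as device memory.  Unwinds on failure.
 */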
static int
xchan_bufs_alloc_reserved(xdma_channel_t *xchan)
{
	xdma_controller_t *xdma;
	struct xdma_request *xr;
	vmem_addr_t addr;
	vm_size_t size;
	int i;

	xdma = xchan->xdma;

	if (xchan->vmem == NULL)
		return (ENOBUFS);

	for (i = 0; i < xchan->xr_num; i++) {
		xr = &xchan->xr_mem[i];
		size = round_page(xchan->maxsegsize);
		if (vmem_alloc(xchan->vmem, size,
		    M_BESTFIT | M_NOWAIT, &addr)) {
			device_printf(xdma->dev,
			    "%s: Can't allocate memory\n", __func__);
			xchan_bufs_free_reserved(xchan);
			return (ENOMEM);
		}

		xr->buf.size = size;
		xr->buf.paddr = addr;
		xr->buf.vaddr = kva_alloc(size);
		if (xr->buf.vaddr == 0) {
			device_printf(xdma->dev,
			    "%s: Can't allocate KVA\n", __func__);
			xchan_bufs_free_reserved(xchan);
			return (ENOMEM);
		}
		pmap_kenter_device(xr->buf.vaddr, size, addr);
	}

	return (0);
}

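/*
 * Create a bus_dma tag reflecting the channel restrictions (alignment,
 * boundary, address range, segment limits) and one DMA map per request.
 */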
static int
xchan_bufs_alloc_busdma(xdma_channel_t *xchan)
{
	xdma_controller_t *xdma;
	struct xdma_request *xr;
	int err;
	int i;

	xdma = xchan->xdma;

	/* Create bus_dma tag */
	err = bus_dma_tag_create(
	    bus_get_dma_tag(xdma->dev),	/* Parent tag. */
	    xchan->alignment,		/* alignment */
	    xchan->boundary,		/* boundary */
	    xchan->lowaddr,		/* lowaddr */
	    xchan->highaddr,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    xchan->maxsegsize * xchan->maxnsegs, /* maxsize */
	    xchan->maxnsegs,		/* nsegments */
	    xchan->maxsegsize,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &xchan->dma_tag_bufs);
	if (err != 0) {
		device_printf(xdma->dev,
		    "%s: Can't create bus_dma tag.\n", __func__);
		return (-1);
	}

	for (i = 0; i < xchan->xr_num; i++) {
		xr = &xchan->xr_mem[i];
		err = bus_dmamap_create(xchan->dma_tag_bufs, 0,
		    &xr->buf.map);
		if (err != 0) {
			device_printf(xdma->dev,
			    "%s: Can't create buf DMA map.\n", __func__);

			/* Cleanup. */
			bus_dma_tag_destroy(xchan->dma_tag_bufs);

			return (-1);
		}
	}

	return (0);
}

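/*
 * Allocate per-request buffers using either busdma maps or the
 * reserved vmem arena, depending on the channel capabilities.
 */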
static int
xchan_bufs_alloc(xdma_channel_t *xchan)
{
	xdma_controller_t *xdma;
	int ret;

	xdma = xchan->xdma;

	if (xdma == NULL) {
		printf("%s: Channel was not allocated properly.\n", __func__);
		return (-1);
	}

	if (xchan->caps & XCHAN_CAP_BUSDMA)
		ret = xchan_bufs_alloc_busdma(xchan);
	else {
		ret = xchan_bufs_alloc_reserved(xchan);
	}
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't allocate bufs.\n", __func__);
		return (-1);
	}

	xchan->flags |= XCHAN_BUFS_ALLOCATED;

	return (0);
}

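/*
 * Undo xchan_bufs_alloc(): destroy the busdma maps and tag, or release
 * the reserved buffers.
 */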
static int
xchan_bufs_free(xdma_channel_t *xchan)
{
	struct xdma_request *xr;
	struct xchan_buf *b;
	int i;

	if ((xchan->flags & XCHAN_BUFS_ALLOCATED) == 0)
		return (-1);

	if (xchan->caps & XCHAN_CAP_BUSDMA) {
		for (i = 0; i < xchan->xr_num; i++) {
			xr = &xchan->xr_mem[i];
			b = &xr->buf;
			bus_dmamap_destroy(xchan->dma_tag_bufs, b->map);
		}
		bus_dma_tag_destroy(xchan->dma_tag_bufs);
	} else
		xchan_bufs_free_reserved(xchan);

	xchan->flags &= ~XCHAN_BUFS_ALLOCATED;

	return (0);
}

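/*
 * Tear down the scatter-gather state of a channel: buffers, sglist and
 * the request bank.
 */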
void
xdma_channel_free_sg(xdma_channel_t *xchan)
{

	xchan_bufs_free(xchan);
	xchan_sglist_free(xchan);
	xchan_bank_free(xchan);
}

/*
 * Prepare xchan for a scatter-gather transfer.
 * xr_num - size of the xdma request queue,
 * maxsegsize - maximum allowed size of a scatter-gather list element, in bytes,
 * maxnsegs - maximum number of segments per request.
 */
int
xdma_prep_sg(xdma_channel_t *xchan, uint32_t xr_num,
    bus_size_t maxsegsize, bus_size_t maxnsegs,
    bus_size_t alignment, bus_addr_t boundary,
    bus_addr_t lowaddr, bus_addr_t highaddr)
{
	xdma_controller_t *xdma;
	int ret;

	xdma = xchan->xdma;

	KASSERT(xdma != NULL, ("xdma is NULL"));

	if (xchan->flags & XCHAN_CONFIGURED) {
		device_printf(xdma->dev,
		    "%s: Channel is already configured.\n", __func__);
		return (-1);
	}

	xchan->xr_num = xr_num;
	xchan->maxsegsize = maxsegsize;
	xchan->maxnsegs = maxnsegs;
	xchan->alignment = alignment;
	xchan->boundary = boundary;
	xchan->lowaddr = lowaddr;
	xchan->highaddr = highaddr;

	if (xchan->maxnsegs > XDMA_MAX_SEG) {
		device_printf(xdma->dev, "%s: maxnsegs is too big\n",
		    __func__);
		return (-1);
	}

	xchan_bank_init(xchan);

	/* Allocate sglist. */
	ret = xchan_sglist_alloc(xchan);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't allocate sglist.\n", __func__);
		return (-1);
	}

	/* Allocate buffers if required. */
	if (xchan->caps & (XCHAN_CAP_BUSDMA | XCHAN_CAP_BOUNCE)) {
		ret = xchan_bufs_alloc(xchan);
		if (ret != 0) {
			device_printf(xdma->dev,
			    "%s: Can't allocate bufs.\n", __func__);

			/* Cleanup */
			xchan_sglist_free(xchan);
			xchan_bank_free(xchan);

			return (-1);
		}
	}

	xchan->flags |= (XCHAN_CONFIGURED | XCHAN_TYPE_SG);

	XCHAN_LOCK(xchan);
	ret = XDMA_CHANNEL_PREP_SG(xdma->dma_dev, xchan);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't prepare SG transfer.\n", __func__);
		XCHAN_UNLOCK(xchan);

		return (-1);
	}
	XCHAN_UNLOCK(xchan);

	return (0);
}
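
/*
 * Example (hypothetical values, for illustration only): a driver that
 * queues up to 64 mbuf requests, each carried in a single cluster-sized
 * segment, with no alignment or boundary restrictions and 32-bit DMA
 * addressing, could call:
 *
 *	error = xdma_prep_sg(xchan, 64, MCLBYTES, 1, 1, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR);
 */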
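/*
 * Called by the DMA engine driver when a segment of the request at the
 * head of the processing queue has completed.  Once the last segment is
 * done, the buffer is synced and unloaded (busdma), copied back to the
 * mbuf (bounce) or unmapped (IOMMU), and the request is moved to the
 * outgoing queue.
 */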
void
xchan_seg_done(xdma_channel_t *xchan,
    struct xdma_transfer_status *st)
{
	struct xdma_request *xr;
	xdma_controller_t *xdma;
	struct xchan_buf *b;
	bus_addr_t addr;

	xdma = xchan->xdma;

	xr = TAILQ_FIRST(&xchan->processing);
	if (xr == NULL)
		panic("request not found\n");

	b = &xr->buf;

	atomic_subtract_int(&b->nsegs_left, 1);

	if (b->nsegs_left == 0) {
		if (xchan->caps & XCHAN_CAP_BUSDMA) {
			if (xr->direction == XDMA_MEM_TO_DEV)
				bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
				    BUS_DMASYNC_POSTWRITE);
			else
				bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
				    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(xchan->dma_tag_bufs, b->map);
		} else if (xchan->caps & XCHAN_CAP_BOUNCE) {
			if (xr->req_type == XR_TYPE_MBUF &&
			    xr->direction == XDMA_DEV_TO_MEM)
				m_copyback(xr->m, 0, st->transferred,
				    (void *)xr->buf.vaddr);
		} else if (xchan->caps & XCHAN_CAP_IOMMU) {
			if (xr->direction == XDMA_MEM_TO_DEV)
				addr = xr->src_addr;
			else
				addr = xr->dst_addr;
			xdma_iommu_remove_entry(xchan, addr);
		}
		xr->status.error = st->error;
		xr->status.transferred = st->transferred;

		QUEUE_PROC_LOCK(xchan);
		TAILQ_REMOVE(&xchan->processing, xr, xr_next);
		QUEUE_PROC_UNLOCK(xchan);

		QUEUE_OUT_LOCK(xchan);
		TAILQ_INSERT_TAIL(&xchan->queue_out, xr, xr_next);
		QUEUE_OUT_UNLOCK(xchan);
	}
}

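/*
 * busdma load callback: record the resolved segments in the caller's
 * segment array via the seg_load_request cookie.
 */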
static void
xdma_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct seg_load_request *slr;
	struct bus_dma_segment *seg;
	int i;

	slr = arg;
	seg = slr->seg;

	if (error != 0) {
		slr->error = error;
		return;
	}

	slr->nsegs = nsegs;

	for (i = 0; i < nsegs; i++) {
		seg[i].ds_addr = segs[i].ds_addr;
		seg[i].ds_len = segs[i].ds_len;
	}
}

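/*
 * Load a request (mbuf, bio or virtual buffer) through busdma and
 * pre-sync the map for the transfer direction.  Returns the number of
 * segments, or 0 if the load failed or should be retried later.
 */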
static int
_xdma_load_data_busdma(xdma_channel_t *xchan, struct xdma_request *xr,
    struct bus_dma_segment *seg)
{
	xdma_controller_t *xdma;
	struct seg_load_request slr;
	uint32_t nsegs;
	void *addr;
	int error;

	xdma = xchan->xdma;

	error = 0;
	nsegs = 0;

	switch (xr->req_type) {
	case XR_TYPE_MBUF:
		error = bus_dmamap_load_mbuf_sg(xchan->dma_tag_bufs,
		    xr->buf.map, xr->m, seg, &nsegs, BUS_DMA_NOWAIT);
		break;
	case XR_TYPE_BIO:
		slr.nsegs = 0;
		slr.error = 0;
		slr.seg = seg;
		error = bus_dmamap_load_bio(xchan->dma_tag_bufs,
		    xr->buf.map, xr->bp, xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
		if (slr.error != 0) {
			device_printf(xdma->dma_dev,
			    "%s: bus_dmamap_load failed, err %d\n",
			    __func__, slr.error);
			return (0);
		}
		nsegs = slr.nsegs;
		break;
	case XR_TYPE_VIRT:
		switch (xr->direction) {
		case XDMA_MEM_TO_DEV:
			addr = (void *)xr->src_addr;
			break;
		case XDMA_DEV_TO_MEM:
			addr = (void *)xr->dst_addr;
			break;
		default:
			device_printf(xdma->dma_dev,
			    "%s: Direction is not supported\n", __func__);
			return (0);
		}
		slr.nsegs = 0;
		slr.error = 0;
		slr.seg = seg;
		error = bus_dmamap_load(xchan->dma_tag_bufs, xr->buf.map,
		    addr, (xr->block_len * xr->block_num),
		    xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
		if (slr.error != 0) {
			device_printf(xdma->dma_dev,
			    "%s: bus_dmamap_load failed, err %d\n",
			    __func__, slr.error);
			return (0);
		}
		nsegs = slr.nsegs;
		break;
	default:
		break;
	}

	if (error != 0) {
		if (error == ENOMEM) {
			/*
			 * Out of memory. Try again later.
			 * TODO: count errors.
			 */
		} else
			device_printf(xdma->dma_dev,
			    "%s: bus_dmamap_load failed with err %d\n",
			    __func__, error);
		return (0);
	}

	if (xr->direction == XDMA_MEM_TO_DEV)
		bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
		    BUS_DMASYNC_PREWRITE);
	else
		bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
		    BUS_DMASYNC_PREREAD);

	return (nsegs);
}

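/*
 * Single-segment (non-busdma) load path: copy the mbuf into the
 * reserved bounce buffer, map it through the IOMMU, or pass its data
 * address through unchanged.
 */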
static int
_xdma_load_data(xdma_channel_t *xchan, struct xdma_request *xr,
    struct bus_dma_segment *seg)
{
	xdma_controller_t *xdma;
	struct mbuf *m;
	uint32_t nsegs;
	vm_offset_t va, addr;
	bus_addr_t pa;
	vm_prot_t prot;

	xdma = xchan->xdma;

	m = xr->m;

	KASSERT(xchan->caps & (XCHAN_CAP_NOSEG | XCHAN_CAP_BOUNCE),
	    ("Handling segmented data is not implemented here."));

	nsegs = 1;

	switch (xr->req_type) {
	case XR_TYPE_MBUF:
		if (xchan->caps & XCHAN_CAP_BOUNCE) {
			if (xr->direction == XDMA_MEM_TO_DEV)
				m_copydata(m, 0, m->m_pkthdr.len,
				    (void *)xr->buf.vaddr);
			seg[0].ds_addr = (bus_addr_t)xr->buf.paddr;
		} else if (xchan->caps & XCHAN_CAP_IOMMU) {
			addr = mtod(m, bus_addr_t);
			pa = vtophys(addr);

			if (xr->direction == XDMA_MEM_TO_DEV)
				prot = VM_PROT_READ;
			else
				prot = VM_PROT_WRITE;

			xdma_iommu_add_entry(xchan, &va,
			    pa, m->m_pkthdr.len, prot);

			/*
			 * Save VA so we can unload data later
			 * after completion of this transfer.
			 */
			if (xr->direction == XDMA_MEM_TO_DEV)
				xr->src_addr = va;
			else
				xr->dst_addr = va;
			seg[0].ds_addr = va;
		} else
			seg[0].ds_addr = mtod(m, bus_addr_t);
		seg[0].ds_len = m->m_pkthdr.len;
		break;
	case XR_TYPE_BIO:
	case XR_TYPE_VIRT:
	default:
		panic("implement me\n");
	}

	return (nsegs);
}

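/*
 * Fill out the segment array for a request using the busdma or direct
 * path and record the segment count in the request buffer descriptor.
 */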
static int
xdma_load_data(xdma_channel_t *xchan,
    struct xdma_request *xr, struct bus_dma_segment *seg)
{
	xdma_controller_t *xdma;
	int error;
	int nsegs;

	xdma = xchan->xdma;

	error = 0;
	nsegs = 0;

	if (xchan->caps & XCHAN_CAP_BUSDMA)
		nsegs = _xdma_load_data_busdma(xchan, xr, seg);
	else
		nsegs = _xdma_load_data(xchan, xr, seg);
	if (nsegs == 0)
		return (0); /* Try again later. */

	xr->buf.nsegs = nsegs;
	xr->buf.nsegs_left = nsegs;

	return (nsegs);
}

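/*
 * Convert as many requests from the incoming queue as the DMA engine
 * capacity and the sglist allow into sglist entries, moving each
 * converted request to the processing queue.  Returns the number of
 * sglist entries produced.
 */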
static int
xdma_process(xdma_channel_t *xchan,
    struct xdma_sglist *sg)
{
	struct bus_dma_segment seg[XDMA_MAX_SEG];
	struct xdma_request *xr;
	struct xdma_request *xr_tmp;
	xdma_controller_t *xdma;
	uint32_t capacity;
	uint32_t n;
	uint32_t c;
	int nsegs;
	int ret;

	XCHAN_ASSERT_LOCKED(xchan);

	xdma = xchan->xdma;

	n = 0;
	c = 0;

	ret = XDMA_CHANNEL_CAPACITY(xdma->dma_dev, xchan, &capacity);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't get DMA controller capacity.\n", __func__);
		return (-1);
	}

	TAILQ_FOREACH_SAFE(xr, &xchan->queue_in, xr_next, xr_tmp) {
		switch (xr->req_type) {
		case XR_TYPE_MBUF:
			if ((xchan->caps & XCHAN_CAP_NOSEG) ||
			    (c > xchan->maxnsegs))
				c = xdma_mbuf_defrag(xchan, xr);
			break;
		case XR_TYPE_BIO:
		case XR_TYPE_VIRT:
		default:
			c = 1;
		}

		if (capacity <= (c + n)) {
			/*
			 * No space yet available for the entire
			 * request in the DMA engine.
			 */
			break;
		}

		if ((c + n + xchan->maxnsegs) >= XDMA_SGLIST_MAXLEN) {
			/* Sglist is full. */
			break;
		}

		nsegs = xdma_load_data(xchan, xr, seg);
		if (nsegs == 0)
			break;

		xdma_sglist_add(&sg[n], seg, nsegs, xr);
		n += nsegs;

		QUEUE_IN_LOCK(xchan);
		TAILQ_REMOVE(&xchan->queue_in, xr, xr_next);
		QUEUE_IN_UNLOCK(xchan);

		QUEUE_PROC_LOCK(xchan);
		TAILQ_INSERT_TAIL(&xchan->processing, xr, xr_next);
		QUEUE_PROC_UNLOCK(xchan);
	}

	return (n);
}

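/*
 * Build an sglist from the incoming request queue and hand it to the
 * DMA engine driver.  Called with the channel lock held.
 */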
int
xdma_queue_submit_sg(xdma_channel_t *xchan)
{
	struct xdma_sglist *sg;
	xdma_controller_t *xdma;
	uint32_t sg_n;
	int ret;

	xdma = xchan->xdma;
	KASSERT(xdma != NULL, ("xdma is NULL"));

	XCHAN_ASSERT_LOCKED(xchan);

	sg = xchan->sg;

	if ((xchan->caps & (XCHAN_CAP_BOUNCE | XCHAN_CAP_BUSDMA)) &&
	   (xchan->flags & XCHAN_BUFS_ALLOCATED) == 0) {
		device_printf(xdma->dev,
		    "%s: Can't submit a transfer: no bufs\n",
		    __func__);
		return (-1);
	}

	sg_n = xdma_process(xchan, sg);
	if (sg_n == 0)
		return (0); /* Nothing to submit */

	/* Now submit sglist to DMA engine driver. */
	ret = XDMA_CHANNEL_SUBMIT_SG(xdma->dma_dev, xchan, sg, sg_n);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't submit an sglist.\n", __func__);
		return (-1);
	}

	return (0);
}
683