/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012 EMC Corp.
 * All rights reserved.
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bus.h"
#include "opt_iommu.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <opencrypto/cryptodev.h>

#include <machine/bus.h>

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}
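
/*
 * Example (a sketch; "sc", "MYDEV_MAXIO" and the softc fields are
 * hypothetical, not names from this file): a driver that permits
 * deferred load callbacks passes busdma_lock_mutex as the lockfunc and
 * the mutex protecting its state as the lockfunc argument when
 * creating a tag, so busdma can take the driver lock around a deferred
 * callback:
 *
 *	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
 *	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
 *	    MYDEV_MAXIO, 1, MYDEV_MAXIO, 0,
 *	    busdma_lock_mutex, &sc->sc_mtx, &sc->sc_dmat);
 */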

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 *
 * XXX Should have a way to identify which driver is responsible here.
 */
void
_busdma_dflt_lock(void *arg, bus_dma_lock_op_t op)
{

	panic("driver error: _busdma_dflt_lock called");
}

/*
 * Load up data starting at offset within a region specified by a
 * list of virtual address ranges until either length or the region
 * is exhausted.
 */
static int
_bus_dmamap_load_vlist(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *list, int sglist_cnt, struct pmap *pmap, int *nsegs,
    int flags, size_t offset, size_t length)
{
	int error;

	error = 0;
	for (; sglist_cnt > 0 && length != 0; sglist_cnt--, list++) {
		char *addr;
		size_t ds_len;

		KASSERT((offset < list->ds_len),
		    ("Invalid mid-segment offset"));
		addr = (char *)(uintptr_t)list->ds_addr + offset;
		ds_len = list->ds_len - offset;
		offset = 0;
		if (ds_len > length)
			ds_len = length;
		length -= ds_len;
		KASSERT((ds_len != 0), ("Segment length is zero"));
		error = _bus_dmamap_load_buffer(dmat, map, addr, ds_len, pmap,
		    flags, NULL, nsegs);
		if (error)
			break;
	}
	return (error);
}

/*
 * Load a list of physical addresses.
 */
static int
_bus_dmamap_load_plist(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *list, int sglist_cnt, int *nsegs, int flags)
{
	int error;

	error = 0;
	for (; sglist_cnt > 0; sglist_cnt--, list++) {
		error = _bus_dmamap_load_phys(dmat, map,
		    (vm_paddr_t)list->ds_addr, list->ds_len, flags, NULL,
		    nsegs);
		if (error)
			break;
	}
	return (error);
}

/*
 * Load an unmapped (M_EXTPG) mbuf.  Such an mbuf carries an optional
 * header buffer, an array of physical pages, and an optional trailer
 * buffer; each piece is loaded in order, honoring any leading offset
 * into the mbuf's data.
 */
static int
_bus_dmamap_load_mbuf_epg(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m, bus_dma_segment_t *segs, int *nsegs, int flags)
{
	int error, i, off, len, pglen, pgoff, seglen, segoff;

	M_ASSERTEXTPG(m);

	len = m->m_len;
	error = 0;

	/* Skip over any data removed from the front. */
	off = mtod(m, vm_offset_t);

	if (m->m_epg_hdrlen != 0) {
		if (off >= m->m_epg_hdrlen) {
			off -= m->m_epg_hdrlen;
		} else {
			seglen = m->m_epg_hdrlen - off;
			segoff = off;
			seglen = min(seglen, len);
			off = 0;
			len -= seglen;
			error = _bus_dmamap_load_buffer(dmat, map,
			    &m->m_epg_hdr[segoff], seglen, kernel_pmap,
			    flags, segs, nsegs);
		}
	}
	pgoff = m->m_epg_1st_off;
	for (i = 0; i < m->m_epg_npgs && error == 0 && len > 0; i++) {
		pglen = m_epg_pagelen(m, i, pgoff);
		if (off >= pglen) {
			off -= pglen;
			pgoff = 0;
			continue;
		}
		seglen = pglen - off;
		segoff = pgoff + off;
		off = 0;
		seglen = min(seglen, len);
		len -= seglen;
		error = _bus_dmamap_load_phys(dmat, map,
		    m->m_epg_pa[i] + segoff, seglen, flags, segs, nsegs);
		pgoff = 0;
	}
	if (len != 0 && error == 0) {
		KASSERT((off + len) <= m->m_epg_trllen,
		    ("off + len > trail (%d + %d > %d)", off, len,
		    m->m_epg_trllen));
		error = _bus_dmamap_load_buffer(dmat, map,
		    &m->m_epg_trail[off], len, kernel_pmap, flags, segs,
		    nsegs);
	}
	return (error);
}

/*
 * Load a single mbuf.
 */
static int
_bus_dmamap_load_single_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m, bus_dma_segment_t *segs, int *nsegs, int flags)
{
	int error;

	error = 0;
	if ((m->m_flags & M_EXTPG) != 0)
		error = _bus_dmamap_load_mbuf_epg(dmat, map, m, segs, nsegs,
		    flags);
	else
		error = _bus_dmamap_load_buffer(dmat, map, m->m_data, m->m_len,
		    kernel_pmap, flags | BUS_DMA_LOAD_MBUF, segs, nsegs);
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, *nsegs);
	return (error);
}

/*
 * Load an mbuf chain.
 */
static int
_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, int flags)
{
	struct mbuf *m;
	int error;

	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len > 0) {
			if ((m->m_flags & M_EXTPG) != 0)
				error = _bus_dmamap_load_mbuf_epg(dmat,
				    map, m, segs, nsegs, flags);
			else
				error = _bus_dmamap_load_buffer(dmat, map,
				    m->m_data, m->m_len, kernel_pmap,
				    flags | BUS_DMA_LOAD_MBUF, segs, nsegs);
		}
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, *nsegs);
	return (error);
}

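/*
 * Load an array of vm_pages by loading each page's physical range in
 * turn; serves as a trivial _bus_dmamap_load_ma() implementation for
 * platforms without a specialized one.
 */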
int
bus_dmamap_load_ma_triv(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{
	vm_paddr_t paddr;
	bus_size_t len;
	int error, i;

	error = 0;
	for (i = 0; tlen > 0; i++, tlen -= len) {
		len = min(PAGE_SIZE - ma_offs, tlen);
		paddr = VM_PAGE_TO_PHYS(ma[i]) + ma_offs;
		error = _bus_dmamap_load_phys(dmat, map, paddr, len,
		    flags, segs, segp);
		if (error != 0)
			break;
		ma_offs = 0;
	}
	return (error);
}

/*
 * Load a uio.
 */
static int
_bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    int *nsegs, int flags)
{
	bus_size_t resid;
	bus_size_t minlen;
	struct iovec *iov;
	pmap_t pmap;
	caddr_t addr;
	int error, i;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = kernel_pmap;
	resid = uio->uio_resid;
	iov = uio->uio_iov;
	error = 0;

	/*
	 * Load each iovec until either the residual count or the iovec
	 * list is exhausted.
	 */
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		addr = (caddr_t)iov[i].iov_base;
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map, addr,
			    minlen, pmap, flags, NULL, nsegs);
			resid -= minlen;
		}
	}

	return (error);
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	struct memdesc mem;
	int error;
	int nsegs;

#ifdef KMSAN
	mem = memdesc_vaddr(buf, buflen);
	_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif

	if ((flags & BUS_DMA_NOWAIT) == 0) {
		mem = memdesc_vaddr(buf, buflen);
		_bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg);
	}

	/*
	 * The loaders treat *nsegs as the index of the last segment
	 * filled, so start at -1 and convert to a count afterwards.
	 */
	nsegs = -1;
	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, kernel_pmap,
	    flags, NULL, &nsegs);
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}
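
/*
 * Example (a sketch; "sc", "xfer_done" and the softc fields are
 * hypothetical, not names from this file): a typical load that lets
 * busdma defer the callback when bounce pages are scarce:
 *
 *	static void
 *	xfer_done(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		struct mydev_softc *sc = arg;
 *
 *		if (error != 0)
 *			return;
 *		// Program segs[0..nseg-1] into the device here.
 *	}
 *
 *	error = bus_dmamap_load(sc->sc_dmat, sc->sc_map, sc->sc_buf,
 *	    sc->sc_buflen, xfer_done, sc, 0);
 *	if (error == EINPROGRESS) {
 *		// Deferred: xfer_done will run later, with the driver
 *		// lock taken via the tag's lockfunc.
 *	}
 */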

/*
 * Load an mbuf chain and invoke the callback immediately; deferred
 * loading is not supported, so BUS_DMA_NOWAIT is forced.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int nsegs, error;

	M_ASSERTPKTHDR(m0);

#ifdef KMSAN
	struct memdesc mem = memdesc_mbuf(m0);
	_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif

	flags |= BUS_DMA_NOWAIT;
	nsegs = -1;
	error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, NULL, &nsegs, flags);
	++nsegs;

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, m0->m_pkthdr.len, error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);
	return (error);
}

/*
 * Load an mbuf chain into a caller-provided segment array; no callback
 * is used, so BUS_DMA_NOWAIT is forced.
 */
int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dma_segment_t *segs, int *nsegs, int flags)
{
	int error;

#ifdef KMSAN
	struct memdesc mem = memdesc_mbuf(m0);
	_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif

	flags |= BUS_DMA_NOWAIT;
	*nsegs = -1;
	error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags);
	++*nsegs;
	_bus_dmamap_complete(dmat, map, segs, *nsegs, error);
	return (error);
}
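
/*
 * Example (a sketch; "MYDEV_MAX_SEGS", "txr" and "txb" are
 * hypothetical, not names from this file): the usual transmit-path
 * pattern is to try the load and collapse the chain on EFBIG before
 * retrying:
 *
 *	bus_dma_segment_t segs[MYDEV_MAX_SEGS];
 *	struct mbuf *n;
 *	int error, nsegs;
 *
 *	error = bus_dmamap_load_mbuf_sg(txr->tag, txb->map, m, segs,
 *	    &nsegs, BUS_DMA_NOWAIT);
 *	if (error == EFBIG) {
 *		n = m_collapse(m, M_NOWAIT, MYDEV_MAX_SEGS);
 *		if (n == NULL)
 *			return (ENOBUFS);
 *		m = n;
 *		error = bus_dmamap_load_mbuf_sg(txr->tag, txb->map, m,
 *		    segs, &nsegs, BUS_DMA_NOWAIT);
 *	}
 */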

/*
 * Load a uio and invoke the callback immediately; deferred loading is
 * not supported, so BUS_DMA_NOWAIT is forced.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int nsegs, error;

#ifdef KMSAN
	struct memdesc mem = memdesc_uio(uio);
	_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif

	flags |= BUS_DMA_NOWAIT;
	nsegs = -1;
	error = _bus_dmamap_load_uio(dmat, map, uio, &nsegs, flags);
	nsegs++;

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, uio->uio_resid, error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);
	return (error);
}
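
/*
 * Example (a sketch; "sc" and "rw_done" are hypothetical): a character
 * device read/write routine can hand its struct uio straight to busdma:
 *
 *	error = bus_dmamap_load_uio(sc->sc_dmat, sc->sc_map, uio,
 *	    rw_done, sc, BUS_DMA_NOWAIT);
 *
 * rw_done is a bus_dmamap_callback2_t and receives both the segment
 * list and the total mapped length (uio->uio_resid at load time).
 */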

/*
 * Load the memory described by a bio.
 */
int
bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio,
    bus_dmamap_callback_t *callback, void *callback_arg, int flags)
{
	struct memdesc mem;

	mem = memdesc_bio(bio);
	return (bus_dmamap_load_mem(dmat, map, &mem, callback, callback_arg,
	    flags));
}

/*
 * Load an arbitrary memory descriptor, dispatching on its type.
 */
int
bus_dmamap_load_mem(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int error;
	int nsegs;

#ifdef KMSAN
	_bus_dmamap_load_kmsan(dmat, map, mem);
#endif

	if ((flags & BUS_DMA_NOWAIT) == 0)
		_bus_dmamap_waitok(dmat, map, mem, callback, callback_arg);

	nsegs = -1;
	error = 0;
	switch (mem->md_type) {
	case MEMDESC_VADDR:
		error = _bus_dmamap_load_buffer(dmat, map, mem->u.md_vaddr,
		    mem->md_len, kernel_pmap, flags, NULL, &nsegs);
		break;
	case MEMDESC_PADDR:
		error = _bus_dmamap_load_phys(dmat, map, mem->u.md_paddr,
		    mem->md_len, flags, NULL, &nsegs);
		break;
	case MEMDESC_VLIST:
		error = _bus_dmamap_load_vlist(dmat, map, mem->u.md_list,
		    mem->md_nseg, kernel_pmap, &nsegs, flags, 0, SIZE_T_MAX);
		break;
	case MEMDESC_PLIST:
		error = _bus_dmamap_load_plist(dmat, map, mem->u.md_list,
		    mem->md_nseg, &nsegs, flags);
		break;
	case MEMDESC_UIO:
		error = _bus_dmamap_load_uio(dmat, map, mem->u.md_uio,
		    &nsegs, flags);
		break;
	case MEMDESC_MBUF:
		error = _bus_dmamap_load_mbuf_sg(dmat, map, mem->u.md_mbuf,
		    NULL, &nsegs, flags);
		break;
	case MEMDESC_VMPAGES:
		error = _bus_dmamap_load_ma(dmat, map, mem->u.md_ma,
		    mem->md_len, mem->md_offset, flags, NULL, &nsegs);
		break;
	}
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}
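
/*
 * Example (a sketch; "sc" and "io_done" are hypothetical): callers with
 * memory that does not fit one of the convenience wrappers build a
 * struct memdesc themselves, exactly as bus_dmamap_load_bio() does:
 *
 *	struct memdesc mem;
 *
 *	mem = memdesc_vaddr(sc->sc_buf, sc->sc_buflen);
 *	error = bus_dmamap_load_mem(sc->sc_dmat, sc->sc_map, &mem,
 *	    io_done, sc, BUS_DMA_NOWAIT);
 */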

/*
 * Load a crypto buffer, dispatching on its type; deferred loading is
 * not supported, so BUS_DMA_NOWAIT is forced.
 */
int
bus_dmamap_load_crp_buffer(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct crypto_buffer *cb, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int error;
	int nsegs;

	flags |= BUS_DMA_NOWAIT;
	nsegs = -1;
	error = 0;
	switch (cb->cb_type) {
	case CRYPTO_BUF_CONTIG:
		error = _bus_dmamap_load_buffer(dmat, map, cb->cb_buf,
		    cb->cb_buf_len, kernel_pmap, flags, NULL, &nsegs);
		break;
	case CRYPTO_BUF_MBUF:
		error = _bus_dmamap_load_mbuf_sg(dmat, map, cb->cb_mbuf,
		    NULL, &nsegs, flags);
		break;
	case CRYPTO_BUF_SINGLE_MBUF:
		error = _bus_dmamap_load_single_mbuf(dmat, map, cb->cb_mbuf,
		    NULL, &nsegs, flags);
		break;
	case CRYPTO_BUF_UIO:
		error = _bus_dmamap_load_uio(dmat, map, cb->cb_uio, &nsegs,
		    flags);
		break;
	case CRYPTO_BUF_VMPAGE:
		error = _bus_dmamap_load_ma(dmat, map, cb->cb_vm_page,
		    cb->cb_vm_page_len, cb->cb_vm_page_offset, flags, NULL,
		    &nsegs);
		break;
	default:
		error = EINVAL;
	}
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}

/*
 * Load the buffer of a crypto request.
 */
int
bus_dmamap_load_crp(bus_dma_tag_t dmat, bus_dmamap_t map, struct cryptop *crp,
    bus_dmamap_callback_t *callback, void *callback_arg, int flags)
{
	return (bus_dmamap_load_crp_buffer(dmat, map, &crp->crp_buf, callback,
	    callback_arg, flags));
}
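
/*
 * Example (a sketch; "sc", "s" and "mydev_crp_cb" are hypothetical): a
 * crypto driver maps a request from its process method like so:
 *
 *	error = bus_dmamap_load_crp(sc->sc_dmat, s->s_map, crp,
 *	    mydev_crp_cb, s, BUS_DMA_NOWAIT);
 *
 * Since crypto loads cannot be deferred, the callback has already run
 * by the time this returns.
 */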

void
bus_dma_template_init(bus_dma_template_t *t, bus_dma_tag_t parent)
{

	if (t == NULL)
		return;

	t->parent = parent;
	t->alignment = 1;
	t->boundary = 0;
	t->lowaddr = t->highaddr = BUS_SPACE_MAXADDR;
	t->maxsize = t->maxsegsize = BUS_SPACE_MAXSIZE;
	t->nsegments = BUS_SPACE_UNRESTRICTED;
	t->lockfunc = NULL;
	t->lockfuncarg = NULL;
	t->flags = 0;
}

int
bus_dma_template_tag(bus_dma_template_t *t, bus_dma_tag_t *dmat)
{

	if (t == NULL || dmat == NULL)
		return (EINVAL);

	return (bus_dma_tag_create(t->parent, t->alignment, t->boundary,
	    t->lowaddr, t->highaddr, NULL, NULL, t->maxsize,
	    t->nsegments, t->maxsegsize, t->flags, t->lockfunc, t->lockfuncarg,
	    dmat));
}

/*
 * Fill in template fields from an array of key/value parameters.
 */
void
bus_dma_template_fill(bus_dma_template_t *t, bus_dma_param_t *kv, u_int count)
{
	bus_dma_param_t *pkv;

	while (count) {
		pkv = &kv[--count];
		switch (pkv->key) {
		case BD_PARAM_PARENT:
			t->parent = pkv->ptr;
			break;
		case BD_PARAM_ALIGNMENT:
			t->alignment = pkv->num;
			break;
		case BD_PARAM_BOUNDARY:
			t->boundary = pkv->num;
			break;
		case BD_PARAM_LOWADDR:
			t->lowaddr = pkv->pa;
			break;
		case BD_PARAM_HIGHADDR:
			t->highaddr = pkv->pa;
			break;
		case BD_PARAM_MAXSIZE:
			t->maxsize = pkv->num;
			break;
		case BD_PARAM_NSEGMENTS:
			t->nsegments = pkv->num;
			break;
		case BD_PARAM_MAXSEGSIZE:
			t->maxsegsize = pkv->num;
			break;
		case BD_PARAM_FLAGS:
			t->flags = pkv->num;
			break;
		case BD_PARAM_LOCKFUNC:
			t->lockfunc = pkv->ptr;
			break;
		case BD_PARAM_LOCKFUNCARG:
			t->lockfuncarg = pkv->ptr;
			break;
		case BD_PARAM_NAME:
			t->name = pkv->ptr;
			break;
		case BD_PARAM_INVALID:
		default:
			KASSERT(0, ("Invalid key %d\n", pkv->key));
			break;
		}
	}
}
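
/*
 * Example (a sketch; "MYDEV_BUFSZ" and "sc" are hypothetical): a driver
 * can initialize a template, override the few fields it cares about,
 * and then create the tag:
 *
 *	bus_dma_template_t t;
 *
 *	bus_dma_template_init(&t, bus_get_dma_tag(dev));
 *	t.maxsize = MYDEV_BUFSZ;
 *	t.nsegments = 1;
 *	t.maxsegsize = MYDEV_BUFSZ;
 *	error = bus_dma_template_tag(&t, &sc->sc_dmat);
 *
 * bus_dma_template_fill() performs the same field-by-field assignment
 * from a bus_dma_param_t key/value array.
 */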

#ifndef IOMMU
/*
 * Stubs so that these symbols exist in kernels built without IOMMU
 * support.
 */
bool bus_dma_iommu_set_buswide(device_t dev);
int bus_dma_iommu_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t start, vm_size_t length, int flags);

bool
bus_dma_iommu_set_buswide(device_t dev)
{
	return (false);
}

int
bus_dma_iommu_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t start, vm_size_t length, int flags)
{
	return (0);
}
#endif
705