/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012 EMC Corp.
 * All rights reserved.
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_bus.h"
#include "opt_iommu.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <opencrypto/cryptodev.h>

#include <machine/bus.h>

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}
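
/*
 * Example (illustrative only; the softc layout and names are hypothetical):
 * a driver that allows deferred callbacks typically passes busdma_lock_mutex
 * and its own mutex as the lockfunc/lockfuncarg pair when creating a tag, so
 * that deferred loads complete with the driver lock held:
 *
 *	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
 *	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
 *	    FOO_BUFSZ, 1, FOO_BUFSZ, 0, busdma_lock_mutex, &sc->foo_mtx,
 *	    &sc->foo_dmat);
 */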

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 *
 * XXX Should have a way to identify which driver is responsible here.
 */
void
_busdma_dflt_lock(void *arg, bus_dma_lock_op_t op)
{

	panic("driver error: _busdma_dflt_lock called");
}

/*
 * Load up data starting at offset within a region specified by a
 * list of virtual address ranges until either the length or the region
 * is exhausted.
 */
static int
_bus_dmamap_load_vlist(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *list, int sglist_cnt, struct pmap *pmap, int *nsegs,
    int flags, size_t offset, size_t length)
{
	int error;

	error = 0;
	for (; sglist_cnt > 0 && length != 0; sglist_cnt--, list++) {
		char *addr;
		size_t ds_len;

		KASSERT((offset < list->ds_len),
		    ("Invalid mid-segment offset"));
		addr = (char *)(uintptr_t)list->ds_addr + offset;
		ds_len = list->ds_len - offset;
		offset = 0;
		if (ds_len > length)
			ds_len = length;
		length -= ds_len;
		KASSERT((ds_len != 0), ("Segment length is zero"));
		error = _bus_dmamap_load_buffer(dmat, map, addr, ds_len, pmap,
		    flags, NULL, nsegs);
		if (error)
			break;
	}
	return (error);
}

/*
 * Load a list of physical addresses.
 */
static int
_bus_dmamap_load_plist(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *list, int sglist_cnt, int *nsegs, int flags)
{
	int error;

	error = 0;
	for (; sglist_cnt > 0; sglist_cnt--, list++) {
		error = _bus_dmamap_load_phys(dmat, map,
		    (vm_paddr_t)list->ds_addr, list->ds_len, flags, NULL,
		    nsegs);
		if (error)
			break;
	}
	return (error);
}

/*
 * Load an unmapped (M_EXTPG) mbuf: an optional header buffer, an array
 * of unmapped pages, and an optional trailer buffer.
 */
static int
_bus_dmamap_load_mbuf_epg(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m, bus_dma_segment_t *segs, int *nsegs, int flags)
{
	int error, i, off, len, pglen, pgoff, seglen, segoff;

	M_ASSERTEXTPG(m);

	len = m->m_len;
	error = 0;

	/* Skip over any data removed from the front. */
	off = mtod(m, vm_offset_t);

	if (m->m_epg_hdrlen != 0) {
		if (off >= m->m_epg_hdrlen) {
			off -= m->m_epg_hdrlen;
		} else {
			seglen = m->m_epg_hdrlen - off;
			segoff = off;
			seglen = min(seglen, len);
			off = 0;
			len -= seglen;
			error = _bus_dmamap_load_buffer(dmat, map,
			    &m->m_epg_hdr[segoff], seglen, kernel_pmap,
			    flags, segs, nsegs);
		}
	}
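	/*
	 * Walk the array of unmapped pages, consuming any remaining
	 * front offset before mapping data.
	 */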
	pgoff = m->m_epg_1st_off;
	for (i = 0; i < m->m_epg_npgs && error == 0 && len > 0; i++) {
		pglen = m_epg_pagelen(m, i, pgoff);
		if (off >= pglen) {
			off -= pglen;
			pgoff = 0;
			continue;
		}
		seglen = pglen - off;
		segoff = pgoff + off;
		off = 0;
		seglen = min(seglen, len);
		len -= seglen;
		error = _bus_dmamap_load_phys(dmat, map,
		    m->m_epg_pa[i] + segoff, seglen, flags, segs, nsegs);
		pgoff = 0;
	}
	if (len != 0 && error == 0) {
		KASSERT((off + len) <= m->m_epg_trllen,
		    ("off + len > trail (%d + %d > %d)", off, len,
		    m->m_epg_trllen));
		error = _bus_dmamap_load_buffer(dmat, map,
		    &m->m_epg_trail[off], len, kernel_pmap, flags, segs,
		    nsegs);
	}
	return (error);
}

/*
 * Load a single mbuf.
 */
static int
_bus_dmamap_load_single_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m, bus_dma_segment_t *segs, int *nsegs, int flags)
{
	int error;

	error = 0;
	if ((m->m_flags & M_EXTPG) != 0)
		error = _bus_dmamap_load_mbuf_epg(dmat, map, m, segs, nsegs,
		    flags);
	else
		error = _bus_dmamap_load_buffer(dmat, map, m->m_data, m->m_len,
		    kernel_pmap, flags | BUS_DMA_LOAD_MBUF, segs, nsegs);
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, *nsegs);
	return (error);
}

/*
 * Load an mbuf chain.
 */
static int
_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, int flags)
{
	struct mbuf *m;
	int error;

	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len > 0) {
			if ((m->m_flags & M_EXTPG) != 0)
				error = _bus_dmamap_load_mbuf_epg(dmat,
				    map, m, segs, nsegs, flags);
			else
				error = _bus_dmamap_load_buffer(dmat, map,
				    m->m_data, m->m_len, kernel_pmap,
				    flags | BUS_DMA_LOAD_MBUF, segs, nsegs);
		}
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, *nsegs);
	return (error);
}

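/*
 * Load an array of vm_page pointers by mapping each page's physical
 * address in turn.  This is the trivial fallback used where an
 * implementation provides no specialized _bus_dmamap_load_ma().
 */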
int
bus_dmamap_load_ma_triv(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{
	vm_paddr_t paddr;
	bus_size_t len;
	int error, i;

	error = 0;
	for (i = 0; tlen > 0; i++, tlen -= len) {
		len = min(PAGE_SIZE - ma_offs, tlen);
		paddr = VM_PAGE_TO_PHYS(ma[i]) + ma_offs;
		error = _bus_dmamap_load_phys(dmat, map, paddr, len,
		    flags, segs, segp);
		if (error != 0)
			break;
		ma_offs = 0;
	}
	return (error);
}

/*
 * Load a uio.
 */
static int
_bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    int *nsegs, int flags)
{
	bus_size_t resid;
	bus_size_t minlen;
	struct iovec *iov;
	pmap_t pmap;
	caddr_t addr;
	int error, i;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = kernel_pmap;
	resid = uio->uio_resid;
	iov = uio->uio_iov;
	error = 0;

	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		addr = (caddr_t)iov[i].iov_base;
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map, addr,
			    minlen, pmap, flags, NULL, nsegs);
			resid -= minlen;
		}
	}

	return (error);
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	struct memdesc mem;
	int error;
	int nsegs;

#ifdef KMSAN
	mem = memdesc_vaddr(buf, buflen);
	_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif

	if ((flags & BUS_DMA_NOWAIT) == 0) {
		mem = memdesc_vaddr(buf, buflen);
		_bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg);
	}

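	/*
	 * The load helpers count segments with a zero-based index; start
	 * at -1 so the first mapped segment lands at index 0.
	 */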
	nsegs = -1;
	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, kernel_pmap,
	    flags, NULL, &nsegs);
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}
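
/*
 * Example (illustrative only; all names are hypothetical): the caller
 * supplies a callback that records the mapped segments.  If BUS_DMA_NOWAIT
 * is not set the load may be deferred, returning EINPROGRESS and invoking
 * the callback later through the tag's lock function:
 *
 *	static void
 *	foo_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		struct foo_ring *ring = arg;
 *
 *		if (error != 0)
 *			return;
 *		ring->paddr = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamap_load(sc->foo_dmat, sc->foo_map, sc->foo_buf,
 *	    FOO_BUFSZ, foo_dma_cb, ring, 0);
 */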

int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int nsegs, error;

	M_ASSERTPKTHDR(m0);

#ifdef KMSAN
	struct memdesc mem = memdesc_mbuf(m0);
	_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif

	flags |= BUS_DMA_NOWAIT;
	nsegs = -1;
	error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, NULL, &nsegs, flags);
	++nsegs;

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, m0->m_pkthdr.len, error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);
	return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dma_segment_t *segs, int *nsegs, int flags)
{
	int error;

#ifdef KMSAN
	struct memdesc mem = memdesc_mbuf(m0);
	_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif

	flags |= BUS_DMA_NOWAIT;
	*nsegs = -1;
	error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags);
	++*nsegs;
	_bus_dmamap_complete(dmat, map, segs, *nsegs, error);
	return (error);
}
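
/*
 * Example (illustrative only; names are hypothetical): network drivers
 * commonly call bus_dmamap_load_mbuf_sg() from the transmit path with a
 * preallocated segment array, retrying once with m_collapse(9) when the
 * chain needs more segments than the tag allows (EFBIG):
 *
 *	bus_dma_segment_t segs[FOO_MAX_SCATTER];
 *	int error, nsegs;
 *
 *	error = bus_dmamap_load_mbuf_sg(sc->foo_tx_dmat, txbuf->map, m,
 *	    segs, &nsegs, BUS_DMA_NOWAIT);
 *	if (error == EFBIG) {
 *		struct mbuf *n;
 *
 *		n = m_collapse(m, M_NOWAIT, FOO_MAX_SCATTER);
 *		if (n == NULL)
 *			return (ENOBUFS);
 *		m = n;
 *		error = bus_dmamap_load_mbuf_sg(sc->foo_tx_dmat, txbuf->map,
 *		    m, segs, &nsegs, BUS_DMA_NOWAIT);
 *	}
 */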

int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int nsegs, error;

#ifdef KMSAN
	struct memdesc mem = memdesc_uio(uio);
	_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif

	flags |= BUS_DMA_NOWAIT;
	nsegs = -1;
	error = _bus_dmamap_load_uio(dmat, map, uio, &nsegs, flags);
	nsegs++;

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, uio->uio_resid, error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);
	return (error);
}

int
bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio,
    bus_dmamap_callback_t *callback, void *callback_arg, int flags)
{
	struct memdesc mem;

	mem = memdesc_bio(bio);
	return (bus_dmamap_load_mem(dmat, map, &mem, callback, callback_arg,
	    flags));
}

int
bus_dmamap_load_mem(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int error;
	int nsegs;

#ifdef KMSAN
	_bus_dmamap_load_kmsan(dmat, map, mem);
#endif

	if ((flags & BUS_DMA_NOWAIT) == 0)
		_bus_dmamap_waitok(dmat, map, mem, callback, callback_arg);

	nsegs = -1;
	error = 0;
	switch (mem->md_type) {
	case MEMDESC_VADDR:
		error = _bus_dmamap_load_buffer(dmat, map, mem->u.md_vaddr,
		    mem->md_len, kernel_pmap, flags, NULL, &nsegs);
		break;
	case MEMDESC_PADDR:
		error = _bus_dmamap_load_phys(dmat, map, mem->u.md_paddr,
		    mem->md_len, flags, NULL, &nsegs);
		break;
	case MEMDESC_VLIST:
		error = _bus_dmamap_load_vlist(dmat, map, mem->u.md_list,
		    mem->md_nseg, kernel_pmap, &nsegs, flags, 0, SIZE_T_MAX);
		break;
	case MEMDESC_PLIST:
		error = _bus_dmamap_load_plist(dmat, map, mem->u.md_list,
		    mem->md_nseg, &nsegs, flags);
		break;
	case MEMDESC_UIO:
		error = _bus_dmamap_load_uio(dmat, map, mem->u.md_uio,
		    &nsegs, flags);
		break;
	case MEMDESC_MBUF:
		error = _bus_dmamap_load_mbuf_sg(dmat, map, mem->u.md_mbuf,
		    NULL, &nsegs, flags);
		break;
	case MEMDESC_VMPAGES:
		error = _bus_dmamap_load_ma(dmat, map, mem->u.md_ma,
		    mem->md_len, mem->md_offset, flags, NULL, &nsegs);
		break;
	}
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}
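
/*
 * Example (illustrative only): bus_dmamap_load_mem() provides a single
 * entry point for any memory description.  memdesc_vaddr() is the same
 * constructor used in bus_dmamap_load() above; the callback and names
 * are hypothetical:
 *
 *	struct memdesc mem;
 *
 *	mem = memdesc_vaddr(buf, buflen);
 *	error = bus_dmamap_load_mem(dmat, map, &mem, foo_dma_cb, sc,
 *	    BUS_DMA_NOWAIT);
 */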

int
bus_dmamap_load_crp_buffer(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct crypto_buffer *cb, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int error;
	int nsegs;

	flags |= BUS_DMA_NOWAIT;
	nsegs = -1;
	error = 0;
	switch (cb->cb_type) {
	case CRYPTO_BUF_CONTIG:
		error = _bus_dmamap_load_buffer(dmat, map, cb->cb_buf,
		    cb->cb_buf_len, kernel_pmap, flags, NULL, &nsegs);
		break;
	case CRYPTO_BUF_MBUF:
		error = _bus_dmamap_load_mbuf_sg(dmat, map, cb->cb_mbuf,
		    NULL, &nsegs, flags);
		break;
	case CRYPTO_BUF_SINGLE_MBUF:
		error = _bus_dmamap_load_single_mbuf(dmat, map, cb->cb_mbuf,
		    NULL, &nsegs, flags);
		break;
	case CRYPTO_BUF_UIO:
		error = _bus_dmamap_load_uio(dmat, map, cb->cb_uio, &nsegs,
		    flags);
		break;
	case CRYPTO_BUF_VMPAGE:
		error = _bus_dmamap_load_ma(dmat, map, cb->cb_vm_page,
		    cb->cb_vm_page_len, cb->cb_vm_page_offset, flags, NULL,
		    &nsegs);
		break;
	default:
		error = EINVAL;
	}
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}

int
bus_dmamap_load_crp(bus_dma_tag_t dmat, bus_dmamap_t map, struct cryptop *crp,
    bus_dmamap_callback_t *callback, void *callback_arg, int flags)
{
	return (bus_dmamap_load_crp_buffer(dmat, map, &crp->crp_buf, callback,
	    callback_arg, flags));
}

void
bus_dma_template_init(bus_dma_template_t *t, bus_dma_tag_t parent)
{

	if (t == NULL)
		return;

	t->parent = parent;
	t->alignment = 1;
	t->boundary = 0;
	t->lowaddr = t->highaddr = BUS_SPACE_MAXADDR;
	t->maxsize = t->maxsegsize = BUS_SPACE_MAXSIZE;
	t->nsegments = BUS_SPACE_UNRESTRICTED;
	t->lockfunc = NULL;
	t->lockfuncarg = NULL;
	t->flags = 0;
}

int
bus_dma_template_tag(bus_dma_template_t *t, bus_dma_tag_t *dmat)
{

	if (t == NULL || dmat == NULL)
		return (EINVAL);

	return (bus_dma_tag_create(t->parent, t->alignment, t->boundary,
	    t->lowaddr, t->highaddr, NULL, NULL, t->maxsize,
	    t->nsegments, t->maxsegsize, t->flags, t->lockfunc, t->lockfuncarg,
	    dmat));
}
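
/*
 * Example (illustrative only; the values are hypothetical): initialize a
 * template with defaults, override individual fields directly, and create
 * the tag.  The field names match the template structure used above:
 *
 *	bus_dma_template_t t;
 *	bus_dma_tag_t dmat;
 *
 *	bus_dma_template_init(&t, bus_get_dma_tag(dev));
 *	t.maxsize = 65536;
 *	t.nsegments = 1;
 *	t.maxsegsize = 65536;
 *	error = bus_dma_template_tag(&t, &dmat);
 */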

void
bus_dma_template_fill(bus_dma_template_t *t, bus_dma_param_t *kv, u_int count)
{
	bus_dma_param_t *pkv;

	while (count) {
		pkv = &kv[--count];
		switch (pkv->key) {
		case BD_PARAM_PARENT:
			t->parent = pkv->ptr;
			break;
		case BD_PARAM_ALIGNMENT:
			t->alignment = pkv->num;
			break;
		case BD_PARAM_BOUNDARY:
			t->boundary = pkv->num;
			break;
		case BD_PARAM_LOWADDR:
			t->lowaddr = pkv->pa;
			break;
		case BD_PARAM_HIGHADDR:
			t->highaddr = pkv->pa;
			break;
		case BD_PARAM_MAXSIZE:
			t->maxsize = pkv->num;
			break;
		case BD_PARAM_NSEGMENTS:
			t->nsegments = pkv->num;
			break;
		case BD_PARAM_MAXSEGSIZE:
			t->maxsegsize = pkv->num;
			break;
		case BD_PARAM_FLAGS:
			t->flags = pkv->num;
			break;
		case BD_PARAM_LOCKFUNC:
			t->lockfunc = pkv->ptr;
			break;
		case BD_PARAM_LOCKFUNCARG:
			t->lockfuncarg = pkv->ptr;
			break;
		case BD_PARAM_NAME:
			t->name = pkv->ptr;
			break;
		case BD_PARAM_INVALID:
		default:
			KASSERT(0, ("Invalid key %d\n", pkv->key));
			break;
		}
	}
}
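
/*
 * Example (illustrative only): the same template can be filled from a
 * key/value list.  This sketch assumes bus_dma_param_t exposes the key
 * along with the num/ptr/pa members dispatched on above:
 *
 *	bus_dma_param_t params[] = {
 *		{ .key = BD_PARAM_MAXSIZE, .num = 65536 },
 *		{ .key = BD_PARAM_NSEGMENTS, .num = 1 },
 *	};
 *
 *	bus_dma_template_fill(&t, params, nitems(params));
 */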

#ifndef IOMMU
bool bus_dma_iommu_set_buswide(device_t dev);
int bus_dma_iommu_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t start, vm_size_t length, int flags);

bool
bus_dma_iommu_set_buswide(device_t dev)
{
	return (false);
}

int
bus_dma_iommu_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t start, vm_size_t length, int flags)
{
	return (0);
}
#endif