/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012 EMC Corp.
 * All rights reserved.
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bus.h"
#include "opt_iommu.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>

#include <opencrypto/cryptodev.h>

#include <machine/bus.h>

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

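/*
 * Example (illustrative sketch only; "sc", "sc_mtx" and "sc_dmat" are
 * hypothetical softc fields): a driver whose map loads may be deferred
 * passes busdma_lock_mutex and its own mutex as the lockfunc pair when
 * creating a tag:
 *
 *	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
 *	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
 *	    MAXBSIZE, 1, MAXBSIZE, 0, busdma_lock_mutex, &sc->sc_mtx,
 *	    &sc->sc_dmat);
 */
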
/*
 * _busdma_dflt_lock() should never get called.  It gets put into the dma
 * tag when lockfunc == NULL, which is only valid if the maps that are
 * associated with the tag are meant to never be deferred.
 *
 * XXX Should have a way to identify which driver is responsible here.
 */
void
_busdma_dflt_lock(void *arg, bus_dma_lock_op_t op)
{

	panic("driver error: _busdma_dflt_lock called");
}

/*
 * Load up data starting at offset within a region specified by a
 * list of virtual address ranges until either length or the region
 * is exhausted.
 */
static int
_bus_dmamap_load_vlist(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *list, int sglist_cnt, struct pmap *pmap, int *nsegs,
    int flags, size_t offset, size_t length)
{
	int error;

	error = 0;
	for (; sglist_cnt > 0 && length != 0; sglist_cnt--, list++) {
		char *addr;
		size_t ds_len;

		KASSERT((offset < list->ds_len),
		    ("Invalid mid-segment offset"));
		addr = (char *)(uintptr_t)list->ds_addr + offset;
		ds_len = list->ds_len - offset;
		offset = 0;
		if (ds_len > length)
			ds_len = length;
		length -= ds_len;
		KASSERT((ds_len != 0), ("Segment length is zero"));
		error = _bus_dmamap_load_buffer(dmat, map, addr, ds_len, pmap,
		    flags, NULL, nsegs);
		if (error)
			break;
	}
	return (error);
}

/*
 * Load a list of physical addresses.
 */
static int
_bus_dmamap_load_plist(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *list, int sglist_cnt, int *nsegs, int flags)
{
	int error;

	error = 0;
	for (; sglist_cnt > 0; sglist_cnt--, list++) {
		error = _bus_dmamap_load_phys(dmat, map,
		    (vm_paddr_t)list->ds_addr, list->ds_len, flags, NULL,
		    nsegs);
		if (error)
			break;
	}
	return (error);
}

/*
 * Load an unmapped mbuf.
 */
static int
_bus_dmamap_load_mbuf_epg(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m, bus_dma_segment_t *segs, int *nsegs, int flags)
{
	int error, i, off, len, pglen, pgoff, seglen, segoff;

	M_ASSERTEXTPG(m);

	len = m->m_len;
	error = 0;

	/* Skip over any data removed from the front. */
	off = mtod(m, vm_offset_t);

	if (m->m_epg_hdrlen != 0) {
		if (off >= m->m_epg_hdrlen) {
			off -= m->m_epg_hdrlen;
		} else {
			seglen = m->m_epg_hdrlen - off;
			segoff = off;
			seglen = min(seglen, len);
			off = 0;
			len -= seglen;
			error = _bus_dmamap_load_buffer(dmat, map,
			    &m->m_epg_hdr[segoff], seglen, kernel_pmap,
			    flags, segs, nsegs);
		}
	}
	pgoff = m->m_epg_1st_off;
	for (i = 0; i < m->m_epg_npgs && error == 0 && len > 0; i++) {
		pglen = m_epg_pagelen(m, i, pgoff);
		if (off >= pglen) {
			off -= pglen;
			pgoff = 0;
			continue;
		}
		seglen = pglen - off;
		segoff = pgoff + off;
		off = 0;
		seglen = min(seglen, len);
		len -= seglen;
		error = _bus_dmamap_load_phys(dmat, map,
		    m->m_epg_pa[i] + segoff, seglen, flags, segs, nsegs);
		pgoff = 0;
	}
	if (len != 0 && error == 0) {
		KASSERT((off + len) <= m->m_epg_trllen,
		    ("off + len > trail (%d + %d > %d)", off, len,
		    m->m_epg_trllen));
		error = _bus_dmamap_load_buffer(dmat, map,
		    &m->m_epg_trail[off], len, kernel_pmap, flags, segs,
		    nsegs);
	}
	return (error);
}

/*
 * Load a single mbuf.
 */
static int
_bus_dmamap_load_single_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m, bus_dma_segment_t *segs, int *nsegs, int flags)
{
	int error;

	error = 0;
	if ((m->m_flags & M_EXTPG) != 0)
		error = _bus_dmamap_load_mbuf_epg(dmat, map, m, segs, nsegs,
		    flags);
	else
		error = _bus_dmamap_load_buffer(dmat, map, m->m_data, m->m_len,
		    kernel_pmap, flags | BUS_DMA_LOAD_MBUF, segs, nsegs);
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, *nsegs);
	return (error);
}

/*
 * Load an mbuf chain.
 */
static int
_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, int flags)
{
	struct mbuf *m;
	int error;

	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len > 0) {
			if ((m->m_flags & M_EXTPG) != 0)
				error = _bus_dmamap_load_mbuf_epg(dmat,
				    map, m, segs, nsegs, flags);
			else
				error = _bus_dmamap_load_buffer(dmat, map,
				    m->m_data, m->m_len, kernel_pmap,
				    flags | BUS_DMA_LOAD_MBUF, segs, nsegs);
		}
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, *nsegs);
	return (error);
}

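/*
 * Load an array of vm pages one physical page at a time.  This is the
 * trivial page-by-page implementation that platform busdma backends can
 * use for _bus_dmamap_load_ma().
 */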
int
bus_dmamap_load_ma_triv(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{
	vm_paddr_t paddr;
	bus_size_t len;
	int error, i;

	error = 0;
	for (i = 0; tlen > 0; i++, tlen -= len) {
		len = min(PAGE_SIZE - ma_offs, tlen);
		paddr = VM_PAGE_TO_PHYS(ma[i]) + ma_offs;
		error = _bus_dmamap_load_phys(dmat, map, paddr, len,
		    flags, segs, segp);
		if (error != 0)
			break;
		ma_offs = 0;
	}
	return (error);
}

/*
 * Load a uio.
 */
static int
_bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    int *nsegs, int flags)
{
	bus_size_t resid;
	bus_size_t minlen;
	struct iovec *iov;
	pmap_t pmap;
	caddr_t addr;
	int error, i;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else {
		pmap = kernel_pmap;
	}
	resid = uio->uio_resid;
	iov = uio->uio_iov;
	error = 0;

	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		addr = (caddr_t)iov[i].iov_base;
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map, addr,
			    minlen, pmap, flags, NULL, nsegs);
			resid -= minlen;
		}
	}

	return (error);
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	struct memdesc mem;
	int error;
	int nsegs;

#ifdef KMSAN
	mem = memdesc_vaddr(buf, buflen);
	_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif

	if ((flags & BUS_DMA_NOWAIT) == 0) {
		mem = memdesc_vaddr(buf, buflen);
		_bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg);
	}

	nsegs = -1;
	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, kernel_pmap,
	    flags, NULL, &nsegs);
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}

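/*
 * Example (illustrative sketch only; the callback, softc and field names
 * are hypothetical): callers of bus_dmamap_load() supply a callback that
 * captures the segment array, since the load may complete synchronously
 * or, if deferred, later from busdma_swi():
 *
 *	static void
 *	example_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg,
 *	    int error)
 *	{
 *		if (error == 0)
 *			*(bus_addr_t *)arg = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamap_load(sc->sc_dmat, sc->sc_map, buf, buflen,
 *	    example_dma_cb, &sc->sc_busaddr, BUS_DMA_NOWAIT);
 */

/*
 * Load a packet header mbuf chain.  The load is never deferred;
 * BUS_DMA_NOWAIT is implied.
 */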
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int nsegs, error;

	M_ASSERTPKTHDR(m0);

#ifdef KMSAN
	struct memdesc mem = memdesc_mbuf(m0);
	_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif

	flags |= BUS_DMA_NOWAIT;
	nsegs = -1;
	error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, NULL, &nsegs, flags);
	++nsegs;

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, m0->m_pkthdr.len, error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);
	return (error);
}

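/*
 * Load an mbuf chain directly into a caller-supplied segment array,
 * bypassing the callback.  The load is never deferred; BUS_DMA_NOWAIT
 * is implied.
 */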
int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dma_segment_t *segs, int *nsegs, int flags)
{
	int error;

#ifdef KMSAN
	struct memdesc mem = memdesc_mbuf(m0);
	_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif

	flags |= BUS_DMA_NOWAIT;
	*nsegs = -1;
	error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags);
	++*nsegs;
	_bus_dmamap_complete(dmat, map, segs, *nsegs, error);
	return (error);
}

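/*
 * Load a uio and invoke the callback with the resulting segment list.
 * The load is never deferred; BUS_DMA_NOWAIT is implied.
 */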
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int nsegs, error;

#ifdef KMSAN
	struct memdesc mem = memdesc_uio(uio);
	_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif

	flags |= BUS_DMA_NOWAIT;
	nsegs = -1;
	error = _bus_dmamap_load_uio(dmat, map, uio, &nsegs, flags);
	nsegs++;

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, uio->uio_resid, error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);
	return (error);
}

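/*
 * Load the buffer described by a struct bio by wrapping it in a memory
 * descriptor and passing it to bus_dmamap_load_mem().
 */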
int
bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio,
    bus_dmamap_callback_t *callback, void *callback_arg, int flags)
{
	struct memdesc mem;

	mem = memdesc_bio(bio);
	return (bus_dmamap_load_mem(dmat, map, &mem, callback, callback_arg,
	    flags));
}

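/*
 * Load an arbitrary memory descriptor, dispatching on its type to the
 * matching loader above.  This is the most general load routine; the
 * callback may be deferred unless BUS_DMA_NOWAIT is passed.
 */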
int
bus_dmamap_load_mem(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int error;
	int nsegs;

#ifdef KMSAN
	_bus_dmamap_load_kmsan(dmat, map, mem);
#endif

	if ((flags & BUS_DMA_NOWAIT) == 0)
		_bus_dmamap_waitok(dmat, map, mem, callback, callback_arg);

	nsegs = -1;
	error = 0;
	switch (mem->md_type) {
	case MEMDESC_VADDR:
		error = _bus_dmamap_load_buffer(dmat, map, mem->u.md_vaddr,
		    mem->md_len, kernel_pmap, flags, NULL, &nsegs);
		break;
	case MEMDESC_PADDR:
		error = _bus_dmamap_load_phys(dmat, map, mem->u.md_paddr,
		    mem->md_len, flags, NULL, &nsegs);
		break;
	case MEMDESC_VLIST:
		error = _bus_dmamap_load_vlist(dmat, map, mem->u.md_list,
		    mem->md_nseg, kernel_pmap, &nsegs, flags, 0, SIZE_T_MAX);
		break;
	case MEMDESC_PLIST:
		error = _bus_dmamap_load_plist(dmat, map, mem->u.md_list,
		    mem->md_nseg, &nsegs, flags);
		break;
	case MEMDESC_UIO:
		error = _bus_dmamap_load_uio(dmat, map, mem->u.md_uio,
		    &nsegs, flags);
		break;
	case MEMDESC_MBUF:
		error = _bus_dmamap_load_mbuf_sg(dmat, map, mem->u.md_mbuf,
		    NULL, &nsegs, flags);
		break;
	case MEMDESC_VMPAGES:
		error = _bus_dmamap_load_ma(dmat, map, mem->u.md_ma,
		    mem->md_len, mem->md_offset, flags, NULL, &nsegs);
		break;
	}
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}

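/*
 * Load the data described by an opencrypto buffer.  The load is never
 * deferred; BUS_DMA_NOWAIT is implied.
 */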
int
bus_dmamap_load_crp_buffer(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct crypto_buffer *cb, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int error;
	int nsegs;

	flags |= BUS_DMA_NOWAIT;
	nsegs = -1;
	error = 0;
	switch (cb->cb_type) {
	case CRYPTO_BUF_CONTIG:
		error = _bus_dmamap_load_buffer(dmat, map, cb->cb_buf,
		    cb->cb_buf_len, kernel_pmap, flags, NULL, &nsegs);
		break;
	case CRYPTO_BUF_MBUF:
		error = _bus_dmamap_load_mbuf_sg(dmat, map, cb->cb_mbuf,
		    NULL, &nsegs, flags);
		break;
	case CRYPTO_BUF_SINGLE_MBUF:
		error = _bus_dmamap_load_single_mbuf(dmat, map, cb->cb_mbuf,
		    NULL, &nsegs, flags);
		break;
	case CRYPTO_BUF_UIO:
		error = _bus_dmamap_load_uio(dmat, map, cb->cb_uio, &nsegs,
		    flags);
		break;
	case CRYPTO_BUF_VMPAGE:
		error = _bus_dmamap_load_ma(dmat, map, cb->cb_vm_page,
		    cb->cb_vm_page_len, cb->cb_vm_page_offset, flags, NULL,
		    &nsegs);
		break;
	default:
		error = EINVAL;
	}
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}

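/*
 * Load the buffer of a crypto operation; a thin wrapper around
 * bus_dmamap_load_crp_buffer().
 */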
int
bus_dmamap_load_crp(bus_dma_tag_t dmat, bus_dmamap_t map, struct cryptop *crp,
    bus_dmamap_callback_t *callback, void *callback_arg, int flags)
{
	return (bus_dmamap_load_crp_buffer(dmat, map, &crp->crp_buf, callback,
	    callback_arg, flags));
}

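/*
 * Initialize a tag template with maximally permissive defaults: any
 * address, any size, byte alignment, no boundary, and no lock function.
 *
 * Example (illustrative sketch only; "sc" is a hypothetical softc):
 *
 *	bus_dma_template_t t;
 *
 *	bus_dma_template_init(&t, bus_get_dma_tag(dev));
 *	t.maxsize = t.maxsegsize = PAGE_SIZE;
 *	t.nsegments = 1;
 *	error = bus_dma_template_tag(&t, &sc->sc_dmat);
 */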
void
bus_dma_template_init(bus_dma_template_t *t, bus_dma_tag_t parent)
{

	if (t == NULL)
		return;

	t->parent = parent;
	t->alignment = 1;
	t->boundary = 0;
	t->lowaddr = t->highaddr = BUS_SPACE_MAXADDR;
	t->maxsize = t->maxsegsize = BUS_SPACE_MAXSIZE;
	t->nsegments = BUS_SPACE_UNRESTRICTED;
	t->lockfunc = NULL;
	t->lockfuncarg = NULL;
	t->flags = 0;
}

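/*
 * Create a DMA tag from a template.  Note that the template's name
 * field is not consumed here.
 */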
int
bus_dma_template_tag(bus_dma_template_t *t, bus_dma_tag_t *dmat)
{

	if (t == NULL || dmat == NULL)
		return (EINVAL);

	return (bus_dma_tag_create(t->parent, t->alignment, t->boundary,
	    t->lowaddr, t->highaddr, NULL, NULL, t->maxsize,
	    t->nsegments, t->maxsegsize, t->flags, t->lockfunc, t->lockfuncarg,
	    dmat));
}

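/*
 * Apply an array of key/value parameters to a template.  Entries are
 * applied from the last array element to the first; an invalid key
 * triggers an assertion failure.
 */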
void
bus_dma_template_fill(bus_dma_template_t *t, bus_dma_param_t *kv, u_int count)
{
	bus_dma_param_t *pkv;

	while (count) {
		pkv = &kv[--count];
		switch (pkv->key) {
		case BD_PARAM_PARENT:
			t->parent = pkv->ptr;
			break;
		case BD_PARAM_ALIGNMENT:
			t->alignment = pkv->num;
			break;
		case BD_PARAM_BOUNDARY:
			t->boundary = pkv->num;
			break;
		case BD_PARAM_LOWADDR:
			t->lowaddr = pkv->pa;
			break;
		case BD_PARAM_HIGHADDR:
			t->highaddr = pkv->pa;
			break;
		case BD_PARAM_MAXSIZE:
			t->maxsize = pkv->num;
			break;
		case BD_PARAM_NSEGMENTS:
			t->nsegments = pkv->num;
			break;
		case BD_PARAM_MAXSEGSIZE:
			t->maxsegsize = pkv->num;
			break;
		case BD_PARAM_FLAGS:
			t->flags = pkv->num;
			break;
		case BD_PARAM_LOCKFUNC:
			t->lockfunc = pkv->ptr;
			break;
		case BD_PARAM_LOCKFUNCARG:
			t->lockfuncarg = pkv->ptr;
			break;
		case BD_PARAM_NAME:
			t->name = pkv->ptr;
			break;
		case BD_PARAM_INVALID:
		default:
			KASSERT(0, ("Invalid key %d", pkv->key));
			break;
		}
	}
}

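/*
 * Stubs used when the kernel is built without "options IOMMU": no bus
 * is switched to bus-wide DMA, and identity-mapped loads trivially
 * succeed.
 */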
#ifndef IOMMU
bool bus_dma_iommu_set_buswide(device_t dev);
int bus_dma_iommu_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t start, vm_size_t length, int flags);

bool
bus_dma_iommu_set_buswide(device_t dev)
{
	return (false);
}

int
bus_dma_iommu_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t start, vm_size_t length, int flags)
{
	return (0);
}
#endif