/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012 EMC Corp.
 * All rights reserved.
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bus.h"
#include "opt_iommu.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>

#include <opencrypto/cryptodev.h>

#include <machine/bus.h>

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}
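
/*
 * Example (hypothetical driver code, not part of this file): a driver
 * that allows deferred callbacks typically passes this helper, together
 * with the mutex protecting its state, when creating a tag:
 *
 *	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
 *	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
 *	    MAXBSIZE, 1, MAXBSIZE, 0,
 *	    busdma_lock_mutex, &sc->sc_mtx, &sc->sc_dmat);
 *
 * busdma then acquires sc->sc_mtx around any deferred callback it runs,
 * so the callback sees the same locking environment as a direct call.
 */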

/*
 * _busdma_dflt_lock should never get called.  It gets put into the dma tag
 * when lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 *
 * XXX Should have a way to identify which driver is responsible here.
 */
void
_busdma_dflt_lock(void *arg, bus_dma_lock_op_t op)
{

	panic("driver error: _busdma_dflt_lock called");
}
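
/*
 * For contrast, a tag whose maps are only ever loaded with BUS_DMA_NOWAIT
 * may pass lockfunc == NULL and ends up with the panic stub above.  A
 * minimal sketch (hypothetical softc, not part of this file):
 *
 *	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
 *	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
 *	    MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->sc_dmat);
 */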

/*
 * Load up data starting at offset within a region specified by a
 * list of virtual address ranges until either the length or the region
 * is exhausted.
 */
static int
_bus_dmamap_load_vlist(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *list, int sglist_cnt, struct pmap *pmap, int *nsegs,
    int flags, size_t offset, size_t length)
{
	int error;

	error = 0;
	for (; sglist_cnt > 0 && length != 0; sglist_cnt--, list++) {
		char *addr;
		size_t ds_len;

		KASSERT((offset < list->ds_len),
		    ("Invalid mid-segment offset"));
		addr = (char *)(uintptr_t)list->ds_addr + offset;
		ds_len = list->ds_len - offset;
		offset = 0;
		if (ds_len > length)
			ds_len = length;
		length -= ds_len;
		KASSERT((ds_len != 0), ("Segment length is zero"));
		error = _bus_dmamap_load_buffer(dmat, map, addr, ds_len, pmap,
		    flags, NULL, nsegs);
		if (error)
			break;
	}
	return (error);
}

/*
 * Load a list of physical addresses.
 */
static int
_bus_dmamap_load_plist(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *list, int sglist_cnt, int *nsegs, int flags)
{
	int error;

	error = 0;
	for (; sglist_cnt > 0; sglist_cnt--, list++) {
		error = _bus_dmamap_load_phys(dmat, map,
		    (vm_paddr_t)list->ds_addr, list->ds_len, flags, NULL,
		    nsegs);
		if (error)
			break;
	}
	return (error);
}

/*
 * Load an unmapped mbuf.
 */
static int
_bus_dmamap_load_mbuf_epg(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m, bus_dma_segment_t *segs, int *nsegs, int flags)
{
	int error, i, off, len, pglen, pgoff, seglen, segoff;

	M_ASSERTEXTPG(m);

	len = m->m_len;
	error = 0;

	/* Skip over any data removed from the front. */
	off = mtod(m, vm_offset_t);

	if (m->m_epg_hdrlen != 0) {
		if (off >= m->m_epg_hdrlen) {
			off -= m->m_epg_hdrlen;
		} else {
			seglen = m->m_epg_hdrlen - off;
			segoff = off;
			seglen = min(seglen, len);
			off = 0;
			len -= seglen;
			error = _bus_dmamap_load_buffer(dmat, map,
			    &m->m_epg_hdr[segoff], seglen, kernel_pmap,
			    flags, segs, nsegs);
		}
	}
	pgoff = m->m_epg_1st_off;
	for (i = 0; i < m->m_epg_npgs && error == 0 && len > 0; i++) {
		pglen = m_epg_pagelen(m, i, pgoff);
		if (off >= pglen) {
			off -= pglen;
			pgoff = 0;
			continue;
		}
		seglen = pglen - off;
		segoff = pgoff + off;
		off = 0;
		seglen = min(seglen, len);
		len -= seglen;
		error = _bus_dmamap_load_phys(dmat, map,
		    m->m_epg_pa[i] + segoff, seglen, flags, segs, nsegs);
		pgoff = 0;
	}
	if (len != 0 && error == 0) {
		KASSERT((off + len) <= m->m_epg_trllen,
		    ("off + len > trail (%d + %d > %d)", off, len,
		    m->m_epg_trllen));
		error = _bus_dmamap_load_buffer(dmat, map,
		    &m->m_epg_trail[off], len, kernel_pmap, flags, segs,
		    nsegs);
	}
	return (error);
}

/*
 * Load a single mbuf.
 */
static int
_bus_dmamap_load_single_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m, bus_dma_segment_t *segs, int *nsegs, int flags)
{
	int error;

	error = 0;
	if ((m->m_flags & M_EXTPG) != 0)
		error = _bus_dmamap_load_mbuf_epg(dmat, map, m, segs, nsegs,
		    flags);
	else
		error = _bus_dmamap_load_buffer(dmat, map, m->m_data, m->m_len,
		    kernel_pmap, flags | BUS_DMA_LOAD_MBUF, segs, nsegs);
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, *nsegs);
	return (error);
}

/*
 * Load an mbuf chain.
 */
static int
_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, int flags)
{
	struct mbuf *m;
	int error;

	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len > 0) {
			if ((m->m_flags & M_EXTPG) != 0)
				error = _bus_dmamap_load_mbuf_epg(dmat,
				    map, m, segs, nsegs, flags);
			else
				error = _bus_dmamap_load_buffer(dmat, map,
				    m->m_data, m->m_len, kernel_pmap,
				    flags | BUS_DMA_LOAD_MBUF, segs, nsegs);
		}
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, *nsegs);
	return (error);
}

int
bus_dmamap_load_ma_triv(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{
	vm_paddr_t paddr;
	bus_size_t len;
	int error, i;

	error = 0;
	for (i = 0; tlen > 0; i++, tlen -= len) {
		len = min(PAGE_SIZE - ma_offs, tlen);
		paddr = VM_PAGE_TO_PHYS(ma[i]) + ma_offs;
		error = _bus_dmamap_load_phys(dmat, map, paddr, len,
		    flags, segs, segp);
		if (error != 0)
			break;
		ma_offs = 0;
	}
	return (error);
}
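
/*
 * This is the trivial page-by-page loader that backends can use when they
 * have no specialized _bus_dmamap_load_ma() implementation.  For example,
 * a load of tlen = 2 * PAGE_SIZE with ma_offs = 512 issues three
 * _bus_dmamap_load_phys() calls: PAGE_SIZE - 512 bytes from ma[0],
 * PAGE_SIZE bytes from ma[1], and the final 512 bytes from ma[2].
 */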

/*
 * Load a uio.
 */
static int
_bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    int *nsegs, int flags)
{
	bus_size_t resid;
	bus_size_t minlen;
	struct iovec *iov;
	pmap_t pmap;
	caddr_t addr;
	int error, i;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = kernel_pmap;
	resid = uio->uio_resid;
	iov = uio->uio_iov;
	error = 0;

	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */

		addr = (caddr_t) iov[i].iov_base;
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map, addr,
			    minlen, pmap, flags, NULL, nsegs);
			resid -= minlen;
		}
	}

	return (error);
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	struct memdesc mem;
	int error;
	int nsegs;

#ifdef KMSAN
	mem = memdesc_vaddr(buf, buflen);
	_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif

	if ((flags & BUS_DMA_NOWAIT) == 0) {
		mem = memdesc_vaddr(buf, buflen);
		_bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg);
	}

	nsegs = -1;
	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, kernel_pmap,
	    flags, NULL, &nsegs);
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}
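
/*
 * A minimal synchronous usage sketch (hypothetical driver code, not part
 * of this file).  With BUS_DMA_NOWAIT the callback always runs before
 * bus_dmamap_load() returns, so a driver can capture the segments:
 *
 *	static void
 *	foo_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		if (error != 0)
 *			return;
 *		sc->sc_busaddr = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamap_load(sc->sc_dmat, sc->sc_map, sc->sc_ring,
 *	    sc->sc_ring_len, foo_dma_cb, sc, BUS_DMA_NOWAIT);
 *
 * Without BUS_DMA_NOWAIT, an EINPROGRESS return means the load was
 * deferred and the callback will fire later from busdma_swi().
 */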

int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int nsegs, error;

	M_ASSERTPKTHDR(m0);

#ifdef KMSAN
	struct memdesc mem = memdesc_mbuf(m0);
	_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif

	flags |= BUS_DMA_NOWAIT;
	nsegs = -1;
	error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, NULL, &nsegs, flags);
	++nsegs;

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, m0->m_pkthdr.len, error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);
	return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dma_segment_t *segs, int *nsegs, int flags)
{
	int error;

#ifdef KMSAN
	struct memdesc mem = memdesc_mbuf(m0);
	_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif

	flags |= BUS_DMA_NOWAIT;
	*nsegs = -1;
	error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags);
	++*nsegs;
	_bus_dmamap_complete(dmat, map, segs, *nsegs, error);
	return (error);
}
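
/*
 * This segment-array variant is the usual NIC transmit path.  On EFBIG
 * the chain has more fragments than the tag allows and is commonly
 * collapsed and retried.  A sketch (hypothetical names and limits, not
 * part of this file):
 *
 *	bus_dma_segment_t segs[FOO_MAXSEGS];
 *	struct mbuf *n;
 *	int error, nsegs;
 *
 *	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, txb->map, m, segs,
 *	    &nsegs, BUS_DMA_NOWAIT);
 *	if (error == EFBIG) {
 *		n = m_collapse(m, M_NOWAIT, FOO_MAXSEGS);
 *		if (n == NULL)
 *			return (ENOBUFS);
 *		m = n;
 *		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, txb->map, m,
 *		    segs, &nsegs, BUS_DMA_NOWAIT);
 *	}
 */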

int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int nsegs, error;

#ifdef KMSAN
	struct memdesc mem = memdesc_uio(uio);
	_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif

	flags |= BUS_DMA_NOWAIT;
	nsegs = -1;
	error = _bus_dmamap_load_uio(dmat, map, uio, &nsegs, flags);
	nsegs++;

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, uio->uio_resid, error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);
	return (error);
}
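
/*
 * A brief sketch (hypothetical callback, not part of this file); the
 * callback2 form also receives the mapped size, here the uio's residual
 * count:
 *
 *	error = bus_dmamap_load_uio(sc->sc_dmat, sc->sc_map, uio,
 *	    foo_uio_cb, sc, BUS_DMA_NOWAIT);
 */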

int
bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb,
		    bus_dmamap_callback_t *callback, void *callback_arg,
		    int flags)
{
	struct ccb_hdr *ccb_h;
	struct memdesc mem;

	ccb_h = &ccb->ccb_h;
	if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_NONE) {
		callback(callback_arg, NULL, 0, 0);
		return (0);
	}

	mem = memdesc_ccb(ccb);
	return (bus_dmamap_load_mem(dmat, map, &mem, callback, callback_arg,
	    flags));
}
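
/*
 * CAM SIMs typically call this from their action routine to map a CCB's
 * data; CAM_DIR_NONE commands short-circuit above with an immediate
 * zero-segment callback.  A sketch (hypothetical names):
 *
 *	error = bus_dmamap_load_ccb(sc->sc_dmat, slot->map, ccb,
 *	    foo_ccb_mapped, slot, 0);
 *
 * An EINPROGRESS return means the load was deferred and the callback
 * will run once mapping resources become available.
 */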

int
bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio,
		    bus_dmamap_callback_t *callback, void *callback_arg,
		    int flags)
{
	struct memdesc mem;

	mem = memdesc_bio(bio);
	return (bus_dmamap_load_mem(dmat, map, &mem, callback, callback_arg,
	    flags));
}

int
bus_dmamap_load_mem(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int error;
	int nsegs;

#ifdef KMSAN
	_bus_dmamap_load_kmsan(dmat, map, mem);
#endif

	if ((flags & BUS_DMA_NOWAIT) == 0)
		_bus_dmamap_waitok(dmat, map, mem, callback, callback_arg);

	nsegs = -1;
	error = 0;
	switch (mem->md_type) {
	case MEMDESC_VADDR:
		error = _bus_dmamap_load_buffer(dmat, map, mem->u.md_vaddr,
		    mem->md_len, kernel_pmap, flags, NULL, &nsegs);
		break;
	case MEMDESC_PADDR:
		error = _bus_dmamap_load_phys(dmat, map, mem->u.md_paddr,
		    mem->md_len, flags, NULL, &nsegs);
		break;
	case MEMDESC_VLIST:
		error = _bus_dmamap_load_vlist(dmat, map, mem->u.md_list,
		    mem->md_nseg, kernel_pmap, &nsegs, flags, 0, SIZE_T_MAX);
		break;
	case MEMDESC_PLIST:
		error = _bus_dmamap_load_plist(dmat, map, mem->u.md_list,
		    mem->md_nseg, &nsegs, flags);
		break;
	case MEMDESC_UIO:
		error = _bus_dmamap_load_uio(dmat, map, mem->u.md_uio,
		    &nsegs, flags);
		break;
	case MEMDESC_MBUF:
		error = _bus_dmamap_load_mbuf_sg(dmat, map, mem->u.md_mbuf,
		    NULL, &nsegs, flags);
		break;
	case MEMDESC_VMPAGES:
		error = _bus_dmamap_load_ma(dmat, map, mem->u.md_ma,
		    mem->md_len, mem->md_offset, flags, NULL, &nsegs);
		break;
	}
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}
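
/*
 * bus_dmamap_load_mem() is the generic entry point behind the CCB and
 * bio wrappers above; callers with other source types can construct the
 * memdesc themselves.  A sketch for a plain kernel buffer (hypothetical
 * names, not part of this file):
 *
 *	struct memdesc mem;
 *
 *	mem = memdesc_vaddr(sc->sc_buf, sc->sc_buflen);
 *	error = bus_dmamap_load_mem(sc->sc_dmat, sc->sc_map, &mem,
 *	    foo_dma_cb, sc, BUS_DMA_NOWAIT);
 */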

int
bus_dmamap_load_crp_buffer(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct crypto_buffer *cb, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int error;
	int nsegs;

	flags |= BUS_DMA_NOWAIT;
	nsegs = -1;
	error = 0;
	switch (cb->cb_type) {
	case CRYPTO_BUF_CONTIG:
		error = _bus_dmamap_load_buffer(dmat, map, cb->cb_buf,
		    cb->cb_buf_len, kernel_pmap, flags, NULL, &nsegs);
		break;
	case CRYPTO_BUF_MBUF:
		error = _bus_dmamap_load_mbuf_sg(dmat, map, cb->cb_mbuf,
		    NULL, &nsegs, flags);
		break;
	case CRYPTO_BUF_SINGLE_MBUF:
		error = _bus_dmamap_load_single_mbuf(dmat, map, cb->cb_mbuf,
		    NULL, &nsegs, flags);
		break;
	case CRYPTO_BUF_UIO:
		error = _bus_dmamap_load_uio(dmat, map, cb->cb_uio, &nsegs,
		    flags);
		break;
	case CRYPTO_BUF_VMPAGE:
		error = _bus_dmamap_load_ma(dmat, map, cb->cb_vm_page,
		    cb->cb_vm_page_len, cb->cb_vm_page_offset, flags, NULL,
		    &nsegs);
		break;
	default:
		error = EINVAL;
	}
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}

int
bus_dmamap_load_crp(bus_dma_tag_t dmat, bus_dmamap_t map, struct cryptop *crp,
    bus_dmamap_callback_t *callback, void *callback_arg, int flags)
{
	return (bus_dmamap_load_crp_buffer(dmat, map, &crp->crp_buf, callback,
	    callback_arg, flags));
}
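
/*
 * Crypto drivers use this to map a request's input buffer; sessions with
 * a separate output buffer can additionally pass &crp->crp_obuf to
 * bus_dmamap_load_crp_buffer().  Loads are forced to BUS_DMA_NOWAIT, so
 * the callback completes before the call returns.  A sketch (hypothetical
 * names):
 *
 *	error = bus_dmamap_load_crp(sc->sc_dmat, s->map, crp,
 *	    foo_crp_mapped, s, BUS_DMA_NOWAIT);
 */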

void
bus_dma_template_init(bus_dma_template_t *t, bus_dma_tag_t parent)
{

	if (t == NULL)
		return;

	t->parent = parent;
	t->alignment = 1;
	t->boundary = 0;
	t->lowaddr = t->highaddr = BUS_SPACE_MAXADDR;
	t->maxsize = t->maxsegsize = BUS_SPACE_MAXSIZE;
	t->nsegments = BUS_SPACE_UNRESTRICTED;
	t->lockfunc = NULL;
	t->lockfuncarg = NULL;
	t->flags = 0;
}

int
bus_dma_template_tag(bus_dma_template_t *t, bus_dma_tag_t *dmat)
{

	if (t == NULL || dmat == NULL)
		return (EINVAL);

	return (bus_dma_tag_create(t->parent, t->alignment, t->boundary,
	    t->lowaddr, t->highaddr, NULL, NULL, t->maxsize,
	    t->nsegments, t->maxsegsize, t->flags, t->lockfunc, t->lockfuncarg,
	    dmat));
}

void
bus_dma_template_fill(bus_dma_template_t *t, bus_dma_param_t *kv, u_int count)
{
	bus_dma_param_t *pkv;

	while (count) {
		pkv = &kv[--count];
		switch (pkv->key) {
		case BD_PARAM_PARENT:
			t->parent = pkv->ptr;
			break;
		case BD_PARAM_ALIGNMENT:
			t->alignment = pkv->num;
			break;
		case BD_PARAM_BOUNDARY:
			t->boundary = pkv->num;
			break;
		case BD_PARAM_LOWADDR:
			t->lowaddr = pkv->pa;
			break;
		case BD_PARAM_HIGHADDR:
			t->highaddr = pkv->pa;
			break;
		case BD_PARAM_MAXSIZE:
			t->maxsize = pkv->num;
			break;
		case BD_PARAM_NSEGMENTS:
			t->nsegments = pkv->num;
			break;
		case BD_PARAM_MAXSEGSIZE:
			t->maxsegsize = pkv->num;
			break;
		case BD_PARAM_FLAGS:
			t->flags = pkv->num;
			break;
		case BD_PARAM_LOCKFUNC:
			t->lockfunc = pkv->ptr;
			break;
		case BD_PARAM_LOCKFUNCARG:
			t->lockfuncarg = pkv->ptr;
			break;
		case BD_PARAM_NAME:
			t->name = pkv->ptr;
			break;
		case BD_PARAM_INVALID:
		default:
			KASSERT(0, ("Invalid key %d\n", pkv->key));
			break;
		}
	}
}
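
/*
 * The template KPI above lets a driver spell out only the parameters that
 * differ from the defaults instead of the full bus_dma_tag_create()
 * argument list.  A minimal sketch using the BUS_DMA_TEMPLATE_FILL()
 * convenience macro from <sys/bus_dma.h> (hypothetical limits, not part
 * of this file):
 *
 *	bus_dma_template_t t;
 *
 *	bus_dma_template_init(&t, bus_get_dma_tag(dev));
 *	BUS_DMA_TEMPLATE_FILL(&t, BD_NSEGMENTS(FOO_MAXSEGS),
 *	    BD_MAXSIZE(FOO_MAXIO), BD_MAXSEGSIZE(PAGE_SIZE),
 *	    BD_LOWADDR(BUS_SPACE_MAXADDR_32BIT));
 *	error = bus_dma_template_tag(&t, &sc->sc_dmat);
 */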

#ifndef IOMMU
bool bus_dma_iommu_set_buswide(device_t dev);
int bus_dma_iommu_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t start, vm_size_t length, int flags);

bool
bus_dma_iommu_set_buswide(device_t dev)
{
	return (false);
}

int
bus_dma_iommu_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t start, vm_size_t length, int flags)
{
	return (0);
}
#endif