/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domainset.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/memdesc.h>
#include <sys/msan.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/include/busdma_impl.h>

#ifdef __i386__
#define MAX_BPAGES (Maxmem > atop(0x100000000ULL) ? 8192 : 512)
#else
#define MAX_BPAGES 8192
#endif

enum {
	BUS_DMA_COULD_BOUNCE	= 0x01,
	BUS_DMA_MIN_ALLOC_COMP	= 0x02,
	BUS_DMA_KMEM_ALLOC	= 0x04,
	BUS_DMA_FORCE_MAP	= 0x08,
};

struct bounce_page;
struct bounce_zone;

struct bus_dma_tag {
	struct bus_dma_tag_common common;
	int			map_count;
	int			bounce_flags;
	bus_dma_segment_t	*segments;
	struct bounce_zone	*bounce_zone;
};

static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Busdma parameters");

struct bus_dmamap {
	STAILQ_HEAD(, bounce_page) bpages;
	int		       pagesneeded;
	int		       pagesreserved;
	bus_dma_tag_t	       dmat;
	struct memdesc	       mem;
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	__sbintime_t	       queued_time;
	STAILQ_ENTRY(bus_dmamap) links;
#ifdef KMSAN
	struct memdesc	       kmsan_mem;
#endif
};

static struct bus_dmamap nobounce_dmamap;

static bool _bus_dmamap_pagesneeded(bus_dma_tag_t dmat, vm_paddr_t buf,
    bus_size_t buflen, int *pagesneeded);
static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    pmap_t pmap, void *buf, bus_size_t buflen, int flags);
static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t buf, bus_size_t buflen, int flags);

static MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");

#define	dmat_alignment(dmat)	((dmat)->common.alignment)
#define	dmat_bounce_flags(dmat)	((dmat)->bounce_flags)
#define	dmat_boundary(dmat)	((dmat)->common.boundary)
#define	dmat_domain(dmat)	((dmat)->common.domain)
#define	dmat_flags(dmat)	((dmat)->common.flags)
#define	dmat_highaddr(dmat)	((dmat)->common.highaddr)
#define	dmat_lowaddr(dmat)	((dmat)->common.lowaddr)
#define	dmat_lockfunc(dmat)	((dmat)->common.lockfunc)
#define	dmat_lockfuncarg(dmat)	((dmat)->common.lockfuncarg)
#define	dmat_maxsegsz(dmat)	((dmat)->common.maxsegsz)
#define	dmat_nsegments(dmat)	((dmat)->common.nsegments)

#include "../../kern/subr_busdma_bounce.c"

/*
 * On i386 kernels without 'options PAE' we need to also bounce any
 * physical addresses above 4G.
 *
 * NB: vm_paddr_t is required here since bus_addr_t is only 32 bits in
 * i386 kernels without 'options PAE'.
 */
static __inline bool
must_bounce(bus_dma_tag_t dmat, vm_paddr_t paddr)
{
#if defined(__i386__) && !defined(PAE)
	if (paddr > BUS_SPACE_MAXADDR)
		return (true);
#endif
	return (addr_needs_bounce(dmat, paddr));
}

static int
bounce_bus_dma_zone_setup(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;
	int error;

	/* Must bounce */
	if ((error = alloc_bounce_zone(dmat)) != 0)
		return (error);
	bz = dmat->bounce_zone;

	if (ptoa(bz->total_bpages) < dmat->common.maxsize) {
		int pages;

		pages = atop(dmat->common.maxsize) - bz->total_bpages;

		/* Add pages to our bounce pool */
		if (alloc_bounce_pages(dmat, pages) < pages)
			return (ENOMEM);
	}
	/* Performed initial allocation */
	dmat->bounce_flags |= BUS_DMA_MIN_ALLOC_COMP;

	return (0);
}

/*
 * Allocate a device specific dma_tag.
 */
static int
bounce_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
    bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags,
    bus_dma_lock_t *lockfunc, void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error;

	*dmat = NULL;
	error = common_bus_dma_tag_create(parent != NULL ? &parent->common :
	    NULL, alignment, boundary, lowaddr, highaddr, maxsize, nsegments,
	    maxsegsz, flags, lockfunc, lockfuncarg, sizeof(struct bus_dma_tag),
	    (void **)&newtag);
	if (error != 0)
		return (error);

	newtag->common.impl = &bus_dma_bounce_impl;
	newtag->map_count = 0;
	newtag->segments = NULL;

#ifdef KMSAN
	/*
	 * When KMSAN is configured, we need a map to store a memory descriptor
	 * which can be used for validation.
	 */
	newtag->bounce_flags |= BUS_DMA_FORCE_MAP;
#endif

	if (parent != NULL &&
	    (parent->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0)
		newtag->bounce_flags |= BUS_DMA_COULD_BOUNCE;

	if (newtag->common.lowaddr < ptoa((vm_paddr_t)Maxmem) ||
	    newtag->common.alignment > 1)
		newtag->bounce_flags |= BUS_DMA_COULD_BOUNCE;

	if ((newtag->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0 &&
	    (flags & BUS_DMA_ALLOCNOW) != 0)
		error = bounce_bus_dma_zone_setup(newtag);
	else
		error = 0;

	if (error != 0)
		free(newtag, M_DEVBUF);
	else
		*dmat = newtag;
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->common.flags : 0),
	    error);
	return (error);
}

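/*
 * Illustrative driver-side sketch (hypothetical, not part of this file):
 * creating a tag through the MI bus_dma_tag_create(9) wrapper, which lands
 * in bounce_bus_dma_tag_create() above on x86.  The softc layout and the
 * DFLTPHYS transfer size are assumptions for the example; the vestigial
 * filter arguments must be NULL.
 */
#if 0
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* inherit constraints from parent */
	    4, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr: bounce anything above 4G */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg (must be NULL) */
	    DFLTPHYS,			/* maxsize */
	    1,				/* nsegments */
	    DFLTPHYS,			/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->dma_tag);
	if (error != 0)
		return (error);
#endif
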
static bool
bounce_bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen)
{

	if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) == 0)
		return (true);
	return (!_bus_dmamap_pagesneeded(dmat, buf, buflen, NULL));
}

/*
 * Update the domain for the tag.  We may need to reallocate the zone and
 * bounce pages.
 */
static int
bounce_bus_dma_tag_set_domain(bus_dma_tag_t dmat)
{

	KASSERT(dmat->map_count == 0,
	    ("bounce_bus_dma_tag_set_domain: domain set after use"));
	if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) == 0 ||
	    dmat->bounce_zone == NULL)
		return (0);
	dmat->bounce_flags &= ~BUS_DMA_MIN_ALLOC_COMP;
	return (bounce_bus_dma_zone_setup(dmat));
}

static int
bounce_bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	int error = 0;

	if (dmat != NULL) {
		if (dmat->map_count != 0) {
			error = EBUSY;
			goto out;
		}
		if (dmat->segments != NULL)
			free(dmat->segments, M_DEVBUF);
		free(dmat, M_DEVBUF);
	}
out:
	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat, error);
	return (error);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
static int
bounce_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	struct bounce_zone *bz;
	int error, maxpages, pages;

	error = 0;

	if (dmat->segments == NULL) {
		dmat->segments = malloc_domainset(
		    sizeof(bus_dma_segment_t) * dmat->common.nsegments,
		    M_DEVBUF, DOMAINSET_PREF(dmat->common.domain), M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}
	}

	if (dmat->bounce_flags & (BUS_DMA_COULD_BOUNCE | BUS_DMA_FORCE_MAP)) {
		*mapp = malloc_domainset(sizeof(**mapp), M_DEVBUF,
		    DOMAINSET_PREF(dmat->common.domain), M_NOWAIT | M_ZERO);
		if (*mapp == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}
		STAILQ_INIT(&(*mapp)->bpages);
	} else {
		*mapp = NULL;
	}

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
		/* Must bounce */
		if (dmat->bounce_zone == NULL &&
		    (error = alloc_bounce_zone(dmat)) != 0)
			goto out;
		bz = dmat->bounce_zone;

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		if (dmat->common.alignment > 1)
			maxpages = MAX_BPAGES;
		else
			maxpages = MIN(MAX_BPAGES, Maxmem -
			    atop(dmat->common.lowaddr));
		if ((dmat->bounce_flags & BUS_DMA_MIN_ALLOC_COMP) == 0 ||
		    (bz->map_count > 0 && bz->total_bpages < maxpages)) {
			pages = MAX(atop(dmat->common.maxsize), 1);
			pages = MIN(dmat->common.nsegments, pages);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;
			if ((dmat->bounce_flags & BUS_DMA_MIN_ALLOC_COMP)
			    == 0) {
				if (error == 0) {
					dmat->bounce_flags |=
					    BUS_DMA_MIN_ALLOC_COMP;
				}
			} else
				error = 0;
		}
		bz->map_count++;
	}

out:
	if (error == 0) {
		dmat->map_count++;
	} else {
		free(*mapp, M_DEVBUF);
		*mapp = NULL;
	}

	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->common.flags, error);
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
static int
bounce_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	if (map != NULL && map != &nobounce_dmamap) {
		if (STAILQ_FIRST(&map->bpages) != NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, EBUSY);
			return (EBUSY);
		}
		if (dmat->bounce_zone)
			dmat->bounce_zone->map_count--;
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}

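/*
 * Illustrative map lifecycle sketch (hypothetical driver code, not part of
 * this file).  For a tag that can neither bounce nor is forced to carry a
 * map, bus_dmamap_create() above stores NULL in *mapp, and the NULL map is
 * accepted by the load, sync, unload and destroy paths.
 */
#if 0
	bus_dmamap_t map;

	if (bus_dmamap_create(sc->dma_tag, 0, &map) != 0)
		return (ENOMEM);
	/* ... bus_dmamap_load()/bus_dmamap_unload() cycles ... */
	bus_dmamap_destroy(sc->dma_tag, map);
#endif
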
/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
static int
bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
    bus_dmamap_t *mapp)
{
	vm_memattr_t attr;
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc_domainset(
		    sizeof(bus_dma_segment_t) * dmat->common.nsegments,
		    M_DEVBUF, DOMAINSET_PREF(dmat->common.domain), mflags);
		if (dmat->segments == NULL) {
			CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
			    __func__, dmat, dmat->common.flags, ENOMEM);
			return (ENOMEM);
		}
	}
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;
	if (flags & BUS_DMA_NOCACHE)
		attr = VM_MEMATTR_UNCACHEABLE;
	else
		attr = VM_MEMATTR_DEFAULT;

	/*
	 * Allocate the buffer from the malloc(9) allocator if...
	 *  - It's small enough to fit into a single page.
	 *  - Its alignment requirement is also smaller than the page size.
	 *  - The low address requirement is fulfilled.
	 *  - Default cache attributes are requested (WB).
	 * else allocate non-contiguous pages if...
	 *  - The page count that could get allocated doesn't exceed
	 *    nsegments even when the maximum segment size is less
	 *    than PAGE_SIZE.
	 *  - The alignment constraint isn't larger than a page boundary.
	 *  - There are no boundary-crossing constraints.
	 * else allocate a block of contiguous pages because one or more of the
	 * constraints is something that only the contig allocator can fulfill.
	 *
	 * Warn the user if malloc gets it wrong.
	 */
	if (dmat->common.maxsize <= PAGE_SIZE &&
	    dmat->common.alignment <= PAGE_SIZE &&
	    dmat->common.lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
	    attr == VM_MEMATTR_DEFAULT) {
		*vaddr = malloc_domainset_aligned(dmat->common.maxsize,
		    dmat->common.alignment, M_DEVBUF,
		    DOMAINSET_PREF(dmat->common.domain), mflags);
		KASSERT(*vaddr == NULL || ((uintptr_t)*vaddr & PAGE_MASK) +
		    dmat->common.maxsize <= PAGE_SIZE,
		    ("bounce_bus_dmamem_alloc: multi-page alloc %p maxsize "
		    "%#jx align %#jx", *vaddr, (uintmax_t)dmat->common.maxsize,
		    (uintmax_t)dmat->common.alignment));
	} else if (dmat->common.nsegments >=
	    howmany(dmat->common.maxsize, MIN(dmat->common.maxsegsz,
	    PAGE_SIZE)) &&
	    dmat->common.alignment <= PAGE_SIZE &&
	    (dmat->common.boundary % PAGE_SIZE) == 0) {
		/* Page-based multi-segment allocations allowed */
		*vaddr = kmem_alloc_attr_domainset(
		    DOMAINSET_PREF(dmat->common.domain), dmat->common.maxsize,
		    mflags, 0ul, dmat->common.lowaddr, attr);
		dmat->bounce_flags |= BUS_DMA_KMEM_ALLOC;
	} else {
		*vaddr = kmem_alloc_contig_domainset(
		    DOMAINSET_PREF(dmat->common.domain), dmat->common.maxsize,
		    mflags, 0ul, dmat->common.lowaddr,
		    dmat->common.alignment != 0 ? dmat->common.alignment : 1ul,
		    dmat->common.boundary, attr);
		dmat->bounce_flags |= BUS_DMA_KMEM_ALLOC;
	}
	if (*vaddr == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->common.flags, ENOMEM);
		return (ENOMEM);
	} else if (!vm_addr_align_ok(vtophys(*vaddr), dmat->common.alignment)) {
		printf("bus_dmamem_alloc failed to align memory properly.\n");
	}
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->common.flags, 0);
	return (0);
}

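/*
 * Illustrative sketch (hypothetical, not part of this file): a driver
 * allocating a small descriptor ring through bus_dmamem_alloc(9).  With
 * maxsize and alignment at or below PAGE_SIZE, a satisfied lowaddr and
 * default (WB) attributes, the request is served by malloc(9) per the
 * decision chain above; the returned map is NULL and needs no bouncing.
 */
#if 0
	void *ring;
	bus_dmamap_t map;

	if (bus_dmamem_alloc(sc->ring_tag, &ring,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &map) != 0)
		return (ENOMEM);
	/* ... use the ring ... */
	bus_dmamem_free(sc->ring_tag, ring, map);
#endif
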
/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.
 */
static void
bounce_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL and the BUS_DMA_KMEM_ALLOC flag cleared if malloc()
	 * was used and set if kmem_alloc_contig() was used.
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if ((dmat->bounce_flags & BUS_DMA_KMEM_ALLOC) == 0)
		free(vaddr, M_DEVBUF);
	else
		kmem_free(vaddr, dmat->common.maxsize);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat,
	    dmat->bounce_flags);
}

static bool
_bus_dmamap_pagesneeded(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen,
    int *pagesneeded)
{
	vm_paddr_t curaddr;
	bus_size_t sgsize;
	int count;

	/*
	 * Count the number of bounce pages needed in order to
	 * complete this transfer
	 */
	count = 0;
	curaddr = buf;
	while (buflen != 0) {
		sgsize = buflen;
		if (must_bounce(dmat, curaddr)) {
			sgsize = MIN(sgsize,
			    PAGE_SIZE - (curaddr & PAGE_MASK));
			if (pagesneeded == NULL)
				return (true);
			count++;
		}
		curaddr += sgsize;
		buflen -= sgsize;
	}

	if (pagesneeded != NULL)
		*pagesneeded = count;
	return (count != 0);
}

static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags)
{

	if (map != &nobounce_dmamap && map->pagesneeded == 0) {
		_bus_dmamap_pagesneeded(dmat, buf, buflen, &map->pagesneeded);
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}

static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	vm_paddr_t paddr;
	bus_size_t sg_len;

	if (map != &nobounce_dmamap && map->pagesneeded == 0) {
		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->common.lowaddr,
		    ptoa((vm_paddr_t)Maxmem),
		    dmat->common.boundary, dmat->common.alignment);
		CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
		    map, &nobounce_dmamap, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			sg_len = MIN(vendaddr - vaddr,
			    PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK));
			if (pmap == kernel_pmap)
				paddr = pmap_kextract(vaddr);
			else
				paddr = pmap_extract(pmap, vaddr);
			if (must_bounce(dmat, paddr)) {
				sg_len = roundup2(sg_len,
				    dmat->common.alignment);
				map->pagesneeded++;
			}
			vaddr += sg_len;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}

static void
_bus_dmamap_count_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma,
    int ma_offs, bus_size_t buflen, int flags)
{
	bus_size_t sg_len;
	int page_index;
	vm_paddr_t paddr;

	if (map != &nobounce_dmamap && map->pagesneeded == 0) {
		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->common.lowaddr,
		    ptoa((vm_paddr_t)Maxmem),
		    dmat->common.boundary, dmat->common.alignment);
		CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
		    map, &nobounce_dmamap, map->pagesneeded);

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		page_index = 0;
		while (buflen > 0) {
			paddr = VM_PAGE_TO_PHYS(ma[page_index]) + ma_offs;
			sg_len = PAGE_SIZE - ma_offs;
			sg_len = MIN(sg_len, buflen);
			if (must_bounce(dmat, paddr)) {
				sg_len = roundup2(sg_len,
				    dmat->common.alignment);
				KASSERT(vm_addr_align_ok(sg_len,
				    dmat->common.alignment),
				    ("Segment size is not aligned"));
				map->pagesneeded++;
			}
			if (((ma_offs + sg_len) & ~PAGE_MASK) != 0)
				page_index++;
			ma_offs = (ma_offs + sg_len) & PAGE_MASK;
			KASSERT(buflen >= sg_len,
			    ("Segment length overruns original buffer"));
			buflen -= sg_len;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}

/*
 * Utility function to load a physical buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
static int
bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
    int *segp)
{
	bus_size_t sgsize;
	vm_paddr_t curaddr;
	int error;

	if (map == NULL)
		map = &nobounce_dmamap;

	if (segs == NULL)
		segs = dmat->segments;

	if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	while (buflen > 0) {
		curaddr = buf;
		sgsize = buflen;
		if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0 &&
		    map->pagesneeded != 0 &&
		    must_bounce(dmat, curaddr)) {
			sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
			curaddr = add_bounce_page(dmat, map, 0, curaddr, 0,
			    sgsize);
		}

		if (!_bus_dmamap_addsegs(dmat, map, curaddr, sgsize, segs,
		    segp))
			break;
		buf += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Utility function to load a linear buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
static int
bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
    int *segp)
{
	bus_size_t sgsize;
	vm_paddr_t curaddr;
	vm_offset_t kvaddr, vaddr;
	int error;

	if (map == NULL)
		map = &nobounce_dmamap;

	if (segs == NULL)
		segs = dmat->segments;

	if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	vaddr = (vm_offset_t)buf;
	while (buflen > 0) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap == kernel_pmap) {
			curaddr = pmap_kextract(vaddr);
			kvaddr = vaddr;
		} else {
			curaddr = pmap_extract(pmap, vaddr);
			kvaddr = 0;
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = MIN(buflen, PAGE_SIZE - (curaddr & PAGE_MASK));
		if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0 &&
		    map->pagesneeded != 0 &&
		    must_bounce(dmat, curaddr)) {
			sgsize = roundup2(sgsize, dmat->common.alignment);
			curaddr = add_bounce_page(dmat, map, kvaddr, curaddr, 0,
			    sgsize);
		}
		if (!_bus_dmamap_addsegs(dmat, map, curaddr, sgsize, segs,
		    segp))
			break;
		vaddr += sgsize;
		buflen -= MIN(sgsize, buflen); /* avoid underflow */
	}

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

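/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * the usual way the loader above is reached, via bus_dmamap_load(9) on a
 * wired kernel buffer.  With BUS_DMA_NOWAIT the callback runs before
 * bus_dmamap_load() returns and EINPROGRESS is never reported.
 */
#if 0
static void
foo_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct foo_softc *sc = arg;

	if (error != 0)
		return;
	/* Program the device with segs[0 .. nseg - 1]. */
	sc->ring_busaddr = segs[0].ds_addr;
}

	/* ... in the transfer start path ... */
	error = bus_dmamap_load(sc->dma_tag, map, buf, buflen,
	    foo_load_cb, sc, BUS_DMA_NOWAIT);
#endif
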
static int
bounce_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t buflen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{
	vm_paddr_t paddr, next_paddr;
	int error, page_index;
	bus_size_t sgsize;

	if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
		/*
		 * If we have to keep the offset of each page, this function
		 * is not suitable; fall back to bus_dmamap_load_ma_triv,
		 * which does the right thing in this case.
		 */
		error = bus_dmamap_load_ma_triv(dmat, map, ma, buflen, ma_offs,
		    flags, segs, segp);
		return (error);
	}

	if (map == NULL)
		map = &nobounce_dmamap;

	if (segs == NULL)
		segs = dmat->segments;

	if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_ma(dmat, map, ma, ma_offs, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	page_index = 0;
	while (buflen > 0) {
		/*
		 * Compute the segment size, and adjust counts.
		 */
		paddr = VM_PAGE_TO_PHYS(ma[page_index]) + ma_offs;
		sgsize = MIN(buflen, PAGE_SIZE - ma_offs);
		if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0 &&
		    map->pagesneeded != 0 &&
		    must_bounce(dmat, paddr)) {
			sgsize = roundup2(sgsize, dmat->common.alignment);
			KASSERT(vm_addr_align_ok(sgsize,
			    dmat->common.alignment),
			    ("Segment size is not aligned"));
			/*
			 * Check if two pages of the user-provided buffer
			 * are used.
			 */
			if ((ma_offs + sgsize) > PAGE_SIZE)
				next_paddr =
				    VM_PAGE_TO_PHYS(ma[page_index + 1]);
			else
				next_paddr = 0;
			paddr = add_bounce_page(dmat, map, 0, paddr,
			    next_paddr, sgsize);
		}
		if (!_bus_dmamap_addsegs(dmat, map, paddr, sgsize, segs,
		    segp))
			break;
		KASSERT(buflen >= sgsize,
		    ("Segment length overruns original buffer"));
		buflen -= MIN(sgsize, buflen); /* avoid underflow */
		if (((ma_offs + sgsize) & ~PAGE_MASK) != 0)
			page_index++;
		ma_offs = (ma_offs + sgsize) & PAGE_MASK;
	}

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

static void
bounce_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
{

	if (map == NULL)
		return;
	map->mem = *mem;
	map->dmat = dmat;
	map->callback = callback;
	map->callback_arg = callback_arg;
}

static bus_dma_segment_t *
bounce_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, int error)
{

	if (segs == NULL)
		segs = dmat->segments;
	return (segs);
}

/*
 * Release the mapping held by map.
 */
static void
bounce_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map == NULL)
		return;

	free_bounce_pages(dmat, map);
}

static void
bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dmasync_op_t op)
{
	struct bounce_page *bpage;
	vm_offset_t datavaddr, tempvaddr;
	bus_size_t datacount1, datacount2;

	if (map == NULL)
		goto out;
	if ((bpage = STAILQ_FIRST(&map->bpages)) == NULL)
		goto out;

	/*
	 * Handle data bouncing.  We might also want to add support for
	 * invalidating the caches on broken hardware.
	 */
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
	    "performing bounce", __func__, dmat, dmat->common.flags, op);

	if ((op & BUS_DMASYNC_PREWRITE) != 0) {
		while (bpage != NULL) {
			tempvaddr = 0;
			datavaddr = bpage->datavaddr;
			datacount1 = bpage->datacount;
			if (datavaddr == 0) {
				tempvaddr =
				    pmap_quick_enter_page(bpage->datapage[0]);
				datavaddr = tempvaddr | bpage->dataoffs;
				datacount1 = min(PAGE_SIZE - bpage->dataoffs,
				    datacount1);
			}

			bcopy((void *)datavaddr,
			    (void *)bpage->vaddr, datacount1);

			if (tempvaddr != 0)
				pmap_quick_remove_page(tempvaddr);

			if (bpage->datapage[1] == 0) {
				KASSERT(datacount1 == bpage->datacount,
		("Mismatch between data size and provided memory space"));
				goto next_w;
			}

			/*
			 * We are dealing with an unmapped buffer that spans
			 * two pages.
			 */
			datavaddr = pmap_quick_enter_page(bpage->datapage[1]);
			datacount2 = bpage->datacount - datacount1;
			bcopy((void *)datavaddr,
			    (void *)(bpage->vaddr + datacount1), datacount2);
			pmap_quick_remove_page(datavaddr);

next_w:
			bpage = STAILQ_NEXT(bpage, links);
		}
		dmat->bounce_zone->total_bounced++;
	}

	if ((op & BUS_DMASYNC_POSTREAD) != 0) {
		while (bpage != NULL) {
			tempvaddr = 0;
			datavaddr = bpage->datavaddr;
			datacount1 = bpage->datacount;
			if (datavaddr == 0) {
				tempvaddr =
				    pmap_quick_enter_page(bpage->datapage[0]);
				datavaddr = tempvaddr | bpage->dataoffs;
				datacount1 = min(PAGE_SIZE - bpage->dataoffs,
				    datacount1);
			}

			bcopy((void *)bpage->vaddr, (void *)datavaddr,
			    datacount1);

			if (tempvaddr != 0)
				pmap_quick_remove_page(tempvaddr);

			if (bpage->datapage[1] == 0) {
				KASSERT(datacount1 == bpage->datacount,
		("Mismatch between data size and provided memory space"));
				goto next_r;
			}

			/*
			 * We are dealing with an unmapped buffer that spans
			 * two pages.
			 */
			datavaddr = pmap_quick_enter_page(bpage->datapage[1]);
			datacount2 = bpage->datacount - datacount1;
			bcopy((void *)(bpage->vaddr + datacount1),
			    (void *)datavaddr, datacount2);
			pmap_quick_remove_page(datavaddr);

next_r:
			bpage = STAILQ_NEXT(bpage, links);
		}
		dmat->bounce_zone->total_bounced++;
	}
out:
	atomic_thread_fence_rel();
	if (map != NULL)
		kmsan_bus_dmamap_sync(&map->kmsan_mem, op);
}

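/*
 * Illustrative sync-ordering sketch (hypothetical driver code, not part of
 * this file).  The copies above only happen at these points, so a driver
 * must bracket every transfer: PREWRITE copies the caller's data into the
 * bounce pages before the device reads them, and POSTREAD copies data the
 * device wrote back out of the bounce pages.
 */
#if 0
	/* Buffer the device will read (host -> device). */
	bus_dmamap_sync(sc->dma_tag, map, BUS_DMASYNC_PREWRITE);
	foo_start_dma(sc);		/* hypothetical device start */
	/* ... in the completion interrupt, for data the device wrote ... */
	bus_dmamap_sync(sc->dma_tag, map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->dma_tag, map);
#endif
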
#ifdef KMSAN
static void
bounce_bus_dmamap_load_kmsan(bus_dmamap_t map, struct memdesc *mem)
{
	if (map == NULL)
		return;
	memcpy(&map->kmsan_mem, mem, sizeof(map->kmsan_mem));
}
#endif

struct bus_dma_impl bus_dma_bounce_impl = {
	.tag_create = bounce_bus_dma_tag_create,
	.tag_destroy = bounce_bus_dma_tag_destroy,
	.tag_set_domain = bounce_bus_dma_tag_set_domain,
	.id_mapped = bounce_bus_dma_id_mapped,
	.map_create = bounce_bus_dmamap_create,
	.map_destroy = bounce_bus_dmamap_destroy,
	.mem_alloc = bounce_bus_dmamem_alloc,
	.mem_free = bounce_bus_dmamem_free,
	.load_phys = bounce_bus_dmamap_load_phys,
	.load_buffer = bounce_bus_dmamap_load_buffer,
	.load_ma = bounce_bus_dmamap_load_ma,
	.map_waitok = bounce_bus_dmamap_waitok,
	.map_complete = bounce_bus_dmamap_complete,
	.map_unload = bounce_bus_dmamap_unload,
	.map_sync = bounce_bus_dmamap_sync,
#ifdef KMSAN
	.load_kmsan = bounce_bus_dmamap_load_kmsan,
#endif
};