/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Common code for managing bounce pages for bus_dma backends.  Because
 * this code currently assumes it can access internal members of opaque
 * types like bus_dma_tag_t and bus_dmamap, it is #include'd by backends
 * rather than being compiled standalone.
 *
 * Prerequisites:
 *
 * - M_BUSDMA malloc type
 * - struct bus_dmamap
 * - hw_busdma SYSCTL_NODE
 * - macros to access the following fields of bus_dma_tag_t:
 *   - dmat_alignment()
 *   - dmat_flags()
 *   - dmat_bounce_flags()
 *   - dmat_boundary()
 *   - dmat_lowaddr()
 *   - dmat_highaddr()
 *   - dmat_maxsegsz()
 *   - dmat_nsegments()
 *   - dmat_lockfunc()
 *   - dmat_lockfuncarg()
 * - optionally, dmat_domain() to enable NUMA-aware bounce zones
 */
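
/*
 * For illustration only: a backend might satisfy the macro prerequisites
 * with accessors along the lines of this hypothetical sketch.  The
 * right-hand sides are assumptions; the real definitions depend on how
 * each backend lays out its tag structure.
 *
 *	#define	dmat_alignment(dmat)	((dmat)->alignment)
 *	#define	dmat_flags(dmat)	((dmat)->flags)
 *	#define	dmat_lowaddr(dmat)	((dmat)->lowaddr)
 *	#define	dmat_lockfunc(dmat)	((dmat)->lockfunc)
 *	#define	dmat_lockfuncarg(dmat)	((dmat)->lockfuncarg)
 */
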
47
48 #include <sys/kthread.h>
49 #include <sys/sched.h>
50
51 struct bounce_page {
52 vm_offset_t vaddr; /* kva of bounce buffer */
53 bus_addr_t busaddr; /* Physical address */
54 vm_offset_t datavaddr; /* kva of client data */
55 #if defined(__amd64__) || defined(__i386__)
56 vm_page_t datapage[2]; /* physical page(s) of client data */
57 #else
58 vm_page_t datapage; /* physical page of client data */
59 #endif
60 vm_offset_t dataoffs; /* page offset of client data */
61 bus_size_t datacount; /* client data count */
62 STAILQ_ENTRY(bounce_page) links;
63 };
64
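/*
 * A bounce zone holds the bounce pages shared by tags with compatible
 * alignment and exclusion-window constraints (and, where supported, the
 * same memory domain), along with usage statistics and a waiting list
 * of maps whose page reservations are deferred.
 */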
struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(, bounce_page) bounce_page_list;
	STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		map_count;
#ifdef dmat_domain
	int		domain;
#endif
	sbintime_t	total_deferred_time;
	bus_size_t	alignment;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
MTX_SYSINIT(bounce_lock, &bounce_lock, "bounce pages lock", MTX_DEF);
static int total_bpages;
static int busdma_zonecount;

static STAILQ_HEAD(, bounce_zone) bounce_zone_list =
    STAILQ_HEAD_INITIALIZER(bounce_zone_list);
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist =
    STAILQ_HEAD_INITIALIZER(bounce_map_callbacklist);

static MALLOC_DEFINE(M_BOUNCE, "bounce", "busdma bounce pages");

SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
    "Total bounce pages");

static void busdma_thread(void *);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int commit);

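/*
 * Reserve the bounce pages that a map needs.  With BUS_DMA_NOWAIT the
 * reservation must succeed in full or the request fails with ENOMEM.
 * Otherwise whatever could be reserved is kept, the map is queued on
 * its zone's waiting list, and EINPROGRESS tells the caller that the
 * callback will be deferred until enough pages have been freed.
 */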
static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
{
	struct bounce_zone *bz;

	/* Reserve necessary bounce pages. */
	mtx_lock(&bounce_lock);
	if (flags & BUS_DMA_NOWAIT) {
		if (reserve_bounce_pages(dmat, map, 0) != 0) {
			map->pagesneeded = 0;
			mtx_unlock(&bounce_lock);
			return (ENOMEM);
		}
	} else {
		if (reserve_bounce_pages(dmat, map, 1) != 0) {
			/* Queue us for resources */
			bz = dmat->bounce_zone;
			STAILQ_INSERT_TAIL(&bz->bounce_map_waitinglist, map,
			    links);
			map->queued_time = sbinuptime();
			mtx_unlock(&bounce_lock);
			return (EINPROGRESS);
		}
	}
	mtx_unlock(&bounce_lock);

	return (0);
}

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{

	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{

	return (bz->sysctl_tree_top);
}

/*
 * Returns true if the address falls within the tag's exclusion window, or
 * fails to meet its alignment requirements.
 */
static bool
addr_needs_bounce(bus_dma_tag_t dmat, bus_addr_t paddr)
{

	if (paddr > dmat_lowaddr(dmat) && paddr <= dmat_highaddr(dmat))
		return (true);
	if (!vm_addr_align_ok(paddr, dmat_alignment(dmat)))
		return (true);

	return (false);
}

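/*
 * Find an existing bounce zone whose constraints are at least as strict
 * as the tag's, or create a new one and register its sysctl statistics
 * nodes.  The busdma thread is started when the first zone is created.
 */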
static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;
	bool start_thread;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat_alignment(dmat) <= bz->alignment) &&
#ifdef dmat_domain
		    dmat_domain(dmat) == bz->domain &&
#endif
		    (dmat_lowaddr(dmat) >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_BUSDMA,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	STAILQ_INIT(&bz->bounce_map_waitinglist);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat_lowaddr(dmat);
	bz->alignment = MAX(dmat_alignment(dmat), PAGE_SIZE);
	bz->map_count = 0;
#ifdef dmat_domain
	bz->domain = dmat_domain(dmat);
#endif
	snprintf(bz->zoneid, sizeof(bz->zoneid), "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, sizeof(bz->lowaddrid), "%#jx",
	    (uintmax_t)bz->lowaddr);
	start_thread = STAILQ_EMPTY(&bounce_zone_list);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests (pages bounced)");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, "");
#ifdef dmat_domain
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "domain", CTLFLAG_RD, &bz->domain, 0,
	    "memory domain");
#endif
	SYSCTL_ADD_SBINTIME_USEC(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred_time", CTLFLAG_RD, &bz->total_deferred_time,
	    "Cumulative time busdma requests are deferred (us)");
	if (start_thread) {
		if (kproc_create(busdma_thread, NULL, NULL, 0, 0, "busdma") !=
		    0)
			printf("failed to create busdma thread\n");
	}
	return (0);
}

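/*
 * Grow the tag's bounce zone by up to numpages pages, each backed by a
 * page of contiguous memory allocated below the zone's lowaddr cutoff.
 * Returns the number of pages actually added.
 */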
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

#ifdef dmat_domain
		bpage = malloc_domainset(sizeof(*bpage), M_BUSDMA,
		    DOMAINSET_PREF(bz->domain), M_NOWAIT | M_ZERO);
#else
		bpage = malloc(sizeof(*bpage), M_BUSDMA, M_NOWAIT | M_ZERO);
#endif

		if (bpage == NULL)
			break;
#ifdef dmat_domain
		bpage->vaddr = (vm_offset_t)contigmalloc_domainset(PAGE_SIZE,
		    M_BOUNCE, DOMAINSET_PREF(bz->domain), M_NOWAIT,
		    0ul, bz->lowaddr, PAGE_SIZE, 0);
#else
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_BOUNCE,
		    M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0);
#endif
		if (bpage->vaddr == 0) {
			free(bpage, M_BUSDMA);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

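/*
 * Move free pages from a map's bounce zone to its reservation; called
 * with the bounce lock held.  Returns the number of pages still
 * outstanding.  Unless commit is set, a reservation that cannot be
 * satisfied in full leaves the zone untouched.
 */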
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

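/*
 * Consume one previously reserved bounce page from the zone and attach
 * it to the map, recording where the client data lives so it can be
 * copied through the bounce buffer at sync time.  Returns the bus
 * address of the bounce page; with BUS_DMA_KEEP_PG_OFFSET the client's
 * offset within the page is preserved.
 */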
#if defined(__amd64__) || defined(__i386__)
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    vm_paddr_t addr1, vm_paddr_t addr2, bus_size_t size)
#else
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    bus_addr_t addr, bus_size_t size)
#endif
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL, ("add_bounce_page: bad map %p", map));
#if defined(__amd64__) || defined(__i386__)
	KASSERT(map != &nobounce_dmamap, ("add_bounce_page: bad map %p", map));
#endif
#ifdef __riscv
	KASSERT((map->flags & DMAMAP_COULD_BOUNCE) != 0,
	    ("add_bounce_page: bad map %p", map));
#endif

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't have any reserved pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat_flags(dmat) & BUS_DMA_KEEP_PG_OFFSET) {
		/* Page offset needs to be preserved. */
#if defined(__amd64__) || defined(__i386__)
		bpage->vaddr |= addr1 & PAGE_MASK;
		bpage->busaddr |= addr1 & PAGE_MASK;
		KASSERT(addr2 == 0,
		    ("Trying to bounce multiple pages with BUS_DMA_KEEP_PG_OFFSET"));
#else
		bpage->vaddr |= addr & PAGE_MASK;
		bpage->busaddr |= addr & PAGE_MASK;
#endif
	}
	bpage->datavaddr = vaddr;
#if defined(__amd64__) || defined(__i386__)
	bpage->datapage[0] = PHYS_TO_VM_PAGE(addr1);
	KASSERT((addr2 & PAGE_MASK) == 0, ("Second page is not aligned"));
	bpage->datapage[1] = PHYS_TO_VM_PAGE(addr2);
	bpage->dataoffs = addr1 & PAGE_MASK;
#else
	bpage->datapage = PHYS_TO_VM_PAGE(addr);
	bpage->dataoffs = addr & PAGE_MASK;
#endif
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

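/*
 * Return all of a map's bounce pages to their zone and move any waiting
 * maps whose reservations can now be satisfied onto the callback list,
 * waking the busdma thread to run their deferred callbacks.
 */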
static void
free_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;
	struct bounce_zone *bz;
	bool schedule_thread;
	u_int count;

	if (STAILQ_EMPTY(&map->bpages))
		return;

	bz = dmat->bounce_zone;
	count = 0;
	schedule_thread = false;
	STAILQ_FOREACH(bpage, &map->bpages, links) {
		bpage->datavaddr = 0;
		bpage->datacount = 0;

		if (dmat_flags(dmat) & BUS_DMA_KEEP_PG_OFFSET) {
			/*
			 * Reset the bounce page to start at offset 0.
			 * Other uses of this bounce page may need to
			 * store a full page of data and/or assume it
			 * starts on a page boundary.
			 */
			bpage->vaddr &= ~PAGE_MASK;
			bpage->busaddr &= ~PAGE_MASK;
		}
		count++;
	}

	mtx_lock(&bounce_lock);
	STAILQ_CONCAT(&bz->bounce_page_list, &map->bpages);
	bz->free_bpages += count;
	bz->active_bpages -= count;
	while ((map = STAILQ_FIRST(&bz->bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) != 0)
			break;

		STAILQ_REMOVE_HEAD(&bz->bounce_map_waitinglist, links);
		STAILQ_INSERT_TAIL(&bounce_map_callbacklist, map, links);
		bz->total_deferred++;
		schedule_thread = true;
	}
	mtx_unlock(&bounce_lock);
	if (schedule_thread)
		wakeup(&bounce_map_callbacklist);
}

/*
 * Add a single contiguous physical range to the segment list.
 */
static bus_size_t
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
	int seg;

	KASSERT(curaddr <= BUS_SPACE_MAXADDR,
	    ("ds_addr %#jx > BUS_SPACE_MAXADDR %#jx; dmat %p fl %#x low %#jx "
	    "hi %#jx",
	    (uintmax_t)curaddr, (uintmax_t)BUS_SPACE_MAXADDR,
	    dmat, dmat_bounce_flags(dmat), (uintmax_t)dmat_lowaddr(dmat),
	    (uintmax_t)dmat_highaddr(dmat)));

	/*
	 * Make sure we don't cross any boundaries.
	 */
	if (!vm_addr_bound_ok(curaddr, sgsize, dmat_boundary(dmat)))
		sgsize = roundup2(curaddr, dmat_boundary(dmat)) - curaddr;

	/*
	 * Insert chunk into a segment, coalescing with
	 * previous segment if possible.
	 */
	seg = *segp;
	if (seg == -1) {
		seg = 0;
		segs[seg].ds_addr = curaddr;
		segs[seg].ds_len = sgsize;
	} else {
		if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
		    (segs[seg].ds_len + sgsize) <= dmat_maxsegsz(dmat) &&
		    vm_addr_bound_ok(segs[seg].ds_addr,
		    segs[seg].ds_len + sgsize, dmat_boundary(dmat)))
			segs[seg].ds_len += sgsize;
		else {
			if (++seg >= dmat_nsegments(dmat))
				return (0);
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
		}
	}
	*segp = seg;
	return (sgsize);
}

/*
 * Add a contiguous physical range to the segment list, respecting the tag's
 * maximum segment size and splitting it into multiple segments as necessary.
 */
static bool
_bus_dmamap_addsegs(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
	bus_size_t done, todo;

	while (sgsize > 0) {
		todo = MIN(sgsize, dmat_maxsegsz(dmat));
		done = _bus_dmamap_addseg(dmat, map, curaddr, todo, segs,
		    segp);
		if (done == 0)
			return (false);
		curaddr += done;
		sgsize -= done;
	}
	return (true);
}

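/*
 * Kernel process that services deferred requests: each map handed over
 * by free_bounce_pages() has its load retried under the tag's lock
 * function, with the time it spent queued added to the zone's total.
 */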
static void
busdma_thread(void *dummy __unused)
{
	STAILQ_HEAD(, bus_dmamap) callbacklist;
	bus_dma_tag_t dmat;
	struct bus_dmamap *map, *nmap;
	struct bounce_zone *bz;

	thread_lock(curthread);
	sched_class(curthread, PRI_ITHD);
	sched_ithread_prio(curthread, PI_SWI(SWI_BUSDMA));
	thread_unlock(curthread);
	for (;;) {
		mtx_lock(&bounce_lock);
		while (STAILQ_EMPTY(&bounce_map_callbacklist))
			mtx_sleep(&bounce_map_callbacklist, &bounce_lock, 0,
			    "-", 0);
		STAILQ_INIT(&callbacklist);
		STAILQ_CONCAT(&callbacklist, &bounce_map_callbacklist);
		mtx_unlock(&bounce_lock);

		STAILQ_FOREACH_SAFE(map, &callbacklist, links, nmap) {
			dmat = map->dmat;
			bz = dmat->bounce_zone;
			dmat_lockfunc(dmat)(dmat_lockfuncarg(dmat),
			    BUS_DMA_LOCK);
			bz->total_deferred_time +=
			    (sbinuptime() - map->queued_time);
			bus_dmamap_load_mem(map->dmat, map, &map->mem,
			    map->callback, map->callback_arg, BUS_DMA_WAITOK);
			dmat_lockfunc(dmat)(dmat_lockfuncarg(dmat),
			    BUS_DMA_UNLOCK);
		}
	}
}