/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * From amd64/busdma_machdep.c, r204214
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>

#include "iommu_if.h"

#define MAX_BPAGES MIN(8192, physmem/40)

struct bounce_page;
struct bounce_zone;

struct bus_dma_tag {
        bus_size_t              alignment;
        bus_addr_t              boundary;
        bus_addr_t              lowaddr;
        bus_addr_t              highaddr;
        bus_size_t              maxsize;
        bus_size_t              maxsegsz;
        u_int                   nsegments;
        int                     flags;
        int                     map_count;
        bus_dma_lock_t         *lockfunc;
        void                   *lockfuncarg;
        struct bounce_zone     *bounce_zone;
        device_t                iommu;
        void                   *iommu_cookie;
};

static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Busdma parameters");

struct bus_dmamap {
        STAILQ_HEAD(, bounce_page) bpages;
        int                     pagesneeded;
        int                     pagesreserved;
        bus_dma_tag_t           dmat;
        struct memdesc          mem;
        bus_dma_segment_t      *segments;
        int                     nsegs;
        bus_dmamap_callback_t  *callback;
        void                   *callback_arg;
        __sbintime_t            queued_time;
        STAILQ_ENTRY(bus_dmamap) links;
        int                     contigalloc;
};

static MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");

#define dmat_alignment(dmat)    ((dmat)->alignment)
#define dmat_bounce_flags(dmat) (0)
#define dmat_boundary(dmat)     ((dmat)->boundary)
#define dmat_flags(dmat)        ((dmat)->flags)
#define dmat_highaddr(dmat)     ((dmat)->highaddr)
#define dmat_lowaddr(dmat)      ((dmat)->lowaddr)
#define dmat_lockfunc(dmat)     ((dmat)->lockfunc)
#define dmat_lockfuncarg(dmat)  ((dmat)->lockfuncarg)
#define dmat_maxsegsz(dmat)     ((dmat)->maxsegsz)
#define dmat_nsegments(dmat)    ((dmat)->nsegments)

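/*
 * The shared bounce-page implementation included below uses the dmat_*()
 * accessor macros above to query the tag's attributes.
 */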
#include "../../kern/subr_busdma_bounce.c"

/*
 * Returns true if the address falls within the tag's exclusion window, or
 * fails to meet its alignment requirements.
 */
static __inline bool
must_bounce(bus_dma_tag_t dmat, bus_addr_t paddr)
{

        if (dmat->iommu == NULL && paddr > dmat->lowaddr &&
            paddr <= dmat->highaddr)
                return (true);
        if (!vm_addr_align_ok(paddr, dmat->alignment))
                return (true);

        return (false);
}

#define BUS_DMA_COULD_BOUNCE    BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP  BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_addr_t boundary, bus_addr_t lowaddr,
    bus_addr_t highaddr, bus_dma_filter_t *filter,
    void *filterarg, bus_size_t maxsize, int nsegments,
    bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
        bus_dma_tag_t newtag;
        int error = 0;

        /* Basic sanity checking */
        if (boundary != 0 && boundary < maxsegsz)
                maxsegsz = boundary;

        if (maxsegsz == 0) {
                return (EINVAL);
        }

        /* Filters are no longer supported. */
        if (filter != NULL || filterarg != NULL)
                return (EINVAL);

        /* Return a NULL tag on failure */
        *dmat = NULL;

        newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
            M_ZERO | M_NOWAIT);
        if (newtag == NULL) {
                CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
                    __func__, newtag, 0, error);
                return (ENOMEM);
        }

        newtag->alignment = alignment;
        newtag->boundary = boundary;
        newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
        newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
        newtag->maxsize = maxsize;
        newtag->nsegments = nsegments;
        newtag->maxsegsz = maxsegsz;
        newtag->flags = flags;
        newtag->map_count = 0;
        if (lockfunc != NULL) {
                newtag->lockfunc = lockfunc;
                newtag->lockfuncarg = lockfuncarg;
        } else {
                newtag->lockfunc = _busdma_dflt_lock;
                newtag->lockfuncarg = NULL;
        }

        /* Take into account any restrictions imposed by our parent tag */
        if (parent != NULL) {
                newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
                newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
                if (newtag->boundary == 0)
                        newtag->boundary = parent->boundary;
                else if (parent->boundary != 0)
                        newtag->boundary = MIN(parent->boundary,
                            newtag->boundary);

                newtag->iommu = parent->iommu;
                newtag->iommu_cookie = parent->iommu_cookie;
        }

        if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) && newtag->iommu == NULL)
                newtag->flags |= BUS_DMA_COULD_BOUNCE;

        if (newtag->alignment > 1)
                newtag->flags |= BUS_DMA_COULD_BOUNCE;

        if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
            (flags & BUS_DMA_ALLOCNOW) != 0) {
                struct bounce_zone *bz;

                /* Must bounce */

                if ((error = alloc_bounce_zone(newtag)) != 0) {
                        free(newtag, M_DEVBUF);
                        return (error);
                }
                bz = newtag->bounce_zone;

                if (ptoa(bz->total_bpages) < maxsize) {
                        int pages;

                        pages = atop(maxsize) - bz->total_bpages;

                        /* Add pages to our bounce pool */
                        if (alloc_bounce_pages(newtag, pages) < pages)
                                error = ENOMEM;
                }
                /* Performed initial allocation */
                newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
        }

        if (error != 0) {
                free(newtag, M_DEVBUF);
        } else {
                *dmat = newtag;
        }
        CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
            __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
        return (error);
}
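
/*
 * Illustrative use (not part of this file): a driver that can only address
 * the low 4GB would typically derive its tag from the parent bus tag along
 * these lines; "sc" and the size limits are placeholders.
 *
 *      error = bus_dma_tag_create(
 *          bus_get_dma_tag(dev),       // parent
 *          1, 0,                       // alignment, boundary
 *          BUS_SPACE_MAXADDR_32BIT,    // lowaddr
 *          BUS_SPACE_MAXADDR,          // highaddr
 *          NULL, NULL,                 // filter, filterarg (must be NULL)
 *          DFLTPHYS, 1, DFLTPHYS,      // maxsize, nsegments, maxsegsz
 *          0,                          // flags
 *          NULL, NULL,                 // lockfunc, lockfuncarg
 *          &sc->dma_tag);
 */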

void
bus_dma_template_clone(bus_dma_template_t *t, bus_dma_tag_t dmat)
{

        if (t == NULL || dmat == NULL)
                return;

        t->alignment = dmat->alignment;
        t->boundary = dmat->boundary;
        t->lowaddr = dmat->lowaddr;
        t->highaddr = dmat->highaddr;
        t->maxsize = dmat->maxsize;
        t->nsegments = dmat->nsegments;
        t->maxsegsize = dmat->maxsegsz;
        t->flags = dmat->flags;
        t->lockfunc = dmat->lockfunc;
        t->lockfuncarg = dmat->lockfuncarg;
}
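
/*
 * Sketch of how the template filled in by bus_dma_template_clone() above is
 * typically consumed (an assumption about callers; not exercised in this
 * file): clone an existing tag, adjust a field or two, then materialize a
 * new tag with bus_dma_template_tag().
 *
 *      bus_dma_template_t t;
 *
 *      bus_dma_template_clone(&t, parent_tag);
 *      t.maxsegsize = PAGE_SIZE;
 *      error = bus_dma_template_tag(&t, &new_tag);
 */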

int
bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain)
{

        return (0);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
        int error = 0;

        if (dmat != NULL) {
                if (dmat->map_count != 0) {
                        error = EBUSY;
                        goto out;
                }

                free(dmat, M_DEVBUF);
        }
out:
        CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat, error);
        return (error);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
        int error;

        error = 0;

        *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
            M_NOWAIT | M_ZERO);
        if (*mapp == NULL) {
                CTR3(KTR_BUSDMA, "%s: tag %p error %d",
                    __func__, dmat, ENOMEM);
                return (ENOMEM);
        }

        /* Initialize the new map */
        STAILQ_INIT(&((*mapp)->bpages));

        /*
         * Bouncing might be required if the driver asks for an active
         * exclusion region, a data alignment that is stricter than 1, and/or
         * an active address boundary.
         */
        if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
                /* Must bounce */
                struct bounce_zone *bz;
                int maxpages;

                if (dmat->bounce_zone == NULL) {
                        if ((error = alloc_bounce_zone(dmat)) != 0)
                                return (error);
                }
                bz = dmat->bounce_zone;

                /*
                 * Attempt to add pages to our pool on a per-instance
                 * basis up to a sane limit.
                 */
                if (dmat->alignment > 1)
                        maxpages = MAX_BPAGES;
                else
                        maxpages = MIN(MAX_BPAGES,
                            Maxmem - atop(dmat->lowaddr));
                if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
                    || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
                        int pages;

                        pages = MAX(atop(dmat->maxsize), 1);
                        pages = MIN(maxpages - bz->total_bpages, pages);
                        pages = MAX(pages, 1);
                        if (alloc_bounce_pages(dmat, pages) < pages)
                                error = ENOMEM;

                        if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
                                if (error == 0)
                                        dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
                        } else {
                                error = 0;
                        }
                }
                bz->map_count++;
        }

        (*mapp)->nsegs = 0;
        (*mapp)->segments = (bus_dma_segment_t *)malloc(
            sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
            M_NOWAIT);
        if ((*mapp)->segments == NULL) {
                CTR3(KTR_BUSDMA, "%s: tag %p error %d",
                    __func__, dmat, ENOMEM);
                return (ENOMEM);
        }

        if (error == 0)
                dmat->map_count++;
        CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
            __func__, dmat, dmat->flags, error);
        return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
                if (STAILQ_FIRST(&map->bpages) != NULL) {
                        CTR3(KTR_BUSDMA, "%s: tag %p error %d",
                            __func__, dmat, EBUSY);
                        return (EBUSY);
                }
                if (dmat->bounce_zone)
                        dmat->bounce_zone->map_count--;
        }
        free(map->segments, M_DEVBUF);
        free(map, M_DEVBUF);
        dmat->map_count--;
        CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
        return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
    bus_dmamap_t *mapp)
{
        vm_memattr_t attr;
        int mflags;

        if (flags & BUS_DMA_NOWAIT)
                mflags = M_NOWAIT;
        else
                mflags = M_WAITOK;

        bus_dmamap_create(dmat, flags, mapp);

        if (flags & BUS_DMA_ZERO)
                mflags |= M_ZERO;
        if (flags & BUS_DMA_NOCACHE)
                attr = VM_MEMATTR_UNCACHEABLE;
        else
                attr = VM_MEMATTR_DEFAULT;

        /*
         * XXX:
         * (dmat->alignment <= dmat->maxsize) is just a quick hack; the exact
         * alignment guarantees of malloc need to be nailed down, and the
         * code below should be rewritten to take that into account.
         *
         * In the meantime, we'll warn the user if malloc gets it wrong.
         */
        if ((dmat->maxsize <= PAGE_SIZE) &&
            (dmat->alignment <= dmat->maxsize) &&
            dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
            attr == VM_MEMATTR_DEFAULT) {
                *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
        } else {
                /*
                 * XXX Use Contigmalloc until it is merged into this facility
                 *     and handles multi-seg allocations.  Nobody is doing
                 *     multi-seg allocations yet though.
                 * XXX Certain AGP hardware does.
                 */
                *vaddr = kmem_alloc_contig(dmat->maxsize, mflags, 0ul,
                    dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
                    dmat->boundary, attr);
                (*mapp)->contigalloc = 1;
        }
        if (*vaddr == NULL) {
                CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
                    __func__, dmat, dmat->flags, ENOMEM);
                return (ENOMEM);
        } else if (!vm_addr_align_ok(vtophys(*vaddr), dmat->alignment)) {
                printf("bus_dmamem_alloc failed to align memory properly.\n");
        }
        CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
            __func__, dmat, dmat->flags, 0);
        return (0);
}
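
/*
 * Illustrative pattern (not part of this file): DMA memory obtained above is
 * normally loaded with bus_dmamap_load(), which reports the bus addresses
 * through a callback.  Names such as "sc" and "ring_size" are placeholders.
 *
 *      static void
 *      dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *      {
 *              if (error == 0)
 *                      *(bus_addr_t *)arg = segs[0].ds_addr;
 *      }
 *
 *      error = bus_dmamem_alloc(sc->dma_tag, &sc->ring, BUS_DMA_ZERO,
 *          &sc->ring_map);
 *      if (error == 0)
 *              error = bus_dmamap_load(sc->dma_tag, sc->ring_map, sc->ring,
 *                  ring_size, dma_cb, &sc->ring_busaddr, BUS_DMA_NOWAIT);
 */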

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{

        if (!map->contigalloc)
                free(vaddr, M_DEVBUF);
        else
                kmem_free(vaddr, dmat->maxsize);
        bus_dmamap_destroy(dmat, map);
        CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags)
{
        bus_addr_t curaddr;
        bus_size_t sgsize;

        if (map->pagesneeded == 0) {
                CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
                    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
                    dmat->boundary, dmat->alignment);
                CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
                    map, map->pagesneeded);
                /*
                 * Count the number of bounce pages
                 * needed in order to complete this transfer
                 */
                curaddr = buf;
                while (buflen != 0) {
                        sgsize = buflen;
                        if (must_bounce(dmat, curaddr)) {
                                sgsize = MIN(sgsize,
                                    PAGE_SIZE - (curaddr & PAGE_MASK));
                                map->pagesneeded++;
                        }
                        curaddr += sgsize;
                        buflen -= sgsize;
                }
                CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
        }
}

static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
        vm_offset_t vaddr;
        vm_offset_t vendaddr;
        bus_addr_t paddr;

        if (map->pagesneeded == 0) {
                CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
                    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
                    dmat->boundary, dmat->alignment);
                CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
                    map, map->pagesneeded);
                /*
                 * Count the number of bounce pages
                 * needed in order to complete this transfer
                 */
                vaddr = (vm_offset_t)buf;
                vendaddr = (vm_offset_t)buf + buflen;

                while (vaddr < vendaddr) {
                        bus_size_t sg_len;

                        sg_len = MIN(vendaddr - vaddr,
                            PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK));
                        if (pmap == kernel_pmap)
                                paddr = pmap_kextract(vaddr);
                        else
                                paddr = pmap_extract(pmap, vaddr);
                        if (must_bounce(dmat, paddr)) {
                                sg_len = roundup2(sg_len, dmat->alignment);
                                map->pagesneeded++;
                        }
                        vaddr += sg_len;
                }
                CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
        }
}

/*
 * Utility function to load a physical buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_phys(bus_dma_tag_t dmat,
    bus_dmamap_t map,
    vm_paddr_t buf, bus_size_t buflen,
    int flags,
    bus_dma_segment_t *segs,
    int *segp)
{
        bus_addr_t curaddr;
        bus_size_t sgsize;
        int error;

        if (segs == NULL)
                segs = map->segments;

        if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
                _bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
                if (map->pagesneeded != 0) {
                        error = _bus_dmamap_reserve_pages(dmat, map, flags);
                        if (error)
                                return (error);
                }
        }

        while (buflen > 0) {
                curaddr = buf;
                sgsize = buflen;
                if (map->pagesneeded != 0 && must_bounce(dmat, curaddr)) {
                        sgsize = MIN(sgsize,
                            PAGE_SIZE - (curaddr & PAGE_MASK));
                        curaddr = add_bounce_page(dmat, map, 0, curaddr,
                            sgsize);
                }
                if (!_bus_dmamap_addsegs(dmat, map, curaddr, sgsize, segs,
                    segp))
                        break;
                buf += sgsize;
                buflen -= sgsize;
        }

        /*
         * Did we fit?
         */
        return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

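/*
 * Load an array of vm_page pointers.  This implementation simply defers to
 * the generic page-array helper, which breaks the request into per-page
 * physical loads.
 */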
int
_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{

        return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags,
            segs, segp));
}

/*
 * Utility function to load a linear buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
    bus_dmamap_t map,
    void *buf, bus_size_t buflen,
    pmap_t pmap,
    int flags,
    bus_dma_segment_t *segs,
    int *segp)
{
        bus_size_t sgsize;
        bus_addr_t curaddr;
        vm_offset_t kvaddr, vaddr;
        int error;

        if (segs == NULL)
                segs = map->segments;

        if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
                _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
                if (map->pagesneeded != 0) {
                        error = _bus_dmamap_reserve_pages(dmat, map, flags);
                        if (error)
                                return (error);
                }
        }

        vaddr = (vm_offset_t)buf;

        while (buflen > 0) {
                /*
                 * Get the physical address for this segment.
                 */
                if (pmap == kernel_pmap) {
                        curaddr = pmap_kextract(vaddr);
                        kvaddr = vaddr;
                } else {
                        curaddr = pmap_extract(pmap, vaddr);
                        kvaddr = 0;
                }

                /*
                 * Compute the segment size, and adjust counts.
                 */
                sgsize = MIN(buflen, PAGE_SIZE - (curaddr & PAGE_MASK));
                if (map->pagesneeded != 0 && must_bounce(dmat, curaddr)) {
                        sgsize = roundup2(sgsize, dmat->alignment);
                        curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
                            sgsize);
                }

                if (!_bus_dmamap_addsegs(dmat, map, curaddr, sgsize, segs,
                    segp))
                        break;
                vaddr += sgsize;
                buflen -= MIN(sgsize, buflen);  /* avoid underflow */
        }

        /*
         * Did we fit?
         */
        return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

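/*
 * Record the client's callback and memory descriptor so the deferred-load
 * machinery in subr_busdma_bounce.c can retry the mapping once bounce pages
 * become available (waitable loads only).
 */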
void
_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback,
    void *callback_arg)
{

        if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
                map->dmat = dmat;
                map->mem = *mem;
                map->callback = callback;
                map->callback_arg = callback_arg;
        }
}

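/*
 * Finish a load: record the final segment list and, if this tag is backed
 * by an IOMMU, rewrite the segments with the translated bus addresses.
 */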
bus_dma_segment_t *
_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, int error)
{

        map->nsegs = nsegs;
        if (segs != NULL)
                memcpy(map->segments, segs, map->nsegs * sizeof(segs[0]));
        if (dmat->iommu != NULL)
                IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs,
                    dmat->lowaddr, dmat->highaddr, dmat->alignment,
                    dmat->boundary, dmat->iommu_cookie);

        if (segs != NULL)
                memcpy(segs, map->segments, map->nsegs * sizeof(segs[0]));
        else
                segs = map->segments;

        return (segs);
}

/*
 * Release the mapping held by map.
 */
void
bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        if (dmat->iommu) {
                IOMMU_UNMAP(dmat->iommu, map->segments, map->nsegs,
                    dmat->iommu_cookie);
                map->nsegs = 0;
        }

        free_bounce_pages(dmat, map);
}

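/*
 * Synchronize a mapping before or after a DMA transfer.  On this platform
 * the only work is copying data to and from any bounce pages in use and
 * issuing powerpc_sync() as a memory barrier.
 */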
void
bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
        struct bounce_page *bpage;
        vm_offset_t datavaddr, tempvaddr;

        if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
                /*
                 * Handle data bouncing.  We might also
                 * want to add support for invalidating
                 * the caches on broken hardware
                 */
                CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
                    "performing bounce", __func__, dmat, dmat->flags, op);

                if (op & BUS_DMASYNC_PREWRITE) {
                        while (bpage != NULL) {
                                tempvaddr = 0;
                                datavaddr = bpage->datavaddr;
                                if (datavaddr == 0) {
                                        tempvaddr = pmap_quick_enter_page(
                                            bpage->datapage);
                                        datavaddr = tempvaddr |
                                            bpage->dataoffs;
                                }

                                bcopy((void *)datavaddr,
                                    (void *)bpage->vaddr, bpage->datacount);

                                if (tempvaddr != 0)
                                        pmap_quick_remove_page(tempvaddr);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                        dmat->bounce_zone->total_bounced++;
                }

                if (op & BUS_DMASYNC_POSTREAD) {
                        while (bpage != NULL) {
                                tempvaddr = 0;
                                datavaddr = bpage->datavaddr;
                                if (datavaddr == 0) {
                                        tempvaddr = pmap_quick_enter_page(
                                            bpage->datapage);
                                        datavaddr = tempvaddr |
                                            bpage->dataoffs;
                                }

                                bcopy((void *)bpage->vaddr,
                                    (void *)datavaddr, bpage->datacount);

                                if (tempvaddr != 0)
                                        pmap_quick_remove_page(tempvaddr);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                        dmat->bounce_zone->total_bounced++;
                }
        }

        powerpc_sync();
}
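
/*
 * Reminder of the usual discipline (an assumption about callers, not
 * enforced here): drivers call bus_dmamap_sync() with the PRE ops before
 * handing a buffer to the device and with the POST ops once the device is
 * done, e.g.:
 *
 *      bus_dmamap_sync(sc->dma_tag, sc->ring_map, BUS_DMASYNC_PREWRITE);
 *      ... start the transfer ...
 *      bus_dmamap_sync(sc->dma_tag, sc->ring_map, BUS_DMASYNC_POSTWRITE);
 */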

int
bus_dma_tag_set_iommu(bus_dma_tag_t tag, device_t iommu, void *cookie)
{
        tag->iommu = iommu;
        tag->iommu_cookie = cookie;

        return (0);
}