xref: /freebsd/sys/dev/usb/usb_busdma.c (revision 499fe48de8938d4c7b0a91e20eb6c16db9d55633)
1 /* $FreeBSD$ */
2 /*-
3  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4  *
5  * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #ifdef USB_GLOBAL_INCLUDE_FILE
30 #include USB_GLOBAL_INCLUDE_FILE
31 #else
32 #include <sys/stdint.h>
33 #include <sys/stddef.h>
34 #include <sys/param.h>
35 #include <sys/queue.h>
36 #include <sys/types.h>
37 #include <sys/systm.h>
38 #include <sys/kernel.h>
39 #include <sys/bus.h>
40 #include <sys/module.h>
41 #include <sys/lock.h>
42 #include <sys/mutex.h>
43 #include <sys/condvar.h>
44 #include <sys/sysctl.h>
45 #include <sys/sx.h>
46 #include <sys/unistd.h>
47 #include <sys/callout.h>
48 #include <sys/malloc.h>
49 #include <sys/priv.h>
50 
51 #include <dev/usb/usb.h>
52 #include <dev/usb/usbdi.h>
53 #include <dev/usb/usbdi_util.h>
54 
55 #define	USB_DEBUG_VAR usb_debug
56 
57 #include <dev/usb/usb_core.h>
58 #include <dev/usb/usb_busdma.h>
59 #include <dev/usb/usb_process.h>
60 #include <dev/usb/usb_transfer.h>
61 #include <dev/usb/usb_device.h>
62 #include <dev/usb/usb_util.h>
63 #include <dev/usb/usb_debug.h>
64 
65 #include <dev/usb/usb_controller.h>
66 #include <dev/usb/usb_bus.h>
67 #endif			/* USB_GLOBAL_INCLUDE_FILE */
68 
69 #if USB_HAVE_BUSDMA
70 static void	usb_dma_tag_create(struct usb_dma_tag *, usb_size_t, usb_size_t);
71 static void	usb_dma_tag_destroy(struct usb_dma_tag *);
72 static void	usb_dma_lock_cb(void *, bus_dma_lock_op_t);
73 static void	usb_pc_alloc_mem_cb(void *, bus_dma_segment_t *, int, int);
74 static void	usb_pc_load_mem_cb(void *, bus_dma_segment_t *, int, int);
75 static void	usb_pc_common_mem_cb(void *, bus_dma_segment_t *, int, int,
76 		    uint8_t);
77 #endif
78 
79 /*------------------------------------------------------------------------*
80  *  usbd_get_page - lookup DMA-able memory for the given offset
81  *
82  * NOTE: Only call this function when the "page_cache" structure has
83  * been properly initialized !
84  *------------------------------------------------------------------------*/
85 void
86 usbd_get_page(struct usb_page_cache *pc, usb_frlength_t offset,
87     struct usb_page_search *res)
88 {
89 #if USB_HAVE_BUSDMA
90 	struct usb_page *page;
91 
92 	if (pc->page_start) {
93 
94 		/* Case 1 - something has been loaded into DMA */
95 
96 		if (pc->buffer) {
97 
98 			/* Case 1a - Kernel Virtual Address */
99 
100 			res->buffer = USB_ADD_BYTES(pc->buffer, offset);
101 		}
102 		offset += pc->page_offset_buf;
103 
104 		/* compute destination page */
105 
106 		page = pc->page_start;
107 
108 		if (pc->ismultiseg) {
109 
110 			page += (offset / USB_PAGE_SIZE);
111 
112 			offset %= USB_PAGE_SIZE;
113 
114 			res->length = USB_PAGE_SIZE - offset;
115 			res->physaddr = page->physaddr + offset;
116 		} else {
117 			res->length = (usb_size_t)-1;
118 			res->physaddr = page->physaddr + offset;
119 		}
120 		if (!pc->buffer) {
121 
122 			/* Case 1b - Non Kernel Virtual Address */
123 
124 			res->buffer = USB_ADD_BYTES(page->buffer, offset);
125 		}
126 		return;
127 	}
128 #endif
129 	/* Case 2 - Plain PIO */
130 
131 	res->buffer = USB_ADD_BYTES(pc->buffer, offset);
132 	res->length = (usb_size_t)-1;
133 #if USB_HAVE_BUSDMA
134 	res->physaddr = 0;
135 #endif
136 }
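
/*
 * A minimal usage sketch for usbd_get_page(): walk a page cache in
 * physically contiguous chunks, clamping each chunk to the remaining
 * length. The helper name and its arguments are hypothetical; the
 * pattern mirrors the copy routines further down in this file.
 */
#if 0
static void
example_walk_page_cache(struct usb_page_cache *pc, usb_frlength_t offset,
    usb_frlength_t len)
{
	struct usb_page_search res;

	while (len != 0) {
		usbd_get_page(pc, offset, &res);
		if (res.length > len)
			res.length = len;
		/* "res.buffer"/"res.physaddr" describe one contiguous chunk */
		offset += res.length;
		len -= res.length;
	}
}
#endif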
137 
138 /*------------------------------------------------------------------------*
139  *  usb_pc_buffer_is_aligned - verify alignment
140  *
141  * This function is used to check if a page cache buffer is properly
142  * aligned to reduce the use of bounce buffers in PIO mode.
143  *------------------------------------------------------------------------*/
144 uint8_t
145 usb_pc_buffer_is_aligned(struct usb_page_cache *pc, usb_frlength_t offset,
146     usb_frlength_t len, usb_frlength_t mask)
147 {
148 	struct usb_page_search buf_res;
149 
150 	while (len != 0) {
151 
152 		usbd_get_page(pc, offset, &buf_res);
153 
154 		if (buf_res.length > len)
155 			buf_res.length = len;
156 		if (USB_P2U(buf_res.buffer) & mask)
157 			return (0);
158 		if (buf_res.length & mask)
159 			return (0);
160 
161 		offset += buf_res.length;
162 		len -= buf_res.length;
163 	}
164 	return (1);
165 }
166 
167 /*------------------------------------------------------------------------*
168  *  usbd_copy_in - copy directly to DMA-able memory
169  *------------------------------------------------------------------------*/
170 void
171 usbd_copy_in(struct usb_page_cache *cache, usb_frlength_t offset,
172     const void *ptr, usb_frlength_t len)
173 {
174 	struct usb_page_search buf_res;
175 
176 	while (len != 0) {
177 
178 		usbd_get_page(cache, offset, &buf_res);
179 
180 		if (buf_res.length > len) {
181 			buf_res.length = len;
182 		}
183 		memcpy(buf_res.buffer, ptr, buf_res.length);
184 
185 		offset += buf_res.length;
186 		len -= buf_res.length;
187 		ptr = USB_ADD_BYTES(ptr, buf_res.length);
188 	}
189 }
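
/*
 * Hypothetical driver-side use of usbd_copy_in(): stage a control
 * request into the first frame buffer of a transfer before starting
 * it. "usbd_xfer_set_frame_len()" and "struct usb_device_request"
 * come from usbdi.h and usb.h; the surrounding callback is assumed.
 */
#if 0
static void
example_stage_request(struct usb_xfer *xfer,
    const struct usb_device_request *req)
{
	usbd_copy_in(xfer->frbuffers, 0, req, sizeof(*req));
	usbd_xfer_set_frame_len(xfer, 0, sizeof(*req));
}
#endif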
190 
191 /*------------------------------------------------------------------------*
192  *  usbd_copy_in_user - copy directly to DMA-able memory from userland
193  *
194  * Return values:
195  *    0: Success
196  * Else: Failure
197  *------------------------------------------------------------------------*/
198 #if USB_HAVE_USER_IO
199 int
200 usbd_copy_in_user(struct usb_page_cache *cache, usb_frlength_t offset,
201     const void *ptr, usb_frlength_t len)
202 {
203 	struct usb_page_search buf_res;
204 	int error;
205 
206 	while (len != 0) {
207 
208 		usbd_get_page(cache, offset, &buf_res);
209 
210 		if (buf_res.length > len) {
211 			buf_res.length = len;
212 		}
213 		error = copyin(ptr, buf_res.buffer, buf_res.length);
214 		if (error)
215 			return (error);
216 
217 		offset += buf_res.length;
218 		len -= buf_res.length;
219 		ptr = USB_ADD_BYTES(ptr, buf_res.length);
220 	}
221 	return (0);			/* success */
222 }
223 #endif
224 
225 /*------------------------------------------------------------------------*
226  *  usbd_m_copy_in - copy an mbuf chain directly into DMA-able memory
227  *------------------------------------------------------------------------*/
228 #if USB_HAVE_MBUF
229 struct usb_m_copy_in_arg {
230 	struct usb_page_cache *cache;
231 	usb_frlength_t dst_offset;
232 };
233 
234 static int
235 usbd_m_copy_in_cb(void *arg, void *src, uint32_t count)
236 {
237 	struct usb_m_copy_in_arg *ua = arg;
238 
239 	usbd_copy_in(ua->cache, ua->dst_offset, src, count);
240 	ua->dst_offset += count;
241 	return (0);
242 }
243 
244 void
245 usbd_m_copy_in(struct usb_page_cache *cache, usb_frlength_t dst_offset,
246     struct mbuf *m, usb_size_t src_offset, usb_frlength_t src_len)
247 {
248 	struct usb_m_copy_in_arg arg = {cache, dst_offset};
249 	(void) m_apply(m, src_offset, src_len, &usbd_m_copy_in_cb, &arg);
250 }
251 #endif
252 
253 /*------------------------------------------------------------------------*
254  *  usb_uiomove - factored out code
255  *------------------------------------------------------------------------*/
256 #if USB_HAVE_USER_IO
257 int
258 usb_uiomove(struct usb_page_cache *pc, struct uio *uio,
259     usb_frlength_t pc_offset, usb_frlength_t len)
260 {
261 	struct usb_page_search res;
262 	int error = 0;
263 
264 	while (len != 0) {
265 
266 		usbd_get_page(pc, pc_offset, &res);
267 
268 		if (res.length > len) {
269 			res.length = len;
270 		}
271 		/*
272 		 * "uiomove()" can sleep, so callers must wrap it: drop the
273 		 * mutex around the call and re-check state afterwards.
274 		 */
275 		error = uiomove(res.buffer, res.length, uio);
276 
277 		if (error) {
278 			break;
279 		}
280 		pc_offset += res.length;
281 		len -= res.length;
282 	}
283 	return (error);
284 }
285 #endif
286 
287 /*------------------------------------------------------------------------*
288  *  usbd_copy_out - copy directly from DMA-able memory
289  *------------------------------------------------------------------------*/
290 void
291 usbd_copy_out(struct usb_page_cache *cache, usb_frlength_t offset,
292     void *ptr, usb_frlength_t len)
293 {
294 	struct usb_page_search res;
295 
296 	while (len != 0) {
297 
298 		usbd_get_page(cache, offset, &res);
299 
300 		if (res.length > len) {
301 			res.length = len;
302 		}
303 		memcpy(ptr, res.buffer, res.length);
304 
305 		offset += res.length;
306 		len -= res.length;
307 		ptr = USB_ADD_BYTES(ptr, res.length);
308 	}
309 }
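
/*
 * Hypothetical mirror image of the usbd_copy_in() sketch further up:
 * in a transfer completion callback, copy the received bytes out of
 * frame 0 with usbd_copy_out(). "usbd_xfer_status()" is declared in
 * usbdi.h; "buf" is assumed to be large enough for the actual length.
 */
#if 0
static void
example_read_completion(struct usb_xfer *xfer, void *buf)
{
	int actlen;

	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
	usbd_copy_out(xfer->frbuffers, 0, buf, actlen);
}
#endif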
310 
311 /*------------------------------------------------------------------------*
312  *  usbd_copy_out_user - copy directly from DMA-able memory to userland
313  *
314  * Return values:
315  *    0: Success
316  * Else: Failure
317  *------------------------------------------------------------------------*/
318 #if USB_HAVE_USER_IO
319 int
320 usbd_copy_out_user(struct usb_page_cache *cache, usb_frlength_t offset,
321     void *ptr, usb_frlength_t len)
322 {
323 	struct usb_page_search res;
324 	int error;
325 
326 	while (len != 0) {
327 
328 		usbd_get_page(cache, offset, &res);
329 
330 		if (res.length > len) {
331 			res.length = len;
332 		}
333 		error = copyout(res.buffer, ptr, res.length);
334 		if (error)
335 			return (error);
336 
337 		offset += res.length;
338 		len -= res.length;
339 		ptr = USB_ADD_BYTES(ptr, res.length);
340 	}
341 	return (0);			/* success */
342 }
343 #endif
344 
345 /*------------------------------------------------------------------------*
346  *  usbd_frame_zero - zero DMA-able memory
347  *------------------------------------------------------------------------*/
348 void
349 usbd_frame_zero(struct usb_page_cache *cache, usb_frlength_t offset,
350     usb_frlength_t len)
351 {
352 	struct usb_page_search res;
353 
354 	while (len != 0) {
355 
356 		usbd_get_page(cache, offset, &res);
357 
358 		if (res.length > len) {
359 			res.length = len;
360 		}
361 		memset(res.buffer, 0, res.length);
362 
363 		offset += res.length;
364 		len -= res.length;
365 	}
366 }
367 
368 #if USB_HAVE_BUSDMA
369 
370 /*------------------------------------------------------------------------*
371  *	usb_dma_lock_cb - dummy callback
372  *------------------------------------------------------------------------*/
373 static void
374 usb_dma_lock_cb(void *arg, bus_dma_lock_op_t op)
375 {
376 	/* we use "mtx_owned()" instead of this function */
377 }
378 
379 /*------------------------------------------------------------------------*
380  *	usb_dma_tag_create - allocate a DMA tag
381  *
382  * NOTE: If the "align" parameter has a value of 1 the DMA-tag will
383  * allow multi-segment mappings. Else all mappings are single-segment.
384  *------------------------------------------------------------------------*/
385 static void
386 usb_dma_tag_create(struct usb_dma_tag *udt,
387     usb_size_t size, usb_size_t align)
388 {
389 	bus_dma_tag_t tag;
390 
391 	if (bus_dma_tag_create
392 	    ( /* parent    */ udt->tag_parent->tag,
393 	     /* alignment */ align,
394 	     /* boundary  */ 0,
395 	     /* lowaddr   */ (2ULL << (udt->tag_parent->dma_bits - 1)) - 1,
396 	     /* highaddr  */ BUS_SPACE_MAXADDR,
397 	     /* filter    */ NULL,
398 	     /* filterarg */ NULL,
399 	     /* maxsize   */ size,
400 	     /* nsegments */ (align == 1 && size > 1) ?
401 	    (2 + (size / USB_PAGE_SIZE)) : 1,
402 	     /* maxsegsz  */ (align == 1 && size > USB_PAGE_SIZE) ?
403 	    USB_PAGE_SIZE : size,
404 	     /* flags     */ BUS_DMA_KEEP_PG_OFFSET,
405 	     /* lockfn    */ &usb_dma_lock_cb,
406 	     /* lockarg   */ NULL,
407 	    &tag)) {
408 		tag = NULL;
409 	}
410 	udt->tag = tag;
411 }
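
/*
 * Worked example of the parameters above, assuming USB_PAGE_SIZE is
 * 4096: a multi-segment tag (align == 1) created for size == 12288
 * gets nsegments = 2 + (12288 / 4096) = 5 and maxsegsz = 4096, which
 * is enough even when the mapping starts at an arbitrary page offset.
 * A tag with align != 1 maps the buffer as a single segment of "size"
 * bytes. The "lowaddr" limit evaluates to 2^dma_bits - 1, for example
 * 0xffffffff for a 32-bit parent tag.
 */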
412 
413 /*------------------------------------------------------------------------*
414  *	usb_dma_tag_destroy - free a DMA tag
415  *------------------------------------------------------------------------*/
416 static void
417 usb_dma_tag_destroy(struct usb_dma_tag *udt)
418 {
419 	bus_dma_tag_destroy(udt->tag);
420 }
421 
422 /*------------------------------------------------------------------------*
423  *	usb_pc_alloc_mem_cb - BUS-DMA callback function
424  *------------------------------------------------------------------------*/
425 static void
426 usb_pc_alloc_mem_cb(void *arg, bus_dma_segment_t *segs,
427     int nseg, int error)
428 {
429 	usb_pc_common_mem_cb(arg, segs, nseg, error, 0);
430 }
431 
432 /*------------------------------------------------------------------------*
433  *	usb_pc_load_mem_cb - BUS-DMA callback function
434  *------------------------------------------------------------------------*/
435 static void
436 usb_pc_load_mem_cb(void *arg, bus_dma_segment_t *segs,
437     int nseg, int error)
438 {
439 	usb_pc_common_mem_cb(arg, segs, nseg, error, 1);
440 }
441 
442 /*------------------------------------------------------------------------*
443  *	usb_pc_common_mem_cb - BUS-DMA callback function
444  *------------------------------------------------------------------------*/
445 static void
446 usb_pc_common_mem_cb(void *arg, bus_dma_segment_t *segs,
447     int nseg, int error, uint8_t isload)
448 {
449 	struct usb_dma_parent_tag *uptag;
450 	struct usb_page_cache *pc;
451 	struct usb_page *pg;
452 	usb_size_t rem;
453 	bus_size_t off;
454 	uint8_t owned;
455 
456 	pc = arg;
457 	uptag = pc->tag_parent;
458 
459 	/*
460 	 * XXX There is sometimes recursive locking here.
461 	 * XXX We should try to find a better solution.
462 	 * XXX Until further notice the "owned" variable does
463 	 * XXX the trick.
464 	 */
465 
466 	if (error) {
467 		goto done;
468 	}
469 
470 	off = 0;
471 	pg = pc->page_start;
472 	pg->physaddr = rounddown2(segs->ds_addr, USB_PAGE_SIZE);
473 	rem = segs->ds_addr & (USB_PAGE_SIZE - 1);
474 	pc->page_offset_buf = rem;
475 	pc->page_offset_end += rem;
476 #ifdef USB_DEBUG
477 	if (nseg > 1) {
478 		int x;
479 
480 		for (x = 0; x != nseg - 1; x++) {
481 			if (((segs[x].ds_addr + segs[x].ds_len) & (USB_PAGE_SIZE - 1)) ==
482 			    ((segs[x + 1].ds_addr & (USB_PAGE_SIZE - 1))))
483 				continue;
484 			/*
485 			 * This check verifies there is no page offset
486 			 * hole between any of the segments. See the
487 			 * BUS_DMA_KEEP_PG_OFFSET flag.
488 			 */
489 			DPRINTFN(0, "Page offset was not preserved\n");
490 			error = 1;
491 			goto done;
492 		}
493 	}
494 #endif
495 	while (pc->ismultiseg) {
496 		off += USB_PAGE_SIZE;
497 		if (off >= (segs->ds_len + rem)) {
498 			/* page crossing */
499 			nseg--;
500 			segs++;
501 			off = 0;
502 			rem = 0;
503 			if (nseg == 0)
504 				break;
505 		}
506 		pg++;
507 		pg->physaddr = rounddown2(segs->ds_addr + off, USB_PAGE_SIZE);
508 	}
509 
510 done:
511 	owned = mtx_owned(uptag->mtx);
512 	if (!owned)
513 		USB_MTX_LOCK(uptag->mtx);
514 
515 	uptag->dma_error = (error ? 1 : 0);
516 	if (isload) {
517 		(uptag->func) (uptag);
518 	} else {
519 		cv_broadcast(uptag->cv);
520 	}
521 	if (!owned)
522 		USB_MTX_UNLOCK(uptag->mtx);
523 }
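
/*
 * Numeric example of the offset bookkeeping above, assuming
 * USB_PAGE_SIZE is 4096: for a first segment starting at physical
 * address 0x12345678, "page_offset_buf" becomes 0x678, the first
 * usb_page gets physaddr 0x12345000, and each following usb_page entry
 * receives the page-aligned address of the next 4096-byte window of
 * the mapping.
 */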
524 
525 /*------------------------------------------------------------------------*
526  *	usb_pc_alloc_mem - allocate DMA'able memory
527  *
528  * Returns:
529  *    0: Success
530  * Else: Failure
531  *------------------------------------------------------------------------*/
532 uint8_t
533 usb_pc_alloc_mem(struct usb_page_cache *pc, struct usb_page *pg,
534     usb_size_t size, usb_size_t align)
535 {
536 	struct usb_dma_parent_tag *uptag;
537 	struct usb_dma_tag *utag;
538 	bus_dmamap_t map;
539 	void *ptr;
540 	int err;
541 
542 	uptag = pc->tag_parent;
543 
544 	if (align != 1) {
545 		/*
546 	         * The alignment must be greater than or equal to the
547 	         * "size", else the object can be split across two
548 	         * memory pages and we get a problem!
549 	         */
550 		while (align < size) {
551 			align *= 2;
552 			if (align == 0) {
553 				goto error;
554 			}
555 		}
556 #if 1
557 		/*
558 		 * XXX BUS-DMA workaround - FIXME later:
559 		 *
560 		 * We assume that the alignment at this point of
561 		 * the code is greater than or equal to the size and
562 		 * less than two times the size, so that if we double
563 		 * the size, the size will be greater than the
564 		 * alignment.
565 		 *
566 		 * The bus-dma system has a check for "alignment"
567 		 * being less than "size". If that check fails we end
568 		 * up using contigmalloc which is page based even for
569 		 * small allocations. Try to avoid that to save
570 		 * memory, hence we sometimes do a large number of
571 		 * small allocations!
572 		 */
573 		if (size <= (USB_PAGE_SIZE / 2)) {
574 			size *= 2;
575 		}
576 #endif
577 	}
578 	/* get the correct DMA tag */
579 	utag = usb_dma_tag_find(uptag, size, align);
580 	if (utag == NULL) {
581 		goto error;
582 	}
583 	/* allocate memory */
584 	if (bus_dmamem_alloc(
585 	    utag->tag, &ptr, (BUS_DMA_WAITOK | BUS_DMA_COHERENT), &map)) {
586 		goto error;
587 	}
588 	/* setup page cache */
589 	pc->buffer = ptr;
590 	pc->page_start = pg;
591 	pc->page_offset_buf = 0;
592 	pc->page_offset_end = size;
593 	pc->map = map;
594 	pc->tag = utag->tag;
595 	pc->ismultiseg = (align == 1);
596 
597 	USB_MTX_LOCK(uptag->mtx);
598 
599 	/* load memory into DMA */
600 	err = bus_dmamap_load(
601 	    utag->tag, map, ptr, size, &usb_pc_alloc_mem_cb,
602 	    pc, (BUS_DMA_WAITOK | BUS_DMA_COHERENT));
603 
604 	if (err == EINPROGRESS) {
605 		cv_wait(uptag->cv, uptag->mtx);
606 		err = 0;
607 	}
608 	USB_MTX_UNLOCK(uptag->mtx);
609 
610 	if (err || uptag->dma_error) {
611 		bus_dmamem_free(utag->tag, ptr, map);
612 		goto error;
613 	}
614 	memset(ptr, 0, size);
615 
616 	usb_pc_cpu_flush(pc);
617 
618 	return (0);
619 
620 error:
621 	/* reset most of the page cache */
622 	pc->buffer = NULL;
623 	pc->page_start = NULL;
624 	pc->page_offset_buf = 0;
625 	pc->page_offset_end = 0;
626 	pc->map = NULL;
627 	pc->tag = NULL;
628 	return (1);
629 }
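
/*
 * Hypothetical allocation sketch for usb_pc_alloc_mem(): the page
 * cache's "tag_parent" must already have been initialised by
 * usb_dma_tag_setup(). The "pg" array must be large enough for the
 * worst case; usb_bdma_work_loop() below reserves
 * (length / USB_PAGE_SIZE) + 2 entries per buffer.
 */
#if 0
static usb_error_t
example_alloc_dma_buffer(struct usb_page_cache *pc, struct usb_page *pg,
    usb_size_t size)
{
	if (usb_pc_alloc_mem(pc, pg, size, 1))
		return (USB_ERR_NOMEM);

	/* ... use the buffer via usbd_copy_in()/usbd_copy_out() ... */

	usb_pc_free_mem(pc);
	return (USB_ERR_NORMAL_COMPLETION);
}
#endif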
630 
631 /*------------------------------------------------------------------------*
632  *	usb_pc_free_mem - free DMA memory
633  *
634  * This function is NULL safe.
635  *------------------------------------------------------------------------*/
636 void
637 usb_pc_free_mem(struct usb_page_cache *pc)
638 {
639 	if (pc && pc->buffer) {
640 
641 		bus_dmamap_unload(pc->tag, pc->map);
642 
643 		bus_dmamem_free(pc->tag, pc->buffer, pc->map);
644 
645 		pc->buffer = NULL;
646 	}
647 }
648 
649 /*------------------------------------------------------------------------*
650  *	usb_pc_load_mem - load virtual memory into DMA
651  *
652  * Return values:
653  * 0: Success
654  * Else: Error
655  *------------------------------------------------------------------------*/
656 uint8_t
657 usb_pc_load_mem(struct usb_page_cache *pc, usb_size_t size, uint8_t sync)
658 {
659 	/* setup page cache */
660 	pc->page_offset_buf = 0;
661 	pc->page_offset_end = size;
662 	pc->ismultiseg = 1;
663 
664 	USB_MTX_ASSERT(pc->tag_parent->mtx, MA_OWNED);
665 
666 	if (size > 0) {
667 		if (sync) {
668 			struct usb_dma_parent_tag *uptag;
669 			int err;
670 
671 			uptag = pc->tag_parent;
672 
673 			/*
674 			 * We have to unload the previously loaded DMA
675 			 * pages before trying to load new ones!
676 			 */
677 			bus_dmamap_unload(pc->tag, pc->map);
678 
679 			/*
680 			 * Try to load memory into DMA.
681 			 */
682 			err = bus_dmamap_load(
683 			    pc->tag, pc->map, pc->buffer, size,
684 			    &usb_pc_alloc_mem_cb, pc, BUS_DMA_WAITOK);
685 			if (err == EINPROGRESS) {
686 				cv_wait(uptag->cv, uptag->mtx);
687 				err = 0;
688 			}
689 			if (err || uptag->dma_error) {
690 				return (1);
691 			}
692 		} else {
693 
694 			/*
695 			 * We have to unload the previously loaded DMA
696 			 * pages before trying to load new ones!
697 			 */
698 			bus_dmamap_unload(pc->tag, pc->map);
699 
700 			/*
701 			 * Try to load memory into DMA. The callback
702 			 * will be called in all cases:
703 			 */
704 			if (bus_dmamap_load(
705 			    pc->tag, pc->map, pc->buffer, size,
706 			    &usb_pc_load_mem_cb, pc, BUS_DMA_WAITOK)) {
707 			}
708 		}
709 	} else {
710 		if (!sync) {
711 			/*
712 			 * Call callback so that refcount is decremented
713 			 * properly:
714 			 */
715 			pc->tag_parent->dma_error = 0;
716 			(pc->tag_parent->func) (pc->tag_parent);
717 		}
718 	}
719 	return (0);
720 }
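
/*
 * Hypothetical caller sketch for usb_pc_load_mem(): the parent tag's
 * mutex must be held. With "sync" non-zero the function only returns
 * once the mapping is complete; with "sync" zero, completion is
 * reported through the parent tag's callback instead (see
 * usb_bdma_done_event() below).
 */
#if 0
static uint8_t
example_load_buffer(struct usb_page_cache *pc, usb_size_t size)
{
	uint8_t err;

	USB_MTX_LOCK(pc->tag_parent->mtx);
	err = usb_pc_load_mem(pc, size, 1);	/* synchronous load */
	USB_MTX_UNLOCK(pc->tag_parent->mtx);

	return (err);
}
#endif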
721 
722 /*------------------------------------------------------------------------*
723  *	usb_pc_cpu_invalidate - invalidate CPU cache
724  *------------------------------------------------------------------------*/
725 void
726 usb_pc_cpu_invalidate(struct usb_page_cache *pc)
727 {
728 	if (pc->page_offset_end == pc->page_offset_buf) {
729 		/* nothing has been loaded into this page cache! */
730 		return;
731 	}
732 
733 	/*
734 	 * TODO: We currently do XXX_POSTREAD and XXX_PREREAD at the
735 	 * same time, but in the future we should try to isolate the
736 	 * different cases to optimise the code. --HPS
737 	 */
738 	bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_POSTREAD);
739 	bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_PREREAD);
740 }
741 
742 /*------------------------------------------------------------------------*
743  *	usb_pc_cpu_flush - flush CPU cache
744  *------------------------------------------------------------------------*/
745 void
746 usb_pc_cpu_flush(struct usb_page_cache *pc)
747 {
748 	if (pc->page_offset_end == pc->page_offset_buf) {
749 		/* nothing has been loaded into this page cache! */
750 		return;
751 	}
752 	bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_PREWRITE);
753 }
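
/*
 * Hypothetical single-buffer round trip showing when the two cache
 * helpers above matter on platforms without cache-coherent DMA: flush
 * after the CPU has written data the device will read, invalidate
 * before the CPU reads data the device has written. The buffer names
 * and lengths are placeholders.
 */
#if 0
static void
example_cache_round_trip(struct usb_page_cache *pc,
    const void *tx_data, usb_frlength_t tx_len,
    void *rx_data, usb_frlength_t rx_len)
{
	usbd_copy_in(pc, 0, tx_data, tx_len);	/* CPU writes the buffer */
	usb_pc_cpu_flush(pc);			/* before the device reads it */

	/* ... the hardware reads the data and writes a reply ... */

	usb_pc_cpu_invalidate(pc);	/* before the CPU reads the reply */
	usbd_copy_out(pc, 0, rx_data, rx_len);
}
#endif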
754 
755 /*------------------------------------------------------------------------*
756  *	usb_pc_dmamap_create - create a DMA map
757  *
758  * Returns:
759  *    0: Success
760  * Else: Failure
761  *------------------------------------------------------------------------*/
762 uint8_t
763 usb_pc_dmamap_create(struct usb_page_cache *pc, usb_size_t size)
764 {
765 	struct usb_xfer_root *info;
766 	struct usb_dma_tag *utag;
767 
768 	/* get info */
769 	info = USB_DMATAG_TO_XROOT(pc->tag_parent);
770 
771 	/* sanity check */
772 	if (info == NULL) {
773 		goto error;
774 	}
775 	utag = usb_dma_tag_find(pc->tag_parent, size, 1);
776 	if (utag == NULL) {
777 		goto error;
778 	}
779 	/* create DMA map */
780 	if (bus_dmamap_create(utag->tag, 0, &pc->map)) {
781 		goto error;
782 	}
783 	pc->tag = utag->tag;
784 	return 0;			/* success */
785 
786 error:
787 	pc->map = NULL;
788 	pc->tag = NULL;
789 	return 1;			/* failure */
790 }
791 
792 /*------------------------------------------------------------------------*
793  *	usb_pc_dmamap_destroy
794  *
795  * This function is NULL safe.
796  *------------------------------------------------------------------------*/
797 void
798 usb_pc_dmamap_destroy(struct usb_page_cache *pc)
799 {
800 	if (pc && pc->tag) {
801 		bus_dmamap_destroy(pc->tag, pc->map);
802 		pc->tag = NULL;
803 		pc->map = NULL;
804 	}
805 }
806 
807 /*------------------------------------------------------------------------*
808  *	usb_dma_tag_find - factored out code
809  *------------------------------------------------------------------------*/
810 struct usb_dma_tag *
811 usb_dma_tag_find(struct usb_dma_parent_tag *udpt,
812     usb_size_t size, usb_size_t align)
813 {
814 	struct usb_dma_tag *udt;
815 	uint8_t nudt;
816 
817 	USB_ASSERT(align > 0, ("Invalid parameter align = 0\n"));
818 	USB_ASSERT(size > 0, ("Invalid parameter size = 0\n"));
819 
820 	udt = udpt->utag_first;
821 	nudt = udpt->utag_max;
822 
823 	while (nudt--) {
824 
825 		if (udt->align == 0) {
826 			usb_dma_tag_create(udt, size, align);
827 			if (udt->tag == NULL) {
828 				return (NULL);
829 			}
830 			udt->align = align;
831 			udt->size = size;
832 			return (udt);
833 		}
834 		if ((udt->align == align) && (udt->size == size)) {
835 			return (udt);
836 		}
837 		udt++;
838 	}
839 	return (NULL);
840 }
841 
842 /*------------------------------------------------------------------------*
843  *	usb_dma_tag_setup - initialise USB DMA tags
844  *------------------------------------------------------------------------*/
845 void
846 usb_dma_tag_setup(struct usb_dma_parent_tag *udpt,
847     struct usb_dma_tag *udt, bus_dma_tag_t dmat,
848     struct mtx *mtx, usb_dma_callback_t *func,
849     uint8_t ndmabits, uint8_t nudt)
850 {
851 	memset(udpt, 0, sizeof(*udpt));
852 
853 	/* sanity checking */
854 	if ((nudt == 0) ||
855 	    (ndmabits == 0) ||
856 	    (mtx == NULL)) {
857 		/* something is corrupt */
858 		return;
859 	}
860 	/* initialise condition variable */
861 	cv_init(udpt->cv, "USB DMA CV");
862 
863 	/* store some information */
864 	udpt->mtx = mtx;
865 	udpt->func = func;
866 	udpt->tag = dmat;
867 	udpt->utag_first = udt;
868 	udpt->utag_max = nudt;
869 	udpt->dma_bits = ndmabits;
870 
871 	while (nudt--) {
872 		memset(udt, 0, sizeof(*udt));
873 		udt->tag_parent = udpt;
874 		udt++;
875 	}
876 }
877 
878 /*------------------------------------------------------------------------*
879  *	usb_dma_tag_unsetup - factored out code
880  *------------------------------------------------------------------------*/
881 void
882 usb_dma_tag_unsetup(struct usb_dma_parent_tag *udpt)
883 {
884 	struct usb_dma_tag *udt;
885 	uint8_t nudt;
886 
887 	udt = udpt->utag_first;
888 	nudt = udpt->utag_max;
889 
890 	while (nudt--) {
891 
892 		if (udt->align) {
893 			/* destroy the USB DMA tag */
894 			usb_dma_tag_destroy(udt);
895 			udt->align = 0;
896 		}
897 		udt++;
898 	}
899 
900 	if (udpt->utag_max) {
901 		/* destroy the condition variable */
902 		cv_destroy(udpt->cv);
903 	}
904 }
905 
906 /*------------------------------------------------------------------------*
907  *	usb_bdma_work_loop
908  *
909  * This function handles loading of virtual buffers into DMA and is
910  * only called when "dma_refcount" is zero.
911  *------------------------------------------------------------------------*/
912 void
913 usb_bdma_work_loop(struct usb_xfer_queue *pq)
914 {
915 	struct usb_xfer_root *info;
916 	struct usb_xfer *xfer;
917 	usb_frcount_t nframes;
918 
919 	xfer = pq->curr;
920 	info = xfer->xroot;
921 
922 	USB_MTX_ASSERT(info->xfer_mtx, MA_OWNED);
923 
924 	if (xfer->error) {
925 		/* some error happened */
926 		USB_BUS_LOCK(info->bus);
927 		usbd_transfer_done(xfer, 0);
928 		USB_BUS_UNLOCK(info->bus);
929 		return;
930 	}
931 	if (!xfer->flags_int.bdma_setup) {
932 		struct usb_page *pg;
933 		usb_frlength_t frlength_0;
934 		uint8_t isread;
935 
936 		xfer->flags_int.bdma_setup = 1;
937 
938 		/* reset BUS-DMA load state */
939 
940 		info->dma_error = 0;
941 
942 		if (xfer->flags_int.isochronous_xfr) {
943 			/* only one frame buffer */
944 			nframes = 1;
945 			frlength_0 = xfer->sumlen;
946 		} else {
947 			/* can be multiple frame buffers */
948 			nframes = xfer->nframes;
949 			frlength_0 = xfer->frlengths[0];
950 		}
951 
952 		/*
953 		 * Set DMA direction first. This is needed to
954 		 * select the correct cache invalidate and cache
955 		 * flush operations.
956 		 */
957 		isread = USB_GET_DATA_ISREAD(xfer);
958 		pg = xfer->dma_page_ptr;
959 
960 		if (xfer->flags_int.control_xfr &&
961 		    xfer->flags_int.control_hdr) {
962 			/* special case */
963 			if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
964 				/* The device controller writes to memory */
965 				xfer->frbuffers[0].isread = 1;
966 			} else {
967 				/* The host controller reads from memory */
968 				xfer->frbuffers[0].isread = 0;
969 			}
970 		} else {
971 			/* default case */
972 			xfer->frbuffers[0].isread = isread;
973 		}
974 
975 		/*
976 		 * Setup the "page_start" pointer which points to an array of
977 		 * USB pages where information about the physical address of a
978 		 * page will be stored. Also initialise the "isread" field of
979 		 * the USB page caches.
980 		 */
981 		xfer->frbuffers[0].page_start = pg;
982 
983 		info->dma_nframes = nframes;
984 		info->dma_currframe = 0;
985 		info->dma_frlength_0 = frlength_0;
986 
987 		pg += (frlength_0 / USB_PAGE_SIZE);
988 		pg += 2;
989 
990 		while (--nframes > 0) {
991 			xfer->frbuffers[nframes].isread = isread;
992 			xfer->frbuffers[nframes].page_start = pg;
993 
994 			pg += (xfer->frlengths[nframes] / USB_PAGE_SIZE);
995 			pg += 2;
996 		}
997 
998 	}
999 	if (info->dma_error) {
1000 		USB_BUS_LOCK(info->bus);
1001 		usbd_transfer_done(xfer, USB_ERR_DMA_LOAD_FAILED);
1002 		USB_BUS_UNLOCK(info->bus);
1003 		return;
1004 	}
1005 	if (info->dma_currframe != info->dma_nframes) {
1006 
1007 		if (info->dma_currframe == 0) {
1008 			/* special case */
1009 			usb_pc_load_mem(xfer->frbuffers,
1010 			    info->dma_frlength_0, 0);
1011 		} else {
1012 			/* default case */
1013 			nframes = info->dma_currframe;
1014 			usb_pc_load_mem(xfer->frbuffers + nframes,
1015 			    xfer->frlengths[nframes], 0);
1016 		}
1017 
1018 		/* advance frame index */
1019 		info->dma_currframe++;
1020 
1021 		return;
1022 	}
1023 	/* go ahead */
1024 	usb_bdma_pre_sync(xfer);
1025 
1026 	/* start loading next USB transfer, if any */
1027 	usb_command_wrapper(pq, NULL);
1028 
1029 	/* finally start the hardware */
1030 	usbd_pipe_enter(xfer);
1031 }
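
/*
 * Worked example of the "page_start" spacing used above, assuming
 * USB_PAGE_SIZE is 4096: a frame of 5000 bytes advances "pg" by
 * (5000 / 4096) + 2 = 3 entries, which covers the worst case of a
 * 5000 byte buffer starting just before a page boundary and thus
 * straddling three pages.
 */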
1032 
1033 /*------------------------------------------------------------------------*
1034  *	usb_bdma_done_event
1035  *
1036  * This function is called when BUS-DMA has loaded virtual memory
1037  * into DMA, if any.
1038  *------------------------------------------------------------------------*/
1039 void
1040 usb_bdma_done_event(struct usb_dma_parent_tag *udpt)
1041 {
1042 	struct usb_xfer_root *info;
1043 
1044 	info = USB_DMATAG_TO_XROOT(udpt);
1045 
1046 	USB_MTX_ASSERT(info->xfer_mtx, MA_OWNED);
1047 
1048 	/* copy error */
1049 	info->dma_error = udpt->dma_error;
1050 
1051 	/* enter workloop again */
1052 	usb_command_wrapper(&info->dma_q,
1053 	    info->dma_q.curr);
1054 }
1055 
1056 /*------------------------------------------------------------------------*
1057  *	usb_bdma_pre_sync
1058  *
1059  * This function handles DMA synchronisation that must be done before
1060  * a USB transfer is started.
1061  *------------------------------------------------------------------------*/
1062 void
1063 usb_bdma_pre_sync(struct usb_xfer *xfer)
1064 {
1065 	struct usb_page_cache *pc;
1066 	usb_frcount_t nframes;
1067 
1068 	if (xfer->flags_int.isochronous_xfr) {
1069 		/* only one frame buffer */
1070 		nframes = 1;
1071 	} else {
1072 		/* can be multiple frame buffers */
1073 		nframes = xfer->nframes;
1074 	}
1075 
1076 	pc = xfer->frbuffers;
1077 
1078 	while (nframes--) {
1079 
1080 		if (pc->isread) {
1081 			usb_pc_cpu_invalidate(pc);
1082 		} else {
1083 			usb_pc_cpu_flush(pc);
1084 		}
1085 		pc++;
1086 	}
1087 }
1088 
1089 /*------------------------------------------------------------------------*
1090  *	usb_bdma_post_sync
1091  *
1092  * This function handles DMA synchronisation that must be done after
1093  * a USB transfer is complete.
1094  *------------------------------------------------------------------------*/
1095 void
1096 usb_bdma_post_sync(struct usb_xfer *xfer)
1097 {
1098 	struct usb_page_cache *pc;
1099 	usb_frcount_t nframes;
1100 
1101 	if (xfer->flags_int.isochronous_xfr) {
1102 		/* only one frame buffer */
1103 		nframes = 1;
1104 	} else {
1105 		/* can be multiple frame buffers */
1106 		nframes = xfer->nframes;
1107 	}
1108 
1109 	pc = xfer->frbuffers;
1110 
1111 	while (nframes--) {
1112 		if (pc->isread) {
1113 			usb_pc_cpu_invalidate(pc);
1114 		}
1115 		pc++;
1116 	}
1117 }
1118 
1119 #endif
1120