1 /* $FreeBSD$ */ 2 /*- 3 * Copyright (c) 2008 Hans Petter Selasky. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 
25 */ 26 27 #ifdef USB_GLOBAL_INCLUDE_FILE 28 #include USB_GLOBAL_INCLUDE_FILE 29 #else 30 #include <sys/stdint.h> 31 #include <sys/stddef.h> 32 #include <sys/param.h> 33 #include <sys/queue.h> 34 #include <sys/types.h> 35 #include <sys/systm.h> 36 #include <sys/kernel.h> 37 #include <sys/bus.h> 38 #include <sys/module.h> 39 #include <sys/lock.h> 40 #include <sys/mutex.h> 41 #include <sys/condvar.h> 42 #include <sys/sysctl.h> 43 #include <sys/sx.h> 44 #include <sys/unistd.h> 45 #include <sys/callout.h> 46 #include <sys/malloc.h> 47 #include <sys/priv.h> 48 49 #include <dev/usb/usb.h> 50 #include <dev/usb/usbdi.h> 51 #include <dev/usb/usbdi_util.h> 52 53 #define USB_DEBUG_VAR usb_debug 54 55 #include <dev/usb/usb_core.h> 56 #include <dev/usb/usb_busdma.h> 57 #include <dev/usb/usb_process.h> 58 #include <dev/usb/usb_transfer.h> 59 #include <dev/usb/usb_device.h> 60 #include <dev/usb/usb_util.h> 61 #include <dev/usb/usb_debug.h> 62 63 #include <dev/usb/usb_controller.h> 64 #include <dev/usb/usb_bus.h> 65 #endif /* USB_GLOBAL_INCLUDE_FILE */ 66 67 #if USB_HAVE_BUSDMA 68 static void usb_dma_tag_create(struct usb_dma_tag *, usb_size_t, usb_size_t); 69 static void usb_dma_tag_destroy(struct usb_dma_tag *); 70 static void usb_dma_lock_cb(void *, bus_dma_lock_op_t); 71 static void usb_pc_alloc_mem_cb(void *, bus_dma_segment_t *, int, int); 72 static void usb_pc_load_mem_cb(void *, bus_dma_segment_t *, int, int); 73 static void usb_pc_common_mem_cb(void *, bus_dma_segment_t *, int, int, 74 uint8_t); 75 #endif 76 77 /*------------------------------------------------------------------------* 78 * usbd_get_page - lookup DMA-able memory for the given offset 79 * 80 * NOTE: Only call this function when the "page_cache" structure has 81 * been properly initialized ! 
 *------------------------------------------------------------------------*/
void
usbd_get_page(struct usb_page_cache *pc, usb_frlength_t offset,
    struct usb_page_search *res)
{
#if USB_HAVE_BUSDMA
	struct usb_page *page;

	if (pc->page_start) {

		/* Case 1 - something has been loaded into DMA */

		if (pc->buffer) {

			/* Case 1a - Kernel Virtual Address */

			res->buffer = USB_ADD_BYTES(pc->buffer, offset);
		}
		offset += pc->page_offset_buf;

		/* compute destination page */

		page = pc->page_start;

		if (pc->ismultiseg) {

			page += (offset / USB_PAGE_SIZE);

			offset %= USB_PAGE_SIZE;

			/* "length" is the number of bytes left in this page */
			res->length = USB_PAGE_SIZE - offset;
			res->physaddr = page->physaddr + offset;
		} else {
			/* single segment - no page-size limit on the result */
			res->length = (usb_size_t)-1;
			res->physaddr = page->physaddr + offset;
		}
		if (!pc->buffer) {

			/* Case 1b - Non Kernel Virtual Address */

			res->buffer = USB_ADD_BYTES(page->buffer, offset);
		}
		return;
	}
#endif
	/* Case 2 - Plain PIO */

	res->buffer = USB_ADD_BYTES(pc->buffer, offset);
	res->length = (usb_size_t)-1;
#if USB_HAVE_BUSDMA
	res->physaddr = 0;
#endif
}

/*------------------------------------------------------------------------*
 *  usbd_copy_in - copy directly to DMA-able memory
 *------------------------------------------------------------------------*/
void
usbd_copy_in(struct usb_page_cache *cache, usb_frlength_t offset,
    const void *ptr, usb_frlength_t len)
{
	struct usb_page_search buf_res;

	/* copy page-by-page, since pages may not be physically adjacent */
	while (len != 0) {

		usbd_get_page(cache, offset, &buf_res);

		if (buf_res.length > len) {
			buf_res.length = len;
		}
		memcpy(buf_res.buffer, ptr, buf_res.length);

		offset += buf_res.length;
		len -= buf_res.length;
		ptr = USB_ADD_BYTES(ptr, buf_res.length);
	}
}

/*------------------------------------------------------------------------*
 *  usbd_copy_in_user - copy directly to DMA-able memory from userland
 *
 * Return values:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
#if USB_HAVE_USER_IO
int
usbd_copy_in_user(struct usb_page_cache *cache, usb_frlength_t offset,
    const void *ptr, usb_frlength_t len)
{
	struct usb_page_search buf_res;
	int error;

	while (len != 0) {

		usbd_get_page(cache, offset, &buf_res);

		if (buf_res.length > len) {
			buf_res.length = len;
		}
		/* "copyin()" can fault; propagate its error code */
		error = copyin(ptr, buf_res.buffer, buf_res.length);
		if (error)
			return (error);

		offset += buf_res.length;
		len -= buf_res.length;
		ptr = USB_ADD_BYTES(ptr, buf_res.length);
	}
	return (0);			/* success */
}
#endif

/*------------------------------------------------------------------------*
 *  usbd_m_copy_in - copy a mbuf chain directly into DMA-able memory
 *------------------------------------------------------------------------*/
#if USB_HAVE_MBUF
struct usb_m_copy_in_arg {
	struct usb_page_cache *cache;	/* destination page cache */
	usb_frlength_t dst_offset;	/* running destination offset */
};

static int
usbd_m_copy_in_cb(void *arg, void *src, uint32_t count)
{
	register struct usb_m_copy_in_arg *ua = arg;

	usbd_copy_in(ua->cache, ua->dst_offset, src, count);
	ua->dst_offset += count;
	return (0);
}

void
usbd_m_copy_in(struct usb_page_cache *cache, usb_frlength_t dst_offset,
    struct mbuf *m, usb_size_t src_offset, usb_frlength_t src_len)
{
	struct usb_m_copy_in_arg arg = {cache, dst_offset};
	/* walk the mbuf chain; the callback cannot fail, so ignore result */
	(void) m_apply(m, src_offset, src_len, &usbd_m_copy_in_cb, &arg);
}
#endif

/*------------------------------------------------------------------------*
 *  usb_uiomove - factored out code
 *------------------------------------------------------------------------*/
#if USB_HAVE_USER_IO
int
usb_uiomove(struct usb_page_cache *pc, struct uio *uio,
    usb_frlength_t pc_offset, usb_frlength_t len)
{
	struct usb_page_search res;
	int error = 0;

	while (len != 0) {

		usbd_get_page(pc, pc_offset, &res);

		if (res.length > len) {
			res.length = len;
		}
		/*
		 * "uiomove()" can sleep so one needs to make a wrapper,
		 * exiting the mutex and checking things
		 */
		error = uiomove(res.buffer, res.length, uio);

		if (error) {
			break;
		}
		pc_offset += res.length;
		len -= res.length;
	}
	return (error);
}
#endif

/*------------------------------------------------------------------------*
 *  usbd_copy_out - copy directly from DMA-able memory
 *------------------------------------------------------------------------*/
void
usbd_copy_out(struct usb_page_cache *cache, usb_frlength_t offset,
    void *ptr, usb_frlength_t len)
{
	struct usb_page_search res;

	while (len != 0) {

		usbd_get_page(cache, offset, &res);

		if (res.length > len) {
			res.length = len;
		}
		memcpy(ptr, res.buffer, res.length);

		offset += res.length;
		len -= res.length;
		ptr = USB_ADD_BYTES(ptr, res.length);
	}
}

/*------------------------------------------------------------------------*
 *  usbd_copy_out_user - copy directly from DMA-able memory to userland
 *
 * Return values:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
#if USB_HAVE_USER_IO
int
usbd_copy_out_user(struct usb_page_cache *cache, usb_frlength_t offset,
    void *ptr, usb_frlength_t len)
{
	struct usb_page_search res;
	int error;

	while (len != 0) {

		usbd_get_page(cache, offset, &res);

		if (res.length > len) {
			res.length = len;
		}
		/* "copyout()" can fault; propagate its error code */
		error = copyout(res.buffer, ptr, res.length);
		if (error)
			return (error);

		offset += res.length;
		len -= res.length;
		ptr = USB_ADD_BYTES(ptr, res.length);
	}
	return (0);			/* success */
}
#endif

/*------------------------------------------------------------------------*
 *  usbd_frame_zero - zero DMA-able memory
 *------------------------------------------------------------------------*/
void
usbd_frame_zero(struct usb_page_cache *cache, usb_frlength_t offset,
    usb_frlength_t len)
{
	struct usb_page_search res;

	while (len != 0) {

		usbd_get_page(cache, offset, &res);

		if (res.length > len) {
			res.length = len;
		}
		memset(res.buffer, 0, res.length);

		offset += res.length;
		len -= res.length;
	}
}

#if USB_HAVE_BUSDMA

/*------------------------------------------------------------------------*
 *  usb_dma_lock_cb - dummy callback
 *------------------------------------------------------------------------*/
static void
usb_dma_lock_cb(void *arg, bus_dma_lock_op_t op)
{
	/* we use "mtx_owned()" instead of this function */
}

/*------------------------------------------------------------------------*
 *  usb_dma_tag_create - allocate a DMA tag
 *
 * NOTE: If the "align" parameter has a value of 1 the DMA-tag will
 * allow multi-segment mappings. Else all mappings are single-segment.
 *------------------------------------------------------------------------*/
static void
usb_dma_tag_create(struct usb_dma_tag *udt,
    usb_size_t size, usb_size_t align)
{
	bus_dma_tag_t tag;

	if (bus_dma_tag_create
	    ( /* parent    */ udt->tag_parent->tag,
	     /* alignment */ align,
	     /* boundary  */ 0,
	     /* lowaddr   */ (2ULL << (udt->tag_parent->dma_bits - 1)) - 1,
	     /* highaddr  */ BUS_SPACE_MAXADDR,
	     /* filter    */ NULL,
	     /* filterarg */ NULL,
	     /* maxsize   */ size,
	     /* nsegments */ (align == 1 && size > 1) ?
	    (2 + (size / USB_PAGE_SIZE)) : 1,
	     /* maxsegsz  */ (align == 1 && size > USB_PAGE_SIZE) ?
	    USB_PAGE_SIZE : size,
	     /* flags     */ BUS_DMA_KEEP_PG_OFFSET,
	     /* lockfn    */ &usb_dma_lock_cb,
	     /* lockarg   */ NULL,
	    &tag)) {
		tag = NULL;
	}
	/* on failure "tag" is NULL, which the callers check for */
	udt->tag = tag;
}

/*------------------------------------------------------------------------*
 *  usb_dma_tag_destroy - free a DMA tag
 *------------------------------------------------------------------------*/
static void
usb_dma_tag_destroy(struct usb_dma_tag *udt)
{
	bus_dma_tag_destroy(udt->tag);
}

/*------------------------------------------------------------------------*
 *  usb_pc_alloc_mem_cb - BUS-DMA callback function
 *------------------------------------------------------------------------*/
static void
usb_pc_alloc_mem_cb(void *arg, bus_dma_segment_t *segs,
    int nseg, int error)
{
	usb_pc_common_mem_cb(arg, segs, nseg, error, 0);
}

/*------------------------------------------------------------------------*
 *  usb_pc_load_mem_cb - BUS-DMA callback function
 *------------------------------------------------------------------------*/
static void
usb_pc_load_mem_cb(void *arg, bus_dma_segment_t *segs,
    int nseg, int error)
{
	usb_pc_common_mem_cb(arg, segs, nseg, error, 1);
}

/*------------------------------------------------------------------------*
 *  usb_pc_common_mem_cb - BUS-DMA callback function
 *
 * Records the physical address of every USB page covered by the
 * returned DMA segments and then signals completion, either by
 * invoking the parent tag's callback ("isload" != 0) or by waking up
 * a thread sleeping in "usb_pc_alloc_mem()"/"usb_pc_load_mem()".
 *------------------------------------------------------------------------*/
static void
usb_pc_common_mem_cb(void *arg, bus_dma_segment_t *segs,
    int nseg, int error, uint8_t isload)
{
	struct usb_dma_parent_tag *uptag;
	struct usb_page_cache *pc;
	struct usb_page *pg;
	usb_size_t rem;
	bus_size_t off;
	uint8_t owned;

	pc = arg;
	uptag = pc->tag_parent;

	/*
	 * XXX There is sometimes recursive locking here.
	 * XXX We should try to find a better solution.
	 * XXX Until further the "owned" variable does
	 * XXX the trick.
	 */

	if (error) {
		goto done;
	}

	off = 0;
	pg = pc->page_start;
	pg->physaddr = segs->ds_addr & ~(USB_PAGE_SIZE - 1);
	/* "rem" is the offset of the buffer within its first page */
	rem = segs->ds_addr & (USB_PAGE_SIZE - 1);
	pc->page_offset_buf = rem;
	pc->page_offset_end += rem;
#ifdef USB_DEBUG
	if (rem != (USB_P2U(pc->buffer) & (USB_PAGE_SIZE - 1))) {
		/*
		 * This check verifies that the physical address is correct:
		 */
		DPRINTFN(0, "Page offset was not preserved\n");
		error = 1;
		goto done;
	}
#endif
	/* record the page-aligned physical address of every USB page */
	while (1) {
		off += USB_PAGE_SIZE;
		if (off >= (segs->ds_len + rem)) {
			/* page crossing */
			nseg--;
			segs++;
			off = 0;
			rem = 0;
			if (nseg == 0)
				break;
		}
		pg++;
		pg->physaddr = (segs->ds_addr + off) & ~(USB_PAGE_SIZE - 1);
	}

done:
	owned = mtx_owned(uptag->mtx);
	if (!owned)
		mtx_lock(uptag->mtx);

	uptag->dma_error = (error ? 1 : 0);
	if (isload) {
		(uptag->func) (uptag);
	} else {
		cv_broadcast(uptag->cv);
	}
	if (!owned)
		mtx_unlock(uptag->mtx);
}

/*------------------------------------------------------------------------*
 *  usb_pc_alloc_mem - allocate DMA'able memory
 *
 * Returns:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_alloc_mem(struct usb_page_cache *pc, struct usb_page *pg,
    usb_size_t size, usb_size_t align)
{
	struct usb_dma_parent_tag *uptag;
	struct usb_dma_tag *utag;
	bus_dmamap_t map;
	void *ptr;
	int err;

	uptag = pc->tag_parent;

	if (align != 1) {
		/*
		 * The alignment must be greater or equal to the
		 * "size" else the object can be split between two
		 * memory pages and we get a problem!
		 */
		while (align < size) {
			align *= 2;
			if (align == 0) {
				goto error;
			}
		}
#if 1
		/*
		 * XXX BUS-DMA workaround - FIXME later:
		 *
		 * We assume that the alignment at this point of
		 * the code is greater than or equal to the size and
		 * less than two times the size, so that if we double
		 * the size, the size will be greater than the
		 * alignment.
		 *
		 * The bus-dma system has a check for "alignment"
		 * being less than "size". If that check fails we end
		 * up using contigmalloc which is page based even for
		 * small allocations. Try to avoid that to save
		 * memory, hence we sometimes do a large number of
		 * small allocations!
		 */
		if (size <= (USB_PAGE_SIZE / 2)) {
			size *= 2;
		}
#endif
	}
	/* get the correct DMA tag */
	utag = usb_dma_tag_find(uptag, size, align);
	if (utag == NULL) {
		goto error;
	}
	/* allocate memory */
	if (bus_dmamem_alloc(
	    utag->tag, &ptr, (BUS_DMA_WAITOK | BUS_DMA_COHERENT), &map)) {
		goto error;
	}
	/* setup page cache */
	pc->buffer = ptr;
	pc->page_start = pg;
	pc->page_offset_buf = 0;
	pc->page_offset_end = size;
	pc->map = map;
	pc->tag = utag->tag;
	pc->ismultiseg = (align == 1);

	mtx_lock(uptag->mtx);

	/* load memory into DMA */
	err = bus_dmamap_load(
	    utag->tag, map, ptr, size, &usb_pc_alloc_mem_cb,
	    pc, (BUS_DMA_WAITOK | BUS_DMA_COHERENT));

	if (err == EINPROGRESS) {
		/* wait for "usb_pc_common_mem_cb()" to broadcast */
		cv_wait(uptag->cv, uptag->mtx);
		err = 0;
	}
	mtx_unlock(uptag->mtx);

	if (err || uptag->dma_error) {
		bus_dmamem_free(utag->tag, ptr, map);
		goto error;
	}
	memset(ptr, 0, size);

	usb_pc_cpu_flush(pc);

	return (0);

error:
	/* reset most of the page cache */
	pc->buffer = NULL;
	pc->page_start = NULL;
	pc->page_offset_buf = 0;
	pc->page_offset_end = 0;
	pc->map = NULL;
	pc->tag = NULL;
	return (1);
}

/*------------------------------------------------------------------------*
 *  usb_pc_free_mem - free DMA memory
 *
 * This function is NULL safe.
 *------------------------------------------------------------------------*/
void
usb_pc_free_mem(struct usb_page_cache *pc)
{
	if (pc && pc->buffer) {

		bus_dmamap_unload(pc->tag, pc->map);

		bus_dmamem_free(pc->tag, pc->buffer, pc->map);

		/* mark page cache as freed */
		pc->buffer = NULL;
	}
}

/*------------------------------------------------------------------------*
 *  usb_pc_load_mem - load virtual memory into DMA
 *
 * If "sync" is non-zero this function sleeps until the load is
 * complete. Else the load completes asynchronously through the
 * parent tag's callback.
 *
 * Return values:
 *    0: Success
 * Else: Error
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_load_mem(struct usb_page_cache *pc, usb_size_t size, uint8_t sync)
{
	/* setup page cache */
	pc->page_offset_buf = 0;
	pc->page_offset_end = size;
	pc->ismultiseg = 1;

	mtx_assert(pc->tag_parent->mtx, MA_OWNED);

	if (size > 0) {
		if (sync) {
			struct usb_dma_parent_tag *uptag;
			int err;

			uptag = pc->tag_parent;

			/*
			 * We have to unload the previous loaded DMA
			 * pages before trying to load a new one!
			 */
			bus_dmamap_unload(pc->tag, pc->map);

			/*
			 * Try to load memory into DMA.
			 */
			err = bus_dmamap_load(
			    pc->tag, pc->map, pc->buffer, size,
			    &usb_pc_alloc_mem_cb, pc, BUS_DMA_WAITOK);
			if (err == EINPROGRESS) {
				/* wait for the BUS-DMA callback */
				cv_wait(uptag->cv, uptag->mtx);
				err = 0;
			}
			if (err || uptag->dma_error) {
				return (1);
			}
		} else {

			/*
			 * We have to unload the previous loaded DMA
			 * pages before trying to load a new one!
			 */
			bus_dmamap_unload(pc->tag, pc->map);

			/*
			 * Try to load memory into DMA. The callback
			 * will be called in all cases:
			 */
			if (bus_dmamap_load(
			    pc->tag, pc->map, pc->buffer, size,
			    &usb_pc_load_mem_cb, pc, BUS_DMA_WAITOK)) {
			}
		}
	} else {
		if (!sync) {
			/*
			 * Call callback so that refcount is decremented
			 * properly:
			 */
			pc->tag_parent->dma_error = 0;
			(pc->tag_parent->func) (pc->tag_parent);
		}
	}
	return (0);
}

/*------------------------------------------------------------------------*
 *  usb_pc_cpu_invalidate - invalidate CPU cache
 *------------------------------------------------------------------------*/
void
usb_pc_cpu_invalidate(struct usb_page_cache *pc)
{
	if (pc->page_offset_end == pc->page_offset_buf) {
		/* nothing has been loaded into this page cache! */
		return;
	}

	/*
	 * TODO: We currently do XXX_POSTREAD and XXX_PREREAD at the
	 * same time, but in the future we should try to isolate the
	 * different cases to optimise the code. --HPS
	 */
	bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_PREREAD);
}

/*------------------------------------------------------------------------*
 *  usb_pc_cpu_flush - flush CPU cache
 *------------------------------------------------------------------------*/
void
usb_pc_cpu_flush(struct usb_page_cache *pc)
{
	if (pc->page_offset_end == pc->page_offset_buf) {
		/* nothing has been loaded into this page cache! */
		return;
	}
	bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_PREWRITE);
}

/*------------------------------------------------------------------------*
 *  usb_pc_dmamap_create - create a DMA map
 *
 * Returns:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_dmamap_create(struct usb_page_cache *pc, usb_size_t size)
{
	struct usb_xfer_root *info;
	struct usb_dma_tag *utag;

	/* get info */
	info = USB_DMATAG_TO_XROOT(pc->tag_parent);

	/* sanity check */
	if (info == NULL) {
		goto error;
	}
	utag = usb_dma_tag_find(pc->tag_parent, size, 1);
	if (utag == NULL) {
		goto error;
	}
	/* create DMA map */
	if (bus_dmamap_create(utag->tag, 0, &pc->map)) {
		goto error;
	}
	pc->tag = utag->tag;
	return 0;			/* success */

error:
	pc->map = NULL;
	pc->tag = NULL;
	return 1;			/* failure */
}

/*------------------------------------------------------------------------*
 *  usb_pc_dmamap_destroy
 *
 * This function is NULL safe.
 *------------------------------------------------------------------------*/
void
usb_pc_dmamap_destroy(struct usb_page_cache *pc)
{
	if (pc && pc->tag) {
		bus_dmamap_destroy(pc->tag, pc->map);
		pc->tag = NULL;
		pc->map = NULL;
	}
}

/*------------------------------------------------------------------------*
 *  usb_dma_tag_find - factored out code
 *
 * Looks up a cached DMA tag matching "size" and "align", creating a
 * new one in the first unused slot if no match is found. Returns
 * NULL when no tag could be found or created.
 *------------------------------------------------------------------------*/
struct usb_dma_tag *
usb_dma_tag_find(struct usb_dma_parent_tag *udpt,
    usb_size_t size, usb_size_t align)
{
	struct usb_dma_tag *udt;
	uint8_t nudt;

	USB_ASSERT(align > 0, ("Invalid parameter align = 0\n"));
	USB_ASSERT(size > 0, ("Invalid parameter size = 0\n"));

	udt = udpt->utag_first;
	nudt = udpt->utag_max;

	while (nudt--) {

		if (udt->align == 0) {
			/* unused slot - create a new DMA tag here */
			usb_dma_tag_create(udt, size, align);
			if (udt->tag == NULL) {
				return (NULL);
			}
			udt->align = align;
			udt->size = size;
			return (udt);
		}
		if ((udt->align == align) && (udt->size == size)) {
			return (udt);
		}
		udt++;
	}
	return (NULL);
}

/*------------------------------------------------------------------------*
 *  usb_dma_tag_setup - initialise USB DMA tags
 *------------------------------------------------------------------------*/
void
usb_dma_tag_setup(struct usb_dma_parent_tag *udpt,
    struct usb_dma_tag *udt, bus_dma_tag_t dmat,
    struct mtx *mtx, usb_dma_callback_t *func,
    uint8_t ndmabits, uint8_t nudt)
{
	memset(udpt, 0, sizeof(*udpt));

	/* sanity checking */
	if ((nudt == 0) ||
	    (ndmabits == 0) ||
	    (mtx == NULL)) {
		/* something is corrupt */
		return;
	}
	/* initialise condition variable */
	cv_init(udpt->cv, "USB DMA CV");

	/* store some information */
	udpt->mtx = mtx;
	udpt->func = func;
	udpt->tag = dmat;
	udpt->utag_first = udt;
	udpt->utag_max = nudt;
	udpt->dma_bits = ndmabits;

	/* reset the child tag array */
	while (nudt--) {
		memset(udt, 0, sizeof(*udt));
		udt->tag_parent = udpt;
		udt++;
	}
}

/*------------------------------------------------------------------------*
 *  usb_dma_tag_unsetup - factored out code
 *------------------------------------------------------------------------*/
void
usb_dma_tag_unsetup(struct usb_dma_parent_tag *udpt)
{
	struct usb_dma_tag *udt;
	uint8_t nudt;

	udt = udpt->utag_first;
	nudt = udpt->utag_max;

	while (nudt--) {

		if (udt->align) {
			/* destroy the USB DMA tag */
			usb_dma_tag_destroy(udt);
			udt->align = 0;
		}
		udt++;
	}

	if (udpt->utag_max) {
		/* destroy the condition variable */
		cv_destroy(udpt->cv);
	}
}

/*------------------------------------------------------------------------*
 *  usb_bdma_work_loop
 *
 * This function handles loading of virtual buffers into DMA and is
 * only called when "dma_refcount" is zero.
 *------------------------------------------------------------------------*/
void
usb_bdma_work_loop(struct usb_xfer_queue *pq)
{
	struct usb_xfer_root *info;
	struct usb_xfer *xfer;
	usb_frcount_t nframes;

	xfer = pq->curr;
	info = xfer->xroot;

	mtx_assert(info->xfer_mtx, MA_OWNED);

	if (xfer->error) {
		/* some error happened */
		USB_BUS_LOCK(info->bus);
		usbd_transfer_done(xfer, 0);
		USB_BUS_UNLOCK(info->bus);
		return;
	}
	if (!xfer->flags_int.bdma_setup) {
		struct usb_page *pg;
		usb_frlength_t frlength_0;
		uint8_t isread;

		xfer->flags_int.bdma_setup = 1;

		/* reset BUS-DMA load state */

		info->dma_error = 0;

		if (xfer->flags_int.isochronous_xfr) {
			/* only one frame buffer */
			nframes = 1;
			frlength_0 = xfer->sumlen;
		} else {
			/* can be multiple frame buffers */
			nframes = xfer->nframes;
			frlength_0 = xfer->frlengths[0];
		}

		/*
		 * Set DMA direction first. This is needed to
		 * select the correct cache invalidate and cache
		 * flush operations.
		 */
		isread = USB_GET_DATA_ISREAD(xfer);
		pg = xfer->dma_page_ptr;

		if (xfer->flags_int.control_xfr &&
		    xfer->flags_int.control_hdr) {
			/* special case */
			if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
				/* The device controller writes to memory */
				xfer->frbuffers[0].isread = 1;
			} else {
				/* The host controller reads from memory */
				xfer->frbuffers[0].isread = 0;
			}
		} else {
			/* default case */
			xfer->frbuffers[0].isread = isread;
		}

		/*
		 * Setup the "page_start" pointer which points to an array of
		 * USB pages where information about the physical address of a
		 * page will be stored. Also initialise the "isread" field of
		 * the USB page caches.
		 */
		xfer->frbuffers[0].page_start = pg;

		info->dma_nframes = nframes;
		info->dma_currframe = 0;
		info->dma_frlength_0 = frlength_0;

		pg += (frlength_0 / USB_PAGE_SIZE);
		pg += 2;

		while (--nframes > 0) {
			xfer->frbuffers[nframes].isread = isread;
			xfer->frbuffers[nframes].page_start = pg;

			pg += (xfer->frlengths[nframes] / USB_PAGE_SIZE);
			pg += 2;
		}

	}
	if (info->dma_error) {
		USB_BUS_LOCK(info->bus);
		usbd_transfer_done(xfer, USB_ERR_DMA_LOAD_FAILED);
		USB_BUS_UNLOCK(info->bus);
		return;
	}
	if (info->dma_currframe != info->dma_nframes) {

		/*
		 * Load one frame buffer at a time; this function is
		 * re-entered through "usb_bdma_done_event()" after
		 * each asynchronous load completes.
		 */
		if (info->dma_currframe == 0) {
			/* special case */
			usb_pc_load_mem(xfer->frbuffers,
			    info->dma_frlength_0, 0);
		} else {
			/* default case */
			nframes = info->dma_currframe;
			usb_pc_load_mem(xfer->frbuffers + nframes,
			    xfer->frlengths[nframes], 0);
		}

		/* advance frame index */
		info->dma_currframe++;

		return;
	}
	/* go ahead */
	usb_bdma_pre_sync(xfer);

	/* start loading next USB transfer, if any */
	usb_command_wrapper(pq, NULL);

	/* finally start the hardware */
	usbd_pipe_enter(xfer);
}

/*------------------------------------------------------------------------*
 *  usb_bdma_done_event
 *
 * This function is called when the BUS-DMA has loaded virtual memory
 * into DMA, if any.
998 *------------------------------------------------------------------------*/ 999 void 1000 usb_bdma_done_event(struct usb_dma_parent_tag *udpt) 1001 { 1002 struct usb_xfer_root *info; 1003 1004 info = USB_DMATAG_TO_XROOT(udpt); 1005 1006 mtx_assert(info->xfer_mtx, MA_OWNED); 1007 1008 /* copy error */ 1009 info->dma_error = udpt->dma_error; 1010 1011 /* enter workloop again */ 1012 usb_command_wrapper(&info->dma_q, 1013 info->dma_q.curr); 1014 } 1015 1016 /*------------------------------------------------------------------------* 1017 * usb_bdma_pre_sync 1018 * 1019 * This function handles DMA synchronisation that must be done before 1020 * an USB transfer is started. 1021 *------------------------------------------------------------------------*/ 1022 void 1023 usb_bdma_pre_sync(struct usb_xfer *xfer) 1024 { 1025 struct usb_page_cache *pc; 1026 usb_frcount_t nframes; 1027 1028 if (xfer->flags_int.isochronous_xfr) { 1029 /* only one frame buffer */ 1030 nframes = 1; 1031 } else { 1032 /* can be multiple frame buffers */ 1033 nframes = xfer->nframes; 1034 } 1035 1036 pc = xfer->frbuffers; 1037 1038 while (nframes--) { 1039 1040 if (pc->isread) { 1041 usb_pc_cpu_invalidate(pc); 1042 } else { 1043 usb_pc_cpu_flush(pc); 1044 } 1045 pc++; 1046 } 1047 } 1048 1049 /*------------------------------------------------------------------------* 1050 * usb_bdma_post_sync 1051 * 1052 * This function handles DMA synchronisation that must be done after 1053 * an USB transfer is complete. 
1054 *------------------------------------------------------------------------*/ 1055 void 1056 usb_bdma_post_sync(struct usb_xfer *xfer) 1057 { 1058 struct usb_page_cache *pc; 1059 usb_frcount_t nframes; 1060 1061 if (xfer->flags_int.isochronous_xfr) { 1062 /* only one frame buffer */ 1063 nframes = 1; 1064 } else { 1065 /* can be multiple frame buffers */ 1066 nframes = xfer->nframes; 1067 } 1068 1069 pc = xfer->frbuffers; 1070 1071 while (nframes--) { 1072 if (pc->isread) { 1073 usb_pc_cpu_invalidate(pc); 1074 } 1075 pc++; 1076 } 1077 } 1078 1079 #endif 1080