/* $FreeBSD$ */
/*-
 * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/stdint.h>
#include <sys/stddef.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/linker_set.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/sysctl.h>
#include <sys/sx.h>
#include <sys/unistd.h>
#include <sys/callout.h>
#include <sys/malloc.h>
#include <sys/priv.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>

#define	USB_DEBUG_VAR usb_debug

#include <dev/usb/usb_core.h>
#include <dev/usb/usb_busdma.h>
#include <dev/usb/usb_process.h>
#include <dev/usb/usb_transfer.h>
#include <dev/usb/usb_device.h>
#include <dev/usb/usb_util.h>
#include <dev/usb/usb_debug.h>

#include <dev/usb/usb_controller.h>
#include <dev/usb/usb_bus.h>

#if USB_HAVE_BUSDMA
static void usb_dma_tag_create(struct usb_dma_tag *, usb_size_t, usb_size_t);
static void usb_dma_tag_destroy(struct usb_dma_tag *);
static void usb_dma_lock_cb(void *, bus_dma_lock_op_t);
static void usb_pc_alloc_mem_cb(void *, bus_dma_segment_t *, int, int);
static void usb_pc_load_mem_cb(void *, bus_dma_segment_t *, int, int);
static void usb_pc_common_mem_cb(void *, bus_dma_segment_t *, int, int,
    uint8_t);
#endif
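/*
 * The functions in this file fall into two groups: buffer access
 * helpers (usbd_get_page(), usbd_copy_in(), usbd_copy_out() and
 * friends) which work both with and without BUS-DMA, and BUS-DMA
 * management helpers (usb_pc_*(), usb_dma_tag_*() and the usb_bdma_*()
 * transfer hooks) which are only compiled when "USB_HAVE_BUSDMA" is
 * set.
 */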
/*------------------------------------------------------------------------*
 *	usbd_get_page - lookup DMA-able memory for the given offset
 *
 * NOTE: Only call this function when the "page_cache" structure has
 * been properly initialized !
 *------------------------------------------------------------------------*/
void
usbd_get_page(struct usb_page_cache *pc, usb_frlength_t offset,
    struct usb_page_search *res)
{
	struct usb_page *page;

#if USB_HAVE_BUSDMA
	if (pc->page_start) {

		/* Case 1 - something has been loaded into DMA */

		if (pc->buffer) {

			/* Case 1a - Kernel Virtual Address */

			res->buffer = USB_ADD_BYTES(pc->buffer, offset);
		}
		offset += pc->page_offset_buf;

		/* compute destination page */

		page = pc->page_start;

		if (pc->ismultiseg) {

			page += (offset / USB_PAGE_SIZE);

			offset %= USB_PAGE_SIZE;

			res->length = USB_PAGE_SIZE - offset;
			res->physaddr = page->physaddr + offset;
		} else {
			res->length = 0 - 1;
			res->physaddr = page->physaddr + offset;
		}
		if (!pc->buffer) {

			/* Case 1b - Non Kernel Virtual Address */

			res->buffer = USB_ADD_BYTES(page->buffer, offset);
		}
		return;
	}
#endif
	/* Case 2 - Plain PIO */

	res->buffer = USB_ADD_BYTES(pc->buffer, offset);
	res->length = 0 - 1;
#if USB_HAVE_BUSDMA
	res->physaddr = 0;
#endif
}

/*------------------------------------------------------------------------*
 *	usbd_copy_in - copy directly to DMA-able memory
 *------------------------------------------------------------------------*/
void
usbd_copy_in(struct usb_page_cache *cache, usb_frlength_t offset,
    const void *ptr, usb_frlength_t len)
{
	struct usb_page_search buf_res;

	while (len != 0) {

		usbd_get_page(cache, offset, &buf_res);

		if (buf_res.length > len) {
			buf_res.length = len;
		}
		bcopy(ptr, buf_res.buffer, buf_res.length);

		offset += buf_res.length;
		len -= buf_res.length;
		ptr = USB_ADD_BYTES(ptr, buf_res.length);
	}
}

/*------------------------------------------------------------------------*
 *	usbd_copy_in_user - copy directly to DMA-able memory from userland
 *
 * Return values:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
#if USB_HAVE_USER_IO
int
usbd_copy_in_user(struct usb_page_cache *cache, usb_frlength_t offset,
    const void *ptr, usb_frlength_t len)
{
	struct usb_page_search buf_res;
	int error;

	while (len != 0) {

		usbd_get_page(cache, offset, &buf_res);

		if (buf_res.length > len) {
			buf_res.length = len;
		}
		error = copyin(ptr, buf_res.buffer, buf_res.length);
		if (error)
			return (error);

		offset += buf_res.length;
		len -= buf_res.length;
		ptr = USB_ADD_BYTES(ptr, buf_res.length);
	}
	return (0);			/* success */
}
#endif
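/*
 * Illustrative example (not part of this file): a driver typically
 * fills a frame buffer before starting a transfer and reads it back
 * after completion, using byte offsets into the page cache:
 *
 *	usbd_copy_in(xfer->frbuffers, 0, &req, sizeof(req));
 *	...
 *	usbd_copy_out(xfer->frbuffers, 0, &data, sizeof(data));
 *
 * Both helpers iterate usbd_get_page() until the requested length has
 * been copied, so a single copy may span multiple DMA segments.
 */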
/*------------------------------------------------------------------------*
 *	usbd_m_copy_in - copy a mbuf chain directly into DMA-able memory
 *------------------------------------------------------------------------*/
#if USB_HAVE_MBUF
struct usb_m_copy_in_arg {
	struct usb_page_cache *cache;
	usb_frlength_t dst_offset;
};

static int
usbd_m_copy_in_cb(void *arg, void *src, uint32_t count)
{
	register struct usb_m_copy_in_arg *ua = arg;

	usbd_copy_in(ua->cache, ua->dst_offset, src, count);
	ua->dst_offset += count;
	return (0);
}

void
usbd_m_copy_in(struct usb_page_cache *cache, usb_frlength_t dst_offset,
    struct mbuf *m, usb_size_t src_offset, usb_frlength_t src_len)
{
	struct usb_m_copy_in_arg arg = {cache, dst_offset};
	int error;

	error = m_apply(m, src_offset, src_len, &usbd_m_copy_in_cb, &arg);
}
#endif

/*------------------------------------------------------------------------*
 *	usb_uiomove - factored out code
 *------------------------------------------------------------------------*/
#if USB_HAVE_USER_IO
int
usb_uiomove(struct usb_page_cache *pc, struct uio *uio,
    usb_frlength_t pc_offset, usb_frlength_t len)
{
	struct usb_page_search res;
	int error = 0;

	while (len != 0) {

		usbd_get_page(pc, pc_offset, &res);

		if (res.length > len) {
			res.length = len;
		}
		/*
		 * "uiomove()" can sleep so one needs to make a wrapper,
		 * exiting the mutex and checking things
		 */
		error = uiomove(res.buffer, res.length, uio);

		if (error) {
			break;
		}
		pc_offset += res.length;
		len -= res.length;
	}
	return (error);
}
#endif

/*------------------------------------------------------------------------*
 *	usbd_copy_out - copy directly from DMA-able memory
 *------------------------------------------------------------------------*/
void
usbd_copy_out(struct usb_page_cache *cache, usb_frlength_t offset,
    void *ptr, usb_frlength_t len)
{
	struct usb_page_search res;

	while (len != 0) {

		usbd_get_page(cache, offset, &res);

		if (res.length > len) {
			res.length = len;
		}
		bcopy(res.buffer, ptr, res.length);

		offset += res.length;
		len -= res.length;
		ptr = USB_ADD_BYTES(ptr, res.length);
	}
}

/*------------------------------------------------------------------------*
 *	usbd_copy_out_user - copy directly from DMA-able memory to userland
 *
 * Return values:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
#if USB_HAVE_USER_IO
int
usbd_copy_out_user(struct usb_page_cache *cache, usb_frlength_t offset,
    void *ptr, usb_frlength_t len)
{
	struct usb_page_search res;
	int error;

	while (len != 0) {

		usbd_get_page(cache, offset, &res);

		if (res.length > len) {
			res.length = len;
		}
		error = copyout(res.buffer, ptr, res.length);
		if (error)
			return (error);

		offset += res.length;
		len -= res.length;
		ptr = USB_ADD_BYTES(ptr, res.length);
	}
	return (0);			/* success */
}
#endif

/*------------------------------------------------------------------------*
 *	usbd_frame_zero - zero DMA-able memory
 *------------------------------------------------------------------------*/
void
usbd_frame_zero(struct usb_page_cache *cache, usb_frlength_t offset,
    usb_frlength_t len)
{
	struct usb_page_search res;

	while (len != 0) {

		usbd_get_page(cache, offset, &res);

		if (res.length > len) {
			res.length = len;
		}
		bzero(res.buffer, res.length);

		offset += res.length;
		len -= res.length;
	}
}

#if USB_HAVE_BUSDMA

/*------------------------------------------------------------------------*
 *	usb_dma_lock_cb - dummy callback
 *------------------------------------------------------------------------*/
static void
usb_dma_lock_cb(void *arg, bus_dma_lock_op_t op)
{
	/* we use "mtx_owned()" instead of this function */
}
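/*
 * Note on the tag parameters used below: for multi-segment tags
 * ("align" == 1) the segment count is "2 + (size / USB_PAGE_SIZE)" and
 * the maximum segment size is one USB page, presumably so that a
 * buffer which starts and ends in the middle of a page can still be
 * described by whole-page segments plus a partial page at each end.
 */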
/*------------------------------------------------------------------------*
 *	usb_dma_tag_create - allocate a DMA tag
 *
 * NOTE: If the "align" parameter has a value of 1 the DMA-tag will
 * allow multi-segment mappings. Else all mappings are single-segment.
 *------------------------------------------------------------------------*/
static void
usb_dma_tag_create(struct usb_dma_tag *udt,
    usb_size_t size, usb_size_t align)
{
	bus_dma_tag_t tag;

	if (bus_dma_tag_create(
	    /* parent    */ udt->tag_parent->tag,
	    /* alignment */ align,
	    /* boundary  */ (align == 1) ? USB_PAGE_SIZE : 0,
	    /* lowaddr   */ (2ULL << (udt->tag_parent->dma_bits - 1)) - 1,
	    /* highaddr  */ BUS_SPACE_MAXADDR,
	    /* filter    */ NULL,
	    /* filterarg */ NULL,
	    /* maxsize   */ size,
	    /* nsegments */ (align == 1) ? (2 + (size / USB_PAGE_SIZE)) : 1,
	    /* maxsegsz  */ (align == 1) ? USB_PAGE_SIZE : size,
	    /* flags     */ BUS_DMA_KEEP_PG_OFFSET,
	    /* lockfn    */ &usb_dma_lock_cb,
	    /* lockarg   */ NULL,
	    &tag)) {
		tag = NULL;
	}
	udt->tag = tag;
}

/*------------------------------------------------------------------------*
 *	usb_dma_tag_destroy - free a DMA tag
 *------------------------------------------------------------------------*/
static void
usb_dma_tag_destroy(struct usb_dma_tag *udt)
{
	bus_dma_tag_destroy(udt->tag);
}

/*------------------------------------------------------------------------*
 *	usb_pc_alloc_mem_cb - BUS-DMA callback function
 *------------------------------------------------------------------------*/
static void
usb_pc_alloc_mem_cb(void *arg, bus_dma_segment_t *segs,
    int nseg, int error)
{
	usb_pc_common_mem_cb(arg, segs, nseg, error, 0);
}

/*------------------------------------------------------------------------*
 *	usb_pc_load_mem_cb - BUS-DMA callback function
 *------------------------------------------------------------------------*/
static void
usb_pc_load_mem_cb(void *arg, bus_dma_segment_t *segs,
    int nseg, int error)
{
	usb_pc_common_mem_cb(arg, segs, nseg, error, 1);
}
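/*
 * The two wrappers above only differ in the "isload" argument they
 * pass to usb_pc_common_mem_cb(): the allocation path ("isload" == 0)
 * wakes up the thread sleeping on the parent tag's condition variable,
 * while the load path ("isload" == 1) invokes the parent tag's
 * completion callback instead.
 */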
/*------------------------------------------------------------------------*
 *	usb_pc_common_mem_cb - BUS-DMA callback function
 *------------------------------------------------------------------------*/
static void
usb_pc_common_mem_cb(void *arg, bus_dma_segment_t *segs,
    int nseg, int error, uint8_t isload)
{
	struct usb_dma_parent_tag *uptag;
	struct usb_page_cache *pc;
	struct usb_page *pg;
	usb_size_t rem;
	uint8_t owned;

	pc = arg;
	uptag = pc->tag_parent;

	/*
	 * XXX There is sometimes recursive locking here.
	 * XXX We should try to find a better solution.
	 * XXX Until further notice the "owned" variable does
	 * XXX the trick.
	 */

	if (error) {
		goto done;
	}
	pg = pc->page_start;
	pg->physaddr = segs->ds_addr & ~(USB_PAGE_SIZE - 1);
	rem = segs->ds_addr & (USB_PAGE_SIZE - 1);
	pc->page_offset_buf = rem;
	pc->page_offset_end += rem;
	nseg--;
#ifdef USB_DEBUG
	if (rem != (USB_P2U(pc->buffer) & (USB_PAGE_SIZE - 1))) {
		/*
		 * This check verifies that the physical address is correct:
		 */
		DPRINTFN(0, "Page offset was not preserved\n");
		error = 1;
		goto done;
	}
#endif
	while (nseg > 0) {
		nseg--;
		segs++;
		pg++;
		pg->physaddr = segs->ds_addr & ~(USB_PAGE_SIZE - 1);
	}

done:
	owned = mtx_owned(uptag->mtx);
	if (!owned)
		mtx_lock(uptag->mtx);

	uptag->dma_error = (error ? 1 : 0);
	if (isload) {
		(uptag->func) (uptag);
	} else {
		cv_broadcast(uptag->cv);
	}
	if (!owned)
		mtx_unlock(uptag->mtx);
}

/*------------------------------------------------------------------------*
 *	usb_pc_alloc_mem - allocate DMA'able memory
 *
 * Returns:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_alloc_mem(struct usb_page_cache *pc, struct usb_page *pg,
    usb_size_t size, usb_size_t align)
{
	struct usb_dma_parent_tag *uptag;
	struct usb_dma_tag *utag;
	bus_dmamap_t map;
	void *ptr;
	int err;

	uptag = pc->tag_parent;

	if (align != 1) {
		/*
		 * The alignment must be greater than or equal to the
		 * "size" else the object can be split between two
		 * memory pages and we get a problem!
		 */
		while (align < size) {
			align *= 2;
			if (align == 0) {
				goto error;
			}
		}
#if 1
		/*
		 * XXX BUS-DMA workaround - FIXME later:
		 *
		 * We assume that the alignment at this point of
		 * the code is greater than or equal to the size and
		 * less than two times the size, so that if we double
		 * the size, the size will be greater than the
		 * alignment.
		 *
		 * The bus-dma system has a check for "alignment"
		 * being less than "size". If that check fails we end
		 * up using contigmalloc which is page based even for
		 * small allocations. Try to avoid that to save
		 * memory, hence we sometimes do a large number of
		 * small allocations!
		 */
		if (size <= (USB_PAGE_SIZE / 2)) {
			size *= 2;
		}
#endif
	}
	/* get the correct DMA tag */
	utag = usb_dma_tag_find(uptag, size, align);
	if (utag == NULL) {
		goto error;
	}
	/* allocate memory */
	if (bus_dmamem_alloc(
	    utag->tag, &ptr, (BUS_DMA_WAITOK | BUS_DMA_COHERENT), &map)) {
		goto error;
	}
	/* setup page cache */
	pc->buffer = ptr;
	pc->page_start = pg;
	pc->page_offset_buf = 0;
	pc->page_offset_end = size;
	pc->map = map;
	pc->tag = utag->tag;
	pc->ismultiseg = (align == 1);

	mtx_lock(uptag->mtx);

	/* load memory into DMA */
	err = bus_dmamap_load(
	    utag->tag, map, ptr, size, &usb_pc_alloc_mem_cb,
	    pc, (BUS_DMA_WAITOK | BUS_DMA_COHERENT));

	if (err == EINPROGRESS) {
		cv_wait(uptag->cv, uptag->mtx);
		err = 0;
	}
	mtx_unlock(uptag->mtx);

	if (err || uptag->dma_error) {
		bus_dmamem_free(utag->tag, ptr, map);
		goto error;
	}
	bzero(ptr, size);

	usb_pc_cpu_flush(pc);

	return (0);

error:
	/* reset most of the page cache */
	pc->buffer = NULL;
	pc->page_start = NULL;
	pc->page_offset_buf = 0;
	pc->page_offset_end = 0;
	pc->map = NULL;
	pc->tag = NULL;
	return (1);
}

/*------------------------------------------------------------------------*
 *	usb_pc_free_mem - free DMA memory
 *
 * This function is NULL safe.
 *------------------------------------------------------------------------*/
void
usb_pc_free_mem(struct usb_page_cache *pc)
{
	if (pc && pc->buffer) {

		bus_dmamap_unload(pc->tag, pc->map);

		bus_dmamem_free(pc->tag, pc->buffer, pc->map);

		pc->buffer = NULL;
	}
}
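/*
 * usb_pc_load_mem() below must be called with the parent tag's mutex
 * held. With "sync" set the function waits for the load to complete.
 * Without "sync" completion is reported asynchronously through the
 * parent tag's callback, which in the transfer path is expected to be
 * usb_bdma_done_event().
 */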
/*------------------------------------------------------------------------*
 *	usb_pc_load_mem - load virtual memory into DMA
 *
 * Return values:
 *    0: Success
 * Else: Error
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_load_mem(struct usb_page_cache *pc, usb_size_t size, uint8_t sync)
{
	/* setup page cache */
	pc->page_offset_buf = 0;
	pc->page_offset_end = size;
	pc->ismultiseg = 1;

	mtx_assert(pc->tag_parent->mtx, MA_OWNED);

	if (size > 0) {
		if (sync) {
			struct usb_dma_parent_tag *uptag;
			int err;

			uptag = pc->tag_parent;

			/*
			 * We have to unload the previously loaded DMA
			 * pages before trying to load a new one!
			 */
			bus_dmamap_unload(pc->tag, pc->map);

			/*
			 * Try to load memory into DMA.
			 */
			err = bus_dmamap_load(
			    pc->tag, pc->map, pc->buffer, size,
			    &usb_pc_alloc_mem_cb, pc, BUS_DMA_WAITOK);
			if (err == EINPROGRESS) {
				cv_wait(uptag->cv, uptag->mtx);
				err = 0;
			}
			if (err || uptag->dma_error) {
				return (1);
			}
		} else {

			/*
			 * We have to unload the previously loaded DMA
			 * pages before trying to load a new one!
			 */
			bus_dmamap_unload(pc->tag, pc->map);

			/*
			 * Try to load memory into DMA. The callback
			 * will be called in all cases:
			 */
			if (bus_dmamap_load(
			    pc->tag, pc->map, pc->buffer, size,
			    &usb_pc_load_mem_cb, pc, BUS_DMA_WAITOK)) {
			}
		}
	} else {
		if (!sync) {
			/*
			 * Call callback so that refcount is decremented
			 * properly:
			 */
			pc->tag_parent->dma_error = 0;
			(pc->tag_parent->func) (pc->tag_parent);
		}
	}
	return (0);
}

/*------------------------------------------------------------------------*
 *	usb_pc_cpu_invalidate - invalidate CPU cache
 *------------------------------------------------------------------------*/
void
usb_pc_cpu_invalidate(struct usb_page_cache *pc)
{
	if (pc->page_offset_end == pc->page_offset_buf) {
		/* nothing has been loaded into this page cache! */
		return;
	}

	/*
	 * TODO: We currently do XXX_POSTREAD and XXX_PREREAD at the
	 * same time, but in the future we should try to isolate the
	 * different cases to optimise the code. --HPS
	 */
	bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_PREREAD);
}

/*------------------------------------------------------------------------*
 *	usb_pc_cpu_flush - flush CPU cache
 *------------------------------------------------------------------------*/
void
usb_pc_cpu_flush(struct usb_page_cache *pc)
{
	if (pc->page_offset_end == pc->page_offset_buf) {
		/* nothing has been loaded into this page cache! */
		return;
	}
	bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_PREWRITE);
}
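/*
 * The two cache helpers above are paired with the transfer direction:
 * usb_pc_cpu_invalidate() is used for frame buffers the device writes
 * to (reads from the host's point of view) and usb_pc_cpu_flush() for
 * frame buffers the device reads from; see usb_bdma_pre_sync() and
 * usb_bdma_post_sync() below.
 */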
/*------------------------------------------------------------------------*
 *	usb_pc_dmamap_create - create a DMA map
 *
 * Returns:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_dmamap_create(struct usb_page_cache *pc, usb_size_t size)
{
	struct usb_xfer_root *info;
	struct usb_dma_tag *utag;

	/* get info */
	info = USB_DMATAG_TO_XROOT(pc->tag_parent);

	/* sanity check */
	if (info == NULL) {
		goto error;
	}
	utag = usb_dma_tag_find(pc->tag_parent, size, 1);
	if (utag == NULL) {
		goto error;
	}
	/* create DMA map */
	if (bus_dmamap_create(utag->tag, 0, &pc->map)) {
		goto error;
	}
	pc->tag = utag->tag;
	return (0);			/* success */

error:
	pc->map = NULL;
	pc->tag = NULL;
	return (1);			/* failure */
}

/*------------------------------------------------------------------------*
 *	usb_pc_dmamap_destroy
 *
 * This function is NULL safe.
 *------------------------------------------------------------------------*/
void
usb_pc_dmamap_destroy(struct usb_page_cache *pc)
{
	if (pc && pc->tag) {
		bus_dmamap_destroy(pc->tag, pc->map);
		pc->tag = NULL;
		pc->map = NULL;
	}
}

/*------------------------------------------------------------------------*
 *	usb_dma_tag_find - factored out code
 *------------------------------------------------------------------------*/
struct usb_dma_tag *
usb_dma_tag_find(struct usb_dma_parent_tag *udpt,
    usb_size_t size, usb_size_t align)
{
	struct usb_dma_tag *udt;
	uint8_t nudt;

	USB_ASSERT(align > 0, ("Invalid parameter align = 0\n"));
	USB_ASSERT(size > 0, ("Invalid parameter size = 0\n"));

	udt = udpt->utag_first;
	nudt = udpt->utag_max;

	while (nudt--) {

		if (udt->align == 0) {
			usb_dma_tag_create(udt, size, align);
			if (udt->tag == NULL) {
				return (NULL);
			}
			udt->align = align;
			udt->size = size;
			return (udt);
		}
		if ((udt->align == align) && (udt->size == size)) {
			return (udt);
		}
		udt++;
	}
	return (NULL);
}

/*------------------------------------------------------------------------*
 *	usb_dma_tag_setup - initialise USB DMA tags
 *------------------------------------------------------------------------*/
void
usb_dma_tag_setup(struct usb_dma_parent_tag *udpt,
    struct usb_dma_tag *udt, bus_dma_tag_t dmat,
    struct mtx *mtx, usb_dma_callback_t *func,
    uint8_t ndmabits, uint8_t nudt)
{
	bzero(udpt, sizeof(*udpt));

	/* sanity checking */
	if ((nudt == 0) ||
	    (ndmabits == 0) ||
	    (mtx == NULL)) {
		/* something is corrupt */
		return;
	}
	/* initialise condition variable */
	cv_init(udpt->cv, "USB DMA CV");

	/* store some information */
	udpt->mtx = mtx;
	udpt->func = func;
	udpt->tag = dmat;
	udpt->utag_first = udt;
	udpt->utag_max = nudt;
	udpt->dma_bits = ndmabits;

	while (nudt--) {
		bzero(udt, sizeof(*udt));
		udt->tag_parent = udpt;
		udt++;
	}
}

/*------------------------------------------------------------------------*
 *	usb_dma_tag_unsetup - factored out code
 *------------------------------------------------------------------------*/
void
usb_dma_tag_unsetup(struct usb_dma_parent_tag *udpt)
{
	struct usb_dma_tag *udt;
	uint8_t nudt;

	udt = udpt->utag_first;
	nudt = udpt->utag_max;

	while (nudt--) {

		if (udt->align) {
			/* destroy the USB DMA tag */
			usb_dma_tag_destroy(udt);
			udt->align = 0;
		}
		udt++;
	}

	if (udpt->utag_max) {
		/* destroy the condition variable */
		cv_destroy(udpt->cv);
	}
}
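/*
 * The remaining functions implement the BUS-DMA loading state machine
 * used by the USB transfer code: usb_bdma_work_loop() loads one frame
 * buffer per invocation, usb_bdma_done_event() re-enters the work loop
 * when an asynchronous load has completed, and usb_bdma_pre_sync() and
 * usb_bdma_post_sync() perform the CPU cache operations around the
 * actual hardware transfer.
 */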
/*------------------------------------------------------------------------*
 *	usb_bdma_work_loop
 *
 * This function handles loading of virtual buffers into DMA and is
 * only called when "dma_refcount" is zero.
 *------------------------------------------------------------------------*/
void
usb_bdma_work_loop(struct usb_xfer_queue *pq)
{
	struct usb_xfer_root *info;
	struct usb_xfer *xfer;
	usb_frcount_t nframes;

	xfer = pq->curr;
	info = xfer->xroot;

	mtx_assert(info->xfer_mtx, MA_OWNED);

	if (xfer->error) {
		/* some error happened */
		USB_BUS_LOCK(info->bus);
		usbd_transfer_done(xfer, 0);
		USB_BUS_UNLOCK(info->bus);
		return;
	}
	if (!xfer->flags_int.bdma_setup) {
		struct usb_page *pg;
		usb_frlength_t frlength_0;
		uint8_t isread;

		xfer->flags_int.bdma_setup = 1;

		/* reset BUS-DMA load state */

		info->dma_error = 0;

		if (xfer->flags_int.isochronous_xfr) {
			/* only one frame buffer */
			nframes = 1;
			frlength_0 = xfer->sumlen;
		} else {
			/* can be multiple frame buffers */
			nframes = xfer->nframes;
			frlength_0 = xfer->frlengths[0];
		}

		/*
		 * Set DMA direction first. This is needed to
		 * select the correct cache invalidate and cache
		 * flush operations.
		 */
		isread = USB_GET_DATA_ISREAD(xfer);
		pg = xfer->dma_page_ptr;

		if (xfer->flags_int.control_xfr &&
		    xfer->flags_int.control_hdr) {
			/* special case */
			if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
				/* The device controller writes to memory */
				xfer->frbuffers[0].isread = 1;
			} else {
				/* The host controller reads from memory */
				xfer->frbuffers[0].isread = 0;
			}
		} else {
			/* default case */
			xfer->frbuffers[0].isread = isread;
		}

		/*
		 * Setup the "page_start" pointer which points to an array of
		 * USB pages where information about the physical address of a
		 * page will be stored. Also initialise the "isread" field of
		 * the USB page caches.
		 */
		xfer->frbuffers[0].page_start = pg;

		info->dma_nframes = nframes;
		info->dma_currframe = 0;
		info->dma_frlength_0 = frlength_0;

		pg += (frlength_0 / USB_PAGE_SIZE);
		pg += 2;

		while (--nframes > 0) {
			xfer->frbuffers[nframes].isread = isread;
			xfer->frbuffers[nframes].page_start = pg;

			pg += (xfer->frlengths[nframes] / USB_PAGE_SIZE);
			pg += 2;
		}
	}
	if (info->dma_error) {
		USB_BUS_LOCK(info->bus);
		usbd_transfer_done(xfer, USB_ERR_DMA_LOAD_FAILED);
		USB_BUS_UNLOCK(info->bus);
		return;
	}
	if (info->dma_currframe != info->dma_nframes) {

		if (info->dma_currframe == 0) {
			/* special case */
			usb_pc_load_mem(xfer->frbuffers,
			    info->dma_frlength_0, 0);
		} else {
			/* default case */
			nframes = info->dma_currframe;
			usb_pc_load_mem(xfer->frbuffers + nframes,
			    xfer->frlengths[nframes], 0);
		}

		/* advance frame index */
		info->dma_currframe++;

		return;
	}
	/* go ahead */
	usb_bdma_pre_sync(xfer);

	/* start loading next USB transfer, if any */
	usb_command_wrapper(pq, NULL);

	/* finally start the hardware */
	usbd_pipe_enter(xfer);
}
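/*
 * Each pass through the work loop above loads at most one frame buffer
 * and then returns; the BUS-DMA completion ends up in
 * usb_bdma_done_event() (through the parent tag's callback), which
 * re-enters the work loop, so all frame buffers are eventually loaded
 * before the transfer is started.
 */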
/*------------------------------------------------------------------------*
 *	usb_bdma_done_event
 *
 * This function is called when BUS-DMA has loaded virtual memory
 * into DMA, if any.
 *------------------------------------------------------------------------*/
void
usb_bdma_done_event(struct usb_dma_parent_tag *udpt)
{
	struct usb_xfer_root *info;

	info = USB_DMATAG_TO_XROOT(udpt);

	mtx_assert(info->xfer_mtx, MA_OWNED);

	/* copy error */
	info->dma_error = udpt->dma_error;

	/* enter workloop again */
	usb_command_wrapper(&info->dma_q,
	    info->dma_q.curr);
}

/*------------------------------------------------------------------------*
 *	usb_bdma_pre_sync
 *
 * This function handles DMA synchronisation that must be done before
 * a USB transfer is started.
 *------------------------------------------------------------------------*/
void
usb_bdma_pre_sync(struct usb_xfer *xfer)
{
	struct usb_page_cache *pc;
	usb_frcount_t nframes;

	if (xfer->flags_int.isochronous_xfr) {
		/* only one frame buffer */
		nframes = 1;
	} else {
		/* can be multiple frame buffers */
		nframes = xfer->nframes;
	}

	pc = xfer->frbuffers;

	while (nframes--) {

		if (pc->isread) {
			usb_pc_cpu_invalidate(pc);
		} else {
			usb_pc_cpu_flush(pc);
		}
		pc++;
	}
}

/*------------------------------------------------------------------------*
 *	usb_bdma_post_sync
 *
 * This function handles DMA synchronisation that must be done after
 * a USB transfer is complete.
 *------------------------------------------------------------------------*/
void
usb_bdma_post_sync(struct usb_xfer *xfer)
{
	struct usb_page_cache *pc;
	usb_frcount_t nframes;

	if (xfer->flags_int.isochronous_xfr) {
		/* only one frame buffer */
		nframes = 1;
	} else {
		/* can be multiple frame buffers */
		nframes = xfer->nframes;
	}

	pc = xfer->frbuffers;

	while (nframes--) {
		if (pc->isread) {
			usb_pc_cpu_invalidate(pc);
		}
		pc++;
	}
}

#endif