/* $FreeBSD$ */
/*-
 * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/stdint.h>
#include <sys/stddef.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/sysctl.h>
#include <sys/sx.h>
#include <sys/unistd.h>
#include <sys/callout.h>
#include <sys/malloc.h>
#include <sys/priv.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>

#define	USB_DEBUG_VAR usb_debug

#include <dev/usb/usb_core.h>
#include <dev/usb/usb_busdma.h>
#include <dev/usb/usb_process.h>
#include <dev/usb/usb_transfer.h>
#include <dev/usb/usb_device.h>
#include <dev/usb/usb_util.h>
#include <dev/usb/usb_debug.h>

#include <dev/usb/usb_controller.h>
#include <dev/usb/usb_bus.h>

#if USB_HAVE_BUSDMA
static void usb_dma_tag_create(struct usb_dma_tag *, usb_size_t, usb_size_t);
static void usb_dma_tag_destroy(struct usb_dma_tag *);
static void usb_dma_lock_cb(void *, bus_dma_lock_op_t);
static void usb_pc_alloc_mem_cb(void *, bus_dma_segment_t *, int, int);
static void usb_pc_load_mem_cb(void *, bus_dma_segment_t *, int, int);
static void usb_pc_common_mem_cb(void *, bus_dma_segment_t *, int, int,
    uint8_t);
#endif

/*------------------------------------------------------------------------*
 *	usbd_get_page - lookup DMA-able memory for the given offset
 *
 * NOTE: Only call this function when the "page_cache" structure has
 * been properly initialized!
 *------------------------------------------------------------------------*/
void
usbd_get_page(struct usb_page_cache *pc, usb_frlength_t offset,
    struct usb_page_search *res)
{
	struct usb_page *page;

#if USB_HAVE_BUSDMA
	if (pc->page_start) {
		/* Case 1 - something has been loaded into DMA */

		if (pc->buffer) {
			/* Case 1a - Kernel Virtual Address */
			res->buffer = USB_ADD_BYTES(pc->buffer, offset);
		}
		offset += pc->page_offset_buf;

		/* compute destination page */

		page = pc->page_start;

		if (pc->ismultiseg) {
			page += (offset / USB_PAGE_SIZE);

			offset %= USB_PAGE_SIZE;

			res->length = USB_PAGE_SIZE - offset;
			res->physaddr = page->physaddr + offset;
		} else {
			res->length = 0 - 1;
			res->physaddr = page->physaddr + offset;
		}
		if (!pc->buffer) {
			/* Case 1b - Non Kernel Virtual Address */
			res->buffer = USB_ADD_BYTES(page->buffer, offset);
		}
		return;
	}
#endif
	/* Case 2 - Plain PIO */

	res->buffer = USB_ADD_BYTES(pc->buffer, offset);
	res->length = 0 - 1;
#if USB_HAVE_BUSDMA
	res->physaddr = 0;
#endif
}
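/*
 * Illustrative sketch (hypothetical, not compiled): host controller
 * code typically walks a frame buffer with usbd_get_page() to obtain
 * the physical address and length of each DMA-able chunk, for example
 * when filling hardware transfer descriptors.  The "example_td"
 * structure and "example_fill_td" function below are placeholders
 * that do not exist elsewhere in the tree, and the snippet assumes
 * USB_HAVE_BUSDMA so that "physaddr" is available.
 */
#if 0
struct example_td {
	bus_addr_t paddr;
	usb_size_t len;
};

static void
example_fill_td(struct usb_page_cache *pc, usb_frlength_t offset,
    usb_frlength_t remainder, struct example_td *td)
{
	struct usb_page_search res;

	while (remainder != 0) {
		/* lookup the current DMA chunk */
		usbd_get_page(pc, offset, &res);

		/* clamp to the number of bytes left in this request */
		if (res.length > remainder)
			res.length = remainder;

		/* record one physically contiguous segment */
		td->paddr = res.physaddr;
		td->len = res.length;
		td++;

		offset += res.length;
		remainder -= res.length;
	}
}
#endif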

/*------------------------------------------------------------------------*
 *	usbd_copy_in - copy directly to DMA-able memory
 *------------------------------------------------------------------------*/
void
usbd_copy_in(struct usb_page_cache *cache, usb_frlength_t offset,
    const void *ptr, usb_frlength_t len)
{
	struct usb_page_search buf_res;

	while (len != 0) {
		usbd_get_page(cache, offset, &buf_res);

		if (buf_res.length > len) {
			buf_res.length = len;
		}
		bcopy(ptr, buf_res.buffer, buf_res.length);

		offset += buf_res.length;
		len -= buf_res.length;
		ptr = USB_ADD_BYTES(ptr, buf_res.length);
	}
}

/*------------------------------------------------------------------------*
 *	usbd_copy_in_user - copy directly to DMA-able memory from userland
 *
 * Return values:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
#if USB_HAVE_USER_IO
int
usbd_copy_in_user(struct usb_page_cache *cache, usb_frlength_t offset,
    const void *ptr, usb_frlength_t len)
{
	struct usb_page_search buf_res;
	int error;

	while (len != 0) {
		usbd_get_page(cache, offset, &buf_res);

		if (buf_res.length > len) {
			buf_res.length = len;
		}
		error = copyin(ptr, buf_res.buffer, buf_res.length);
		if (error)
			return (error);

		offset += buf_res.length;
		len -= buf_res.length;
		ptr = USB_ADD_BYTES(ptr, buf_res.length);
	}
	return (0);		/* success */
}
#endif

/*------------------------------------------------------------------------*
 *	usbd_m_copy_in - copy an mbuf chain directly into DMA-able memory
 *------------------------------------------------------------------------*/
#if USB_HAVE_MBUF
struct usb_m_copy_in_arg {
	struct usb_page_cache *cache;
	usb_frlength_t dst_offset;
};

static int
usbd_m_copy_in_cb(void *arg, void *src, uint32_t count)
{
	struct usb_m_copy_in_arg *ua = arg;

	usbd_copy_in(ua->cache, ua->dst_offset, src, count);
	ua->dst_offset += count;
	return (0);
}

void
usbd_m_copy_in(struct usb_page_cache *cache, usb_frlength_t dst_offset,
    struct mbuf *m, usb_size_t src_offset, usb_frlength_t src_len)
{
	struct usb_m_copy_in_arg arg = {cache, dst_offset};

	/* the callback never fails, so the return value can be ignored */
	(void) m_apply(m, src_offset, src_len, &usbd_m_copy_in_cb, &arg);
}
#endif

/*------------------------------------------------------------------------*
 *	usb_uiomove - factored out code
 *------------------------------------------------------------------------*/
#if USB_HAVE_USER_IO
int
usb_uiomove(struct usb_page_cache *pc, struct uio *uio,
    usb_frlength_t pc_offset, usb_frlength_t len)
{
	struct usb_page_search res;
	int error = 0;

	while (len != 0) {
		usbd_get_page(pc, pc_offset, &res);

		if (res.length > len) {
			res.length = len;
		}
		/*
		 * "uiomove()" can sleep, so callers need a wrapper that
		 * drops the mutex and re-validates state around this call.
		 */
		error = uiomove(res.buffer, res.length, uio);
		if (error) {
			break;
		}
		pc_offset += res.length;
		len -= res.length;
	}
	return (error);
}
#endif

/*------------------------------------------------------------------------*
 *	usbd_copy_out - copy directly from DMA-able memory
 *------------------------------------------------------------------------*/
void
usbd_copy_out(struct usb_page_cache *cache, usb_frlength_t offset,
    void *ptr, usb_frlength_t len)
{
	struct usb_page_search res;

	while (len != 0) {
		usbd_get_page(cache, offset, &res);

		if (res.length > len) {
			res.length = len;
		}
		bcopy(res.buffer, ptr, res.length);

		offset += res.length;
		len -= res.length;
		ptr = USB_ADD_BYTES(ptr, res.length);
	}
}

/*------------------------------------------------------------------------*
 *	usbd_copy_out_user - copy directly from DMA-able memory to userland
 *
 * Return values:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
#if USB_HAVE_USER_IO
int
usbd_copy_out_user(struct usb_page_cache *cache, usb_frlength_t offset,
    void *ptr, usb_frlength_t len)
{
	struct usb_page_search res;
	int error;

	while (len != 0) {
		usbd_get_page(cache, offset, &res);

		if (res.length > len) {
			res.length = len;
		}
		error = copyout(res.buffer, ptr, res.length);
		if (error)
			return (error);

		offset += res.length;
		len -= res.length;
		ptr = USB_ADD_BYTES(ptr, res.length);
	}
	return (0);		/* success */
}
#endif

/*------------------------------------------------------------------------*
 *	usbd_frame_zero - zero DMA-able memory
 *------------------------------------------------------------------------*/
void
usbd_frame_zero(struct usb_page_cache *cache, usb_frlength_t offset,
    usb_frlength_t len)
{
	struct usb_page_search res;

	while (len != 0) {
		usbd_get_page(cache, offset, &res);

		if (res.length > len) {
			res.length = len;
		}
		bzero(res.buffer, res.length);

		offset += res.length;
		len -= res.length;
	}
}
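/*
 * Illustrative sketch (hypothetical, not compiled): client drivers
 * normally reach the DMA-able frame buffers through the usbdi
 * wrappers rather than through the page cache directly.  A write-pipe
 * callback stages data with usbd_copy_in(); a read-pipe callback
 * would use usbd_copy_out() with the actual length reported by
 * usbd_xfer_status().  The "example_" names below are placeholders.
 */
#if 0
static uint8_t example_tx_buf[64];

static void
example_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct usb_page_cache *pc;

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_SETUP:
	case USB_ST_TRANSFERRED:
		/* copy the next chunk into the transfer's frame buffer */
		pc = usbd_xfer_get_frame(xfer, 0);
		usbd_copy_in(pc, 0, example_tx_buf, sizeof(example_tx_buf));
		usbd_xfer_set_frame_len(xfer, 0, sizeof(example_tx_buf));
		usbd_transfer_submit(xfer);
		break;
	default:
		/* error recovery omitted for brevity */
		break;
	}
}
#endif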

#if USB_HAVE_BUSDMA

/*------------------------------------------------------------------------*
 *	usb_dma_lock_cb - dummy callback
 *------------------------------------------------------------------------*/
static void
usb_dma_lock_cb(void *arg, bus_dma_lock_op_t op)
{
	/* we use "mtx_owned()" instead of this function */
}

/*------------------------------------------------------------------------*
 *	usb_dma_tag_create - allocate a DMA tag
 *
 * NOTE: If the "align" parameter has a value of 1 the DMA-tag will
 * allow multi-segment mappings. Else all mappings are single-segment.
 *------------------------------------------------------------------------*/
static void
usb_dma_tag_create(struct usb_dma_tag *udt,
    usb_size_t size, usb_size_t align)
{
	bus_dma_tag_t tag;

	if (bus_dma_tag_create
	    ( /* parent    */ udt->tag_parent->tag,
	      /* alignment */ align,
	      /* boundary  */ (align == 1) ?
	      USB_PAGE_SIZE : 0,
	      /* lowaddr   */ (2ULL << (udt->tag_parent->dma_bits - 1)) - 1,
	      /* highaddr  */ BUS_SPACE_MAXADDR,
	      /* filter    */ NULL,
	      /* filterarg */ NULL,
	      /* maxsize   */ size,
	      /* nsegments */ (align == 1 && size > 1) ?
	      (2 + (size / USB_PAGE_SIZE)) : 1,
	      /* maxsegsz  */ (align == 1 && size > USB_PAGE_SIZE) ?
	      USB_PAGE_SIZE : size,
	      /* flags     */ BUS_DMA_KEEP_PG_OFFSET,
	      /* lockfn    */ &usb_dma_lock_cb,
	      /* lockarg   */ NULL,
	    &tag)) {
		tag = NULL;
	}
	udt->tag = tag;
}

/*------------------------------------------------------------------------*
 *	usb_dma_tag_destroy - free a DMA tag
 *------------------------------------------------------------------------*/
static void
usb_dma_tag_destroy(struct usb_dma_tag *udt)
{
	bus_dma_tag_destroy(udt->tag);
}

/*------------------------------------------------------------------------*
 *	usb_pc_alloc_mem_cb - BUS-DMA callback function
 *------------------------------------------------------------------------*/
static void
usb_pc_alloc_mem_cb(void *arg, bus_dma_segment_t *segs,
    int nseg, int error)
{
	usb_pc_common_mem_cb(arg, segs, nseg, error, 0);
}

/*------------------------------------------------------------------------*
 *	usb_pc_load_mem_cb - BUS-DMA callback function
 *------------------------------------------------------------------------*/
static void
usb_pc_load_mem_cb(void *arg, bus_dma_segment_t *segs,
    int nseg, int error)
{
	usb_pc_common_mem_cb(arg, segs, nseg, error, 1);
}

/*------------------------------------------------------------------------*
 *	usb_pc_common_mem_cb - BUS-DMA callback function
 *------------------------------------------------------------------------*/
static void
usb_pc_common_mem_cb(void *arg, bus_dma_segment_t *segs,
    int nseg, int error, uint8_t isload)
{
	struct usb_dma_parent_tag *uptag;
	struct usb_page_cache *pc;
	struct usb_page *pg;
	usb_size_t rem;
	uint8_t owned;

	pc = arg;
	uptag = pc->tag_parent;

	/*
	 * XXX There is sometimes recursive locking here.
	 * XXX We should try to find a better solution.
	 * XXX Until further notice the "owned" variable does
	 * XXX the trick.
	 */
	if (error) {
		goto done;
	}
	pg = pc->page_start;
	pg->physaddr = segs->ds_addr & ~(USB_PAGE_SIZE - 1);
	rem = segs->ds_addr & (USB_PAGE_SIZE - 1);
	pc->page_offset_buf = rem;
	pc->page_offset_end += rem;
	nseg--;
#ifdef USB_DEBUG
	if (rem != (USB_P2U(pc->buffer) & (USB_PAGE_SIZE - 1))) {
		/*
		 * This check verifies that the physical address is correct:
		 */
		DPRINTFN(0, "Page offset was not preserved\n");
		error = 1;
		goto done;
	}
#endif
	while (nseg > 0) {
		nseg--;
		segs++;
		pg++;
		pg->physaddr = segs->ds_addr & ~(USB_PAGE_SIZE - 1);
	}

done:
	owned = mtx_owned(uptag->mtx);
	if (!owned)
		mtx_lock(uptag->mtx);

	uptag->dma_error = (error ? 1 : 0);
	if (isload) {
		(uptag->func) (uptag);
	} else {
		cv_broadcast(uptag->cv);
	}
	if (!owned)
		mtx_unlock(uptag->mtx);
}

/*------------------------------------------------------------------------*
 *	usb_pc_alloc_mem - allocate DMA'able memory
 *
 * Returns:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_alloc_mem(struct usb_page_cache *pc, struct usb_page *pg,
    usb_size_t size, usb_size_t align)
{
	struct usb_dma_parent_tag *uptag;
	struct usb_dma_tag *utag;
	bus_dmamap_t map;
	void *ptr;
	int err;

	uptag = pc->tag_parent;

	if (align != 1) {
		/*
		 * The alignment must be greater than or equal to the
		 * "size", else the object can be split between two
		 * memory pages and we get a problem!
		 */
		while (align < size) {
			align *= 2;
			if (align == 0) {
				goto error;
			}
		}
#if 1
		/*
		 * XXX BUS-DMA workaround - FIXME later:
		 *
		 * We assume that the alignment at this point of the
		 * code is greater than or equal to the size and less
		 * than two times the size, so that if we double the
		 * size, the size will be greater than the alignment.
		 *
		 * The bus-dma system has a check for "alignment" being
		 * less than "size".  If that check fails we end up
		 * using contigmalloc which is page based even for
		 * small allocations.  Try to avoid that to save
		 * memory, hence we sometimes do a large number of
		 * small allocations!
		 */
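		/*
		 * Worked example (illustrative only): a request for
		 * size = 24 bytes with align = 8 first has its
		 * alignment raised to 32 by the loop above
		 * (8 -> 16 -> 32).  Since 24 <= USB_PAGE_SIZE / 2, the
		 * size is then doubled to 48, which exceeds the
		 * alignment of 32, so bus_dma stays on its regular
		 * allocation path instead of falling back to
		 * contigmalloc.
		 */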
		if (size <= (USB_PAGE_SIZE / 2)) {
			size *= 2;
		}
#endif
	}
	/* get the correct DMA tag */
	utag = usb_dma_tag_find(uptag, size, align);
	if (utag == NULL) {
		goto error;
	}
	/* allocate memory */
	if (bus_dmamem_alloc(
	    utag->tag, &ptr, (BUS_DMA_WAITOK | BUS_DMA_COHERENT), &map)) {
		goto error;
	}
	/* setup page cache */
	pc->buffer = ptr;
	pc->page_start = pg;
	pc->page_offset_buf = 0;
	pc->page_offset_end = size;
	pc->map = map;
	pc->tag = utag->tag;
	pc->ismultiseg = (align == 1);

	mtx_lock(uptag->mtx);

	/* load memory into DMA */
	err = bus_dmamap_load(
	    utag->tag, map, ptr, size, &usb_pc_alloc_mem_cb,
	    pc, (BUS_DMA_WAITOK | BUS_DMA_COHERENT));

	if (err == EINPROGRESS) {
		cv_wait(uptag->cv, uptag->mtx);
		err = 0;
	}
	mtx_unlock(uptag->mtx);

	if (err || uptag->dma_error) {
		bus_dmamem_free(utag->tag, ptr, map);
		goto error;
	}
	bzero(ptr, size);

	usb_pc_cpu_flush(pc);

	return (0);

error:
	/* reset most of the page cache */
	pc->buffer = NULL;
	pc->page_start = NULL;
	pc->page_offset_buf = 0;
	pc->page_offset_end = 0;
	pc->map = NULL;
	pc->tag = NULL;
	return (1);
}

/*------------------------------------------------------------------------*
 *	usb_pc_free_mem - free DMA memory
 *
 * This function is NULL safe.
 *------------------------------------------------------------------------*/
void
usb_pc_free_mem(struct usb_page_cache *pc)
{
	if (pc && pc->buffer) {
		bus_dmamap_unload(pc->tag, pc->map);

		bus_dmamem_free(pc->tag, pc->buffer, pc->map);

		pc->buffer = NULL;
	}
}
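/*
 * Illustrative sketch (hypothetical, not compiled): usb_pc_alloc_mem()
 * and usb_pc_free_mem() are normally driven by the USB transfer setup
 * code, but the pairing looks roughly like the function below.  With
 * align == 1 the mapping may be multi-segment, so the caller provides
 * an array of "struct usb_page" entries large enough to describe every
 * page the buffer can touch; "size / USB_PAGE_SIZE + 2" mirrors the
 * "nsegments" computation in usb_dma_tag_create() above.  All names
 * prefixed with "example_" are placeholders, and "pc->tag_parent" is
 * assumed to have been set up through usb_dma_tag_setup() beforehand.
 */
#if 0
#define	EXAMPLE_BUFSIZE	8192	/* bytes, arbitrary */

static int
example_alloc_and_free(struct usb_page_cache *pc)
{
	/* one entry per page the buffer can span, plus slack */
	static struct usb_page example_pages[
	    (EXAMPLE_BUFSIZE / USB_PAGE_SIZE) + 2];

	/* allocate, zero and load EXAMPLE_BUFSIZE bytes of DMA memory */
	if (usb_pc_alloc_mem(pc, example_pages, EXAMPLE_BUFSIZE, 1))
		return (ENOMEM);	/* allocation or load failed */

	/* ... access the buffer via usbd_copy_in()/usbd_copy_out() ... */

	usb_pc_free_mem(pc);		/* NULL safe */
	return (0);
}
#endif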

/*------------------------------------------------------------------------*
 *	usb_pc_load_mem - load virtual memory into DMA
 *
 * Return values:
 *    0: Success
 * Else: Error
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_load_mem(struct usb_page_cache *pc, usb_size_t size, uint8_t sync)
{
	/* setup page cache */
	pc->page_offset_buf = 0;
	pc->page_offset_end = size;
	pc->ismultiseg = 1;

	mtx_assert(pc->tag_parent->mtx, MA_OWNED);

	if (size > 0) {
		if (sync) {
			struct usb_dma_parent_tag *uptag;
			int err;

			uptag = pc->tag_parent;

			/*
			 * We have to unload the previously loaded DMA
			 * pages before trying to load a new one!
			 */
			bus_dmamap_unload(pc->tag, pc->map);

			/*
			 * Try to load memory into DMA.
			 */
			err = bus_dmamap_load(
			    pc->tag, pc->map, pc->buffer, size,
			    &usb_pc_alloc_mem_cb, pc, BUS_DMA_WAITOK);
			if (err == EINPROGRESS) {
				cv_wait(uptag->cv, uptag->mtx);
				err = 0;
			}
			if (err || uptag->dma_error) {
				return (1);
			}
		} else {
			/*
			 * We have to unload the previously loaded DMA
			 * pages before trying to load a new one!
			 */
			bus_dmamap_unload(pc->tag, pc->map);

			/*
			 * Try to load memory into DMA. The callback
			 * will be called in all cases:
			 */
			if (bus_dmamap_load(
			    pc->tag, pc->map, pc->buffer, size,
			    &usb_pc_load_mem_cb, pc, BUS_DMA_WAITOK)) {
				/* any error is reported via the callback */
			}
		}
	} else {
		if (!sync) {
			/*
			 * Call the callback so that the refcount is
			 * decremented properly:
			 */
			pc->tag_parent->dma_error = 0;
			(pc->tag_parent->func) (pc->tag_parent);
		}
	}
	return (0);
}

/*------------------------------------------------------------------------*
 *	usb_pc_cpu_invalidate - invalidate CPU cache
 *------------------------------------------------------------------------*/
void
usb_pc_cpu_invalidate(struct usb_page_cache *pc)
{
	if (pc->page_offset_end == pc->page_offset_buf) {
		/* nothing has been loaded into this page cache! */
		return;
	}

	/*
	 * TODO: We currently do XXX_POSTREAD and XXX_PREREAD at the
	 * same time, but in the future we should try to isolate the
	 * different cases to optimise the code. --HPS
	 */
	bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_PREREAD);
}

/*------------------------------------------------------------------------*
 *	usb_pc_cpu_flush - flush CPU cache
 *------------------------------------------------------------------------*/
void
usb_pc_cpu_flush(struct usb_page_cache *pc)
{
	if (pc->page_offset_end == pc->page_offset_buf) {
		/* nothing has been loaded into this page cache! */
		return;
	}
	bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_PREWRITE);
}

/*------------------------------------------------------------------------*
 *	usb_pc_dmamap_create - create a DMA map
 *
 * Returns:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_dmamap_create(struct usb_page_cache *pc, usb_size_t size)
{
	struct usb_xfer_root *info;
	struct usb_dma_tag *utag;

	/* get info */
	info = USB_DMATAG_TO_XROOT(pc->tag_parent);

	/* sanity check */
	if (info == NULL) {
		goto error;
	}
	utag = usb_dma_tag_find(pc->tag_parent, size, 1);
	if (utag == NULL) {
		goto error;
	}
	/* create DMA map */
	if (bus_dmamap_create(utag->tag, 0, &pc->map)) {
		goto error;
	}
	pc->tag = utag->tag;
	return (0);		/* success */

error:
	pc->map = NULL;
	pc->tag = NULL;
	return (1);		/* failure */
}

/*------------------------------------------------------------------------*
 *	usb_pc_dmamap_destroy
 *
 * This function is NULL safe.
 *------------------------------------------------------------------------*/
void
usb_pc_dmamap_destroy(struct usb_page_cache *pc)
{
	if (pc && pc->tag) {
		bus_dmamap_destroy(pc->tag, pc->map);
		pc->tag = NULL;
		pc->map = NULL;
	}
}

/*------------------------------------------------------------------------*
 *	usb_dma_tag_find - factored out code
 *------------------------------------------------------------------------*/
struct usb_dma_tag *
usb_dma_tag_find(struct usb_dma_parent_tag *udpt,
    usb_size_t size, usb_size_t align)
{
	struct usb_dma_tag *udt;
	uint8_t nudt;

	USB_ASSERT(align > 0, ("Invalid parameter align = 0\n"));
	USB_ASSERT(size > 0, ("Invalid parameter size = 0\n"));

	udt = udpt->utag_first;
	nudt = udpt->utag_max;

	while (nudt--) {
		if (udt->align == 0) {
			usb_dma_tag_create(udt, size, align);
			if (udt->tag == NULL) {
				return (NULL);
			}
			udt->align = align;
			udt->size = size;
			return (udt);
		}
		if ((udt->align == align) && (udt->size == size)) {
			return (udt);
		}
		udt++;
	}
	return (NULL);
}

/*------------------------------------------------------------------------*
 *	usb_dma_tag_setup - initialise USB DMA tags
 *------------------------------------------------------------------------*/
void
usb_dma_tag_setup(struct usb_dma_parent_tag *udpt,
    struct usb_dma_tag *udt, bus_dma_tag_t dmat,
    struct mtx *mtx, usb_dma_callback_t *func,
    uint8_t ndmabits, uint8_t nudt)
{
	bzero(udpt, sizeof(*udpt));

	/* sanity checking */
	if ((nudt == 0) ||
	    (ndmabits == 0) ||
	    (mtx == NULL)) {
		/* something is corrupt */
		return;
	}
	/* initialise condition variable */
	cv_init(udpt->cv, "USB DMA CV");

	/* store some information */
	udpt->mtx = mtx;
	udpt->func = func;
	udpt->tag = dmat;
	udpt->utag_first = udt;
	udpt->utag_max = nudt;
	udpt->dma_bits = ndmabits;

	while (nudt--) {
		bzero(udt, sizeof(*udt));
		udt->tag_parent = udpt;
		udt++;
	}
}

/*------------------------------------------------------------------------*
 *	usb_dma_tag_unsetup - factored out code
 *------------------------------------------------------------------------*/
void
usb_dma_tag_unsetup(struct usb_dma_parent_tag *udpt)
{
	struct usb_dma_tag *udt;
	uint8_t nudt;

	udt = udpt->utag_first;
	nudt = udpt->utag_max;

	while (nudt--) {
		if (udt->align) {
			/* destroy the USB DMA tag */
			usb_dma_tag_destroy(udt);
			udt->align = 0;
		}
		udt++;
	}

	if (udpt->utag_max) {
		/* destroy the condition variable */
		cv_destroy(udpt->cv);
	}
}
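/*
 * Illustrative sketch (hypothetical, not compiled): the owner of a
 * parent tag and its array of child tags pairs usb_dma_tag_setup()
 * with usb_dma_tag_unsetup().  In the real stack this is done by the
 * internal USB setup paths; the "example_" names, the tag count of 8
 * and the 32-bit DMA width below are assumptions for the example only.
 */
#if 0
static struct usb_dma_parent_tag example_parent;
static struct usb_dma_tag example_tags[8];	/* arbitrary count */
static struct mtx example_mtx;

static void
example_dma_done(struct usb_dma_parent_tag *udpt)
{
	/* called when an asynchronous bus_dmamap_load() has finished */
}

static void
example_dma_init(bus_dma_tag_t parent_dmat)
{
	usb_dma_tag_setup(&example_parent, example_tags, parent_dmat,
	    &example_mtx, &example_dma_done, 32, nitems(example_tags));
}

static void
example_dma_uninit(void)
{
	usb_dma_tag_unsetup(&example_parent);
}
#endif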
/*------------------------------------------------------------------------*
 *	usb_bdma_work_loop
 *
 * This function handles loading of virtual buffers into DMA and is
 * only called when "dma_refcount" is zero.
 *------------------------------------------------------------------------*/
void
usb_bdma_work_loop(struct usb_xfer_queue *pq)
{
	struct usb_xfer_root *info;
	struct usb_xfer *xfer;
	usb_frcount_t nframes;

	xfer = pq->curr;
	info = xfer->xroot;

	mtx_assert(info->xfer_mtx, MA_OWNED);

	if (xfer->error) {
		/* some error happened */
		USB_BUS_LOCK(info->bus);
		usbd_transfer_done(xfer, 0);
		USB_BUS_UNLOCK(info->bus);
		return;
	}
	if (!xfer->flags_int.bdma_setup) {
		struct usb_page *pg;
		usb_frlength_t frlength_0;
		uint8_t isread;

		xfer->flags_int.bdma_setup = 1;

		/* reset BUS-DMA load state */

		info->dma_error = 0;

		if (xfer->flags_int.isochronous_xfr) {
			/* only one frame buffer */
			nframes = 1;
			frlength_0 = xfer->sumlen;
		} else {
			/* can be multiple frame buffers */
			nframes = xfer->nframes;
			frlength_0 = xfer->frlengths[0];
		}

		/*
		 * Set DMA direction first. This is needed to
		 * select the correct cache invalidate and cache
		 * flush operations.
		 */
		isread = USB_GET_DATA_ISREAD(xfer);
		pg = xfer->dma_page_ptr;

		if (xfer->flags_int.control_xfr &&
		    xfer->flags_int.control_hdr) {
			/* special case */
			if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
				/* The device controller writes to memory */
				xfer->frbuffers[0].isread = 1;
			} else {
				/* The host controller reads from memory */
				xfer->frbuffers[0].isread = 0;
			}
		} else {
			/* default case */
			xfer->frbuffers[0].isread = isread;
		}

		/*
		 * Setup the "page_start" pointer which points to an array of
		 * USB pages where information about the physical address of a
		 * page will be stored. Also initialise the "isread" field of
		 * the USB page caches.
		 */
		xfer->frbuffers[0].page_start = pg;

		info->dma_nframes = nframes;
		info->dma_currframe = 0;
		info->dma_frlength_0 = frlength_0;

		pg += (frlength_0 / USB_PAGE_SIZE);
		pg += 2;

		while (--nframes > 0) {
			xfer->frbuffers[nframes].isread = isread;
			xfer->frbuffers[nframes].page_start = pg;

			pg += (xfer->frlengths[nframes] / USB_PAGE_SIZE);
			pg += 2;
		}
	}
	if (info->dma_error) {
		USB_BUS_LOCK(info->bus);
		usbd_transfer_done(xfer, USB_ERR_DMA_LOAD_FAILED);
		USB_BUS_UNLOCK(info->bus);
		return;
	}
	if (info->dma_currframe != info->dma_nframes) {
		if (info->dma_currframe == 0) {
			/* special case */
			usb_pc_load_mem(xfer->frbuffers,
			    info->dma_frlength_0, 0);
		} else {
			/* default case */
			nframes = info->dma_currframe;
			usb_pc_load_mem(xfer->frbuffers + nframes,
			    xfer->frlengths[nframes], 0);
		}

		/* advance frame index */
		info->dma_currframe++;

		return;
	}
	/* go ahead */
	usb_bdma_pre_sync(xfer);

	/* start loading next USB transfer, if any */
	usb_command_wrapper(pq, NULL);

	/* finally start the hardware */
	usbd_pipe_enter(xfer);
}

/*------------------------------------------------------------------------*
 *	usb_bdma_done_event
 *
 * This function is called when the BUS-DMA has loaded virtual memory
 * into DMA, if any.
 *------------------------------------------------------------------------*/
void
usb_bdma_done_event(struct usb_dma_parent_tag *udpt)
{
	struct usb_xfer_root *info;

	info = USB_DMATAG_TO_XROOT(udpt);

	mtx_assert(info->xfer_mtx, MA_OWNED);

	/* copy error */
	info->dma_error = udpt->dma_error;

	/* enter workloop again */
	usb_command_wrapper(&info->dma_q,
	    info->dma_q.curr);
}

/*------------------------------------------------------------------------*
 *	usb_bdma_pre_sync
 *
 * This function handles DMA synchronisation that must be done before
 * a USB transfer is started.
 *------------------------------------------------------------------------*/
void
usb_bdma_pre_sync(struct usb_xfer *xfer)
{
	struct usb_page_cache *pc;
	usb_frcount_t nframes;

	if (xfer->flags_int.isochronous_xfr) {
		/* only one frame buffer */
		nframes = 1;
	} else {
		/* can be multiple frame buffers */
		nframes = xfer->nframes;
	}

	pc = xfer->frbuffers;

	while (nframes--) {
		if (pc->isread) {
			usb_pc_cpu_invalidate(pc);
		} else {
			usb_pc_cpu_flush(pc);
		}
		pc++;
	}
}

/*------------------------------------------------------------------------*
 *	usb_bdma_post_sync
 *
 * This function handles DMA synchronisation that must be done after
 * a USB transfer has completed.
 *------------------------------------------------------------------------*/
void
usb_bdma_post_sync(struct usb_xfer *xfer)
{
	struct usb_page_cache *pc;
	usb_frcount_t nframes;

	if (xfer->flags_int.isochronous_xfr) {
		/* only one frame buffer */
		nframes = 1;
	} else {
		/* can be multiple frame buffers */
		nframes = xfer->nframes;
	}

	pc = xfer->frbuffers;

	while (nframes--) {
		if (pc->isread) {
			usb_pc_cpu_invalidate(pc);
		}
		pc++;
	}
}

#endif