/* $FreeBSD$ */
/*-
 * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/stdint.h>
#include <sys/stddef.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/sysctl.h>
#include <sys/sx.h>
#include <sys/unistd.h>
#include <sys/callout.h>
#include <sys/malloc.h>
#include <sys/priv.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>

#define	USB_DEBUG_VAR usb_debug

#include <dev/usb/usb_core.h>
#include <dev/usb/usb_busdma.h>
#include <dev/usb/usb_process.h>
#include <dev/usb/usb_transfer.h>
#include <dev/usb/usb_device.h>
#include <dev/usb/usb_util.h>
#include <dev/usb/usb_debug.h>

#include <dev/usb/usb_controller.h>
#include <dev/usb/usb_bus.h>

#if USB_HAVE_BUSDMA
static void	usb_dma_tag_create(struct usb_dma_tag *, usb_size_t, usb_size_t);
static void	usb_dma_tag_destroy(struct usb_dma_tag *);
static void	usb_dma_lock_cb(void *, bus_dma_lock_op_t);
static void	usb_pc_alloc_mem_cb(void *, bus_dma_segment_t *, int, int);
static void	usb_pc_load_mem_cb(void *, bus_dma_segment_t *, int, int);
static void	usb_pc_common_mem_cb(void *, bus_dma_segment_t *, int, int,
		    uint8_t);
#endif

/*------------------------------------------------------------------------*
 *	usbd_get_page - lookup DMA-able memory for the given offset
 *
 * NOTE: Only call this function when the "page_cache" structure has
 * been properly initialized !
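 *
 * Usage sketch (it simply mirrors the loop in usbd_copy_in() below):
 * callers normally walk a buffer chunk by chunk and clamp the returned
 * length to the number of bytes they still need:
 *
 *	struct usb_page_search res;
 *
 *	usbd_get_page(pc, offset, &res);
 *	if (res.length > len)
 *		res.length = len;
 *	(then use "res.buffer" / "res.physaddr" for the next "res.length"
 *	bytes and advance "offset")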
 *------------------------------------------------------------------------*/
void
usbd_get_page(struct usb_page_cache *pc, usb_frlength_t offset,
    struct usb_page_search *res)
{
#if USB_HAVE_BUSDMA
	struct usb_page *page;

	if (pc->page_start) {

		/* Case 1 - something has been loaded into DMA */

		if (pc->buffer) {

			/* Case 1a - Kernel Virtual Address */

			res->buffer = USB_ADD_BYTES(pc->buffer, offset);
		}
		offset += pc->page_offset_buf;

		/* compute destination page */

		page = pc->page_start;

		if (pc->ismultiseg) {

			page += (offset / USB_PAGE_SIZE);

			offset %= USB_PAGE_SIZE;

			res->length = USB_PAGE_SIZE - offset;
			res->physaddr = page->physaddr + offset;
		} else {
			res->length = (usb_size_t)-1;
			res->physaddr = page->physaddr + offset;
		}
		if (!pc->buffer) {

			/* Case 1b - Non-Kernel Virtual Address */

			res->buffer = USB_ADD_BYTES(page->buffer, offset);
		}
		return;
	}
#endif
	/* Case 2 - Plain PIO */

	res->buffer = USB_ADD_BYTES(pc->buffer, offset);
	res->length = (usb_size_t)-1;
#if USB_HAVE_BUSDMA
	res->physaddr = 0;
#endif
}

/*------------------------------------------------------------------------*
 *	usbd_copy_in - copy directly to DMA-able memory
 *------------------------------------------------------------------------*/
void
usbd_copy_in(struct usb_page_cache *cache, usb_frlength_t offset,
    const void *ptr, usb_frlength_t len)
{
	struct usb_page_search buf_res;

	while (len != 0) {

		usbd_get_page(cache, offset, &buf_res);

		if (buf_res.length > len) {
			buf_res.length = len;
		}
		memcpy(buf_res.buffer, ptr, buf_res.length);

		offset += buf_res.length;
		len -= buf_res.length;
		ptr = USB_ADD_BYTES(ptr, buf_res.length);
	}
}

/*------------------------------------------------------------------------*
 *	usbd_copy_in_user - copy directly to DMA-able memory from userland
 *
 * Return values:
 * 0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
#if USB_HAVE_USER_IO
int
usbd_copy_in_user(struct usb_page_cache *cache, usb_frlength_t offset,
    const void *ptr, usb_frlength_t len)
{
	struct usb_page_search buf_res;
	int error;

	while (len != 0) {

		usbd_get_page(cache, offset, &buf_res);

		if (buf_res.length > len) {
			buf_res.length = len;
		}
		error = copyin(ptr, buf_res.buffer, buf_res.length);
		if (error)
			return (error);

		offset += buf_res.length;
		len -= buf_res.length;
		ptr = USB_ADD_BYTES(ptr, buf_res.length);
	}
	return (0);		/* success */
}
#endif

/*------------------------------------------------------------------------*
 *	usbd_m_copy_in - copy an mbuf chain directly into DMA-able memory
 *------------------------------------------------------------------------*/
#if USB_HAVE_MBUF
struct usb_m_copy_in_arg {
	struct usb_page_cache *cache;
	usb_frlength_t dst_offset;
};

static int
usbd_m_copy_in_cb(void *arg, void *src, uint32_t count)
{
	struct usb_m_copy_in_arg *ua = arg;

	usbd_copy_in(ua->cache, ua->dst_offset, src, count);
	ua->dst_offset += count;
	return (0);
}

void
usbd_m_copy_in(struct usb_page_cache *cache, usb_frlength_t dst_offset,
    struct mbuf *m, usb_size_t src_offset, usb_frlength_t src_len)
{
	struct usb_m_copy_in_arg arg = {cache, dst_offset};

	/* the return value is ignored - the callback above cannot fail */
	(void) m_apply(m, src_offset, src_len, &usbd_m_copy_in_cb, &arg);
}
#endif

/*------------------------------------------------------------------------*
 *	usb_uiomove - factored out code
 *------------------------------------------------------------------------*/
#if USB_HAVE_USER_IO
int
usb_uiomove(struct usb_page_cache *pc, struct uio *uio,
    usb_frlength_t pc_offset, usb_frlength_t len)
{
	struct usb_page_search res;
	int error = 0;

	while (len != 0) {

		usbd_get_page(pc, pc_offset, &res);

		if (res.length > len) {
			res.length = len;
		}
		/*
		 * "uiomove()" can sleep, so the caller needs a wrapper
		 * which drops the mutex and re-checks state afterwards.
		 */
		error = uiomove(res.buffer, res.length, uio);

		if (error) {
			break;
		}
		pc_offset += res.length;
		len -= res.length;
	}
	return (error);
}
#endif

/*------------------------------------------------------------------------*
 *	usbd_copy_out - copy directly from DMA-able memory
 *------------------------------------------------------------------------*/
void
usbd_copy_out(struct usb_page_cache *cache, usb_frlength_t offset,
    void *ptr, usb_frlength_t len)
{
	struct usb_page_search res;

	while (len != 0) {

		usbd_get_page(cache, offset, &res);

		if (res.length > len) {
			res.length = len;
		}
		memcpy(ptr, res.buffer, res.length);

		offset += res.length;
		len -= res.length;
		ptr = USB_ADD_BYTES(ptr, res.length);
	}
}

/*------------------------------------------------------------------------*
 *	usbd_copy_out_user - copy directly from DMA-able memory to userland
 *
 * Return values:
 * 0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
#if USB_HAVE_USER_IO
int
usbd_copy_out_user(struct usb_page_cache *cache, usb_frlength_t offset,
    void *ptr, usb_frlength_t len)
{
	struct usb_page_search res;
	int error;

	while (len != 0) {

		usbd_get_page(cache, offset, &res);

		if (res.length > len) {
			res.length = len;
		}
		error = copyout(res.buffer, ptr, res.length);
		if (error)
			return (error);

		offset += res.length;
		len -= res.length;
		ptr = USB_ADD_BYTES(ptr, res.length);
	}
	return (0);		/* success */
}
#endif

/*------------------------------------------------------------------------*
 *	usbd_frame_zero - zero DMA-able memory
 *------------------------------------------------------------------------*/
void
usbd_frame_zero(struct usb_page_cache *cache, usb_frlength_t offset,
    usb_frlength_t len)
{
	struct usb_page_search res;

	while (len != 0) {

		usbd_get_page(cache, offset, &res);

		if (res.length > len) {
			res.length = len;
		}
		memset(res.buffer, 0, res.length);

		offset += res.length;
		len -= res.length;
	}
}

#if USB_HAVE_BUSDMA

/*------------------------------------------------------------------------*
 *	usb_dma_lock_cb - dummy callback
 *------------------------------------------------------------------------*/
static void
usb_dma_lock_cb(void *arg, bus_dma_lock_op_t op)
{
	/* we use "mtx_owned()" instead of this function */
}

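/*
 * The helpers above (usbd_get_page(), usbd_copy_in(), usbd_copy_out(),
 * usbd_frame_zero() and friends) are what device drivers typically use
 * on a transfer's frame buffers, for example (sketch only):
 *
 *	usbd_copy_in(usbd_xfer_get_frame(xfer, 0), 0, buf, len);
 *
 * The functions that follow implement the BUS-DMA side behind those
 * helpers: tag and map management, memory allocation and CPU cache
 * synchronisation.
 */
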
/*------------------------------------------------------------------------*
 *	usb_dma_tag_create - allocate a DMA tag
 *
 * NOTE: If the "align" parameter has a value of 1 the DMA-tag will
 * allow multi-segment mappings. Else all mappings are single-segment.
 *------------------------------------------------------------------------*/
static void
usb_dma_tag_create(struct usb_dma_tag *udt,
    usb_size_t size, usb_size_t align)
{
	bus_dma_tag_t tag;

	if (bus_dma_tag_create
	    ( /* parent    */ udt->tag_parent->tag,
	      /* alignment */ align,
	      /* boundary  */ 0,
	      /* lowaddr   */ (2ULL << (udt->tag_parent->dma_bits - 1)) - 1,
	      /* highaddr  */ BUS_SPACE_MAXADDR,
	      /* filter    */ NULL,
	      /* filterarg */ NULL,
	      /* maxsize   */ size,
	      /* nsegments */ (align == 1 && size > 1) ?
	      (2 + (size / USB_PAGE_SIZE)) : 1,
	      /* maxsegsz  */ (align == 1 && size > USB_PAGE_SIZE) ?
	      USB_PAGE_SIZE : size,
	      /* flags     */ BUS_DMA_KEEP_PG_OFFSET,
	      /* lockfn    */ &usb_dma_lock_cb,
	      /* lockarg   */ NULL,
	      &tag)) {
		tag = NULL;
	}
	udt->tag = tag;
}

/*------------------------------------------------------------------------*
 *	usb_dma_tag_destroy - free a DMA tag
 *------------------------------------------------------------------------*/
static void
usb_dma_tag_destroy(struct usb_dma_tag *udt)
{
	bus_dma_tag_destroy(udt->tag);
}

/*------------------------------------------------------------------------*
 *	usb_pc_alloc_mem_cb - BUS-DMA callback function
 *------------------------------------------------------------------------*/
static void
usb_pc_alloc_mem_cb(void *arg, bus_dma_segment_t *segs,
    int nseg, int error)
{
	usb_pc_common_mem_cb(arg, segs, nseg, error, 0);
}

/*------------------------------------------------------------------------*
 *	usb_pc_load_mem_cb - BUS-DMA callback function
 *------------------------------------------------------------------------*/
static void
usb_pc_load_mem_cb(void *arg, bus_dma_segment_t *segs,
    int nseg, int error)
{
	usb_pc_common_mem_cb(arg, segs, nseg, error, 1);
}

/*------------------------------------------------------------------------*
 *	usb_pc_common_mem_cb - BUS-DMA callback function
 *------------------------------------------------------------------------*/
static void
usb_pc_common_mem_cb(void *arg, bus_dma_segment_t *segs,
    int nseg, int error, uint8_t isload)
{
	struct usb_dma_parent_tag *uptag;
	struct usb_page_cache *pc;
	struct usb_page *pg;
	usb_size_t rem;
	bus_size_t off;
	uint8_t owned;

	pc = arg;
	uptag = pc->tag_parent;

	/*
	 * XXX There is sometimes recursive locking here.
	 * XXX We should try to find a better solution.
	 * XXX Until further notice the "owned" variable does
	 * XXX the trick.
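	 * XXX
	 * XXX When bus_dmamap_load() runs this callback synchronously,
	 * XXX the parent tag mutex may already be held by the caller,
	 * XXX for example by usb_pc_alloc_mem(); mtx_owned() detects
	 * XXX that case below so that the mutex is not taken twice.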
	 */

	if (error) {
		goto done;
	}

	off = 0;
	pg = pc->page_start;
	pg->physaddr = segs->ds_addr & ~(USB_PAGE_SIZE - 1);
	rem = segs->ds_addr & (USB_PAGE_SIZE - 1);
	pc->page_offset_buf = rem;
	pc->page_offset_end += rem;
	nseg--;
#ifdef USB_DEBUG
	if (rem != (USB_P2U(pc->buffer) & (USB_PAGE_SIZE - 1))) {
		/*
		 * This check verifies that the physical address is correct:
		 */
		DPRINTFN(0, "Page offset was not preserved\n");
		error = 1;
		goto done;
	}
#endif
	while (nseg > 0) {
		off += USB_PAGE_SIZE;
		if (off >= (segs->ds_len + rem)) {
			/* page crossing */
			nseg--;
			segs++;
			off = 0;
			rem = 0;
		}
		pg++;
		pg->physaddr = (segs->ds_addr + off) & ~(USB_PAGE_SIZE - 1);
	}

done:
	owned = mtx_owned(uptag->mtx);
	if (!owned)
		mtx_lock(uptag->mtx);

	uptag->dma_error = (error ? 1 : 0);
	if (isload) {
		(uptag->func) (uptag);
	} else {
		cv_broadcast(uptag->cv);
	}
	if (!owned)
		mtx_unlock(uptag->mtx);
}

/*------------------------------------------------------------------------*
 *	usb_pc_alloc_mem - allocate DMA'able memory
 *
 * Returns:
 * 0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_alloc_mem(struct usb_page_cache *pc, struct usb_page *pg,
    usb_size_t size, usb_size_t align)
{
	struct usb_dma_parent_tag *uptag;
	struct usb_dma_tag *utag;
	bus_dmamap_t map;
	void *ptr;
	int err;

	uptag = pc->tag_parent;

	if (align != 1) {
		/*
		 * The alignment must be greater than or equal to the
		 * "size", else the object can be split between two
		 * memory pages and we get a problem!
		 */
		while (align < size) {
			align *= 2;
			if (align == 0) {
				goto error;
			}
		}
#if 1
		/*
		 * XXX BUS-DMA workaround - FIXME later:
		 *
		 * We assume that the alignment at this point of the
		 * code is greater than or equal to the size and less
		 * than two times the size, so that if we double the
		 * size, the size will be greater than the alignment.
		 *
		 * The bus-dma system has a check for "alignment" being
		 * less than "size". If that check fails we end up
		 * using contigmalloc which is page based even for
		 * small allocations. Try to avoid that to save memory,
		 * since we sometimes do a large number of small
		 * allocations!
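		 *
		 * Illustrative example (numbers chosen only to show the
		 * arithmetic): a request for size = 24 with align = 4
		 * first has "align" rounded up by the loop above to 32,
		 * and the doubling below then turns "size" into 48,
		 * which is again larger than the alignment.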
		 */
		if (size <= (USB_PAGE_SIZE / 2)) {
			size *= 2;
		}
#endif
	}
	/* get the correct DMA tag */
	utag = usb_dma_tag_find(uptag, size, align);
	if (utag == NULL) {
		goto error;
	}
	/* allocate memory */
	if (bus_dmamem_alloc(
	    utag->tag, &ptr, (BUS_DMA_WAITOK | BUS_DMA_COHERENT), &map)) {
		goto error;
	}
	/* setup page cache */
	pc->buffer = ptr;
	pc->page_start = pg;
	pc->page_offset_buf = 0;
	pc->page_offset_end = size;
	pc->map = map;
	pc->tag = utag->tag;
	pc->ismultiseg = (align == 1);

	mtx_lock(uptag->mtx);

	/* load memory into DMA */
	err = bus_dmamap_load(
	    utag->tag, map, ptr, size, &usb_pc_alloc_mem_cb,
	    pc, (BUS_DMA_WAITOK | BUS_DMA_COHERENT));

	if (err == EINPROGRESS) {
		cv_wait(uptag->cv, uptag->mtx);
		err = 0;
	}
	mtx_unlock(uptag->mtx);

	if (err || uptag->dma_error) {
		bus_dmamem_free(utag->tag, ptr, map);
		goto error;
	}
	memset(ptr, 0, size);

	usb_pc_cpu_flush(pc);

	return (0);

error:
	/* reset most of the page cache */
	pc->buffer = NULL;
	pc->page_start = NULL;
	pc->page_offset_buf = 0;
	pc->page_offset_end = 0;
	pc->map = NULL;
	pc->tag = NULL;
	return (1);
}

/*------------------------------------------------------------------------*
 *	usb_pc_free_mem - free DMA memory
 *
 * This function is NULL safe.
 *------------------------------------------------------------------------*/
void
usb_pc_free_mem(struct usb_page_cache *pc)
{
	if (pc && pc->buffer) {

		bus_dmamap_unload(pc->tag, pc->map);

		bus_dmamem_free(pc->tag, pc->buffer, pc->map);

		pc->buffer = NULL;
	}
}

/*------------------------------------------------------------------------*
 *	usb_pc_load_mem - load virtual memory into DMA
 *
 * Return values:
 * 0: Success
 * Else: Error
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_load_mem(struct usb_page_cache *pc, usb_size_t size, uint8_t sync)
{
	/* setup page cache */
	pc->page_offset_buf = 0;
	pc->page_offset_end = size;
	pc->ismultiseg = 1;

	mtx_assert(pc->tag_parent->mtx, MA_OWNED);

	if (size > 0) {
		if (sync) {
			struct usb_dma_parent_tag *uptag;
			int err;

			uptag = pc->tag_parent;

			/*
			 * We have to unload the previously loaded DMA
			 * pages before trying to load a new one!
			 */
			bus_dmamap_unload(pc->tag, pc->map);

			/*
			 * Try to load memory into DMA.
			 */
			err = bus_dmamap_load(
			    pc->tag, pc->map, pc->buffer, size,
			    &usb_pc_alloc_mem_cb, pc, BUS_DMA_WAITOK);
			if (err == EINPROGRESS) {
				cv_wait(uptag->cv, uptag->mtx);
				err = 0;
			}
			if (err || uptag->dma_error) {
				return (1);
			}
		} else {

			/*
			 * We have to unload the previously loaded DMA
			 * pages before trying to load a new one!
			 */
			bus_dmamap_unload(pc->tag, pc->map);

			/*
			 * Try to load memory into DMA.
			 * The callback will be called in all cases:
			 */
			(void) bus_dmamap_load(
			    pc->tag, pc->map, pc->buffer, size,
			    &usb_pc_load_mem_cb, pc, BUS_DMA_WAITOK);
		}
	} else {
		if (!sync) {
			/*
			 * Call the callback so that the refcount is
			 * decremented properly:
			 */
			pc->tag_parent->dma_error = 0;
			(pc->tag_parent->func) (pc->tag_parent);
		}
	}
	return (0);
}

/*------------------------------------------------------------------------*
 *	usb_pc_cpu_invalidate - invalidate CPU cache
 *------------------------------------------------------------------------*/
void
usb_pc_cpu_invalidate(struct usb_page_cache *pc)
{
	if (pc->page_offset_end == pc->page_offset_buf) {
		/* nothing has been loaded into this page cache! */
		return;
	}

	/*
	 * TODO: We currently do XXX_POSTREAD and XXX_PREREAD at the
	 * same time, but in the future we should try to isolate the
	 * different cases to optimise the code. --HPS
	 */
	bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_PREREAD);
}

/*------------------------------------------------------------------------*
 *	usb_pc_cpu_flush - flush CPU cache
 *------------------------------------------------------------------------*/
void
usb_pc_cpu_flush(struct usb_page_cache *pc)
{
	if (pc->page_offset_end == pc->page_offset_buf) {
		/* nothing has been loaded into this page cache! */
		return;
	}
	bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_PREWRITE);
}

/*------------------------------------------------------------------------*
 *	usb_pc_dmamap_create - create a DMA map
 *
 * Returns:
 * 0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_dmamap_create(struct usb_page_cache *pc, usb_size_t size)
{
	struct usb_xfer_root *info;
	struct usb_dma_tag *utag;

	/* get info */
	info = USB_DMATAG_TO_XROOT(pc->tag_parent);

	/* sanity check */
	if (info == NULL) {
		goto error;
	}
	utag = usb_dma_tag_find(pc->tag_parent, size, 1);
	if (utag == NULL) {
		goto error;
	}
	/* create DMA map */
	if (bus_dmamap_create(utag->tag, 0, &pc->map)) {
		goto error;
	}
	pc->tag = utag->tag;
	return (0);		/* success */

error:
	pc->map = NULL;
	pc->tag = NULL;
	return (1);		/* failure */
}

/*------------------------------------------------------------------------*
 *	usb_pc_dmamap_destroy
 *
 * This function is NULL safe.
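 *
 * Maps obtained from usb_pc_dmamap_create() above are expected to be
 * released through this function once they are no longer loaded.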
 *------------------------------------------------------------------------*/
void
usb_pc_dmamap_destroy(struct usb_page_cache *pc)
{
	if (pc && pc->tag) {
		bus_dmamap_destroy(pc->tag, pc->map);
		pc->tag = NULL;
		pc->map = NULL;
	}
}

/*------------------------------------------------------------------------*
 *	usb_dma_tag_find - factored out code
 *------------------------------------------------------------------------*/
struct usb_dma_tag *
usb_dma_tag_find(struct usb_dma_parent_tag *udpt,
    usb_size_t size, usb_size_t align)
{
	struct usb_dma_tag *udt;
	uint8_t nudt;

	USB_ASSERT(align > 0, ("Invalid parameter align = 0\n"));
	USB_ASSERT(size > 0, ("Invalid parameter size = 0\n"));

	udt = udpt->utag_first;
	nudt = udpt->utag_max;

	while (nudt--) {

		if (udt->align == 0) {
			usb_dma_tag_create(udt, size, align);
			if (udt->tag == NULL) {
				return (NULL);
			}
			udt->align = align;
			udt->size = size;
			return (udt);
		}
		if ((udt->align == align) && (udt->size == size)) {
			return (udt);
		}
		udt++;
	}
	return (NULL);
}

/*------------------------------------------------------------------------*
 *	usb_dma_tag_setup - initialise USB DMA tags
 *------------------------------------------------------------------------*/
void
usb_dma_tag_setup(struct usb_dma_parent_tag *udpt,
    struct usb_dma_tag *udt, bus_dma_tag_t dmat,
    struct mtx *mtx, usb_dma_callback_t *func,
    uint8_t ndmabits, uint8_t nudt)
{
	memset(udpt, 0, sizeof(*udpt));

	/* sanity checking */
	if ((nudt == 0) ||
	    (ndmabits == 0) ||
	    (mtx == NULL)) {
		/* something is corrupt */
		return;
	}
	/* initialise condition variable */
	cv_init(udpt->cv, "USB DMA CV");

	/* store some information */
	udpt->mtx = mtx;
	udpt->func = func;
	udpt->tag = dmat;
	udpt->utag_first = udt;
	udpt->utag_max = nudt;
	udpt->dma_bits = ndmabits;

	while (nudt--) {
		memset(udt, 0, sizeof(*udt));
		udt->tag_parent = udpt;
		udt++;
	}
}

/*------------------------------------------------------------------------*
 *	usb_dma_tag_unsetup - factored out code
 *------------------------------------------------------------------------*/
void
usb_dma_tag_unsetup(struct usb_dma_parent_tag *udpt)
{
	struct usb_dma_tag *udt;
	uint8_t nudt;

	udt = udpt->utag_first;
	nudt = udpt->utag_max;

	while (nudt--) {

		if (udt->align) {
			/* destroy the USB DMA tag */
			usb_dma_tag_destroy(udt);
			udt->align = 0;
		}
		udt++;
	}

	if (udpt->utag_max) {
		/* destroy the condition variable */
		cv_destroy(udpt->cv);
	}
}

/*------------------------------------------------------------------------*
 *	usb_bdma_work_loop
 *
 * This function handles loading of virtual buffers into DMA and is
 * only called when "dma_refcount" is zero.
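 *
 * Rough flow, as implemented below: on the first pass the frame
 * buffers get their "page_start" pointers and DMA direction set up.
 * Each pass then loads at most one frame via usb_pc_load_mem(..., 0)
 * and returns; the BUS-DMA completion path calls usb_bdma_done_event()
 * which re-enters this work loop. Once "dma_currframe" reaches
 * "dma_nframes" the CPU caches are synchronised by usb_bdma_pre_sync()
 * and the transfer is handed to the hardware.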
 *------------------------------------------------------------------------*/
void
usb_bdma_work_loop(struct usb_xfer_queue *pq)
{
	struct usb_xfer_root *info;
	struct usb_xfer *xfer;
	usb_frcount_t nframes;

	xfer = pq->curr;
	info = xfer->xroot;

	mtx_assert(info->xfer_mtx, MA_OWNED);

	if (xfer->error) {
		/* some error happened */
		USB_BUS_LOCK(info->bus);
		usbd_transfer_done(xfer, 0);
		USB_BUS_UNLOCK(info->bus);
		return;
	}
	if (!xfer->flags_int.bdma_setup) {
		struct usb_page *pg;
		usb_frlength_t frlength_0;
		uint8_t isread;

		xfer->flags_int.bdma_setup = 1;

		/* reset BUS-DMA load state */

		info->dma_error = 0;

		if (xfer->flags_int.isochronous_xfr) {
			/* only one frame buffer */
			nframes = 1;
			frlength_0 = xfer->sumlen;
		} else {
			/* can be multiple frame buffers */
			nframes = xfer->nframes;
			frlength_0 = xfer->frlengths[0];
		}

		/*
		 * Set DMA direction first. This is needed to
		 * select the correct cache invalidate and cache
		 * flush operations.
		 */
		isread = USB_GET_DATA_ISREAD(xfer);
		pg = xfer->dma_page_ptr;

		if (xfer->flags_int.control_xfr &&
		    xfer->flags_int.control_hdr) {
			/* special case */
			if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
				/* The device controller writes to memory */
				xfer->frbuffers[0].isread = 1;
			} else {
				/* The host controller reads from memory */
				xfer->frbuffers[0].isread = 0;
			}
		} else {
			/* default case */
			xfer->frbuffers[0].isread = isread;
		}

		/*
		 * Set up the "page_start" pointer which points to an array of
		 * USB pages where information about the physical address of a
		 * page will be stored. Also initialise the "isread" field of
		 * the USB page caches.
		 */
		xfer->frbuffers[0].page_start = pg;

		info->dma_nframes = nframes;
		info->dma_currframe = 0;
		info->dma_frlength_0 = frlength_0;

		pg += (frlength_0 / USB_PAGE_SIZE);
		pg += 2;

		while (--nframes > 0) {
			xfer->frbuffers[nframes].isread = isread;
			xfer->frbuffers[nframes].page_start = pg;

			pg += (xfer->frlengths[nframes] / USB_PAGE_SIZE);
			pg += 2;
		}
	}
	if (info->dma_error) {
		USB_BUS_LOCK(info->bus);
		usbd_transfer_done(xfer, USB_ERR_DMA_LOAD_FAILED);
		USB_BUS_UNLOCK(info->bus);
		return;
	}
	if (info->dma_currframe != info->dma_nframes) {

		if (info->dma_currframe == 0) {
			/* special case */
			usb_pc_load_mem(xfer->frbuffers,
			    info->dma_frlength_0, 0);
		} else {
			/* default case */
			nframes = info->dma_currframe;
			usb_pc_load_mem(xfer->frbuffers + nframes,
			    xfer->frlengths[nframes], 0);
		}

		/* advance frame index */
		info->dma_currframe++;

		return;
	}
	/* go ahead */
	usb_bdma_pre_sync(xfer);

	/* start loading next USB transfer, if any */
	usb_command_wrapper(pq, NULL);

	/* finally start the hardware */
	usbd_pipe_enter(xfer);
}

/*------------------------------------------------------------------------*
 *	usb_bdma_done_event
 *
 * This function is called when the BUS-DMA has loaded virtual memory
 * into DMA, if any.
 *------------------------------------------------------------------------*/
void
usb_bdma_done_event(struct usb_dma_parent_tag *udpt)
{
	struct usb_xfer_root *info;

	info = USB_DMATAG_TO_XROOT(udpt);

	mtx_assert(info->xfer_mtx, MA_OWNED);

	/* copy error */
	info->dma_error = udpt->dma_error;

	/* enter workloop again */
	usb_command_wrapper(&info->dma_q,
	    info->dma_q.curr);
}

/*------------------------------------------------------------------------*
 *	usb_bdma_pre_sync
 *
 * This function handles DMA synchronisation that must be done before
 * a USB transfer is started.
 *------------------------------------------------------------------------*/
void
usb_bdma_pre_sync(struct usb_xfer *xfer)
{
	struct usb_page_cache *pc;
	usb_frcount_t nframes;

	if (xfer->flags_int.isochronous_xfr) {
		/* only one frame buffer */
		nframes = 1;
	} else {
		/* can be multiple frame buffers */
		nframes = xfer->nframes;
	}

	pc = xfer->frbuffers;

	while (nframes--) {

		if (pc->isread) {
			usb_pc_cpu_invalidate(pc);
		} else {
			usb_pc_cpu_flush(pc);
		}
		pc++;
	}
}

/*------------------------------------------------------------------------*
 *	usb_bdma_post_sync
 *
 * This function handles DMA synchronisation that must be done after
 * a USB transfer is complete.
 *------------------------------------------------------------------------*/
void
usb_bdma_post_sync(struct usb_xfer *xfer)
{
	struct usb_page_cache *pc;
	usb_frcount_t nframes;

	if (xfer->flags_int.isochronous_xfr) {
		/* only one frame buffer */
		nframes = 1;
	} else {
		/* can be multiple frame buffers */
		nframes = xfer->nframes;
	}

	pc = xfer->frbuffers;

	while (nframes--) {
		if (pc->isread) {
			usb_pc_cpu_invalidate(pc);
		}
		pc++;
	}
}

#endif