/* $FreeBSD$ */
/*-
 * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef USB_GLOBAL_INCLUDE_FILE
#include USB_GLOBAL_INCLUDE_FILE
#else
#include <sys/stdint.h>
#include <sys/stddef.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/sysctl.h>
#include <sys/sx.h>
#include <sys/unistd.h>
#include <sys/callout.h>
#include <sys/malloc.h>
#include <sys/priv.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>

#define	USB_DEBUG_VAR usb_debug

#include <dev/usb/usb_core.h>
#include <dev/usb/usb_busdma.h>
#include <dev/usb/usb_process.h>
#include <dev/usb/usb_transfer.h>
#include <dev/usb/usb_device.h>
#include <dev/usb/usb_util.h>
#include <dev/usb/usb_debug.h>

#include <dev/usb/usb_controller.h>
#include <dev/usb/usb_bus.h>
#endif			/* USB_GLOBAL_INCLUDE_FILE */

#if USB_HAVE_BUSDMA
static void	usb_dma_tag_create(struct usb_dma_tag *, usb_size_t, usb_size_t);
static void	usb_dma_tag_destroy(struct usb_dma_tag *);
static void	usb_dma_lock_cb(void *, bus_dma_lock_op_t);
static void	usb_pc_alloc_mem_cb(void *, bus_dma_segment_t *, int, int);
static void	usb_pc_load_mem_cb(void *, bus_dma_segment_t *, int, int);
static void	usb_pc_common_mem_cb(void *, bus_dma_segment_t *, int, int,
		    uint8_t);
#endif

/*------------------------------------------------------------------------*
 * usbd_get_page - lookup DMA-able memory for the given offset
 *
 * NOTE: Only call this function when the "page_cache" structure has
 * been properly initialized!
 *------------------------------------------------------------------------*/
void
usbd_get_page(struct usb_page_cache *pc, usb_frlength_t offset,
    struct usb_page_search *res)
{
#if USB_HAVE_BUSDMA
	struct usb_page *page;

	if (pc->page_start) {

		/* Case 1 - something has been loaded into DMA */

		if (pc->buffer) {

			/* Case 1a - Kernel Virtual Address */

			res->buffer = USB_ADD_BYTES(pc->buffer, offset);
		}
		offset += pc->page_offset_buf;

		/* compute destination page */

		page = pc->page_start;

		if (pc->ismultiseg) {

			page += (offset / USB_PAGE_SIZE);

			offset %= USB_PAGE_SIZE;

			res->length = USB_PAGE_SIZE - offset;
			res->physaddr = page->physaddr + offset;
		} else {
			res->length = (usb_size_t)-1;
			res->physaddr = page->physaddr + offset;
		}
		if (!pc->buffer) {

			/* Case 1b - Non Kernel Virtual Address */

			res->buffer = USB_ADD_BYTES(page->buffer, offset);
		}
		return;
	}
#endif
	/* Case 2 - Plain PIO */

	res->buffer = USB_ADD_BYTES(pc->buffer, offset);
	res->length = (usb_size_t)-1;
#if USB_HAVE_BUSDMA
	res->physaddr = 0;
#endif
}

/*------------------------------------------------------------------------*
 * usbd_copy_in - copy directly to DMA-able memory
 *------------------------------------------------------------------------*/
void
usbd_copy_in(struct usb_page_cache *cache, usb_frlength_t offset,
    const void *ptr, usb_frlength_t len)
{
	struct usb_page_search buf_res;

	while (len != 0) {

		usbd_get_page(cache, offset, &buf_res);

		if (buf_res.length > len) {
			buf_res.length = len;
		}
		memcpy(buf_res.buffer, ptr, buf_res.length);

		offset += buf_res.length;
		len -= buf_res.length;
		ptr = USB_ADD_BYTES(ptr, buf_res.length);
	}
}

/*------------------------------------------------------------------------*
 * usbd_copy_in_user - copy directly to DMA-able memory from userland
 *
 * Return values:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
#if USB_HAVE_USER_IO
int
usbd_copy_in_user(struct usb_page_cache *cache, usb_frlength_t offset,
    const void *ptr, usb_frlength_t len)
{
	struct usb_page_search buf_res;
	int error;

	while (len != 0) {

		usbd_get_page(cache, offset, &buf_res);

		if (buf_res.length > len) {
			buf_res.length = len;
		}
		error = copyin(ptr, buf_res.buffer, buf_res.length);
		if (error)
			return (error);

		offset += buf_res.length;
		len -= buf_res.length;
		ptr = USB_ADD_BYTES(ptr, buf_res.length);
	}
	return (0);		/* success */
}
#endif
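
/*------------------------------------------------------------------------*
 * Example (illustrative sketch only, not part of this driver): a
 * typical way a transfer callback fills frame buffer zero using the
 * copy helpers above before submitting a transfer. The "cmd" and
 * "len" names are hypothetical; the accessors are the usual ones
 * declared in usbdi.h:
 *
 *	struct usb_page_cache *pc = usbd_xfer_get_frame(xfer, 0);
 *
 *	usbd_copy_in(pc, 0, cmd, len);
 *	usbd_xfer_set_frame_len(xfer, 0, len);
 *	usbd_transfer_submit(xfer);
 *------------------------------------------------------------------------*/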

/*------------------------------------------------------------------------*
 * usbd_m_copy_in - copy an mbuf chain directly into DMA-able memory
 *------------------------------------------------------------------------*/
#if USB_HAVE_MBUF
struct usb_m_copy_in_arg {
	struct usb_page_cache *cache;
	usb_frlength_t dst_offset;
};

static int
usbd_m_copy_in_cb(void *arg, void *src, uint32_t count)
{
	register struct usb_m_copy_in_arg *ua = arg;

	usbd_copy_in(ua->cache, ua->dst_offset, src, count);
	ua->dst_offset += count;
	return (0);
}

void
usbd_m_copy_in(struct usb_page_cache *cache, usb_frlength_t dst_offset,
    struct mbuf *m, usb_size_t src_offset, usb_frlength_t src_len)
{
	struct usb_m_copy_in_arg arg = {cache, dst_offset};

	(void)m_apply(m, src_offset, src_len, &usbd_m_copy_in_cb, &arg);
}
#endif

/*------------------------------------------------------------------------*
 * usb_uiomove - factored out code
 *------------------------------------------------------------------------*/
#if USB_HAVE_USER_IO
int
usb_uiomove(struct usb_page_cache *pc, struct uio *uio,
    usb_frlength_t pc_offset, usb_frlength_t len)
{
	struct usb_page_search res;
	int error = 0;

	while (len != 0) {

		usbd_get_page(pc, pc_offset, &res);

		if (res.length > len) {
			res.length = len;
		}
		/*
		 * "uiomove()" can sleep, so the caller needs a wrapper
		 * that drops the mutex and re-checks state around this
		 * call.
		 */
		error = uiomove(res.buffer, res.length, uio);

		if (error) {
			break;
		}
		pc_offset += res.length;
		len -= res.length;
	}
	return (error);
}
#endif

/*------------------------------------------------------------------------*
 * usbd_copy_out - copy directly from DMA-able memory
 *------------------------------------------------------------------------*/
void
usbd_copy_out(struct usb_page_cache *cache, usb_frlength_t offset,
    void *ptr, usb_frlength_t len)
{
	struct usb_page_search res;

	while (len != 0) {

		usbd_get_page(cache, offset, &res);

		if (res.length > len) {
			res.length = len;
		}
		memcpy(ptr, res.buffer, res.length);

		offset += res.length;
		len -= res.length;
		ptr = USB_ADD_BYTES(ptr, res.length);
	}
}

/*------------------------------------------------------------------------*
 * usbd_copy_out_user - copy directly from DMA-able memory to userland
 *
 * Return values:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
#if USB_HAVE_USER_IO
int
usbd_copy_out_user(struct usb_page_cache *cache, usb_frlength_t offset,
    void *ptr, usb_frlength_t len)
{
	struct usb_page_search res;
	int error;

	while (len != 0) {

		usbd_get_page(cache, offset, &res);

		if (res.length > len) {
			res.length = len;
		}
		error = copyout(res.buffer, ptr, res.length);
		if (error)
			return (error);

		offset += res.length;
		len -= res.length;
		ptr = USB_ADD_BYTES(ptr, res.length);
	}
	return (0);		/* success */
}
#endif

/*------------------------------------------------------------------------*
 * usbd_frame_zero - zero DMA-able memory
 *------------------------------------------------------------------------*/
void
usbd_frame_zero(struct usb_page_cache *cache, usb_frlength_t offset,
    usb_frlength_t len)
{
	struct usb_page_search res;

	while (len != 0) {

		usbd_get_page(cache, offset, &res);

		if (res.length > len) {
			res.length = len;
		}
		memset(res.buffer, 0, res.length);

		offset += res.length;
		len -= res.length;
	}
}

#if USB_HAVE_BUSDMA

/*------------------------------------------------------------------------*
 * usb_dma_lock_cb - dummy callback
 *------------------------------------------------------------------------*/
static void
usb_dma_lock_cb(void *arg, bus_dma_lock_op_t op)
{
	/* we use "mtx_owned()" instead of this function */
}

/*------------------------------------------------------------------------*
 * usb_dma_tag_create - allocate a DMA tag
 *
 * NOTE: If the "align" parameter has a value of 1 the DMA-tag will
 * allow multi-segment mappings. Else all mappings are single-segment.
 *------------------------------------------------------------------------*/
static void
usb_dma_tag_create(struct usb_dma_tag *udt,
    usb_size_t size, usb_size_t align)
{
	bus_dma_tag_t tag;

	if (bus_dma_tag_create
	    ( /* parent    */ udt->tag_parent->tag,
	     /* alignment */ align,
	     /* boundary  */ 0,
	     /* lowaddr   */ (2ULL << (udt->tag_parent->dma_bits - 1)) - 1,
	     /* highaddr  */ BUS_SPACE_MAXADDR,
	     /* filter    */ NULL,
	     /* filterarg */ NULL,
	     /* maxsize   */ size,
	     /* nsegments */ (align == 1 && size > 1) ?
	    (2 + (size / USB_PAGE_SIZE)) : 1,
	     /* maxsegsz  */ (align == 1 && size > USB_PAGE_SIZE) ?
	    USB_PAGE_SIZE : size,
	     /* flags     */ BUS_DMA_KEEP_PG_OFFSET,
	     /* lockfn    */ &usb_dma_lock_cb,
	     /* lockarg   */ NULL,
	    &tag)) {
		tag = NULL;
	}
	udt->tag = tag;
}

/*------------------------------------------------------------------------*
 * usb_dma_tag_destroy - free a DMA tag
 *------------------------------------------------------------------------*/
static void
usb_dma_tag_destroy(struct usb_dma_tag *udt)
{
	bus_dma_tag_destroy(udt->tag);
}

/*------------------------------------------------------------------------*
 * usb_pc_alloc_mem_cb - BUS-DMA callback function
 *------------------------------------------------------------------------*/
static void
usb_pc_alloc_mem_cb(void *arg, bus_dma_segment_t *segs,
    int nseg, int error)
{
	usb_pc_common_mem_cb(arg, segs, nseg, error, 0);
}

/*------------------------------------------------------------------------*
 * usb_pc_load_mem_cb - BUS-DMA callback function
 *------------------------------------------------------------------------*/
static void
usb_pc_load_mem_cb(void *arg, bus_dma_segment_t *segs,
    int nseg, int error)
{
	usb_pc_common_mem_cb(arg, segs, nseg, error, 1);
}

/*------------------------------------------------------------------------*
 * usb_pc_common_mem_cb - BUS-DMA callback function
 *------------------------------------------------------------------------*/
static void
usb_pc_common_mem_cb(void *arg, bus_dma_segment_t *segs,
    int nseg, int error, uint8_t isload)
{
	struct usb_dma_parent_tag *uptag;
	struct usb_page_cache *pc;
	struct usb_page *pg;
	usb_size_t rem;
	bus_size_t off;
	uint8_t owned;

	pc = arg;
	uptag = pc->tag_parent;

	/*
	 * XXX There is sometimes recursive locking here.
	 * XXX We should try to find a better solution.
	 * XXX For now the "owned" variable does the trick.
	 */

	if (error) {
		goto done;
	}

	off = 0;
	pg = pc->page_start;
	pg->physaddr = segs->ds_addr & ~(USB_PAGE_SIZE - 1);
	rem = segs->ds_addr & (USB_PAGE_SIZE - 1);
	pc->page_offset_buf = rem;
	pc->page_offset_end += rem;
#ifdef USB_DEBUG
	if (rem != (USB_P2U(pc->buffer) & (USB_PAGE_SIZE - 1))) {
		/*
		 * This check verifies that the physical address is correct:
		 */
		DPRINTFN(0, "Page offset was not preserved\n");
		error = 1;
		goto done;
	}
#endif
	while (1) {
		off += USB_PAGE_SIZE;
		if (off >= (segs->ds_len + rem)) {
			/* page crossing */
			nseg--;
			segs++;
			off = 0;
			rem = 0;
			if (nseg == 0)
				break;
		}
		pg++;
		pg->physaddr = (segs->ds_addr + off) & ~(USB_PAGE_SIZE - 1);
	}

done:
	owned = mtx_owned(uptag->mtx);
	if (!owned)
		mtx_lock(uptag->mtx);

	uptag->dma_error = (error ? 1 : 0);
	if (isload) {
		(uptag->func) (uptag);
	} else {
		cv_broadcast(uptag->cv);
	}
	if (!owned)
		mtx_unlock(uptag->mtx);
}

/*------------------------------------------------------------------------*
 * usb_pc_alloc_mem - allocate DMA'able memory
 *
 * Returns:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_alloc_mem(struct usb_page_cache *pc, struct usb_page *pg,
    usb_size_t size, usb_size_t align)
{
	struct usb_dma_parent_tag *uptag;
	struct usb_dma_tag *utag;
	bus_dmamap_t map;
	void *ptr;
	int err;

	uptag = pc->tag_parent;

	if (align != 1) {
		/*
		 * The alignment must be greater or equal to the
		 * "size" else the object can be split between two
		 * memory pages and we get a problem!
		 */
		while (align < size) {
			align *= 2;
			if (align == 0) {
				goto error;
			}
		}
#if 1
		/*
		 * XXX BUS-DMA workaround - FIXME later:
		 *
		 * We assume that the alignment at this point of the
		 * code is greater than or equal to the size and less
		 * than two times the size, so that if we double the
		 * size, the size will be greater than the alignment.
		 *
		 * The bus-dma system has a check for "alignment"
		 * being less than "size". If that check fails we end
		 * up using contigmalloc which is page based even for
		 * small allocations. Try to avoid that to save
		 * memory, hence we sometimes do a large number of
		 * small allocations!
		 */
		if (size <= (USB_PAGE_SIZE / 2)) {
			size *= 2;
		}
#endif
	}
	/* get the correct DMA tag */
	utag = usb_dma_tag_find(uptag, size, align);
	if (utag == NULL) {
		goto error;
	}
	/* allocate memory */
	if (bus_dmamem_alloc(
	    utag->tag, &ptr, (BUS_DMA_WAITOK | BUS_DMA_COHERENT), &map)) {
		goto error;
	}
	/* setup page cache */
	pc->buffer = ptr;
	pc->page_start = pg;
	pc->page_offset_buf = 0;
	pc->page_offset_end = size;
	pc->map = map;
	pc->tag = utag->tag;
	pc->ismultiseg = (align == 1);

	mtx_lock(uptag->mtx);

	/* load memory into DMA */
	err = bus_dmamap_load(
	    utag->tag, map, ptr, size, &usb_pc_alloc_mem_cb,
	    pc, (BUS_DMA_WAITOK | BUS_DMA_COHERENT));

	if (err == EINPROGRESS) {
		cv_wait(uptag->cv, uptag->mtx);
		err = 0;
	}
	mtx_unlock(uptag->mtx);

	if (err || uptag->dma_error) {
		bus_dmamem_free(utag->tag, ptr, map);
		goto error;
	}
	memset(ptr, 0, size);

	usb_pc_cpu_flush(pc);

	return (0);

error:
	/* reset most of the page cache */
	pc->buffer = NULL;
	pc->page_start = NULL;
	pc->page_offset_buf = 0;
	pc->page_offset_end = 0;
	pc->map = NULL;
	pc->tag = NULL;
	return (1);
}

/*------------------------------------------------------------------------*
 * usb_pc_free_mem - free DMA memory
 *
 * This function is NULL safe.
 *------------------------------------------------------------------------*/
void
usb_pc_free_mem(struct usb_page_cache *pc)
{
	if (pc && pc->buffer) {

		bus_dmamap_unload(pc->tag, pc->map);

		bus_dmamem_free(pc->tag, pc->buffer, pc->map);

		pc->buffer = NULL;
	}
}
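
/*------------------------------------------------------------------------*
 * Example (illustrative sketch only, not compiled here): the usual
 * lifetime of a page cache backed by usb_pc_alloc_mem(). The "pc",
 * "pg", "data" and "size" names are hypothetical placeholders for
 * storage normally provided by the transfer setup code:
 *
 *	if (usb_pc_alloc_mem(pc, pg, size, 1))
 *		return (ENOMEM);
 *
 *	usbd_copy_in(pc, 0, data, size);	update the buffer
 *	usb_pc_cpu_flush(pc);			make it visible to the device
 *	...
 *	usb_pc_cpu_invalidate(pc);		before reading device writes
 *	usbd_copy_out(pc, 0, data, size);
 *
 *	usb_pc_free_mem(pc);			NULL safe
 *------------------------------------------------------------------------*/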

/*------------------------------------------------------------------------*
 * usb_pc_load_mem - load virtual memory into DMA
 *
 * Return values:
 *    0: Success
 * Else: Error
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_load_mem(struct usb_page_cache *pc, usb_size_t size, uint8_t sync)
{
	/* setup page cache */
	pc->page_offset_buf = 0;
	pc->page_offset_end = size;
	pc->ismultiseg = 1;

	mtx_assert(pc->tag_parent->mtx, MA_OWNED);

	if (size > 0) {
		if (sync) {
			struct usb_dma_parent_tag *uptag;
			int err;

			uptag = pc->tag_parent;

			/*
			 * We have to unload the previously loaded DMA
			 * pages before trying to load a new one!
			 */
			bus_dmamap_unload(pc->tag, pc->map);

			/*
			 * Try to load memory into DMA.
			 */
			err = bus_dmamap_load(
			    pc->tag, pc->map, pc->buffer, size,
			    &usb_pc_alloc_mem_cb, pc, BUS_DMA_WAITOK);
			if (err == EINPROGRESS) {
				cv_wait(uptag->cv, uptag->mtx);
				err = 0;
			}
			if (err || uptag->dma_error) {
				return (1);
			}
		} else {

			/*
			 * We have to unload the previously loaded DMA
			 * pages before trying to load a new one!
			 */
			bus_dmamap_unload(pc->tag, pc->map);

			/*
			 * Try to load memory into DMA. The callback
			 * will be called in all cases:
			 */
			if (bus_dmamap_load(
			    pc->tag, pc->map, pc->buffer, size,
			    &usb_pc_load_mem_cb, pc, BUS_DMA_WAITOK)) {
			}
		}
	} else {
		if (!sync) {
			/*
			 * Call the callback so that the reference
			 * count is decremented properly:
			 */
			pc->tag_parent->dma_error = 0;
			(pc->tag_parent->func) (pc->tag_parent);
		}
	}
	return (0);
}

/*------------------------------------------------------------------------*
 * usb_pc_cpu_invalidate - invalidate CPU cache
 *------------------------------------------------------------------------*/
void
usb_pc_cpu_invalidate(struct usb_page_cache *pc)
{
	if (pc->page_offset_end == pc->page_offset_buf) {
		/* nothing has been loaded into this page cache! */
		return;
	}

	/*
	 * TODO: We currently do XXX_POSTREAD and XXX_PREREAD at the
	 * same time, but in the future we should try to isolate the
	 * different cases to optimise the code. --HPS
	 */
	bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_PREREAD);
}

/*------------------------------------------------------------------------*
 * usb_pc_cpu_flush - flush CPU cache
 *------------------------------------------------------------------------*/
void
usb_pc_cpu_flush(struct usb_page_cache *pc)
{
	if (pc->page_offset_end == pc->page_offset_buf) {
		/* nothing has been loaded into this page cache! */
		return;
	}
	bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_PREWRITE);
}

/*------------------------------------------------------------------------*
 * usb_pc_dmamap_create - create a DMA map
 *
 * Returns:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_dmamap_create(struct usb_page_cache *pc, usb_size_t size)
{
	struct usb_xfer_root *info;
	struct usb_dma_tag *utag;

	/* get info */
	info = USB_DMATAG_TO_XROOT(pc->tag_parent);

	/* sanity check */
	if (info == NULL) {
		goto error;
	}
	utag = usb_dma_tag_find(pc->tag_parent, size, 1);
	if (utag == NULL) {
		goto error;
	}
	/* create DMA map */
	if (bus_dmamap_create(utag->tag, 0, &pc->map)) {
		goto error;
	}
	pc->tag = utag->tag;
	return 0;			/* success */

error:
	pc->map = NULL;
	pc->tag = NULL;
	return 1;			/* failure */
}
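
/*------------------------------------------------------------------------*
 * Example (illustrative sketch only): mapping an existing virtual
 * buffer, as opposed to memory allocated by usb_pc_alloc_mem(). The
 * caller is assumed to hold the parent tag's mutex; "buf", "pg" and
 * "size" are hypothetical placeholders:
 *
 *	if (usb_pc_dmamap_create(pc, size))
 *		return (1);
 *	pc->buffer = buf;
 *	pc->page_start = pg;
 *
 *	usb_pc_load_mem(pc, size, 1);		synchronous load
 *	...
 *	bus_dmamap_unload(pc->tag, pc->map);
 *	usb_pc_dmamap_destroy(pc);		NULL safe
 *------------------------------------------------------------------------*/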

/*------------------------------------------------------------------------*
 * usb_pc_dmamap_destroy
 *
 * This function is NULL safe.
 *------------------------------------------------------------------------*/
void
usb_pc_dmamap_destroy(struct usb_page_cache *pc)
{
	if (pc && pc->tag) {
		bus_dmamap_destroy(pc->tag, pc->map);
		pc->tag = NULL;
		pc->map = NULL;
	}
}

/*------------------------------------------------------------------------*
 * usb_dma_tag_find - factored out code
 *------------------------------------------------------------------------*/
struct usb_dma_tag *
usb_dma_tag_find(struct usb_dma_parent_tag *udpt,
    usb_size_t size, usb_size_t align)
{
	struct usb_dma_tag *udt;
	uint8_t nudt;

	USB_ASSERT(align > 0, ("Invalid parameter align = 0\n"));
	USB_ASSERT(size > 0, ("Invalid parameter size = 0\n"));

	udt = udpt->utag_first;
	nudt = udpt->utag_max;

	while (nudt--) {

		if (udt->align == 0) {
			usb_dma_tag_create(udt, size, align);
			if (udt->tag == NULL) {
				return (NULL);
			}
			udt->align = align;
			udt->size = size;
			return (udt);
		}
		if ((udt->align == align) && (udt->size == size)) {
			return (udt);
		}
		udt++;
	}
	return (NULL);
}

/*------------------------------------------------------------------------*
 * usb_dma_tag_setup - initialise USB DMA tags
 *------------------------------------------------------------------------*/
void
usb_dma_tag_setup(struct usb_dma_parent_tag *udpt,
    struct usb_dma_tag *udt, bus_dma_tag_t dmat,
    struct mtx *mtx, usb_dma_callback_t *func,
    uint8_t ndmabits, uint8_t nudt)
{
	memset(udpt, 0, sizeof(*udpt));

	/* sanity checking */
	if ((nudt == 0) ||
	    (ndmabits == 0) ||
	    (mtx == NULL)) {
		/* something is corrupt */
		return;
	}
	/* initialise condition variable */
	cv_init(udpt->cv, "USB DMA CV");

	/* store some information */
	udpt->mtx = mtx;
	udpt->func = func;
	udpt->tag = dmat;
	udpt->utag_first = udt;
	udpt->utag_max = nudt;
	udpt->dma_bits = ndmabits;

	while (nudt--) {
		memset(udt, 0, sizeof(*udt));
		udt->tag_parent = udpt;
		udt++;
	}
}

/*------------------------------------------------------------------------*
 * usb_dma_tag_unsetup - factored out code
 *------------------------------------------------------------------------*/
void
usb_dma_tag_unsetup(struct usb_dma_parent_tag *udpt)
{
	struct usb_dma_tag *udt;
	uint8_t nudt;

	udt = udpt->utag_first;
	nudt = udpt->utag_max;

	while (nudt--) {

		if (udt->align) {
			/* destroy the USB DMA tag */
			usb_dma_tag_destroy(udt);
			udt->align = 0;
		}
		udt++;
	}

	if (udpt->utag_max) {
		/* destroy the condition variable */
		cv_destroy(udpt->cv);
	}
}
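
/*------------------------------------------------------------------------*
 * Example (illustrative sketch only): how transfer setup code would
 * initialise and later tear down an array of USB DMA tags using the
 * functions above. The "sc" softc fields, the DMA width of 32 bits
 * and the completion callback are hypothetical placeholders:
 *
 *	usb_dma_tag_setup(&sc->dma_parent_tag, sc->dma_tags,
 *	    bus_get_dma_tag(dev), &sc->mtx, &usb_bdma_done_event,
 *	    32, nitems(sc->dma_tags));
 *	...
 *	usb_dma_tag_unsetup(&sc->dma_parent_tag);
 *------------------------------------------------------------------------*/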

/*------------------------------------------------------------------------*
 * usb_bdma_work_loop
 *
 * This function handles loading of virtual buffers into DMA and is
 * only called when "dma_refcount" is zero.
 *------------------------------------------------------------------------*/
void
usb_bdma_work_loop(struct usb_xfer_queue *pq)
{
	struct usb_xfer_root *info;
	struct usb_xfer *xfer;
	usb_frcount_t nframes;

	xfer = pq->curr;
	info = xfer->xroot;

	mtx_assert(info->xfer_mtx, MA_OWNED);

	if (xfer->error) {
		/* some error happened */
		USB_BUS_LOCK(info->bus);
		usbd_transfer_done(xfer, 0);
		USB_BUS_UNLOCK(info->bus);
		return;
	}
	if (!xfer->flags_int.bdma_setup) {
		struct usb_page *pg;
		usb_frlength_t frlength_0;
		uint8_t isread;

		xfer->flags_int.bdma_setup = 1;

		/* reset BUS-DMA load state */

		info->dma_error = 0;

		if (xfer->flags_int.isochronous_xfr) {
			/* only one frame buffer */
			nframes = 1;
			frlength_0 = xfer->sumlen;
		} else {
			/* can be multiple frame buffers */
			nframes = xfer->nframes;
			frlength_0 = xfer->frlengths[0];
		}

		/*
		 * Set DMA direction first. This is needed to
		 * select the correct cache invalidate and cache
		 * flush operations.
		 */
		isread = USB_GET_DATA_ISREAD(xfer);
		pg = xfer->dma_page_ptr;

		if (xfer->flags_int.control_xfr &&
		    xfer->flags_int.control_hdr) {
			/* special case */
			if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
				/* The device controller writes to memory */
				xfer->frbuffers[0].isread = 1;
			} else {
				/* The host controller reads from memory */
				xfer->frbuffers[0].isread = 0;
			}
		} else {
			/* default case */
			xfer->frbuffers[0].isread = isread;
		}

		/*
		 * Setup the "page_start" pointer which points to an array of
		 * USB pages where information about the physical address of a
		 * page will be stored. Also initialise the "isread" field of
		 * the USB page caches.
		 */
		xfer->frbuffers[0].page_start = pg;

		info->dma_nframes = nframes;
		info->dma_currframe = 0;
		info->dma_frlength_0 = frlength_0;

		pg += (frlength_0 / USB_PAGE_SIZE);
		pg += 2;

		while (--nframes > 0) {
			xfer->frbuffers[nframes].isread = isread;
			xfer->frbuffers[nframes].page_start = pg;

			pg += (xfer->frlengths[nframes] / USB_PAGE_SIZE);
			pg += 2;
		}

	}
	if (info->dma_error) {
		USB_BUS_LOCK(info->bus);
		usbd_transfer_done(xfer, USB_ERR_DMA_LOAD_FAILED);
		USB_BUS_UNLOCK(info->bus);
		return;
	}
	if (info->dma_currframe != info->dma_nframes) {

		if (info->dma_currframe == 0) {
			/* special case */
			usb_pc_load_mem(xfer->frbuffers,
			    info->dma_frlength_0, 0);
		} else {
			/* default case */
			nframes = info->dma_currframe;
			usb_pc_load_mem(xfer->frbuffers + nframes,
			    xfer->frlengths[nframes], 0);
		}

		/* advance frame index */
		info->dma_currframe++;

		return;
	}
	/* go ahead */
	usb_bdma_pre_sync(xfer);

	/* start loading next USB transfer, if any */
	usb_command_wrapper(pq, NULL);

	/* finally start the hardware */
	usbd_pipe_enter(xfer);
}

/*------------------------------------------------------------------------*
 * usb_bdma_done_event
 *
 * This function is called when the BUS-DMA has loaded virtual memory
 * into DMA, if any.
 *------------------------------------------------------------------------*/
void
usb_bdma_done_event(struct usb_dma_parent_tag *udpt)
{
	struct usb_xfer_root *info;

	info = USB_DMATAG_TO_XROOT(udpt);

	mtx_assert(info->xfer_mtx, MA_OWNED);

	/* copy error */
	info->dma_error = udpt->dma_error;

	/* enter workloop again */
	usb_command_wrapper(&info->dma_q,
	    info->dma_q.curr);
}

/*------------------------------------------------------------------------*
 * usb_bdma_pre_sync
 *
 * This function handles DMA synchronisation that must be done before
 * an USB transfer is started.
 *------------------------------------------------------------------------*/
void
usb_bdma_pre_sync(struct usb_xfer *xfer)
{
	struct usb_page_cache *pc;
	usb_frcount_t nframes;

	if (xfer->flags_int.isochronous_xfr) {
		/* only one frame buffer */
		nframes = 1;
	} else {
		/* can be multiple frame buffers */
		nframes = xfer->nframes;
	}

	pc = xfer->frbuffers;

	while (nframes--) {

		if (pc->isread) {
			usb_pc_cpu_invalidate(pc);
		} else {
			usb_pc_cpu_flush(pc);
		}
		pc++;
	}
}

/*------------------------------------------------------------------------*
 * usb_bdma_post_sync
 *
 * This function handles DMA synchronisation that must be done after
 * an USB transfer is complete.
 *------------------------------------------------------------------------*/
void
usb_bdma_post_sync(struct usb_xfer *xfer)
{
	struct usb_page_cache *pc;
	usb_frcount_t nframes;

	if (xfer->flags_int.isochronous_xfr) {
		/* only one frame buffer */
		nframes = 1;
	} else {
		/* can be multiple frame buffers */
		nframes = xfer->nframes;
	}

	pc = xfer->frbuffers;

	while (nframes--) {
		if (pc->isread) {
			usb_pc_cpu_invalidate(pc);
		}
		pc++;
	}
}

#endif