/* $FreeBSD$ */
/*-
 * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef USB_GLOBAL_INCLUDE_FILE
#include USB_GLOBAL_INCLUDE_FILE
#else
#include <sys/stdint.h>
#include <sys/stddef.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/sysctl.h>
#include <sys/sx.h>
#include <sys/unistd.h>
#include <sys/callout.h>
#include <sys/malloc.h>
#include <sys/priv.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>

#define	USB_DEBUG_VAR usb_debug

#include <dev/usb/usb_core.h>
#include <dev/usb/usb_busdma.h>
#include <dev/usb/usb_process.h>
#include <dev/usb/usb_transfer.h>
#include <dev/usb/usb_device.h>
#include <dev/usb/usb_util.h>
#include <dev/usb/usb_debug.h>

#include <dev/usb/usb_controller.h>
#include <dev/usb/usb_bus.h>
#endif			/* USB_GLOBAL_INCLUDE_FILE */

#if USB_HAVE_BUSDMA
static void	usb_dma_tag_create(struct usb_dma_tag *, usb_size_t, usb_size_t);
static void	usb_dma_tag_destroy(struct usb_dma_tag *);
static void	usb_dma_lock_cb(void *, bus_dma_lock_op_t);
static void	usb_pc_alloc_mem_cb(void *, bus_dma_segment_t *, int, int);
static void	usb_pc_load_mem_cb(void *, bus_dma_segment_t *, int, int);
static void	usb_pc_common_mem_cb(void *, bus_dma_segment_t *, int, int,
		    uint8_t);
#endif

/*------------------------------------------------------------------------*
 *	usbd_get_page - lookup DMA-able memory for the given offset
 *
 * NOTE: Only call this function when the "page_cache" structure has
 * been properly initialized !
 *------------------------------------------------------------------------*/
void
usbd_get_page(struct usb_page_cache *pc, usb_frlength_t offset,
    struct usb_page_search *res)
{
#if USB_HAVE_BUSDMA
	struct usb_page *page;

	if (pc->page_start) {

		/* Case 1 - something has been loaded into DMA */

		if (pc->buffer) {

			/* Case 1a - Kernel Virtual Address */

			res->buffer = USB_ADD_BYTES(pc->buffer, offset);
		}
		offset += pc->page_offset_buf;

		/* compute destination page */

		page = pc->page_start;

		if (pc->ismultiseg) {

			page += (offset / USB_PAGE_SIZE);

			offset %= USB_PAGE_SIZE;

			res->length = USB_PAGE_SIZE - offset;
			res->physaddr = page->physaddr + offset;
		} else {
			res->length = (usb_size_t)-1;
			res->physaddr = page->physaddr + offset;
		}
		if (!pc->buffer) {

			/* Case 1b - Non Kernel Virtual Address */

			res->buffer = USB_ADD_BYTES(page->buffer, offset);
		}
		return;
	}
#endif
	/* Case 2 - Plain PIO */

	res->buffer = USB_ADD_BYTES(pc->buffer, offset);
	res->length = (usb_size_t)-1;
#if USB_HAVE_BUSDMA
	res->physaddr = 0;
#endif
}
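
/*
 * Illustrative sketch (not part of the original code): when
 * USB_HAVE_BUSDMA is set, a caller typically walks a loaded page cache
 * in physically contiguous chunks, clamping "res.length" to the number
 * of bytes that are left.  The identifiers "pc", "off" and "todo" are
 * hypothetical:
 *
 *	struct usb_page_search res;
 *
 *	while (todo != 0) {
 *		usbd_get_page(pc, off, &res);
 *		if (res.length > todo)
 *			res.length = todo;
 *		... hand "res.length" bytes at "res.physaddr" to the HW ...
 *		off += res.length;
 *		todo -= res.length;
 *	}
 */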

/*------------------------------------------------------------------------*
 *	usb_pc_buffer_is_aligned - verify alignment
 *
 * This function is used to check if a page cache buffer is properly
 * aligned to reduce the use of bounce buffers in PIO mode.
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_buffer_is_aligned(struct usb_page_cache *pc, usb_frlength_t offset,
    usb_frlength_t len, usb_frlength_t mask)
{
	struct usb_page_search buf_res;

	while (len != 0) {

		usbd_get_page(pc, offset, &buf_res);

		if (buf_res.length > len)
			buf_res.length = len;
		if (USB_P2U(buf_res.buffer) & mask)
			return (0);
		if (buf_res.length & mask)
			return (0);

		offset += buf_res.length;
		len -= buf_res.length;
	}
	return (1);
}
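
/*
 * Example: usb_pc_buffer_is_aligned(pc, 0, len, 3) returns 1 only when
 * every chunk returned by usbd_get_page() starts on a 4-byte boundary
 * and covers a multiple of 4 bytes; a "mask" of 1 checks 2-byte
 * alignment instead.  The usual "mask" values are therefore powers of
 * two minus one.
 */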

/*------------------------------------------------------------------------*
 *	usbd_copy_in - copy directly to DMA-able memory
 *------------------------------------------------------------------------*/
void
usbd_copy_in(struct usb_page_cache *cache, usb_frlength_t offset,
    const void *ptr, usb_frlength_t len)
{
	struct usb_page_search buf_res;

	while (len != 0) {

		usbd_get_page(cache, offset, &buf_res);

		if (buf_res.length > len) {
			buf_res.length = len;
		}
		memcpy(buf_res.buffer, ptr, buf_res.length);

		offset += buf_res.length;
		len -= buf_res.length;
		ptr = USB_ADD_BYTES(ptr, buf_res.length);
	}
}

/*------------------------------------------------------------------------*
 *	usbd_copy_in_user - copy directly to DMA-able memory from userland
 *
 * Return values:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
#if USB_HAVE_USER_IO
int
usbd_copy_in_user(struct usb_page_cache *cache, usb_frlength_t offset,
    const void *ptr, usb_frlength_t len)
{
	struct usb_page_search buf_res;
	int error;

	while (len != 0) {

		usbd_get_page(cache, offset, &buf_res);

		if (buf_res.length > len) {
			buf_res.length = len;
		}
		error = copyin(ptr, buf_res.buffer, buf_res.length);
		if (error)
			return (error);

		offset += buf_res.length;
		len -= buf_res.length;
		ptr = USB_ADD_BYTES(ptr, buf_res.length);
	}
	return (0);			/* success */
}
#endif

/*------------------------------------------------------------------------*
 *	usbd_m_copy_in - copy a mbuf chain directly into DMA-able memory
 *------------------------------------------------------------------------*/
#if USB_HAVE_MBUF
struct usb_m_copy_in_arg {
	struct usb_page_cache *cache;
	usb_frlength_t dst_offset;
};

static int
usbd_m_copy_in_cb(void *arg, void *src, uint32_t count)
{
	register struct usb_m_copy_in_arg *ua = arg;

	usbd_copy_in(ua->cache, ua->dst_offset, src, count);
	ua->dst_offset += count;
	return (0);
}

void
usbd_m_copy_in(struct usb_page_cache *cache, usb_frlength_t dst_offset,
    struct mbuf *m, usb_size_t src_offset, usb_frlength_t src_len)
{
	struct usb_m_copy_in_arg arg = {cache, dst_offset};
	(void) m_apply(m, src_offset, src_len, &usbd_m_copy_in_cb, &arg);
}
#endif

/*------------------------------------------------------------------------*
 *	usb_uiomove - factored out code
 *------------------------------------------------------------------------*/
#if USB_HAVE_USER_IO
int
usb_uiomove(struct usb_page_cache *pc, struct uio *uio,
    usb_frlength_t pc_offset, usb_frlength_t len)
{
	struct usb_page_search res;
	int error = 0;

	while (len != 0) {

		usbd_get_page(pc, pc_offset, &res);

		if (res.length > len) {
			res.length = len;
		}
		/*
		 * "uiomove()" can sleep so one needs to make a wrapper,
		 * exiting the mutex and checking things
		 */
		error = uiomove(res.buffer, res.length, uio);

		if (error) {
			break;
		}
		pc_offset += res.length;
		len -= res.length;
	}
	return (error);
}
#endif

/*------------------------------------------------------------------------*
 *	usbd_copy_out - copy directly from DMA-able memory
 *------------------------------------------------------------------------*/
void
usbd_copy_out(struct usb_page_cache *cache, usb_frlength_t offset,
    void *ptr, usb_frlength_t len)
{
	struct usb_page_search res;

	while (len != 0) {

		usbd_get_page(cache, offset, &res);

		if (res.length > len) {
			res.length = len;
		}
		memcpy(ptr, res.buffer, res.length);

		offset += res.length;
		len -= res.length;
		ptr = USB_ADD_BYTES(ptr, res.length);
	}
}

/*------------------------------------------------------------------------*
 *	usbd_copy_out_user - copy directly from DMA-able memory to userland
 *
 * Return values:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
#if USB_HAVE_USER_IO
int
usbd_copy_out_user(struct usb_page_cache *cache, usb_frlength_t offset,
    void *ptr, usb_frlength_t len)
{
	struct usb_page_search res;
	int error;

	while (len != 0) {

		usbd_get_page(cache, offset, &res);

		if (res.length > len) {
			res.length = len;
		}
		error = copyout(res.buffer, ptr, res.length);
		if (error)
			return (error);

		offset += res.length;
		len -= res.length;
		ptr = USB_ADD_BYTES(ptr, res.length);
	}
	return (0);			/* success */
}
#endif
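
/*
 * Illustrative sketch (hypothetical driver code, not part of this
 * file): the copy helpers above usually operate on a transfer's frame
 * buffers, for example when extracting received data in a completion
 * callback.  The names "xfer", "buf" and "actlen" are assumed:
 *
 *	struct usb_page_cache *pc;
 *	int actlen;
 *
 *	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
 *	pc = usbd_xfer_get_frame(xfer, 0);
 *	usbd_copy_out(pc, 0, buf, actlen);
 */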

/*------------------------------------------------------------------------*
 *	usbd_frame_zero - zero DMA-able memory
 *------------------------------------------------------------------------*/
void
usbd_frame_zero(struct usb_page_cache *cache, usb_frlength_t offset,
    usb_frlength_t len)
{
	struct usb_page_search res;

	while (len != 0) {

		usbd_get_page(cache, offset, &res);

		if (res.length > len) {
			res.length = len;
		}
		memset(res.buffer, 0, res.length);

		offset += res.length;
		len -= res.length;
	}
}

#if USB_HAVE_BUSDMA

/*------------------------------------------------------------------------*
 *	usb_dma_lock_cb - dummy callback
 *------------------------------------------------------------------------*/
static void
usb_dma_lock_cb(void *arg, bus_dma_lock_op_t op)
{
	/* we use "mtx_owned()" instead of this function */
}

/*------------------------------------------------------------------------*
 *	usb_dma_tag_create - allocate a DMA tag
 *
 * NOTE: If the "align" parameter has a value of 1 the DMA-tag will
 * allow multi-segment mappings. Else all mappings are single-segment.
 *------------------------------------------------------------------------*/
static void
usb_dma_tag_create(struct usb_dma_tag *udt,
    usb_size_t size, usb_size_t align)
{
	bus_dma_tag_t tag;

	if (bus_dma_tag_create
	    ( /* parent    */ udt->tag_parent->tag,
	    /* alignment */ align,
	    /* boundary  */ 0,
	    /* lowaddr   */ (2ULL << (udt->tag_parent->dma_bits - 1)) - 1,
	    /* highaddr  */ BUS_SPACE_MAXADDR,
	    /* filter    */ NULL,
	    /* filterarg */ NULL,
	    /* maxsize   */ size,
	    /* nsegments */ (align == 1 && size > 1) ?
	    (2 + (size / USB_PAGE_SIZE)) : 1,
	    /* maxsegsz  */ (align == 1 && size > USB_PAGE_SIZE) ?
	    USB_PAGE_SIZE : size,
	    /* flags     */ BUS_DMA_KEEP_PG_OFFSET,
	    /* lockfn    */ &usb_dma_lock_cb,
	    /* lockarg   */ NULL,
	    &tag)) {
		tag = NULL;
	}
	udt->tag = tag;
}
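
/*
 * Worked example, assuming a 4 kB USB_PAGE_SIZE and dma_bits == 32:
 * "lowaddr" above evaluates to 0xffffffff, and a multi-segment tag
 * (align == 1) created for size 16384 gets nsegments = 2 + 16384/4096
 * = 6 and maxsegsz = 4096, while a tag with align != 1 always maps a
 * single segment of at most "size" bytes.
 */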

/*------------------------------------------------------------------------*
 *	usb_dma_tag_destroy - free a DMA tag
 *------------------------------------------------------------------------*/
static void
usb_dma_tag_destroy(struct usb_dma_tag *udt)
{
	bus_dma_tag_destroy(udt->tag);
}

/*------------------------------------------------------------------------*
 *	usb_pc_alloc_mem_cb - BUS-DMA callback function
 *------------------------------------------------------------------------*/
static void
usb_pc_alloc_mem_cb(void *arg, bus_dma_segment_t *segs,
    int nseg, int error)
{
	usb_pc_common_mem_cb(arg, segs, nseg, error, 0);
}

/*------------------------------------------------------------------------*
 *	usb_pc_load_mem_cb - BUS-DMA callback function
 *------------------------------------------------------------------------*/
static void
usb_pc_load_mem_cb(void *arg, bus_dma_segment_t *segs,
    int nseg, int error)
{
	usb_pc_common_mem_cb(arg, segs, nseg, error, 1);
}

/*------------------------------------------------------------------------*
 *	usb_pc_common_mem_cb - BUS-DMA callback function
 *------------------------------------------------------------------------*/
static void
usb_pc_common_mem_cb(void *arg, bus_dma_segment_t *segs,
    int nseg, int error, uint8_t isload)
{
	struct usb_dma_parent_tag *uptag;
	struct usb_page_cache *pc;
	struct usb_page *pg;
	usb_size_t rem;
	bus_size_t off;
	uint8_t owned;

	pc = arg;
	uptag = pc->tag_parent;

	/*
	 * XXX There is sometimes recursive locking here.
	 * XXX We should try to find a better solution.
	 * XXX Until further notice the "owned" variable
	 * XXX does the trick.
	 */

	if (error) {
		goto done;
	}

	off = 0;
	pg = pc->page_start;
	pg->physaddr = segs->ds_addr & ~(USB_PAGE_SIZE - 1);
	rem = segs->ds_addr & (USB_PAGE_SIZE - 1);
	pc->page_offset_buf = rem;
	pc->page_offset_end += rem;
#ifdef USB_DEBUG
	if (nseg > 1 &&
	    ((segs->ds_addr + segs->ds_len) & (USB_PAGE_SIZE - 1)) !=
	    ((segs + 1)->ds_addr & (USB_PAGE_SIZE - 1))) {
		/*
		 * This check verifies there is no page offset hole
		 * between the first and second segment. See the
		 * BUS_DMA_KEEP_PG_OFFSET flag.
		 */
		DPRINTFN(0, "Page offset was not preserved\n");
		error = 1;
		goto done;
	}
#endif
	while (pc->ismultiseg) {
		off += USB_PAGE_SIZE;
		if (off >= (segs->ds_len + rem)) {
			/* page crossing */
			nseg--;
			segs++;
			off = 0;
			rem = 0;
			if (nseg == 0)
				break;
		}
		pg++;
		pg->physaddr = (segs->ds_addr + off) & ~(USB_PAGE_SIZE - 1);
	}

done:
	owned = mtx_owned(uptag->mtx);
	if (!owned)
		mtx_lock(uptag->mtx);

	uptag->dma_error = (error ? 1 : 0);
	if (isload) {
		(uptag->func) (uptag);
	} else {
		cv_broadcast(uptag->cv);
	}
	if (!owned)
		mtx_unlock(uptag->mtx);
}
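
/*
 * Worked example of the page array fill above, assuming a 4 kB
 * USB_PAGE_SIZE: a contiguous 8 kB segment loaded at physical address
 * 0x12345800 yields page_offset_buf = 0x800 and three usb_page entries
 * with physaddr 0x12345000, 0x12346000 and 0x12347000, which lets
 * usbd_get_page() translate any buffer offset into a physical address
 * even though the buffer does not start on a page boundary.
 */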

/*------------------------------------------------------------------------*
 *	usb_pc_alloc_mem - allocate DMA-able memory
 *
 * Returns:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_alloc_mem(struct usb_page_cache *pc, struct usb_page *pg,
    usb_size_t size, usb_size_t align)
{
	struct usb_dma_parent_tag *uptag;
	struct usb_dma_tag *utag;
	bus_dmamap_t map;
	void *ptr;
	int err;

	uptag = pc->tag_parent;

	if (align != 1) {
		/*
		 * The alignment must be greater than or equal to the
		 * "size", else the object can be split between two
		 * memory pages and we get a problem!
		 */
		while (align < size) {
			align *= 2;
			if (align == 0) {
				goto error;
			}
		}
#if 1
		/*
		 * XXX BUS-DMA workaround - FIXME later:
		 *
		 * We assume that the alignment at this point of
		 * the code is greater than or equal to the size and
		 * less than two times the size, so that if we double
		 * the size, the size will be greater than the
		 * alignment.
		 *
		 * The bus-dma system has a check for "alignment"
		 * being less than "size". If that check fails we end
		 * up using contigmalloc which is page based even for
		 * small allocations. Try to avoid that to save
		 * memory, hence we sometimes do a large number of
		 * small allocations!
		 */
		if (size <= (USB_PAGE_SIZE / 2)) {
			size *= 2;
		}
#endif
	}
	/* get the correct DMA tag */
	utag = usb_dma_tag_find(uptag, size, align);
	if (utag == NULL) {
		goto error;
	}
	/* allocate memory */
	if (bus_dmamem_alloc(
	    utag->tag, &ptr, (BUS_DMA_WAITOK | BUS_DMA_COHERENT), &map)) {
		goto error;
	}
	/* setup page cache */
	pc->buffer = ptr;
	pc->page_start = pg;
	pc->page_offset_buf = 0;
	pc->page_offset_end = size;
	pc->map = map;
	pc->tag = utag->tag;
	pc->ismultiseg = (align == 1);

	mtx_lock(uptag->mtx);

	/* load memory into DMA */
	err = bus_dmamap_load(
	    utag->tag, map, ptr, size, &usb_pc_alloc_mem_cb,
	    pc, (BUS_DMA_WAITOK | BUS_DMA_COHERENT));

	if (err == EINPROGRESS) {
		cv_wait(uptag->cv, uptag->mtx);
		err = 0;
	}
	mtx_unlock(uptag->mtx);

	if (err || uptag->dma_error) {
		bus_dmamem_free(utag->tag, ptr, map);
		goto error;
	}
	memset(ptr, 0, size);

	usb_pc_cpu_flush(pc);

	return (0);

error:
	/* reset most of the page cache */
	pc->buffer = NULL;
	pc->page_start = NULL;
	pc->page_offset_buf = 0;
	pc->page_offset_end = 0;
	pc->map = NULL;
	pc->tag = NULL;
	return (1);
}
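
/*
 * Worked example of the alignment handling above: a request with
 * size 24 and align 8 first raises the alignment to 32 (the first
 * doubling of 8 that reaches or exceeds 24) and then, because the
 * object fits in half a USB page, doubles the size to 48 so that
 * "size" exceeds "alignment" again and bus_dmamem_alloc() can avoid
 * the page-based contigmalloc path described in the comment above.
 */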

/*------------------------------------------------------------------------*
 *	usb_pc_free_mem - free DMA memory
 *
 * This function is NULL safe.
 *------------------------------------------------------------------------*/
void
usb_pc_free_mem(struct usb_page_cache *pc)
{
	if (pc && pc->buffer) {

		bus_dmamap_unload(pc->tag, pc->map);

		bus_dmamem_free(pc->tag, pc->buffer, pc->map);

		pc->buffer = NULL;
	}
}

/*------------------------------------------------------------------------*
 *	usb_pc_load_mem - load virtual memory into DMA
 *
 * Return values:
 *    0: Success
 * Else: Error
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_load_mem(struct usb_page_cache *pc, usb_size_t size, uint8_t sync)
{
	/* setup page cache */
	pc->page_offset_buf = 0;
	pc->page_offset_end = size;
	pc->ismultiseg = 1;

	mtx_assert(pc->tag_parent->mtx, MA_OWNED);

	if (size > 0) {
		if (sync) {
			struct usb_dma_parent_tag *uptag;
			int err;

			uptag = pc->tag_parent;

			/*
			 * We have to unload the previously loaded DMA
			 * pages before trying to load a new one!
			 */
			bus_dmamap_unload(pc->tag, pc->map);

			/*
			 * Try to load memory into DMA.
			 */
			err = bus_dmamap_load(
			    pc->tag, pc->map, pc->buffer, size,
			    &usb_pc_alloc_mem_cb, pc, BUS_DMA_WAITOK);
			if (err == EINPROGRESS) {
				cv_wait(uptag->cv, uptag->mtx);
				err = 0;
			}
			if (err || uptag->dma_error) {
				return (1);
			}
		} else {

			/*
			 * We have to unload the previously loaded DMA
			 * pages before trying to load a new one!
			 */
			bus_dmamap_unload(pc->tag, pc->map);

			/*
			 * Try to load memory into DMA. The callback
			 * will be called in all cases:
			 */
			if (bus_dmamap_load(
			    pc->tag, pc->map, pc->buffer, size,
			    &usb_pc_load_mem_cb, pc, BUS_DMA_WAITOK)) {
			}
		}
	} else {
		if (!sync) {
			/*
			 * Call callback so that refcount is decremented
			 * properly:
			 */
			pc->tag_parent->dma_error = 0;
			(pc->tag_parent->func) (pc->tag_parent);
		}
	}
	return (0);
}

/*------------------------------------------------------------------------*
 *	usb_pc_cpu_invalidate - invalidate CPU cache
 *------------------------------------------------------------------------*/
void
usb_pc_cpu_invalidate(struct usb_page_cache *pc)
{
	if (pc->page_offset_end == pc->page_offset_buf) {
		/* nothing has been loaded into this page cache! */
		return;
	}

	/*
	 * TODO: We currently do XXX_POSTREAD and XXX_PREREAD at the
	 * same time, but in the future we should try to isolate the
	 * different cases to optimise the code. --HPS
	 */
	bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_PREREAD);
}

/*------------------------------------------------------------------------*
 *	usb_pc_cpu_flush - flush CPU cache
 *------------------------------------------------------------------------*/
void
usb_pc_cpu_flush(struct usb_page_cache *pc)
{
	if (pc->page_offset_end == pc->page_offset_buf) {
		/* nothing has been loaded into this page cache! */
		return;
	}
	bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_PREWRITE);
}
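
/*
 * Illustrative sketch (hypothetical names): the two cache sync
 * routines above are paired with the transfer direction, for example
 * when a driver prepares data for the device and later reads back
 * what the device wrote:
 *
 *	usbd_copy_in(pc, 0, tx_data, tx_len);
 *	usb_pc_cpu_flush(pc);		(CPU writes -> visible to device)
 *	...
 *	usb_pc_cpu_invalidate(pc);	(device writes -> visible to CPU)
 *	usbd_copy_out(pc, 0, rx_data, rx_len);
 */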

/*------------------------------------------------------------------------*
 *	usb_pc_dmamap_create - create a DMA map
 *
 * Returns:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_dmamap_create(struct usb_page_cache *pc, usb_size_t size)
{
	struct usb_xfer_root *info;
	struct usb_dma_tag *utag;

	/* get info */
	info = USB_DMATAG_TO_XROOT(pc->tag_parent);

	/* sanity check */
	if (info == NULL) {
		goto error;
	}
	utag = usb_dma_tag_find(pc->tag_parent, size, 1);
	if (utag == NULL) {
		goto error;
	}
	/* create DMA map */
	if (bus_dmamap_create(utag->tag, 0, &pc->map)) {
		goto error;
	}
	pc->tag = utag->tag;
	return (0);			/* success */

error:
	pc->map = NULL;
	pc->tag = NULL;
	return (1);			/* failure */
}

/*------------------------------------------------------------------------*
 *	usb_pc_dmamap_destroy
 *
 * This function is NULL safe.
 *------------------------------------------------------------------------*/
void
usb_pc_dmamap_destroy(struct usb_page_cache *pc)
{
	if (pc && pc->tag) {
		bus_dmamap_destroy(pc->tag, pc->map);
		pc->tag = NULL;
		pc->map = NULL;
	}
}

/*------------------------------------------------------------------------*
 *	usb_dma_tag_find - factored out code
 *------------------------------------------------------------------------*/
struct usb_dma_tag *
usb_dma_tag_find(struct usb_dma_parent_tag *udpt,
    usb_size_t size, usb_size_t align)
{
	struct usb_dma_tag *udt;
	uint8_t nudt;

	USB_ASSERT(align > 0, ("Invalid parameter align = 0\n"));
	USB_ASSERT(size > 0, ("Invalid parameter size = 0\n"));

	udt = udpt->utag_first;
	nudt = udpt->utag_max;

	while (nudt--) {

		if (udt->align == 0) {
			usb_dma_tag_create(udt, size, align);
			if (udt->tag == NULL) {
				return (NULL);
			}
			udt->align = align;
			udt->size = size;
			return (udt);
		}
		if ((udt->align == align) && (udt->size == size)) {
			return (udt);
		}
		udt++;
	}
	return (NULL);
}
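
/*
 * Example of the caching behaviour above: the first call with a given
 * (size, align) pair, say usb_dma_tag_find(udpt, 4096, 1), creates a
 * tag in the first unused slot of the parent's tag array; later calls
 * with the same parameters return that cached entry, and NULL is
 * returned either when tag creation fails or when all "utag_max"
 * slots already hold tags with different parameters.
 */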

/*------------------------------------------------------------------------*
 *	usb_dma_tag_setup - initialise USB DMA tags
 *------------------------------------------------------------------------*/
void
usb_dma_tag_setup(struct usb_dma_parent_tag *udpt,
    struct usb_dma_tag *udt, bus_dma_tag_t dmat,
    struct mtx *mtx, usb_dma_callback_t *func,
    uint8_t ndmabits, uint8_t nudt)
{
	memset(udpt, 0, sizeof(*udpt));

	/* sanity checking */
	if ((nudt == 0) ||
	    (ndmabits == 0) ||
	    (mtx == NULL)) {
		/* something is corrupt */
		return;
	}
	/* initialise condition variable */
	cv_init(udpt->cv, "USB DMA CV");

	/* store some information */
	udpt->mtx = mtx;
	udpt->func = func;
	udpt->tag = dmat;
	udpt->utag_first = udt;
	udpt->utag_max = nudt;
	udpt->dma_bits = ndmabits;

	while (nudt--) {
		memset(udt, 0, sizeof(*udt));
		udt->tag_parent = udpt;
		udt++;
	}
}

/*------------------------------------------------------------------------*
 *	usb_dma_tag_unsetup - factored out code
 *------------------------------------------------------------------------*/
void
usb_dma_tag_unsetup(struct usb_dma_parent_tag *udpt)
{
	struct usb_dma_tag *udt;
	uint8_t nudt;

	udt = udpt->utag_first;
	nudt = udpt->utag_max;

	while (nudt--) {

		if (udt->align) {
			/* destroy the USB DMA tag */
			usb_dma_tag_destroy(udt);
			udt->align = 0;
		}
		udt++;
	}

	if (udpt->utag_max) {
		/* destroy the condition variable */
		cv_destroy(udpt->cv);
	}
}

/*------------------------------------------------------------------------*
 *	usb_bdma_work_loop
 *
 * This function handles loading of virtual buffers into DMA and is
 * only called when "dma_refcount" is zero.
 *------------------------------------------------------------------------*/
void
usb_bdma_work_loop(struct usb_xfer_queue *pq)
{
	struct usb_xfer_root *info;
	struct usb_xfer *xfer;
	usb_frcount_t nframes;

	xfer = pq->curr;
	info = xfer->xroot;

	mtx_assert(info->xfer_mtx, MA_OWNED);

	if (xfer->error) {
		/* some error happened */
		USB_BUS_LOCK(info->bus);
		usbd_transfer_done(xfer, 0);
		USB_BUS_UNLOCK(info->bus);
		return;
	}
	if (!xfer->flags_int.bdma_setup) {
		struct usb_page *pg;
		usb_frlength_t frlength_0;
		uint8_t isread;

		xfer->flags_int.bdma_setup = 1;

		/* reset BUS-DMA load state */

		info->dma_error = 0;

		if (xfer->flags_int.isochronous_xfr) {
			/* only one frame buffer */
			nframes = 1;
			frlength_0 = xfer->sumlen;
		} else {
			/* can be multiple frame buffers */
			nframes = xfer->nframes;
			frlength_0 = xfer->frlengths[0];
		}

		/*
		 * Set DMA direction first. This is needed to
		 * select the correct cache invalidate and cache
		 * flush operations.
		 */
		isread = USB_GET_DATA_ISREAD(xfer);
		pg = xfer->dma_page_ptr;

		if (xfer->flags_int.control_xfr &&
		    xfer->flags_int.control_hdr) {
			/* special case */
			if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
				/* The device controller writes to memory */
				xfer->frbuffers[0].isread = 1;
			} else {
				/* The host controller reads from memory */
				xfer->frbuffers[0].isread = 0;
			}
		} else {
			/* default case */
			xfer->frbuffers[0].isread = isread;
		}

		/*
		 * Setup the "page_start" pointer which points to an array of
		 * USB pages where information about the physical address of a
		 * page will be stored. Also initialise the "isread" field of
		 * the USB page caches.
		 */
		xfer->frbuffers[0].page_start = pg;

		info->dma_nframes = nframes;
		info->dma_currframe = 0;
		info->dma_frlength_0 = frlength_0;

		pg += (frlength_0 / USB_PAGE_SIZE);
		pg += 2;

		while (--nframes > 0) {
			xfer->frbuffers[nframes].isread = isread;
			xfer->frbuffers[nframes].page_start = pg;

			pg += (xfer->frlengths[nframes] / USB_PAGE_SIZE);
			pg += 2;
		}

	}
	if (info->dma_error) {
		USB_BUS_LOCK(info->bus);
		usbd_transfer_done(xfer, USB_ERR_DMA_LOAD_FAILED);
		USB_BUS_UNLOCK(info->bus);
		return;
	}
	if (info->dma_currframe != info->dma_nframes) {

		if (info->dma_currframe == 0) {
			/* special case */
			usb_pc_load_mem(xfer->frbuffers,
			    info->dma_frlength_0, 0);
		} else {
			/* default case */
			nframes = info->dma_currframe;
			usb_pc_load_mem(xfer->frbuffers + nframes,
			    xfer->frlengths[nframes], 0);
		}

		/* advance frame index */
		info->dma_currframe++;

		return;
	}
	/* go ahead */
	usb_bdma_pre_sync(xfer);

	/* start loading next USB transfer, if any */
	usb_command_wrapper(pq, NULL);

	/* finally start the hardware */
	usbd_pipe_enter(xfer);
}
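
/*
 * Note on the page array advance above (an interpretation, not taken
 * from the original comments): "pg" is advanced by frlength /
 * USB_PAGE_SIZE plus two entries per frame buffer, which reserves
 * enough usb_page slots for the worst case where a buffer of that
 * length starts and ends in partially used pages.
 */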

/*------------------------------------------------------------------------*
 *	usb_bdma_done_event
 *
 * This function is called when BUS-DMA has loaded virtual memory
 * into DMA, if any.
 *------------------------------------------------------------------------*/
void
usb_bdma_done_event(struct usb_dma_parent_tag *udpt)
{
	struct usb_xfer_root *info;

	info = USB_DMATAG_TO_XROOT(udpt);

	mtx_assert(info->xfer_mtx, MA_OWNED);

	/* copy error */
	info->dma_error = udpt->dma_error;

	/* enter workloop again */
	usb_command_wrapper(&info->dma_q,
	    info->dma_q.curr);
}

/*------------------------------------------------------------------------*
 *	usb_bdma_pre_sync
 *
 * This function handles DMA synchronisation that must be done before
 * a USB transfer is started.
 *------------------------------------------------------------------------*/
void
usb_bdma_pre_sync(struct usb_xfer *xfer)
{
	struct usb_page_cache *pc;
	usb_frcount_t nframes;

	if (xfer->flags_int.isochronous_xfr) {
		/* only one frame buffer */
		nframes = 1;
	} else {
		/* can be multiple frame buffers */
		nframes = xfer->nframes;
	}

	pc = xfer->frbuffers;

	while (nframes--) {

		if (pc->isread) {
			usb_pc_cpu_invalidate(pc);
		} else {
			usb_pc_cpu_flush(pc);
		}
		pc++;
	}
}

/*------------------------------------------------------------------------*
 *	usb_bdma_post_sync
 *
 * This function handles DMA synchronisation that must be done after
 * a USB transfer is complete.
 *------------------------------------------------------------------------*/
void
usb_bdma_post_sync(struct usb_xfer *xfer)
{
	struct usb_page_cache *pc;
	usb_frcount_t nframes;

	if (xfer->flags_int.isochronous_xfr) {
		/* only one frame buffer */
		nframes = 1;
	} else {
		/* can be multiple frame buffers */
		nframes = xfer->nframes;
	}

	pc = xfer->frbuffers;

	while (nframes--) {
		if (pc->isread) {
			usb_pc_cpu_invalidate(pc);
		}
		pc++;
	}
}

#endif