/* $FreeBSD$ */
/*-
 * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/stdint.h>
#include <sys/stddef.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/sysctl.h>
#include <sys/sx.h>
#include <sys/unistd.h>
#include <sys/callout.h>
#include <sys/malloc.h>
#include <sys/priv.h>
#include <sys/proc.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>

#define	USB_DEBUG_VAR usb_debug

#include <dev/usb/usb_core.h>
#include <dev/usb/usb_busdma.h>
#include <dev/usb/usb_process.h>
#include <dev/usb/usb_transfer.h>
#include <dev/usb/usb_device.h>
#include <dev/usb/usb_debug.h>
#include <dev/usb/usb_util.h>

#include <dev/usb/usb_controller.h>
#include <dev/usb/usb_bus.h>
#include <dev/usb/usb_pf.h>

struct usb_std_packet_size {
	struct {
		uint16_t min;	/* inclusive */
		uint16_t max;	/* inclusive */
	}	range;

	uint16_t fixed[4];
};

static usb_callback_t usb_request_callback;

static const struct usb_config usb_control_ep_cfg[USB_CTRL_XFER_MAX] = {

	/* This transfer is used for generic control endpoint transfers */

	[0] = {
		.type = UE_CONTROL,
		.endpoint = 0x00,	/* Control endpoint */
		.direction = UE_DIR_ANY,
		.bufsize = USB_EP0_BUFSIZE,	/* bytes */
		.flags = {.proxy_buffer = 1,},
		.callback = &usb_request_callback,
		.usb_mode = USB_MODE_DUAL,	/* both modes */
	},

	/* This transfer is used for generic clear stall only */

	[1] = {
		.type = UE_CONTROL,
		.endpoint = 0x00,	/* Control pipe */
		.direction = UE_DIR_ANY,
		.bufsize = sizeof(struct usb_device_request),
		.callback = &usb_do_clear_stall_callback,
		.timeout = 1000,	/* 1 second */
		.interval = 50,	/* 50ms */
		.usb_mode = USB_MODE_HOST,
	},
};

/* function prototypes */

static void	usbd_update_max_frame_size(struct usb_xfer *);
static void	usbd_transfer_unsetup_sub(struct
	    usb_xfer_root *, uint8_t);
static void	usbd_control_transfer_init(struct usb_xfer *);
static int	usbd_setup_ctrl_transfer(struct usb_xfer *);
static void	usb_callback_proc(struct usb_proc_msg *);
static void	usbd_callback_ss_done_defer(struct usb_xfer *);
static void	usbd_callback_wrapper(struct usb_xfer_queue *);
static void	usbd_transfer_start_cb(void *);
static uint8_t	usbd_callback_wrapper_sub(struct usb_xfer *);
static void	usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
	    uint8_t type, enum usb_dev_speed speed);

/*------------------------------------------------------------------------*
 *	usb_request_callback
 *------------------------------------------------------------------------*/
static void
usb_request_callback(struct usb_xfer *xfer, usb_error_t error)
{
	if (xfer->flags_int.usb_mode == USB_MODE_DEVICE)
		usb_handle_request_callback(xfer, error);
	else
		usbd_do_request_callback(xfer, error);
}

/*------------------------------------------------------------------------*
 *	usbd_update_max_frame_size
 *
 * This function updates the maximum frame size, since high speed USB
 * can transfer multiple consecutive packets per frame.
 *------------------------------------------------------------------------*/
static void
usbd_update_max_frame_size(struct usb_xfer *xfer)
{
	/* compute maximum frame size */
	/* this computation should not overflow 16-bit */
	/* max = 15 * 1024 */

	xfer->max_frame_size = xfer->max_packet_size * xfer->max_packet_count;
}

/*------------------------------------------------------------------------*
 *	usbd_get_dma_delay
 *
 * The following function is called when we need to
 * synchronize with DMA hardware.
 *
 * Returns:
 *    0: no DMA delay required
 * Else: milliseconds of DMA delay
 *------------------------------------------------------------------------*/
usb_timeout_t
usbd_get_dma_delay(struct usb_device *udev)
{
	struct usb_bus_methods *mtod;
	uint32_t temp;

	mtod = udev->bus->methods;
	temp = 0;

	if (mtod->get_dma_delay) {
		(mtod->get_dma_delay) (udev, &temp);
		/*
		 * Round up and convert to milliseconds. Note that we use
		 * 1024 milliseconds per second to save a division.
		 */
		temp += 0x3FF;
		temp /= 0x400;
	}
	return (temp);
}

/*------------------------------------------------------------------------*
 *	usbd_transfer_setup_sub_malloc
 *
 * This function will allocate one or more DMA'able memory chunks
 * according to the "size", "align" and "count" arguments. "ppc" is
 * made to point to a linear array of USB page caches afterwards.
 *
 * Returns:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
#if USB_HAVE_BUSDMA
uint8_t
usbd_transfer_setup_sub_malloc(struct usb_setup_params *parm,
    struct usb_page_cache **ppc, usb_size_t size, usb_size_t align,
    usb_size_t count)
{
	struct usb_page_cache *pc;
	struct usb_page *pg;
	void *buf;
	usb_size_t n_dma_pc;
	usb_size_t n_obj;
	usb_size_t x;
	usb_size_t y;
	usb_size_t r;
	usb_size_t z;

	USB_ASSERT(align > 1, ("Invalid alignment, 0x%08x\n",
	    align));
	USB_ASSERT(size > 0, ("Invalid size = 0\n"));

	if (count == 0) {
		return (0);	/* nothing to allocate */
	}
	/*
	 * Make sure that the size is aligned properly.
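	 *
	 * The expression just below rounds "size" up to the nearest
	 * multiple of "align", assuming "align" is a power of two (which
	 * bus_dma alignments always are); for example, size = 100 and
	 * align = 64 yields 128, while size = 128 stays 128.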
213 */ 214 size = -((-size) & (-align)); 215 216 /* 217 * Try multi-allocation chunks to reduce the number of DMA 218 * allocations, hence DMA allocations are slow. 219 */ 220 if (size >= USB_PAGE_SIZE) { 221 n_dma_pc = count; 222 n_obj = 1; 223 } else { 224 /* compute number of objects per page */ 225 n_obj = (USB_PAGE_SIZE / size); 226 /* 227 * Compute number of DMA chunks, rounded up 228 * to nearest one: 229 */ 230 n_dma_pc = ((count + n_obj - 1) / n_obj); 231 } 232 233 if (parm->buf == NULL) { 234 /* for the future */ 235 parm->dma_page_ptr += n_dma_pc; 236 parm->dma_page_cache_ptr += n_dma_pc; 237 parm->dma_page_ptr += count; 238 parm->xfer_page_cache_ptr += count; 239 return (0); 240 } 241 for (x = 0; x != n_dma_pc; x++) { 242 /* need to initialize the page cache */ 243 parm->dma_page_cache_ptr[x].tag_parent = 244 &parm->curr_xfer->xroot->dma_parent_tag; 245 } 246 for (x = 0; x != count; x++) { 247 /* need to initialize the page cache */ 248 parm->xfer_page_cache_ptr[x].tag_parent = 249 &parm->curr_xfer->xroot->dma_parent_tag; 250 } 251 252 if (ppc) { 253 *ppc = parm->xfer_page_cache_ptr; 254 } 255 r = count; /* set remainder count */ 256 z = n_obj * size; /* set allocation size */ 257 pc = parm->xfer_page_cache_ptr; 258 pg = parm->dma_page_ptr; 259 260 for (x = 0; x != n_dma_pc; x++) { 261 262 if (r < n_obj) { 263 /* compute last remainder */ 264 z = r * size; 265 n_obj = r; 266 } 267 if (usb_pc_alloc_mem(parm->dma_page_cache_ptr, 268 pg, z, align)) { 269 return (1); /* failure */ 270 } 271 /* Set beginning of current buffer */ 272 buf = parm->dma_page_cache_ptr->buffer; 273 /* Make room for one DMA page cache and one page */ 274 parm->dma_page_cache_ptr++; 275 pg++; 276 277 for (y = 0; (y != n_obj); y++, r--, pc++, pg++) { 278 279 /* Load sub-chunk into DMA */ 280 if (usb_pc_dmamap_create(pc, size)) { 281 return (1); /* failure */ 282 } 283 pc->buffer = USB_ADD_BYTES(buf, y * size); 284 pc->page_start = pg; 285 286 mtx_lock(pc->tag_parent->mtx); 287 if (usb_pc_load_mem(pc, size, 1 /* synchronous */ )) { 288 mtx_unlock(pc->tag_parent->mtx); 289 return (1); /* failure */ 290 } 291 mtx_unlock(pc->tag_parent->mtx); 292 } 293 } 294 295 parm->xfer_page_cache_ptr = pc; 296 parm->dma_page_ptr = pg; 297 return (0); 298 } 299 #endif 300 301 /*------------------------------------------------------------------------* 302 * usbd_transfer_setup_sub - transfer setup subroutine 303 * 304 * This function must be called from the "xfer_setup" callback of the 305 * USB Host or Device controller driver when setting up an USB 306 * transfer. This function will setup correct packet sizes, buffer 307 * sizes, flags and more, that are stored in the "usb_xfer" 308 * structure. 309 *------------------------------------------------------------------------*/ 310 void 311 usbd_transfer_setup_sub(struct usb_setup_params *parm) 312 { 313 enum { 314 REQ_SIZE = 8, 315 MIN_PKT = 8, 316 }; 317 struct usb_xfer *xfer = parm->curr_xfer; 318 const struct usb_config *setup = parm->curr_setup; 319 struct usb_endpoint_ss_comp_descriptor *ecomp; 320 struct usb_endpoint_descriptor *edesc; 321 struct usb_std_packet_size std_size; 322 usb_frcount_t n_frlengths; 323 usb_frcount_t n_frbuffers; 324 usb_frcount_t x; 325 uint8_t type; 326 uint8_t zmps; 327 328 /* 329 * Sanity check. The following parameters must be initialized before 330 * calling this function. 
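 * ("hc_max_packet_size", "hc_max_packet_count" and "hc_max_frame_size";
 * see the check right below.)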
331 */ 332 if ((parm->hc_max_packet_size == 0) || 333 (parm->hc_max_packet_count == 0) || 334 (parm->hc_max_frame_size == 0)) { 335 parm->err = USB_ERR_INVAL; 336 goto done; 337 } 338 edesc = xfer->endpoint->edesc; 339 ecomp = xfer->endpoint->ecomp; 340 341 type = (edesc->bmAttributes & UE_XFERTYPE); 342 343 xfer->flags = setup->flags; 344 xfer->nframes = setup->frames; 345 xfer->timeout = setup->timeout; 346 xfer->callback = setup->callback; 347 xfer->interval = setup->interval; 348 xfer->endpointno = edesc->bEndpointAddress; 349 xfer->max_packet_size = UGETW(edesc->wMaxPacketSize); 350 xfer->max_packet_count = 1; 351 /* make a shadow copy: */ 352 xfer->flags_int.usb_mode = parm->udev->flags.usb_mode; 353 354 parm->bufsize = setup->bufsize; 355 356 switch (parm->speed) { 357 case USB_SPEED_HIGH: 358 switch (type) { 359 case UE_ISOCHRONOUS: 360 case UE_INTERRUPT: 361 xfer->max_packet_count += 362 (xfer->max_packet_size >> 11) & 3; 363 364 /* check for invalid max packet count */ 365 if (xfer->max_packet_count > 3) 366 xfer->max_packet_count = 3; 367 break; 368 default: 369 break; 370 } 371 xfer->max_packet_size &= 0x7FF; 372 break; 373 case USB_SPEED_SUPER: 374 xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3; 375 376 if (ecomp != NULL) 377 xfer->max_packet_count += ecomp->bMaxBurst; 378 379 if ((xfer->max_packet_count == 0) || 380 (xfer->max_packet_count > 16)) 381 xfer->max_packet_count = 16; 382 383 switch (type) { 384 case UE_CONTROL: 385 xfer->max_packet_count = 1; 386 break; 387 case UE_ISOCHRONOUS: 388 if (ecomp != NULL) { 389 uint8_t mult; 390 391 mult = UE_GET_SS_ISO_MULT( 392 ecomp->bmAttributes) + 1; 393 if (mult > 3) 394 mult = 3; 395 396 xfer->max_packet_count *= mult; 397 } 398 break; 399 default: 400 break; 401 } 402 xfer->max_packet_size &= 0x7FF; 403 break; 404 default: 405 break; 406 } 407 /* range check "max_packet_count" */ 408 409 if (xfer->max_packet_count > parm->hc_max_packet_count) { 410 xfer->max_packet_count = parm->hc_max_packet_count; 411 } 412 /* filter "wMaxPacketSize" according to HC capabilities */ 413 414 if ((xfer->max_packet_size > parm->hc_max_packet_size) || 415 (xfer->max_packet_size == 0)) { 416 xfer->max_packet_size = parm->hc_max_packet_size; 417 } 418 /* filter "wMaxPacketSize" according to standard sizes */ 419 420 usbd_get_std_packet_size(&std_size, type, parm->speed); 421 422 if (std_size.range.min || std_size.range.max) { 423 424 if (xfer->max_packet_size < std_size.range.min) { 425 xfer->max_packet_size = std_size.range.min; 426 } 427 if (xfer->max_packet_size > std_size.range.max) { 428 xfer->max_packet_size = std_size.range.max; 429 } 430 } else { 431 432 if (xfer->max_packet_size >= std_size.fixed[3]) { 433 xfer->max_packet_size = std_size.fixed[3]; 434 } else if (xfer->max_packet_size >= std_size.fixed[2]) { 435 xfer->max_packet_size = std_size.fixed[2]; 436 } else if (xfer->max_packet_size >= std_size.fixed[1]) { 437 xfer->max_packet_size = std_size.fixed[1]; 438 } else { 439 /* only one possibility left */ 440 xfer->max_packet_size = std_size.fixed[0]; 441 } 442 } 443 444 /* compute "max_frame_size" */ 445 446 usbd_update_max_frame_size(xfer); 447 448 /* check interrupt interval and transfer pre-delay */ 449 450 if (type == UE_ISOCHRONOUS) { 451 452 uint16_t frame_limit; 453 454 xfer->interval = 0; /* not used, must be zero */ 455 xfer->flags_int.isochronous_xfr = 1; /* set flag */ 456 457 if (xfer->timeout == 0) { 458 /* 459 * set a default timeout in 460 * case something goes wrong! 
461 */ 462 xfer->timeout = 1000 / 4; 463 } 464 switch (parm->speed) { 465 case USB_SPEED_LOW: 466 case USB_SPEED_FULL: 467 frame_limit = USB_MAX_FS_ISOC_FRAMES_PER_XFER; 468 xfer->fps_shift = 0; 469 break; 470 default: 471 frame_limit = USB_MAX_HS_ISOC_FRAMES_PER_XFER; 472 xfer->fps_shift = edesc->bInterval; 473 if (xfer->fps_shift > 0) 474 xfer->fps_shift--; 475 if (xfer->fps_shift > 3) 476 xfer->fps_shift = 3; 477 if (xfer->flags.pre_scale_frames != 0) 478 xfer->nframes <<= (3 - xfer->fps_shift); 479 break; 480 } 481 482 if (xfer->nframes > frame_limit) { 483 /* 484 * this is not going to work 485 * cross hardware 486 */ 487 parm->err = USB_ERR_INVAL; 488 goto done; 489 } 490 if (xfer->nframes == 0) { 491 /* 492 * this is not a valid value 493 */ 494 parm->err = USB_ERR_ZERO_NFRAMES; 495 goto done; 496 } 497 } else { 498 499 /* 500 * If a value is specified use that else check the 501 * endpoint descriptor! 502 */ 503 if (type == UE_INTERRUPT) { 504 505 uint32_t temp; 506 507 if (xfer->interval == 0) { 508 509 xfer->interval = edesc->bInterval; 510 511 switch (parm->speed) { 512 case USB_SPEED_LOW: 513 case USB_SPEED_FULL: 514 break; 515 default: 516 /* 125us -> 1ms */ 517 if (xfer->interval < 4) 518 xfer->interval = 1; 519 else if (xfer->interval > 16) 520 xfer->interval = (1 << (16 - 4)); 521 else 522 xfer->interval = 523 (1 << (xfer->interval - 4)); 524 break; 525 } 526 } 527 528 if (xfer->interval == 0) { 529 /* 530 * One millisecond is the smallest 531 * interval we support: 532 */ 533 xfer->interval = 1; 534 } 535 536 xfer->fps_shift = 0; 537 temp = 1; 538 539 while ((temp != 0) && (temp < xfer->interval)) { 540 xfer->fps_shift++; 541 temp *= 2; 542 } 543 544 switch (parm->speed) { 545 case USB_SPEED_LOW: 546 case USB_SPEED_FULL: 547 break; 548 default: 549 xfer->fps_shift += 3; 550 break; 551 } 552 } 553 } 554 555 /* 556 * NOTE: we do not allow "max_packet_size" or "max_frame_size" 557 * to be equal to zero when setting up USB transfers, hence 558 * this leads to alot of extra code in the USB kernel. 
559 */ 560 561 if ((xfer->max_frame_size == 0) || 562 (xfer->max_packet_size == 0)) { 563 564 zmps = 1; 565 566 if ((parm->bufsize <= MIN_PKT) && 567 (type != UE_CONTROL) && 568 (type != UE_BULK)) { 569 570 /* workaround */ 571 xfer->max_packet_size = MIN_PKT; 572 xfer->max_packet_count = 1; 573 parm->bufsize = 0; /* automatic setup length */ 574 usbd_update_max_frame_size(xfer); 575 576 } else { 577 parm->err = USB_ERR_ZERO_MAXP; 578 goto done; 579 } 580 581 } else { 582 zmps = 0; 583 } 584 585 /* 586 * check if we should setup a default 587 * length: 588 */ 589 590 if (parm->bufsize == 0) { 591 592 parm->bufsize = xfer->max_frame_size; 593 594 if (type == UE_ISOCHRONOUS) { 595 parm->bufsize *= xfer->nframes; 596 } 597 } 598 /* 599 * check if we are about to setup a proxy 600 * type of buffer: 601 */ 602 603 if (xfer->flags.proxy_buffer) { 604 605 /* round bufsize up */ 606 607 parm->bufsize += (xfer->max_frame_size - 1); 608 609 if (parm->bufsize < xfer->max_frame_size) { 610 /* length wrapped around */ 611 parm->err = USB_ERR_INVAL; 612 goto done; 613 } 614 /* subtract remainder */ 615 616 parm->bufsize -= (parm->bufsize % xfer->max_frame_size); 617 618 /* add length of USB device request structure, if any */ 619 620 if (type == UE_CONTROL) { 621 parm->bufsize += REQ_SIZE; /* SETUP message */ 622 } 623 } 624 xfer->max_data_length = parm->bufsize; 625 626 /* Setup "n_frlengths" and "n_frbuffers" */ 627 628 if (type == UE_ISOCHRONOUS) { 629 n_frlengths = xfer->nframes; 630 n_frbuffers = 1; 631 } else { 632 633 if (type == UE_CONTROL) { 634 xfer->flags_int.control_xfr = 1; 635 if (xfer->nframes == 0) { 636 if (parm->bufsize <= REQ_SIZE) { 637 /* 638 * there will never be any data 639 * stage 640 */ 641 xfer->nframes = 1; 642 } else { 643 xfer->nframes = 2; 644 } 645 } 646 } else { 647 if (xfer->nframes == 0) { 648 xfer->nframes = 1; 649 } 650 } 651 652 n_frlengths = xfer->nframes; 653 n_frbuffers = xfer->nframes; 654 } 655 656 /* 657 * check if we have room for the 658 * USB device request structure: 659 */ 660 661 if (type == UE_CONTROL) { 662 663 if (xfer->max_data_length < REQ_SIZE) { 664 /* length wrapped around or too small bufsize */ 665 parm->err = USB_ERR_INVAL; 666 goto done; 667 } 668 xfer->max_data_length -= REQ_SIZE; 669 } 670 /* 671 * Setup "frlengths" and shadow "frlengths" for keeping the 672 * initial frame lengths when a USB transfer is complete. This 673 * information is useful when computing isochronous offsets. 
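 *
 * (The shadow copy is stored right after the live lengths, at
 * "frlengths[max_frame_count + index]"; see "usbd_transfer_submit()"
 * and "usbd_xfer_old_frame_length()" below.)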
674 */ 675 xfer->frlengths = parm->xfer_length_ptr; 676 parm->xfer_length_ptr += 2 * n_frlengths; 677 678 /* setup "frbuffers" */ 679 xfer->frbuffers = parm->xfer_page_cache_ptr; 680 parm->xfer_page_cache_ptr += n_frbuffers; 681 682 /* initialize max frame count */ 683 xfer->max_frame_count = xfer->nframes; 684 685 /* 686 * check if we need to setup 687 * a local buffer: 688 */ 689 690 if (!xfer->flags.ext_buffer) { 691 692 /* align data */ 693 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1)); 694 695 if (parm->buf) { 696 697 xfer->local_buffer = 698 USB_ADD_BYTES(parm->buf, parm->size[0]); 699 700 usbd_xfer_set_frame_offset(xfer, 0, 0); 701 702 if ((type == UE_CONTROL) && (n_frbuffers > 1)) { 703 usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1); 704 } 705 } 706 parm->size[0] += parm->bufsize; 707 708 /* align data again */ 709 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1)); 710 } 711 /* 712 * Compute maximum buffer size 713 */ 714 715 if (parm->bufsize_max < parm->bufsize) { 716 parm->bufsize_max = parm->bufsize; 717 } 718 #if USB_HAVE_BUSDMA 719 if (xfer->flags_int.bdma_enable) { 720 /* 721 * Setup "dma_page_ptr". 722 * 723 * Proof for formula below: 724 * 725 * Assume there are three USB frames having length "a", "b" and 726 * "c". These USB frames will at maximum need "z" 727 * "usb_page" structures. "z" is given by: 728 * 729 * z = ((a / USB_PAGE_SIZE) + 2) + ((b / USB_PAGE_SIZE) + 2) + 730 * ((c / USB_PAGE_SIZE) + 2); 731 * 732 * Constraining "a", "b" and "c" like this: 733 * 734 * (a + b + c) <= parm->bufsize 735 * 736 * We know that: 737 * 738 * z <= ((parm->bufsize / USB_PAGE_SIZE) + (3*2)); 739 * 740 * Here is the general formula: 741 */ 742 xfer->dma_page_ptr = parm->dma_page_ptr; 743 parm->dma_page_ptr += (2 * n_frbuffers); 744 parm->dma_page_ptr += (parm->bufsize / USB_PAGE_SIZE); 745 } 746 #endif 747 if (zmps) { 748 /* correct maximum data length */ 749 xfer->max_data_length = 0; 750 } 751 /* subtract USB frame remainder from "hc_max_frame_size" */ 752 753 xfer->max_hc_frame_size = 754 (parm->hc_max_frame_size - 755 (parm->hc_max_frame_size % xfer->max_frame_size)); 756 757 if (xfer->max_hc_frame_size == 0) { 758 parm->err = USB_ERR_INVAL; 759 goto done; 760 } 761 762 /* initialize frame buffers */ 763 764 if (parm->buf) { 765 for (x = 0; x != n_frbuffers; x++) { 766 xfer->frbuffers[x].tag_parent = 767 &xfer->xroot->dma_parent_tag; 768 #if USB_HAVE_BUSDMA 769 if (xfer->flags_int.bdma_enable && 770 (parm->bufsize_max > 0)) { 771 772 if (usb_pc_dmamap_create( 773 xfer->frbuffers + x, 774 parm->bufsize_max)) { 775 parm->err = USB_ERR_NOMEM; 776 goto done; 777 } 778 } 779 #endif 780 } 781 } 782 done: 783 if (parm->err) { 784 /* 785 * Set some dummy values so that we avoid division by zero: 786 */ 787 xfer->max_hc_frame_size = 1; 788 xfer->max_frame_size = 1; 789 xfer->max_packet_size = 1; 790 xfer->max_data_length = 0; 791 xfer->nframes = 0; 792 xfer->max_frame_count = 0; 793 } 794 } 795 796 /*------------------------------------------------------------------------* 797 * usbd_transfer_setup - setup an array of USB transfers 798 * 799 * NOTE: You must always call "usbd_transfer_unsetup" after calling 800 * "usbd_transfer_setup" if success was returned. 801 * 802 * The idea is that the USB device driver should pre-allocate all its 803 * transfers by one call to this function. 
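 *
 * A minimal, hypothetical call from a driver attach routine could look
 * like this ("uaa", "sc", "sc_config" and SC_N_TRANSFER are names the
 * driver defines itself):
 *
 *	error = usbd_transfer_setup(uaa->device, &iface_index,
 *	    sc->sc_xfer, sc_config, SC_N_TRANSFER, sc, &sc->sc_mtx);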
804 * 805 * Return values: 806 * 0: Success 807 * Else: Failure 808 *------------------------------------------------------------------------*/ 809 usb_error_t 810 usbd_transfer_setup(struct usb_device *udev, 811 const uint8_t *ifaces, struct usb_xfer **ppxfer, 812 const struct usb_config *setup_start, uint16_t n_setup, 813 void *priv_sc, struct mtx *xfer_mtx) 814 { 815 struct usb_xfer dummy; 816 struct usb_setup_params parm; 817 const struct usb_config *setup_end = setup_start + n_setup; 818 const struct usb_config *setup; 819 struct usb_endpoint *ep; 820 struct usb_xfer_root *info; 821 struct usb_xfer *xfer; 822 void *buf = NULL; 823 uint16_t n; 824 uint16_t refcount; 825 826 parm.err = 0; 827 refcount = 0; 828 info = NULL; 829 830 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 831 "usbd_transfer_setup can sleep!"); 832 833 /* do some checking first */ 834 835 if (n_setup == 0) { 836 DPRINTFN(6, "setup array has zero length!\n"); 837 return (USB_ERR_INVAL); 838 } 839 if (ifaces == 0) { 840 DPRINTFN(6, "ifaces array is NULL!\n"); 841 return (USB_ERR_INVAL); 842 } 843 if (xfer_mtx == NULL) { 844 DPRINTFN(6, "using global lock\n"); 845 xfer_mtx = &Giant; 846 } 847 /* sanity checks */ 848 for (setup = setup_start, n = 0; 849 setup != setup_end; setup++, n++) { 850 if (setup->bufsize == (usb_frlength_t)-1) { 851 parm.err = USB_ERR_BAD_BUFSIZE; 852 DPRINTF("invalid bufsize\n"); 853 } 854 if (setup->callback == NULL) { 855 parm.err = USB_ERR_NO_CALLBACK; 856 DPRINTF("no callback\n"); 857 } 858 ppxfer[n] = NULL; 859 } 860 861 if (parm.err) { 862 goto done; 863 } 864 memset(&parm, 0, sizeof(parm)); 865 866 parm.udev = udev; 867 parm.speed = usbd_get_speed(udev); 868 parm.hc_max_packet_count = 1; 869 870 if (parm.speed >= USB_SPEED_MAX) { 871 parm.err = USB_ERR_INVAL; 872 goto done; 873 } 874 /* setup all transfers */ 875 876 while (1) { 877 878 if (buf) { 879 /* 880 * Initialize the "usb_xfer_root" structure, 881 * which is common for all our USB transfers. 882 */ 883 info = USB_ADD_BYTES(buf, 0); 884 885 info->memory_base = buf; 886 info->memory_size = parm.size[0]; 887 888 #if USB_HAVE_BUSDMA 889 info->dma_page_cache_start = USB_ADD_BYTES(buf, parm.size[4]); 890 info->dma_page_cache_end = USB_ADD_BYTES(buf, parm.size[5]); 891 #endif 892 info->xfer_page_cache_start = USB_ADD_BYTES(buf, parm.size[5]); 893 info->xfer_page_cache_end = USB_ADD_BYTES(buf, parm.size[2]); 894 895 cv_init(&info->cv_drain, "WDRAIN"); 896 897 info->xfer_mtx = xfer_mtx; 898 #if USB_HAVE_BUSDMA 899 usb_dma_tag_setup(&info->dma_parent_tag, 900 parm.dma_tag_p, udev->bus->dma_parent_tag[0].tag, 901 xfer_mtx, &usb_bdma_done_event, 32, parm.dma_tag_max); 902 #endif 903 904 info->bus = udev->bus; 905 info->udev = udev; 906 907 TAILQ_INIT(&info->done_q.head); 908 info->done_q.command = &usbd_callback_wrapper; 909 #if USB_HAVE_BUSDMA 910 TAILQ_INIT(&info->dma_q.head); 911 info->dma_q.command = &usb_bdma_work_loop; 912 #endif 913 info->done_m[0].hdr.pm_callback = &usb_callback_proc; 914 info->done_m[0].xroot = info; 915 info->done_m[1].hdr.pm_callback = &usb_callback_proc; 916 info->done_m[1].xroot = info; 917 918 /* 919 * In device side mode control endpoint 920 * requests need to run from a separate 921 * context, else there is a chance of 922 * deadlock! 
923 */ 924 if (setup_start == usb_control_ep_cfg) 925 info->done_p = 926 &udev->bus->control_xfer_proc; 927 else if (xfer_mtx == &Giant) 928 info->done_p = 929 &udev->bus->giant_callback_proc; 930 else 931 info->done_p = 932 &udev->bus->non_giant_callback_proc; 933 } 934 /* reset sizes */ 935 936 parm.size[0] = 0; 937 parm.buf = buf; 938 parm.size[0] += sizeof(info[0]); 939 940 for (setup = setup_start, n = 0; 941 setup != setup_end; setup++, n++) { 942 943 /* skip USB transfers without callbacks: */ 944 if (setup->callback == NULL) { 945 continue; 946 } 947 /* see if there is a matching endpoint */ 948 ep = usbd_get_endpoint(udev, 949 ifaces[setup->if_index], setup); 950 951 /* 952 * Check that the USB PIPE is valid and that 953 * the endpoint mode is proper. 954 * 955 * Make sure we don't allocate a streams 956 * transfer when such a combination is not 957 * valid. 958 */ 959 if ((ep == NULL) || (ep->methods == NULL) || 960 ((ep->ep_mode != USB_EP_MODE_STREAMS) && 961 (ep->ep_mode != USB_EP_MODE_DEFAULT)) || 962 (setup->stream_id != 0 && 963 (setup->stream_id >= USB_MAX_EP_STREAMS || 964 (ep->ep_mode != USB_EP_MODE_STREAMS)))) { 965 if (setup->flags.no_pipe_ok) 966 continue; 967 if ((setup->usb_mode != USB_MODE_DUAL) && 968 (setup->usb_mode != udev->flags.usb_mode)) 969 continue; 970 parm.err = USB_ERR_NO_PIPE; 971 goto done; 972 } 973 974 /* align data properly */ 975 parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1)); 976 977 /* store current setup pointer */ 978 parm.curr_setup = setup; 979 980 if (buf) { 981 /* 982 * Common initialization of the 983 * "usb_xfer" structure. 984 */ 985 xfer = USB_ADD_BYTES(buf, parm.size[0]); 986 xfer->address = udev->address; 987 xfer->priv_sc = priv_sc; 988 xfer->xroot = info; 989 990 usb_callout_init_mtx(&xfer->timeout_handle, 991 &udev->bus->bus_mtx, 0); 992 } else { 993 /* 994 * Setup a dummy xfer, hence we are 995 * writing to the "usb_xfer" 996 * structure pointed to by "xfer" 997 * before we have allocated any 998 * memory: 999 */ 1000 xfer = &dummy; 1001 memset(&dummy, 0, sizeof(dummy)); 1002 refcount++; 1003 } 1004 1005 /* set transfer endpoint pointer */ 1006 xfer->endpoint = ep; 1007 1008 /* set transfer stream ID */ 1009 xfer->stream_id = setup->stream_id; 1010 1011 parm.size[0] += sizeof(xfer[0]); 1012 parm.methods = xfer->endpoint->methods; 1013 parm.curr_xfer = xfer; 1014 1015 /* 1016 * Call the Host or Device controller transfer 1017 * setup routine: 1018 */ 1019 (udev->bus->methods->xfer_setup) (&parm); 1020 1021 /* check for error */ 1022 if (parm.err) 1023 goto done; 1024 1025 if (buf) { 1026 /* 1027 * Increment the endpoint refcount. This 1028 * basically prevents setting a new 1029 * configuration and alternate setting 1030 * when USB transfers are in use on 1031 * the given interface. Search the USB 1032 * code for "endpoint->refcount_alloc" if you 1033 * want more information. 
1034 */ 1035 USB_BUS_LOCK(info->bus); 1036 if (xfer->endpoint->refcount_alloc >= USB_EP_REF_MAX) 1037 parm.err = USB_ERR_INVAL; 1038 1039 xfer->endpoint->refcount_alloc++; 1040 1041 if (xfer->endpoint->refcount_alloc == 0) 1042 panic("usbd_transfer_setup(): Refcount wrapped to zero\n"); 1043 USB_BUS_UNLOCK(info->bus); 1044 1045 /* 1046 * Whenever we set ppxfer[] then we 1047 * also need to increment the 1048 * "setup_refcount": 1049 */ 1050 info->setup_refcount++; 1051 1052 /* 1053 * Transfer is successfully setup and 1054 * can be used: 1055 */ 1056 ppxfer[n] = xfer; 1057 } 1058 1059 /* check for error */ 1060 if (parm.err) 1061 goto done; 1062 } 1063 1064 if (buf || parm.err) { 1065 goto done; 1066 } 1067 if (refcount == 0) { 1068 /* no transfers - nothing to do ! */ 1069 goto done; 1070 } 1071 /* align data properly */ 1072 parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1)); 1073 1074 /* store offset temporarily */ 1075 parm.size[1] = parm.size[0]; 1076 1077 /* 1078 * The number of DMA tags required depends on 1079 * the number of endpoints. The current estimate 1080 * for maximum number of DMA tags per endpoint 1081 * is two. 1082 */ 1083 parm.dma_tag_max += 2 * MIN(n_setup, USB_EP_MAX); 1084 1085 /* 1086 * DMA tags for QH, TD, Data and more. 1087 */ 1088 parm.dma_tag_max += 8; 1089 1090 parm.dma_tag_p += parm.dma_tag_max; 1091 1092 parm.size[0] += ((uint8_t *)parm.dma_tag_p) - 1093 ((uint8_t *)0); 1094 1095 /* align data properly */ 1096 parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1)); 1097 1098 /* store offset temporarily */ 1099 parm.size[3] = parm.size[0]; 1100 1101 parm.size[0] += ((uint8_t *)parm.dma_page_ptr) - 1102 ((uint8_t *)0); 1103 1104 /* align data properly */ 1105 parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1)); 1106 1107 /* store offset temporarily */ 1108 parm.size[4] = parm.size[0]; 1109 1110 parm.size[0] += ((uint8_t *)parm.dma_page_cache_ptr) - 1111 ((uint8_t *)0); 1112 1113 /* store end offset temporarily */ 1114 parm.size[5] = parm.size[0]; 1115 1116 parm.size[0] += ((uint8_t *)parm.xfer_page_cache_ptr) - 1117 ((uint8_t *)0); 1118 1119 /* store end offset temporarily */ 1120 1121 parm.size[2] = parm.size[0]; 1122 1123 /* align data properly */ 1124 parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1)); 1125 1126 parm.size[6] = parm.size[0]; 1127 1128 parm.size[0] += ((uint8_t *)parm.xfer_length_ptr) - 1129 ((uint8_t *)0); 1130 1131 /* align data properly */ 1132 parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1)); 1133 1134 /* allocate zeroed memory */ 1135 buf = malloc(parm.size[0], M_USB, M_WAITOK | M_ZERO); 1136 1137 if (buf == NULL) { 1138 parm.err = USB_ERR_NOMEM; 1139 DPRINTFN(0, "cannot allocate memory block for " 1140 "configuration (%d bytes)\n", 1141 parm.size[0]); 1142 goto done; 1143 } 1144 parm.dma_tag_p = USB_ADD_BYTES(buf, parm.size[1]); 1145 parm.dma_page_ptr = USB_ADD_BYTES(buf, parm.size[3]); 1146 parm.dma_page_cache_ptr = USB_ADD_BYTES(buf, parm.size[4]); 1147 parm.xfer_page_cache_ptr = USB_ADD_BYTES(buf, parm.size[5]); 1148 parm.xfer_length_ptr = USB_ADD_BYTES(buf, parm.size[6]); 1149 } 1150 1151 done: 1152 if (buf) { 1153 if (info->setup_refcount == 0) { 1154 /* 1155 * "usbd_transfer_unsetup_sub" will unlock 1156 * the bus mutex before returning ! 
1157 */ 1158 USB_BUS_LOCK(info->bus); 1159 1160 /* something went wrong */ 1161 usbd_transfer_unsetup_sub(info, 0); 1162 } 1163 } 1164 if (parm.err) { 1165 usbd_transfer_unsetup(ppxfer, n_setup); 1166 } 1167 return (parm.err); 1168 } 1169 1170 /*------------------------------------------------------------------------* 1171 * usbd_transfer_unsetup_sub - factored out code 1172 *------------------------------------------------------------------------*/ 1173 static void 1174 usbd_transfer_unsetup_sub(struct usb_xfer_root *info, uint8_t needs_delay) 1175 { 1176 #if USB_HAVE_BUSDMA 1177 struct usb_page_cache *pc; 1178 #endif 1179 1180 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED); 1181 1182 /* wait for any outstanding DMA operations */ 1183 1184 if (needs_delay) { 1185 usb_timeout_t temp; 1186 temp = usbd_get_dma_delay(info->udev); 1187 if (temp != 0) { 1188 usb_pause_mtx(&info->bus->bus_mtx, 1189 USB_MS_TO_TICKS(temp)); 1190 } 1191 } 1192 1193 /* make sure that our done messages are not queued anywhere */ 1194 usb_proc_mwait(info->done_p, &info->done_m[0], &info->done_m[1]); 1195 1196 USB_BUS_UNLOCK(info->bus); 1197 1198 #if USB_HAVE_BUSDMA 1199 /* free DMA'able memory, if any */ 1200 pc = info->dma_page_cache_start; 1201 while (pc != info->dma_page_cache_end) { 1202 usb_pc_free_mem(pc); 1203 pc++; 1204 } 1205 1206 /* free DMA maps in all "xfer->frbuffers" */ 1207 pc = info->xfer_page_cache_start; 1208 while (pc != info->xfer_page_cache_end) { 1209 usb_pc_dmamap_destroy(pc); 1210 pc++; 1211 } 1212 1213 /* free all DMA tags */ 1214 usb_dma_tag_unsetup(&info->dma_parent_tag); 1215 #endif 1216 1217 cv_destroy(&info->cv_drain); 1218 1219 /* 1220 * free the "memory_base" last, hence the "info" structure is 1221 * contained within the "memory_base"! 1222 */ 1223 free(info->memory_base, M_USB); 1224 } 1225 1226 /*------------------------------------------------------------------------* 1227 * usbd_transfer_unsetup - unsetup/free an array of USB transfers 1228 * 1229 * NOTE: All USB transfers in progress will get called back passing 1230 * the error code "USB_ERR_CANCELLED" before this function 1231 * returns. 1232 *------------------------------------------------------------------------*/ 1233 void 1234 usbd_transfer_unsetup(struct usb_xfer **pxfer, uint16_t n_setup) 1235 { 1236 struct usb_xfer *xfer; 1237 struct usb_xfer_root *info; 1238 uint8_t needs_delay = 0; 1239 1240 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 1241 "usbd_transfer_unsetup can sleep!"); 1242 1243 while (n_setup--) { 1244 xfer = pxfer[n_setup]; 1245 1246 if (xfer == NULL) 1247 continue; 1248 1249 info = xfer->xroot; 1250 1251 USB_XFER_LOCK(xfer); 1252 USB_BUS_LOCK(info->bus); 1253 1254 /* 1255 * HINT: when you start/stop a transfer, it might be a 1256 * good idea to directly use the "pxfer[]" structure: 1257 * 1258 * usbd_transfer_start(sc->pxfer[0]); 1259 * usbd_transfer_stop(sc->pxfer[0]); 1260 * 1261 * That way, if your code has many parts that will not 1262 * stop running under the same lock, in other words 1263 * "xfer_mtx", the usbd_transfer_start and 1264 * usbd_transfer_stop functions will simply return 1265 * when they detect a NULL pointer argument. 
1266 * 1267 * To avoid any races we clear the "pxfer[]" pointer 1268 * while holding the private mutex of the driver: 1269 */ 1270 pxfer[n_setup] = NULL; 1271 1272 USB_BUS_UNLOCK(info->bus); 1273 USB_XFER_UNLOCK(xfer); 1274 1275 usbd_transfer_drain(xfer); 1276 1277 #if USB_HAVE_BUSDMA 1278 if (xfer->flags_int.bdma_enable) 1279 needs_delay = 1; 1280 #endif 1281 /* 1282 * NOTE: default endpoint does not have an 1283 * interface, even if endpoint->iface_index == 0 1284 */ 1285 USB_BUS_LOCK(info->bus); 1286 xfer->endpoint->refcount_alloc--; 1287 USB_BUS_UNLOCK(info->bus); 1288 1289 usb_callout_drain(&xfer->timeout_handle); 1290 1291 USB_BUS_LOCK(info->bus); 1292 1293 USB_ASSERT(info->setup_refcount != 0, ("Invalid setup " 1294 "reference count\n")); 1295 1296 info->setup_refcount--; 1297 1298 if (info->setup_refcount == 0) { 1299 usbd_transfer_unsetup_sub(info, 1300 needs_delay); 1301 } else { 1302 USB_BUS_UNLOCK(info->bus); 1303 } 1304 } 1305 } 1306 1307 /*------------------------------------------------------------------------* 1308 * usbd_control_transfer_init - factored out code 1309 * 1310 * In USB Device Mode we have to wait for the SETUP packet which 1311 * containst the "struct usb_device_request" structure, before we can 1312 * transfer any data. In USB Host Mode we already have the SETUP 1313 * packet at the moment the USB transfer is started. This leads us to 1314 * having to setup the USB transfer at two different places in 1315 * time. This function just contains factored out control transfer 1316 * initialisation code, so that we don't duplicate the code. 1317 *------------------------------------------------------------------------*/ 1318 static void 1319 usbd_control_transfer_init(struct usb_xfer *xfer) 1320 { 1321 struct usb_device_request req; 1322 1323 /* copy out the USB request header */ 1324 1325 usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req)); 1326 1327 /* setup remainder */ 1328 1329 xfer->flags_int.control_rem = UGETW(req.wLength); 1330 1331 /* copy direction to endpoint variable */ 1332 1333 xfer->endpointno &= ~(UE_DIR_IN | UE_DIR_OUT); 1334 xfer->endpointno |= 1335 (req.bmRequestType & UT_READ) ? UE_DIR_IN : UE_DIR_OUT; 1336 } 1337 1338 /*------------------------------------------------------------------------* 1339 * usbd_setup_ctrl_transfer 1340 * 1341 * This function handles initialisation of control transfers. Control 1342 * transfers are special in that regard that they can both transmit 1343 * and receive data. 1344 * 1345 * Return values: 1346 * 0: Success 1347 * Else: Failure 1348 *------------------------------------------------------------------------*/ 1349 static int 1350 usbd_setup_ctrl_transfer(struct usb_xfer *xfer) 1351 { 1352 usb_frlength_t len; 1353 1354 /* Check for control endpoint stall */ 1355 if (xfer->flags.stall_pipe && xfer->flags_int.control_act) { 1356 /* the control transfer is no longer active */ 1357 xfer->flags_int.control_stall = 1; 1358 xfer->flags_int.control_act = 0; 1359 } else { 1360 /* don't stall control transfer by default */ 1361 xfer->flags_int.control_stall = 0; 1362 } 1363 1364 /* Check for invalid number of frames */ 1365 if (xfer->nframes > 2) { 1366 /* 1367 * If you need to split a control transfer, you 1368 * have to do one part at a time. Only with 1369 * non-control transfers you can do multiple 1370 * parts a time. 
1371 */ 1372 DPRINTFN(0, "Too many frames: %u\n", 1373 (unsigned int)xfer->nframes); 1374 goto error; 1375 } 1376 1377 /* 1378 * Check if there is a control 1379 * transfer in progress: 1380 */ 1381 if (xfer->flags_int.control_act) { 1382 1383 if (xfer->flags_int.control_hdr) { 1384 1385 /* clear send header flag */ 1386 1387 xfer->flags_int.control_hdr = 0; 1388 1389 /* setup control transfer */ 1390 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) { 1391 usbd_control_transfer_init(xfer); 1392 } 1393 } 1394 /* get data length */ 1395 1396 len = xfer->sumlen; 1397 1398 } else { 1399 1400 /* the size of the SETUP structure is hardcoded ! */ 1401 1402 if (xfer->frlengths[0] != sizeof(struct usb_device_request)) { 1403 DPRINTFN(0, "Wrong framelength %u != %zu\n", 1404 xfer->frlengths[0], sizeof(struct 1405 usb_device_request)); 1406 goto error; 1407 } 1408 /* check USB mode */ 1409 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) { 1410 1411 /* check number of frames */ 1412 if (xfer->nframes != 1) { 1413 /* 1414 * We need to receive the setup 1415 * message first so that we know the 1416 * data direction! 1417 */ 1418 DPRINTF("Misconfigured transfer\n"); 1419 goto error; 1420 } 1421 /* 1422 * Set a dummy "control_rem" value. This 1423 * variable will be overwritten later by a 1424 * call to "usbd_control_transfer_init()" ! 1425 */ 1426 xfer->flags_int.control_rem = 0xFFFF; 1427 } else { 1428 1429 /* setup "endpoint" and "control_rem" */ 1430 1431 usbd_control_transfer_init(xfer); 1432 } 1433 1434 /* set transfer-header flag */ 1435 1436 xfer->flags_int.control_hdr = 1; 1437 1438 /* get data length */ 1439 1440 len = (xfer->sumlen - sizeof(struct usb_device_request)); 1441 } 1442 1443 /* check if there is a length mismatch */ 1444 1445 if (len > xfer->flags_int.control_rem) { 1446 DPRINTFN(0, "Length (%d) greater than " 1447 "remaining length (%d)\n", len, 1448 xfer->flags_int.control_rem); 1449 goto error; 1450 } 1451 /* check if we are doing a short transfer */ 1452 1453 if (xfer->flags.force_short_xfer) { 1454 xfer->flags_int.control_rem = 0; 1455 } else { 1456 if ((len != xfer->max_data_length) && 1457 (len != xfer->flags_int.control_rem) && 1458 (xfer->nframes != 1)) { 1459 DPRINTFN(0, "Short control transfer without " 1460 "force_short_xfer set\n"); 1461 goto error; 1462 } 1463 xfer->flags_int.control_rem -= len; 1464 } 1465 1466 /* the status part is executed when "control_act" is 0 */ 1467 1468 if ((xfer->flags_int.control_rem > 0) || 1469 (xfer->flags.manual_status)) { 1470 /* don't execute the STATUS stage yet */ 1471 xfer->flags_int.control_act = 1; 1472 1473 /* sanity check */ 1474 if ((!xfer->flags_int.control_hdr) && 1475 (xfer->nframes == 1)) { 1476 /* 1477 * This is not a valid operation! 1478 */ 1479 DPRINTFN(0, "Invalid parameter " 1480 "combination\n"); 1481 goto error; 1482 } 1483 } else { 1484 /* time to execute the STATUS stage */ 1485 xfer->flags_int.control_act = 0; 1486 } 1487 return (0); /* success */ 1488 1489 error: 1490 return (1); /* failure */ 1491 } 1492 1493 /*------------------------------------------------------------------------* 1494 * usbd_transfer_submit - start USB hardware for the given transfer 1495 * 1496 * This function should only be called from the USB callback. 
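 *
 * A simplified, hypothetical callback typically submits the transfer
 * from its setup state roughly like this:
 *
 *	case USB_ST_SETUP:
 *		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
 *		usbd_transfer_submit(xfer);
 *		break;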
1497 *------------------------------------------------------------------------*/ 1498 void 1499 usbd_transfer_submit(struct usb_xfer *xfer) 1500 { 1501 struct usb_xfer_root *info; 1502 struct usb_bus *bus; 1503 usb_frcount_t x; 1504 1505 info = xfer->xroot; 1506 bus = info->bus; 1507 1508 DPRINTF("xfer=%p, endpoint=%p, nframes=%d, dir=%s\n", 1509 xfer, xfer->endpoint, xfer->nframes, USB_GET_DATA_ISREAD(xfer) ? 1510 "read" : "write"); 1511 1512 #ifdef USB_DEBUG 1513 if (USB_DEBUG_VAR > 0) { 1514 USB_BUS_LOCK(bus); 1515 1516 usb_dump_endpoint(xfer->endpoint); 1517 1518 USB_BUS_UNLOCK(bus); 1519 } 1520 #endif 1521 1522 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 1523 USB_BUS_LOCK_ASSERT(bus, MA_NOTOWNED); 1524 1525 /* Only open the USB transfer once! */ 1526 if (!xfer->flags_int.open) { 1527 xfer->flags_int.open = 1; 1528 1529 DPRINTF("open\n"); 1530 1531 USB_BUS_LOCK(bus); 1532 (xfer->endpoint->methods->open) (xfer); 1533 USB_BUS_UNLOCK(bus); 1534 } 1535 /* set "transferring" flag */ 1536 xfer->flags_int.transferring = 1; 1537 1538 #if USB_HAVE_POWERD 1539 /* increment power reference */ 1540 usbd_transfer_power_ref(xfer, 1); 1541 #endif 1542 /* 1543 * Check if the transfer is waiting on a queue, most 1544 * frequently the "done_q": 1545 */ 1546 if (xfer->wait_queue) { 1547 USB_BUS_LOCK(bus); 1548 usbd_transfer_dequeue(xfer); 1549 USB_BUS_UNLOCK(bus); 1550 } 1551 /* clear "did_dma_delay" flag */ 1552 xfer->flags_int.did_dma_delay = 0; 1553 1554 /* clear "did_close" flag */ 1555 xfer->flags_int.did_close = 0; 1556 1557 #if USB_HAVE_BUSDMA 1558 /* clear "bdma_setup" flag */ 1559 xfer->flags_int.bdma_setup = 0; 1560 #endif 1561 /* by default we cannot cancel any USB transfer immediately */ 1562 xfer->flags_int.can_cancel_immed = 0; 1563 1564 /* clear lengths and frame counts by default */ 1565 xfer->sumlen = 0; 1566 xfer->actlen = 0; 1567 xfer->aframes = 0; 1568 1569 /* clear any previous errors */ 1570 xfer->error = 0; 1571 1572 /* Check if the device is still alive */ 1573 if (info->udev->state < USB_STATE_POWERED) { 1574 USB_BUS_LOCK(bus); 1575 /* 1576 * Must return cancelled error code else 1577 * device drivers can hang. 
1578 */ 1579 usbd_transfer_done(xfer, USB_ERR_CANCELLED); 1580 USB_BUS_UNLOCK(bus); 1581 return; 1582 } 1583 1584 /* sanity check */ 1585 if (xfer->nframes == 0) { 1586 if (xfer->flags.stall_pipe) { 1587 /* 1588 * Special case - want to stall without transferring 1589 * any data: 1590 */ 1591 DPRINTF("xfer=%p nframes=0: stall " 1592 "or clear stall!\n", xfer); 1593 USB_BUS_LOCK(bus); 1594 xfer->flags_int.can_cancel_immed = 1; 1595 /* start the transfer */ 1596 usb_command_wrapper(&xfer->endpoint-> 1597 endpoint_q[xfer->stream_id], xfer); 1598 USB_BUS_UNLOCK(bus); 1599 return; 1600 } 1601 USB_BUS_LOCK(bus); 1602 usbd_transfer_done(xfer, USB_ERR_INVAL); 1603 USB_BUS_UNLOCK(bus); 1604 return; 1605 } 1606 /* compute some variables */ 1607 1608 for (x = 0; x != xfer->nframes; x++) { 1609 /* make a copy of the frlenghts[] */ 1610 xfer->frlengths[x + xfer->max_frame_count] = xfer->frlengths[x]; 1611 /* compute total transfer length */ 1612 xfer->sumlen += xfer->frlengths[x]; 1613 if (xfer->sumlen < xfer->frlengths[x]) { 1614 /* length wrapped around */ 1615 USB_BUS_LOCK(bus); 1616 usbd_transfer_done(xfer, USB_ERR_INVAL); 1617 USB_BUS_UNLOCK(bus); 1618 return; 1619 } 1620 } 1621 1622 /* clear some internal flags */ 1623 1624 xfer->flags_int.short_xfer_ok = 0; 1625 xfer->flags_int.short_frames_ok = 0; 1626 1627 /* check if this is a control transfer */ 1628 1629 if (xfer->flags_int.control_xfr) { 1630 1631 if (usbd_setup_ctrl_transfer(xfer)) { 1632 USB_BUS_LOCK(bus); 1633 usbd_transfer_done(xfer, USB_ERR_STALLED); 1634 USB_BUS_UNLOCK(bus); 1635 return; 1636 } 1637 } 1638 /* 1639 * Setup filtered version of some transfer flags, 1640 * in case of data read direction 1641 */ 1642 if (USB_GET_DATA_ISREAD(xfer)) { 1643 1644 if (xfer->flags.short_frames_ok) { 1645 xfer->flags_int.short_xfer_ok = 1; 1646 xfer->flags_int.short_frames_ok = 1; 1647 } else if (xfer->flags.short_xfer_ok) { 1648 xfer->flags_int.short_xfer_ok = 1; 1649 1650 /* check for control transfer */ 1651 if (xfer->flags_int.control_xfr) { 1652 /* 1653 * 1) Control transfers do not support 1654 * reception of multiple short USB 1655 * frames in host mode and device side 1656 * mode, with exception of: 1657 * 1658 * 2) Due to sometimes buggy device 1659 * side firmware we need to do a 1660 * STATUS stage in case of short 1661 * control transfers in USB host mode. 1662 * The STATUS stage then becomes the 1663 * "alt_next" to the DATA stage. 
1664 */ 1665 xfer->flags_int.short_frames_ok = 1; 1666 } 1667 } 1668 } 1669 /* 1670 * Check if BUS-DMA support is enabled and try to load virtual 1671 * buffers into DMA, if any: 1672 */ 1673 #if USB_HAVE_BUSDMA 1674 if (xfer->flags_int.bdma_enable) { 1675 /* insert the USB transfer last in the BUS-DMA queue */ 1676 usb_command_wrapper(&xfer->xroot->dma_q, xfer); 1677 return; 1678 } 1679 #endif 1680 /* 1681 * Enter the USB transfer into the Host Controller or 1682 * Device Controller schedule: 1683 */ 1684 usbd_pipe_enter(xfer); 1685 } 1686 1687 /*------------------------------------------------------------------------* 1688 * usbd_pipe_enter - factored out code 1689 *------------------------------------------------------------------------*/ 1690 void 1691 usbd_pipe_enter(struct usb_xfer *xfer) 1692 { 1693 struct usb_endpoint *ep; 1694 1695 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 1696 1697 USB_BUS_LOCK(xfer->xroot->bus); 1698 1699 ep = xfer->endpoint; 1700 1701 DPRINTF("enter\n"); 1702 1703 /* the transfer can now be cancelled */ 1704 xfer->flags_int.can_cancel_immed = 1; 1705 1706 /* enter the transfer */ 1707 (ep->methods->enter) (xfer); 1708 1709 /* check for transfer error */ 1710 if (xfer->error) { 1711 /* some error has happened */ 1712 usbd_transfer_done(xfer, 0); 1713 USB_BUS_UNLOCK(xfer->xroot->bus); 1714 return; 1715 } 1716 1717 /* start the transfer */ 1718 usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], xfer); 1719 USB_BUS_UNLOCK(xfer->xroot->bus); 1720 } 1721 1722 /*------------------------------------------------------------------------* 1723 * usbd_transfer_start - start an USB transfer 1724 * 1725 * NOTE: Calling this function more than one time will only 1726 * result in a single transfer start, until the USB transfer 1727 * completes. 1728 *------------------------------------------------------------------------*/ 1729 void 1730 usbd_transfer_start(struct usb_xfer *xfer) 1731 { 1732 if (xfer == NULL) { 1733 /* transfer is gone */ 1734 return; 1735 } 1736 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 1737 1738 /* mark the USB transfer started */ 1739 1740 if (!xfer->flags_int.started) { 1741 /* lock the BUS lock to avoid races updating flags_int */ 1742 USB_BUS_LOCK(xfer->xroot->bus); 1743 xfer->flags_int.started = 1; 1744 USB_BUS_UNLOCK(xfer->xroot->bus); 1745 } 1746 /* check if the USB transfer callback is already transferring */ 1747 1748 if (xfer->flags_int.transferring) { 1749 return; 1750 } 1751 USB_BUS_LOCK(xfer->xroot->bus); 1752 /* call the USB transfer callback */ 1753 usbd_callback_ss_done_defer(xfer); 1754 USB_BUS_UNLOCK(xfer->xroot->bus); 1755 } 1756 1757 /*------------------------------------------------------------------------* 1758 * usbd_transfer_stop - stop an USB transfer 1759 * 1760 * NOTE: Calling this function more than one time will only 1761 * result in a single transfer stop. 1762 * NOTE: When this function returns it is not safe to free nor 1763 * reuse any DMA buffers. See "usbd_transfer_drain()". 
1764 *------------------------------------------------------------------------*/ 1765 void 1766 usbd_transfer_stop(struct usb_xfer *xfer) 1767 { 1768 struct usb_endpoint *ep; 1769 1770 if (xfer == NULL) { 1771 /* transfer is gone */ 1772 return; 1773 } 1774 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 1775 1776 /* check if the USB transfer was ever opened */ 1777 1778 if (!xfer->flags_int.open) { 1779 if (xfer->flags_int.started) { 1780 /* nothing to do except clearing the "started" flag */ 1781 /* lock the BUS lock to avoid races updating flags_int */ 1782 USB_BUS_LOCK(xfer->xroot->bus); 1783 xfer->flags_int.started = 0; 1784 USB_BUS_UNLOCK(xfer->xroot->bus); 1785 } 1786 return; 1787 } 1788 /* try to stop the current USB transfer */ 1789 1790 USB_BUS_LOCK(xfer->xroot->bus); 1791 /* override any previous error */ 1792 xfer->error = USB_ERR_CANCELLED; 1793 1794 /* 1795 * Clear "open" and "started" when both private and USB lock 1796 * is locked so that we don't get a race updating "flags_int" 1797 */ 1798 xfer->flags_int.open = 0; 1799 xfer->flags_int.started = 0; 1800 1801 /* 1802 * Check if we can cancel the USB transfer immediately. 1803 */ 1804 if (xfer->flags_int.transferring) { 1805 if (xfer->flags_int.can_cancel_immed && 1806 (!xfer->flags_int.did_close)) { 1807 DPRINTF("close\n"); 1808 /* 1809 * The following will lead to an USB_ERR_CANCELLED 1810 * error code being passed to the USB callback. 1811 */ 1812 (xfer->endpoint->methods->close) (xfer); 1813 /* only close once */ 1814 xfer->flags_int.did_close = 1; 1815 } else { 1816 /* need to wait for the next done callback */ 1817 } 1818 } else { 1819 DPRINTF("close\n"); 1820 1821 /* close here and now */ 1822 (xfer->endpoint->methods->close) (xfer); 1823 1824 /* 1825 * Any additional DMA delay is done by 1826 * "usbd_transfer_unsetup()". 1827 */ 1828 1829 /* 1830 * Special case. Check if we need to restart a blocked 1831 * endpoint. 1832 */ 1833 ep = xfer->endpoint; 1834 1835 /* 1836 * If the current USB transfer is completing we need 1837 * to start the next one: 1838 */ 1839 if (ep->endpoint_q[xfer->stream_id].curr == xfer) { 1840 usb_command_wrapper( 1841 &ep->endpoint_q[xfer->stream_id], NULL); 1842 } 1843 } 1844 1845 USB_BUS_UNLOCK(xfer->xroot->bus); 1846 } 1847 1848 /*------------------------------------------------------------------------* 1849 * usbd_transfer_pending 1850 * 1851 * This function will check if an USB transfer is pending which is a 1852 * little bit complicated! 1853 * Return values: 1854 * 0: Not pending 1855 * 1: Pending: The USB transfer will receive a callback in the future. 
1856 *------------------------------------------------------------------------*/ 1857 uint8_t 1858 usbd_transfer_pending(struct usb_xfer *xfer) 1859 { 1860 struct usb_xfer_root *info; 1861 struct usb_xfer_queue *pq; 1862 1863 if (xfer == NULL) { 1864 /* transfer is gone */ 1865 return (0); 1866 } 1867 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 1868 1869 if (xfer->flags_int.transferring) { 1870 /* trivial case */ 1871 return (1); 1872 } 1873 USB_BUS_LOCK(xfer->xroot->bus); 1874 if (xfer->wait_queue) { 1875 /* we are waiting on a queue somewhere */ 1876 USB_BUS_UNLOCK(xfer->xroot->bus); 1877 return (1); 1878 } 1879 info = xfer->xroot; 1880 pq = &info->done_q; 1881 1882 if (pq->curr == xfer) { 1883 /* we are currently scheduled for callback */ 1884 USB_BUS_UNLOCK(xfer->xroot->bus); 1885 return (1); 1886 } 1887 /* we are not pending */ 1888 USB_BUS_UNLOCK(xfer->xroot->bus); 1889 return (0); 1890 } 1891 1892 /*------------------------------------------------------------------------* 1893 * usbd_transfer_drain 1894 * 1895 * This function will stop the USB transfer and wait for any 1896 * additional BUS-DMA and HW-DMA operations to complete. Buffers that 1897 * are loaded into DMA can safely be freed or reused after that this 1898 * function has returned. 1899 *------------------------------------------------------------------------*/ 1900 void 1901 usbd_transfer_drain(struct usb_xfer *xfer) 1902 { 1903 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 1904 "usbd_transfer_drain can sleep!"); 1905 1906 if (xfer == NULL) { 1907 /* transfer is gone */ 1908 return; 1909 } 1910 if (xfer->xroot->xfer_mtx != &Giant) { 1911 USB_XFER_LOCK_ASSERT(xfer, MA_NOTOWNED); 1912 } 1913 USB_XFER_LOCK(xfer); 1914 1915 usbd_transfer_stop(xfer); 1916 1917 while (usbd_transfer_pending(xfer) || 1918 xfer->flags_int.doing_callback) { 1919 1920 /* 1921 * It is allowed that the callback can drop its 1922 * transfer mutex. In that case checking only 1923 * "usbd_transfer_pending()" is not enough to tell if 1924 * the USB transfer is fully drained. We also need to 1925 * check the internal "doing_callback" flag. 1926 */ 1927 xfer->flags_int.draining = 1; 1928 1929 /* 1930 * Wait until the current outstanding USB 1931 * transfer is complete ! 1932 */ 1933 cv_wait(&xfer->xroot->cv_drain, xfer->xroot->xfer_mtx); 1934 } 1935 USB_XFER_UNLOCK(xfer); 1936 } 1937 1938 struct usb_page_cache * 1939 usbd_xfer_get_frame(struct usb_xfer *xfer, usb_frcount_t frindex) 1940 { 1941 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); 1942 1943 return (&xfer->frbuffers[frindex]); 1944 } 1945 1946 /*------------------------------------------------------------------------* 1947 * usbd_xfer_get_fps_shift 1948 * 1949 * The following function is only useful for isochronous transfers. It 1950 * returns how many times the frame execution rate has been shifted 1951 * down. 
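 * For example, on a high speed bus a return value of 3 means that one
 * isochronous frame is scheduled every 2**3 = 8 microframes.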
1952 * 1953 * Return value: 1954 * Success: 0..3 1955 * Failure: 0 1956 *------------------------------------------------------------------------*/ 1957 uint8_t 1958 usbd_xfer_get_fps_shift(struct usb_xfer *xfer) 1959 { 1960 return (xfer->fps_shift); 1961 } 1962 1963 usb_frlength_t 1964 usbd_xfer_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex) 1965 { 1966 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); 1967 1968 return (xfer->frlengths[frindex]); 1969 } 1970 1971 /*------------------------------------------------------------------------* 1972 * usbd_xfer_set_frame_data 1973 * 1974 * This function sets the pointer of the buffer that should 1975 * loaded directly into DMA for the given USB frame. Passing "ptr" 1976 * equal to NULL while the corresponding "frlength" is greater 1977 * than zero gives undefined results! 1978 *------------------------------------------------------------------------*/ 1979 void 1980 usbd_xfer_set_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex, 1981 void *ptr, usb_frlength_t len) 1982 { 1983 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); 1984 1985 /* set virtual address to load and length */ 1986 xfer->frbuffers[frindex].buffer = ptr; 1987 usbd_xfer_set_frame_len(xfer, frindex, len); 1988 } 1989 1990 void 1991 usbd_xfer_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex, 1992 void **ptr, int *len) 1993 { 1994 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); 1995 1996 if (ptr != NULL) 1997 *ptr = xfer->frbuffers[frindex].buffer; 1998 if (len != NULL) 1999 *len = xfer->frlengths[frindex]; 2000 } 2001 2002 /*------------------------------------------------------------------------* 2003 * usbd_xfer_old_frame_length 2004 * 2005 * This function returns the framelength of the given frame at the 2006 * time the transfer was submitted. This function can be used to 2007 * compute the starting data pointer of the next isochronous frame 2008 * when an isochronous transfer has completed. 2009 *------------------------------------------------------------------------*/ 2010 usb_frlength_t 2011 usbd_xfer_old_frame_length(struct usb_xfer *xfer, usb_frcount_t frindex) 2012 { 2013 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); 2014 2015 return (xfer->frlengths[frindex + xfer->max_frame_count]); 2016 } 2017 2018 void 2019 usbd_xfer_status(struct usb_xfer *xfer, int *actlen, int *sumlen, int *aframes, 2020 int *nframes) 2021 { 2022 if (actlen != NULL) 2023 *actlen = xfer->actlen; 2024 if (sumlen != NULL) 2025 *sumlen = xfer->sumlen; 2026 if (aframes != NULL) 2027 *aframes = xfer->aframes; 2028 if (nframes != NULL) 2029 *nframes = xfer->nframes; 2030 } 2031 2032 /*------------------------------------------------------------------------* 2033 * usbd_xfer_set_frame_offset 2034 * 2035 * This function sets the frame data buffer offset relative to the beginning 2036 * of the USB DMA buffer allocated for this USB transfer. 
2037 *------------------------------------------------------------------------*/ 2038 void 2039 usbd_xfer_set_frame_offset(struct usb_xfer *xfer, usb_frlength_t offset, 2040 usb_frcount_t frindex) 2041 { 2042 KASSERT(!xfer->flags.ext_buffer, ("Cannot offset data frame " 2043 "when the USB buffer is external\n")); 2044 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); 2045 2046 /* set virtual address to load */ 2047 xfer->frbuffers[frindex].buffer = 2048 USB_ADD_BYTES(xfer->local_buffer, offset); 2049 } 2050 2051 void 2052 usbd_xfer_set_interval(struct usb_xfer *xfer, int i) 2053 { 2054 xfer->interval = i; 2055 } 2056 2057 void 2058 usbd_xfer_set_timeout(struct usb_xfer *xfer, int t) 2059 { 2060 xfer->timeout = t; 2061 } 2062 2063 void 2064 usbd_xfer_set_frames(struct usb_xfer *xfer, usb_frcount_t n) 2065 { 2066 xfer->nframes = n; 2067 } 2068 2069 usb_frcount_t 2070 usbd_xfer_max_frames(struct usb_xfer *xfer) 2071 { 2072 return (xfer->max_frame_count); 2073 } 2074 2075 usb_frlength_t 2076 usbd_xfer_max_len(struct usb_xfer *xfer) 2077 { 2078 return (xfer->max_data_length); 2079 } 2080 2081 usb_frlength_t 2082 usbd_xfer_max_framelen(struct usb_xfer *xfer) 2083 { 2084 return (xfer->max_frame_size); 2085 } 2086 2087 void 2088 usbd_xfer_set_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex, 2089 usb_frlength_t len) 2090 { 2091 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); 2092 2093 xfer->frlengths[frindex] = len; 2094 } 2095 2096 /*------------------------------------------------------------------------* 2097 * usb_callback_proc - factored out code 2098 * 2099 * This function performs USB callbacks. 2100 *------------------------------------------------------------------------*/ 2101 static void 2102 usb_callback_proc(struct usb_proc_msg *_pm) 2103 { 2104 struct usb_done_msg *pm = (void *)_pm; 2105 struct usb_xfer_root *info = pm->xroot; 2106 2107 /* Change locking order */ 2108 USB_BUS_UNLOCK(info->bus); 2109 2110 /* 2111 * We exploit the fact that the mutex is the same for all 2112 * callbacks that will be called from this thread: 2113 */ 2114 mtx_lock(info->xfer_mtx); 2115 USB_BUS_LOCK(info->bus); 2116 2117 /* Continue where we lost track */ 2118 usb_command_wrapper(&info->done_q, 2119 info->done_q.curr); 2120 2121 mtx_unlock(info->xfer_mtx); 2122 } 2123 2124 /*------------------------------------------------------------------------* 2125 * usbd_callback_ss_done_defer 2126 * 2127 * This function will defer the start, stop and done callback to the 2128 * correct thread. 2129 *------------------------------------------------------------------------*/ 2130 static void 2131 usbd_callback_ss_done_defer(struct usb_xfer *xfer) 2132 { 2133 struct usb_xfer_root *info = xfer->xroot; 2134 struct usb_xfer_queue *pq = &info->done_q; 2135 2136 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED); 2137 2138 if (pq->curr != xfer) { 2139 usbd_transfer_enqueue(pq, xfer); 2140 } 2141 if (!pq->recurse_1) { 2142 2143 /* 2144 * We have to postpone the callback due to the fact we 2145 * will have a Lock Order Reversal, LOR, if we try to 2146 * proceed ! 2147 */ 2148 if (usb_proc_msignal(info->done_p, 2149 &info->done_m[0], &info->done_m[1])) { 2150 /* ignore */ 2151 } 2152 } else { 2153 /* clear second recurse flag */ 2154 pq->recurse_2 = 0; 2155 } 2156 return; 2157 2158 } 2159 2160 /*------------------------------------------------------------------------* 2161 * usbd_callback_wrapper 2162 * 2163 * This is a wrapper for USB callbacks. 
This wrapper does some 2164 * auto-magic things like figuring out if we can call the callback 2165 * directly from the current context or if we need to wakeup the 2166 * interrupt process. 2167 *------------------------------------------------------------------------*/ 2168 static void 2169 usbd_callback_wrapper(struct usb_xfer_queue *pq) 2170 { 2171 struct usb_xfer *xfer = pq->curr; 2172 struct usb_xfer_root *info = xfer->xroot; 2173 2174 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED); 2175 if (!mtx_owned(info->xfer_mtx) && !SCHEDULER_STOPPED()) { 2176 /* 2177 * Cases that end up here: 2178 * 2179 * 5) HW interrupt done callback or other source. 2180 */ 2181 DPRINTFN(3, "case 5\n"); 2182 2183 /* 2184 * We have to postpone the callback due to the fact we 2185 * will have a Lock Order Reversal, LOR, if we try to 2186 * proceed ! 2187 */ 2188 if (usb_proc_msignal(info->done_p, 2189 &info->done_m[0], &info->done_m[1])) { 2190 /* ignore */ 2191 } 2192 return; 2193 } 2194 /* 2195 * Cases that end up here: 2196 * 2197 * 1) We are starting a transfer 2198 * 2) We are prematurely calling back a transfer 2199 * 3) We are stopping a transfer 2200 * 4) We are doing an ordinary callback 2201 */ 2202 DPRINTFN(3, "case 1-4\n"); 2203 /* get next USB transfer in the queue */ 2204 info->done_q.curr = NULL; 2205 2206 /* set flag in case of drain */ 2207 xfer->flags_int.doing_callback = 1; 2208 2209 USB_BUS_UNLOCK(info->bus); 2210 USB_BUS_LOCK_ASSERT(info->bus, MA_NOTOWNED); 2211 2212 /* set correct USB state for callback */ 2213 if (!xfer->flags_int.transferring) { 2214 xfer->usb_state = USB_ST_SETUP; 2215 if (!xfer->flags_int.started) { 2216 /* we got stopped before we even got started */ 2217 USB_BUS_LOCK(info->bus); 2218 goto done; 2219 } 2220 } else { 2221 2222 if (usbd_callback_wrapper_sub(xfer)) { 2223 /* the callback has been deferred */ 2224 USB_BUS_LOCK(info->bus); 2225 goto done; 2226 } 2227 #if USB_HAVE_POWERD 2228 /* decrement power reference */ 2229 usbd_transfer_power_ref(xfer, -1); 2230 #endif 2231 xfer->flags_int.transferring = 0; 2232 2233 if (xfer->error) { 2234 xfer->usb_state = USB_ST_ERROR; 2235 } else { 2236 /* set transferred state */ 2237 xfer->usb_state = USB_ST_TRANSFERRED; 2238 #if USB_HAVE_BUSDMA 2239 /* sync DMA memory, if any */ 2240 if (xfer->flags_int.bdma_enable && 2241 (!xfer->flags_int.bdma_no_post_sync)) { 2242 usb_bdma_post_sync(xfer); 2243 } 2244 #endif 2245 } 2246 } 2247 2248 #if USB_HAVE_PF 2249 if (xfer->usb_state != USB_ST_SETUP) 2250 usbpf_xfertap(xfer, USBPF_XFERTAP_DONE); 2251 #endif 2252 /* call processing routine */ 2253 (xfer->callback) (xfer, xfer->error); 2254 2255 /* pickup the USB mutex again */ 2256 USB_BUS_LOCK(info->bus); 2257 2258 /* 2259 * Check if we got started after that we got cancelled, but 2260 * before we managed to do the callback. 2261 */ 2262 if ((!xfer->flags_int.open) && 2263 (xfer->flags_int.started) && 2264 (xfer->usb_state == USB_ST_ERROR)) { 2265 /* clear flag in case of drain */ 2266 xfer->flags_int.doing_callback = 0; 2267 /* try to loop, but not recursivly */ 2268 usb_command_wrapper(&info->done_q, xfer); 2269 return; 2270 } 2271 2272 done: 2273 /* clear flag in case of drain */ 2274 xfer->flags_int.doing_callback = 0; 2275 2276 /* 2277 * Check if we are draining. 
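	 * If so, and the transfer is no longer transferring, wake up
	 * "usbd_transfer_drain()", which is sleeping on "cv_drain".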
2278 	 */
2279 	if (xfer->flags_int.draining &&
2280 	    (!xfer->flags_int.transferring)) {
2281 		/* "usbd_transfer_drain()" is waiting for end of transfer */
2282 		xfer->flags_int.draining = 0;
2283 		cv_broadcast(&info->cv_drain);
2284 	}
2285
2286 	/* do the next callback, if any */
2287 	usb_command_wrapper(&info->done_q,
2288 	    info->done_q.curr);
2289 }
2290
2291 /*------------------------------------------------------------------------*
2292  *	usb_dma_delay_done_cb
2293  *
2294  * This function is called when the DMA delay has been executed, and
2295  * will make sure that the callback is called to complete the USB
2296  * transfer. This code path is usually only used when there is an USB
2297  * error like USB_ERR_CANCELLED.
2298  *------------------------------------------------------------------------*/
2299 void
2300 usb_dma_delay_done_cb(struct usb_xfer *xfer)
2301 {
2302 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2303
2304 	DPRINTFN(3, "Completed %p\n", xfer);
2305
2306 	/* queue callback for execution, again */
2307 	usbd_transfer_done(xfer, 0);
2308 }
2309
2310 /*------------------------------------------------------------------------*
2311  *	usbd_transfer_dequeue
2312  *
2313  *  - This function is used to remove an USB transfer from a USB
2314  *  transfer queue.
2315  *
2316  *  - This function can be called multiple times in a row.
2317  *------------------------------------------------------------------------*/
2318 void
2319 usbd_transfer_dequeue(struct usb_xfer *xfer)
2320 {
2321 	struct usb_xfer_queue *pq;
2322
2323 	pq = xfer->wait_queue;
2324 	if (pq) {
2325 		TAILQ_REMOVE(&pq->head, xfer, wait_entry);
2326 		xfer->wait_queue = NULL;
2327 	}
2328 }
2329
2330 /*------------------------------------------------------------------------*
2331  *	usbd_transfer_enqueue
2332  *
2333  *  - This function is used to insert an USB transfer into a USB
2334  *  transfer queue.
2335  *
2336  *  - This function can be called multiple times in a row.
2337  *------------------------------------------------------------------------*/
2338 void
2339 usbd_transfer_enqueue(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2340 {
2341 	/*
2342 	 * Insert the USB transfer into the queue, if it is not
2343 	 * already on a USB transfer queue:
2344 	 */
2345 	if (xfer->wait_queue == NULL) {
2346 		xfer->wait_queue = pq;
2347 		TAILQ_INSERT_TAIL(&pq->head, xfer, wait_entry);
2348 	}
2349 }
2350
2351 /*------------------------------------------------------------------------*
2352  *	usbd_transfer_done
2353  *
2354  *  - This function is used to remove an USB transfer from the busdma,
2355  *  pipe or interrupt queue.
2356  *
2357  *  - This function is used to queue the USB transfer on the done
2358  *  queue.
2359  *
2360  *  - This function is used to stop any USB transfer timeouts.
2361  *------------------------------------------------------------------------*/
2362 void
2363 usbd_transfer_done(struct usb_xfer *xfer, usb_error_t error)
2364 {
2365 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2366
2367 	DPRINTF("err=%s\n", usbd_errstr(error));
2368
2369 	/*
2370 	 * If we are not transferring then just return.
2371 	 * This can happen during transfer cancel.
2372 */ 2373 if (!xfer->flags_int.transferring) { 2374 DPRINTF("not transferring\n"); 2375 /* end of control transfer, if any */ 2376 xfer->flags_int.control_act = 0; 2377 return; 2378 } 2379 /* only set transfer error if not already set */ 2380 if (!xfer->error) { 2381 xfer->error = error; 2382 } 2383 /* stop any callouts */ 2384 usb_callout_stop(&xfer->timeout_handle); 2385 2386 /* 2387 * If we are waiting on a queue, just remove the USB transfer 2388 * from the queue, if any. We should have the required locks 2389 * locked to do the remove when this function is called. 2390 */ 2391 usbd_transfer_dequeue(xfer); 2392 2393 #if USB_HAVE_BUSDMA 2394 if (mtx_owned(xfer->xroot->xfer_mtx)) { 2395 struct usb_xfer_queue *pq; 2396 2397 /* 2398 * If the private USB lock is not locked, then we assume 2399 * that the BUS-DMA load stage has been passed: 2400 */ 2401 pq = &xfer->xroot->dma_q; 2402 2403 if (pq->curr == xfer) { 2404 /* start the next BUS-DMA load, if any */ 2405 usb_command_wrapper(pq, NULL); 2406 } 2407 } 2408 #endif 2409 /* keep some statistics */ 2410 if (xfer->error) { 2411 xfer->xroot->bus->stats_err.uds_requests 2412 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++; 2413 } else { 2414 xfer->xroot->bus->stats_ok.uds_requests 2415 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++; 2416 } 2417 2418 /* call the USB transfer callback */ 2419 usbd_callback_ss_done_defer(xfer); 2420 } 2421 2422 /*------------------------------------------------------------------------* 2423 * usbd_transfer_start_cb 2424 * 2425 * This function is called to start the USB transfer when 2426 * "xfer->interval" is greater than zero, and and the endpoint type is 2427 * BULK or CONTROL. 2428 *------------------------------------------------------------------------*/ 2429 static void 2430 usbd_transfer_start_cb(void *arg) 2431 { 2432 struct usb_xfer *xfer = arg; 2433 struct usb_endpoint *ep = xfer->endpoint; 2434 2435 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED); 2436 2437 DPRINTF("start\n"); 2438 2439 #if USB_HAVE_PF 2440 usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT); 2441 #endif 2442 2443 /* the transfer can now be cancelled */ 2444 xfer->flags_int.can_cancel_immed = 1; 2445 2446 /* start USB transfer, if no error */ 2447 if (xfer->error == 0) 2448 (ep->methods->start) (xfer); 2449 2450 /* check for transfer error */ 2451 if (xfer->error) { 2452 /* some error has happened */ 2453 usbd_transfer_done(xfer, 0); 2454 } 2455 } 2456 2457 /*------------------------------------------------------------------------* 2458 * usbd_xfer_set_stall 2459 * 2460 * This function is used to set the stall flag outside the 2461 * callback. This function is NULL safe. 2462 *------------------------------------------------------------------------*/ 2463 void 2464 usbd_xfer_set_stall(struct usb_xfer *xfer) 2465 { 2466 if (xfer == NULL) { 2467 /* tearing down */ 2468 return; 2469 } 2470 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 2471 2472 /* avoid any races by locking the USB mutex */ 2473 USB_BUS_LOCK(xfer->xroot->bus); 2474 xfer->flags.stall_pipe = 1; 2475 USB_BUS_UNLOCK(xfer->xroot->bus); 2476 } 2477 2478 int 2479 usbd_xfer_is_stalled(struct usb_xfer *xfer) 2480 { 2481 return (xfer->endpoint->is_stalled); 2482 } 2483 2484 /*------------------------------------------------------------------------* 2485 * usbd_transfer_clear_stall 2486 * 2487 * This function is used to clear the stall flag outside the 2488 * callback. This function is NULL safe. 
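 *
 * Sketch for illustration (not code from this file): together with
 * "usbd_xfer_set_stall()" above, a host side driver typically
 * requests stall handling and restarts the transfer while holding
 * the transfer mutex:
 *
 *	usbd_xfer_set_stall(xfer);
 *	usbd_transfer_start(xfer);
 *
 * "usbd_transfer_clear_stall()" can be used to withdraw such a
 * request again before the transfer is started. The "stall_pipe"
 * flag itself is consumed by "usbd_pipe_start()" below.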
2489 *------------------------------------------------------------------------*/ 2490 void 2491 usbd_transfer_clear_stall(struct usb_xfer *xfer) 2492 { 2493 if (xfer == NULL) { 2494 /* tearing down */ 2495 return; 2496 } 2497 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 2498 2499 /* avoid any races by locking the USB mutex */ 2500 USB_BUS_LOCK(xfer->xroot->bus); 2501 2502 xfer->flags.stall_pipe = 0; 2503 2504 USB_BUS_UNLOCK(xfer->xroot->bus); 2505 } 2506 2507 /*------------------------------------------------------------------------* 2508 * usbd_pipe_start 2509 * 2510 * This function is used to add an USB transfer to the pipe transfer list. 2511 *------------------------------------------------------------------------*/ 2512 void 2513 usbd_pipe_start(struct usb_xfer_queue *pq) 2514 { 2515 struct usb_endpoint *ep; 2516 struct usb_xfer *xfer; 2517 uint8_t type; 2518 2519 xfer = pq->curr; 2520 ep = xfer->endpoint; 2521 2522 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED); 2523 2524 /* 2525 * If the endpoint is already stalled we do nothing ! 2526 */ 2527 if (ep->is_stalled) { 2528 return; 2529 } 2530 /* 2531 * Check if we are supposed to stall the endpoint: 2532 */ 2533 if (xfer->flags.stall_pipe) { 2534 struct usb_device *udev; 2535 struct usb_xfer_root *info; 2536 2537 /* clear stall command */ 2538 xfer->flags.stall_pipe = 0; 2539 2540 /* get pointer to USB device */ 2541 info = xfer->xroot; 2542 udev = info->udev; 2543 2544 /* 2545 * Only stall BULK and INTERRUPT endpoints. 2546 */ 2547 type = (ep->edesc->bmAttributes & UE_XFERTYPE); 2548 if ((type == UE_BULK) || 2549 (type == UE_INTERRUPT)) { 2550 uint8_t did_stall; 2551 2552 did_stall = 1; 2553 2554 if (udev->flags.usb_mode == USB_MODE_DEVICE) { 2555 (udev->bus->methods->set_stall) ( 2556 udev, ep, &did_stall); 2557 } else if (udev->ctrl_xfer[1]) { 2558 info = udev->ctrl_xfer[1]->xroot; 2559 usb_proc_msignal( 2560 &info->bus->non_giant_callback_proc, 2561 &udev->cs_msg[0], &udev->cs_msg[1]); 2562 } else { 2563 /* should not happen */ 2564 DPRINTFN(0, "No stall handler\n"); 2565 } 2566 /* 2567 * Check if we should stall. Some USB hardware 2568 * handles set- and clear-stall in hardware. 2569 */ 2570 if (did_stall) { 2571 /* 2572 * The transfer will be continued when 2573 * the clear-stall control endpoint 2574 * message is received. 2575 */ 2576 ep->is_stalled = 1; 2577 return; 2578 } 2579 } else if (type == UE_ISOCHRONOUS) { 2580 2581 /* 2582 * Make sure any FIFO overflow or other FIFO 2583 * error conditions go away by resetting the 2584 * endpoint FIFO through the clear stall 2585 * method. 2586 */ 2587 if (udev->flags.usb_mode == USB_MODE_DEVICE) { 2588 (udev->bus->methods->clear_stall) (udev, ep); 2589 } 2590 } 2591 } 2592 /* Set or clear stall complete - special case */ 2593 if (xfer->nframes == 0) { 2594 /* we are complete */ 2595 xfer->aframes = 0; 2596 usbd_transfer_done(xfer, 0); 2597 return; 2598 } 2599 /* 2600 * Handled cases: 2601 * 2602 * 1) Start the first transfer queued. 2603 * 2604 * 2) Re-start the current USB transfer. 
2605 */ 2606 /* 2607 * Check if there should be any 2608 * pre transfer start delay: 2609 */ 2610 if (xfer->interval > 0) { 2611 type = (ep->edesc->bmAttributes & UE_XFERTYPE); 2612 if ((type == UE_BULK) || 2613 (type == UE_CONTROL)) { 2614 usbd_transfer_timeout_ms(xfer, 2615 &usbd_transfer_start_cb, 2616 xfer->interval); 2617 return; 2618 } 2619 } 2620 DPRINTF("start\n"); 2621 2622 #if USB_HAVE_PF 2623 usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT); 2624 #endif 2625 /* the transfer can now be cancelled */ 2626 xfer->flags_int.can_cancel_immed = 1; 2627 2628 /* start USB transfer, if no error */ 2629 if (xfer->error == 0) 2630 (ep->methods->start) (xfer); 2631 2632 /* check for transfer error */ 2633 if (xfer->error) { 2634 /* some error has happened */ 2635 usbd_transfer_done(xfer, 0); 2636 } 2637 } 2638 2639 /*------------------------------------------------------------------------* 2640 * usbd_transfer_timeout_ms 2641 * 2642 * This function is used to setup a timeout on the given USB 2643 * transfer. If the timeout has been deferred the callback given by 2644 * "cb" will get called after "ms" milliseconds. 2645 *------------------------------------------------------------------------*/ 2646 void 2647 usbd_transfer_timeout_ms(struct usb_xfer *xfer, 2648 void (*cb) (void *arg), usb_timeout_t ms) 2649 { 2650 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED); 2651 2652 /* defer delay */ 2653 usb_callout_reset(&xfer->timeout_handle, 2654 USB_MS_TO_TICKS(ms), cb, xfer); 2655 } 2656 2657 /*------------------------------------------------------------------------* 2658 * usbd_callback_wrapper_sub 2659 * 2660 * - This function will update variables in an USB transfer after 2661 * that the USB transfer is complete. 2662 * 2663 * - This function is used to start the next USB transfer on the 2664 * ep transfer queue, if any. 2665 * 2666 * NOTE: In some special cases the USB transfer will not be removed from 2667 * the pipe queue, but remain first. To enforce USB transfer removal call 2668 * this function passing the error code "USB_ERR_CANCELLED". 2669 * 2670 * Return values: 2671 * 0: Success. 2672 * Else: The callback has been deferred. 2673 *------------------------------------------------------------------------*/ 2674 static uint8_t 2675 usbd_callback_wrapper_sub(struct usb_xfer *xfer) 2676 { 2677 struct usb_endpoint *ep; 2678 struct usb_bus *bus; 2679 usb_frcount_t x; 2680 2681 bus = xfer->xroot->bus; 2682 2683 if ((!xfer->flags_int.open) && 2684 (!xfer->flags_int.did_close)) { 2685 DPRINTF("close\n"); 2686 USB_BUS_LOCK(bus); 2687 (xfer->endpoint->methods->close) (xfer); 2688 USB_BUS_UNLOCK(bus); 2689 /* only close once */ 2690 xfer->flags_int.did_close = 1; 2691 return (1); /* wait for new callback */ 2692 } 2693 /* 2694 * If we have a non-hardware induced error we 2695 * need to do the DMA delay! 2696 */ 2697 if (xfer->error != 0 && !xfer->flags_int.did_dma_delay && 2698 (xfer->error == USB_ERR_CANCELLED || 2699 xfer->error == USB_ERR_TIMEOUT || 2700 bus->methods->start_dma_delay != NULL)) { 2701 2702 usb_timeout_t temp; 2703 2704 /* only delay once */ 2705 xfer->flags_int.did_dma_delay = 1; 2706 2707 /* we can not cancel this delay */ 2708 xfer->flags_int.can_cancel_immed = 0; 2709 2710 temp = usbd_get_dma_delay(xfer->xroot->udev); 2711 2712 DPRINTFN(3, "DMA delay, %u ms, " 2713 "on %p\n", temp, xfer); 2714 2715 if (temp != 0) { 2716 USB_BUS_LOCK(bus); 2717 /* 2718 * Some hardware solutions have dedicated 2719 * events when it is safe to free DMA'ed 2720 * memory. 
For the other hardware platforms we 2721 * use a static delay. 2722 */ 2723 if (bus->methods->start_dma_delay != NULL) { 2724 (bus->methods->start_dma_delay) (xfer); 2725 } else { 2726 usbd_transfer_timeout_ms(xfer, 2727 (void (*)(void *))&usb_dma_delay_done_cb, 2728 temp); 2729 } 2730 USB_BUS_UNLOCK(bus); 2731 return (1); /* wait for new callback */ 2732 } 2733 } 2734 /* check actual number of frames */ 2735 if (xfer->aframes > xfer->nframes) { 2736 if (xfer->error == 0) { 2737 panic("%s: actual number of frames, %d, is " 2738 "greater than initial number of frames, %d\n", 2739 __FUNCTION__, xfer->aframes, xfer->nframes); 2740 } else { 2741 /* just set some valid value */ 2742 xfer->aframes = xfer->nframes; 2743 } 2744 } 2745 /* compute actual length */ 2746 xfer->actlen = 0; 2747 2748 for (x = 0; x != xfer->aframes; x++) { 2749 xfer->actlen += xfer->frlengths[x]; 2750 } 2751 2752 /* 2753 * Frames that were not transferred get zero actual length in 2754 * case the USB device driver does not check the actual number 2755 * of frames transferred, "xfer->aframes": 2756 */ 2757 for (; x < xfer->nframes; x++) { 2758 usbd_xfer_set_frame_len(xfer, x, 0); 2759 } 2760 2761 /* check actual length */ 2762 if (xfer->actlen > xfer->sumlen) { 2763 if (xfer->error == 0) { 2764 panic("%s: actual length, %d, is greater than " 2765 "initial length, %d\n", 2766 __FUNCTION__, xfer->actlen, xfer->sumlen); 2767 } else { 2768 /* just set some valid value */ 2769 xfer->actlen = xfer->sumlen; 2770 } 2771 } 2772 DPRINTFN(1, "xfer=%p endpoint=%p sts=%d alen=%d, slen=%d, afrm=%d, nfrm=%d\n", 2773 xfer, xfer->endpoint, xfer->error, xfer->actlen, xfer->sumlen, 2774 xfer->aframes, xfer->nframes); 2775 2776 if (xfer->error) { 2777 /* end of control transfer, if any */ 2778 xfer->flags_int.control_act = 0; 2779 2780 /* check if we should block the execution queue */ 2781 if ((xfer->error != USB_ERR_CANCELLED) && 2782 (xfer->flags.pipe_bof)) { 2783 DPRINTFN(2, "xfer=%p: Block On Failure " 2784 "on endpoint=%p\n", xfer, xfer->endpoint); 2785 goto done; 2786 } 2787 } else { 2788 /* check for short transfers */ 2789 if (xfer->actlen < xfer->sumlen) { 2790 2791 /* end of control transfer, if any */ 2792 xfer->flags_int.control_act = 0; 2793 2794 if (!xfer->flags_int.short_xfer_ok) { 2795 xfer->error = USB_ERR_SHORT_XFER; 2796 if (xfer->flags.pipe_bof) { 2797 DPRINTFN(2, "xfer=%p: Block On Failure on " 2798 "Short Transfer on endpoint %p.\n", 2799 xfer, xfer->endpoint); 2800 goto done; 2801 } 2802 } 2803 } else { 2804 /* 2805 * Check if we are in the middle of a 2806 * control transfer: 2807 */ 2808 if (xfer->flags_int.control_act) { 2809 DPRINTFN(5, "xfer=%p: Control transfer " 2810 "active on endpoint=%p\n", xfer, xfer->endpoint); 2811 goto done; 2812 } 2813 } 2814 } 2815 2816 ep = xfer->endpoint; 2817 2818 /* 2819 * If the current USB transfer is completing we need to start the 2820 * next one: 2821 */ 2822 USB_BUS_LOCK(bus); 2823 if (ep->endpoint_q[xfer->stream_id].curr == xfer) { 2824 usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], NULL); 2825 2826 if (ep->endpoint_q[xfer->stream_id].curr != NULL || 2827 TAILQ_FIRST(&ep->endpoint_q[xfer->stream_id].head) != NULL) { 2828 /* there is another USB transfer waiting */ 2829 } else { 2830 /* this is the last USB transfer */ 2831 /* clear isochronous sync flag */ 2832 xfer->endpoint->is_synced = 0; 2833 } 2834 } 2835 USB_BUS_UNLOCK(bus); 2836 done: 2837 return (0); 2838 } 2839 2840 /*------------------------------------------------------------------------* 2841 * 
usb_command_wrapper 2842 * 2843 * This function is used to execute commands non-recursivly on an USB 2844 * transfer. 2845 *------------------------------------------------------------------------*/ 2846 void 2847 usb_command_wrapper(struct usb_xfer_queue *pq, struct usb_xfer *xfer) 2848 { 2849 if (xfer) { 2850 /* 2851 * If the transfer is not already processing, 2852 * queue it! 2853 */ 2854 if (pq->curr != xfer) { 2855 usbd_transfer_enqueue(pq, xfer); 2856 if (pq->curr != NULL) { 2857 /* something is already processing */ 2858 DPRINTFN(6, "busy %p\n", pq->curr); 2859 return; 2860 } 2861 } 2862 } else { 2863 /* Get next element in queue */ 2864 pq->curr = NULL; 2865 } 2866 2867 if (!pq->recurse_1) { 2868 2869 do { 2870 2871 /* set both recurse flags */ 2872 pq->recurse_1 = 1; 2873 pq->recurse_2 = 1; 2874 2875 if (pq->curr == NULL) { 2876 xfer = TAILQ_FIRST(&pq->head); 2877 if (xfer) { 2878 TAILQ_REMOVE(&pq->head, xfer, 2879 wait_entry); 2880 xfer->wait_queue = NULL; 2881 pq->curr = xfer; 2882 } else { 2883 break; 2884 } 2885 } 2886 DPRINTFN(6, "cb %p (enter)\n", pq->curr); 2887 (pq->command) (pq); 2888 DPRINTFN(6, "cb %p (leave)\n", pq->curr); 2889 2890 } while (!pq->recurse_2); 2891 2892 /* clear first recurse flag */ 2893 pq->recurse_1 = 0; 2894 2895 } else { 2896 /* clear second recurse flag */ 2897 pq->recurse_2 = 0; 2898 } 2899 } 2900 2901 /*------------------------------------------------------------------------* 2902 * usbd_ctrl_transfer_setup 2903 * 2904 * This function is used to setup the default USB control endpoint 2905 * transfer. 2906 *------------------------------------------------------------------------*/ 2907 void 2908 usbd_ctrl_transfer_setup(struct usb_device *udev) 2909 { 2910 struct usb_xfer *xfer; 2911 uint8_t no_resetup; 2912 uint8_t iface_index; 2913 2914 /* check for root HUB */ 2915 if (udev->parent_hub == NULL) 2916 return; 2917 repeat: 2918 2919 xfer = udev->ctrl_xfer[0]; 2920 if (xfer) { 2921 USB_XFER_LOCK(xfer); 2922 no_resetup = 2923 ((xfer->address == udev->address) && 2924 (udev->ctrl_ep_desc.wMaxPacketSize[0] == 2925 udev->ddesc.bMaxPacketSize)); 2926 if (udev->flags.usb_mode == USB_MODE_DEVICE) { 2927 if (no_resetup) { 2928 /* 2929 * NOTE: checking "xfer->address" and 2930 * starting the USB transfer must be 2931 * atomic! 2932 */ 2933 usbd_transfer_start(xfer); 2934 } 2935 } 2936 USB_XFER_UNLOCK(xfer); 2937 } else { 2938 no_resetup = 0; 2939 } 2940 2941 if (no_resetup) { 2942 /* 2943 * All parameters are exactly the same like before. 2944 * Just return. 2945 */ 2946 return; 2947 } 2948 /* 2949 * Update wMaxPacketSize for the default control endpoint: 2950 */ 2951 udev->ctrl_ep_desc.wMaxPacketSize[0] = 2952 udev->ddesc.bMaxPacketSize; 2953 2954 /* 2955 * Unsetup any existing USB transfer: 2956 */ 2957 usbd_transfer_unsetup(udev->ctrl_xfer, USB_CTRL_XFER_MAX); 2958 2959 /* 2960 * Reset clear stall error counter. 
2961 */ 2962 udev->clear_stall_errors = 0; 2963 2964 /* 2965 * Try to setup a new USB transfer for the 2966 * default control endpoint: 2967 */ 2968 iface_index = 0; 2969 if (usbd_transfer_setup(udev, &iface_index, 2970 udev->ctrl_xfer, usb_control_ep_cfg, USB_CTRL_XFER_MAX, NULL, 2971 &udev->device_mtx)) { 2972 DPRINTFN(0, "could not setup default " 2973 "USB transfer\n"); 2974 } else { 2975 goto repeat; 2976 } 2977 } 2978 2979 /*------------------------------------------------------------------------* 2980 * usbd_clear_data_toggle - factored out code 2981 * 2982 * NOTE: the intention of this function is not to reset the hardware 2983 * data toggle. 2984 *------------------------------------------------------------------------*/ 2985 void 2986 usbd_clear_stall_locked(struct usb_device *udev, struct usb_endpoint *ep) 2987 { 2988 USB_BUS_LOCK_ASSERT(udev->bus, MA_OWNED); 2989 2990 /* check that we have a valid case */ 2991 if (udev->flags.usb_mode == USB_MODE_HOST && 2992 udev->parent_hub != NULL && 2993 udev->bus->methods->clear_stall != NULL && 2994 ep->methods != NULL) { 2995 (udev->bus->methods->clear_stall) (udev, ep); 2996 } 2997 } 2998 2999 /*------------------------------------------------------------------------* 3000 * usbd_clear_data_toggle - factored out code 3001 * 3002 * NOTE: the intention of this function is not to reset the hardware 3003 * data toggle on the USB device side. 3004 *------------------------------------------------------------------------*/ 3005 void 3006 usbd_clear_data_toggle(struct usb_device *udev, struct usb_endpoint *ep) 3007 { 3008 DPRINTFN(5, "udev=%p endpoint=%p\n", udev, ep); 3009 3010 USB_BUS_LOCK(udev->bus); 3011 ep->toggle_next = 0; 3012 /* some hardware needs a callback to clear the data toggle */ 3013 usbd_clear_stall_locked(udev, ep); 3014 USB_BUS_UNLOCK(udev->bus); 3015 } 3016 3017 /*------------------------------------------------------------------------* 3018 * usbd_clear_stall_callback - factored out clear stall callback 3019 * 3020 * Input parameters: 3021 * xfer1: Clear Stall Control Transfer 3022 * xfer2: Stalled USB Transfer 3023 * 3024 * This function is NULL safe. 3025 * 3026 * Return values: 3027 * 0: In progress 3028 * Else: Finished 3029 * 3030 * Clear stall config example: 3031 * 3032 * static const struct usb_config my_clearstall = { 3033 * .type = UE_CONTROL, 3034 * .endpoint = 0, 3035 * .direction = UE_DIR_ANY, 3036 * .interval = 50, //50 milliseconds 3037 * .bufsize = sizeof(struct usb_device_request), 3038 * .timeout = 1000, //1.000 seconds 3039 * .callback = &my_clear_stall_callback, // ** 3040 * .usb_mode = USB_MODE_HOST, 3041 * }; 3042 * 3043 * ** "my_clear_stall_callback" calls "usbd_clear_stall_callback" 3044 * passing the correct parameters. 
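 *
 * For illustration, such a callback could look like this (the softc
 * layout, field names and the second transfer are hypothetical):
 *
 *	static void
 *	my_clear_stall_callback(struct usb_xfer *xfer, usb_error_t error)
 *	{
 *		struct my_softc *sc = usbd_xfer_softc(xfer);
 *
 *		if (usbd_clear_stall_callback(xfer, sc->sc_data_xfer)) {
 *			sc->sc_flags &= ~MY_FLAG_STALLED;
 *			usbd_transfer_start(sc->sc_data_xfer);
 *		}
 *	}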
3045 *------------------------------------------------------------------------*/ 3046 uint8_t 3047 usbd_clear_stall_callback(struct usb_xfer *xfer1, 3048 struct usb_xfer *xfer2) 3049 { 3050 struct usb_device_request req; 3051 3052 if (xfer2 == NULL) { 3053 /* looks like we are tearing down */ 3054 DPRINTF("NULL input parameter\n"); 3055 return (0); 3056 } 3057 USB_XFER_LOCK_ASSERT(xfer1, MA_OWNED); 3058 USB_XFER_LOCK_ASSERT(xfer2, MA_OWNED); 3059 3060 switch (USB_GET_STATE(xfer1)) { 3061 case USB_ST_SETUP: 3062 3063 /* 3064 * pre-clear the data toggle to DATA0 ("umass.c" and 3065 * "ata-usb.c" depends on this) 3066 */ 3067 3068 usbd_clear_data_toggle(xfer2->xroot->udev, xfer2->endpoint); 3069 3070 /* setup a clear-stall packet */ 3071 3072 req.bmRequestType = UT_WRITE_ENDPOINT; 3073 req.bRequest = UR_CLEAR_FEATURE; 3074 USETW(req.wValue, UF_ENDPOINT_HALT); 3075 req.wIndex[0] = xfer2->endpoint->edesc->bEndpointAddress; 3076 req.wIndex[1] = 0; 3077 USETW(req.wLength, 0); 3078 3079 /* 3080 * "usbd_transfer_setup_sub()" will ensure that 3081 * we have sufficient room in the buffer for 3082 * the request structure! 3083 */ 3084 3085 /* copy in the transfer */ 3086 3087 usbd_copy_in(xfer1->frbuffers, 0, &req, sizeof(req)); 3088 3089 /* set length */ 3090 xfer1->frlengths[0] = sizeof(req); 3091 xfer1->nframes = 1; 3092 3093 usbd_transfer_submit(xfer1); 3094 return (0); 3095 3096 case USB_ST_TRANSFERRED: 3097 break; 3098 3099 default: /* Error */ 3100 if (xfer1->error == USB_ERR_CANCELLED) { 3101 return (0); 3102 } 3103 break; 3104 } 3105 return (1); /* Clear Stall Finished */ 3106 } 3107 3108 /*------------------------------------------------------------------------* 3109 * usbd_transfer_poll 3110 * 3111 * The following function gets called from the USB keyboard driver and 3112 * UMASS when the system has paniced. 3113 * 3114 * NOTE: It is currently not possible to resume normal operation on 3115 * the USB controller which has been polled, due to clearing of the 3116 * "up_dsleep" and "up_msleep" flags. 
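 *
 * Illustration only (the transfer array and count are hypothetical):
 * a polled consumer, for example a keyboard driver running after a
 * panic, calls this from its input path once interrupts are no
 * longer delivered:
 *
 *	usbd_transfer_poll(sc->sc_xfer, MY_N_TRANSFER);
 *
 * which runs the controller's "xfer_poll" method and processes the
 * done queue synchronously for each transfer in the array.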
3117 *------------------------------------------------------------------------*/ 3118 void 3119 usbd_transfer_poll(struct usb_xfer **ppxfer, uint16_t max) 3120 { 3121 struct usb_xfer *xfer; 3122 struct usb_xfer_root *xroot; 3123 struct usb_device *udev; 3124 struct usb_proc_msg *pm; 3125 uint16_t n; 3126 uint16_t drop_bus; 3127 uint16_t drop_xfer; 3128 3129 for (n = 0; n != max; n++) { 3130 /* Extra checks to avoid panic */ 3131 xfer = ppxfer[n]; 3132 if (xfer == NULL) 3133 continue; /* no USB transfer */ 3134 xroot = xfer->xroot; 3135 if (xroot == NULL) 3136 continue; /* no USB root */ 3137 udev = xroot->udev; 3138 if (udev == NULL) 3139 continue; /* no USB device */ 3140 if (udev->bus == NULL) 3141 continue; /* no BUS structure */ 3142 if (udev->bus->methods == NULL) 3143 continue; /* no BUS methods */ 3144 if (udev->bus->methods->xfer_poll == NULL) 3145 continue; /* no poll method */ 3146 3147 /* make sure that the BUS mutex is not locked */ 3148 drop_bus = 0; 3149 while (mtx_owned(&xroot->udev->bus->bus_mtx) && !SCHEDULER_STOPPED()) { 3150 mtx_unlock(&xroot->udev->bus->bus_mtx); 3151 drop_bus++; 3152 } 3153 3154 /* make sure that the transfer mutex is not locked */ 3155 drop_xfer = 0; 3156 while (mtx_owned(xroot->xfer_mtx) && !SCHEDULER_STOPPED()) { 3157 mtx_unlock(xroot->xfer_mtx); 3158 drop_xfer++; 3159 } 3160 3161 /* Make sure cv_signal() and cv_broadcast() is not called */ 3162 udev->bus->control_xfer_proc.up_msleep = 0; 3163 udev->bus->explore_proc.up_msleep = 0; 3164 udev->bus->giant_callback_proc.up_msleep = 0; 3165 udev->bus->non_giant_callback_proc.up_msleep = 0; 3166 3167 /* poll USB hardware */ 3168 (udev->bus->methods->xfer_poll) (udev->bus); 3169 3170 USB_BUS_LOCK(xroot->bus); 3171 3172 /* check for clear stall */ 3173 if (udev->ctrl_xfer[1] != NULL) { 3174 3175 /* poll clear stall start */ 3176 pm = &udev->cs_msg[0].hdr; 3177 (pm->pm_callback) (pm); 3178 /* poll clear stall done thread */ 3179 pm = &udev->ctrl_xfer[1]-> 3180 xroot->done_m[0].hdr; 3181 (pm->pm_callback) (pm); 3182 } 3183 3184 /* poll done thread */ 3185 pm = &xroot->done_m[0].hdr; 3186 (pm->pm_callback) (pm); 3187 3188 USB_BUS_UNLOCK(xroot->bus); 3189 3190 /* restore transfer mutex */ 3191 while (drop_xfer--) 3192 mtx_lock(xroot->xfer_mtx); 3193 3194 /* restore BUS mutex */ 3195 while (drop_bus--) 3196 mtx_lock(&xroot->udev->bus->bus_mtx); 3197 } 3198 } 3199 3200 static void 3201 usbd_get_std_packet_size(struct usb_std_packet_size *ptr, 3202 uint8_t type, enum usb_dev_speed speed) 3203 { 3204 static const uint16_t intr_range_max[USB_SPEED_MAX] = { 3205 [USB_SPEED_LOW] = 8, 3206 [USB_SPEED_FULL] = 64, 3207 [USB_SPEED_HIGH] = 1024, 3208 [USB_SPEED_VARIABLE] = 1024, 3209 [USB_SPEED_SUPER] = 1024, 3210 }; 3211 3212 static const uint16_t isoc_range_max[USB_SPEED_MAX] = { 3213 [USB_SPEED_LOW] = 0, /* invalid */ 3214 [USB_SPEED_FULL] = 1023, 3215 [USB_SPEED_HIGH] = 1024, 3216 [USB_SPEED_VARIABLE] = 3584, 3217 [USB_SPEED_SUPER] = 1024, 3218 }; 3219 3220 static const uint16_t control_min[USB_SPEED_MAX] = { 3221 [USB_SPEED_LOW] = 8, 3222 [USB_SPEED_FULL] = 8, 3223 [USB_SPEED_HIGH] = 64, 3224 [USB_SPEED_VARIABLE] = 512, 3225 [USB_SPEED_SUPER] = 512, 3226 }; 3227 3228 static const uint16_t bulk_min[USB_SPEED_MAX] = { 3229 [USB_SPEED_LOW] = 8, 3230 [USB_SPEED_FULL] = 8, 3231 [USB_SPEED_HIGH] = 512, 3232 [USB_SPEED_VARIABLE] = 512, 3233 [USB_SPEED_SUPER] = 1024, 3234 }; 3235 3236 uint16_t temp; 3237 3238 memset(ptr, 0, sizeof(*ptr)); 3239 3240 switch (type) { 3241 case UE_INTERRUPT: 3242 ptr->range.max = 
intr_range_max[speed]; 3243 break; 3244 case UE_ISOCHRONOUS: 3245 ptr->range.max = isoc_range_max[speed]; 3246 break; 3247 default: 3248 if (type == UE_BULK) 3249 temp = bulk_min[speed]; 3250 else /* UE_CONTROL */ 3251 temp = control_min[speed]; 3252 3253 /* default is fixed */ 3254 ptr->fixed[0] = temp; 3255 ptr->fixed[1] = temp; 3256 ptr->fixed[2] = temp; 3257 ptr->fixed[3] = temp; 3258 3259 if (speed == USB_SPEED_FULL) { 3260 /* multiple sizes */ 3261 ptr->fixed[1] = 16; 3262 ptr->fixed[2] = 32; 3263 ptr->fixed[3] = 64; 3264 } 3265 if ((speed == USB_SPEED_VARIABLE) && 3266 (type == UE_BULK)) { 3267 /* multiple sizes */ 3268 ptr->fixed[2] = 1024; 3269 ptr->fixed[3] = 1536; 3270 } 3271 break; 3272 } 3273 } 3274 3275 void * 3276 usbd_xfer_softc(struct usb_xfer *xfer) 3277 { 3278 return (xfer->priv_sc); 3279 } 3280 3281 void * 3282 usbd_xfer_get_priv(struct usb_xfer *xfer) 3283 { 3284 return (xfer->priv_fifo); 3285 } 3286 3287 void 3288 usbd_xfer_set_priv(struct usb_xfer *xfer, void *ptr) 3289 { 3290 xfer->priv_fifo = ptr; 3291 } 3292 3293 uint8_t 3294 usbd_xfer_state(struct usb_xfer *xfer) 3295 { 3296 return (xfer->usb_state); 3297 } 3298 3299 void 3300 usbd_xfer_set_flag(struct usb_xfer *xfer, int flag) 3301 { 3302 switch (flag) { 3303 case USB_FORCE_SHORT_XFER: 3304 xfer->flags.force_short_xfer = 1; 3305 break; 3306 case USB_SHORT_XFER_OK: 3307 xfer->flags.short_xfer_ok = 1; 3308 break; 3309 case USB_MULTI_SHORT_OK: 3310 xfer->flags.short_frames_ok = 1; 3311 break; 3312 case USB_MANUAL_STATUS: 3313 xfer->flags.manual_status = 1; 3314 break; 3315 } 3316 } 3317 3318 void 3319 usbd_xfer_clr_flag(struct usb_xfer *xfer, int flag) 3320 { 3321 switch (flag) { 3322 case USB_FORCE_SHORT_XFER: 3323 xfer->flags.force_short_xfer = 0; 3324 break; 3325 case USB_SHORT_XFER_OK: 3326 xfer->flags.short_xfer_ok = 0; 3327 break; 3328 case USB_MULTI_SHORT_OK: 3329 xfer->flags.short_frames_ok = 0; 3330 break; 3331 case USB_MANUAL_STATUS: 3332 xfer->flags.manual_status = 0; 3333 break; 3334 } 3335 } 3336 3337 /* 3338 * The following function returns in milliseconds when the isochronous 3339 * transfer was completed by the hardware. The returned value wraps 3340 * around 65536 milliseconds. 3341 */ 3342 uint16_t 3343 usbd_xfer_get_timestamp(struct usb_xfer *xfer) 3344 { 3345 return (xfer->isoc_time_complete); 3346 } 3347
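
/*
 * Illustration of the timestamp accessor above (not part of the
 * original code; "sc_last_ms" is a hypothetical softc field): an
 * isochronous driver can measure the completion interval between
 * two transfers like this:
 *
 *	uint16_t delta;
 *
 *	delta = usbd_xfer_get_timestamp(xfer) - sc->sc_last_ms;
 *	sc->sc_last_ms = usbd_xfer_get_timestamp(xfer);
 *
 * The unsigned 16-bit subtraction handles the wrap around at 65536
 * milliseconds automatically, as long as the real interval is
 * shorter than that.
 */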