1 /* $FreeBSD$ */ 2 /*- 3 * Copyright (c) 2008 Hans Petter Selasky. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 27 #include <sys/stdint.h> 28 #include <sys/stddef.h> 29 #include <sys/param.h> 30 #include <sys/queue.h> 31 #include <sys/types.h> 32 #include <sys/systm.h> 33 #include <sys/kernel.h> 34 #include <sys/bus.h> 35 #include <sys/linker_set.h> 36 #include <sys/module.h> 37 #include <sys/lock.h> 38 #include <sys/mutex.h> 39 #include <sys/condvar.h> 40 #include <sys/sysctl.h> 41 #include <sys/sx.h> 42 #include <sys/unistd.h> 43 #include <sys/callout.h> 44 #include <sys/malloc.h> 45 #include <sys/priv.h> 46 47 #include <dev/usb/usb.h> 48 #include <dev/usb/usbdi.h> 49 #include <dev/usb/usbdi_util.h> 50 51 #define USB_DEBUG_VAR usb_debug 52 53 #include <dev/usb/usb_core.h> 54 #include <dev/usb/usb_busdma.h> 55 #include <dev/usb/usb_process.h> 56 #include <dev/usb/usb_transfer.h> 57 #include <dev/usb/usb_device.h> 58 #include <dev/usb/usb_debug.h> 59 #include <dev/usb/usb_util.h> 60 61 #include <dev/usb/usb_controller.h> 62 #include <dev/usb/usb_bus.h> 63 64 struct usb_std_packet_size { 65 struct { 66 uint16_t min; /* inclusive */ 67 uint16_t max; /* inclusive */ 68 } range; 69 70 uint16_t fixed[4]; 71 }; 72 73 static usb_callback_t usb_request_callback; 74 75 static const struct usb_config usb_control_ep_cfg[USB_DEFAULT_XFER_MAX] = { 76 77 /* This transfer is used for generic control endpoint transfers */ 78 79 [0] = { 80 .type = UE_CONTROL, 81 .endpoint = 0x00, /* Control endpoint */ 82 .direction = UE_DIR_ANY, 83 .bufsize = USB_EP0_BUFSIZE, /* bytes */ 84 .flags = {.proxy_buffer = 1,}, 85 .callback = &usb_request_callback, 86 .usb_mode = USB_MODE_DUAL, /* both modes */ 87 }, 88 89 /* This transfer is used for generic clear stall only */ 90 91 [1] = { 92 .type = UE_CONTROL, 93 .endpoint = 0x00, /* Control pipe */ 94 .direction = UE_DIR_ANY, 95 .bufsize = sizeof(struct usb_device_request), 96 .callback = &usb_do_clear_stall_callback, 97 .timeout = 1000, /* 1 second */ 98 .interval = 50, /* 50ms */ 99 .usb_mode = USB_MODE_HOST, 100 }, 101 }; 102 103 /* function prototypes */ 104 105 static void usbd_update_max_frame_size(struct usb_xfer *); 106 static void usbd_transfer_unsetup_sub(struct usb_xfer_root *, uint8_t); 
static void	usbd_control_transfer_init(struct usb_xfer *);
static int	usbd_setup_ctrl_transfer(struct usb_xfer *);
static void	usb_callback_proc(struct usb_proc_msg *);
static void	usbd_callback_ss_done_defer(struct usb_xfer *);
static void	usbd_callback_wrapper(struct usb_xfer_queue *);
static void	usb_dma_delay_done_cb(void *);
static void	usbd_transfer_start_cb(void *);
static uint8_t	usbd_callback_wrapper_sub(struct usb_xfer *);
static void	usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
		    uint8_t type, enum usb_dev_speed speed);

/*------------------------------------------------------------------------*
 *	usb_request_callback
 *------------------------------------------------------------------------*/
static void
usb_request_callback(struct usb_xfer *xfer, usb_error_t error)
{
	if (xfer->flags_int.usb_mode == USB_MODE_DEVICE)
		usb_handle_request_callback(xfer, error);
	else
		usbd_do_request_callback(xfer, error);
}

/*------------------------------------------------------------------------*
 *	usbd_update_max_frame_size
 *
 * This function updates the maximum frame size, because high speed
 * USB can transfer multiple consecutive packets.
 *------------------------------------------------------------------------*/
static void
usbd_update_max_frame_size(struct usb_xfer *xfer)
{
	/* compute maximum frame size */

	if (xfer->max_packet_count == 2) {
		xfer->max_frame_size = 2 * xfer->max_packet_size;
	} else if (xfer->max_packet_count == 3) {
		xfer->max_frame_size = 3 * xfer->max_packet_size;
	} else {
		xfer->max_frame_size = xfer->max_packet_size;
	}
}

/*------------------------------------------------------------------------*
 *	usbd_get_dma_delay
 *
 * The following function is called when we need to
 * synchronize with DMA hardware.
 *
 * Returns:
 *    0: no DMA delay required
 * Else: milliseconds of DMA delay
 *------------------------------------------------------------------------*/
usb_timeout_t
usbd_get_dma_delay(struct usb_bus *bus)
{
	uint32_t temp = 0;

	if (bus->methods->get_dma_delay) {
		(bus->methods->get_dma_delay) (bus, &temp);
		/*
		 * Round up and convert to milliseconds. Note that we
		 * use 1024 milliseconds per second to save a division.
		 */
		temp += 0x3FF;
		temp /= 0x400;
	}
	return (temp);
}

/*------------------------------------------------------------------------*
 *	usbd_transfer_setup_sub_malloc
 *
 * This function will allocate one or more DMA'able memory chunks
 * according to the "size", "align" and "count" arguments. Afterwards,
 * "ppc" points to a linear array of USB page caches.
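 *
 * For illustration only, a controller driver's "xfer_setup" method
 * might use this helper to allocate a pool of hardware descriptors;
 * the descriptor type, alignment and count below are hypothetical:
 *
 *	if (usbd_transfer_setup_sub_malloc(parm, &pc,
 *	    sizeof(struct xxx_td), XXX_TD_ALIGN, ntd)) {
 *		parm->err = USB_ERR_NOMEM;
 *	}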
183 * 184 * Returns: 185 * 0: Success 186 * Else: Failure 187 *------------------------------------------------------------------------*/ 188 #if USB_HAVE_BUSDMA 189 uint8_t 190 usbd_transfer_setup_sub_malloc(struct usb_setup_params *parm, 191 struct usb_page_cache **ppc, usb_size_t size, usb_size_t align, 192 usb_size_t count) 193 { 194 struct usb_page_cache *pc; 195 struct usb_page *pg; 196 void *buf; 197 usb_size_t n_dma_pc; 198 usb_size_t n_obj; 199 usb_size_t x; 200 usb_size_t y; 201 usb_size_t r; 202 usb_size_t z; 203 204 USB_ASSERT(align > 1, ("Invalid alignment, 0x%08x\n", 205 align)); 206 USB_ASSERT(size > 0, ("Invalid size = 0\n")); 207 208 if (count == 0) { 209 return (0); /* nothing to allocate */ 210 } 211 /* 212 * Make sure that the size is aligned properly. 213 */ 214 size = -((-size) & (-align)); 215 216 /* 217 * Try multi-allocation chunks to reduce the number of DMA 218 * allocations, hence DMA allocations are slow. 219 */ 220 if (size >= PAGE_SIZE) { 221 n_dma_pc = count; 222 n_obj = 1; 223 } else { 224 /* compute number of objects per page */ 225 n_obj = (PAGE_SIZE / size); 226 /* 227 * Compute number of DMA chunks, rounded up 228 * to nearest one: 229 */ 230 n_dma_pc = ((count + n_obj - 1) / n_obj); 231 } 232 233 if (parm->buf == NULL) { 234 /* for the future */ 235 parm->dma_page_ptr += n_dma_pc; 236 parm->dma_page_cache_ptr += n_dma_pc; 237 parm->dma_page_ptr += count; 238 parm->xfer_page_cache_ptr += count; 239 return (0); 240 } 241 for (x = 0; x != n_dma_pc; x++) { 242 /* need to initialize the page cache */ 243 parm->dma_page_cache_ptr[x].tag_parent = 244 &parm->curr_xfer->xroot->dma_parent_tag; 245 } 246 for (x = 0; x != count; x++) { 247 /* need to initialize the page cache */ 248 parm->xfer_page_cache_ptr[x].tag_parent = 249 &parm->curr_xfer->xroot->dma_parent_tag; 250 } 251 252 if (ppc) { 253 *ppc = parm->xfer_page_cache_ptr; 254 } 255 r = count; /* set remainder count */ 256 z = n_obj * size; /* set allocation size */ 257 pc = parm->xfer_page_cache_ptr; 258 pg = parm->dma_page_ptr; 259 260 for (x = 0; x != n_dma_pc; x++) { 261 262 if (r < n_obj) { 263 /* compute last remainder */ 264 z = r * size; 265 n_obj = r; 266 } 267 if (usb_pc_alloc_mem(parm->dma_page_cache_ptr, 268 pg, z, align)) { 269 return (1); /* failure */ 270 } 271 /* Set beginning of current buffer */ 272 buf = parm->dma_page_cache_ptr->buffer; 273 /* Make room for one DMA page cache and one page */ 274 parm->dma_page_cache_ptr++; 275 pg++; 276 277 for (y = 0; (y != n_obj); y++, r--, pc++, pg++) { 278 279 /* Load sub-chunk into DMA */ 280 if (usb_pc_dmamap_create(pc, size)) { 281 return (1); /* failure */ 282 } 283 pc->buffer = USB_ADD_BYTES(buf, y * size); 284 pc->page_start = pg; 285 286 mtx_lock(pc->tag_parent->mtx); 287 if (usb_pc_load_mem(pc, size, 1 /* synchronous */ )) { 288 mtx_unlock(pc->tag_parent->mtx); 289 return (1); /* failure */ 290 } 291 mtx_unlock(pc->tag_parent->mtx); 292 } 293 } 294 295 parm->xfer_page_cache_ptr = pc; 296 parm->dma_page_ptr = pg; 297 return (0); 298 } 299 #endif 300 301 /*------------------------------------------------------------------------* 302 * usbd_transfer_setup_sub - transfer setup subroutine 303 * 304 * This function must be called from the "xfer_setup" callback of the 305 * USB Host or Device controller driver when setting up an USB 306 * transfer. This function will setup correct packet sizes, buffer 307 * sizes, flags and more, that are stored in the "usb_xfer" 308 * structure. 
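 *
 * A rough sketch of the expected calling sequence, with hypothetical
 * hardware limits, is:
 *
 *	parm->hc_max_packet_size = 0x400;
 *	parm->hc_max_packet_count = 1;
 *	parm->hc_max_frame_size = 0x400;
 *	usbd_transfer_setup_sub(parm);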
309 *------------------------------------------------------------------------*/ 310 void 311 usbd_transfer_setup_sub(struct usb_setup_params *parm) 312 { 313 enum { 314 REQ_SIZE = 8, 315 MIN_PKT = 8, 316 }; 317 struct usb_xfer *xfer = parm->curr_xfer; 318 const struct usb_config *setup = parm->curr_setup; 319 struct usb_endpoint_descriptor *edesc; 320 struct usb_std_packet_size std_size; 321 usb_frcount_t n_frlengths; 322 usb_frcount_t n_frbuffers; 323 usb_frcount_t x; 324 uint8_t type; 325 uint8_t zmps; 326 327 /* 328 * Sanity check. The following parameters must be initialized before 329 * calling this function. 330 */ 331 if ((parm->hc_max_packet_size == 0) || 332 (parm->hc_max_packet_count == 0) || 333 (parm->hc_max_frame_size == 0)) { 334 parm->err = USB_ERR_INVAL; 335 goto done; 336 } 337 edesc = xfer->endpoint->edesc; 338 339 type = (edesc->bmAttributes & UE_XFERTYPE); 340 341 xfer->flags = setup->flags; 342 xfer->nframes = setup->frames; 343 xfer->timeout = setup->timeout; 344 xfer->callback = setup->callback; 345 xfer->interval = setup->interval; 346 xfer->endpointno = edesc->bEndpointAddress; 347 xfer->max_packet_size = UGETW(edesc->wMaxPacketSize); 348 xfer->max_packet_count = 1; 349 /* make a shadow copy: */ 350 xfer->flags_int.usb_mode = parm->udev->flags.usb_mode; 351 352 parm->bufsize = setup->bufsize; 353 354 if (parm->speed == USB_SPEED_HIGH) { 355 xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3; 356 xfer->max_packet_size &= 0x7FF; 357 } 358 /* range check "max_packet_count" */ 359 360 if (xfer->max_packet_count > parm->hc_max_packet_count) { 361 xfer->max_packet_count = parm->hc_max_packet_count; 362 } 363 /* filter "wMaxPacketSize" according to HC capabilities */ 364 365 if ((xfer->max_packet_size > parm->hc_max_packet_size) || 366 (xfer->max_packet_size == 0)) { 367 xfer->max_packet_size = parm->hc_max_packet_size; 368 } 369 /* filter "wMaxPacketSize" according to standard sizes */ 370 371 usbd_get_std_packet_size(&std_size, type, parm->speed); 372 373 if (std_size.range.min || std_size.range.max) { 374 375 if (xfer->max_packet_size < std_size.range.min) { 376 xfer->max_packet_size = std_size.range.min; 377 } 378 if (xfer->max_packet_size > std_size.range.max) { 379 xfer->max_packet_size = std_size.range.max; 380 } 381 } else { 382 383 if (xfer->max_packet_size >= std_size.fixed[3]) { 384 xfer->max_packet_size = std_size.fixed[3]; 385 } else if (xfer->max_packet_size >= std_size.fixed[2]) { 386 xfer->max_packet_size = std_size.fixed[2]; 387 } else if (xfer->max_packet_size >= std_size.fixed[1]) { 388 xfer->max_packet_size = std_size.fixed[1]; 389 } else { 390 /* only one possibility left */ 391 xfer->max_packet_size = std_size.fixed[0]; 392 } 393 } 394 395 /* compute "max_frame_size" */ 396 397 usbd_update_max_frame_size(xfer); 398 399 /* check interrupt interval and transfer pre-delay */ 400 401 if (type == UE_ISOCHRONOUS) { 402 403 uint16_t frame_limit; 404 405 xfer->interval = 0; /* not used, must be zero */ 406 xfer->flags_int.isochronous_xfr = 1; /* set flag */ 407 408 if (xfer->timeout == 0) { 409 /* 410 * set a default timeout in 411 * case something goes wrong! 
412 */ 413 xfer->timeout = 1000 / 4; 414 } 415 switch (parm->speed) { 416 case USB_SPEED_LOW: 417 case USB_SPEED_FULL: 418 frame_limit = USB_MAX_FS_ISOC_FRAMES_PER_XFER; 419 xfer->fps_shift = 0; 420 break; 421 default: 422 frame_limit = USB_MAX_HS_ISOC_FRAMES_PER_XFER; 423 xfer->fps_shift = edesc->bInterval; 424 if (xfer->fps_shift > 0) 425 xfer->fps_shift--; 426 if (xfer->fps_shift > 3) 427 xfer->fps_shift = 3; 428 break; 429 } 430 431 if (xfer->nframes > frame_limit) { 432 /* 433 * this is not going to work 434 * cross hardware 435 */ 436 parm->err = USB_ERR_INVAL; 437 goto done; 438 } 439 if (xfer->nframes == 0) { 440 /* 441 * this is not a valid value 442 */ 443 parm->err = USB_ERR_ZERO_NFRAMES; 444 goto done; 445 } 446 } else { 447 448 /* 449 * if a value is specified use that else check the endpoint 450 * descriptor 451 */ 452 if (xfer->interval == 0) { 453 454 if (type == UE_INTERRUPT) { 455 456 xfer->interval = edesc->bInterval; 457 458 switch (parm->speed) { 459 case USB_SPEED_SUPER: 460 case USB_SPEED_VARIABLE: 461 /* 125us -> 1ms */ 462 if (xfer->interval < 4) 463 xfer->interval = 1; 464 else if (xfer->interval > 16) 465 xfer->interval = (1<<(16-4)); 466 else 467 xfer->interval = 468 (1 << (xfer->interval-4)); 469 break; 470 case USB_SPEED_HIGH: 471 /* 125us -> 1ms */ 472 xfer->interval /= 8; 473 break; 474 default: 475 break; 476 } 477 if (xfer->interval == 0) { 478 /* 479 * One millisecond is the smallest 480 * interval we support: 481 */ 482 xfer->interval = 1; 483 } 484 } 485 } 486 } 487 488 /* 489 * NOTE: we do not allow "max_packet_size" or "max_frame_size" 490 * to be equal to zero when setting up USB transfers, hence 491 * this leads to alot of extra code in the USB kernel. 492 */ 493 494 if ((xfer->max_frame_size == 0) || 495 (xfer->max_packet_size == 0)) { 496 497 zmps = 1; 498 499 if ((parm->bufsize <= MIN_PKT) && 500 (type != UE_CONTROL) && 501 (type != UE_BULK)) { 502 503 /* workaround */ 504 xfer->max_packet_size = MIN_PKT; 505 xfer->max_packet_count = 1; 506 parm->bufsize = 0; /* automatic setup length */ 507 usbd_update_max_frame_size(xfer); 508 509 } else { 510 parm->err = USB_ERR_ZERO_MAXP; 511 goto done; 512 } 513 514 } else { 515 zmps = 0; 516 } 517 518 /* 519 * check if we should setup a default 520 * length: 521 */ 522 523 if (parm->bufsize == 0) { 524 525 parm->bufsize = xfer->max_frame_size; 526 527 if (type == UE_ISOCHRONOUS) { 528 parm->bufsize *= xfer->nframes; 529 } 530 } 531 /* 532 * check if we are about to setup a proxy 533 * type of buffer: 534 */ 535 536 if (xfer->flags.proxy_buffer) { 537 538 /* round bufsize up */ 539 540 parm->bufsize += (xfer->max_frame_size - 1); 541 542 if (parm->bufsize < xfer->max_frame_size) { 543 /* length wrapped around */ 544 parm->err = USB_ERR_INVAL; 545 goto done; 546 } 547 /* subtract remainder */ 548 549 parm->bufsize -= (parm->bufsize % xfer->max_frame_size); 550 551 /* add length of USB device request structure, if any */ 552 553 if (type == UE_CONTROL) { 554 parm->bufsize += REQ_SIZE; /* SETUP message */ 555 } 556 } 557 xfer->max_data_length = parm->bufsize; 558 559 /* Setup "n_frlengths" and "n_frbuffers" */ 560 561 if (type == UE_ISOCHRONOUS) { 562 n_frlengths = xfer->nframes; 563 n_frbuffers = 1; 564 } else { 565 566 if (type == UE_CONTROL) { 567 xfer->flags_int.control_xfr = 1; 568 if (xfer->nframes == 0) { 569 if (parm->bufsize <= REQ_SIZE) { 570 /* 571 * there will never be any data 572 * stage 573 */ 574 xfer->nframes = 1; 575 } else { 576 xfer->nframes = 2; 577 } 578 } 579 } else { 580 if 
(xfer->nframes == 0) { 581 xfer->nframes = 1; 582 } 583 } 584 585 n_frlengths = xfer->nframes; 586 n_frbuffers = xfer->nframes; 587 } 588 589 /* 590 * check if we have room for the 591 * USB device request structure: 592 */ 593 594 if (type == UE_CONTROL) { 595 596 if (xfer->max_data_length < REQ_SIZE) { 597 /* length wrapped around or too small bufsize */ 598 parm->err = USB_ERR_INVAL; 599 goto done; 600 } 601 xfer->max_data_length -= REQ_SIZE; 602 } 603 /* setup "frlengths" */ 604 xfer->frlengths = parm->xfer_length_ptr; 605 parm->xfer_length_ptr += n_frlengths; 606 607 /* setup "frbuffers" */ 608 xfer->frbuffers = parm->xfer_page_cache_ptr; 609 parm->xfer_page_cache_ptr += n_frbuffers; 610 611 /* initialize max frame count */ 612 xfer->max_frame_count = xfer->nframes; 613 614 /* 615 * check if we need to setup 616 * a local buffer: 617 */ 618 619 if (!xfer->flags.ext_buffer) { 620 621 /* align data */ 622 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1)); 623 624 if (parm->buf) { 625 626 xfer->local_buffer = 627 USB_ADD_BYTES(parm->buf, parm->size[0]); 628 629 usbd_xfer_set_frame_offset(xfer, 0, 0); 630 631 if ((type == UE_CONTROL) && (n_frbuffers > 1)) { 632 usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1); 633 } 634 } 635 parm->size[0] += parm->bufsize; 636 637 /* align data again */ 638 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1)); 639 } 640 /* 641 * Compute maximum buffer size 642 */ 643 644 if (parm->bufsize_max < parm->bufsize) { 645 parm->bufsize_max = parm->bufsize; 646 } 647 #if USB_HAVE_BUSDMA 648 if (xfer->flags_int.bdma_enable) { 649 /* 650 * Setup "dma_page_ptr". 651 * 652 * Proof for formula below: 653 * 654 * Assume there are three USB frames having length "a", "b" and 655 * "c". These USB frames will at maximum need "z" 656 * "usb_page" structures. 
"z" is given by: 657 * 658 * z = ((a / USB_PAGE_SIZE) + 2) + ((b / USB_PAGE_SIZE) + 2) + 659 * ((c / USB_PAGE_SIZE) + 2); 660 * 661 * Constraining "a", "b" and "c" like this: 662 * 663 * (a + b + c) <= parm->bufsize 664 * 665 * We know that: 666 * 667 * z <= ((parm->bufsize / USB_PAGE_SIZE) + (3*2)); 668 * 669 * Here is the general formula: 670 */ 671 xfer->dma_page_ptr = parm->dma_page_ptr; 672 parm->dma_page_ptr += (2 * n_frbuffers); 673 parm->dma_page_ptr += (parm->bufsize / USB_PAGE_SIZE); 674 } 675 #endif 676 if (zmps) { 677 /* correct maximum data length */ 678 xfer->max_data_length = 0; 679 } 680 /* subtract USB frame remainder from "hc_max_frame_size" */ 681 682 xfer->max_hc_frame_size = 683 (parm->hc_max_frame_size - 684 (parm->hc_max_frame_size % xfer->max_frame_size)); 685 686 if (xfer->max_hc_frame_size == 0) { 687 parm->err = USB_ERR_INVAL; 688 goto done; 689 } 690 691 /* initialize frame buffers */ 692 693 if (parm->buf) { 694 for (x = 0; x != n_frbuffers; x++) { 695 xfer->frbuffers[x].tag_parent = 696 &xfer->xroot->dma_parent_tag; 697 #if USB_HAVE_BUSDMA 698 if (xfer->flags_int.bdma_enable && 699 (parm->bufsize_max > 0)) { 700 701 if (usb_pc_dmamap_create( 702 xfer->frbuffers + x, 703 parm->bufsize_max)) { 704 parm->err = USB_ERR_NOMEM; 705 goto done; 706 } 707 } 708 #endif 709 } 710 } 711 done: 712 if (parm->err) { 713 /* 714 * Set some dummy values so that we avoid division by zero: 715 */ 716 xfer->max_hc_frame_size = 1; 717 xfer->max_frame_size = 1; 718 xfer->max_packet_size = 1; 719 xfer->max_data_length = 0; 720 xfer->nframes = 0; 721 xfer->max_frame_count = 0; 722 } 723 } 724 725 /*------------------------------------------------------------------------* 726 * usbd_transfer_setup - setup an array of USB transfers 727 * 728 * NOTE: You must always call "usbd_transfer_unsetup" after calling 729 * "usbd_transfer_setup" if success was returned. 730 * 731 * The idea is that the USB device driver should pre-allocate all its 732 * transfers by one call to this function. 
733 * 734 * Return values: 735 * 0: Success 736 * Else: Failure 737 *------------------------------------------------------------------------*/ 738 usb_error_t 739 usbd_transfer_setup(struct usb_device *udev, 740 const uint8_t *ifaces, struct usb_xfer **ppxfer, 741 const struct usb_config *setup_start, uint16_t n_setup, 742 void *priv_sc, struct mtx *xfer_mtx) 743 { 744 struct usb_xfer dummy; 745 struct usb_setup_params parm; 746 const struct usb_config *setup_end = setup_start + n_setup; 747 const struct usb_config *setup; 748 struct usb_endpoint *ep; 749 struct usb_xfer_root *info; 750 struct usb_xfer *xfer; 751 void *buf = NULL; 752 uint16_t n; 753 uint16_t refcount; 754 755 parm.err = 0; 756 refcount = 0; 757 info = NULL; 758 759 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 760 "usbd_transfer_setup can sleep!"); 761 762 /* do some checking first */ 763 764 if (n_setup == 0) { 765 DPRINTFN(6, "setup array has zero length!\n"); 766 return (USB_ERR_INVAL); 767 } 768 if (ifaces == 0) { 769 DPRINTFN(6, "ifaces array is NULL!\n"); 770 return (USB_ERR_INVAL); 771 } 772 if (xfer_mtx == NULL) { 773 DPRINTFN(6, "using global lock\n"); 774 xfer_mtx = &Giant; 775 } 776 /* sanity checks */ 777 for (setup = setup_start, n = 0; 778 setup != setup_end; setup++, n++) { 779 if (setup->bufsize == (usb_frlength_t)-1) { 780 parm.err = USB_ERR_BAD_BUFSIZE; 781 DPRINTF("invalid bufsize\n"); 782 } 783 if (setup->callback == NULL) { 784 parm.err = USB_ERR_NO_CALLBACK; 785 DPRINTF("no callback\n"); 786 } 787 ppxfer[n] = NULL; 788 } 789 790 if (parm.err) { 791 goto done; 792 } 793 bzero(&parm, sizeof(parm)); 794 795 parm.udev = udev; 796 parm.speed = usbd_get_speed(udev); 797 parm.hc_max_packet_count = 1; 798 799 if (parm.speed >= USB_SPEED_MAX) { 800 parm.err = USB_ERR_INVAL; 801 goto done; 802 } 803 /* setup all transfers */ 804 805 while (1) { 806 807 if (buf) { 808 /* 809 * Initialize the "usb_xfer_root" structure, 810 * which is common for all our USB transfers. 811 */ 812 info = USB_ADD_BYTES(buf, 0); 813 814 info->memory_base = buf; 815 info->memory_size = parm.size[0]; 816 817 #if USB_HAVE_BUSDMA 818 info->dma_page_cache_start = USB_ADD_BYTES(buf, parm.size[4]); 819 info->dma_page_cache_end = USB_ADD_BYTES(buf, parm.size[5]); 820 #endif 821 info->xfer_page_cache_start = USB_ADD_BYTES(buf, parm.size[5]); 822 info->xfer_page_cache_end = USB_ADD_BYTES(buf, parm.size[2]); 823 824 cv_init(&info->cv_drain, "WDRAIN"); 825 826 info->xfer_mtx = xfer_mtx; 827 #if USB_HAVE_BUSDMA 828 usb_dma_tag_setup(&info->dma_parent_tag, 829 parm.dma_tag_p, udev->bus->dma_parent_tag[0].tag, 830 xfer_mtx, &usb_bdma_done_event, 32, parm.dma_tag_max); 831 #endif 832 833 info->bus = udev->bus; 834 info->udev = udev; 835 836 TAILQ_INIT(&info->done_q.head); 837 info->done_q.command = &usbd_callback_wrapper; 838 #if USB_HAVE_BUSDMA 839 TAILQ_INIT(&info->dma_q.head); 840 info->dma_q.command = &usb_bdma_work_loop; 841 #endif 842 info->done_m[0].hdr.pm_callback = &usb_callback_proc; 843 info->done_m[0].xroot = info; 844 info->done_m[1].hdr.pm_callback = &usb_callback_proc; 845 info->done_m[1].xroot = info; 846 847 /* 848 * In device side mode control endpoint 849 * requests need to run from a separate 850 * context, else there is a chance of 851 * deadlock! 
852 */ 853 if (setup_start == usb_control_ep_cfg) 854 info->done_p = 855 &udev->bus->control_xfer_proc; 856 else if (xfer_mtx == &Giant) 857 info->done_p = 858 &udev->bus->giant_callback_proc; 859 else 860 info->done_p = 861 &udev->bus->non_giant_callback_proc; 862 } 863 /* reset sizes */ 864 865 parm.size[0] = 0; 866 parm.buf = buf; 867 parm.size[0] += sizeof(info[0]); 868 869 for (setup = setup_start, n = 0; 870 setup != setup_end; setup++, n++) { 871 872 /* skip USB transfers without callbacks: */ 873 if (setup->callback == NULL) { 874 continue; 875 } 876 /* see if there is a matching endpoint */ 877 ep = usbd_get_endpoint(udev, 878 ifaces[setup->if_index], setup); 879 880 if ((ep == NULL) || (ep->methods == NULL)) { 881 if (setup->flags.no_pipe_ok) 882 continue; 883 if ((setup->usb_mode != USB_MODE_DUAL) && 884 (setup->usb_mode != udev->flags.usb_mode)) 885 continue; 886 parm.err = USB_ERR_NO_PIPE; 887 goto done; 888 } 889 890 /* align data properly */ 891 parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1)); 892 893 /* store current setup pointer */ 894 parm.curr_setup = setup; 895 896 if (buf) { 897 /* 898 * Common initialization of the 899 * "usb_xfer" structure. 900 */ 901 xfer = USB_ADD_BYTES(buf, parm.size[0]); 902 xfer->address = udev->address; 903 xfer->priv_sc = priv_sc; 904 xfer->xroot = info; 905 906 usb_callout_init_mtx(&xfer->timeout_handle, 907 &udev->bus->bus_mtx, 0); 908 } else { 909 /* 910 * Setup a dummy xfer, hence we are 911 * writing to the "usb_xfer" 912 * structure pointed to by "xfer" 913 * before we have allocated any 914 * memory: 915 */ 916 xfer = &dummy; 917 bzero(&dummy, sizeof(dummy)); 918 refcount++; 919 } 920 921 /* set transfer endpoint pointer */ 922 xfer->endpoint = ep; 923 924 parm.size[0] += sizeof(xfer[0]); 925 parm.methods = xfer->endpoint->methods; 926 parm.curr_xfer = xfer; 927 928 /* 929 * Call the Host or Device controller transfer 930 * setup routine: 931 */ 932 (udev->bus->methods->xfer_setup) (&parm); 933 934 /* check for error */ 935 if (parm.err) 936 goto done; 937 938 if (buf) { 939 /* 940 * Increment the endpoint refcount. This 941 * basically prevents setting a new 942 * configuration and alternate setting 943 * when USB transfers are in use on 944 * the given interface. Search the USB 945 * code for "endpoint->refcount_alloc" if you 946 * want more information. 947 */ 948 USB_BUS_LOCK(info->bus); 949 if (xfer->endpoint->refcount_alloc >= USB_EP_REF_MAX) 950 parm.err = USB_ERR_INVAL; 951 952 xfer->endpoint->refcount_alloc++; 953 954 if (xfer->endpoint->refcount_alloc == 0) 955 panic("usbd_transfer_setup(): Refcount wrapped to zero\n"); 956 USB_BUS_UNLOCK(info->bus); 957 958 /* 959 * Whenever we set ppxfer[] then we 960 * also need to increment the 961 * "setup_refcount": 962 */ 963 info->setup_refcount++; 964 965 /* 966 * Transfer is successfully setup and 967 * can be used: 968 */ 969 ppxfer[n] = xfer; 970 } 971 972 /* check for error */ 973 if (parm.err) 974 goto done; 975 } 976 977 if (buf || parm.err) { 978 goto done; 979 } 980 if (refcount == 0) { 981 /* no transfers - nothing to do ! */ 982 goto done; 983 } 984 /* align data properly */ 985 parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1)); 986 987 /* store offset temporarily */ 988 parm.size[1] = parm.size[0]; 989 990 /* 991 * The number of DMA tags required depends on 992 * the number of endpoints. The current estimate 993 * for maximum number of DMA tags per endpoint 994 * is two. 
995 */ 996 parm.dma_tag_max += 2 * MIN(n_setup, USB_EP_MAX); 997 998 /* 999 * DMA tags for QH, TD, Data and more. 1000 */ 1001 parm.dma_tag_max += 8; 1002 1003 parm.dma_tag_p += parm.dma_tag_max; 1004 1005 parm.size[0] += ((uint8_t *)parm.dma_tag_p) - 1006 ((uint8_t *)0); 1007 1008 /* align data properly */ 1009 parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1)); 1010 1011 /* store offset temporarily */ 1012 parm.size[3] = parm.size[0]; 1013 1014 parm.size[0] += ((uint8_t *)parm.dma_page_ptr) - 1015 ((uint8_t *)0); 1016 1017 /* align data properly */ 1018 parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1)); 1019 1020 /* store offset temporarily */ 1021 parm.size[4] = parm.size[0]; 1022 1023 parm.size[0] += ((uint8_t *)parm.dma_page_cache_ptr) - 1024 ((uint8_t *)0); 1025 1026 /* store end offset temporarily */ 1027 parm.size[5] = parm.size[0]; 1028 1029 parm.size[0] += ((uint8_t *)parm.xfer_page_cache_ptr) - 1030 ((uint8_t *)0); 1031 1032 /* store end offset temporarily */ 1033 1034 parm.size[2] = parm.size[0]; 1035 1036 /* align data properly */ 1037 parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1)); 1038 1039 parm.size[6] = parm.size[0]; 1040 1041 parm.size[0] += ((uint8_t *)parm.xfer_length_ptr) - 1042 ((uint8_t *)0); 1043 1044 /* align data properly */ 1045 parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1)); 1046 1047 /* allocate zeroed memory */ 1048 buf = malloc(parm.size[0], M_USB, M_WAITOK | M_ZERO); 1049 1050 if (buf == NULL) { 1051 parm.err = USB_ERR_NOMEM; 1052 DPRINTFN(0, "cannot allocate memory block for " 1053 "configuration (%d bytes)\n", 1054 parm.size[0]); 1055 goto done; 1056 } 1057 parm.dma_tag_p = USB_ADD_BYTES(buf, parm.size[1]); 1058 parm.dma_page_ptr = USB_ADD_BYTES(buf, parm.size[3]); 1059 parm.dma_page_cache_ptr = USB_ADD_BYTES(buf, parm.size[4]); 1060 parm.xfer_page_cache_ptr = USB_ADD_BYTES(buf, parm.size[5]); 1061 parm.xfer_length_ptr = USB_ADD_BYTES(buf, parm.size[6]); 1062 } 1063 1064 done: 1065 if (buf) { 1066 if (info->setup_refcount == 0) { 1067 /* 1068 * "usbd_transfer_unsetup_sub" will unlock 1069 * the bus mutex before returning ! 
1070 */ 1071 USB_BUS_LOCK(info->bus); 1072 1073 /* something went wrong */ 1074 usbd_transfer_unsetup_sub(info, 0); 1075 } 1076 } 1077 if (parm.err) { 1078 usbd_transfer_unsetup(ppxfer, n_setup); 1079 } 1080 return (parm.err); 1081 } 1082 1083 /*------------------------------------------------------------------------* 1084 * usbd_transfer_unsetup_sub - factored out code 1085 *------------------------------------------------------------------------*/ 1086 static void 1087 usbd_transfer_unsetup_sub(struct usb_xfer_root *info, uint8_t needs_delay) 1088 { 1089 struct usb_page_cache *pc; 1090 1091 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED); 1092 1093 /* wait for any outstanding DMA operations */ 1094 1095 if (needs_delay) { 1096 usb_timeout_t temp; 1097 temp = usbd_get_dma_delay(info->bus); 1098 usb_pause_mtx(&info->bus->bus_mtx, 1099 USB_MS_TO_TICKS(temp)); 1100 } 1101 1102 /* make sure that our done messages are not queued anywhere */ 1103 usb_proc_mwait(info->done_p, &info->done_m[0], &info->done_m[1]); 1104 1105 USB_BUS_UNLOCK(info->bus); 1106 1107 #if USB_HAVE_BUSDMA 1108 /* free DMA'able memory, if any */ 1109 pc = info->dma_page_cache_start; 1110 while (pc != info->dma_page_cache_end) { 1111 usb_pc_free_mem(pc); 1112 pc++; 1113 } 1114 1115 /* free DMA maps in all "xfer->frbuffers" */ 1116 pc = info->xfer_page_cache_start; 1117 while (pc != info->xfer_page_cache_end) { 1118 usb_pc_dmamap_destroy(pc); 1119 pc++; 1120 } 1121 1122 /* free all DMA tags */ 1123 usb_dma_tag_unsetup(&info->dma_parent_tag); 1124 #endif 1125 1126 cv_destroy(&info->cv_drain); 1127 1128 /* 1129 * free the "memory_base" last, hence the "info" structure is 1130 * contained within the "memory_base"! 1131 */ 1132 free(info->memory_base, M_USB); 1133 } 1134 1135 /*------------------------------------------------------------------------* 1136 * usbd_transfer_unsetup - unsetup/free an array of USB transfers 1137 * 1138 * NOTE: All USB transfers in progress will get called back passing 1139 * the error code "USB_ERR_CANCELLED" before this function 1140 * returns. 1141 *------------------------------------------------------------------------*/ 1142 void 1143 usbd_transfer_unsetup(struct usb_xfer **pxfer, uint16_t n_setup) 1144 { 1145 struct usb_xfer *xfer; 1146 struct usb_xfer_root *info; 1147 uint8_t needs_delay = 0; 1148 1149 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 1150 "usbd_transfer_unsetup can sleep!"); 1151 1152 while (n_setup--) { 1153 xfer = pxfer[n_setup]; 1154 1155 if (xfer == NULL) 1156 continue; 1157 1158 info = xfer->xroot; 1159 1160 USB_XFER_LOCK(xfer); 1161 USB_BUS_LOCK(info->bus); 1162 1163 /* 1164 * HINT: when you start/stop a transfer, it might be a 1165 * good idea to directly use the "pxfer[]" structure: 1166 * 1167 * usbd_transfer_start(sc->pxfer[0]); 1168 * usbd_transfer_stop(sc->pxfer[0]); 1169 * 1170 * That way, if your code has many parts that will not 1171 * stop running under the same lock, in other words 1172 * "xfer_mtx", the usbd_transfer_start and 1173 * usbd_transfer_stop functions will simply return 1174 * when they detect a NULL pointer argument. 
		 *
		 * To avoid any races we clear the "pxfer[]" pointer
		 * while holding the private mutex of the driver:
		 */
		pxfer[n_setup] = NULL;

		USB_BUS_UNLOCK(info->bus);
		USB_XFER_UNLOCK(xfer);

		usbd_transfer_drain(xfer);

#if USB_HAVE_BUSDMA
		if (xfer->flags_int.bdma_enable)
			needs_delay = 1;
#endif
		/*
		 * NOTE: default endpoint does not have an
		 * interface, even if endpoint->iface_index == 0
		 */
		USB_BUS_LOCK(info->bus);
		xfer->endpoint->refcount_alloc--;
		USB_BUS_UNLOCK(info->bus);

		usb_callout_drain(&xfer->timeout_handle);

		USB_BUS_LOCK(info->bus);

		USB_ASSERT(info->setup_refcount != 0, ("Invalid setup "
		    "reference count\n"));

		info->setup_refcount--;

		if (info->setup_refcount == 0) {
			usbd_transfer_unsetup_sub(info,
			    needs_delay);
		} else {
			USB_BUS_UNLOCK(info->bus);
		}
	}
}

/*------------------------------------------------------------------------*
 *	usbd_control_transfer_init - factored out code
 *
 * In USB Device Mode we have to wait for the SETUP packet which
 * contains the "struct usb_device_request" structure, before we can
 * transfer any data. In USB Host Mode we already have the SETUP
 * packet at the moment the USB transfer is started. This leads us to
 * having to setup the USB transfer at two different places in
 * time. This function just contains factored out control transfer
 * initialisation code, so that we don't duplicate the code.
 *------------------------------------------------------------------------*/
static void
usbd_control_transfer_init(struct usb_xfer *xfer)
{
	struct usb_device_request req;

	/* copy out the USB request header */

	usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));

	/* setup remainder */

	xfer->flags_int.control_rem = UGETW(req.wLength);

	/* copy direction to endpoint variable */

	xfer->endpointno &= ~(UE_DIR_IN | UE_DIR_OUT);
	xfer->endpointno |=
	    (req.bmRequestType & UT_READ) ? UE_DIR_IN : UE_DIR_OUT;
}

/*------------------------------------------------------------------------*
 *	usbd_setup_ctrl_transfer
 *
 * This function handles initialisation of control transfers. Control
 * transfers are special in that they can both transmit and receive
 * data.
 *
 * Return values:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
static int
usbd_setup_ctrl_transfer(struct usb_xfer *xfer)
{
	usb_frlength_t len;

	/* Check for control endpoint stall */
	if (xfer->flags.stall_pipe && xfer->flags_int.control_act) {
		/* the control transfer is no longer active */
		xfer->flags_int.control_stall = 1;
		xfer->flags_int.control_act = 0;
	} else {
		/* don't stall control transfer by default */
		xfer->flags_int.control_stall = 0;
	}

	/* Check for invalid number of frames */
	if (xfer->nframes > 2) {
		/*
		 * If you need to split a control transfer, you
		 * have to do one part at a time. Only non-control
		 * transfers can be done in multiple parts at a
		 * time.
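		 *
		 * In other words, a control transfer carries at most
		 * two frames: frame 0 holds the 8-byte SETUP request
		 * and the optional frame 1 holds the DATA stage. A
		 * caller typically fills in the frames roughly like
		 * this (sketch only; "req", "data" and "len" are
		 * hypothetical):
		 *
		 *	usbd_xfer_set_frame_len(xfer, 0, sizeof(req));
		 *	usbd_xfer_set_frame_data(xfer, 1, data, len);
		 *	usbd_xfer_set_frames(xfer, 2);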
1280 */ 1281 DPRINTFN(0, "Too many frames: %u\n", 1282 (unsigned int)xfer->nframes); 1283 goto error; 1284 } 1285 1286 /* 1287 * Check if there is a control 1288 * transfer in progress: 1289 */ 1290 if (xfer->flags_int.control_act) { 1291 1292 if (xfer->flags_int.control_hdr) { 1293 1294 /* clear send header flag */ 1295 1296 xfer->flags_int.control_hdr = 0; 1297 1298 /* setup control transfer */ 1299 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) { 1300 usbd_control_transfer_init(xfer); 1301 } 1302 } 1303 /* get data length */ 1304 1305 len = xfer->sumlen; 1306 1307 } else { 1308 1309 /* the size of the SETUP structure is hardcoded ! */ 1310 1311 if (xfer->frlengths[0] != sizeof(struct usb_device_request)) { 1312 DPRINTFN(0, "Wrong framelength %u != %zu\n", 1313 xfer->frlengths[0], sizeof(struct 1314 usb_device_request)); 1315 goto error; 1316 } 1317 /* check USB mode */ 1318 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) { 1319 1320 /* check number of frames */ 1321 if (xfer->nframes != 1) { 1322 /* 1323 * We need to receive the setup 1324 * message first so that we know the 1325 * data direction! 1326 */ 1327 DPRINTF("Misconfigured transfer\n"); 1328 goto error; 1329 } 1330 /* 1331 * Set a dummy "control_rem" value. This 1332 * variable will be overwritten later by a 1333 * call to "usbd_control_transfer_init()" ! 1334 */ 1335 xfer->flags_int.control_rem = 0xFFFF; 1336 } else { 1337 1338 /* setup "endpoint" and "control_rem" */ 1339 1340 usbd_control_transfer_init(xfer); 1341 } 1342 1343 /* set transfer-header flag */ 1344 1345 xfer->flags_int.control_hdr = 1; 1346 1347 /* get data length */ 1348 1349 len = (xfer->sumlen - sizeof(struct usb_device_request)); 1350 } 1351 1352 /* check if there is a length mismatch */ 1353 1354 if (len > xfer->flags_int.control_rem) { 1355 DPRINTFN(0, "Length (%d) greater than " 1356 "remaining length (%d)\n", len, 1357 xfer->flags_int.control_rem); 1358 goto error; 1359 } 1360 /* check if we are doing a short transfer */ 1361 1362 if (xfer->flags.force_short_xfer) { 1363 xfer->flags_int.control_rem = 0; 1364 } else { 1365 if ((len != xfer->max_data_length) && 1366 (len != xfer->flags_int.control_rem) && 1367 (xfer->nframes != 1)) { 1368 DPRINTFN(0, "Short control transfer without " 1369 "force_short_xfer set\n"); 1370 goto error; 1371 } 1372 xfer->flags_int.control_rem -= len; 1373 } 1374 1375 /* the status part is executed when "control_act" is 0 */ 1376 1377 if ((xfer->flags_int.control_rem > 0) || 1378 (xfer->flags.manual_status)) { 1379 /* don't execute the STATUS stage yet */ 1380 xfer->flags_int.control_act = 1; 1381 1382 /* sanity check */ 1383 if ((!xfer->flags_int.control_hdr) && 1384 (xfer->nframes == 1)) { 1385 /* 1386 * This is not a valid operation! 1387 */ 1388 DPRINTFN(0, "Invalid parameter " 1389 "combination\n"); 1390 goto error; 1391 } 1392 } else { 1393 /* time to execute the STATUS stage */ 1394 xfer->flags_int.control_act = 0; 1395 } 1396 return (0); /* success */ 1397 1398 error: 1399 return (1); /* failure */ 1400 } 1401 1402 /*------------------------------------------------------------------------* 1403 * usbd_transfer_submit - start USB hardware for the given transfer 1404 * 1405 * This function should only be called from the USB callback. 
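 *
 * For illustration, a driver callback typically (re)submits the
 * transfer from its setup state roughly like this; the single-frame
 * layout is an assumption:
 *
 *	case USB_ST_SETUP:
 *		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
 *		usbd_transfer_submit(xfer);
 *		break;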
1406 *------------------------------------------------------------------------*/ 1407 void 1408 usbd_transfer_submit(struct usb_xfer *xfer) 1409 { 1410 struct usb_xfer_root *info; 1411 struct usb_bus *bus; 1412 usb_frcount_t x; 1413 1414 info = xfer->xroot; 1415 bus = info->bus; 1416 1417 DPRINTF("xfer=%p, endpoint=%p, nframes=%d, dir=%s\n", 1418 xfer, xfer->endpoint, xfer->nframes, USB_GET_DATA_ISREAD(xfer) ? 1419 "read" : "write"); 1420 1421 #if USB_DEBUG 1422 if (USB_DEBUG_VAR > 0) { 1423 USB_BUS_LOCK(bus); 1424 1425 usb_dump_endpoint(xfer->endpoint); 1426 1427 USB_BUS_UNLOCK(bus); 1428 } 1429 #endif 1430 1431 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 1432 USB_BUS_LOCK_ASSERT(bus, MA_NOTOWNED); 1433 1434 /* Only open the USB transfer once! */ 1435 if (!xfer->flags_int.open) { 1436 xfer->flags_int.open = 1; 1437 1438 DPRINTF("open\n"); 1439 1440 USB_BUS_LOCK(bus); 1441 (xfer->endpoint->methods->open) (xfer); 1442 USB_BUS_UNLOCK(bus); 1443 } 1444 /* set "transferring" flag */ 1445 xfer->flags_int.transferring = 1; 1446 1447 #if USB_HAVE_POWERD 1448 /* increment power reference */ 1449 usbd_transfer_power_ref(xfer, 1); 1450 #endif 1451 /* 1452 * Check if the transfer is waiting on a queue, most 1453 * frequently the "done_q": 1454 */ 1455 if (xfer->wait_queue) { 1456 USB_BUS_LOCK(bus); 1457 usbd_transfer_dequeue(xfer); 1458 USB_BUS_UNLOCK(bus); 1459 } 1460 /* clear "did_dma_delay" flag */ 1461 xfer->flags_int.did_dma_delay = 0; 1462 1463 /* clear "did_close" flag */ 1464 xfer->flags_int.did_close = 0; 1465 1466 #if USB_HAVE_BUSDMA 1467 /* clear "bdma_setup" flag */ 1468 xfer->flags_int.bdma_setup = 0; 1469 #endif 1470 /* by default we cannot cancel any USB transfer immediately */ 1471 xfer->flags_int.can_cancel_immed = 0; 1472 1473 /* clear lengths and frame counts by default */ 1474 xfer->sumlen = 0; 1475 xfer->actlen = 0; 1476 xfer->aframes = 0; 1477 1478 /* clear any previous errors */ 1479 xfer->error = 0; 1480 1481 /* Check if the device is still alive */ 1482 if (info->udev->state < USB_STATE_POWERED) { 1483 USB_BUS_LOCK(bus); 1484 /* 1485 * Must return cancelled error code else 1486 * device drivers can hang. 
1487 */ 1488 usbd_transfer_done(xfer, USB_ERR_CANCELLED); 1489 USB_BUS_UNLOCK(bus); 1490 return; 1491 } 1492 1493 /* sanity check */ 1494 if (xfer->nframes == 0) { 1495 if (xfer->flags.stall_pipe) { 1496 /* 1497 * Special case - want to stall without transferring 1498 * any data: 1499 */ 1500 DPRINTF("xfer=%p nframes=0: stall " 1501 "or clear stall!\n", xfer); 1502 USB_BUS_LOCK(bus); 1503 xfer->flags_int.can_cancel_immed = 1; 1504 /* start the transfer */ 1505 usb_command_wrapper(&xfer->endpoint->endpoint_q, xfer); 1506 USB_BUS_UNLOCK(bus); 1507 return; 1508 } 1509 USB_BUS_LOCK(bus); 1510 usbd_transfer_done(xfer, USB_ERR_INVAL); 1511 USB_BUS_UNLOCK(bus); 1512 return; 1513 } 1514 /* compute total transfer length */ 1515 1516 for (x = 0; x != xfer->nframes; x++) { 1517 xfer->sumlen += xfer->frlengths[x]; 1518 if (xfer->sumlen < xfer->frlengths[x]) { 1519 /* length wrapped around */ 1520 USB_BUS_LOCK(bus); 1521 usbd_transfer_done(xfer, USB_ERR_INVAL); 1522 USB_BUS_UNLOCK(bus); 1523 return; 1524 } 1525 } 1526 1527 /* clear some internal flags */ 1528 1529 xfer->flags_int.short_xfer_ok = 0; 1530 xfer->flags_int.short_frames_ok = 0; 1531 1532 /* check if this is a control transfer */ 1533 1534 if (xfer->flags_int.control_xfr) { 1535 1536 if (usbd_setup_ctrl_transfer(xfer)) { 1537 USB_BUS_LOCK(bus); 1538 usbd_transfer_done(xfer, USB_ERR_STALLED); 1539 USB_BUS_UNLOCK(bus); 1540 return; 1541 } 1542 } 1543 /* 1544 * Setup filtered version of some transfer flags, 1545 * in case of data read direction 1546 */ 1547 if (USB_GET_DATA_ISREAD(xfer)) { 1548 1549 if (xfer->flags.short_frames_ok) { 1550 xfer->flags_int.short_xfer_ok = 1; 1551 xfer->flags_int.short_frames_ok = 1; 1552 } else if (xfer->flags.short_xfer_ok) { 1553 xfer->flags_int.short_xfer_ok = 1; 1554 1555 /* check for control transfer */ 1556 if (xfer->flags_int.control_xfr) { 1557 /* 1558 * 1) Control transfers do not support 1559 * reception of multiple short USB 1560 * frames in host mode and device side 1561 * mode, with exception of: 1562 * 1563 * 2) Due to sometimes buggy device 1564 * side firmware we need to do a 1565 * STATUS stage in case of short 1566 * control transfers in USB host mode. 1567 * The STATUS stage then becomes the 1568 * "alt_next" to the DATA stage. 
1569 */ 1570 xfer->flags_int.short_frames_ok = 1; 1571 } 1572 } 1573 } 1574 /* 1575 * Check if BUS-DMA support is enabled and try to load virtual 1576 * buffers into DMA, if any: 1577 */ 1578 #if USB_HAVE_BUSDMA 1579 if (xfer->flags_int.bdma_enable) { 1580 /* insert the USB transfer last in the BUS-DMA queue */ 1581 usb_command_wrapper(&xfer->xroot->dma_q, xfer); 1582 return; 1583 } 1584 #endif 1585 /* 1586 * Enter the USB transfer into the Host Controller or 1587 * Device Controller schedule: 1588 */ 1589 usbd_pipe_enter(xfer); 1590 } 1591 1592 /*------------------------------------------------------------------------* 1593 * usbd_pipe_enter - factored out code 1594 *------------------------------------------------------------------------*/ 1595 void 1596 usbd_pipe_enter(struct usb_xfer *xfer) 1597 { 1598 struct usb_endpoint *ep; 1599 1600 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 1601 1602 USB_BUS_LOCK(xfer->xroot->bus); 1603 1604 ep = xfer->endpoint; 1605 1606 DPRINTF("enter\n"); 1607 1608 /* enter the transfer */ 1609 (ep->methods->enter) (xfer); 1610 1611 xfer->flags_int.can_cancel_immed = 1; 1612 1613 /* check for transfer error */ 1614 if (xfer->error) { 1615 /* some error has happened */ 1616 usbd_transfer_done(xfer, 0); 1617 USB_BUS_UNLOCK(xfer->xroot->bus); 1618 return; 1619 } 1620 1621 /* start the transfer */ 1622 usb_command_wrapper(&ep->endpoint_q, xfer); 1623 USB_BUS_UNLOCK(xfer->xroot->bus); 1624 } 1625 1626 /*------------------------------------------------------------------------* 1627 * usbd_transfer_start - start an USB transfer 1628 * 1629 * NOTE: Calling this function more than one time will only 1630 * result in a single transfer start, until the USB transfer 1631 * completes. 1632 *------------------------------------------------------------------------*/ 1633 void 1634 usbd_transfer_start(struct usb_xfer *xfer) 1635 { 1636 if (xfer == NULL) { 1637 /* transfer is gone */ 1638 return; 1639 } 1640 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 1641 1642 /* mark the USB transfer started */ 1643 1644 if (!xfer->flags_int.started) { 1645 /* lock the BUS lock to avoid races updating flags_int */ 1646 USB_BUS_LOCK(xfer->xroot->bus); 1647 xfer->flags_int.started = 1; 1648 USB_BUS_UNLOCK(xfer->xroot->bus); 1649 } 1650 /* check if the USB transfer callback is already transferring */ 1651 1652 if (xfer->flags_int.transferring) { 1653 return; 1654 } 1655 USB_BUS_LOCK(xfer->xroot->bus); 1656 /* call the USB transfer callback */ 1657 usbd_callback_ss_done_defer(xfer); 1658 USB_BUS_UNLOCK(xfer->xroot->bus); 1659 } 1660 1661 /*------------------------------------------------------------------------* 1662 * usbd_transfer_stop - stop an USB transfer 1663 * 1664 * NOTE: Calling this function more than one time will only 1665 * result in a single transfer stop. 1666 * NOTE: When this function returns it is not safe to free nor 1667 * reuse any DMA buffers. See "usbd_transfer_drain()". 
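 *
 * A common teardown sketch, assuming a driver lock "sc_mtx" and a
 * transfer array "sc_xfer" (both hypothetical), is to stop the
 * transfer under the driver lock and then drain it without holding
 * that lock:
 *
 *	mtx_lock(&sc->sc_mtx);
 *	usbd_transfer_stop(sc->sc_xfer[0]);
 *	mtx_unlock(&sc->sc_mtx);
 *	usbd_transfer_drain(sc->sc_xfer[0]);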
1668 *------------------------------------------------------------------------*/ 1669 void 1670 usbd_transfer_stop(struct usb_xfer *xfer) 1671 { 1672 struct usb_endpoint *ep; 1673 1674 if (xfer == NULL) { 1675 /* transfer is gone */ 1676 return; 1677 } 1678 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 1679 1680 /* check if the USB transfer was ever opened */ 1681 1682 if (!xfer->flags_int.open) { 1683 if (xfer->flags_int.started) { 1684 /* nothing to do except clearing the "started" flag */ 1685 /* lock the BUS lock to avoid races updating flags_int */ 1686 USB_BUS_LOCK(xfer->xroot->bus); 1687 xfer->flags_int.started = 0; 1688 USB_BUS_UNLOCK(xfer->xroot->bus); 1689 } 1690 return; 1691 } 1692 /* try to stop the current USB transfer */ 1693 1694 USB_BUS_LOCK(xfer->xroot->bus); 1695 /* override any previous error */ 1696 xfer->error = USB_ERR_CANCELLED; 1697 1698 /* 1699 * Clear "open" and "started" when both private and USB lock 1700 * is locked so that we don't get a race updating "flags_int" 1701 */ 1702 xfer->flags_int.open = 0; 1703 xfer->flags_int.started = 0; 1704 1705 /* 1706 * Check if we can cancel the USB transfer immediately. 1707 */ 1708 if (xfer->flags_int.transferring) { 1709 if (xfer->flags_int.can_cancel_immed && 1710 (!xfer->flags_int.did_close)) { 1711 DPRINTF("close\n"); 1712 /* 1713 * The following will lead to an USB_ERR_CANCELLED 1714 * error code being passed to the USB callback. 1715 */ 1716 (xfer->endpoint->methods->close) (xfer); 1717 /* only close once */ 1718 xfer->flags_int.did_close = 1; 1719 } else { 1720 /* need to wait for the next done callback */ 1721 } 1722 } else { 1723 DPRINTF("close\n"); 1724 1725 /* close here and now */ 1726 (xfer->endpoint->methods->close) (xfer); 1727 1728 /* 1729 * Any additional DMA delay is done by 1730 * "usbd_transfer_unsetup()". 1731 */ 1732 1733 /* 1734 * Special case. Check if we need to restart a blocked 1735 * endpoint. 1736 */ 1737 ep = xfer->endpoint; 1738 1739 /* 1740 * If the current USB transfer is completing we need 1741 * to start the next one: 1742 */ 1743 if (ep->endpoint_q.curr == xfer) { 1744 usb_command_wrapper(&ep->endpoint_q, NULL); 1745 } 1746 } 1747 1748 USB_BUS_UNLOCK(xfer->xroot->bus); 1749 } 1750 1751 /*------------------------------------------------------------------------* 1752 * usbd_transfer_pending 1753 * 1754 * This function will check if an USB transfer is pending which is a 1755 * little bit complicated! 1756 * Return values: 1757 * 0: Not pending 1758 * 1: Pending: The USB transfer will receive a callback in the future. 
1759 *------------------------------------------------------------------------*/ 1760 uint8_t 1761 usbd_transfer_pending(struct usb_xfer *xfer) 1762 { 1763 struct usb_xfer_root *info; 1764 struct usb_xfer_queue *pq; 1765 1766 if (xfer == NULL) { 1767 /* transfer is gone */ 1768 return (0); 1769 } 1770 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 1771 1772 if (xfer->flags_int.transferring) { 1773 /* trivial case */ 1774 return (1); 1775 } 1776 USB_BUS_LOCK(xfer->xroot->bus); 1777 if (xfer->wait_queue) { 1778 /* we are waiting on a queue somewhere */ 1779 USB_BUS_UNLOCK(xfer->xroot->bus); 1780 return (1); 1781 } 1782 info = xfer->xroot; 1783 pq = &info->done_q; 1784 1785 if (pq->curr == xfer) { 1786 /* we are currently scheduled for callback */ 1787 USB_BUS_UNLOCK(xfer->xroot->bus); 1788 return (1); 1789 } 1790 /* we are not pending */ 1791 USB_BUS_UNLOCK(xfer->xroot->bus); 1792 return (0); 1793 } 1794 1795 /*------------------------------------------------------------------------* 1796 * usbd_transfer_drain 1797 * 1798 * This function will stop the USB transfer and wait for any 1799 * additional BUS-DMA and HW-DMA operations to complete. Buffers that 1800 * are loaded into DMA can safely be freed or reused after that this 1801 * function has returned. 1802 *------------------------------------------------------------------------*/ 1803 void 1804 usbd_transfer_drain(struct usb_xfer *xfer) 1805 { 1806 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 1807 "usbd_transfer_drain can sleep!"); 1808 1809 if (xfer == NULL) { 1810 /* transfer is gone */ 1811 return; 1812 } 1813 if (xfer->xroot->xfer_mtx != &Giant) { 1814 USB_XFER_LOCK_ASSERT(xfer, MA_NOTOWNED); 1815 } 1816 USB_XFER_LOCK(xfer); 1817 1818 usbd_transfer_stop(xfer); 1819 1820 while (usbd_transfer_pending(xfer) || 1821 xfer->flags_int.doing_callback) { 1822 1823 /* 1824 * It is allowed that the callback can drop its 1825 * transfer mutex. In that case checking only 1826 * "usbd_transfer_pending()" is not enough to tell if 1827 * the USB transfer is fully drained. We also need to 1828 * check the internal "doing_callback" flag. 1829 */ 1830 xfer->flags_int.draining = 1; 1831 1832 /* 1833 * Wait until the current outstanding USB 1834 * transfer is complete ! 1835 */ 1836 cv_wait(&xfer->xroot->cv_drain, xfer->xroot->xfer_mtx); 1837 } 1838 USB_XFER_UNLOCK(xfer); 1839 } 1840 1841 struct usb_page_cache * 1842 usbd_xfer_get_frame(struct usb_xfer *xfer, usb_frcount_t frindex) 1843 { 1844 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); 1845 1846 return (&xfer->frbuffers[frindex]); 1847 } 1848 1849 /*------------------------------------------------------------------------* 1850 * usbd_xfer_get_fps_shift 1851 * 1852 * The following function is only useful for isochronous transfers. It 1853 * returns how many times the frame execution rate has been shifted 1854 * down. 
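 *
 * For example, assuming a high speed bus with 8000 micro-frames per
 * second, a return value of 3 means that only every 8th micro-frame
 * is used, giving 8000 >> 3 = 1000 isochronous frames per second.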
 *
 * Return value:
 * Success: 0..3
 * Failure: 0
 *------------------------------------------------------------------------*/
uint8_t
usbd_xfer_get_fps_shift(struct usb_xfer *xfer)
{
	return (xfer->fps_shift);
}

usb_frlength_t
usbd_xfer_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex)
{
	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));

	return (xfer->frlengths[frindex]);
}

/*------------------------------------------------------------------------*
 *	usbd_xfer_set_frame_data
 *
 * This function sets the pointer of the buffer that should be loaded
 * directly into DMA for the given USB frame. Passing "ptr" equal to
 * NULL while the corresponding "frlength" is greater than zero gives
 * undefined results!
 *------------------------------------------------------------------------*/
void
usbd_xfer_set_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
    void *ptr, usb_frlength_t len)
{
	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));

	/* set virtual address to load and length */
	xfer->frbuffers[frindex].buffer = ptr;
	usbd_xfer_set_frame_len(xfer, frindex, len);
}

void
usbd_xfer_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
    void **ptr, int *len)
{
	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));

	if (ptr != NULL)
		*ptr = xfer->frbuffers[frindex].buffer;
	if (len != NULL)
		*len = xfer->frlengths[frindex];
}

void
usbd_xfer_status(struct usb_xfer *xfer, int *actlen, int *sumlen, int *aframes,
    int *nframes)
{
	if (actlen != NULL)
		*actlen = xfer->actlen;
	if (sumlen != NULL)
		*sumlen = xfer->sumlen;
	if (aframes != NULL)
		*aframes = xfer->aframes;
	if (nframes != NULL)
		*nframes = xfer->nframes;
}

/*------------------------------------------------------------------------*
 *	usbd_xfer_set_frame_offset
 *
 * This function sets the frame data buffer offset relative to the
 * beginning of the USB DMA buffer allocated for this USB transfer.
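 *
 * For example, the control transfer setup code in
 * usbd_transfer_setup_sub() above uses this to place the DATA stage
 * buffer right after the 8-byte SETUP request within the local
 * buffer:
 *
 *	usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);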
1924 *------------------------------------------------------------------------*/ 1925 void 1926 usbd_xfer_set_frame_offset(struct usb_xfer *xfer, usb_frlength_t offset, 1927 usb_frcount_t frindex) 1928 { 1929 KASSERT(!xfer->flags.ext_buffer, ("Cannot offset data frame " 1930 "when the USB buffer is external\n")); 1931 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); 1932 1933 /* set virtual address to load */ 1934 xfer->frbuffers[frindex].buffer = 1935 USB_ADD_BYTES(xfer->local_buffer, offset); 1936 } 1937 1938 void 1939 usbd_xfer_set_interval(struct usb_xfer *xfer, int i) 1940 { 1941 xfer->interval = i; 1942 } 1943 1944 void 1945 usbd_xfer_set_timeout(struct usb_xfer *xfer, int t) 1946 { 1947 xfer->timeout = t; 1948 } 1949 1950 void 1951 usbd_xfer_set_frames(struct usb_xfer *xfer, usb_frcount_t n) 1952 { 1953 xfer->nframes = n; 1954 } 1955 1956 usb_frcount_t 1957 usbd_xfer_max_frames(struct usb_xfer *xfer) 1958 { 1959 return (xfer->max_frame_count); 1960 } 1961 1962 usb_frlength_t 1963 usbd_xfer_max_len(struct usb_xfer *xfer) 1964 { 1965 return (xfer->max_data_length); 1966 } 1967 1968 usb_frlength_t 1969 usbd_xfer_max_framelen(struct usb_xfer *xfer) 1970 { 1971 return (xfer->max_frame_size); 1972 } 1973 1974 void 1975 usbd_xfer_set_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex, 1976 usb_frlength_t len) 1977 { 1978 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); 1979 1980 xfer->frlengths[frindex] = len; 1981 } 1982 1983 /*------------------------------------------------------------------------* 1984 * usb_callback_proc - factored out code 1985 * 1986 * This function performs USB callbacks. 1987 *------------------------------------------------------------------------*/ 1988 static void 1989 usb_callback_proc(struct usb_proc_msg *_pm) 1990 { 1991 struct usb_done_msg *pm = (void *)_pm; 1992 struct usb_xfer_root *info = pm->xroot; 1993 1994 /* Change locking order */ 1995 USB_BUS_UNLOCK(info->bus); 1996 1997 /* 1998 * We exploit the fact that the mutex is the same for all 1999 * callbacks that will be called from this thread: 2000 */ 2001 mtx_lock(info->xfer_mtx); 2002 USB_BUS_LOCK(info->bus); 2003 2004 /* Continue where we lost track */ 2005 usb_command_wrapper(&info->done_q, 2006 info->done_q.curr); 2007 2008 mtx_unlock(info->xfer_mtx); 2009 } 2010 2011 /*------------------------------------------------------------------------* 2012 * usbd_callback_ss_done_defer 2013 * 2014 * This function will defer the start, stop and done callback to the 2015 * correct thread. 2016 *------------------------------------------------------------------------*/ 2017 static void 2018 usbd_callback_ss_done_defer(struct usb_xfer *xfer) 2019 { 2020 struct usb_xfer_root *info = xfer->xroot; 2021 struct usb_xfer_queue *pq = &info->done_q; 2022 2023 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED); 2024 2025 if (pq->curr != xfer) { 2026 usbd_transfer_enqueue(pq, xfer); 2027 } 2028 if (!pq->recurse_1) { 2029 2030 /* 2031 * We have to postpone the callback due to the fact we 2032 * will have a Lock Order Reversal, LOR, if we try to 2033 * proceed ! 2034 */ 2035 if (usb_proc_msignal(info->done_p, 2036 &info->done_m[0], &info->done_m[1])) { 2037 /* ignore */ 2038 } 2039 } else { 2040 /* clear second recurse flag */ 2041 pq->recurse_2 = 0; 2042 } 2043 return; 2044 2045 } 2046 2047 /*------------------------------------------------------------------------* 2048 * usbd_callback_wrapper 2049 * 2050 * This is a wrapper for USB callbacks. 
This wrapper does some
2051 * auto-magic things like figuring out if we can call the
2052 * callback directly from the current context or if we need to
2053 * wake up the interrupt process.
2054 *------------------------------------------------------------------------*/
2055 static void
2056 usbd_callback_wrapper(struct usb_xfer_queue *pq)
2057 {
2058 struct usb_xfer *xfer = pq->curr;
2059 struct usb_xfer_root *info = xfer->xroot;
2060
2061 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
2062 if (!mtx_owned(info->xfer_mtx)) {
2063 /*
2064 * Cases that end up here:
2065 *
2066 * 5) HW interrupt done callback or other source.
2067 */
2068 DPRINTFN(3, "case 5\n");
2069
2070 /*
2071 * We have to postpone the callback due to the fact we
2072 * will have a Lock Order Reversal, LOR, if we try to
2073 * proceed !
2074 */
2075 if (usb_proc_msignal(info->done_p,
2076 &info->done_m[0], &info->done_m[1])) {
2077 /* ignore */
2078 }
2079 return;
2080 }
2081 /*
2082 * Cases that end up here:
2083 *
2084 * 1) We are starting a transfer
2085 * 2) We are prematurely calling back a transfer
2086 * 3) We are stopping a transfer
2087 * 4) We are doing an ordinary callback
2088 */
2089 DPRINTFN(3, "case 1-4\n");
2090 /* get next USB transfer in the queue */
2091 info->done_q.curr = NULL;
2092
2093 /* set flag in case of drain */
2094 xfer->flags_int.doing_callback = 1;
2095
2096 USB_BUS_UNLOCK(info->bus);
2097 USB_BUS_LOCK_ASSERT(info->bus, MA_NOTOWNED);
2098
2099 /* set correct USB state for callback */
2100 if (!xfer->flags_int.transferring) {
2101 xfer->usb_state = USB_ST_SETUP;
2102 if (!xfer->flags_int.started) {
2103 /* we got stopped before we even got started */
2104 USB_BUS_LOCK(info->bus);
2105 goto done;
2106 }
2107 } else {
2108
2109 if (usbd_callback_wrapper_sub(xfer)) {
2110 /* the callback has been deferred */
2111 USB_BUS_LOCK(info->bus);
2112 goto done;
2113 }
2114 #if USB_HAVE_POWERD
2115 /* decrement power reference */
2116 usbd_transfer_power_ref(xfer, -1);
2117 #endif
2118 xfer->flags_int.transferring = 0;
2119
2120 if (xfer->error) {
2121 xfer->usb_state = USB_ST_ERROR;
2122 } else {
2123 /* set transferred state */
2124 xfer->usb_state = USB_ST_TRANSFERRED;
2125 #if USB_HAVE_BUSDMA
2126 /* sync DMA memory, if any */
2127 if (xfer->flags_int.bdma_enable &&
2128 (!xfer->flags_int.bdma_no_post_sync)) {
2129 usb_bdma_post_sync(xfer);
2130 }
2131 #endif
2132 }
2133 }
2134
2135 /* call processing routine */
2136 (xfer->callback) (xfer, xfer->error);
2137
2138 /* pick up the USB mutex again */
2139 USB_BUS_LOCK(info->bus);
2140
2141 /*
2142 * Check if we got started after we got cancelled, but
2143 * before we managed to do the callback.
2144 */
2145 if ((!xfer->flags_int.open) &&
2146 (xfer->flags_int.started) &&
2147 (xfer->usb_state == USB_ST_ERROR)) {
2148 /* clear flag in case of drain */
2149 xfer->flags_int.doing_callback = 0;
2150 /* try to loop, but not recursively */
2151 usb_command_wrapper(&info->done_q, xfer);
2152 return;
2153 }
2154
2155 done:
2156 /* clear flag in case of drain */
2157 xfer->flags_int.doing_callback = 0;
2158
2159 /*
2160 * Check if we are draining.
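 * If so, the callback has completed and a thread sleeping in
 * "usbd_transfer_drain()" can be woken up below.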
2161 */
2162 if (xfer->flags_int.draining &&
2163 (!xfer->flags_int.transferring)) {
2164 /* "usbd_transfer_drain()" is waiting for end of transfer */
2165 xfer->flags_int.draining = 0;
2166 cv_broadcast(&info->cv_drain);
2167 }
2168
2169 /* do the next callback, if any */
2170 usb_command_wrapper(&info->done_q,
2171 info->done_q.curr);
2172 }
2173
2174 /*------------------------------------------------------------------------*
2175 * usb_dma_delay_done_cb
2176 *
2177 * This function is called when the DMA delay has been executed, and
2178 * will make sure that the callback is called to complete the USB
2179 * transfer. This code path is usually only used when there is an USB
2180 * error like USB_ERR_CANCELLED.
2181 *------------------------------------------------------------------------*/
2182 static void
2183 usb_dma_delay_done_cb(void *arg)
2184 {
2185 struct usb_xfer *xfer = arg;
2186
2187 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2188
2189 DPRINTFN(3, "Completed %p\n", xfer);
2190
2191 /* queue callback for execution, again */
2192 usbd_transfer_done(xfer, 0);
2193 }
2194
2195 /*------------------------------------------------------------------------*
2196 * usbd_transfer_dequeue
2197 *
2198 * - This function is used to remove an USB transfer from a USB
2199 * transfer queue.
2200 *
2201 * - This function can be called multiple times in a row.
2202 *------------------------------------------------------------------------*/
2203 void
2204 usbd_transfer_dequeue(struct usb_xfer *xfer)
2205 {
2206 struct usb_xfer_queue *pq;
2207
2208 pq = xfer->wait_queue;
2209 if (pq) {
2210 TAILQ_REMOVE(&pq->head, xfer, wait_entry);
2211 xfer->wait_queue = NULL;
2212 }
2213 }
2214
2215 /*------------------------------------------------------------------------*
2216 * usbd_transfer_enqueue
2217 *
2218 * - This function is used to insert an USB transfer into a USB
2219 * transfer queue.
2220 *
2221 * - This function can be called multiple times in a row.
2222 *------------------------------------------------------------------------*/
2223 void
2224 usbd_transfer_enqueue(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2225 {
2226 /*
2227 * Insert the USB transfer into the queue, if it is not
2228 * already on a USB transfer queue:
2229 */
2230 if (xfer->wait_queue == NULL) {
2231 xfer->wait_queue = pq;
2232 TAILQ_INSERT_TAIL(&pq->head, xfer, wait_entry);
2233 }
2234 }
2235
2236 /*------------------------------------------------------------------------*
2237 * usbd_transfer_done
2238 *
2239 * - This function is used to remove an USB transfer from the busdma,
2240 * pipe or interrupt queue.
2241 *
2242 * - This function is used to queue the USB transfer on the done
2243 * queue.
2244 *
2245 * - This function is used to stop any USB transfer timeouts.
2246 *------------------------------------------------------------------------*/
2247 void
2248 usbd_transfer_done(struct usb_xfer *xfer, usb_error_t error)
2249 {
2250 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2251
2252 DPRINTF("err=%s\n", usbd_errstr(error));
2253
2254 /*
2255 * If we are not transferring then just return.
2256 * This can happen during transfer cancel.
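 * (No error is recorded and no callback is queued in that case;
 * only the control transfer state is reset below.)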
2257 */
2258 if (!xfer->flags_int.transferring) {
2259 DPRINTF("not transferring\n");
2260 /* end of control transfer, if any */
2261 xfer->flags_int.control_act = 0;
2262 return;
2263 }
2264 /* only set transfer error if not already set */
2265 if (!xfer->error) {
2266 xfer->error = error;
2267 }
2268 /* stop any callouts */
2269 usb_callout_stop(&xfer->timeout_handle);
2270
2271 /*
2272 * If we are waiting on a queue, just remove the USB transfer
2273 * from the queue, if any. We should have the required locks
2274 * locked to do the remove when this function is called.
2275 */
2276 usbd_transfer_dequeue(xfer);
2277
2278 #if USB_HAVE_BUSDMA
2279 if (mtx_owned(xfer->xroot->xfer_mtx)) {
2280 struct usb_xfer_queue *pq;
2281
2282 /*
2283 * If the private USB lock is not locked, then we assume
2284 * that the BUS-DMA load stage has been passed:
2285 */
2286 pq = &xfer->xroot->dma_q;
2287
2288 if (pq->curr == xfer) {
2289 /* start the next BUS-DMA load, if any */
2290 usb_command_wrapper(pq, NULL);
2291 }
2292 }
2293 #endif
2294 /* keep some statistics */
2295 if (xfer->error) {
2296 xfer->xroot->bus->stats_err.uds_requests
2297 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2298 } else {
2299 xfer->xroot->bus->stats_ok.uds_requests
2300 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2301 }
2302
2303 /* call the USB transfer callback */
2304 usbd_callback_ss_done_defer(xfer);
2305 }
2306
2307 /*------------------------------------------------------------------------*
2308 * usbd_transfer_start_cb
2309 *
2310 * This function is called to start the USB transfer when
2311 * "xfer->interval" is greater than zero, and the endpoint type is
2312 * BULK or CONTROL.
2313 *------------------------------------------------------------------------*/
2314 static void
2315 usbd_transfer_start_cb(void *arg)
2316 {
2317 struct usb_xfer *xfer = arg;
2318 struct usb_endpoint *ep = xfer->endpoint;
2319
2320 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2321
2322 DPRINTF("start\n");
2323
2324 /* start the transfer */
2325 (ep->methods->start) (xfer);
2326
2327 xfer->flags_int.can_cancel_immed = 1;
2328
2329 /* check for error */
2330 if (xfer->error) {
2331 /* some error has happened */
2332 usbd_transfer_done(xfer, 0);
2333 }
2334 }
2335
2336 /*------------------------------------------------------------------------*
2337 * usbd_xfer_set_stall
2338 *
2339 * This function is used to set the stall flag outside the
2340 * callback. This function is NULL safe.
2341 *------------------------------------------------------------------------*/
2342 void
2343 usbd_xfer_set_stall(struct usb_xfer *xfer)
2344 {
2345 if (xfer == NULL) {
2346 /* tearing down */
2347 return;
2348 }
2349 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2350
2351 /* avoid any races by locking the USB mutex */
2352 USB_BUS_LOCK(xfer->xroot->bus);
2353 xfer->flags.stall_pipe = 1;
2354 USB_BUS_UNLOCK(xfer->xroot->bus);
2355 }
2356
2357 int
2358 usbd_xfer_is_stalled(struct usb_xfer *xfer)
2359 {
2360 return (xfer->endpoint->is_stalled);
2361 }
2362
2363 /*------------------------------------------------------------------------*
2364 * usbd_transfer_clear_stall
2365 *
2366 * This function is used to clear the stall flag outside the
2367 * callback. This function is NULL safe.
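 *
 * A minimal usage sketch (hypothetical driver code; "sc" and the
 * transfer index are illustrative). The private USB transfer mutex
 * must be held, as asserted below, before withdrawing a pending
 * stall request and restarting the transfer:
 *
 *   USB_XFER_LOCK(sc->sc_xfer[0]);
 *   usbd_transfer_clear_stall(sc->sc_xfer[0]);
 *   usbd_transfer_start(sc->sc_xfer[0]);
 *   USB_XFER_UNLOCK(sc->sc_xfer[0]);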
2368 *------------------------------------------------------------------------*/ 2369 void 2370 usbd_transfer_clear_stall(struct usb_xfer *xfer) 2371 { 2372 if (xfer == NULL) { 2373 /* tearing down */ 2374 return; 2375 } 2376 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 2377 2378 /* avoid any races by locking the USB mutex */ 2379 USB_BUS_LOCK(xfer->xroot->bus); 2380 2381 xfer->flags.stall_pipe = 0; 2382 2383 USB_BUS_UNLOCK(xfer->xroot->bus); 2384 } 2385 2386 /*------------------------------------------------------------------------* 2387 * usbd_pipe_start 2388 * 2389 * This function is used to add an USB transfer to the pipe transfer list. 2390 *------------------------------------------------------------------------*/ 2391 void 2392 usbd_pipe_start(struct usb_xfer_queue *pq) 2393 { 2394 struct usb_endpoint *ep; 2395 struct usb_xfer *xfer; 2396 uint8_t type; 2397 2398 xfer = pq->curr; 2399 ep = xfer->endpoint; 2400 2401 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED); 2402 2403 /* 2404 * If the endpoint is already stalled we do nothing ! 2405 */ 2406 if (ep->is_stalled) { 2407 return; 2408 } 2409 /* 2410 * Check if we are supposed to stall the endpoint: 2411 */ 2412 if (xfer->flags.stall_pipe) { 2413 /* clear stall command */ 2414 xfer->flags.stall_pipe = 0; 2415 2416 /* 2417 * Only stall BULK and INTERRUPT endpoints. 2418 */ 2419 type = (ep->edesc->bmAttributes & UE_XFERTYPE); 2420 if ((type == UE_BULK) || 2421 (type == UE_INTERRUPT)) { 2422 struct usb_device *udev; 2423 struct usb_xfer_root *info; 2424 uint8_t did_stall; 2425 2426 info = xfer->xroot; 2427 udev = info->udev; 2428 did_stall = 1; 2429 2430 if (udev->flags.usb_mode == USB_MODE_DEVICE) { 2431 (udev->bus->methods->set_stall) ( 2432 udev, NULL, ep, &did_stall); 2433 } else if (udev->default_xfer[1]) { 2434 info = udev->default_xfer[1]->xroot; 2435 usb_proc_msignal( 2436 &info->bus->non_giant_callback_proc, 2437 &udev->cs_msg[0], &udev->cs_msg[1]); 2438 } else { 2439 /* should not happen */ 2440 DPRINTFN(0, "No stall handler\n"); 2441 } 2442 /* 2443 * Check if we should stall. Some USB hardware 2444 * handles set- and clear-stall in hardware. 2445 */ 2446 if (did_stall) { 2447 /* 2448 * The transfer will be continued when 2449 * the clear-stall control endpoint 2450 * message is received. 2451 */ 2452 ep->is_stalled = 1; 2453 return; 2454 } 2455 } 2456 } 2457 /* Set or clear stall complete - special case */ 2458 if (xfer->nframes == 0) { 2459 /* we are complete */ 2460 xfer->aframes = 0; 2461 usbd_transfer_done(xfer, 0); 2462 return; 2463 } 2464 /* 2465 * Handled cases: 2466 * 2467 * 1) Start the first transfer queued. 2468 * 2469 * 2) Re-start the current USB transfer. 2470 */ 2471 /* 2472 * Check if there should be any 2473 * pre transfer start delay: 2474 */ 2475 if (xfer->interval > 0) { 2476 type = (ep->edesc->bmAttributes & UE_XFERTYPE); 2477 if ((type == UE_BULK) || 2478 (type == UE_CONTROL)) { 2479 usbd_transfer_timeout_ms(xfer, 2480 &usbd_transfer_start_cb, 2481 xfer->interval); 2482 return; 2483 } 2484 } 2485 DPRINTF("start\n"); 2486 2487 /* start USB transfer */ 2488 (ep->methods->start) (xfer); 2489 2490 xfer->flags_int.can_cancel_immed = 1; 2491 2492 /* check for error */ 2493 if (xfer->error) { 2494 /* some error has happened */ 2495 usbd_transfer_done(xfer, 0); 2496 } 2497 } 2498 2499 /*------------------------------------------------------------------------* 2500 * usbd_transfer_timeout_ms 2501 * 2502 * This function is used to setup a timeout on the given USB 2503 * transfer. 
The callback given by
2504 * "cb" will get called after "ms" milliseconds.
2505 *------------------------------------------------------------------------*/
2506 void
2507 usbd_transfer_timeout_ms(struct usb_xfer *xfer,
2508 void (*cb) (void *arg), usb_timeout_t ms)
2509 {
2510 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2511
2512 /* defer delay */
2513 usb_callout_reset(&xfer->timeout_handle,
2514 USB_MS_TO_TICKS(ms), cb, xfer);
2515 }
2516
2517 /*------------------------------------------------------------------------*
2518 * usbd_callback_wrapper_sub
2519 *
2520 * - This function will update variables in an USB transfer after
2521 * the USB transfer is complete.
2522 *
2523 * - This function is used to start the next USB transfer on the
2524 * ep transfer queue, if any.
2525 *
2526 * NOTE: In some special cases the USB transfer will not be removed from
2527 * the pipe queue, but remain first. To enforce USB transfer removal call
2528 * this function passing the error code "USB_ERR_CANCELLED".
2529 *
2530 * Return values:
2531 * 0: Success.
2532 * Else: The callback has been deferred.
2533 *------------------------------------------------------------------------*/
2534 static uint8_t
2535 usbd_callback_wrapper_sub(struct usb_xfer *xfer)
2536 {
2537 struct usb_endpoint *ep;
2538 usb_frcount_t x;
2539
2540 if ((!xfer->flags_int.open) &&
2541 (!xfer->flags_int.did_close)) {
2542 DPRINTF("close\n");
2543 USB_BUS_LOCK(xfer->xroot->bus);
2544 (xfer->endpoint->methods->close) (xfer);
2545 USB_BUS_UNLOCK(xfer->xroot->bus);
2546 /* only close once */
2547 xfer->flags_int.did_close = 1;
2548 return (1); /* wait for new callback */
2549 }
2550 /*
2551 * If we have a non-hardware induced error we
2552 * need to do the DMA delay!
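 * The delay gives the host controller hardware time to stop
 * accessing the transfer's DMA memory before the buffers are
 * handed back to the caller.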
2553 */ 2554 if (((xfer->error == USB_ERR_CANCELLED) || 2555 (xfer->error == USB_ERR_TIMEOUT)) && 2556 (!xfer->flags_int.did_dma_delay)) { 2557 2558 usb_timeout_t temp; 2559 2560 /* only delay once */ 2561 xfer->flags_int.did_dma_delay = 1; 2562 2563 /* we can not cancel this delay */ 2564 xfer->flags_int.can_cancel_immed = 0; 2565 2566 temp = usbd_get_dma_delay(xfer->xroot->bus); 2567 2568 DPRINTFN(3, "DMA delay, %u ms, " 2569 "on %p\n", temp, xfer); 2570 2571 if (temp != 0) { 2572 USB_BUS_LOCK(xfer->xroot->bus); 2573 usbd_transfer_timeout_ms(xfer, 2574 &usb_dma_delay_done_cb, temp); 2575 USB_BUS_UNLOCK(xfer->xroot->bus); 2576 return (1); /* wait for new callback */ 2577 } 2578 } 2579 /* check actual number of frames */ 2580 if (xfer->aframes > xfer->nframes) { 2581 if (xfer->error == 0) { 2582 panic("%s: actual number of frames, %d, is " 2583 "greater than initial number of frames, %d\n", 2584 __FUNCTION__, xfer->aframes, xfer->nframes); 2585 } else { 2586 /* just set some valid value */ 2587 xfer->aframes = xfer->nframes; 2588 } 2589 } 2590 /* compute actual length */ 2591 xfer->actlen = 0; 2592 2593 for (x = 0; x != xfer->aframes; x++) { 2594 xfer->actlen += xfer->frlengths[x]; 2595 } 2596 2597 /* 2598 * Frames that were not transferred get zero actual length in 2599 * case the USB device driver does not check the actual number 2600 * of frames transferred, "xfer->aframes": 2601 */ 2602 for (; x < xfer->nframes; x++) { 2603 usbd_xfer_set_frame_len(xfer, x, 0); 2604 } 2605 2606 /* check actual length */ 2607 if (xfer->actlen > xfer->sumlen) { 2608 if (xfer->error == 0) { 2609 panic("%s: actual length, %d, is greater than " 2610 "initial length, %d\n", 2611 __FUNCTION__, xfer->actlen, xfer->sumlen); 2612 } else { 2613 /* just set some valid value */ 2614 xfer->actlen = xfer->sumlen; 2615 } 2616 } 2617 DPRINTFN(1, "xfer=%p endpoint=%p sts=%d alen=%d, slen=%d, afrm=%d, nfrm=%d\n", 2618 xfer, xfer->endpoint, xfer->error, xfer->actlen, xfer->sumlen, 2619 xfer->aframes, xfer->nframes); 2620 2621 if (xfer->error) { 2622 /* end of control transfer, if any */ 2623 xfer->flags_int.control_act = 0; 2624 2625 /* check if we should block the execution queue */ 2626 if ((xfer->error != USB_ERR_CANCELLED) && 2627 (xfer->flags.pipe_bof)) { 2628 DPRINTFN(2, "xfer=%p: Block On Failure " 2629 "on endpoint=%p\n", xfer, xfer->endpoint); 2630 goto done; 2631 } 2632 } else { 2633 /* check for short transfers */ 2634 if (xfer->actlen < xfer->sumlen) { 2635 2636 /* end of control transfer, if any */ 2637 xfer->flags_int.control_act = 0; 2638 2639 if (!xfer->flags_int.short_xfer_ok) { 2640 xfer->error = USB_ERR_SHORT_XFER; 2641 if (xfer->flags.pipe_bof) { 2642 DPRINTFN(2, "xfer=%p: Block On Failure on " 2643 "Short Transfer on endpoint %p.\n", 2644 xfer, xfer->endpoint); 2645 goto done; 2646 } 2647 } 2648 } else { 2649 /* 2650 * Check if we are in the middle of a 2651 * control transfer: 2652 */ 2653 if (xfer->flags_int.control_act) { 2654 DPRINTFN(5, "xfer=%p: Control transfer " 2655 "active on endpoint=%p\n", xfer, xfer->endpoint); 2656 goto done; 2657 } 2658 } 2659 } 2660 2661 ep = xfer->endpoint; 2662 2663 /* 2664 * If the current USB transfer is completing we need to start the 2665 * next one: 2666 */ 2667 USB_BUS_LOCK(xfer->xroot->bus); 2668 if (ep->endpoint_q.curr == xfer) { 2669 usb_command_wrapper(&ep->endpoint_q, NULL); 2670 2671 if (ep->endpoint_q.curr || TAILQ_FIRST(&ep->endpoint_q.head)) { 2672 /* there is another USB transfer waiting */ 2673 } else { 2674 /* this is the last USB transfer */ 2675 /* 
clear isochronous sync flag */
2676 xfer->endpoint->is_synced = 0;
2677 }
2678 }
2679 USB_BUS_UNLOCK(xfer->xroot->bus);
2680 done:
2681 return (0);
2682 }
2683
2684 /*------------------------------------------------------------------------*
2685 * usb_command_wrapper
2686 *
2687 * This function is used to execute commands non-recursively on an USB
2688 * transfer.
2689 *------------------------------------------------------------------------*/
2690 void
2691 usb_command_wrapper(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2692 {
2693 if (xfer) {
2694 /*
2695 * If the transfer is not already processing,
2696 * queue it!
2697 */
2698 if (pq->curr != xfer) {
2699 usbd_transfer_enqueue(pq, xfer);
2700 if (pq->curr != NULL) {
2701 /* something is already processing */
2702 DPRINTFN(6, "busy %p\n", pq->curr);
2703 return;
2704 }
2705 }
2706 } else {
2707 /* Get next element in queue */
2708 pq->curr = NULL;
2709 }
2710
2711 if (!pq->recurse_1) {
2712
2713 do {
2714
2715 /* set both recurse flags */
2716 pq->recurse_1 = 1;
2717 pq->recurse_2 = 1;
2718
2719 if (pq->curr == NULL) {
2720 xfer = TAILQ_FIRST(&pq->head);
2721 if (xfer) {
2722 TAILQ_REMOVE(&pq->head, xfer,
2723 wait_entry);
2724 xfer->wait_queue = NULL;
2725 pq->curr = xfer;
2726 } else {
2727 break;
2728 }
2729 }
2730 DPRINTFN(6, "cb %p (enter)\n", pq->curr);
2731 (pq->command) (pq);
2732 DPRINTFN(6, "cb %p (leave)\n", pq->curr);
2733
2734 } while (!pq->recurse_2);
2735
2736 /* clear first recurse flag */
2737 pq->recurse_1 = 0;
2738
2739 } else {
2740 /* clear second recurse flag */
2741 pq->recurse_2 = 0;
2742 }
2743 }
2744
2745 /*------------------------------------------------------------------------*
2746 * usbd_default_transfer_setup
2747 *
2748 * This function is used to setup the default USB control endpoint
2749 * transfer.
2750 *------------------------------------------------------------------------*/
2751 void
2752 usbd_default_transfer_setup(struct usb_device *udev)
2753 {
2754 struct usb_xfer *xfer;
2755 uint8_t no_resetup;
2756 uint8_t iface_index;
2757
2758 /* check for root HUB */
2759 if (udev->parent_hub == NULL)
2760 return;
2761 repeat:
2762
2763 xfer = udev->default_xfer[0];
2764 if (xfer) {
2765 USB_XFER_LOCK(xfer);
2766 no_resetup =
2767 ((xfer->address == udev->address) &&
2768 (udev->default_ep_desc.wMaxPacketSize[0] ==
2769 udev->ddesc.bMaxPacketSize));
2770 if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2771 if (no_resetup) {
2772 /*
2773 * NOTE: checking "xfer->address" and
2774 * starting the USB transfer must be
2775 * atomic!
2776 */
2777 usbd_transfer_start(xfer);
2778 }
2779 }
2780 USB_XFER_UNLOCK(xfer);
2781 } else {
2782 no_resetup = 0;
2783 }
2784
2785 if (no_resetup) {
2786 /*
2787 * All parameters are exactly the same as before.
2788 * Just return.
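 * ("no_resetup" above means that the device address is unchanged
 * and that "wMaxPacketSize" of the default endpoint already matches
 * "bMaxPacketSize" from the device descriptor.)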
2789 */ 2790 return; 2791 } 2792 /* 2793 * Update wMaxPacketSize for the default control endpoint: 2794 */ 2795 udev->default_ep_desc.wMaxPacketSize[0] = 2796 udev->ddesc.bMaxPacketSize; 2797 2798 /* 2799 * Unsetup any existing USB transfer: 2800 */ 2801 usbd_transfer_unsetup(udev->default_xfer, USB_DEFAULT_XFER_MAX); 2802 2803 /* 2804 * Try to setup a new USB transfer for the 2805 * default control endpoint: 2806 */ 2807 iface_index = 0; 2808 if (usbd_transfer_setup(udev, &iface_index, 2809 udev->default_xfer, usb_control_ep_cfg, USB_DEFAULT_XFER_MAX, NULL, 2810 udev->default_mtx)) { 2811 DPRINTFN(0, "could not setup default " 2812 "USB transfer\n"); 2813 } else { 2814 goto repeat; 2815 } 2816 } 2817 2818 /*------------------------------------------------------------------------* 2819 * usbd_clear_data_toggle - factored out code 2820 * 2821 * NOTE: the intention of this function is not to reset the hardware 2822 * data toggle. 2823 *------------------------------------------------------------------------*/ 2824 void 2825 usbd_clear_data_toggle(struct usb_device *udev, struct usb_endpoint *ep) 2826 { 2827 DPRINTFN(5, "udev=%p endpoint=%p\n", udev, ep); 2828 2829 USB_BUS_LOCK(udev->bus); 2830 ep->toggle_next = 0; 2831 USB_BUS_UNLOCK(udev->bus); 2832 } 2833 2834 /*------------------------------------------------------------------------* 2835 * usbd_clear_stall_callback - factored out clear stall callback 2836 * 2837 * Input parameters: 2838 * xfer1: Clear Stall Control Transfer 2839 * xfer2: Stalled USB Transfer 2840 * 2841 * This function is NULL safe. 2842 * 2843 * Return values: 2844 * 0: In progress 2845 * Else: Finished 2846 * 2847 * Clear stall config example: 2848 * 2849 * static const struct usb_config my_clearstall = { 2850 * .type = UE_CONTROL, 2851 * .endpoint = 0, 2852 * .direction = UE_DIR_ANY, 2853 * .interval = 50, //50 milliseconds 2854 * .bufsize = sizeof(struct usb_device_request), 2855 * .timeout = 1000, //1.000 seconds 2856 * .callback = &my_clear_stall_callback, // ** 2857 * .usb_mode = USB_MODE_HOST, 2858 * }; 2859 * 2860 * ** "my_clear_stall_callback" calls "usbd_clear_stall_callback" 2861 * passing the correct parameters. 2862 *------------------------------------------------------------------------*/ 2863 uint8_t 2864 usbd_clear_stall_callback(struct usb_xfer *xfer1, 2865 struct usb_xfer *xfer2) 2866 { 2867 struct usb_device_request req; 2868 2869 if (xfer2 == NULL) { 2870 /* looks like we are tearing down */ 2871 DPRINTF("NULL input parameter\n"); 2872 return (0); 2873 } 2874 USB_XFER_LOCK_ASSERT(xfer1, MA_OWNED); 2875 USB_XFER_LOCK_ASSERT(xfer2, MA_OWNED); 2876 2877 switch (USB_GET_STATE(xfer1)) { 2878 case USB_ST_SETUP: 2879 2880 /* 2881 * pre-clear the data toggle to DATA0 ("umass.c" and 2882 * "ata-usb.c" depends on this) 2883 */ 2884 2885 usbd_clear_data_toggle(xfer2->xroot->udev, xfer2->endpoint); 2886 2887 /* setup a clear-stall packet */ 2888 2889 req.bmRequestType = UT_WRITE_ENDPOINT; 2890 req.bRequest = UR_CLEAR_FEATURE; 2891 USETW(req.wValue, UF_ENDPOINT_HALT); 2892 req.wIndex[0] = xfer2->endpoint->edesc->bEndpointAddress; 2893 req.wIndex[1] = 0; 2894 USETW(req.wLength, 0); 2895 2896 /* 2897 * "usbd_transfer_setup_sub()" will ensure that 2898 * we have sufficient room in the buffer for 2899 * the request structure! 
2900 */ 2901 2902 /* copy in the transfer */ 2903 2904 usbd_copy_in(xfer1->frbuffers, 0, &req, sizeof(req)); 2905 2906 /* set length */ 2907 xfer1->frlengths[0] = sizeof(req); 2908 xfer1->nframes = 1; 2909 2910 usbd_transfer_submit(xfer1); 2911 return (0); 2912 2913 case USB_ST_TRANSFERRED: 2914 break; 2915 2916 default: /* Error */ 2917 if (xfer1->error == USB_ERR_CANCELLED) { 2918 return (0); 2919 } 2920 break; 2921 } 2922 return (1); /* Clear Stall Finished */ 2923 } 2924 2925 /*------------------------------------------------------------------------* 2926 * usbd_transfer_poll 2927 * 2928 * The following function gets called from the USB keyboard driver and 2929 * UMASS when the system has paniced. 2930 * 2931 * NOTE: It is currently not possible to resume normal operation on 2932 * the USB controller which has been polled, due to clearing of the 2933 * "up_dsleep" and "up_msleep" flags. 2934 *------------------------------------------------------------------------*/ 2935 void 2936 usbd_transfer_poll(struct usb_xfer **ppxfer, uint16_t max) 2937 { 2938 struct usb_xfer *xfer; 2939 struct usb_xfer_root *xroot; 2940 struct usb_device *udev; 2941 struct usb_proc_msg *pm; 2942 uint16_t n; 2943 uint16_t drop_bus; 2944 uint16_t drop_xfer; 2945 2946 for (n = 0; n != max; n++) { 2947 /* Extra checks to avoid panic */ 2948 xfer = ppxfer[n]; 2949 if (xfer == NULL) 2950 continue; /* no USB transfer */ 2951 xroot = xfer->xroot; 2952 if (xroot == NULL) 2953 continue; /* no USB root */ 2954 udev = xroot->udev; 2955 if (udev == NULL) 2956 continue; /* no USB device */ 2957 if (udev->bus == NULL) 2958 continue; /* no BUS structure */ 2959 if (udev->bus->methods == NULL) 2960 continue; /* no BUS methods */ 2961 if (udev->bus->methods->xfer_poll == NULL) 2962 continue; /* no poll method */ 2963 2964 /* make sure that the BUS mutex is not locked */ 2965 drop_bus = 0; 2966 while (mtx_owned(&xroot->udev->bus->bus_mtx)) { 2967 mtx_unlock(&xroot->udev->bus->bus_mtx); 2968 drop_bus++; 2969 } 2970 2971 /* make sure that the transfer mutex is not locked */ 2972 drop_xfer = 0; 2973 while (mtx_owned(xroot->xfer_mtx)) { 2974 mtx_unlock(xroot->xfer_mtx); 2975 drop_xfer++; 2976 } 2977 2978 /* Make sure cv_signal() and cv_broadcast() is not called */ 2979 udev->bus->control_xfer_proc.up_msleep = 0; 2980 udev->bus->explore_proc.up_msleep = 0; 2981 udev->bus->giant_callback_proc.up_msleep = 0; 2982 udev->bus->non_giant_callback_proc.up_msleep = 0; 2983 2984 /* poll USB hardware */ 2985 (udev->bus->methods->xfer_poll) (udev->bus); 2986 2987 USB_BUS_LOCK(xroot->bus); 2988 2989 /* check for clear stall */ 2990 if (udev->default_xfer[1] != NULL) { 2991 2992 /* poll clear stall start */ 2993 pm = &udev->cs_msg[0].hdr; 2994 (pm->pm_callback) (pm); 2995 /* poll clear stall done thread */ 2996 pm = &udev->default_xfer[1]-> 2997 xroot->done_m[0].hdr; 2998 (pm->pm_callback) (pm); 2999 } 3000 3001 /* poll done thread */ 3002 pm = &xroot->done_m[0].hdr; 3003 (pm->pm_callback) (pm); 3004 3005 USB_BUS_UNLOCK(xroot->bus); 3006 3007 /* restore transfer mutex */ 3008 while (drop_xfer--) 3009 mtx_lock(xroot->xfer_mtx); 3010 3011 /* restore BUS mutex */ 3012 while (drop_bus--) 3013 mtx_lock(&xroot->udev->bus->bus_mtx); 3014 } 3015 } 3016 3017 static void 3018 usbd_get_std_packet_size(struct usb_std_packet_size *ptr, 3019 uint8_t type, enum usb_dev_speed speed) 3020 { 3021 static const uint16_t intr_range_max[USB_SPEED_MAX] = { 3022 [USB_SPEED_LOW] = 8, 3023 [USB_SPEED_FULL] = 64, 3024 [USB_SPEED_HIGH] = 1024, 3025 [USB_SPEED_VARIABLE] = 
1024, 3026 [USB_SPEED_SUPER] = 1024, 3027 }; 3028 3029 static const uint16_t isoc_range_max[USB_SPEED_MAX] = { 3030 [USB_SPEED_LOW] = 0, /* invalid */ 3031 [USB_SPEED_FULL] = 1023, 3032 [USB_SPEED_HIGH] = 1024, 3033 [USB_SPEED_VARIABLE] = 3584, 3034 [USB_SPEED_SUPER] = 1024, 3035 }; 3036 3037 static const uint16_t control_min[USB_SPEED_MAX] = { 3038 [USB_SPEED_LOW] = 8, 3039 [USB_SPEED_FULL] = 8, 3040 [USB_SPEED_HIGH] = 64, 3041 [USB_SPEED_VARIABLE] = 512, 3042 [USB_SPEED_SUPER] = 512, 3043 }; 3044 3045 static const uint16_t bulk_min[USB_SPEED_MAX] = { 3046 [USB_SPEED_LOW] = 0, /* not supported */ 3047 [USB_SPEED_FULL] = 8, 3048 [USB_SPEED_HIGH] = 512, 3049 [USB_SPEED_VARIABLE] = 512, 3050 [USB_SPEED_SUPER] = 1024, 3051 }; 3052 3053 uint16_t temp; 3054 3055 memset(ptr, 0, sizeof(*ptr)); 3056 3057 switch (type) { 3058 case UE_INTERRUPT: 3059 ptr->range.max = intr_range_max[speed]; 3060 break; 3061 case UE_ISOCHRONOUS: 3062 ptr->range.max = isoc_range_max[speed]; 3063 break; 3064 default: 3065 if (type == UE_BULK) 3066 temp = bulk_min[speed]; 3067 else /* UE_CONTROL */ 3068 temp = control_min[speed]; 3069 3070 /* default is fixed */ 3071 ptr->fixed[0] = temp; 3072 ptr->fixed[1] = temp; 3073 ptr->fixed[2] = temp; 3074 ptr->fixed[3] = temp; 3075 3076 if (speed == USB_SPEED_FULL) { 3077 /* multiple sizes */ 3078 ptr->fixed[1] = 16; 3079 ptr->fixed[2] = 32; 3080 ptr->fixed[3] = 64; 3081 } 3082 if ((speed == USB_SPEED_VARIABLE) && 3083 (type == UE_BULK)) { 3084 /* multiple sizes */ 3085 ptr->fixed[2] = 1024; 3086 ptr->fixed[3] = 1536; 3087 } 3088 break; 3089 } 3090 } 3091 3092 void * 3093 usbd_xfer_softc(struct usb_xfer *xfer) 3094 { 3095 return (xfer->priv_sc); 3096 } 3097 3098 void * 3099 usbd_xfer_get_priv(struct usb_xfer *xfer) 3100 { 3101 return (xfer->priv_fifo); 3102 } 3103 3104 void 3105 usbd_xfer_set_priv(struct usb_xfer *xfer, void *ptr) 3106 { 3107 xfer->priv_fifo = ptr; 3108 } 3109 3110 uint8_t 3111 usbd_xfer_state(struct usb_xfer *xfer) 3112 { 3113 return (xfer->usb_state); 3114 } 3115 3116 void 3117 usbd_xfer_set_flag(struct usb_xfer *xfer, int flag) 3118 { 3119 switch (flag) { 3120 case USB_FORCE_SHORT_XFER: 3121 xfer->flags.force_short_xfer = 1; 3122 break; 3123 case USB_SHORT_XFER_OK: 3124 xfer->flags.short_xfer_ok = 1; 3125 break; 3126 case USB_MULTI_SHORT_OK: 3127 xfer->flags.short_frames_ok = 1; 3128 break; 3129 case USB_MANUAL_STATUS: 3130 xfer->flags.manual_status = 1; 3131 break; 3132 } 3133 } 3134 3135 void 3136 usbd_xfer_clr_flag(struct usb_xfer *xfer, int flag) 3137 { 3138 switch (flag) { 3139 case USB_FORCE_SHORT_XFER: 3140 xfer->flags.force_short_xfer = 0; 3141 break; 3142 case USB_SHORT_XFER_OK: 3143 xfer->flags.short_xfer_ok = 0; 3144 break; 3145 case USB_MULTI_SHORT_OK: 3146 xfer->flags.short_frames_ok = 0; 3147 break; 3148 case USB_MANUAL_STATUS: 3149 xfer->flags.manual_status = 0; 3150 break; 3151 } 3152 } 3153 3154 /* 3155 * The following function returns in milliseconds when the isochronous 3156 * transfer was completed by the hardware. The returned value wraps 3157 * around 65536 milliseconds. 3158 */ 3159 uint16_t 3160 usbd_xfer_get_timestamp(struct usb_xfer *xfer) 3161 { 3162 return (xfer->isoc_time_complete); 3163 } 3164
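/*------------------------------------------------------------------------*
 * The following sketch is not part of this file; "my_softc",
 * "my_handle_data" and the single bulk-read frame are illustrative
 * assumptions. It shows how a driver callback typically combines the
 * accessor functions defined above instead of touching "struct
 * usb_xfer" directly (note the deliberate fall-through from
 * USB_ST_TRANSFERRED into USB_ST_SETUP):
 *
 *   static void
 *   my_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
 *   {
 *       struct my_softc *sc = usbd_xfer_softc(xfer);
 *       int actlen;
 *
 *       usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
 *
 *       switch (USB_GET_STATE(xfer)) {
 *       case USB_ST_TRANSFERRED:
 *           my_handle_data(sc, actlen);
 *       case USB_ST_SETUP:
 *   tr_setup:
 *           usbd_xfer_set_frame_len(xfer, 0,
 *               usbd_xfer_max_len(xfer));
 *           usbd_transfer_submit(xfer);
 *           break;
 *       default:
 *           if (error != USB_ERR_CANCELLED) {
 *               usbd_xfer_set_stall(xfer);
 *               goto tr_setup;
 *           }
 *           break;
 *       }
 *   }
 *------------------------------------------------------------------------*/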