1 /* $FreeBSD$ */ 2 /*- 3 * Copyright (c) 2008 Hans Petter Selasky. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 27 #include <sys/stdint.h> 28 #include <sys/stddef.h> 29 #include <sys/param.h> 30 #include <sys/queue.h> 31 #include <sys/types.h> 32 #include <sys/systm.h> 33 #include <sys/kernel.h> 34 #include <sys/bus.h> 35 #include <sys/linker_set.h> 36 #include <sys/module.h> 37 #include <sys/lock.h> 38 #include <sys/mutex.h> 39 #include <sys/condvar.h> 40 #include <sys/sysctl.h> 41 #include <sys/sx.h> 42 #include <sys/unistd.h> 43 #include <sys/callout.h> 44 #include <sys/malloc.h> 45 #include <sys/priv.h> 46 47 #include <dev/usb/usb.h> 48 #include <dev/usb/usbdi.h> 49 #include <dev/usb/usbdi_util.h> 50 51 #define USB_DEBUG_VAR usb_debug 52 53 #include <dev/usb/usb_core.h> 54 #include <dev/usb/usb_busdma.h> 55 #include <dev/usb/usb_process.h> 56 #include <dev/usb/usb_transfer.h> 57 #include <dev/usb/usb_device.h> 58 #include <dev/usb/usb_debug.h> 59 #include <dev/usb/usb_util.h> 60 61 #include <dev/usb/usb_controller.h> 62 #include <dev/usb/usb_bus.h> 63 64 struct usb_std_packet_size { 65 struct { 66 uint16_t min; /* inclusive */ 67 uint16_t max; /* inclusive */ 68 } range; 69 70 uint16_t fixed[4]; 71 }; 72 73 static usb_callback_t usb_request_callback; 74 75 static const struct usb_config usb_control_ep_cfg[USB_CTRL_XFER_MAX] = { 76 77 /* This transfer is used for generic control endpoint transfers */ 78 79 [0] = { 80 .type = UE_CONTROL, 81 .endpoint = 0x00, /* Control endpoint */ 82 .direction = UE_DIR_ANY, 83 .bufsize = USB_EP0_BUFSIZE, /* bytes */ 84 .flags = {.proxy_buffer = 1,}, 85 .callback = &usb_request_callback, 86 .usb_mode = USB_MODE_DUAL, /* both modes */ 87 }, 88 89 /* This transfer is used for generic clear stall only */ 90 91 [1] = { 92 .type = UE_CONTROL, 93 .endpoint = 0x00, /* Control pipe */ 94 .direction = UE_DIR_ANY, 95 .bufsize = sizeof(struct usb_device_request), 96 .callback = &usb_do_clear_stall_callback, 97 .timeout = 1000, /* 1 second */ 98 .interval = 50, /* 50ms */ 99 .usb_mode = USB_MODE_HOST, 100 }, 101 }; 102 103 /* function prototypes */ 104 105 static void usbd_update_max_frame_size(struct usb_xfer *); 106 static void usbd_transfer_unsetup_sub(struct usb_xfer_root *, uint8_t); 107 
static void usbd_control_transfer_init(struct usb_xfer *);
static int usbd_setup_ctrl_transfer(struct usb_xfer *);
static void usb_callback_proc(struct usb_proc_msg *);
static void usbd_callback_ss_done_defer(struct usb_xfer *);
static void usbd_callback_wrapper(struct usb_xfer_queue *);
static void usb_dma_delay_done_cb(void *);
static void usbd_transfer_start_cb(void *);
static uint8_t usbd_callback_wrapper_sub(struct usb_xfer *);
static void usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
    uint8_t type, enum usb_dev_speed speed);

/*------------------------------------------------------------------------*
 *	usb_request_callback
 *------------------------------------------------------------------------*/
static void
usb_request_callback(struct usb_xfer *xfer, usb_error_t error)
{
	if (xfer->flags_int.usb_mode == USB_MODE_DEVICE)
		usb_handle_request_callback(xfer, error);
	else
		usbd_do_request_callback(xfer, error);
}

/*------------------------------------------------------------------------*
 *	usbd_update_max_frame_size
 *
 * This function updates the maximum frame size, since high speed USB
 * can transfer multiple consecutive packets per USB frame.
 *------------------------------------------------------------------------*/
static void
usbd_update_max_frame_size(struct usb_xfer *xfer)
{
	/* compute maximum frame size */

	if (xfer->max_packet_count == 2) {
		xfer->max_frame_size = 2 * xfer->max_packet_size;
	} else if (xfer->max_packet_count == 3) {
		xfer->max_frame_size = 3 * xfer->max_packet_size;
	} else {
		xfer->max_frame_size = xfer->max_packet_size;
	}
}

/*------------------------------------------------------------------------*
 *	usbd_get_dma_delay
 *
 * The following function is called when we need to
 * synchronize with DMA hardware.
 *
 * Returns:
 *    0: no DMA delay required
 * Else: milliseconds of DMA delay
 *------------------------------------------------------------------------*/
usb_timeout_t
usbd_get_dma_delay(struct usb_device *udev)
{
	struct usb_bus_methods *mtod;
	uint32_t temp;

	mtod = udev->bus->methods;
	temp = 0;

	if (mtod->get_dma_delay) {
		(mtod->get_dma_delay) (udev, &temp);
		/*
		 * Round up and convert to milliseconds. Note that we use
		 * 1024 milliseconds per second to save a division.
		 */
		temp += 0x3FF;
		temp /= 0x400;
	}
	return (temp);
}

/*------------------------------------------------------------------------*
 *	usbd_transfer_setup_sub_malloc
 *
 * This function will allocate one or more DMA'able memory chunks
 * according to the "size", "align" and "count" arguments. "ppc" will
 * point to a linear array of USB page caches afterwards.
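 *
 * Note: "size" is first rounded up to a multiple of "align"; when the
 * rounded size is less than PAGE_SIZE, several objects are packed into
 * each DMA chunk, (PAGE_SIZE / size) objects per chunk.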
187 * 188 * Returns: 189 * 0: Success 190 * Else: Failure 191 *------------------------------------------------------------------------*/ 192 #if USB_HAVE_BUSDMA 193 uint8_t 194 usbd_transfer_setup_sub_malloc(struct usb_setup_params *parm, 195 struct usb_page_cache **ppc, usb_size_t size, usb_size_t align, 196 usb_size_t count) 197 { 198 struct usb_page_cache *pc; 199 struct usb_page *pg; 200 void *buf; 201 usb_size_t n_dma_pc; 202 usb_size_t n_obj; 203 usb_size_t x; 204 usb_size_t y; 205 usb_size_t r; 206 usb_size_t z; 207 208 USB_ASSERT(align > 1, ("Invalid alignment, 0x%08x\n", 209 align)); 210 USB_ASSERT(size > 0, ("Invalid size = 0\n")); 211 212 if (count == 0) { 213 return (0); /* nothing to allocate */ 214 } 215 /* 216 * Make sure that the size is aligned properly. 217 */ 218 size = -((-size) & (-align)); 219 220 /* 221 * Try multi-allocation chunks to reduce the number of DMA 222 * allocations, hence DMA allocations are slow. 223 */ 224 if (size >= PAGE_SIZE) { 225 n_dma_pc = count; 226 n_obj = 1; 227 } else { 228 /* compute number of objects per page */ 229 n_obj = (PAGE_SIZE / size); 230 /* 231 * Compute number of DMA chunks, rounded up 232 * to nearest one: 233 */ 234 n_dma_pc = ((count + n_obj - 1) / n_obj); 235 } 236 237 if (parm->buf == NULL) { 238 /* for the future */ 239 parm->dma_page_ptr += n_dma_pc; 240 parm->dma_page_cache_ptr += n_dma_pc; 241 parm->dma_page_ptr += count; 242 parm->xfer_page_cache_ptr += count; 243 return (0); 244 } 245 for (x = 0; x != n_dma_pc; x++) { 246 /* need to initialize the page cache */ 247 parm->dma_page_cache_ptr[x].tag_parent = 248 &parm->curr_xfer->xroot->dma_parent_tag; 249 } 250 for (x = 0; x != count; x++) { 251 /* need to initialize the page cache */ 252 parm->xfer_page_cache_ptr[x].tag_parent = 253 &parm->curr_xfer->xroot->dma_parent_tag; 254 } 255 256 if (ppc) { 257 *ppc = parm->xfer_page_cache_ptr; 258 } 259 r = count; /* set remainder count */ 260 z = n_obj * size; /* set allocation size */ 261 pc = parm->xfer_page_cache_ptr; 262 pg = parm->dma_page_ptr; 263 264 for (x = 0; x != n_dma_pc; x++) { 265 266 if (r < n_obj) { 267 /* compute last remainder */ 268 z = r * size; 269 n_obj = r; 270 } 271 if (usb_pc_alloc_mem(parm->dma_page_cache_ptr, 272 pg, z, align)) { 273 return (1); /* failure */ 274 } 275 /* Set beginning of current buffer */ 276 buf = parm->dma_page_cache_ptr->buffer; 277 /* Make room for one DMA page cache and one page */ 278 parm->dma_page_cache_ptr++; 279 pg++; 280 281 for (y = 0; (y != n_obj); y++, r--, pc++, pg++) { 282 283 /* Load sub-chunk into DMA */ 284 if (usb_pc_dmamap_create(pc, size)) { 285 return (1); /* failure */ 286 } 287 pc->buffer = USB_ADD_BYTES(buf, y * size); 288 pc->page_start = pg; 289 290 mtx_lock(pc->tag_parent->mtx); 291 if (usb_pc_load_mem(pc, size, 1 /* synchronous */ )) { 292 mtx_unlock(pc->tag_parent->mtx); 293 return (1); /* failure */ 294 } 295 mtx_unlock(pc->tag_parent->mtx); 296 } 297 } 298 299 parm->xfer_page_cache_ptr = pc; 300 parm->dma_page_ptr = pg; 301 return (0); 302 } 303 #endif 304 305 /*------------------------------------------------------------------------* 306 * usbd_transfer_setup_sub - transfer setup subroutine 307 * 308 * This function must be called from the "xfer_setup" callback of the 309 * USB Host or Device controller driver when setting up an USB 310 * transfer. This function will setup correct packet sizes, buffer 311 * sizes, flags and more, that are stored in the "usb_xfer" 312 * structure. 
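 *
 * The "hc_max_packet_size", "hc_max_packet_count" and
 * "hc_max_frame_size" fields of "parm" must be initialized by the
 * caller before this function is called, see the sanity check below.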
313 *------------------------------------------------------------------------*/ 314 void 315 usbd_transfer_setup_sub(struct usb_setup_params *parm) 316 { 317 enum { 318 REQ_SIZE = 8, 319 MIN_PKT = 8, 320 }; 321 struct usb_xfer *xfer = parm->curr_xfer; 322 const struct usb_config *setup = parm->curr_setup; 323 struct usb_endpoint_descriptor *edesc; 324 struct usb_std_packet_size std_size; 325 usb_frcount_t n_frlengths; 326 usb_frcount_t n_frbuffers; 327 usb_frcount_t x; 328 uint8_t type; 329 uint8_t zmps; 330 331 /* 332 * Sanity check. The following parameters must be initialized before 333 * calling this function. 334 */ 335 if ((parm->hc_max_packet_size == 0) || 336 (parm->hc_max_packet_count == 0) || 337 (parm->hc_max_frame_size == 0)) { 338 parm->err = USB_ERR_INVAL; 339 goto done; 340 } 341 edesc = xfer->endpoint->edesc; 342 343 type = (edesc->bmAttributes & UE_XFERTYPE); 344 345 xfer->flags = setup->flags; 346 xfer->nframes = setup->frames; 347 xfer->timeout = setup->timeout; 348 xfer->callback = setup->callback; 349 xfer->interval = setup->interval; 350 xfer->endpointno = edesc->bEndpointAddress; 351 xfer->max_packet_size = UGETW(edesc->wMaxPacketSize); 352 xfer->max_packet_count = 1; 353 /* make a shadow copy: */ 354 xfer->flags_int.usb_mode = parm->udev->flags.usb_mode; 355 356 parm->bufsize = setup->bufsize; 357 358 if (parm->speed == USB_SPEED_HIGH) { 359 xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3; 360 xfer->max_packet_size &= 0x7FF; 361 } 362 /* range check "max_packet_count" */ 363 364 if (xfer->max_packet_count > parm->hc_max_packet_count) { 365 xfer->max_packet_count = parm->hc_max_packet_count; 366 } 367 /* filter "wMaxPacketSize" according to HC capabilities */ 368 369 if ((xfer->max_packet_size > parm->hc_max_packet_size) || 370 (xfer->max_packet_size == 0)) { 371 xfer->max_packet_size = parm->hc_max_packet_size; 372 } 373 /* filter "wMaxPacketSize" according to standard sizes */ 374 375 usbd_get_std_packet_size(&std_size, type, parm->speed); 376 377 if (std_size.range.min || std_size.range.max) { 378 379 if (xfer->max_packet_size < std_size.range.min) { 380 xfer->max_packet_size = std_size.range.min; 381 } 382 if (xfer->max_packet_size > std_size.range.max) { 383 xfer->max_packet_size = std_size.range.max; 384 } 385 } else { 386 387 if (xfer->max_packet_size >= std_size.fixed[3]) { 388 xfer->max_packet_size = std_size.fixed[3]; 389 } else if (xfer->max_packet_size >= std_size.fixed[2]) { 390 xfer->max_packet_size = std_size.fixed[2]; 391 } else if (xfer->max_packet_size >= std_size.fixed[1]) { 392 xfer->max_packet_size = std_size.fixed[1]; 393 } else { 394 /* only one possibility left */ 395 xfer->max_packet_size = std_size.fixed[0]; 396 } 397 } 398 399 /* compute "max_frame_size" */ 400 401 usbd_update_max_frame_size(xfer); 402 403 /* check interrupt interval and transfer pre-delay */ 404 405 if (type == UE_ISOCHRONOUS) { 406 407 uint16_t frame_limit; 408 409 xfer->interval = 0; /* not used, must be zero */ 410 xfer->flags_int.isochronous_xfr = 1; /* set flag */ 411 412 if (xfer->timeout == 0) { 413 /* 414 * set a default timeout in 415 * case something goes wrong! 
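			 * i.e. 1000 / 4 = 250 milliseconds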
416 */ 417 xfer->timeout = 1000 / 4; 418 } 419 switch (parm->speed) { 420 case USB_SPEED_LOW: 421 case USB_SPEED_FULL: 422 frame_limit = USB_MAX_FS_ISOC_FRAMES_PER_XFER; 423 xfer->fps_shift = 0; 424 break; 425 default: 426 frame_limit = USB_MAX_HS_ISOC_FRAMES_PER_XFER; 427 xfer->fps_shift = edesc->bInterval; 428 if (xfer->fps_shift > 0) 429 xfer->fps_shift--; 430 if (xfer->fps_shift > 3) 431 xfer->fps_shift = 3; 432 break; 433 } 434 435 if (xfer->nframes > frame_limit) { 436 /* 437 * this is not going to work 438 * cross hardware 439 */ 440 parm->err = USB_ERR_INVAL; 441 goto done; 442 } 443 if (xfer->nframes == 0) { 444 /* 445 * this is not a valid value 446 */ 447 parm->err = USB_ERR_ZERO_NFRAMES; 448 goto done; 449 } 450 } else { 451 452 /* 453 * if a value is specified use that else check the endpoint 454 * descriptor 455 */ 456 if (xfer->interval == 0) { 457 458 if (type == UE_INTERRUPT) { 459 460 xfer->interval = edesc->bInterval; 461 462 switch (parm->speed) { 463 case USB_SPEED_SUPER: 464 case USB_SPEED_VARIABLE: 465 /* 125us -> 1ms */ 466 if (xfer->interval < 4) 467 xfer->interval = 1; 468 else if (xfer->interval > 16) 469 xfer->interval = (1<<(16-4)); 470 else 471 xfer->interval = 472 (1 << (xfer->interval-4)); 473 break; 474 case USB_SPEED_HIGH: 475 /* 125us -> 1ms */ 476 xfer->interval /= 8; 477 break; 478 default: 479 break; 480 } 481 if (xfer->interval == 0) { 482 /* 483 * One millisecond is the smallest 484 * interval we support: 485 */ 486 xfer->interval = 1; 487 } 488 } 489 } 490 } 491 492 /* 493 * NOTE: we do not allow "max_packet_size" or "max_frame_size" 494 * to be equal to zero when setting up USB transfers, hence 495 * this leads to alot of extra code in the USB kernel. 496 */ 497 498 if ((xfer->max_frame_size == 0) || 499 (xfer->max_packet_size == 0)) { 500 501 zmps = 1; 502 503 if ((parm->bufsize <= MIN_PKT) && 504 (type != UE_CONTROL) && 505 (type != UE_BULK)) { 506 507 /* workaround */ 508 xfer->max_packet_size = MIN_PKT; 509 xfer->max_packet_count = 1; 510 parm->bufsize = 0; /* automatic setup length */ 511 usbd_update_max_frame_size(xfer); 512 513 } else { 514 parm->err = USB_ERR_ZERO_MAXP; 515 goto done; 516 } 517 518 } else { 519 zmps = 0; 520 } 521 522 /* 523 * check if we should setup a default 524 * length: 525 */ 526 527 if (parm->bufsize == 0) { 528 529 parm->bufsize = xfer->max_frame_size; 530 531 if (type == UE_ISOCHRONOUS) { 532 parm->bufsize *= xfer->nframes; 533 } 534 } 535 /* 536 * check if we are about to setup a proxy 537 * type of buffer: 538 */ 539 540 if (xfer->flags.proxy_buffer) { 541 542 /* round bufsize up */ 543 544 parm->bufsize += (xfer->max_frame_size - 1); 545 546 if (parm->bufsize < xfer->max_frame_size) { 547 /* length wrapped around */ 548 parm->err = USB_ERR_INVAL; 549 goto done; 550 } 551 /* subtract remainder */ 552 553 parm->bufsize -= (parm->bufsize % xfer->max_frame_size); 554 555 /* add length of USB device request structure, if any */ 556 557 if (type == UE_CONTROL) { 558 parm->bufsize += REQ_SIZE; /* SETUP message */ 559 } 560 } 561 xfer->max_data_length = parm->bufsize; 562 563 /* Setup "n_frlengths" and "n_frbuffers" */ 564 565 if (type == UE_ISOCHRONOUS) { 566 n_frlengths = xfer->nframes; 567 n_frbuffers = 1; 568 } else { 569 570 if (type == UE_CONTROL) { 571 xfer->flags_int.control_xfr = 1; 572 if (xfer->nframes == 0) { 573 if (parm->bufsize <= REQ_SIZE) { 574 /* 575 * there will never be any data 576 * stage 577 */ 578 xfer->nframes = 1; 579 } else { 580 xfer->nframes = 2; 581 } 582 } 583 } else { 584 if 
(xfer->nframes == 0) { 585 xfer->nframes = 1; 586 } 587 } 588 589 n_frlengths = xfer->nframes; 590 n_frbuffers = xfer->nframes; 591 } 592 593 /* 594 * check if we have room for the 595 * USB device request structure: 596 */ 597 598 if (type == UE_CONTROL) { 599 600 if (xfer->max_data_length < REQ_SIZE) { 601 /* length wrapped around or too small bufsize */ 602 parm->err = USB_ERR_INVAL; 603 goto done; 604 } 605 xfer->max_data_length -= REQ_SIZE; 606 } 607 /* setup "frlengths" */ 608 xfer->frlengths = parm->xfer_length_ptr; 609 parm->xfer_length_ptr += n_frlengths; 610 611 /* setup "frbuffers" */ 612 xfer->frbuffers = parm->xfer_page_cache_ptr; 613 parm->xfer_page_cache_ptr += n_frbuffers; 614 615 /* initialize max frame count */ 616 xfer->max_frame_count = xfer->nframes; 617 618 /* 619 * check if we need to setup 620 * a local buffer: 621 */ 622 623 if (!xfer->flags.ext_buffer) { 624 625 /* align data */ 626 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1)); 627 628 if (parm->buf) { 629 630 xfer->local_buffer = 631 USB_ADD_BYTES(parm->buf, parm->size[0]); 632 633 usbd_xfer_set_frame_offset(xfer, 0, 0); 634 635 if ((type == UE_CONTROL) && (n_frbuffers > 1)) { 636 usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1); 637 } 638 } 639 parm->size[0] += parm->bufsize; 640 641 /* align data again */ 642 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1)); 643 } 644 /* 645 * Compute maximum buffer size 646 */ 647 648 if (parm->bufsize_max < parm->bufsize) { 649 parm->bufsize_max = parm->bufsize; 650 } 651 #if USB_HAVE_BUSDMA 652 if (xfer->flags_int.bdma_enable) { 653 /* 654 * Setup "dma_page_ptr". 655 * 656 * Proof for formula below: 657 * 658 * Assume there are three USB frames having length "a", "b" and 659 * "c". These USB frames will at maximum need "z" 660 * "usb_page" structures. 
"z" is given by: 661 * 662 * z = ((a / USB_PAGE_SIZE) + 2) + ((b / USB_PAGE_SIZE) + 2) + 663 * ((c / USB_PAGE_SIZE) + 2); 664 * 665 * Constraining "a", "b" and "c" like this: 666 * 667 * (a + b + c) <= parm->bufsize 668 * 669 * We know that: 670 * 671 * z <= ((parm->bufsize / USB_PAGE_SIZE) + (3*2)); 672 * 673 * Here is the general formula: 674 */ 675 xfer->dma_page_ptr = parm->dma_page_ptr; 676 parm->dma_page_ptr += (2 * n_frbuffers); 677 parm->dma_page_ptr += (parm->bufsize / USB_PAGE_SIZE); 678 } 679 #endif 680 if (zmps) { 681 /* correct maximum data length */ 682 xfer->max_data_length = 0; 683 } 684 /* subtract USB frame remainder from "hc_max_frame_size" */ 685 686 xfer->max_hc_frame_size = 687 (parm->hc_max_frame_size - 688 (parm->hc_max_frame_size % xfer->max_frame_size)); 689 690 if (xfer->max_hc_frame_size == 0) { 691 parm->err = USB_ERR_INVAL; 692 goto done; 693 } 694 695 /* initialize frame buffers */ 696 697 if (parm->buf) { 698 for (x = 0; x != n_frbuffers; x++) { 699 xfer->frbuffers[x].tag_parent = 700 &xfer->xroot->dma_parent_tag; 701 #if USB_HAVE_BUSDMA 702 if (xfer->flags_int.bdma_enable && 703 (parm->bufsize_max > 0)) { 704 705 if (usb_pc_dmamap_create( 706 xfer->frbuffers + x, 707 parm->bufsize_max)) { 708 parm->err = USB_ERR_NOMEM; 709 goto done; 710 } 711 } 712 #endif 713 } 714 } 715 done: 716 if (parm->err) { 717 /* 718 * Set some dummy values so that we avoid division by zero: 719 */ 720 xfer->max_hc_frame_size = 1; 721 xfer->max_frame_size = 1; 722 xfer->max_packet_size = 1; 723 xfer->max_data_length = 0; 724 xfer->nframes = 0; 725 xfer->max_frame_count = 0; 726 } 727 } 728 729 /*------------------------------------------------------------------------* 730 * usbd_transfer_setup - setup an array of USB transfers 731 * 732 * NOTE: You must always call "usbd_transfer_unsetup" after calling 733 * "usbd_transfer_setup" if success was returned. 734 * 735 * The idea is that the USB device driver should pre-allocate all its 736 * transfers by one call to this function. 
737 * 738 * Return values: 739 * 0: Success 740 * Else: Failure 741 *------------------------------------------------------------------------*/ 742 usb_error_t 743 usbd_transfer_setup(struct usb_device *udev, 744 const uint8_t *ifaces, struct usb_xfer **ppxfer, 745 const struct usb_config *setup_start, uint16_t n_setup, 746 void *priv_sc, struct mtx *xfer_mtx) 747 { 748 struct usb_xfer dummy; 749 struct usb_setup_params parm; 750 const struct usb_config *setup_end = setup_start + n_setup; 751 const struct usb_config *setup; 752 struct usb_endpoint *ep; 753 struct usb_xfer_root *info; 754 struct usb_xfer *xfer; 755 void *buf = NULL; 756 uint16_t n; 757 uint16_t refcount; 758 759 parm.err = 0; 760 refcount = 0; 761 info = NULL; 762 763 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 764 "usbd_transfer_setup can sleep!"); 765 766 /* do some checking first */ 767 768 if (n_setup == 0) { 769 DPRINTFN(6, "setup array has zero length!\n"); 770 return (USB_ERR_INVAL); 771 } 772 if (ifaces == 0) { 773 DPRINTFN(6, "ifaces array is NULL!\n"); 774 return (USB_ERR_INVAL); 775 } 776 if (xfer_mtx == NULL) { 777 DPRINTFN(6, "using global lock\n"); 778 xfer_mtx = &Giant; 779 } 780 /* sanity checks */ 781 for (setup = setup_start, n = 0; 782 setup != setup_end; setup++, n++) { 783 if (setup->bufsize == (usb_frlength_t)-1) { 784 parm.err = USB_ERR_BAD_BUFSIZE; 785 DPRINTF("invalid bufsize\n"); 786 } 787 if (setup->callback == NULL) { 788 parm.err = USB_ERR_NO_CALLBACK; 789 DPRINTF("no callback\n"); 790 } 791 ppxfer[n] = NULL; 792 } 793 794 if (parm.err) { 795 goto done; 796 } 797 bzero(&parm, sizeof(parm)); 798 799 parm.udev = udev; 800 parm.speed = usbd_get_speed(udev); 801 parm.hc_max_packet_count = 1; 802 803 if (parm.speed >= USB_SPEED_MAX) { 804 parm.err = USB_ERR_INVAL; 805 goto done; 806 } 807 /* setup all transfers */ 808 809 while (1) { 810 811 if (buf) { 812 /* 813 * Initialize the "usb_xfer_root" structure, 814 * which is common for all our USB transfers. 815 */ 816 info = USB_ADD_BYTES(buf, 0); 817 818 info->memory_base = buf; 819 info->memory_size = parm.size[0]; 820 821 #if USB_HAVE_BUSDMA 822 info->dma_page_cache_start = USB_ADD_BYTES(buf, parm.size[4]); 823 info->dma_page_cache_end = USB_ADD_BYTES(buf, parm.size[5]); 824 #endif 825 info->xfer_page_cache_start = USB_ADD_BYTES(buf, parm.size[5]); 826 info->xfer_page_cache_end = USB_ADD_BYTES(buf, parm.size[2]); 827 828 cv_init(&info->cv_drain, "WDRAIN"); 829 830 info->xfer_mtx = xfer_mtx; 831 #if USB_HAVE_BUSDMA 832 usb_dma_tag_setup(&info->dma_parent_tag, 833 parm.dma_tag_p, udev->bus->dma_parent_tag[0].tag, 834 xfer_mtx, &usb_bdma_done_event, 32, parm.dma_tag_max); 835 #endif 836 837 info->bus = udev->bus; 838 info->udev = udev; 839 840 TAILQ_INIT(&info->done_q.head); 841 info->done_q.command = &usbd_callback_wrapper; 842 #if USB_HAVE_BUSDMA 843 TAILQ_INIT(&info->dma_q.head); 844 info->dma_q.command = &usb_bdma_work_loop; 845 #endif 846 info->done_m[0].hdr.pm_callback = &usb_callback_proc; 847 info->done_m[0].xroot = info; 848 info->done_m[1].hdr.pm_callback = &usb_callback_proc; 849 info->done_m[1].xroot = info; 850 851 /* 852 * In device side mode control endpoint 853 * requests need to run from a separate 854 * context, else there is a chance of 855 * deadlock! 
856 */ 857 if (setup_start == usb_control_ep_cfg) 858 info->done_p = 859 &udev->bus->control_xfer_proc; 860 else if (xfer_mtx == &Giant) 861 info->done_p = 862 &udev->bus->giant_callback_proc; 863 else 864 info->done_p = 865 &udev->bus->non_giant_callback_proc; 866 } 867 /* reset sizes */ 868 869 parm.size[0] = 0; 870 parm.buf = buf; 871 parm.size[0] += sizeof(info[0]); 872 873 for (setup = setup_start, n = 0; 874 setup != setup_end; setup++, n++) { 875 876 /* skip USB transfers without callbacks: */ 877 if (setup->callback == NULL) { 878 continue; 879 } 880 /* see if there is a matching endpoint */ 881 ep = usbd_get_endpoint(udev, 882 ifaces[setup->if_index], setup); 883 884 if ((ep == NULL) || (ep->methods == NULL)) { 885 if (setup->flags.no_pipe_ok) 886 continue; 887 if ((setup->usb_mode != USB_MODE_DUAL) && 888 (setup->usb_mode != udev->flags.usb_mode)) 889 continue; 890 parm.err = USB_ERR_NO_PIPE; 891 goto done; 892 } 893 894 /* align data properly */ 895 parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1)); 896 897 /* store current setup pointer */ 898 parm.curr_setup = setup; 899 900 if (buf) { 901 /* 902 * Common initialization of the 903 * "usb_xfer" structure. 904 */ 905 xfer = USB_ADD_BYTES(buf, parm.size[0]); 906 xfer->address = udev->address; 907 xfer->priv_sc = priv_sc; 908 xfer->xroot = info; 909 910 usb_callout_init_mtx(&xfer->timeout_handle, 911 &udev->bus->bus_mtx, 0); 912 } else { 913 /* 914 * Setup a dummy xfer, hence we are 915 * writing to the "usb_xfer" 916 * structure pointed to by "xfer" 917 * before we have allocated any 918 * memory: 919 */ 920 xfer = &dummy; 921 bzero(&dummy, sizeof(dummy)); 922 refcount++; 923 } 924 925 /* set transfer endpoint pointer */ 926 xfer->endpoint = ep; 927 928 parm.size[0] += sizeof(xfer[0]); 929 parm.methods = xfer->endpoint->methods; 930 parm.curr_xfer = xfer; 931 932 /* 933 * Call the Host or Device controller transfer 934 * setup routine: 935 */ 936 (udev->bus->methods->xfer_setup) (&parm); 937 938 /* check for error */ 939 if (parm.err) 940 goto done; 941 942 if (buf) { 943 /* 944 * Increment the endpoint refcount. This 945 * basically prevents setting a new 946 * configuration and alternate setting 947 * when USB transfers are in use on 948 * the given interface. Search the USB 949 * code for "endpoint->refcount_alloc" if you 950 * want more information. 951 */ 952 USB_BUS_LOCK(info->bus); 953 if (xfer->endpoint->refcount_alloc >= USB_EP_REF_MAX) 954 parm.err = USB_ERR_INVAL; 955 956 xfer->endpoint->refcount_alloc++; 957 958 if (xfer->endpoint->refcount_alloc == 0) 959 panic("usbd_transfer_setup(): Refcount wrapped to zero\n"); 960 USB_BUS_UNLOCK(info->bus); 961 962 /* 963 * Whenever we set ppxfer[] then we 964 * also need to increment the 965 * "setup_refcount": 966 */ 967 info->setup_refcount++; 968 969 /* 970 * Transfer is successfully setup and 971 * can be used: 972 */ 973 ppxfer[n] = xfer; 974 } 975 976 /* check for error */ 977 if (parm.err) 978 goto done; 979 } 980 981 if (buf || parm.err) { 982 goto done; 983 } 984 if (refcount == 0) { 985 /* no transfers - nothing to do ! */ 986 goto done; 987 } 988 /* align data properly */ 989 parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1)); 990 991 /* store offset temporarily */ 992 parm.size[1] = parm.size[0]; 993 994 /* 995 * The number of DMA tags required depends on 996 * the number of endpoints. The current estimate 997 * for maximum number of DMA tags per endpoint 998 * is two. 
999 */ 1000 parm.dma_tag_max += 2 * MIN(n_setup, USB_EP_MAX); 1001 1002 /* 1003 * DMA tags for QH, TD, Data and more. 1004 */ 1005 parm.dma_tag_max += 8; 1006 1007 parm.dma_tag_p += parm.dma_tag_max; 1008 1009 parm.size[0] += ((uint8_t *)parm.dma_tag_p) - 1010 ((uint8_t *)0); 1011 1012 /* align data properly */ 1013 parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1)); 1014 1015 /* store offset temporarily */ 1016 parm.size[3] = parm.size[0]; 1017 1018 parm.size[0] += ((uint8_t *)parm.dma_page_ptr) - 1019 ((uint8_t *)0); 1020 1021 /* align data properly */ 1022 parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1)); 1023 1024 /* store offset temporarily */ 1025 parm.size[4] = parm.size[0]; 1026 1027 parm.size[0] += ((uint8_t *)parm.dma_page_cache_ptr) - 1028 ((uint8_t *)0); 1029 1030 /* store end offset temporarily */ 1031 parm.size[5] = parm.size[0]; 1032 1033 parm.size[0] += ((uint8_t *)parm.xfer_page_cache_ptr) - 1034 ((uint8_t *)0); 1035 1036 /* store end offset temporarily */ 1037 1038 parm.size[2] = parm.size[0]; 1039 1040 /* align data properly */ 1041 parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1)); 1042 1043 parm.size[6] = parm.size[0]; 1044 1045 parm.size[0] += ((uint8_t *)parm.xfer_length_ptr) - 1046 ((uint8_t *)0); 1047 1048 /* align data properly */ 1049 parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1)); 1050 1051 /* allocate zeroed memory */ 1052 buf = malloc(parm.size[0], M_USB, M_WAITOK | M_ZERO); 1053 1054 if (buf == NULL) { 1055 parm.err = USB_ERR_NOMEM; 1056 DPRINTFN(0, "cannot allocate memory block for " 1057 "configuration (%d bytes)\n", 1058 parm.size[0]); 1059 goto done; 1060 } 1061 parm.dma_tag_p = USB_ADD_BYTES(buf, parm.size[1]); 1062 parm.dma_page_ptr = USB_ADD_BYTES(buf, parm.size[3]); 1063 parm.dma_page_cache_ptr = USB_ADD_BYTES(buf, parm.size[4]); 1064 parm.xfer_page_cache_ptr = USB_ADD_BYTES(buf, parm.size[5]); 1065 parm.xfer_length_ptr = USB_ADD_BYTES(buf, parm.size[6]); 1066 } 1067 1068 done: 1069 if (buf) { 1070 if (info->setup_refcount == 0) { 1071 /* 1072 * "usbd_transfer_unsetup_sub" will unlock 1073 * the bus mutex before returning ! 
1074 */ 1075 USB_BUS_LOCK(info->bus); 1076 1077 /* something went wrong */ 1078 usbd_transfer_unsetup_sub(info, 0); 1079 } 1080 } 1081 if (parm.err) { 1082 usbd_transfer_unsetup(ppxfer, n_setup); 1083 } 1084 return (parm.err); 1085 } 1086 1087 /*------------------------------------------------------------------------* 1088 * usbd_transfer_unsetup_sub - factored out code 1089 *------------------------------------------------------------------------*/ 1090 static void 1091 usbd_transfer_unsetup_sub(struct usb_xfer_root *info, uint8_t needs_delay) 1092 { 1093 struct usb_page_cache *pc; 1094 1095 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED); 1096 1097 /* wait for any outstanding DMA operations */ 1098 1099 if (needs_delay) { 1100 usb_timeout_t temp; 1101 temp = usbd_get_dma_delay(info->udev); 1102 if (temp != 0) { 1103 usb_pause_mtx(&info->bus->bus_mtx, 1104 USB_MS_TO_TICKS(temp)); 1105 } 1106 } 1107 1108 /* make sure that our done messages are not queued anywhere */ 1109 usb_proc_mwait(info->done_p, &info->done_m[0], &info->done_m[1]); 1110 1111 USB_BUS_UNLOCK(info->bus); 1112 1113 #if USB_HAVE_BUSDMA 1114 /* free DMA'able memory, if any */ 1115 pc = info->dma_page_cache_start; 1116 while (pc != info->dma_page_cache_end) { 1117 usb_pc_free_mem(pc); 1118 pc++; 1119 } 1120 1121 /* free DMA maps in all "xfer->frbuffers" */ 1122 pc = info->xfer_page_cache_start; 1123 while (pc != info->xfer_page_cache_end) { 1124 usb_pc_dmamap_destroy(pc); 1125 pc++; 1126 } 1127 1128 /* free all DMA tags */ 1129 usb_dma_tag_unsetup(&info->dma_parent_tag); 1130 #endif 1131 1132 cv_destroy(&info->cv_drain); 1133 1134 /* 1135 * free the "memory_base" last, hence the "info" structure is 1136 * contained within the "memory_base"! 1137 */ 1138 free(info->memory_base, M_USB); 1139 } 1140 1141 /*------------------------------------------------------------------------* 1142 * usbd_transfer_unsetup - unsetup/free an array of USB transfers 1143 * 1144 * NOTE: All USB transfers in progress will get called back passing 1145 * the error code "USB_ERR_CANCELLED" before this function 1146 * returns. 1147 *------------------------------------------------------------------------*/ 1148 void 1149 usbd_transfer_unsetup(struct usb_xfer **pxfer, uint16_t n_setup) 1150 { 1151 struct usb_xfer *xfer; 1152 struct usb_xfer_root *info; 1153 uint8_t needs_delay = 0; 1154 1155 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 1156 "usbd_transfer_unsetup can sleep!"); 1157 1158 while (n_setup--) { 1159 xfer = pxfer[n_setup]; 1160 1161 if (xfer == NULL) 1162 continue; 1163 1164 info = xfer->xroot; 1165 1166 USB_XFER_LOCK(xfer); 1167 USB_BUS_LOCK(info->bus); 1168 1169 /* 1170 * HINT: when you start/stop a transfer, it might be a 1171 * good idea to directly use the "pxfer[]" structure: 1172 * 1173 * usbd_transfer_start(sc->pxfer[0]); 1174 * usbd_transfer_stop(sc->pxfer[0]); 1175 * 1176 * That way, if your code has many parts that will not 1177 * stop running under the same lock, in other words 1178 * "xfer_mtx", the usbd_transfer_start and 1179 * usbd_transfer_stop functions will simply return 1180 * when they detect a NULL pointer argument. 
		 *
		 * To avoid any races we clear the "pxfer[]" pointer
		 * while holding the private mutex of the driver:
		 */
		pxfer[n_setup] = NULL;

		USB_BUS_UNLOCK(info->bus);
		USB_XFER_UNLOCK(xfer);

		usbd_transfer_drain(xfer);

#if USB_HAVE_BUSDMA
		if (xfer->flags_int.bdma_enable)
			needs_delay = 1;
#endif
		/*
		 * NOTE: default endpoint does not have an
		 * interface, even if endpoint->iface_index == 0
		 */
		USB_BUS_LOCK(info->bus);
		xfer->endpoint->refcount_alloc--;
		USB_BUS_UNLOCK(info->bus);

		usb_callout_drain(&xfer->timeout_handle);

		USB_BUS_LOCK(info->bus);

		USB_ASSERT(info->setup_refcount != 0, ("Invalid setup "
		    "reference count\n"));

		info->setup_refcount--;

		if (info->setup_refcount == 0) {
			usbd_transfer_unsetup_sub(info,
			    needs_delay);
		} else {
			USB_BUS_UNLOCK(info->bus);
		}
	}
}

/*------------------------------------------------------------------------*
 *	usbd_control_transfer_init - factored out code
 *
 * In USB Device Mode we have to wait for the SETUP packet which
 * contains the "struct usb_device_request" structure, before we can
 * transfer any data. In USB Host Mode we already have the SETUP
 * packet at the moment the USB transfer is started. This leads us to
 * having to set up the USB transfer at two different places in
 * time. This function just contains factored out control transfer
 * initialisation code, so that we don't duplicate the code.
 *------------------------------------------------------------------------*/
static void
usbd_control_transfer_init(struct usb_xfer *xfer)
{
	struct usb_device_request req;

	/* copy out the USB request header */

	usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));

	/* setup remainder */

	xfer->flags_int.control_rem = UGETW(req.wLength);

	/* copy direction to endpoint variable */

	xfer->endpointno &= ~(UE_DIR_IN | UE_DIR_OUT);
	xfer->endpointno |=
	    (req.bmRequestType & UT_READ) ? UE_DIR_IN : UE_DIR_OUT;
}

/*------------------------------------------------------------------------*
 *	usbd_setup_ctrl_transfer
 *
 * This function handles initialisation of control transfers. Control
 * transfers are special in that they can both transmit and receive
 * data.
 *
 * Return values:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
static int
usbd_setup_ctrl_transfer(struct usb_xfer *xfer)
{
	usb_frlength_t len;

	/* Check for control endpoint stall */
	if (xfer->flags.stall_pipe && xfer->flags_int.control_act) {
		/* the control transfer is no longer active */
		xfer->flags_int.control_stall = 1;
		xfer->flags_int.control_act = 0;
	} else {
		/* don't stall control transfer by default */
		xfer->flags_int.control_stall = 0;
	}

	/* Check for invalid number of frames */
	if (xfer->nframes > 2) {
		/*
		 * If you need to split a control transfer, you
		 * have to do one part at a time. Only with
		 * non-control transfers can you do multiple
		 * parts at a time.
1286 */ 1287 DPRINTFN(0, "Too many frames: %u\n", 1288 (unsigned int)xfer->nframes); 1289 goto error; 1290 } 1291 1292 /* 1293 * Check if there is a control 1294 * transfer in progress: 1295 */ 1296 if (xfer->flags_int.control_act) { 1297 1298 if (xfer->flags_int.control_hdr) { 1299 1300 /* clear send header flag */ 1301 1302 xfer->flags_int.control_hdr = 0; 1303 1304 /* setup control transfer */ 1305 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) { 1306 usbd_control_transfer_init(xfer); 1307 } 1308 } 1309 /* get data length */ 1310 1311 len = xfer->sumlen; 1312 1313 } else { 1314 1315 /* the size of the SETUP structure is hardcoded ! */ 1316 1317 if (xfer->frlengths[0] != sizeof(struct usb_device_request)) { 1318 DPRINTFN(0, "Wrong framelength %u != %zu\n", 1319 xfer->frlengths[0], sizeof(struct 1320 usb_device_request)); 1321 goto error; 1322 } 1323 /* check USB mode */ 1324 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) { 1325 1326 /* check number of frames */ 1327 if (xfer->nframes != 1) { 1328 /* 1329 * We need to receive the setup 1330 * message first so that we know the 1331 * data direction! 1332 */ 1333 DPRINTF("Misconfigured transfer\n"); 1334 goto error; 1335 } 1336 /* 1337 * Set a dummy "control_rem" value. This 1338 * variable will be overwritten later by a 1339 * call to "usbd_control_transfer_init()" ! 1340 */ 1341 xfer->flags_int.control_rem = 0xFFFF; 1342 } else { 1343 1344 /* setup "endpoint" and "control_rem" */ 1345 1346 usbd_control_transfer_init(xfer); 1347 } 1348 1349 /* set transfer-header flag */ 1350 1351 xfer->flags_int.control_hdr = 1; 1352 1353 /* get data length */ 1354 1355 len = (xfer->sumlen - sizeof(struct usb_device_request)); 1356 } 1357 1358 /* check if there is a length mismatch */ 1359 1360 if (len > xfer->flags_int.control_rem) { 1361 DPRINTFN(0, "Length (%d) greater than " 1362 "remaining length (%d)\n", len, 1363 xfer->flags_int.control_rem); 1364 goto error; 1365 } 1366 /* check if we are doing a short transfer */ 1367 1368 if (xfer->flags.force_short_xfer) { 1369 xfer->flags_int.control_rem = 0; 1370 } else { 1371 if ((len != xfer->max_data_length) && 1372 (len != xfer->flags_int.control_rem) && 1373 (xfer->nframes != 1)) { 1374 DPRINTFN(0, "Short control transfer without " 1375 "force_short_xfer set\n"); 1376 goto error; 1377 } 1378 xfer->flags_int.control_rem -= len; 1379 } 1380 1381 /* the status part is executed when "control_act" is 0 */ 1382 1383 if ((xfer->flags_int.control_rem > 0) || 1384 (xfer->flags.manual_status)) { 1385 /* don't execute the STATUS stage yet */ 1386 xfer->flags_int.control_act = 1; 1387 1388 /* sanity check */ 1389 if ((!xfer->flags_int.control_hdr) && 1390 (xfer->nframes == 1)) { 1391 /* 1392 * This is not a valid operation! 1393 */ 1394 DPRINTFN(0, "Invalid parameter " 1395 "combination\n"); 1396 goto error; 1397 } 1398 } else { 1399 /* time to execute the STATUS stage */ 1400 xfer->flags_int.control_act = 0; 1401 } 1402 return (0); /* success */ 1403 1404 error: 1405 return (1); /* failure */ 1406 } 1407 1408 /*------------------------------------------------------------------------* 1409 * usbd_transfer_submit - start USB hardware for the given transfer 1410 * 1411 * This function should only be called from the USB callback. 
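 *
 * A typical callback (a sketch only; the "xxx" names are hypothetical
 * and not part of this file) re-submits the transfer from both the
 * setup and the transferred state, using the usual USB_GET_STATE()
 * macro from "usbdi.h":
 *
 *	static void
 *	xxx_read_callback(struct usb_xfer *xfer, usb_error_t error)
 *	{
 *		switch (USB_GET_STATE(xfer)) {
 *		case USB_ST_TRANSFERRED:
 *		case USB_ST_SETUP:
 *			usbd_xfer_set_frame_len(xfer, 0,
 *			    usbd_xfer_max_len(xfer));
 *			usbd_transfer_submit(xfer);
 *			break;
 *		default:
 *			break;
 *		}
 *	}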
1412 *------------------------------------------------------------------------*/ 1413 void 1414 usbd_transfer_submit(struct usb_xfer *xfer) 1415 { 1416 struct usb_xfer_root *info; 1417 struct usb_bus *bus; 1418 usb_frcount_t x; 1419 1420 info = xfer->xroot; 1421 bus = info->bus; 1422 1423 DPRINTF("xfer=%p, endpoint=%p, nframes=%d, dir=%s\n", 1424 xfer, xfer->endpoint, xfer->nframes, USB_GET_DATA_ISREAD(xfer) ? 1425 "read" : "write"); 1426 1427 #ifdef USB_DEBUG 1428 if (USB_DEBUG_VAR > 0) { 1429 USB_BUS_LOCK(bus); 1430 1431 usb_dump_endpoint(xfer->endpoint); 1432 1433 USB_BUS_UNLOCK(bus); 1434 } 1435 #endif 1436 1437 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 1438 USB_BUS_LOCK_ASSERT(bus, MA_NOTOWNED); 1439 1440 /* Only open the USB transfer once! */ 1441 if (!xfer->flags_int.open) { 1442 xfer->flags_int.open = 1; 1443 1444 DPRINTF("open\n"); 1445 1446 USB_BUS_LOCK(bus); 1447 (xfer->endpoint->methods->open) (xfer); 1448 USB_BUS_UNLOCK(bus); 1449 } 1450 /* set "transferring" flag */ 1451 xfer->flags_int.transferring = 1; 1452 1453 #if USB_HAVE_POWERD 1454 /* increment power reference */ 1455 usbd_transfer_power_ref(xfer, 1); 1456 #endif 1457 /* 1458 * Check if the transfer is waiting on a queue, most 1459 * frequently the "done_q": 1460 */ 1461 if (xfer->wait_queue) { 1462 USB_BUS_LOCK(bus); 1463 usbd_transfer_dequeue(xfer); 1464 USB_BUS_UNLOCK(bus); 1465 } 1466 /* clear "did_dma_delay" flag */ 1467 xfer->flags_int.did_dma_delay = 0; 1468 1469 /* clear "did_close" flag */ 1470 xfer->flags_int.did_close = 0; 1471 1472 #if USB_HAVE_BUSDMA 1473 /* clear "bdma_setup" flag */ 1474 xfer->flags_int.bdma_setup = 0; 1475 #endif 1476 /* by default we cannot cancel any USB transfer immediately */ 1477 xfer->flags_int.can_cancel_immed = 0; 1478 1479 /* clear lengths and frame counts by default */ 1480 xfer->sumlen = 0; 1481 xfer->actlen = 0; 1482 xfer->aframes = 0; 1483 1484 /* clear any previous errors */ 1485 xfer->error = 0; 1486 1487 /* Check if the device is still alive */ 1488 if (info->udev->state < USB_STATE_POWERED) { 1489 USB_BUS_LOCK(bus); 1490 /* 1491 * Must return cancelled error code else 1492 * device drivers can hang. 
1493 */ 1494 usbd_transfer_done(xfer, USB_ERR_CANCELLED); 1495 USB_BUS_UNLOCK(bus); 1496 return; 1497 } 1498 1499 /* sanity check */ 1500 if (xfer->nframes == 0) { 1501 if (xfer->flags.stall_pipe) { 1502 /* 1503 * Special case - want to stall without transferring 1504 * any data: 1505 */ 1506 DPRINTF("xfer=%p nframes=0: stall " 1507 "or clear stall!\n", xfer); 1508 USB_BUS_LOCK(bus); 1509 xfer->flags_int.can_cancel_immed = 1; 1510 /* start the transfer */ 1511 usb_command_wrapper(&xfer->endpoint->endpoint_q, xfer); 1512 USB_BUS_UNLOCK(bus); 1513 return; 1514 } 1515 USB_BUS_LOCK(bus); 1516 usbd_transfer_done(xfer, USB_ERR_INVAL); 1517 USB_BUS_UNLOCK(bus); 1518 return; 1519 } 1520 /* compute total transfer length */ 1521 1522 for (x = 0; x != xfer->nframes; x++) { 1523 xfer->sumlen += xfer->frlengths[x]; 1524 if (xfer->sumlen < xfer->frlengths[x]) { 1525 /* length wrapped around */ 1526 USB_BUS_LOCK(bus); 1527 usbd_transfer_done(xfer, USB_ERR_INVAL); 1528 USB_BUS_UNLOCK(bus); 1529 return; 1530 } 1531 } 1532 1533 /* clear some internal flags */ 1534 1535 xfer->flags_int.short_xfer_ok = 0; 1536 xfer->flags_int.short_frames_ok = 0; 1537 1538 /* check if this is a control transfer */ 1539 1540 if (xfer->flags_int.control_xfr) { 1541 1542 if (usbd_setup_ctrl_transfer(xfer)) { 1543 USB_BUS_LOCK(bus); 1544 usbd_transfer_done(xfer, USB_ERR_STALLED); 1545 USB_BUS_UNLOCK(bus); 1546 return; 1547 } 1548 } 1549 /* 1550 * Setup filtered version of some transfer flags, 1551 * in case of data read direction 1552 */ 1553 if (USB_GET_DATA_ISREAD(xfer)) { 1554 1555 if (xfer->flags.short_frames_ok) { 1556 xfer->flags_int.short_xfer_ok = 1; 1557 xfer->flags_int.short_frames_ok = 1; 1558 } else if (xfer->flags.short_xfer_ok) { 1559 xfer->flags_int.short_xfer_ok = 1; 1560 1561 /* check for control transfer */ 1562 if (xfer->flags_int.control_xfr) { 1563 /* 1564 * 1) Control transfers do not support 1565 * reception of multiple short USB 1566 * frames in host mode and device side 1567 * mode, with exception of: 1568 * 1569 * 2) Due to sometimes buggy device 1570 * side firmware we need to do a 1571 * STATUS stage in case of short 1572 * control transfers in USB host mode. 1573 * The STATUS stage then becomes the 1574 * "alt_next" to the DATA stage. 
1575 */ 1576 xfer->flags_int.short_frames_ok = 1; 1577 } 1578 } 1579 } 1580 /* 1581 * Check if BUS-DMA support is enabled and try to load virtual 1582 * buffers into DMA, if any: 1583 */ 1584 #if USB_HAVE_BUSDMA 1585 if (xfer->flags_int.bdma_enable) { 1586 /* insert the USB transfer last in the BUS-DMA queue */ 1587 usb_command_wrapper(&xfer->xroot->dma_q, xfer); 1588 return; 1589 } 1590 #endif 1591 /* 1592 * Enter the USB transfer into the Host Controller or 1593 * Device Controller schedule: 1594 */ 1595 usbd_pipe_enter(xfer); 1596 } 1597 1598 /*------------------------------------------------------------------------* 1599 * usbd_pipe_enter - factored out code 1600 *------------------------------------------------------------------------*/ 1601 void 1602 usbd_pipe_enter(struct usb_xfer *xfer) 1603 { 1604 struct usb_endpoint *ep; 1605 1606 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 1607 1608 USB_BUS_LOCK(xfer->xroot->bus); 1609 1610 ep = xfer->endpoint; 1611 1612 DPRINTF("enter\n"); 1613 1614 /* enter the transfer */ 1615 (ep->methods->enter) (xfer); 1616 1617 xfer->flags_int.can_cancel_immed = 1; 1618 1619 /* check for transfer error */ 1620 if (xfer->error) { 1621 /* some error has happened */ 1622 usbd_transfer_done(xfer, 0); 1623 USB_BUS_UNLOCK(xfer->xroot->bus); 1624 return; 1625 } 1626 1627 /* start the transfer */ 1628 usb_command_wrapper(&ep->endpoint_q, xfer); 1629 USB_BUS_UNLOCK(xfer->xroot->bus); 1630 } 1631 1632 /*------------------------------------------------------------------------* 1633 * usbd_transfer_start - start an USB transfer 1634 * 1635 * NOTE: Calling this function more than one time will only 1636 * result in a single transfer start, until the USB transfer 1637 * completes. 1638 *------------------------------------------------------------------------*/ 1639 void 1640 usbd_transfer_start(struct usb_xfer *xfer) 1641 { 1642 if (xfer == NULL) { 1643 /* transfer is gone */ 1644 return; 1645 } 1646 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 1647 1648 /* mark the USB transfer started */ 1649 1650 if (!xfer->flags_int.started) { 1651 /* lock the BUS lock to avoid races updating flags_int */ 1652 USB_BUS_LOCK(xfer->xroot->bus); 1653 xfer->flags_int.started = 1; 1654 USB_BUS_UNLOCK(xfer->xroot->bus); 1655 } 1656 /* check if the USB transfer callback is already transferring */ 1657 1658 if (xfer->flags_int.transferring) { 1659 return; 1660 } 1661 USB_BUS_LOCK(xfer->xroot->bus); 1662 /* call the USB transfer callback */ 1663 usbd_callback_ss_done_defer(xfer); 1664 USB_BUS_UNLOCK(xfer->xroot->bus); 1665 } 1666 1667 /*------------------------------------------------------------------------* 1668 * usbd_transfer_stop - stop an USB transfer 1669 * 1670 * NOTE: Calling this function more than one time will only 1671 * result in a single transfer stop. 1672 * NOTE: When this function returns it is not safe to free nor 1673 * reuse any DMA buffers. See "usbd_transfer_drain()". 
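 *
 * A common pattern (sketch) is to call "usbd_transfer_stop()" on all
 * transfers while holding the driver's mutex and then, after dropping
 * that mutex, to call "usbd_transfer_drain()" before any buffers are
 * freed or reused.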
1674 *------------------------------------------------------------------------*/ 1675 void 1676 usbd_transfer_stop(struct usb_xfer *xfer) 1677 { 1678 struct usb_endpoint *ep; 1679 1680 if (xfer == NULL) { 1681 /* transfer is gone */ 1682 return; 1683 } 1684 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 1685 1686 /* check if the USB transfer was ever opened */ 1687 1688 if (!xfer->flags_int.open) { 1689 if (xfer->flags_int.started) { 1690 /* nothing to do except clearing the "started" flag */ 1691 /* lock the BUS lock to avoid races updating flags_int */ 1692 USB_BUS_LOCK(xfer->xroot->bus); 1693 xfer->flags_int.started = 0; 1694 USB_BUS_UNLOCK(xfer->xroot->bus); 1695 } 1696 return; 1697 } 1698 /* try to stop the current USB transfer */ 1699 1700 USB_BUS_LOCK(xfer->xroot->bus); 1701 /* override any previous error */ 1702 xfer->error = USB_ERR_CANCELLED; 1703 1704 /* 1705 * Clear "open" and "started" when both private and USB lock 1706 * is locked so that we don't get a race updating "flags_int" 1707 */ 1708 xfer->flags_int.open = 0; 1709 xfer->flags_int.started = 0; 1710 1711 /* 1712 * Check if we can cancel the USB transfer immediately. 1713 */ 1714 if (xfer->flags_int.transferring) { 1715 if (xfer->flags_int.can_cancel_immed && 1716 (!xfer->flags_int.did_close)) { 1717 DPRINTF("close\n"); 1718 /* 1719 * The following will lead to an USB_ERR_CANCELLED 1720 * error code being passed to the USB callback. 1721 */ 1722 (xfer->endpoint->methods->close) (xfer); 1723 /* only close once */ 1724 xfer->flags_int.did_close = 1; 1725 } else { 1726 /* need to wait for the next done callback */ 1727 } 1728 } else { 1729 DPRINTF("close\n"); 1730 1731 /* close here and now */ 1732 (xfer->endpoint->methods->close) (xfer); 1733 1734 /* 1735 * Any additional DMA delay is done by 1736 * "usbd_transfer_unsetup()". 1737 */ 1738 1739 /* 1740 * Special case. Check if we need to restart a blocked 1741 * endpoint. 1742 */ 1743 ep = xfer->endpoint; 1744 1745 /* 1746 * If the current USB transfer is completing we need 1747 * to start the next one: 1748 */ 1749 if (ep->endpoint_q.curr == xfer) { 1750 usb_command_wrapper(&ep->endpoint_q, NULL); 1751 } 1752 } 1753 1754 USB_BUS_UNLOCK(xfer->xroot->bus); 1755 } 1756 1757 /*------------------------------------------------------------------------* 1758 * usbd_transfer_pending 1759 * 1760 * This function will check if an USB transfer is pending which is a 1761 * little bit complicated! 1762 * Return values: 1763 * 0: Not pending 1764 * 1: Pending: The USB transfer will receive a callback in the future. 
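 *
 * "Pending" covers three cases: the transfer is currently
 * transferring, it is sitting on a wait queue, or it is scheduled for
 * a callback on the done queue.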
1765 *------------------------------------------------------------------------*/ 1766 uint8_t 1767 usbd_transfer_pending(struct usb_xfer *xfer) 1768 { 1769 struct usb_xfer_root *info; 1770 struct usb_xfer_queue *pq; 1771 1772 if (xfer == NULL) { 1773 /* transfer is gone */ 1774 return (0); 1775 } 1776 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 1777 1778 if (xfer->flags_int.transferring) { 1779 /* trivial case */ 1780 return (1); 1781 } 1782 USB_BUS_LOCK(xfer->xroot->bus); 1783 if (xfer->wait_queue) { 1784 /* we are waiting on a queue somewhere */ 1785 USB_BUS_UNLOCK(xfer->xroot->bus); 1786 return (1); 1787 } 1788 info = xfer->xroot; 1789 pq = &info->done_q; 1790 1791 if (pq->curr == xfer) { 1792 /* we are currently scheduled for callback */ 1793 USB_BUS_UNLOCK(xfer->xroot->bus); 1794 return (1); 1795 } 1796 /* we are not pending */ 1797 USB_BUS_UNLOCK(xfer->xroot->bus); 1798 return (0); 1799 } 1800 1801 /*------------------------------------------------------------------------* 1802 * usbd_transfer_drain 1803 * 1804 * This function will stop the USB transfer and wait for any 1805 * additional BUS-DMA and HW-DMA operations to complete. Buffers that 1806 * are loaded into DMA can safely be freed or reused after that this 1807 * function has returned. 1808 *------------------------------------------------------------------------*/ 1809 void 1810 usbd_transfer_drain(struct usb_xfer *xfer) 1811 { 1812 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 1813 "usbd_transfer_drain can sleep!"); 1814 1815 if (xfer == NULL) { 1816 /* transfer is gone */ 1817 return; 1818 } 1819 if (xfer->xroot->xfer_mtx != &Giant) { 1820 USB_XFER_LOCK_ASSERT(xfer, MA_NOTOWNED); 1821 } 1822 USB_XFER_LOCK(xfer); 1823 1824 usbd_transfer_stop(xfer); 1825 1826 while (usbd_transfer_pending(xfer) || 1827 xfer->flags_int.doing_callback) { 1828 1829 /* 1830 * It is allowed that the callback can drop its 1831 * transfer mutex. In that case checking only 1832 * "usbd_transfer_pending()" is not enough to tell if 1833 * the USB transfer is fully drained. We also need to 1834 * check the internal "doing_callback" flag. 1835 */ 1836 xfer->flags_int.draining = 1; 1837 1838 /* 1839 * Wait until the current outstanding USB 1840 * transfer is complete ! 1841 */ 1842 cv_wait(&xfer->xroot->cv_drain, xfer->xroot->xfer_mtx); 1843 } 1844 USB_XFER_UNLOCK(xfer); 1845 } 1846 1847 struct usb_page_cache * 1848 usbd_xfer_get_frame(struct usb_xfer *xfer, usb_frcount_t frindex) 1849 { 1850 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); 1851 1852 return (&xfer->frbuffers[frindex]); 1853 } 1854 1855 /*------------------------------------------------------------------------* 1856 * usbd_xfer_get_fps_shift 1857 * 1858 * The following function is only useful for isochronous transfers. It 1859 * returns how many times the frame execution rate has been shifted 1860 * down. 
1861 * 1862 * Return value: 1863 * Success: 0..3 1864 * Failure: 0 1865 *------------------------------------------------------------------------*/ 1866 uint8_t 1867 usbd_xfer_get_fps_shift(struct usb_xfer *xfer) 1868 { 1869 return (xfer->fps_shift); 1870 } 1871 1872 usb_frlength_t 1873 usbd_xfer_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex) 1874 { 1875 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); 1876 1877 return (xfer->frlengths[frindex]); 1878 } 1879 1880 /*------------------------------------------------------------------------* 1881 * usbd_xfer_set_frame_data 1882 * 1883 * This function sets the pointer of the buffer that should 1884 * loaded directly into DMA for the given USB frame. Passing "ptr" 1885 * equal to NULL while the corresponding "frlength" is greater 1886 * than zero gives undefined results! 1887 *------------------------------------------------------------------------*/ 1888 void 1889 usbd_xfer_set_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex, 1890 void *ptr, usb_frlength_t len) 1891 { 1892 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); 1893 1894 /* set virtual address to load and length */ 1895 xfer->frbuffers[frindex].buffer = ptr; 1896 usbd_xfer_set_frame_len(xfer, frindex, len); 1897 } 1898 1899 void 1900 usbd_xfer_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex, 1901 void **ptr, int *len) 1902 { 1903 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); 1904 1905 if (ptr != NULL) 1906 *ptr = xfer->frbuffers[frindex].buffer; 1907 if (len != NULL) 1908 *len = xfer->frlengths[frindex]; 1909 } 1910 1911 void 1912 usbd_xfer_status(struct usb_xfer *xfer, int *actlen, int *sumlen, int *aframes, 1913 int *nframes) 1914 { 1915 if (actlen != NULL) 1916 *actlen = xfer->actlen; 1917 if (sumlen != NULL) 1918 *sumlen = xfer->sumlen; 1919 if (aframes != NULL) 1920 *aframes = xfer->aframes; 1921 if (nframes != NULL) 1922 *nframes = xfer->nframes; 1923 } 1924 1925 /*------------------------------------------------------------------------* 1926 * usbd_xfer_set_frame_offset 1927 * 1928 * This function sets the frame data buffer offset relative to the beginning 1929 * of the USB DMA buffer allocated for this USB transfer. 
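 *
 * For example, the control transfer setup code above places frame
 * index 1 at offset REQ_SIZE (8 bytes), so that the 8-byte SETUP
 * header in frame 0 and the data stage share the same local buffer.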
1930 *------------------------------------------------------------------------*/ 1931 void 1932 usbd_xfer_set_frame_offset(struct usb_xfer *xfer, usb_frlength_t offset, 1933 usb_frcount_t frindex) 1934 { 1935 KASSERT(!xfer->flags.ext_buffer, ("Cannot offset data frame " 1936 "when the USB buffer is external\n")); 1937 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); 1938 1939 /* set virtual address to load */ 1940 xfer->frbuffers[frindex].buffer = 1941 USB_ADD_BYTES(xfer->local_buffer, offset); 1942 } 1943 1944 void 1945 usbd_xfer_set_interval(struct usb_xfer *xfer, int i) 1946 { 1947 xfer->interval = i; 1948 } 1949 1950 void 1951 usbd_xfer_set_timeout(struct usb_xfer *xfer, int t) 1952 { 1953 xfer->timeout = t; 1954 } 1955 1956 void 1957 usbd_xfer_set_frames(struct usb_xfer *xfer, usb_frcount_t n) 1958 { 1959 xfer->nframes = n; 1960 } 1961 1962 usb_frcount_t 1963 usbd_xfer_max_frames(struct usb_xfer *xfer) 1964 { 1965 return (xfer->max_frame_count); 1966 } 1967 1968 usb_frlength_t 1969 usbd_xfer_max_len(struct usb_xfer *xfer) 1970 { 1971 return (xfer->max_data_length); 1972 } 1973 1974 usb_frlength_t 1975 usbd_xfer_max_framelen(struct usb_xfer *xfer) 1976 { 1977 return (xfer->max_frame_size); 1978 } 1979 1980 void 1981 usbd_xfer_set_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex, 1982 usb_frlength_t len) 1983 { 1984 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); 1985 1986 xfer->frlengths[frindex] = len; 1987 } 1988 1989 /*------------------------------------------------------------------------* 1990 * usb_callback_proc - factored out code 1991 * 1992 * This function performs USB callbacks. 1993 *------------------------------------------------------------------------*/ 1994 static void 1995 usb_callback_proc(struct usb_proc_msg *_pm) 1996 { 1997 struct usb_done_msg *pm = (void *)_pm; 1998 struct usb_xfer_root *info = pm->xroot; 1999 2000 /* Change locking order */ 2001 USB_BUS_UNLOCK(info->bus); 2002 2003 /* 2004 * We exploit the fact that the mutex is the same for all 2005 * callbacks that will be called from this thread: 2006 */ 2007 mtx_lock(info->xfer_mtx); 2008 USB_BUS_LOCK(info->bus); 2009 2010 /* Continue where we lost track */ 2011 usb_command_wrapper(&info->done_q, 2012 info->done_q.curr); 2013 2014 mtx_unlock(info->xfer_mtx); 2015 } 2016 2017 /*------------------------------------------------------------------------* 2018 * usbd_callback_ss_done_defer 2019 * 2020 * This function will defer the start, stop and done callback to the 2021 * correct thread. 2022 *------------------------------------------------------------------------*/ 2023 static void 2024 usbd_callback_ss_done_defer(struct usb_xfer *xfer) 2025 { 2026 struct usb_xfer_root *info = xfer->xroot; 2027 struct usb_xfer_queue *pq = &info->done_q; 2028 2029 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED); 2030 2031 if (pq->curr != xfer) { 2032 usbd_transfer_enqueue(pq, xfer); 2033 } 2034 if (!pq->recurse_1) { 2035 2036 /* 2037 * We have to postpone the callback due to the fact we 2038 * will have a Lock Order Reversal, LOR, if we try to 2039 * proceed ! 2040 */ 2041 if (usb_proc_msignal(info->done_p, 2042 &info->done_m[0], &info->done_m[1])) { 2043 /* ignore */ 2044 } 2045 } else { 2046 /* clear second recurse flag */ 2047 pq->recurse_2 = 0; 2048 } 2049 return; 2050 2051 } 2052 2053 /*------------------------------------------------------------------------* 2054 * usbd_callback_wrapper 2055 * 2056 * This is a wrapper for USB callbacks. 
This wrapper does some
2057  * auto-magic things like figuring out if we can call the callback
2058  * directly from the current context or if we need to wake up the
2059  * interrupt process.
2060  *------------------------------------------------------------------------*/
2061 static void
2062 usbd_callback_wrapper(struct usb_xfer_queue *pq)
2063 {
2064 	struct usb_xfer *xfer = pq->curr;
2065 	struct usb_xfer_root *info = xfer->xroot;
2066 
2067 	USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
2068 	if (!mtx_owned(info->xfer_mtx)) {
2069 		/*
2070 		 * Cases that end up here:
2071 		 *
2072 		 * 5) HW interrupt done callback or other source.
2073 		 */
2074 		DPRINTFN(3, "case 5\n");
2075 
2076 		/*
2077 		 * We have to postpone the callback due to the fact we
2078 		 * will have a Lock Order Reversal, LOR, if we try to
2079 		 * proceed !
2080 		 */
2081 		if (usb_proc_msignal(info->done_p,
2082 		    &info->done_m[0], &info->done_m[1])) {
2083 			/* ignore */
2084 		}
2085 		return;
2086 	}
2087 	/*
2088 	 * Cases that end up here:
2089 	 *
2090 	 * 1) We are starting a transfer
2091 	 * 2) We are prematurely calling back a transfer
2092 	 * 3) We are stopping a transfer
2093 	 * 4) We are doing an ordinary callback
2094 	 */
2095 	DPRINTFN(3, "case 1-4\n");
2096 	/* get next USB transfer in the queue */
2097 	info->done_q.curr = NULL;
2098 
2099 	/* set flag in case of drain */
2100 	xfer->flags_int.doing_callback = 1;
2101 
2102 	USB_BUS_UNLOCK(info->bus);
2103 	USB_BUS_LOCK_ASSERT(info->bus, MA_NOTOWNED);
2104 
2105 	/* set correct USB state for callback */
2106 	if (!xfer->flags_int.transferring) {
2107 		xfer->usb_state = USB_ST_SETUP;
2108 		if (!xfer->flags_int.started) {
2109 			/* we got stopped before we even got started */
2110 			USB_BUS_LOCK(info->bus);
2111 			goto done;
2112 		}
2113 	} else {
2114 
2115 		if (usbd_callback_wrapper_sub(xfer)) {
2116 			/* the callback has been deferred */
2117 			USB_BUS_LOCK(info->bus);
2118 			goto done;
2119 		}
2120 #if USB_HAVE_POWERD
2121 		/* decrement power reference */
2122 		usbd_transfer_power_ref(xfer, -1);
2123 #endif
2124 		xfer->flags_int.transferring = 0;
2125 
2126 		if (xfer->error) {
2127 			xfer->usb_state = USB_ST_ERROR;
2128 		} else {
2129 			/* set transferred state */
2130 			xfer->usb_state = USB_ST_TRANSFERRED;
2131 #if USB_HAVE_BUSDMA
2132 			/* sync DMA memory, if any */
2133 			if (xfer->flags_int.bdma_enable &&
2134 			    (!xfer->flags_int.bdma_no_post_sync)) {
2135 				usb_bdma_post_sync(xfer);
2136 			}
2137 #endif
2138 		}
2139 	}
2140 
2141 	/* call processing routine */
2142 	(xfer->callback) (xfer, xfer->error);
2143 
2144 	/* pick up the USB mutex again */
2145 	USB_BUS_LOCK(info->bus);
2146 
2147 	/*
2148 	 * Check if we got started after we got cancelled, but
2149 	 * before we managed to do the callback.
2150 	 */
2151 	if ((!xfer->flags_int.open) &&
2152 	    (xfer->flags_int.started) &&
2153 	    (xfer->usb_state == USB_ST_ERROR)) {
2154 		/* clear flag in case of drain */
2155 		xfer->flags_int.doing_callback = 0;
2156 		/* try to loop, but not recursively */
2157 		usb_command_wrapper(&info->done_q, xfer);
2158 		return;
2159 	}
2160 
2161 done:
2162 	/* clear flag in case of drain */
2163 	xfer->flags_int.doing_callback = 0;
2164 
2165 	/*
2166 	 * Check if we are draining.
2167 	 */
2168 	if (xfer->flags_int.draining &&
2169 	    (!xfer->flags_int.transferring)) {
2170 		/* "usbd_transfer_drain()" is waiting for end of transfer */
2171 		xfer->flags_int.draining = 0;
2172 		cv_broadcast(&info->cv_drain);
2173 	}
2174 
2175 	/* do the next callback, if any */
2176 	usb_command_wrapper(&info->done_q,
2177 	    info->done_q.curr);
2178 }
2179 
2180 /*------------------------------------------------------------------------*
2181  * usb_dma_delay_done_cb
2182  *
2183  * This function is called when the DMA delay has been executed, and
2184  * will make sure that the callback is called to complete the USB
2185  * transfer. This code path is usually only used when there is an USB
2186  * error like USB_ERR_CANCELLED.
2187  *------------------------------------------------------------------------*/
2188 static void
2189 usb_dma_delay_done_cb(void *arg)
2190 {
2191 	struct usb_xfer *xfer = arg;
2192 
2193 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2194 
2195 	DPRINTFN(3, "Completed %p\n", xfer);
2196 
2197 	/* queue callback for execution, again */
2198 	usbd_transfer_done(xfer, 0);
2199 }
2200 
2201 /*------------------------------------------------------------------------*
2202  * usbd_transfer_dequeue
2203  *
2204  *  - This function is used to remove an USB transfer from a USB
2205  *  transfer queue.
2206  *
2207  *  - This function can be called multiple times in a row.
2208  *------------------------------------------------------------------------*/
2209 void
2210 usbd_transfer_dequeue(struct usb_xfer *xfer)
2211 {
2212 	struct usb_xfer_queue *pq;
2213 
2214 	pq = xfer->wait_queue;
2215 	if (pq) {
2216 		TAILQ_REMOVE(&pq->head, xfer, wait_entry);
2217 		xfer->wait_queue = NULL;
2218 	}
2219 }
2220 
2221 /*------------------------------------------------------------------------*
2222  * usbd_transfer_enqueue
2223  *
2224  *  - This function is used to insert an USB transfer into a USB
2225  *  transfer queue.
2226  *
2227  *  - This function can be called multiple times in a row.
2228  *------------------------------------------------------------------------*/
2229 void
2230 usbd_transfer_enqueue(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2231 {
2232 	/*
2233 	 * Insert the USB transfer into the queue, if it is not
2234 	 * already on a USB transfer queue:
2235 	 */
2236 	if (xfer->wait_queue == NULL) {
2237 		xfer->wait_queue = pq;
2238 		TAILQ_INSERT_TAIL(&pq->head, xfer, wait_entry);
2239 	}
2240 }
2241 
2242 /*------------------------------------------------------------------------*
2243  * usbd_transfer_done
2244  *
2245  *  - This function is used to remove an USB transfer from the busdma,
2246  *  pipe or interrupt queue.
2247  *
2248  *  - This function is used to queue the USB transfer on the done
2249  *  queue.
2250  *
2251  *  - This function is used to stop any USB transfer timeouts.
2252  *------------------------------------------------------------------------*/
2253 void
2254 usbd_transfer_done(struct usb_xfer *xfer, usb_error_t error)
2255 {
2256 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2257 
2258 	DPRINTF("err=%s\n", usbd_errstr(error));
2259 
2260 	/*
2261 	 * If we are not transferring then just return.
2262 	 * This can happen during transfer cancel.
2263 	 */
2264 	if (!xfer->flags_int.transferring) {
2265 		DPRINTF("not transferring\n");
2266 		/* end of control transfer, if any */
2267 		xfer->flags_int.control_act = 0;
2268 		return;
2269 	}
2270 	/* only set transfer error if not already set */
2271 	if (!xfer->error) {
2272 		xfer->error = error;
2273 	}
2274 	/* stop any callouts */
2275 	usb_callout_stop(&xfer->timeout_handle);
2276 
2277 	/*
2278 	 * If we are waiting on a queue, just remove the USB transfer
2279 	 * from the queue, if any. We should have the required locks
2280 	 * locked to do the remove when this function is called.
2281 	 */
2282 	usbd_transfer_dequeue(xfer);
2283 
2284 #if USB_HAVE_BUSDMA
2285 	if (mtx_owned(xfer->xroot->xfer_mtx)) {
2286 		struct usb_xfer_queue *pq;
2287 
2288 		/*
2289 		 * If the private USB lock is not locked, then we assume
2290 		 * that the BUS-DMA load stage has been passed:
2291 		 */
2292 		pq = &xfer->xroot->dma_q;
2293 
2294 		if (pq->curr == xfer) {
2295 			/* start the next BUS-DMA load, if any */
2296 			usb_command_wrapper(pq, NULL);
2297 		}
2298 	}
2299 #endif
2300 	/* keep some statistics */
2301 	if (xfer->error) {
2302 		xfer->xroot->bus->stats_err.uds_requests
2303 		    [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2304 	} else {
2305 		xfer->xroot->bus->stats_ok.uds_requests
2306 		    [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2307 	}
2308 
2309 	/* call the USB transfer callback */
2310 	usbd_callback_ss_done_defer(xfer);
2311 }
2312 
2313 /*------------------------------------------------------------------------*
2314  * usbd_transfer_start_cb
2315  *
2316  * This function is called to start the USB transfer when
2317  * "xfer->interval" is greater than zero, and the endpoint type is
2318  * BULK or CONTROL.
2319  *------------------------------------------------------------------------*/
2320 static void
2321 usbd_transfer_start_cb(void *arg)
2322 {
2323 	struct usb_xfer *xfer = arg;
2324 	struct usb_endpoint *ep = xfer->endpoint;
2325 
2326 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2327 
2328 	DPRINTF("start\n");
2329 
2330 	/* start the transfer */
2331 	(ep->methods->start) (xfer);
2332 
2333 	xfer->flags_int.can_cancel_immed = 1;
2334 
2335 	/* check for error */
2336 	if (xfer->error) {
2337 		/* some error has happened */
2338 		usbd_transfer_done(xfer, 0);
2339 	}
2340 }
2341 
2342 /*------------------------------------------------------------------------*
2343  * usbd_xfer_set_stall
2344  *
2345  * This function is used to set the stall flag outside the
2346  * callback. This function is NULL safe.
2347  *------------------------------------------------------------------------*/
2348 void
2349 usbd_xfer_set_stall(struct usb_xfer *xfer)
2350 {
2351 	if (xfer == NULL) {
2352 		/* tearing down */
2353 		return;
2354 	}
2355 	USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2356 
2357 	/* avoid any races by locking the USB mutex */
2358 	USB_BUS_LOCK(xfer->xroot->bus);
2359 	xfer->flags.stall_pipe = 1;
2360 	USB_BUS_UNLOCK(xfer->xroot->bus);
2361 }
2362 
2363 int
2364 usbd_xfer_is_stalled(struct usb_xfer *xfer)
2365 {
2366 	return (xfer->endpoint->is_stalled);
2367 }
2368 
2369 /*------------------------------------------------------------------------*
2370  * usbd_transfer_clear_stall
2371  *
2372  * This function is used to clear the stall flag outside the
2373  * callback. This function is NULL safe.
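 *
 * Usage sketch (illustrative only; "sc" is a hypothetical driver softc
 * whose mutex was passed to usbd_transfer_setup() and therefore is the
 * transfer mutex, and "MY_RD_XFER" is a hypothetical transfer index):
 *
 *	mtx_lock(&sc->sc_mtx);
 *	// drop any pending stall request before restarting the pipe
 *	usbd_transfer_clear_stall(sc->sc_xfer[MY_RD_XFER]);
 *	usbd_transfer_start(sc->sc_xfer[MY_RD_XFER]);
 *	mtx_unlock(&sc->sc_mtx);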
2374 *------------------------------------------------------------------------*/ 2375 void 2376 usbd_transfer_clear_stall(struct usb_xfer *xfer) 2377 { 2378 if (xfer == NULL) { 2379 /* tearing down */ 2380 return; 2381 } 2382 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 2383 2384 /* avoid any races by locking the USB mutex */ 2385 USB_BUS_LOCK(xfer->xroot->bus); 2386 2387 xfer->flags.stall_pipe = 0; 2388 2389 USB_BUS_UNLOCK(xfer->xroot->bus); 2390 } 2391 2392 /*------------------------------------------------------------------------* 2393 * usbd_pipe_start 2394 * 2395 * This function is used to add an USB transfer to the pipe transfer list. 2396 *------------------------------------------------------------------------*/ 2397 void 2398 usbd_pipe_start(struct usb_xfer_queue *pq) 2399 { 2400 struct usb_endpoint *ep; 2401 struct usb_xfer *xfer; 2402 uint8_t type; 2403 2404 xfer = pq->curr; 2405 ep = xfer->endpoint; 2406 2407 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED); 2408 2409 /* 2410 * If the endpoint is already stalled we do nothing ! 2411 */ 2412 if (ep->is_stalled) { 2413 return; 2414 } 2415 /* 2416 * Check if we are supposed to stall the endpoint: 2417 */ 2418 if (xfer->flags.stall_pipe) { 2419 struct usb_device *udev; 2420 struct usb_xfer_root *info; 2421 2422 /* clear stall command */ 2423 xfer->flags.stall_pipe = 0; 2424 2425 /* get pointer to USB device */ 2426 info = xfer->xroot; 2427 udev = info->udev; 2428 2429 /* 2430 * Only stall BULK and INTERRUPT endpoints. 2431 */ 2432 type = (ep->edesc->bmAttributes & UE_XFERTYPE); 2433 if ((type == UE_BULK) || 2434 (type == UE_INTERRUPT)) { 2435 uint8_t did_stall; 2436 2437 did_stall = 1; 2438 2439 if (udev->flags.usb_mode == USB_MODE_DEVICE) { 2440 (udev->bus->methods->set_stall) ( 2441 udev, NULL, ep, &did_stall); 2442 } else if (udev->ctrl_xfer[1]) { 2443 info = udev->ctrl_xfer[1]->xroot; 2444 usb_proc_msignal( 2445 &info->bus->non_giant_callback_proc, 2446 &udev->cs_msg[0], &udev->cs_msg[1]); 2447 } else { 2448 /* should not happen */ 2449 DPRINTFN(0, "No stall handler\n"); 2450 } 2451 /* 2452 * Check if we should stall. Some USB hardware 2453 * handles set- and clear-stall in hardware. 2454 */ 2455 if (did_stall) { 2456 /* 2457 * The transfer will be continued when 2458 * the clear-stall control endpoint 2459 * message is received. 2460 */ 2461 ep->is_stalled = 1; 2462 return; 2463 } 2464 } else if (type == UE_ISOCHRONOUS) { 2465 2466 /* 2467 * Make sure any FIFO overflow or other FIFO 2468 * error conditions go away by resetting the 2469 * endpoint FIFO through the clear stall 2470 * method. 2471 */ 2472 if (udev->flags.usb_mode == USB_MODE_DEVICE) { 2473 (udev->bus->methods->clear_stall) (udev, ep); 2474 } 2475 } 2476 } 2477 /* Set or clear stall complete - special case */ 2478 if (xfer->nframes == 0) { 2479 /* we are complete */ 2480 xfer->aframes = 0; 2481 usbd_transfer_done(xfer, 0); 2482 return; 2483 } 2484 /* 2485 * Handled cases: 2486 * 2487 * 1) Start the first transfer queued. 2488 * 2489 * 2) Re-start the current USB transfer. 
2490 	 */
2491 	/*
2492 	 * Check if there should be any
2493 	 * pre transfer start delay:
2494 	 */
2495 	if (xfer->interval > 0) {
2496 		type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2497 		if ((type == UE_BULK) ||
2498 		    (type == UE_CONTROL)) {
2499 			usbd_transfer_timeout_ms(xfer,
2500 			    &usbd_transfer_start_cb,
2501 			    xfer->interval);
2502 			return;
2503 		}
2504 	}
2505 	DPRINTF("start\n");
2506 
2507 	/* start USB transfer */
2508 	(ep->methods->start) (xfer);
2509 
2510 	xfer->flags_int.can_cancel_immed = 1;
2511 
2512 	/* check for error */
2513 	if (xfer->error) {
2514 		/* some error has happened */
2515 		usbd_transfer_done(xfer, 0);
2516 	}
2517 }
2518 
2519 /*------------------------------------------------------------------------*
2520  * usbd_transfer_timeout_ms
2521  *
2522  * This function is used to setup a timeout on the given USB
2523  * transfer. If the timeout has been deferred the callback given by
2524  * "cb" will get called after "ms" milliseconds.
2525  *------------------------------------------------------------------------*/
2526 void
2527 usbd_transfer_timeout_ms(struct usb_xfer *xfer,
2528     void (*cb) (void *arg), usb_timeout_t ms)
2529 {
2530 	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2531 
2532 	/* defer delay */
2533 	usb_callout_reset(&xfer->timeout_handle,
2534 	    USB_MS_TO_TICKS(ms), cb, xfer);
2535 }
2536 
2537 /*------------------------------------------------------------------------*
2538  * usbd_callback_wrapper_sub
2539  *
2540  *  - This function will update variables in an USB transfer after
2541  *  the USB transfer is complete.
2542  *
2543  *  - This function is used to start the next USB transfer on the
2544  *  ep transfer queue, if any.
2545  *
2546  * NOTE: In some special cases the USB transfer will not be removed from
2547  * the pipe queue, but remain first. To enforce USB transfer removal call
2548  * this function passing the error code "USB_ERR_CANCELLED".
2549  *
2550  * Return values:
2551  * 0: Success.
2552  * Else: The callback has been deferred.
2553  *------------------------------------------------------------------------*/
2554 static uint8_t
2555 usbd_callback_wrapper_sub(struct usb_xfer *xfer)
2556 {
2557 	struct usb_endpoint *ep;
2558 	usb_frcount_t x;
2559 
2560 	if ((!xfer->flags_int.open) &&
2561 	    (!xfer->flags_int.did_close)) {
2562 		DPRINTF("close\n");
2563 		USB_BUS_LOCK(xfer->xroot->bus);
2564 		(xfer->endpoint->methods->close) (xfer);
2565 		USB_BUS_UNLOCK(xfer->xroot->bus);
2566 		/* only close once */
2567 		xfer->flags_int.did_close = 1;
2568 		return (1);	/* wait for new callback */
2569 	}
2570 	/*
2571 	 * If we have a non-hardware induced error we
2572 	 * need to do the DMA delay!
2573 */ 2574 if (((xfer->error == USB_ERR_CANCELLED) || 2575 (xfer->error == USB_ERR_TIMEOUT)) && 2576 (!xfer->flags_int.did_dma_delay)) { 2577 2578 usb_timeout_t temp; 2579 2580 /* only delay once */ 2581 xfer->flags_int.did_dma_delay = 1; 2582 2583 /* we can not cancel this delay */ 2584 xfer->flags_int.can_cancel_immed = 0; 2585 2586 temp = usbd_get_dma_delay(xfer->xroot->udev); 2587 2588 DPRINTFN(3, "DMA delay, %u ms, " 2589 "on %p\n", temp, xfer); 2590 2591 if (temp != 0) { 2592 USB_BUS_LOCK(xfer->xroot->bus); 2593 usbd_transfer_timeout_ms(xfer, 2594 &usb_dma_delay_done_cb, temp); 2595 USB_BUS_UNLOCK(xfer->xroot->bus); 2596 return (1); /* wait for new callback */ 2597 } 2598 } 2599 /* check actual number of frames */ 2600 if (xfer->aframes > xfer->nframes) { 2601 if (xfer->error == 0) { 2602 panic("%s: actual number of frames, %d, is " 2603 "greater than initial number of frames, %d\n", 2604 __FUNCTION__, xfer->aframes, xfer->nframes); 2605 } else { 2606 /* just set some valid value */ 2607 xfer->aframes = xfer->nframes; 2608 } 2609 } 2610 /* compute actual length */ 2611 xfer->actlen = 0; 2612 2613 for (x = 0; x != xfer->aframes; x++) { 2614 xfer->actlen += xfer->frlengths[x]; 2615 } 2616 2617 /* 2618 * Frames that were not transferred get zero actual length in 2619 * case the USB device driver does not check the actual number 2620 * of frames transferred, "xfer->aframes": 2621 */ 2622 for (; x < xfer->nframes; x++) { 2623 usbd_xfer_set_frame_len(xfer, x, 0); 2624 } 2625 2626 /* check actual length */ 2627 if (xfer->actlen > xfer->sumlen) { 2628 if (xfer->error == 0) { 2629 panic("%s: actual length, %d, is greater than " 2630 "initial length, %d\n", 2631 __FUNCTION__, xfer->actlen, xfer->sumlen); 2632 } else { 2633 /* just set some valid value */ 2634 xfer->actlen = xfer->sumlen; 2635 } 2636 } 2637 DPRINTFN(1, "xfer=%p endpoint=%p sts=%d alen=%d, slen=%d, afrm=%d, nfrm=%d\n", 2638 xfer, xfer->endpoint, xfer->error, xfer->actlen, xfer->sumlen, 2639 xfer->aframes, xfer->nframes); 2640 2641 if (xfer->error) { 2642 /* end of control transfer, if any */ 2643 xfer->flags_int.control_act = 0; 2644 2645 /* check if we should block the execution queue */ 2646 if ((xfer->error != USB_ERR_CANCELLED) && 2647 (xfer->flags.pipe_bof)) { 2648 DPRINTFN(2, "xfer=%p: Block On Failure " 2649 "on endpoint=%p\n", xfer, xfer->endpoint); 2650 goto done; 2651 } 2652 } else { 2653 /* check for short transfers */ 2654 if (xfer->actlen < xfer->sumlen) { 2655 2656 /* end of control transfer, if any */ 2657 xfer->flags_int.control_act = 0; 2658 2659 if (!xfer->flags_int.short_xfer_ok) { 2660 xfer->error = USB_ERR_SHORT_XFER; 2661 if (xfer->flags.pipe_bof) { 2662 DPRINTFN(2, "xfer=%p: Block On Failure on " 2663 "Short Transfer on endpoint %p.\n", 2664 xfer, xfer->endpoint); 2665 goto done; 2666 } 2667 } 2668 } else { 2669 /* 2670 * Check if we are in the middle of a 2671 * control transfer: 2672 */ 2673 if (xfer->flags_int.control_act) { 2674 DPRINTFN(5, "xfer=%p: Control transfer " 2675 "active on endpoint=%p\n", xfer, xfer->endpoint); 2676 goto done; 2677 } 2678 } 2679 } 2680 2681 ep = xfer->endpoint; 2682 2683 /* 2684 * If the current USB transfer is completing we need to start the 2685 * next one: 2686 */ 2687 USB_BUS_LOCK(xfer->xroot->bus); 2688 if (ep->endpoint_q.curr == xfer) { 2689 usb_command_wrapper(&ep->endpoint_q, NULL); 2690 2691 if (ep->endpoint_q.curr || TAILQ_FIRST(&ep->endpoint_q.head)) { 2692 /* there is another USB transfer waiting */ 2693 } else { 2694 /* this is the last USB transfer */ 2695 
/* clear isochronous sync flag */
2696 			xfer->endpoint->is_synced = 0;
2697 		}
2698 	}
2699 	USB_BUS_UNLOCK(xfer->xroot->bus);
2700 done:
2701 	return (0);
2702 }
2703 
2704 /*------------------------------------------------------------------------*
2705  * usb_command_wrapper
2706  *
2707  * This function is used to execute commands non-recursively on an USB
2708  * transfer.
2709  *------------------------------------------------------------------------*/
2710 void
2711 usb_command_wrapper(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2712 {
2713 	if (xfer) {
2714 		/*
2715 		 * If the transfer is not already processing,
2716 		 * queue it!
2717 		 */
2718 		if (pq->curr != xfer) {
2719 			usbd_transfer_enqueue(pq, xfer);
2720 			if (pq->curr != NULL) {
2721 				/* something is already processing */
2722 				DPRINTFN(6, "busy %p\n", pq->curr);
2723 				return;
2724 			}
2725 		}
2726 	} else {
2727 		/* Get next element in queue */
2728 		pq->curr = NULL;
2729 	}
2730 
2731 	if (!pq->recurse_1) {
2732 
2733 		do {
2734 
2735 			/* set both recurse flags */
2736 			pq->recurse_1 = 1;
2737 			pq->recurse_2 = 1;
2738 
2739 			if (pq->curr == NULL) {
2740 				xfer = TAILQ_FIRST(&pq->head);
2741 				if (xfer) {
2742 					TAILQ_REMOVE(&pq->head, xfer,
2743 					    wait_entry);
2744 					xfer->wait_queue = NULL;
2745 					pq->curr = xfer;
2746 				} else {
2747 					break;
2748 				}
2749 			}
2750 			DPRINTFN(6, "cb %p (enter)\n", pq->curr);
2751 			(pq->command) (pq);
2752 			DPRINTFN(6, "cb %p (leave)\n", pq->curr);
2753 
2754 		} while (!pq->recurse_2);
2755 
2756 		/* clear first recurse flag */
2757 		pq->recurse_1 = 0;
2758 
2759 	} else {
2760 		/* clear second recurse flag */
2761 		pq->recurse_2 = 0;
2762 	}
2763 }
2764 
2765 /*------------------------------------------------------------------------*
2766  * usbd_ctrl_transfer_setup
2767  *
2768  * This function is used to setup the default USB control endpoint
2769  * transfer.
2770  *------------------------------------------------------------------------*/
2771 void
2772 usbd_ctrl_transfer_setup(struct usb_device *udev)
2773 {
2774 	struct usb_xfer *xfer;
2775 	uint8_t no_resetup;
2776 	uint8_t iface_index;
2777 
2778 	/* check for root HUB */
2779 	if (udev->parent_hub == NULL)
2780 		return;
2781 repeat:
2782 
2783 	xfer = udev->ctrl_xfer[0];
2784 	if (xfer) {
2785 		USB_XFER_LOCK(xfer);
2786 		no_resetup =
2787 		    ((xfer->address == udev->address) &&
2788 		    (udev->ctrl_ep_desc.wMaxPacketSize[0] ==
2789 		    udev->ddesc.bMaxPacketSize));
2790 		if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2791 			if (no_resetup) {
2792 				/*
2793 				 * NOTE: checking "xfer->address" and
2794 				 * starting the USB transfer must be
2795 				 * atomic!
2796 				 */
2797 				usbd_transfer_start(xfer);
2798 			}
2799 		}
2800 		USB_XFER_UNLOCK(xfer);
2801 	} else {
2802 		no_resetup = 0;
2803 	}
2804 
2805 	if (no_resetup) {
2806 		/*
2807 		 * All parameters are exactly the same as before.
2808 		 * Just return.
2809 */ 2810 return; 2811 } 2812 /* 2813 * Update wMaxPacketSize for the default control endpoint: 2814 */ 2815 udev->ctrl_ep_desc.wMaxPacketSize[0] = 2816 udev->ddesc.bMaxPacketSize; 2817 2818 /* 2819 * Unsetup any existing USB transfer: 2820 */ 2821 usbd_transfer_unsetup(udev->ctrl_xfer, USB_CTRL_XFER_MAX); 2822 2823 /* 2824 * Try to setup a new USB transfer for the 2825 * default control endpoint: 2826 */ 2827 iface_index = 0; 2828 if (usbd_transfer_setup(udev, &iface_index, 2829 udev->ctrl_xfer, usb_control_ep_cfg, USB_CTRL_XFER_MAX, NULL, 2830 &udev->device_mtx)) { 2831 DPRINTFN(0, "could not setup default " 2832 "USB transfer\n"); 2833 } else { 2834 goto repeat; 2835 } 2836 } 2837 2838 /*------------------------------------------------------------------------* 2839 * usbd_clear_data_toggle - factored out code 2840 * 2841 * NOTE: the intention of this function is not to reset the hardware 2842 * data toggle. 2843 *------------------------------------------------------------------------*/ 2844 void 2845 usbd_clear_data_toggle(struct usb_device *udev, struct usb_endpoint *ep) 2846 { 2847 DPRINTFN(5, "udev=%p endpoint=%p\n", udev, ep); 2848 2849 USB_BUS_LOCK(udev->bus); 2850 ep->toggle_next = 0; 2851 USB_BUS_UNLOCK(udev->bus); 2852 } 2853 2854 /*------------------------------------------------------------------------* 2855 * usbd_clear_stall_callback - factored out clear stall callback 2856 * 2857 * Input parameters: 2858 * xfer1: Clear Stall Control Transfer 2859 * xfer2: Stalled USB Transfer 2860 * 2861 * This function is NULL safe. 2862 * 2863 * Return values: 2864 * 0: In progress 2865 * Else: Finished 2866 * 2867 * Clear stall config example: 2868 * 2869 * static const struct usb_config my_clearstall = { 2870 * .type = UE_CONTROL, 2871 * .endpoint = 0, 2872 * .direction = UE_DIR_ANY, 2873 * .interval = 50, //50 milliseconds 2874 * .bufsize = sizeof(struct usb_device_request), 2875 * .timeout = 1000, //1.000 seconds 2876 * .callback = &my_clear_stall_callback, // ** 2877 * .usb_mode = USB_MODE_HOST, 2878 * }; 2879 * 2880 * ** "my_clear_stall_callback" calls "usbd_clear_stall_callback" 2881 * passing the correct parameters. 2882 *------------------------------------------------------------------------*/ 2883 uint8_t 2884 usbd_clear_stall_callback(struct usb_xfer *xfer1, 2885 struct usb_xfer *xfer2) 2886 { 2887 struct usb_device_request req; 2888 2889 if (xfer2 == NULL) { 2890 /* looks like we are tearing down */ 2891 DPRINTF("NULL input parameter\n"); 2892 return (0); 2893 } 2894 USB_XFER_LOCK_ASSERT(xfer1, MA_OWNED); 2895 USB_XFER_LOCK_ASSERT(xfer2, MA_OWNED); 2896 2897 switch (USB_GET_STATE(xfer1)) { 2898 case USB_ST_SETUP: 2899 2900 /* 2901 * pre-clear the data toggle to DATA0 ("umass.c" and 2902 * "ata-usb.c" depends on this) 2903 */ 2904 2905 usbd_clear_data_toggle(xfer2->xroot->udev, xfer2->endpoint); 2906 2907 /* setup a clear-stall packet */ 2908 2909 req.bmRequestType = UT_WRITE_ENDPOINT; 2910 req.bRequest = UR_CLEAR_FEATURE; 2911 USETW(req.wValue, UF_ENDPOINT_HALT); 2912 req.wIndex[0] = xfer2->endpoint->edesc->bEndpointAddress; 2913 req.wIndex[1] = 0; 2914 USETW(req.wLength, 0); 2915 2916 /* 2917 * "usbd_transfer_setup_sub()" will ensure that 2918 * we have sufficient room in the buffer for 2919 * the request structure! 
2920 	 */
2921 
2922 		/* copy in the transfer */
2923 
2924 		usbd_copy_in(xfer1->frbuffers, 0, &req, sizeof(req));
2925 
2926 		/* set length */
2927 		xfer1->frlengths[0] = sizeof(req);
2928 		xfer1->nframes = 1;
2929 
2930 		usbd_transfer_submit(xfer1);
2931 		return (0);
2932 
2933 	case USB_ST_TRANSFERRED:
2934 		break;
2935 
2936 	default:			/* Error */
2937 		if (xfer1->error == USB_ERR_CANCELLED) {
2938 			return (0);
2939 		}
2940 		break;
2941 	}
2942 	return (1);			/* Clear Stall Finished */
2943 }
2944 
2945 /*------------------------------------------------------------------------*
2946  * usbd_transfer_poll
2947  *
2948  * The following function gets called from the USB keyboard driver and
2949  * UMASS when the system has panicked.
2950  *
2951  * NOTE: It is currently not possible to resume normal operation on
2952  * the USB controller which has been polled, due to clearing of the
2953  * "up_dsleep" and "up_msleep" flags.
2954  *------------------------------------------------------------------------*/
2955 void
2956 usbd_transfer_poll(struct usb_xfer **ppxfer, uint16_t max)
2957 {
2958 	struct usb_xfer *xfer;
2959 	struct usb_xfer_root *xroot;
2960 	struct usb_device *udev;
2961 	struct usb_proc_msg *pm;
2962 	uint16_t n;
2963 	uint16_t drop_bus;
2964 	uint16_t drop_xfer;
2965 
2966 	for (n = 0; n != max; n++) {
2967 		/* Extra checks to avoid panic */
2968 		xfer = ppxfer[n];
2969 		if (xfer == NULL)
2970 			continue;	/* no USB transfer */
2971 		xroot = xfer->xroot;
2972 		if (xroot == NULL)
2973 			continue;	/* no USB root */
2974 		udev = xroot->udev;
2975 		if (udev == NULL)
2976 			continue;	/* no USB device */
2977 		if (udev->bus == NULL)
2978 			continue;	/* no BUS structure */
2979 		if (udev->bus->methods == NULL)
2980 			continue;	/* no BUS methods */
2981 		if (udev->bus->methods->xfer_poll == NULL)
2982 			continue;	/* no poll method */
2983 
2984 		/* make sure that the BUS mutex is not locked */
2985 		drop_bus = 0;
2986 		while (mtx_owned(&xroot->udev->bus->bus_mtx)) {
2987 			mtx_unlock(&xroot->udev->bus->bus_mtx);
2988 			drop_bus++;
2989 		}
2990 
2991 		/* make sure that the transfer mutex is not locked */
2992 		drop_xfer = 0;
2993 		while (mtx_owned(xroot->xfer_mtx)) {
2994 			mtx_unlock(xroot->xfer_mtx);
2995 			drop_xfer++;
2996 		}
2997 
2998 		/* Make sure cv_signal() and cv_broadcast() are not called */
2999 		udev->bus->control_xfer_proc.up_msleep = 0;
3000 		udev->bus->explore_proc.up_msleep = 0;
3001 		udev->bus->giant_callback_proc.up_msleep = 0;
3002 		udev->bus->non_giant_callback_proc.up_msleep = 0;
3003 
3004 		/* poll USB hardware */
3005 		(udev->bus->methods->xfer_poll) (udev->bus);
3006 
3007 		USB_BUS_LOCK(xroot->bus);
3008 
3009 		/* check for clear stall */
3010 		if (udev->ctrl_xfer[1] != NULL) {
3011 
3012 			/* poll clear stall start */
3013 			pm = &udev->cs_msg[0].hdr;
3014 			(pm->pm_callback) (pm);
3015 			/* poll clear stall done thread */
3016 			pm = &udev->ctrl_xfer[1]->
3017 			    xroot->done_m[0].hdr;
3018 			(pm->pm_callback) (pm);
3019 		}
3020 
3021 		/* poll done thread */
3022 		pm = &xroot->done_m[0].hdr;
3023 		(pm->pm_callback) (pm);
3024 
3025 		USB_BUS_UNLOCK(xroot->bus);
3026 
3027 		/* restore transfer mutex */
3028 		while (drop_xfer--)
3029 			mtx_lock(xroot->xfer_mtx);
3030 
3031 		/* restore BUS mutex */
3032 		while (drop_bus--)
3033 			mtx_lock(&xroot->udev->bus->bus_mtx);
3034 	}
3035 }
3036 
3037 static void
3038 usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
3039     uint8_t type, enum usb_dev_speed speed)
3040 {
3041 	static const uint16_t intr_range_max[USB_SPEED_MAX] = {
3042 		[USB_SPEED_LOW] = 8,
3043 		[USB_SPEED_FULL] = 64,
3044 		[USB_SPEED_HIGH] = 1024,
3045 		[USB_SPEED_VARIABLE] = 1024,
3046 [USB_SPEED_SUPER] = 1024, 3047 }; 3048 3049 static const uint16_t isoc_range_max[USB_SPEED_MAX] = { 3050 [USB_SPEED_LOW] = 0, /* invalid */ 3051 [USB_SPEED_FULL] = 1023, 3052 [USB_SPEED_HIGH] = 1024, 3053 [USB_SPEED_VARIABLE] = 3584, 3054 [USB_SPEED_SUPER] = 1024, 3055 }; 3056 3057 static const uint16_t control_min[USB_SPEED_MAX] = { 3058 [USB_SPEED_LOW] = 8, 3059 [USB_SPEED_FULL] = 8, 3060 [USB_SPEED_HIGH] = 64, 3061 [USB_SPEED_VARIABLE] = 512, 3062 [USB_SPEED_SUPER] = 512, 3063 }; 3064 3065 static const uint16_t bulk_min[USB_SPEED_MAX] = { 3066 [USB_SPEED_LOW] = 8, 3067 [USB_SPEED_FULL] = 8, 3068 [USB_SPEED_HIGH] = 512, 3069 [USB_SPEED_VARIABLE] = 512, 3070 [USB_SPEED_SUPER] = 1024, 3071 }; 3072 3073 uint16_t temp; 3074 3075 memset(ptr, 0, sizeof(*ptr)); 3076 3077 switch (type) { 3078 case UE_INTERRUPT: 3079 ptr->range.max = intr_range_max[speed]; 3080 break; 3081 case UE_ISOCHRONOUS: 3082 ptr->range.max = isoc_range_max[speed]; 3083 break; 3084 default: 3085 if (type == UE_BULK) 3086 temp = bulk_min[speed]; 3087 else /* UE_CONTROL */ 3088 temp = control_min[speed]; 3089 3090 /* default is fixed */ 3091 ptr->fixed[0] = temp; 3092 ptr->fixed[1] = temp; 3093 ptr->fixed[2] = temp; 3094 ptr->fixed[3] = temp; 3095 3096 if (speed == USB_SPEED_FULL) { 3097 /* multiple sizes */ 3098 ptr->fixed[1] = 16; 3099 ptr->fixed[2] = 32; 3100 ptr->fixed[3] = 64; 3101 } 3102 if ((speed == USB_SPEED_VARIABLE) && 3103 (type == UE_BULK)) { 3104 /* multiple sizes */ 3105 ptr->fixed[2] = 1024; 3106 ptr->fixed[3] = 1536; 3107 } 3108 break; 3109 } 3110 } 3111 3112 void * 3113 usbd_xfer_softc(struct usb_xfer *xfer) 3114 { 3115 return (xfer->priv_sc); 3116 } 3117 3118 void * 3119 usbd_xfer_get_priv(struct usb_xfer *xfer) 3120 { 3121 return (xfer->priv_fifo); 3122 } 3123 3124 void 3125 usbd_xfer_set_priv(struct usb_xfer *xfer, void *ptr) 3126 { 3127 xfer->priv_fifo = ptr; 3128 } 3129 3130 uint8_t 3131 usbd_xfer_state(struct usb_xfer *xfer) 3132 { 3133 return (xfer->usb_state); 3134 } 3135 3136 void 3137 usbd_xfer_set_flag(struct usb_xfer *xfer, int flag) 3138 { 3139 switch (flag) { 3140 case USB_FORCE_SHORT_XFER: 3141 xfer->flags.force_short_xfer = 1; 3142 break; 3143 case USB_SHORT_XFER_OK: 3144 xfer->flags.short_xfer_ok = 1; 3145 break; 3146 case USB_MULTI_SHORT_OK: 3147 xfer->flags.short_frames_ok = 1; 3148 break; 3149 case USB_MANUAL_STATUS: 3150 xfer->flags.manual_status = 1; 3151 break; 3152 } 3153 } 3154 3155 void 3156 usbd_xfer_clr_flag(struct usb_xfer *xfer, int flag) 3157 { 3158 switch (flag) { 3159 case USB_FORCE_SHORT_XFER: 3160 xfer->flags.force_short_xfer = 0; 3161 break; 3162 case USB_SHORT_XFER_OK: 3163 xfer->flags.short_xfer_ok = 0; 3164 break; 3165 case USB_MULTI_SHORT_OK: 3166 xfer->flags.short_frames_ok = 0; 3167 break; 3168 case USB_MANUAL_STATUS: 3169 xfer->flags.manual_status = 0; 3170 break; 3171 } 3172 } 3173 3174 /* 3175 * The following function returns in milliseconds when the isochronous 3176 * transfer was completed by the hardware. The returned value wraps 3177 * around 65536 milliseconds. 3178 */ 3179 uint16_t 3180 usbd_xfer_get_timestamp(struct usb_xfer *xfer) 3181 { 3182 return (xfer->isoc_time_complete); 3183 } 3184
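
/*------------------------------------------------------------------------*
 * Example: using the accessor functions above from a driver callback
 *
 * The sketch below is illustrative only and is not taken from an
 * existing driver; "my_softc" and "my_bulk_read_callback" are
 * hypothetical names. It shows the usual pattern of reading the
 * transfer status, re-arming the transfer, and requesting a
 * clear-stall on errors:
 *
 *	static void
 *	my_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
 *	{
 *		struct my_softc *sc = usbd_xfer_softc(xfer);
 *		int actlen;
 *
 *		usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
 *
 *		switch (USB_GET_STATE(xfer)) {
 *		case USB_ST_TRANSFERRED:
 *			// hand "actlen" bytes of data to "sc" here
 *			// FALLTHROUGH
 *		case USB_ST_SETUP:
 *			usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
 *			usbd_transfer_submit(xfer);
 *			break;
 *		default:		// Error
 *			if (error != USB_ERR_CANCELLED) {
 *				// try to clear stall first and restart
 *				usbd_xfer_set_stall(xfer);
 *				usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
 *				usbd_transfer_submit(xfer);
 *			}
 *			break;
 *		}
 *	}
 *------------------------------------------------------------------------*/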