1 /* $FreeBSD$ */ 2 /*- 3 * Copyright (c) 2008 Hans Petter Selasky. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 27 #ifdef USB_GLOBAL_INCLUDE_FILE 28 #include USB_GLOBAL_INCLUDE_FILE 29 #else 30 #include <sys/stdint.h> 31 #include <sys/stddef.h> 32 #include <sys/param.h> 33 #include <sys/queue.h> 34 #include <sys/types.h> 35 #include <sys/systm.h> 36 #include <sys/kernel.h> 37 #include <sys/bus.h> 38 #include <sys/module.h> 39 #include <sys/lock.h> 40 #include <sys/mutex.h> 41 #include <sys/condvar.h> 42 #include <sys/sysctl.h> 43 #include <sys/sx.h> 44 #include <sys/unistd.h> 45 #include <sys/callout.h> 46 #include <sys/malloc.h> 47 #include <sys/priv.h> 48 #include <sys/proc.h> 49 50 #include <dev/usb/usb.h> 51 #include <dev/usb/usbdi.h> 52 #include <dev/usb/usbdi_util.h> 53 54 #define USB_DEBUG_VAR usb_debug 55 56 #include <dev/usb/usb_core.h> 57 #include <dev/usb/usb_busdma.h> 58 #include <dev/usb/usb_process.h> 59 #include <dev/usb/usb_transfer.h> 60 #include <dev/usb/usb_device.h> 61 #include <dev/usb/usb_debug.h> 62 #include <dev/usb/usb_util.h> 63 64 #include <dev/usb/usb_controller.h> 65 #include <dev/usb/usb_bus.h> 66 #include <dev/usb/usb_pf.h> 67 #endif /* USB_GLOBAL_INCLUDE_FILE */ 68 69 struct usb_std_packet_size { 70 struct { 71 uint16_t min; /* inclusive */ 72 uint16_t max; /* inclusive */ 73 } range; 74 75 uint16_t fixed[4]; 76 }; 77 78 static usb_callback_t usb_request_callback; 79 80 static const struct usb_config usb_control_ep_cfg[USB_CTRL_XFER_MAX] = { 81 82 /* This transfer is used for generic control endpoint transfers */ 83 84 [0] = { 85 .type = UE_CONTROL, 86 .endpoint = 0x00, /* Control endpoint */ 87 .direction = UE_DIR_ANY, 88 .bufsize = USB_EP0_BUFSIZE, /* bytes */ 89 .flags = {.proxy_buffer = 1,}, 90 .callback = &usb_request_callback, 91 .usb_mode = USB_MODE_DUAL, /* both modes */ 92 }, 93 94 /* This transfer is used for generic clear stall only */ 95 96 [1] = { 97 .type = UE_CONTROL, 98 .endpoint = 0x00, /* Control pipe */ 99 .direction = UE_DIR_ANY, 100 .bufsize = sizeof(struct usb_device_request), 101 .callback = &usb_do_clear_stall_callback, 102 .timeout = 1000, /* 1 second */ 103 .interval = 50, /* 50ms */ 104 .usb_mode = USB_MODE_HOST, 105 }, 106 }; 107 108 /* function prototypes 
*/

static void	usbd_update_max_frame_size(struct usb_xfer *);
static void	usbd_transfer_unsetup_sub(struct usb_xfer_root *, uint8_t);
static void	usbd_control_transfer_init(struct usb_xfer *);
static int	usbd_setup_ctrl_transfer(struct usb_xfer *);
static void	usb_callback_proc(struct usb_proc_msg *);
static void	usbd_callback_ss_done_defer(struct usb_xfer *);
static void	usbd_callback_wrapper(struct usb_xfer_queue *);
static void	usbd_transfer_start_cb(void *);
static uint8_t	usbd_callback_wrapper_sub(struct usb_xfer *);
static void	usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
		    uint8_t type, enum usb_dev_speed speed);

/*------------------------------------------------------------------------*
 *	usb_request_callback
 *------------------------------------------------------------------------*/
static void
usb_request_callback(struct usb_xfer *xfer, usb_error_t error)
{
	if (xfer->flags_int.usb_mode == USB_MODE_DEVICE)
		usb_handle_request_callback(xfer, error);
	else
		usbd_do_request_callback(xfer, error);
}

/*------------------------------------------------------------------------*
 *	usbd_update_max_frame_size
 *
 * This function updates the maximum frame size, since high speed USB
 * can transfer multiple consecutive packets.
 *------------------------------------------------------------------------*/
static void
usbd_update_max_frame_size(struct usb_xfer *xfer)
{
	/* compute maximum frame size */
	/* this computation should not overflow 16-bit */
	/* max = 15 * 1024 */

	xfer->max_frame_size = xfer->max_packet_size * xfer->max_packet_count;
}

/*------------------------------------------------------------------------*
 *	usbd_get_dma_delay
 *
 * The following function is called when we need to
 * synchronize with DMA hardware.
 *
 * Returns:
 *    0: no DMA delay required
 * Else: milliseconds of DMA delay
 *------------------------------------------------------------------------*/
usb_timeout_t
usbd_get_dma_delay(struct usb_device *udev)
{
	const struct usb_bus_methods *mtod;
	uint32_t temp;

	mtod = udev->bus->methods;
	temp = 0;

	if (mtod->get_dma_delay) {
		(mtod->get_dma_delay) (udev, &temp);
		/*
		 * Round up and convert to milliseconds. Note that we
		 * use 1024 milliseconds per second to save a division.
		 */
		temp += 0x3FF;
		temp /= 0x400;
	}
	return (temp);
}
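/*
 * Illustrative sketch only: a host controller driver can supply the
 * "get_dma_delay" method used above through its "usb_bus_methods"
 * structure. The driver name and the delay value are made up for the
 * example; the method reports its worst-case DMA completion delay in
 * microseconds, which usbd_get_dma_delay() rounds up to milliseconds.
 */
#if 0
static void
foohci_get_dma_delay(struct usb_device *udev, uint32_t *pus)
{
	*pus = 188;		/* microseconds, hypothetical value */
}
#endif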
/*------------------------------------------------------------------------*
 *	usbd_transfer_setup_sub_malloc
 *
 * This function will allocate one or more DMA'able memory chunks
 * according to the "size", "align" and "count" arguments. "ppc" will
 * point to a linear array of USB page caches afterwards.
 *
 * If the "align" argument is equal to "1" a non-contiguous allocation
 * can happen. Else, if the "align" argument is greater than "1", the
 * allocation will always be contiguous in memory.
 *
 * Returns:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
#if USB_HAVE_BUSDMA
uint8_t
usbd_transfer_setup_sub_malloc(struct usb_setup_params *parm,
    struct usb_page_cache **ppc, usb_size_t size, usb_size_t align,
    usb_size_t count)
{
	struct usb_page_cache *pc;
	struct usb_page *pg;
	void *buf;
	usb_size_t n_dma_pc;
	usb_size_t n_dma_pg;
	usb_size_t n_obj;
	usb_size_t x;
	usb_size_t y;
	usb_size_t r;
	usb_size_t z;

	USB_ASSERT(align > 0, ("Invalid alignment, 0x%08x\n",
	    align));
	USB_ASSERT(size > 0, ("Invalid size = 0\n"));

	if (count == 0) {
		return (0);		/* nothing to allocate */
	}
	/*
	 * Make sure that the size is aligned properly.
	 */
	size = -((-size) & (-align));

	/*
	 * Try multi-allocation chunks to reduce the number of DMA
	 * allocations, since DMA allocations are slow.
	 */
	if (align == 1) {
		/* special case - non-cached multi page DMA memory */
		n_dma_pc = count;
		n_dma_pg = (2 + (size / USB_PAGE_SIZE));
		n_obj = 1;
	} else if (size >= USB_PAGE_SIZE) {
		n_dma_pc = count;
		n_dma_pg = 1;
		n_obj = 1;
	} else {
		/* compute number of objects per page */
		n_obj = (USB_PAGE_SIZE / size);
		/*
		 * Compute number of DMA chunks, rounded up
		 * to nearest one:
		 */
		n_dma_pc = ((count + n_obj - 1) / n_obj);
		n_dma_pg = 1;
	}

	/*
	 * DMA memory is allocated once, but mapped twice. That's why
	 * there is one list for auto-free and another list for
	 * non-auto-free which only holds the mapping and not the
	 * allocation.
	 */
	if (parm->buf == NULL) {
		/* reserve memory (auto-free) */
		parm->dma_page_ptr += n_dma_pc * n_dma_pg;
		parm->dma_page_cache_ptr += n_dma_pc;

		/* reserve memory (no-auto-free) */
		parm->dma_page_ptr += count * n_dma_pg;
		parm->xfer_page_cache_ptr += count;
		return (0);
	}
	for (x = 0; x != n_dma_pc; x++) {
		/* need to initialize the page cache */
		parm->dma_page_cache_ptr[x].tag_parent =
		    &parm->curr_xfer->xroot->dma_parent_tag;
	}
	for (x = 0; x != count; x++) {
		/* need to initialize the page cache */
		parm->xfer_page_cache_ptr[x].tag_parent =
		    &parm->curr_xfer->xroot->dma_parent_tag;
	}

	if (ppc) {
		*ppc = parm->xfer_page_cache_ptr;
	}
	r = count;			/* set remainder count */
	z = n_obj * size;		/* set allocation size */
	pc = parm->xfer_page_cache_ptr;
	pg = parm->dma_page_ptr;

	for (x = 0; x != n_dma_pc; x++) {

		if (r < n_obj) {
			/* compute last remainder */
			z = r * size;
			n_obj = r;
		}
		if (usb_pc_alloc_mem(parm->dma_page_cache_ptr,
		    pg, z, align)) {
			return (1);	/* failure */
		}
		/* Set beginning of current buffer */
		buf = parm->dma_page_cache_ptr->buffer;
		/* Make room for one DMA page cache and one page */
		parm->dma_page_cache_ptr++;
		pg += n_dma_pg;

		for (y = 0; (y != n_obj); y++, r--, pc++, pg += n_dma_pg) {

			/* Load sub-chunk into DMA */
			if (usb_pc_dmamap_create(pc, size)) {
				return (1);	/* failure */
			}
			pc->buffer = USB_ADD_BYTES(buf, y * size);
			pc->page_start = pg;

			mtx_lock(pc->tag_parent->mtx);
			if (usb_pc_load_mem(pc, size, 1 /* synchronous */ )) {
				mtx_unlock(pc->tag_parent->mtx);
				return (1);	/* failure */
			}
			mtx_unlock(pc->tag_parent->mtx);
		}
	}

	parm->xfer_page_cache_ptr = pc;
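	/*
	 * Hand the updated page cache and DMA page pointers back
	 * through "parm" so that a subsequent allocation continues
	 * after the chunks reserved above.
	 */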
parm->dma_page_ptr = pg; 321 return (0); 322 } 323 #endif 324 325 /*------------------------------------------------------------------------* 326 * usbd_transfer_setup_sub - transfer setup subroutine 327 * 328 * This function must be called from the "xfer_setup" callback of the 329 * USB Host or Device controller driver when setting up an USB 330 * transfer. This function will setup correct packet sizes, buffer 331 * sizes, flags and more, that are stored in the "usb_xfer" 332 * structure. 333 *------------------------------------------------------------------------*/ 334 void 335 usbd_transfer_setup_sub(struct usb_setup_params *parm) 336 { 337 enum { 338 REQ_SIZE = 8, 339 MIN_PKT = 8, 340 }; 341 struct usb_xfer *xfer = parm->curr_xfer; 342 const struct usb_config *setup = parm->curr_setup; 343 struct usb_endpoint_ss_comp_descriptor *ecomp; 344 struct usb_endpoint_descriptor *edesc; 345 struct usb_std_packet_size std_size; 346 usb_frcount_t n_frlengths; 347 usb_frcount_t n_frbuffers; 348 usb_frcount_t x; 349 uint8_t type; 350 uint8_t zmps; 351 352 /* 353 * Sanity check. The following parameters must be initialized before 354 * calling this function. 355 */ 356 if ((parm->hc_max_packet_size == 0) || 357 (parm->hc_max_packet_count == 0) || 358 (parm->hc_max_frame_size == 0)) { 359 parm->err = USB_ERR_INVAL; 360 goto done; 361 } 362 edesc = xfer->endpoint->edesc; 363 ecomp = xfer->endpoint->ecomp; 364 365 type = (edesc->bmAttributes & UE_XFERTYPE); 366 367 xfer->flags = setup->flags; 368 xfer->nframes = setup->frames; 369 xfer->timeout = setup->timeout; 370 xfer->callback = setup->callback; 371 xfer->interval = setup->interval; 372 xfer->endpointno = edesc->bEndpointAddress; 373 xfer->max_packet_size = UGETW(edesc->wMaxPacketSize); 374 xfer->max_packet_count = 1; 375 /* make a shadow copy: */ 376 xfer->flags_int.usb_mode = parm->udev->flags.usb_mode; 377 378 parm->bufsize = setup->bufsize; 379 380 switch (parm->speed) { 381 case USB_SPEED_HIGH: 382 switch (type) { 383 case UE_ISOCHRONOUS: 384 case UE_INTERRUPT: 385 xfer->max_packet_count += 386 (xfer->max_packet_size >> 11) & 3; 387 388 /* check for invalid max packet count */ 389 if (xfer->max_packet_count > 3) 390 xfer->max_packet_count = 3; 391 break; 392 default: 393 break; 394 } 395 xfer->max_packet_size &= 0x7FF; 396 break; 397 case USB_SPEED_SUPER: 398 xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3; 399 400 if (ecomp != NULL) 401 xfer->max_packet_count += ecomp->bMaxBurst; 402 403 if ((xfer->max_packet_count == 0) || 404 (xfer->max_packet_count > 16)) 405 xfer->max_packet_count = 16; 406 407 switch (type) { 408 case UE_CONTROL: 409 xfer->max_packet_count = 1; 410 break; 411 case UE_ISOCHRONOUS: 412 if (ecomp != NULL) { 413 uint8_t mult; 414 415 mult = UE_GET_SS_ISO_MULT( 416 ecomp->bmAttributes) + 1; 417 if (mult > 3) 418 mult = 3; 419 420 xfer->max_packet_count *= mult; 421 } 422 break; 423 default: 424 break; 425 } 426 xfer->max_packet_size &= 0x7FF; 427 break; 428 default: 429 break; 430 } 431 /* range check "max_packet_count" */ 432 433 if (xfer->max_packet_count > parm->hc_max_packet_count) { 434 xfer->max_packet_count = parm->hc_max_packet_count; 435 } 436 /* filter "wMaxPacketSize" according to HC capabilities */ 437 438 if ((xfer->max_packet_size > parm->hc_max_packet_size) || 439 (xfer->max_packet_size == 0)) { 440 xfer->max_packet_size = parm->hc_max_packet_size; 441 } 442 /* filter "wMaxPacketSize" according to standard sizes */ 443 444 usbd_get_std_packet_size(&std_size, type, parm->speed); 445 446 if 
(std_size.range.min || std_size.range.max) { 447 448 if (xfer->max_packet_size < std_size.range.min) { 449 xfer->max_packet_size = std_size.range.min; 450 } 451 if (xfer->max_packet_size > std_size.range.max) { 452 xfer->max_packet_size = std_size.range.max; 453 } 454 } else { 455 456 if (xfer->max_packet_size >= std_size.fixed[3]) { 457 xfer->max_packet_size = std_size.fixed[3]; 458 } else if (xfer->max_packet_size >= std_size.fixed[2]) { 459 xfer->max_packet_size = std_size.fixed[2]; 460 } else if (xfer->max_packet_size >= std_size.fixed[1]) { 461 xfer->max_packet_size = std_size.fixed[1]; 462 } else { 463 /* only one possibility left */ 464 xfer->max_packet_size = std_size.fixed[0]; 465 } 466 } 467 468 /* compute "max_frame_size" */ 469 470 usbd_update_max_frame_size(xfer); 471 472 /* check interrupt interval and transfer pre-delay */ 473 474 if (type == UE_ISOCHRONOUS) { 475 476 uint16_t frame_limit; 477 478 xfer->interval = 0; /* not used, must be zero */ 479 xfer->flags_int.isochronous_xfr = 1; /* set flag */ 480 481 if (xfer->timeout == 0) { 482 /* 483 * set a default timeout in 484 * case something goes wrong! 485 */ 486 xfer->timeout = 1000 / 4; 487 } 488 switch (parm->speed) { 489 case USB_SPEED_LOW: 490 case USB_SPEED_FULL: 491 frame_limit = USB_MAX_FS_ISOC_FRAMES_PER_XFER; 492 xfer->fps_shift = 0; 493 break; 494 default: 495 frame_limit = USB_MAX_HS_ISOC_FRAMES_PER_XFER; 496 xfer->fps_shift = edesc->bInterval; 497 if (xfer->fps_shift > 0) 498 xfer->fps_shift--; 499 if (xfer->fps_shift > 3) 500 xfer->fps_shift = 3; 501 if (xfer->flags.pre_scale_frames != 0) 502 xfer->nframes <<= (3 - xfer->fps_shift); 503 break; 504 } 505 506 if (xfer->nframes > frame_limit) { 507 /* 508 * this is not going to work 509 * cross hardware 510 */ 511 parm->err = USB_ERR_INVAL; 512 goto done; 513 } 514 if (xfer->nframes == 0) { 515 /* 516 * this is not a valid value 517 */ 518 parm->err = USB_ERR_ZERO_NFRAMES; 519 goto done; 520 } 521 } else { 522 523 /* 524 * If a value is specified use that else check the 525 * endpoint descriptor! 526 */ 527 if (type == UE_INTERRUPT) { 528 529 uint32_t temp; 530 531 if (xfer->interval == 0) { 532 533 xfer->interval = edesc->bInterval; 534 535 switch (parm->speed) { 536 case USB_SPEED_LOW: 537 case USB_SPEED_FULL: 538 break; 539 default: 540 /* 125us -> 1ms */ 541 if (xfer->interval < 4) 542 xfer->interval = 1; 543 else if (xfer->interval > 16) 544 xfer->interval = (1 << (16 - 4)); 545 else 546 xfer->interval = 547 (1 << (xfer->interval - 4)); 548 break; 549 } 550 } 551 552 if (xfer->interval == 0) { 553 /* 554 * One millisecond is the smallest 555 * interval we support: 556 */ 557 xfer->interval = 1; 558 } 559 560 xfer->fps_shift = 0; 561 temp = 1; 562 563 while ((temp != 0) && (temp < xfer->interval)) { 564 xfer->fps_shift++; 565 temp *= 2; 566 } 567 568 switch (parm->speed) { 569 case USB_SPEED_LOW: 570 case USB_SPEED_FULL: 571 break; 572 default: 573 xfer->fps_shift += 3; 574 break; 575 } 576 } 577 } 578 579 /* 580 * NOTE: we do not allow "max_packet_size" or "max_frame_size" 581 * to be equal to zero when setting up USB transfers, hence 582 * this leads to alot of extra code in the USB kernel. 
583 */ 584 585 if ((xfer->max_frame_size == 0) || 586 (xfer->max_packet_size == 0)) { 587 588 zmps = 1; 589 590 if ((parm->bufsize <= MIN_PKT) && 591 (type != UE_CONTROL) && 592 (type != UE_BULK)) { 593 594 /* workaround */ 595 xfer->max_packet_size = MIN_PKT; 596 xfer->max_packet_count = 1; 597 parm->bufsize = 0; /* automatic setup length */ 598 usbd_update_max_frame_size(xfer); 599 600 } else { 601 parm->err = USB_ERR_ZERO_MAXP; 602 goto done; 603 } 604 605 } else { 606 zmps = 0; 607 } 608 609 /* 610 * check if we should setup a default 611 * length: 612 */ 613 614 if (parm->bufsize == 0) { 615 616 parm->bufsize = xfer->max_frame_size; 617 618 if (type == UE_ISOCHRONOUS) { 619 parm->bufsize *= xfer->nframes; 620 } 621 } 622 /* 623 * check if we are about to setup a proxy 624 * type of buffer: 625 */ 626 627 if (xfer->flags.proxy_buffer) { 628 629 /* round bufsize up */ 630 631 parm->bufsize += (xfer->max_frame_size - 1); 632 633 if (parm->bufsize < xfer->max_frame_size) { 634 /* length wrapped around */ 635 parm->err = USB_ERR_INVAL; 636 goto done; 637 } 638 /* subtract remainder */ 639 640 parm->bufsize -= (parm->bufsize % xfer->max_frame_size); 641 642 /* add length of USB device request structure, if any */ 643 644 if (type == UE_CONTROL) { 645 parm->bufsize += REQ_SIZE; /* SETUP message */ 646 } 647 } 648 xfer->max_data_length = parm->bufsize; 649 650 /* Setup "n_frlengths" and "n_frbuffers" */ 651 652 if (type == UE_ISOCHRONOUS) { 653 n_frlengths = xfer->nframes; 654 n_frbuffers = 1; 655 } else { 656 657 if (type == UE_CONTROL) { 658 xfer->flags_int.control_xfr = 1; 659 if (xfer->nframes == 0) { 660 if (parm->bufsize <= REQ_SIZE) { 661 /* 662 * there will never be any data 663 * stage 664 */ 665 xfer->nframes = 1; 666 } else { 667 xfer->nframes = 2; 668 } 669 } 670 } else { 671 if (xfer->nframes == 0) { 672 xfer->nframes = 1; 673 } 674 } 675 676 n_frlengths = xfer->nframes; 677 n_frbuffers = xfer->nframes; 678 } 679 680 /* 681 * check if we have room for the 682 * USB device request structure: 683 */ 684 685 if (type == UE_CONTROL) { 686 687 if (xfer->max_data_length < REQ_SIZE) { 688 /* length wrapped around or too small bufsize */ 689 parm->err = USB_ERR_INVAL; 690 goto done; 691 } 692 xfer->max_data_length -= REQ_SIZE; 693 } 694 /* 695 * Setup "frlengths" and shadow "frlengths" for keeping the 696 * initial frame lengths when a USB transfer is complete. This 697 * information is useful when computing isochronous offsets. 
698 */ 699 xfer->frlengths = parm->xfer_length_ptr; 700 parm->xfer_length_ptr += 2 * n_frlengths; 701 702 /* setup "frbuffers" */ 703 xfer->frbuffers = parm->xfer_page_cache_ptr; 704 parm->xfer_page_cache_ptr += n_frbuffers; 705 706 /* initialize max frame count */ 707 xfer->max_frame_count = xfer->nframes; 708 709 /* 710 * check if we need to setup 711 * a local buffer: 712 */ 713 714 if (!xfer->flags.ext_buffer) { 715 #if USB_HAVE_BUSDMA 716 struct usb_page_search page_info; 717 struct usb_page_cache *pc; 718 719 if (usbd_transfer_setup_sub_malloc(parm, 720 &pc, parm->bufsize, 1, 1)) { 721 parm->err = USB_ERR_NOMEM; 722 } else if (parm->buf != NULL) { 723 724 usbd_get_page(pc, 0, &page_info); 725 726 xfer->local_buffer = page_info.buffer; 727 728 usbd_xfer_set_frame_offset(xfer, 0, 0); 729 730 if ((type == UE_CONTROL) && (n_frbuffers > 1)) { 731 usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1); 732 } 733 } 734 #else 735 /* align data */ 736 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1)); 737 738 if (parm->buf != NULL) { 739 xfer->local_buffer = 740 USB_ADD_BYTES(parm->buf, parm->size[0]); 741 742 usbd_xfer_set_frame_offset(xfer, 0, 0); 743 744 if ((type == UE_CONTROL) && (n_frbuffers > 1)) { 745 usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1); 746 } 747 } 748 parm->size[0] += parm->bufsize; 749 750 /* align data again */ 751 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1)); 752 #endif 753 } 754 /* 755 * Compute maximum buffer size 756 */ 757 758 if (parm->bufsize_max < parm->bufsize) { 759 parm->bufsize_max = parm->bufsize; 760 } 761 #if USB_HAVE_BUSDMA 762 if (xfer->flags_int.bdma_enable) { 763 /* 764 * Setup "dma_page_ptr". 765 * 766 * Proof for formula below: 767 * 768 * Assume there are three USB frames having length "a", "b" and 769 * "c". These USB frames will at maximum need "z" 770 * "usb_page" structures. 
"z" is given by: 771 * 772 * z = ((a / USB_PAGE_SIZE) + 2) + ((b / USB_PAGE_SIZE) + 2) + 773 * ((c / USB_PAGE_SIZE) + 2); 774 * 775 * Constraining "a", "b" and "c" like this: 776 * 777 * (a + b + c) <= parm->bufsize 778 * 779 * We know that: 780 * 781 * z <= ((parm->bufsize / USB_PAGE_SIZE) + (3*2)); 782 * 783 * Here is the general formula: 784 */ 785 xfer->dma_page_ptr = parm->dma_page_ptr; 786 parm->dma_page_ptr += (2 * n_frbuffers); 787 parm->dma_page_ptr += (parm->bufsize / USB_PAGE_SIZE); 788 } 789 #endif 790 if (zmps) { 791 /* correct maximum data length */ 792 xfer->max_data_length = 0; 793 } 794 /* subtract USB frame remainder from "hc_max_frame_size" */ 795 796 xfer->max_hc_frame_size = 797 (parm->hc_max_frame_size - 798 (parm->hc_max_frame_size % xfer->max_frame_size)); 799 800 if (xfer->max_hc_frame_size == 0) { 801 parm->err = USB_ERR_INVAL; 802 goto done; 803 } 804 805 /* initialize frame buffers */ 806 807 if (parm->buf) { 808 for (x = 0; x != n_frbuffers; x++) { 809 xfer->frbuffers[x].tag_parent = 810 &xfer->xroot->dma_parent_tag; 811 #if USB_HAVE_BUSDMA 812 if (xfer->flags_int.bdma_enable && 813 (parm->bufsize_max > 0)) { 814 815 if (usb_pc_dmamap_create( 816 xfer->frbuffers + x, 817 parm->bufsize_max)) { 818 parm->err = USB_ERR_NOMEM; 819 goto done; 820 } 821 } 822 #endif 823 } 824 } 825 done: 826 if (parm->err) { 827 /* 828 * Set some dummy values so that we avoid division by zero: 829 */ 830 xfer->max_hc_frame_size = 1; 831 xfer->max_frame_size = 1; 832 xfer->max_packet_size = 1; 833 xfer->max_data_length = 0; 834 xfer->nframes = 0; 835 xfer->max_frame_count = 0; 836 } 837 } 838 839 /*------------------------------------------------------------------------* 840 * usbd_transfer_setup - setup an array of USB transfers 841 * 842 * NOTE: You must always call "usbd_transfer_unsetup" after calling 843 * "usbd_transfer_setup" if success was returned. 844 * 845 * The idea is that the USB device driver should pre-allocate all its 846 * transfers by one call to this function. 
847 * 848 * Return values: 849 * 0: Success 850 * Else: Failure 851 *------------------------------------------------------------------------*/ 852 usb_error_t 853 usbd_transfer_setup(struct usb_device *udev, 854 const uint8_t *ifaces, struct usb_xfer **ppxfer, 855 const struct usb_config *setup_start, uint16_t n_setup, 856 void *priv_sc, struct mtx *xfer_mtx) 857 { 858 const struct usb_config *setup_end = setup_start + n_setup; 859 const struct usb_config *setup; 860 struct usb_setup_params *parm; 861 struct usb_endpoint *ep; 862 struct usb_xfer_root *info; 863 struct usb_xfer *xfer; 864 void *buf = NULL; 865 usb_error_t error = 0; 866 uint16_t n; 867 uint16_t refcount; 868 uint8_t do_unlock; 869 870 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 871 "usbd_transfer_setup can sleep!"); 872 873 /* do some checking first */ 874 875 if (n_setup == 0) { 876 DPRINTFN(6, "setup array has zero length!\n"); 877 return (USB_ERR_INVAL); 878 } 879 if (ifaces == 0) { 880 DPRINTFN(6, "ifaces array is NULL!\n"); 881 return (USB_ERR_INVAL); 882 } 883 if (xfer_mtx == NULL) { 884 DPRINTFN(6, "using global lock\n"); 885 xfer_mtx = &Giant; 886 } 887 888 /* more sanity checks */ 889 890 for (setup = setup_start, n = 0; 891 setup != setup_end; setup++, n++) { 892 if (setup->bufsize == (usb_frlength_t)-1) { 893 error = USB_ERR_BAD_BUFSIZE; 894 DPRINTF("invalid bufsize\n"); 895 } 896 if (setup->callback == NULL) { 897 error = USB_ERR_NO_CALLBACK; 898 DPRINTF("no callback\n"); 899 } 900 ppxfer[n] = NULL; 901 } 902 903 if (error) 904 return (error); 905 906 /* Protect scratch area */ 907 do_unlock = usbd_enum_lock(udev); 908 909 refcount = 0; 910 info = NULL; 911 912 parm = &udev->scratch.xfer_setup[0].parm; 913 memset(parm, 0, sizeof(*parm)); 914 915 parm->udev = udev; 916 parm->speed = usbd_get_speed(udev); 917 parm->hc_max_packet_count = 1; 918 919 if (parm->speed >= USB_SPEED_MAX) { 920 parm->err = USB_ERR_INVAL; 921 goto done; 922 } 923 /* setup all transfers */ 924 925 while (1) { 926 927 if (buf) { 928 /* 929 * Initialize the "usb_xfer_root" structure, 930 * which is common for all our USB transfers. 931 */ 932 info = USB_ADD_BYTES(buf, 0); 933 934 info->memory_base = buf; 935 info->memory_size = parm->size[0]; 936 937 #if USB_HAVE_BUSDMA 938 info->dma_page_cache_start = USB_ADD_BYTES(buf, parm->size[4]); 939 info->dma_page_cache_end = USB_ADD_BYTES(buf, parm->size[5]); 940 #endif 941 info->xfer_page_cache_start = USB_ADD_BYTES(buf, parm->size[5]); 942 info->xfer_page_cache_end = USB_ADD_BYTES(buf, parm->size[2]); 943 944 cv_init(&info->cv_drain, "WDRAIN"); 945 946 info->xfer_mtx = xfer_mtx; 947 #if USB_HAVE_BUSDMA 948 usb_dma_tag_setup(&info->dma_parent_tag, 949 parm->dma_tag_p, udev->bus->dma_parent_tag[0].tag, 950 xfer_mtx, &usb_bdma_done_event, 32, parm->dma_tag_max); 951 #endif 952 953 info->bus = udev->bus; 954 info->udev = udev; 955 956 TAILQ_INIT(&info->done_q.head); 957 info->done_q.command = &usbd_callback_wrapper; 958 #if USB_HAVE_BUSDMA 959 TAILQ_INIT(&info->dma_q.head); 960 info->dma_q.command = &usb_bdma_work_loop; 961 #endif 962 info->done_m[0].hdr.pm_callback = &usb_callback_proc; 963 info->done_m[0].xroot = info; 964 info->done_m[1].hdr.pm_callback = &usb_callback_proc; 965 info->done_m[1].xroot = info; 966 967 /* 968 * In device side mode control endpoint 969 * requests need to run from a separate 970 * context, else there is a chance of 971 * deadlock! 
972 */ 973 if (setup_start == usb_control_ep_cfg) 974 info->done_p = 975 USB_BUS_CONTROL_XFER_PROC(udev->bus); 976 else if (xfer_mtx == &Giant) 977 info->done_p = 978 USB_BUS_GIANT_PROC(udev->bus); 979 else 980 info->done_p = 981 USB_BUS_NON_GIANT_PROC(udev->bus); 982 } 983 /* reset sizes */ 984 985 parm->size[0] = 0; 986 parm->buf = buf; 987 parm->size[0] += sizeof(info[0]); 988 989 for (setup = setup_start, n = 0; 990 setup != setup_end; setup++, n++) { 991 992 /* skip USB transfers without callbacks: */ 993 if (setup->callback == NULL) { 994 continue; 995 } 996 /* see if there is a matching endpoint */ 997 ep = usbd_get_endpoint(udev, 998 ifaces[setup->if_index], setup); 999 1000 /* 1001 * Check that the USB PIPE is valid and that 1002 * the endpoint mode is proper. 1003 * 1004 * Make sure we don't allocate a streams 1005 * transfer when such a combination is not 1006 * valid. 1007 */ 1008 if ((ep == NULL) || (ep->methods == NULL) || 1009 ((ep->ep_mode != USB_EP_MODE_STREAMS) && 1010 (ep->ep_mode != USB_EP_MODE_DEFAULT)) || 1011 (setup->stream_id != 0 && 1012 (setup->stream_id >= USB_MAX_EP_STREAMS || 1013 (ep->ep_mode != USB_EP_MODE_STREAMS)))) { 1014 if (setup->flags.no_pipe_ok) 1015 continue; 1016 if ((setup->usb_mode != USB_MODE_DUAL) && 1017 (setup->usb_mode != udev->flags.usb_mode)) 1018 continue; 1019 parm->err = USB_ERR_NO_PIPE; 1020 goto done; 1021 } 1022 1023 /* align data properly */ 1024 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1)); 1025 1026 /* store current setup pointer */ 1027 parm->curr_setup = setup; 1028 1029 if (buf) { 1030 /* 1031 * Common initialization of the 1032 * "usb_xfer" structure. 1033 */ 1034 xfer = USB_ADD_BYTES(buf, parm->size[0]); 1035 xfer->address = udev->address; 1036 xfer->priv_sc = priv_sc; 1037 xfer->xroot = info; 1038 1039 usb_callout_init_mtx(&xfer->timeout_handle, 1040 &udev->bus->bus_mtx, 0); 1041 } else { 1042 /* 1043 * Setup a dummy xfer, hence we are 1044 * writing to the "usb_xfer" 1045 * structure pointed to by "xfer" 1046 * before we have allocated any 1047 * memory: 1048 */ 1049 xfer = &udev->scratch.xfer_setup[0].dummy; 1050 memset(xfer, 0, sizeof(*xfer)); 1051 refcount++; 1052 } 1053 1054 /* set transfer endpoint pointer */ 1055 xfer->endpoint = ep; 1056 1057 /* set transfer stream ID */ 1058 xfer->stream_id = setup->stream_id; 1059 1060 parm->size[0] += sizeof(xfer[0]); 1061 parm->methods = xfer->endpoint->methods; 1062 parm->curr_xfer = xfer; 1063 1064 /* 1065 * Call the Host or Device controller transfer 1066 * setup routine: 1067 */ 1068 (udev->bus->methods->xfer_setup) (parm); 1069 1070 /* check for error */ 1071 if (parm->err) 1072 goto done; 1073 1074 if (buf) { 1075 /* 1076 * Increment the endpoint refcount. This 1077 * basically prevents setting a new 1078 * configuration and alternate setting 1079 * when USB transfers are in use on 1080 * the given interface. Search the USB 1081 * code for "endpoint->refcount_alloc" if you 1082 * want more information. 
1083 */ 1084 USB_BUS_LOCK(info->bus); 1085 if (xfer->endpoint->refcount_alloc >= USB_EP_REF_MAX) 1086 parm->err = USB_ERR_INVAL; 1087 1088 xfer->endpoint->refcount_alloc++; 1089 1090 if (xfer->endpoint->refcount_alloc == 0) 1091 panic("usbd_transfer_setup(): Refcount wrapped to zero\n"); 1092 USB_BUS_UNLOCK(info->bus); 1093 1094 /* 1095 * Whenever we set ppxfer[] then we 1096 * also need to increment the 1097 * "setup_refcount": 1098 */ 1099 info->setup_refcount++; 1100 1101 /* 1102 * Transfer is successfully setup and 1103 * can be used: 1104 */ 1105 ppxfer[n] = xfer; 1106 } 1107 1108 /* check for error */ 1109 if (parm->err) 1110 goto done; 1111 } 1112 1113 if (buf != NULL || parm->err != 0) 1114 goto done; 1115 1116 /* if no transfers, nothing to do */ 1117 if (refcount == 0) 1118 goto done; 1119 1120 /* align data properly */ 1121 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1)); 1122 1123 /* store offset temporarily */ 1124 parm->size[1] = parm->size[0]; 1125 1126 /* 1127 * The number of DMA tags required depends on 1128 * the number of endpoints. The current estimate 1129 * for maximum number of DMA tags per endpoint 1130 * is three: 1131 * 1) for loading memory 1132 * 2) for allocating memory 1133 * 3) for fixing memory [UHCI] 1134 */ 1135 parm->dma_tag_max += 3 * MIN(n_setup, USB_EP_MAX); 1136 1137 /* 1138 * DMA tags for QH, TD, Data and more. 1139 */ 1140 parm->dma_tag_max += 8; 1141 1142 parm->dma_tag_p += parm->dma_tag_max; 1143 1144 parm->size[0] += ((uint8_t *)parm->dma_tag_p) - 1145 ((uint8_t *)0); 1146 1147 /* align data properly */ 1148 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1)); 1149 1150 /* store offset temporarily */ 1151 parm->size[3] = parm->size[0]; 1152 1153 parm->size[0] += ((uint8_t *)parm->dma_page_ptr) - 1154 ((uint8_t *)0); 1155 1156 /* align data properly */ 1157 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1)); 1158 1159 /* store offset temporarily */ 1160 parm->size[4] = parm->size[0]; 1161 1162 parm->size[0] += ((uint8_t *)parm->dma_page_cache_ptr) - 1163 ((uint8_t *)0); 1164 1165 /* store end offset temporarily */ 1166 parm->size[5] = parm->size[0]; 1167 1168 parm->size[0] += ((uint8_t *)parm->xfer_page_cache_ptr) - 1169 ((uint8_t *)0); 1170 1171 /* store end offset temporarily */ 1172 1173 parm->size[2] = parm->size[0]; 1174 1175 /* align data properly */ 1176 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1)); 1177 1178 parm->size[6] = parm->size[0]; 1179 1180 parm->size[0] += ((uint8_t *)parm->xfer_length_ptr) - 1181 ((uint8_t *)0); 1182 1183 /* align data properly */ 1184 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1)); 1185 1186 /* allocate zeroed memory */ 1187 buf = malloc(parm->size[0], M_USB, M_WAITOK | M_ZERO); 1188 1189 if (buf == NULL) { 1190 parm->err = USB_ERR_NOMEM; 1191 DPRINTFN(0, "cannot allocate memory block for " 1192 "configuration (%d bytes)\n", 1193 parm->size[0]); 1194 goto done; 1195 } 1196 parm->dma_tag_p = USB_ADD_BYTES(buf, parm->size[1]); 1197 parm->dma_page_ptr = USB_ADD_BYTES(buf, parm->size[3]); 1198 parm->dma_page_cache_ptr = USB_ADD_BYTES(buf, parm->size[4]); 1199 parm->xfer_page_cache_ptr = USB_ADD_BYTES(buf, parm->size[5]); 1200 parm->xfer_length_ptr = USB_ADD_BYTES(buf, parm->size[6]); 1201 } 1202 1203 done: 1204 if (buf) { 1205 if (info->setup_refcount == 0) { 1206 /* 1207 * "usbd_transfer_unsetup_sub" will unlock 1208 * the bus mutex before returning ! 
1209 */ 1210 USB_BUS_LOCK(info->bus); 1211 1212 /* something went wrong */ 1213 usbd_transfer_unsetup_sub(info, 0); 1214 } 1215 } 1216 1217 /* check if any errors happened */ 1218 if (parm->err) 1219 usbd_transfer_unsetup(ppxfer, n_setup); 1220 1221 error = parm->err; 1222 1223 if (do_unlock) 1224 usbd_enum_unlock(udev); 1225 1226 return (error); 1227 } 1228 1229 /*------------------------------------------------------------------------* 1230 * usbd_transfer_unsetup_sub - factored out code 1231 *------------------------------------------------------------------------*/ 1232 static void 1233 usbd_transfer_unsetup_sub(struct usb_xfer_root *info, uint8_t needs_delay) 1234 { 1235 #if USB_HAVE_BUSDMA 1236 struct usb_page_cache *pc; 1237 #endif 1238 1239 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED); 1240 1241 /* wait for any outstanding DMA operations */ 1242 1243 if (needs_delay) { 1244 usb_timeout_t temp; 1245 temp = usbd_get_dma_delay(info->udev); 1246 if (temp != 0) { 1247 usb_pause_mtx(&info->bus->bus_mtx, 1248 USB_MS_TO_TICKS(temp)); 1249 } 1250 } 1251 1252 /* make sure that our done messages are not queued anywhere */ 1253 usb_proc_mwait(info->done_p, &info->done_m[0], &info->done_m[1]); 1254 1255 USB_BUS_UNLOCK(info->bus); 1256 1257 #if USB_HAVE_BUSDMA 1258 /* free DMA'able memory, if any */ 1259 pc = info->dma_page_cache_start; 1260 while (pc != info->dma_page_cache_end) { 1261 usb_pc_free_mem(pc); 1262 pc++; 1263 } 1264 1265 /* free DMA maps in all "xfer->frbuffers" */ 1266 pc = info->xfer_page_cache_start; 1267 while (pc != info->xfer_page_cache_end) { 1268 usb_pc_dmamap_destroy(pc); 1269 pc++; 1270 } 1271 1272 /* free all DMA tags */ 1273 usb_dma_tag_unsetup(&info->dma_parent_tag); 1274 #endif 1275 1276 cv_destroy(&info->cv_drain); 1277 1278 /* 1279 * free the "memory_base" last, hence the "info" structure is 1280 * contained within the "memory_base"! 1281 */ 1282 free(info->memory_base, M_USB); 1283 } 1284 1285 /*------------------------------------------------------------------------* 1286 * usbd_transfer_unsetup - unsetup/free an array of USB transfers 1287 * 1288 * NOTE: All USB transfers in progress will get called back passing 1289 * the error code "USB_ERR_CANCELLED" before this function 1290 * returns. 1291 *------------------------------------------------------------------------*/ 1292 void 1293 usbd_transfer_unsetup(struct usb_xfer **pxfer, uint16_t n_setup) 1294 { 1295 struct usb_xfer *xfer; 1296 struct usb_xfer_root *info; 1297 uint8_t needs_delay = 0; 1298 1299 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 1300 "usbd_transfer_unsetup can sleep!"); 1301 1302 while (n_setup--) { 1303 xfer = pxfer[n_setup]; 1304 1305 if (xfer == NULL) 1306 continue; 1307 1308 info = xfer->xroot; 1309 1310 USB_XFER_LOCK(xfer); 1311 USB_BUS_LOCK(info->bus); 1312 1313 /* 1314 * HINT: when you start/stop a transfer, it might be a 1315 * good idea to directly use the "pxfer[]" structure: 1316 * 1317 * usbd_transfer_start(sc->pxfer[0]); 1318 * usbd_transfer_stop(sc->pxfer[0]); 1319 * 1320 * That way, if your code has many parts that will not 1321 * stop running under the same lock, in other words 1322 * "xfer_mtx", the usbd_transfer_start and 1323 * usbd_transfer_stop functions will simply return 1324 * when they detect a NULL pointer argument. 
1325 * 1326 * To avoid any races we clear the "pxfer[]" pointer 1327 * while holding the private mutex of the driver: 1328 */ 1329 pxfer[n_setup] = NULL; 1330 1331 USB_BUS_UNLOCK(info->bus); 1332 USB_XFER_UNLOCK(xfer); 1333 1334 usbd_transfer_drain(xfer); 1335 1336 #if USB_HAVE_BUSDMA 1337 if (xfer->flags_int.bdma_enable) 1338 needs_delay = 1; 1339 #endif 1340 /* 1341 * NOTE: default endpoint does not have an 1342 * interface, even if endpoint->iface_index == 0 1343 */ 1344 USB_BUS_LOCK(info->bus); 1345 xfer->endpoint->refcount_alloc--; 1346 USB_BUS_UNLOCK(info->bus); 1347 1348 usb_callout_drain(&xfer->timeout_handle); 1349 1350 USB_BUS_LOCK(info->bus); 1351 1352 USB_ASSERT(info->setup_refcount != 0, ("Invalid setup " 1353 "reference count\n")); 1354 1355 info->setup_refcount--; 1356 1357 if (info->setup_refcount == 0) { 1358 usbd_transfer_unsetup_sub(info, 1359 needs_delay); 1360 } else { 1361 USB_BUS_UNLOCK(info->bus); 1362 } 1363 } 1364 } 1365 1366 /*------------------------------------------------------------------------* 1367 * usbd_control_transfer_init - factored out code 1368 * 1369 * In USB Device Mode we have to wait for the SETUP packet which 1370 * containst the "struct usb_device_request" structure, before we can 1371 * transfer any data. In USB Host Mode we already have the SETUP 1372 * packet at the moment the USB transfer is started. This leads us to 1373 * having to setup the USB transfer at two different places in 1374 * time. This function just contains factored out control transfer 1375 * initialisation code, so that we don't duplicate the code. 1376 *------------------------------------------------------------------------*/ 1377 static void 1378 usbd_control_transfer_init(struct usb_xfer *xfer) 1379 { 1380 struct usb_device_request req; 1381 1382 /* copy out the USB request header */ 1383 1384 usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req)); 1385 1386 /* setup remainder */ 1387 1388 xfer->flags_int.control_rem = UGETW(req.wLength); 1389 1390 /* copy direction to endpoint variable */ 1391 1392 xfer->endpointno &= ~(UE_DIR_IN | UE_DIR_OUT); 1393 xfer->endpointno |= 1394 (req.bmRequestType & UT_READ) ? UE_DIR_IN : UE_DIR_OUT; 1395 } 1396 1397 /*------------------------------------------------------------------------* 1398 * usbd_setup_ctrl_transfer 1399 * 1400 * This function handles initialisation of control transfers. Control 1401 * transfers are special in that regard that they can both transmit 1402 * and receive data. 1403 * 1404 * Return values: 1405 * 0: Success 1406 * Else: Failure 1407 *------------------------------------------------------------------------*/ 1408 static int 1409 usbd_setup_ctrl_transfer(struct usb_xfer *xfer) 1410 { 1411 usb_frlength_t len; 1412 1413 /* Check for control endpoint stall */ 1414 if (xfer->flags.stall_pipe && xfer->flags_int.control_act) { 1415 /* the control transfer is no longer active */ 1416 xfer->flags_int.control_stall = 1; 1417 xfer->flags_int.control_act = 0; 1418 } else { 1419 /* don't stall control transfer by default */ 1420 xfer->flags_int.control_stall = 0; 1421 } 1422 1423 /* Check for invalid number of frames */ 1424 if (xfer->nframes > 2) { 1425 /* 1426 * If you need to split a control transfer, you 1427 * have to do one part at a time. Only with 1428 * non-control transfers you can do multiple 1429 * parts a time. 
1430 */ 1431 DPRINTFN(0, "Too many frames: %u\n", 1432 (unsigned int)xfer->nframes); 1433 goto error; 1434 } 1435 1436 /* 1437 * Check if there is a control 1438 * transfer in progress: 1439 */ 1440 if (xfer->flags_int.control_act) { 1441 1442 if (xfer->flags_int.control_hdr) { 1443 1444 /* clear send header flag */ 1445 1446 xfer->flags_int.control_hdr = 0; 1447 1448 /* setup control transfer */ 1449 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) { 1450 usbd_control_transfer_init(xfer); 1451 } 1452 } 1453 /* get data length */ 1454 1455 len = xfer->sumlen; 1456 1457 } else { 1458 1459 /* the size of the SETUP structure is hardcoded ! */ 1460 1461 if (xfer->frlengths[0] != sizeof(struct usb_device_request)) { 1462 DPRINTFN(0, "Wrong framelength %u != %zu\n", 1463 xfer->frlengths[0], sizeof(struct 1464 usb_device_request)); 1465 goto error; 1466 } 1467 /* check USB mode */ 1468 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) { 1469 1470 /* check number of frames */ 1471 if (xfer->nframes != 1) { 1472 /* 1473 * We need to receive the setup 1474 * message first so that we know the 1475 * data direction! 1476 */ 1477 DPRINTF("Misconfigured transfer\n"); 1478 goto error; 1479 } 1480 /* 1481 * Set a dummy "control_rem" value. This 1482 * variable will be overwritten later by a 1483 * call to "usbd_control_transfer_init()" ! 1484 */ 1485 xfer->flags_int.control_rem = 0xFFFF; 1486 } else { 1487 1488 /* setup "endpoint" and "control_rem" */ 1489 1490 usbd_control_transfer_init(xfer); 1491 } 1492 1493 /* set transfer-header flag */ 1494 1495 xfer->flags_int.control_hdr = 1; 1496 1497 /* get data length */ 1498 1499 len = (xfer->sumlen - sizeof(struct usb_device_request)); 1500 } 1501 1502 /* check if there is a length mismatch */ 1503 1504 if (len > xfer->flags_int.control_rem) { 1505 DPRINTFN(0, "Length (%d) greater than " 1506 "remaining length (%d)\n", len, 1507 xfer->flags_int.control_rem); 1508 goto error; 1509 } 1510 /* check if we are doing a short transfer */ 1511 1512 if (xfer->flags.force_short_xfer) { 1513 xfer->flags_int.control_rem = 0; 1514 } else { 1515 if ((len != xfer->max_data_length) && 1516 (len != xfer->flags_int.control_rem) && 1517 (xfer->nframes != 1)) { 1518 DPRINTFN(0, "Short control transfer without " 1519 "force_short_xfer set\n"); 1520 goto error; 1521 } 1522 xfer->flags_int.control_rem -= len; 1523 } 1524 1525 /* the status part is executed when "control_act" is 0 */ 1526 1527 if ((xfer->flags_int.control_rem > 0) || 1528 (xfer->flags.manual_status)) { 1529 /* don't execute the STATUS stage yet */ 1530 xfer->flags_int.control_act = 1; 1531 1532 /* sanity check */ 1533 if ((!xfer->flags_int.control_hdr) && 1534 (xfer->nframes == 1)) { 1535 /* 1536 * This is not a valid operation! 1537 */ 1538 DPRINTFN(0, "Invalid parameter " 1539 "combination\n"); 1540 goto error; 1541 } 1542 } else { 1543 /* time to execute the STATUS stage */ 1544 xfer->flags_int.control_act = 0; 1545 } 1546 return (0); /* success */ 1547 1548 error: 1549 return (1); /* failure */ 1550 } 1551 1552 /*------------------------------------------------------------------------* 1553 * usbd_transfer_submit - start USB hardware for the given transfer 1554 * 1555 * This function should only be called from the USB callback. 
1556 *------------------------------------------------------------------------*/ 1557 void 1558 usbd_transfer_submit(struct usb_xfer *xfer) 1559 { 1560 struct usb_xfer_root *info; 1561 struct usb_bus *bus; 1562 usb_frcount_t x; 1563 1564 info = xfer->xroot; 1565 bus = info->bus; 1566 1567 DPRINTF("xfer=%p, endpoint=%p, nframes=%d, dir=%s\n", 1568 xfer, xfer->endpoint, xfer->nframes, USB_GET_DATA_ISREAD(xfer) ? 1569 "read" : "write"); 1570 1571 #ifdef USB_DEBUG 1572 if (USB_DEBUG_VAR > 0) { 1573 USB_BUS_LOCK(bus); 1574 1575 usb_dump_endpoint(xfer->endpoint); 1576 1577 USB_BUS_UNLOCK(bus); 1578 } 1579 #endif 1580 1581 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 1582 USB_BUS_LOCK_ASSERT(bus, MA_NOTOWNED); 1583 1584 /* Only open the USB transfer once! */ 1585 if (!xfer->flags_int.open) { 1586 xfer->flags_int.open = 1; 1587 1588 DPRINTF("open\n"); 1589 1590 USB_BUS_LOCK(bus); 1591 (xfer->endpoint->methods->open) (xfer); 1592 USB_BUS_UNLOCK(bus); 1593 } 1594 /* set "transferring" flag */ 1595 xfer->flags_int.transferring = 1; 1596 1597 #if USB_HAVE_POWERD 1598 /* increment power reference */ 1599 usbd_transfer_power_ref(xfer, 1); 1600 #endif 1601 /* 1602 * Check if the transfer is waiting on a queue, most 1603 * frequently the "done_q": 1604 */ 1605 if (xfer->wait_queue) { 1606 USB_BUS_LOCK(bus); 1607 usbd_transfer_dequeue(xfer); 1608 USB_BUS_UNLOCK(bus); 1609 } 1610 /* clear "did_dma_delay" flag */ 1611 xfer->flags_int.did_dma_delay = 0; 1612 1613 /* clear "did_close" flag */ 1614 xfer->flags_int.did_close = 0; 1615 1616 #if USB_HAVE_BUSDMA 1617 /* clear "bdma_setup" flag */ 1618 xfer->flags_int.bdma_setup = 0; 1619 #endif 1620 /* by default we cannot cancel any USB transfer immediately */ 1621 xfer->flags_int.can_cancel_immed = 0; 1622 1623 /* clear lengths and frame counts by default */ 1624 xfer->sumlen = 0; 1625 xfer->actlen = 0; 1626 xfer->aframes = 0; 1627 1628 /* clear any previous errors */ 1629 xfer->error = 0; 1630 1631 /* Check if the device is still alive */ 1632 if (info->udev->state < USB_STATE_POWERED) { 1633 USB_BUS_LOCK(bus); 1634 /* 1635 * Must return cancelled error code else 1636 * device drivers can hang. 
		 */
		usbd_transfer_done(xfer, USB_ERR_CANCELLED);
		USB_BUS_UNLOCK(bus);
		return;
	}

	/* sanity check */
	if (xfer->nframes == 0) {
		if (xfer->flags.stall_pipe) {
			/*
			 * Special case - want to stall without transferring
			 * any data:
			 */
			DPRINTF("xfer=%p nframes=0: stall "
			    "or clear stall!\n", xfer);
			USB_BUS_LOCK(bus);
			xfer->flags_int.can_cancel_immed = 1;
			/* start the transfer */
			usb_command_wrapper(&xfer->endpoint->
			    endpoint_q[xfer->stream_id], xfer);
			USB_BUS_UNLOCK(bus);
			return;
		}
		USB_BUS_LOCK(bus);
		usbd_transfer_done(xfer, USB_ERR_INVAL);
		USB_BUS_UNLOCK(bus);
		return;
	}
	/* compute some variables */

	for (x = 0; x != xfer->nframes; x++) {
		/* make a copy of the frlengths[] */
		xfer->frlengths[x + xfer->max_frame_count] = xfer->frlengths[x];
		/* compute total transfer length */
		xfer->sumlen += xfer->frlengths[x];
		if (xfer->sumlen < xfer->frlengths[x]) {
			/* length wrapped around */
			USB_BUS_LOCK(bus);
			usbd_transfer_done(xfer, USB_ERR_INVAL);
			USB_BUS_UNLOCK(bus);
			return;
		}
	}

	/* clear some internal flags */

	xfer->flags_int.short_xfer_ok = 0;
	xfer->flags_int.short_frames_ok = 0;

	/* check if this is a control transfer */

	if (xfer->flags_int.control_xfr) {

		if (usbd_setup_ctrl_transfer(xfer)) {
			USB_BUS_LOCK(bus);
			usbd_transfer_done(xfer, USB_ERR_STALLED);
			USB_BUS_UNLOCK(bus);
			return;
		}
	}
	/*
	 * Setup a filtered version of some transfer flags,
	 * in case of data read direction
	 */
	if (USB_GET_DATA_ISREAD(xfer)) {

		if (xfer->flags.short_frames_ok) {
			xfer->flags_int.short_xfer_ok = 1;
			xfer->flags_int.short_frames_ok = 1;
		} else if (xfer->flags.short_xfer_ok) {
			xfer->flags_int.short_xfer_ok = 1;

			/* check for control transfer */
			if (xfer->flags_int.control_xfr) {
				/*
				 * 1) Control transfers do not support
				 * reception of multiple short USB
				 * frames in host mode and device side
				 * mode, with the exception of:
				 *
				 * 2) Due to sometimes buggy device
				 * side firmware we need to do a
				 * STATUS stage in case of short
				 * control transfers in USB host mode.
				 * The STATUS stage then becomes the
				 * "alt_next" to the DATA stage.
1723 */ 1724 xfer->flags_int.short_frames_ok = 1; 1725 } 1726 } 1727 } 1728 /* 1729 * Check if BUS-DMA support is enabled and try to load virtual 1730 * buffers into DMA, if any: 1731 */ 1732 #if USB_HAVE_BUSDMA 1733 if (xfer->flags_int.bdma_enable) { 1734 /* insert the USB transfer last in the BUS-DMA queue */ 1735 usb_command_wrapper(&xfer->xroot->dma_q, xfer); 1736 return; 1737 } 1738 #endif 1739 /* 1740 * Enter the USB transfer into the Host Controller or 1741 * Device Controller schedule: 1742 */ 1743 usbd_pipe_enter(xfer); 1744 } 1745 1746 /*------------------------------------------------------------------------* 1747 * usbd_pipe_enter - factored out code 1748 *------------------------------------------------------------------------*/ 1749 void 1750 usbd_pipe_enter(struct usb_xfer *xfer) 1751 { 1752 struct usb_endpoint *ep; 1753 1754 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 1755 1756 USB_BUS_LOCK(xfer->xroot->bus); 1757 1758 ep = xfer->endpoint; 1759 1760 DPRINTF("enter\n"); 1761 1762 /* the transfer can now be cancelled */ 1763 xfer->flags_int.can_cancel_immed = 1; 1764 1765 /* enter the transfer */ 1766 (ep->methods->enter) (xfer); 1767 1768 /* check for transfer error */ 1769 if (xfer->error) { 1770 /* some error has happened */ 1771 usbd_transfer_done(xfer, 0); 1772 USB_BUS_UNLOCK(xfer->xroot->bus); 1773 return; 1774 } 1775 1776 /* start the transfer */ 1777 usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], xfer); 1778 USB_BUS_UNLOCK(xfer->xroot->bus); 1779 } 1780 1781 /*------------------------------------------------------------------------* 1782 * usbd_transfer_start - start an USB transfer 1783 * 1784 * NOTE: Calling this function more than one time will only 1785 * result in a single transfer start, until the USB transfer 1786 * completes. 1787 *------------------------------------------------------------------------*/ 1788 void 1789 usbd_transfer_start(struct usb_xfer *xfer) 1790 { 1791 if (xfer == NULL) { 1792 /* transfer is gone */ 1793 return; 1794 } 1795 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 1796 1797 /* mark the USB transfer started */ 1798 1799 if (!xfer->flags_int.started) { 1800 /* lock the BUS lock to avoid races updating flags_int */ 1801 USB_BUS_LOCK(xfer->xroot->bus); 1802 xfer->flags_int.started = 1; 1803 USB_BUS_UNLOCK(xfer->xroot->bus); 1804 } 1805 /* check if the USB transfer callback is already transferring */ 1806 1807 if (xfer->flags_int.transferring) { 1808 return; 1809 } 1810 USB_BUS_LOCK(xfer->xroot->bus); 1811 /* call the USB transfer callback */ 1812 usbd_callback_ss_done_defer(xfer); 1813 USB_BUS_UNLOCK(xfer->xroot->bus); 1814 } 1815 1816 /*------------------------------------------------------------------------* 1817 * usbd_transfer_stop - stop an USB transfer 1818 * 1819 * NOTE: Calling this function more than one time will only 1820 * result in a single transfer stop. 1821 * NOTE: When this function returns it is not safe to free nor 1822 * reuse any DMA buffers. See "usbd_transfer_drain()". 
1823 *------------------------------------------------------------------------*/ 1824 void 1825 usbd_transfer_stop(struct usb_xfer *xfer) 1826 { 1827 struct usb_endpoint *ep; 1828 1829 if (xfer == NULL) { 1830 /* transfer is gone */ 1831 return; 1832 } 1833 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 1834 1835 /* check if the USB transfer was ever opened */ 1836 1837 if (!xfer->flags_int.open) { 1838 if (xfer->flags_int.started) { 1839 /* nothing to do except clearing the "started" flag */ 1840 /* lock the BUS lock to avoid races updating flags_int */ 1841 USB_BUS_LOCK(xfer->xroot->bus); 1842 xfer->flags_int.started = 0; 1843 USB_BUS_UNLOCK(xfer->xroot->bus); 1844 } 1845 return; 1846 } 1847 /* try to stop the current USB transfer */ 1848 1849 USB_BUS_LOCK(xfer->xroot->bus); 1850 /* override any previous error */ 1851 xfer->error = USB_ERR_CANCELLED; 1852 1853 /* 1854 * Clear "open" and "started" when both private and USB lock 1855 * is locked so that we don't get a race updating "flags_int" 1856 */ 1857 xfer->flags_int.open = 0; 1858 xfer->flags_int.started = 0; 1859 1860 /* 1861 * Check if we can cancel the USB transfer immediately. 1862 */ 1863 if (xfer->flags_int.transferring) { 1864 if (xfer->flags_int.can_cancel_immed && 1865 (!xfer->flags_int.did_close)) { 1866 DPRINTF("close\n"); 1867 /* 1868 * The following will lead to an USB_ERR_CANCELLED 1869 * error code being passed to the USB callback. 1870 */ 1871 (xfer->endpoint->methods->close) (xfer); 1872 /* only close once */ 1873 xfer->flags_int.did_close = 1; 1874 } else { 1875 /* need to wait for the next done callback */ 1876 } 1877 } else { 1878 DPRINTF("close\n"); 1879 1880 /* close here and now */ 1881 (xfer->endpoint->methods->close) (xfer); 1882 1883 /* 1884 * Any additional DMA delay is done by 1885 * "usbd_transfer_unsetup()". 1886 */ 1887 1888 /* 1889 * Special case. Check if we need to restart a blocked 1890 * endpoint. 1891 */ 1892 ep = xfer->endpoint; 1893 1894 /* 1895 * If the current USB transfer is completing we need 1896 * to start the next one: 1897 */ 1898 if (ep->endpoint_q[xfer->stream_id].curr == xfer) { 1899 usb_command_wrapper( 1900 &ep->endpoint_q[xfer->stream_id], NULL); 1901 } 1902 } 1903 1904 USB_BUS_UNLOCK(xfer->xroot->bus); 1905 } 1906 1907 /*------------------------------------------------------------------------* 1908 * usbd_transfer_pending 1909 * 1910 * This function will check if an USB transfer is pending which is a 1911 * little bit complicated! 1912 * Return values: 1913 * 0: Not pending 1914 * 1: Pending: The USB transfer will receive a callback in the future. 
1915 *------------------------------------------------------------------------*/ 1916 uint8_t 1917 usbd_transfer_pending(struct usb_xfer *xfer) 1918 { 1919 struct usb_xfer_root *info; 1920 struct usb_xfer_queue *pq; 1921 1922 if (xfer == NULL) { 1923 /* transfer is gone */ 1924 return (0); 1925 } 1926 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 1927 1928 if (xfer->flags_int.transferring) { 1929 /* trivial case */ 1930 return (1); 1931 } 1932 USB_BUS_LOCK(xfer->xroot->bus); 1933 if (xfer->wait_queue) { 1934 /* we are waiting on a queue somewhere */ 1935 USB_BUS_UNLOCK(xfer->xroot->bus); 1936 return (1); 1937 } 1938 info = xfer->xroot; 1939 pq = &info->done_q; 1940 1941 if (pq->curr == xfer) { 1942 /* we are currently scheduled for callback */ 1943 USB_BUS_UNLOCK(xfer->xroot->bus); 1944 return (1); 1945 } 1946 /* we are not pending */ 1947 USB_BUS_UNLOCK(xfer->xroot->bus); 1948 return (0); 1949 } 1950 1951 /*------------------------------------------------------------------------* 1952 * usbd_transfer_drain 1953 * 1954 * This function will stop the USB transfer and wait for any 1955 * additional BUS-DMA and HW-DMA operations to complete. Buffers that 1956 * are loaded into DMA can safely be freed or reused after that this 1957 * function has returned. 1958 *------------------------------------------------------------------------*/ 1959 void 1960 usbd_transfer_drain(struct usb_xfer *xfer) 1961 { 1962 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 1963 "usbd_transfer_drain can sleep!"); 1964 1965 if (xfer == NULL) { 1966 /* transfer is gone */ 1967 return; 1968 } 1969 if (xfer->xroot->xfer_mtx != &Giant) { 1970 USB_XFER_LOCK_ASSERT(xfer, MA_NOTOWNED); 1971 } 1972 USB_XFER_LOCK(xfer); 1973 1974 usbd_transfer_stop(xfer); 1975 1976 while (usbd_transfer_pending(xfer) || 1977 xfer->flags_int.doing_callback) { 1978 1979 /* 1980 * It is allowed that the callback can drop its 1981 * transfer mutex. In that case checking only 1982 * "usbd_transfer_pending()" is not enough to tell if 1983 * the USB transfer is fully drained. We also need to 1984 * check the internal "doing_callback" flag. 1985 */ 1986 xfer->flags_int.draining = 1; 1987 1988 /* 1989 * Wait until the current outstanding USB 1990 * transfer is complete ! 1991 */ 1992 cv_wait(&xfer->xroot->cv_drain, xfer->xroot->xfer_mtx); 1993 } 1994 USB_XFER_UNLOCK(xfer); 1995 } 1996 1997 struct usb_page_cache * 1998 usbd_xfer_get_frame(struct usb_xfer *xfer, usb_frcount_t frindex) 1999 { 2000 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); 2001 2002 return (&xfer->frbuffers[frindex]); 2003 } 2004 2005 void * 2006 usbd_xfer_get_frame_buffer(struct usb_xfer *xfer, usb_frcount_t frindex) 2007 { 2008 struct usb_page_search page_info; 2009 2010 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); 2011 2012 usbd_get_page(&xfer->frbuffers[frindex], 0, &page_info); 2013 return (page_info.buffer); 2014 } 2015 2016 /*------------------------------------------------------------------------* 2017 * usbd_xfer_get_fps_shift 2018 * 2019 * The following function is only useful for isochronous transfers. It 2020 * returns how many times the frame execution rate has been shifted 2021 * down. 
/*------------------------------------------------------------------------*
 *	usbd_xfer_get_fps_shift
 *
 * The following function is only useful for isochronous transfers. It
 * returns how many times the frame execution rate has been shifted
 * down.
 *
 * Return value:
 * Success: 0..3
 * Failure: 0
 *------------------------------------------------------------------------*/
uint8_t
usbd_xfer_get_fps_shift(struct usb_xfer *xfer)
{
	return (xfer->fps_shift);
}

usb_frlength_t
usbd_xfer_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex)
{
	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));

	return (xfer->frlengths[frindex]);
}

/*------------------------------------------------------------------------*
 *	usbd_xfer_set_frame_data
 *
 * This function sets the pointer of the buffer that should be loaded
 * directly into DMA for the given USB frame. Passing "ptr" equal to
 * NULL while the corresponding "frlength" is greater than zero gives
 * undefined results!
 *------------------------------------------------------------------------*/
void
usbd_xfer_set_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
    void *ptr, usb_frlength_t len)
{
	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));

	/* set virtual address to load and length */
	xfer->frbuffers[frindex].buffer = ptr;
	usbd_xfer_set_frame_len(xfer, frindex, len);
}

void
usbd_xfer_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
    void **ptr, int *len)
{
	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));

	if (ptr != NULL)
		*ptr = xfer->frbuffers[frindex].buffer;
	if (len != NULL)
		*len = xfer->frlengths[frindex];
}

/*------------------------------------------------------------------------*
 *	usbd_xfer_old_frame_length
 *
 * This function returns the frame length of the given frame at the
 * time the transfer was submitted. This function can be used to
 * compute the starting data pointer of the next isochronous frame
 * when an isochronous transfer has completed.
 *------------------------------------------------------------------------*/
usb_frlength_t
usbd_xfer_old_frame_length(struct usb_xfer *xfer, usb_frcount_t frindex)
{
	KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));

	return (xfer->frlengths[frindex + xfer->max_frame_count]);
}

void
usbd_xfer_status(struct usb_xfer *xfer, int *actlen, int *sumlen, int *aframes,
    int *nframes)
{
	if (actlen != NULL)
		*actlen = xfer->actlen;
	if (sumlen != NULL)
		*sumlen = xfer->sumlen;
	if (aframes != NULL)
		*aframes = xfer->aframes;
	if (nframes != NULL)
		*nframes = xfer->nframes;
}

/*------------------------------------------------------------------------*
 *	usbd_xfer_set_frame_offset
 *
 * This function sets the frame data buffer offset relative to the
 * beginning of the USB DMA buffer allocated for this USB transfer.
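 *
 * For example, the control transfer setup code in this file calls
 * usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1) so that the data stage
 * frame starts right after the 8 byte SETUP request in the local buffer.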
2107 *------------------------------------------------------------------------*/ 2108 void 2109 usbd_xfer_set_frame_offset(struct usb_xfer *xfer, usb_frlength_t offset, 2110 usb_frcount_t frindex) 2111 { 2112 KASSERT(!xfer->flags.ext_buffer, ("Cannot offset data frame " 2113 "when the USB buffer is external\n")); 2114 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); 2115 2116 /* set virtual address to load */ 2117 xfer->frbuffers[frindex].buffer = 2118 USB_ADD_BYTES(xfer->local_buffer, offset); 2119 } 2120 2121 void 2122 usbd_xfer_set_interval(struct usb_xfer *xfer, int i) 2123 { 2124 xfer->interval = i; 2125 } 2126 2127 void 2128 usbd_xfer_set_timeout(struct usb_xfer *xfer, int t) 2129 { 2130 xfer->timeout = t; 2131 } 2132 2133 void 2134 usbd_xfer_set_frames(struct usb_xfer *xfer, usb_frcount_t n) 2135 { 2136 xfer->nframes = n; 2137 } 2138 2139 usb_frcount_t 2140 usbd_xfer_max_frames(struct usb_xfer *xfer) 2141 { 2142 return (xfer->max_frame_count); 2143 } 2144 2145 usb_frlength_t 2146 usbd_xfer_max_len(struct usb_xfer *xfer) 2147 { 2148 return (xfer->max_data_length); 2149 } 2150 2151 usb_frlength_t 2152 usbd_xfer_max_framelen(struct usb_xfer *xfer) 2153 { 2154 return (xfer->max_frame_size); 2155 } 2156 2157 void 2158 usbd_xfer_set_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex, 2159 usb_frlength_t len) 2160 { 2161 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); 2162 2163 xfer->frlengths[frindex] = len; 2164 } 2165 2166 /*------------------------------------------------------------------------* 2167 * usb_callback_proc - factored out code 2168 * 2169 * This function performs USB callbacks. 2170 *------------------------------------------------------------------------*/ 2171 static void 2172 usb_callback_proc(struct usb_proc_msg *_pm) 2173 { 2174 struct usb_done_msg *pm = (void *)_pm; 2175 struct usb_xfer_root *info = pm->xroot; 2176 2177 /* Change locking order */ 2178 USB_BUS_UNLOCK(info->bus); 2179 2180 /* 2181 * We exploit the fact that the mutex is the same for all 2182 * callbacks that will be called from this thread: 2183 */ 2184 mtx_lock(info->xfer_mtx); 2185 USB_BUS_LOCK(info->bus); 2186 2187 /* Continue where we lost track */ 2188 usb_command_wrapper(&info->done_q, 2189 info->done_q.curr); 2190 2191 mtx_unlock(info->xfer_mtx); 2192 } 2193 2194 /*------------------------------------------------------------------------* 2195 * usbd_callback_ss_done_defer 2196 * 2197 * This function will defer the start, stop and done callback to the 2198 * correct thread. 2199 *------------------------------------------------------------------------*/ 2200 static void 2201 usbd_callback_ss_done_defer(struct usb_xfer *xfer) 2202 { 2203 struct usb_xfer_root *info = xfer->xroot; 2204 struct usb_xfer_queue *pq = &info->done_q; 2205 2206 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED); 2207 2208 if (pq->curr != xfer) { 2209 usbd_transfer_enqueue(pq, xfer); 2210 } 2211 if (!pq->recurse_1) { 2212 2213 /* 2214 * We have to postpone the callback due to the fact we 2215 * will have a Lock Order Reversal, LOR, if we try to 2216 * proceed ! 2217 */ 2218 if (usb_proc_msignal(info->done_p, 2219 &info->done_m[0], &info->done_m[1])) { 2220 /* ignore */ 2221 } 2222 } else { 2223 /* clear second recurse flag */ 2224 pq->recurse_2 = 0; 2225 } 2226 return; 2227 2228 } 2229 2230 /*------------------------------------------------------------------------* 2231 * usbd_callback_wrapper 2232 * 2233 * This is a wrapper for USB callbacks. 
This wrapper does some 2234 * auto-magic things like figuring out if we can call the callback 2235 * directly from the current context or if we need to wakeup the 2236 * interrupt process. 2237 *------------------------------------------------------------------------*/ 2238 static void 2239 usbd_callback_wrapper(struct usb_xfer_queue *pq) 2240 { 2241 struct usb_xfer *xfer = pq->curr; 2242 struct usb_xfer_root *info = xfer->xroot; 2243 2244 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED); 2245 if (!mtx_owned(info->xfer_mtx) && !SCHEDULER_STOPPED()) { 2246 /* 2247 * Cases that end up here: 2248 * 2249 * 5) HW interrupt done callback or other source. 2250 */ 2251 DPRINTFN(3, "case 5\n"); 2252 2253 /* 2254 * We have to postpone the callback due to the fact we 2255 * will have a Lock Order Reversal, LOR, if we try to 2256 * proceed ! 2257 */ 2258 if (usb_proc_msignal(info->done_p, 2259 &info->done_m[0], &info->done_m[1])) { 2260 /* ignore */ 2261 } 2262 return; 2263 } 2264 /* 2265 * Cases that end up here: 2266 * 2267 * 1) We are starting a transfer 2268 * 2) We are prematurely calling back a transfer 2269 * 3) We are stopping a transfer 2270 * 4) We are doing an ordinary callback 2271 */ 2272 DPRINTFN(3, "case 1-4\n"); 2273 /* get next USB transfer in the queue */ 2274 info->done_q.curr = NULL; 2275 2276 /* set flag in case of drain */ 2277 xfer->flags_int.doing_callback = 1; 2278 2279 USB_BUS_UNLOCK(info->bus); 2280 USB_BUS_LOCK_ASSERT(info->bus, MA_NOTOWNED); 2281 2282 /* set correct USB state for callback */ 2283 if (!xfer->flags_int.transferring) { 2284 xfer->usb_state = USB_ST_SETUP; 2285 if (!xfer->flags_int.started) { 2286 /* we got stopped before we even got started */ 2287 USB_BUS_LOCK(info->bus); 2288 goto done; 2289 } 2290 } else { 2291 2292 if (usbd_callback_wrapper_sub(xfer)) { 2293 /* the callback has been deferred */ 2294 USB_BUS_LOCK(info->bus); 2295 goto done; 2296 } 2297 #if USB_HAVE_POWERD 2298 /* decrement power reference */ 2299 usbd_transfer_power_ref(xfer, -1); 2300 #endif 2301 xfer->flags_int.transferring = 0; 2302 2303 if (xfer->error) { 2304 xfer->usb_state = USB_ST_ERROR; 2305 } else { 2306 /* set transferred state */ 2307 xfer->usb_state = USB_ST_TRANSFERRED; 2308 #if USB_HAVE_BUSDMA 2309 /* sync DMA memory, if any */ 2310 if (xfer->flags_int.bdma_enable && 2311 (!xfer->flags_int.bdma_no_post_sync)) { 2312 usb_bdma_post_sync(xfer); 2313 } 2314 #endif 2315 } 2316 } 2317 2318 #if USB_HAVE_PF 2319 if (xfer->usb_state != USB_ST_SETUP) 2320 usbpf_xfertap(xfer, USBPF_XFERTAP_DONE); 2321 #endif 2322 /* call processing routine */ 2323 (xfer->callback) (xfer, xfer->error); 2324 2325 /* pickup the USB mutex again */ 2326 USB_BUS_LOCK(info->bus); 2327 2328 /* 2329 * Check if we got started after that we got cancelled, but 2330 * before we managed to do the callback. 2331 */ 2332 if ((!xfer->flags_int.open) && 2333 (xfer->flags_int.started) && 2334 (xfer->usb_state == USB_ST_ERROR)) { 2335 /* clear flag in case of drain */ 2336 xfer->flags_int.doing_callback = 0; 2337 /* try to loop, but not recursivly */ 2338 usb_command_wrapper(&info->done_q, xfer); 2339 return; 2340 } 2341 2342 done: 2343 /* clear flag in case of drain */ 2344 xfer->flags_int.doing_callback = 0; 2345 2346 /* 2347 * Check if we are draining. 
2348 */
2349 if (xfer->flags_int.draining &&
2350 (!xfer->flags_int.transferring)) {
2351 /* "usbd_transfer_drain()" is waiting for end of transfer */
2352 xfer->flags_int.draining = 0;
2353 cv_broadcast(&info->cv_drain);
2354 }
2355 
2356 /* do the next callback, if any */
2357 usb_command_wrapper(&info->done_q,
2358 info->done_q.curr);
2359 }
2360 
2361 /*------------------------------------------------------------------------*
2362 * usb_dma_delay_done_cb
2363 *
2364 * This function is called when the DMA delay has been executed, and
2365 * will make sure that the callback is called to complete the USB
2366 * transfer. This code path is usually only used when there is an USB
2367 * error like USB_ERR_CANCELLED.
2368 *------------------------------------------------------------------------*/
2369 void
2370 usb_dma_delay_done_cb(struct usb_xfer *xfer)
2371 {
2372 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2373 
2374 DPRINTFN(3, "Completed %p\n", xfer);
2375 
2376 /* queue callback for execution, again */
2377 usbd_transfer_done(xfer, 0);
2378 }
2379 
2380 /*------------------------------------------------------------------------*
2381 * usbd_transfer_dequeue
2382 *
2383 * - This function is used to remove an USB transfer from a USB
2384 * transfer queue.
2385 *
2386 * - This function can be called multiple times in a row.
2387 *------------------------------------------------------------------------*/
2388 void
2389 usbd_transfer_dequeue(struct usb_xfer *xfer)
2390 {
2391 struct usb_xfer_queue *pq;
2392 
2393 pq = xfer->wait_queue;
2394 if (pq) {
2395 TAILQ_REMOVE(&pq->head, xfer, wait_entry);
2396 xfer->wait_queue = NULL;
2397 }
2398 }
2399 
2400 /*------------------------------------------------------------------------*
2401 * usbd_transfer_enqueue
2402 *
2403 * - This function is used to insert an USB transfer into a USB
2404 * transfer queue.
2405 *
2406 * - This function can be called multiple times in a row.
2407 *------------------------------------------------------------------------*/
2408 void
2409 usbd_transfer_enqueue(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2410 {
2411 /*
2412 * Insert the USB transfer into the queue, if it is not
2413 * already on a USB transfer queue:
2414 */
2415 if (xfer->wait_queue == NULL) {
2416 xfer->wait_queue = pq;
2417 TAILQ_INSERT_TAIL(&pq->head, xfer, wait_entry);
2418 }
2419 }
2420 
2421 /*------------------------------------------------------------------------*
2422 * usbd_transfer_done
2423 *
2424 * - This function is used to remove an USB transfer from the busdma,
2425 * pipe or interrupt queue.
2426 *
2427 * - This function is used to queue the USB transfer on the done
2428 * queue.
2429 *
2430 * - This function is used to stop any USB transfer timeouts.
2431 *------------------------------------------------------------------------*/
2432 void
2433 usbd_transfer_done(struct usb_xfer *xfer, usb_error_t error)
2434 {
2435 struct usb_xfer_root *info = xfer->xroot;
2436 
2437 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
2438 
2439 DPRINTF("err=%s\n", usbd_errstr(error));
2440 
2441 /*
2442 * If we are not transferring then just return.
2443 * This can happen during transfer cancel.
2444 */ 2445 if (!xfer->flags_int.transferring) { 2446 DPRINTF("not transferring\n"); 2447 /* end of control transfer, if any */ 2448 xfer->flags_int.control_act = 0; 2449 return; 2450 } 2451 /* only set transfer error, if not already set */ 2452 if (xfer->error == USB_ERR_NORMAL_COMPLETION) 2453 xfer->error = error; 2454 2455 /* stop any callouts */ 2456 usb_callout_stop(&xfer->timeout_handle); 2457 2458 /* 2459 * If we are waiting on a queue, just remove the USB transfer 2460 * from the queue, if any. We should have the required locks 2461 * locked to do the remove when this function is called. 2462 */ 2463 usbd_transfer_dequeue(xfer); 2464 2465 #if USB_HAVE_BUSDMA 2466 if (mtx_owned(info->xfer_mtx)) { 2467 struct usb_xfer_queue *pq; 2468 2469 /* 2470 * If the private USB lock is not locked, then we assume 2471 * that the BUS-DMA load stage has been passed: 2472 */ 2473 pq = &info->dma_q; 2474 2475 if (pq->curr == xfer) { 2476 /* start the next BUS-DMA load, if any */ 2477 usb_command_wrapper(pq, NULL); 2478 } 2479 } 2480 #endif 2481 /* keep some statistics */ 2482 if (xfer->error) { 2483 info->bus->stats_err.uds_requests 2484 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++; 2485 } else { 2486 info->bus->stats_ok.uds_requests 2487 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++; 2488 } 2489 2490 /* call the USB transfer callback */ 2491 usbd_callback_ss_done_defer(xfer); 2492 } 2493 2494 /*------------------------------------------------------------------------* 2495 * usbd_transfer_start_cb 2496 * 2497 * This function is called to start the USB transfer when 2498 * "xfer->interval" is greater than zero, and and the endpoint type is 2499 * BULK or CONTROL. 2500 *------------------------------------------------------------------------*/ 2501 static void 2502 usbd_transfer_start_cb(void *arg) 2503 { 2504 struct usb_xfer *xfer = arg; 2505 struct usb_endpoint *ep = xfer->endpoint; 2506 2507 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED); 2508 2509 DPRINTF("start\n"); 2510 2511 #if USB_HAVE_PF 2512 usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT); 2513 #endif 2514 2515 /* the transfer can now be cancelled */ 2516 xfer->flags_int.can_cancel_immed = 1; 2517 2518 /* start USB transfer, if no error */ 2519 if (xfer->error == 0) 2520 (ep->methods->start) (xfer); 2521 2522 /* check for transfer error */ 2523 if (xfer->error) { 2524 /* some error has happened */ 2525 usbd_transfer_done(xfer, 0); 2526 } 2527 } 2528 2529 /*------------------------------------------------------------------------* 2530 * usbd_xfer_set_stall 2531 * 2532 * This function is used to set the stall flag outside the 2533 * callback. This function is NULL safe. 2534 *------------------------------------------------------------------------*/ 2535 void 2536 usbd_xfer_set_stall(struct usb_xfer *xfer) 2537 { 2538 if (xfer == NULL) { 2539 /* tearing down */ 2540 return; 2541 } 2542 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 2543 2544 /* avoid any races by locking the USB mutex */ 2545 USB_BUS_LOCK(xfer->xroot->bus); 2546 xfer->flags.stall_pipe = 1; 2547 USB_BUS_UNLOCK(xfer->xroot->bus); 2548 } 2549 2550 int 2551 usbd_xfer_is_stalled(struct usb_xfer *xfer) 2552 { 2553 return (xfer->endpoint->is_stalled); 2554 } 2555 2556 /*------------------------------------------------------------------------* 2557 * usbd_transfer_clear_stall 2558 * 2559 * This function is used to clear the stall flag outside the 2560 * callback. This function is NULL safe. 
2561 *------------------------------------------------------------------------*/ 2562 void 2563 usbd_transfer_clear_stall(struct usb_xfer *xfer) 2564 { 2565 if (xfer == NULL) { 2566 /* tearing down */ 2567 return; 2568 } 2569 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 2570 2571 /* avoid any races by locking the USB mutex */ 2572 USB_BUS_LOCK(xfer->xroot->bus); 2573 2574 xfer->flags.stall_pipe = 0; 2575 2576 USB_BUS_UNLOCK(xfer->xroot->bus); 2577 } 2578 2579 /*------------------------------------------------------------------------* 2580 * usbd_pipe_start 2581 * 2582 * This function is used to add an USB transfer to the pipe transfer list. 2583 *------------------------------------------------------------------------*/ 2584 void 2585 usbd_pipe_start(struct usb_xfer_queue *pq) 2586 { 2587 struct usb_endpoint *ep; 2588 struct usb_xfer *xfer; 2589 uint8_t type; 2590 2591 xfer = pq->curr; 2592 ep = xfer->endpoint; 2593 2594 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED); 2595 2596 /* 2597 * If the endpoint is already stalled we do nothing ! 2598 */ 2599 if (ep->is_stalled) { 2600 return; 2601 } 2602 /* 2603 * Check if we are supposed to stall the endpoint: 2604 */ 2605 if (xfer->flags.stall_pipe) { 2606 struct usb_device *udev; 2607 struct usb_xfer_root *info; 2608 2609 /* clear stall command */ 2610 xfer->flags.stall_pipe = 0; 2611 2612 /* get pointer to USB device */ 2613 info = xfer->xroot; 2614 udev = info->udev; 2615 2616 /* 2617 * Only stall BULK and INTERRUPT endpoints. 2618 */ 2619 type = (ep->edesc->bmAttributes & UE_XFERTYPE); 2620 if ((type == UE_BULK) || 2621 (type == UE_INTERRUPT)) { 2622 uint8_t did_stall; 2623 2624 did_stall = 1; 2625 2626 if (udev->flags.usb_mode == USB_MODE_DEVICE) { 2627 (udev->bus->methods->set_stall) ( 2628 udev, ep, &did_stall); 2629 } else if (udev->ctrl_xfer[1]) { 2630 info = udev->ctrl_xfer[1]->xroot; 2631 usb_proc_msignal( 2632 USB_BUS_NON_GIANT_PROC(info->bus), 2633 &udev->cs_msg[0], &udev->cs_msg[1]); 2634 } else { 2635 /* should not happen */ 2636 DPRINTFN(0, "No stall handler\n"); 2637 } 2638 /* 2639 * Check if we should stall. Some USB hardware 2640 * handles set- and clear-stall in hardware. 2641 */ 2642 if (did_stall) { 2643 /* 2644 * The transfer will be continued when 2645 * the clear-stall control endpoint 2646 * message is received. 2647 */ 2648 ep->is_stalled = 1; 2649 return; 2650 } 2651 } else if (type == UE_ISOCHRONOUS) { 2652 2653 /* 2654 * Make sure any FIFO overflow or other FIFO 2655 * error conditions go away by resetting the 2656 * endpoint FIFO through the clear stall 2657 * method. 2658 */ 2659 if (udev->flags.usb_mode == USB_MODE_DEVICE) { 2660 (udev->bus->methods->clear_stall) (udev, ep); 2661 } 2662 } 2663 } 2664 /* Set or clear stall complete - special case */ 2665 if (xfer->nframes == 0) { 2666 /* we are complete */ 2667 xfer->aframes = 0; 2668 usbd_transfer_done(xfer, 0); 2669 return; 2670 } 2671 /* 2672 * Handled cases: 2673 * 2674 * 1) Start the first transfer queued. 2675 * 2676 * 2) Re-start the current USB transfer. 
2677 */ 2678 /* 2679 * Check if there should be any 2680 * pre transfer start delay: 2681 */ 2682 if (xfer->interval > 0) { 2683 type = (ep->edesc->bmAttributes & UE_XFERTYPE); 2684 if ((type == UE_BULK) || 2685 (type == UE_CONTROL)) { 2686 usbd_transfer_timeout_ms(xfer, 2687 &usbd_transfer_start_cb, 2688 xfer->interval); 2689 return; 2690 } 2691 } 2692 DPRINTF("start\n"); 2693 2694 #if USB_HAVE_PF 2695 usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT); 2696 #endif 2697 /* the transfer can now be cancelled */ 2698 xfer->flags_int.can_cancel_immed = 1; 2699 2700 /* start USB transfer, if no error */ 2701 if (xfer->error == 0) 2702 (ep->methods->start) (xfer); 2703 2704 /* check for transfer error */ 2705 if (xfer->error) { 2706 /* some error has happened */ 2707 usbd_transfer_done(xfer, 0); 2708 } 2709 } 2710 2711 /*------------------------------------------------------------------------* 2712 * usbd_transfer_timeout_ms 2713 * 2714 * This function is used to setup a timeout on the given USB 2715 * transfer. If the timeout has been deferred the callback given by 2716 * "cb" will get called after "ms" milliseconds. 2717 *------------------------------------------------------------------------*/ 2718 void 2719 usbd_transfer_timeout_ms(struct usb_xfer *xfer, 2720 void (*cb) (void *arg), usb_timeout_t ms) 2721 { 2722 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED); 2723 2724 /* defer delay */ 2725 usb_callout_reset(&xfer->timeout_handle, 2726 USB_MS_TO_TICKS(ms), cb, xfer); 2727 } 2728 2729 /*------------------------------------------------------------------------* 2730 * usbd_callback_wrapper_sub 2731 * 2732 * - This function will update variables in an USB transfer after 2733 * that the USB transfer is complete. 2734 * 2735 * - This function is used to start the next USB transfer on the 2736 * ep transfer queue, if any. 2737 * 2738 * NOTE: In some special cases the USB transfer will not be removed from 2739 * the pipe queue, but remain first. To enforce USB transfer removal call 2740 * this function passing the error code "USB_ERR_CANCELLED". 2741 * 2742 * Return values: 2743 * 0: Success. 2744 * Else: The callback has been deferred. 2745 *------------------------------------------------------------------------*/ 2746 static uint8_t 2747 usbd_callback_wrapper_sub(struct usb_xfer *xfer) 2748 { 2749 struct usb_endpoint *ep; 2750 struct usb_bus *bus; 2751 usb_frcount_t x; 2752 2753 bus = xfer->xroot->bus; 2754 2755 if ((!xfer->flags_int.open) && 2756 (!xfer->flags_int.did_close)) { 2757 DPRINTF("close\n"); 2758 USB_BUS_LOCK(bus); 2759 (xfer->endpoint->methods->close) (xfer); 2760 USB_BUS_UNLOCK(bus); 2761 /* only close once */ 2762 xfer->flags_int.did_close = 1; 2763 return (1); /* wait for new callback */ 2764 } 2765 /* 2766 * If we have a non-hardware induced error we 2767 * need to do the DMA delay! 2768 */ 2769 if (xfer->error != 0 && !xfer->flags_int.did_dma_delay && 2770 (xfer->error == USB_ERR_CANCELLED || 2771 xfer->error == USB_ERR_TIMEOUT || 2772 bus->methods->start_dma_delay != NULL)) { 2773 2774 usb_timeout_t temp; 2775 2776 /* only delay once */ 2777 xfer->flags_int.did_dma_delay = 1; 2778 2779 /* we can not cancel this delay */ 2780 xfer->flags_int.can_cancel_immed = 0; 2781 2782 temp = usbd_get_dma_delay(xfer->xroot->udev); 2783 2784 DPRINTFN(3, "DMA delay, %u ms, " 2785 "on %p\n", temp, xfer); 2786 2787 if (temp != 0) { 2788 USB_BUS_LOCK(bus); 2789 /* 2790 * Some hardware solutions have dedicated 2791 * events when it is safe to free DMA'ed 2792 * memory. 
For the other hardware platforms we 2793 * use a static delay. 2794 */ 2795 if (bus->methods->start_dma_delay != NULL) { 2796 (bus->methods->start_dma_delay) (xfer); 2797 } else { 2798 usbd_transfer_timeout_ms(xfer, 2799 (void (*)(void *))&usb_dma_delay_done_cb, 2800 temp); 2801 } 2802 USB_BUS_UNLOCK(bus); 2803 return (1); /* wait for new callback */ 2804 } 2805 } 2806 /* check actual number of frames */ 2807 if (xfer->aframes > xfer->nframes) { 2808 if (xfer->error == 0) { 2809 panic("%s: actual number of frames, %d, is " 2810 "greater than initial number of frames, %d\n", 2811 __FUNCTION__, xfer->aframes, xfer->nframes); 2812 } else { 2813 /* just set some valid value */ 2814 xfer->aframes = xfer->nframes; 2815 } 2816 } 2817 /* compute actual length */ 2818 xfer->actlen = 0; 2819 2820 for (x = 0; x != xfer->aframes; x++) { 2821 xfer->actlen += xfer->frlengths[x]; 2822 } 2823 2824 /* 2825 * Frames that were not transferred get zero actual length in 2826 * case the USB device driver does not check the actual number 2827 * of frames transferred, "xfer->aframes": 2828 */ 2829 for (; x < xfer->nframes; x++) { 2830 usbd_xfer_set_frame_len(xfer, x, 0); 2831 } 2832 2833 /* check actual length */ 2834 if (xfer->actlen > xfer->sumlen) { 2835 if (xfer->error == 0) { 2836 panic("%s: actual length, %d, is greater than " 2837 "initial length, %d\n", 2838 __FUNCTION__, xfer->actlen, xfer->sumlen); 2839 } else { 2840 /* just set some valid value */ 2841 xfer->actlen = xfer->sumlen; 2842 } 2843 } 2844 DPRINTFN(1, "xfer=%p endpoint=%p sts=%d alen=%d, slen=%d, afrm=%d, nfrm=%d\n", 2845 xfer, xfer->endpoint, xfer->error, xfer->actlen, xfer->sumlen, 2846 xfer->aframes, xfer->nframes); 2847 2848 if (xfer->error) { 2849 /* end of control transfer, if any */ 2850 xfer->flags_int.control_act = 0; 2851 2852 #if USB_HAVE_TT_SUPPORT 2853 switch (xfer->error) { 2854 case USB_ERR_NORMAL_COMPLETION: 2855 case USB_ERR_SHORT_XFER: 2856 case USB_ERR_STALLED: 2857 case USB_ERR_CANCELLED: 2858 /* nothing to do */ 2859 break; 2860 default: 2861 /* try to reset the TT, if any */ 2862 USB_BUS_LOCK(bus); 2863 uhub_tt_buffer_reset_async_locked(xfer->xroot->udev, xfer->endpoint); 2864 USB_BUS_UNLOCK(bus); 2865 break; 2866 } 2867 #endif 2868 /* check if we should block the execution queue */ 2869 if ((xfer->error != USB_ERR_CANCELLED) && 2870 (xfer->flags.pipe_bof)) { 2871 DPRINTFN(2, "xfer=%p: Block On Failure " 2872 "on endpoint=%p\n", xfer, xfer->endpoint); 2873 goto done; 2874 } 2875 } else { 2876 /* check for short transfers */ 2877 if (xfer->actlen < xfer->sumlen) { 2878 2879 /* end of control transfer, if any */ 2880 xfer->flags_int.control_act = 0; 2881 2882 if (!xfer->flags_int.short_xfer_ok) { 2883 xfer->error = USB_ERR_SHORT_XFER; 2884 if (xfer->flags.pipe_bof) { 2885 DPRINTFN(2, "xfer=%p: Block On Failure on " 2886 "Short Transfer on endpoint %p.\n", 2887 xfer, xfer->endpoint); 2888 goto done; 2889 } 2890 } 2891 } else { 2892 /* 2893 * Check if we are in the middle of a 2894 * control transfer: 2895 */ 2896 if (xfer->flags_int.control_act) { 2897 DPRINTFN(5, "xfer=%p: Control transfer " 2898 "active on endpoint=%p\n", xfer, xfer->endpoint); 2899 goto done; 2900 } 2901 } 2902 } 2903 2904 ep = xfer->endpoint; 2905 2906 /* 2907 * If the current USB transfer is completing we need to start the 2908 * next one: 2909 */ 2910 USB_BUS_LOCK(bus); 2911 if (ep->endpoint_q[xfer->stream_id].curr == xfer) { 2912 usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], NULL); 2913 2914 if (ep->endpoint_q[xfer->stream_id].curr != NULL || 
2915 TAILQ_FIRST(&ep->endpoint_q[xfer->stream_id].head) != NULL) {
2916 /* there is another USB transfer waiting */
2917 } else {
2918 /* this is the last USB transfer */
2919 /* clear isochronous sync flag */
2920 xfer->endpoint->is_synced = 0;
2921 }
2922 }
2923 USB_BUS_UNLOCK(bus);
2924 done:
2925 return (0);
2926 }
2927 
2928 /*------------------------------------------------------------------------*
2929 * usb_command_wrapper
2930 *
2931 * This function is used to execute commands non-recursively on an USB
2932 * transfer.
2933 *------------------------------------------------------------------------*/
2934 void
2935 usb_command_wrapper(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2936 {
2937 if (xfer) {
2938 /*
2939 * If the transfer is not already processing,
2940 * queue it!
2941 */
2942 if (pq->curr != xfer) {
2943 usbd_transfer_enqueue(pq, xfer);
2944 if (pq->curr != NULL) {
2945 /* something is already processing */
2946 DPRINTFN(6, "busy %p\n", pq->curr);
2947 return;
2948 }
2949 }
2950 } else {
2951 /* Get next element in queue */
2952 pq->curr = NULL;
2953 }
2954 
2955 if (!pq->recurse_1) {
2956 
2957 do {
2958 
2959 /* set both recurse flags */
2960 pq->recurse_1 = 1;
2961 pq->recurse_2 = 1;
2962 
2963 if (pq->curr == NULL) {
2964 xfer = TAILQ_FIRST(&pq->head);
2965 if (xfer) {
2966 TAILQ_REMOVE(&pq->head, xfer,
2967 wait_entry);
2968 xfer->wait_queue = NULL;
2969 pq->curr = xfer;
2970 } else {
2971 break;
2972 }
2973 }
2974 DPRINTFN(6, "cb %p (enter)\n", pq->curr);
2975 (pq->command) (pq);
2976 DPRINTFN(6, "cb %p (leave)\n", pq->curr);
2977 
2978 } while (!pq->recurse_2);
2979 
2980 /* clear first recurse flag */
2981 pq->recurse_1 = 0;
2982 
2983 } else {
2984 /* clear second recurse flag */
2985 pq->recurse_2 = 0;
2986 }
2987 }
2988 
2989 /*------------------------------------------------------------------------*
2990 * usbd_ctrl_transfer_setup
2991 *
2992 * This function is used to setup the default USB control endpoint
2993 * transfer.
2994 *------------------------------------------------------------------------*/
2995 void
2996 usbd_ctrl_transfer_setup(struct usb_device *udev)
2997 {
2998 struct usb_xfer *xfer;
2999 uint8_t no_resetup;
3000 uint8_t iface_index;
3001 
3002 /* check for root HUB */
3003 if (udev->parent_hub == NULL)
3004 return;
3005 repeat:
3006 
3007 xfer = udev->ctrl_xfer[0];
3008 if (xfer) {
3009 USB_XFER_LOCK(xfer);
3010 no_resetup =
3011 ((xfer->address == udev->address) &&
3012 (udev->ctrl_ep_desc.wMaxPacketSize[0] ==
3013 udev->ddesc.bMaxPacketSize));
3014 if (udev->flags.usb_mode == USB_MODE_DEVICE) {
3015 if (no_resetup) {
3016 /*
3017 * NOTE: checking "xfer->address" and
3018 * starting the USB transfer must be
3019 * atomic!
3020 */
3021 usbd_transfer_start(xfer);
3022 }
3023 }
3024 USB_XFER_UNLOCK(xfer);
3025 } else {
3026 no_resetup = 0;
3027 }
3028 
3029 if (no_resetup) {
3030 /*
3031 * All parameters are exactly the same as before.
3032 * Just return.
3033 */
3034 return;
3035 }
3036 /*
3037 * Update wMaxPacketSize for the default control endpoint:
3038 */
3039 udev->ctrl_ep_desc.wMaxPacketSize[0] =
3040 udev->ddesc.bMaxPacketSize;
3041 
3042 /*
3043 * Unsetup any existing USB transfer:
3044 */
3045 usbd_transfer_unsetup(udev->ctrl_xfer, USB_CTRL_XFER_MAX);
3046 
3047 /*
3048 * Reset clear stall error counter.
3049 */
3050 udev->clear_stall_errors = 0;
3051 
3052 /*
3053 * Try to setup a new USB transfer for the
3054 * default control endpoint:
3055 */
3056 iface_index = 0;
3057 if (usbd_transfer_setup(udev, &iface_index,
3058 udev->ctrl_xfer, usb_control_ep_cfg, USB_CTRL_XFER_MAX, NULL,
3059 &udev->device_mtx)) {
3060 DPRINTFN(0, "could not setup default "
3061 "USB transfer\n");
3062 } else {
3063 goto repeat;
3064 }
3065 }
3066 
3067 /*------------------------------------------------------------------------*
3068 * usbd_clear_stall_locked - factored out code
3069 *
3070 * NOTE: this function must be called with the USB bus lock held and
3071 * only invokes the bus controller's "clear_stall" method, if any.
3072 *------------------------------------------------------------------------*/
3073 void
3074 usbd_clear_stall_locked(struct usb_device *udev, struct usb_endpoint *ep)
3075 {
3076 USB_BUS_LOCK_ASSERT(udev->bus, MA_OWNED);
3077 
3078 /* check that we have a valid case */
3079 if (udev->flags.usb_mode == USB_MODE_HOST &&
3080 udev->parent_hub != NULL &&
3081 udev->bus->methods->clear_stall != NULL &&
3082 ep->methods != NULL) {
3083 (udev->bus->methods->clear_stall) (udev, ep);
3084 }
3085 }
3086 
3087 /*------------------------------------------------------------------------*
3088 * usbd_clear_data_toggle - factored out code
3089 *
3090 * NOTE: the intention of this function is not to reset the hardware
3091 * data toggle on the USB device side.
3092 *------------------------------------------------------------------------*/
3093 void
3094 usbd_clear_data_toggle(struct usb_device *udev, struct usb_endpoint *ep)
3095 {
3096 DPRINTFN(5, "udev=%p endpoint=%p\n", udev, ep);
3097 
3098 USB_BUS_LOCK(udev->bus);
3099 ep->toggle_next = 0;
3100 /* some hardware needs a callback to clear the data toggle */
3101 usbd_clear_stall_locked(udev, ep);
3102 USB_BUS_UNLOCK(udev->bus);
3103 }
3104 
3105 /*------------------------------------------------------------------------*
3106 * usbd_clear_stall_callback - factored out clear stall callback
3107 *
3108 * Input parameters:
3109 * xfer1: Clear Stall Control Transfer
3110 * xfer2: Stalled USB Transfer
3111 *
3112 * This function is NULL safe.
3113 *
3114 * Return values:
3115 * 0: In progress
3116 * Else: Finished
3117 *
3118 * Clear stall config example:
3119 *
3120 * static const struct usb_config my_clearstall = {
3121 * .type = UE_CONTROL,
3122 * .endpoint = 0,
3123 * .direction = UE_DIR_ANY,
3124 * .interval = 50, //50 milliseconds
3125 * .bufsize = sizeof(struct usb_device_request),
3126 * .timeout = 1000, //1.000 seconds
3127 * .callback = &my_clear_stall_callback, // **
3128 * .usb_mode = USB_MODE_HOST,
3129 * };
3130 *
3131 * ** "my_clear_stall_callback" calls "usbd_clear_stall_callback"
3132 * passing the correct parameters. (A sketch of such a callback appears at the end of this file.)
3133 *------------------------------------------------------------------------*/
3134 uint8_t
3135 usbd_clear_stall_callback(struct usb_xfer *xfer1,
3136 struct usb_xfer *xfer2)
3137 {
3138 struct usb_device_request req;
3139 
3140 if (xfer2 == NULL) {
3141 /* looks like we are tearing down */
3142 DPRINTF("NULL input parameter\n");
3143 return (0);
3144 }
3145 USB_XFER_LOCK_ASSERT(xfer1, MA_OWNED);
3146 USB_XFER_LOCK_ASSERT(xfer2, MA_OWNED);
3147 
3148 switch (USB_GET_STATE(xfer1)) {
3149 case USB_ST_SETUP:
3150 
3151 /*
3152 * pre-clear the data toggle to DATA0 ("umass.c" and
3153 * "ata-usb.c" depend on this)
3154 */
3155 
3156 usbd_clear_data_toggle(xfer2->xroot->udev, xfer2->endpoint);
3157 
3158 /* setup a clear-stall packet */
3159 
3160 req.bmRequestType = UT_WRITE_ENDPOINT;
3161 req.bRequest = UR_CLEAR_FEATURE;
3162 USETW(req.wValue, UF_ENDPOINT_HALT);
3163 req.wIndex[0] = xfer2->endpoint->edesc->bEndpointAddress;
3164 req.wIndex[1] = 0;
3165 USETW(req.wLength, 0);
3166 
3167 /*
3168 * "usbd_transfer_setup_sub()" will ensure that
3169 * we have sufficient room in the buffer for
3170 * the request structure!
3171 */
3172 
3173 /* copy in the transfer */
3174 
3175 usbd_copy_in(xfer1->frbuffers, 0, &req, sizeof(req));
3176 
3177 /* set length */
3178 xfer1->frlengths[0] = sizeof(req);
3179 xfer1->nframes = 1;
3180 
3181 usbd_transfer_submit(xfer1);
3182 return (0);
3183 
3184 case USB_ST_TRANSFERRED:
3185 break;
3186 
3187 default: /* Error */
3188 if (xfer1->error == USB_ERR_CANCELLED) {
3189 return (0);
3190 }
3191 break;
3192 }
3193 return (1); /* Clear Stall Finished */
3194 }
3195 
3196 /*------------------------------------------------------------------------*
3197 * usbd_transfer_poll
3198 *
3199 * The following function gets called from the USB keyboard driver and
3200 * UMASS when the system has panicked.
3201 *
3202 * NOTE: It is currently not possible to resume normal operation on
3203 * the USB controller which has been polled, due to clearing of the
3204 * "up_dsleep" and "up_msleep" flags.
3205 *------------------------------------------------------------------------*/ 3206 void 3207 usbd_transfer_poll(struct usb_xfer **ppxfer, uint16_t max) 3208 { 3209 struct usb_xfer *xfer; 3210 struct usb_xfer_root *xroot; 3211 struct usb_device *udev; 3212 struct usb_proc_msg *pm; 3213 uint16_t n; 3214 uint16_t drop_bus; 3215 uint16_t drop_xfer; 3216 3217 for (n = 0; n != max; n++) { 3218 /* Extra checks to avoid panic */ 3219 xfer = ppxfer[n]; 3220 if (xfer == NULL) 3221 continue; /* no USB transfer */ 3222 xroot = xfer->xroot; 3223 if (xroot == NULL) 3224 continue; /* no USB root */ 3225 udev = xroot->udev; 3226 if (udev == NULL) 3227 continue; /* no USB device */ 3228 if (udev->bus == NULL) 3229 continue; /* no BUS structure */ 3230 if (udev->bus->methods == NULL) 3231 continue; /* no BUS methods */ 3232 if (udev->bus->methods->xfer_poll == NULL) 3233 continue; /* no poll method */ 3234 3235 /* make sure that the BUS mutex is not locked */ 3236 drop_bus = 0; 3237 while (mtx_owned(&xroot->udev->bus->bus_mtx) && !SCHEDULER_STOPPED()) { 3238 mtx_unlock(&xroot->udev->bus->bus_mtx); 3239 drop_bus++; 3240 } 3241 3242 /* make sure that the transfer mutex is not locked */ 3243 drop_xfer = 0; 3244 while (mtx_owned(xroot->xfer_mtx) && !SCHEDULER_STOPPED()) { 3245 mtx_unlock(xroot->xfer_mtx); 3246 drop_xfer++; 3247 } 3248 3249 /* Make sure cv_signal() and cv_broadcast() is not called */ 3250 USB_BUS_CONTROL_XFER_PROC(udev->bus)->up_msleep = 0; 3251 USB_BUS_EXPLORE_PROC(udev->bus)->up_msleep = 0; 3252 USB_BUS_GIANT_PROC(udev->bus)->up_msleep = 0; 3253 USB_BUS_NON_GIANT_PROC(udev->bus)->up_msleep = 0; 3254 3255 /* poll USB hardware */ 3256 (udev->bus->methods->xfer_poll) (udev->bus); 3257 3258 USB_BUS_LOCK(xroot->bus); 3259 3260 /* check for clear stall */ 3261 if (udev->ctrl_xfer[1] != NULL) { 3262 3263 /* poll clear stall start */ 3264 pm = &udev->cs_msg[0].hdr; 3265 (pm->pm_callback) (pm); 3266 /* poll clear stall done thread */ 3267 pm = &udev->ctrl_xfer[1]-> 3268 xroot->done_m[0].hdr; 3269 (pm->pm_callback) (pm); 3270 } 3271 3272 /* poll done thread */ 3273 pm = &xroot->done_m[0].hdr; 3274 (pm->pm_callback) (pm); 3275 3276 USB_BUS_UNLOCK(xroot->bus); 3277 3278 /* restore transfer mutex */ 3279 while (drop_xfer--) 3280 mtx_lock(xroot->xfer_mtx); 3281 3282 /* restore BUS mutex */ 3283 while (drop_bus--) 3284 mtx_lock(&xroot->udev->bus->bus_mtx); 3285 } 3286 } 3287 3288 static void 3289 usbd_get_std_packet_size(struct usb_std_packet_size *ptr, 3290 uint8_t type, enum usb_dev_speed speed) 3291 { 3292 static const uint16_t intr_range_max[USB_SPEED_MAX] = { 3293 [USB_SPEED_LOW] = 8, 3294 [USB_SPEED_FULL] = 64, 3295 [USB_SPEED_HIGH] = 1024, 3296 [USB_SPEED_VARIABLE] = 1024, 3297 [USB_SPEED_SUPER] = 1024, 3298 }; 3299 3300 static const uint16_t isoc_range_max[USB_SPEED_MAX] = { 3301 [USB_SPEED_LOW] = 0, /* invalid */ 3302 [USB_SPEED_FULL] = 1023, 3303 [USB_SPEED_HIGH] = 1024, 3304 [USB_SPEED_VARIABLE] = 3584, 3305 [USB_SPEED_SUPER] = 1024, 3306 }; 3307 3308 static const uint16_t control_min[USB_SPEED_MAX] = { 3309 [USB_SPEED_LOW] = 8, 3310 [USB_SPEED_FULL] = 8, 3311 [USB_SPEED_HIGH] = 64, 3312 [USB_SPEED_VARIABLE] = 512, 3313 [USB_SPEED_SUPER] = 512, 3314 }; 3315 3316 static const uint16_t bulk_min[USB_SPEED_MAX] = { 3317 [USB_SPEED_LOW] = 8, 3318 [USB_SPEED_FULL] = 8, 3319 [USB_SPEED_HIGH] = 512, 3320 [USB_SPEED_VARIABLE] = 512, 3321 [USB_SPEED_SUPER] = 1024, 3322 }; 3323 3324 uint16_t temp; 3325 3326 memset(ptr, 0, sizeof(*ptr)); 3327 3328 switch (type) { 3329 case UE_INTERRUPT: 3330 ptr->range.max 
= intr_range_max[speed]; 3331 break; 3332 case UE_ISOCHRONOUS: 3333 ptr->range.max = isoc_range_max[speed]; 3334 break; 3335 default: 3336 if (type == UE_BULK) 3337 temp = bulk_min[speed]; 3338 else /* UE_CONTROL */ 3339 temp = control_min[speed]; 3340 3341 /* default is fixed */ 3342 ptr->fixed[0] = temp; 3343 ptr->fixed[1] = temp; 3344 ptr->fixed[2] = temp; 3345 ptr->fixed[3] = temp; 3346 3347 if (speed == USB_SPEED_FULL) { 3348 /* multiple sizes */ 3349 ptr->fixed[1] = 16; 3350 ptr->fixed[2] = 32; 3351 ptr->fixed[3] = 64; 3352 } 3353 if ((speed == USB_SPEED_VARIABLE) && 3354 (type == UE_BULK)) { 3355 /* multiple sizes */ 3356 ptr->fixed[2] = 1024; 3357 ptr->fixed[3] = 1536; 3358 } 3359 break; 3360 } 3361 } 3362 3363 void * 3364 usbd_xfer_softc(struct usb_xfer *xfer) 3365 { 3366 return (xfer->priv_sc); 3367 } 3368 3369 void * 3370 usbd_xfer_get_priv(struct usb_xfer *xfer) 3371 { 3372 return (xfer->priv_fifo); 3373 } 3374 3375 void 3376 usbd_xfer_set_priv(struct usb_xfer *xfer, void *ptr) 3377 { 3378 xfer->priv_fifo = ptr; 3379 } 3380 3381 uint8_t 3382 usbd_xfer_state(struct usb_xfer *xfer) 3383 { 3384 return (xfer->usb_state); 3385 } 3386 3387 void 3388 usbd_xfer_set_flag(struct usb_xfer *xfer, int flag) 3389 { 3390 switch (flag) { 3391 case USB_FORCE_SHORT_XFER: 3392 xfer->flags.force_short_xfer = 1; 3393 break; 3394 case USB_SHORT_XFER_OK: 3395 xfer->flags.short_xfer_ok = 1; 3396 break; 3397 case USB_MULTI_SHORT_OK: 3398 xfer->flags.short_frames_ok = 1; 3399 break; 3400 case USB_MANUAL_STATUS: 3401 xfer->flags.manual_status = 1; 3402 break; 3403 } 3404 } 3405 3406 void 3407 usbd_xfer_clr_flag(struct usb_xfer *xfer, int flag) 3408 { 3409 switch (flag) { 3410 case USB_FORCE_SHORT_XFER: 3411 xfer->flags.force_short_xfer = 0; 3412 break; 3413 case USB_SHORT_XFER_OK: 3414 xfer->flags.short_xfer_ok = 0; 3415 break; 3416 case USB_MULTI_SHORT_OK: 3417 xfer->flags.short_frames_ok = 0; 3418 break; 3419 case USB_MANUAL_STATUS: 3420 xfer->flags.manual_status = 0; 3421 break; 3422 } 3423 } 3424 3425 /* 3426 * The following function returns in milliseconds when the isochronous 3427 * transfer was completed by the hardware. The returned value wraps 3428 * around 65536 milliseconds. 3429 */ 3430 uint16_t 3431 usbd_xfer_get_timestamp(struct usb_xfer *xfer) 3432 { 3433 return (xfer->isoc_time_complete); 3434 } 3435
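/*------------------------------------------------------------------------*
 * Example: a typical data transfer callback
 *
 * This sketch is NOT part of this file. It shows the usual way a driver
 * callback combines "USB_GET_STATE()", "usbd_xfer_softc()",
 * "usbd_xfer_status()", "usbd_xfer_set_frame_data()",
 * "usbd_transfer_submit()" and "usbd_xfer_set_stall()". It assumes the
 * transfer was set up with ".flags.ext_buffer = 1" so that the
 * driver-supplied buffer can be loaded directly into DMA. The names
 * "mydev_softc", "sc_rx_buf" and "mydev_input()" are hypothetical.
 *
 * static void
 * mydev_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
 * {
 *	struct mydev_softc *sc = usbd_xfer_softc(xfer);
 *	int actlen;
 *
 *	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
 *
 *	switch (USB_GET_STATE(xfer)) {
 *	case USB_ST_TRANSFERRED:
 *		// "actlen" bytes are now valid in "sc->sc_rx_buf"
 *		mydev_input(sc, sc->sc_rx_buf, actlen);
 *		// FALLTHROUGH
 *	case USB_ST_SETUP:
 * tr_setup:
 *		usbd_xfer_set_frame_data(xfer, 0, sc->sc_rx_buf,
 *		    sizeof(sc->sc_rx_buf));
 *		usbd_transfer_submit(xfer);
 *		break;
 *	default:		// Error
 *		if (error != USB_ERR_CANCELLED) {
 *			// request an endpoint reset (clear-stall)
 *			// before the next submission
 *			usbd_xfer_set_stall(xfer);
 *			goto tr_setup;
 *		}
 *		break;
 *	}
 * }
 *------------------------------------------------------------------------*/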
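/*------------------------------------------------------------------------*
 * Example: draining a transfer before freeing its DMA-loaded buffer
 *
 * This sketch is NOT part of this file. It illustrates how a
 * hypothetical driver would typically use "usbd_transfer_drain()" as
 * documented above; the names "mydev_softc", "sc_rx_xfer" and
 * "sc_rx_buf" are invented for illustration, and the buffer is assumed
 * to have been allocated with malloc(9) and loaded with
 * "usbd_xfer_set_frame_data()".
 *
 * static void
 * mydev_stop_rx(struct mydev_softc *sc)
 * {
 *	// implies "usbd_transfer_stop()" and waits for any remaining
 *	// BUS-DMA and HW-DMA activity; may sleep and must be called
 *	// without the private transfer mutex held
 *	usbd_transfer_drain(sc->sc_rx_xfer);
 *
 *	// the buffer previously loaded into DMA may now be freed
 *	free(sc->sc_rx_buf, M_USBDEV);
 *	sc->sc_rx_buf = NULL;
 * }
 *------------------------------------------------------------------------*/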
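/*------------------------------------------------------------------------*
 * Example: walking the frames of a completed isochronous transfer
 *
 * This sketch is NOT part of this file. As described above for
 * "usbd_xfer_old_frame_length()", the length submitted for each frame
 * gives the stride between frame buffers, while "usbd_xfer_frame_len()"
 * returns the number of bytes actually transferred. "mydev_input()" and
 * "buf" are hypothetical.
 *
 * static void
 * mydev_isoc_read_complete(struct usb_xfer *xfer, uint8_t *buf)
 * {
 *	usb_frlength_t offset = 0;
 *	int aframes;
 *	int x;
 *
 *	usbd_xfer_status(xfer, NULL, NULL, &aframes, NULL);
 *
 *	for (x = 0; x != aframes; x++) {
 *		mydev_input(buf + offset, usbd_xfer_frame_len(xfer, x));
 *		// advance by the length that was submitted, not the
 *		// length that was actually transferred
 *		offset += usbd_xfer_old_frame_length(xfer, x);
 *	}
 *	// usbd_xfer_get_timestamp() returns the completion time of the
 *	// transfer in milliseconds, wrapping around at 65536 ms
 * }
 *------------------------------------------------------------------------*/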
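/*------------------------------------------------------------------------*
 * Example: adjusting per-transfer parameters before submission
 *
 * This fragment is NOT part of this file. It shows how the setters
 * documented above are typically used from the USB_ST_SETUP branch of a
 * callback, before "usbd_transfer_submit()" is called; "xfer" is
 * assumed to be the callback argument.
 *
 *	usbd_xfer_set_timeout(xfer, 1000);	// milliseconds
 *	usbd_xfer_set_frames(xfer, 1);
 *	usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
 *------------------------------------------------------------------------*/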
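/*------------------------------------------------------------------------*
 * Example: a clear-stall callback using "usbd_clear_stall_callback()"
 *
 * This sketch is NOT part of this file. It is the kind of callback
 * referred to by the "Clear stall config example" above. The stalled
 * data transfer "sc->sc_bulk_xfer" is hypothetical; both transfers must
 * share the same private mutex, which is the case when they were
 * created by the same "usbd_transfer_setup()" call.
 *
 * static void
 * my_clear_stall_callback(struct usb_xfer *xfer, usb_error_t error)
 * {
 *	struct mydev_softc *sc = usbd_xfer_softc(xfer);
 *
 *	if (usbd_clear_stall_callback(xfer, sc->sc_bulk_xfer)) {
 *		// the CLEAR_FEATURE(ENDPOINT_HALT) request has finished;
 *		// restart the previously stalled transfer
 *		usbd_transfer_start(sc->sc_bulk_xfer);
 *	}
 * }
 *------------------------------------------------------------------------*/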
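/*------------------------------------------------------------------------*
 * Example: toggling per-transfer flags at runtime
 *
 * This fragment is NOT part of this file. "usbd_xfer_set_flag()" and
 * "usbd_xfer_clr_flag()" documented above change the policy of an
 * already set up transfer; the transfer pointer name is hypothetical and
 * the calls are normally made with the private transfer mutex held,
 * before the transfer is (re)started.
 *
 *	// accept short reads instead of reporting USB_ERR_SHORT_XFER
 *	usbd_xfer_set_flag(sc->sc_rx_xfer, USB_SHORT_XFER_OK);
 *
 *	// later, require full-length transfers again
 *	usbd_xfer_clr_flag(sc->sc_rx_xfer, USB_SHORT_XFER_OK);
 *------------------------------------------------------------------------*/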