1 /* $FreeBSD$ */ 2 /*- 3 * Copyright (c) 2008 Hans Petter Selasky. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 27 #include <sys/stdint.h> 28 #include <sys/stddef.h> 29 #include <sys/param.h> 30 #include <sys/queue.h> 31 #include <sys/types.h> 32 #include <sys/systm.h> 33 #include <sys/kernel.h> 34 #include <sys/bus.h> 35 #include <sys/module.h> 36 #include <sys/lock.h> 37 #include <sys/mutex.h> 38 #include <sys/condvar.h> 39 #include <sys/sysctl.h> 40 #include <sys/sx.h> 41 #include <sys/unistd.h> 42 #include <sys/callout.h> 43 #include <sys/malloc.h> 44 #include <sys/priv.h> 45 #include <sys/proc.h> 46 47 #include <dev/usb/usb.h> 48 #include <dev/usb/usbdi.h> 49 #include <dev/usb/usbdi_util.h> 50 51 #define USB_DEBUG_VAR usb_debug 52 53 #include <dev/usb/usb_core.h> 54 #include <dev/usb/usb_busdma.h> 55 #include <dev/usb/usb_process.h> 56 #include <dev/usb/usb_transfer.h> 57 #include <dev/usb/usb_device.h> 58 #include <dev/usb/usb_debug.h> 59 #include <dev/usb/usb_util.h> 60 61 #include <dev/usb/usb_controller.h> 62 #include <dev/usb/usb_bus.h> 63 #include <dev/usb/usb_pf.h> 64 65 struct usb_std_packet_size { 66 struct { 67 uint16_t min; /* inclusive */ 68 uint16_t max; /* inclusive */ 69 } range; 70 71 uint16_t fixed[4]; 72 }; 73 74 static usb_callback_t usb_request_callback; 75 76 static const struct usb_config usb_control_ep_cfg[USB_CTRL_XFER_MAX] = { 77 78 /* This transfer is used for generic control endpoint transfers */ 79 80 [0] = { 81 .type = UE_CONTROL, 82 .endpoint = 0x00, /* Control endpoint */ 83 .direction = UE_DIR_ANY, 84 .bufsize = USB_EP0_BUFSIZE, /* bytes */ 85 .flags = {.proxy_buffer = 1,}, 86 .callback = &usb_request_callback, 87 .usb_mode = USB_MODE_DUAL, /* both modes */ 88 }, 89 90 /* This transfer is used for generic clear stall only */ 91 92 [1] = { 93 .type = UE_CONTROL, 94 .endpoint = 0x00, /* Control pipe */ 95 .direction = UE_DIR_ANY, 96 .bufsize = sizeof(struct usb_device_request), 97 .callback = &usb_do_clear_stall_callback, 98 .timeout = 1000, /* 1 second */ 99 .interval = 50, /* 50ms */ 100 .usb_mode = USB_MODE_HOST, 101 }, 102 }; 103 104 /* function prototypes */ 105 106 static void usbd_update_max_frame_size(struct usb_xfer *); 107 static void usbd_transfer_unsetup_sub(struct 
usb_xfer_root *, uint8_t); 108 static void usbd_control_transfer_init(struct usb_xfer *); 109 static int usbd_setup_ctrl_transfer(struct usb_xfer *); 110 static void usb_callback_proc(struct usb_proc_msg *); 111 static void usbd_callback_ss_done_defer(struct usb_xfer *); 112 static void usbd_callback_wrapper(struct usb_xfer_queue *); 113 static void usbd_transfer_start_cb(void *); 114 static uint8_t usbd_callback_wrapper_sub(struct usb_xfer *); 115 static void usbd_get_std_packet_size(struct usb_std_packet_size *ptr, 116 uint8_t type, enum usb_dev_speed speed); 117 118 /*------------------------------------------------------------------------* 119 * usb_request_callback 120 *------------------------------------------------------------------------*/ 121 static void 122 usb_request_callback(struct usb_xfer *xfer, usb_error_t error) 123 { 124 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) 125 usb_handle_request_callback(xfer, error); 126 else 127 usbd_do_request_callback(xfer, error); 128 } 129 130 /*------------------------------------------------------------------------* 131 * usbd_update_max_frame_size 132 * 133 * This function updates the maximum frame size, hence high speed USB 134 * can transfer multiple consecutive packets. 135 *------------------------------------------------------------------------*/ 136 static void 137 usbd_update_max_frame_size(struct usb_xfer *xfer) 138 { 139 /* compute maximum frame size */ 140 /* this computation should not overflow 16-bit */ 141 /* max = 15 * 1024 */ 142 143 xfer->max_frame_size = xfer->max_packet_size * xfer->max_packet_count; 144 } 145 146 /*------------------------------------------------------------------------* 147 * usbd_get_dma_delay 148 * 149 * The following function is called when we need to 150 * synchronize with DMA hardware. 151 * 152 * Returns: 153 * 0: no DMA delay required 154 * Else: milliseconds of DMA delay 155 *------------------------------------------------------------------------*/ 156 usb_timeout_t 157 usbd_get_dma_delay(struct usb_device *udev) 158 { 159 struct usb_bus_methods *mtod; 160 uint32_t temp; 161 162 mtod = udev->bus->methods; 163 temp = 0; 164 165 if (mtod->get_dma_delay) { 166 (mtod->get_dma_delay) (udev, &temp); 167 /* 168 * Round up and convert to milliseconds. Note that we use 169 * 1024 milliseconds per second. to save a division. 170 */ 171 temp += 0x3FF; 172 temp /= 0x400; 173 } 174 return (temp); 175 } 176 177 /*------------------------------------------------------------------------* 178 * usbd_transfer_setup_sub_malloc 179 * 180 * This function will allocate one or more DMA'able memory chunks 181 * according to "size", "align" and "count" arguments. "ppc" is 182 * pointed to a linear array of USB page caches afterwards. 183 * 184 * If the "align" argument is equal to "1" a non-contiguous allocation 185 * can happen. Else if the "align" argument is greater than "1", the 186 * allocation will always be contiguous in memory. 
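 *
 * Illustrative sketch (not from the original sources): a host
 * controller's "xfer_setup" method could reserve "ntd" transfer
 * descriptors of "sizeof(struct my_td)" bytes, aligned to their
 * own size, like this:
 *
 *      if (usbd_transfer_setup_sub_malloc(parm, &pc,
 *          sizeof(struct my_td), sizeof(struct my_td), ntd)) {
 *              parm->err = USB_ERR_NOMEM;
 *      }
 *
 * where "struct my_td", "pc" and "ntd" are hypothetical local
 * names. When "parm->buf" is NULL only the required sizes are
 * accumulated; the actual allocation happens on the second pass.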
187 * 188 * Returns: 189 * 0: Success 190 * Else: Failure 191 *------------------------------------------------------------------------*/ 192 #if USB_HAVE_BUSDMA 193 uint8_t 194 usbd_transfer_setup_sub_malloc(struct usb_setup_params *parm, 195 struct usb_page_cache **ppc, usb_size_t size, usb_size_t align, 196 usb_size_t count) 197 { 198 struct usb_page_cache *pc; 199 struct usb_page *pg; 200 void *buf; 201 usb_size_t n_dma_pc; 202 usb_size_t n_dma_pg; 203 usb_size_t n_obj; 204 usb_size_t x; 205 usb_size_t y; 206 usb_size_t r; 207 usb_size_t z; 208 209 USB_ASSERT(align > 0, ("Invalid alignment, 0x%08x\n", 210 align)); 211 USB_ASSERT(size > 0, ("Invalid size = 0\n")); 212 213 if (count == 0) { 214 return (0); /* nothing to allocate */ 215 } 216 /* 217 * Make sure that the size is aligned properly. 218 */ 219 size = -((-size) & (-align)); 220 221 /* 222 * Try multi-allocation chunks to reduce the number of DMA 223 * allocations, hence DMA allocations are slow. 224 */ 225 if (align == 1) { 226 /* special case - non-cached multi page DMA memory */ 227 n_dma_pc = count; 228 n_dma_pg = (2 + (size / USB_PAGE_SIZE)); 229 n_obj = 1; 230 } else if (size >= USB_PAGE_SIZE) { 231 n_dma_pc = count; 232 n_dma_pg = 1; 233 n_obj = 1; 234 } else { 235 /* compute number of objects per page */ 236 n_obj = (USB_PAGE_SIZE / size); 237 /* 238 * Compute number of DMA chunks, rounded up 239 * to nearest one: 240 */ 241 n_dma_pc = ((count + n_obj - 1) / n_obj); 242 n_dma_pg = 1; 243 } 244 245 /* 246 * DMA memory is allocated once, but mapped twice. That's why 247 * there is one list for auto-free and another list for 248 * non-auto-free which only holds the mapping and not the 249 * allocation. 250 */ 251 if (parm->buf == NULL) { 252 /* reserve memory (auto-free) */ 253 parm->dma_page_ptr += n_dma_pc * n_dma_pg; 254 parm->dma_page_cache_ptr += n_dma_pc; 255 256 /* reserve memory (no-auto-free) */ 257 parm->dma_page_ptr += count * n_dma_pg; 258 parm->xfer_page_cache_ptr += count; 259 return (0); 260 } 261 for (x = 0; x != n_dma_pc; x++) { 262 /* need to initialize the page cache */ 263 parm->dma_page_cache_ptr[x].tag_parent = 264 &parm->curr_xfer->xroot->dma_parent_tag; 265 } 266 for (x = 0; x != count; x++) { 267 /* need to initialize the page cache */ 268 parm->xfer_page_cache_ptr[x].tag_parent = 269 &parm->curr_xfer->xroot->dma_parent_tag; 270 } 271 272 if (ppc) { 273 *ppc = parm->xfer_page_cache_ptr; 274 } 275 r = count; /* set remainder count */ 276 z = n_obj * size; /* set allocation size */ 277 pc = parm->xfer_page_cache_ptr; 278 pg = parm->dma_page_ptr; 279 280 for (x = 0; x != n_dma_pc; x++) { 281 282 if (r < n_obj) { 283 /* compute last remainder */ 284 z = r * size; 285 n_obj = r; 286 } 287 if (usb_pc_alloc_mem(parm->dma_page_cache_ptr, 288 pg, z, align)) { 289 return (1); /* failure */ 290 } 291 /* Set beginning of current buffer */ 292 buf = parm->dma_page_cache_ptr->buffer; 293 /* Make room for one DMA page cache and one page */ 294 parm->dma_page_cache_ptr++; 295 pg += n_dma_pg; 296 297 for (y = 0; (y != n_obj); y++, r--, pc++, pg += n_dma_pg) { 298 299 /* Load sub-chunk into DMA */ 300 if (usb_pc_dmamap_create(pc, size)) { 301 return (1); /* failure */ 302 } 303 pc->buffer = USB_ADD_BYTES(buf, y * size); 304 pc->page_start = pg; 305 306 mtx_lock(pc->tag_parent->mtx); 307 if (usb_pc_load_mem(pc, size, 1 /* synchronous */ )) { 308 mtx_unlock(pc->tag_parent->mtx); 309 return (1); /* failure */ 310 } 311 mtx_unlock(pc->tag_parent->mtx); 312 } 313 } 314 315 parm->xfer_page_cache_ptr = pc; 316 
parm->dma_page_ptr = pg; 317 return (0); 318 } 319 #endif 320 321 /*------------------------------------------------------------------------* 322 * usbd_transfer_setup_sub - transfer setup subroutine 323 * 324 * This function must be called from the "xfer_setup" callback of the 325 * USB Host or Device controller driver when setting up an USB 326 * transfer. This function will setup correct packet sizes, buffer 327 * sizes, flags and more, that are stored in the "usb_xfer" 328 * structure. 329 *------------------------------------------------------------------------*/ 330 void 331 usbd_transfer_setup_sub(struct usb_setup_params *parm) 332 { 333 enum { 334 REQ_SIZE = 8, 335 MIN_PKT = 8, 336 }; 337 struct usb_xfer *xfer = parm->curr_xfer; 338 const struct usb_config *setup = parm->curr_setup; 339 struct usb_endpoint_ss_comp_descriptor *ecomp; 340 struct usb_endpoint_descriptor *edesc; 341 struct usb_std_packet_size std_size; 342 usb_frcount_t n_frlengths; 343 usb_frcount_t n_frbuffers; 344 usb_frcount_t x; 345 uint8_t type; 346 uint8_t zmps; 347 348 /* 349 * Sanity check. The following parameters must be initialized before 350 * calling this function. 351 */ 352 if ((parm->hc_max_packet_size == 0) || 353 (parm->hc_max_packet_count == 0) || 354 (parm->hc_max_frame_size == 0)) { 355 parm->err = USB_ERR_INVAL; 356 goto done; 357 } 358 edesc = xfer->endpoint->edesc; 359 ecomp = xfer->endpoint->ecomp; 360 361 type = (edesc->bmAttributes & UE_XFERTYPE); 362 363 xfer->flags = setup->flags; 364 xfer->nframes = setup->frames; 365 xfer->timeout = setup->timeout; 366 xfer->callback = setup->callback; 367 xfer->interval = setup->interval; 368 xfer->endpointno = edesc->bEndpointAddress; 369 xfer->max_packet_size = UGETW(edesc->wMaxPacketSize); 370 xfer->max_packet_count = 1; 371 /* make a shadow copy: */ 372 xfer->flags_int.usb_mode = parm->udev->flags.usb_mode; 373 374 parm->bufsize = setup->bufsize; 375 376 switch (parm->speed) { 377 case USB_SPEED_HIGH: 378 switch (type) { 379 case UE_ISOCHRONOUS: 380 case UE_INTERRUPT: 381 xfer->max_packet_count += 382 (xfer->max_packet_size >> 11) & 3; 383 384 /* check for invalid max packet count */ 385 if (xfer->max_packet_count > 3) 386 xfer->max_packet_count = 3; 387 break; 388 default: 389 break; 390 } 391 xfer->max_packet_size &= 0x7FF; 392 break; 393 case USB_SPEED_SUPER: 394 xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3; 395 396 if (ecomp != NULL) 397 xfer->max_packet_count += ecomp->bMaxBurst; 398 399 if ((xfer->max_packet_count == 0) || 400 (xfer->max_packet_count > 16)) 401 xfer->max_packet_count = 16; 402 403 switch (type) { 404 case UE_CONTROL: 405 xfer->max_packet_count = 1; 406 break; 407 case UE_ISOCHRONOUS: 408 if (ecomp != NULL) { 409 uint8_t mult; 410 411 mult = UE_GET_SS_ISO_MULT( 412 ecomp->bmAttributes) + 1; 413 if (mult > 3) 414 mult = 3; 415 416 xfer->max_packet_count *= mult; 417 } 418 break; 419 default: 420 break; 421 } 422 xfer->max_packet_size &= 0x7FF; 423 break; 424 default: 425 break; 426 } 427 /* range check "max_packet_count" */ 428 429 if (xfer->max_packet_count > parm->hc_max_packet_count) { 430 xfer->max_packet_count = parm->hc_max_packet_count; 431 } 432 /* filter "wMaxPacketSize" according to HC capabilities */ 433 434 if ((xfer->max_packet_size > parm->hc_max_packet_size) || 435 (xfer->max_packet_size == 0)) { 436 xfer->max_packet_size = parm->hc_max_packet_size; 437 } 438 /* filter "wMaxPacketSize" according to standard sizes */ 439 440 usbd_get_std_packet_size(&std_size, type, parm->speed); 441 442 if 
(std_size.range.min || std_size.range.max) { 443 444 if (xfer->max_packet_size < std_size.range.min) { 445 xfer->max_packet_size = std_size.range.min; 446 } 447 if (xfer->max_packet_size > std_size.range.max) { 448 xfer->max_packet_size = std_size.range.max; 449 } 450 } else { 451 452 if (xfer->max_packet_size >= std_size.fixed[3]) { 453 xfer->max_packet_size = std_size.fixed[3]; 454 } else if (xfer->max_packet_size >= std_size.fixed[2]) { 455 xfer->max_packet_size = std_size.fixed[2]; 456 } else if (xfer->max_packet_size >= std_size.fixed[1]) { 457 xfer->max_packet_size = std_size.fixed[1]; 458 } else { 459 /* only one possibility left */ 460 xfer->max_packet_size = std_size.fixed[0]; 461 } 462 } 463 464 /* compute "max_frame_size" */ 465 466 usbd_update_max_frame_size(xfer); 467 468 /* check interrupt interval and transfer pre-delay */ 469 470 if (type == UE_ISOCHRONOUS) { 471 472 uint16_t frame_limit; 473 474 xfer->interval = 0; /* not used, must be zero */ 475 xfer->flags_int.isochronous_xfr = 1; /* set flag */ 476 477 if (xfer->timeout == 0) { 478 /* 479 * set a default timeout in 480 * case something goes wrong! 481 */ 482 xfer->timeout = 1000 / 4; 483 } 484 switch (parm->speed) { 485 case USB_SPEED_LOW: 486 case USB_SPEED_FULL: 487 frame_limit = USB_MAX_FS_ISOC_FRAMES_PER_XFER; 488 xfer->fps_shift = 0; 489 break; 490 default: 491 frame_limit = USB_MAX_HS_ISOC_FRAMES_PER_XFER; 492 xfer->fps_shift = edesc->bInterval; 493 if (xfer->fps_shift > 0) 494 xfer->fps_shift--; 495 if (xfer->fps_shift > 3) 496 xfer->fps_shift = 3; 497 if (xfer->flags.pre_scale_frames != 0) 498 xfer->nframes <<= (3 - xfer->fps_shift); 499 break; 500 } 501 502 if (xfer->nframes > frame_limit) { 503 /* 504 * this is not going to work 505 * cross hardware 506 */ 507 parm->err = USB_ERR_INVAL; 508 goto done; 509 } 510 if (xfer->nframes == 0) { 511 /* 512 * this is not a valid value 513 */ 514 parm->err = USB_ERR_ZERO_NFRAMES; 515 goto done; 516 } 517 } else { 518 519 /* 520 * If a value is specified use that else check the 521 * endpoint descriptor! 522 */ 523 if (type == UE_INTERRUPT) { 524 525 uint32_t temp; 526 527 if (xfer->interval == 0) { 528 529 xfer->interval = edesc->bInterval; 530 531 switch (parm->speed) { 532 case USB_SPEED_LOW: 533 case USB_SPEED_FULL: 534 break; 535 default: 536 /* 125us -> 1ms */ 537 if (xfer->interval < 4) 538 xfer->interval = 1; 539 else if (xfer->interval > 16) 540 xfer->interval = (1 << (16 - 4)); 541 else 542 xfer->interval = 543 (1 << (xfer->interval - 4)); 544 break; 545 } 546 } 547 548 if (xfer->interval == 0) { 549 /* 550 * One millisecond is the smallest 551 * interval we support: 552 */ 553 xfer->interval = 1; 554 } 555 556 xfer->fps_shift = 0; 557 temp = 1; 558 559 while ((temp != 0) && (temp < xfer->interval)) { 560 xfer->fps_shift++; 561 temp *= 2; 562 } 563 564 switch (parm->speed) { 565 case USB_SPEED_LOW: 566 case USB_SPEED_FULL: 567 break; 568 default: 569 xfer->fps_shift += 3; 570 break; 571 } 572 } 573 } 574 575 /* 576 * NOTE: we do not allow "max_packet_size" or "max_frame_size" 577 * to be equal to zero when setting up USB transfers, hence 578 * this leads to alot of extra code in the USB kernel. 
579 */ 580 581 if ((xfer->max_frame_size == 0) || 582 (xfer->max_packet_size == 0)) { 583 584 zmps = 1; 585 586 if ((parm->bufsize <= MIN_PKT) && 587 (type != UE_CONTROL) && 588 (type != UE_BULK)) { 589 590 /* workaround */ 591 xfer->max_packet_size = MIN_PKT; 592 xfer->max_packet_count = 1; 593 parm->bufsize = 0; /* automatic setup length */ 594 usbd_update_max_frame_size(xfer); 595 596 } else { 597 parm->err = USB_ERR_ZERO_MAXP; 598 goto done; 599 } 600 601 } else { 602 zmps = 0; 603 } 604 605 /* 606 * check if we should setup a default 607 * length: 608 */ 609 610 if (parm->bufsize == 0) { 611 612 parm->bufsize = xfer->max_frame_size; 613 614 if (type == UE_ISOCHRONOUS) { 615 parm->bufsize *= xfer->nframes; 616 } 617 } 618 /* 619 * check if we are about to setup a proxy 620 * type of buffer: 621 */ 622 623 if (xfer->flags.proxy_buffer) { 624 625 /* round bufsize up */ 626 627 parm->bufsize += (xfer->max_frame_size - 1); 628 629 if (parm->bufsize < xfer->max_frame_size) { 630 /* length wrapped around */ 631 parm->err = USB_ERR_INVAL; 632 goto done; 633 } 634 /* subtract remainder */ 635 636 parm->bufsize -= (parm->bufsize % xfer->max_frame_size); 637 638 /* add length of USB device request structure, if any */ 639 640 if (type == UE_CONTROL) { 641 parm->bufsize += REQ_SIZE; /* SETUP message */ 642 } 643 } 644 xfer->max_data_length = parm->bufsize; 645 646 /* Setup "n_frlengths" and "n_frbuffers" */ 647 648 if (type == UE_ISOCHRONOUS) { 649 n_frlengths = xfer->nframes; 650 n_frbuffers = 1; 651 } else { 652 653 if (type == UE_CONTROL) { 654 xfer->flags_int.control_xfr = 1; 655 if (xfer->nframes == 0) { 656 if (parm->bufsize <= REQ_SIZE) { 657 /* 658 * there will never be any data 659 * stage 660 */ 661 xfer->nframes = 1; 662 } else { 663 xfer->nframes = 2; 664 } 665 } 666 } else { 667 if (xfer->nframes == 0) { 668 xfer->nframes = 1; 669 } 670 } 671 672 n_frlengths = xfer->nframes; 673 n_frbuffers = xfer->nframes; 674 } 675 676 /* 677 * check if we have room for the 678 * USB device request structure: 679 */ 680 681 if (type == UE_CONTROL) { 682 683 if (xfer->max_data_length < REQ_SIZE) { 684 /* length wrapped around or too small bufsize */ 685 parm->err = USB_ERR_INVAL; 686 goto done; 687 } 688 xfer->max_data_length -= REQ_SIZE; 689 } 690 /* 691 * Setup "frlengths" and shadow "frlengths" for keeping the 692 * initial frame lengths when a USB transfer is complete. This 693 * information is useful when computing isochronous offsets. 
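         *
         * A hedged example (not part of the original comment): after
         * an isochronous transfer completes, a driver can walk the
         * frames, reading the actual length of each frame while
         * advancing its data pointer by the length the frame was
         * submitted with:
         *
         *      for (x = 0; x != xfer->nframes; x++) {
         *              act = usbd_xfer_frame_len(xfer, x);
         *              ptr += usbd_xfer_old_frame_length(xfer, x);
         *      }
         *
         * where "act" and "ptr" are hypothetical driver variables.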
694 */ 695 xfer->frlengths = parm->xfer_length_ptr; 696 parm->xfer_length_ptr += 2 * n_frlengths; 697 698 /* setup "frbuffers" */ 699 xfer->frbuffers = parm->xfer_page_cache_ptr; 700 parm->xfer_page_cache_ptr += n_frbuffers; 701 702 /* initialize max frame count */ 703 xfer->max_frame_count = xfer->nframes; 704 705 /* 706 * check if we need to setup 707 * a local buffer: 708 */ 709 710 if (!xfer->flags.ext_buffer) { 711 #if USB_HAVE_BUSDMA 712 struct usb_page_search page_info; 713 struct usb_page_cache *pc; 714 715 if (usbd_transfer_setup_sub_malloc(parm, 716 &pc, parm->bufsize, 1, 1)) { 717 parm->err = USB_ERR_NOMEM; 718 } else if (parm->buf != NULL) { 719 720 usbd_get_page(pc, 0, &page_info); 721 722 xfer->local_buffer = page_info.buffer; 723 724 usbd_xfer_set_frame_offset(xfer, 0, 0); 725 726 if ((type == UE_CONTROL) && (n_frbuffers > 1)) { 727 usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1); 728 } 729 } 730 #else 731 /* align data */ 732 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1)); 733 734 if (parm->buf != NULL) { 735 xfer->local_buffer = 736 USB_ADD_BYTES(parm->buf, parm->size[0]); 737 738 usbd_xfer_set_frame_offset(xfer, 0, 0); 739 740 if ((type == UE_CONTROL) && (n_frbuffers > 1)) { 741 usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1); 742 } 743 } 744 parm->size[0] += parm->bufsize; 745 746 /* align data again */ 747 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1)); 748 #endif 749 } 750 /* 751 * Compute maximum buffer size 752 */ 753 754 if (parm->bufsize_max < parm->bufsize) { 755 parm->bufsize_max = parm->bufsize; 756 } 757 #if USB_HAVE_BUSDMA 758 if (xfer->flags_int.bdma_enable) { 759 /* 760 * Setup "dma_page_ptr". 761 * 762 * Proof for formula below: 763 * 764 * Assume there are three USB frames having length "a", "b" and 765 * "c". These USB frames will at maximum need "z" 766 * "usb_page" structures. 
"z" is given by: 767 * 768 * z = ((a / USB_PAGE_SIZE) + 2) + ((b / USB_PAGE_SIZE) + 2) + 769 * ((c / USB_PAGE_SIZE) + 2); 770 * 771 * Constraining "a", "b" and "c" like this: 772 * 773 * (a + b + c) <= parm->bufsize 774 * 775 * We know that: 776 * 777 * z <= ((parm->bufsize / USB_PAGE_SIZE) + (3*2)); 778 * 779 * Here is the general formula: 780 */ 781 xfer->dma_page_ptr = parm->dma_page_ptr; 782 parm->dma_page_ptr += (2 * n_frbuffers); 783 parm->dma_page_ptr += (parm->bufsize / USB_PAGE_SIZE); 784 } 785 #endif 786 if (zmps) { 787 /* correct maximum data length */ 788 xfer->max_data_length = 0; 789 } 790 /* subtract USB frame remainder from "hc_max_frame_size" */ 791 792 xfer->max_hc_frame_size = 793 (parm->hc_max_frame_size - 794 (parm->hc_max_frame_size % xfer->max_frame_size)); 795 796 if (xfer->max_hc_frame_size == 0) { 797 parm->err = USB_ERR_INVAL; 798 goto done; 799 } 800 801 /* initialize frame buffers */ 802 803 if (parm->buf) { 804 for (x = 0; x != n_frbuffers; x++) { 805 xfer->frbuffers[x].tag_parent = 806 &xfer->xroot->dma_parent_tag; 807 #if USB_HAVE_BUSDMA 808 if (xfer->flags_int.bdma_enable && 809 (parm->bufsize_max > 0)) { 810 811 if (usb_pc_dmamap_create( 812 xfer->frbuffers + x, 813 parm->bufsize_max)) { 814 parm->err = USB_ERR_NOMEM; 815 goto done; 816 } 817 } 818 #endif 819 } 820 } 821 done: 822 if (parm->err) { 823 /* 824 * Set some dummy values so that we avoid division by zero: 825 */ 826 xfer->max_hc_frame_size = 1; 827 xfer->max_frame_size = 1; 828 xfer->max_packet_size = 1; 829 xfer->max_data_length = 0; 830 xfer->nframes = 0; 831 xfer->max_frame_count = 0; 832 } 833 } 834 835 /*------------------------------------------------------------------------* 836 * usbd_transfer_setup - setup an array of USB transfers 837 * 838 * NOTE: You must always call "usbd_transfer_unsetup" after calling 839 * "usbd_transfer_setup" if success was returned. 840 * 841 * The idea is that the USB device driver should pre-allocate all its 842 * transfers by one call to this function. 
843 * 844 * Return values: 845 * 0: Success 846 * Else: Failure 847 *------------------------------------------------------------------------*/ 848 usb_error_t 849 usbd_transfer_setup(struct usb_device *udev, 850 const uint8_t *ifaces, struct usb_xfer **ppxfer, 851 const struct usb_config *setup_start, uint16_t n_setup, 852 void *priv_sc, struct mtx *xfer_mtx) 853 { 854 struct usb_xfer dummy; 855 struct usb_setup_params parm; 856 const struct usb_config *setup_end = setup_start + n_setup; 857 const struct usb_config *setup; 858 struct usb_endpoint *ep; 859 struct usb_xfer_root *info; 860 struct usb_xfer *xfer; 861 void *buf = NULL; 862 uint16_t n; 863 uint16_t refcount; 864 865 parm.err = 0; 866 refcount = 0; 867 info = NULL; 868 869 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 870 "usbd_transfer_setup can sleep!"); 871 872 /* do some checking first */ 873 874 if (n_setup == 0) { 875 DPRINTFN(6, "setup array has zero length!\n"); 876 return (USB_ERR_INVAL); 877 } 878 if (ifaces == 0) { 879 DPRINTFN(6, "ifaces array is NULL!\n"); 880 return (USB_ERR_INVAL); 881 } 882 if (xfer_mtx == NULL) { 883 DPRINTFN(6, "using global lock\n"); 884 xfer_mtx = &Giant; 885 } 886 /* sanity checks */ 887 for (setup = setup_start, n = 0; 888 setup != setup_end; setup++, n++) { 889 if (setup->bufsize == (usb_frlength_t)-1) { 890 parm.err = USB_ERR_BAD_BUFSIZE; 891 DPRINTF("invalid bufsize\n"); 892 } 893 if (setup->callback == NULL) { 894 parm.err = USB_ERR_NO_CALLBACK; 895 DPRINTF("no callback\n"); 896 } 897 ppxfer[n] = NULL; 898 } 899 900 if (parm.err) { 901 goto done; 902 } 903 memset(&parm, 0, sizeof(parm)); 904 905 parm.udev = udev; 906 parm.speed = usbd_get_speed(udev); 907 parm.hc_max_packet_count = 1; 908 909 if (parm.speed >= USB_SPEED_MAX) { 910 parm.err = USB_ERR_INVAL; 911 goto done; 912 } 913 /* setup all transfers */ 914 915 while (1) { 916 917 if (buf) { 918 /* 919 * Initialize the "usb_xfer_root" structure, 920 * which is common for all our USB transfers. 921 */ 922 info = USB_ADD_BYTES(buf, 0); 923 924 info->memory_base = buf; 925 info->memory_size = parm.size[0]; 926 927 #if USB_HAVE_BUSDMA 928 info->dma_page_cache_start = USB_ADD_BYTES(buf, parm.size[4]); 929 info->dma_page_cache_end = USB_ADD_BYTES(buf, parm.size[5]); 930 #endif 931 info->xfer_page_cache_start = USB_ADD_BYTES(buf, parm.size[5]); 932 info->xfer_page_cache_end = USB_ADD_BYTES(buf, parm.size[2]); 933 934 cv_init(&info->cv_drain, "WDRAIN"); 935 936 info->xfer_mtx = xfer_mtx; 937 #if USB_HAVE_BUSDMA 938 usb_dma_tag_setup(&info->dma_parent_tag, 939 parm.dma_tag_p, udev->bus->dma_parent_tag[0].tag, 940 xfer_mtx, &usb_bdma_done_event, 32, parm.dma_tag_max); 941 #endif 942 943 info->bus = udev->bus; 944 info->udev = udev; 945 946 TAILQ_INIT(&info->done_q.head); 947 info->done_q.command = &usbd_callback_wrapper; 948 #if USB_HAVE_BUSDMA 949 TAILQ_INIT(&info->dma_q.head); 950 info->dma_q.command = &usb_bdma_work_loop; 951 #endif 952 info->done_m[0].hdr.pm_callback = &usb_callback_proc; 953 info->done_m[0].xroot = info; 954 info->done_m[1].hdr.pm_callback = &usb_callback_proc; 955 info->done_m[1].xroot = info; 956 957 /* 958 * In device side mode control endpoint 959 * requests need to run from a separate 960 * context, else there is a chance of 961 * deadlock! 
962 */ 963 if (setup_start == usb_control_ep_cfg) 964 info->done_p = 965 &udev->bus->control_xfer_proc; 966 else if (xfer_mtx == &Giant) 967 info->done_p = 968 &udev->bus->giant_callback_proc; 969 else 970 info->done_p = 971 &udev->bus->non_giant_callback_proc; 972 } 973 /* reset sizes */ 974 975 parm.size[0] = 0; 976 parm.buf = buf; 977 parm.size[0] += sizeof(info[0]); 978 979 for (setup = setup_start, n = 0; 980 setup != setup_end; setup++, n++) { 981 982 /* skip USB transfers without callbacks: */ 983 if (setup->callback == NULL) { 984 continue; 985 } 986 /* see if there is a matching endpoint */ 987 ep = usbd_get_endpoint(udev, 988 ifaces[setup->if_index], setup); 989 990 /* 991 * Check that the USB PIPE is valid and that 992 * the endpoint mode is proper. 993 * 994 * Make sure we don't allocate a streams 995 * transfer when such a combination is not 996 * valid. 997 */ 998 if ((ep == NULL) || (ep->methods == NULL) || 999 ((ep->ep_mode != USB_EP_MODE_STREAMS) && 1000 (ep->ep_mode != USB_EP_MODE_DEFAULT)) || 1001 (setup->stream_id != 0 && 1002 (setup->stream_id >= USB_MAX_EP_STREAMS || 1003 (ep->ep_mode != USB_EP_MODE_STREAMS)))) { 1004 if (setup->flags.no_pipe_ok) 1005 continue; 1006 if ((setup->usb_mode != USB_MODE_DUAL) && 1007 (setup->usb_mode != udev->flags.usb_mode)) 1008 continue; 1009 parm.err = USB_ERR_NO_PIPE; 1010 goto done; 1011 } 1012 1013 /* align data properly */ 1014 parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1)); 1015 1016 /* store current setup pointer */ 1017 parm.curr_setup = setup; 1018 1019 if (buf) { 1020 /* 1021 * Common initialization of the 1022 * "usb_xfer" structure. 1023 */ 1024 xfer = USB_ADD_BYTES(buf, parm.size[0]); 1025 xfer->address = udev->address; 1026 xfer->priv_sc = priv_sc; 1027 xfer->xroot = info; 1028 1029 usb_callout_init_mtx(&xfer->timeout_handle, 1030 &udev->bus->bus_mtx, 0); 1031 } else { 1032 /* 1033 * Setup a dummy xfer, hence we are 1034 * writing to the "usb_xfer" 1035 * structure pointed to by "xfer" 1036 * before we have allocated any 1037 * memory: 1038 */ 1039 xfer = &dummy; 1040 memset(&dummy, 0, sizeof(dummy)); 1041 refcount++; 1042 } 1043 1044 /* set transfer endpoint pointer */ 1045 xfer->endpoint = ep; 1046 1047 /* set transfer stream ID */ 1048 xfer->stream_id = setup->stream_id; 1049 1050 parm.size[0] += sizeof(xfer[0]); 1051 parm.methods = xfer->endpoint->methods; 1052 parm.curr_xfer = xfer; 1053 1054 /* 1055 * Call the Host or Device controller transfer 1056 * setup routine: 1057 */ 1058 (udev->bus->methods->xfer_setup) (&parm); 1059 1060 /* check for error */ 1061 if (parm.err) 1062 goto done; 1063 1064 if (buf) { 1065 /* 1066 * Increment the endpoint refcount. This 1067 * basically prevents setting a new 1068 * configuration and alternate setting 1069 * when USB transfers are in use on 1070 * the given interface. Search the USB 1071 * code for "endpoint->refcount_alloc" if you 1072 * want more information. 
1073 */ 1074 USB_BUS_LOCK(info->bus); 1075 if (xfer->endpoint->refcount_alloc >= USB_EP_REF_MAX) 1076 parm.err = USB_ERR_INVAL; 1077 1078 xfer->endpoint->refcount_alloc++; 1079 1080 if (xfer->endpoint->refcount_alloc == 0) 1081 panic("usbd_transfer_setup(): Refcount wrapped to zero\n"); 1082 USB_BUS_UNLOCK(info->bus); 1083 1084 /* 1085 * Whenever we set ppxfer[] then we 1086 * also need to increment the 1087 * "setup_refcount": 1088 */ 1089 info->setup_refcount++; 1090 1091 /* 1092 * Transfer is successfully setup and 1093 * can be used: 1094 */ 1095 ppxfer[n] = xfer; 1096 } 1097 1098 /* check for error */ 1099 if (parm.err) 1100 goto done; 1101 } 1102 1103 if (buf || parm.err) { 1104 goto done; 1105 } 1106 if (refcount == 0) { 1107 /* no transfers - nothing to do ! */ 1108 goto done; 1109 } 1110 /* align data properly */ 1111 parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1)); 1112 1113 /* store offset temporarily */ 1114 parm.size[1] = parm.size[0]; 1115 1116 /* 1117 * The number of DMA tags required depends on 1118 * the number of endpoints. The current estimate 1119 * for maximum number of DMA tags per endpoint 1120 * is three: 1121 * 1) for loading memory 1122 * 2) for allocating memory 1123 * 3) for fixing memory [UHCI] 1124 */ 1125 parm.dma_tag_max += 3 * MIN(n_setup, USB_EP_MAX); 1126 1127 /* 1128 * DMA tags for QH, TD, Data and more. 1129 */ 1130 parm.dma_tag_max += 8; 1131 1132 parm.dma_tag_p += parm.dma_tag_max; 1133 1134 parm.size[0] += ((uint8_t *)parm.dma_tag_p) - 1135 ((uint8_t *)0); 1136 1137 /* align data properly */ 1138 parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1)); 1139 1140 /* store offset temporarily */ 1141 parm.size[3] = parm.size[0]; 1142 1143 parm.size[0] += ((uint8_t *)parm.dma_page_ptr) - 1144 ((uint8_t *)0); 1145 1146 /* align data properly */ 1147 parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1)); 1148 1149 /* store offset temporarily */ 1150 parm.size[4] = parm.size[0]; 1151 1152 parm.size[0] += ((uint8_t *)parm.dma_page_cache_ptr) - 1153 ((uint8_t *)0); 1154 1155 /* store end offset temporarily */ 1156 parm.size[5] = parm.size[0]; 1157 1158 parm.size[0] += ((uint8_t *)parm.xfer_page_cache_ptr) - 1159 ((uint8_t *)0); 1160 1161 /* store end offset temporarily */ 1162 1163 parm.size[2] = parm.size[0]; 1164 1165 /* align data properly */ 1166 parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1)); 1167 1168 parm.size[6] = parm.size[0]; 1169 1170 parm.size[0] += ((uint8_t *)parm.xfer_length_ptr) - 1171 ((uint8_t *)0); 1172 1173 /* align data properly */ 1174 parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1)); 1175 1176 /* allocate zeroed memory */ 1177 buf = malloc(parm.size[0], M_USB, M_WAITOK | M_ZERO); 1178 1179 if (buf == NULL) { 1180 parm.err = USB_ERR_NOMEM; 1181 DPRINTFN(0, "cannot allocate memory block for " 1182 "configuration (%d bytes)\n", 1183 parm.size[0]); 1184 goto done; 1185 } 1186 parm.dma_tag_p = USB_ADD_BYTES(buf, parm.size[1]); 1187 parm.dma_page_ptr = USB_ADD_BYTES(buf, parm.size[3]); 1188 parm.dma_page_cache_ptr = USB_ADD_BYTES(buf, parm.size[4]); 1189 parm.xfer_page_cache_ptr = USB_ADD_BYTES(buf, parm.size[5]); 1190 parm.xfer_length_ptr = USB_ADD_BYTES(buf, parm.size[6]); 1191 } 1192 1193 done: 1194 if (buf) { 1195 if (info->setup_refcount == 0) { 1196 /* 1197 * "usbd_transfer_unsetup_sub" will unlock 1198 * the bus mutex before returning ! 
1199 */ 1200 USB_BUS_LOCK(info->bus); 1201 1202 /* something went wrong */ 1203 usbd_transfer_unsetup_sub(info, 0); 1204 } 1205 } 1206 if (parm.err) { 1207 usbd_transfer_unsetup(ppxfer, n_setup); 1208 } 1209 return (parm.err); 1210 } 1211 1212 /*------------------------------------------------------------------------* 1213 * usbd_transfer_unsetup_sub - factored out code 1214 *------------------------------------------------------------------------*/ 1215 static void 1216 usbd_transfer_unsetup_sub(struct usb_xfer_root *info, uint8_t needs_delay) 1217 { 1218 #if USB_HAVE_BUSDMA 1219 struct usb_page_cache *pc; 1220 #endif 1221 1222 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED); 1223 1224 /* wait for any outstanding DMA operations */ 1225 1226 if (needs_delay) { 1227 usb_timeout_t temp; 1228 temp = usbd_get_dma_delay(info->udev); 1229 if (temp != 0) { 1230 usb_pause_mtx(&info->bus->bus_mtx, 1231 USB_MS_TO_TICKS(temp)); 1232 } 1233 } 1234 1235 /* make sure that our done messages are not queued anywhere */ 1236 usb_proc_mwait(info->done_p, &info->done_m[0], &info->done_m[1]); 1237 1238 USB_BUS_UNLOCK(info->bus); 1239 1240 #if USB_HAVE_BUSDMA 1241 /* free DMA'able memory, if any */ 1242 pc = info->dma_page_cache_start; 1243 while (pc != info->dma_page_cache_end) { 1244 usb_pc_free_mem(pc); 1245 pc++; 1246 } 1247 1248 /* free DMA maps in all "xfer->frbuffers" */ 1249 pc = info->xfer_page_cache_start; 1250 while (pc != info->xfer_page_cache_end) { 1251 usb_pc_dmamap_destroy(pc); 1252 pc++; 1253 } 1254 1255 /* free all DMA tags */ 1256 usb_dma_tag_unsetup(&info->dma_parent_tag); 1257 #endif 1258 1259 cv_destroy(&info->cv_drain); 1260 1261 /* 1262 * free the "memory_base" last, hence the "info" structure is 1263 * contained within the "memory_base"! 1264 */ 1265 free(info->memory_base, M_USB); 1266 } 1267 1268 /*------------------------------------------------------------------------* 1269 * usbd_transfer_unsetup - unsetup/free an array of USB transfers 1270 * 1271 * NOTE: All USB transfers in progress will get called back passing 1272 * the error code "USB_ERR_CANCELLED" before this function 1273 * returns. 1274 *------------------------------------------------------------------------*/ 1275 void 1276 usbd_transfer_unsetup(struct usb_xfer **pxfer, uint16_t n_setup) 1277 { 1278 struct usb_xfer *xfer; 1279 struct usb_xfer_root *info; 1280 uint8_t needs_delay = 0; 1281 1282 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 1283 "usbd_transfer_unsetup can sleep!"); 1284 1285 while (n_setup--) { 1286 xfer = pxfer[n_setup]; 1287 1288 if (xfer == NULL) 1289 continue; 1290 1291 info = xfer->xroot; 1292 1293 USB_XFER_LOCK(xfer); 1294 USB_BUS_LOCK(info->bus); 1295 1296 /* 1297 * HINT: when you start/stop a transfer, it might be a 1298 * good idea to directly use the "pxfer[]" structure: 1299 * 1300 * usbd_transfer_start(sc->pxfer[0]); 1301 * usbd_transfer_stop(sc->pxfer[0]); 1302 * 1303 * That way, if your code has many parts that will not 1304 * stop running under the same lock, in other words 1305 * "xfer_mtx", the usbd_transfer_start and 1306 * usbd_transfer_stop functions will simply return 1307 * when they detect a NULL pointer argument. 
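                 *
                 * As an added, hypothetical illustration (names are
                 * examples only), a driver detach routine typically
                 * just calls:
                 *
                 *      usbd_transfer_unsetup(sc->sc_xfer, MY_N_TRANSFER);
                 *
                 * and lets this function stop, drain and free each
                 * transfer in the array.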
1308 * 1309 * To avoid any races we clear the "pxfer[]" pointer 1310 * while holding the private mutex of the driver: 1311 */ 1312 pxfer[n_setup] = NULL; 1313 1314 USB_BUS_UNLOCK(info->bus); 1315 USB_XFER_UNLOCK(xfer); 1316 1317 usbd_transfer_drain(xfer); 1318 1319 #if USB_HAVE_BUSDMA 1320 if (xfer->flags_int.bdma_enable) 1321 needs_delay = 1; 1322 #endif 1323 /* 1324 * NOTE: default endpoint does not have an 1325 * interface, even if endpoint->iface_index == 0 1326 */ 1327 USB_BUS_LOCK(info->bus); 1328 xfer->endpoint->refcount_alloc--; 1329 USB_BUS_UNLOCK(info->bus); 1330 1331 usb_callout_drain(&xfer->timeout_handle); 1332 1333 USB_BUS_LOCK(info->bus); 1334 1335 USB_ASSERT(info->setup_refcount != 0, ("Invalid setup " 1336 "reference count\n")); 1337 1338 info->setup_refcount--; 1339 1340 if (info->setup_refcount == 0) { 1341 usbd_transfer_unsetup_sub(info, 1342 needs_delay); 1343 } else { 1344 USB_BUS_UNLOCK(info->bus); 1345 } 1346 } 1347 } 1348 1349 /*------------------------------------------------------------------------* 1350 * usbd_control_transfer_init - factored out code 1351 * 1352 * In USB Device Mode we have to wait for the SETUP packet which 1353 * containst the "struct usb_device_request" structure, before we can 1354 * transfer any data. In USB Host Mode we already have the SETUP 1355 * packet at the moment the USB transfer is started. This leads us to 1356 * having to setup the USB transfer at two different places in 1357 * time. This function just contains factored out control transfer 1358 * initialisation code, so that we don't duplicate the code. 1359 *------------------------------------------------------------------------*/ 1360 static void 1361 usbd_control_transfer_init(struct usb_xfer *xfer) 1362 { 1363 struct usb_device_request req; 1364 1365 /* copy out the USB request header */ 1366 1367 usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req)); 1368 1369 /* setup remainder */ 1370 1371 xfer->flags_int.control_rem = UGETW(req.wLength); 1372 1373 /* copy direction to endpoint variable */ 1374 1375 xfer->endpointno &= ~(UE_DIR_IN | UE_DIR_OUT); 1376 xfer->endpointno |= 1377 (req.bmRequestType & UT_READ) ? UE_DIR_IN : UE_DIR_OUT; 1378 } 1379 1380 /*------------------------------------------------------------------------* 1381 * usbd_setup_ctrl_transfer 1382 * 1383 * This function handles initialisation of control transfers. Control 1384 * transfers are special in that regard that they can both transmit 1385 * and receive data. 1386 * 1387 * Return values: 1388 * 0: Success 1389 * Else: Failure 1390 *------------------------------------------------------------------------*/ 1391 static int 1392 usbd_setup_ctrl_transfer(struct usb_xfer *xfer) 1393 { 1394 usb_frlength_t len; 1395 1396 /* Check for control endpoint stall */ 1397 if (xfer->flags.stall_pipe && xfer->flags_int.control_act) { 1398 /* the control transfer is no longer active */ 1399 xfer->flags_int.control_stall = 1; 1400 xfer->flags_int.control_act = 0; 1401 } else { 1402 /* don't stall control transfer by default */ 1403 xfer->flags_int.control_stall = 0; 1404 } 1405 1406 /* Check for invalid number of frames */ 1407 if (xfer->nframes > 2) { 1408 /* 1409 * If you need to split a control transfer, you 1410 * have to do one part at a time. Only with 1411 * non-control transfers you can do multiple 1412 * parts a time. 
1413 */ 1414 DPRINTFN(0, "Too many frames: %u\n", 1415 (unsigned int)xfer->nframes); 1416 goto error; 1417 } 1418 1419 /* 1420 * Check if there is a control 1421 * transfer in progress: 1422 */ 1423 if (xfer->flags_int.control_act) { 1424 1425 if (xfer->flags_int.control_hdr) { 1426 1427 /* clear send header flag */ 1428 1429 xfer->flags_int.control_hdr = 0; 1430 1431 /* setup control transfer */ 1432 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) { 1433 usbd_control_transfer_init(xfer); 1434 } 1435 } 1436 /* get data length */ 1437 1438 len = xfer->sumlen; 1439 1440 } else { 1441 1442 /* the size of the SETUP structure is hardcoded ! */ 1443 1444 if (xfer->frlengths[0] != sizeof(struct usb_device_request)) { 1445 DPRINTFN(0, "Wrong framelength %u != %zu\n", 1446 xfer->frlengths[0], sizeof(struct 1447 usb_device_request)); 1448 goto error; 1449 } 1450 /* check USB mode */ 1451 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) { 1452 1453 /* check number of frames */ 1454 if (xfer->nframes != 1) { 1455 /* 1456 * We need to receive the setup 1457 * message first so that we know the 1458 * data direction! 1459 */ 1460 DPRINTF("Misconfigured transfer\n"); 1461 goto error; 1462 } 1463 /* 1464 * Set a dummy "control_rem" value. This 1465 * variable will be overwritten later by a 1466 * call to "usbd_control_transfer_init()" ! 1467 */ 1468 xfer->flags_int.control_rem = 0xFFFF; 1469 } else { 1470 1471 /* setup "endpoint" and "control_rem" */ 1472 1473 usbd_control_transfer_init(xfer); 1474 } 1475 1476 /* set transfer-header flag */ 1477 1478 xfer->flags_int.control_hdr = 1; 1479 1480 /* get data length */ 1481 1482 len = (xfer->sumlen - sizeof(struct usb_device_request)); 1483 } 1484 1485 /* check if there is a length mismatch */ 1486 1487 if (len > xfer->flags_int.control_rem) { 1488 DPRINTFN(0, "Length (%d) greater than " 1489 "remaining length (%d)\n", len, 1490 xfer->flags_int.control_rem); 1491 goto error; 1492 } 1493 /* check if we are doing a short transfer */ 1494 1495 if (xfer->flags.force_short_xfer) { 1496 xfer->flags_int.control_rem = 0; 1497 } else { 1498 if ((len != xfer->max_data_length) && 1499 (len != xfer->flags_int.control_rem) && 1500 (xfer->nframes != 1)) { 1501 DPRINTFN(0, "Short control transfer without " 1502 "force_short_xfer set\n"); 1503 goto error; 1504 } 1505 xfer->flags_int.control_rem -= len; 1506 } 1507 1508 /* the status part is executed when "control_act" is 0 */ 1509 1510 if ((xfer->flags_int.control_rem > 0) || 1511 (xfer->flags.manual_status)) { 1512 /* don't execute the STATUS stage yet */ 1513 xfer->flags_int.control_act = 1; 1514 1515 /* sanity check */ 1516 if ((!xfer->flags_int.control_hdr) && 1517 (xfer->nframes == 1)) { 1518 /* 1519 * This is not a valid operation! 1520 */ 1521 DPRINTFN(0, "Invalid parameter " 1522 "combination\n"); 1523 goto error; 1524 } 1525 } else { 1526 /* time to execute the STATUS stage */ 1527 xfer->flags_int.control_act = 0; 1528 } 1529 return (0); /* success */ 1530 1531 error: 1532 return (1); /* failure */ 1533 } 1534 1535 /*------------------------------------------------------------------------* 1536 * usbd_transfer_submit - start USB hardware for the given transfer 1537 * 1538 * This function should only be called from the USB callback. 
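 *
 * Hypothetical usage sketch (driver-local names such as "sc",
 * "my_config" and "my_bulk_read_callback" are examples only and
 * not part of this file). A driver typically sets up its transfers
 * once with "usbd_transfer_setup()" and then re-submits from the
 * callback:
 *
 *      static const struct usb_config my_config[1] = {
 *              [0] = {
 *                      .type = UE_BULK,
 *                      .endpoint = UE_ADDR_ANY,
 *                      .direction = UE_DIR_IN,
 *                      .bufsize = 512,
 *                      .callback = &my_bulk_read_callback,
 *              },
 *      };
 *
 *      static void
 *      my_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
 *      {
 *              switch (USB_GET_STATE(xfer)) {
 *              case USB_ST_TRANSFERRED:
 *                      ... consume the received data ...
 *              case USB_ST_SETUP:
 *                      usbd_xfer_set_frame_len(xfer, 0,
 *                          usbd_xfer_max_len(xfer));
 *                      usbd_transfer_submit(xfer);
 *                      break;
 *              default:
 *                      break;
 *              }
 *      }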
1539 *------------------------------------------------------------------------*/ 1540 void 1541 usbd_transfer_submit(struct usb_xfer *xfer) 1542 { 1543 struct usb_xfer_root *info; 1544 struct usb_bus *bus; 1545 usb_frcount_t x; 1546 1547 info = xfer->xroot; 1548 bus = info->bus; 1549 1550 DPRINTF("xfer=%p, endpoint=%p, nframes=%d, dir=%s\n", 1551 xfer, xfer->endpoint, xfer->nframes, USB_GET_DATA_ISREAD(xfer) ? 1552 "read" : "write"); 1553 1554 #ifdef USB_DEBUG 1555 if (USB_DEBUG_VAR > 0) { 1556 USB_BUS_LOCK(bus); 1557 1558 usb_dump_endpoint(xfer->endpoint); 1559 1560 USB_BUS_UNLOCK(bus); 1561 } 1562 #endif 1563 1564 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 1565 USB_BUS_LOCK_ASSERT(bus, MA_NOTOWNED); 1566 1567 /* Only open the USB transfer once! */ 1568 if (!xfer->flags_int.open) { 1569 xfer->flags_int.open = 1; 1570 1571 DPRINTF("open\n"); 1572 1573 USB_BUS_LOCK(bus); 1574 (xfer->endpoint->methods->open) (xfer); 1575 USB_BUS_UNLOCK(bus); 1576 } 1577 /* set "transferring" flag */ 1578 xfer->flags_int.transferring = 1; 1579 1580 #if USB_HAVE_POWERD 1581 /* increment power reference */ 1582 usbd_transfer_power_ref(xfer, 1); 1583 #endif 1584 /* 1585 * Check if the transfer is waiting on a queue, most 1586 * frequently the "done_q": 1587 */ 1588 if (xfer->wait_queue) { 1589 USB_BUS_LOCK(bus); 1590 usbd_transfer_dequeue(xfer); 1591 USB_BUS_UNLOCK(bus); 1592 } 1593 /* clear "did_dma_delay" flag */ 1594 xfer->flags_int.did_dma_delay = 0; 1595 1596 /* clear "did_close" flag */ 1597 xfer->flags_int.did_close = 0; 1598 1599 #if USB_HAVE_BUSDMA 1600 /* clear "bdma_setup" flag */ 1601 xfer->flags_int.bdma_setup = 0; 1602 #endif 1603 /* by default we cannot cancel any USB transfer immediately */ 1604 xfer->flags_int.can_cancel_immed = 0; 1605 1606 /* clear lengths and frame counts by default */ 1607 xfer->sumlen = 0; 1608 xfer->actlen = 0; 1609 xfer->aframes = 0; 1610 1611 /* clear any previous errors */ 1612 xfer->error = 0; 1613 1614 /* Check if the device is still alive */ 1615 if (info->udev->state < USB_STATE_POWERED) { 1616 USB_BUS_LOCK(bus); 1617 /* 1618 * Must return cancelled error code else 1619 * device drivers can hang. 
                 */
                usbd_transfer_done(xfer, USB_ERR_CANCELLED);
                USB_BUS_UNLOCK(bus);
                return;
        }

        /* sanity check */
        if (xfer->nframes == 0) {
                if (xfer->flags.stall_pipe) {
                        /*
                         * Special case - want to stall without transferring
                         * any data:
                         */
                        DPRINTF("xfer=%p nframes=0: stall "
                            "or clear stall!\n", xfer);
                        USB_BUS_LOCK(bus);
                        xfer->flags_int.can_cancel_immed = 1;
                        /* start the transfer */
                        usb_command_wrapper(&xfer->endpoint->
                            endpoint_q[xfer->stream_id], xfer);
                        USB_BUS_UNLOCK(bus);
                        return;
                }
                USB_BUS_LOCK(bus);
                usbd_transfer_done(xfer, USB_ERR_INVAL);
                USB_BUS_UNLOCK(bus);
                return;
        }
        /* compute some variables */

        for (x = 0; x != xfer->nframes; x++) {
                /* make a copy of the frlengths[] */
                xfer->frlengths[x + xfer->max_frame_count] = xfer->frlengths[x];
                /* compute total transfer length */
                xfer->sumlen += xfer->frlengths[x];
                if (xfer->sumlen < xfer->frlengths[x]) {
                        /* length wrapped around */
                        USB_BUS_LOCK(bus);
                        usbd_transfer_done(xfer, USB_ERR_INVAL);
                        USB_BUS_UNLOCK(bus);
                        return;
                }
        }

        /* clear some internal flags */

        xfer->flags_int.short_xfer_ok = 0;
        xfer->flags_int.short_frames_ok = 0;

        /* check if this is a control transfer */

        if (xfer->flags_int.control_xfr) {

                if (usbd_setup_ctrl_transfer(xfer)) {
                        USB_BUS_LOCK(bus);
                        usbd_transfer_done(xfer, USB_ERR_STALLED);
                        USB_BUS_UNLOCK(bus);
                        return;
                }
        }
        /*
         * Set up a filtered version of some transfer flags,
         * in case of data read direction
         */
        if (USB_GET_DATA_ISREAD(xfer)) {

                if (xfer->flags.short_frames_ok) {
                        xfer->flags_int.short_xfer_ok = 1;
                        xfer->flags_int.short_frames_ok = 1;
                } else if (xfer->flags.short_xfer_ok) {
                        xfer->flags_int.short_xfer_ok = 1;

                        /* check for control transfer */
                        if (xfer->flags_int.control_xfr) {
                                /*
                                 * 1) Control transfers do not support
                                 * reception of multiple short USB
                                 * frames in host mode and device side
                                 * mode, with the exception of:
                                 *
                                 * 2) Due to sometimes buggy device
                                 * side firmware we need to do a
                                 * STATUS stage in case of short
                                 * control transfers in USB host mode.
                                 * The STATUS stage then becomes the
                                 * "alt_next" to the DATA stage.
1706 */ 1707 xfer->flags_int.short_frames_ok = 1; 1708 } 1709 } 1710 } 1711 /* 1712 * Check if BUS-DMA support is enabled and try to load virtual 1713 * buffers into DMA, if any: 1714 */ 1715 #if USB_HAVE_BUSDMA 1716 if (xfer->flags_int.bdma_enable) { 1717 /* insert the USB transfer last in the BUS-DMA queue */ 1718 usb_command_wrapper(&xfer->xroot->dma_q, xfer); 1719 return; 1720 } 1721 #endif 1722 /* 1723 * Enter the USB transfer into the Host Controller or 1724 * Device Controller schedule: 1725 */ 1726 usbd_pipe_enter(xfer); 1727 } 1728 1729 /*------------------------------------------------------------------------* 1730 * usbd_pipe_enter - factored out code 1731 *------------------------------------------------------------------------*/ 1732 void 1733 usbd_pipe_enter(struct usb_xfer *xfer) 1734 { 1735 struct usb_endpoint *ep; 1736 1737 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 1738 1739 USB_BUS_LOCK(xfer->xroot->bus); 1740 1741 ep = xfer->endpoint; 1742 1743 DPRINTF("enter\n"); 1744 1745 /* the transfer can now be cancelled */ 1746 xfer->flags_int.can_cancel_immed = 1; 1747 1748 /* enter the transfer */ 1749 (ep->methods->enter) (xfer); 1750 1751 /* check for transfer error */ 1752 if (xfer->error) { 1753 /* some error has happened */ 1754 usbd_transfer_done(xfer, 0); 1755 USB_BUS_UNLOCK(xfer->xroot->bus); 1756 return; 1757 } 1758 1759 /* start the transfer */ 1760 usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], xfer); 1761 USB_BUS_UNLOCK(xfer->xroot->bus); 1762 } 1763 1764 /*------------------------------------------------------------------------* 1765 * usbd_transfer_start - start an USB transfer 1766 * 1767 * NOTE: Calling this function more than one time will only 1768 * result in a single transfer start, until the USB transfer 1769 * completes. 1770 *------------------------------------------------------------------------*/ 1771 void 1772 usbd_transfer_start(struct usb_xfer *xfer) 1773 { 1774 if (xfer == NULL) { 1775 /* transfer is gone */ 1776 return; 1777 } 1778 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 1779 1780 /* mark the USB transfer started */ 1781 1782 if (!xfer->flags_int.started) { 1783 /* lock the BUS lock to avoid races updating flags_int */ 1784 USB_BUS_LOCK(xfer->xroot->bus); 1785 xfer->flags_int.started = 1; 1786 USB_BUS_UNLOCK(xfer->xroot->bus); 1787 } 1788 /* check if the USB transfer callback is already transferring */ 1789 1790 if (xfer->flags_int.transferring) { 1791 return; 1792 } 1793 USB_BUS_LOCK(xfer->xroot->bus); 1794 /* call the USB transfer callback */ 1795 usbd_callback_ss_done_defer(xfer); 1796 USB_BUS_UNLOCK(xfer->xroot->bus); 1797 } 1798 1799 /*------------------------------------------------------------------------* 1800 * usbd_transfer_stop - stop an USB transfer 1801 * 1802 * NOTE: Calling this function more than one time will only 1803 * result in a single transfer stop. 1804 * NOTE: When this function returns it is not safe to free nor 1805 * reuse any DMA buffers. See "usbd_transfer_drain()". 
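 *
 * Hypothetical teardown sketch (driver-local names are examples
 * only); the usual order before freeing externally supplied
 * buffers is:
 *
 *      mtx_lock(&sc->sc_mtx);
 *      usbd_transfer_stop(sc->sc_xfer[0]);
 *      mtx_unlock(&sc->sc_mtx);
 *      usbd_transfer_drain(sc->sc_xfer[0]);
 *
 * where "sc->sc_mtx" is the "xfer_mtx" the transfer was set up
 * with.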
1806 *------------------------------------------------------------------------*/ 1807 void 1808 usbd_transfer_stop(struct usb_xfer *xfer) 1809 { 1810 struct usb_endpoint *ep; 1811 1812 if (xfer == NULL) { 1813 /* transfer is gone */ 1814 return; 1815 } 1816 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 1817 1818 /* check if the USB transfer was ever opened */ 1819 1820 if (!xfer->flags_int.open) { 1821 if (xfer->flags_int.started) { 1822 /* nothing to do except clearing the "started" flag */ 1823 /* lock the BUS lock to avoid races updating flags_int */ 1824 USB_BUS_LOCK(xfer->xroot->bus); 1825 xfer->flags_int.started = 0; 1826 USB_BUS_UNLOCK(xfer->xroot->bus); 1827 } 1828 return; 1829 } 1830 /* try to stop the current USB transfer */ 1831 1832 USB_BUS_LOCK(xfer->xroot->bus); 1833 /* override any previous error */ 1834 xfer->error = USB_ERR_CANCELLED; 1835 1836 /* 1837 * Clear "open" and "started" when both private and USB lock 1838 * is locked so that we don't get a race updating "flags_int" 1839 */ 1840 xfer->flags_int.open = 0; 1841 xfer->flags_int.started = 0; 1842 1843 /* 1844 * Check if we can cancel the USB transfer immediately. 1845 */ 1846 if (xfer->flags_int.transferring) { 1847 if (xfer->flags_int.can_cancel_immed && 1848 (!xfer->flags_int.did_close)) { 1849 DPRINTF("close\n"); 1850 /* 1851 * The following will lead to an USB_ERR_CANCELLED 1852 * error code being passed to the USB callback. 1853 */ 1854 (xfer->endpoint->methods->close) (xfer); 1855 /* only close once */ 1856 xfer->flags_int.did_close = 1; 1857 } else { 1858 /* need to wait for the next done callback */ 1859 } 1860 } else { 1861 DPRINTF("close\n"); 1862 1863 /* close here and now */ 1864 (xfer->endpoint->methods->close) (xfer); 1865 1866 /* 1867 * Any additional DMA delay is done by 1868 * "usbd_transfer_unsetup()". 1869 */ 1870 1871 /* 1872 * Special case. Check if we need to restart a blocked 1873 * endpoint. 1874 */ 1875 ep = xfer->endpoint; 1876 1877 /* 1878 * If the current USB transfer is completing we need 1879 * to start the next one: 1880 */ 1881 if (ep->endpoint_q[xfer->stream_id].curr == xfer) { 1882 usb_command_wrapper( 1883 &ep->endpoint_q[xfer->stream_id], NULL); 1884 } 1885 } 1886 1887 USB_BUS_UNLOCK(xfer->xroot->bus); 1888 } 1889 1890 /*------------------------------------------------------------------------* 1891 * usbd_transfer_pending 1892 * 1893 * This function will check if an USB transfer is pending which is a 1894 * little bit complicated! 1895 * Return values: 1896 * 0: Not pending 1897 * 1: Pending: The USB transfer will receive a callback in the future. 
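 *
 * For example, "usbd_transfer_drain()" below relies on this
 * check, roughly as follows:
 *
 *      while (usbd_transfer_pending(xfer) ||
 *          xfer->flags_int.doing_callback)
 *              cv_wait(&xfer->xroot->cv_drain, xfer->xroot->xfer_mtx);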
1898 *------------------------------------------------------------------------*/ 1899 uint8_t 1900 usbd_transfer_pending(struct usb_xfer *xfer) 1901 { 1902 struct usb_xfer_root *info; 1903 struct usb_xfer_queue *pq; 1904 1905 if (xfer == NULL) { 1906 /* transfer is gone */ 1907 return (0); 1908 } 1909 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 1910 1911 if (xfer->flags_int.transferring) { 1912 /* trivial case */ 1913 return (1); 1914 } 1915 USB_BUS_LOCK(xfer->xroot->bus); 1916 if (xfer->wait_queue) { 1917 /* we are waiting on a queue somewhere */ 1918 USB_BUS_UNLOCK(xfer->xroot->bus); 1919 return (1); 1920 } 1921 info = xfer->xroot; 1922 pq = &info->done_q; 1923 1924 if (pq->curr == xfer) { 1925 /* we are currently scheduled for callback */ 1926 USB_BUS_UNLOCK(xfer->xroot->bus); 1927 return (1); 1928 } 1929 /* we are not pending */ 1930 USB_BUS_UNLOCK(xfer->xroot->bus); 1931 return (0); 1932 } 1933 1934 /*------------------------------------------------------------------------* 1935 * usbd_transfer_drain 1936 * 1937 * This function will stop the USB transfer and wait for any 1938 * additional BUS-DMA and HW-DMA operations to complete. Buffers that 1939 * are loaded into DMA can safely be freed or reused after that this 1940 * function has returned. 1941 *------------------------------------------------------------------------*/ 1942 void 1943 usbd_transfer_drain(struct usb_xfer *xfer) 1944 { 1945 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 1946 "usbd_transfer_drain can sleep!"); 1947 1948 if (xfer == NULL) { 1949 /* transfer is gone */ 1950 return; 1951 } 1952 if (xfer->xroot->xfer_mtx != &Giant) { 1953 USB_XFER_LOCK_ASSERT(xfer, MA_NOTOWNED); 1954 } 1955 USB_XFER_LOCK(xfer); 1956 1957 usbd_transfer_stop(xfer); 1958 1959 while (usbd_transfer_pending(xfer) || 1960 xfer->flags_int.doing_callback) { 1961 1962 /* 1963 * It is allowed that the callback can drop its 1964 * transfer mutex. In that case checking only 1965 * "usbd_transfer_pending()" is not enough to tell if 1966 * the USB transfer is fully drained. We also need to 1967 * check the internal "doing_callback" flag. 1968 */ 1969 xfer->flags_int.draining = 1; 1970 1971 /* 1972 * Wait until the current outstanding USB 1973 * transfer is complete ! 1974 */ 1975 cv_wait(&xfer->xroot->cv_drain, xfer->xroot->xfer_mtx); 1976 } 1977 USB_XFER_UNLOCK(xfer); 1978 } 1979 1980 struct usb_page_cache * 1981 usbd_xfer_get_frame(struct usb_xfer *xfer, usb_frcount_t frindex) 1982 { 1983 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); 1984 1985 return (&xfer->frbuffers[frindex]); 1986 } 1987 1988 void * 1989 usbd_xfer_get_frame_buffer(struct usb_xfer *xfer, usb_frcount_t frindex) 1990 { 1991 struct usb_page_search page_info; 1992 1993 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); 1994 1995 usbd_get_page(&xfer->frbuffers[frindex], 0, &page_info); 1996 return (page_info.buffer); 1997 } 1998 1999 /*------------------------------------------------------------------------* 2000 * usbd_xfer_get_fps_shift 2001 * 2002 * The following function is only useful for isochronous transfers. It 2003 * returns how many times the frame execution rate has been shifted 2004 * down. 
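 *
 * Worked example (added for clarity): at high speed there are 8
 * microframes per millisecond, so a return value of 3 means one
 * isochronous frame every (1 << 3) = 8 microframes, i.e. one per
 * millisecond, while a return value of 0 means one frame per
 * microframe.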
2005 * 2006 * Return value: 2007 * Success: 0..3 2008 * Failure: 0 2009 *------------------------------------------------------------------------*/ 2010 uint8_t 2011 usbd_xfer_get_fps_shift(struct usb_xfer *xfer) 2012 { 2013 return (xfer->fps_shift); 2014 } 2015 2016 usb_frlength_t 2017 usbd_xfer_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex) 2018 { 2019 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); 2020 2021 return (xfer->frlengths[frindex]); 2022 } 2023 2024 /*------------------------------------------------------------------------* 2025 * usbd_xfer_set_frame_data 2026 * 2027 * This function sets the pointer of the buffer that should 2028 * loaded directly into DMA for the given USB frame. Passing "ptr" 2029 * equal to NULL while the corresponding "frlength" is greater 2030 * than zero gives undefined results! 2031 *------------------------------------------------------------------------*/ 2032 void 2033 usbd_xfer_set_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex, 2034 void *ptr, usb_frlength_t len) 2035 { 2036 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); 2037 2038 /* set virtual address to load and length */ 2039 xfer->frbuffers[frindex].buffer = ptr; 2040 usbd_xfer_set_frame_len(xfer, frindex, len); 2041 } 2042 2043 void 2044 usbd_xfer_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex, 2045 void **ptr, int *len) 2046 { 2047 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); 2048 2049 if (ptr != NULL) 2050 *ptr = xfer->frbuffers[frindex].buffer; 2051 if (len != NULL) 2052 *len = xfer->frlengths[frindex]; 2053 } 2054 2055 /*------------------------------------------------------------------------* 2056 * usbd_xfer_old_frame_length 2057 * 2058 * This function returns the framelength of the given frame at the 2059 * time the transfer was submitted. This function can be used to 2060 * compute the starting data pointer of the next isochronous frame 2061 * when an isochronous transfer has completed. 2062 *------------------------------------------------------------------------*/ 2063 usb_frlength_t 2064 usbd_xfer_old_frame_length(struct usb_xfer *xfer, usb_frcount_t frindex) 2065 { 2066 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); 2067 2068 return (xfer->frlengths[frindex + xfer->max_frame_count]); 2069 } 2070 2071 void 2072 usbd_xfer_status(struct usb_xfer *xfer, int *actlen, int *sumlen, int *aframes, 2073 int *nframes) 2074 { 2075 if (actlen != NULL) 2076 *actlen = xfer->actlen; 2077 if (sumlen != NULL) 2078 *sumlen = xfer->sumlen; 2079 if (aframes != NULL) 2080 *aframes = xfer->aframes; 2081 if (nframes != NULL) 2082 *nframes = xfer->nframes; 2083 } 2084 2085 /*------------------------------------------------------------------------* 2086 * usbd_xfer_set_frame_offset 2087 * 2088 * This function sets the frame data buffer offset relative to the beginning 2089 * of the USB DMA buffer allocated for this USB transfer. 
2090 *------------------------------------------------------------------------*/ 2091 void 2092 usbd_xfer_set_frame_offset(struct usb_xfer *xfer, usb_frlength_t offset, 2093 usb_frcount_t frindex) 2094 { 2095 KASSERT(!xfer->flags.ext_buffer, ("Cannot offset data frame " 2096 "when the USB buffer is external\n")); 2097 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); 2098 2099 /* set virtual address to load */ 2100 xfer->frbuffers[frindex].buffer = 2101 USB_ADD_BYTES(xfer->local_buffer, offset); 2102 } 2103 2104 void 2105 usbd_xfer_set_interval(struct usb_xfer *xfer, int i) 2106 { 2107 xfer->interval = i; 2108 } 2109 2110 void 2111 usbd_xfer_set_timeout(struct usb_xfer *xfer, int t) 2112 { 2113 xfer->timeout = t; 2114 } 2115 2116 void 2117 usbd_xfer_set_frames(struct usb_xfer *xfer, usb_frcount_t n) 2118 { 2119 xfer->nframes = n; 2120 } 2121 2122 usb_frcount_t 2123 usbd_xfer_max_frames(struct usb_xfer *xfer) 2124 { 2125 return (xfer->max_frame_count); 2126 } 2127 2128 usb_frlength_t 2129 usbd_xfer_max_len(struct usb_xfer *xfer) 2130 { 2131 return (xfer->max_data_length); 2132 } 2133 2134 usb_frlength_t 2135 usbd_xfer_max_framelen(struct usb_xfer *xfer) 2136 { 2137 return (xfer->max_frame_size); 2138 } 2139 2140 void 2141 usbd_xfer_set_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex, 2142 usb_frlength_t len) 2143 { 2144 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); 2145 2146 xfer->frlengths[frindex] = len; 2147 } 2148 2149 /*------------------------------------------------------------------------* 2150 * usb_callback_proc - factored out code 2151 * 2152 * This function performs USB callbacks. 2153 *------------------------------------------------------------------------*/ 2154 static void 2155 usb_callback_proc(struct usb_proc_msg *_pm) 2156 { 2157 struct usb_done_msg *pm = (void *)_pm; 2158 struct usb_xfer_root *info = pm->xroot; 2159 2160 /* Change locking order */ 2161 USB_BUS_UNLOCK(info->bus); 2162 2163 /* 2164 * We exploit the fact that the mutex is the same for all 2165 * callbacks that will be called from this thread: 2166 */ 2167 mtx_lock(info->xfer_mtx); 2168 USB_BUS_LOCK(info->bus); 2169 2170 /* Continue where we lost track */ 2171 usb_command_wrapper(&info->done_q, 2172 info->done_q.curr); 2173 2174 mtx_unlock(info->xfer_mtx); 2175 } 2176 2177 /*------------------------------------------------------------------------* 2178 * usbd_callback_ss_done_defer 2179 * 2180 * This function will defer the start, stop and done callback to the 2181 * correct thread. 2182 *------------------------------------------------------------------------*/ 2183 static void 2184 usbd_callback_ss_done_defer(struct usb_xfer *xfer) 2185 { 2186 struct usb_xfer_root *info = xfer->xroot; 2187 struct usb_xfer_queue *pq = &info->done_q; 2188 2189 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED); 2190 2191 if (pq->curr != xfer) { 2192 usbd_transfer_enqueue(pq, xfer); 2193 } 2194 if (!pq->recurse_1) { 2195 2196 /* 2197 * We have to postpone the callback due to the fact we 2198 * will have a Lock Order Reversal, LOR, if we try to 2199 * proceed ! 2200 */ 2201 if (usb_proc_msignal(info->done_p, 2202 &info->done_m[0], &info->done_m[1])) { 2203 /* ignore */ 2204 } 2205 } else { 2206 /* clear second recurse flag */ 2207 pq->recurse_2 = 0; 2208 } 2209 return; 2210 2211 } 2212 2213 /*------------------------------------------------------------------------* 2214 * usbd_callback_wrapper 2215 * 2216 * This is a wrapper for USB callbacks. 
This wrapper does some 2217 * auto-magic things like figuring out if we can call the callback 2218 * directly from the current context or if we need to wakeup the 2219 * interrupt process. 2220 *------------------------------------------------------------------------*/ 2221 static void 2222 usbd_callback_wrapper(struct usb_xfer_queue *pq) 2223 { 2224 struct usb_xfer *xfer = pq->curr; 2225 struct usb_xfer_root *info = xfer->xroot; 2226 2227 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED); 2228 if (!mtx_owned(info->xfer_mtx) && !SCHEDULER_STOPPED()) { 2229 /* 2230 * Cases that end up here: 2231 * 2232 * 5) HW interrupt done callback or other source. 2233 */ 2234 DPRINTFN(3, "case 5\n"); 2235 2236 /* 2237 * We have to postpone the callback due to the fact we 2238 * will have a Lock Order Reversal, LOR, if we try to 2239 * proceed ! 2240 */ 2241 if (usb_proc_msignal(info->done_p, 2242 &info->done_m[0], &info->done_m[1])) { 2243 /* ignore */ 2244 } 2245 return; 2246 } 2247 /* 2248 * Cases that end up here: 2249 * 2250 * 1) We are starting a transfer 2251 * 2) We are prematurely calling back a transfer 2252 * 3) We are stopping a transfer 2253 * 4) We are doing an ordinary callback 2254 */ 2255 DPRINTFN(3, "case 1-4\n"); 2256 /* get next USB transfer in the queue */ 2257 info->done_q.curr = NULL; 2258 2259 /* set flag in case of drain */ 2260 xfer->flags_int.doing_callback = 1; 2261 2262 USB_BUS_UNLOCK(info->bus); 2263 USB_BUS_LOCK_ASSERT(info->bus, MA_NOTOWNED); 2264 2265 /* set correct USB state for callback */ 2266 if (!xfer->flags_int.transferring) { 2267 xfer->usb_state = USB_ST_SETUP; 2268 if (!xfer->flags_int.started) { 2269 /* we got stopped before we even got started */ 2270 USB_BUS_LOCK(info->bus); 2271 goto done; 2272 } 2273 } else { 2274 2275 if (usbd_callback_wrapper_sub(xfer)) { 2276 /* the callback has been deferred */ 2277 USB_BUS_LOCK(info->bus); 2278 goto done; 2279 } 2280 #if USB_HAVE_POWERD 2281 /* decrement power reference */ 2282 usbd_transfer_power_ref(xfer, -1); 2283 #endif 2284 xfer->flags_int.transferring = 0; 2285 2286 if (xfer->error) { 2287 xfer->usb_state = USB_ST_ERROR; 2288 } else { 2289 /* set transferred state */ 2290 xfer->usb_state = USB_ST_TRANSFERRED; 2291 #if USB_HAVE_BUSDMA 2292 /* sync DMA memory, if any */ 2293 if (xfer->flags_int.bdma_enable && 2294 (!xfer->flags_int.bdma_no_post_sync)) { 2295 usb_bdma_post_sync(xfer); 2296 } 2297 #endif 2298 } 2299 } 2300 2301 #if USB_HAVE_PF 2302 if (xfer->usb_state != USB_ST_SETUP) 2303 usbpf_xfertap(xfer, USBPF_XFERTAP_DONE); 2304 #endif 2305 /* call processing routine */ 2306 (xfer->callback) (xfer, xfer->error); 2307 2308 /* pickup the USB mutex again */ 2309 USB_BUS_LOCK(info->bus); 2310 2311 /* 2312 * Check if we got started after that we got cancelled, but 2313 * before we managed to do the callback. 2314 */ 2315 if ((!xfer->flags_int.open) && 2316 (xfer->flags_int.started) && 2317 (xfer->usb_state == USB_ST_ERROR)) { 2318 /* clear flag in case of drain */ 2319 xfer->flags_int.doing_callback = 0; 2320 /* try to loop, but not recursivly */ 2321 usb_command_wrapper(&info->done_q, xfer); 2322 return; 2323 } 2324 2325 done: 2326 /* clear flag in case of drain */ 2327 xfer->flags_int.doing_callback = 0; 2328 2329 /* 2330 * Check if we are draining. 
	 */
	if (xfer->flags_int.draining &&
	    (!xfer->flags_int.transferring)) {
		/* "usbd_transfer_drain()" is waiting for end of transfer */
		xfer->flags_int.draining = 0;
		cv_broadcast(&info->cv_drain);
	}

	/* do the next callback, if any */
	usb_command_wrapper(&info->done_q,
	    info->done_q.curr);
}

/*------------------------------------------------------------------------*
 *	usb_dma_delay_done_cb
 *
 * This function is called when the DMA delay has been executed, and
 * will make sure that the callback is called to complete the USB
 * transfer. This code path is usually only used when there is an USB
 * error like USB_ERR_CANCELLED.
 *------------------------------------------------------------------------*/
void
usb_dma_delay_done_cb(struct usb_xfer *xfer)
{
	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);

	DPRINTFN(3, "Completed %p\n", xfer);

	/* queue callback for execution, again */
	usbd_transfer_done(xfer, 0);
}

/*------------------------------------------------------------------------*
 *	usbd_transfer_dequeue
 *
 *  - This function is used to remove an USB transfer from a USB
 *  transfer queue.
 *
 *  - This function can be called multiple times in a row.
 *------------------------------------------------------------------------*/
void
usbd_transfer_dequeue(struct usb_xfer *xfer)
{
	struct usb_xfer_queue *pq;

	pq = xfer->wait_queue;
	if (pq) {
		TAILQ_REMOVE(&pq->head, xfer, wait_entry);
		xfer->wait_queue = NULL;
	}
}

/*------------------------------------------------------------------------*
 *	usbd_transfer_enqueue
 *
 *  - This function is used to insert an USB transfer into a USB
 *  transfer queue.
 *
 *  - This function can be called multiple times in a row.
 *------------------------------------------------------------------------*/
void
usbd_transfer_enqueue(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
{
	/*
	 * Insert the USB transfer into the queue, if it is not
	 * already on a USB transfer queue:
	 */
	if (xfer->wait_queue == NULL) {
		xfer->wait_queue = pq;
		TAILQ_INSERT_TAIL(&pq->head, xfer, wait_entry);
	}
}

/*------------------------------------------------------------------------*
 *	usbd_transfer_done
 *
 *  - This function is used to remove an USB transfer from the busdma,
 *  pipe or interrupt queue.
 *
 *  - This function is used to queue the USB transfer on the done
 *  queue.
 *
 *  - This function is used to stop any USB transfer timeouts.
 *------------------------------------------------------------------------*/
void
usbd_transfer_done(struct usb_xfer *xfer, usb_error_t error)
{
	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);

	DPRINTF("err=%s\n", usbd_errstr(error));

	/*
	 * If we are not transferring then just return.
	 * This can happen during transfer cancel.
2425 */ 2426 if (!xfer->flags_int.transferring) { 2427 DPRINTF("not transferring\n"); 2428 /* end of control transfer, if any */ 2429 xfer->flags_int.control_act = 0; 2430 return; 2431 } 2432 /* only set transfer error if not already set */ 2433 if (!xfer->error) { 2434 xfer->error = error; 2435 } 2436 /* stop any callouts */ 2437 usb_callout_stop(&xfer->timeout_handle); 2438 2439 /* 2440 * If we are waiting on a queue, just remove the USB transfer 2441 * from the queue, if any. We should have the required locks 2442 * locked to do the remove when this function is called. 2443 */ 2444 usbd_transfer_dequeue(xfer); 2445 2446 #if USB_HAVE_BUSDMA 2447 if (mtx_owned(xfer->xroot->xfer_mtx)) { 2448 struct usb_xfer_queue *pq; 2449 2450 /* 2451 * If the private USB lock is not locked, then we assume 2452 * that the BUS-DMA load stage has been passed: 2453 */ 2454 pq = &xfer->xroot->dma_q; 2455 2456 if (pq->curr == xfer) { 2457 /* start the next BUS-DMA load, if any */ 2458 usb_command_wrapper(pq, NULL); 2459 } 2460 } 2461 #endif 2462 /* keep some statistics */ 2463 if (xfer->error) { 2464 xfer->xroot->bus->stats_err.uds_requests 2465 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++; 2466 } else { 2467 xfer->xroot->bus->stats_ok.uds_requests 2468 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++; 2469 } 2470 2471 /* call the USB transfer callback */ 2472 usbd_callback_ss_done_defer(xfer); 2473 } 2474 2475 /*------------------------------------------------------------------------* 2476 * usbd_transfer_start_cb 2477 * 2478 * This function is called to start the USB transfer when 2479 * "xfer->interval" is greater than zero, and and the endpoint type is 2480 * BULK or CONTROL. 2481 *------------------------------------------------------------------------*/ 2482 static void 2483 usbd_transfer_start_cb(void *arg) 2484 { 2485 struct usb_xfer *xfer = arg; 2486 struct usb_endpoint *ep = xfer->endpoint; 2487 2488 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED); 2489 2490 DPRINTF("start\n"); 2491 2492 #if USB_HAVE_PF 2493 usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT); 2494 #endif 2495 2496 /* the transfer can now be cancelled */ 2497 xfer->flags_int.can_cancel_immed = 1; 2498 2499 /* start USB transfer, if no error */ 2500 if (xfer->error == 0) 2501 (ep->methods->start) (xfer); 2502 2503 /* check for transfer error */ 2504 if (xfer->error) { 2505 /* some error has happened */ 2506 usbd_transfer_done(xfer, 0); 2507 } 2508 } 2509 2510 /*------------------------------------------------------------------------* 2511 * usbd_xfer_set_stall 2512 * 2513 * This function is used to set the stall flag outside the 2514 * callback. This function is NULL safe. 2515 *------------------------------------------------------------------------*/ 2516 void 2517 usbd_xfer_set_stall(struct usb_xfer *xfer) 2518 { 2519 if (xfer == NULL) { 2520 /* tearing down */ 2521 return; 2522 } 2523 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 2524 2525 /* avoid any races by locking the USB mutex */ 2526 USB_BUS_LOCK(xfer->xroot->bus); 2527 xfer->flags.stall_pipe = 1; 2528 USB_BUS_UNLOCK(xfer->xroot->bus); 2529 } 2530 2531 int 2532 usbd_xfer_is_stalled(struct usb_xfer *xfer) 2533 { 2534 return (xfer->endpoint->is_stalled); 2535 } 2536 2537 /*------------------------------------------------------------------------* 2538 * usbd_transfer_clear_stall 2539 * 2540 * This function is used to clear the stall flag outside the 2541 * callback. This function is NULL safe. 
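 *
 * A hypothetical usage sketch from driver code that already holds
 * the transfer mutex (the "sc_xfer" name is illustrative only):
 *
 *	usbd_transfer_clear_stall(sc->sc_xfer);
 *	usbd_transfer_start(sc->sc_xfer);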
2542 *------------------------------------------------------------------------*/ 2543 void 2544 usbd_transfer_clear_stall(struct usb_xfer *xfer) 2545 { 2546 if (xfer == NULL) { 2547 /* tearing down */ 2548 return; 2549 } 2550 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); 2551 2552 /* avoid any races by locking the USB mutex */ 2553 USB_BUS_LOCK(xfer->xroot->bus); 2554 2555 xfer->flags.stall_pipe = 0; 2556 2557 USB_BUS_UNLOCK(xfer->xroot->bus); 2558 } 2559 2560 /*------------------------------------------------------------------------* 2561 * usbd_pipe_start 2562 * 2563 * This function is used to add an USB transfer to the pipe transfer list. 2564 *------------------------------------------------------------------------*/ 2565 void 2566 usbd_pipe_start(struct usb_xfer_queue *pq) 2567 { 2568 struct usb_endpoint *ep; 2569 struct usb_xfer *xfer; 2570 uint8_t type; 2571 2572 xfer = pq->curr; 2573 ep = xfer->endpoint; 2574 2575 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED); 2576 2577 /* 2578 * If the endpoint is already stalled we do nothing ! 2579 */ 2580 if (ep->is_stalled) { 2581 return; 2582 } 2583 /* 2584 * Check if we are supposed to stall the endpoint: 2585 */ 2586 if (xfer->flags.stall_pipe) { 2587 struct usb_device *udev; 2588 struct usb_xfer_root *info; 2589 2590 /* clear stall command */ 2591 xfer->flags.stall_pipe = 0; 2592 2593 /* get pointer to USB device */ 2594 info = xfer->xroot; 2595 udev = info->udev; 2596 2597 /* 2598 * Only stall BULK and INTERRUPT endpoints. 2599 */ 2600 type = (ep->edesc->bmAttributes & UE_XFERTYPE); 2601 if ((type == UE_BULK) || 2602 (type == UE_INTERRUPT)) { 2603 uint8_t did_stall; 2604 2605 did_stall = 1; 2606 2607 if (udev->flags.usb_mode == USB_MODE_DEVICE) { 2608 (udev->bus->methods->set_stall) ( 2609 udev, ep, &did_stall); 2610 } else if (udev->ctrl_xfer[1]) { 2611 info = udev->ctrl_xfer[1]->xroot; 2612 usb_proc_msignal( 2613 &info->bus->non_giant_callback_proc, 2614 &udev->cs_msg[0], &udev->cs_msg[1]); 2615 } else { 2616 /* should not happen */ 2617 DPRINTFN(0, "No stall handler\n"); 2618 } 2619 /* 2620 * Check if we should stall. Some USB hardware 2621 * handles set- and clear-stall in hardware. 2622 */ 2623 if (did_stall) { 2624 /* 2625 * The transfer will be continued when 2626 * the clear-stall control endpoint 2627 * message is received. 2628 */ 2629 ep->is_stalled = 1; 2630 return; 2631 } 2632 } else if (type == UE_ISOCHRONOUS) { 2633 2634 /* 2635 * Make sure any FIFO overflow or other FIFO 2636 * error conditions go away by resetting the 2637 * endpoint FIFO through the clear stall 2638 * method. 2639 */ 2640 if (udev->flags.usb_mode == USB_MODE_DEVICE) { 2641 (udev->bus->methods->clear_stall) (udev, ep); 2642 } 2643 } 2644 } 2645 /* Set or clear stall complete - special case */ 2646 if (xfer->nframes == 0) { 2647 /* we are complete */ 2648 xfer->aframes = 0; 2649 usbd_transfer_done(xfer, 0); 2650 return; 2651 } 2652 /* 2653 * Handled cases: 2654 * 2655 * 1) Start the first transfer queued. 2656 * 2657 * 2) Re-start the current USB transfer. 
2658 */ 2659 /* 2660 * Check if there should be any 2661 * pre transfer start delay: 2662 */ 2663 if (xfer->interval > 0) { 2664 type = (ep->edesc->bmAttributes & UE_XFERTYPE); 2665 if ((type == UE_BULK) || 2666 (type == UE_CONTROL)) { 2667 usbd_transfer_timeout_ms(xfer, 2668 &usbd_transfer_start_cb, 2669 xfer->interval); 2670 return; 2671 } 2672 } 2673 DPRINTF("start\n"); 2674 2675 #if USB_HAVE_PF 2676 usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT); 2677 #endif 2678 /* the transfer can now be cancelled */ 2679 xfer->flags_int.can_cancel_immed = 1; 2680 2681 /* start USB transfer, if no error */ 2682 if (xfer->error == 0) 2683 (ep->methods->start) (xfer); 2684 2685 /* check for transfer error */ 2686 if (xfer->error) { 2687 /* some error has happened */ 2688 usbd_transfer_done(xfer, 0); 2689 } 2690 } 2691 2692 /*------------------------------------------------------------------------* 2693 * usbd_transfer_timeout_ms 2694 * 2695 * This function is used to setup a timeout on the given USB 2696 * transfer. If the timeout has been deferred the callback given by 2697 * "cb" will get called after "ms" milliseconds. 2698 *------------------------------------------------------------------------*/ 2699 void 2700 usbd_transfer_timeout_ms(struct usb_xfer *xfer, 2701 void (*cb) (void *arg), usb_timeout_t ms) 2702 { 2703 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED); 2704 2705 /* defer delay */ 2706 usb_callout_reset(&xfer->timeout_handle, 2707 USB_MS_TO_TICKS(ms), cb, xfer); 2708 } 2709 2710 /*------------------------------------------------------------------------* 2711 * usbd_callback_wrapper_sub 2712 * 2713 * - This function will update variables in an USB transfer after 2714 * that the USB transfer is complete. 2715 * 2716 * - This function is used to start the next USB transfer on the 2717 * ep transfer queue, if any. 2718 * 2719 * NOTE: In some special cases the USB transfer will not be removed from 2720 * the pipe queue, but remain first. To enforce USB transfer removal call 2721 * this function passing the error code "USB_ERR_CANCELLED". 2722 * 2723 * Return values: 2724 * 0: Success. 2725 * Else: The callback has been deferred. 2726 *------------------------------------------------------------------------*/ 2727 static uint8_t 2728 usbd_callback_wrapper_sub(struct usb_xfer *xfer) 2729 { 2730 struct usb_endpoint *ep; 2731 struct usb_bus *bus; 2732 usb_frcount_t x; 2733 2734 bus = xfer->xroot->bus; 2735 2736 if ((!xfer->flags_int.open) && 2737 (!xfer->flags_int.did_close)) { 2738 DPRINTF("close\n"); 2739 USB_BUS_LOCK(bus); 2740 (xfer->endpoint->methods->close) (xfer); 2741 USB_BUS_UNLOCK(bus); 2742 /* only close once */ 2743 xfer->flags_int.did_close = 1; 2744 return (1); /* wait for new callback */ 2745 } 2746 /* 2747 * If we have a non-hardware induced error we 2748 * need to do the DMA delay! 2749 */ 2750 if (xfer->error != 0 && !xfer->flags_int.did_dma_delay && 2751 (xfer->error == USB_ERR_CANCELLED || 2752 xfer->error == USB_ERR_TIMEOUT || 2753 bus->methods->start_dma_delay != NULL)) { 2754 2755 usb_timeout_t temp; 2756 2757 /* only delay once */ 2758 xfer->flags_int.did_dma_delay = 1; 2759 2760 /* we can not cancel this delay */ 2761 xfer->flags_int.can_cancel_immed = 0; 2762 2763 temp = usbd_get_dma_delay(xfer->xroot->udev); 2764 2765 DPRINTFN(3, "DMA delay, %u ms, " 2766 "on %p\n", temp, xfer); 2767 2768 if (temp != 0) { 2769 USB_BUS_LOCK(bus); 2770 /* 2771 * Some hardware solutions have dedicated 2772 * events when it is safe to free DMA'ed 2773 * memory. 
For the other hardware platforms we 2774 * use a static delay. 2775 */ 2776 if (bus->methods->start_dma_delay != NULL) { 2777 (bus->methods->start_dma_delay) (xfer); 2778 } else { 2779 usbd_transfer_timeout_ms(xfer, 2780 (void (*)(void *))&usb_dma_delay_done_cb, 2781 temp); 2782 } 2783 USB_BUS_UNLOCK(bus); 2784 return (1); /* wait for new callback */ 2785 } 2786 } 2787 /* check actual number of frames */ 2788 if (xfer->aframes > xfer->nframes) { 2789 if (xfer->error == 0) { 2790 panic("%s: actual number of frames, %d, is " 2791 "greater than initial number of frames, %d\n", 2792 __FUNCTION__, xfer->aframes, xfer->nframes); 2793 } else { 2794 /* just set some valid value */ 2795 xfer->aframes = xfer->nframes; 2796 } 2797 } 2798 /* compute actual length */ 2799 xfer->actlen = 0; 2800 2801 for (x = 0; x != xfer->aframes; x++) { 2802 xfer->actlen += xfer->frlengths[x]; 2803 } 2804 2805 /* 2806 * Frames that were not transferred get zero actual length in 2807 * case the USB device driver does not check the actual number 2808 * of frames transferred, "xfer->aframes": 2809 */ 2810 for (; x < xfer->nframes; x++) { 2811 usbd_xfer_set_frame_len(xfer, x, 0); 2812 } 2813 2814 /* check actual length */ 2815 if (xfer->actlen > xfer->sumlen) { 2816 if (xfer->error == 0) { 2817 panic("%s: actual length, %d, is greater than " 2818 "initial length, %d\n", 2819 __FUNCTION__, xfer->actlen, xfer->sumlen); 2820 } else { 2821 /* just set some valid value */ 2822 xfer->actlen = xfer->sumlen; 2823 } 2824 } 2825 DPRINTFN(1, "xfer=%p endpoint=%p sts=%d alen=%d, slen=%d, afrm=%d, nfrm=%d\n", 2826 xfer, xfer->endpoint, xfer->error, xfer->actlen, xfer->sumlen, 2827 xfer->aframes, xfer->nframes); 2828 2829 if (xfer->error) { 2830 /* end of control transfer, if any */ 2831 xfer->flags_int.control_act = 0; 2832 2833 /* check if we should block the execution queue */ 2834 if ((xfer->error != USB_ERR_CANCELLED) && 2835 (xfer->flags.pipe_bof)) { 2836 DPRINTFN(2, "xfer=%p: Block On Failure " 2837 "on endpoint=%p\n", xfer, xfer->endpoint); 2838 goto done; 2839 } 2840 } else { 2841 /* check for short transfers */ 2842 if (xfer->actlen < xfer->sumlen) { 2843 2844 /* end of control transfer, if any */ 2845 xfer->flags_int.control_act = 0; 2846 2847 if (!xfer->flags_int.short_xfer_ok) { 2848 xfer->error = USB_ERR_SHORT_XFER; 2849 if (xfer->flags.pipe_bof) { 2850 DPRINTFN(2, "xfer=%p: Block On Failure on " 2851 "Short Transfer on endpoint %p.\n", 2852 xfer, xfer->endpoint); 2853 goto done; 2854 } 2855 } 2856 } else { 2857 /* 2858 * Check if we are in the middle of a 2859 * control transfer: 2860 */ 2861 if (xfer->flags_int.control_act) { 2862 DPRINTFN(5, "xfer=%p: Control transfer " 2863 "active on endpoint=%p\n", xfer, xfer->endpoint); 2864 goto done; 2865 } 2866 } 2867 } 2868 2869 ep = xfer->endpoint; 2870 2871 /* 2872 * If the current USB transfer is completing we need to start the 2873 * next one: 2874 */ 2875 USB_BUS_LOCK(bus); 2876 if (ep->endpoint_q[xfer->stream_id].curr == xfer) { 2877 usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], NULL); 2878 2879 if (ep->endpoint_q[xfer->stream_id].curr != NULL || 2880 TAILQ_FIRST(&ep->endpoint_q[xfer->stream_id].head) != NULL) { 2881 /* there is another USB transfer waiting */ 2882 } else { 2883 /* this is the last USB transfer */ 2884 /* clear isochronous sync flag */ 2885 xfer->endpoint->is_synced = 0; 2886 } 2887 } 2888 USB_BUS_UNLOCK(bus); 2889 done: 2890 return (0); 2891 } 2892 2893 /*------------------------------------------------------------------------* 2894 * 
usb_command_wrapper
 *
 * This function is used to execute commands non-recursively on an USB
 * transfer.
 *------------------------------------------------------------------------*/
void
usb_command_wrapper(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
{
	if (xfer) {
		/*
		 * If the transfer is not already processing,
		 * queue it!
		 */
		if (pq->curr != xfer) {
			usbd_transfer_enqueue(pq, xfer);
			if (pq->curr != NULL) {
				/* something is already processing */
				DPRINTFN(6, "busy %p\n", pq->curr);
				return;
			}
		}
	} else {
		/* Get next element in queue */
		pq->curr = NULL;
	}

	if (!pq->recurse_1) {

		do {

			/* set both recurse flags */
			pq->recurse_1 = 1;
			pq->recurse_2 = 1;

			if (pq->curr == NULL) {
				xfer = TAILQ_FIRST(&pq->head);
				if (xfer) {
					TAILQ_REMOVE(&pq->head, xfer,
					    wait_entry);
					xfer->wait_queue = NULL;
					pq->curr = xfer;
				} else {
					break;
				}
			}
			DPRINTFN(6, "cb %p (enter)\n", pq->curr);
			(pq->command) (pq);
			DPRINTFN(6, "cb %p (leave)\n", pq->curr);

		} while (!pq->recurse_2);

		/* clear first recurse flag */
		pq->recurse_1 = 0;

	} else {
		/* clear second recurse flag */
		pq->recurse_2 = 0;
	}
}

/*------------------------------------------------------------------------*
 *	usbd_ctrl_transfer_setup
 *
 * This function is used to setup the default USB control endpoint
 * transfer.
 *------------------------------------------------------------------------*/
void
usbd_ctrl_transfer_setup(struct usb_device *udev)
{
	struct usb_xfer *xfer;
	uint8_t no_resetup;
	uint8_t iface_index;

	/* check for root HUB */
	if (udev->parent_hub == NULL)
		return;
repeat:

	xfer = udev->ctrl_xfer[0];
	if (xfer) {
		USB_XFER_LOCK(xfer);
		no_resetup =
		    ((xfer->address == udev->address) &&
		    (udev->ctrl_ep_desc.wMaxPacketSize[0] ==
		    udev->ddesc.bMaxPacketSize));
		if (udev->flags.usb_mode == USB_MODE_DEVICE) {
			if (no_resetup) {
				/*
				 * NOTE: checking "xfer->address" and
				 * starting the USB transfer must be
				 * atomic!
				 */
				usbd_transfer_start(xfer);
			}
		}
		USB_XFER_UNLOCK(xfer);
	} else {
		no_resetup = 0;
	}

	if (no_resetup) {
		/*
		 * All parameters are exactly the same as before.
		 * Just return.
		 */
		return;
	}
	/*
	 * Update wMaxPacketSize for the default control endpoint:
	 */
	udev->ctrl_ep_desc.wMaxPacketSize[0] =
	    udev->ddesc.bMaxPacketSize;

	/*
	 * Unsetup any existing USB transfer:
	 */
	usbd_transfer_unsetup(udev->ctrl_xfer, USB_CTRL_XFER_MAX);

	/*
	 * Reset clear stall error counter.
	 */
	udev->clear_stall_errors = 0;

	/*
	 * Try to setup a new USB transfer for the
	 * default control endpoint:
	 */
	iface_index = 0;
	if (usbd_transfer_setup(udev, &iface_index,
	    udev->ctrl_xfer, usb_control_ep_cfg, USB_CTRL_XFER_MAX, NULL,
	    &udev->device_mtx)) {
		DPRINTFN(0, "could not setup default "
		    "USB transfer\n");
	} else {
		goto repeat;
	}
}

/*------------------------------------------------------------------------*
 *	usbd_clear_stall_locked - factored out code
 *
 * NOTE: the intention of this function is not to reset the hardware
 * data toggle.
 *------------------------------------------------------------------------*/
void
usbd_clear_stall_locked(struct usb_device *udev, struct usb_endpoint *ep)
{
	USB_BUS_LOCK_ASSERT(udev->bus, MA_OWNED);

	/* check that we have a valid case */
	if (udev->flags.usb_mode == USB_MODE_HOST &&
	    udev->parent_hub != NULL &&
	    udev->bus->methods->clear_stall != NULL &&
	    ep->methods != NULL) {
		(udev->bus->methods->clear_stall) (udev, ep);
	}
}

/*------------------------------------------------------------------------*
 *	usbd_clear_data_toggle - factored out code
 *
 * NOTE: the intention of this function is not to reset the hardware
 * data toggle on the USB device side.
 *------------------------------------------------------------------------*/
void
usbd_clear_data_toggle(struct usb_device *udev, struct usb_endpoint *ep)
{
	DPRINTFN(5, "udev=%p endpoint=%p\n", udev, ep);

	USB_BUS_LOCK(udev->bus);
	ep->toggle_next = 0;
	/* some hardware needs a callback to clear the data toggle */
	usbd_clear_stall_locked(udev, ep);
	USB_BUS_UNLOCK(udev->bus);
}

/*------------------------------------------------------------------------*
 *	usbd_clear_stall_callback - factored out clear stall callback
 *
 * Input parameters:
 *  xfer1: Clear Stall Control Transfer
 *  xfer2: Stalled USB Transfer
 *
 * This function is NULL safe.
 *
 * Return values:
 *   0: In progress
 * Else: Finished
 *
 * Clear stall config example:
 *
 * static const struct usb_config my_clearstall = {
 *	.type = UE_CONTROL,
 *	.endpoint = 0,
 *	.direction = UE_DIR_ANY,
 *	.interval = 50, //50 milliseconds
 *	.bufsize = sizeof(struct usb_device_request),
 *	.timeout = 1000, //1.000 seconds
 *	.callback = &my_clear_stall_callback, // **
 *	.usb_mode = USB_MODE_HOST,
 * };
 *
 * ** "my_clear_stall_callback" calls "usbd_clear_stall_callback"
 * passing the correct parameters.
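 *
 * For illustration, such a callback could roughly look like the
 * sketch below, where "my_softc", "sc_bulk_xfer", "sc_flags" and
 * "MY_FLAG_STALLED" are hypothetical driver-side names:
 *
 *	static void
 *	my_clear_stall_callback(struct usb_xfer *xfer, usb_error_t error)
 *	{
 *		struct my_softc *sc = usbd_xfer_softc(xfer);
 *
 *		if (usbd_clear_stall_callback(xfer, sc->sc_bulk_xfer)) {
 *			// stall is cleared - restart the data transfer
 *			sc->sc_flags &= ~MY_FLAG_STALLED;
 *			usbd_transfer_start(sc->sc_bulk_xfer);
 *		}
 *	}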
 *------------------------------------------------------------------------*/
uint8_t
usbd_clear_stall_callback(struct usb_xfer *xfer1,
    struct usb_xfer *xfer2)
{
	struct usb_device_request req;

	if (xfer2 == NULL) {
		/* looks like we are tearing down */
		DPRINTF("NULL input parameter\n");
		return (0);
	}
	USB_XFER_LOCK_ASSERT(xfer1, MA_OWNED);
	USB_XFER_LOCK_ASSERT(xfer2, MA_OWNED);

	switch (USB_GET_STATE(xfer1)) {
	case USB_ST_SETUP:

		/*
		 * pre-clear the data toggle to DATA0 ("umass.c" and
		 * "ata-usb.c" depend on this)
		 */

		usbd_clear_data_toggle(xfer2->xroot->udev, xfer2->endpoint);

		/* setup a clear-stall packet */

		req.bmRequestType = UT_WRITE_ENDPOINT;
		req.bRequest = UR_CLEAR_FEATURE;
		USETW(req.wValue, UF_ENDPOINT_HALT);
		req.wIndex[0] = xfer2->endpoint->edesc->bEndpointAddress;
		req.wIndex[1] = 0;
		USETW(req.wLength, 0);

		/*
		 * "usbd_transfer_setup_sub()" will ensure that
		 * we have sufficient room in the buffer for
		 * the request structure!
		 */

		/* copy in the transfer */

		usbd_copy_in(xfer1->frbuffers, 0, &req, sizeof(req));

		/* set length */
		xfer1->frlengths[0] = sizeof(req);
		xfer1->nframes = 1;

		usbd_transfer_submit(xfer1);
		return (0);

	case USB_ST_TRANSFERRED:
		break;

	default:			/* Error */
		if (xfer1->error == USB_ERR_CANCELLED) {
			return (0);
		}
		break;
	}
	return (1);			/* Clear Stall Finished */
}

/*------------------------------------------------------------------------*
 *	usbd_transfer_poll
 *
 * The following function gets called from the USB keyboard driver and
 * UMASS when the system has panicked.
 *
 * NOTE: It is currently not possible to resume normal operation on
 * the USB controller which has been polled, due to clearing of the
 * "up_dsleep" and "up_msleep" flags.
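 *
 * For illustration, a polled keyboard driver could call this as
 * "usbd_transfer_poll(sc->sc_xfer, N)" from its polling routine,
 * where "sc->sc_xfer" is the driver's array of USB transfers and
 * "N" its size (both names are driver-side examples, not part of
 * this file).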
3170 *------------------------------------------------------------------------*/ 3171 void 3172 usbd_transfer_poll(struct usb_xfer **ppxfer, uint16_t max) 3173 { 3174 struct usb_xfer *xfer; 3175 struct usb_xfer_root *xroot; 3176 struct usb_device *udev; 3177 struct usb_proc_msg *pm; 3178 uint16_t n; 3179 uint16_t drop_bus; 3180 uint16_t drop_xfer; 3181 3182 for (n = 0; n != max; n++) { 3183 /* Extra checks to avoid panic */ 3184 xfer = ppxfer[n]; 3185 if (xfer == NULL) 3186 continue; /* no USB transfer */ 3187 xroot = xfer->xroot; 3188 if (xroot == NULL) 3189 continue; /* no USB root */ 3190 udev = xroot->udev; 3191 if (udev == NULL) 3192 continue; /* no USB device */ 3193 if (udev->bus == NULL) 3194 continue; /* no BUS structure */ 3195 if (udev->bus->methods == NULL) 3196 continue; /* no BUS methods */ 3197 if (udev->bus->methods->xfer_poll == NULL) 3198 continue; /* no poll method */ 3199 3200 /* make sure that the BUS mutex is not locked */ 3201 drop_bus = 0; 3202 while (mtx_owned(&xroot->udev->bus->bus_mtx) && !SCHEDULER_STOPPED()) { 3203 mtx_unlock(&xroot->udev->bus->bus_mtx); 3204 drop_bus++; 3205 } 3206 3207 /* make sure that the transfer mutex is not locked */ 3208 drop_xfer = 0; 3209 while (mtx_owned(xroot->xfer_mtx) && !SCHEDULER_STOPPED()) { 3210 mtx_unlock(xroot->xfer_mtx); 3211 drop_xfer++; 3212 } 3213 3214 /* Make sure cv_signal() and cv_broadcast() is not called */ 3215 udev->bus->control_xfer_proc.up_msleep = 0; 3216 udev->bus->explore_proc.up_msleep = 0; 3217 udev->bus->giant_callback_proc.up_msleep = 0; 3218 udev->bus->non_giant_callback_proc.up_msleep = 0; 3219 3220 /* poll USB hardware */ 3221 (udev->bus->methods->xfer_poll) (udev->bus); 3222 3223 USB_BUS_LOCK(xroot->bus); 3224 3225 /* check for clear stall */ 3226 if (udev->ctrl_xfer[1] != NULL) { 3227 3228 /* poll clear stall start */ 3229 pm = &udev->cs_msg[0].hdr; 3230 (pm->pm_callback) (pm); 3231 /* poll clear stall done thread */ 3232 pm = &udev->ctrl_xfer[1]-> 3233 xroot->done_m[0].hdr; 3234 (pm->pm_callback) (pm); 3235 } 3236 3237 /* poll done thread */ 3238 pm = &xroot->done_m[0].hdr; 3239 (pm->pm_callback) (pm); 3240 3241 USB_BUS_UNLOCK(xroot->bus); 3242 3243 /* restore transfer mutex */ 3244 while (drop_xfer--) 3245 mtx_lock(xroot->xfer_mtx); 3246 3247 /* restore BUS mutex */ 3248 while (drop_bus--) 3249 mtx_lock(&xroot->udev->bus->bus_mtx); 3250 } 3251 } 3252 3253 static void 3254 usbd_get_std_packet_size(struct usb_std_packet_size *ptr, 3255 uint8_t type, enum usb_dev_speed speed) 3256 { 3257 static const uint16_t intr_range_max[USB_SPEED_MAX] = { 3258 [USB_SPEED_LOW] = 8, 3259 [USB_SPEED_FULL] = 64, 3260 [USB_SPEED_HIGH] = 1024, 3261 [USB_SPEED_VARIABLE] = 1024, 3262 [USB_SPEED_SUPER] = 1024, 3263 }; 3264 3265 static const uint16_t isoc_range_max[USB_SPEED_MAX] = { 3266 [USB_SPEED_LOW] = 0, /* invalid */ 3267 [USB_SPEED_FULL] = 1023, 3268 [USB_SPEED_HIGH] = 1024, 3269 [USB_SPEED_VARIABLE] = 3584, 3270 [USB_SPEED_SUPER] = 1024, 3271 }; 3272 3273 static const uint16_t control_min[USB_SPEED_MAX] = { 3274 [USB_SPEED_LOW] = 8, 3275 [USB_SPEED_FULL] = 8, 3276 [USB_SPEED_HIGH] = 64, 3277 [USB_SPEED_VARIABLE] = 512, 3278 [USB_SPEED_SUPER] = 512, 3279 }; 3280 3281 static const uint16_t bulk_min[USB_SPEED_MAX] = { 3282 [USB_SPEED_LOW] = 8, 3283 [USB_SPEED_FULL] = 8, 3284 [USB_SPEED_HIGH] = 512, 3285 [USB_SPEED_VARIABLE] = 512, 3286 [USB_SPEED_SUPER] = 1024, 3287 }; 3288 3289 uint16_t temp; 3290 3291 memset(ptr, 0, sizeof(*ptr)); 3292 3293 switch (type) { 3294 case UE_INTERRUPT: 3295 ptr->range.max = 
intr_range_max[speed]; 3296 break; 3297 case UE_ISOCHRONOUS: 3298 ptr->range.max = isoc_range_max[speed]; 3299 break; 3300 default: 3301 if (type == UE_BULK) 3302 temp = bulk_min[speed]; 3303 else /* UE_CONTROL */ 3304 temp = control_min[speed]; 3305 3306 /* default is fixed */ 3307 ptr->fixed[0] = temp; 3308 ptr->fixed[1] = temp; 3309 ptr->fixed[2] = temp; 3310 ptr->fixed[3] = temp; 3311 3312 if (speed == USB_SPEED_FULL) { 3313 /* multiple sizes */ 3314 ptr->fixed[1] = 16; 3315 ptr->fixed[2] = 32; 3316 ptr->fixed[3] = 64; 3317 } 3318 if ((speed == USB_SPEED_VARIABLE) && 3319 (type == UE_BULK)) { 3320 /* multiple sizes */ 3321 ptr->fixed[2] = 1024; 3322 ptr->fixed[3] = 1536; 3323 } 3324 break; 3325 } 3326 } 3327 3328 void * 3329 usbd_xfer_softc(struct usb_xfer *xfer) 3330 { 3331 return (xfer->priv_sc); 3332 } 3333 3334 void * 3335 usbd_xfer_get_priv(struct usb_xfer *xfer) 3336 { 3337 return (xfer->priv_fifo); 3338 } 3339 3340 void 3341 usbd_xfer_set_priv(struct usb_xfer *xfer, void *ptr) 3342 { 3343 xfer->priv_fifo = ptr; 3344 } 3345 3346 uint8_t 3347 usbd_xfer_state(struct usb_xfer *xfer) 3348 { 3349 return (xfer->usb_state); 3350 } 3351 3352 void 3353 usbd_xfer_set_flag(struct usb_xfer *xfer, int flag) 3354 { 3355 switch (flag) { 3356 case USB_FORCE_SHORT_XFER: 3357 xfer->flags.force_short_xfer = 1; 3358 break; 3359 case USB_SHORT_XFER_OK: 3360 xfer->flags.short_xfer_ok = 1; 3361 break; 3362 case USB_MULTI_SHORT_OK: 3363 xfer->flags.short_frames_ok = 1; 3364 break; 3365 case USB_MANUAL_STATUS: 3366 xfer->flags.manual_status = 1; 3367 break; 3368 } 3369 } 3370 3371 void 3372 usbd_xfer_clr_flag(struct usb_xfer *xfer, int flag) 3373 { 3374 switch (flag) { 3375 case USB_FORCE_SHORT_XFER: 3376 xfer->flags.force_short_xfer = 0; 3377 break; 3378 case USB_SHORT_XFER_OK: 3379 xfer->flags.short_xfer_ok = 0; 3380 break; 3381 case USB_MULTI_SHORT_OK: 3382 xfer->flags.short_frames_ok = 0; 3383 break; 3384 case USB_MANUAL_STATUS: 3385 xfer->flags.manual_status = 0; 3386 break; 3387 } 3388 } 3389 3390 /* 3391 * The following function returns in milliseconds when the isochronous 3392 * transfer was completed by the hardware. The returned value wraps 3393 * around 65536 milliseconds. 3394 */ 3395 uint16_t 3396 usbd_xfer_get_timestamp(struct usb_xfer *xfer) 3397 { 3398 return (xfer->isoc_time_complete); 3399 } 3400
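
/*
 * Usage sketch (hypothetical driver code, not part of this file):
 * since the timestamp returned by "usbd_xfer_get_timestamp()" wraps
 * at 65536 milliseconds, differences should be computed in 16-bit
 * arithmetic so that the wrap-around is handled naturally:
 *
 *	uint16_t now = usbd_xfer_get_timestamp(xfer);
 *	uint16_t delta = now - sc->sc_last_ts;
 *	sc->sc_last_ts = now;
 */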