/*
 * Released under the GPLv2 only.
 * SPDX-License-Identifier: GPL-2.0
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/usb.h>
#include <linux/wait.h>
#include <linux/usb/hcd.h>
#include <linux/scatterlist.h>

#define to_urb(d) container_of(d, struct urb, kref)


static void urb_destroy(struct kref *kref)
{
	struct urb *urb = to_urb(kref);

	if (urb->transfer_flags & URB_FREE_BUFFER)
		kfree(urb->transfer_buffer);

	kfree(urb);
}

/**
 * usb_init_urb - initializes a urb so that it can be used by a USB driver
 * @urb: pointer to the urb to initialize
 *
 * Initializes a urb so that the USB subsystem can use it properly.
 *
 * If a urb is created with a call to usb_alloc_urb() it is not
 * necessary to call this function. Only use this if you allocate the
 * space for a struct urb on your own. If you call this function, be
 * careful when freeing the memory for your urb that it is no longer in
 * use by the USB core.
 *
 * Only use this function if you _really_ understand what you are doing.
 */
void usb_init_urb(struct urb *urb)
{
	if (urb) {
		memset(urb, 0, sizeof(*urb));
		kref_init(&urb->kref);
		INIT_LIST_HEAD(&urb->anchor_list);
	}
}
EXPORT_SYMBOL_GPL(usb_init_urb);

/**
 * usb_alloc_urb - creates a new urb for a USB driver to use
 * @iso_packets: number of iso packets for this urb
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list of
 *	valid options for this.
 *
 * Creates an urb for the USB driver to use, initializes a few internal
 * structures, increments the usage counter, and returns a pointer to it.
 *
 * If the driver wants to use this urb for interrupt, control, or bulk
 * endpoints, pass '0' as the number of iso packets.
 *
 * The driver must call usb_free_urb() when it is finished with the urb.
 *
 * Return: A pointer to the new urb, or %NULL if no memory is available.
 */
struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags)
{
	struct urb *urb;

	urb = kmalloc(sizeof(struct urb) +
		iso_packets * sizeof(struct usb_iso_packet_descriptor),
		mem_flags);
	if (!urb)
		return NULL;
	usb_init_urb(urb);
	return urb;
}
EXPORT_SYMBOL_GPL(usb_alloc_urb);

/**
 * usb_free_urb - frees the memory used by a urb when all users of it are finished
 * @urb: pointer to the urb to free, may be NULL
 *
 * Must be called when a user of a urb is finished with it. When the last user
 * of the urb calls this function, the memory of the urb is freed.
 *
 * Note: The transfer buffer associated with the urb is not freed unless the
 * URB_FREE_BUFFER transfer flag is set.
 */
void usb_free_urb(struct urb *urb)
{
	if (urb)
		kref_put(&urb->kref, urb_destroy);
}
EXPORT_SYMBOL_GPL(usb_free_urb);
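
/*
 * A minimal usage sketch (not part of this file): the usual allocate /
 * submit / release cycle for a non-isochronous URB. The device "udev",
 * the endpoint number, the buffer and the completion handler
 * "my_complete" are hypothetical; the point is only the reference
 * handling around usb_alloc_urb() and usb_free_urb().
 *
 *	struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
 *
 *	if (!urb)
 *		return -ENOMEM;
 *	usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, 2),
 *			  buf, len, my_complete, ctx);
 *	retval = usb_submit_urb(urb, GFP_KERNEL);
 *	usb_free_urb(urb);	// drop our reference; on successful submission
 *				// the core holds its own until completion
 *	return retval;
 */
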
/**
 * usb_get_urb - increments the reference count of the urb
 * @urb: pointer to the urb to modify, may be NULL
 *
 * This must be called whenever a urb is transferred from a device driver to a
 * host controller driver. This allows proper reference counting to happen
 * for urbs.
 *
 * Return: A pointer to the urb with the incremented reference counter.
 */
struct urb *usb_get_urb(struct urb *urb)
{
	if (urb)
		kref_get(&urb->kref);
	return urb;
}
EXPORT_SYMBOL_GPL(usb_get_urb);

/**
 * usb_anchor_urb - anchors an URB while it is processed
 * @urb: pointer to the urb to anchor
 * @anchor: pointer to the anchor
 *
 * This can be called to have access to URBs which are to be executed
 * without bothering to track them
 */
void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor)
{
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	usb_get_urb(urb);
	list_add_tail(&urb->anchor_list, &anchor->urb_list);
	urb->anchor = anchor;

	if (unlikely(anchor->poisoned))
		atomic_inc(&urb->reject);

	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_anchor_urb);

static int usb_anchor_check_wakeup(struct usb_anchor *anchor)
{
	return atomic_read(&anchor->suspend_wakeups) == 0 &&
		list_empty(&anchor->urb_list);
}

/* Callers must hold anchor->lock */
static void __usb_unanchor_urb(struct urb *urb, struct usb_anchor *anchor)
{
	urb->anchor = NULL;
	list_del(&urb->anchor_list);
	usb_put_urb(urb);
	if (usb_anchor_check_wakeup(anchor))
		wake_up(&anchor->wait);
}

/**
 * usb_unanchor_urb - unanchors an URB
 * @urb: pointer to the urb to unanchor
 *
 * Call this to stop the system keeping track of this URB
 */
void usb_unanchor_urb(struct urb *urb)
{
	unsigned long flags;
	struct usb_anchor *anchor;

	if (!urb)
		return;

	anchor = urb->anchor;
	if (!anchor)
		return;

	spin_lock_irqsave(&anchor->lock, flags);
	/*
	 * At this point, we could be competing with another thread which
	 * has the same intention. To protect the urb from being unanchored
	 * twice, only the winner of the race gets the job.
	 */
	if (likely(anchor == urb->anchor))
		__usb_unanchor_urb(urb, anchor);
	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unanchor_urb);
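
/*
 * A sketch of the anchoring pattern described above (illustrative only,
 * not part of usbcore). A driver anchors each URB before submission so
 * that everything still in flight can later be cancelled with one call;
 * the names "priv", "submitted" and the disconnect context are made up.
 *
 *	init_usb_anchor(&priv->submitted);	// once, e.g. at probe time
 *
 *	usb_anchor_urb(urb, &priv->submitted);
 *	retval = usb_submit_urb(urb, GFP_KERNEL);
 *	if (retval)
 *		usb_unanchor_urb(urb);		// submission failed
 *
 *	// later, e.g. in disconnect():
 *	usb_kill_anchored_urbs(&priv->submitted);
 */
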
/*-------------------------------------------------------------------*/

/**
 * usb_submit_urb - issue an asynchronous transfer request for an endpoint
 * @urb: pointer to the urb describing the request
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list
 *	of valid options for this.
 *
 * This submits a transfer request, and transfers control of the URB
 * describing that request to the USB subsystem. Request completion will
 * be indicated later, asynchronously, by calling the completion handler.
 * The three types of completion are success, error, and unlink
 * (a software-induced fault, also called "request cancellation").
 *
 * URBs may be submitted in interrupt context.
 *
 * The caller must have correctly initialized the URB before submitting
 * it. Functions such as usb_fill_bulk_urb() and usb_fill_control_urb() are
 * available to ensure that most fields are correctly initialized, for
 * the particular kind of transfer, although they will not initialize
 * any transfer flags.
 *
 * If the submission is successful, the complete() callback from the URB
 * will be called exactly once, when the USB core and Host Controller Driver
 * (HCD) are finished with the URB. When the completion function is called,
 * control of the URB is returned to the device driver which issued the
 * request. The completion handler may then immediately free or reuse that
 * URB.
 *
 * With few exceptions, USB device drivers should never access URB fields
 * provided by usbcore or the HCD until its complete() is called.
 * The exceptions relate to periodic transfer scheduling. For both
 * interrupt and isochronous urbs, as part of successful URB submission
 * urb->interval is modified to reflect the actual transfer period used
 * (normally some power of two units). And for isochronous urbs,
 * urb->start_frame is modified to reflect when the URB's transfers were
 * scheduled to start.
 *
 * Not all isochronous transfer scheduling policies will work, but most
 * host controller drivers should easily handle ISO queues going from now
 * until 10-200 msec into the future. Drivers should try to keep at
 * least one or two msec of data in the queue; many controllers require
 * that new transfers start at least 1 msec in the future when they are
 * added. If the driver is unable to keep up and the queue empties out,
 * the behavior for new submissions is governed by the URB_ISO_ASAP flag.
 * If the flag is set, or if the queue is idle, then the URB is always
 * assigned to the first available (and not yet expired) slot in the
 * endpoint's schedule. If the flag is not set and the queue is active
 * then the URB is always assigned to the next slot in the schedule
 * following the end of the endpoint's previous URB, even if that slot is
 * in the past. When a packet is assigned in this way to a slot that has
 * already expired, the packet is not transmitted and the corresponding
 * usb_iso_packet_descriptor's status field will return -EXDEV. If this
 * would happen to all the packets in the URB, submission fails with a
 * -EXDEV error code.
 *
 * For control endpoints, the synchronous usb_control_msg() call is
 * often used (in non-interrupt context) instead of this call.
 * That is often used through convenience wrappers, for the requests
 * that are standardized in the USB 2.0 specification. For bulk
 * endpoints, a synchronous usb_bulk_msg() call is available.
 *
 * Return:
 * 0 on successful submissions. A negative error number otherwise.
 *
 * Request Queuing:
 *
 * URBs may be submitted to endpoints before previous ones complete, to
 * minimize the impact of interrupt latencies and system overhead on data
 * throughput. With that queuing policy, an endpoint's queue would never
 * be empty. This is required for continuous isochronous data streams,
 * and may also be required for some kinds of interrupt transfers. Such
 * queuing also maximizes bandwidth utilization by letting USB controllers
 * start work on later requests before driver software has finished the
 * completion processing for earlier (successful) requests.
 *
 * As of Linux 2.6, all USB endpoint transfer queues support depths greater
 * than one. This was previously a HCD-specific behavior, except for ISO
 * transfers. Non-isochronous endpoint queues are inactive during cleanup
 * after faults (transfer errors or cancellation).
 *
 * Reserved Bandwidth Transfers:
 *
 * Periodic transfers (interrupt or isochronous) are performed repeatedly,
 * using the interval specified in the urb. Submitting the first urb to
 * the endpoint reserves the bandwidth necessary to make those transfers.
 * If the USB subsystem can't allocate sufficient bandwidth to perform
 * the periodic request, submitting such a periodic request should fail.
 *
 * For devices under xHCI, the bandwidth is reserved at configuration time, or
 * when the alt setting is selected. If there is not enough bus bandwidth, the
 * configuration/alt setting request will fail. Therefore, submissions to
 * periodic endpoints on devices under xHCI should never fail due to bandwidth
 * constraints.
 *
 * Device drivers must explicitly request that repetition, by ensuring that
 * some URB is always on the endpoint's queue (except possibly for short
 * periods during completion callbacks). When there is no longer an urb
 * queued, the endpoint's bandwidth reservation is canceled. This means
 * drivers can use their completion handlers to ensure they keep bandwidth
 * they need, by reinitializing and resubmitting the just-completed urb
 * until the driver no longer needs that periodic bandwidth.
 *
 * Memory Flags:
 *
 * The general rules for how to decide which mem_flags to use
 * are the same as for kmalloc. There are four
 * different possible values; GFP_KERNEL, GFP_NOFS, GFP_NOIO and
 * GFP_ATOMIC.
 *
 * GFP_NOFS is not ever used, as it has not been implemented yet.
 *
 * GFP_ATOMIC is used when
 *   (a) you are inside a completion handler, an interrupt, bottom half,
 *       tasklet or timer, or
 *   (b) you are holding a spinlock or rwlock (does not apply to
 *       semaphores), or
 *   (c) current->state != TASK_RUNNING, this is the case only after
 *       you've changed it.
 *
 * GFP_NOIO is used in the block io path and error handling of storage
 * devices.
 *
 * All other situations use GFP_KERNEL.
 *
 * Some more specific rules for mem_flags can be inferred, such as
 *  (1) start_xmit, timeout, and receive methods of network drivers must
 *      use GFP_ATOMIC (they are called with a spinlock held);
 *  (2) queuecommand methods of scsi drivers must use GFP_ATOMIC (also
 *      called with a spinlock held);
 *  (3) If you use a kernel thread with a network driver you must use
 *      GFP_NOIO, unless (b) or (c) apply;
 *  (4) after you have done a down() you can use GFP_KERNEL, unless (b) or (c)
 *      apply or you are in a storage driver's block io path;
 *  (5) USB probe and disconnect can use GFP_KERNEL unless (b) or (c) apply; and
 *  (6) changing firmware on a running storage or net device uses
 *      GFP_NOIO, unless (b) or (c) apply
 *
 */
int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
{
	static int pipetypes[4] = {
		PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
	};
	int xfertype, max;
	struct usb_device *dev;
	struct usb_host_endpoint *ep;
	int is_out;
	unsigned int allowed;

	if (!urb || !urb->complete)
		return -EINVAL;
	if (urb->hcpriv) {
		WARN_ONCE(1, "URB %pK submitted while active\n", urb);
		return -EBUSY;
	}

	dev = urb->dev;
	if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED))
		return -ENODEV;

	/* For now, get the endpoint from the pipe. Eventually drivers
	 * will be required to set urb->ep directly and we will eliminate
	 * urb->pipe.
	 */
	ep = usb_pipe_endpoint(dev, urb->pipe);
	if (!ep)
		return -ENOENT;

	urb->ep = ep;
	urb->status = -EINPROGRESS;
	urb->actual_length = 0;

	/* Lots of sanity checks, so HCDs can rely on clean data
	 * and don't need to duplicate tests
	 */
	xfertype = usb_endpoint_type(&ep->desc);
	if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
		struct usb_ctrlrequest *setup =
				(struct usb_ctrlrequest *) urb->setup_packet;

		if (!setup)
			return -ENOEXEC;
		is_out = !(setup->bRequestType & USB_DIR_IN) ||
				!setup->wLength;
	} else {
		is_out = usb_endpoint_dir_out(&ep->desc);
	}

	/* Clear the internal flags and cache the direction for later use */
	urb->transfer_flags &= ~(URB_DIR_MASK | URB_DMA_MAP_SINGLE |
			URB_DMA_MAP_PAGE | URB_DMA_MAP_SG | URB_MAP_LOCAL |
			URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL |
			URB_DMA_SG_COMBINED);
	urb->transfer_flags |= (is_out ? URB_DIR_OUT : URB_DIR_IN);

	if (xfertype != USB_ENDPOINT_XFER_CONTROL &&
	    dev->state < USB_STATE_CONFIGURED)
		return -ENODEV;

	max = usb_endpoint_maxp(&ep->desc);
	if (max <= 0) {
		dev_dbg(&dev->dev,
			"bogus endpoint ep%d%s in %s (bad maxpacket %d)\n",
			usb_endpoint_num(&ep->desc), is_out ? "out" : "in",
			__func__, max);
		return -EMSGSIZE;
	}

	/* periodic transfers limit size per frame/uframe,
	 * but drivers only control those sizes for ISO.
	 * while we're checking, initialize return status.
	 */
	if (xfertype == USB_ENDPOINT_XFER_ISOC) {
		int n, len;

		/* SuperSpeed isoc endpoints have up to 16 bursts of up to
		 * 3 packets each
		 */
		if (dev->speed >= USB_SPEED_SUPER) {
			int burst = 1 + ep->ss_ep_comp.bMaxBurst;
			int mult = USB_SS_MULT(ep->ss_ep_comp.bmAttributes);
			max *= burst;
			max *= mult;
		}

		/* "high bandwidth" mode, 1-3 packets/uframe? */
		if (dev->speed == USB_SPEED_HIGH)
			max *= usb_endpoint_maxp_mult(&ep->desc);

		if (urb->number_of_packets <= 0)
			return -EINVAL;
		for (n = 0; n < urb->number_of_packets; n++) {
			len = urb->iso_frame_desc[n].length;
			if (len < 0 || len > max)
				return -EMSGSIZE;
			urb->iso_frame_desc[n].status = -EXDEV;
			urb->iso_frame_desc[n].actual_length = 0;
		}
	} else if (urb->num_sgs && !urb->dev->bus->no_sg_constraint &&
			dev->speed != USB_SPEED_WIRELESS) {
		struct scatterlist *sg;
		int i;

		for_each_sg(urb->sg, sg, urb->num_sgs - 1, i)
			if (sg->length % max)
				return -EINVAL;
	}

	/* the I/O buffer must be mapped/unmapped, except when length=0 */
	if (urb->transfer_buffer_length > INT_MAX)
		return -EMSGSIZE;

	/*
	 * stuff that drivers shouldn't do, but which shouldn't
	 * cause problems in HCDs if they get it wrong.
	 */

	/* Check that the pipe's type matches the endpoint's type */
	if (usb_pipetype(urb->pipe) != pipetypes[xfertype])
		dev_WARN(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n",
			usb_pipetype(urb->pipe), pipetypes[xfertype]);

	/* Check against a simple/standard policy */
	allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT | URB_DIR_MASK |
			URB_FREE_BUFFER);
	switch (xfertype) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		if (is_out)
			allowed |= URB_ZERO_PACKET;
		/* FALLTHROUGH */
	case USB_ENDPOINT_XFER_CONTROL:
		allowed |= URB_NO_FSBR;	/* only affects UHCI */
		/* FALLTHROUGH */
	default:			/* all non-iso endpoints */
		if (!is_out)
			allowed |= URB_SHORT_NOT_OK;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		allowed |= URB_ISO_ASAP;
		break;
	}
	allowed &= urb->transfer_flags;

	/* warn if submitter gave bogus flags */
	if (allowed != urb->transfer_flags)
		dev_WARN(&dev->dev, "BOGUS urb flags, %x --> %x\n",
			urb->transfer_flags, allowed);

	/*
	 * Force periodic transfer intervals to be legal values that are
	 * a power of two (so HCDs don't need to).
	 *
	 * FIXME want bus->{intr,iso}_sched_horizon values here. Each HC
	 * supports different values... this uses EHCI/UHCI defaults (and
	 * EHCI can use smaller non-default values).
	 */
	switch (xfertype) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		/* too small? */
		switch (dev->speed) {
		case USB_SPEED_WIRELESS:
			if ((urb->interval < 6)
				&& (xfertype == USB_ENDPOINT_XFER_INT))
				return -EINVAL;
			/* fall through */
		default:
			if (urb->interval <= 0)
				return -EINVAL;
			break;
		}
		/* too big? */
		switch (dev->speed) {
		case USB_SPEED_SUPER_PLUS:
		case USB_SPEED_SUPER:	/* units are 125us */
			/* Handle up to 2^(16-1) microframes */
			if (urb->interval > (1 << 15))
				return -EINVAL;
			max = 1 << 15;
			break;
		case USB_SPEED_WIRELESS:
			if (urb->interval > 16)
				return -EINVAL;
			break;
		case USB_SPEED_HIGH:	/* units are microframes */
			/* NOTE usb handles 2^15 */
			if (urb->interval > (1024 * 8))
				urb->interval = 1024 * 8;
			max = 1024 * 8;
			break;
		case USB_SPEED_FULL:	/* units are frames/msec */
		case USB_SPEED_LOW:
			if (xfertype == USB_ENDPOINT_XFER_INT) {
				if (urb->interval > 255)
					return -EINVAL;
				/* NOTE ohci only handles up to 32 */
				max = 128;
			} else {
				if (urb->interval > 1024)
					urb->interval = 1024;
				/* NOTE usb and ohci handle up to 2^15 */
				max = 1024;
			}
			break;
		default:
			return -EINVAL;
		}
		if (dev->speed != USB_SPEED_WIRELESS) {
			/* Round down to a power of 2, no more than max */
			urb->interval = min(max, 1 << ilog2(urb->interval));
		}
	}

	return usb_hcd_submit_urb(urb, mem_flags);
}
EXPORT_SYMBOL_GPL(usb_submit_urb);
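
/*
 * An illustrative completion/resubmit sketch for the rules above (not
 * part of this file). An interrupt URB is resubmitted from its own
 * completion handler, which runs in atomic context, so GFP_ATOMIC is
 * used there, while the initial submission from probe() may use
 * GFP_KERNEL. The name "my_int_complete" and the error handling policy
 * are hypothetical.
 *
 *	static void my_int_complete(struct urb *urb)
 *	{
 *		if (urb->status) {
 *			// -ENOENT, -ECONNRESET, -ESHUTDOWN: unlinked/killed
 *			return;
 *		}
 *		// ... process urb->transfer_buffer / urb->actual_length ...
 *		usb_submit_urb(urb, GFP_ATOMIC);
 *	}
 */
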
/*-------------------------------------------------------------------*/

/**
 * usb_unlink_urb - abort/cancel a transfer request for an endpoint
 * @urb: pointer to urb describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request. URBs complete only once
 * per submission, and may be canceled only once per submission.
 * Successful cancellation means termination of @urb will be expedited
 * and the completion handler will be called with a status code
 * indicating that the request has been canceled (rather than any other
 * code).
 *
 * Drivers should not call this routine or related routines, such as
 * usb_kill_urb() or usb_unlink_anchored_urbs(), after their disconnect
 * method has returned. The disconnect function should synchronize with
 * a driver's I/O routines to ensure that all URB-related activity has
 * completed before it returns.
 *
 * This request is asynchronous, however the HCD might call the ->complete()
 * callback during unlink. Therefore when drivers call usb_unlink_urb(), they
 * must not hold any locks that may be taken by the completion function.
 * Success is indicated by returning -EINPROGRESS, at which time the URB will
 * probably not yet have been given back to the device driver. When it is
 * eventually called, the completion function will see @urb->status ==
 * -ECONNRESET.
 * Failure is indicated by usb_unlink_urb() returning any other value.
 * Unlinking will fail when @urb is not currently "linked" (i.e., it was
 * never submitted, or it was unlinked before, or the hardware is already
 * finished with it), even if the completion handler has not yet run.
 *
 * The URB must not be deallocated while this routine is running. In
 * particular, when a driver calls this routine, it must ensure that the
 * completion handler cannot deallocate the URB.
 *
 * Return: -EINPROGRESS on success. See description for other values on
 * failure.
 *
 * Unlinking and Endpoint Queues:
 *
 * [The behaviors and guarantees described below do not apply to virtual
 * root hubs but only to endpoint queues for physical USB devices.]
 *
 * Host Controller Drivers (HCDs) place all the URBs for a particular
 * endpoint in a queue. Normally the queue advances as the controller
 * hardware processes each request. But when an URB terminates with an
 * error its queue generally stops (see below), at least until that URB's
 * completion routine returns. It is guaranteed that a stopped queue
 * will not restart until all its unlinked URBs have been fully retired,
 * with their completion routines run, even if that's not until some time
 * after the original completion handler returns. The same behavior and
 * guarantee apply when an URB terminates because it was unlinked.
 *
 * Bulk and interrupt endpoint queues are guaranteed to stop whenever an
 * URB terminates with any sort of error, including -ECONNRESET, -ENOENT,
 * and -EREMOTEIO. Control endpoint queues behave the same way except
 * that they are not guaranteed to stop for -EREMOTEIO errors. Queues
 * for isochronous endpoints are treated differently, because they must
 * advance at fixed rates. Such queues do not stop when an URB
 * encounters an error or is unlinked. An unlinked isochronous URB may
 * leave a gap in the stream of packets; it is undefined whether such
 * gaps can be filled in.
 *
 * Note that early termination of an URB because a short packet was
 * received will generate a -EREMOTEIO error if and only if the
 * URB_SHORT_NOT_OK flag is set.
 * By setting this flag, USB device drivers can build deep queues for
 * large or complex bulk transfers and clean them up reliably after any
 * sort of aborted transfer by unlinking all pending URBs at the first
 * fault.
 *
 * When a control URB terminates with an error other than -EREMOTEIO, it
 * is quite likely that the status stage of the transfer will not take
 * place.
 */
int usb_unlink_urb(struct urb *urb)
{
	if (!urb)
		return -EINVAL;
	if (!urb->dev)
		return -ENODEV;
	if (!urb->ep)
		return -EIDRM;
	return usb_hcd_unlink_urb(urb, -ECONNRESET);
}
EXPORT_SYMBOL_GPL(usb_unlink_urb);

/**
 * usb_kill_urb - cancel a transfer request and wait for it to finish
 * @urb: pointer to URB describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request. It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and available for reuse. These features make
 * this an ideal way to stop I/O in a disconnect() callback or close()
 * function. If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * While the routine is running, attempts to resubmit the URB will fail
 * with error -EPERM. Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * The URB must not be deallocated while this routine is running. In
 * particular, when a driver calls this routine, it must ensure that the
 * completion handler cannot deallocate the URB.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_kill_urb(struct urb *urb)
{
	might_sleep();
	if (!(urb && urb->dev && urb->ep))
		return;
	atomic_inc(&urb->reject);

	usb_hcd_unlink_urb(urb, -ENOENT);
	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);

	atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_kill_urb);
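
/*
 * Sketch of the usual way to stop I/O with usb_kill_urb() (illustrative
 * only; assumes a hypothetical driver keeping a single interrupt URB in
 * its private structure "priv"):
 *
 *	static void my_disconnect(struct usb_interface *intf)
 *	{
 *		struct my_priv *priv = usb_get_intfdata(intf);
 *
 *		usb_kill_urb(priv->int_urb);	// waits; completion has run
 *		usb_free_urb(priv->int_urb);
 *	}
 */
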
/**
 * usb_poison_urb - reliably kill a transfer and prevent further use of an URB
 * @urb: pointer to URB describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request. It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and cannot be reused. These features make
 * this an ideal way to stop I/O in a disconnect() callback.
 * If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * After and while the routine runs, attempts to resubmit the URB will fail
 * with error -EPERM. Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * The URB must not be deallocated while this routine is running. In
 * particular, when a driver calls this routine, it must ensure that the
 * completion handler cannot deallocate the URB.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_urb(struct urb *urb)
{
	might_sleep();
	if (!urb)
		return;
	atomic_inc(&urb->reject);

	if (!urb->dev || !urb->ep)
		return;

	usb_hcd_unlink_urb(urb, -ENOENT);
	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
}
EXPORT_SYMBOL_GPL(usb_poison_urb);

void usb_unpoison_urb(struct urb *urb)
{
	if (!urb)
		return;

	atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_unpoison_urb);

/**
 * usb_block_urb - reliably prevent further use of an URB
 * @urb: pointer to URB to be blocked, may be NULL
 *
 * After the routine has run, attempts to resubmit the URB will fail
 * with error -EPERM. Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * The URB must not be deallocated while this routine is running. In
 * particular, when a driver calls this routine, it must ensure that the
 * completion handler cannot deallocate the URB.
 */
void usb_block_urb(struct urb *urb)
{
	if (!urb)
		return;

	atomic_inc(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_block_urb);
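
/*
 * Illustrative poison/unpoison pairing (not from this file): a driver
 * might poison its URB while an interface is suspended so that stray
 * resubmission attempts fail with -EPERM, and unpoison it on resume.
 * "priv" and the choice of GFP_NOIO on the resume path are assumptions,
 * not requirements of this API.
 *
 *	// suspend path
 *	usb_poison_urb(priv->int_urb);
 *
 *	// resume path
 *	usb_unpoison_urb(priv->int_urb);
 *	usb_submit_urb(priv->int_urb, GFP_NOIO);
 */
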
/**
 * usb_kill_anchored_urbs - cancel transfer requests en masse
 * @anchor: anchor the requests are bound to
 *
 * This allows all outstanding URBs to be killed starting
 * from the back of the queue.
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_kill_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	spin_lock_irq(&anchor->lock);
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		/* we must make sure the URB isn't freed before we kill it */
		usb_get_urb(victim);
		spin_unlock_irq(&anchor->lock);
		/* this will unanchor the URB */
		usb_kill_urb(victim);
		usb_put_urb(victim);
		spin_lock_irq(&anchor->lock);
	}
	spin_unlock_irq(&anchor->lock);
}
EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);


/**
 * usb_poison_anchored_urbs - cease all traffic from an anchor
 * @anchor: anchor the requests are bound to
 *
 * This allows all outstanding URBs to be poisoned starting
 * from the back of the queue. Newly added URBs will also be
 * poisoned.
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	spin_lock_irq(&anchor->lock);
	anchor->poisoned = 1;
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		/* we must make sure the URB isn't freed before we kill it */
		usb_get_urb(victim);
		spin_unlock_irq(&anchor->lock);
		/* this will unanchor the URB */
		usb_poison_urb(victim);
		usb_put_urb(victim);
		spin_lock_irq(&anchor->lock);
	}
	spin_unlock_irq(&anchor->lock);
}
EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);

/**
 * usb_unpoison_anchored_urbs - let an anchor be used successfully again
 * @anchor: anchor the requests are bound to
 *
 * Reverses the effect of usb_poison_anchored_urbs();
 * the anchor can be used normally after it returns.
 */
void usb_unpoison_anchored_urbs(struct usb_anchor *anchor)
{
	unsigned long flags;
	struct urb *lazarus;

	spin_lock_irqsave(&anchor->lock, flags);
	list_for_each_entry(lazarus, &anchor->urb_list, anchor_list) {
		usb_unpoison_urb(lazarus);
	}
	anchor->poisoned = 0;
	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unpoison_anchored_urbs);

/**
 * usb_unlink_anchored_urbs - asynchronously cancel transfer requests en masse
 * @anchor: anchor the requests are bound to
 *
 * This allows all outstanding URBs to be unlinked starting
 * from the back of the queue. This function is asynchronous.
 * The unlinking is just triggered. It may happen after this
 * function has returned.
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_unlink_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	while ((victim = usb_get_from_anchor(anchor)) != NULL) {
		usb_unlink_urb(victim);
		usb_put_urb(victim);
	}
}
EXPORT_SYMBOL_GPL(usb_unlink_anchored_urbs);

/**
 * usb_anchor_suspend_wakeups
 * @anchor: the anchor you want to suspend wakeups on
 *
 * Call this to stop the last urb being unanchored from waking up any
 * usb_wait_anchor_empty_timeout waiters. This is used in the hcd urb give-
 * back path to delay waking up until after the completion handler has run.
 */
void usb_anchor_suspend_wakeups(struct usb_anchor *anchor)
{
	if (anchor)
		atomic_inc(&anchor->suspend_wakeups);
}
EXPORT_SYMBOL_GPL(usb_anchor_suspend_wakeups);

/**
 * usb_anchor_resume_wakeups
 * @anchor: the anchor you want to resume wakeups on
 *
 * Allow usb_wait_anchor_empty_timeout waiters to be woken up again, and
 * wake up any current waiters if the anchor is empty.
 */
void usb_anchor_resume_wakeups(struct usb_anchor *anchor)
{
	if (!anchor)
		return;

	atomic_dec(&anchor->suspend_wakeups);
	if (usb_anchor_check_wakeup(anchor))
		wake_up(&anchor->wait);
}
EXPORT_SYMBOL_GPL(usb_anchor_resume_wakeups);

/**
 * usb_wait_anchor_empty_timeout - wait for an anchor to be unused
 * @anchor: the anchor you want to become unused
 * @timeout: how long you are willing to wait in milliseconds
 *
 * Call this if you want to be sure all an anchor's
 * URBs have finished.
 *
 * Return: Non-zero if the anchor became unused. Zero on timeout.
 */
int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor,
				  unsigned int timeout)
{
	return wait_event_timeout(anchor->wait,
				  usb_anchor_check_wakeup(anchor),
				  msecs_to_jiffies(timeout));
}
EXPORT_SYMBOL_GPL(usb_wait_anchor_empty_timeout);
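
/*
 * Sketch of waiting for anchored URBs to drain before forcing the issue
 * (illustrative only; "priv->submitted" is a hypothetical anchor and the
 * 1000 ms budget is arbitrary):
 *
 *	if (!usb_wait_anchor_empty_timeout(&priv->submitted, 1000))
 *		// still busy after 1000 ms, cancel whatever is left
 *		usb_kill_anchored_urbs(&priv->submitted);
 */
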
/**
 * usb_get_from_anchor - get an anchor's oldest urb
 * @anchor: the anchor whose urb you want
 *
 * This will take the oldest urb from an anchor,
 * unanchor and return it.
 *
 * Return: The oldest urb from @anchor, or %NULL if @anchor has no
 * urbs associated with it.
 */
struct urb *usb_get_from_anchor(struct usb_anchor *anchor)
{
	struct urb *victim;
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	if (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.next, struct urb,
				    anchor_list);
		usb_get_urb(victim);
		__usb_unanchor_urb(victim, anchor);
	} else {
		victim = NULL;
	}
	spin_unlock_irqrestore(&anchor->lock, flags);

	return victim;
}
EXPORT_SYMBOL_GPL(usb_get_from_anchor);

/**
 * usb_scuttle_anchored_urbs - unanchor all an anchor's urbs
 * @anchor: the anchor whose urbs you want to unanchor
 *
 * Use this to get rid of all an anchor's urbs.
 */
void usb_scuttle_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		__usb_unanchor_urb(victim, anchor);
	}
	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs);

/**
 * usb_anchor_empty - is an anchor empty
 * @anchor: the anchor you want to query
 *
 * Return: 1 if the anchor has no urbs associated with it.
 */
int usb_anchor_empty(struct usb_anchor *anchor)
{
	return list_empty(&anchor->urb_list);
}
EXPORT_SYMBOL_GPL(usb_anchor_empty);
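
/*
 * Illustrative drain loop built on usb_get_from_anchor() (not part of
 * this file; "priv->submitted" is hypothetical). Each URB returned
 * carries an extra reference that the caller must drop, which is the
 * same pattern usb_unlink_anchored_urbs() uses internally:
 *
 *	struct urb *urb;
 *
 *	while ((urb = usb_get_from_anchor(&priv->submitted)) != NULL) {
 *		usb_unlink_urb(urb);
 *		usb_put_urb(urb);
 *	}
 */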