// SPDX-License-Identifier: GPL-2.0
/*
 * Released under the GPLv2 only.
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/kmsan.h>
#include <linux/usb.h>
#include <linux/wait.h>
#include <linux/usb/hcd.h>
#include <linux/scatterlist.h>

#define to_urb(d) container_of(d, struct urb, kref)


static void urb_destroy(struct kref *kref)
{
        struct urb *urb = to_urb(kref);

        if (urb->transfer_flags & URB_FREE_BUFFER)
                kfree(urb->transfer_buffer);

        kfree(urb);
}

/**
 * usb_init_urb - initializes a urb so that it can be used by a USB driver
 * @urb: pointer to the urb to initialize
 *
 * Initializes a urb so that the USB subsystem can use it properly.
 *
 * If a urb is created with a call to usb_alloc_urb() it is not
 * necessary to call this function. Only use this if you allocate the
 * space for a struct urb on your own. If you call this function, be
 * careful when freeing the memory for your urb that it is no longer in
 * use by the USB core.
 *
 * Only use this function if you _really_ understand what you are doing.
 */
void usb_init_urb(struct urb *urb)
{
        if (urb) {
                memset(urb, 0, sizeof(*urb));
                kref_init(&urb->kref);
                INIT_LIST_HEAD(&urb->urb_list);
                INIT_LIST_HEAD(&urb->anchor_list);
        }
}
EXPORT_SYMBOL_GPL(usb_init_urb);

/**
 * usb_alloc_urb - creates a new urb for a USB driver to use
 * @iso_packets: number of iso packets for this urb
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list of
 *      valid options for this.
 *
 * Creates an urb for the USB driver to use, initializes a few internal
 * structures, increments the usage counter, and returns a pointer to it.
 *
 * If the driver wants to use this urb for interrupt, control, or bulk
 * endpoints, pass '0' as the number of iso packets.
 *
 * The driver must call usb_free_urb() when it is finished with the urb.
 *
 * Return: A pointer to the new urb, or %NULL if no memory is available.
 */
struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags)
{
        struct urb *urb;

        urb = kmalloc(struct_size(urb, iso_frame_desc, iso_packets),
                      mem_flags);
        if (!urb)
                return NULL;
        usb_init_urb(urb);
        return urb;
}
EXPORT_SYMBOL_GPL(usb_alloc_urb);

/**
 * usb_free_urb - frees the memory used by a urb when all users of it are finished
 * @urb: pointer to the urb to free, may be NULL
 *
 * Must be called when a user of a urb is finished with it. When the last user
 * of the urb calls this function, the memory of the urb is freed.
 *
 * Note: The transfer buffer associated with the urb is not freed unless the
 * URB_FREE_BUFFER transfer flag is set.
 */
void usb_free_urb(struct urb *urb)
{
        if (urb)
                kref_put(&urb->kref, urb_destroy);
}
EXPORT_SYMBOL_GPL(usb_free_urb);
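
/*
 * Usage sketch (illustrative only): a driver typically pairs usb_alloc_urb()
 * with usb_free_urb(). Passing 0 iso packets selects a bulk, interrupt or
 * control urb; setting URB_FREE_BUFFER makes urb_destroy() above kfree() the
 * transfer buffer when the last reference is dropped. The 64-byte size and
 * the GFP_KERNEL flags are arbitrary assumptions.
 *
 *      struct urb *urb;
 *      void *buf;
 *
 *      urb = usb_alloc_urb(0, GFP_KERNEL);
 *      if (!urb)
 *              return -ENOMEM;
 *      buf = kmalloc(64, GFP_KERNEL);
 *      if (!buf) {
 *              usb_free_urb(urb);
 *              return -ENOMEM;
 *      }
 *      urb->transfer_buffer = buf;
 *      urb->transfer_flags |= URB_FREE_BUFFER;
 *      ... fill in the remaining fields, submit, wait for completion ...
 *      usb_free_urb(urb);
 */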

/**
 * usb_get_urb - increments the reference count of the urb
 * @urb: pointer to the urb to modify, may be NULL
 *
 * This must be called whenever a urb is transferred from a device driver to a
 * host controller driver. This allows proper reference counting to happen
 * for urbs.
 *
 * Return: A pointer to the urb with the incremented reference counter.
 */
struct urb *usb_get_urb(struct urb *urb)
{
        if (urb)
                kref_get(&urb->kref);
        return urb;
}
EXPORT_SYMBOL_GPL(usb_get_urb);

/**
 * usb_anchor_urb - anchors an URB while it is processed
 * @urb: pointer to the urb to anchor
 * @anchor: pointer to the anchor
 *
 * This can be called to have access to URBs which are to be executed
 * without having to track them individually.
 */
void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor)
{
        unsigned long flags;

        spin_lock_irqsave(&anchor->lock, flags);
        usb_get_urb(urb);
        list_add_tail(&urb->anchor_list, &anchor->urb_list);
        urb->anchor = anchor;

        if (unlikely(anchor->poisoned))
                atomic_inc(&urb->reject);

        spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_anchor_urb);

static int usb_anchor_check_wakeup(struct usb_anchor *anchor)
{
        return atomic_read(&anchor->suspend_wakeups) == 0 &&
                list_empty(&anchor->urb_list);
}

/* Callers must hold anchor->lock */
static void __usb_unanchor_urb(struct urb *urb, struct usb_anchor *anchor)
{
        urb->anchor = NULL;
        list_del(&urb->anchor_list);
        usb_put_urb(urb);
        if (usb_anchor_check_wakeup(anchor))
                wake_up(&anchor->wait);
}

/**
 * usb_unanchor_urb - unanchors an URB
 * @urb: pointer to the urb to unanchor
 *
 * Call this to stop the system from keeping track of this URB.
 */
void usb_unanchor_urb(struct urb *urb)
{
        unsigned long flags;
        struct usb_anchor *anchor;

        if (!urb)
                return;

        anchor = urb->anchor;
        if (!anchor)
                return;

        spin_lock_irqsave(&anchor->lock, flags);
        /*
         * At this point, we could be competing with another thread which
         * has the same intention. To protect the urb from being unanchored
         * twice, only the winner of the race gets the job.
         */
        if (likely(anchor == urb->anchor))
                __usb_unanchor_urb(urb, anchor);
        spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unanchor_urb);
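
/*
 * Usage sketch (illustrative only): drivers commonly anchor every URB they
 * submit so that disconnect() can cancel all of them with a single call.
 * The names "mydev" and "submitted" are assumptions, not part of this API.
 *
 *      In probe():
 *              init_usb_anchor(&mydev->submitted);
 *
 *      Around each submission:
 *              usb_anchor_urb(urb, &mydev->submitted);
 *              retval = usb_submit_urb(urb, GFP_KERNEL);
 *              if (retval)
 *                      usb_unanchor_urb(urb);
 *
 *      In disconnect():
 *              usb_kill_anchored_urbs(&mydev->submitted);
 */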

/*-------------------------------------------------------------------*/

static const int pipetypes[4] = {
        PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
};

/**
 * usb_pipe_type_check - sanity check of a specific pipe for a usb device
 * @dev: struct usb_device to be checked
 * @pipe: pipe to check
 *
 * This performs a light-weight sanity check for the endpoint in the
 * given usb device. It returns 0 if the pipe is valid for the specific usb
 * device, otherwise a negative error code.
 */
int usb_pipe_type_check(struct usb_device *dev, unsigned int pipe)
{
        const struct usb_host_endpoint *ep;

        ep = usb_pipe_endpoint(dev, pipe);
        if (!ep)
                return -EINVAL;
        if (usb_pipetype(pipe) != pipetypes[usb_endpoint_type(&ep->desc)])
                return -EINVAL;
        return 0;
}
EXPORT_SYMBOL_GPL(usb_pipe_type_check);

/**
 * usb_urb_ep_type_check - sanity check of endpoint in the given urb
 * @urb: urb to be checked
 *
 * This performs a light-weight sanity check for the endpoint in the
 * given urb. It returns 0 if the urb contains a valid endpoint, otherwise
 * a negative error code.
 */
int usb_urb_ep_type_check(const struct urb *urb)
{
        return usb_pipe_type_check(urb->dev, urb->pipe);
}
EXPORT_SYMBOL_GPL(usb_urb_ep_type_check);
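
/*
 * Usage sketch (illustrative only): a driver that builds a pipe from an
 * endpoint number it did not hard-code (for example one chosen by user
 * space) can validate it before use. "udev" and "ep_num" are assumed to
 * come from the caller.
 *
 *      unsigned int pipe = usb_sndbulkpipe(udev, ep_num);
 *
 *      if (usb_pipe_type_check(udev, pipe))
 *              return -EINVAL;         (not a bulk OUT endpoint on this device)
 *      ... safe to build and submit a bulk URB on "pipe" ...
 */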

/**
 * usb_submit_urb - issue an asynchronous transfer request for an endpoint
 * @urb: pointer to the urb describing the request
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list
 *      of valid options for this.
 *
 * This submits a transfer request, and transfers control of the URB
 * describing that request to the USB subsystem. Request completion will
 * be indicated later, asynchronously, by calling the completion handler.
 * The three types of completion are success, error, and unlink
 * (a software-induced fault, also called "request cancellation").
 *
 * URBs may be submitted in interrupt context.
 *
 * The caller must have correctly initialized the URB before submitting
 * it. Functions such as usb_fill_bulk_urb() and usb_fill_control_urb() are
 * available to ensure that most fields are correctly initialized, for
 * the particular kind of transfer, although they will not initialize
 * any transfer flags.
 *
 * If the submission is successful, the complete() callback from the URB
 * will be called exactly once, when the USB core and Host Controller Driver
 * (HCD) are finished with the URB. When the completion function is called,
 * control of the URB is returned to the device driver which issued the
 * request. The completion handler may then immediately free or reuse that
 * URB.
 *
 * With few exceptions, USB device drivers should never access URB fields
 * provided by usbcore or the HCD until the URB's complete() is called.
 * The exceptions relate to periodic transfer scheduling. For both
 * interrupt and isochronous urbs, as part of successful URB submission
 * urb->interval is modified to reflect the actual transfer period used
 * (normally some power of two units). And for isochronous urbs,
 * urb->start_frame is modified to reflect when the URB's transfers were
 * scheduled to start.
 *
 * Not all isochronous transfer scheduling policies will work, but most
 * host controller drivers should easily handle ISO queues going from now
 * until 10-200 msec into the future. Drivers should try to keep at
 * least one or two msec of data in the queue; many controllers require
 * that new transfers start at least 1 msec in the future when they are
 * added. If the driver is unable to keep up and the queue empties out,
 * the behavior for new submissions is governed by the URB_ISO_ASAP flag.
 * If the flag is set, or if the queue is idle, then the URB is always
 * assigned to the first available (and not yet expired) slot in the
 * endpoint's schedule. If the flag is not set and the queue is active
 * then the URB is always assigned to the next slot in the schedule
 * following the end of the endpoint's previous URB, even if that slot is
 * in the past. When a packet is assigned in this way to a slot that has
 * already expired, the packet is not transmitted and the corresponding
 * usb_iso_packet_descriptor's status field will return -EXDEV. If this
 * would happen to all the packets in the URB, submission fails with a
 * -EXDEV error code.
 *
 * For control endpoints, the synchronous usb_control_msg() call is
 * often used (in non-interrupt context) instead of this call.
 * That is often used through convenience wrappers, for the requests
 * that are standardized in the USB 2.0 specification. For bulk
 * endpoints, a synchronous usb_bulk_msg() call is available.
 *
 * Return:
 * 0 on successful submissions. A negative error number otherwise.
 *
 * Request Queuing:
 *
 * URBs may be submitted to endpoints before previous ones complete, to
 * minimize the impact of interrupt latencies and system overhead on data
 * throughput. With that queuing policy, an endpoint's queue would never
 * be empty. This is required for continuous isochronous data streams,
 * and may also be required for some kinds of interrupt transfers. Such
 * queuing also maximizes bandwidth utilization by letting USB controllers
 * start work on later requests before driver software has finished the
 * completion processing for earlier (successful) requests.
 *
 * As of Linux 2.6, all USB endpoint transfer queues support depths greater
 * than one. This was previously an HCD-specific behavior, except for ISO
 * transfers. Non-isochronous endpoint queues are inactive during cleanup
 * after faults (transfer errors or cancellation).
 *
 * Reserved Bandwidth Transfers:
 *
 * Periodic transfers (interrupt or isochronous) are performed repeatedly,
 * using the interval specified in the urb. Submitting the first urb to
 * the endpoint reserves the bandwidth necessary to make those transfers.
 * If the USB subsystem can't allocate sufficient bandwidth to perform
 * the periodic request, submitting such a periodic request should fail.
 *
 * For devices under xHCI, the bandwidth is reserved at configuration time, or
 * when the alt setting is selected. If there is not enough bus bandwidth, the
 * configuration/alt setting request will fail. Therefore, submissions to
 * periodic endpoints on devices under xHCI should never fail due to bandwidth
 * constraints.
 *
 * Device drivers must explicitly request that repetition, by ensuring that
 * some URB is always on the endpoint's queue (except possibly for short
 * periods during completion callbacks). When there is no longer an urb
 * queued, the endpoint's bandwidth reservation is canceled. This means
 * drivers can use their completion handlers to ensure they keep bandwidth
 * they need, by reinitializing and resubmitting the just-completed urb
 * until the driver no longer needs that periodic bandwidth.
 *
 * Memory Flags:
 *
 * The general rules for how to decide which mem_flags to use
 * are the same as for kmalloc. There are four
 * different possible values: GFP_KERNEL, GFP_NOFS, GFP_NOIO and
 * GFP_ATOMIC.
 *
 * GFP_NOFS is not ever used, as it has not been implemented yet.
 *
 * GFP_ATOMIC is used when
 *   (a) you are inside a completion handler, an interrupt, bottom half,
 *       tasklet or timer, or
 *   (b) you are holding a spinlock or rwlock (does not apply to
 *       semaphores), or
 *   (c) current->state != TASK_RUNNING, this is the case only after
 *       you've changed it.
 *
 * GFP_NOIO is used in the block io path and error handling of storage
 * devices.
 *
 * All other situations use GFP_KERNEL.
 *
 * Some more specific rules for mem_flags can be inferred, such as
 *  (1) start_xmit, timeout, and receive methods of network drivers must
 *      use GFP_ATOMIC (they are called with a spinlock held);
 *  (2) queuecommand methods of scsi drivers must use GFP_ATOMIC (also
 *      called with a spinlock held);
 *  (3) If you use a kernel thread with a network driver you must use
 *      GFP_NOIO, unless (b) or (c) apply;
 *  (4) after you have done a down() you can use GFP_KERNEL, unless (b) or (c)
 *      apply or you are in a storage driver's block io path;
 *  (5) USB probe and disconnect can use GFP_KERNEL unless (b) or (c) apply; and
 *  (6) changing firmware on a running storage or net device uses
 *      GFP_NOIO, unless (b) or (c) apply.
 *
 */
int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
{
        int xfertype, max;
        struct usb_device *dev;
        struct usb_host_endpoint *ep;
        int is_out;
        unsigned int allowed;

        if (!urb || !urb->complete)
                return -EINVAL;
        if (urb->hcpriv) {
                WARN_ONCE(1, "URB %p submitted while active\n", urb);
                return -EBUSY;
        }

        dev = urb->dev;
        if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED))
                return -ENODEV;

        /* For now, get the endpoint from the pipe. Eventually drivers
         * will be required to set urb->ep directly and we will eliminate
         * urb->pipe.
         */
        ep = usb_pipe_endpoint(dev, urb->pipe);
        if (!ep)
                return -ENOENT;

        urb->ep = ep;
        urb->status = -EINPROGRESS;
        urb->actual_length = 0;

        /* Lots of sanity checks, so HCDs can rely on clean data
         * and don't need to duplicate tests
         */
        xfertype = usb_endpoint_type(&ep->desc);
        if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
                struct usb_ctrlrequest *setup =
                                (struct usb_ctrlrequest *) urb->setup_packet;

                if (!setup)
                        return -ENOEXEC;
                is_out = !(setup->bRequestType & USB_DIR_IN) ||
                                !setup->wLength;
                dev_WARN_ONCE(&dev->dev, (usb_pipeout(urb->pipe) != is_out),
                                "BOGUS control dir, pipe %x doesn't match bRequestType %x\n",
                                urb->pipe, setup->bRequestType);
                if (le16_to_cpu(setup->wLength) != urb->transfer_buffer_length) {
                        dev_dbg(&dev->dev, "BOGUS control len %d doesn't match transfer length %d\n",
                                        le16_to_cpu(setup->wLength),
                                        urb->transfer_buffer_length);
                        return -EBADR;
                }
        } else {
                is_out = usb_endpoint_dir_out(&ep->desc);
        }

        /* Clear the internal flags and cache the direction for later use */
        urb->transfer_flags &= ~(URB_DIR_MASK | URB_DMA_MAP_SINGLE |
                        URB_DMA_MAP_PAGE | URB_DMA_MAP_SG | URB_MAP_LOCAL |
                        URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL |
                        URB_DMA_SG_COMBINED);
        urb->transfer_flags |= (is_out ? URB_DIR_OUT : URB_DIR_IN);
        kmsan_handle_urb(urb, is_out);

        if (xfertype != USB_ENDPOINT_XFER_CONTROL &&
                        dev->state < USB_STATE_CONFIGURED)
                return -ENODEV;

        max = usb_endpoint_maxp(&ep->desc);
        if (max <= 0) {
                dev_dbg(&dev->dev,
                        "bogus endpoint ep%d%s in %s (bad maxpacket %d)\n",
                        usb_endpoint_num(&ep->desc), is_out ? "out" : "in",
                        __func__, max);
                return -EMSGSIZE;
        }

        /* periodic transfers limit size per frame/uframe,
         * but drivers only control those sizes for ISO.
         * while we're checking, initialize return status.
         */
        if (xfertype == USB_ENDPOINT_XFER_ISOC) {
                int n, len;

                /* SuperSpeed isoc endpoints have up to 16 bursts of up to
                 * 3 packets each
                 */
                if (dev->speed >= USB_SPEED_SUPER) {
                        int burst = 1 + ep->ss_ep_comp.bMaxBurst;
                        int mult = USB_SS_MULT(ep->ss_ep_comp.bmAttributes);
                        max *= burst;
                        max *= mult;
                }

                if (dev->speed == USB_SPEED_SUPER_PLUS &&
                    USB_SS_SSP_ISOC_COMP(ep->ss_ep_comp.bmAttributes)) {
                        struct usb_ssp_isoc_ep_comp_descriptor *isoc_ep_comp;

                        isoc_ep_comp = &ep->ssp_isoc_ep_comp;
                        max = le32_to_cpu(isoc_ep_comp->dwBytesPerInterval);
                }

                /* "high bandwidth" mode, 1-3 packets/uframe? */
                if (dev->speed == USB_SPEED_HIGH)
                        max *= usb_endpoint_maxp_mult(&ep->desc);

                if (urb->number_of_packets <= 0)
                        return -EINVAL;
                for (n = 0; n < urb->number_of_packets; n++) {
                        len = urb->iso_frame_desc[n].length;
                        if (len < 0 || len > max)
                                return -EMSGSIZE;
                        urb->iso_frame_desc[n].status = -EXDEV;
                        urb->iso_frame_desc[n].actual_length = 0;
                }
        } else if (urb->num_sgs && !urb->dev->bus->no_sg_constraint) {
                struct scatterlist *sg;
                int i;

                for_each_sg(urb->sg, sg, urb->num_sgs - 1, i)
                        if (sg->length % max)
                                return -EINVAL;
        }

        /* the I/O buffer must be mapped/unmapped, except when length=0 */
        if (urb->transfer_buffer_length > INT_MAX)
                return -EMSGSIZE;

        /*
         * stuff that drivers shouldn't do, but which shouldn't
         * cause problems in HCDs if they get it wrong.
         */

        /* Check that the pipe's type matches the endpoint's type */
        if (usb_pipe_type_check(urb->dev, urb->pipe))
                dev_warn_once(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n",
                        usb_pipetype(urb->pipe), pipetypes[xfertype]);

        /* Check against a simple/standard policy */
        allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT | URB_DIR_MASK |
                        URB_FREE_BUFFER);
        switch (xfertype) {
        case USB_ENDPOINT_XFER_BULK:
        case USB_ENDPOINT_XFER_INT:
                if (is_out)
                        allowed |= URB_ZERO_PACKET;
                fallthrough;
        default:                        /* all non-iso endpoints */
                if (!is_out)
                        allowed |= URB_SHORT_NOT_OK;
                break;
        case USB_ENDPOINT_XFER_ISOC:
                allowed |= URB_ISO_ASAP;
                break;
        }
        allowed &= urb->transfer_flags;

        /* warn if submitter gave bogus flags */
        if (allowed != urb->transfer_flags)
                dev_WARN(&dev->dev, "BOGUS urb flags, %x --> %x\n",
                        urb->transfer_flags, allowed);

        /*
         * Force periodic transfer intervals to be legal values that are
         * a power of two (so HCDs don't need to).
         *
         * FIXME want bus->{intr,iso}_sched_horizon values here. Each HC
         * supports different values... this uses EHCI/UHCI defaults (and
         * EHCI can use smaller non-default values).
         */
        switch (xfertype) {
        case USB_ENDPOINT_XFER_ISOC:
        case USB_ENDPOINT_XFER_INT:
                /* too small? */
                if (urb->interval <= 0)
                        return -EINVAL;

                /* too big? */
                switch (dev->speed) {
                case USB_SPEED_SUPER_PLUS:
                case USB_SPEED_SUPER:           /* units are 125us */
                        /* Handle up to 2^(16-1) microframes */
                        if (urb->interval > (1 << 15))
                                return -EINVAL;
                        max = 1 << 15;
                        break;
                case USB_SPEED_HIGH:            /* units are microframes */
                        /* NOTE usb handles 2^15 */
                        if (urb->interval > (1024 * 8))
                                urb->interval = 1024 * 8;
                        max = 1024 * 8;
                        break;
                case USB_SPEED_FULL:            /* units are frames/msec */
                case USB_SPEED_LOW:
                        if (xfertype == USB_ENDPOINT_XFER_INT) {
                                if (urb->interval > 255)
                                        return -EINVAL;
                                /* NOTE ohci only handles up to 32 */
                                max = 128;
                        } else {
                                if (urb->interval > 1024)
                                        urb->interval = 1024;
                                /* NOTE usb and ohci handle up to 2^15 */
                                max = 1024;
                        }
                        break;
                default:
                        return -EINVAL;
                }
                /* Round down to a power of 2, no more than max */
                urb->interval = min(max, 1 << ilog2(urb->interval));
        }

        return usb_hcd_submit_urb(urb, mem_flags);
}
EXPORT_SYMBOL_GPL(usb_submit_urb);
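
/*
 * Usage sketch (illustrative only): a common interrupt-IN pattern keeps the
 * endpoint's queue non-empty by resubmitting from the completion handler,
 * using GFP_ATOMIC there because the handler may run in interrupt context.
 * The names my_intr_complete, udev, buf, len, ep_addr and ep_interval are
 * assumptions.
 *
 *      static void my_intr_complete(struct urb *urb)
 *      {
 *              if (urb->status == -ENOENT || urb->status == -ECONNRESET ||
 *                  urb->status == -ESHUTDOWN)
 *                      return;         (unlinked or device gone: stop)
 *              if (urb->status == 0)
 *                      ... process urb->actual_length bytes ...
 *              usb_submit_urb(urb, GFP_ATOMIC);
 *      }
 *
 *      Initial submission, e.g. from probe() or open():
 *              usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, ep_addr),
 *                               buf, len, my_intr_complete, NULL, ep_interval);
 *              retval = usb_submit_urb(urb, GFP_KERNEL);
 */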

/*-------------------------------------------------------------------*/

/**
 * usb_unlink_urb - abort/cancel a transfer request for an endpoint
 * @urb: pointer to urb describing a previously submitted request,
 *      may be NULL
 *
 * This routine cancels an in-progress request. URBs complete only once
 * per submission, and may be canceled only once per submission.
 * Successful cancellation means termination of @urb will be expedited
 * and the completion handler will be called with a status code
 * indicating that the request has been canceled (rather than any other
 * code).
 *
 * Drivers should not call this routine or related routines, such as
 * usb_kill_urb(), after their disconnect method has returned. The
 * disconnect function should synchronize with a driver's I/O routines
 * to ensure that all URB-related activity has completed before it returns.
 *
 * This request is asynchronous; however, the HCD might call the ->complete()
 * callback during unlink. Therefore when drivers call usb_unlink_urb(), they
 * must not hold any locks that may be taken by the completion function.
 * Success is indicated by returning -EINPROGRESS, at which time the URB will
 * probably not yet have been given back to the device driver. When it is
 * eventually called, the completion function will see @urb->status ==
 * -ECONNRESET.
 * Failure is indicated by usb_unlink_urb() returning any other value.
 * Unlinking will fail when @urb is not currently "linked" (i.e., it was
 * never submitted, or it was unlinked before, or the hardware is already
 * finished with it), even if the completion handler has not yet run.
 *
 * The URB must not be deallocated while this routine is running. In
 * particular, when a driver calls this routine, it must ensure that the
 * completion handler cannot deallocate the URB.
 *
 * Return: -EINPROGRESS on success. See description for other values on
 * failure.
 *
 * Unlinking and Endpoint Queues:
 *
 * [The behaviors and guarantees described below do not apply to virtual
 * root hubs but only to endpoint queues for physical USB devices.]
 *
 * Host Controller Drivers (HCDs) place all the URBs for a particular
 * endpoint in a queue. Normally the queue advances as the controller
 * hardware processes each request. But when an URB terminates with an
 * error its queue generally stops (see below), at least until that URB's
 * completion routine returns. It is guaranteed that a stopped queue
 * will not restart until all its unlinked URBs have been fully retired,
 * with their completion routines run, even if that's not until some time
 * after the original completion handler returns. The same behavior and
 * guarantee apply when an URB terminates because it was unlinked.
 *
 * Bulk and interrupt endpoint queues are guaranteed to stop whenever an
 * URB terminates with any sort of error, including -ECONNRESET, -ENOENT,
 * and -EREMOTEIO. Control endpoint queues behave the same way except
 * that they are not guaranteed to stop for -EREMOTEIO errors. Queues
 * for isochronous endpoints are treated differently, because they must
 * advance at fixed rates. Such queues do not stop when an URB
 * encounters an error or is unlinked. An unlinked isochronous URB may
 * leave a gap in the stream of packets; it is undefined whether such
 * gaps can be filled in.
 *
 * Note that early termination of an URB because a short packet was
 * received will generate a -EREMOTEIO error if and only if the
 * URB_SHORT_NOT_OK flag is set. By setting this flag, USB device
 * drivers can build deep queues for large or complex bulk transfers
 * and clean them up reliably after any sort of aborted transfer by
 * unlinking all pending URBs at the first fault.
 *
 * When a control URB terminates with an error other than -EREMOTEIO, it
 * is quite likely that the status stage of the transfer will not take
 * place.
 */
int usb_unlink_urb(struct urb *urb)
{
        if (!urb)
                return -EINVAL;
        if (!urb->dev)
                return -ENODEV;
        if (!urb->ep)
                return -EIDRM;
        return usb_hcd_unlink_urb(urb, -ECONNRESET);
}
EXPORT_SYMBOL_GPL(usb_unlink_urb);
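
/*
 * Usage sketch (illustrative only): usb_unlink_urb() is the asynchronous
 * form of cancellation, so unlike usb_kill_urb() below it may be called
 * where sleeping is not allowed, e.g. from a watchdog timer or another
 * URB's completion handler. "mydev" is an assumption.
 *
 *      retval = usb_unlink_urb(mydev->urb);
 *      ... -EINPROGRESS means the unlink was started; the completion
 *          handler will eventually run with urb->status == -ECONNRESET ...
 */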

/**
 * usb_kill_urb - cancel a transfer request and wait for it to finish
 * @urb: pointer to URB describing a previously submitted request,
 *      may be NULL
 *
 * This routine cancels an in-progress request. It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and available for reuse. These features make
 * this an ideal way to stop I/O in a disconnect() callback or close()
 * function. If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * While the routine is running, attempts to resubmit the URB will fail
 * with error -EPERM. Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * The URB must not be deallocated while this routine is running. In
 * particular, when a driver calls this routine, it must ensure that the
 * completion handler cannot deallocate the URB.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_kill_urb(struct urb *urb)
{
        might_sleep();
        if (!(urb && urb->dev && urb->ep))
                return;
        atomic_inc(&urb->reject);
        /*
         * Order the write of urb->reject above before the read
         * of urb->use_count below. Pairs with the barriers in
         * __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
         */
        smp_mb__after_atomic();

        usb_hcd_unlink_urb(urb, -ENOENT);
        wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);

        atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_kill_urb);

/**
 * usb_poison_urb - reliably kill a transfer and prevent further use of an URB
 * @urb: pointer to URB describing a previously submitted request,
 *      may be NULL
 *
 * This routine cancels an in-progress request. It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and cannot be reused. These features make
 * this an ideal way to stop I/O in a disconnect() callback.
 * If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * After and while the routine runs, attempts to resubmit the URB will fail
 * with error -EPERM. Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * The URB must not be deallocated while this routine is running. In
 * particular, when a driver calls this routine, it must ensure that the
 * completion handler cannot deallocate the URB.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_urb(struct urb *urb)
{
        might_sleep();
        if (!urb)
                return;
        atomic_inc(&urb->reject);
        /*
         * Order the write of urb->reject above before the read
         * of urb->use_count below. Pairs with the barriers in
         * __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
         */
        smp_mb__after_atomic();

        if (!urb->dev || !urb->ep)
                return;

        usb_hcd_unlink_urb(urb, -ENOENT);
        wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
}
EXPORT_SYMBOL_GPL(usb_poison_urb);
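
/*
 * Usage sketch (illustrative only): poisoning is useful when submissions
 * must stay blocked for a while and be re-enabled later, for example
 * across a device reset. "mydev" and the GFP_NOIO choice are assumptions.
 *
 *      In pre_reset():
 *              usb_poison_urb(mydev->int_urb);
 *
 *      In post_reset():
 *              usb_unpoison_urb(mydev->int_urb);
 *              retval = usb_submit_urb(mydev->int_urb, GFP_NOIO);
 */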

void usb_unpoison_urb(struct urb *urb)
{
        if (!urb)
                return;

        atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_unpoison_urb);

/**
 * usb_block_urb - reliably prevent further use of an URB
 * @urb: pointer to URB to be blocked, may be NULL
 *
 * After the routine has run, attempts to resubmit the URB will fail
 * with error -EPERM. Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * The URB must not be deallocated while this routine is running. In
 * particular, when a driver calls this routine, it must ensure that the
 * completion handler cannot deallocate the URB.
 */
void usb_block_urb(struct urb *urb)
{
        if (!urb)
                return;

        atomic_inc(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_block_urb);

/**
 * usb_kill_anchored_urbs - kill all URBs associated with an anchor
 * @anchor: anchor the requests are bound to
 *
 * This kills all outstanding URBs starting from the back of the queue,
 * with the guarantee that no completion callbacks will take place from the
 * anchor after this function returns.
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_kill_anchored_urbs(struct usb_anchor *anchor)
{
        struct urb *victim;
        int surely_empty;

        do {
                spin_lock_irq(&anchor->lock);
                while (!list_empty(&anchor->urb_list)) {
                        victim = list_entry(anchor->urb_list.prev,
                                            struct urb, anchor_list);
                        /* make sure the URB isn't freed before we kill it */
                        usb_get_urb(victim);
                        spin_unlock_irq(&anchor->lock);
                        /* this will unanchor the URB */
                        usb_kill_urb(victim);
                        usb_put_urb(victim);
                        spin_lock_irq(&anchor->lock);
                }
                surely_empty = usb_anchor_check_wakeup(anchor);

                spin_unlock_irq(&anchor->lock);
                cpu_relax();
        } while (!surely_empty);
}
EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);

/**
 * usb_poison_anchored_urbs - cease all traffic from an anchor
 * @anchor: anchor the requests are bound to
 *
 * This allows all outstanding URBs to be poisoned starting
 * from the back of the queue. Newly added URBs will also be
 * poisoned.
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_anchored_urbs(struct usb_anchor *anchor)
{
        struct urb *victim;
        int surely_empty;

        do {
                spin_lock_irq(&anchor->lock);
                anchor->poisoned = 1;
                while (!list_empty(&anchor->urb_list)) {
                        victim = list_entry(anchor->urb_list.prev,
                                            struct urb, anchor_list);
                        /* make sure the URB isn't freed before we kill it */
                        usb_get_urb(victim);
                        spin_unlock_irq(&anchor->lock);
                        /* this will unanchor the URB */
                        usb_poison_urb(victim);
                        usb_put_urb(victim);
                        spin_lock_irq(&anchor->lock);
                }
                surely_empty = usb_anchor_check_wakeup(anchor);

                spin_unlock_irq(&anchor->lock);
                cpu_relax();
        } while (!surely_empty);
}
EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);
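
/*
 * Usage sketch (illustrative only): a driver that anchors its URBs can stop
 * and later restart all traffic in one place by poisoning and unpoisoning
 * the whole anchor. "mydev" and "submitted" are assumptions.
 *
 *      In pre_reset() or suspend():
 *              usb_poison_anchored_urbs(&mydev->submitted);
 *
 *      In post_reset() or resume():
 *              usb_unpoison_anchored_urbs(&mydev->submitted);
 *              ... resubmit whatever URBs the driver needs in flight ...
 */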

/**
 * usb_unpoison_anchored_urbs - let an anchor be used successfully again
 * @anchor: anchor the requests are bound to
 *
 * Reverses the effect of usb_poison_anchored_urbs();
 * the anchor can be used normally after it returns.
 */
void usb_unpoison_anchored_urbs(struct usb_anchor *anchor)
{
        unsigned long flags;
        struct urb *lazarus;

        spin_lock_irqsave(&anchor->lock, flags);
        list_for_each_entry(lazarus, &anchor->urb_list, anchor_list) {
                usb_unpoison_urb(lazarus);
        }
        anchor->poisoned = 0;
        spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unpoison_anchored_urbs);

/**
 * usb_anchor_suspend_wakeups - suspend wakeups from an anchor
 * @anchor: the anchor you want to suspend wakeups on
 *
 * Call this to stop the last urb being unanchored from waking up any
 * usb_wait_anchor_empty_timeout waiters. This is used in the hcd urb
 * give-back path to delay waking up until after the completion handler
 * has run.
 */
void usb_anchor_suspend_wakeups(struct usb_anchor *anchor)
{
        if (anchor)
                atomic_inc(&anchor->suspend_wakeups);
}
EXPORT_SYMBOL_GPL(usb_anchor_suspend_wakeups);

/**
 * usb_anchor_resume_wakeups - resume wakeups from an anchor
 * @anchor: the anchor you want to resume wakeups on
 *
 * Allow usb_wait_anchor_empty_timeout waiters to be woken up again, and
 * wake up any current waiters if the anchor is empty.
 */
void usb_anchor_resume_wakeups(struct usb_anchor *anchor)
{
        if (!anchor)
                return;

        atomic_dec(&anchor->suspend_wakeups);
        if (usb_anchor_check_wakeup(anchor))
                wake_up(&anchor->wait);
}
EXPORT_SYMBOL_GPL(usb_anchor_resume_wakeups);

/**
 * usb_wait_anchor_empty_timeout - wait for an anchor to be unused
 * @anchor: the anchor you want to become unused
 * @timeout: how long you are willing to wait in milliseconds
 *
 * Call this if you want to be sure that all of an anchor's
 * URBs have finished.
 *
 * Return: Non-zero if the anchor became unused. Zero on timeout.
 */
int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor,
                                  unsigned int timeout)
{
        return wait_event_timeout(anchor->wait,
                                  usb_anchor_check_wakeup(anchor),
                                  msecs_to_jiffies(timeout));
}
EXPORT_SYMBOL_GPL(usb_wait_anchor_empty_timeout);
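
/*
 * Usage sketch (illustrative only): suspend() handlers often stop queuing
 * new URBs and then give in-flight ones a bounded time to drain before
 * forcing the issue. "mydev", "submitted" and the 1000 ms budget are
 * assumptions.
 *
 *      if (!usb_wait_anchor_empty_timeout(&mydev->submitted, 1000))
 *              usb_kill_anchored_urbs(&mydev->submitted);
 */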

/**
 * usb_get_from_anchor - get an anchor's oldest urb
 * @anchor: the anchor whose urb you want
 *
 * This will take the oldest urb from an anchor,
 * unanchor it, and return it.
 *
 * Return: The oldest urb from @anchor, or %NULL if @anchor has no
 * urbs associated with it.
 */
struct urb *usb_get_from_anchor(struct usb_anchor *anchor)
{
        struct urb *victim;
        unsigned long flags;

        spin_lock_irqsave(&anchor->lock, flags);
        if (!list_empty(&anchor->urb_list)) {
                victim = list_entry(anchor->urb_list.next, struct urb,
                                    anchor_list);
                usb_get_urb(victim);
                __usb_unanchor_urb(victim, anchor);
        } else {
                victim = NULL;
        }
        spin_unlock_irqrestore(&anchor->lock, flags);

        return victim;
}
EXPORT_SYMBOL_GPL(usb_get_from_anchor);

/**
 * usb_scuttle_anchored_urbs - unanchor all an anchor's urbs
 * @anchor: the anchor whose urbs you want to unanchor
 *
 * Use this to get rid of all an anchor's urbs.
 */
void usb_scuttle_anchored_urbs(struct usb_anchor *anchor)
{
        struct urb *victim;
        unsigned long flags;
        int surely_empty;

        do {
                spin_lock_irqsave(&anchor->lock, flags);
                while (!list_empty(&anchor->urb_list)) {
                        victim = list_entry(anchor->urb_list.prev,
                                            struct urb, anchor_list);
                        __usb_unanchor_urb(victim, anchor);
                }
                surely_empty = usb_anchor_check_wakeup(anchor);

                spin_unlock_irqrestore(&anchor->lock, flags);
                cpu_relax();
        } while (!surely_empty);
}
EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs);

/**
 * usb_anchor_empty - is an anchor empty
 * @anchor: the anchor you want to query
 *
 * Return: 1 if the anchor has no urbs associated with it.
 */
int usb_anchor_empty(struct usb_anchor *anchor)
{
        return list_empty(&anchor->urb_list);
}
EXPORT_SYMBOL_GPL(usb_anchor_empty);
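
/*
 * Usage sketch (illustrative only): an anchor can also be drained one URB
 * at a time, for instance to recycle buffers before dropping references.
 * "mydev", "pending" and my_recycle_buffer() are assumptions.
 *
 *      struct urb *urb;
 *
 *      while ((urb = usb_get_from_anchor(&mydev->pending))) {
 *              my_recycle_buffer(mydev, urb->transfer_buffer);
 *              usb_free_urb(urb);      (drops the reference taken by
 *                                       usb_get_from_anchor())
 *      }
 */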