#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/usb.h>
#include <linux/wait.h>
#include "hcd.h"

#define to_urb(d) container_of(d, struct urb, kref)


static void urb_destroy(struct kref *kref)
{
	struct urb *urb = to_urb(kref);

	if (urb->transfer_flags & URB_FREE_BUFFER)
		kfree(urb->transfer_buffer);

	kfree(urb);
}

/**
 * usb_init_urb - initializes a urb so that it can be used by a USB driver
 * @urb: pointer to the urb to initialize
 *
 * Initializes a urb so that the USB subsystem can use it properly.
 *
 * If a urb is created with a call to usb_alloc_urb() it is not
 * necessary to call this function.  Only use this if you allocate the
 * space for a struct urb on your own.  If you call this function, be
 * careful when freeing the memory for your urb that it is no longer in
 * use by the USB core.
 *
 * Only use this function if you _really_ understand what you are doing.
 */
void usb_init_urb(struct urb *urb)
{
	if (urb) {
		memset(urb, 0, sizeof(*urb));
		kref_init(&urb->kref);
		INIT_LIST_HEAD(&urb->anchor_list);
	}
}
EXPORT_SYMBOL_GPL(usb_init_urb);

/**
 * usb_alloc_urb - creates a new urb for a USB driver to use
 * @iso_packets: number of iso packets for this urb
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list of
 *	valid options for this.
 *
 * Creates an urb for the USB driver to use, initializes a few internal
 * structures, increments the usage counter, and returns a pointer to it.
 *
 * If no memory is available, NULL is returned.
 *
 * If the driver wants to use this urb for interrupt, control, or bulk
 * endpoints, pass '0' as the number of iso packets.
 *
 * The driver must call usb_free_urb() when it is finished with the urb.
 */
struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags)
{
	struct urb *urb;

	urb = kmalloc(sizeof(struct urb) +
		iso_packets * sizeof(struct usb_iso_packet_descriptor),
		mem_flags);
	if (!urb) {
		printk(KERN_ERR "alloc_urb: kmalloc failed\n");
		return NULL;
	}
	usb_init_urb(urb);
	return urb;
}
EXPORT_SYMBOL_GPL(usb_alloc_urb);

/**
 * usb_free_urb - frees the memory used by a urb when all users of it are finished
 * @urb: pointer to the urb to free, may be NULL
 *
 * Must be called when a user of a urb is finished with it.  When the last user
 * of the urb calls this function, the memory of the urb is freed.
 *
 * Note: The transfer buffer associated with the urb is not freed unless the
 * URB_FREE_BUFFER transfer flag is set.
 */
void usb_free_urb(struct urb *urb)
{
	if (urb)
		kref_put(&urb->kref, urb_destroy);
}
EXPORT_SYMBOL_GPL(usb_free_urb);

/**
 * usb_get_urb - increments the reference count of the urb
 * @urb: pointer to the urb to modify, may be NULL
 *
 * This must be called whenever a urb is transferred from a device driver to a
 * host controller driver.  This allows proper reference counting to happen
 * for urbs.
 *
 * A pointer to the urb with the incremented reference counter is returned.
 */
struct urb *usb_get_urb(struct urb *urb)
{
	if (urb)
		kref_get(&urb->kref);
	return urb;
}
EXPORT_SYMBOL_GPL(usb_get_urb);
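/*
 * A minimal sketch of the refcounted life cycle described above.  The
 * control flow is illustrative only; error labels and surrounding driver
 * code are assumed:
 *
 *	struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);	// refcount is 1
 *	if (!urb)
 *		return -ENOMEM;
 *	usb_get_urb(urb);	// refcount 2, e.g. before handing it off
 *	...
 *	usb_put_urb(urb);	// drop the extra reference
 *	usb_free_urb(urb);	// drop the last reference; urb_destroy() runs
 */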
/**
 * usb_anchor_urb - anchors an URB while it is processed
 * @urb: pointer to the urb to anchor
 * @anchor: pointer to the anchor
 *
 * This can be called to have access to URBs which are to be executed
 * without bothering to track them
 */
void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor)
{
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	usb_get_urb(urb);
	list_add_tail(&urb->anchor_list, &anchor->urb_list);
	urb->anchor = anchor;

	if (unlikely(anchor->poisoned)) {
		atomic_inc(&urb->reject);
	}

	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_anchor_urb);

/**
 * usb_unanchor_urb - unanchors an URB
 * @urb: pointer to the urb to unanchor
 *
 * Call this to stop the system keeping track of this URB
 */
void usb_unanchor_urb(struct urb *urb)
{
	unsigned long flags;
	struct usb_anchor *anchor;

	if (!urb)
		return;

	anchor = urb->anchor;
	if (!anchor)
		return;

	spin_lock_irqsave(&anchor->lock, flags);
	if (unlikely(anchor != urb->anchor)) {
		/* we've lost the race to another thread */
		spin_unlock_irqrestore(&anchor->lock, flags);
		return;
	}
	urb->anchor = NULL;
	list_del(&urb->anchor_list);
	spin_unlock_irqrestore(&anchor->lock, flags);
	usb_put_urb(urb);
	if (list_empty(&anchor->urb_list))
		wake_up(&anchor->wait);
}
EXPORT_SYMBOL_GPL(usb_unanchor_urb);
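/*
 * A hedged driver-side sketch of the anchor pattern; my_priv and its
 * "submitted" anchor are hypothetical names, not part of this file:
 *
 *	init_usb_anchor(&my_priv->submitted);
 *	...
 *	usb_anchor_urb(urb, &my_priv->submitted);
 *	retval = usb_submit_urb(urb, GFP_KERNEL);
 *	if (retval)
 *		usb_unanchor_urb(urb);	// submission failed; undo the anchor
 *	...
 *	// later, e.g. in disconnect(), one call stops everything in flight:
 *	usb_kill_anchored_urbs(&my_priv->submitted);
 */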
/*-------------------------------------------------------------------*/

/**
 * usb_submit_urb - issue an asynchronous transfer request for an endpoint
 * @urb: pointer to the urb describing the request
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list
 *	of valid options for this.
 *
 * This submits a transfer request, and transfers control of the URB
 * describing that request to the USB subsystem.  Request completion will
 * be indicated later, asynchronously, by calling the completion handler.
 * The three types of completion are success, error, and unlink
 * (a software-induced fault, also called "request cancellation").
 *
 * URBs may be submitted in interrupt context.
 *
 * The caller must have correctly initialized the URB before submitting
 * it.  Functions such as usb_fill_bulk_urb() and usb_fill_control_urb() are
 * available to ensure that most fields are correctly initialized, for
 * the particular kind of transfer, although they will not initialize
 * any transfer flags.
 *
 * Successful submissions return 0; otherwise this routine returns a
 * negative error number.  If the submission is successful, the complete()
 * callback from the URB will be called exactly once, when the USB core and
 * Host Controller Driver (HCD) are finished with the URB.  When the completion
 * function is called, control of the URB is returned to the device
 * driver which issued the request.  The completion handler may then
 * immediately free or reuse that URB.
 *
 * With few exceptions, USB device drivers should never access URB fields
 * provided by usbcore or the HCD until its complete() is called.
 * The exceptions relate to periodic transfer scheduling.  For both
 * interrupt and isochronous urbs, as part of successful URB submission
 * urb->interval is modified to reflect the actual transfer period used
 * (normally some power of two units).  And for isochronous urbs,
 * urb->start_frame is modified to reflect when the URB's transfers were
 * scheduled to start.  Not all isochronous transfer scheduling policies
 * will work, but most host controller drivers should easily handle ISO
 * queues going from now until 10-200 msec into the future.
 *
 * For control endpoints, the synchronous usb_control_msg() call is
 * often used (in non-interrupt context) instead of this call.
 * That is often used through convenience wrappers, for the requests
 * that are standardized in the USB 2.0 specification.  For bulk
 * endpoints, a synchronous usb_bulk_msg() call is available.
 *
 * Request Queuing:
 *
 * URBs may be submitted to endpoints before previous ones complete, to
 * minimize the impact of interrupt latencies and system overhead on data
 * throughput.  With that queuing policy, an endpoint's queue would never
 * be empty.  This is required for continuous isochronous data streams,
 * and may also be required for some kinds of interrupt transfers.  Such
 * queuing also maximizes bandwidth utilization by letting USB controllers
 * start work on later requests before driver software has finished the
 * completion processing for earlier (successful) requests.
 *
 * As of Linux 2.6, all USB endpoint transfer queues support depths greater
 * than one.  This was previously a HCD-specific behavior, except for ISO
 * transfers.  Non-isochronous endpoint queues are inactive during cleanup
 * after faults (transfer errors or cancellation).
 *
 * Reserved Bandwidth Transfers:
 *
 * Periodic transfers (interrupt or isochronous) are performed repeatedly,
 * using the interval specified in the urb.  Submitting the first urb to
 * the endpoint reserves the bandwidth necessary to make those transfers.
 * If the USB subsystem can't allocate sufficient bandwidth to perform
 * the periodic request, submitting such a periodic request should fail.
 *
 * For devices under xHCI, the bandwidth is reserved at configuration time, or
 * when the alt setting is selected.  If there is not enough bus bandwidth, the
 * configuration/alt setting request will fail.  Therefore, submissions to
 * periodic endpoints on devices under xHCI should never fail due to bandwidth
 * constraints.
 *
 * Device drivers must explicitly request that repetition, by ensuring that
 * some URB is always on the endpoint's queue (except possibly for short
 * periods during completion callbacks).  When there is no longer an urb
 * queued, the endpoint's bandwidth reservation is canceled.  This means
 * drivers can use their completion handlers to ensure they keep bandwidth
 * they need, by reinitializing and resubmitting the just-completed urb
 * until the driver no longer needs that periodic bandwidth.
 *
 * Memory Flags:
 *
 * The general rules for how to decide which mem_flags to use
 * are the same as for kmalloc.  There are four
 * different possible values; GFP_KERNEL, GFP_NOFS, GFP_NOIO and
 * GFP_ATOMIC.
 *
 * GFP_NOFS is not ever used, as it has not been implemented yet.
 *
 * GFP_ATOMIC is used when
 *   (a) you are inside a completion handler, an interrupt, bottom half,
 *       tasklet or timer, or
 *   (b) you are holding a spinlock or rwlock (does not apply to
 *       semaphores), or
 *   (c) current->state != TASK_RUNNING, this is the case only after
 *       you've changed it.
 *
 * GFP_NOIO is used in the block io path and error handling of storage
 * devices.
 *
 * All other situations use GFP_KERNEL.
 *
 * Some more specific rules for mem_flags can be inferred, such as
 *  (1) start_xmit, timeout, and receive methods of network drivers must
 *      use GFP_ATOMIC (they are called with a spinlock held);
 *  (2) queuecommand methods of scsi drivers must use GFP_ATOMIC (also
 *      called with a spinlock held);
 *  (3) If you use a kernel thread with a network driver you must use
 *      GFP_NOIO, unless (b) or (c) apply;
 *  (4) after you have done a down() you can use GFP_KERNEL, unless (b) or (c)
 *      apply or you are in a storage driver's block io path;
 *  (5) USB probe and disconnect can use GFP_KERNEL unless (b) or (c) apply; and
 *  (6) changing firmware on a running storage or net device uses
 *      GFP_NOIO, unless (b) or (c) apply
 *
 */
int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
{
	int xfertype, max;
	struct usb_device *dev;
	struct usb_host_endpoint *ep;
	int is_out;

	if (!urb || urb->hcpriv || !urb->complete)
		return -EINVAL;
	dev = urb->dev;
	if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED))
		return -ENODEV;

	/* For now, get the endpoint from the pipe.  Eventually drivers
	 * will be required to set urb->ep directly and we will eliminate
	 * urb->pipe.
	 */
	ep = (usb_pipein(urb->pipe) ? dev->ep_in : dev->ep_out)
			[usb_pipeendpoint(urb->pipe)];
	if (!ep)
		return -ENOENT;

	urb->ep = ep;
	urb->status = -EINPROGRESS;
	urb->actual_length = 0;

	/* Lots of sanity checks, so HCDs can rely on clean data
	 * and don't need to duplicate tests
	 */
	xfertype = usb_endpoint_type(&ep->desc);
	if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
		struct usb_ctrlrequest *setup =
				(struct usb_ctrlrequest *) urb->setup_packet;

		if (!setup)
			return -ENOEXEC;
		is_out = !(setup->bRequestType & USB_DIR_IN) ||
				!setup->wLength;
	} else {
		is_out = usb_endpoint_dir_out(&ep->desc);
	}

	/* Cache the direction for later use */
	urb->transfer_flags = (urb->transfer_flags & ~URB_DIR_MASK) |
			(is_out ? URB_DIR_OUT : URB_DIR_IN);

	if (xfertype != USB_ENDPOINT_XFER_CONTROL &&
			dev->state < USB_STATE_CONFIGURED)
		return -ENODEV;

	max = le16_to_cpu(ep->desc.wMaxPacketSize);
	if (max <= 0) {
		dev_dbg(&dev->dev,
			"bogus endpoint ep%d%s in %s (bad maxpacket %d)\n",
			usb_endpoint_num(&ep->desc), is_out ? "out" : "in",
			__func__, max);
		return -EMSGSIZE;
	}
	/* periodic transfers limit size per frame/uframe,
	 * but drivers only control those sizes for ISO.
	 * while we're checking, initialize return status.
	 */
	if (xfertype == USB_ENDPOINT_XFER_ISOC) {
		int n, len;

		/* FIXME SuperSpeed isoc endpoints have up to 16 bursts */
		/* "high bandwidth" mode, 1-3 packets/uframe? */
		if (dev->speed == USB_SPEED_HIGH) {
			int mult = 1 + ((max >> 11) & 0x03);
			max &= 0x07ff;
			max *= mult;
		}

		if (urb->number_of_packets <= 0)
			return -EINVAL;
		for (n = 0; n < urb->number_of_packets; n++) {
			len = urb->iso_frame_desc[n].length;
			if (len < 0 || len > max)
				return -EMSGSIZE;
			urb->iso_frame_desc[n].status = -EXDEV;
			urb->iso_frame_desc[n].actual_length = 0;
		}
	}

	/* the I/O buffer must be mapped/unmapped, except when length=0 */
	if (urb->transfer_buffer_length > INT_MAX)
		return -EMSGSIZE;

#ifdef DEBUG
	/* stuff that drivers shouldn't do, but which shouldn't
	 * cause problems in HCDs if they get it wrong.
	 */
	{
	unsigned int orig_flags = urb->transfer_flags;
	unsigned int allowed;

	/* enforce simple/standard policy */
	allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP |
			URB_NO_INTERRUPT | URB_DIR_MASK | URB_FREE_BUFFER);
	switch (xfertype) {
	case USB_ENDPOINT_XFER_BULK:
		if (is_out)
			allowed |= URB_ZERO_PACKET;
		/* FALLTHROUGH */
	case USB_ENDPOINT_XFER_CONTROL:
		allowed |= URB_NO_FSBR;	/* only affects UHCI */
		/* FALLTHROUGH */
	default:			/* all non-iso endpoints */
		if (!is_out)
			allowed |= URB_SHORT_NOT_OK;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		allowed |= URB_ISO_ASAP;
		break;
	}
	urb->transfer_flags &= allowed;

	/* fail if submitter gave bogus flags */
	if (urb->transfer_flags != orig_flags) {
		dev_err(&dev->dev, "BOGUS urb flags, %x --> %x\n",
			orig_flags, urb->transfer_flags);
		return -EINVAL;
	}
	}
#endif
	/*
	 * Force periodic transfer intervals to be legal values that are
	 * a power of two (so HCDs don't need to).
	 *
	 * FIXME want bus->{intr,iso}_sched_horizon values here.  Each HC
	 * supports different values... this uses EHCI/UHCI defaults (and
	 * EHCI can use smaller non-default values).
	 */
	switch (xfertype) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		/* too small? */
		switch (dev->speed) {
		case USB_SPEED_VARIABLE:
			if (urb->interval < 6)
				return -EINVAL;
			break;
		default:
			if (urb->interval <= 0)
				return -EINVAL;
			break;
		}
		/* too big? */
		switch (dev->speed) {
		case USB_SPEED_SUPER:	/* units are 125us */
			/* Handle up to 2^(16-1) microframes */
			if (urb->interval > (1 << 15))
				return -EINVAL;
			max = 1 << 15;
			break;	/* don't fall into the wireless-USB check */
		case USB_SPEED_VARIABLE:
			if (urb->interval > 16)
				return -EINVAL;
			break;
		case USB_SPEED_HIGH:	/* units are microframes */
			/* NOTE usb handles 2^15 */
			if (urb->interval > (1024 * 8))
				urb->interval = 1024 * 8;
			max = 1024 * 8;
			break;
		case USB_SPEED_FULL:	/* units are frames/msec */
		case USB_SPEED_LOW:
			if (xfertype == USB_ENDPOINT_XFER_INT) {
				if (urb->interval > 255)
					return -EINVAL;
				/* NOTE ohci only handles up to 32 */
				max = 128;
			} else {
				if (urb->interval > 1024)
					urb->interval = 1024;
				/* NOTE usb and ohci handle up to 2^15 */
				max = 1024;
			}
			break;
		default:
			return -EINVAL;
		}
		if (dev->speed != USB_SPEED_VARIABLE) {
			/* Round down to a power of 2, no more than max */
			urb->interval = min(max, 1 << ilog2(urb->interval));
		}
	}

	return usb_hcd_submit_urb(urb, mem_flags);
}
EXPORT_SYMBOL_GPL(usb_submit_urb);
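/*
 * A minimal submission sketch under assumed names (my_dev, my_buf,
 * my_complete, MY_BUF_LEN and the endpoint address 0x01 are illustrative,
 * not defined here):
 *
 *	urb = usb_alloc_urb(0, GFP_KERNEL);
 *	if (!urb)
 *		return -ENOMEM;
 *	usb_fill_bulk_urb(urb, my_dev,
 *			  usb_sndbulkpipe(my_dev, 0x01),
 *			  my_buf, MY_BUF_LEN, my_complete, my_dev);
 *	retval = usb_submit_urb(urb, GFP_KERNEL);
 *	if (retval)
 *		usb_free_urb(urb);	// never submitted; drop our reference
 *	// on success, my_complete() runs exactly once, later
 */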
/*-------------------------------------------------------------------*/

/**
 * usb_unlink_urb - abort/cancel a transfer request for an endpoint
 * @urb: pointer to urb describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request.  URBs complete only once
 * per submission, and may be canceled only once per submission.
 * Successful cancellation means termination of @urb will be expedited
 * and the completion handler will be called with a status code
 * indicating that the request has been canceled (rather than any other
 * code).
 *
 * Drivers should not call this routine or related routines, such as
 * usb_kill_urb() or usb_unlink_anchored_urbs(), after their disconnect
 * method has returned.  The disconnect function should synchronize with
 * a driver's I/O routines to ensure that all URB-related activity has
 * completed before it returns.
 *
 * This request is always asynchronous.  Success is indicated by
 * returning -EINPROGRESS, at which time the URB will probably not yet
 * have been given back to the device driver.  When it is eventually
 * called, the completion function will see @urb->status == -ECONNRESET.
 * Failure is indicated by usb_unlink_urb() returning any other value.
 * Unlinking will fail when @urb is not currently "linked" (i.e., it was
 * never submitted, or it was unlinked before, or the hardware is already
 * finished with it), even if the completion handler has not yet run.
 *
 * Unlinking and Endpoint Queues:
 *
 * [The behaviors and guarantees described below do not apply to virtual
 * root hubs but only to endpoint queues for physical USB devices.]
 *
 * Host Controller Drivers (HCDs) place all the URBs for a particular
 * endpoint in a queue.  Normally the queue advances as the controller
 * hardware processes each request.  But when an URB terminates with an
 * error its queue generally stops (see below), at least until that URB's
 * completion routine returns.  It is guaranteed that a stopped queue
 * will not restart until all its unlinked URBs have been fully retired,
 * with their completion routines run, even if that's not until some time
 * after the original completion handler returns.  The same behavior and
 * guarantee apply when an URB terminates because it was unlinked.
 *
 * Bulk and interrupt endpoint queues are guaranteed to stop whenever an
 * URB terminates with any sort of error, including -ECONNRESET, -ENOENT,
 * and -EREMOTEIO.  Control endpoint queues behave the same way except
 * that they are not guaranteed to stop for -EREMOTEIO errors.  Queues
 * for isochronous endpoints are treated differently, because they must
 * advance at fixed rates.  Such queues do not stop when an URB
 * encounters an error or is unlinked.  An unlinked isochronous URB may
 * leave a gap in the stream of packets; it is undefined whether such
 * gaps can be filled in.
 *
 * Note that early termination of an URB because a short packet was
 * received will generate a -EREMOTEIO error if and only if the
 * URB_SHORT_NOT_OK flag is set.  By setting this flag, USB device
 * drivers can build deep queues for large or complex bulk transfers
 * and clean them up reliably after any sort of aborted transfer by
 * unlinking all pending URBs at the first fault.
 *
 * When a control URB terminates with an error other than -EREMOTEIO, it
 * is quite likely that the status stage of the transfer will not take
 * place.
 */
int usb_unlink_urb(struct urb *urb)
{
	if (!urb)
		return -EINVAL;
	if (!urb->dev)
		return -ENODEV;
	if (!urb->ep)
		return -EIDRM;
	return usb_hcd_unlink_urb(urb, -ECONNRESET);
}
EXPORT_SYMBOL_GPL(usb_unlink_urb);
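/*
 * An illustrative cancellation sketch (my_timeout and my_priv are
 * assumed names).  Because the unlink is asynchronous it may be called
 * in interrupt context, e.g. from a timer:
 *
 *	static void my_timeout(unsigned long data)
 *	{
 *		struct my_priv *priv = (struct my_priv *) data;
 *
 *		// completion handler will later run with -ECONNRESET
 *		usb_unlink_urb(priv->urb);
 *	}
 */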
/**
 * usb_kill_urb - cancel a transfer request and wait for it to finish
 * @urb: pointer to URB describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request.  It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and available for reuse.  These features make
 * this an ideal way to stop I/O in a disconnect() callback or close()
 * function.  If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * While the routine is running, attempts to resubmit the URB will fail
 * with error -EPERM.  Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_kill_urb(struct urb *urb)
{
	might_sleep();
	if (!(urb && urb->dev && urb->ep))
		return;
	atomic_inc(&urb->reject);

	usb_hcd_unlink_urb(urb, -ENOENT);
	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);

	atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_kill_urb);
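/*
 * A hedged disconnect-path sketch (my_disconnect and my_priv are
 * hypothetical).  usb_kill_urb() sleeps, which is fine in this context:
 *
 *	static void my_disconnect(struct usb_interface *intf)
 *	{
 *		struct my_priv *priv = usb_get_intfdata(intf);
 *
 *		usb_kill_urb(priv->urb);	// blocks until fully idle
 *		usb_free_urb(priv->urb);	// now safe to drop it
 *	}
 */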
/**
 * usb_poison_urb - reliably kill a transfer and prevent further use of an URB
 * @urb: pointer to URB describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request.  It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and cannot be reused.  These features make
 * this an ideal way to stop I/O in a disconnect() callback.
 * If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * While the routine is running and afterward, attempts to resubmit the
 * URB will fail with error -EPERM.  Thus even if the URB's completion
 * handler always tries to resubmit, it will not succeed and the URB
 * will become idle.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_urb(struct urb *urb)
{
	might_sleep();
	if (!(urb && urb->dev && urb->ep))
		return;
	atomic_inc(&urb->reject);

	usb_hcd_unlink_urb(urb, -ENOENT);
	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
}
EXPORT_SYMBOL_GPL(usb_poison_urb);

void usb_unpoison_urb(struct urb *urb)
{
	if (!urb)
		return;

	atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_unpoison_urb);

/**
 * usb_kill_anchored_urbs - cancel transfer requests en masse
 * @anchor: anchor the requests are bound to
 *
 * this allows all outstanding URBs to be killed starting
 * from the back of the queue
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_kill_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	spin_lock_irq(&anchor->lock);
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		/* we must make sure the URB isn't freed before we kill it */
		usb_get_urb(victim);
		spin_unlock_irq(&anchor->lock);
		/* this will unanchor the URB */
		usb_kill_urb(victim);
		usb_put_urb(victim);
		spin_lock_irq(&anchor->lock);
	}
	spin_unlock_irq(&anchor->lock);
}
EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);
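/*
 * A sketch of the poison/unpoison pairing, using the usb_driver
 * pre_reset/post_reset callbacks as the setting; my_priv and the
 * function names are assumptions for illustration:
 *
 *	static int my_pre_reset(struct usb_interface *intf)
 *	{
 *		struct my_priv *priv = usb_get_intfdata(intf);
 *
 *		usb_poison_urb(priv->urb);	// idle, resubmit blocked
 *		return 0;
 *	}
 *
 *	static int my_post_reset(struct usb_interface *intf)
 *	{
 *		struct my_priv *priv = usb_get_intfdata(intf);
 *
 *		usb_unpoison_urb(priv->urb);	// allow submissions again
 *		return usb_submit_urb(priv->urb, GFP_NOIO);
 *	}
 */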
/**
 * usb_poison_anchored_urbs - cease all traffic from an anchor
 * @anchor: anchor the requests are bound to
 *
 * this allows all outstanding URBs to be poisoned starting
 * from the back of the queue.  Newly added URBs will also be
 * poisoned
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	spin_lock_irq(&anchor->lock);
	anchor->poisoned = 1;
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		/* we must make sure the URB isn't freed before we kill it */
		usb_get_urb(victim);
		spin_unlock_irq(&anchor->lock);
		/* this will unanchor the URB */
		usb_poison_urb(victim);
		usb_put_urb(victim);
		spin_lock_irq(&anchor->lock);
	}
	spin_unlock_irq(&anchor->lock);
}
EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);

/**
 * usb_unpoison_anchored_urbs - let an anchor be used successfully again
 * @anchor: anchor the requests are bound to
 *
 * Reverses the effect of usb_poison_anchored_urbs();
 * the anchor can be used normally after it returns
 */
void usb_unpoison_anchored_urbs(struct usb_anchor *anchor)
{
	unsigned long flags;
	struct urb *lazarus;

	spin_lock_irqsave(&anchor->lock, flags);
	list_for_each_entry(lazarus, &anchor->urb_list, anchor_list) {
		usb_unpoison_urb(lazarus);
	}
	anchor->poisoned = 0;
	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unpoison_anchored_urbs);

/**
 * usb_unlink_anchored_urbs - asynchronously cancel transfer requests en masse
 * @anchor: anchor the requests are bound to
 *
 * this allows all outstanding URBs to be unlinked starting
 * from the back of the queue.  This function is asynchronous.
 * The unlinking is just triggered.  It may happen after this
 * function has returned.
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_unlink_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		usb_get_urb(victim);
		spin_unlock_irqrestore(&anchor->lock, flags);
		/* this will unanchor the URB */
		usb_unlink_urb(victim);
		usb_put_urb(victim);
		spin_lock_irqsave(&anchor->lock, flags);
	}
	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unlink_anchored_urbs);

/**
 * usb_wait_anchor_empty_timeout - wait for an anchor to be unused
 * @anchor: the anchor you want to become unused
 * @timeout: how long you are willing to wait in milliseconds
 *
 * Call this if you want to be sure all an anchor's
 * URBs have finished
 */
int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor,
				  unsigned int timeout)
{
	return wait_event_timeout(anchor->wait, list_empty(&anchor->urb_list),
				  msecs_to_jiffies(timeout));
}
EXPORT_SYMBOL_GPL(usb_wait_anchor_empty_timeout);
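/*
 * A brief shutdown sketch combining the asynchronous unlink with the
 * wait above (the 1000 ms timeout and my_priv are illustrative):
 *
 *	usb_unlink_anchored_urbs(&my_priv->submitted);	// async cancel
 *	if (!usb_wait_anchor_empty_timeout(&my_priv->submitted, 1000))
 *		// still busy after 1s; fall back to the blocking kill
 *		usb_kill_anchored_urbs(&my_priv->submitted);
 */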
/**
 * usb_get_from_anchor - get an anchor's oldest urb
 * @anchor: the anchor whose urb you want
 *
 * this will take the oldest urb from an anchor,
 * unanchor and return it
 */
struct urb *usb_get_from_anchor(struct usb_anchor *anchor)
{
	struct urb *victim;
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	if (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.next, struct urb,
				    anchor_list);
		usb_get_urb(victim);
		spin_unlock_irqrestore(&anchor->lock, flags);
		usb_unanchor_urb(victim);
	} else {
		spin_unlock_irqrestore(&anchor->lock, flags);
		victim = NULL;
	}

	return victim;
}

EXPORT_SYMBOL_GPL(usb_get_from_anchor);

/**
 * usb_scuttle_anchored_urbs - unanchor all an anchor's urbs
 * @anchor: the anchor whose urbs you want to unanchor
 *
 * use this to get rid of all an anchor's urbs
 */
void usb_scuttle_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		usb_get_urb(victim);
		spin_unlock_irqrestore(&anchor->lock, flags);
		/* this may free the URB */
		usb_unanchor_urb(victim);
		usb_put_urb(victim);
		spin_lock_irqsave(&anchor->lock, flags);
	}
	spin_unlock_irqrestore(&anchor->lock, flags);
}

EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs);

/**
 * usb_anchor_empty - is an anchor empty
 * @anchor: the anchor you want to query
 *
 * returns 1 if the anchor has no urbs associated with it
 */
int usb_anchor_empty(struct usb_anchor *anchor)
{
	return list_empty(&anchor->urb_list);
}

EXPORT_SYMBOL_GPL(usb_anchor_empty);
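/*
 * A draining sketch using usb_get_from_anchor() (my_priv->deferred is a
 * hypothetical anchor).  Note that each returned urb carries an extra
 * reference which the caller must drop:
 *
 *	while ((urb = usb_get_from_anchor(&my_priv->deferred))) {
 *		usb_kill_urb(urb);	// or resubmit, inspect, etc.
 *		usb_put_urb(urb);	// drop the reference we were handed
 *	}
 */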