/*
 * Universal Host Controller Interface driver for USB.
 *
 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
 *
 * (C) Copyright 1999 Linus Torvalds
 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
 * (C) Copyright 1999 Randy Dunlap
 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
 *		support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
 * (C) Copyright 2004-2006 Alan Stern, stern@rowland.harvard.edu
 */


/*
 * Technically, updating td->status here is a race, but it's not really a
 * problem. The worst that can happen is that we set the IOC bit again
 * generating a spurious interrupt. We could fix this by creating another
 * QH and leaving the IOC bit always set, but then we would have to play
 * games with the FSBR code to make sure we get the correct order in all
 * the cases. I don't think it's worth the effort
 */
static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
	if (uhci->is_stopped)
		mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
	uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
}

static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
	uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
}


/*
 * Full-Speed Bandwidth Reclamation (FSBR).
 * We turn on FSBR whenever a queue that wants it is advancing,
 * and leave it on for a short time thereafter.
 */
static void uhci_fsbr_on(struct uhci_hcd *uhci)
{
	uhci->fsbr_is_on = 1;
	uhci->skel_term_qh->link = cpu_to_le32(
			uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
}

static void uhci_fsbr_off(struct uhci_hcd *uhci)
{
	uhci->fsbr_is_on = 0;
	uhci->skel_term_qh->link = UHCI_PTR_TERM;
}

static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;

	if (!(urb->transfer_flags & URB_NO_FSBR))
		urbp->fsbr = 1;
}

static void uhci_urbp_wants_fsbr(struct uhci_hcd *uhci, struct urb_priv *urbp)
{
	if (urbp->fsbr) {
		uhci->fsbr_is_wanted = 1;
		if (!uhci->fsbr_is_on)
			uhci_fsbr_on(uhci);
		else if (uhci->fsbr_expiring) {
			uhci->fsbr_expiring = 0;
			del_timer(&uhci->fsbr_timer);
		}
	}
}

static void uhci_fsbr_timeout(unsigned long _uhci)
{
	struct uhci_hcd *uhci = (struct uhci_hcd *) _uhci;
	unsigned long flags;

	spin_lock_irqsave(&uhci->lock, flags);
	if (uhci->fsbr_expiring) {
		uhci->fsbr_expiring = 0;
		uhci_fsbr_off(uhci);
	}
	spin_unlock_irqrestore(&uhci->lock, flags);
}


static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
{
	dma_addr_t dma_handle;
	struct uhci_td *td;

	td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
	if (!td)
		return NULL;

	td->dma_handle = dma_handle;
	td->frame = -1;

	INIT_LIST_HEAD(&td->list);
	INIT_LIST_HEAD(&td->fl_list);

	return td;
}

static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	if (!list_empty(&td->list))
		dev_warn(uhci_dev(uhci), "td %p still in list!\n", td);
	if (!list_empty(&td->fl_list))
		dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td);

	dma_pool_free(uhci->td_pool, td, td->dma_handle);
}

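/*
 * A note on conventions used throughout this file: TDs and QHs are
 * allocated from DMA-coherent pools and are read directly by the
 * controller, so their hardware-visible fields (link, status, token,
 * buffer) are kept in little-endian form via cpu_to_le32(), and writes
 * that expose a new TD or QH to the hardware are ordered with wmb()
 * first (see uhci_insert_td_in_frame_list() and uhci_activate_qh()).
 */
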
static inline void uhci_fill_td(struct uhci_td *td, u32 status,
		u32 token, u32 buffer)
{
	td->status = cpu_to_le32(status);
	td->token = cpu_to_le32(token);
	td->buffer = cpu_to_le32(buffer);
}

static void uhci_add_td_to_urbp(struct uhci_td *td, struct urb_priv *urbp)
{
	list_add_tail(&td->list, &urbp->td_list);
}

static void uhci_remove_td_from_urbp(struct uhci_td *td)
{
	list_del_init(&td->list);
}

/*
 * We insert Isochronous URBs directly into the frame list at the beginning
 */
static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td, unsigned framenum)
{
	framenum &= (UHCI_NUMFRAMES - 1);

	td->frame = framenum;

	/* Is there a TD already mapped there? */
	if (uhci->frame_cpu[framenum]) {
		struct uhci_td *ftd, *ltd;

		ftd = uhci->frame_cpu[framenum];
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

		list_add_tail(&td->fl_list, &ftd->fl_list);

		td->link = ltd->link;
		wmb();
		ltd->link = cpu_to_le32(td->dma_handle);
	} else {
		td->link = uhci->frame[framenum];
		wmb();
		uhci->frame[framenum] = cpu_to_le32(td->dma_handle);
		uhci->frame_cpu[framenum] = td;
	}
}

static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td)
{
	/* If it's not inserted, don't remove it */
	if (td->frame == -1) {
		WARN_ON(!list_empty(&td->fl_list));
		return;
	}

	if (uhci->frame_cpu[td->frame] == td) {
		if (list_empty(&td->fl_list)) {
			uhci->frame[td->frame] = td->link;
			uhci->frame_cpu[td->frame] = NULL;
		} else {
			struct uhci_td *ntd;

			ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
			uhci->frame[td->frame] = cpu_to_le32(ntd->dma_handle);
			uhci->frame_cpu[td->frame] = ntd;
		}
	} else {
		struct uhci_td *ptd;

		ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
		ptd->link = td->link;
	}

	list_del_init(&td->fl_list);
	td->frame = -1;
}

static inline void uhci_remove_tds_from_frame(struct uhci_hcd *uhci,
		unsigned int framenum)
{
	struct uhci_td *ftd, *ltd;

	framenum &= (UHCI_NUMFRAMES - 1);

	ftd = uhci->frame_cpu[framenum];
	if (ftd) {
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
		uhci->frame[framenum] = ltd->link;
		uhci->frame_cpu[framenum] = NULL;

		while (!list_empty(&ftd->fl_list))
			list_del_init(ftd->fl_list.prev);
	}
}

/*
 * Remove all the TDs for an Isochronous URB from the frame list
 */
static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
	struct uhci_td *td;

	list_for_each_entry(td, &urbp->td_list, list)
		uhci_remove_td_from_frame_list(uhci, td);
}

static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
		struct usb_device *udev, struct usb_host_endpoint *hep)
{
	dma_addr_t dma_handle;
	struct uhci_qh *qh;

	qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
	if (!qh)
		return NULL;

	memset(qh, 0, sizeof(*qh));
	qh->dma_handle = dma_handle;

	qh->element = UHCI_PTR_TERM;
	qh->link = UHCI_PTR_TERM;

	INIT_LIST_HEAD(&qh->queue);
	INIT_LIST_HEAD(&qh->node);

	if (udev) {		/* Normal QH */
		qh->dummy_td = uhci_alloc_td(uhci);
		if (!qh->dummy_td) {
			dma_pool_free(uhci->qh_pool, qh, dma_handle);
			return NULL;
		}
		qh->state = QH_STATE_IDLE;
		qh->hep = hep;
		qh->udev = udev;
		hep->hcpriv = qh;
		qh->type = hep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;

	} else {		/* Skeleton QH */
		qh->state = QH_STATE_ACTIVE;
		qh->type = -1;
	}
	return qh;
}

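/*
 * Each endpoint QH keeps an inactive "dummy" TD at the end of its TD
 * chain.  The submit paths fill in the old dummy, link a fresh dummy
 * after it, and only then (after a wmb()) set TD_CTRL_ACTIVE on the
 * old one, so the controller never sees a partially built chain.
 * See uhci_submit_control() and uhci_submit_common() below.
 */
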
static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state != QH_STATE_IDLE && qh->udev);
	if (!list_empty(&qh->queue))
		dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh);

	list_del(&qh->node);
	if (qh->udev) {
		qh->hep->hcpriv = NULL;
		uhci_free_td(uhci, qh->dummy_td);
	}
	dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}

/*
 * When a queue is stopped and a dequeued URB is given back, adjust
 * the previous TD link (if the URB isn't first on the queue) or
 * save its toggle value (if it is first and is currently executing).
 *
 * Returns 0 if the URB should not yet be given back, 1 otherwise.
 */
static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	int ret = 1;

	/* Isochronous pipes don't use toggles and their TD link pointers
	 * get adjusted during uhci_urb_dequeue().  But since their queues
	 * cannot truly be stopped, we have to watch out for dequeues
	 * occurring after the nominal unlink frame. */
	if (qh->type == USB_ENDPOINT_XFER_ISOC) {
		ret = (uhci->frame_number + uhci->is_stopped !=
				qh->unlink_frame);
		goto done;
	}

	/* If the URB isn't first on its queue, adjust the link pointer
	 * of the last TD in the previous URB.  The toggle doesn't need
	 * to be saved since this URB can't be executing yet. */
	if (qh->queue.next != &urbp->node) {
		struct urb_priv *purbp;
		struct uhci_td *ptd;

		purbp = list_entry(urbp->node.prev, struct urb_priv, node);
		WARN_ON(list_empty(&purbp->td_list));
		ptd = list_entry(purbp->td_list.prev, struct uhci_td,
				list);
		td = list_entry(urbp->td_list.prev, struct uhci_td,
				list);
		ptd->link = td->link;
		goto done;
	}

	/* If the QH element pointer is UHCI_PTR_TERM then the currently
	 * executing URB has already been unlinked, so this one isn't it. */
	if (qh_element(qh) == UHCI_PTR_TERM)
		goto done;
	qh->element = UHCI_PTR_TERM;

	/* Control pipes have to worry about toggles */
	if (qh->type == USB_ENDPOINT_XFER_CONTROL)
		goto done;

	/* Save the next toggle value */
	WARN_ON(list_empty(&urbp->td_list));
	td = list_entry(urbp->td_list.next, struct uhci_td, list);
	qh->needs_fixup = 1;
	qh->initial_toggle = uhci_toggle(td_token(td));

done:
	return ret;
}

/*
 * Fix up the data toggles for URBs in a queue, when one of them
 * terminates early (short transfer, error, or dequeued).
 */
static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first)
{
	struct urb_priv *urbp = NULL;
	struct uhci_td *td;
	unsigned int toggle = qh->initial_toggle;
	unsigned int pipe;

	/* Fixups for a short transfer start with the second URB in the
	 * queue (the short URB is the first). */
	if (skip_first)
		urbp = list_entry(qh->queue.next, struct urb_priv, node);

	/* When starting with the first URB, if the QH element pointer is
	 * still valid then we know the URB's toggles are okay. */
	else if (qh_element(qh) != UHCI_PTR_TERM)
		toggle = 2;

	/* Fix up the toggle for the URBs in the queue.  Normally this
	 * loop won't run more than once: When an error or short transfer
	 * occurs, the queue usually gets emptied. */
	urbp = list_prepare_entry(urbp, &qh->queue, node);
	list_for_each_entry_continue(urbp, &qh->queue, node) {

		/* If the first TD has the right toggle value, we don't
		 * need to change any toggles in this URB */
		td = list_entry(urbp->td_list.next, struct uhci_td, list);
		if (toggle > 1 || uhci_toggle(td_token(td)) == toggle) {
			td = list_entry(urbp->td_list.next, struct uhci_td,
					list);
			toggle = uhci_toggle(td_token(td)) ^ 1;

		/* Otherwise all the toggles in the URB have to be switched */
		} else {
			list_for_each_entry(td, &urbp->td_list, list) {
				td->token ^= __constant_cpu_to_le32(
							TD_TOKEN_TOGGLE);
				toggle ^= 1;
			}
		}
	}

	wmb();
	pipe = list_entry(qh->queue.next, struct urb_priv, node)->urb->pipe;
	usb_settoggle(qh->udev, usb_pipeendpoint(pipe),
			usb_pipeout(pipe), toggle);
	qh->needs_fixup = 0;
}

/*
 * Put a QH on the schedule in both hardware and software
 */
static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	WARN_ON(list_empty(&qh->queue));

	/* Set the element pointer if it isn't set already.
	 * This isn't needed for Isochronous queues, but it doesn't hurt. */
	if (qh_element(qh) == UHCI_PTR_TERM) {
		struct urb_priv *urbp = list_entry(qh->queue.next,
				struct urb_priv, node);
		struct uhci_td *td = list_entry(urbp->td_list.next,
				struct uhci_td, list);

		qh->element = cpu_to_le32(td->dma_handle);
	}

	/* Treat the queue as if it has just advanced */
	qh->wait_expired = 0;
	qh->advance_jiffies = jiffies;

	if (qh->state == QH_STATE_ACTIVE)
		return;
	qh->state = QH_STATE_ACTIVE;

	/* Move the QH from its old list to the end of the appropriate
	 * skeleton's list */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move_tail(&qh->node, &qh->skel->node);

	/* Link it into the schedule */
	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	qh->link = pqh->link;
	wmb();
	pqh->link = UHCI_PTR_QH | cpu_to_le32(qh->dma_handle);
}

/*
 * Take a QH off the hardware schedule
 */
static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	if (qh->state == QH_STATE_UNLINKING)
		return;
	WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev);
	qh->state = QH_STATE_UNLINKING;

	/* Unlink the QH from the schedule and record when we did it */
	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	pqh->link = qh->link;
	mb();

	uhci_get_current_frame_number(uhci);
	qh->unlink_frame = uhci->frame_number;

	/* Force an interrupt so we know when the QH is fully unlinked */
	if (list_empty(&uhci->skel_unlink_qh->node))
		uhci_set_next_interrupt(uhci);

	/* Move the QH from its old list to the end of the unlinking list */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move_tail(&qh->node, &uhci->skel_unlink_qh->node);
}

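/*
 * Roughly, an endpoint QH moves through three states: IDLE (allocated
 * but not on the schedule), ACTIVE (linked into a skeleton QH's list
 * and visible to the controller), and UNLINKING (removed from the
 * hardware schedule but possibly still cached by the controller until
 * the frame counter has advanced past qh->unlink_frame).
 */
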
/*
 * When we and the controller are through with a QH, it becomes IDLE.
 * This happens when a QH has been off the schedule (on the unlinking
 * list) for more than one frame, or when an error occurs while adding
 * the first URB onto a new QH.
 */
static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state == QH_STATE_ACTIVE);

	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move(&qh->node, &uhci->idle_qh_list);
	qh->state = QH_STATE_IDLE;

	/* Now that the QH is idle, its post_td isn't being used */
	if (qh->post_td) {
		uhci_free_td(uhci, qh->post_td);
		qh->post_td = NULL;
	}

	/* If anyone is waiting for a QH to become idle, wake them up */
	if (uhci->num_waiting)
		wake_up_all(&uhci->waitqh);
}

static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
		struct urb *urb)
{
	struct urb_priv *urbp;

	urbp = kmem_cache_alloc(uhci_up_cachep, SLAB_ATOMIC);
	if (!urbp)
		return NULL;

	memset((void *)urbp, 0, sizeof(*urbp));

	urbp->urb = urb;
	urb->hcpriv = urbp;

	INIT_LIST_HEAD(&urbp->node);
	INIT_LIST_HEAD(&urbp->td_list);

	return urbp;
}

static void uhci_free_urb_priv(struct uhci_hcd *uhci,
		struct urb_priv *urbp)
{
	struct uhci_td *td, *tmp;

	if (!list_empty(&urbp->node))
		dev_warn(uhci_dev(uhci), "urb %p still on QH's list!\n",
				urbp->urb);

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
	}

	urbp->urb->hcpriv = NULL;
	kmem_cache_free(uhci_up_cachep, urbp);
}

/*
 * Map status to standard result codes
 *
 * <status> is (td_status(td) & 0xF60000), a.k.a.
 * uhci_status_bits(td_status(td)).
 * Note: <status> does not include the TD_CTRL_NAK bit.
 * <dir_out> is True for output TDs and False for input TDs.
 */
static int uhci_map_status(int status, int dir_out)
{
	if (!status)
		return 0;
	if (status & TD_CTRL_BITSTUFF)		/* Bitstuff error */
		return -EPROTO;
	if (status & TD_CTRL_CRCTIMEO) {	/* CRC/Timeout */
		if (dir_out)
			return -EPROTO;
		else
			return -EILSEQ;
	}
	if (status & TD_CTRL_BABBLE)		/* Babble */
		return -EOVERFLOW;
	if (status & TD_CTRL_DBUFERR)		/* Buffer error */
		return -ENOSR;
	if (status & TD_CTRL_STALLED)		/* Stalled */
		return -EPIPE;
	return 0;
}

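/*
 * For illustration only: a control read of 18 bytes on an endpoint
 * with an 8-byte maximum packet size ends up as five real TDs plus
 * the dummy built by uhci_submit_control() below:
 *
 *	SETUP 8 (DATA0) -> IN 8 (DATA1) -> IN 8 (DATA0) -> IN 2 (DATA1)
 *		-> status OUT 0 (DATA1, IOC) -> dummy (inactive)
 */
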
/*
 * Control transfers
 */
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;
	__le32 *plink;
	struct urb_priv *urbp = urb->hcpriv;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

	/* 3 errors, dummy TD remains inactive */
	status = uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;

	/*
	 * Build the TD for the control request setup packet
	 */
	td = qh->dummy_td;
	uhci_add_td_to_urbp(td, urbp);
	uhci_fill_td(td, status, destination | uhci_explen(8),
			urb->setup_dma);
	plink = &td->link;
	status |= TD_CTRL_ACTIVE;

	/*
	 * If direction is "send", change the packet ID from SETUP (0x2D)
	 * to OUT (0xE1).  Else change it from SETUP to IN (0x69) and
	 * set Short Packet Detect (SPD) for all data packets.
	 */
	if (usb_pipeout(urb->pipe))
		destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
	else {
		destination ^= (USB_PID_SETUP ^ USB_PID_IN);
		status |= TD_CTRL_SPD;
	}

	/*
	 * Build the DATA TDs
	 */
	while (len > 0) {
		int pktsze = min(len, maxsze);

		td = uhci_alloc_td(uhci);
		if (!td)
			goto nomem;
		*plink = cpu_to_le32(td->dma_handle);

		/* Alternate Data0/1 (start with Data1) */
		destination ^= TD_TOKEN_TOGGLE;

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze),
				data);
		plink = &td->link;

		data += pktsze;
		len -= pktsze;
	}

	/*
	 * Build the final TD for control status
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = cpu_to_le32(td->dma_handle);

	/*
	 * It's IN if the pipe is an output pipe or we're not expecting
	 * data back.
	 */
	destination &= ~TD_TOKEN_PID_MASK;
	if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
		destination |= USB_PID_IN;
	else
		destination |= USB_PID_OUT;

	destination |= TD_TOKEN_TOGGLE;		/* End in Data1 */

	status &= ~TD_CTRL_SPD;

	uhci_add_td_to_urbp(td, urbp);
	uhci_fill_td(td, status | TD_CTRL_IOC,
			destination | uhci_explen(0), 0);
	plink = &td->link;

	/*
	 * Build the new dummy TD and activate the old one
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = cpu_to_le32(td->dma_handle);

	uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
	wmb();
	qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
	qh->dummy_td = td;

	/* Low-speed transfers get a different queue, and won't hog the bus.
	 * Also, some devices enumerate better without FSBR; the easiest way
	 * to do that is to put URBs on the low-speed queue while the device
	 * isn't in the CONFIGURED state. */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->state != USB_STATE_CONFIGURED)
		qh->skel = uhci->skel_ls_control_qh;
	else {
		qh->skel = uhci->skel_fs_control_qh;
		uhci_add_fsbr(uhci, urb);
	}

	urb->actual_length = -8;	/* Account for the SETUP packet */
	return 0;

nomem:
	/* Remove the dummy TD from the td_list so it doesn't get freed */
	uhci_remove_td_from_urbp(qh->dummy_td);
	return -ENOMEM;
}

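/*
 * Both submit paths set TD_CTRL_SPD (Short Packet Detect) on IN data
 * TDs (except the final bulk/interrupt TD when URB_SHORT_NOT_OK is
 * clear), so the controller stops the queue at a short packet;
 * uhci_result_common() then calls uhci_fixup_short_transfer() to
 * repair the chain.
 */
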
/*
 * Common submit for bulk and interrupt
 */
static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;
	__le32 *plink;
	struct urb_priv *urbp = urb->hcpriv;
	unsigned int toggle;

	if (len < 0)
		return -EINVAL;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
	toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			 usb_pipeout(urb->pipe));

	/* 3 errors, dummy TD remains inactive */
	status = uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;
	if (usb_pipein(urb->pipe))
		status |= TD_CTRL_SPD;

	/*
	 * Build the DATA TDs
	 */
	plink = NULL;
	td = qh->dummy_td;
	do {	/* Allow zero length packets */
		int pktsze = maxsze;

		if (len <= pktsze) {		/* The last packet */
			pktsze = len;
			if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
				status &= ~TD_CTRL_SPD;
		}

		if (plink) {
			td = uhci_alloc_td(uhci);
			if (!td)
				goto nomem;
			*plink = cpu_to_le32(td->dma_handle);
		}
		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status,
				destination | uhci_explen(pktsze) |
					(toggle << TD_TOKEN_TOGGLE_SHIFT),
				data);
		plink = &td->link;
		status |= TD_CTRL_ACTIVE;

		data += pktsze;
		len -= maxsze;
		toggle ^= 1;
	} while (len > 0);

	/*
	 * URB_ZERO_PACKET means adding a 0-length packet, if direction
	 * is OUT and the transfer_length was an exact multiple of maxsze,
	 * hence (len = transfer_length - N * maxsze) == 0
	 * however, if transfer_length == 0, the zero packet was already
	 * prepared above.
	 */
	if ((urb->transfer_flags & URB_ZERO_PACKET) &&
			usb_pipeout(urb->pipe) && len == 0 &&
			urb->transfer_buffer_length > 0) {
		td = uhci_alloc_td(uhci);
		if (!td)
			goto nomem;
		*plink = cpu_to_le32(td->dma_handle);

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status,
				destination | uhci_explen(0) |
					(toggle << TD_TOKEN_TOGGLE_SHIFT),
				data);
		plink = &td->link;

		toggle ^= 1;
	}

	/* Set the interrupt-on-completion flag on the last packet.
	 * A more-or-less typical 4 KB URB (= size of one memory page)
	 * will require about 3 ms to transfer; that's a little on the
	 * fast side but not enough to justify delaying an interrupt
	 * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
	 * flag setting. */
	td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);

	/*
	 * Build the new dummy TD and activate the old one
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = cpu_to_le32(td->dma_handle);

	uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
	wmb();
	qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
	qh->dummy_td = td;
	qh->period = urb->interval;

	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe), toggle);
	return 0;

nomem:
	/* Remove the dummy TD from the td_list so it doesn't get freed */
	uhci_remove_td_from_urbp(qh->dummy_td);
	return -ENOMEM;
}

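/*
 * For example (illustrative only): a 128-byte bulk OUT to an endpoint
 * with a 64-byte maximum packet size becomes two 64-byte TDs above;
 * if URB_ZERO_PACKET is set, a third zero-length TD is appended.
 * The last TD of the URB carries TD_CTRL_IOC.
 */
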
static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	int ret;

	/* Can't have low-speed bulk transfers */
	if (urb->dev->speed == USB_SPEED_LOW)
		return -EINVAL;

	qh->skel = uhci->skel_bulk_qh;
	ret = uhci_submit_common(uhci, urb, qh);
	if (ret == 0)
		uhci_add_fsbr(uhci, urb);
	return ret;
}

static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	int exponent;

	/* USB 1.1 interrupt transfers only involve one packet per interval.
	 * Drivers can submit URBs of any length, but longer ones will need
	 * multiple intervals to complete.
	 */

	/* Figure out which power-of-two queue to use */
	for (exponent = 7; exponent >= 0; --exponent) {
		if ((1 << exponent) <= urb->interval)
			break;
	}
	if (exponent < 0)
		return -EINVAL;
	urb->interval = 1 << exponent;

	if (qh->period == 0)
		qh->skel = uhci->skelqh[UHCI_SKEL_INDEX(exponent)];
	else if (qh->period != urb->interval)
		return -EINVAL;		/* Can't change the period */

	return uhci_submit_common(uhci, urb, qh);
}

/*
 * Fix up the data structures following a short transfer
 */
static int uhci_fixup_short_transfer(struct uhci_hcd *uhci,
		struct uhci_qh *qh, struct urb_priv *urbp)
{
	struct uhci_td *td;
	struct list_head *tmp;
	int ret;

	td = list_entry(urbp->td_list.prev, struct uhci_td, list);
	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {

		/* When a control transfer is short, we have to restart
		 * the queue at the status stage transaction, which is
		 * the last TD. */
		WARN_ON(list_empty(&urbp->td_list));
		qh->element = cpu_to_le32(td->dma_handle);
		tmp = td->list.prev;
		ret = -EINPROGRESS;

	} else {

		/* When a bulk/interrupt transfer is short, we have to
		 * fix up the toggles of the following URBs on the queue
		 * before restarting the queue at the next URB. */
		qh->initial_toggle = uhci_toggle(td_token(qh->post_td)) ^ 1;
		uhci_fixup_toggles(qh, 1);

		if (list_empty(&urbp->td_list))
			td = qh->post_td;
		qh->element = td->link;
		tmp = urbp->td_list.prev;
		ret = 0;
	}

	/* Remove all the TDs we skipped over, from tmp back to the start */
	while (tmp != &urbp->td_list) {
		td = list_entry(tmp, struct uhci_td, list);
		tmp = tmp->prev;

		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
	}
	return ret;
}

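/*
 * uhci_result_common() below returns -EINPROGRESS while the URB's TDs
 * are still active, 0 once it has completed, or a negative error code
 * (from uhci_map_status(), or -EREMOTEIO for a short transfer when
 * URB_SHORT_NOT_OK is set).  An acceptable short packet is handled
 * internally via uhci_fixup_short_transfer(), which either restarts a
 * control queue at the status stage (-EINPROGRESS) or returns 0.
 */
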
/*
 * Common result for control, bulk, and interrupt
 */
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_qh *qh = urbp->qh;
	struct uhci_td *td, *tmp;
	unsigned status;
	int ret = 0;

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		unsigned int ctrlstat;
		int len;

		ctrlstat = td_status(td);
		status = uhci_status_bits(ctrlstat);
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		len = uhci_actual_length(ctrlstat);
		urb->actual_length += len;

		if (status) {
			ret = uhci_map_status(status,
					uhci_packetout(td_token(td)));
			if ((debug == 1 && ret != -EPIPE) || debug > 1) {
				/* Some debugging code */
				dev_dbg(&urb->dev->dev,
						"%s: failed with status %x\n",
						__FUNCTION__, status);

				if (debug > 1 && errbuf) {
					/* Print the chain for debugging */
					uhci_show_qh(urbp->qh, errbuf,
							ERRBUF_LEN, 0);
					lprintk(errbuf);
				}
			}

		} else if (len < uhci_expected_length(td_token(td))) {

			/* We received a short packet */
			if (urb->transfer_flags & URB_SHORT_NOT_OK)
				ret = -EREMOTEIO;
			else if (ctrlstat & TD_CTRL_SPD)
				ret = 1;
		}

		uhci_remove_td_from_urbp(td);
		if (qh->post_td)
			uhci_free_td(uhci, qh->post_td);
		qh->post_td = td;

		if (ret != 0)
			goto err;
	}
	return ret;

err:
	if (ret < 0) {
		/* In case a control transfer gets an error
		 * during the setup stage */
		urb->actual_length = max(urb->actual_length, 0);

		/* Note that the queue has stopped and save
		 * the next toggle value */
		qh->element = UHCI_PTR_TERM;
		qh->is_stopped = 1;
		qh->needs_fixup = (qh->type != USB_ENDPOINT_XFER_CONTROL);
		qh->initial_toggle = uhci_toggle(td_token(td)) ^
				(ret == -EREMOTEIO);

	} else		/* Short packet received */
		ret = uhci_fixup_short_transfer(uhci, qh, urbp);
	return ret;
}

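/*
 * A note on isochronous scheduling (see uhci_submit_isochronous()
 * below): on a queue with no period established yet, URB_ISO_ASAP
 * schedules the URB to start 10 frames in the future; on a queue that
 * is already running, a new URB must continue exactly where the
 * previous one leaves off (URB_ISO_ASAP simply adopts that frame).
 */
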
/*
 * Isochronous transfers
 */
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td = NULL;	/* Since urb->number_of_packets > 0 */
	int i, frame;
	unsigned long destination, status;
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	/* Values must not be too big (could overflow below) */
	if (urb->interval >= UHCI_NUMFRAMES ||
			urb->number_of_packets >= UHCI_NUMFRAMES)
		return -EFBIG;

	/* Check the period and figure out the starting frame number */
	if (qh->period == 0) {
		if (urb->transfer_flags & URB_ISO_ASAP) {
			uhci_get_current_frame_number(uhci);
			urb->start_frame = uhci->frame_number + 10;
		} else {
			i = urb->start_frame - uhci->last_iso_frame;
			if (i <= 0 || i >= UHCI_NUMFRAMES)
				return -EINVAL;
		}
	} else if (qh->period != urb->interval) {
		return -EINVAL;		/* Can't change the period */

	} else {	/* Pick up where the last URB leaves off */
		if (list_empty(&qh->queue)) {
			frame = qh->iso_frame;
		} else {
			struct urb *lurb;

			lurb = list_entry(qh->queue.prev,
					struct urb_priv, node)->urb;
			frame = lurb->start_frame +
					lurb->number_of_packets *
					lurb->interval;
		}
		if (urb->transfer_flags & URB_ISO_ASAP)
			urb->start_frame = frame;
		else if (urb->start_frame != frame)
			return -EINVAL;
	}

	/* Make sure we won't have to go too far into the future */
	if (uhci_frame_before_eq(uhci->last_iso_frame + UHCI_NUMFRAMES,
			urb->start_frame + urb->number_of_packets *
				urb->interval))
		return -EFBIG;

	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	for (i = 0; i < urb->number_of_packets; i++) {
		td = uhci_alloc_td(uhci);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status, destination |
				uhci_explen(urb->iso_frame_desc[i].length),
				urb->transfer_dma +
					urb->iso_frame_desc[i].offset);
	}

	/* Set the interrupt-on-completion flag on the last packet. */
	td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);

	qh->skel = uhci->skel_iso_qh;
	qh->period = urb->interval;

	/* Add the TDs to the frame list */
	frame = urb->start_frame;
	list_for_each_entry(td, &urbp->td_list, list) {
		uhci_insert_td_in_frame_list(uhci, td, frame);
		frame += qh->period;
	}

	if (list_empty(&qh->queue)) {
		qh->iso_packet_desc = &urb->iso_frame_desc[0];
		qh->iso_frame = urb->start_frame;
		qh->iso_status = 0;
	}

	return 0;
}

static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td, *tmp;
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_qh *qh = urbp->qh;

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		unsigned int ctrlstat;
		int status;
		int actlength;

		if (uhci_frame_before_eq(uhci->cur_iso_frame, qh->iso_frame))
			return -EINPROGRESS;

		uhci_remove_tds_from_frame(uhci, qh->iso_frame);

		ctrlstat = td_status(td);
		if (ctrlstat & TD_CTRL_ACTIVE) {
			status = -EXDEV;	/* TD was added too late? */
		} else {
			status = uhci_map_status(uhci_status_bits(ctrlstat),
					usb_pipeout(urb->pipe));
			actlength = uhci_actual_length(ctrlstat);

			urb->actual_length += actlength;
			qh->iso_packet_desc->actual_length = actlength;
			qh->iso_packet_desc->status = status;
		}

		if (status) {
			urb->error_count++;
			qh->iso_status = status;
		}

		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
		qh->iso_frame += qh->period;
		++qh->iso_packet_desc;
	}
	return qh->iso_status;
}

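/*
 * Every endpoint gets one QH, cached in hep->hcpriv by uhci_alloc_qh();
 * uhci_urb_enqueue() reuses it for subsequent URBs, so all URBs queued
 * to the same endpoint share a single QH and are chained on qh->queue.
 */
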
static int uhci_urb_enqueue(struct usb_hcd *hcd,
		struct usb_host_endpoint *hep,
		struct urb *urb, gfp_t mem_flags)
{
	int ret;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb_priv *urbp;
	struct uhci_qh *qh;
	int bustime;

	spin_lock_irqsave(&uhci->lock, flags);

	ret = urb->status;
	if (ret != -EINPROGRESS)		/* URB already unlinked! */
		goto done;

	ret = -ENOMEM;
	urbp = uhci_alloc_urb_priv(uhci, urb);
	if (!urbp)
		goto done;

	if (hep->hcpriv)
		qh = (struct uhci_qh *) hep->hcpriv;
	else {
		qh = uhci_alloc_qh(uhci, urb->dev, hep);
		if (!qh)
			goto err_no_qh;
	}
	urbp->qh = qh;

	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		ret = uhci_submit_control(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ret = uhci_submit_bulk(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_INT:
		if (list_empty(&qh->queue)) {
			bustime = usb_check_bandwidth(urb->dev, urb);
			if (bustime < 0)
				ret = bustime;
			else {
				ret = uhci_submit_interrupt(uhci, urb, qh);
				if (ret == 0)
					usb_claim_bandwidth(urb->dev, urb, bustime, 0);
			}
		} else {	/* inherit from parent */
			struct urb_priv *eurbp;

			eurbp = list_entry(qh->queue.prev, struct urb_priv,
					node);
			urb->bandwidth = eurbp->urb->bandwidth;
			ret = uhci_submit_interrupt(uhci, urb, qh);
		}
		break;
	case USB_ENDPOINT_XFER_ISOC:
		urb->error_count = 0;
		bustime = usb_check_bandwidth(urb->dev, urb);
		if (bustime < 0) {
			ret = bustime;
			break;
		}

		ret = uhci_submit_isochronous(uhci, urb, qh);
		if (ret == 0)
			usb_claim_bandwidth(urb->dev, urb, bustime, 1);
		break;
	}
	if (ret != 0)
		goto err_submit_failed;

	/* Add this URB to the QH */
	urbp->qh = qh;
	list_add_tail(&urbp->node, &qh->queue);

	/* If the new URB is the first and only one on this QH then either
	 * the QH is new and idle or else it's unlinked and waiting to
	 * become idle, so we can activate it right away.  But only if the
	 * queue isn't stopped. */
	if (qh->queue.next == &urbp->node && !qh->is_stopped) {
		uhci_activate_qh(uhci, qh);
		uhci_urbp_wants_fsbr(uhci, urbp);
	}
	goto done;

err_submit_failed:
	if (qh->state == QH_STATE_IDLE)
		uhci_make_qh_idle(uhci, qh);	/* Reclaim unused QH */

err_no_qh:
	uhci_free_urb_priv(uhci, urbp);

done:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return ret;
}

static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb_priv *urbp;
	struct uhci_qh *qh;

	spin_lock_irqsave(&uhci->lock, flags);
	urbp = urb->hcpriv;
	if (!urbp)			/* URB was never linked! */
		goto done;
	qh = urbp->qh;

	/* Remove Isochronous TDs from the frame list ASAP */
	if (qh->type == USB_ENDPOINT_XFER_ISOC) {
		uhci_unlink_isochronous_tds(uhci, urb);
		mb();

		/* If the URB has already started, update the QH unlink time */
		uhci_get_current_frame_number(uhci);
		if (uhci_frame_before_eq(urb->start_frame, uhci->frame_number))
			qh->unlink_frame = uhci->frame_number;
	}

	uhci_unlink_qh(uhci, qh);

done:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return 0;
}

/*
 * Finish unlinking an URB and give it back
 */
static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct urb *urb, struct pt_regs *regs)
__releases(uhci->lock)
__acquires(uhci->lock)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	/* When giving back the first URB in an Isochronous queue,
	 * reinitialize the QH's iso-related members for the next URB. */
	if (qh->type == USB_ENDPOINT_XFER_ISOC &&
			urbp->node.prev == &qh->queue &&
			urbp->node.next != &qh->queue) {
		struct urb *nurb = list_entry(urbp->node.next,
				struct urb_priv, node)->urb;

		qh->iso_packet_desc = &nurb->iso_frame_desc[0];
		qh->iso_frame = nurb->start_frame;
		qh->iso_status = 0;
	}

	/* Take the URB off the QH's queue.  If the queue is now empty,
	 * this is a perfect time for a toggle fixup. */
	list_del_init(&urbp->node);
	if (list_empty(&qh->queue) && qh->needs_fixup) {
		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
				usb_pipeout(urb->pipe), qh->initial_toggle);
		qh->needs_fixup = 0;
	}

	uhci_free_urb_priv(uhci, urbp);

	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		if (urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 1);
		break;
	case USB_ENDPOINT_XFER_INT:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		/* Make sure we don't release if we have a queued URB */
		if (list_empty(&qh->queue) && urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 0);
		else
			/* bandwidth was passed on to queued URB, */
			/* so don't let usb_unlink_urb() release it */
			urb->bandwidth = 0;
		break;
	}

	spin_unlock(&uhci->lock);
	usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb, regs);
	spin_lock(&uhci->lock);

	/* If the queue is now empty, we can unlink the QH and give up its
	 * reserved bandwidth. */
	if (list_empty(&qh->queue)) {
		uhci_unlink_qh(uhci, qh);

		/* Bandwidth stuff not yet implemented */
		qh->period = 0;
	}
}

/*
 * Scan the URBs in a QH's queue
 */
#define QH_FINISHED_UNLINKING(qh)			\
		(qh->state == QH_STATE_UNLINKING &&	\
		 uhci->frame_number + uhci->is_stopped != qh->unlink_frame)

static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct pt_regs *regs)
{
	struct urb_priv *urbp;
	struct urb *urb;
	int status;

	while (!list_empty(&qh->queue)) {
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		urb = urbp->urb;

		if (qh->type == USB_ENDPOINT_XFER_ISOC)
			status = uhci_result_isochronous(uhci, urb);
		else
			status = uhci_result_common(uhci, urb);
		if (status == -EINPROGRESS)
			break;

		spin_lock(&urb->lock);
		if (urb->status == -EINPROGRESS)	/* Not dequeued */
			urb->status = status;
		else
			status = ECONNRESET;		/* Not -ECONNRESET */
		spin_unlock(&urb->lock);

		/* Dequeued but completed URBs can't be given back unless
		 * the QH is stopped or has finished unlinking. */
		if (status == ECONNRESET) {
			if (QH_FINISHED_UNLINKING(qh))
				qh->is_stopped = 1;
			else if (!qh->is_stopped)
				return;
		}

		uhci_giveback_urb(uhci, qh, urb, regs);
		if (status < 0)
			break;
	}

	/* If the QH is neither stopped nor finished unlinking (normal case),
	 * our work here is done. */
	if (QH_FINISHED_UNLINKING(qh))
		qh->is_stopped = 1;
	else if (!qh->is_stopped)
		return;

	/* Otherwise give back each of the dequeued URBs */
restart:
	list_for_each_entry(urbp, &qh->queue, node) {
		urb = urbp->urb;
		if (urb->status != -EINPROGRESS) {

			/* Fix up the TD links and save the toggles for
			 * non-Isochronous queues.  For Isochronous queues,
			 * test for too-recent dequeues. */
			if (!uhci_cleanup_queue(uhci, qh, urb)) {
				qh->is_stopped = 0;
				return;
			}
			uhci_giveback_urb(uhci, qh, urb, regs);
			goto restart;
		}
	}
	qh->is_stopped = 0;

	/* There are no more dequeued URBs.  If there are still URBs on the
	 * queue, the QH can now be re-activated. */
	if (!list_empty(&qh->queue)) {
		if (qh->needs_fixup)
			uhci_fixup_toggles(qh, 0);

		/* If the first URB on the queue wants FSBR but its time
		 * limit has expired, set the next TD to interrupt on
		 * completion before reactivating the QH. */
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		if (urbp->fsbr && qh->wait_expired) {
			struct uhci_td *td = list_entry(urbp->td_list.next,
					struct uhci_td, list);

			td->status |= __cpu_to_le32(TD_CTRL_IOC);
		}

		uhci_activate_qh(uhci, qh);
	}

	/* The queue is empty.  The QH can become idle if it is fully
	 * unlinked. */
	else if (QH_FINISHED_UNLINKING(qh))
		uhci_make_qh_idle(uhci, qh);
}

/*
 * Check for queues that have made some forward progress.
 * Returns 0 if the queue is not Isochronous, is ACTIVE, and
 * has not advanced since last examined; 1 otherwise.
 *
 * Early Intel controllers have a bug which causes qh->element sometimes
 * not to advance when a TD completes successfully.  The queue remains
 * stuck on the inactive completed TD.  We detect such cases and advance
 * the element pointer by hand.
 */
static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct urb_priv *urbp = NULL;
	struct uhci_td *td;
	int ret = 1;
	unsigned status;

	if (qh->type == USB_ENDPOINT_XFER_ISOC)
		goto done;

	/* Treat an UNLINKING queue as though it hasn't advanced.
	 * This is okay because reactivation will treat it as though
	 * it has advanced, and if it is going to become IDLE then
	 * this doesn't matter anyway.  Furthermore it's possible
	 * for an UNLINKING queue not to have any URBs at all, or
	 * for its first URB not to have any TDs (if it was dequeued
	 * just as it completed).  So it's not easy in any case to
	 * test whether such queues have advanced. */
	if (qh->state != QH_STATE_ACTIVE) {
		urbp = NULL;
		status = 0;

	} else {
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		td = list_entry(urbp->td_list.next, struct uhci_td, list);
		status = td_status(td);
		if (!(status & TD_CTRL_ACTIVE)) {

			/* We're okay, the queue has advanced */
			qh->wait_expired = 0;
			qh->advance_jiffies = jiffies;
			goto done;
		}
		ret = 0;
	}

	/* The queue hasn't advanced; check for timeout */
	if (qh->wait_expired)
		goto done;

	if (time_after(jiffies, qh->advance_jiffies + QH_WAIT_TIMEOUT)) {

		/* Detect the Intel bug and work around it */
		if (qh->post_td && qh_element(qh) ==
				cpu_to_le32(qh->post_td->dma_handle)) {
			qh->element = qh->post_td->link;
			qh->advance_jiffies = jiffies;
			ret = 1;
			goto done;
		}

		qh->wait_expired = 1;

		/* If the current URB wants FSBR, unlink it temporarily
		 * so that we can safely set the next TD to interrupt on
		 * completion.  That way we'll know as soon as the queue
		 * starts moving again. */
		if (urbp && urbp->fsbr && !(status & TD_CTRL_IOC))
			uhci_unlink_qh(uhci, qh);

	} else {
		/* Unmoving but not-yet-expired queues keep FSBR alive */
		if (urbp)
			uhci_urbp_wants_fsbr(uhci, urbp);
	}

done:
	return ret;
}

/*
 * Process events in the schedule, but only in one thread at a time
 */
static void uhci_scan_schedule(struct uhci_hcd *uhci, struct pt_regs *regs)
{
	int i;
	struct uhci_qh *qh;

	/* Don't allow re-entrant calls */
	if (uhci->scan_in_progress) {
		uhci->need_rescan = 1;
		return;
	}
	uhci->scan_in_progress = 1;
rescan:
	uhci->need_rescan = 0;
	uhci->fsbr_is_wanted = 0;

	uhci_clear_next_interrupt(uhci);
	uhci_get_current_frame_number(uhci);
	uhci->cur_iso_frame = uhci->frame_number;

	/* Go through all the QH queues and process the URBs in each one */
	for (i = 0; i < UHCI_NUM_SKELQH - 1; ++i) {
		uhci->next_qh = list_entry(uhci->skelqh[i]->node.next,
				struct uhci_qh, node);
		while ((qh = uhci->next_qh) != uhci->skelqh[i]) {
			uhci->next_qh = list_entry(qh->node.next,
					struct uhci_qh, node);

			if (uhci_advance_check(uhci, qh)) {
				uhci_scan_qh(uhci, qh, regs);
				if (qh->state == QH_STATE_ACTIVE) {
					uhci_urbp_wants_fsbr(uhci,
	list_entry(qh->queue.next, struct urb_priv, node));
				}
			}
		}
	}

	uhci->last_iso_frame = uhci->cur_iso_frame;
	if (uhci->need_rescan)
		goto rescan;
	uhci->scan_in_progress = 0;

	if (uhci->fsbr_is_on && !uhci->fsbr_is_wanted &&
			!uhci->fsbr_expiring) {
		uhci->fsbr_expiring = 1;
		mod_timer(&uhci->fsbr_timer, jiffies + FSBR_OFF_DELAY);
	}

	if (list_empty(&uhci->skel_unlink_qh->node))
		uhci_clear_next_interrupt(uhci);
	else
		uhci_set_next_interrupt(uhci);
}