// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2001-2004 by David Brownell
 */

/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/*
 * EHCI hardware queue manipulation ... the core.  QH/QTD manipulation.
 *
 * Control, bulk, and interrupt traffic all use "qh" lists.  They list "qtd"
 * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned
 * buffers needed for the larger number).  We use one QH per endpoint, queue
 * multiple urbs (all three types) per endpoint.  URBs may need several qtds.
 *
 * ISO traffic uses "ISO TD" (itd, and sitd) records, and (along with
 * interrupts) needs careful scheduling.  Performance improvements can be
 * an ongoing challenge.  That's in "ehci-sched.c".
 *
 * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs,
 * or otherwise through transaction translators (TTs) in USB 2.0 hubs using
 * (b) special fields in qh entries or (c) split iso entries.  TTs will
 * buffer low/full speed data so the host collects it at high speed.
 */

/*-------------------------------------------------------------------------*/

/* fill a qtd, returning how much of the buffer we were able to queue up */

static unsigned int
qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf,
		size_t len, int token, int maxpacket)
{
	unsigned int count;
	u64 addr = buf;
	int i;

	/* one buffer entry per 4K ... first might be short or unaligned */
	qtd->hw_buf[0] = cpu_to_hc32(ehci, (u32)addr);
	qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32));
	count = 0x1000 - (buf & 0x0fff);	/* rest of that page */
	if (likely (len < count))		/* ... iff needed */
		count = len;
	else {
		buf += 0x1000;
		buf &= ~0x0fff;

		/* per-qtd limit: from 16K to 20K (best alignment) */
		for (i = 1; count < len && i < 5; i++) {
			addr = buf;
			qtd->hw_buf[i] = cpu_to_hc32(ehci, (u32)addr);
			qtd->hw_buf_hi[i] = cpu_to_hc32(ehci,
					(u32)(addr >> 32));
			buf += 0x1000;
			if ((count + 0x1000) < len)
				count += 0x1000;
			else
				count = len;
		}

		/* short packets may only terminate transfers */
		if (count != len)
			count -= (count % maxpacket);
	}
	qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | token);
	qtd->length = count;

	return count;
}

/*-------------------------------------------------------------------------*/

static inline void
qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
{
	struct ehci_qh_hw *hw = qh->hw;

	/* writes to an active overlay are unsafe */
	WARN_ON(qh->qh_state != QH_STATE_IDLE);

	hw->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
	hw->hw_alt_next = EHCI_LIST_END(ehci);

	/* Except for control endpoints, we make hardware maintain data
	 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
	 * and set the pseudo-toggle in udev.  Only usb_clear_halt() will
	 * ever clear it.
	 */
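	/*
	 * QH_TOGGLE_CTL is the EHCI "DTC" bit: when it is set the toggle
	 * comes from each qtd (control endpoints), and when it is clear
	 * the QH overlay carries the toggle -- which is what gets
	 * (re)initialized below.
	 */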
	if (!(hw->hw_info1 & cpu_to_hc32(ehci, QH_TOGGLE_CTL))) {
		unsigned	is_out, epnum;

		is_out = qh->is_out;
		epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f;
		if (unlikely(!usb_gettoggle(qh->ps.udev, epnum, is_out))) {
			hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
			usb_settoggle(qh->ps.udev, epnum, is_out, 1);
		}
	}

	hw->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
}

/* if it weren't for a common silicon quirk (writing the dummy into the qh
 * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
 * recovery (including urb dequeue) would need software changes to a QH...
 */
static void
qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	struct ehci_qtd *qtd;

	qtd = list_entry(qh->qtd_list.next, struct ehci_qtd, qtd_list);

	/*
	 * first qtd may already be partially processed.
	 * If we come here during unlink, the QH overlay region
	 * might have reference to the just unlinked qtd. The
	 * qtd is updated in qh_completions(). Update the QH
	 * overlay here.
	 */
	if (qh->hw->hw_token & ACTIVE_BIT(ehci)) {
		qh->hw->hw_qtd_next = qtd->hw_next;
		if (qh->should_be_inactive)
			ehci_warn(ehci, "qh %p should be inactive!\n", qh);
	} else {
		qh_update(ehci, qh, qtd);
	}
	qh->should_be_inactive = 0;
}

/*-------------------------------------------------------------------------*/

static void qh_link_async(struct ehci_hcd *ehci, struct ehci_qh *qh);

static void ehci_clear_tt_buffer_complete(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
	struct ehci_qh		*qh = ep->hcpriv;
	unsigned long		flags;

	spin_lock_irqsave(&ehci->lock, flags);
	qh->clearing_tt = 0;
	if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
			&& ehci->rh_state == EHCI_RH_RUNNING)
		qh_link_async(ehci, qh);
	spin_unlock_irqrestore(&ehci->lock, flags);
}

static void ehci_clear_tt_buffer(struct ehci_hcd *ehci, struct ehci_qh *qh,
		struct urb *urb, u32 token)
{

	/* If an async split transaction gets an error or is unlinked,
	 * the TT buffer may be left in an indeterminate state.  We
	 * have to clear the TT buffer.
	 *
	 * Note: this routine is never called for Isochronous transfers.
	 */
	if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
#ifdef CONFIG_DYNAMIC_DEBUG
		struct usb_device *tt = urb->dev->tt->hub;
		dev_dbg(&tt->dev,
			"clear tt buffer port %d, a%d ep%d t%08x\n",
			urb->dev->ttport, urb->dev->devnum,
			usb_pipeendpoint(urb->pipe), token);
#endif /* CONFIG_DYNAMIC_DEBUG */
		if (!ehci_is_TDI(ehci)
				|| urb->dev->tt->hub !=
				   ehci_to_hcd(ehci)->self.root_hub) {
			if (usb_hub_clear_tt_buffer(urb) == 0)
				qh->clearing_tt = 1;
		} else {

			/* REVISIT ARC-derived cores don't clear the root
			 * hub TT buffer in this way...
			 */
		}
	}
}

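/*
 * Roughly, the qtd token's error bits map to URB status codes as follows:
 * babble -> -EOVERFLOW, missed microframe on IN -> -EPROTO, halt with CERR
 * still nonzero -> -EPIPE (stall), data buffer error -> -ENOSR or -ECOMM,
 * transaction error -> -EPROTO.  See the tests below for the exact order.
 */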
static int qtd_copy_status (
	struct ehci_hcd *ehci,
	struct urb *urb,
	size_t length,
	u32 token
)
{
	int	status = -EINPROGRESS;

	/* count IN/OUT bytes, not SETUP (even short packets) */
	if (likely(QTD_PID(token) != PID_CODE_SETUP))
		urb->actual_length += length - QTD_LENGTH (token);

	/* don't modify error codes */
	if (unlikely(urb->unlinked))
		return status;

	/* force cleanup after short read; not always an error */
	if (unlikely (IS_SHORT_READ (token)))
		status = -EREMOTEIO;

	/* serious "can't proceed" faults reported by the hardware */
	if (token & QTD_STS_HALT) {
		if (token & QTD_STS_BABBLE) {
			/* FIXME "must" disable babbling device's port too */
			status = -EOVERFLOW;
		/*
		 * When MMF is active and PID Code is IN, queue is halted.
		 * EHCI Specification, Table 4-13.
		 */
		} else if ((token & QTD_STS_MMF) &&
					(QTD_PID(token) == PID_CODE_IN)) {
			status = -EPROTO;
		/* CERR nonzero + halt --> stall */
		} else if (QTD_CERR(token)) {
			status = -EPIPE;

		/* In theory, more than one of the following bits can be set
		 * since they are sticky and the transaction is retried.
		 * Which to test first is rather arbitrary.
		 */
		} else if (token & QTD_STS_MMF) {
			/* fs/ls interrupt xfer missed the complete-split */
			status = -EPROTO;
		} else if (token & QTD_STS_DBE) {
			status = (QTD_PID(token) == PID_CODE_IN) /* IN ? */
				? -ENOSR  /* hc couldn't read data */
				: -ECOMM; /* hc couldn't write data */
		} else if (token & QTD_STS_XACT) {
			/* timeout, bad CRC, wrong PID, etc */
			ehci_dbg(ehci, "devpath %s ep%d%s 3strikes\n",
				urb->dev->devpath,
				usb_pipeendpoint(urb->pipe),
				usb_pipein(urb->pipe) ? "in" : "out");
			status = -EPROTO;
		} else {	/* unknown */
			status = -EPROTO;
		}
	}

	return status;
}

static void
ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
{
	if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
		/* ... update hc-wide periodic stats */
		ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
	}

	if (unlikely(urb->unlinked)) {
		INCR(ehci->stats.unlink);
	} else {
		/* report non-error and short read status as zero */
		if (status == -EINPROGRESS || status == -EREMOTEIO)
			status = 0;
		INCR(ehci->stats.complete);
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"%s %s urb %p ep%d%s status %d len %d/%d\n",
		__func__, urb->dev->devpath, urb,
		usb_pipeendpoint (urb->pipe),
		usb_pipein (urb->pipe) ? "in" : "out",
		status,
		urb->actual_length, urb->transfer_buffer_length);
#endif

	usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
	usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status);
}

static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);

/*
 * Process and free completed qtds for a qh, returning URBs to drivers.
 * Chases up to qh->hw_current.  Returns nonzero if the caller should
 * unlink qh.
 */
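/*
 * The scan below folds each completed qtd's status into its URB, gives the
 * URB back once the next qtd belongs to a different URB, and then frees the
 * qtd.  Scanning stops at qh->dummy, so URBs queued during a giveback are
 * handled by a later pass (or by the rescan triggered further down).
 */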
static unsigned
qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	struct ehci_qtd		*last, *end = qh->dummy;
	struct list_head	*entry, *tmp;
	int			last_status;
	int			stopped;
	u8			state;
	struct ehci_qh_hw	*hw = qh->hw;

	/* completions (or tasks on other cpus) must never clobber HALT
	 * till we've gone through and cleaned everything up, even when
	 * they add urbs to this qh's queue or mark them for unlinking.
	 *
	 * NOTE:  unlinking expects to be done in queue order.
	 *
	 * It's a bug for qh->qh_state to be anything other than
	 * QH_STATE_IDLE, unless our caller is scan_async() or
	 * scan_intr().
	 */
	state = qh->qh_state;
	qh->qh_state = QH_STATE_COMPLETING;
	stopped = (state == QH_STATE_IDLE);

 rescan:
	last = NULL;
	last_status = -EINPROGRESS;
	qh->dequeue_during_giveback = 0;

	/* remove de-activated QTDs from front of queue.
	 * after faults (including short reads), cleanup this urb
	 * then let the queue advance.
	 * if queue is stopped, handles unlinks.
	 */
	list_for_each_safe (entry, tmp, &qh->qtd_list) {
		struct ehci_qtd	*qtd;
		struct urb	*urb;
		u32		token = 0;

		qtd = list_entry (entry, struct ehci_qtd, qtd_list);
		urb = qtd->urb;

		/* clean up any state from previous QTD ...*/
		if (last) {
			if (likely (last->urb != urb)) {
				ehci_urb_done(ehci, last->urb, last_status);
				last_status = -EINPROGRESS;
			}
			ehci_qtd_free (ehci, last);
			last = NULL;
		}

		/* ignore urbs submitted during completions we reported */
		if (qtd == end)
			break;

		/* hardware copies qtd out of qh overlay */
		rmb ();
		token = hc32_to_cpu(ehci, qtd->hw_token);

		/* always clean up qtds the hc de-activated */
 retry_xacterr:
		if ((token & QTD_STS_ACTIVE) == 0) {

			/* Report Data Buffer Error: non-fatal but useful */
			if (token & QTD_STS_DBE)
				ehci_dbg(ehci,
					"detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n",
					urb,
					usb_endpoint_num(&urb->ep->desc),
					usb_endpoint_dir_in(&urb->ep->desc) ? "in" : "out",
					urb->transfer_buffer_length,
					qtd,
					qh);

			/* on STALL, error, and short reads this urb must
			 * complete and all its qtds must be recycled.
			 */
			if ((token & QTD_STS_HALT) != 0) {

				/* retry transaction errors until we
				 * reach the software xacterr limit
				 */
				if ((token & QTD_STS_XACT) &&
						QTD_CERR(token) == 0 &&
						++qh->xacterrs < QH_XACTERR_MAX &&
						!urb->unlinked) {
					ehci_dbg(ehci,
						"detected XactErr len %zu/%zu retry %d\n",
						qtd->length - QTD_LENGTH(token),
						qtd->length, qh->xacterrs);

					/* reset the token in the qtd and the
					 * qh overlay (which still contains
					 * the qtd) so that we pick up from
					 * where we left off
					 */
					token &= ~QTD_STS_HALT;
					token |= QTD_STS_ACTIVE |
							(EHCI_TUNE_CERR << 10);
					qtd->hw_token = cpu_to_hc32(ehci,
							token);
					wmb();
					hw->hw_token = cpu_to_hc32(ehci,
							token);
					goto retry_xacterr;
				}
				stopped = 1;
				qh->unlink_reason |= QH_UNLINK_HALTED;

			/* magic dummy for some short reads; qh won't advance.
			 * that silicon quirk can kick in with this dummy too.
			 *
			 * other short reads won't stop the queue, including
			 * control transfers (status stage handles that) or
			 * most other single-qtd reads ... the queue stops if
			 * URB_SHORT_NOT_OK was set so the driver submitting
			 * the urbs could clean it up.
			 */
			} else if (IS_SHORT_READ (token)
					&& !(qtd->hw_alt_next
						& EHCI_LIST_END(ehci))) {
				stopped = 1;
				qh->unlink_reason |= QH_UNLINK_SHORT_READ;
			}

		/* stop scanning when we reach qtds the hc is using */
		} else if (likely (!stopped
				&& ehci->rh_state >= EHCI_RH_RUNNING)) {
			break;

		/* scan the whole queue for unlinks whenever it stops */
		} else {
			stopped = 1;

			/* cancel everything if we halt, suspend, etc */
			if (ehci->rh_state < EHCI_RH_RUNNING) {
				last_status = -ESHUTDOWN;
				qh->unlink_reason |= QH_UNLINK_SHUTDOWN;
			}

			/* this qtd is active; skip it unless a previous qtd
			 * for its urb faulted, or its urb was canceled.
			 */
			else if (last_status == -EINPROGRESS && !urb->unlinked)
				continue;

			/*
			 * If this was the active qtd when the qh was unlinked
			 * and the overlay's token is active, then the overlay
			 * hasn't been written back to the qtd yet so use its
			 * token instead of the qtd's.  After the qtd is
			 * processed and removed, the overlay won't be valid
			 * any more.
			 */
			if (state == QH_STATE_IDLE &&
					qh->qtd_list.next == &qtd->qtd_list &&
					(hw->hw_token & ACTIVE_BIT(ehci))) {
				token = hc32_to_cpu(ehci, hw->hw_token);
				hw->hw_token &= ~ACTIVE_BIT(ehci);
				qh->should_be_inactive = 1;

				/* An unlink may leave an incomplete
				 * async transaction in the TT buffer.
				 * We have to clear it.
				 */
				ehci_clear_tt_buffer(ehci, qh, urb, token);
			}
		}

		/* unless we already know the urb's status, collect qtd status
		 * and update count of bytes transferred.  in common short read
		 * cases with only one data qtd (including control transfers),
		 * queue processing won't halt.  but with two or more qtds (for
		 * example, with a 32 KB transfer), when the first qtd gets a
		 * short read the second must be removed by hand.
		 */
		if (last_status == -EINPROGRESS) {
			last_status = qtd_copy_status(ehci, urb,
					qtd->length, token);
			if (last_status == -EREMOTEIO
					&& (qtd->hw_alt_next
						& EHCI_LIST_END(ehci)))
				last_status = -EINPROGRESS;

			/* As part of low/full-speed endpoint-halt processing
			 * we must clear the TT buffer (11.17.5).
			 */
			if (unlikely(last_status != -EINPROGRESS &&
					last_status != -EREMOTEIO)) {
				/* The TTs in some hubs malfunction when they
				 * receive this request following a STALL (they
				 * stop sending isochronous packets).  Since a
				 * STALL can't leave the TT buffer in a busy
				 * state (if you believe Figures 11-48 - 11-51
				 * in the USB 2.0 spec), we won't clear the TT
				 * buffer in this case.  Strictly speaking this
				 * is a violation of the spec.
				 */
				if (last_status != -EPIPE)
					ehci_clear_tt_buffer(ehci, qh, urb,
							token);
			}
		}

		/* if we're removing something not at the queue head,
		 * patch the hardware queue pointer.
		 */
		if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
			last = list_entry (qtd->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			last->hw_next = qtd->hw_next;
		}

		/* remove qtd; it's recycled after possible urb completion */
		list_del (&qtd->qtd_list);
		last = qtd;

		/* reinit the xacterr counter for the next qtd */
		qh->xacterrs = 0;
	}

	/* last urb's completion might still need calling */
	if (likely (last != NULL)) {
		ehci_urb_done(ehci, last->urb, last_status);
		ehci_qtd_free (ehci, last);
	}

	/* Do we need to rescan for URBs dequeued during a giveback? */
	if (unlikely(qh->dequeue_during_giveback)) {
		/* If the QH is already unlinked, do the rescan now. */
		if (state == QH_STATE_IDLE)
			goto rescan;

		/* Otherwise the caller must unlink the QH. */
	}

	/* restore original state; caller must unlink or relink */
	qh->qh_state = state;

	/* be sure the hardware's done with the qh before refreshing
	 * it after fault cleanup, or recovering from silicon wrongly
	 * overlaying the dummy qtd (which reduces DMA chatter).
	 *
	 * We won't refresh a QH that's linked (after the HC
	 * stopped the queue).  That avoids a race:
	 *  - HC reads first part of QH;
	 *  - CPU updates that first part and the token;
	 *  - HC reads rest of that QH, including token
	 * Result:  HC gets an inconsistent image, and then
	 * DMAs to/from the wrong memory (corrupting it).
	 *
	 * That should be rare for interrupt transfers,
	 * except maybe high bandwidth ...
	 */
	if (stopped != 0 || hw->hw_qtd_next == EHCI_LIST_END(ehci))
		qh->unlink_reason |= QH_UNLINK_DUMMY_OVERLAY;

	/* Let the caller know if the QH needs to be unlinked. */
	return qh->unlink_reason;
}

/*-------------------------------------------------------------------------*/

/*
 * reverse of qh_urb_transaction:  free a list of TDs.
 * used for cleanup after errors, before HC sees an URB's TDs.
 */
static void qtd_list_free (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list
) {
	struct list_head	*entry, *temp;

	list_for_each_safe (entry, temp, qtd_list) {
		struct ehci_qtd	*qtd;

		qtd = list_entry (entry, struct ehci_qtd, qtd_list);
		list_del (&qtd->qtd_list);
		ehci_qtd_free (ehci, qtd);
	}
}

/*
 * create a list of filled qtds for this URB; won't link into qh.
 */
static struct list_head *
qh_urb_transaction (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*head,
	gfp_t			flags
) {
	struct ehci_qtd		*qtd, *qtd_prev;
	dma_addr_t		buf;
	int			len, this_sg_len, maxpacket;
	int			is_input;
	u32			token;
	int			i;
	struct scatterlist	*sg;

	/*
	 * URBs map to sequences of QTDs:  one logical transaction
	 */
	qtd = ehci_qtd_alloc (ehci, flags);
	if (unlikely (!qtd))
		return NULL;
	list_add_tail (&qtd->qtd_list, head);
	qtd->urb = urb;

	token = QTD_STS_ACTIVE;
	token |= (EHCI_TUNE_CERR << 10);
	/* for split transactions, SplitXState initialized to zero */

	len = urb->transfer_buffer_length;
	is_input = usb_pipein (urb->pipe);
	if (usb_pipecontrol (urb->pipe)) {
		/* SETUP pid */
		qtd_fill(ehci, qtd, urb->setup_dma,
				sizeof (struct usb_ctrlrequest),
				token | (PID_CODE_SETUP << 8), 8);
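		/*
		 * The SETUP packet is always 8 bytes and always DATA0; the
		 * stage(s) that follow start from DATA1, hence the toggle
		 * flip just below.
		 */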

		/* ... and always at least one more pid */
		token ^= QTD_TOGGLE;
		qtd_prev = qtd;
		qtd = ehci_qtd_alloc (ehci, flags);
		if (unlikely (!qtd))
			goto cleanup;
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
		list_add_tail (&qtd->qtd_list, head);

		/* for zero length DATA stages, STATUS is always IN */
		if (len == 0)
			token |= (PID_CODE_IN << 8);
	}

	/*
	 * data transfer stage:  buffer setup
	 */
	i = urb->num_mapped_sgs;
	if (len > 0 && i > 0) {
		sg = urb->sg;
		buf = sg_dma_address(sg);

		/* urb->transfer_buffer_length may be smaller than the
		 * size of the scatterlist (or vice versa)
		 */
		this_sg_len = min_t(int, sg_dma_len(sg), len);
	} else {
		sg = NULL;
		buf = urb->transfer_dma;
		this_sg_len = len;
	}

	if (is_input)
		token |= (PID_CODE_IN << 8);
	/* else it's already initted to "out" pid (0 << 8) */

	maxpacket = usb_endpoint_maxp(&urb->ep->desc);

	/*
	 * buffer gets wrapped in one or more qtds;
	 * last one may be "short" (including zero len)
	 * and may serve as a control status ack
	 */
	for (;;) {
		unsigned int this_qtd_len;

		this_qtd_len = qtd_fill(ehci, qtd, buf, this_sg_len, token,
				maxpacket);
		this_sg_len -= this_qtd_len;
		len -= this_qtd_len;
		buf += this_qtd_len;

		/*
		 * short reads advance to a "magic" dummy instead of the next
		 * qtd ... that forces the queue to stop, for manual cleanup.
		 * (this will usually be overridden later.)
		 */
		if (is_input)
			qtd->hw_alt_next = ehci->async->hw->hw_alt_next;

		/* qh makes control packets use qtd toggle; maybe switch it */
		if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
			token ^= QTD_TOGGLE;

		if (likely(this_sg_len <= 0)) {
			if (--i <= 0 || len <= 0)
				break;
			sg = sg_next(sg);
			buf = sg_dma_address(sg);
			this_sg_len = min_t(int, sg_dma_len(sg), len);
		}

		qtd_prev = qtd;
		qtd = ehci_qtd_alloc (ehci, flags);
		if (unlikely (!qtd))
			goto cleanup;
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
		list_add_tail (&qtd->qtd_list, head);
	}

	/*
	 * unless the caller requires manual cleanup after short reads,
	 * have the alt_next mechanism keep the queue running after the
	 * last data qtd (the only one, for control and most other cases).
	 */
	if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
				|| usb_pipecontrol (urb->pipe)))
		qtd->hw_alt_next = EHCI_LIST_END(ehci);

	/*
	 * control requests may need a terminating data "status" ack;
	 * other OUT ones may need a terminating short packet
	 * (zero length).
	 */
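	/*
	 * For control transfers the status stage below is a zero-length
	 * transaction in the opposite direction of the data stage, forced
	 * to DATA1.  For OUT transfers, URB_ZERO_PACKET appends a ZLP only
	 * when the data length is an exact multiple of maxpacket.
	 */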
	if (likely (urb->transfer_buffer_length != 0)) {
		int	one_more = 0;

		if (usb_pipecontrol (urb->pipe)) {
			one_more = 1;
			token ^= (PID_CODE_IN << 8);	/* "in" <--> "out" */
			token |= QTD_TOGGLE;		/* force DATA1 */
		} else if (usb_pipeout(urb->pipe)
				&& (urb->transfer_flags & URB_ZERO_PACKET)
				&& !(urb->transfer_buffer_length % maxpacket)) {
			one_more = 1;
		}
		if (one_more) {
			qtd_prev = qtd;
			qtd = ehci_qtd_alloc (ehci, flags);
			if (unlikely (!qtd))
				goto cleanup;
			qtd->urb = urb;
			qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
			list_add_tail (&qtd->qtd_list, head);

			/* never any data in such packets */
			qtd_fill(ehci, qtd, 0, 0, token, 0);
		}
	}

	/* by default, enable interrupt on urb completion */
	if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT)))
		qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);
	return head;

cleanup:
	qtd_list_free (ehci, urb, head);
	return NULL;
}

/*-------------------------------------------------------------------------*/

// Would be best to create all qh's from config descriptors,
// when each interface/altsetting is established.  Unlink
// any previous qh and cancel its urbs first; endpoints are
// implicitly reset then (data toggle too).
// That'd mean updating how usbcore talks to HCDs. (2.7?)


/*
 * Each QH holds a qtd list; a QH is used for everything except iso.
 *
 * For interrupt urbs, the scheduler must set the microframe scheduling
 * mask(s) each time the QH gets scheduled.  For highspeed, that's
 * just one microframe in the s-mask.  For split interrupt transactions
 * there are additional complications: c-mask, maybe FSTNs.
 */
static struct ehci_qh *
qh_make (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	gfp_t			flags
) {
	struct ehci_qh		*qh = ehci_qh_alloc (ehci, flags);
	struct usb_host_endpoint *ep;
	u32			info1 = 0, info2 = 0;
	int			is_input, type;
	int			maxp = 0;
	int			mult;
	struct usb_tt		*tt = urb->dev->tt;
	struct ehci_qh_hw	*hw;

	if (!qh)
		return qh;

	/*
	 * init endpoint/device data for this QH
	 */
	info1 |= usb_pipeendpoint (urb->pipe) << 8;
	info1 |= usb_pipedevice (urb->pipe) << 0;

	is_input = usb_pipein (urb->pipe);
	type = usb_pipetype (urb->pipe);
	ep = usb_pipe_endpoint (urb->dev, urb->pipe);
	maxp = usb_endpoint_maxp (&ep->desc);
	mult = usb_endpoint_maxp_mult (&ep->desc);

	/* 1024 byte maxpacket is a hardware ceiling.  High bandwidth
	 * acts like up to 3KB, but is built from smaller packets.
	 */
	if (maxp > 1024) {
		ehci_dbg(ehci, "bogus qh maxpacket %d\n", maxp);
		goto done;
	}

	/* Compute interrupt scheduling parameters just once, and save.
	 * - allowing for high bandwidth, how many nsec/uframe are used?
	 * - split transactions need a second CSPLIT uframe; same question
	 * - splits also need a schedule gap (for full/low speed I/O)
	 * - qh has a polling interval
	 *
	 * For control/bulk requests, the HC or TT handles these.
	 */
	if (type == PIPE_INTERRUPT) {
		unsigned	tmp;

		qh->ps.usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
				is_input, 0, mult * maxp));
		qh->ps.phase = NO_FRAME;

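		/*
		 * Note on units: for high speed, urb->interval counts
		 * microframes while qh->ps.period counts frames; for
		 * full/low speed both are in frames.  The ps.bw_* fields
		 * hold the (power-of-two) period actually used for
		 * bandwidth reservation.
		 */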
		if (urb->dev->speed == USB_SPEED_HIGH) {
			qh->ps.c_usecs = 0;
			qh->gap_uf = 0;

			if (urb->interval > 1 && urb->interval < 8) {
				/* NOTE interval 2 or 4 uframes could work.
				 * But interval 1 scheduling is simpler, and
				 * includes high bandwidth.
				 */
				urb->interval = 1;
			} else if (urb->interval > ehci->periodic_size << 3) {
				urb->interval = ehci->periodic_size << 3;
			}
			qh->ps.period = urb->interval >> 3;

			/* period for bandwidth allocation */
			tmp = min_t(unsigned, EHCI_BANDWIDTH_SIZE,
					1 << (urb->ep->desc.bInterval - 1));

			/* Allow urb->interval to override */
			qh->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval);
			qh->ps.bw_period = qh->ps.bw_uperiod >> 3;
		} else {
			int		think_time;

			/* gap is f(FS/LS transfer times) */
			qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed,
					is_input, 0, maxp) / (125 * 1000);

			/* FIXME this just approximates SPLIT/CSPLIT times */
			if (is_input) {		// SPLIT, gap, CSPLIT+DATA
				qh->ps.c_usecs = qh->ps.usecs + HS_USECS(0);
				qh->ps.usecs = HS_USECS(1);
			} else {		// SPLIT+DATA, gap, CSPLIT
				qh->ps.usecs += HS_USECS(1);
				qh->ps.c_usecs = HS_USECS(0);
			}

			think_time = tt ? tt->think_time : 0;
			qh->ps.tt_usecs = NS_TO_US(think_time +
					usb_calc_bus_time (urb->dev->speed,
						is_input, 0, maxp));
			if (urb->interval > ehci->periodic_size)
				urb->interval = ehci->periodic_size;
			qh->ps.period = urb->interval;

			/* period for bandwidth allocation */
			tmp = min_t(unsigned, EHCI_BANDWIDTH_FRAMES,
					urb->ep->desc.bInterval);
			tmp = rounddown_pow_of_two(tmp);

			/* Allow urb->interval to override */
			qh->ps.bw_period = min_t(unsigned, tmp, urb->interval);
			qh->ps.bw_uperiod = qh->ps.bw_period << 3;
		}
	}

	/* support for tt scheduling, and access to toggles */
	qh->ps.udev = urb->dev;
	qh->ps.ep = urb->ep;

	/* using TT? */
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		info1 |= QH_LOW_SPEED;
		fallthrough;

	case USB_SPEED_FULL:
		/* EPS 0 means "full" */
		if (type != PIPE_INTERRUPT)
			info1 |= (EHCI_TUNE_RL_TT << 28);
		if (type == PIPE_CONTROL) {
			info1 |= QH_CONTROL_EP;		/* for TT */
			info1 |= QH_TOGGLE_CTL;		/* toggle from qtd */
		}
		info1 |= maxp << 16;

		info2 |= (EHCI_TUNE_MULT_TT << 30);

		/* Some Freescale processors have an erratum in which the
		 * port number in the queue head was 0..N-1 instead of 1..N.
		 */
		if (ehci_has_fsl_portno_bug(ehci))
			info2 |= (urb->dev->ttport-1) << 23;
		else
			info2 |= urb->dev->ttport << 23;

		/* set the address of the TT; for TDI's integrated
		 * root hub tt, leave it zeroed.
		 */
		if (tt && tt->hub != ehci_to_hcd(ehci)->self.root_hub)
			info2 |= tt->hub->devnum << 16;

		/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets c-mask } */

		break;

	case USB_SPEED_HIGH:		/* no TT involved */
		info1 |= QH_HIGH_SPEED;
		if (type == PIPE_CONTROL) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			info1 |= 64 << 16;	/* usb2 fixed maxpacket */
			info1 |= QH_TOGGLE_CTL;	/* toggle from qtd */
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else if (type == PIPE_BULK) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			/* The USB spec says that high speed bulk endpoints
			 * always use 512 byte maxpacket.  But some device
			 * vendors decided to ignore that, and MSFT is happy
			 * to help them do so.  So now people expect to use
			 * such nonconformant devices with Linux too; sigh.
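			 * (The QH's maximum packet length field accepts any
			 * value up to 1024, so the controller copes; we just
			 * program whatever the endpoint descriptor declares.)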
			 */
			info1 |= maxp << 16;
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else {		/* PIPE_INTERRUPT */
			info1 |= maxp << 16;
			info2 |= mult << 30;
		}
		break;
	default:
		ehci_dbg(ehci, "bogus dev %p speed %d\n", urb->dev,
			urb->dev->speed);
done:
		qh_destroy(ehci, qh);
		return NULL;
	}

	/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets s-mask } */

	/* init as live, toggle clear */
	qh->qh_state = QH_STATE_IDLE;
	hw = qh->hw;
	hw->hw_info1 = cpu_to_hc32(ehci, info1);
	hw->hw_info2 = cpu_to_hc32(ehci, info2);
	qh->is_out = !is_input;
	usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
	return qh;
}

/*-------------------------------------------------------------------------*/

static void enable_async(struct ehci_hcd *ehci)
{
	if (ehci->async_count++)
		return;

	/* Stop waiting to turn off the async schedule */
	ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_ASYNC);

	/* Don't start the schedule until ASS is 0 */
	ehci_poll_ASS(ehci);
	turn_on_io_watchdog(ehci);
}

static void disable_async(struct ehci_hcd *ehci)
{
	if (--ehci->async_count)
		return;

	/* The async schedule and unlink lists are supposed to be empty */
	WARN_ON(ehci->async->qh_next.qh || !list_empty(&ehci->async_unlink) ||
			!list_empty(&ehci->async_idle));

	/* Don't turn off the schedule until ASS is 1 */
	ehci_poll_ASS(ehci);
}

/* move qh (and its qtds) onto async queue; maybe enable queue.  */

static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	__hc32		dma = QH_NEXT(ehci, qh->qh_dma);
	struct ehci_qh	*head;

	/* Don't link a QH if there's a Clear-TT-Buffer pending */
	if (unlikely(qh->clearing_tt))
		return;

	WARN_ON(qh->qh_state != QH_STATE_IDLE);

	/* clear halt and/or toggle; and maybe recover from silicon quirk */
	qh_refresh(ehci, qh);

	/* splice right after start */
	head = ehci->async;
	qh->qh_next = head->qh_next;
	qh->hw->hw_next = head->hw->hw_next;
	wmb ();

	head->qh_next.qh = qh;
	head->hw->hw_next = dma;

	qh->qh_state = QH_STATE_LINKED;
	qh->xacterrs = 0;
	qh->unlink_reason = 0;
	/* qtd completions reported later by interrupt */

	enable_async(ehci);
}

/*-------------------------------------------------------------------------*/

/*
 * For control/bulk/interrupt, return QH with these TDs appended.
 * Allocates and initializes the QH if necessary.
 * Returns null if it can't allocate a QH it needs to.
 * If the QH has TDs (urbs) already, that's great.
 */
static struct ehci_qh *qh_append_tds (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	int			epnum,
	void			**ptr
)
{
	struct ehci_qh		*qh = NULL;
	__hc32			qh_addr_mask = cpu_to_hc32(ehci, 0x7f);

	qh = (struct ehci_qh *) *ptr;
	if (unlikely (qh == NULL)) {
		/* can't sleep here, we have ehci->lock... */
		qh = qh_make (ehci, urb, GFP_ATOMIC);
		*ptr = qh;
	}
	if (likely (qh != NULL)) {
		struct ehci_qtd	*qtd;

		if (unlikely (list_empty (qtd_list)))
			qtd = NULL;
		else
			qtd = list_entry (qtd_list->next, struct ehci_qtd,
					qtd_list);

		/* control qh may need patching ... */
		if (unlikely (epnum == 0)) {

			/* usb_reset_device() briefly reverts to address 0 */
			if (usb_pipedevice (urb->pipe) == 0)
				qh->hw->hw_info1 &= ~qh_addr_mask;
		}

		/* just one way to queue requests: swap with the dummy qtd.
		 * only hc or qh_refresh() ever modify the overlay.
		 */
		if (likely (qtd != NULL)) {
			struct ehci_qtd	*dummy;
			dma_addr_t	dma;
			__hc32		token;

			/* to avoid racing the HC, use the dummy td instead of
			 * the first td of our list (becomes new dummy).  both
			 * tds stay deactivated until we're done, when the
			 * HC is allowed to fetch the old dummy (4.10.2).
			 */
			token = qtd->hw_token;
			qtd->hw_token = HALT_BIT(ehci);

			dummy = qh->dummy;

			dma = dummy->qtd_dma;
			*dummy = *qtd;
			dummy->qtd_dma = dma;

			list_del (&qtd->qtd_list);
			list_add (&dummy->qtd_list, qtd_list);
			list_splice_tail(qtd_list, &qh->qtd_list);

			ehci_qtd_init(ehci, qtd, qtd->qtd_dma);
			qh->dummy = qtd;

			/* hc must see the new dummy at list end */
			dma = qtd->qtd_dma;
			qtd = list_entry (qh->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			qtd->hw_next = QTD_NEXT(ehci, dma);

			/* let the hc process these next qtds */
			wmb ();
			dummy->hw_token = token;

			urb->hcpriv = qh;
		}
	}
	return qh;
}

/*-------------------------------------------------------------------------*/

static int
submit_async (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	gfp_t			mem_flags
) {
	int			epnum;
	unsigned long		flags;
	struct ehci_qh		*qh = NULL;
	int			rc;

	epnum = urb->ep->desc.bEndpointAddress;

#ifdef EHCI_URB_TRACE
	{
		struct ehci_qtd *qtd;
		qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
		ehci_dbg(ehci,
			 "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
			 __func__, urb->dev->devpath, urb,
			 epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
			 urb->transfer_buffer_length,
			 qtd, urb->ep->hcpriv);
	}
#endif

	spin_lock_irqsave (&ehci->lock, flags);
	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
		rc = -ESHUTDOWN;
		goto done;
	}
	rc = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
	if (unlikely(rc))
		goto done;

	qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
	if (unlikely(qh == NULL)) {
		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
		rc = -ENOMEM;
		goto done;
	}

	/* Control/bulk operations through TTs don't need scheduling,
	 * the HC and TT handle it when the TT has a buffer ready.
	 */
	if (likely (qh->qh_state == QH_STATE_IDLE))
		qh_link_async(ehci, qh);
 done:
	spin_unlock_irqrestore (&ehci->lock, flags);
	if (unlikely (qh == NULL))
		qtd_list_free (ehci, urb, qtd_list);
	return rc;
}

/*-------------------------------------------------------------------------*/
#ifdef CONFIG_USB_HCD_TEST_MODE
/*
 * This function creates the qtds and submits them for the
 * SINGLE_STEP_SET_FEATURE Test.
 * This is done in two parts: first the SETUP request for GetDescriptor is
 * sent; 15 seconds later, the IN stage starts requesting data from the
 * device.
 *
 * is_setup : argument that decides which of the two stages is performed;
 * TRUE - SETUP and FALSE - IN+STATUS
 * Returns 0 on success
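 * (This appears to correspond to the EHSET single-step Get Device
 * Descriptor electrical test, which calls for the 15 second pause between
 * the SETUP and the IN data phase.)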
 */
static int ehci_submit_single_step_set_feature(
	struct usb_hcd	*hcd,
	struct urb	*urb,
	int		is_setup
) {
	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
	struct list_head	qtd_list;
	struct list_head	*head;

	struct ehci_qtd		*qtd, *qtd_prev;
	dma_addr_t		buf;
	int			len, maxpacket;
	u32			token;

	INIT_LIST_HEAD(&qtd_list);
	head = &qtd_list;

	/* URBs map to sequences of QTDs: one logical transaction */
	qtd = ehci_qtd_alloc(ehci, GFP_KERNEL);
	if (unlikely(!qtd))
		return -1;
	list_add_tail(&qtd->qtd_list, head);
	qtd->urb = urb;

	token = QTD_STS_ACTIVE;
	token |= (EHCI_TUNE_CERR << 10);

	len = urb->transfer_buffer_length;
	/*
	 * Check if the request is to perform just the SETUP stage (getDesc);
	 * as in the SINGLE_STEP_SET_FEATURE test, the DATA stage (IN) happens
	 * 15 secs after the setup.
	 */
	if (is_setup) {
		/* SETUP pid, and interrupt after SETUP completion */
		qtd_fill(ehci, qtd, urb->setup_dma,
				sizeof(struct usb_ctrlrequest),
				QTD_IOC | token | (PID_CODE_SETUP << 8), 8);

		submit_async(ehci, urb, &qtd_list, GFP_ATOMIC);
		return 0;	/* Return now; we shall come back after 15 seconds */
	}

	/*
	 * IN: data transfer stage: buffer setup : start the IN txn phase for
	 * the GetDescriptor SETUP which was sent 15 seconds back
	 */
	token ^= QTD_TOGGLE;	/* We need to start IN with DATA-1 PID sequence */
	buf = urb->transfer_dma;

	token |= (PID_CODE_IN << 8);	/* This is the IN stage */

	maxpacket = usb_endpoint_maxp(&urb->ep->desc);

	qtd_fill(ehci, qtd, buf, len, token, maxpacket);

	/*
	 * Our IN phase shall always be a short read; so keep the queue running
	 * and let it advance to the next qtd, the zero-length OUT status.
	 */
	qtd->hw_alt_next = EHCI_LIST_END(ehci);

	/* STATUS stage for GetDesc control request */
	token ^= (PID_CODE_IN << 8);	/* "in" <--> "out" */
	token |= QTD_TOGGLE;		/* force DATA1 */

	qtd_prev = qtd;
	qtd = ehci_qtd_alloc(ehci, GFP_ATOMIC);
	if (unlikely(!qtd))
		goto cleanup;
	qtd->urb = urb;
	qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
	list_add_tail(&qtd->qtd_list, head);

	/* Interrupt after STATUS completion */
	qtd_fill(ehci, qtd, 0, 0, token | QTD_IOC, 0);

	submit_async(ehci, urb, &qtd_list, GFP_KERNEL);

	return 0;

cleanup:
	qtd_list_free(ehci, urb, head);
	return -1;
}
#endif /* CONFIG_USB_HCD_TEST_MODE */

/*-------------------------------------------------------------------------*/

static void single_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	struct ehci_qh		*prev;

	/* Add to the end of the list of QHs waiting for the next IAAD */
	qh->qh_state = QH_STATE_UNLINK_WAIT;
	list_add_tail(&qh->unlink_node, &ehci->async_unlink);

	/* Unlink it from the schedule */
	prev = ehci->async;
	while (prev->qh_next.qh != qh)
		prev = prev->qh_next.qh;

	prev->hw->hw_next = qh->hw->hw_next;
	prev->qh_next = qh->qh_next;
	if (ehci->qh_scan_next == qh)
		ehci->qh_scan_next = qh->qh_next.qh;
}

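/*
 * Unlinked QHs are not recycled right away: the controller may still hold
 * cached copies of them.  Setting CMD_IAAD asks the HC to raise an
 * Interrupt on Async Advance once it has moved past the removed QHs;
 * end_unlink_async() then finishes the job when that interrupt (or its
 * watchdog timeout) fires.
 */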
static void start_iaa_cycle(struct ehci_hcd *ehci)
{
	/* If the controller isn't running, we don't have to wait for it */
	if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) {
		end_unlink_async(ehci);

	/* Otherwise start a new IAA cycle if one isn't already running */
	} else if (ehci->rh_state == EHCI_RH_RUNNING &&
			!ehci->iaa_in_progress) {

		/* Make sure the unlinks are all visible to the hardware */
		wmb();

		ehci_writel(ehci, ehci->command | CMD_IAAD,
				&ehci->regs->command);
		ehci_readl(ehci, &ehci->regs->command);
		ehci->iaa_in_progress = true;
		ehci_enable_event(ehci, EHCI_HRTIMER_IAA_WATCHDOG, true);
	}
}

static void end_iaa_cycle(struct ehci_hcd *ehci)
{
	if (ehci->has_synopsys_hc_bug)
		ehci_writel(ehci, (u32) ehci->async->qh_dma,
				&ehci->regs->async_next);

	/* The current IAA cycle has ended */
	ehci->iaa_in_progress = false;

	end_unlink_async(ehci);
}

/* See if the async qh for the qtds being unlinked is now gone from the HC */

static void end_unlink_async(struct ehci_hcd *ehci)
{
	struct ehci_qh		*qh;
	bool			early_exit;

	if (list_empty(&ehci->async_unlink))
		return;
	qh = list_first_entry(&ehci->async_unlink, struct ehci_qh,
			unlink_node);	/* QH whose IAA cycle just ended */

	/*
	 * If async_unlinking is set then this routine is already running,
	 * either on the stack or on another CPU.
	 */
	early_exit = ehci->async_unlinking;

	/* If the controller isn't running, process all the waiting QHs */
	if (ehci->rh_state < EHCI_RH_RUNNING)
		list_splice_tail_init(&ehci->async_unlink, &ehci->async_idle);

	/*
	 * Intel (?) bug: The HC can write back the overlay region even
	 * after the IAA interrupt occurs.  In self-defense, always go
	 * through two IAA cycles for each QH.
	 */
	else if (qh->qh_state == QH_STATE_UNLINK) {
		/*
		 * Second IAA cycle has finished.  Process only the first
		 * waiting QH (NVIDIA (?) bug).
		 */
		list_move_tail(&qh->unlink_node, &ehci->async_idle);
	}

	/*
	 * AMD/ATI (?) bug: The HC can continue to use an active QH long
	 * after the IAA interrupt occurs.  To prevent problems, QHs that
	 * may still be active will wait until 2 ms have passed with no
	 * change to the hw_current and hw_token fields (this delay occurs
	 * between the two IAA cycles).
	 *
	 * The EHCI spec (4.8.2) says that active QHs must not be removed
	 * from the async schedule and recommends waiting until the QH
	 * goes inactive.  This is ridiculous because the QH will _never_
	 * become inactive if the endpoint NAKs indefinitely.
	 */

	/* Some reasons for unlinking guarantee the QH can't be active */
	else if (qh->unlink_reason & (QH_UNLINK_HALTED |
			QH_UNLINK_SHORT_READ | QH_UNLINK_DUMMY_OVERLAY))
		goto DelayDone;

	/* The QH can't be active if the queue was and still is empty... */
	else if ((qh->unlink_reason & QH_UNLINK_QUEUE_EMPTY) &&
			list_empty(&qh->qtd_list))
		goto DelayDone;

	/* ...  or if the QH has halted */
	else if (qh->hw->hw_token & cpu_to_hc32(ehci, QTD_STS_HALT))
		goto DelayDone;

	/* Otherwise we have to wait until the QH stops changing */
	else {
		__hc32		qh_current, qh_token;

		qh_current = qh->hw->hw_current;
		qh_token = qh->hw->hw_token;
		if (qh_current != ehci->old_current ||
				qh_token != ehci->old_token) {
			ehci->old_current = qh_current;
			ehci->old_token = qh_token;
			ehci_enable_event(ehci,
					EHCI_HRTIMER_ACTIVE_UNLINK, true);
			return;
		}
 DelayDone:
		qh->qh_state = QH_STATE_UNLINK;
		early_exit = true;
	}
	ehci->old_current = ~0;		/* Prepare for next QH */

	/* Start a new IAA cycle if any QHs are waiting for it */
	if (!list_empty(&ehci->async_unlink))
		start_iaa_cycle(ehci);

	/*
	 * Don't allow nesting or concurrent calls,
	 * or wait for the second IAA cycle for the next QH.
	 */
	if (early_exit)
		return;

	/* Process the idle QHs */
	ehci->async_unlinking = true;
	while (!list_empty(&ehci->async_idle)) {
		qh = list_first_entry(&ehci->async_idle, struct ehci_qh,
				unlink_node);
		list_del(&qh->unlink_node);

		qh->qh_state = QH_STATE_IDLE;
		qh->qh_next.qh = NULL;

		if (!list_empty(&qh->qtd_list))
			qh_completions(ehci, qh);
		if (!list_empty(&qh->qtd_list) &&
				ehci->rh_state == EHCI_RH_RUNNING)
			qh_link_async(ehci, qh);
		disable_async(ehci);
	}
	ehci->async_unlinking = false;
}

static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh);

static void unlink_empty_async(struct ehci_hcd *ehci)
{
	struct ehci_qh		*qh;
	struct ehci_qh		*qh_to_unlink = NULL;
	int			count = 0;

	/* Find the last async QH which has been empty for a timer cycle */
	for (qh = ehci->async->qh_next.qh; qh; qh = qh->qh_next.qh) {
		if (list_empty(&qh->qtd_list) &&
				qh->qh_state == QH_STATE_LINKED) {
			++count;
			if (qh->unlink_cycle != ehci->async_unlink_cycle)
				qh_to_unlink = qh;
		}
	}

	/* If nothing else is being unlinked, unlink the last empty QH */
	if (list_empty(&ehci->async_unlink) && qh_to_unlink) {
		qh_to_unlink->unlink_reason |= QH_UNLINK_QUEUE_EMPTY;
		start_unlink_async(ehci, qh_to_unlink);
		--count;
	}

	/* Other QHs will be handled later */
	if (count > 0) {
		ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);
		++ehci->async_unlink_cycle;
	}
}

#ifdef CONFIG_PM

/* The root hub is suspended; unlink all the async QHs */
static void unlink_empty_async_suspended(struct ehci_hcd *ehci)
{
	struct ehci_qh		*qh;

	while (ehci->async->qh_next.qh) {
		qh = ehci->async->qh_next.qh;
		WARN_ON(!list_empty(&qh->qtd_list));
		single_unlink_async(ehci, qh);
	}
}

#endif

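/*
 * Overall async-unlink flow: start_unlink_async() pulls the QH out of the
 * schedule ring via single_unlink_async(), start_iaa_cycle() rings the
 * doorbell, and end_unlink_async() (run after the IAA interrupt) idles the
 * QH, completes any leftover qtds, and relinks it if work remains.
 */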
/* makes sure the async qh will become idle */
/* caller must own ehci->lock */

static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	/* If the QH isn't linked then there's nothing we can do. */
	if (qh->qh_state != QH_STATE_LINKED)
		return;

	single_unlink_async(ehci, qh);
	start_iaa_cycle(ehci);
}

/*-------------------------------------------------------------------------*/

static void scan_async (struct ehci_hcd *ehci)
{
	struct ehci_qh		*qh;
	bool			check_unlinks_later = false;

	ehci->qh_scan_next = ehci->async->qh_next.qh;
	while (ehci->qh_scan_next) {
		qh = ehci->qh_scan_next;
		ehci->qh_scan_next = qh->qh_next.qh;

		/* clean any finished work for this qh */
		if (!list_empty(&qh->qtd_list)) {
			int temp;

			/*
			 * Unlinks could happen here; completion reporting
			 * drops the lock.  That's why ehci->qh_scan_next
			 * always holds the next qh to scan; if the next qh
			 * gets unlinked then ehci->qh_scan_next is adjusted
			 * in single_unlink_async().
			 */
			temp = qh_completions(ehci, qh);
			if (unlikely(temp)) {
				start_unlink_async(ehci, qh);
			} else if (list_empty(&qh->qtd_list)
					&& qh->qh_state == QH_STATE_LINKED) {
				qh->unlink_cycle = ehci->async_unlink_cycle;
				check_unlinks_later = true;
			}
		}
	}

	/*
	 * Unlink empty entries, reducing DMA usage as well
	 * as HCD schedule-scanning costs.  Delay for any qh
	 * we just scanned, there's a not-unusual case that it
	 * doesn't stay idle for long.
	 */
	if (check_unlinks_later && ehci->rh_state == EHCI_RH_RUNNING &&
			!(ehci->enabled_hrtimer_events &
				BIT(EHCI_HRTIMER_ASYNC_UNLINKS))) {
		ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);
		++ehci->async_unlink_cycle;
	}
}