/*
 * Copyright (c) 2001-2004 by David Brownell
 * Copyright (c) 2003 Michal Sojka, for high-speed iso transfers
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/*
 * EHCI scheduled transaction support:  interrupt, iso, split iso
 * These are called "periodic" transactions in the EHCI spec.
 *
 * Note that for interrupt transfers, the QH/QTD manipulation is shared
 * with the "asynchronous" transaction support (control/bulk transfers).
 * The only real difference is in how interrupt transfers are scheduled.
 *
 * For ISO, we make an "iso_stream" head to serve the same role as a QH.
 * It keeps track of every ITD (or SITD) that's linked, and holds enough
 * pre-calculated schedule data to make appending to the queue quick.
 */

static int ehci_get_frame (struct usb_hcd *hcd);

/*-------------------------------------------------------------------------*/

/*
 * periodic_next_shadow - return "next" pointer on shadow list
 * @periodic: host pointer to qh/itd/sitd
 * @tag: hardware tag for type of this record
 */
static union ehci_shadow *
periodic_next_shadow (union ehci_shadow *periodic, __le32 tag)
{
	switch (tag) {
	case Q_TYPE_QH:
		return &periodic->qh->qh_next;
	case Q_TYPE_FSTN:
		return &periodic->fstn->fstn_next;
	case Q_TYPE_ITD:
		return &periodic->itd->itd_next;
	// case Q_TYPE_SITD:
	default:
		return &periodic->sitd->sitd_next;
	}
}

/* caller must hold ehci->lock */
static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
{
	union ehci_shadow	*prev_p = &ehci->pshadow [frame];
	__le32			*hw_p = &ehci->periodic [frame];
	union ehci_shadow	here = *prev_p;

	/* find predecessor of "ptr"; hw and shadow lists are in sync */
	while (here.ptr && here.ptr != ptr) {
		prev_p = periodic_next_shadow (prev_p, Q_NEXT_TYPE (*hw_p));
		hw_p = here.hw_next;
		here = *prev_p;
	}
	/* an interrupt entry (at list end) could have been shared */
	if (!here.ptr)
		return;

	/* update shadow and hardware lists ... the old "next" pointers
	 * from ptr may still be in use, the caller updates them.
	 */
	*prev_p = *periodic_next_shadow (&here, Q_NEXT_TYPE (*hw_p));
	*hw_p = *here.hw_next;
}

/* how many of the uframe's 125 usecs are allocated? */
static unsigned short
periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
{
	__le32			*hw_p = &ehci->periodic [frame];
	union ehci_shadow	*q = &ehci->pshadow [frame];
	unsigned		usecs = 0;

	while (q->ptr) {
		switch (Q_NEXT_TYPE (*hw_p)) {
		case Q_TYPE_QH:
			/* is it in the S-mask? */
			if (q->qh->hw_info2 & cpu_to_le32 (1 << uframe))
				usecs += q->qh->usecs;
			/* ... or C-mask? */
			if (q->qh->hw_info2 & cpu_to_le32 (1 << (8 + uframe)))
				usecs += q->qh->c_usecs;
			hw_p = &q->qh->hw_next;
			q = &q->qh->qh_next;
			break;
		// case Q_TYPE_FSTN:
		default:
			/* for "save place" FSTNs, count the relevant INTR
			 * bandwidth from the previous frame
			 */
			if (q->fstn->hw_prev != EHCI_LIST_END) {
				ehci_dbg (ehci, "ignoring FSTN cost ...\n");
			}
			hw_p = &q->fstn->hw_next;
			q = &q->fstn->fstn_next;
			break;
		case Q_TYPE_ITD:
			usecs += q->itd->usecs [uframe];
			hw_p = &q->itd->hw_next;
			q = &q->itd->itd_next;
			break;
		case Q_TYPE_SITD:
			/* is it in the S-mask?  (count SPLIT, DATA) */
			if (q->sitd->hw_uframe & cpu_to_le32 (1 << uframe)) {
				if (q->sitd->hw_fullspeed_ep &
						__constant_cpu_to_le32 (1<<31))
					usecs += q->sitd->stream->usecs;
				else	/* worst case for OUT start-split */
					usecs += HS_USECS_ISO (188);
			}

			/* ... C-mask?  (count CSPLIT, DATA) */
			if (q->sitd->hw_uframe &
					cpu_to_le32 (1 << (8 + uframe))) {
				/* worst case for IN complete-split */
				usecs += q->sitd->stream->c_usecs;
			}

			hw_p = &q->sitd->hw_next;
			q = &q->sitd->sitd_next;
			break;
		}
	}
#ifdef	DEBUG
	if (usecs > 100)
		ehci_err (ehci, "uframe %d sched overrun: %d usecs\n",
			frame * 8 + uframe, usecs);
#endif
	return usecs;
}
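
/* Illustrative only: with one interrupt QH claiming 10 usecs whose S-mask
 * includes uframe 0, plus an ITD claiming 25 usecs in that same uframe,
 * periodic_usecs (ehci, frame, 0) returns 35 ... leaving 65 usecs of the
 * 100 usec (80%) periodic budget that check_period() enforces below.
 */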

/*-------------------------------------------------------------------------*/

static int same_tt (struct usb_device *dev1, struct usb_device *dev2)
{
	if (!dev1->tt || !dev2->tt)
		return 0;
	if (dev1->tt != dev2->tt)
		return 0;
	if (dev1->tt->multi)
		return dev1->ttport == dev2->ttport;
	else
		return 1;
}

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED

/* Which uframe does the low/fullspeed transfer start in?
 *
 * The parameter is the mask of ssplits in "H-frame" terms
 * and this returns the transfer start uframe in "B-frame" terms,
 * which allows both to match, e.g. a ssplit in "H-frame" uframe 0
 * will cause a transfer in "B-frame" uframe 0.  "B-frames" lag
 * "H-frames" by 1 uframe.  See the EHCI spec sec 4.5 and figure 4.7.
 */
static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __le32 mask)
{
	unsigned char smask = QH_SMASK & le32_to_cpu(mask);
	if (!smask) {
		ehci_err(ehci, "invalid empty smask!\n");
		/* uframe 7 can't have bw so this will indicate failure */
		return 7;
	}
	return ffs(smask) - 1;
}

static const unsigned char
max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };

/* carryover low/fullspeed bandwidth that crosses uframe boundaries */
static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
{
	int i;
	for (i=0; i<7; i++) {
		if (max_tt_usecs[i] < tt_usecs[i]) {
			tt_usecs[i+1] += tt_usecs[i] - max_tt_usecs[i];
			tt_usecs[i] = max_tt_usecs[i];
		}
	}
}
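
/* Worked example (illustrative): carryover_tt_bandwidth() applied to
 * { 200, 0, 0, 0, 0, 0, 0, 0 } yields { 125, 75, 0, ... } -- the 75 usecs
 * that don't fit in uframe 0's 125 usec budget spill into uframe 1,
 * modeling how the TT serializes its downstream transfers.
 */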

/* How many of the tt's periodic downstream 1000 usecs are allocated?
 *
 * While this measures the bandwidth in terms of usecs/uframe,
 * the low/fullspeed bus has no notion of uframes, so any particular
 * low/fullspeed transfer can "carry over" from one uframe to the next,
 * since the TT just performs downstream transfers in sequence.
 *
 * For example two separate 100 usec transfers can start in the same uframe,
 * and the second one would "carry over" 75 usecs into the next uframe.
 */
static void
periodic_tt_usecs (
	struct ehci_hcd *ehci,
	struct usb_device *dev,
	unsigned frame,
	unsigned short tt_usecs[8]
)
{
	__le32			*hw_p = &ehci->periodic [frame];
	union ehci_shadow	*q = &ehci->pshadow [frame];
	unsigned char		uf;

	memset(tt_usecs, 0, 16);

	while (q->ptr) {
		switch (Q_NEXT_TYPE(*hw_p)) {
		case Q_TYPE_ITD:
			hw_p = &q->itd->hw_next;
			q = &q->itd->itd_next;
			continue;
		case Q_TYPE_QH:
			if (same_tt(dev, q->qh->dev)) {
				uf = tt_start_uframe(ehci, q->qh->hw_info2);
				tt_usecs[uf] += q->qh->tt_usecs;
			}
			hw_p = &q->qh->hw_next;
			q = &q->qh->qh_next;
			continue;
		case Q_TYPE_SITD:
			if (same_tt(dev, q->sitd->urb->dev)) {
				uf = tt_start_uframe(ehci, q->sitd->hw_uframe);
				tt_usecs[uf] += q->sitd->stream->tt_usecs;
			}
			hw_p = &q->sitd->hw_next;
			q = &q->sitd->sitd_next;
			continue;
		// case Q_TYPE_FSTN:
		default:
			ehci_dbg(ehci,
				"ignoring periodic frame %d FSTN\n", frame);
			hw_p = &q->fstn->hw_next;
			q = &q->fstn->fstn_next;
		}
	}

	carryover_tt_bandwidth(tt_usecs);

	if (max_tt_usecs[7] < tt_usecs[7])
		ehci_err(ehci, "frame %d tt sched overrun: %d usecs\n",
			frame, tt_usecs[7] - max_tt_usecs[7]);
}
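
/* The max_tt_usecs[] limits above presumably reflect that a full/lowspeed
 * transaction must finish before end-of-frame: only ~30 usecs of new work
 * may start in uframe 6 and none in uframe 7, so any late carryover shows
 * up as an overrun in tt_usecs[7].
 */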

/*
 * Return true if the device's tt's downstream bus is available for a
 * periodic transfer of the specified length (usecs), starting at the
 * specified frame/uframe.  Note that (as summarized in section 11.19
 * of the usb 2.0 spec) TTs can buffer multiple transactions for each
 * uframe.
 *
 * The uframe parameter is when the fullspeed/lowspeed transfer
 * should be executed in "B-frame" terms, which is the same as the
 * highspeed ssplit's uframe (which is in "H-frame" terms).  For example
 * a ssplit in "H-frame" 0 causes a transfer in "B-frame" 0.
 * See the EHCI spec sec 4.5 and fig 4.7.
 *
 * This checks if the full/lowspeed bus, at the specified starting uframe,
 * has the specified bandwidth available, according to rules listed
 * in USB 2.0 spec section 11.18.1 fig 11-60.
 *
 * This does not check if the transfer would exceed the max ssplit
 * limit of 16, specified in USB 2.0 spec section 11.18.4 requirement #4,
 * since proper scheduling limits ssplits to less than 16 per uframe.
 */
static int tt_available (
	struct ehci_hcd		*ehci,
	unsigned		period,
	struct usb_device	*dev,
	unsigned		frame,
	unsigned		uframe,
	u16			usecs
)
{
	if ((period == 0) || (uframe >= 7))	/* error */
		return 0;

	for (; frame < ehci->periodic_size; frame += period) {
		unsigned short tt_usecs[8];

		periodic_tt_usecs (ehci, dev, frame, tt_usecs);

		ehci_vdbg(ehci, "tt frame %d check %d usecs start uframe %d in"
			" schedule %d/%d/%d/%d/%d/%d/%d/%d\n",
			frame, usecs, uframe,
			tt_usecs[0], tt_usecs[1], tt_usecs[2], tt_usecs[3],
			tt_usecs[4], tt_usecs[5], tt_usecs[6], tt_usecs[7]);

		if (max_tt_usecs[uframe] <= tt_usecs[uframe]) {
			ehci_vdbg(ehci, "frame %d uframe %d fully scheduled\n",
				frame, uframe);
			return 0;
		}

		/* special case for isoc transfers larger than 125us:
		 * the first and each subsequent fully used uframe
		 * must be empty, so as to not illegally delay
		 * already scheduled transactions
		 */
		if (125 < usecs) {
			int ufs = (usecs / 125) - 1;
			int i;
			for (i = uframe; i < (uframe + ufs) && i < 8; i++)
				if (0 < tt_usecs[i]) {
					ehci_vdbg(ehci,
						"multi-uframe xfer can't fit "
						"in frame %d uframe %d\n",
						frame, i);
					return 0;
				}
		}

		tt_usecs[uframe] += usecs;

		carryover_tt_bandwidth(tt_usecs);

		/* fail if the carryover pushed bw past the last uframe's limit */
		if (max_tt_usecs[7] < tt_usecs[7]) {
			ehci_vdbg(ehci,
				"tt unavailable usecs %d frame %d uframe %d\n",
				usecs, frame, uframe);
			return 0;
		}
	}

	return 1;
}
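
/* For example (illustrative): a 400 usec isoc transfer fully occupies
 * three uframes, so ufs = (400 / 125) - 1 = 2 above, and tt_usecs[] must
 * be empty at the starting uframe and the one after it before the
 * transfer can be placed there.
 */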

#else

/* return true iff the device's transaction translator is available
 * for a periodic transfer starting at the specified frame, using
 * all the uframes in the mask.
 */
static int tt_no_collision (
	struct ehci_hcd		*ehci,
	unsigned		period,
	struct usb_device	*dev,
	unsigned		frame,
	u32			uf_mask
)
{
	if (period == 0)	/* error */
		return 0;

	/* note bandwidth wastage:  split never follows csplit
	 * (different dev or endpoint) until the next uframe.
	 * calling convention doesn't make that distinction.
	 */
	for (; frame < ehci->periodic_size; frame += period) {
		union ehci_shadow	here;
		__le32			type;

		here = ehci->pshadow [frame];
		type = Q_NEXT_TYPE (ehci->periodic [frame]);
		while (here.ptr) {
			switch (type) {
			case Q_TYPE_ITD:
				type = Q_NEXT_TYPE (here.itd->hw_next);
				here = here.itd->itd_next;
				continue;
			case Q_TYPE_QH:
				if (same_tt (dev, here.qh->dev)) {
					u32		mask;

					mask = le32_to_cpu (here.qh->hw_info2);
					/* "knows" no gap is needed */
					mask |= mask >> 8;
					if (mask & uf_mask)
						break;
				}
				type = Q_NEXT_TYPE (here.qh->hw_next);
				here = here.qh->qh_next;
				continue;
			case Q_TYPE_SITD:
				if (same_tt (dev, here.sitd->urb->dev)) {
					u16		mask;

					mask = le32_to_cpu (here.sitd
								->hw_uframe);
					/* FIXME assumes no gap for IN! */
					mask |= mask >> 8;
					if (mask & uf_mask)
						break;
				}
				type = Q_NEXT_TYPE (here.sitd->hw_next);
				here = here.sitd->sitd_next;
				continue;
			// case Q_TYPE_FSTN:
			default:
				ehci_dbg (ehci,
					"periodic frame %d bogus type %d\n",
					frame, type);
			}

			/* collision or error */
			return 0;
		}
	}

	/* no collision */
	return 1;
}

#endif /* CONFIG_USB_EHCI_TT_NEWSCHED */

/*-------------------------------------------------------------------------*/

static int enable_periodic (struct ehci_hcd *ehci)
{
	u32	cmd;
	int	status;

	/* did clearing PSE take effect yet?
	 * takes effect only at frame boundaries...
	 */
	status = handshake (&ehci->regs->status, STS_PSS, 0, 9 * 125);
	if (status != 0) {
		ehci_to_hcd(ehci)->state = HC_STATE_HALT;
		return status;
	}

	cmd = readl (&ehci->regs->command) | CMD_PSE;
	writel (cmd, &ehci->regs->command);
	/* posted write ... PSS happens later */
	ehci_to_hcd(ehci)->state = HC_STATE_RUNNING;

	/* make sure ehci_work scans these */
	ehci->next_uframe = readl (&ehci->regs->frame_index)
				% (ehci->periodic_size << 3);
	return 0;
}

static int disable_periodic (struct ehci_hcd *ehci)
{
	u32	cmd;
	int	status;

	/* did setting PSE not take effect yet?
	 * takes effect only at frame boundaries...
	 */
	status = handshake (&ehci->regs->status, STS_PSS, STS_PSS, 9 * 125);
	if (status != 0) {
		ehci_to_hcd(ehci)->state = HC_STATE_HALT;
		return status;
	}

	cmd = readl (&ehci->regs->command) & ~CMD_PSE;
	writel (cmd, &ehci->regs->command);
	/* posted write ... */

	ehci->next_uframe = -1;
	return 0;
}
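
/* Both handshakes above poll STS_PSS for up to 9 * 125 usecs: the Periodic
 * Schedule Enable bit takes effect only at a frame boundary, so a bit over
 * one frame (1.125 msec) bounds the wait.
 */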

/*-------------------------------------------------------------------------*/

/* periodic schedule slots have iso tds (normal or split) first, then a
 * sparse tree for active interrupt transfers.
 *
 * this just links in a qh; caller guarantees uframe masks are set right.
 * no FSTN support (yet; ehci 0.96+)
 */
static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	unsigned	i;
	unsigned	period = qh->period;

	dev_dbg (&qh->dev->dev,
		"link qh%d-%04x/%p start %d [%d/%d us]\n",
		period, le32_to_cpup (&qh->hw_info2) & (QH_CMASK | QH_SMASK),
		qh, qh->start, qh->usecs, qh->c_usecs);

	/* high bandwidth, or otherwise every microframe */
	if (period == 0)
		period = 1;

	for (i = qh->start; i < ehci->periodic_size; i += period) {
		union ehci_shadow	*prev = &ehci->pshadow [i];
		__le32			*hw_p = &ehci->periodic [i];
		union ehci_shadow	here = *prev;
		__le32			type = 0;

		/* skip the iso nodes at list head */
		while (here.ptr) {
			type = Q_NEXT_TYPE (*hw_p);
			if (type == Q_TYPE_QH)
				break;
			prev = periodic_next_shadow (prev, type);
			hw_p = &here.qh->hw_next;
			here = *prev;
		}

		/* sorting each branch by period (slow-->fast)
		 * enables sharing interior tree nodes
		 */
		while (here.ptr && qh != here.qh) {
			if (qh->period > here.qh->period)
				break;
			prev = &here.qh->qh_next;
			hw_p = &here.qh->hw_next;
			here = *prev;
		}
		/* link in this qh, unless some earlier pass did that */
		if (qh != here.qh) {
			qh->qh_next = here;
			if (here.qh)
				qh->hw_next = *hw_p;
			wmb ();
			prev->qh = qh;
			*hw_p = QH_NEXT (qh->qh_dma);
		}
	}
	qh->qh_state = QH_STATE_LINKED;
	qh_get (qh);

	/* update per-qh bandwidth for usbfs */
	ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->period
		? ((qh->usecs + qh->c_usecs) / qh->period)
		: (qh->usecs * 8);

	/* maybe enable periodic schedule processing */
	if (!ehci->periodic_sched++)
		return enable_periodic (ehci);

	return 0;
}
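
/* Sharing example (illustrative): because each branch is sorted slow-->fast,
 * a period-2 qh linked from all even frames can have the lone period-1 qh
 * as its tail; every one of the periodic_size branches then converges on
 * that same interior node instead of duplicating it.
 */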

static void qh_unlink_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	unsigned	i;
	unsigned	period;

	// FIXME:
	// IF this isn't high speed
	//   and this qh is active in the current uframe
	//   (and overlay token SplitXstate is false?)
	// THEN
	//   qh->hw_info1 |= __constant_cpu_to_le32 (1 << 7 /* "ignore" */);

	/* high bandwidth, or otherwise part of every microframe */
	if ((period = qh->period) == 0)
		period = 1;

	for (i = qh->start; i < ehci->periodic_size; i += period)
		periodic_unlink (ehci, i, qh);

	/* update per-qh bandwidth for usbfs */
	ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->period
		? ((qh->usecs + qh->c_usecs) / qh->period)
		: (qh->usecs * 8);

	dev_dbg (&qh->dev->dev,
		"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
		qh->period,
		le32_to_cpup (&qh->hw_info2) & (QH_CMASK | QH_SMASK),
		qh, qh->start, qh->usecs, qh->c_usecs);

	/* qh->qh_next still "live" to HC */
	qh->qh_state = QH_STATE_UNLINK;
	qh->qh_next.ptr = NULL;
	qh_put (qh);

	/* maybe turn off periodic schedule */
	ehci->periodic_sched--;
	if (!ehci->periodic_sched)
		(void) disable_periodic (ehci);
}

static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	unsigned	wait;

	qh_unlink_periodic (ehci, qh);

	/* simple/paranoid:  always delay, expecting the HC needs to read
	 * qh->hw_next or finish a writeback after SPLIT/CSPLIT ... and
	 * expect khubd to clean up after any CSPLITs we won't issue.
	 * active high speed queues may need bigger delays...
	 */
	if (list_empty (&qh->qtd_list)
			|| (__constant_cpu_to_le32 (QH_CMASK)
					& qh->hw_info2) != 0)
		wait = 2;
	else
		wait = 55;	/* worst case: 3 * 1024 */

	udelay (wait);
	qh->qh_state = QH_STATE_IDLE;
	qh->hw_next = EHCI_LIST_END;
	wmb ();
}

/*-------------------------------------------------------------------------*/

static int check_period (
	struct ehci_hcd *ehci,
	unsigned	frame,
	unsigned	uframe,
	unsigned	period,
	unsigned	usecs
) {
	int		claimed;

	/* complete split running into next frame?
	 * given FSTN support, we could sometimes check...
	 */
	if (uframe >= 8)
		return 0;

	/*
	 * 80% periodic == 100 usec/uframe available
	 * convert "usecs we need" to "max already claimed"
	 */
	usecs = 100 - usecs;

	/* we "know" 2 and 4 uframe intervals were rejected; so
	 * for period 0, check _every_ microframe in the schedule.
	 */
	if (unlikely (period == 0)) {
		do {
			for (uframe = 0; uframe < 7; uframe++) {
				claimed = periodic_usecs (ehci, frame, uframe);
				if (claimed > usecs)
					return 0;
			}
		} while ((frame += 1) < ehci->periodic_size);

	/* just check the specified uframe, at that period */
	} else {
		do {
			claimed = periodic_usecs (ehci, frame, uframe);
			if (claimed > usecs)
				return 0;
		} while ((frame += period) < ehci->periodic_size);
	}

	// success!
	return 1;
}

static int check_intr_schedule (
	struct ehci_hcd		*ehci,
	unsigned		frame,
	unsigned		uframe,
	const struct ehci_qh	*qh,
	__le32			*c_maskp
)
{
	int		retval = -ENOSPC;
	u8		mask = 0;

	if (qh->c_usecs && uframe >= 6)		/* FSTN territory? */
		goto done;

	if (!check_period (ehci, frame, uframe, qh->period, qh->usecs))
		goto done;
	if (!qh->c_usecs) {
		retval = 0;
		*c_maskp = 0;
		goto done;
	}

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
	if (tt_available (ehci, qh->period, qh->dev, frame, uframe,
				qh->tt_usecs)) {
		unsigned i;

		/* TODO : this may need FSTN for SSPLIT in uframe 5. */
		for (i=uframe+1; i<8 && i<uframe+4; i++)
			if (!check_period (ehci, frame, i,
						qh->period, qh->c_usecs))
				goto done;
			else
				mask |= 1 << i;

		retval = 0;

		*c_maskp = cpu_to_le32 (mask << 8);
	}
#else
	/* Make sure this tt's buffer is also available for CSPLITs.
	 * We pessimize a bit; probably the typical full speed case
	 * doesn't need the second CSPLIT.
	 *
	 * NOTE:  both SPLIT and CSPLIT could be checked in just
	 * one smart pass...
	 */
	mask = 0x03 << (uframe + qh->gap_uf);
	*c_maskp = cpu_to_le32 (mask << 8);

	mask |= 1 << uframe;
	if (tt_no_collision (ehci, qh->period, qh->dev, frame, mask)) {
		if (!check_period (ehci, frame, uframe + qh->gap_uf + 1,
					qh->period, qh->c_usecs))
			goto done;
		if (!check_period (ehci, frame, uframe + qh->gap_uf,
					qh->period, qh->c_usecs))
			goto done;
		retval = 0;
	}
#endif
done:
	return retval;
}
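
/* check_period() arithmetic, worked through (illustrative): to place a
 * transfer needing 30 usecs, it computes usecs = 100 - 30 = 70 and then
 * fails any candidate uframe where periodic_usecs() already reports more
 * than 70 ... i.e. the 80% periodic ceiling (100 of 125 usecs) holds.
 */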

/* "first fit" scheduling policy used the first time through,
 * or when the previous schedule slot can't be re-used.
 */
static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	int		status;
	unsigned	uframe;
	__le32		c_mask;
	unsigned	frame;		/* 0..(qh->period - 1), or NO_FRAME */

	qh_refresh(ehci, qh);
	qh->hw_next = EHCI_LIST_END;
	frame = qh->start;

	/* reuse the previous schedule slots, if we can */
	if (frame < qh->period) {
		uframe = ffs (le32_to_cpup (&qh->hw_info2) & QH_SMASK);
		status = check_intr_schedule (ehci, frame, --uframe,
				qh, &c_mask);
	} else {
		uframe = 0;
		c_mask = 0;
		status = -ENOSPC;
	}

	/* else scan the schedule to find a group of slots such that all
	 * uframes have enough periodic bandwidth available.
	 */
	if (status) {
		/* "normal" case, uframing flexible except with splits */
		if (qh->period) {
			frame = qh->period - 1;
			do {
				for (uframe = 0; uframe < 8; uframe++) {
					status = check_intr_schedule (ehci,
							frame, uframe, qh,
							&c_mask);
					if (status == 0)
						break;
				}
			} while (status && frame--);

		/* qh->period == 0 means every uframe */
		} else {
			frame = 0;
			status = check_intr_schedule (ehci, 0, 0, qh, &c_mask);
		}
		if (status)
			goto done;
		qh->start = frame;

		/* reset S-frame and (maybe) C-frame masks */
		qh->hw_info2 &= __constant_cpu_to_le32(~(QH_CMASK | QH_SMASK));
		qh->hw_info2 |= qh->period
			? cpu_to_le32 (1 << uframe)
			: __constant_cpu_to_le32 (QH_SMASK);
		qh->hw_info2 |= c_mask;
	} else
		ehci_dbg (ehci, "reused qh %p schedule\n", qh);

	/* stuff into the periodic schedule */
	status = qh_link_periodic (ehci, qh);
done:
	return status;
}
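
/* Reuse path note: frame < qh->period means qh->start still holds a slot
 * from an earlier pass through qh_schedule().  ffs() is 1-based, hence
 * the pre-decrement when recovering the uframe from the S-mask: an S-mask
 * of 0x01 gives ffs() == 1, i.e. uframe 0.
 */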

static int intr_submit (
	struct ehci_hcd		*ehci,
	struct usb_host_endpoint *ep,
	struct urb		*urb,
	struct list_head	*qtd_list,
	gfp_t			mem_flags
) {
	unsigned	epnum;
	unsigned long	flags;
	struct ehci_qh	*qh;
	int		status = 0;
	struct list_head	empty;

	/* get endpoint and transfer/schedule data */
	epnum = ep->desc.bEndpointAddress;

	spin_lock_irqsave (&ehci->lock, flags);

	if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
			&ehci_to_hcd(ehci)->flags))) {
		status = -ESHUTDOWN;
		goto done;
	}

	/* get qh and force any scheduling errors */
	INIT_LIST_HEAD (&empty);
	qh = qh_append_tds (ehci, urb, &empty, epnum, &ep->hcpriv);
	if (qh == NULL) {
		status = -ENOMEM;
		goto done;
	}
	if (qh->qh_state == QH_STATE_IDLE) {
		if ((status = qh_schedule (ehci, qh)) != 0)
			goto done;
	}

	/* then queue the urb's tds to the qh */
	qh = qh_append_tds (ehci, urb, qtd_list, epnum, &ep->hcpriv);
	BUG_ON (qh == NULL);

	/* ... update usbfs periodic stats */
	ehci_to_hcd(ehci)->self.bandwidth_int_reqs++;

done:
	spin_unlock_irqrestore (&ehci->lock, flags);
	if (status)
		qtd_list_free (ehci, urb, qtd_list);

	return status;
}

/*-------------------------------------------------------------------------*/

/* ehci_iso_stream ops work with both ITD and SITD */

static struct ehci_iso_stream *
iso_stream_alloc (gfp_t mem_flags)
{
	struct ehci_iso_stream *stream;

	stream = kzalloc(sizeof *stream, mem_flags);
	if (likely (stream != NULL)) {
		INIT_LIST_HEAD(&stream->td_list);
		INIT_LIST_HEAD(&stream->free_list);
		stream->next_uframe = -1;
		stream->refcount = 1;
	}
	return stream;
}

static void
iso_stream_init (
	struct ehci_hcd		*ehci,
	struct ehci_iso_stream	*stream,
	struct usb_device	*dev,
	int			pipe,
	unsigned		interval
)
{
	static const u8 smask_out [] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f };

	u32			buf1;
	unsigned		epnum, maxp;
	int			is_input;
	long			bandwidth;

	/*
	 * this might be a "high bandwidth" highspeed endpoint,
	 * as encoded in the ep descriptor's wMaxPacket field
	 */
	epnum = usb_pipeendpoint (pipe);
	is_input = usb_pipein (pipe) ? USB_DIR_IN : 0;
	maxp = usb_maxpacket(dev, pipe, !is_input);
	if (is_input) {
		buf1 = (1 << 11);
	} else {
		buf1 = 0;
	}

	/* knows about ITD vs SITD */
	if (dev->speed == USB_SPEED_HIGH) {
		unsigned multi = hb_mult(maxp);

		stream->highspeed = 1;

		maxp = max_packet(maxp);
		buf1 |= maxp;
		maxp *= multi;

		stream->buf0 = cpu_to_le32 ((epnum << 8) | dev->devnum);
		stream->buf1 = cpu_to_le32 (buf1);
		stream->buf2 = cpu_to_le32 (multi);

		/* usbfs wants to report the average usecs per frame tied up
		 * when transfers on this endpoint are scheduled ...
		 */
		stream->usecs = HS_USECS_ISO (maxp);
		bandwidth = stream->usecs * 8;
		bandwidth /= 1 << (interval - 1);

	} else {
		u32		addr;
		int		think_time;
		int		hs_transfers;

		addr = dev->ttport << 24;
		if (!ehci_is_TDI(ehci)
				|| (dev->tt->hub !=
					ehci_to_hcd(ehci)->self.root_hub))
			addr |= dev->tt->hub->devnum << 16;
		addr |= epnum << 8;
		addr |= dev->devnum;
		stream->usecs = HS_USECS_ISO (maxp);
		think_time = dev->tt ? dev->tt->think_time : 0;
		stream->tt_usecs = NS_TO_US (think_time + usb_calc_bus_time (
				dev->speed, is_input, 1, maxp));
		hs_transfers = max (1u, (maxp + 187) / 188);
		if (is_input) {
			u32	tmp;

			addr |= 1 << 31;
			stream->c_usecs = stream->usecs;
			stream->usecs = HS_USECS_ISO (1);
			stream->raw_mask = 1;

			/* c-mask as specified in USB 2.0 11.18.4 3.c */
			tmp = (1 << (hs_transfers + 2)) - 1;
			stream->raw_mask |= tmp << (8 + 2);
		} else
			stream->raw_mask = smask_out [hs_transfers - 1];
		bandwidth = stream->usecs + stream->c_usecs;
		bandwidth /= 1 << (interval + 2);

		/* stream->splits gets created from raw_mask later */
		stream->address = cpu_to_le32 (addr);
	}
	stream->bandwidth = bandwidth;

	stream->udev = dev;

	stream->bEndpointAddress = is_input | epnum;
	stream->interval = interval;
	stream->maxp = maxp;
}
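
/* Bandwidth bookkeeping example (illustrative): a highspeed endpoint with
 * interval 1 transfers every uframe, so usbfs sees stream->usecs * 8 per
 * frame; each increment of interval halves that, e.g. interval 4 books
 * (stream->usecs * 8) / 8 = stream->usecs per frame.
 */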

static void
iso_stream_put(struct ehci_hcd *ehci, struct ehci_iso_stream *stream)
{
	stream->refcount--;

	/* free whenever just a dev->ep reference remains.
	 * not like a QH -- no persistent state (toggle, halt)
	 */
	if (stream->refcount == 1) {
		int		is_in;

		// BUG_ON (!list_empty(&stream->td_list));

		while (!list_empty (&stream->free_list)) {
			struct list_head	*entry;

			entry = stream->free_list.next;
			list_del (entry);

			/* knows about ITD vs SITD */
			if (stream->highspeed) {
				struct ehci_itd		*itd;

				itd = list_entry (entry, struct ehci_itd,
						itd_list);
				dma_pool_free (ehci->itd_pool, itd,
						itd->itd_dma);
			} else {
				struct ehci_sitd	*sitd;

				sitd = list_entry (entry, struct ehci_sitd,
						sitd_list);
				dma_pool_free (ehci->sitd_pool, sitd,
						sitd->sitd_dma);
			}
		}

		is_in = (stream->bEndpointAddress & USB_DIR_IN) ? 0x10 : 0;
		stream->bEndpointAddress &= 0x0f;
		stream->ep->hcpriv = NULL;

		if (stream->rescheduled) {
			ehci_info (ehci, "ep%d%s-iso rescheduled "
				"%lu times in %lu seconds\n",
				stream->bEndpointAddress, is_in ? "in" : "out",
				stream->rescheduled,
				((jiffies - stream->start)/HZ)
				);
		}

		kfree(stream);
	}
}

static inline struct ehci_iso_stream *
iso_stream_get (struct ehci_iso_stream *stream)
{
	if (likely (stream != NULL))
		stream->refcount++;
	return stream;
}
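
/* Refcounting model, roughly: dev->ep owns the base reference taken at
 * allocation; iso_stream_find() adds one per submitter, and each linked
 * itd/sitd holds one more.  So refcount == 1 in iso_stream_put() means
 * only the endpoint reference is left and the stream can be freed.
 */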
"in" : "out"); 1051 stream = NULL; 1052 } 1053 1054 /* caller guarantees an eventual matching iso_stream_put */ 1055 stream = iso_stream_get (stream); 1056 1057 spin_unlock_irqrestore (&ehci->lock, flags); 1058 return stream; 1059 } 1060 1061 /*-------------------------------------------------------------------------*/ 1062 1063 /* ehci_iso_sched ops can be ITD-only or SITD-only */ 1064 1065 static struct ehci_iso_sched * 1066 iso_sched_alloc (unsigned packets, gfp_t mem_flags) 1067 { 1068 struct ehci_iso_sched *iso_sched; 1069 int size = sizeof *iso_sched; 1070 1071 size += packets * sizeof (struct ehci_iso_packet); 1072 iso_sched = kzalloc(size, mem_flags); 1073 if (likely (iso_sched != NULL)) { 1074 INIT_LIST_HEAD (&iso_sched->td_list); 1075 } 1076 return iso_sched; 1077 } 1078 1079 static inline void 1080 itd_sched_init ( 1081 struct ehci_iso_sched *iso_sched, 1082 struct ehci_iso_stream *stream, 1083 struct urb *urb 1084 ) 1085 { 1086 unsigned i; 1087 dma_addr_t dma = urb->transfer_dma; 1088 1089 /* how many uframes are needed for these transfers */ 1090 iso_sched->span = urb->number_of_packets * stream->interval; 1091 1092 /* figure out per-uframe itd fields that we'll need later 1093 * when we fit new itds into the schedule. 1094 */ 1095 for (i = 0; i < urb->number_of_packets; i++) { 1096 struct ehci_iso_packet *uframe = &iso_sched->packet [i]; 1097 unsigned length; 1098 dma_addr_t buf; 1099 u32 trans; 1100 1101 length = urb->iso_frame_desc [i].length; 1102 buf = dma + urb->iso_frame_desc [i].offset; 1103 1104 trans = EHCI_ISOC_ACTIVE; 1105 trans |= buf & 0x0fff; 1106 if (unlikely (((i + 1) == urb->number_of_packets)) 1107 && !(urb->transfer_flags & URB_NO_INTERRUPT)) 1108 trans |= EHCI_ITD_IOC; 1109 trans |= length << 16; 1110 uframe->transaction = cpu_to_le32 (trans); 1111 1112 /* might need to cross a buffer page within a uframe */ 1113 uframe->bufp = (buf & ~(u64)0x0fff); 1114 buf += length; 1115 if (unlikely ((uframe->bufp != (buf & ~(u64)0x0fff)))) 1116 uframe->cross = 1; 1117 } 1118 } 1119 1120 static void 1121 iso_sched_free ( 1122 struct ehci_iso_stream *stream, 1123 struct ehci_iso_sched *iso_sched 1124 ) 1125 { 1126 if (!iso_sched) 1127 return; 1128 // caller must hold ehci->lock! 1129 list_splice (&iso_sched->td_list, &stream->free_list); 1130 kfree (iso_sched); 1131 } 1132 1133 static int 1134 itd_urb_transaction ( 1135 struct ehci_iso_stream *stream, 1136 struct ehci_hcd *ehci, 1137 struct urb *urb, 1138 gfp_t mem_flags 1139 ) 1140 { 1141 struct ehci_itd *itd; 1142 dma_addr_t itd_dma; 1143 int i; 1144 unsigned num_itds; 1145 struct ehci_iso_sched *sched; 1146 unsigned long flags; 1147 1148 sched = iso_sched_alloc (urb->number_of_packets, mem_flags); 1149 if (unlikely (sched == NULL)) 1150 return -ENOMEM; 1151 1152 itd_sched_init (sched, stream, urb); 1153 1154 if (urb->interval < 8) 1155 num_itds = 1 + (sched->span + 7) / 8; 1156 else 1157 num_itds = urb->number_of_packets; 1158 1159 /* allocate/init ITDs */ 1160 spin_lock_irqsave (&ehci->lock, flags); 1161 for (i = 0; i < num_itds; i++) { 1162 1163 /* free_list.next might be cache-hot ... but maybe 1164 * the HC caches it too. avoid that issue for now. 

static int
itd_urb_transaction (
	struct ehci_iso_stream	*stream,
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	gfp_t			mem_flags
)
{
	struct ehci_itd		*itd;
	dma_addr_t		itd_dma;
	int			i;
	unsigned		num_itds;
	struct ehci_iso_sched	*sched;
	unsigned long		flags;

	sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
	if (unlikely (sched == NULL))
		return -ENOMEM;

	itd_sched_init (sched, stream, urb);

	if (urb->interval < 8)
		num_itds = 1 + (sched->span + 7) / 8;
	else
		num_itds = urb->number_of_packets;

	/* allocate/init ITDs */
	spin_lock_irqsave (&ehci->lock, flags);
	for (i = 0; i < num_itds; i++) {

		/* free_list.next might be cache-hot ... but maybe
		 * the HC caches it too.  avoid that issue for now.
		 */

		/* prefer previously-allocated itds */
		if (likely (!list_empty(&stream->free_list))) {
			itd = list_entry (stream->free_list.prev,
					struct ehci_itd, itd_list);
			list_del (&itd->itd_list);
			itd_dma = itd->itd_dma;
		} else
			itd = NULL;

		if (!itd) {
			spin_unlock_irqrestore (&ehci->lock, flags);
			itd = dma_pool_alloc (ehci->itd_pool, mem_flags,
					&itd_dma);
			spin_lock_irqsave (&ehci->lock, flags);
		}

		if (unlikely (NULL == itd)) {
			iso_sched_free (stream, sched);
			spin_unlock_irqrestore (&ehci->lock, flags);
			return -ENOMEM;
		}
		memset (itd, 0, sizeof *itd);
		itd->itd_dma = itd_dma;
		list_add (&itd->itd_list, &sched->td_list);
	}
	spin_unlock_irqrestore (&ehci->lock, flags);

	/* temporarily store schedule info in hcpriv */
	urb->hcpriv = sched;
	urb->error_count = 0;
	return 0;
}

/*-------------------------------------------------------------------------*/

static inline int
itd_slot_ok (
	struct ehci_hcd		*ehci,
	u32			mod,
	u32			uframe,
	u8			usecs,
	u32			period
)
{
	uframe %= period;
	do {
		/* can't commit more than 80% periodic == 100 usec */
		if (periodic_usecs (ehci, uframe >> 3, uframe & 0x7)
				> (100 - usecs))
			return 0;

		/* we know urb->interval is 2^N uframes */
		uframe += period;
	} while (uframe < mod);
	return 1;
}

static inline int
sitd_slot_ok (
	struct ehci_hcd		*ehci,
	u32			mod,
	struct ehci_iso_stream	*stream,
	u32			uframe,
	struct ehci_iso_sched	*sched,
	u32			period_uframes
)
{
	u32			mask, tmp;
	u32			frame, uf;

	mask = stream->raw_mask << (uframe & 7);

	/* for IN, don't wrap CSPLIT into the next frame */
	if (mask & ~0xffff)
		return 0;

	/* this multi-pass logic is simple, but performance may
	 * suffer when the schedule data isn't cached.
	 */

	/* check bandwidth */
	uframe %= period_uframes;
	do {
		u32		max_used;

		frame = uframe >> 3;
		uf = uframe & 7;

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
		/* The tt's fullspeed bus bandwidth must be available.
		 * tt_available scheduling guarantees 10+% for control/bulk.
		 */
		if (!tt_available (ehci, period_uframes << 3,
				stream->udev, frame, uf, stream->tt_usecs))
			return 0;
#else
		/* tt must be idle for start(s), any gap, and csplit.
		 * assume scheduling slop leaves 10+% for control/bulk.
		 */
		if (!tt_no_collision (ehci, period_uframes << 3,
				stream->udev, frame, mask))
			return 0;
#endif

		/* check starts (OUT uses more than one) */
		max_used = 100 - stream->usecs;
		for (tmp = stream->raw_mask & 0xff; tmp; tmp >>= 1, uf++) {
			if (periodic_usecs (ehci, frame, uf) > max_used)
				return 0;
		}

		/* for IN, check CSPLIT */
		if (stream->c_usecs) {
			uf = uframe & 7;
			max_used = 100 - stream->c_usecs;
			do {
				tmp = 1 << uf;
				tmp <<= 8;
				if ((stream->raw_mask & tmp) == 0)
					continue;
				if (periodic_usecs (ehci, frame, uf)
						> max_used)
					return 0;
			} while (++uf < 8);
		}

		/* we know urb->interval is 2^N uframes */
		uframe += period_uframes;
	} while (uframe < mod);

	stream->splits = cpu_to_le32(stream->raw_mask << (uframe & 7));
	return 1;
}
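
/* raw_mask layout (see iso_stream_init): low byte is the S-mask, next
 * byte the C-mask, both in uframe-0-relative terms.  Shifting by
 * (uframe & 7) positions them; if any bit moves past bit 15, a CSPLIT
 * would wrap into the next frame, which the check at the top rejects.
 */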

/*
 * This scheduler plans almost as far into the future as it has actual
 * periodic schedule slots.  (Affected by TUNE_FLS, which defaults to
 * "as small as possible" to be cache-friendlier.)  That limits the size
 * of transfers you can stream reliably; avoid more than 64 msec per urb.
 * Also avoid queue depths of less than ehci's worst irq latency (affected
 * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
 * and other factors); or more than about 230 msec total (for portability,
 * given EHCI_TUNE_FLS and the slop).  Or, write a smarter scheduler!
 */

#define SCHEDULE_SLOP	10	/* frames */

static int
iso_stream_schedule (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct ehci_iso_stream	*stream
)
{
	u32			now, start, max, period;
	int			status;
	unsigned		mod = ehci->periodic_size << 3;
	struct ehci_iso_sched	*sched = urb->hcpriv;

	if (sched->span > (mod - 8 * SCHEDULE_SLOP)) {
		ehci_dbg (ehci, "iso request %p too long\n", urb);
		status = -EFBIG;
		goto fail;
	}

	if ((stream->depth + sched->span) > mod) {
		ehci_dbg (ehci, "request %p would overflow (%d+%d>%d)\n",
			urb, stream->depth, sched->span, mod);
		status = -EFBIG;
		goto fail;
	}

	now = readl (&ehci->regs->frame_index) % mod;

	/* when's the last uframe this urb could start? */
	max = now + mod;

	/* typical case: reuse current schedule. stream is still active,
	 * and no gaps from host falling behind (irq delays etc)
	 */
	if (likely (!list_empty (&stream->td_list))) {
		start = stream->next_uframe;
		if (start < now)
			start += mod;
		if (likely ((start + sched->span) < max))
			goto ready;
		/* else fell behind; someday, try to reschedule */
		status = -EL2NSYNC;
		goto fail;
	}

	/* need to schedule; when's the next (u)frame we could start?
	 * this is bigger than ehci->i_thresh allows; scheduling itself
	 * isn't free, the slop should handle reasonably slow cpus.  it
	 * can also help high bandwidth if the dma and irq loads don't
	 * jump until after the queue is primed.
	 */
	start = SCHEDULE_SLOP * 8 + (now & ~0x07);
	start %= mod;
	stream->next_uframe = start;

	/* NOTE:  assumes URB_ISO_ASAP, to limit complexity/bugs */

	period = urb->interval;
	if (!stream->highspeed)
		period <<= 3;

	/* find a uframe slot with enough bandwidth */
	for (; start < (stream->next_uframe + period); start++) {
		int		enough_space;

		/* check schedule: enough space? */
		if (stream->highspeed)
			enough_space = itd_slot_ok (ehci, mod, start,
					stream->usecs, period);
		else {
			if ((start % 8) >= 6)
				continue;
			enough_space = sitd_slot_ok (ehci, mod, stream,
					start, sched, period);
		}

		/* schedule it here if there's enough bandwidth */
		if (enough_space) {
			stream->next_uframe = start % mod;
			goto ready;
		}
	}

	/* no room in the schedule */
	ehci_dbg (ehci, "iso %ssched full %p (now %d max %d)\n",
		list_empty (&stream->td_list) ? "" : "re",
		urb, now, max);
	status = -ENOSPC;

fail:
	iso_sched_free (stream, sched);
	urb->hcpriv = NULL;
	return status;

ready:
	/* report high speed start in uframes; full speed, in frames */
	urb->start_frame = stream->next_uframe;
	if (!stream->highspeed)
		urb->start_frame >>= 3;
	return 0;
}

/*-------------------------------------------------------------------------*/

static inline void
itd_init (struct ehci_iso_stream *stream, struct ehci_itd *itd)
{
	int i;

	/* it's been recently zeroed */
	itd->hw_next = EHCI_LIST_END;
	itd->hw_bufp [0] = stream->buf0;
	itd->hw_bufp [1] = stream->buf1;
	itd->hw_bufp [2] = stream->buf2;

	for (i = 0; i < 8; i++)
		itd->index[i] = -1;

	/* All other fields are filled when scheduling */
}

static inline void
itd_patch (
	struct ehci_itd		*itd,
	struct ehci_iso_sched	*iso_sched,
	unsigned		index,
	u16			uframe
)
{
	struct ehci_iso_packet	*uf = &iso_sched->packet [index];
	unsigned		pg = itd->pg;

	// BUG_ON (pg == 6 && uf->cross);

	uframe &= 0x07;
	itd->index [uframe] = index;

	itd->hw_transaction [uframe] = uf->transaction;
	itd->hw_transaction [uframe] |= cpu_to_le32 (pg << 12);
	itd->hw_bufp [pg] |= cpu_to_le32 (uf->bufp & ~(u32)0);
	itd->hw_bufp_hi [pg] |= cpu_to_le32 ((u32)(uf->bufp >> 32));

	/* iso_frame_desc[].offset must be strictly increasing */
	if (unlikely (uf->cross)) {
		u64	bufp = uf->bufp + 4096;
		itd->pg = ++pg;
		itd->hw_bufp [pg] |= cpu_to_le32 (bufp & ~(u32)0);
		itd->hw_bufp_hi [pg] |= cpu_to_le32 ((u32)(bufp >> 32));
	}
}
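
/* Page-cross handling above, spelled out: iso_frame_desc[].offset must
 * increase monotonically, so when a packet crosses into the next 4 KB
 * page, bumping itd->pg makes that following page the base that later
 * packets in the same itd keep using.
 */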

	itd->itd_next = ehci->pshadow [frame];
	itd->hw_next = ehci->periodic [frame];
	ehci->pshadow [frame].itd = itd;
	itd->frame = frame;
	wmb ();
	ehci->periodic [frame] = cpu_to_le32 (itd->itd_dma) | Q_TYPE_ITD;
}

/* fit urb's itds into the selected schedule slot; activate as needed */
static int
itd_link_urb (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	unsigned		mod,
	struct ehci_iso_stream	*stream
)
{
	int			packet;
	unsigned		next_uframe, uframe, frame;
	struct ehci_iso_sched	*iso_sched = urb->hcpriv;
	struct ehci_itd		*itd;

	next_uframe = stream->next_uframe % mod;

	if (unlikely (list_empty(&stream->td_list))) {
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				+= stream->bandwidth;
		ehci_vdbg (ehci,
			"schedule devp %s ep%d%s-iso period %d start %d.%d\n",
			urb->dev->devpath, stream->bEndpointAddress & 0x0f,
			(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
			urb->interval,
			next_uframe >> 3, next_uframe & 0x7);
		stream->start = jiffies;
	}
	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;

	/* fill iTDs uframe by uframe */
	for (packet = 0, itd = NULL; packet < urb->number_of_packets; ) {
		if (itd == NULL) {
			/* ASSERT:  we have all necessary itds */
			// BUG_ON (list_empty (&iso_sched->td_list));

			/* ASSERT:  no itds for this endpoint in this uframe */

			itd = list_entry (iso_sched->td_list.next,
					struct ehci_itd, itd_list);
			list_move_tail (&itd->itd_list, &stream->td_list);
			itd->stream = iso_stream_get (stream);
			itd->urb = usb_get_urb (urb);
			itd_init (stream, itd);
		}

		uframe = next_uframe & 0x07;
		frame = next_uframe >> 3;

		itd->usecs [uframe] = stream->usecs;
		itd_patch (itd, iso_sched, packet, uframe);

		next_uframe += stream->interval;
		stream->depth += stream->interval;
		next_uframe %= mod;
		packet++;

		/* link completed itds into the schedule */
		if (((next_uframe >> 3) != frame)
				|| packet == urb->number_of_packets) {
			itd_link (ehci, frame % ehci->periodic_size, itd);
			itd = NULL;
		}
	}
	stream->next_uframe = next_uframe;

	/* don't need that schedule data any more */
	iso_sched_free (stream, iso_sched);
	urb->hcpriv = NULL;

	timer_action (ehci, TIMER_IO_WATCHDOG);
	if (unlikely (!ehci->periodic_sched++))
		return enable_periodic (ehci);
	return 0;
}
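
/* Packing example (illustrative): one itd covers the 8 uframes of a single
 * frame, so with stream->interval == 2 (every other uframe) it carries 4
 * packets before the frame number changes and itd_link() is called; with
 * interval >= 8 each itd carries just one packet.
 */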

#define	ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)

static unsigned
itd_complete (
	struct ehci_hcd	*ehci,
	struct ehci_itd	*itd,
	struct pt_regs	*regs
) {
	struct urb				*urb = itd->urb;
	struct usb_iso_packet_descriptor	*desc;
	u32					t;
	unsigned				uframe;
	int					urb_index = -1;
	struct ehci_iso_stream			*stream = itd->stream;
	struct usb_device			*dev;

	/* for each uframe with a packet */
	for (uframe = 0; uframe < 8; uframe++) {
		if (likely (itd->index[uframe] == -1))
			continue;
		urb_index = itd->index[uframe];
		desc = &urb->iso_frame_desc [urb_index];

		t = le32_to_cpup (&itd->hw_transaction [uframe]);
		itd->hw_transaction [uframe] = 0;
		stream->depth -= stream->interval;

		/* report transfer status */
		if (unlikely (t & ISO_ERRS)) {
			urb->error_count++;
			if (t & EHCI_ISOC_BUF_ERR)
				desc->status = usb_pipein (urb->pipe)
					? -ENOSR  /* hc couldn't read */
					: -ECOMM; /* hc couldn't write */
			else if (t & EHCI_ISOC_BABBLE)
				desc->status = -EOVERFLOW;
			else /* (t & EHCI_ISOC_XACTERR) */
				desc->status = -EPROTO;

			/* HC need not update length with this error */
			if (!(t & EHCI_ISOC_BABBLE))
				desc->actual_length = EHCI_ITD_LENGTH (t);
		} else if (likely ((t & EHCI_ISOC_ACTIVE) == 0)) {
			desc->status = 0;
			desc->actual_length = EHCI_ITD_LENGTH (t);
		}
	}

	usb_put_urb (urb);
	itd->urb = NULL;
	itd->stream = NULL;
	list_move (&itd->itd_list, &stream->free_list);
	iso_stream_put (ehci, stream);

	/* handle completion now? */
	if (likely ((urb_index + 1) != urb->number_of_packets))
		return 0;

	/* ASSERT: it's really the last itd for this urb
	list_for_each_entry (itd, &stream->td_list, itd_list)
		BUG_ON (itd->urb == urb);
	 */

	/* give urb back to the driver ... can be out-of-order */
	dev = urb->dev;
	ehci_urb_done (ehci, urb, regs);
	urb = NULL;

	/* defer stopping schedule; completion can submit */
	ehci->periodic_sched--;
	if (unlikely (!ehci->periodic_sched))
		(void) disable_periodic (ehci);
	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;

	if (unlikely (list_empty (&stream->td_list))) {
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				-= stream->bandwidth;
		ehci_vdbg (ehci,
			"deschedule devp %s ep%d%s-iso\n",
			dev->devpath, stream->bEndpointAddress & 0x0f,
			(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
	}
	iso_stream_put (ehci, stream);

	return 1;
}

/*-------------------------------------------------------------------------*/

static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
	gfp_t mem_flags)
{
	int			status = -EINVAL;
	unsigned long		flags;
	struct ehci_iso_stream	*stream;

	/* Get iso_stream head */
	stream = iso_stream_find (ehci, urb);
	if (unlikely (stream == NULL)) {
		ehci_dbg (ehci, "can't get iso stream\n");
		return -ENOMEM;
	}
	if (unlikely (urb->interval != stream->interval)) {
		ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
			stream->interval, urb->interval);
		goto done;
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n",
		__FUNCTION__, urb->dev->devpath, urb,
		usb_pipeendpoint (urb->pipe),
		usb_pipein (urb->pipe) ? "in" : "out",
		urb->transfer_buffer_length,
		urb->number_of_packets, urb->interval,
		stream);
#endif

	/* allocate ITDs w/o locking anything */
	status = itd_urb_transaction (stream, ehci, urb, mem_flags);
	if (unlikely (status < 0)) {
		ehci_dbg (ehci, "can't init itds\n");
		goto done;
	}

	/* schedule ... need to lock */
	spin_lock_irqsave (&ehci->lock, flags);
	if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
			&ehci_to_hcd(ehci)->flags)))
		status = -ESHUTDOWN;
	else
		status = iso_stream_schedule (ehci, urb, stream);
	if (likely (status == 0))
		itd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
	spin_unlock_irqrestore (&ehci->lock, flags);

done:
	if (unlikely (status < 0))
		iso_stream_put (ehci, stream);
	return status;
}

#ifdef CONFIG_USB_EHCI_SPLIT_ISO

/*-------------------------------------------------------------------------*/

/*
 * "Split ISO TDs" ... used for USB 1.1 devices going through the
 * TTs in USB 2.0 hubs.  These need microframe scheduling.
 */

static inline void
sitd_sched_init (
	struct ehci_iso_sched	*iso_sched,
	struct ehci_iso_stream	*stream,
	struct urb		*urb
)
{
	unsigned	i;
	dma_addr_t	dma = urb->transfer_dma;

	/* how many frames are needed for these transfers */
	iso_sched->span = urb->number_of_packets * stream->interval;

	/* figure out per-frame sitd fields that we'll need later
	 * when we fit new sitds into the schedule.
	 */
	for (i = 0; i < urb->number_of_packets; i++) {
		struct ehci_iso_packet	*packet = &iso_sched->packet [i];
		unsigned		length;
		dma_addr_t		buf;
		u32			trans;

		length = urb->iso_frame_desc [i].length & 0x03ff;
		buf = dma + urb->iso_frame_desc [i].offset;

		trans = SITD_STS_ACTIVE;
		if (((i + 1) == urb->number_of_packets)
				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
			trans |= SITD_IOC;
		trans |= length << 16;
		packet->transaction = cpu_to_le32 (trans);

		/* might need to cross a buffer page within a td */
		packet->bufp = buf;
		packet->buf1 = (buf + length) & ~0x0fff;
		if (packet->buf1 != (buf & ~(u64)0x0fff))
			packet->cross = 1;

		/* OUT uses multiple start-splits */
		if (stream->bEndpointAddress & USB_DIR_IN)
			continue;
		length = (length + 187) / 188;
		if (length > 1) /* BEGIN vs ALL */
			length |= 1 << 3;
		packet->buf1 |= length;
	}
}
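
/* OUT start-split encoding above, worked through (illustrative): a 400
 * byte OUT packet needs (400 + 187) / 188 = 3 start-splits, so a T-count
 * of 3 lands in buf1's low bits with bit 3 set to mark transaction
 * position BEGIN rather than ALL, matching the sITD buffer-pointer-1
 * format.
 */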

static int
sitd_urb_transaction (
	struct ehci_iso_stream	*stream,
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	gfp_t			mem_flags
)
{
	struct ehci_sitd	*sitd;
	dma_addr_t		sitd_dma;
	int			i;
	struct ehci_iso_sched	*iso_sched;
	unsigned long		flags;

	iso_sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
	if (iso_sched == NULL)
		return -ENOMEM;

	sitd_sched_init (iso_sched, stream, urb);

	/* allocate/init sITDs */
	spin_lock_irqsave (&ehci->lock, flags);
	for (i = 0; i < urb->number_of_packets; i++) {

		/* NOTE:  for now, we don't try to handle wraparound cases
		 * for IN (using sitd->hw_backpointer, like a FSTN), which
		 * means we never need two sitds for full speed packets.
		 */

		/* free_list.next might be cache-hot ... but maybe
		 * the HC caches it too.  avoid that issue for now.
		 */

		/* prefer previously-allocated sitds */
		if (!list_empty(&stream->free_list)) {
			sitd = list_entry (stream->free_list.prev,
					struct ehci_sitd, sitd_list);
			list_del (&sitd->sitd_list);
			sitd_dma = sitd->sitd_dma;
		} else
			sitd = NULL;

		if (!sitd) {
			spin_unlock_irqrestore (&ehci->lock, flags);
			sitd = dma_pool_alloc (ehci->sitd_pool, mem_flags,
					&sitd_dma);
			spin_lock_irqsave (&ehci->lock, flags);
		}

		if (!sitd) {
			iso_sched_free (stream, iso_sched);
			spin_unlock_irqrestore (&ehci->lock, flags);
			return -ENOMEM;
		}
		memset (sitd, 0, sizeof *sitd);
		sitd->sitd_dma = sitd_dma;
		list_add (&sitd->sitd_list, &iso_sched->td_list);
	}

	/* temporarily store schedule info in hcpriv */
	urb->hcpriv = iso_sched;
	urb->error_count = 0;

	spin_unlock_irqrestore (&ehci->lock, flags);
	return 0;
}

/*-------------------------------------------------------------------------*/

static inline void
sitd_patch (
	struct ehci_iso_stream	*stream,
	struct ehci_sitd	*sitd,
	struct ehci_iso_sched	*iso_sched,
	unsigned		index
)
{
	struct ehci_iso_packet	*uf = &iso_sched->packet [index];
	u64			bufp = uf->bufp;

	sitd->hw_next = EHCI_LIST_END;
	sitd->hw_fullspeed_ep = stream->address;
	sitd->hw_uframe = stream->splits;
	sitd->hw_results = uf->transaction;
	sitd->hw_backpointer = EHCI_LIST_END;

	bufp = uf->bufp;
	sitd->hw_buf [0] = cpu_to_le32 (bufp);
	sitd->hw_buf_hi [0] = cpu_to_le32 (bufp >> 32);

	sitd->hw_buf [1] = cpu_to_le32 (uf->buf1);
	if (uf->cross)
		bufp += 4096;
	sitd->hw_buf_hi [1] = cpu_to_le32 (bufp >> 32);
	sitd->index = index;
}

static inline void
sitd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
{
	/* note: sitd ordering could matter (CSPLIT then SSPLIT) */
	sitd->sitd_next = ehci->pshadow [frame];
	sitd->hw_next = ehci->periodic [frame];
	ehci->pshadow [frame].sitd = sitd;
	sitd->frame = frame;
	wmb ();
	ehci->periodic [frame] = cpu_to_le32 (sitd->sitd_dma) | Q_TYPE_SITD;
}
"in" : "out", 1884 (next_uframe >> 3) % ehci->periodic_size, 1885 stream->interval, le32_to_cpu (stream->splits)); 1886 stream->start = jiffies; 1887 } 1888 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++; 1889 1890 /* fill sITDs frame by frame */ 1891 for (packet = 0, sitd = NULL; 1892 packet < urb->number_of_packets; 1893 packet++) { 1894 1895 /* ASSERT: we have all necessary sitds */ 1896 BUG_ON (list_empty (&sched->td_list)); 1897 1898 /* ASSERT: no itds for this endpoint in this frame */ 1899 1900 sitd = list_entry (sched->td_list.next, 1901 struct ehci_sitd, sitd_list); 1902 list_move_tail (&sitd->sitd_list, &stream->td_list); 1903 sitd->stream = iso_stream_get (stream); 1904 sitd->urb = usb_get_urb (urb); 1905 1906 sitd_patch (stream, sitd, sched, packet); 1907 sitd_link (ehci, (next_uframe >> 3) % ehci->periodic_size, 1908 sitd); 1909 1910 next_uframe += stream->interval << 3; 1911 stream->depth += stream->interval << 3; 1912 } 1913 stream->next_uframe = next_uframe % mod; 1914 1915 /* don't need that schedule data any more */ 1916 iso_sched_free (stream, sched); 1917 urb->hcpriv = NULL; 1918 1919 timer_action (ehci, TIMER_IO_WATCHDOG); 1920 if (!ehci->periodic_sched++) 1921 return enable_periodic (ehci); 1922 return 0; 1923 } 1924 1925 /*-------------------------------------------------------------------------*/ 1926 1927 #define SITD_ERRS (SITD_STS_ERR | SITD_STS_DBE | SITD_STS_BABBLE \ 1928 | SITD_STS_XACT | SITD_STS_MMF) 1929 1930 static unsigned 1931 sitd_complete ( 1932 struct ehci_hcd *ehci, 1933 struct ehci_sitd *sitd, 1934 struct pt_regs *regs 1935 ) { 1936 struct urb *urb = sitd->urb; 1937 struct usb_iso_packet_descriptor *desc; 1938 u32 t; 1939 int urb_index = -1; 1940 struct ehci_iso_stream *stream = sitd->stream; 1941 struct usb_device *dev; 1942 1943 urb_index = sitd->index; 1944 desc = &urb->iso_frame_desc [urb_index]; 1945 t = le32_to_cpup (&sitd->hw_results); 1946 1947 /* report transfer status */ 1948 if (t & SITD_ERRS) { 1949 urb->error_count++; 1950 if (t & SITD_STS_DBE) 1951 desc->status = usb_pipein (urb->pipe) 1952 ? -ENOSR /* hc couldn't read */ 1953 : -ECOMM; /* hc couldn't write */ 1954 else if (t & SITD_STS_BABBLE) 1955 desc->status = -EOVERFLOW; 1956 else /* XACT, MMF, etc */ 1957 desc->status = -EPROTO; 1958 } else { 1959 desc->status = 0; 1960 desc->actual_length = desc->length - SITD_LENGTH (t); 1961 } 1962 1963 usb_put_urb (urb); 1964 sitd->urb = NULL; 1965 sitd->stream = NULL; 1966 list_move (&sitd->sitd_list, &stream->free_list); 1967 stream->depth -= stream->interval << 3; 1968 iso_stream_put (ehci, stream); 1969 1970 /* handle completion now? */ 1971 if ((urb_index + 1) != urb->number_of_packets) 1972 return 0; 1973 1974 /* ASSERT: it's really the last sitd for this urb 1975 list_for_each_entry (sitd, &stream->td_list, sitd_list) 1976 BUG_ON (sitd->urb == urb); 1977 */ 1978 1979 /* give urb back to the driver */ 1980 dev = urb->dev; 1981 ehci_urb_done (ehci, urb, regs); 1982 urb = NULL; 1983 1984 /* defer stopping schedule; completion can submit */ 1985 ehci->periodic_sched--; 1986 if (!ehci->periodic_sched) 1987 (void) disable_periodic (ehci); 1988 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--; 1989 1990 if (list_empty (&stream->td_list)) { 1991 ehci_to_hcd(ehci)->self.bandwidth_allocated 1992 -= stream->bandwidth; 1993 ehci_vdbg (ehci, 1994 "deschedule devp %s ep%d%s-iso\n", 1995 dev->devpath, stream->bEndpointAddress & 0x0f, 1996 (stream->bEndpointAddress & USB_DIR_IN) ? 
"in" : "out"); 1997 } 1998 iso_stream_put (ehci, stream); 1999 2000 return 1; 2001 } 2002 2003 2004 static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb, 2005 gfp_t mem_flags) 2006 { 2007 int status = -EINVAL; 2008 unsigned long flags; 2009 struct ehci_iso_stream *stream; 2010 2011 /* Get iso_stream head */ 2012 stream = iso_stream_find (ehci, urb); 2013 if (stream == NULL) { 2014 ehci_dbg (ehci, "can't get iso stream\n"); 2015 return -ENOMEM; 2016 } 2017 if (urb->interval != stream->interval) { 2018 ehci_dbg (ehci, "can't change iso interval %d --> %d\n", 2019 stream->interval, urb->interval); 2020 goto done; 2021 } 2022 2023 #ifdef EHCI_URB_TRACE 2024 ehci_dbg (ehci, 2025 "submit %p dev%s ep%d%s-iso len %d\n", 2026 urb, urb->dev->devpath, 2027 usb_pipeendpoint (urb->pipe), 2028 usb_pipein (urb->pipe) ? "in" : "out", 2029 urb->transfer_buffer_length); 2030 #endif 2031 2032 /* allocate SITDs */ 2033 status = sitd_urb_transaction (stream, ehci, urb, mem_flags); 2034 if (status < 0) { 2035 ehci_dbg (ehci, "can't init sitds\n"); 2036 goto done; 2037 } 2038 2039 /* schedule ... need to lock */ 2040 spin_lock_irqsave (&ehci->lock, flags); 2041 if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE, 2042 &ehci_to_hcd(ehci)->flags))) 2043 status = -ESHUTDOWN; 2044 else 2045 status = iso_stream_schedule (ehci, urb, stream); 2046 if (status == 0) 2047 sitd_link_urb (ehci, urb, ehci->periodic_size << 3, stream); 2048 spin_unlock_irqrestore (&ehci->lock, flags); 2049 2050 done: 2051 if (status < 0) 2052 iso_stream_put (ehci, stream); 2053 return status; 2054 } 2055 2056 #else 2057 2058 static inline int 2059 sitd_submit (struct ehci_hcd *ehci, struct urb *urb, gfp_t mem_flags) 2060 { 2061 ehci_dbg (ehci, "split iso support is disabled\n"); 2062 return -ENOSYS; 2063 } 2064 2065 static inline unsigned 2066 sitd_complete ( 2067 struct ehci_hcd *ehci, 2068 struct ehci_sitd *sitd, 2069 struct pt_regs *regs 2070 ) { 2071 ehci_err (ehci, "sitd_complete %p?\n", sitd); 2072 return 0; 2073 } 2074 2075 #endif /* USB_EHCI_SPLIT_ISO */ 2076 2077 /*-------------------------------------------------------------------------*/ 2078 2079 static void 2080 scan_periodic (struct ehci_hcd *ehci, struct pt_regs *regs) 2081 { 2082 unsigned frame, clock, now_uframe, mod; 2083 unsigned modified; 2084 2085 mod = ehci->periodic_size << 3; 2086 2087 /* 2088 * When running, scan from last scan point up to "now" 2089 * else clean up by scanning everything that's left. 2090 * Touches as few pages as possible: cache-friendly. 

static void
scan_periodic (struct ehci_hcd *ehci, struct pt_regs *regs)
{
	unsigned	frame, clock, now_uframe, mod;
	unsigned	modified;

	mod = ehci->periodic_size << 3;

	/*
	 * When running, scan from last scan point up to "now"
	 * else clean up by scanning everything that's left.
	 * Touches as few pages as possible:  cache-friendly.
	 */
	now_uframe = ehci->next_uframe;
	if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
		clock = readl (&ehci->regs->frame_index);
	else
		clock = now_uframe + mod - 1;
	clock %= mod;

	for (;;) {
		union ehci_shadow	q, *q_p;
		__le32			type, *hw_p;
		unsigned		uframes;

		/* don't scan past the live uframe */
		frame = now_uframe >> 3;
		if (frame == (clock >> 3))
			uframes = now_uframe & 0x07;
		else {
			/* safe to scan the whole frame at once */
			now_uframe |= 0x07;
			uframes = 8;
		}

restart:
		/* scan each element in frame's queue for completions */
		q_p = &ehci->pshadow [frame];
		hw_p = &ehci->periodic [frame];
		q.ptr = q_p->ptr;
		type = Q_NEXT_TYPE (*hw_p);
		modified = 0;

		while (q.ptr != NULL) {
			unsigned		uf;
			union ehci_shadow	temp;
			int			live;

			live = HC_IS_RUNNING (ehci_to_hcd(ehci)->state);
			switch (type) {
			case Q_TYPE_QH:
				/* handle any completions */
				temp.qh = qh_get (q.qh);
				type = Q_NEXT_TYPE (q.qh->hw_next);
				q = q.qh->qh_next;
				modified = qh_completions (ehci, temp.qh, regs);
				if (unlikely (list_empty (&temp.qh->qtd_list)))
					intr_deschedule (ehci, temp.qh);
				qh_put (temp.qh);
				break;
			case Q_TYPE_FSTN:
				/* for "save place" FSTNs, look at QH entries
				 * in the previous frame for completions.
				 */
				if (q.fstn->hw_prev != EHCI_LIST_END) {
					dbg ("ignoring completions from FSTNs");
				}
				type = Q_NEXT_TYPE (q.fstn->hw_next);
				q = q.fstn->fstn_next;
				break;
			case Q_TYPE_ITD:
				/* skip itds for later in the frame */
				rmb ();
				for (uf = live ? uframes : 8; uf < 8; uf++) {
					if (0 == (q.itd->hw_transaction [uf]
							& ITD_ACTIVE))
						continue;
					q_p = &q.itd->itd_next;
					hw_p = &q.itd->hw_next;
					type = Q_NEXT_TYPE (q.itd->hw_next);
					q = *q_p;
					break;
				}
				if (uf != 8)
					break;

				/* this one's ready ... HC won't cache the
				 * pointer for much longer, if at all.
				 */
				*q_p = q.itd->itd_next;
				*hw_p = q.itd->hw_next;
				type = Q_NEXT_TYPE (q.itd->hw_next);
				wmb();
				modified = itd_complete (ehci, q.itd, regs);
				q = *q_p;
				break;
			case Q_TYPE_SITD:
				if ((q.sitd->hw_results & SITD_ACTIVE)
						&& live) {
					q_p = &q.sitd->sitd_next;
					hw_p = &q.sitd->hw_next;
					type = Q_NEXT_TYPE (q.sitd->hw_next);
					q = *q_p;
					break;
				}
				*q_p = q.sitd->sitd_next;
				*hw_p = q.sitd->hw_next;
				type = Q_NEXT_TYPE (q.sitd->hw_next);
				wmb();
				modified = sitd_complete (ehci, q.sitd, regs);
				q = *q_p;
				break;
			default:
				dbg ("corrupt type %d frame %d shadow %p",
					type, frame, q.ptr);
				// BUG ();
				q.ptr = NULL;
			}

			/* assume completion callbacks modify the queue */
			if (unlikely (modified))
				goto restart;
		}

		/* stop when we catch up to the HC */

		// FIXME:  this assumes we won't get lapped when
		// latencies climb; that should be rare, but...
		// detect it, and just go all the way around.
		// FLR might help detect this case, so long as latencies
		// don't exceed periodic_size msec (default 1.024 sec).

		// FIXME:  likewise assumes HC doesn't halt mid-scan

		if (now_uframe == clock) {
			unsigned	now;

			if (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
				break;
			ehci->next_uframe = now_uframe;
			now = readl (&ehci->regs->frame_index) % mod;
			if (now_uframe == now)
				break;

			/* rescan the rest of this frame, then ... */
			clock = now;
		} else {
			now_uframe++;
			now_uframe %= mod;
		}
	}
}