Excerpted lines from ehci-sched.c (EHCI periodic schedule support; the file is built as part of ehci-hcd.c)
1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright (c) 2001-2004 by David Brownell
4 * Copyright (c) 2003 Michal Sojka, for high-speed iso transfers
7 /* this file is part of ehci-hcd.c */
9 /*-------------------------------------------------------------------------*/
21 * pre-calculated schedule data to make appending to the queue be quick.
27 * periodic_next_shadow - return "next" pointer on shadow list
37 return &periodic->qh->qh_next; in periodic_next_shadow()
39 return &periodic->fstn->fstn_next; in periodic_next_shadow()
41 return &periodic->itd->itd_next; in periodic_next_shadow()
44 return &periodic->sitd->sitd_next; in periodic_next_shadow()
55 return &periodic->qh->hw->hw_next; in shadow_next_periodic()
58 return periodic->hw_next; in shadow_next_periodic()
62 /* caller must hold ehci->lock */
63 static void periodic_unlink(struct ehci_hcd *ehci, unsigned frame, void *ptr) in periodic_unlink() argument
65 union ehci_shadow *prev_p = &ehci->pshadow[frame]; in periodic_unlink()
66 __hc32 *hw_p = &ehci->periodic[frame]; in periodic_unlink()
87 if (!ehci->use_dummy_qh || in periodic_unlink()
93 *hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma); in periodic_unlink()
96 /*-------------------------------------------------------------------------*/
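/*
 * Editorial sketch, not part of ehci-sched.c: a minimal model of the
 * "shadow list" walk that periodic_unlink() above performs.  The
 * controller follows 32-bit DMA "next" links, while the driver keeps a
 * parallel chain of ordinary C pointers (the shadow) so it can locate an
 * element and patch both views of the list without translating DMA
 * addresses back to virtual ones.  Types and field names are invented
 * for illustration only.
 */
#include <stdint.h>

struct fake_qh {
    uint32_t hw_next;        /* link as the hardware would read it */
    struct fake_qh *next;    /* shadow: the driver's own pointer   */
};

/* Unlink victim from the chain rooted at *head_hw / *head_shadow. */
static void shadow_unlink(uint32_t *head_hw, struct fake_qh **head_shadow,
                          struct fake_qh *victim)
{
    uint32_t *hw_p = head_hw;
    struct fake_qh **prev = head_shadow;

    /* advance both cursors in lockstep until *prev is the victim */
    while (*prev && *prev != victim) {
        hw_p = &(*prev)->hw_next;
        prev = &(*prev)->next;
    }
    if (!*prev)
        return;                 /* not on this branch */

    *prev = victim->next;       /* patch the software view ... */
    *hw_p = victim->hw_next;    /* ... then the hardware-visible link */
}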
103 struct usb_tt *utt = udev->tt; in find_tt()
117 if (utt->multi) { in find_tt()
118 tt_index = utt->hcpriv; in find_tt()
120 tt_index = kcalloc(utt->hub->maxchild, in find_tt()
124 return ERR_PTR(-ENOMEM); in find_tt()
125 utt->hcpriv = tt_index; in find_tt()
128 port = udev->ttport - 1; in find_tt()
132 ptt = (struct ehci_tt **) &utt->hcpriv; in find_tt()
138 hcd_to_ehci(bus_to_hcd(udev->bus)); in find_tt()
143 utt->hcpriv = NULL; in find_tt()
146 return ERR_PTR(-ENOMEM); in find_tt()
148 list_add_tail(&tt->tt_list, &ehci->tt_list); in find_tt()
149 INIT_LIST_HEAD(&tt->ps_list); in find_tt()
150 tt->usb_tt = utt; in find_tt()
151 tt->tt_port = port; in find_tt()
161 struct usb_tt *utt = udev->tt; in drop_tt()
165 if (!utt || !utt->hcpriv) in drop_tt()
169 if (utt->multi) { in drop_tt()
170 tt_index = utt->hcpriv; in drop_tt()
171 ptt = &tt_index[udev->ttport - 1]; in drop_tt()
174 for (i = 0; i < utt->hub->maxchild; ++i) in drop_tt()
178 ptt = (struct ehci_tt **) &utt->hcpriv; in drop_tt()
182 if (!tt || !list_empty(&tt->ps_list)) in drop_tt()
185 list_del(&tt->tt_list); in drop_tt()
189 utt->hcpriv = NULL; in drop_tt()
197 dev_dbg(&ps->udev->dev, in bandwidth_dbg()
199 ps->ep->desc.bEndpointAddress, in bandwidth_dbg()
201 (ps->bw_phase << 3) + ps->phase_uf, ps->bw_uperiod, in bandwidth_dbg()
202 ps->phase, ps->phase_uf, ps->period, in bandwidth_dbg()
203 ps->usecs, ps->c_usecs, ps->cs_mask); in bandwidth_dbg()
211 int usecs = qh->ps.usecs; in reserve_release_intr_bandwidth()
212 int c_usecs = qh->ps.c_usecs; in reserve_release_intr_bandwidth()
213 int tt_usecs = qh->ps.tt_usecs; in reserve_release_intr_bandwidth()
216 if (qh->ps.phase == NO_FRAME) /* Bandwidth wasn't reserved */ in reserve_release_intr_bandwidth()
218 start_uf = qh->ps.bw_phase << 3; in reserve_release_intr_bandwidth()
220 bandwidth_dbg(ehci, sign, "intr", &qh->ps); in reserve_release_intr_bandwidth()
223 usecs = -usecs; in reserve_release_intr_bandwidth()
224 c_usecs = -c_usecs; in reserve_release_intr_bandwidth()
225 tt_usecs = -tt_usecs; in reserve_release_intr_bandwidth()
228 /* Entire transaction (high speed) or start-split (full/low speed) */ in reserve_release_intr_bandwidth()
229 for (i = start_uf + qh->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE; in reserve_release_intr_bandwidth()
230 i += qh->ps.bw_uperiod) in reserve_release_intr_bandwidth()
231 ehci->bandwidth[i] += usecs; in reserve_release_intr_bandwidth()
233 /* Complete-split (full/low speed) */ in reserve_release_intr_bandwidth()
234 if (qh->ps.c_usecs) { in reserve_release_intr_bandwidth()
237 i += qh->ps.bw_uperiod) { in reserve_release_intr_bandwidth()
239 if (qh->ps.cs_mask & m) in reserve_release_intr_bandwidth()
240 ehci->bandwidth[i+j] += c_usecs; in reserve_release_intr_bandwidth()
253 tt = find_tt(qh->ps.udev); in reserve_release_intr_bandwidth()
255 list_add_tail(&qh->ps.ps_list, &tt->ps_list); in reserve_release_intr_bandwidth()
257 list_del(&qh->ps.ps_list); in reserve_release_intr_bandwidth()
260 i += qh->ps.bw_period) in reserve_release_intr_bandwidth()
261 tt->bandwidth[i] += tt_usecs; in reserve_release_intr_bandwidth()
265 /*-------------------------------------------------------------------------*/
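/*
 * Editorial sketch, not part of ehci-sched.c: the reserve/release pattern
 * of reserve_release_intr_bandwidth() above, reduced to its core.  One
 * table records the microseconds already committed in each microframe of
 * the repeating schedule; reserving walks the table from the endpoint's
 * phase in steps of its microframe period and adds its cost, and releasing
 * subtracts it by flipping the sign.  The table size and limit are
 * illustrative values, not the driver's.
 */
#include <stdio.h>

#define BW_SLOTS  64          /* microframes covered by the repeating table */
#define BW_LIMIT  100         /* allowed usecs per microframe (e.g. ~80%)   */

static int bw_table[BW_SLOTS];

/* sign > 0 reserves, sign < 0 releases */
static void adjust_bandwidth(unsigned phase_uf, unsigned uperiod,
                             int usecs, int sign)
{
    unsigned i;

    for (i = phase_uf; i < BW_SLOTS; i += uperiod)
        bw_table[i] += sign * usecs;
}

/* true if a new reservation would still fit in every slot it touches */
static int reservation_fits(unsigned phase_uf, unsigned uperiod, int usecs)
{
    unsigned i;

    for (i = phase_uf; i < BW_SLOTS; i += uperiod)
        if (bw_table[i] + usecs > BW_LIMIT)
            return 0;
    return 1;
}

int main(void)
{
    if (reservation_fits(2, 8, 30))
        adjust_bandwidth(2, 8, 30, +1);   /* reserve */
    adjust_bandwidth(2, 8, 30, -1);       /* release */
    printf("slot 2 carries %d usecs\n", bw_table[2]);
    return 0;
}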
279 list_for_each_entry(ps, &tt->ps_list, ps_list) { in compute_tt_budget()
280 for (uframe = ps->bw_phase << 3; uframe < EHCI_BANDWIDTH_SIZE; in compute_tt_budget()
281 uframe += ps->bw_uperiod) { in compute_tt_budget()
283 x = ps->tt_usecs; in compute_tt_budget()
286 for (uf = ps->phase_uf; uf < 8; ++uf) { in compute_tt_budget()
295 x -= 125; in compute_tt_budget()
304 if (!dev1->tt || !dev2->tt) in same_tt()
306 if (dev1->tt != dev2->tt) in same_tt()
308 if (dev1->tt->multi) in same_tt()
309 return dev1->ttport == dev2->ttport; in same_tt()
326 tt_usecs[i+1] += tt_usecs[i] - max_tt_usecs[i]; in carryover_tt_bandwidth()
335 * specified frame/uframe. Note that (as summarized in section 11.19
340 * should be executed in "B-frame" terms, which is the same as the
341 * highspeed ssplit's uframe (which is in "H-frame" terms). For example
342 * a ssplit in "H-frame" 0 causes a transfer in "B-frame" 0.
345 * This checks if the full/lowspeed bus, at the specified starting uframe,
347 * in USB 2.0 spec section 11.18.1 fig 11-60.
357 unsigned frame, in tt_available() argument
361 unsigned period = ps->bw_period; in tt_available()
362 unsigned usecs = ps->tt_usecs; in tt_available()
367 for (frame &= period - 1; frame < EHCI_BANDWIDTH_FRAMES; in tt_available()
368 frame += period) { in tt_available()
372 if (tt->bandwidth[frame] + usecs > 900) in tt_available()
375 uf = frame << 3; in tt_available()
377 tt_usecs[i] = ehci->tt_budget[uf]; in tt_available()
410 * for a periodic transfer starting at the specified frame, using
417 unsigned frame, in tt_no_collision() argument
428 for (; frame < ehci->periodic_size; frame += period) { in tt_no_collision()
433 here = ehci->pshadow[frame]; in tt_no_collision()
434 type = Q_NEXT_TYPE(ehci, ehci->periodic[frame]); in tt_no_collision()
438 type = Q_NEXT_TYPE(ehci, here.itd->hw_next); in tt_no_collision()
439 here = here.itd->itd_next; in tt_no_collision()
442 hw = here.qh->hw; in tt_no_collision()
443 if (same_tt(dev, here.qh->ps.udev)) { in tt_no_collision()
447 hw->hw_info2); in tt_no_collision()
453 type = Q_NEXT_TYPE(ehci, hw->hw_next); in tt_no_collision()
454 here = here.qh->qh_next; in tt_no_collision()
457 if (same_tt(dev, here.sitd->urb->dev)) { in tt_no_collision()
461 ->hw_uframe); in tt_no_collision()
467 type = Q_NEXT_TYPE(ehci, here.sitd->hw_next); in tt_no_collision()
468 here = here.sitd->sitd_next; in tt_no_collision()
473 "periodic frame %d bogus type %d\n", in tt_no_collision()
474 frame, type); in tt_no_collision()
488 /*-------------------------------------------------------------------------*/
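/*
 * Editorial sketch, not part of ehci-sched.c: the carry-over trick visible
 * in carryover_tt_bandwidth() above.  Each of the eight microframes in a
 * frame has a ceiling on transaction-translator time; when one microframe
 * is over-committed, the excess is pushed into the next microframe before
 * that one is judged.  The ceiling values below are placeholders, not the
 * driver's table.
 */
static const int max_tt_usecs[8] = { 125, 125, 125, 125, 125, 125, 30, 0 };

/*
 * Returns nonzero when the per-microframe load fits once carry-over is
 * applied.  The array is modified in place, mirroring the in-place
 * accumulation in the excerpt above.
 */
static int tt_frame_fits(int tt_usecs[8])
{
    int i;

    for (i = 0; i < 7; i++)
        if (tt_usecs[i] > max_tt_usecs[i])
            tt_usecs[i + 1] += tt_usecs[i] - max_tt_usecs[i];

    /* the last microframe has no headroom, so excess reaching it fails */
    return tt_usecs[7] <= max_tt_usecs[7];
}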
492 if (ehci->periodic_count++) in enable_periodic()
496 ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_PERIODIC); in enable_periodic()
506 if (--ehci->periodic_count) in disable_periodic()
513 /*-------------------------------------------------------------------------*/
524 unsigned period = qh->ps.period; in qh_link_periodic()
526 dev_dbg(&qh->ps.udev->dev, in qh_link_periodic()
527 "link qh%d-%04x/%p start %d [%d/%d us]\n", in qh_link_periodic()
528 period, hc32_to_cpup(ehci, &qh->hw->hw_info2) in qh_link_periodic()
530 qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs); in qh_link_periodic()
536 for (i = qh->ps.phase; i < ehci->periodic_size; i += period) { in qh_link_periodic()
537 union ehci_shadow *prev = &ehci->pshadow[i]; in qh_link_periodic()
538 __hc32 *hw_p = &ehci->periodic[i]; in qh_link_periodic()
552 /* sorting each branch by period (slow-->fast) in qh_link_periodic()
556 if (qh->ps.period > here.qh->ps.period) in qh_link_periodic()
558 prev = &here.qh->qh_next; in qh_link_periodic()
559 hw_p = &here.qh->hw->hw_next; in qh_link_periodic()
564 qh->qh_next = here; in qh_link_periodic()
566 qh->hw->hw_next = *hw_p; in qh_link_periodic()
568 prev->qh = qh; in qh_link_periodic()
569 *hw_p = QH_NEXT(ehci, qh->qh_dma); in qh_link_periodic()
572 qh->qh_state = QH_STATE_LINKED; in qh_link_periodic()
573 qh->xacterrs = 0; in qh_link_periodic()
574 qh->unlink_reason = 0; in qh_link_periodic()
576 /* update per-qh bandwidth for debugfs */ in qh_link_periodic()
577 ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->ps.bw_period in qh_link_periodic()
578 ? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period) in qh_link_periodic()
579 : (qh->ps.usecs * 8); in qh_link_periodic()
581 list_add(&qh->intr_node, &ehci->intr_qh_list); in qh_link_periodic()
584 ++ehci->intr_count; in qh_link_periodic()
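/*
 * Editorial sketch, not part of ehci-sched.c: the sorted insert performed
 * by qh_link_periodic() above.  Every frame-list branch is kept ordered
 * from the longest polling period to the shortest, so the short-period
 * entries (which appear in many frames) end up on the shared tail of each
 * branch.  The node type and field names are invented for illustration.
 */
struct branch_node {
    struct branch_node *next;
    unsigned period;          /* polling period, in frames */
};

/* Insert n into the branch at *head, keeping periods descending. */
static void insert_sorted_by_period(struct branch_node **head,
                                    struct branch_node *n)
{
    struct branch_node **prev = head;

    /* skip entries polled no more often than the new one */
    while (*prev && (*prev)->period >= n->period)
        prev = &(*prev)->next;

    n->next = *prev;
    *prev = n;
}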
594 * If qh is for a low/full-speed device, simply unlinking it in qh_unlink_periodic()
597 * waiting at least one frame, as described in EHCI 4.12.2.5. in qh_unlink_periodic()
609 period = qh->ps.period ? : 1; in qh_unlink_periodic()
611 for (i = qh->ps.phase; i < ehci->periodic_size; i += period) in qh_unlink_periodic()
614 /* update per-qh bandwidth for debugfs */ in qh_unlink_periodic()
615 ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->ps.bw_period in qh_unlink_periodic()
616 ? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period) in qh_unlink_periodic()
617 : (qh->ps.usecs * 8); in qh_unlink_periodic()
619 dev_dbg(&qh->ps.udev->dev, in qh_unlink_periodic()
620 "unlink qh%d-%04x/%p start %d [%d/%d us]\n", in qh_unlink_periodic()
621 qh->ps.period, in qh_unlink_periodic()
622 hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK), in qh_unlink_periodic()
623 qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs); in qh_unlink_periodic()
625 /* qh->qh_next still "live" to HC */ in qh_unlink_periodic()
626 qh->qh_state = QH_STATE_UNLINK; in qh_unlink_periodic()
627 qh->qh_next.ptr = NULL; in qh_unlink_periodic()
629 if (ehci->qh_scan_next == qh) in qh_unlink_periodic()
630 ehci->qh_scan_next = list_entry(qh->intr_node.next, in qh_unlink_periodic()
632 list_del(&qh->intr_node); in qh_unlink_periodic()
637 if (qh->qh_state != QH_STATE_LINKED || in cancel_unlink_wait_intr()
638 list_empty(&qh->unlink_node)) in cancel_unlink_wait_intr()
641 list_del_init(&qh->unlink_node); in cancel_unlink_wait_intr()
652 if (qh->qh_state != QH_STATE_LINKED) in start_unlink_intr()
668 qh->unlink_cycle = ehci->intr_unlink_cycle; in start_unlink_intr()
671 list_add_tail(&qh->unlink_node, &ehci->intr_unlink); in start_unlink_intr()
673 if (ehci->intr_unlinking) in start_unlink_intr()
675 else if (ehci->rh_state < EHCI_RH_RUNNING) in start_unlink_intr()
677 else if (ehci->intr_unlink.next == &qh->unlink_node) { in start_unlink_intr()
679 ++ehci->intr_unlink_cycle; in start_unlink_intr()
691 qh->unlink_cycle = ehci->intr_unlink_wait_cycle; in start_unlink_intr_wait()
694 list_add_tail(&qh->unlink_node, &ehci->intr_unlink_wait); in start_unlink_intr_wait()
696 if (ehci->rh_state < EHCI_RH_RUNNING) in start_unlink_intr_wait()
698 else if (ehci->intr_unlink_wait.next == &qh->unlink_node) { in start_unlink_intr_wait()
700 ++ehci->intr_unlink_wait_cycle; in start_unlink_intr_wait()
706 struct ehci_qh_hw *hw = qh->hw; in end_unlink_intr()
709 qh->qh_state = QH_STATE_IDLE; in end_unlink_intr()
710 hw->hw_next = EHCI_LIST_END(ehci); in end_unlink_intr()
712 if (!list_empty(&qh->qtd_list)) in end_unlink_intr()
716 if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) { in end_unlink_intr()
727 * FIXME kill the now-dysfunctional queued urbs in end_unlink_intr()
736 --ehci->intr_count; in end_unlink_intr()
740 /*-------------------------------------------------------------------------*/
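/*
 * Editorial sketch, not part of ehci-sched.c: one minimal way to realize
 * the cycle-stamped deferred unlink that start_unlink_intr() and
 * start_unlink_intr_wait() above rely on (the driver's hrtimer handling
 * differs in detail).  Each unlinked entry is stamped with the current
 * cycle; a tick that fires at most once per frame reclaims only entries
 * stamped in an earlier cycle, so at least one full frame passes before
 * the entry is handed back.  Names and the fixed-size table are invented
 * for illustration.
 */
#include <stddef.h>

struct pending_unlink {
    int in_use;
    unsigned cycle;           /* cycle in which the entry was unlinked */
};

static unsigned current_cycle;
static struct pending_unlink pending[16];

static void queue_unlink(struct pending_unlink *p)
{
    p->cycle = current_cycle;
    p->in_use = 1;
}

/* Called from a timer that expires at most once per frame. */
static void unlink_timer_tick(void (*reclaim)(struct pending_unlink *))
{
    size_t i;

    for (i = 0; i < sizeof(pending) / sizeof(pending[0]); i++) {
        /* a different stamp means it was queued before the last tick ran */
        if (pending[i].in_use && pending[i].cycle != current_cycle) {
            pending[i].in_use = 0;
            reclaim(&pending[i]);
        }
    }
    current_cycle++;
}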
744 unsigned frame, in check_period() argument
749 /* complete split running into next frame? in check_period()
756 usecs = ehci->uframe_periodic_max - usecs; in check_period()
758 for (uframe += frame << 3; uframe < EHCI_BANDWIDTH_SIZE; in check_period()
760 if (ehci->bandwidth[uframe] > usecs) in check_period()
770 unsigned frame, in check_intr_schedule() argument
777 int retval = -ENOSPC; in check_intr_schedule()
780 if (qh->ps.c_usecs && uframe >= 6) /* FSTN territory? */ in check_intr_schedule()
783 if (!check_period(ehci, frame, uframe, qh->ps.bw_uperiod, qh->ps.usecs)) in check_intr_schedule()
785 if (!qh->ps.c_usecs) { in check_intr_schedule()
792 if (tt_available(ehci, &qh->ps, tt, frame, uframe)) { in check_intr_schedule()
797 if (!check_period(ehci, frame, i, in check_intr_schedule()
798 qh->ps.bw_uperiod, qh->ps.c_usecs)) in check_intr_schedule()
809 * We pessimize a bit; probably the typical full speed case in check_intr_schedule()
815 mask = 0x03 << (uframe + qh->gap_uf); in check_intr_schedule()
819 if (tt_no_collision(ehci, qh->ps.bw_period, qh->ps.udev, frame, mask)) { in check_intr_schedule()
820 if (!check_period(ehci, frame, uframe + qh->gap_uf + 1, in check_intr_schedule()
821 qh->ps.bw_uperiod, qh->ps.c_usecs)) in check_intr_schedule()
823 if (!check_period(ehci, frame, uframe + qh->gap_uf, in check_intr_schedule()
824 qh->ps.bw_uperiod, qh->ps.c_usecs)) in check_intr_schedule()
834 * or when the previous schedule slot can't be re-used.
841 struct ehci_qh_hw *hw = qh->hw; in qh_schedule()
844 hw->hw_next = EHCI_LIST_END(ehci); in qh_schedule()
847 if (qh->ps.phase != NO_FRAME) { in qh_schedule()
854 tt = find_tt(qh->ps.udev); in qh_schedule()
859 compute_tt_budget(ehci->tt_budget, tt); in qh_schedule()
865 if (qh->ps.bw_period) { in qh_schedule()
867 unsigned frame; in qh_schedule() local
869 for (i = qh->ps.bw_period; i > 0; --i) { in qh_schedule()
870 frame = ++ehci->random_frame & (qh->ps.bw_period - 1); in qh_schedule()
873 frame, uframe, qh, &c_mask, tt); in qh_schedule()
879 /* qh->ps.bw_period == 0 means every uframe */ in qh_schedule()
887 qh->ps.phase = (qh->ps.period ? ehci->random_frame & in qh_schedule()
888 (qh->ps.period - 1) : 0); in qh_schedule()
889 qh->ps.bw_phase = qh->ps.phase & (qh->ps.bw_period - 1); in qh_schedule()
890 qh->ps.phase_uf = uframe; in qh_schedule()
891 qh->ps.cs_mask = qh->ps.period ? in qh_schedule()
895 /* reset S-frame and (maybe) C-frame masks */ in qh_schedule()
896 hw->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK)); in qh_schedule()
897 hw->hw_info2 |= cpu_to_hc32(ehci, qh->ps.cs_mask); in qh_schedule()
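/*
 * Editorial sketch, not part of ehci-sched.c: the phase search that
 * qh_schedule() and check_intr_schedule() above carry out, stripped of
 * the split-transaction details.  For an endpoint with a given period and
 * per-visit cost, candidate starting slots are tried in turn and the
 * first one whose every occurrence stays under the per-slot budget wins.
 * The table size and budget are illustrative values.
 */
#define SCHED_SLOTS  64
#define SLOT_BUDGET  100

/* Returns the chosen phase, or -1 if no starting slot fits. */
static int pick_phase(const int load[SCHED_SLOTS], unsigned period, int usecs)
{
    unsigned phase, i;

    for (phase = 0; phase < period; phase++) {
        int ok = 1;

        for (i = phase; i < SCHED_SLOTS; i += period) {
            if (load[i] + usecs > SLOT_BUDGET) {
                ok = 0;
                break;
            }
        }
        if (ok)
            return (int)phase;
    }
    return -1;
}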
917 epnum = urb->ep->desc.bEndpointAddress; in intr_submit()
919 spin_lock_irqsave(&ehci->lock, flags); in intr_submit()
922 status = -ESHUTDOWN; in intr_submit()
931 qh = qh_append_tds(ehci, urb, &empty, epnum, &urb->ep->hcpriv); in intr_submit()
933 status = -ENOMEM; in intr_submit()
936 if (qh->qh_state == QH_STATE_IDLE) { in intr_submit()
943 qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv); in intr_submit()
947 if (qh->qh_state == QH_STATE_IDLE) { in intr_submit()
956 ehci_to_hcd(ehci)->self.bandwidth_int_reqs++; in intr_submit()
962 spin_unlock_irqrestore(&ehci->lock, flags); in intr_submit()
973 list_for_each_entry_safe(qh, ehci->qh_scan_next, &ehci->intr_qh_list, in scan_intr()
977 if (!list_empty(&qh->qtd_list)) { in scan_intr()
982 * drops the lock. That's why ehci->qh_scan_next in scan_intr()
984 * gets unlinked then ehci->qh_scan_next is adjusted in scan_intr()
990 else if (unlikely(list_empty(&qh->qtd_list) && in scan_intr()
991 qh->qh_state == QH_STATE_LINKED)) in scan_intr()
997 /*-------------------------------------------------------------------------*/
1008 INIT_LIST_HEAD(&stream->td_list); in iso_stream_alloc()
1009 INIT_LIST_HEAD(&stream->free_list); in iso_stream_alloc()
1010 stream->next_uframe = NO_FRAME; in iso_stream_alloc()
1011 stream->ps.phase = NO_FRAME; in iso_stream_alloc()
1025 struct usb_device *dev = urb->dev; in iso_stream_init()
1035 epnum = usb_pipeendpoint(urb->pipe); in iso_stream_init()
1036 is_input = usb_pipein(urb->pipe) ? USB_DIR_IN : 0; in iso_stream_init()
1037 maxp = usb_endpoint_maxp(&urb->ep->desc); in iso_stream_init()
1041 if (dev->speed == USB_SPEED_HIGH) { in iso_stream_init()
1042 unsigned multi = usb_endpoint_maxp_mult(&urb->ep->desc); in iso_stream_init()
1044 stream->highspeed = 1; in iso_stream_init()
1049 stream->buf0 = cpu_to_hc32(ehci, (epnum << 8) | dev->devnum); in iso_stream_init()
1050 stream->buf1 = cpu_to_hc32(ehci, buf1); in iso_stream_init()
1051 stream->buf2 = cpu_to_hc32(ehci, multi); in iso_stream_init()
1053 /* usbfs wants to report the average usecs per frame tied up in iso_stream_init()
1056 stream->ps.usecs = HS_USECS_ISO(maxp); in iso_stream_init()
1060 1 << (urb->ep->desc.bInterval - 1)); in iso_stream_init()
1062 /* Allow urb->interval to override */ in iso_stream_init()
1063 stream->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval); in iso_stream_init()
1065 stream->uperiod = urb->interval; in iso_stream_init()
1066 stream->ps.period = urb->interval >> 3; in iso_stream_init()
1067 stream->bandwidth = stream->ps.usecs * 8 / in iso_stream_init()
1068 stream->ps.bw_uperiod; in iso_stream_init()
1075 addr = dev->ttport << 24; in iso_stream_init()
1077 || (dev->tt->hub != in iso_stream_init()
1078 ehci_to_hcd(ehci)->self.root_hub)) in iso_stream_init()
1079 addr |= dev->tt->hub->devnum << 16; in iso_stream_init()
1081 addr |= dev->devnum; in iso_stream_init()
1082 stream->ps.usecs = HS_USECS_ISO(maxp); in iso_stream_init()
1083 think_time = dev->tt->think_time; in iso_stream_init()
1084 stream->ps.tt_usecs = NS_TO_US(think_time + usb_calc_bus_time( in iso_stream_init()
1085 dev->speed, is_input, 1, maxp)); in iso_stream_init()
1091 stream->ps.c_usecs = stream->ps.usecs; in iso_stream_init()
1092 stream->ps.usecs = HS_USECS_ISO(1); in iso_stream_init()
1093 stream->ps.cs_mask = 1; in iso_stream_init()
1095 /* c-mask as specified in USB 2.0 11.18.4 3.c */ in iso_stream_init()
1096 tmp = (1 << (hs_transfers + 2)) - 1; in iso_stream_init()
1097 stream->ps.cs_mask |= tmp << (8 + 2); in iso_stream_init()
1099 stream->ps.cs_mask = smask_out[hs_transfers - 1]; in iso_stream_init()
1103 1 << (urb->ep->desc.bInterval - 1)); in iso_stream_init()
1105 /* Allow urb->interval to override */ in iso_stream_init()
1106 stream->ps.bw_period = min_t(unsigned, tmp, urb->interval); in iso_stream_init()
1107 stream->ps.bw_uperiod = stream->ps.bw_period << 3; in iso_stream_init()
1109 stream->ps.period = urb->interval; in iso_stream_init()
1110 stream->uperiod = urb->interval << 3; in iso_stream_init()
1111 stream->bandwidth = (stream->ps.usecs + stream->ps.c_usecs) / in iso_stream_init()
1112 stream->ps.bw_period; in iso_stream_init()
1114 /* stream->splits gets created from cs_mask later */ in iso_stream_init()
1115 stream->address = cpu_to_hc32(ehci, addr); in iso_stream_init()
1118 stream->ps.udev = dev; in iso_stream_init()
1119 stream->ps.ep = urb->ep; in iso_stream_init()
1121 stream->bEndpointAddress = is_input | epnum; in iso_stream_init()
1122 stream->maxp = maxp; in iso_stream_init()
1133 epnum = usb_pipeendpoint (urb->pipe); in iso_stream_find()
1134 if (usb_pipein(urb->pipe)) in iso_stream_find()
1135 ep = urb->dev->ep_in[epnum]; in iso_stream_find()
1137 ep = urb->dev->ep_out[epnum]; in iso_stream_find()
1139 spin_lock_irqsave(&ehci->lock, flags); in iso_stream_find()
1140 stream = ep->hcpriv; in iso_stream_find()
1145 ep->hcpriv = stream; in iso_stream_find()
1149 /* if dev->ep [epnum] is a QH, hw is set */ in iso_stream_find()
1150 } else if (unlikely(stream->hw != NULL)) { in iso_stream_find()
1152 urb->dev->devpath, epnum, in iso_stream_find()
1153 usb_pipein(urb->pipe) ? "in" : "out"); in iso_stream_find()
1157 spin_unlock_irqrestore(&ehci->lock, flags); in iso_stream_find()
1161 /*-------------------------------------------------------------------------*/
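/*
 * Editorial sketch, not part of ehci-sched.c: the "average microseconds
 * per frame" bookkeeping that iso_stream_init() above computes for usbfs.
 * A high-speed endpoint is visited every bw_uperiod microframes (eight
 * microframes per frame); a split-transaction endpoint is visited every
 * bw_period frames and pays for both its start- and complete-split.  The
 * helper below only restates that arithmetic; the parameter names mirror
 * the excerpt.
 */
static unsigned iso_avg_usecs_per_frame(int highspeed, unsigned usecs,
                                        unsigned c_usecs,
                                        unsigned bw_uperiod,
                                        unsigned bw_period)
{
    if (highspeed)
        return usecs * 8 / bw_uperiod;       /* visits per frame * cost */
    return (usecs + c_usecs) / bw_period;    /* cost spread over the period */
}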
1163 /* ehci_iso_sched ops can be ITD-only or SITD-only */
1172 INIT_LIST_HEAD(&iso_sched->td_list); in iso_sched_alloc()
1186 dma_addr_t dma = urb->transfer_dma; in itd_sched_init()
1189 iso_sched->span = urb->number_of_packets * stream->uperiod; in itd_sched_init()
1191 /* figure out per-uframe itd fields that we'll need later in itd_sched_init()
1194 for (i = 0; i < urb->number_of_packets; i++) { in itd_sched_init()
1195 struct ehci_iso_packet *uframe = &iso_sched->packet[i]; in itd_sched_init()
1200 length = urb->iso_frame_desc[i].length; in itd_sched_init()
1201 buf = dma + urb->iso_frame_desc[i].offset; in itd_sched_init()
1205 if (unlikely(((i + 1) == urb->number_of_packets)) in itd_sched_init()
1206 && !(urb->transfer_flags & URB_NO_INTERRUPT)) in itd_sched_init()
1209 uframe->transaction = cpu_to_hc32(ehci, trans); in itd_sched_init()
1212 uframe->bufp = (buf & ~(u64)0x0fff); in itd_sched_init()
1214 if (unlikely((uframe->bufp != (buf & ~(u64)0x0fff)))) in itd_sched_init()
1215 uframe->cross = 1; in itd_sched_init()
1227 /* caller must hold ehci->lock! */ in iso_sched_free()
1228 list_splice(&iso_sched->td_list, &stream->free_list); in iso_sched_free()
1247 sched = iso_sched_alloc(urb->number_of_packets, mem_flags); in itd_urb_transaction()
1249 return -ENOMEM; in itd_urb_transaction()
1253 if (urb->interval < 8) in itd_urb_transaction()
1254 num_itds = 1 + (sched->span + 7) / 8; in itd_urb_transaction()
1256 num_itds = urb->number_of_packets; in itd_urb_transaction()
1259 spin_lock_irqsave(&ehci->lock, flags); in itd_urb_transaction()
1266 if (likely(!list_empty(&stream->free_list))) { in itd_urb_transaction()
1267 itd = list_first_entry(&stream->free_list, in itd_urb_transaction()
1269 if (itd->frame == ehci->now_frame) in itd_urb_transaction()
1271 list_del(&itd->itd_list); in itd_urb_transaction()
1272 itd_dma = itd->itd_dma; in itd_urb_transaction()
1275 spin_unlock_irqrestore(&ehci->lock, flags); in itd_urb_transaction()
1276 itd = dma_pool_alloc(ehci->itd_pool, mem_flags, in itd_urb_transaction()
1278 spin_lock_irqsave(&ehci->lock, flags); in itd_urb_transaction()
1281 spin_unlock_irqrestore(&ehci->lock, flags); in itd_urb_transaction()
1282 return -ENOMEM; in itd_urb_transaction()
1287 itd->itd_dma = itd_dma; in itd_urb_transaction()
1288 itd->frame = NO_FRAME; in itd_urb_transaction()
1289 list_add(&itd->itd_list, &sched->td_list); in itd_urb_transaction()
1291 spin_unlock_irqrestore(&ehci->lock, flags); in itd_urb_transaction()
1294 urb->hcpriv = sched; in itd_urb_transaction()
1295 urb->error_count = 0; in itd_urb_transaction()
1299 /*-------------------------------------------------------------------------*/
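/*
 * Editorial sketch, not part of ehci-sched.c: the recycling guard used by
 * itd_urb_transaction() above (and sitd_urb_transaction() later).  TDs are
 * recycled from a per-stream free list, but a TD that was last linked into
 * the frame the controller is scanning right now may still be read by the
 * hardware, so it is left on the list and a fresh TD is allocated instead.
 * The structure and field names are invented for illustration.
 */
#include <stddef.h>

struct fake_td {
    struct fake_td *next;     /* simple singly linked free list     */
    unsigned frame;           /* frame this TD was last linked into */
};

/*
 * Pop a reusable TD from the free list, or return NULL to tell the caller
 * to allocate a new one because the list head is still "hot".
 */
static struct fake_td *pop_reusable_td(struct fake_td **free_list,
                                       unsigned now_frame)
{
    struct fake_td *td = *free_list;

    if (!td || td->frame == now_frame)
        return NULL;          /* allocate fresh instead */

    *free_list = td->next;
    return td;
}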
1307 int usecs = stream->ps.usecs; in reserve_release_iso_bandwidth()
1308 int c_usecs = stream->ps.c_usecs; in reserve_release_iso_bandwidth()
1309 int tt_usecs = stream->ps.tt_usecs; in reserve_release_iso_bandwidth()
1312 if (stream->ps.phase == NO_FRAME) /* Bandwidth wasn't reserved */ in reserve_release_iso_bandwidth()
1314 uframe = stream->ps.bw_phase << 3; in reserve_release_iso_bandwidth()
1316 bandwidth_dbg(ehci, sign, "iso", &stream->ps); in reserve_release_iso_bandwidth()
1319 usecs = -usecs; in reserve_release_iso_bandwidth()
1320 c_usecs = -c_usecs; in reserve_release_iso_bandwidth()
1321 tt_usecs = -tt_usecs; in reserve_release_iso_bandwidth()
1324 if (!stream->splits) { /* High speed */ in reserve_release_iso_bandwidth()
1325 for (i = uframe + stream->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE; in reserve_release_iso_bandwidth()
1326 i += stream->ps.bw_uperiod) in reserve_release_iso_bandwidth()
1327 ehci->bandwidth[i] += usecs; in reserve_release_iso_bandwidth()
1329 } else { /* Full speed */ in reserve_release_iso_bandwidth()
1330 s_mask = stream->ps.cs_mask; in reserve_release_iso_bandwidth()
1333 /* NOTE: adjustment needed for frame overflow */ in reserve_release_iso_bandwidth()
1335 i += stream->ps.bw_uperiod) { in reserve_release_iso_bandwidth()
1336 for ((j = stream->ps.phase_uf, m = 1 << j); j < 8; in reserve_release_iso_bandwidth()
1339 ehci->bandwidth[i+j] += usecs; in reserve_release_iso_bandwidth()
1341 ehci->bandwidth[i+j] += c_usecs; in reserve_release_iso_bandwidth()
1351 tt = find_tt(stream->ps.udev); in reserve_release_iso_bandwidth()
1353 list_add_tail(&stream->ps.ps_list, &tt->ps_list); in reserve_release_iso_bandwidth()
1355 list_del(&stream->ps.ps_list); in reserve_release_iso_bandwidth()
1358 i += stream->ps.bw_period) in reserve_release_iso_bandwidth()
1359 tt->bandwidth[i] += tt_usecs; in reserve_release_iso_bandwidth()
1373 usecs = ehci->uframe_periodic_max - stream->ps.usecs; in itd_slot_ok()
1375 for (uframe &= stream->ps.bw_uperiod - 1; uframe < EHCI_BANDWIDTH_SIZE; in itd_slot_ok()
1376 uframe += stream->ps.bw_uperiod) { in itd_slot_ok()
1377 if (ehci->bandwidth[uframe] > usecs) in itd_slot_ok()
1393 unsigned frame, uf; in sitd_slot_ok() local
1395 mask = stream->ps.cs_mask << (uframe & 7); in sitd_slot_ok()
1397 /* for OUT, don't wrap SSPLIT into H-microframe 7 */ in sitd_slot_ok()
1398 if (((stream->ps.cs_mask & 0xff) << (uframe & 7)) >= (1 << 7)) in sitd_slot_ok()
1401 /* for IN, don't wrap CSPLIT into the next frame */ in sitd_slot_ok()
1406 uframe &= stream->ps.bw_uperiod - 1; in sitd_slot_ok()
1407 frame = uframe >> 3; in sitd_slot_ok()
1414 if (!tt_available(ehci, &stream->ps, tt, frame, uf)) in sitd_slot_ok()
1420 if (!tt_no_collision(ehci, stream->ps.bw_period, in sitd_slot_ok()
1421 stream->ps.udev, frame, mask)) in sitd_slot_ok()
1431 max_used = ehci->uframe_periodic_max - stream->ps.usecs; in sitd_slot_ok()
1432 for (tmp = stream->ps.cs_mask & 0xff; tmp; tmp >>= 1, uf++) { in sitd_slot_ok()
1433 if (ehci->bandwidth[uf] > max_used) in sitd_slot_ok()
1438 if (stream->ps.c_usecs) { in sitd_slot_ok()
1439 max_used = ehci->uframe_periodic_max - in sitd_slot_ok()
1440 stream->ps.c_usecs; in sitd_slot_ok()
1444 if ((stream->ps.cs_mask & tmp) == 0) in sitd_slot_ok()
1446 if (ehci->bandwidth[uf+i] > max_used) in sitd_slot_ok()
1451 uframe += stream->ps.bw_uperiod; in sitd_slot_ok()
1454 stream->ps.cs_mask <<= uframe & 7; in sitd_slot_ok()
1455 stream->splits = cpu_to_hc32(ehci, stream->ps.cs_mask); in sitd_slot_ok()
1462 * "as small as possible" to be cache-friendlier.) That limits the size
1465 * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
1480 unsigned mod = ehci->periodic_size << 3; in iso_stream_schedule()
1481 struct ehci_iso_sched *sched = urb->hcpriv; in iso_stream_schedule()
1482 bool empty = list_empty(&stream->td_list); in iso_stream_schedule()
1485 period = stream->uperiod; in iso_stream_schedule()
1486 span = sched->span; in iso_stream_schedule()
1487 if (!stream->highspeed) in iso_stream_schedule()
1492 ehci_to_hcd(ehci), urb->ep))) { in iso_stream_schedule()
1495 if (stream->ps.phase == NO_FRAME) { in iso_stream_schedule()
1497 struct ehci_tt *tt = find_tt(stream->ps.udev); in iso_stream_schedule()
1503 compute_tt_budget(ehci->tt_budget, tt); in iso_stream_schedule()
1505 start = ((-(++ehci->random_frame)) << 3) & (period - 1); in iso_stream_schedule()
1508 * Early uframes are more precious because full-speed in iso_stream_schedule()
1515 start--; in iso_stream_schedule()
1517 if (stream->highspeed) { in iso_stream_schedule()
1531 ehci_dbg(ehci, "iso sched full %p", urb); in iso_stream_schedule()
1532 status = -ENOSPC; in iso_stream_schedule()
1535 stream->ps.phase = (start >> 3) & in iso_stream_schedule()
1536 (stream->ps.period - 1); in iso_stream_schedule()
1537 stream->ps.bw_phase = stream->ps.phase & in iso_stream_schedule()
1538 (stream->ps.bw_period - 1); in iso_stream_schedule()
1539 stream->ps.phase_uf = start & 7; in iso_stream_schedule()
1545 start = (stream->ps.phase << 3) + stream->ps.phase_uf; in iso_stream_schedule()
1548 stream->next_uframe = start; in iso_stream_schedule()
1552 now = ehci_read_frame_index(ehci) & (mod - 1); in iso_stream_schedule()
1555 if (ehci->i_thresh) in iso_stream_schedule()
1556 next = now + ehci->i_thresh; /* uframe cache */ in iso_stream_schedule()
1558 next = (now + 2 + 7) & ~0x07; /* full frame cache */ in iso_stream_schedule()
1561 if (ehci->isoc_count == 0) in iso_stream_schedule()
1562 ehci->last_iso_frame = now >> 3; in iso_stream_schedule()
1565 * Use ehci->last_iso_frame as the base. There can't be any in iso_stream_schedule()
1568 base = ehci->last_iso_frame << 3; in iso_stream_schedule()
1569 next = (next - base) & (mod - 1); in iso_stream_schedule()
1570 start = (stream->next_uframe - base) & (mod - 1); in iso_stream_schedule()
1581 now2 = (now - base) & (mod - 1); in iso_stream_schedule()
1585 ehci_dbg(ehci, "request %p would overflow (%u-%u < %u mod %u)\n", in iso_stream_schedule()
1586 urb, stream->next_uframe, base, period, mod); in iso_stream_schedule()
1587 status = -EFBIG; in iso_stream_schedule()
1596 (urb->transfer_flags & URB_ISO_ASAP))) in iso_stream_schedule()
1605 if (urb->transfer_flags & URB_ISO_ASAP) in iso_stream_schedule()
1612 skip = (now2 - start + period - 1) & -period; in iso_stream_schedule()
1615 urb, start + base, span - period, now2 + base, in iso_stream_schedule()
1619 skip = span - period; in iso_stream_schedule()
1629 urb->error_count = skip / period; in iso_stream_schedule()
1631 sched->first_packet = urb->error_count; in iso_stream_schedule()
1636 start = next + ((start - next) & (period - 1)); in iso_stream_schedule()
1640 if (unlikely(start + span - period >= mod + wrap)) { in iso_stream_schedule()
1642 urb, start, span - period, mod + wrap); in iso_stream_schedule()
1643 status = -EFBIG; in iso_stream_schedule()
1648 stream->next_uframe = (start + skip) & (mod - 1); in iso_stream_schedule()
1650 /* report high speed start in uframes; full speed, in frames */ in iso_stream_schedule()
1651 urb->start_frame = start & (mod - 1); in iso_stream_schedule()
1652 if (!stream->highspeed) in iso_stream_schedule()
1653 urb->start_frame >>= 3; in iso_stream_schedule()
1658 urb->hcpriv = NULL; in iso_stream_schedule()
1662 /*-------------------------------------------------------------------------*/
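/*
 * Editorial sketch, not part of ehci-sched.c: the wrap-around arithmetic
 * that iso_stream_schedule() above leans on.  The microframe counter
 * wraps, so every comparison is made on distances taken relative to a
 * common base with "& (mod - 1)", which is valid because the schedule
 * size is a power of two.  The numbers below are arbitrary illustrative
 * values.
 */
#include <stdio.h>

int main(void)
{
    unsigned mod = 1024 << 3;       /* schedule length in microframes      */
    unsigned base = 8100;           /* e.g. last scanned frame, in uframes */
    unsigned now = 8180;            /* current microframe counter          */
    unsigned start = 150;           /* proposed start, already wrapped     */

    unsigned now2 = (now - base) & (mod - 1);
    unsigned start2 = (start - base) & (mod - 1);

    /* "the start is still in the future" becomes a plain comparison */
    printf("start %s ahead of now\n", start2 > now2 ? "is" : "is not");
    return 0;
}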
1671 itd->hw_next = EHCI_LIST_END(ehci); in itd_init()
1672 itd->hw_bufp[0] = stream->buf0; in itd_init()
1673 itd->hw_bufp[1] = stream->buf1; in itd_init()
1674 itd->hw_bufp[2] = stream->buf2; in itd_init()
1677 itd->index[i] = -1; in itd_init()
1691 struct ehci_iso_packet *uf = &iso_sched->packet[index]; in itd_patch()
1692 unsigned pg = itd->pg; in itd_patch()
1694 /* BUG_ON(pg == 6 && uf->cross); */ in itd_patch()
1697 itd->index[uframe] = index; in itd_patch()
1699 itd->hw_transaction[uframe] = uf->transaction; in itd_patch()
1700 itd->hw_transaction[uframe] |= cpu_to_hc32(ehci, pg << 12); in itd_patch()
1701 itd->hw_bufp[pg] |= cpu_to_hc32(ehci, uf->bufp & ~(u32)0); in itd_patch()
1702 itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(uf->bufp >> 32)); in itd_patch()
1705 if (unlikely(uf->cross)) { in itd_patch()
1706 u64 bufp = uf->bufp + 4096; in itd_patch()
1708 itd->pg = ++pg; in itd_patch()
1709 itd->hw_bufp[pg] |= cpu_to_hc32(ehci, bufp & ~(u32)0); in itd_patch()
1710 itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(bufp >> 32)); in itd_patch()
1715 itd_link(struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd) in itd_link() argument
1717 union ehci_shadow *prev = &ehci->pshadow[frame]; in itd_link()
1718 __hc32 *hw_p = &ehci->periodic[frame]; in itd_link()
1732 itd->itd_next = here; in itd_link()
1733 itd->hw_next = *hw_p; in itd_link()
1734 prev->itd = itd; in itd_link()
1735 itd->frame = frame; in itd_link()
1737 *hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD); in itd_link()
1749 unsigned next_uframe, uframe, frame; in itd_link_urb() local
1750 struct ehci_iso_sched *iso_sched = urb->hcpriv; in itd_link_urb()
1753 next_uframe = stream->next_uframe & (mod - 1); in itd_link_urb()
1755 if (unlikely(list_empty(&stream->td_list))) in itd_link_urb()
1756 ehci_to_hcd(ehci)->self.bandwidth_allocated in itd_link_urb()
1757 += stream->bandwidth; in itd_link_urb()
1759 if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) { in itd_link_urb()
1760 if (ehci->amd_pll_fix == 1) in itd_link_urb()
1764 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++; in itd_link_urb()
1767 for (packet = iso_sched->first_packet, itd = NULL; in itd_link_urb()
1768 packet < urb->number_of_packets;) { in itd_link_urb()
1771 /* BUG_ON(list_empty(&iso_sched->td_list)); */ in itd_link_urb()
1775 itd = list_entry(iso_sched->td_list.next, in itd_link_urb()
1777 list_move_tail(&itd->itd_list, &stream->td_list); in itd_link_urb()
1778 itd->stream = stream; in itd_link_urb()
1779 itd->urb = urb; in itd_link_urb()
1784 frame = next_uframe >> 3; in itd_link_urb()
1788 next_uframe += stream->uperiod; in itd_link_urb()
1789 next_uframe &= mod - 1; in itd_link_urb()
1793 if (((next_uframe >> 3) != frame) in itd_link_urb()
1794 || packet == urb->number_of_packets) { in itd_link_urb()
1795 itd_link(ehci, frame & (ehci->periodic_size - 1), itd); in itd_link_urb()
1799 stream->next_uframe = next_uframe; in itd_link_urb()
1803 urb->hcpriv = stream; in itd_link_urb()
1805 ++ehci->isoc_count; in itd_link_urb()
1817 * assuming (a) no more than two urbs per frame on this endpoint, and also
1823 struct urb *urb = itd->urb; in itd_complete()
1827 int urb_index = -1; in itd_complete()
1828 struct ehci_iso_stream *stream = itd->stream; in itd_complete()
1833 if (likely(itd->index[uframe] == -1)) in itd_complete()
1835 urb_index = itd->index[uframe]; in itd_complete()
1836 desc = &urb->iso_frame_desc[urb_index]; in itd_complete()
1838 t = hc32_to_cpup(ehci, &itd->hw_transaction[uframe]); in itd_complete()
1839 itd->hw_transaction[uframe] = 0; in itd_complete()
1843 urb->error_count++; in itd_complete()
1845 desc->status = usb_pipein(urb->pipe) in itd_complete()
1846 ? -ENOSR /* hc couldn't read */ in itd_complete()
1847 : -ECOMM; /* hc couldn't write */ in itd_complete()
1849 desc->status = -EOVERFLOW; in itd_complete()
1851 desc->status = -EPROTO; in itd_complete()
1855 desc->actual_length = EHCI_ITD_LENGTH(t); in itd_complete()
1856 urb->actual_length += desc->actual_length; in itd_complete()
1859 desc->status = 0; in itd_complete()
1860 desc->actual_length = EHCI_ITD_LENGTH(t); in itd_complete()
1861 urb->actual_length += desc->actual_length; in itd_complete()
1864 urb->error_count++; in itd_complete()
1869 if (likely((urb_index + 1) != urb->number_of_packets)) in itd_complete()
1874 * list_for_each_entry (itd, &stream->td_list, itd_list) in itd_complete()
1875 * BUG_ON(itd->urb == urb); in itd_complete()
1883 --ehci->isoc_count; in itd_complete()
1886 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--; in itd_complete()
1887 if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) { in itd_complete()
1888 if (ehci->amd_pll_fix == 1) in itd_complete()
1892 if (unlikely(list_is_singular(&stream->td_list))) in itd_complete()
1893 ehci_to_hcd(ehci)->self.bandwidth_allocated in itd_complete()
1894 -= stream->bandwidth; in itd_complete()
1897 itd->urb = NULL; in itd_complete()
1900 list_move_tail(&itd->itd_list, &stream->free_list); in itd_complete()
1903 if (list_empty(&stream->td_list)) { in itd_complete()
1904 list_splice_tail_init(&stream->free_list, in itd_complete()
1905 &ehci->cached_itd_list); in itd_complete()
1912 /*-------------------------------------------------------------------------*/
1917 int status = -EINVAL; in itd_submit()
1925 return -ENOMEM; in itd_submit()
1927 if (unlikely(urb->interval != stream->uperiod)) { in itd_submit()
1928 ehci_dbg(ehci, "can't change iso interval %d --> %d\n", in itd_submit()
1929 stream->uperiod, urb->interval); in itd_submit()
1936 __func__, urb->dev->devpath, urb, in itd_submit()
1937 usb_pipeendpoint(urb->pipe), in itd_submit()
1938 usb_pipein(urb->pipe) ? "in" : "out", in itd_submit()
1939 urb->transfer_buffer_length, in itd_submit()
1940 urb->number_of_packets, urb->interval, in itd_submit()
1952 spin_lock_irqsave(&ehci->lock, flags); in itd_submit()
1954 status = -ESHUTDOWN; in itd_submit()
1962 itd_link_urb(ehci, urb, ehci->periodic_size << 3, stream); in itd_submit()
1970 spin_unlock_irqrestore(&ehci->lock, flags); in itd_submit()
1975 /*-------------------------------------------------------------------------*/
1991 dma_addr_t dma = urb->transfer_dma; in sitd_sched_init()
1994 iso_sched->span = urb->number_of_packets * stream->ps.period; in sitd_sched_init()
1996 /* figure out per-frame sitd fields that we'll need later in sitd_sched_init()
1999 for (i = 0; i < urb->number_of_packets; i++) { in sitd_sched_init()
2000 struct ehci_iso_packet *packet = &iso_sched->packet[i]; in sitd_sched_init()
2005 length = urb->iso_frame_desc[i].length & 0x03ff; in sitd_sched_init()
2006 buf = dma + urb->iso_frame_desc[i].offset; in sitd_sched_init()
2009 if (((i + 1) == urb->number_of_packets) in sitd_sched_init()
2010 && !(urb->transfer_flags & URB_NO_INTERRUPT)) in sitd_sched_init()
2013 packet->transaction = cpu_to_hc32(ehci, trans); in sitd_sched_init()
2016 packet->bufp = buf; in sitd_sched_init()
2017 packet->buf1 = (buf + length) & ~0x0fff; in sitd_sched_init()
2018 if (packet->buf1 != (buf & ~(u64)0x0fff)) in sitd_sched_init()
2019 packet->cross = 1; in sitd_sched_init()
2021 /* OUT uses multiple start-splits */ in sitd_sched_init()
2022 if (stream->bEndpointAddress & USB_DIR_IN) in sitd_sched_init()
2027 packet->buf1 |= length; in sitd_sched_init()
2045 iso_sched = iso_sched_alloc(urb->number_of_packets, mem_flags); in sitd_urb_transaction()
2047 return -ENOMEM; in sitd_urb_transaction()
2052 spin_lock_irqsave(&ehci->lock, flags); in sitd_urb_transaction()
2053 for (i = 0; i < urb->number_of_packets; i++) { in sitd_urb_transaction()
2056 * for IN (using sitd->hw_backpointer, like a FSTN), which in sitd_urb_transaction()
2057 * means we never need two sitds for full speed packets. in sitd_urb_transaction()
2064 if (likely(!list_empty(&stream->free_list))) { in sitd_urb_transaction()
2065 sitd = list_first_entry(&stream->free_list, in sitd_urb_transaction()
2067 if (sitd->frame == ehci->now_frame) in sitd_urb_transaction()
2069 list_del(&sitd->sitd_list); in sitd_urb_transaction()
2070 sitd_dma = sitd->sitd_dma; in sitd_urb_transaction()
2073 spin_unlock_irqrestore(&ehci->lock, flags); in sitd_urb_transaction()
2074 sitd = dma_pool_alloc(ehci->sitd_pool, mem_flags, in sitd_urb_transaction()
2076 spin_lock_irqsave(&ehci->lock, flags); in sitd_urb_transaction()
2079 spin_unlock_irqrestore(&ehci->lock, flags); in sitd_urb_transaction()
2080 return -ENOMEM; in sitd_urb_transaction()
2085 sitd->sitd_dma = sitd_dma; in sitd_urb_transaction()
2086 sitd->frame = NO_FRAME; in sitd_urb_transaction()
2087 list_add(&sitd->sitd_list, &iso_sched->td_list); in sitd_urb_transaction()
2091 urb->hcpriv = iso_sched; in sitd_urb_transaction()
2092 urb->error_count = 0; in sitd_urb_transaction()
2094 spin_unlock_irqrestore(&ehci->lock, flags); in sitd_urb_transaction()
2098 /*-------------------------------------------------------------------------*/
2109 struct ehci_iso_packet *uf = &iso_sched->packet[index]; in sitd_patch()
2112 sitd->hw_next = EHCI_LIST_END(ehci); in sitd_patch()
2113 sitd->hw_fullspeed_ep = stream->address; in sitd_patch()
2114 sitd->hw_uframe = stream->splits; in sitd_patch()
2115 sitd->hw_results = uf->transaction; in sitd_patch()
2116 sitd->hw_backpointer = EHCI_LIST_END(ehci); in sitd_patch()
2118 bufp = uf->bufp; in sitd_patch()
2119 sitd->hw_buf[0] = cpu_to_hc32(ehci, bufp); in sitd_patch()
2120 sitd->hw_buf_hi[0] = cpu_to_hc32(ehci, bufp >> 32); in sitd_patch()
2122 sitd->hw_buf[1] = cpu_to_hc32(ehci, uf->buf1); in sitd_patch()
2123 if (uf->cross) in sitd_patch()
2125 sitd->hw_buf_hi[1] = cpu_to_hc32(ehci, bufp >> 32); in sitd_patch()
2126 sitd->index = index; in sitd_patch()
2130 sitd_link(struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd) in sitd_link() argument
2133 sitd->sitd_next = ehci->pshadow[frame]; in sitd_link()
2134 sitd->hw_next = ehci->periodic[frame]; in sitd_link()
2135 ehci->pshadow[frame].sitd = sitd; in sitd_link()
2136 sitd->frame = frame; in sitd_link()
2138 ehci->periodic[frame] = cpu_to_hc32(ehci, sitd->sitd_dma | Q_TYPE_SITD); in sitd_link()
2151 struct ehci_iso_sched *sched = urb->hcpriv; in sitd_link_urb()
2154 next_uframe = stream->next_uframe; in sitd_link_urb()
2156 if (list_empty(&stream->td_list)) in sitd_link_urb()
2158 ehci_to_hcd(ehci)->self.bandwidth_allocated in sitd_link_urb()
2159 += stream->bandwidth; in sitd_link_urb()
2161 if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) { in sitd_link_urb()
2162 if (ehci->amd_pll_fix == 1) in sitd_link_urb()
2166 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++; in sitd_link_urb()
2168 /* fill sITDs frame by frame */ in sitd_link_urb()
2169 for (packet = sched->first_packet, sitd = NULL; in sitd_link_urb()
2170 packet < urb->number_of_packets; in sitd_link_urb()
2174 BUG_ON(list_empty(&sched->td_list)); in sitd_link_urb()
2176 /* ASSERT: no itds for this endpoint in this frame */ in sitd_link_urb()
2178 sitd = list_entry(sched->td_list.next, in sitd_link_urb()
2180 list_move_tail(&sitd->sitd_list, &stream->td_list); in sitd_link_urb()
2181 sitd->stream = stream; in sitd_link_urb()
2182 sitd->urb = urb; in sitd_link_urb()
2185 sitd_link(ehci, (next_uframe >> 3) & (ehci->periodic_size - 1), in sitd_link_urb()
2188 next_uframe += stream->uperiod; in sitd_link_urb()
2190 stream->next_uframe = next_uframe & (mod - 1); in sitd_link_urb()
2194 urb->hcpriv = stream; in sitd_link_urb()
2196 ++ehci->isoc_count; in sitd_link_urb()
2200 /*-------------------------------------------------------------------------*/
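/*
 * Editorial sketch, not part of ehci-sched.c: how itd_link_urb() and
 * sitd_link_urb() above turn a running microframe cursor into frame-list
 * slots.  The cursor advances by the endpoint's microframe period and is
 * folded into the power-of-two frame list with shifts and masks.  The
 * sizes and starting values are illustrative.
 */
#include <stdio.h>

int main(void)
{
    unsigned periodic_size = 1024;          /* frames in the schedule      */
    unsigned mod = periodic_size << 3;      /* the same span in uframes    */
    unsigned uperiod = 16;                  /* one transfer every 2 frames */
    unsigned next_uframe = 8185;            /* arbitrary starting cursor   */
    int i;

    for (i = 0; i < 4; i++) {
        unsigned frame = (next_uframe >> 3) & (periodic_size - 1);

        printf("packet %d -> frame %u, uframe %u\n",
               i, frame, next_uframe & 7);
        next_uframe = (next_uframe + uperiod) & (mod - 1);
    }
    return 0;
}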
2211 * assuming (a) no more than two urbs per frame on this endpoint, and also
2217 struct urb *urb = sitd->urb; in sitd_complete()
2221 struct ehci_iso_stream *stream = sitd->stream; in sitd_complete()
2224 urb_index = sitd->index; in sitd_complete()
2225 desc = &urb->iso_frame_desc[urb_index]; in sitd_complete()
2226 t = hc32_to_cpup(ehci, &sitd->hw_results); in sitd_complete()
2230 urb->error_count++; in sitd_complete()
2232 desc->status = usb_pipein(urb->pipe) in sitd_complete()
2233 ? -ENOSR /* hc couldn't read */ in sitd_complete()
2234 : -ECOMM; /* hc couldn't write */ in sitd_complete()
2236 desc->status = -EOVERFLOW; in sitd_complete()
2238 desc->status = -EPROTO; in sitd_complete()
2241 urb->error_count++; in sitd_complete()
2243 desc->status = 0; in sitd_complete()
2244 desc->actual_length = desc->length - SITD_LENGTH(t); in sitd_complete()
2245 urb->actual_length += desc->actual_length; in sitd_complete()
2249 if ((urb_index + 1) != urb->number_of_packets) in sitd_complete()
2254 * list_for_each_entry (sitd, &stream->td_list, sitd_list) in sitd_complete()
2255 * BUG_ON(sitd->urb == urb); in sitd_complete()
2263 --ehci->isoc_count; in sitd_complete()
2266 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--; in sitd_complete()
2267 if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) { in sitd_complete()
2268 if (ehci->amd_pll_fix == 1) in sitd_complete()
2272 if (list_is_singular(&stream->td_list)) in sitd_complete()
2273 ehci_to_hcd(ehci)->self.bandwidth_allocated in sitd_complete()
2274 -= stream->bandwidth; in sitd_complete()
2277 sitd->urb = NULL; in sitd_complete()
2280 list_move_tail(&sitd->sitd_list, &stream->free_list); in sitd_complete()
2283 if (list_empty(&stream->td_list)) { in sitd_complete()
2284 list_splice_tail_init(&stream->free_list, in sitd_complete()
2285 &ehci->cached_sitd_list); in sitd_complete()
2296 int status = -EINVAL; in sitd_submit()
2304 return -ENOMEM; in sitd_submit()
2306 if (urb->interval != stream->ps.period) { in sitd_submit()
2307 ehci_dbg(ehci, "can't change iso interval %d --> %d\n", in sitd_submit()
2308 stream->ps.period, urb->interval); in sitd_submit()
2314 "submit %p dev%s ep%d%s-iso len %d\n", in sitd_submit()
2315 urb, urb->dev->devpath, in sitd_submit()
2316 usb_pipeendpoint(urb->pipe), in sitd_submit()
2317 usb_pipein(urb->pipe) ? "in" : "out", in sitd_submit()
2318 urb->transfer_buffer_length); in sitd_submit()
2329 spin_lock_irqsave(&ehci->lock, flags); in sitd_submit()
2331 status = -ESHUTDOWN; in sitd_submit()
2339 sitd_link_urb(ehci, urb, ehci->periodic_size << 3, stream); in sitd_submit()
2347 spin_unlock_irqrestore(&ehci->lock, flags); in sitd_submit()
2352 /*-------------------------------------------------------------------------*/
2356 unsigned uf, now_frame, frame; in scan_isoc() local
2357 unsigned fmask = ehci->periodic_size - 1; in scan_isoc()
2365 * Touches as few pages as possible: cache-friendly. in scan_isoc()
2367 if (ehci->rh_state >= EHCI_RH_RUNNING) { in scan_isoc()
2372 now_frame = (ehci->last_iso_frame - 1) & fmask; in scan_isoc()
2375 ehci->now_frame = now_frame; in scan_isoc()
2377 frame = ehci->last_iso_frame; in scan_isoc()
2380 /* Scan each element in frame's queue for completions */ in scan_isoc()
2381 q_p = &ehci->pshadow[frame]; in scan_isoc()
2382 hw_p = &ehci->periodic[frame]; in scan_isoc()
2383 q.ptr = q_p->ptr; in scan_isoc()
2394 * frame is current. in scan_isoc()
2396 if (frame == now_frame && live) { in scan_isoc()
2399 if (q.itd->hw_transaction[uf] & in scan_isoc()
2404 q_p = &q.itd->itd_next; in scan_isoc()
2405 hw_p = &q.itd->hw_next; in scan_isoc()
2407 q.itd->hw_next); in scan_isoc()
2419 *q_p = q.itd->itd_next; in scan_isoc()
2420 if (!ehci->use_dummy_qh || in scan_isoc()
2421 q.itd->hw_next != EHCI_LIST_END(ehci)) in scan_isoc()
2422 *hw_p = q.itd->hw_next; in scan_isoc()
2424 *hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma); in scan_isoc()
2425 type = Q_NEXT_TYPE(ehci, q.itd->hw_next); in scan_isoc()
2435 * frame is current. in scan_isoc()
2437 if (((frame == now_frame) || in scan_isoc()
2438 (((frame + 1) & fmask) == now_frame)) in scan_isoc()
2440 && (q.sitd->hw_results & SITD_ACTIVE(ehci))) { in scan_isoc()
2442 q_p = &q.sitd->sitd_next; in scan_isoc()
2443 hw_p = &q.sitd->hw_next; in scan_isoc()
2444 type = Q_NEXT_TYPE(ehci, q.sitd->hw_next); in scan_isoc()
2454 *q_p = q.sitd->sitd_next; in scan_isoc()
2455 if (!ehci->use_dummy_qh || in scan_isoc()
2456 q.sitd->hw_next != EHCI_LIST_END(ehci)) in scan_isoc()
2457 *hw_p = q.sitd->hw_next; in scan_isoc()
2459 *hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma); in scan_isoc()
2460 type = Q_NEXT_TYPE(ehci, q.sitd->hw_next); in scan_isoc()
2466 ehci_dbg(ehci, "corrupt type %d frame %d shadow %p\n", in scan_isoc()
2467 type, frame, q.ptr); in scan_isoc()
2478 if (unlikely(modified && ehci->isoc_count > 0)) in scan_isoc()
2482 /* Stop when we have reached the current frame */ in scan_isoc()
2483 if (frame == now_frame) in scan_isoc()
2486 /* The last frame may still have active siTDs */ in scan_isoc()
2487 ehci->last_iso_frame = frame; in scan_isoc()
2488 frame = (frame + 1) & fmask; in scan_isoc()