Lines Matching +full:ep +full:- +full:side

1 // SPDX-License-Identifier: GPL-2.0
26 * until you reach a non-link TRB.
59 #include <linux/dma-mapping.h>
61 #include "xhci-trace.h"
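The traversal rule noted in the header comment above — keep following link TRBs until you reach a non-link TRB — can be sketched as a small helper. This is an illustrative sketch, not the driver's code (the driver's next_trb() and inc_deq() below additionally handle cycle-state toggling); it only assumes the trb_is_link() predicate and segment layout shown later in this listing.

/* Illustrative only: walk past link TRBs to the next normal TRB. */
static union xhci_trb *skip_link_trbs(struct xhci_segment **seg,
				      union xhci_trb *trb)
{
	while (trb_is_link(trb)) {
		*seg = (*seg)->next;	/* link TRB points at the next segment */
		trb = (*seg)->trbs;	/* continue at its first TRB */
	}
	return trb;
}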
76 if (!seg || !trb || trb < seg->trbs) in xhci_trb_virt_to_dma()
79 segment_offset = trb - seg->trbs; in xhci_trb_virt_to_dma()
82 return seg->dma + (segment_offset * sizeof(*trb)); in xhci_trb_virt_to_dma()
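Worked example of the mapping above: a TRB is 16 bytes (four 32-bit fields), so the TRB at index 5 of a segment whose seg->dma is 0x1000 maps to 0x1000 + 5 * 16 = 0x1050.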
87 return TRB_TYPE_NOOP_LE32(trb->generic.field[3]); in trb_is_noop()
92 return TRB_TYPE_LINK_LE32(trb->link.control); in trb_is_link()
97 return trb == &seg->trbs[TRBS_PER_SEGMENT - 1]; in last_trb_on_seg()
103 return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg); in last_trb_on_ring()
108 return le32_to_cpu(trb->link.control) & LINK_TOGGLE; in link_trb_toggles_cycle()
113 struct urb_priv *urb_priv = td->urb->hcpriv; in last_td_in_urb()
115 return urb_priv->num_tds_done == urb_priv->num_tds; in last_td_in_urb()
120 return ((le32_to_cpu(ring->dequeue->event_cmd.flags) & TRB_CYCLE) == in unhandled_event_trb()
121 ring->cycle_state); in unhandled_event_trb()
126 struct urb_priv *urb_priv = urb->hcpriv; in inc_td_cnt()
128 urb_priv->num_tds_done++; in inc_td_cnt()
135 trb->link.control &= cpu_to_le32(~TRB_CHAIN); in trb_to_noop()
137 trb->generic.field[0] = 0; in trb_to_noop()
138 trb->generic.field[1] = 0; in trb_to_noop()
139 trb->generic.field[2] = 0; in trb_to_noop()
141 trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE); in trb_to_noop()
142 trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type)); in trb_to_noop()
154 *seg = (*seg)->next; in next_trb()
155 *trb = ((*seg)->trbs); in next_trb()
169 if (ring->type == TYPE_EVENT) { in inc_deq()
170 if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) { in inc_deq()
171 ring->dequeue++; in inc_deq()
174 if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue)) in inc_deq()
175 ring->cycle_state ^= 1; in inc_deq()
176 ring->deq_seg = ring->deq_seg->next; in inc_deq()
177 ring->dequeue = ring->deq_seg->trbs; in inc_deq()
185 if (!trb_is_link(ring->dequeue)) { in inc_deq()
186 if (last_trb_on_seg(ring->deq_seg, ring->dequeue)) in inc_deq()
189 ring->dequeue++; in inc_deq()
192 while (trb_is_link(ring->dequeue)) { in inc_deq()
193 ring->deq_seg = ring->deq_seg->next; in inc_deq()
194 ring->dequeue = ring->deq_seg->trbs; in inc_deq()
198 if (link_trb_count++ > ring->num_segs) { in inc_deq()
214 while (trb_is_link(ring->enqueue)) { in inc_enq_past_link()
225 if (!xhci_link_chain_quirk(xhci, ring->type)) { in inc_enq_past_link()
226 ring->enqueue->link.control &= cpu_to_le32(~TRB_CHAIN); in inc_enq_past_link()
227 ring->enqueue->link.control |= cpu_to_le32(chain); in inc_enq_past_link()
232 ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE); in inc_enq_past_link()
235 if (link_trb_toggles_cycle(ring->enqueue)) in inc_enq_past_link()
236 ring->cycle_state ^= 1; in inc_enq_past_link()
238 ring->enq_seg = ring->enq_seg->next; in inc_enq_past_link()
239 ring->enqueue = ring->enq_seg->trbs; in inc_enq_past_link()
243 if (link_trb_count++ > ring->num_segs) { in inc_enq_past_link()
266 chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN; in inc_enq()
268 if (last_trb_on_seg(ring->enq_seg, ring->enqueue)) { in inc_enq()
273 ring->enqueue++; in inc_enq()
281 if (trb_is_link(ring->enqueue) && (chain || more_trbs_coming)) in inc_enq()
296 start_dma = xhci_trb_virt_to_dma(td->start_seg, td->start_trb); in trb_in_td()
297 cur_seg = td->start_seg; in trb_in_td()
304 &cur_seg->trbs[TRBS_PER_SEGMENT - 1]); in trb_in_td()
306 end_trb_dma = xhci_trb_virt_to_dma(cur_seg, td->end_trb); in trb_in_td()
319 (suspect_dma >= cur_seg->dma && in trb_in_td()
329 cur_seg = cur_seg->next; in trb_in_td()
330 start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]); in trb_in_td()
331 } while (cur_seg != td->start_seg); in trb_in_td()
344 struct xhci_segment *enq_seg = ring->enq_seg; in xhci_num_trbs_free()
345 union xhci_trb *enq = ring->enqueue; in xhci_num_trbs_free()
352 enq_seg = enq_seg->next; in xhci_num_trbs_free()
353 enq = enq_seg->trbs; in xhci_num_trbs_free()
357 if (enq == ring->dequeue) in xhci_num_trbs_free()
358 return ring->num_segs * (TRBS_PER_SEGMENT - 1); in xhci_num_trbs_free()
361 if (ring->deq_seg == enq_seg && ring->dequeue >= enq) in xhci_num_trbs_free()
362 return free + (ring->dequeue - enq); in xhci_num_trbs_free()
363 last_on_seg = &enq_seg->trbs[TRBS_PER_SEGMENT - 1]; in xhci_num_trbs_free()
364 free += last_on_seg - enq; in xhci_num_trbs_free()
365 enq_seg = enq_seg->next; in xhci_num_trbs_free()
366 enq = enq_seg->trbs; in xhci_num_trbs_free()
367 } while (i++ < ring->num_segs); in xhci_num_trbs_free()
386 enq_used = ring->enqueue - ring->enq_seg->trbs; in xhci_ring_expansion_needed()
389 trbs_past_seg = enq_used + num_trbs - (TRBS_PER_SEGMENT - 1); in xhci_ring_expansion_needed()
401 if (trb_is_link(ring->enqueue) && ring->enq_seg->next->trbs == ring->dequeue) in xhci_ring_expansion_needed()
404 new_segs = 1 + (trbs_past_seg / (TRBS_PER_SEGMENT - 1)); in xhci_ring_expansion_needed()
405 seg = ring->enq_seg; in xhci_ring_expansion_needed()
408 seg = seg->next; in xhci_ring_expansion_needed()
409 if (seg == ring->deq_seg) { in xhci_ring_expansion_needed()
414 new_segs--; in xhci_ring_expansion_needed()
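Worked example of the expansion math above, assuming TRBS_PER_SEGMENT = 256 (its value in xhci.h): with the enqueue pointer 250 TRBs into its segment (enq_used = 250) and a 20-TRB request, trbs_past_seg = 250 + 20 - 255 = 15, so new_segs = 1 + 15/255 = 1 new segment is needed.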
423 if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING)) in xhci_ring_cmd_db()
430 writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]); in xhci_ring_cmd_db()
432 readl(&xhci->dba->doorbell[0]); in xhci_ring_cmd_db()
437 return mod_delayed_work(system_wq, &xhci->cmd_timer, in xhci_mod_cmd_timer()
438 msecs_to_jiffies(xhci->current_cmd->timeout_ms)); in xhci_mod_cmd_timer()
443 return list_first_entry_or_null(&xhci->cmd_list, struct xhci_command, in xhci_next_queued_cmd()
448 * Turn all commands on command ring with status set to "aborted" to no-op trbs.
450 * This must be called with command ring stopped and xhci->lock held.
457 /* Turn all aborted commands in list to no-ops, then restart */ in xhci_handle_stopped_cmd_ring()
458 list_for_each_entry(i_cmd, &xhci->cmd_list, cmd_list) { in xhci_handle_stopped_cmd_ring()
460 if (i_cmd->status != COMP_COMMAND_ABORTED) in xhci_handle_stopped_cmd_ring()
463 i_cmd->status = COMP_COMMAND_RING_STOPPED; in xhci_handle_stopped_cmd_ring()
465 xhci_dbg(xhci, "Turn aborted command %p to no-op\n", in xhci_handle_stopped_cmd_ring()
466 i_cmd->command_trb); in xhci_handle_stopped_cmd_ring()
468 trb_to_noop(i_cmd->command_trb, TRB_CMD_NOOP); in xhci_handle_stopped_cmd_ring()
472 * completion event is received for these no-op commands in xhci_handle_stopped_cmd_ring()
476 xhci->cmd_ring_state = CMD_RING_STATE_RUNNING; in xhci_handle_stopped_cmd_ring()
479 if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) && in xhci_handle_stopped_cmd_ring()
480 !(xhci->xhc_state & XHCI_STATE_DYING)) { in xhci_handle_stopped_cmd_ring()
481 xhci->current_cmd = cur_cmd; in xhci_handle_stopped_cmd_ring()
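A hypothetical caller, shown only to illustrate the locking contract stated in the header comment above (command ring stopped, xhci->lock held); it is not part of the driver:

/* Illustrative only: assert the documented contract before restarting. */
static void example_restart_stopped_ring(struct xhci_hcd *xhci,
					 struct xhci_command *cur_cmd)
{
	lockdep_assert_held(&xhci->lock);
	xhci_handle_stopped_cmd_ring(xhci, cur_cmd);
}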
488 /* Must be called with xhci->lock held, releases and acquires lock back */
491 struct xhci_segment *new_seg = xhci->cmd_ring->deq_seg; in xhci_abort_cmd_ring()
492 union xhci_trb *new_deq = xhci->cmd_ring->dequeue; in xhci_abort_cmd_ring()
498 reinit_completion(&xhci->cmd_ring_stop_completion); in xhci_abort_cmd_ring()
513 xhci_write_64(xhci, crcr | CMD_RING_ABORT, &xhci->op_regs->cmd_ring); in xhci_abort_cmd_ring()
517 * seconds, then the driver handles it as if the host died (-ENODEV). in xhci_abort_cmd_ring()
518 * In the future we should distinguish between -ENODEV and -ETIMEDOUT in xhci_abort_cmd_ring()
519 * and try to recover a -ETIMEDOUT with a host controller reset. in xhci_abort_cmd_ring()
521 ret = xhci_handshake(&xhci->op_regs->cmd_ring, in xhci_abort_cmd_ring()
535 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_abort_cmd_ring()
536 ret = wait_for_completion_timeout(&xhci->cmd_ring_stop_completion, in xhci_abort_cmd_ring()
538 spin_lock_irqsave(&xhci->lock, flags); in xhci_abort_cmd_ring()
553 __le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id]; in xhci_ring_ep_doorbell()
554 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; in xhci_ring_ep_doorbell() local
555 unsigned int ep_state = ep->ep_state; in xhci_ring_ep_doorbell()
580 struct xhci_virt_ep *ep; in ring_doorbell_for_active_rings() local
582 ep = &xhci->devs[slot_id]->eps[ep_index]; in ring_doorbell_for_active_rings()
585 if (!(ep->ep_state & EP_HAS_STREAMS)) { in ring_doorbell_for_active_rings()
586 if (ep->ring && !(list_empty(&ep->ring->td_list))) in ring_doorbell_for_active_rings()
591 for (stream_id = 1; stream_id < ep->stream_info->num_streams; in ring_doorbell_for_active_rings()
593 struct xhci_stream_info *stream_info = ep->stream_info; in ring_doorbell_for_active_rings()
594 if (!list_empty(&stream_info->stream_rings[stream_id]->td_list)) in ring_doorbell_for_active_rings()
619 if (!xhci->devs[slot_id]) { in xhci_get_virt_ep()
624 return &xhci->devs[slot_id]->eps[ep_index]; in xhci_get_virt_ep()
628 struct xhci_virt_ep *ep, in xhci_virt_ep_to_ring() argument
632 if (!(ep->ep_state & EP_HAS_STREAMS)) in xhci_virt_ep_to_ring()
633 return ep->ring; in xhci_virt_ep_to_ring()
635 if (!ep->stream_info) in xhci_virt_ep_to_ring()
638 if (stream_id == 0 || stream_id >= ep->stream_info->num_streams) { in xhci_virt_ep_to_ring()
640 stream_id, ep->vdev->slot_id, ep->ep_index); in xhci_virt_ep_to_ring()
644 return ep->stream_info->stream_rings[stream_id]; in xhci_virt_ep_to_ring()
655 struct xhci_virt_ep *ep; in xhci_triad_to_transfer_ring() local
657 ep = xhci_get_virt_ep(xhci, slot_id, ep_index); in xhci_triad_to_transfer_ring()
658 if (!ep) in xhci_triad_to_transfer_ring()
661 return xhci_virt_ep_to_ring(xhci, ep, stream_id); in xhci_triad_to_transfer_ring()
676 struct xhci_virt_ep *ep; in xhci_get_hw_deq() local
678 ep = &vdev->eps[ep_index]; in xhci_get_hw_deq()
680 if (ep->ep_state & EP_HAS_STREAMS) { in xhci_get_hw_deq()
681 st_ctx = &ep->stream_info->stream_ctx_array[stream_id]; in xhci_get_hw_deq()
682 return le64_to_cpu(st_ctx->stream_ring); in xhci_get_hw_deq()
684 ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index); in xhci_get_hw_deq()
685 return le64_to_cpu(ep_ctx->deq); in xhci_get_hw_deq()
692 struct xhci_virt_device *dev = xhci->devs[slot_id]; in xhci_move_dequeue_past_td()
693 struct xhci_virt_ep *ep = &dev->eps[ep_index]; in xhci_move_dequeue_past_td() local
711 return -ENODEV; in xhci_move_dequeue_past_td()
715 new_seg = ep_ring->deq_seg; in xhci_move_dequeue_past_td()
716 new_deq = ep_ring->dequeue; in xhci_move_dequeue_past_td()
717 new_cycle = le32_to_cpu(td->end_trb->generic.field[3]) & TRB_CYCLE; in xhci_move_dequeue_past_td()
731 if (new_deq == td->end_trb) in xhci_move_dequeue_past_td()
741 if (new_deq == ep->ring->dequeue) { in xhci_move_dequeue_past_td()
743 return -EINVAL; in xhci_move_dequeue_past_td()
753 return -EINVAL; in xhci_move_dequeue_past_td()
756 if ((ep->ep_state & SET_DEQ_PENDING)) { in xhci_move_dequeue_past_td()
759 return -EBUSY; in xhci_move_dequeue_past_td()
766 return -ENOMEM; in xhci_move_dequeue_past_td()
780 ep->queued_deq_seg = new_seg; in xhci_move_dequeue_past_td()
781 ep->queued_deq_ptr = new_deq; in xhci_move_dequeue_past_td()
791 ep->ep_state |= SET_DEQ_PENDING; in xhci_move_dequeue_past_td()
802 struct xhci_segment *seg = td->start_seg; in td_to_noop()
803 union xhci_trb *trb = td->start_trb; in td_to_noop()
809 if (flip_cycle && trb != td->start_trb && trb != td->end_trb) in td_to_noop()
810 trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE); in td_to_noop()
812 if (trb == td->end_trb) in td_to_noop()
822 struct urb *urb = cur_td->urb; in xhci_giveback_urb_in_irq()
823 struct urb_priv *urb_priv = urb->hcpriv; in xhci_giveback_urb_in_irq()
824 struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus); in xhci_giveback_urb_in_irq()
826 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { in xhci_giveback_urb_in_irq()
827 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--; in xhci_giveback_urb_in_irq()
828 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { in xhci_giveback_urb_in_irq()
829 if (xhci->quirks & XHCI_AMD_PLL_FIX) in xhci_giveback_urb_in_irq()
842 struct device *dev = xhci_to_hcd(xhci)->self.sysdev; in xhci_unmap_td_bounce_buffer()
843 struct xhci_segment *seg = td->bounce_seg; in xhci_unmap_td_bounce_buffer()
844 struct urb *urb = td->urb; in xhci_unmap_td_bounce_buffer()
851 dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len, in xhci_unmap_td_bounce_buffer()
856 dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len, in xhci_unmap_td_bounce_buffer()
859 if (urb->num_sgs) { in xhci_unmap_td_bounce_buffer()
860 len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf, in xhci_unmap_td_bounce_buffer()
861 seg->bounce_len, seg->bounce_offs); in xhci_unmap_td_bounce_buffer()
862 if (len != seg->bounce_len) in xhci_unmap_td_bounce_buffer()
864 len, seg->bounce_len); in xhci_unmap_td_bounce_buffer()
866 memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf, in xhci_unmap_td_bounce_buffer()
867 seg->bounce_len); in xhci_unmap_td_bounce_buffer()
869 seg->bounce_len = 0; in xhci_unmap_td_bounce_buffer()
870 seg->bounce_offs = 0; in xhci_unmap_td_bounce_buffer()
879 urb = td->urb; in xhci_td_cleanup()
886 * length, urb->actual_length will be a very big number (since it's in xhci_td_cleanup()
889 if (urb->actual_length > urb->transfer_buffer_length) { in xhci_td_cleanup()
891 urb->transfer_buffer_length, urb->actual_length); in xhci_td_cleanup()
892 urb->actual_length = 0; in xhci_td_cleanup()
896 if (!list_empty(&td->td_list)) in xhci_td_cleanup()
897 list_del_init(&td->td_list); in xhci_td_cleanup()
899 if (!list_empty(&td->cancelled_td_list)) in xhci_td_cleanup()
900 list_del_init(&td->cancelled_td_list); in xhci_td_cleanup()
905 if ((urb->actual_length != urb->transfer_buffer_length && in xhci_td_cleanup()
906 (urb->transfer_flags & URB_SHORT_NOT_OK)) || in xhci_td_cleanup()
907 (status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc))) in xhci_td_cleanup()
909 urb, urb->actual_length, in xhci_td_cleanup()
910 urb->transfer_buffer_length, status); in xhci_td_cleanup()
913 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) in xhci_td_cleanup()
923 ring->dequeue = td->end_trb; in xhci_dequeue_td()
924 ring->deq_seg = td->end_seg; in xhci_dequeue_td()
931 static void xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep) in xhci_giveback_invalidated_tds() argument
936 list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, in xhci_giveback_invalidated_tds()
939 ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb); in xhci_giveback_invalidated_tds()
941 if (td->cancel_status == TD_CLEARED) { in xhci_giveback_invalidated_tds()
942 xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n", in xhci_giveback_invalidated_tds()
943 __func__, td->urb); in xhci_giveback_invalidated_tds()
944 xhci_td_cleanup(ep->xhci, td, ring, td->status); in xhci_giveback_invalidated_tds()
946 xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n", in xhci_giveback_invalidated_tds()
947 __func__, td->urb, td->cancel_status); in xhci_giveback_invalidated_tds()
949 if (ep->xhci->xhc_state & XHCI_STATE_DYING) in xhci_giveback_invalidated_tds()
962 ret = -ENOMEM; in xhci_reset_halted_ep()
966 xhci_dbg(xhci, "%s-reset ep %u, slot %u\n", in xhci_reset_halted_ep()
979 struct xhci_virt_ep *ep, in xhci_handle_halted_endpoint() argument
983 unsigned int slot_id = ep->vdev->slot_id; in xhci_handle_halted_endpoint()
990 if (ep->vdev->flags & VDEV_PORT_ERROR) in xhci_handle_halted_endpoint()
991 return -ENODEV; in xhci_handle_halted_endpoint()
993 /* add td to cancelled list and let reset ep handler take care of it */ in xhci_handle_halted_endpoint()
995 ep->ep_state |= EP_HARD_CLEAR_TOGGLE; in xhci_handle_halted_endpoint()
996 if (td && list_empty(&td->cancelled_td_list)) { in xhci_handle_halted_endpoint()
997 list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list); in xhci_handle_halted_endpoint()
998 td->cancel_status = TD_HALTED; in xhci_handle_halted_endpoint()
1002 if (ep->ep_state & EP_HALTED) { in xhci_handle_halted_endpoint()
1003 xhci_dbg(xhci, "Reset ep command for ep_index %d already pending\n", in xhci_handle_halted_endpoint()
1004 ep->ep_index); in xhci_handle_halted_endpoint()
1008 err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type); in xhci_handle_halted_endpoint()
1012 ep->ep_state |= EP_HALTED; in xhci_handle_halted_endpoint()
1020 * Fix up the ep ring first, so HW stops executing cancelled TDs.
1022 * We're also in the event handler, so we can't get re-interrupted if another
1028 static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep) in xhci_invalidate_cancelled_tds() argument
1036 unsigned int slot_id = ep->vdev->slot_id; in xhci_invalidate_cancelled_tds()
1043 if (ep->ep_state & SET_DEQ_PENDING) in xhci_invalidate_cancelled_tds()
1046 xhci = ep->xhci; in xhci_invalidate_cancelled_tds()
1048 list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) { in xhci_invalidate_cancelled_tds()
1052 td->start_seg, td->start_trb), in xhci_invalidate_cancelled_tds()
1053 td->urb->stream_id, td->urb); in xhci_invalidate_cancelled_tds()
1054 list_del_init(&td->td_list); in xhci_invalidate_cancelled_tds()
1055 ring = xhci_urb_to_transfer_ring(xhci, td->urb); in xhci_invalidate_cancelled_tds()
1058 td->urb, td->urb->stream_id); in xhci_invalidate_cancelled_tds()
1067 hw_deq = xhci_get_hw_deq(xhci, ep->vdev, ep->ep_index, in xhci_invalidate_cancelled_tds()
1068 td->urb->stream_id); in xhci_invalidate_cancelled_tds()
1071 if (td->cancel_status == TD_HALTED || trb_in_td(td, hw_deq)) { in xhci_invalidate_cancelled_tds()
1072 switch (td->cancel_status) { in xhci_invalidate_cancelled_tds()
1073 case TD_CLEARED: /* TD is already no-op */ in xhci_invalidate_cancelled_tds()
1080 if (cached_td->urb->stream_id != td->urb->stream_id) { in xhci_invalidate_cancelled_tds()
1084 td->urb->stream_id, td->urb); in xhci_invalidate_cancelled_tds()
1085 td->cancel_status = TD_CLEARING_CACHE_DEFERRED; in xhci_invalidate_cancelled_tds()
1092 td->urb, cached_td->urb, in xhci_invalidate_cancelled_tds()
1093 td->urb->stream_id); in xhci_invalidate_cancelled_tds()
1095 cached_td->cancel_status = TD_CLEARED; in xhci_invalidate_cancelled_tds()
1098 td->cancel_status = TD_CLEARING_CACHE; in xhci_invalidate_cancelled_tds()
1104 td->cancel_status = TD_CLEARED; in xhci_invalidate_cancelled_tds()
1112 err = xhci_move_dequeue_past_td(xhci, slot_id, ep->ep_index, in xhci_invalidate_cancelled_tds()
1113 cached_td->urb->stream_id, in xhci_invalidate_cancelled_tds()
1116 /* Failed to move past cached td, just set cached TDs to no-op */ in xhci_invalidate_cancelled_tds()
1117 list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) { in xhci_invalidate_cancelled_tds()
1123 if (td->cancel_status != TD_CLEARING_CACHE && in xhci_invalidate_cancelled_tds()
1124 td->cancel_status != TD_CLEARING_CACHE_DEFERRED) in xhci_invalidate_cancelled_tds()
1127 td->urb); in xhci_invalidate_cancelled_tds()
1129 td->cancel_status = TD_CLEARED; in xhci_invalidate_cancelled_tds()
1140 * Call under xhci->lock on a stopped endpoint.
1142 void xhci_process_cancelled_tds(struct xhci_virt_ep *ep) in xhci_process_cancelled_tds() argument
1144 xhci_invalidate_cancelled_tds(ep); in xhci_process_cancelled_tds()
1145 xhci_giveback_invalidated_tds(ep); in xhci_process_cancelled_tds()
1150 * Only call for non-running rings without streams.
1152 static struct xhci_td *find_halted_td(struct xhci_virt_ep *ep) in find_halted_td() argument
1157 if (!list_empty(&ep->ring->td_list)) { /* Not streams compatible */ in find_halted_td()
1158 hw_deq = xhci_get_hw_deq(ep->xhci, ep->vdev, ep->ep_index, 0); in find_halted_td()
1160 td = list_first_entry(&ep->ring->td_list, struct xhci_td, td_list); in find_halted_td()
1174 * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
1181 struct xhci_virt_ep *ep; in xhci_handle_cmd_stop_ep() local
1188 if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) { in xhci_handle_cmd_stop_ep()
1189 if (!xhci->devs[slot_id]) in xhci_handle_cmd_stop_ep()
1195 ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); in xhci_handle_cmd_stop_ep()
1196 ep = xhci_get_virt_ep(xhci, slot_id, ep_index); in xhci_handle_cmd_stop_ep()
1197 if (!ep) in xhci_handle_cmd_stop_ep()
1200 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index); in xhci_handle_cmd_stop_ep()
1207 * reset the host side endpoint first. in xhci_handle_cmd_stop_ep()
1213 * Proper error code is unknown here; it would be -EPIPE if the device side in xhci_handle_cmd_stop_ep()
1214 * of the endpoint halted (aka STALL), and -EPROTO if not (transaction error). in xhci_handle_cmd_stop_ep()
1215 * We use -EPROTO; if the device is stalled it should return a stall error on in xhci_handle_cmd_stop_ep()
1216 * the next transfer, which then will return -EPIPE, and the device side stall is in xhci_handle_cmd_stop_ep()
1221 xhci_dbg(xhci, "Stop ep completion raced with stall\n"); in xhci_handle_cmd_stop_ep()
1226 if (ep->ep_state & EP_HALTED) in xhci_handle_cmd_stop_ep()
1229 if (ep->ep_state & EP_HAS_STREAMS) { in xhci_handle_cmd_stop_ep()
1233 td = find_halted_td(ep); in xhci_handle_cmd_stop_ep()
1235 td->status = -EPROTO; in xhci_handle_cmd_stop_ep()
1237 /* reset ep, reset handler cleans up cancelled tds */ in xhci_handle_cmd_stop_ep()
1238 err = xhci_handle_halted_endpoint(xhci, ep, td, reset_type); in xhci_handle_cmd_stop_ep()
1239 xhci_dbg(xhci, "Stop ep completion resetting ep, status %d\n", err); in xhci_handle_cmd_stop_ep()
1243 /* Reset EP handler will clean up cancelled TDs */ in xhci_handle_cmd_stop_ep()
1244 ep->ep_state &= ~EP_STOP_CMD_PENDING; in xhci_handle_cmd_stop_ep()
1249 * EP is a Context State Error, and EP stays Stopped. in xhci_handle_cmd_stop_ep()
1252 * Endpoint later. EP state is now Stopped and EP_HALTED in xhci_handle_cmd_stop_ep()
1253 * still set because Reset EP handler will run after us. in xhci_handle_cmd_stop_ep()
1255 if (ep->ep_state & EP_HALTED) in xhci_handle_cmd_stop_ep()
1258 * On some HCs EP state remains Stopped for some tens of in xhci_handle_cmd_stop_ep()
1265 * Keep retrying until the EP starts and stops again or in xhci_handle_cmd_stop_ep()
1267 * driver bug may cause stopping an already stopped EP). in xhci_handle_cmd_stop_ep()
1269 if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100))) in xhci_handle_cmd_stop_ep()
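For context, time_is_before_jiffies(t) evaluates true once t lies in the past, so the test above fires only after more than 100 ms have elapsed since ep->stop_time. A standalone illustration of the idiom (the deadline variable is illustrative, not in the driver):

	/* Illustrative only: stop retrying once the 100 ms window expires. */
	unsigned long deadline = ep->stop_time + msecs_to_jiffies(100);

	if (time_is_before_jiffies(deadline))
		pr_debug("stop ep retry window expired\n");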
1273 /* Race, HW handled stop ep cmd before ep was running */ in xhci_handle_cmd_stop_ep()
1274 xhci_dbg(xhci, "Stop ep completion ctx error, ctx_state %d\n", in xhci_handle_cmd_stop_ep()
1279 ep->ep_state &= ~EP_STOP_CMD_PENDING; in xhci_handle_cmd_stop_ep()
1292 xhci_invalidate_cancelled_tds(ep); in xhci_handle_cmd_stop_ep()
1293 ep->ep_state &= ~EP_STOP_CMD_PENDING; in xhci_handle_cmd_stop_ep()
1296 xhci_giveback_invalidated_tds(ep); in xhci_handle_cmd_stop_ep()
1305 list_for_each_entry_safe(cur_td, tmp, &ring->td_list, td_list) { in xhci_kill_ring_urbs()
1306 list_del_init(&cur_td->td_list); in xhci_kill_ring_urbs()
1308 if (!list_empty(&cur_td->cancelled_td_list)) in xhci_kill_ring_urbs()
1309 list_del_init(&cur_td->cancelled_td_list); in xhci_kill_ring_urbs()
1313 inc_td_cnt(cur_td->urb); in xhci_kill_ring_urbs()
1315 xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN); in xhci_kill_ring_urbs()
1324 struct xhci_virt_ep *ep; in xhci_kill_endpoint_urbs() local
1327 ep = xhci_get_virt_ep(xhci, slot_id, ep_index); in xhci_kill_endpoint_urbs()
1328 if (!ep) in xhci_kill_endpoint_urbs()
1331 if ((ep->ep_state & EP_HAS_STREAMS) || in xhci_kill_endpoint_urbs()
1332 (ep->ep_state & EP_GETTING_NO_STREAMS)) { in xhci_kill_endpoint_urbs()
1335 for (stream_id = 1; stream_id < ep->stream_info->num_streams; in xhci_kill_endpoint_urbs()
1337 ring = ep->stream_info->stream_rings[stream_id]; in xhci_kill_endpoint_urbs()
1342 "Killing URBs for slot ID %u, ep index %u, stream %u", in xhci_kill_endpoint_urbs()
1347 ring = ep->ring; in xhci_kill_endpoint_urbs()
1351 "Killing URBs for slot ID %u, ep index %u", in xhci_kill_endpoint_urbs()
1356 list_for_each_entry_safe(cur_td, tmp, &ep->cancelled_td_list, in xhci_kill_endpoint_urbs()
1358 list_del_init(&cur_td->cancelled_td_list); in xhci_kill_endpoint_urbs()
1359 inc_td_cnt(cur_td->urb); in xhci_kill_endpoint_urbs()
1362 xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN); in xhci_kill_endpoint_urbs()
1372 * Call with xhci->lock held.
1373 * lock is released and re-acquired while giving back urb.
1380 if (xhci->xhc_state & XHCI_STATE_DYING) in xhci_hc_died()
1383 notify = !(xhci->xhc_state & XHCI_STATE_REMOVING); in xhci_hc_died()
1386 xhci->xhc_state |= XHCI_STATE_DYING; in xhci_hc_died()
1391 for (i = 0; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) { in xhci_hc_died()
1392 if (!xhci->devs[i]) in xhci_hc_died()
1416 struct xhci_virt_ep *ep; in xhci_handle_cmd_set_deq() local
1422 ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); in xhci_handle_cmd_set_deq()
1423 stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2])); in xhci_handle_cmd_set_deq()
1424 ep = xhci_get_virt_ep(xhci, slot_id, ep_index); in xhci_handle_cmd_set_deq()
1425 if (!ep) in xhci_handle_cmd_set_deq()
1428 ep_ring = xhci_virt_ep_to_ring(xhci, ep, stream_id); in xhci_handle_cmd_set_deq()
1436 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index); in xhci_handle_cmd_set_deq()
1437 slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx); in xhci_handle_cmd_set_deq()
1441 if (ep->ep_state & EP_HAS_STREAMS) { in xhci_handle_cmd_set_deq()
1442 stream_ctx = &ep->stream_info->stream_ctx_array[stream_id]; in xhci_handle_cmd_set_deq()
1443 trace_xhci_handle_cmd_set_deq_stream(ep->stream_info, stream_id); in xhci_handle_cmd_set_deq()
1455 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n"); in xhci_handle_cmd_set_deq()
1457 slot_state = le32_to_cpu(slot_ctx->dev_state); in xhci_handle_cmd_set_deq()
1460 "Slot state = %u, EP state = %u", in xhci_handle_cmd_set_deq()
1481 if (ep->ep_state & EP_HAS_STREAMS) { in xhci_handle_cmd_set_deq()
1482 deq = le64_to_cpu(stream_ctx->stream_ring) & TR_DEQ_PTR_MASK; in xhci_handle_cmd_set_deq()
1493 if (xhci->quirks & XHCI_CDNS_SCTX_QUIRK) { in xhci_handle_cmd_set_deq()
1494 stream_ctx->reserved[0] = 0; in xhci_handle_cmd_set_deq()
1495 stream_ctx->reserved[1] = 0; in xhci_handle_cmd_set_deq()
1498 deq = le64_to_cpu(ep_ctx->deq) & TR_DEQ_PTR_MASK; in xhci_handle_cmd_set_deq()
1502 if (xhci_trb_virt_to_dma(ep->queued_deq_seg, in xhci_handle_cmd_set_deq()
1503 ep->queued_deq_ptr) == deq) { in xhci_handle_cmd_set_deq()
1507 ep_ring->deq_seg = ep->queued_deq_seg; in xhci_handle_cmd_set_deq()
1508 ep_ring->dequeue = ep->queued_deq_ptr; in xhci_handle_cmd_set_deq()
1511 xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n", in xhci_handle_cmd_set_deq()
1512 ep->queued_deq_seg, ep->queued_deq_ptr); in xhci_handle_cmd_set_deq()
1516 list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, in xhci_handle_cmd_set_deq()
1518 ep_ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb); in xhci_handle_cmd_set_deq()
1519 if (td->cancel_status == TD_CLEARING_CACHE) { in xhci_handle_cmd_set_deq()
1520 td->cancel_status = TD_CLEARED; in xhci_handle_cmd_set_deq()
1521 xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n", in xhci_handle_cmd_set_deq()
1522 __func__, td->urb); in xhci_handle_cmd_set_deq()
1523 xhci_td_cleanup(ep->xhci, td, ep_ring, td->status); in xhci_handle_cmd_set_deq()
1525 xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n", in xhci_handle_cmd_set_deq()
1526 __func__, td->urb, td->cancel_status); in xhci_handle_cmd_set_deq()
1530 ep->ep_state &= ~SET_DEQ_PENDING; in xhci_handle_cmd_set_deq()
1531 ep->queued_deq_seg = NULL; in xhci_handle_cmd_set_deq()
1532 ep->queued_deq_ptr = NULL; in xhci_handle_cmd_set_deq()
1535 if (!list_empty(&ep->cancelled_td_list)) { in xhci_handle_cmd_set_deq()
1536 xhci_dbg(ep->xhci, "%s: Pending TDs to clear, continuing with invalidation\n", in xhci_handle_cmd_set_deq()
1538 xhci_invalidate_cancelled_tds(ep); in xhci_handle_cmd_set_deq()
1542 xhci_giveback_invalidated_tds(ep); in xhci_handle_cmd_set_deq()
1545 xhci_dbg(ep->xhci, "%s: All TDs cleared, ring doorbell\n", __func__); in xhci_handle_cmd_set_deq()
1553 struct xhci_virt_ep *ep; in xhci_handle_cmd_reset_ep() local
1557 ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); in xhci_handle_cmd_reset_ep()
1558 ep = xhci_get_virt_ep(xhci, slot_id, ep_index); in xhci_handle_cmd_reset_ep()
1559 if (!ep) in xhci_handle_cmd_reset_ep()
1562 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index); in xhci_handle_cmd_reset_ep()
1569 "Ignoring reset ep completion code of %u", cmd_comp_code); in xhci_handle_cmd_reset_ep()
1571 /* Cleanup cancelled TDs as ep is stopped. May queue a Set TR Deq cmd */ in xhci_handle_cmd_reset_ep()
1572 xhci_invalidate_cancelled_tds(ep); in xhci_handle_cmd_reset_ep()
1575 ep->ep_state &= ~EP_HALTED; in xhci_handle_cmd_reset_ep()
1577 xhci_giveback_invalidated_tds(ep); in xhci_handle_cmd_reset_ep()
1580 if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP) in xhci_handle_cmd_reset_ep()
1588 command->slot_id = slot_id; in xhci_handle_cmd_enable_slot()
1590 command->slot_id = 0; in xhci_handle_cmd_enable_slot()
1599 virt_dev = xhci->devs[slot_id]; in xhci_handle_cmd_disable_slot()
1603 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); in xhci_handle_cmd_disable_slot()
1606 if (xhci->quirks & XHCI_EP_LIMIT_QUIRK) in xhci_handle_cmd_disable_slot()
1610 xhci->dcbaa->dev_context_ptrs[slot_id] = 0; in xhci_handle_cmd_disable_slot()
1611 xhci->devs[slot_id] = NULL; in xhci_handle_cmd_disable_slot()
1628 virt_dev = xhci->devs[slot_id]; in xhci_handle_cmd_config_ep()
1631 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx); in xhci_handle_cmd_config_ep()
1637 add_flags = le32_to_cpu(ctrl_ctx->add_flags); in xhci_handle_cmd_config_ep()
1640 ep_index = xhci_last_valid_endpoint(add_flags) - 1; in xhci_handle_cmd_config_ep()
1642 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, ep_index); in xhci_handle_cmd_config_ep()
1653 vdev = xhci->devs[slot_id]; in xhci_handle_cmd_addr_dev()
1656 slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx); in xhci_handle_cmd_addr_dev()
1665 vdev = xhci->devs[slot_id]; in xhci_handle_cmd_reset_dev()
1671 slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx); in xhci_handle_cmd_reset_dev()
1680 if (!(xhci->quirks & XHCI_NEC_HOST)) { in xhci_handle_cmd_nec_get_fw()
1681 xhci_warn(xhci, "WARN NEC_GET_FW command on non-NEC host\n"); in xhci_handle_cmd_nec_get_fw()
1686 NEC_FW_MAJOR(le32_to_cpu(event->status)), in xhci_handle_cmd_nec_get_fw()
1687 NEC_FW_MINOR(le32_to_cpu(event->status))); in xhci_handle_cmd_nec_get_fw()
1692 list_del(&cmd->cmd_list); in xhci_complete_del_and_free_cmd()
1694 if (cmd->completion) { in xhci_complete_del_and_free_cmd()
1695 cmd->status = comp_code; in xhci_complete_del_and_free_cmd()
1696 cmd->comp_param = comp_param; in xhci_complete_del_and_free_cmd()
1697 complete(cmd->completion); in xhci_complete_del_and_free_cmd()
1706 xhci->current_cmd = NULL; in xhci_cleanup_command_queue()
1707 list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list) in xhci_cleanup_command_queue()
1722 spin_lock_irqsave(&xhci->lock, flags); in xhci_handle_command_timeout()
1728 if (!xhci->current_cmd || delayed_work_pending(&xhci->cmd_timer)) { in xhci_handle_command_timeout()
1729 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_handle_command_timeout()
1733 cmd_field3 = le32_to_cpu(xhci->current_cmd->command_trb->generic.field[3]); in xhci_handle_command_timeout()
1734 usbsts = readl(&xhci->op_regs->status); in xhci_handle_command_timeout()
1739 struct xhci_virt_ep *ep; in xhci_handle_command_timeout() local
1743 ep = xhci_get_virt_ep(xhci, TRB_TO_SLOT_ID(cmd_field3), in xhci_handle_command_timeout()
1745 if (ep) in xhci_handle_command_timeout()
1746 ep->ep_state &= ~EP_STOP_CMD_PENDING; in xhci_handle_command_timeout()
1754 xhci->current_cmd->status = COMP_COMMAND_ABORTED; in xhci_handle_command_timeout()
1757 hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); in xhci_handle_command_timeout()
1763 if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) && in xhci_handle_command_timeout()
1766 xhci->cmd_ring_state = CMD_RING_STATE_ABORTED; in xhci_handle_command_timeout()
1773 if (xhci->xhc_state & XHCI_STATE_REMOVING) { in xhci_handle_command_timeout()
1782 xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd); in xhci_handle_command_timeout()
1785 spin_unlock_irqrestore(&xhci->lock, flags); in xhci_handle_command_timeout()
1792 unsigned int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); in handle_cmd_completion()
1793 u32 status = le32_to_cpu(event->status); in handle_cmd_completion()
1806 cmd_dma = le64_to_cpu(event->cmd_trb); in handle_cmd_completion()
1807 cmd_trb = xhci->cmd_ring->dequeue; in handle_cmd_completion()
1809 trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic, cmd_dma); in handle_cmd_completion()
1811 cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status)); in handle_cmd_completion()
1815 complete_all(&xhci->cmd_ring_stop_completion); in handle_cmd_completion()
1819 cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, in handle_cmd_completion()
1831 cmd = list_first_entry(&xhci->cmd_list, struct xhci_command, cmd_list); in handle_cmd_completion()
1833 cancel_delayed_work(&xhci->cmd_timer); in handle_cmd_completion()
1835 if (cmd->command_trb != xhci->cmd_ring->dequeue) { in handle_cmd_completion()
1848 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; in handle_cmd_completion()
1849 if (cmd->status == COMP_COMMAND_ABORTED) { in handle_cmd_completion()
1850 if (xhci->current_cmd == cmd) in handle_cmd_completion()
1851 xhci->current_cmd = NULL; in handle_cmd_completion()
1856 cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3])); in handle_cmd_completion()
1865 if (!cmd->completion) in handle_cmd_completion()
1875 le32_to_cpu(cmd_trb->generic.field[3]))); in handle_cmd_completion()
1876 if (!cmd->completion) in handle_cmd_completion()
1882 le32_to_cpu(cmd_trb->generic.field[3]))); in handle_cmd_completion()
1886 /* Is this an aborted command turned to NO-OP? */ in handle_cmd_completion()
1887 if (cmd->status == COMP_COMMAND_RING_STOPPED) in handle_cmd_completion()
1892 le32_to_cpu(cmd_trb->generic.field[3]))); in handle_cmd_completion()
1900 le32_to_cpu(cmd_trb->generic.field[3])); in handle_cmd_completion()
1915 if (!list_is_singular(&xhci->cmd_list)) { in handle_cmd_completion()
1916 xhci->current_cmd = list_first_entry(&cmd->cmd_list, in handle_cmd_completion()
1919 } else if (xhci->current_cmd == cmd) { in handle_cmd_completion()
1920 xhci->current_cmd = NULL; in handle_cmd_completion()
1926 inc_deq(xhci, xhci->cmd_ring); in handle_cmd_completion()
1933 if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST)) in handle_vendor_event()
1934 handle_cmd_completion(xhci, &event->event_cmd); in handle_vendor_event()
1943 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3])); in handle_device_notification()
1944 if (!xhci->devs[slot_id]) { in handle_device_notification()
1952 udev = xhci->devs[slot_id]->udev; in handle_device_notification()
1953 if (udev && udev->parent) in handle_device_notification()
1954 usb_wakeup_notification(udev->parent, udev->portnum); in handle_device_notification()
1960 * As per ThunderX2 errata-129, a USB 2 device may come up as USB 1
1977 writel(0x6F, hcd->regs + 0x1048); in xhci_cavium_reset_phy_quirk()
1979 /* De-assert the PHY reset */ in xhci_cavium_reset_phy_quirk()
1980 writel(0x7F, hcd->regs + 0x1048); in xhci_cavium_reset_phy_quirk()
1982 pll_lock_check = readl(hcd->regs + 0x1070); in xhci_cavium_reset_phy_quirk()
1983 } while (!(pll_lock_check & 0x1) && --retry_count); in xhci_cavium_reset_phy_quirk()
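The retry loop above could also be expressed with the generic iopoll helper; a sketch, not a suggested driver change, assuming atomic context (handle_port_status runs under xhci->lock), hence the _atomic variant:

#include <linux/iopoll.h>

	u32 pll_lock_check;
	int err;

	/* Spin until bit 0 of the PLL-lock register sets, ~10 us between reads, 1 ms total. */
	err = readl_poll_timeout_atomic(hcd->regs + 0x1070, pll_lock_check,
					pll_lock_check & 0x1, 10, 1000);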
1999 if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) in handle_port_status()
2003 port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0])); in handle_port_status()
2004 max_ports = HCS_MAX_PORTS(xhci->hcs_params1); in handle_port_status()
2012 port = &xhci->hw_ports[port_id - 1]; in handle_port_status()
2013 if (!port || !port->rhub || port->hcd_portnum == DUPLICATE_ENTRY) { in handle_port_status()
2020 if (port->slot_id) in handle_port_status()
2021 vdev = xhci->devs[port->slot_id]; in handle_port_status()
2024 if (port->rhub == &xhci->usb3_rhub && xhci->shared_hcd == NULL) { in handle_port_status()
2030 hcd = port->rhub->hcd; in handle_port_status()
2031 bus_state = &port->rhub->bus_state; in handle_port_status()
2032 hcd_portnum = port->hcd_portnum; in handle_port_status()
2033 portsc = readl(port->addr); in handle_port_status()
2035 xhci_dbg(xhci, "Port change event, %d-%d, id %d, portsc: 0x%x\n", in handle_port_status()
2036 hcd->self.busnum, hcd_portnum + 1, port_id, portsc); in handle_port_status()
2040 if (hcd->state == HC_STATE_SUSPENDED) { in handle_port_status()
2047 vdev->flags |= VDEV_PORT_ERROR; in handle_port_status()
2049 vdev->flags &= ~VDEV_PORT_ERROR; in handle_port_status()
2055 cmd_reg = readl(&xhci->op_regs->command); in handle_port_status()
2067 bus_state->port_remote_wakeup |= 1 << hcd_portnum; in handle_port_status()
2069 usb_hcd_start_port_resume(&hcd->self, hcd_portnum); in handle_port_status()
2076 } else if (!test_bit(hcd_portnum, &bus_state->resuming_ports)) { in handle_port_status()
2078 port->resume_timestamp = jiffies + in handle_port_status()
2080 set_bit(hcd_portnum, &bus_state->resuming_ports); in handle_port_status()
2083 * usb device auto-resume latency around 40 ms. in handle_port_status()
2085 set_bit(HCD_FLAG_POLL_RH, &hcd->flags); in handle_port_status()
2086 mod_timer(&hcd->rh_timer, in handle_port_status()
2087 port->resume_timestamp); in handle_port_status()
2088 usb_hcd_start_port_resume(&hcd->self, hcd_portnum); in handle_port_status()
2099 complete(&port->u3exit_done); in handle_port_status()
2102 * U3Exit state after a host-initiated resume. If it's a device in handle_port_status()
2108 xhci_ring_device(xhci, port->slot_id); in handle_port_status()
2109 if (bus_state->port_remote_wakeup & (1 << hcd_portnum)) { in handle_port_status()
2111 usb_wakeup_notification(hcd->self.root_hub, in handle_port_status()
2119 * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or in handle_port_status()
2123 if (hcd->speed < HCD_USB3 && port->rexit_active) { in handle_port_status()
2124 complete(&port->rexit_done); in handle_port_status()
2125 port->rexit_active = false; in handle_port_status()
2130 if (hcd->speed < HCD_USB3) { in handle_port_status()
2132 if ((xhci->quirks & XHCI_RESET_PLL_ON_DISCONNECT) && in handle_port_status()
2147 * xHCI port-status-change events occur when the "or" of all the in handle_port_status()
2148 * status-change bits in the portsc register changes from 0 to 1. in handle_port_status()
2154 __func__, hcd->self.busnum); in handle_port_status()
2155 set_bit(HCD_FLAG_POLL_RH, &hcd->flags); in handle_port_status()
2156 spin_unlock(&xhci->lock); in handle_port_status()
2159 spin_lock(&xhci->lock); in handle_port_status()
2163 struct xhci_virt_ep *ep) in xhci_clear_hub_tt_buffer() argument
2166 * As part of low/full-speed endpoint-halt processing in xhci_clear_hub_tt_buffer()
2169 if (td->urb->dev->tt && !usb_pipeint(td->urb->pipe) && in xhci_clear_hub_tt_buffer()
2170 (td->urb->dev->tt->hub != xhci_to_hcd(xhci)->self.root_hub) && in xhci_clear_hub_tt_buffer()
2171 !(ep->ep_state & EP_CLEARING_TT)) { in xhci_clear_hub_tt_buffer()
2172 ep->ep_state |= EP_CLEARING_TT; in xhci_clear_hub_tt_buffer()
2173 td->urb->ep->hcpriv = td->urb->dev; in xhci_clear_hub_tt_buffer()
2174 if (usb_hub_clear_tt_buffer(td->urb)) in xhci_clear_hub_tt_buffer()
2175 ep->ep_state &= ~EP_CLEARING_TT; in xhci_clear_hub_tt_buffer()
2184 * External device side is also halted in functional stall cases. Class driver
2189 /* Stall halts both internal and device side of the endpoint */ in xhci_halted_host_endpoint()
2213 * treat as not-an-error. in xhci_is_vendor_info_code()
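A minimal sketch of the check this comment describes, assuming the xHCI spec's vendor-defined completion-code ranges (192-223 are errors, 224-255 are informational); the helper name is illustrative:

/* Sketch: vendor-defined informational codes are treated as success. */
static bool comp_code_is_vendor_info(u32 trb_comp_code)
{
	return trb_comp_code >= 224 && trb_comp_code <= 255;
}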
2223 static void finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, in finish_td() argument
2229 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index); in finish_td()
2259 * ep command completion in finish_td()
2261 if ((ep->ep_state & EP_HALTED) && in finish_td()
2262 !list_empty(&td->cancelled_td_list)) { in finish_td()
2263 xhci_dbg(xhci, "Already resolving halted ep for 0x%llx\n", in finish_td()
2265 td->start_seg, td->start_trb)); in finish_td()
2272 xhci_clear_hub_tt_buffer(xhci, td, ep); in finish_td()
2273 xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET); in finish_td()
2279 * To clear the host side halt we need to issue a reset endpoint in finish_td()
2282 * Class drivers clear the device side halt from a functional in finish_td()
2286 if (ep->ep_index != 0) in finish_td()
2287 xhci_clear_hub_tt_buffer(xhci, td, ep); in finish_td()
2289 xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET); in finish_td()
2296 xhci_dequeue_td(xhci, td, ep_ring, td->status); in finish_td()
2303 union xhci_trb *trb = td->start_trb; in sum_trb_lengths()
2304 struct xhci_segment *seg = td->start_seg; in sum_trb_lengths()
2308 sum += TRB_LEN(le32_to_cpu(trb->generic.field[2])); in sum_trb_lengths()
2316 static void process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, in process_ctrl_td() argument
2325 trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3])); in process_ctrl_td()
2326 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index); in process_ctrl_td()
2327 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); in process_ctrl_td()
2328 requested = td->urb->transfer_buffer_length; in process_ctrl_td()
2329 remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); in process_ctrl_td()
2336 td->status = -ESHUTDOWN; in process_ctrl_td()
2339 td->status = 0; in process_ctrl_td()
2342 td->status = 0; in process_ctrl_td()
2346 td->urb->actual_length = remaining; in process_ctrl_td()
2353 td->urb->actual_length = 0; in process_ctrl_td()
2357 td->urb->actual_length = requested - remaining; in process_ctrl_td()
2360 td->urb->actual_length = requested; in process_ctrl_td()
2373 trb_comp_code, ep->ep_index); in process_ctrl_td()
2378 td->urb->actual_length = requested - remaining; in process_ctrl_td()
2379 else if (!td->urb_length_set) in process_ctrl_td()
2380 td->urb->actual_length = 0; in process_ctrl_td()
2394 td->urb_length_set = true; in process_ctrl_td()
2395 td->urb->actual_length = requested - remaining; in process_ctrl_td()
2401 if (!td->urb_length_set) in process_ctrl_td()
2402 td->urb->actual_length = requested; in process_ctrl_td()
2405 finish_td(xhci, ep, ep_ring, td, trb_comp_code); in process_ctrl_td()
2411 static void process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, in process_isoc_td() argument
2423 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); in process_isoc_td()
2424 urb_priv = td->urb->hcpriv; in process_isoc_td()
2425 idx = urb_priv->num_tds_done; in process_isoc_td()
2426 frame = &td->urb->iso_frame_desc[idx]; in process_isoc_td()
2427 requested = frame->length; in process_isoc_td()
2428 remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); in process_isoc_td()
2429 ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2])); in process_isoc_td()
2430 short_framestatus = td->urb->transfer_flags & URB_SHORT_NOT_OK ? in process_isoc_td()
2431 -EREMOTEIO : 0; in process_isoc_td()
2437 if (td->error_mid_td) in process_isoc_td()
2440 frame->status = short_framestatus; in process_isoc_td()
2444 frame->status = 0; in process_isoc_td()
2447 frame->status = short_framestatus; in process_isoc_td()
2451 frame->status = -ECOMM; in process_isoc_td()
2457 frame->status = -EOVERFLOW; in process_isoc_td()
2458 if (ep_trb != td->end_trb) in process_isoc_td()
2459 td->error_mid_td = true; in process_isoc_td()
2462 frame->status = -EXDEV; in process_isoc_td()
2464 if (ep_trb != td->end_trb) in process_isoc_td()
2465 td->error_mid_td = true; in process_isoc_td()
2469 frame->status = -EPROTO; in process_isoc_td()
2472 frame->status = -EPROTO; in process_isoc_td()
2474 if (ep_trb != td->end_trb) in process_isoc_td()
2475 td->error_mid_td = true; in process_isoc_td()
2482 frame->status = short_framestatus; in process_isoc_td()
2493 frame->status = -1; in process_isoc_td()
2497 if (td->urb_length_set) in process_isoc_td()
2501 frame->actual_length = sum_trb_lengths(td, ep_trb) + in process_isoc_td()
2502 ep_trb_len - remaining; in process_isoc_td()
2504 frame->actual_length = requested; in process_isoc_td()
2506 td->urb->actual_length += frame->actual_length; in process_isoc_td()
2510 if (td->error_mid_td && ep_trb != td->end_trb) { in process_isoc_td()
2512 td->urb_length_set = true; in process_isoc_td()
2515 finish_td(xhci, ep, ep_ring, td, trb_comp_code); in process_isoc_td()
2519 struct xhci_virt_ep *ep, int status) in skip_isoc_td() argument
2525 urb_priv = td->urb->hcpriv; in skip_isoc_td()
2526 idx = urb_priv->num_tds_done; in skip_isoc_td()
2527 frame = &td->urb->iso_frame_desc[idx]; in skip_isoc_td()
2530 frame->status = -EXDEV; in skip_isoc_td()
2533 frame->actual_length = 0; in skip_isoc_td()
2535 xhci_dequeue_td(xhci, td, ep->ring, status); in skip_isoc_td()
2541 static void process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, in process_bulk_intr_td() argument
2549 slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx); in process_bulk_intr_td()
2550 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); in process_bulk_intr_td()
2551 remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); in process_bulk_intr_td()
2552 ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2])); in process_bulk_intr_td()
2553 requested = td->urb->transfer_buffer_length; in process_bulk_intr_td()
2557 ep->err_count = 0; in process_bulk_intr_td()
2559 if (ep_trb != td->end_trb || remaining) { in process_bulk_intr_td()
2561 xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n", in process_bulk_intr_td()
2562 td->urb->ep->desc.bEndpointAddress, in process_bulk_intr_td()
2565 td->status = 0; in process_bulk_intr_td()
2568 td->status = 0; in process_bulk_intr_td()
2571 td->urb->actual_length = remaining; in process_bulk_intr_td()
2574 /* stopped on ep trb with invalid length, exclude it */ in process_bulk_intr_td()
2575 td->urb->actual_length = sum_trb_lengths(td, ep_trb); in process_bulk_intr_td()
2578 if (xhci->quirks & XHCI_NO_SOFT_RETRY || in process_bulk_intr_td()
2579 (ep->err_count++ > MAX_SOFT_RETRY) || in process_bulk_intr_td()
2580 le32_to_cpu(slot_ctx->tt_info) & TT_SLOT) in process_bulk_intr_td()
2583 td->status = 0; in process_bulk_intr_td()
2585 xhci_handle_halted_endpoint(xhci, ep, td, EP_SOFT_RESET); in process_bulk_intr_td()
2592 if (ep_trb == td->end_trb) in process_bulk_intr_td()
2593 td->urb->actual_length = requested - remaining; in process_bulk_intr_td()
2595 td->urb->actual_length = in process_bulk_intr_td()
2597 ep_trb_len - remaining; in process_bulk_intr_td()
2602 td->urb->actual_length = 0; in process_bulk_intr_td()
2605 finish_td(xhci, ep, ep_ring, td, trb_comp_code); in process_bulk_intr_td()
2609 static int handle_transferless_tx_event(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, in handle_transferless_tx_event() argument
2617 xhci_dbg(xhci, "Stream transaction error ep %u no id\n", ep->ep_index); in handle_transferless_tx_event()
2618 if (ep->err_count++ > MAX_SOFT_RETRY) in handle_transferless_tx_event()
2619 xhci_handle_halted_endpoint(xhci, ep, NULL, EP_HARD_RESET); in handle_transferless_tx_event()
2621 xhci_handle_halted_endpoint(xhci, ep, NULL, EP_SOFT_RESET); in handle_transferless_tx_event()
2628 xhci_err(xhci, "Transfer event %u for unknown stream ring slot %u ep %u\n", in handle_transferless_tx_event()
2629 trb_comp_code, ep->vdev->slot_id, ep->ep_index); in handle_transferless_tx_event()
2630 return -ENODEV; in handle_transferless_tx_event()
2638 switch (ring->old_trb_comp_code) { in xhci_spurious_success_tx_event()
2640 return xhci->quirks & XHCI_SPURIOUS_SUCCESS; in xhci_spurious_success_tx_event()
2644 return xhci->quirks & XHCI_ETRON_HOST && in xhci_spurious_success_tx_event()
2645 ring->type == TYPE_ISOC; in xhci_spurious_success_tx_event()
2660 struct xhci_virt_ep *ep; in handle_tx_event() local
2668 int status = -EINPROGRESS; in handle_tx_event()
2673 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); in handle_tx_event()
2674 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; in handle_tx_event()
2675 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); in handle_tx_event()
2676 ep_trb_dma = le64_to_cpu(event->buffer); in handle_tx_event()
2678 ep = xhci_get_virt_ep(xhci, slot_id, ep_index); in handle_tx_event()
2679 if (!ep) { in handle_tx_event()
2684 ep_ring = xhci_dma_to_transfer_ring(ep, ep_trb_dma); in handle_tx_event()
2685 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index); in handle_tx_event()
2689 "ERROR Transfer event for disabled endpoint slot %u ep %u\n", in handle_tx_event()
2695 return handle_transferless_tx_event(xhci, ep, trb_comp_code); in handle_tx_event()
2703 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { in handle_tx_event()
2705 xhci_dbg(xhci, "Successful completion on short TX for slot %u ep %u with last td comp code %d\n", in handle_tx_event()
2706 slot_id, ep_index, ep_ring->old_trb_comp_code); in handle_tx_event()
2713 xhci_dbg(xhci, "Stopped on Transfer TRB for slot %u ep %u\n", in handle_tx_event()
2718 "Stopped on No-op or Link TRB for slot %u ep %u\n", in handle_tx_event()
2723 "Stopped with short packet transfer detected for slot %u ep %u\n", in handle_tx_event()
2728 xhci_dbg(xhci, "Stalled endpoint for slot %u ep %u\n", slot_id, in handle_tx_event()
2730 status = -EPIPE; in handle_tx_event()
2733 xhci_dbg(xhci, "Split transaction error for slot %u ep %u\n", in handle_tx_event()
2735 status = -EPROTO; in handle_tx_event()
2738 xhci_dbg(xhci, "Transfer error for slot %u ep %u on endpoint\n", in handle_tx_event()
2740 status = -EPROTO; in handle_tx_event()
2743 xhci_dbg(xhci, "Babble error for slot %u ep %u on endpoint\n", in handle_tx_event()
2745 status = -EOVERFLOW; in handle_tx_event()
2750 "WARN: TRB error for slot %u ep %u on endpoint\n", in handle_tx_event()
2752 status = -EILSEQ; in handle_tx_event()
2757 "WARN: HC couldn't access mem fast enough for slot %u ep %u\n", in handle_tx_event()
2759 status = -ENOSR; in handle_tx_event()
2763 "WARN: bandwidth overrun event for slot %u ep %u on endpoint\n", in handle_tx_event()
2768 "WARN: buffer overrun event for slot %u ep %u on endpoint", in handle_tx_event()
2777 xhci_dbg(xhci, "Underrun event on slot %u ep %u\n", slot_id, ep_index); in handle_tx_event()
2781 xhci_dbg(xhci, "Overrun event on slot %u ep %u\n", slot_id, ep_index); in handle_tx_event()
2791 ep->skip = true; in handle_tx_event()
2793 "Miss service interval error for slot %u ep %u, set skip flag%s\n", in handle_tx_event()
2797 ep->skip = true; in handle_tx_event()
2799 "No Ping response error for slot %u ep %u, Skip one Isoc TD\n", in handle_tx_event()
2806 "WARN: detect an incompatible device for slot %u ep %u", in handle_tx_event()
2808 status = -EPROTO; in handle_tx_event()
2816 "ERROR Unknown event condition %u for slot %u ep %u , HC probably busted\n", in handle_tx_event()
2818 if (ep->skip) in handle_tx_event()
2828 * xhci 4.9.1 states that if there are errors in multi-TRB in handle_tx_event()
2836 td = list_first_entry_or_null(&ep_ring->td_list, struct xhci_td, td_list); in handle_tx_event()
2838 if (td && td->error_mid_td && !trb_in_td(td, ep_trb_dma)) { in handle_tx_event()
2840 xhci_dequeue_td(xhci, td, ep_ring, td->status); in handle_tx_event()
2847 if (list_empty(&ep_ring->td_list)) { in handle_tx_event()
2858 xhci_warn(xhci, "Event TRB for slot %u ep %u with no TDs queued\n", in handle_tx_event()
2862 ep->skip = false; in handle_tx_event()
2867 td = list_first_entry(&ep_ring->td_list, struct xhci_td, in handle_tx_event()
2875 if (ep->skip && usb_endpoint_xfer_isoc(&td->urb->ep->desc)) { in handle_tx_event()
2880 skip_isoc_td(xhci, td, ep, status); in handle_tx_event()
2882 if (!list_empty(&ep_ring->td_list)) { in handle_tx_event()
2890 xhci_dbg(xhci, "Skipped one TD for slot %u ep %u", in handle_tx_event()
2897 xhci_dbg(xhci, "All TDs skipped for slot %u ep %u. Clear skip flag.\n", in handle_tx_event()
2899 ep->skip = false; in handle_tx_event()
2910 * TD pointed to by 'ep_ring->dequeue' because the hardware dequeue in handle_tx_event()
2926 &ep_trb_dma, trb_comp_code, ep_ring->old_trb_comp_code); in handle_tx_event()
2927 ep_ring->old_trb_comp_code = 0; in handle_tx_event()
2935 if (ep->skip) { in handle_tx_event()
2937 "Found td. Clear skip flag for slot %u ep %u.\n", in handle_tx_event()
2939 ep->skip = false; in handle_tx_event()
2943 * If ep->skip is set, it means there are missed tds on the in handle_tx_event()
2948 } while (ep->skip); in handle_tx_event()
2950 ep_ring->old_trb_comp_code = trb_comp_code; in handle_tx_event()
2956 ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) / sizeof(*ep_trb)]; in handle_tx_event()
2960 * No-op TRB could trigger interrupts in a case where a URB was killed in handle_tx_event()
2969 td->status = status; in handle_tx_event()
2972 if (usb_endpoint_xfer_control(&td->urb->ep->desc)) in handle_tx_event()
2973 process_ctrl_td(xhci, ep, ep_ring, td, ep_trb, event); in handle_tx_event()
2974 else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc)) in handle_tx_event()
2975 process_isoc_td(xhci, ep, ep_ring, td, ep_trb, event); in handle_tx_event()
2977 process_bulk_intr_td(xhci, ep, ep_ring, td, ep_trb, event); in handle_tx_event()
2982 xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET); in handle_tx_event()
2987 xhci_err(xhci, "Event dma %pad for ep %d status %d not part of TD at %016llx - %016llx\n", in handle_tx_event()
2989 (unsigned long long)xhci_trb_virt_to_dma(td->start_seg, td->start_trb), in handle_tx_event()
2990 (unsigned long long)xhci_trb_virt_to_dma(td->end_seg, td->end_trb)); in handle_tx_event()
2992 return -ESHUTDOWN; in handle_tx_event()
2997 ir->event_ring->deq_seg, in handle_tx_event()
2998 ir->event_ring->dequeue), in handle_tx_event()
2999 lower_32_bits(le64_to_cpu(event->buffer)), in handle_tx_event()
3000 upper_32_bits(le64_to_cpu(event->buffer)), in handle_tx_event()
3001 le32_to_cpu(event->transfer_len), in handle_tx_event()
3002 le32_to_cpu(event->flags)); in handle_tx_event()
3003 return -ENODEV; in handle_tx_event()
3007 * This function handles one OS-owned event on the event ring. It may drop
3008 * xhci->lock between event processing (e.g. to pass up port status changes).
3015 trace_xhci_handle_event(ir->event_ring, &event->generic, in xhci_handle_event_trb()
3016 xhci_trb_virt_to_dma(ir->event_ring->deq_seg, in xhci_handle_event_trb()
3017 ir->event_ring->dequeue)); in xhci_handle_event_trb()
3024 trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags)); in xhci_handle_event_trb()
3029 handle_cmd_completion(xhci, &event->event_cmd); in xhci_handle_event_trb()
3035 handle_tx_event(xhci, ir, &event->trans_event); in xhci_handle_event_trb()
3046 /* Any of the above functions may drop and re-acquire the lock, so check in xhci_handle_event_trb()
3047 * to make sure a watchdog timer didn't mark the host as non-responsive. in xhci_handle_event_trb()
3049 if (xhci->xhc_state & XHCI_STATE_DYING) { in xhci_handle_event_trb()
3051 return -ENODEV; in xhci_handle_event_trb()
3059 * - When all events have finished
3060 * - To avoid "Event Ring Full Error" condition
3069 temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue); in xhci_update_erst_dequeue()
3070 deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg, in xhci_update_erst_dequeue()
3071 ir->event_ring->dequeue); in xhci_update_erst_dequeue()
3082 temp_64 = ir->event_ring->deq_seg->num & ERST_DESI_MASK; in xhci_update_erst_dequeue()
3088 xhci_write_64(xhci, temp_64, &ir->ir_set->erst_dequeue); in xhci_update_erst_dequeue()
3094 if (!ir->ip_autoclear) { in xhci_clear_interrupt_pending()
3097 iman = readl(&ir->ir_set->iman); in xhci_clear_interrupt_pending()
3099 writel(iman, &ir->ir_set->iman); in xhci_clear_interrupt_pending()
3102 readl(&ir->ir_set->iman); in xhci_clear_interrupt_pending()
3107 * Handle all OS-owned events on an interrupter event ring. It may drop
3108 * and reacquire xhci->lock between event processing. in xhci_handle_events()
3120 if (!ir->event_ring || !ir->event_ring->dequeue) { in xhci_handle_events()
3122 return -ENOMEM; in xhci_handle_events()
3125 if (xhci->xhc_state & XHCI_STATE_DYING || in xhci_handle_events()
3126 xhci->xhc_state & XHCI_STATE_HALTED) { in xhci_handle_events()
3130 temp = xhci_read_64(xhci, &ir->ir_set->erst_dequeue); in xhci_handle_events()
3131 xhci_write_64(xhci, temp | ERST_EHB, &ir->ir_set->erst_dequeue); in xhci_handle_events()
3132 return -ENODEV; in xhci_handle_events()
3136 while (unhandled_event_trb(ir->event_ring)) { in xhci_handle_events()
3138 err = xhci_handle_event_trb(xhci, ir, ir->event_ring->dequeue); in xhci_handle_events()
3147 if (ir->isoc_bei_interval > AVOID_BEI_INTERVAL_MIN) in xhci_handle_events()
3148 ir->isoc_bei_interval = ir->isoc_bei_interval / 2; in xhci_handle_events()
3154 inc_deq(xhci, ir->event_ring); in xhci_handle_events()
3182 erdp_reg = xhci_read_64(xhci, &ir->ir_set->erst_dequeue); in xhci_skip_sec_intr_events()
3189 current_trb = ir->event_ring->dequeue; in xhci_skip_sec_intr_events()
3191 ring->cycle_state = le32_to_cpu(current_trb->event_cmd.flags) & TRB_CYCLE; in xhci_skip_sec_intr_events()
3207 spin_lock(&xhci->lock); in xhci_irq()
3209 status = readl(&xhci->op_regs->status); in xhci_irq()
3233 * so we can receive interrupts from other MSI-X interrupters. in xhci_irq()
3237 writel(status, &xhci->op_regs->status); in xhci_irq()
3240 xhci_handle_events(xhci, xhci->interrupters[0], false); in xhci_irq()
3242 spin_unlock(&xhci->lock); in xhci_irq()
3268 trb = &ring->enqueue->generic; in queue_trb()
3269 trb->field[0] = cpu_to_le32(field1); in queue_trb()
3270 trb->field[1] = cpu_to_le32(field2); in queue_trb()
3271 trb->field[2] = cpu_to_le32(field3); in queue_trb()
3274 trb->field[3] = cpu_to_le32(field4); in queue_trb()
3277 xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue)); in queue_trb()
3298 xhci_warn(xhci, "WARN urb submitted to disabled ep\n"); in prepare_ring()
3299 return -ENOENT; in prepare_ring()
3301 xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n"); in prepare_ring()
3303 /* XXX not sure if this should be -ENOENT or not */ in prepare_ring()
3304 return -EINVAL; in prepare_ring()
3312 xhci_err(xhci, "ERROR unknown endpoint state for ep\n"); in prepare_ring()
3317 return -EINVAL; in prepare_ring()
3320 if (ep_ring != xhci->cmd_ring) { in prepare_ring()
3324 return -ENOMEM; in prepare_ring()
3329 "ERROR no room on ep ring, try ring expansion"); in prepare_ring()
3332 return -ENOMEM; in prepare_ring()
3337 if (trb_is_link(ep_ring->enqueue)) in prepare_ring()
3340 if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue)) { in prepare_ring()
3342 return -EINVAL; in prepare_ring()
3361 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); in prepare_transfer()
3363 ep_ring = xhci_triad_to_transfer_ring(xhci, xdev->slot_id, ep_index, in prepare_transfer()
3368 return -EINVAL; in prepare_transfer()
3376 urb_priv = urb->hcpriv; in prepare_transfer()
3377 td = &urb_priv->td[td_index]; in prepare_transfer()
3379 INIT_LIST_HEAD(&td->td_list); in prepare_transfer()
3380 INIT_LIST_HEAD(&td->cancelled_td_list); in prepare_transfer()
3383 ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb); in prepare_transfer()
3388 td->urb = urb; in prepare_transfer()
3390 list_add_tail(&td->td_list, &ep_ring->td_list); in prepare_transfer()
3391 td->start_seg = ep_ring->enq_seg; in prepare_transfer()
3392 td->start_trb = ep_ring->enqueue; in prepare_transfer()
3401 num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)), in count_trbs()
3411 return count_trbs(urb->transfer_dma, urb->transfer_buffer_length); in count_trbs_needed()
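/*
 * Worked example (illustrative, not upstream code): a TRB buffer may not
 * cross a 64 KiB boundary (TRB_MAX_BUFF_SIZE), so a 0x18000-byte buffer
 * starting 0x200 bytes into a 64 KiB region needs
 * DIV_ROUND_UP(0x18000 + 0x200, 0x10000) == 2 TRBs: 0xfe00 bytes up to
 * the boundary, then the remaining 0x8200 bytes.
 */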
3419 full_len = urb->transfer_buffer_length; in count_sg_trbs_needed()
3421 for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) { in count_sg_trbs_needed()
3425 full_len -= len; in count_sg_trbs_needed()
3437 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset); in count_isoc_trbs_needed()
3438 len = urb->iso_frame_desc[i].length; in count_isoc_trbs_needed()
3445 if (unlikely(running_total != urb->transfer_buffer_length)) in check_trb_math()
3446 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, " in check_trb_math()
3449 urb->ep->desc.bEndpointAddress, in check_trb_math()
3451 urb->transfer_buffer_length, in check_trb_math()
3452 urb->transfer_buffer_length); in check_trb_math()
3465 start_trb->field[3] |= cpu_to_le32(start_cycle); in giveback_first_trb()
3467 start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE); in giveback_first_trb()
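/*
 * Editorial note: the first TRB of the TD was queued with its cycle bit
 * deliberately inverted, so the controller skipped the chain while it was
 * being built; restoring start_trb's cycle bit last (above) hands the
 * whole TD to the hardware in one step.
 */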
3476 xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info)); in check_interval()
3477 ep_interval = urb->interval; in check_interval()
3480 if (urb->dev->speed == USB_SPEED_LOW || in check_interval()
3481 urb->dev->speed == USB_SPEED_FULL) in check_interval()
3488 dev_dbg_ratelimited(&urb->dev->dev, in check_interval()
3492 urb->interval = xhci_interval; in check_interval()
3494 if (urb->dev->speed == USB_SPEED_LOW || in check_interval()
3495 urb->dev->speed == USB_SPEED_FULL) in check_interval()
3496 urb->interval /= 8; in check_interval()
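/*
 * Worked example (illustrative, not upstream code): a full-speed
 * interrupt endpoint requesting urb->interval == 3 frames (24 uframes)
 * may have been programmed with the nearest power-of-two interval, e.g.
 * 32 uframes; urb->interval is then rewritten to 32 and, being LS/FS,
 * divided back down to 32 / 8 == 4 frames.
 */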
3511 ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index); in xhci_queue_intr_tx()
3527 * TD size = total_packet_count - packets_transferred
3544 if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST)) in xhci_td_remainder()
3545 return ((td_total_len - transferred) >> 10); in xhci_td_remainder()
3547 /* One TRB with a zero-length data packet. */ in xhci_td_remainder()
3553 if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100)) in xhci_td_remainder()
3556 maxp = xhci_usb_endpoint_maxp(urb->dev, urb->ep); in xhci_td_remainder()
3560 return (total_packet_count - ((transferred + trb_buff_len) / maxp)); in xhci_td_remainder()
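/*
 * Worked example (illustrative, not upstream code): with maxp == 512 and
 * td_total_len == 3072, the TD holds 6 packets; after the first 1024-byte
 * TRB (transferred == 0, trb_buff_len == 1024) the remainder is
 * 6 - 1024 / 512 == 4 packets, while a pre-1.0 xHC instead gets the raw
 * bytes remaining, (3072 - 0) >> 10 == 3.
 */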
3567 struct device *dev = xhci_to_hcd(xhci)->self.sysdev; in xhci_align_td()
3573 max_pkt = xhci_usb_endpoint_maxp(urb->dev, urb->ep); in xhci_align_td()
3585 *trb_buff_len -= unalign; in xhci_align_td()
3595 new_buff_len = max_pkt - (enqd_len % max_pkt); in xhci_align_td()
3597 if (new_buff_len > (urb->transfer_buffer_length - enqd_len)) in xhci_align_td()
3598 new_buff_len = (urb->transfer_buffer_length - enqd_len); in xhci_align_td()
3602 if (urb->num_sgs) { in xhci_align_td()
3603 len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs, in xhci_align_td()
3604 seg->bounce_buf, new_buff_len, enqd_len); in xhci_align_td()
3609 memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len); in xhci_align_td()
3612 seg->bounce_dma = dma_map_single(dev, seg->bounce_buf, in xhci_align_td()
3615 seg->bounce_dma = dma_map_single(dev, seg->bounce_buf, in xhci_align_td()
3619 if (dma_mapping_error(dev, seg->bounce_dma)) { in xhci_align_td()
3625 seg->bounce_len = new_buff_len; in xhci_align_td()
3626 seg->bounce_offs = enqd_len; in xhci_align_td()
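/*
 * Worked example (illustrative, not upstream code): with max_pkt == 1024
 * and enqd_len == 3000 the TD ends 952 bytes into a packet at the segment
 * boundary, so new_buff_len == 1024 - (3000 % 1024) == 72; bouncing those
 * 72 bytes pads the TD to 3072 bytes, a packet-aligned point at which the
 * hardware may legally cross into the next ring segment.
 */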
3633 /* This is very similar to what ehci-q.c qtd_fill() does */
3654 return -EINVAL; in xhci_queue_bulk_tx()
3656 full_len = urb->transfer_buffer_length; in xhci_queue_bulk_tx()
3658 if (urb->num_sgs && !(urb->transfer_flags & URB_DMA_MAP_SINGLE)) { in xhci_queue_bulk_tx()
3659 num_sgs = urb->num_mapped_sgs; in xhci_queue_bulk_tx()
3660 sg = urb->sg; in xhci_queue_bulk_tx()
3666 addr = (u64) urb->transfer_dma; in xhci_queue_bulk_tx()
3669 ret = prepare_transfer(xhci, xhci->devs[slot_id], in xhci_queue_bulk_tx()
3670 ep_index, urb->stream_id, in xhci_queue_bulk_tx()
3675 urb_priv = urb->hcpriv; in xhci_queue_bulk_tx()
3677 /* Deal with URB_ZERO_PACKET - need one more td/trb */ in xhci_queue_bulk_tx()
3678 if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->num_tds > 1) in xhci_queue_bulk_tx()
3681 td = &urb_priv->td[0]; in xhci_queue_bulk_tx()
3688 start_trb = &ring->enqueue->generic; in xhci_queue_bulk_tx()
3689 start_cycle = ring->cycle_state; in xhci_queue_bulk_tx()
3692 /* Queue the TRBs, even if they are zero-length */ in xhci_queue_bulk_tx()
3702 trb_buff_len = full_len - enqd_len; in xhci_queue_bulk_tx()
3710 field |= ring->cycle_state; in xhci_queue_bulk_tx()
3717 if (trb_is_link(ring->enqueue + 1)) { in xhci_queue_bulk_tx()
3720 ring->enq_seg)) { in xhci_queue_bulk_tx()
3721 send_addr = ring->enq_seg->bounce_dma; in xhci_queue_bulk_tx()
3723 td->bounce_seg = ring->enq_seg; in xhci_queue_bulk_tx()
3731 td->end_trb = ring->enqueue; in xhci_queue_bulk_tx()
3732 td->end_seg = ring->enq_seg; in xhci_queue_bulk_tx()
3734 memcpy(&send_addr, urb->transfer_buffer, in xhci_queue_bulk_tx()
3763 --num_sgs; in xhci_queue_bulk_tx()
3764 sent_len -= block_len; in xhci_queue_bulk_tx()
3772 block_len -= sent_len; in xhci_queue_bulk_tx()
3777 ret = prepare_transfer(xhci, xhci->devs[slot_id], in xhci_queue_bulk_tx()
3778 ep_index, urb->stream_id, in xhci_queue_bulk_tx()
3780 urb_priv->td[1].end_trb = ring->enqueue; in xhci_queue_bulk_tx()
3781 urb_priv->td[1].end_seg = ring->enq_seg; in xhci_queue_bulk_tx()
3782 field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC; in xhci_queue_bulk_tx()
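/*
 * Editorial note: URB_ZERO_PACKET requests a terminating zero-length
 * packet when the transfer length is an exact multiple of the endpoint's
 * max packet size; it is queued as a second one-TRB TD (td[1]) so that it
 * completes, and raises TRB_IOC, independently of the data TD.
 */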
3787 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, in xhci_queue_bulk_tx()
3792 /* Caller must have locked xhci->lock */
3808 return -EINVAL; in xhci_queue_ctrl_tx()
3814 if (!urb->setup_packet) in xhci_queue_ctrl_tx()
3815 return -EINVAL; in xhci_queue_ctrl_tx()
3817 if ((xhci->quirks & XHCI_ETRON_HOST) && in xhci_queue_ctrl_tx()
3818 urb->dev->speed >= USB_SPEED_SUPER) { in xhci_queue_ctrl_tx()
3824 if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue + 1)) { in xhci_queue_ctrl_tx()
3825 field = TRB_TYPE(TRB_TR_NOOP) | ep_ring->cycle_state; in xhci_queue_ctrl_tx()
3838 if (urb->transfer_buffer_length > 0) in xhci_queue_ctrl_tx()
3840 ret = prepare_transfer(xhci, xhci->devs[slot_id], in xhci_queue_ctrl_tx()
3841 ep_index, urb->stream_id, in xhci_queue_ctrl_tx()
3846 urb_priv = urb->hcpriv; in xhci_queue_ctrl_tx()
3847 td = &urb_priv->td[0]; in xhci_queue_ctrl_tx()
3854 start_trb = &ep_ring->enqueue->generic; in xhci_queue_ctrl_tx()
3855 start_cycle = ep_ring->cycle_state; in xhci_queue_ctrl_tx()
3857 /* Queue setup TRB - see section 6.4.1.2.1 */ in xhci_queue_ctrl_tx()
3859 setup = (struct usb_ctrlrequest *) urb->setup_packet; in xhci_queue_ctrl_tx()
3866 if ((xhci->hci_version >= 0x100) || (xhci->quirks & XHCI_MTK_HOST)) { in xhci_queue_ctrl_tx()
3867 if (urb->transfer_buffer_length > 0) { in xhci_queue_ctrl_tx()
3868 if (setup->bRequestType & USB_DIR_IN) in xhci_queue_ctrl_tx()
3876 setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16, in xhci_queue_ctrl_tx()
3877 le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16, in xhci_queue_ctrl_tx()
3889 if (urb->transfer_buffer_length > 0) { in xhci_queue_ctrl_tx()
3894 memcpy(&addr, urb->transfer_buffer, in xhci_queue_ctrl_tx()
3895 urb->transfer_buffer_length); in xhci_queue_ctrl_tx()
3899 addr = (u64) urb->transfer_dma; in xhci_queue_ctrl_tx()
3903 urb->transfer_buffer_length, in xhci_queue_ctrl_tx()
3904 urb->transfer_buffer_length, in xhci_queue_ctrl_tx()
3906 length_field = TRB_LEN(urb->transfer_buffer_length) | in xhci_queue_ctrl_tx()
3909 if (setup->bRequestType & USB_DIR_IN) in xhci_queue_ctrl_tx()
3915 field | ep_ring->cycle_state); in xhci_queue_ctrl_tx()
3919 td->end_trb = ep_ring->enqueue; in xhci_queue_ctrl_tx()
3920 td->end_seg = ep_ring->enq_seg; in xhci_queue_ctrl_tx()
3922 /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */ in xhci_queue_ctrl_tx()
3924 if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN) in xhci_queue_ctrl_tx()
3933 field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state); in xhci_queue_ctrl_tx()
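/*
 * Editorial note: the direction test above encodes the USB rule that the
 * status stage always runs opposite to the data stage (IN data -> OUT
 * status), and that a control transfer without a data stage always
 * finishes with an IN status TRB.
 */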
3953 if (xhci->hci_version < 0x100 || urb->dev->speed < USB_SPEED_SUPER) in xhci_get_burst_count()
3956 max_burst = urb->ep->ss_ep_comp.bMaxBurst; in xhci_get_burst_count()
3957 return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1; in xhci_get_burst_count()
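/*
 * Worked example (illustrative, not upstream code): a SuperSpeed isoc
 * endpoint with bMaxBurst == 3 moves up to 4 packets per burst, so
 * total_packet_count == 10 takes DIV_ROUND_UP(10, 4) == 3 bursts and the
 * zero-based burst count returned is 3 - 1 == 2.
 */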
3974 if (xhci->hci_version < 0x100) in xhci_get_last_burst_packet_count()
3977 if (urb->dev->speed >= USB_SPEED_SUPER) { in xhci_get_last_burst_packet_count()
3979 max_burst = urb->ep->ss_ep_comp.bMaxBurst; in xhci_get_last_burst_packet_count()
3982 * number of packets, but the TLBPC field is zero-based. in xhci_get_last_burst_packet_count()
3986 return residue - 1; in xhci_get_last_burst_packet_count()
3990 return total_packet_count - 1; in xhci_get_last_burst_packet_count()
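/*
 * Worked example (illustrative, not upstream code), continuing the case
 * above: 10 packets in bursts of 4 leave residue == 10 % 4 == 2 packets
 * in the last burst, so the zero-based TLBPC is 2 - 1 == 1; a residue of
 * 0 means a full final burst, handled by a branch elided from this
 * excerpt.
 */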
4006 if (urb->dev->speed == USB_SPEED_LOW || in xhci_get_isoc_frame_id()
4007 urb->dev->speed == USB_SPEED_FULL) in xhci_get_isoc_frame_id()
4008 start_frame = urb->start_frame + index * urb->interval; in xhci_get_isoc_frame_id()
4010 start_frame = (urb->start_frame + index * urb->interval) >> 3; in xhci_get_isoc_frame_id()
4020 ist = HCS_IST(xhci->hcs_params2) & 0x7; in xhci_get_isoc_frame_id()
4021 if (HCS_IST(xhci->hcs_params2) & (1 << 3)) in xhci_get_isoc_frame_id()
4037 current_frame_id = readl(&xhci->run_regs->microframe_index); in xhci_get_isoc_frame_id()
4048 ret = -EINVAL; in xhci_get_isoc_frame_id()
4052 ret = -EINVAL; in xhci_get_isoc_frame_id()
4054 ret = -EINVAL; in xhci_get_isoc_frame_id()
4058 if (ret == -EINVAL || start_frame == start_frame_id) { in xhci_get_isoc_frame_id()
4060 if (urb->dev->speed == USB_SPEED_LOW || in xhci_get_isoc_frame_id()
4061 urb->dev->speed == USB_SPEED_FULL) in xhci_get_isoc_frame_id()
4062 urb->start_frame = start_frame; in xhci_get_isoc_frame_id()
4064 urb->start_frame = start_frame << 3; in xhci_get_isoc_frame_id()
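/*
 * Editorial note on units: urb->start_frame counts frames for LS/FS
 * devices but microframes otherwise, hence the >> 3 above. Illustrative
 * example: an HS URB with start_frame == 800, index == 2 and
 * urb->interval == 8 uframes targets frame ID (800 + 16) >> 3 == 102.
 */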
4084 if (xhci->hci_version < 0x100) in trb_block_event_intr()
4087 if (i == num_tds - 1) in trb_block_event_intr()
4093 if (i && ir->isoc_bei_interval && xhci->quirks & XHCI_AVOID_BEI) in trb_block_event_intr()
4094 return !!(i % ir->isoc_bei_interval); in trb_block_event_intr()
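/*
 * Illustrative example (editorial; the fall-through for the remaining
 * cases is elided from this excerpt): with XHCI_AVOID_BEI set,
 * isoc_bei_interval == 8 and num_tds == 32, BEI stays clear on TDs 8, 16
 * and 24 (i % 8 == 0) and on the final TD, so the event ring is drained
 * at least once every isoc_bei_interval TDs.
 */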
4119 xep = &xhci->devs[slot_id]->eps[ep_index]; in xhci_queue_isoc_tx()
4120 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; in xhci_queue_isoc_tx()
4121 ir = xhci->interrupters[0]; in xhci_queue_isoc_tx()
4123 num_tds = urb->number_of_packets; in xhci_queue_isoc_tx()
4126 return -EINVAL; in xhci_queue_isoc_tx()
4128 start_addr = (u64) urb->transfer_dma; in xhci_queue_isoc_tx()
4129 start_trb = &ep_ring->enqueue->generic; in xhci_queue_isoc_tx()
4130 start_cycle = ep_ring->cycle_state; in xhci_queue_isoc_tx()
4132 urb_priv = urb->hcpriv; in xhci_queue_isoc_tx()
4133 /* Queue the TRBs for each TD, even if they are zero-length */ in xhci_queue_isoc_tx()
4141 addr = start_addr + urb->iso_frame_desc[i].offset; in xhci_queue_isoc_tx()
4142 td_len = urb->iso_frame_desc[i].length; in xhci_queue_isoc_tx()
4144 max_pkt = xhci_usb_endpoint_maxp(urb->dev, urb->ep); in xhci_queue_isoc_tx()
4147 /* A zero-length transfer still involves at least one packet. */ in xhci_queue_isoc_tx()
4156 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, in xhci_queue_isoc_tx()
4157 urb->stream_id, trbs_per_td, urb, i, mem_flags); in xhci_queue_isoc_tx()
4163 td = &urb_priv->td[i]; in xhci_queue_isoc_tx()
4166 if (!(urb->transfer_flags & URB_ISO_ASAP) && in xhci_queue_isoc_tx()
4167 HCC_CFC(xhci->hcc_params)) { in xhci_queue_isoc_tx()
4180 (i ? ep_ring->cycle_state : !start_cycle); in xhci_queue_isoc_tx()
4183 if (!xep->use_extended_tbc) in xhci_queue_isoc_tx()
4193 ep_ring->cycle_state; in xhci_queue_isoc_tx()
4200 if (j < trbs_per_td - 1) { in xhci_queue_isoc_tx()
4205 td->end_trb = ep_ring->enqueue; in xhci_queue_isoc_tx()
4206 td->end_seg = ep_ring->enq_seg; in xhci_queue_isoc_tx()
4225 if (first_trb && xep->use_extended_tbc) in xhci_queue_isoc_tx()
4239 td_remain_len -= trb_buff_len; in xhci_queue_isoc_tx()
4245 ret = -EINVAL; in xhci_queue_isoc_tx()
4251 if (HCC_CFC(xhci->hcc_params)) in xhci_queue_isoc_tx()
4252 xep->next_frame_id = urb->start_frame + num_tds * urb->interval; in xhci_queue_isoc_tx()
4254 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { in xhci_queue_isoc_tx()
4255 if (xhci->quirks & XHCI_AMD_PLL_FIX) in xhci_queue_isoc_tx()
4258 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++; in xhci_queue_isoc_tx()
4260 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, in xhci_queue_isoc_tx()
4266 for (i--; i >= 0; i--) in xhci_queue_isoc_tx()
4267 list_del_init(&urb_priv->td[i].td_list); in xhci_queue_isoc_tx()
4270 * into No-ops with a software-owned cycle bit. That way the hardware in xhci_queue_isoc_tx()
4272 * overwrite them. td->start_trb and td->start_seg are already set. in xhci_queue_isoc_tx()
4274 urb_priv->td[0].end_trb = ep_ring->enqueue; in xhci_queue_isoc_tx()
4276 td_to_noop(&urb_priv->td[0], true); in xhci_queue_isoc_tx()
4279 ep_ring->enqueue = urb_priv->td[0].start_trb; in xhci_queue_isoc_tx()
4280 ep_ring->enq_seg = urb_priv->td[0].start_seg; in xhci_queue_isoc_tx()
4281 ep_ring->cycle_state = start_cycle; in xhci_queue_isoc_tx()
4282 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb); in xhci_queue_isoc_tx()
4290 * update urb->start_frame if URB_ISO_ASAP is set in transfer_flags or
4305 xdev = xhci->devs[slot_id]; in xhci_queue_isoc_tx_prepare()
4306 xep = &xhci->devs[slot_id]->eps[ep_index]; in xhci_queue_isoc_tx_prepare()
4307 ep_ring = xdev->eps[ep_index].ring; in xhci_queue_isoc_tx_prepare()
4308 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); in xhci_queue_isoc_tx_prepare()
4311 num_tds = urb->number_of_packets; in xhci_queue_isoc_tx_prepare()
4329 /* Calculate the start frame and put it in urb->start_frame. */ in xhci_queue_isoc_tx_prepare()
4330 if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) { in xhci_queue_isoc_tx_prepare()
4332 urb->start_frame = xep->next_frame_id; in xhci_queue_isoc_tx_prepare()
4337 start_frame = readl(&xhci->run_regs->microframe_index); in xhci_queue_isoc_tx_prepare()
4343 ist = HCS_IST(xhci->hcs_params2) & 0x7; in xhci_queue_isoc_tx_prepare()
4344 if (HCS_IST(xhci->hcs_params2) & (1 << 3)) in xhci_queue_isoc_tx_prepare()
4353 if (urb->dev->speed == USB_SPEED_LOW || in xhci_queue_isoc_tx_prepare()
4354 urb->dev->speed == USB_SPEED_FULL) { in xhci_queue_isoc_tx_prepare()
4355 start_frame = roundup(start_frame, urb->interval << 3); in xhci_queue_isoc_tx_prepare()
4356 urb->start_frame = start_frame >> 3; in xhci_queue_isoc_tx_prepare()
4358 start_frame = roundup(start_frame, urb->interval); in xhci_queue_isoc_tx_prepare()
4359 urb->start_frame = start_frame; in xhci_queue_isoc_tx_prepare()
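/*
 * Worked example (illustrative, not upstream code; ignores the elided IST
 * adjustment): MFINDEX counts microframes, so for a full-speed endpoint
 * with urb->interval == 4 frames a raw reading of 1000 rounds up to
 * roundup(1000, 4 << 3) == 1024 uframes, i.e. urb->start_frame ==
 * 1024 >> 3 == 128.
 */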
4374 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
4381 int reserved_trbs = xhci->cmd_ring_reserved_trbs; in queue_command()
4384 if ((xhci->xhc_state & XHCI_STATE_DYING) || in queue_command()
4385 (xhci->xhc_state & XHCI_STATE_HALTED)) { in queue_command()
4387 xhci->xhc_state); in queue_command()
4388 return -ESHUTDOWN; in queue_command()
4394 ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING, in queue_command()
4404 cmd->command_trb = xhci->cmd_ring->enqueue; in queue_command()
4407 if (list_empty(&xhci->cmd_list)) { in queue_command()
4408 xhci->current_cmd = cmd; in queue_command()
4412 list_add_tail(&cmd->cmd_list, &xhci->cmd_list); in queue_command()
4414 queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3, in queue_command()
4415 field4 | xhci->cmd_ring->cycle_state); in queue_command()