xref: /linux/drivers/usb/host/xhci-ring.c (revision 98f4a2c27c76e7eaf75c2f3f25487fabca62ef3d)
1 /*
2  * xHCI host controller driver
3  *
4  * Copyright (C) 2008 Intel Corp.
5  *
6  * Author: Sarah Sharp
7  * Some code borrowed from the Linux EHCI driver.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15  * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
16  * for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software Foundation,
20  * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21  */
22 
23 /*
24  * Ring initialization rules:
25  * 1. Each segment is initialized to zero, except for link TRBs.
26  * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
27  *    Consumer Cycle State (CCS), depending on ring function.
28  * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
29  *
30  * Ring behavior rules:
31  * 1. A ring is empty if enqueue == dequeue.  This means there will always be at
32  *    least one free TRB in the ring.  This is useful if you want to turn that
33  *    into a link TRB and expand the ring.
34  * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
35  *    link TRB, then load the pointer with the address in the link TRB.  If the
36  *    link TRB had its toggle bit set, you may need to update the ring cycle
37  *    state (see cycle bit rules).  You may have to do this multiple times
38  *    until you reach a non-link TRB.
39  * 3. A ring is full if enqueue++ (for the definition of increment above)
40  *    equals the dequeue pointer.
41  *
42  * Cycle bit rules:
43  * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
44  *    in a link TRB, it must toggle the ring cycle state.
45  * 2. When a producer increments an enqueue pointer and encounters a toggle bit
46  *    in a link TRB, it must toggle the ring cycle state.
47  *
48  * Producer rules:
49  * 1. Check if ring is full before you enqueue.
50  * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
51  *    Update enqueue pointer between each write (which may update the ring
52  *    cycle state).
53  * 3. Notify consumer.  If SW is the producer, it rings the doorbell for
54  *    command and endpoint rings.  If HC is the producer for the event ring,
55  *    it generates an interrupt according to interrupt moderation rules.
56  *
57  * Consumer rules:
58  * 1. Check if TRB belongs to you.  If the cycle bit == your ring cycle state,
59  *    the TRB is owned by the consumer.
60  * 2. Update dequeue pointer (which may update the ring cycle state) and
61  *    continue processing TRBs until you reach a TRB which is not owned by you.
62  * 3. Notify the producer.  SW is the consumer for the event ring, and it
63  *   updates event ring dequeue pointer.  HC is the consumer for the command and
64  *   endpoint rings; it generates events on the event ring for these.
65  */
66 
67 #include <linux/scatterlist.h>
68 #include <linux/slab.h>
69 #include "xhci.h"
70 
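/*
 * Editorial sketch, not part of the driver: the ownership test described
 * under "Consumer rules" in the header comment above.  A TRB belongs to
 * the consumer while the TRB's cycle bit matches the ring's cycle state.
 * Marked __maybe_unused because nothing in this file calls it.
 */
static inline bool __maybe_unused example_trb_owned_by_consumer(
		struct xhci_ring *ring, union xhci_trb *trb)
{
	/* The cycle bit is bit 0 of the TRB's last dword */
	return (le32_to_cpu(trb->generic.field[3]) & TRB_CYCLE) ==
		ring->cycle_state;
}
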
71 static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
72 		struct xhci_virt_device *virt_dev,
73 		struct xhci_event_cmd *event);
74 
75 /*
76  * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
77  * address of the TRB.
78  */
79 dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
80 		union xhci_trb *trb)
81 {
82 	unsigned long segment_offset;
83 
84 	if (!seg || !trb || trb < seg->trbs)
85 		return 0;
86 	/* offset in TRBs */
87 	segment_offset = trb - seg->trbs;
88 	if (segment_offset >= TRBS_PER_SEGMENT)
89 		return 0;
90 	return seg->dma + (segment_offset * sizeof(*trb));
91 }
92 
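/*
 * Editorial example of the mapping above: for the TRB at index i within a
 * segment, the returned address is seg->dma + i * sizeof(union xhci_trb).
 * The helper below is illustrative only and is not used by the driver.
 */
static inline dma_addr_t __maybe_unused example_trb_index_to_dma(
		struct xhci_segment *seg, unsigned int i)
{
	/* Out-of-range indexes make xhci_trb_virt_to_dma() return 0 */
	return xhci_trb_virt_to_dma(seg, &seg->trbs[i]);
}
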
93 /* Does this link TRB point to the first segment in a ring,
94  * or was the previous TRB the last TRB on the last segment in the ERST?
95  */
96 static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
97 		struct xhci_segment *seg, union xhci_trb *trb)
98 {
99 	if (ring == xhci->event_ring)
100 		return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
101 			(seg->next == xhci->event_ring->first_seg);
102 	else
103 		return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
104 }
105 
106 /* Is this TRB a link TRB, or has the pointer stepped just past the last TRB
107  * in this event ring segment?  I.e. would the updated event TRB pointer step
108  * off the end of the event seg?
109  */
110 static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
111 		struct xhci_segment *seg, union xhci_trb *trb)
112 {
113 	if (ring == xhci->event_ring)
114 		return trb == &seg->trbs[TRBS_PER_SEGMENT];
115 	else
116 		return (le32_to_cpu(trb->link.control) & TRB_TYPE_BITMASK)
117 			== TRB_TYPE(TRB_LINK);
118 }
119 
120 static int enqueue_is_link_trb(struct xhci_ring *ring)
121 {
122 	struct xhci_link_trb *link = &ring->enqueue->link;
123 	return ((le32_to_cpu(link->control) & TRB_TYPE_BITMASK) ==
124 		TRB_TYPE(TRB_LINK));
125 }
126 
127 /* Updates trb to point to the next TRB in the ring, and updates seg if the next
128  * TRB is in a new segment.  This does not skip over link TRBs, and it does not
129  * affect the ring dequeue or enqueue pointers.
130  */
131 static void next_trb(struct xhci_hcd *xhci,
132 		struct xhci_ring *ring,
133 		struct xhci_segment **seg,
134 		union xhci_trb **trb)
135 {
136 	if (last_trb(xhci, ring, *seg, *trb)) {
137 		*seg = (*seg)->next;
138 		*trb = ((*seg)->trbs);
139 	} else {
140 		(*trb)++;
141 	}
142 }
143 
144 /*
145  * See Cycle bit rules. SW is the consumer for the event ring only.
146  * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
147  */
148 static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
149 {
150 	union xhci_trb *next = ++(ring->dequeue);
151 	unsigned long long addr;
152 
153 	ring->deq_updates++;
154 	/* Update the dequeue pointer further if that was a link TRB or we're at
155 	 * the end of an event ring segment (which doesn't have link TRBs)
156 	 */
157 	while (last_trb(xhci, ring, ring->deq_seg, next)) {
158 		if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
159 			ring->cycle_state = (ring->cycle_state ? 0 : 1);
160 			if (!in_interrupt())
161 				xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
162 						ring,
163 						(unsigned int) ring->cycle_state);
164 		}
165 		ring->deq_seg = ring->deq_seg->next;
166 		ring->dequeue = ring->deq_seg->trbs;
167 		next = ring->dequeue;
168 	}
169 	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
170 }
171 
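/*
 * Editorial sketch of the consumer side this helper serves: keep
 * processing event TRBs while their cycle bit says we own them, advancing
 * the dequeue pointer each time.  The real loop lives in the event
 * handling code later in this file; this shows the simplified shape only.
 */
static void __maybe_unused example_drain_event_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring = xhci->event_ring;

	while ((le32_to_cpu(ring->dequeue->event_cmd.flags) & TRB_CYCLE) ==
			ring->cycle_state) {
		/* ... handle the event at ring->dequeue here ... */
		inc_deq(xhci, ring, true);
	}
}
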
172 /*
173  * See Cycle bit rules. SW is the consumer for the event ring only.
174  * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
175  *
176  * If we've just enqueued a TRB that is in the middle of a TD (meaning the
177  * chain bit is set), then set the chain bit in all the following link TRBs.
178  * If we've enqueued the last TRB in a TD, make sure the following link TRBs
179  * have their chain bit cleared (so that each Link TRB is a separate TD).
180  *
181  * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
182  * set, but other sections talk about dealing with the chain bit set.  This was
183  * fixed in the 0.96 specification errata, but we have to assume that all 0.95
184  * xHCI hardware can't handle the chain bit being cleared on a link TRB.
185  *
186  * @more_trbs_coming:	Will you enqueue more TRBs before calling
187  *			prepare_transfer()?
188  */
189 static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
190 		bool consumer, bool more_trbs_coming)
191 {
192 	u32 chain;
193 	union xhci_trb *next;
194 	unsigned long long addr;
195 
196 	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
197 	next = ++(ring->enqueue);
198 
199 	ring->enq_updates++;
200 	/* Update the enqueue pointer further if that was a link TRB or we're at
201 	 * the end of an event ring segment (which doesn't have link TRBs)
202 	 */
203 	while (last_trb(xhci, ring, ring->enq_seg, next)) {
204 		if (!consumer) {
205 			if (ring != xhci->event_ring) {
206 				/*
207 				 * If the caller doesn't plan on enqueueing more
208 				 * TDs before ringing the doorbell, then we
209 				 * don't want to give the link TRB to the
210 				 * hardware just yet.  We'll give the link TRB
211 				 * back in prepare_ring() just before we enqueue
212 				 * the TD at the top of the ring.
213 				 */
214 				if (!chain && !more_trbs_coming)
215 					break;
216 
217 				/* If we're not dealing with 0.95 hardware,
218 				 * carry over the chain bit of the previous TRB
219 				 * (which may mean the chain bit is cleared).
220 				 */
221 				if (!xhci_link_trb_quirk(xhci)) {
222 					next->link.control &=
223 						cpu_to_le32(~TRB_CHAIN);
224 					next->link.control |=
225 						cpu_to_le32(chain);
226 				}
227 				/* Give this link TRB to the hardware */
228 				wmb();
229 				next->link.control ^= cpu_to_le32(TRB_CYCLE);
230 			}
231 			/* Toggle the cycle bit after the last ring segment. */
232 			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
233 				ring->cycle_state = (ring->cycle_state ? 0 : 1);
234 				if (!in_interrupt())
235 					xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
236 							ring,
237 							(unsigned int) ring->cycle_state);
238 			}
239 		}
240 		ring->enq_seg = ring->enq_seg->next;
241 		ring->enqueue = ring->enq_seg->trbs;
242 		next = ring->enqueue;
243 	}
244 	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
245 }
246 
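/*
 * Editorial sketch of the matching producer pattern from "Producer rules"
 * above: fill in a TRB, hand it to the consumer by writing the ring's
 * cycle state into the cycle bit, then advance the enqueue pointer.  It
 * mirrors the generic queueing helper later in this file and assumes the
 * caller already checked for ring space; illustrative only.
 */
static void __maybe_unused example_queue_noop_trb(struct xhci_hcd *xhci,
		struct xhci_ring *ring)
{
	struct xhci_generic_trb *trb = &ring->enqueue->generic;

	trb->field[0] = 0;
	trb->field[1] = 0;
	trb->field[2] = 0;
	/* Writing the cycle bit last gives the TRB to the consumer */
	trb->field[3] = cpu_to_le32(TRB_TYPE(TRB_TR_NOOP) |
			ring->cycle_state);
	inc_enq(xhci, ring, false, false);
}
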
247 /*
248  * Check to see if there's room to enqueue num_trbs on the ring.  See rules
249  * above.
250  * FIXME: this would be simpler and faster if we just kept track of the number
251  * of free TRBs in a ring.
252  */
253 static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
254 		unsigned int num_trbs)
255 {
256 	int i;
257 	union xhci_trb *enq = ring->enqueue;
258 	struct xhci_segment *enq_seg = ring->enq_seg;
259 	struct xhci_segment *cur_seg;
260 	unsigned int left_on_ring;
261 
262 	/* If we are currently pointing to a link TRB, advance the
263 	 * enqueue pointer before checking for space */
264 	while (last_trb(xhci, ring, enq_seg, enq)) {
265 		enq_seg = enq_seg->next;
266 		enq = enq_seg->trbs;
267 	}
268 
269 	/* Check if ring is empty */
270 	if (enq == ring->dequeue) {
271 		/* Can't use link trbs */
272 		left_on_ring = TRBS_PER_SEGMENT - 1;
273 		for (cur_seg = enq_seg->next; cur_seg != enq_seg;
274 				cur_seg = cur_seg->next)
275 			left_on_ring += TRBS_PER_SEGMENT - 1;
276 
277 		/* Always need one TRB free in the ring. */
278 		left_on_ring -= 1;
279 		if (num_trbs > left_on_ring) {
280 			xhci_warn(xhci, "Not enough room on ring; "
281 					"need %u TRBs, %u TRBs left\n",
282 					num_trbs, left_on_ring);
283 			return 0;
284 		}
285 		return 1;
286 	}
287 	/* Make sure there's an extra empty TRB available */
288 	for (i = 0; i <= num_trbs; ++i) {
289 		if (enq == ring->dequeue)
290 			return 0;
291 		enq++;
292 		while (last_trb(xhci, ring, enq_seg, enq)) {
293 			enq_seg = enq_seg->next;
294 			enq = enq_seg->trbs;
295 		}
296 	}
297 	return 1;
298 }
299 
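/*
 * Editorial sketch of the FIXME above, assuming a hypothetical running
 * counter of free TRBs (the current struct xhci_ring has no such field):
 * the space check would collapse to a single comparison.
 */
static inline int __maybe_unused example_room_on_ring_counted(
		unsigned int num_trbs_free, unsigned int num_trbs)
{
	/* One slot always stays unused so that enqueue == dequeue still
	 * means "empty" rather than "full" (see ring behavior rules).
	 */
	return num_trbs_free > num_trbs;
}
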
300 /* Ring the host controller doorbell after placing a command on the ring */
301 void xhci_ring_cmd_db(struct xhci_hcd *xhci)
302 {
303 	xhci_dbg(xhci, "// Ding dong!\n");
304 	xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
305 	/* Flush PCI posted writes */
306 	xhci_readl(xhci, &xhci->dba->doorbell[0]);
307 }
308 
309 void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
310 		unsigned int slot_id,
311 		unsigned int ep_index,
312 		unsigned int stream_id)
313 {
314 	__le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
315 	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
316 	unsigned int ep_state = ep->ep_state;
317 
318 	/* Don't ring the doorbell for this endpoint if there are pending
319 	 * cancellations because we don't want to interrupt processing.
320 	 * We don't want to restart any stream rings if there's a set dequeue
321 	 * pointer command pending because the device can choose to start any
322 	 * stream once the endpoint is on the HW schedule.
323 	 * FIXME - check all the stream rings for pending cancellations.
324 	 */
325 	if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
326 	    (ep_state & EP_HALTED))
327 		return;
328 	xhci_writel(xhci, DB_VALUE(ep_index, stream_id), db_addr);
329 	/* The CPU has better things to do at this point than wait for a
330 	 * write-posting flush.  It'll get there soon enough.
331 	 */
332 }
333 
334 /* Ring the doorbell for any rings with pending URBs */
335 static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
336 		unsigned int slot_id,
337 		unsigned int ep_index)
338 {
339 	unsigned int stream_id;
340 	struct xhci_virt_ep *ep;
341 
342 	ep = &xhci->devs[slot_id]->eps[ep_index];
343 
344 	/* A ring has pending URBs if its TD list is not empty */
345 	if (!(ep->ep_state & EP_HAS_STREAMS)) {
346 		if (!(list_empty(&ep->ring->td_list)))
347 			xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
348 		return;
349 	}
350 
351 	for (stream_id = 1; stream_id < ep->stream_info->num_streams;
352 			stream_id++) {
353 		struct xhci_stream_info *stream_info = ep->stream_info;
354 		if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
355 			xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
356 						stream_id);
357 	}
358 }
359 
360 /*
361  * Find the segment that trb is in.  Start searching in start_seg.
362  * If we must move past a segment that has a link TRB with a toggle cycle state
363  * bit set, then we will toggle the value pointed at by cycle_state.
364  */
365 static struct xhci_segment *find_trb_seg(
366 		struct xhci_segment *start_seg,
367 		union xhci_trb	*trb, int *cycle_state)
368 {
369 	struct xhci_segment *cur_seg = start_seg;
370 	struct xhci_generic_trb *generic_trb;
371 
372 	while (cur_seg->trbs > trb ||
373 			&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
374 		generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
375 		if (le32_to_cpu(generic_trb->field[3]) & LINK_TOGGLE)
376 			*cycle_state ^= 0x1;
377 		cur_seg = cur_seg->next;
378 		if (cur_seg == start_seg)
379 			/* Looped over the entire list.  Oops! */
380 			return NULL;
381 	}
382 	return cur_seg;
383 }
384 
385 
386 static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
387 		unsigned int slot_id, unsigned int ep_index,
388 		unsigned int stream_id)
389 {
390 	struct xhci_virt_ep *ep;
391 
392 	ep = &xhci->devs[slot_id]->eps[ep_index];
393 	/* Common case: no streams */
394 	if (!(ep->ep_state & EP_HAS_STREAMS))
395 		return ep->ring;
396 
397 	if (stream_id == 0) {
398 		xhci_warn(xhci,
399 				"WARN: Slot ID %u, ep index %u has streams, "
400 				"but URB has no stream ID.\n",
401 				slot_id, ep_index);
402 		return NULL;
403 	}
404 
405 	if (stream_id < ep->stream_info->num_streams)
406 		return ep->stream_info->stream_rings[stream_id];
407 
408 	xhci_warn(xhci,
409 			"WARN: Slot ID %u, ep index %u has "
410 			"stream IDs 1 to %u allocated, "
411 			"but stream ID %u is requested.\n",
412 			slot_id, ep_index,
413 			ep->stream_info->num_streams - 1,
414 			stream_id);
415 	return NULL;
416 }
417 
418 /* Get the right ring for the given URB.
419  * If the endpoint supports streams, boundary check the URB's stream ID.
420  * If the endpoint doesn't support streams, return the singular endpoint ring.
421  */
422 static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
423 		struct urb *urb)
424 {
425 	return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
426 		xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
427 }
428 
429 /*
430  * Move the xHC's endpoint ring dequeue pointer past cur_td.
431  * Record the new state of the xHC's endpoint ring dequeue segment,
432  * dequeue pointer, and new consumer cycle state in state.
433  * Update our internal representation of the ring's dequeue pointer.
434  *
435  * We do this in three jumps:
436  *  - First we update our new ring state to be the same as when the xHC stopped.
437  *  - Then we traverse the ring to find the segment that contains
438  *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
439  *    any link TRBs with the toggle cycle bit set.
440  *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
441  *    if we've moved it past a link TRB with the toggle cycle bit set.
442  *
443  * Some of the uses of xhci_generic_trb are grotty, but if they're done
444  * with correct __le32 accesses they should work fine.  Only users of this are
445  * in here.
446  */
447 void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
448 		unsigned int slot_id, unsigned int ep_index,
449 		unsigned int stream_id, struct xhci_td *cur_td,
450 		struct xhci_dequeue_state *state)
451 {
452 	struct xhci_virt_device *dev = xhci->devs[slot_id];
453 	struct xhci_ring *ep_ring;
454 	struct xhci_generic_trb *trb;
455 	struct xhci_ep_ctx *ep_ctx;
456 	dma_addr_t addr;
457 
458 	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
459 			ep_index, stream_id);
460 	if (!ep_ring) {
461 		xhci_warn(xhci, "WARN can't find new dequeue state "
462 				"for invalid stream ID %u.\n",
463 				stream_id);
464 		return;
465 	}
466 	state->new_cycle_state = 0;
467 	xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
468 	state->new_deq_seg = find_trb_seg(cur_td->start_seg,
469 			dev->eps[ep_index].stopped_trb,
470 			&state->new_cycle_state);
471 	if (!state->new_deq_seg) {
472 		WARN_ON(1);
473 		return;
474 	}
475 
476 	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
477 	xhci_dbg(xhci, "Finding endpoint context\n");
478 	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
479 	state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
480 
481 	state->new_deq_ptr = cur_td->last_trb;
482 	xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
483 	state->new_deq_seg = find_trb_seg(state->new_deq_seg,
484 			state->new_deq_ptr,
485 			&state->new_cycle_state);
486 	if (!state->new_deq_seg) {
487 		WARN_ON(1);
488 		return;
489 	}
490 
491 	trb = &state->new_deq_ptr->generic;
492 	if ((le32_to_cpu(trb->field[3]) & TRB_TYPE_BITMASK) ==
493 	    TRB_TYPE(TRB_LINK) && (le32_to_cpu(trb->field[3]) & LINK_TOGGLE))
494 		state->new_cycle_state ^= 0x1;
495 	next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
496 
497 	/*
498 	 * If there is only one segment in a ring, find_trb_seg()'s while loop
499 	 * will not run, and it will return before it has a chance to see if it
500 	 * needs to toggle the cycle bit.  It can't tell if the stalled transfer
501 	 * ended just before the link TRB on a one-segment ring, or if the TD
502 	 * wrapped around the top of the ring, because it doesn't have the TD in
503 	 * question.  Look for the one-segment case where the stalled TRB's address
504 	 * is greater than the new dequeue pointer address.
505 	 */
506 	if (ep_ring->first_seg == ep_ring->first_seg->next &&
507 			state->new_deq_ptr < dev->eps[ep_index].stopped_trb)
508 		state->new_cycle_state ^= 0x1;
509 	xhci_dbg(xhci, "Cycle state = 0x%x\n", state->new_cycle_state);
510 
511 	/* Don't update the ring cycle state for the producer (us). */
512 	xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
513 			state->new_deq_seg);
514 	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
515 	xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
516 			(unsigned long long) addr);
517 }
518 
519 static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
520 		struct xhci_td *cur_td)
521 {
522 	struct xhci_segment *cur_seg;
523 	union xhci_trb *cur_trb;
524 
525 	for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
526 			true;
527 			next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
528 		if ((le32_to_cpu(cur_trb->generic.field[3]) & TRB_TYPE_BITMASK)
529 		    == TRB_TYPE(TRB_LINK)) {
530 			/* Unchain any chained Link TRBs, but
531 			 * leave the pointers intact.
532 			 */
533 			cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
534 			xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
535 			xhci_dbg(xhci, "Address = %p (0x%llx dma); "
536 					"in seg %p (0x%llx dma)\n",
537 					cur_trb,
538 					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
539 					cur_seg,
540 					(unsigned long long)cur_seg->dma);
541 		} else {
542 			cur_trb->generic.field[0] = 0;
543 			cur_trb->generic.field[1] = 0;
544 			cur_trb->generic.field[2] = 0;
545 			/* Preserve only the cycle bit of this TRB */
546 			cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
547 			cur_trb->generic.field[3] |= cpu_to_le32(
548 				TRB_TYPE(TRB_TR_NOOP));
549 			xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
550 					"in seg %p (0x%llx dma)\n",
551 					cur_trb,
552 					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
553 					cur_seg,
554 					(unsigned long long)cur_seg->dma);
555 		}
556 		if (cur_trb == cur_td->last_trb)
557 			break;
558 	}
559 }
560 
561 static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
562 		unsigned int ep_index, unsigned int stream_id,
563 		struct xhci_segment *deq_seg,
564 		union xhci_trb *deq_ptr, u32 cycle_state);
565 
566 void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
567 		unsigned int slot_id, unsigned int ep_index,
568 		unsigned int stream_id,
569 		struct xhci_dequeue_state *deq_state)
570 {
571 	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
572 
573 	xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
574 			"new deq ptr = %p (0x%llx dma), new cycle = %u\n",
575 			deq_state->new_deq_seg,
576 			(unsigned long long)deq_state->new_deq_seg->dma,
577 			deq_state->new_deq_ptr,
578 			(unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
579 			deq_state->new_cycle_state);
580 	queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
581 			deq_state->new_deq_seg,
582 			deq_state->new_deq_ptr,
583 			(u32) deq_state->new_cycle_state);
584 	/* Stop the TD queueing code from ringing the doorbell until
585 	 * this command completes.  The HC won't set the dequeue pointer
586 	 * if the ring is running, and ringing the doorbell starts the
587 	 * ring running.
588 	 */
589 	ep->ep_state |= SET_DEQ_PENDING;
590 }
591 
592 static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
593 		struct xhci_virt_ep *ep)
594 {
595 	ep->ep_state &= ~EP_HALT_PENDING;
596 	/* Can't del_timer_sync in interrupt, so we attempt to cancel.  If the
597 	 * timer is running on another CPU, we don't decrement stop_cmds_pending
598 	 * (since we didn't successfully stop the watchdog timer).
599 	 */
600 	if (del_timer(&ep->stop_cmd_timer))
601 		ep->stop_cmds_pending--;
602 }
603 
604 /* Must be called with xhci->lock held in interrupt context */
605 static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
606 		struct xhci_td *cur_td, int status, char *adjective)
607 {
608 	struct usb_hcd *hcd;
609 	struct urb	*urb;
610 	struct urb_priv	*urb_priv;
611 
612 	urb = cur_td->urb;
613 	urb_priv = urb->hcpriv;
614 	urb_priv->td_cnt++;
615 	hcd = bus_to_hcd(urb->dev->bus);
616 
617 	/* Only give back the URB when this is the last TD in the URB */
618 	if (urb_priv->td_cnt == urb_priv->length) {
619 		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
620 			xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
621 			if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs	== 0) {
622 				if (xhci->quirks & XHCI_AMD_PLL_FIX)
623 					usb_amd_quirk_pll_enable();
624 			}
625 		}
626 		usb_hcd_unlink_urb_from_ep(hcd, urb);
627 
628 		spin_unlock(&xhci->lock);
629 		usb_hcd_giveback_urb(hcd, urb, status);
630 		xhci_urb_free_priv(xhci, urb_priv);
631 		spin_lock(&xhci->lock);
632 	}
633 }
634 
635 /*
636  * When we get a command completion for a Stop Endpoint Command, we need to
637  * unlink any cancelled TDs from the ring.  There are two ways to do that:
638  *
639  *  1. If the HW was in the middle of processing the TD that needs to be
640  *     cancelled, then we must move the ring's dequeue pointer past the last TRB
641  *     in the TD with a Set Dequeue Pointer Command.
642  *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
643  *     bit cleared) so that the HW will skip over them.
644  */
645 static void handle_stopped_endpoint(struct xhci_hcd *xhci,
646 		union xhci_trb *trb, struct xhci_event_cmd *event)
647 {
648 	unsigned int slot_id;
649 	unsigned int ep_index;
650 	struct xhci_virt_device *virt_dev;
651 	struct xhci_ring *ep_ring;
652 	struct xhci_virt_ep *ep;
653 	struct list_head *entry;
654 	struct xhci_td *cur_td = NULL;
655 	struct xhci_td *last_unlinked_td;
656 
657 	struct xhci_dequeue_state deq_state;
658 
659 	if (unlikely(TRB_TO_SUSPEND_PORT(
660 			     le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])))) {
661 		slot_id = TRB_TO_SLOT_ID(
662 			le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
663 		virt_dev = xhci->devs[slot_id];
664 		if (virt_dev)
665 			handle_cmd_in_cmd_wait_list(xhci, virt_dev,
666 				event);
667 		else
668 			xhci_warn(xhci, "Stop endpoint command "
669 				"completion for disabled slot %u\n",
670 				slot_id);
671 		return;
672 	}
673 
674 	memset(&deq_state, 0, sizeof(deq_state));
675 	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
676 	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
677 	ep = &xhci->devs[slot_id]->eps[ep_index];
678 
679 	if (list_empty(&ep->cancelled_td_list)) {
680 		xhci_stop_watchdog_timer_in_irq(xhci, ep);
681 		ep->stopped_td = NULL;
682 		ep->stopped_trb = NULL;
683 		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
684 		return;
685 	}
686 
687 	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
688 	 * We have the xHCI lock, so nothing can modify this list until we drop
689 	 * it.  We're also in the event handler, so we can't get re-interrupted
690 	 * if another Stop Endpoint command completes
691 	 * if another Stop Endpoint command completes.
692 	list_for_each(entry, &ep->cancelled_td_list) {
693 		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
694 		xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
695 				cur_td->first_trb,
696 				(unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
697 		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
698 		if (!ep_ring) {
699 			/* This shouldn't happen unless a driver is mucking
700 			 * with the stream ID after submission.  This will
701 			 * leave the TD on the hardware ring, and the hardware
702 			 * will try to execute it, and may access a buffer
703 			 * that has already been freed.  In the best case, the
704 			 * hardware will execute it, and the event handler will
705 			 * ignore the completion event for that TD, since it was
706 			 * removed from the td_list for that endpoint.  In
707 			 * short, don't muck with the stream ID after
708 			 * submission.
709 			 */
710 			xhci_warn(xhci, "WARN Cancelled URB %p "
711 					"has invalid stream ID %u.\n",
712 					cur_td->urb,
713 					cur_td->urb->stream_id);
714 			goto remove_finished_td;
715 		}
716 		/*
717 		 * If we stopped on the TD we need to cancel, then we have to
718 		 * move the xHC endpoint ring dequeue pointer past this TD.
719 		 */
720 		if (cur_td == ep->stopped_td)
721 			xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
722 					cur_td->urb->stream_id,
723 					cur_td, &deq_state);
724 		else
725 			td_to_noop(xhci, ep_ring, cur_td);
726 remove_finished_td:
727 		/*
728 		 * The event handler won't see a completion for this TD anymore,
729 		 * so remove it from the endpoint ring's TD list.  Keep it in
730 		 * the cancelled TD list for URB completion later.
731 		 */
732 		list_del(&cur_td->td_list);
733 	}
734 	last_unlinked_td = cur_td;
735 	xhci_stop_watchdog_timer_in_irq(xhci, ep);
736 
737 	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
738 	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
739 		xhci_queue_new_dequeue_state(xhci,
740 				slot_id, ep_index,
741 				ep->stopped_td->urb->stream_id,
742 				&deq_state);
743 		xhci_ring_cmd_db(xhci);
744 	} else {
745 		/* Otherwise ring the doorbell(s) to restart queued transfers */
746 		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
747 	}
748 	ep->stopped_td = NULL;
749 	ep->stopped_trb = NULL;
750 
751 	/*
752 	 * Drop the lock and complete the URBs in the cancelled TD list.
753 	 * New TDs to be cancelled might be added to the end of the list before
754 	 * we can complete all the URBs for the TDs we already unlinked.
755 	 * So stop when we've completed the URB for the last TD we unlinked.
756 	 */
757 	do {
758 		cur_td = list_entry(ep->cancelled_td_list.next,
759 				struct xhci_td, cancelled_td_list);
760 		list_del(&cur_td->cancelled_td_list);
761 
762 		/* Clean up the cancelled URB */
763 		/* Doesn't matter what we pass for status, since the core will
764 		 * just overwrite it (because the URB has been unlinked).
765 		 */
766 		xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled");
767 
768 		/* Stop processing the cancelled list if the watchdog timer is
769 		 * running.
770 		 */
771 		if (xhci->xhc_state & XHCI_STATE_DYING)
772 			return;
773 	} while (cur_td != last_unlinked_td);
774 
775 	/* Return to the event handler with xhci->lock re-acquired */
776 }
777 
778 /* Watchdog timer function for when a stop endpoint command fails to complete.
779  * In this case, we assume the host controller is broken or dying or dead.  The
780  * host may still be completing some other events, so we have to be careful to
781  * let the event ring handler and the URB dequeueing/enqueueing functions know
782  * through xhci->xhc_state.
783  *
784  * The timer may also fire if the host takes a very long time to respond to the
785  * command, and the stop endpoint command completion handler cannot delete the
786  * timer before the timer function is called.  Another endpoint cancellation may
787  * sneak in before the timer function can grab the lock, and that may queue
788  * another stop endpoint command and add the timer back.  So we cannot use a
789  * simple flag to say whether there is a pending stop endpoint command for a
790  * particular endpoint.
791  *
792  * Instead we use a combination of that flag and a counter for the number of
793  * pending stop endpoint commands.  If the timer is the tail end of the last
794  * stop endpoint command, and the endpoint's command is still pending, we assume
795  * the host is dying.
796  */
797 void xhci_stop_endpoint_command_watchdog(unsigned long arg)
798 {
799 	struct xhci_hcd *xhci;
800 	struct xhci_virt_ep *ep;
801 	struct xhci_virt_ep *temp_ep;
802 	struct xhci_ring *ring;
803 	struct xhci_td *cur_td;
804 	int ret, i, j;
805 
806 	ep = (struct xhci_virt_ep *) arg;
807 	xhci = ep->xhci;
808 
809 	spin_lock(&xhci->lock);
810 
811 	ep->stop_cmds_pending--;
812 	if (xhci->xhc_state & XHCI_STATE_DYING) {
813 		xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
814 				"xHCI as DYING, exiting.\n");
815 		spin_unlock(&xhci->lock);
816 		return;
817 	}
818 	if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
819 		xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
820 				"exiting.\n");
821 		spin_unlock(&xhci->lock);
822 		return;
823 	}
824 
825 	xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
826 	xhci_warn(xhci, "Assuming host is dying, halting host.\n");
827 	/* Oops, HC is dead or dying or at least not responding to the stop
828 	 * endpoint command.
829 	 */
830 	xhci->xhc_state |= XHCI_STATE_DYING;
831 	/* Disable interrupts from the host controller and start halting it */
832 	xhci_quiesce(xhci);
833 	spin_unlock(&xhci->lock);
834 
835 	ret = xhci_halt(xhci);
836 
837 	spin_lock(&xhci->lock);
838 	if (ret < 0) {
839 		/* This is bad; the host is not responding to commands and it's
840 		 * not allowing itself to be halted.  At least interrupts are
841 		 * disabled. If we call usb_hc_died(), it will attempt to
842 		 * disconnect all device drivers under this host.  Those
843 		 * disconnect() methods will wait for all URBs to be unlinked,
844 		 * so we must complete them.
845 		 */
846 		xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
847 		xhci_warn(xhci, "Completing active URBs anyway.\n");
848 		/* We could turn all TDs on the rings to no-ops.  This won't
849 		 * help if the host has cached part of the ring, and is slow if
850 		 * we want to preserve the cycle bit.  Skip it and hope the host
851 		 * doesn't touch the memory.
852 		 */
853 	}
854 	for (i = 0; i < MAX_HC_SLOTS; i++) {
855 		if (!xhci->devs[i])
856 			continue;
857 		for (j = 0; j < 31; j++) {
858 			temp_ep = &xhci->devs[i]->eps[j];
859 			ring = temp_ep->ring;
860 			if (!ring)
861 				continue;
862 			xhci_dbg(xhci, "Killing URBs for slot ID %u, "
863 					"ep index %u\n", i, j);
864 			while (!list_empty(&ring->td_list)) {
865 				cur_td = list_first_entry(&ring->td_list,
866 						struct xhci_td,
867 						td_list);
868 				list_del(&cur_td->td_list);
869 				if (!list_empty(&cur_td->cancelled_td_list))
870 					list_del(&cur_td->cancelled_td_list);
871 				xhci_giveback_urb_in_irq(xhci, cur_td,
872 						-ESHUTDOWN, "killed");
873 			}
874 			while (!list_empty(&temp_ep->cancelled_td_list)) {
875 				cur_td = list_first_entry(
876 						&temp_ep->cancelled_td_list,
877 						struct xhci_td,
878 						cancelled_td_list);
879 				list_del(&cur_td->cancelled_td_list);
880 				xhci_giveback_urb_in_irq(xhci, cur_td,
881 						-ESHUTDOWN, "killed");
882 			}
883 		}
884 	}
885 	spin_unlock(&xhci->lock);
886 	xhci_dbg(xhci, "Calling usb_hc_died()\n");
887 	usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
888 	xhci_dbg(xhci, "xHCI host controller is dead.\n");
889 }
890 
891 /*
892  * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
893  * we need to clear the set deq pending flag in the endpoint ring state, so that
894  * the TD queueing code can ring the doorbell again.  We also need to ring the
895  * endpoint doorbell to restart the ring, but only if there aren't more
896  * cancellations pending.
897  */
898 static void handle_set_deq_completion(struct xhci_hcd *xhci,
899 		struct xhci_event_cmd *event,
900 		union xhci_trb *trb)
901 {
902 	unsigned int slot_id;
903 	unsigned int ep_index;
904 	unsigned int stream_id;
905 	struct xhci_ring *ep_ring;
906 	struct xhci_virt_device *dev;
907 	struct xhci_ep_ctx *ep_ctx;
908 	struct xhci_slot_ctx *slot_ctx;
909 
910 	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
911 	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
912 	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
913 	dev = xhci->devs[slot_id];
914 
915 	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
916 	if (!ep_ring) {
917 		xhci_warn(xhci, "WARN Set TR deq ptr command for "
918 				"freed stream ID %u\n",
919 				stream_id);
920 		/* XXX: Harmless??? */
921 		dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
922 		return;
923 	}
924 
925 	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
926 	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
927 
928 	if (GET_COMP_CODE(le32_to_cpu(event->status)) != COMP_SUCCESS) {
929 		unsigned int ep_state;
930 		unsigned int slot_state;
931 
932 		switch (GET_COMP_CODE(le32_to_cpu(event->status))) {
933 		case COMP_TRB_ERR:
934 			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
935 					"of stream ID configuration\n");
936 			break;
937 		case COMP_CTX_STATE:
938 			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
939 					"to incorrect slot or ep state.\n");
940 			ep_state = le32_to_cpu(ep_ctx->ep_info);
941 			ep_state &= EP_STATE_MASK;
942 			slot_state = le32_to_cpu(slot_ctx->dev_state);
943 			slot_state = GET_SLOT_STATE(slot_state);
944 			xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
945 					slot_state, ep_state);
946 			break;
947 		case COMP_EBADSLT:
948 			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
949 					"slot %u was not enabled.\n", slot_id);
950 			break;
951 		default:
952 			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
953 					"completion code of %u.\n",
954 				  GET_COMP_CODE(le32_to_cpu(event->status)));
955 			break;
956 		}
957 		/* OK what do we do now?  The endpoint state is hosed, and we
958 		 * should never get to this point if the synchronization between
959 		 * queueing and endpoint state is correct.  This might happen
960 		 * if the device gets disconnected after we've finished
961 		 * cancelling URBs, which might not be an error...
962 		 */
963 	} else {
964 		xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
965 			 le64_to_cpu(ep_ctx->deq));
966 		if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg,
967 					 dev->eps[ep_index].queued_deq_ptr) ==
968 		    (le64_to_cpu(ep_ctx->deq) & ~(EP_CTX_CYCLE_MASK))) {
969 			/* Update the ring's dequeue segment and dequeue pointer
970 			 * to reflect the new position.
971 			 */
972 			ep_ring->deq_seg = dev->eps[ep_index].queued_deq_seg;
973 			ep_ring->dequeue = dev->eps[ep_index].queued_deq_ptr;
974 		} else {
975 			xhci_warn(xhci, "Mismatch between completed Set TR Deq "
976 					"Ptr command & xHCI internal state.\n");
977 			xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
978 					dev->eps[ep_index].queued_deq_seg,
979 					dev->eps[ep_index].queued_deq_ptr);
980 		}
981 	}
982 
983 	dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
984 	dev->eps[ep_index].queued_deq_seg = NULL;
985 	dev->eps[ep_index].queued_deq_ptr = NULL;
986 	/* Restart any rings with pending URBs */
987 	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
988 }
989 
990 static void handle_reset_ep_completion(struct xhci_hcd *xhci,
991 		struct xhci_event_cmd *event,
992 		union xhci_trb *trb)
993 {
994 	int slot_id;
995 	unsigned int ep_index;
996 
997 	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
998 	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
999 	/* This command will only fail if the endpoint wasn't halted,
1000 	 * but we don't care.
1001 	 */
1002 	xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
1003 		 (unsigned int) GET_COMP_CODE(le32_to_cpu(event->status)));
1004 
1005 	/* HW with the reset endpoint quirk needs to have a configure endpoint
1006 	 * command complete before the endpoint can be used.  Queue that here
1007 	 * because the HW can't handle two commands being queued in a row.
1008 	 */
1009 	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
1010 		xhci_dbg(xhci, "Queueing configure endpoint command\n");
1011 		xhci_queue_configure_endpoint(xhci,
1012 				xhci->devs[slot_id]->in_ctx->dma, slot_id,
1013 				false);
1014 		xhci_ring_cmd_db(xhci);
1015 	} else {
1016 		/* Clear our internal halted state and restart the ring(s) */
1017 		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
1018 		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1019 	}
1020 }
1021 
1022 /* Check to see if a command in the device's command queue matches this one.
1023  * Signal the completion or free the command, and return 1.  Return 0 if the
1024  * completed command isn't at the head of the command list.
1025  */
1026 static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
1027 		struct xhci_virt_device *virt_dev,
1028 		struct xhci_event_cmd *event)
1029 {
1030 	struct xhci_command *command;
1031 
1032 	if (list_empty(&virt_dev->cmd_list))
1033 		return 0;
1034 
1035 	command = list_entry(virt_dev->cmd_list.next,
1036 			struct xhci_command, cmd_list);
1037 	if (xhci->cmd_ring->dequeue != command->command_trb)
1038 		return 0;
1039 
1040 	command->status = GET_COMP_CODE(le32_to_cpu(event->status));
1041 	list_del(&command->cmd_list);
1042 	if (command->completion)
1043 		complete(command->completion);
1044 	else
1045 		xhci_free_command(xhci, command);
1046 	return 1;
1047 }
1048 
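/*
 * Editorial sketch of the submit side this helper pairs with: a caller
 * allocates a struct xhci_command, records the enqueue position, links it
 * on the device's cmd_list, queues the command TRB, and sleeps; the
 * handler above completes it when that TRB reaches the head of the
 * command ring.  The actual queueing call (which depends on the command
 * type) is elided; see xhci_configure_endpoint() in xhci.c for the real
 * pattern.  Illustrative only.
 */
static int __maybe_unused example_submit_and_wait(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev)
{
	struct xhci_command *command;
	u32 status;

	command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
	if (!command)
		return -ENOMEM;

	spin_lock_irq(&xhci->lock);
	command->command_trb = xhci->cmd_ring->enqueue;
	list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
	/* ... queue the command TRB and ring the doorbell here ... */
	spin_unlock_irq(&xhci->lock);

	wait_for_completion(command->completion);
	status = command->status;
	xhci_free_command(xhci, command);
	return status == COMP_SUCCESS ? 0 : -EINVAL;
}
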
1049 static void handle_cmd_completion(struct xhci_hcd *xhci,
1050 		struct xhci_event_cmd *event)
1051 {
1052 	int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1053 	u64 cmd_dma;
1054 	dma_addr_t cmd_dequeue_dma;
1055 	struct xhci_input_control_ctx *ctrl_ctx;
1056 	struct xhci_virt_device *virt_dev;
1057 	unsigned int ep_index;
1058 	struct xhci_ring *ep_ring;
1059 	unsigned int ep_state;
1060 
1061 	cmd_dma = le64_to_cpu(event->cmd_trb);
1062 	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
1063 			xhci->cmd_ring->dequeue);
1064 	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
1065 	if (cmd_dequeue_dma == 0) {
1066 		xhci->error_bitmask |= 1 << 4;
1067 		return;
1068 	}
1069 	/* Does the DMA address match our internal dequeue pointer address? */
1070 	if (cmd_dma != (u64) cmd_dequeue_dma) {
1071 		xhci->error_bitmask |= 1 << 5;
1072 		return;
1073 	}
1074 	switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
1075 		& TRB_TYPE_BITMASK) {
1076 	case TRB_TYPE(TRB_ENABLE_SLOT):
1077 		if (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_SUCCESS)
1078 			xhci->slot_id = slot_id;
1079 		else
1080 			xhci->slot_id = 0;
1081 		complete(&xhci->addr_dev);
1082 		break;
1083 	case TRB_TYPE(TRB_DISABLE_SLOT):
1084 		if (xhci->devs[slot_id]) {
1085 			if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
1086 				/* Delete default control endpoint resources */
1087 				xhci_free_device_endpoint_resources(xhci,
1088 						xhci->devs[slot_id], true);
1089 			xhci_free_virt_device(xhci, slot_id);
1090 		}
1091 		break;
1092 	case TRB_TYPE(TRB_CONFIG_EP):
1093 		virt_dev = xhci->devs[slot_id];
1094 		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
1095 			break;
1096 		/*
1097 		 * Configure endpoint commands can come from the USB core
1098 		 * configuration or alt setting changes, or because the HW
1099 		 * needed an extra configure endpoint command after a reset
1100 		 * endpoint command or streams were being configured.
1101 		 * If the command was for a halted endpoint, the xHCI driver
1102 		 * is not waiting on the configure endpoint command.
1103 		 */
1104 		ctrl_ctx = xhci_get_input_control_ctx(xhci,
1105 				virt_dev->in_ctx);
1106 		/* Input ctx add_flags are the endpoint index plus one */
1107 		ep_index = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags)) - 1;
1108 		/* A usb_set_interface() call directly after clearing a halted
1109 		 * condition may race on this quirky hardware.  Not worth
1110 		 * worrying about, since this is prototype hardware.  Not sure
1111 		 * if this will work for streams, but streams support was
1112 		 * untested on this prototype.
1113 		 */
1114 		if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
1115 				ep_index != (unsigned int) -1 &&
1116 		    le32_to_cpu(ctrl_ctx->add_flags) - SLOT_FLAG ==
1117 		    le32_to_cpu(ctrl_ctx->drop_flags)) {
1118 			ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
1119 			ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
1120 			if (!(ep_state & EP_HALTED))
1121 				goto bandwidth_change;
1122 			xhci_dbg(xhci, "Completed config ep cmd - "
1123 					"last ep index = %d, state = %d\n",
1124 					ep_index, ep_state);
1125 			/* Clear internal halted state and restart ring(s) */
1126 			xhci->devs[slot_id]->eps[ep_index].ep_state &=
1127 				~EP_HALTED;
1128 			ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1129 			break;
1130 		}
1131 bandwidth_change:
1132 		xhci_dbg(xhci, "Completed config ep cmd\n");
1133 		xhci->devs[slot_id]->cmd_status =
1134 			GET_COMP_CODE(le32_to_cpu(event->status));
1135 		complete(&xhci->devs[slot_id]->cmd_completion);
1136 		break;
1137 	case TRB_TYPE(TRB_EVAL_CONTEXT):
1138 		virt_dev = xhci->devs[slot_id];
1139 		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
1140 			break;
1141 		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
1142 		complete(&xhci->devs[slot_id]->cmd_completion);
1143 		break;
1144 	case TRB_TYPE(TRB_ADDR_DEV):
1145 		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
1146 		complete(&xhci->addr_dev);
1147 		break;
1148 	case TRB_TYPE(TRB_STOP_RING):
1149 		handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue, event);
1150 		break;
1151 	case TRB_TYPE(TRB_SET_DEQ):
1152 		handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
1153 		break;
1154 	case TRB_TYPE(TRB_CMD_NOOP):
1155 		break;
1156 	case TRB_TYPE(TRB_RESET_EP):
1157 		handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
1158 		break;
1159 	case TRB_TYPE(TRB_RESET_DEV):
1160 		xhci_dbg(xhci, "Completed reset device command.\n");
1161 		slot_id = TRB_TO_SLOT_ID(
1162 			le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
1163 		virt_dev = xhci->devs[slot_id];
1164 		if (virt_dev)
1165 			handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
1166 		else
1167 			xhci_warn(xhci, "Reset device command completion "
1168 					"for disabled slot %u\n", slot_id);
1169 		break;
1170 	case TRB_TYPE(TRB_NEC_GET_FW):
1171 		if (!(xhci->quirks & XHCI_NEC_HOST)) {
1172 			xhci->error_bitmask |= 1 << 6;
1173 			break;
1174 		}
1175 		xhci_dbg(xhci, "NEC firmware version %2x.%02x\n",
1176 			 NEC_FW_MAJOR(le32_to_cpu(event->status)),
1177 			 NEC_FW_MINOR(le32_to_cpu(event->status)));
1178 		break;
1179 	default:
1180 		/* Skip over unknown commands on the event ring */
1181 		xhci->error_bitmask |= 1 << 6;
1182 		break;
1183 	}
1184 	inc_deq(xhci, xhci->cmd_ring, false);
1185 }
1186 
1187 static void handle_vendor_event(struct xhci_hcd *xhci,
1188 		union xhci_trb *event)
1189 {
1190 	u32 trb_type;
1191 
1192 	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
1193 	xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
1194 	if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
1195 		handle_cmd_completion(xhci, &event->event_cmd);
1196 }
1197 
1198 /* @port_id: the one-based port ID from the hardware (indexed from array of all
1199  * port registers -- USB 3.0 and USB 2.0).
1200  *
1201  * Returns a zero-based port number, which is suitable for indexing into each of
1202  * the split roothubs' port arrays and bus state arrays.
1203  */
1204 static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
1205 		struct xhci_hcd *xhci, u32 port_id)
1206 {
1207 	unsigned int i;
1208 	unsigned int num_similar_speed_ports = 0;
1209 
1210 	/* port_id from the hardware is 1-based, but port_array[], usb3_ports[],
1211 	 * and usb2_ports are 0-based indexes.  Count the number of
1212 	 * similar-speed ports that come before this one (excluding it).
1213 	 */
1214 	for (i = 0; i < (port_id - 1); i++) {
1215 		u8 port_speed = xhci->port_array[i];
1216 
1217 		/*
1218 		 * Skip ports that don't have known speeds, or have duplicate
1219 		 * Extended Capabilities port speed entries.
1220 		 */
1221 		if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
1222 			continue;
1223 
1224 		/*
1225 		 * USB 3.0 ports are always under a USB 3.0 hub.  USB 2.0 and
1226 		 * 1.1 ports are under the USB 2.0 hub.  If the port speed
1227 		 * matches the device speed, it's a similar speed port.
1228 		 */
1229 		if ((port_speed == 0x03) == (hcd->speed == HCD_USB3))
1230 			num_similar_speed_ports++;
1231 	}
1232 	return num_similar_speed_ports;
1233 }
1234 
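/*
 * Editorial worked example: with a hypothetical port_array of
 * { 0x02, 0x03, 0x02, 0x03 }, hardware port ID 4 is a USB 3.0 port and
 * exactly one earlier entry (port ID 2) is also USB 3.0, so the function
 * above returns faked port index 1 on the USB 3.0 roothub.
 */
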
1235 static void handle_port_status(struct xhci_hcd *xhci,
1236 		union xhci_trb *event)
1237 {
1238 	struct usb_hcd *hcd;
1239 	u32 port_id;
1240 	u32 temp, temp1;
1241 	int max_ports;
1242 	int slot_id;
1243 	unsigned int faked_port_index;
1244 	u8 major_revision;
1245 	struct xhci_bus_state *bus_state;
1246 	__le32 __iomem **port_array;
1247 	bool bogus_port_status = false;
1248 
1249 	/* Port status change events always have a successful completion code */
1250 	if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
1251 		xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
1252 		xhci->error_bitmask |= 1 << 8;
1253 	}
1254 	port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
1255 	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
1256 
1257 	max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1258 	if ((port_id <= 0) || (port_id > max_ports)) {
1259 		xhci_warn(xhci, "Invalid port id %d\n", port_id);
1260 		bogus_port_status = true;
1261 		goto cleanup;
1262 	}
1263 
1264 	/* Figure out which usb_hcd this port is attached to:
1265 	 * is it a USB 3.0 port or a USB 2.0/1.1 port?
1266 	 */
1267 	major_revision = xhci->port_array[port_id - 1];
1268 	if (major_revision == 0) {
1269 		xhci_warn(xhci, "Event for port %u not in "
1270 				"Extended Capabilities, ignoring.\n",
1271 				port_id);
1272 		bogus_port_status = true;
1273 		goto cleanup;
1274 	}
1275 	if (major_revision == DUPLICATE_ENTRY) {
1276 		xhci_warn(xhci, "Event for port %u duplicated in "
1277 				"Extended Capabilities, ignoring.\n",
1278 				port_id);
1279 		bogus_port_status = true;
1280 		goto cleanup;
1281 	}
1282 
1283 	/*
1284 	 * Hardware port IDs reported by a Port Status Change Event include USB
1285 	 * 3.0 and USB 2.0 ports.  We want to check if the port has reported a
1286 	 * resume event, but we first need to translate the hardware port ID
1287 	 * into the index into the ports on the correct split roothub, and the
1288 	 * correct bus_state structure.
1289 	 */
1290 	/* Find the right roothub. */
1291 	hcd = xhci_to_hcd(xhci);
1292 	if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
1293 		hcd = xhci->shared_hcd;
1294 	bus_state = &xhci->bus_state[hcd_index(hcd)];
1295 	if (hcd->speed == HCD_USB3)
1296 		port_array = xhci->usb3_ports;
1297 	else
1298 		port_array = xhci->usb2_ports;
1299 	/* Find the faked port hub number */
1300 	faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
1301 			port_id);
1302 
1303 	temp = xhci_readl(xhci, port_array[faked_port_index]);
1304 	if (hcd->state == HC_STATE_SUSPENDED) {
1305 		xhci_dbg(xhci, "resume root hub\n");
1306 		usb_hcd_resume_root_hub(hcd);
1307 	}
1308 
1309 	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
1310 		xhci_dbg(xhci, "port resume event for port %d\n", port_id);
1311 
1312 		temp1 = xhci_readl(xhci, &xhci->op_regs->command);
1313 		if (!(temp1 & CMD_RUN)) {
1314 			xhci_warn(xhci, "xHC is not running.\n");
1315 			goto cleanup;
1316 		}
1317 
1318 		if (DEV_SUPERSPEED(temp)) {
1319 			xhci_dbg(xhci, "resume SS port %d\n", port_id);
1320 			temp = xhci_port_state_to_neutral(temp);
1321 			temp &= ~PORT_PLS_MASK;
1322 			temp |= PORT_LINK_STROBE | XDEV_U0;
1323 			xhci_writel(xhci, temp, port_array[faked_port_index]);
1324 			slot_id = xhci_find_slot_id_by_port(hcd, xhci,
1325 					faked_port_index);
1326 			if (!slot_id) {
1327 				xhci_dbg(xhci, "slot_id is zero\n");
1328 				goto cleanup;
1329 			}
1330 			xhci_ring_device(xhci, slot_id);
1331 			xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
1332 			/* Clear PORT_PLC */
1333 			temp = xhci_readl(xhci, port_array[faked_port_index]);
1334 			temp = xhci_port_state_to_neutral(temp);
1335 			temp |= PORT_PLC;
1336 			xhci_writel(xhci, temp, port_array[faked_port_index]);
1337 		} else {
1338 			xhci_dbg(xhci, "resume HS port %d\n", port_id);
1339 			bus_state->resume_done[faked_port_index] = jiffies +
1340 				msecs_to_jiffies(20);
1341 			mod_timer(&hcd->rh_timer,
1342 				  bus_state->resume_done[faked_port_index]);
1343 			/* Do the rest in GetPortStatus */
1344 		}
1345 	}
1346 
1347 cleanup:
1348 	/* Update event ring dequeue pointer before dropping the lock */
1349 	inc_deq(xhci, xhci->event_ring, true);
1350 
1351 	/* Don't make the USB core poll the roothub if we got a bad port status
1352 	 * change event.  Besides, at that point we can't tell which roothub
1353 	 * (USB 2.0 or USB 3.0) to kick.
1354 	 */
1355 	if (bogus_port_status)
1356 		return;
1357 
1358 	spin_unlock(&xhci->lock);
1359 	/* Pass this up to the core */
1360 	usb_hcd_poll_rh_status(hcd);
1361 	spin_lock(&xhci->lock);
1362 }
1363 
1364 /*
1365  * This TD is defined by the TRBs starting at start_trb in start_seg and ending
1366  * at end_trb, which may be in another segment.  If the suspect DMA address is a
1367  * TRB in this TD, this function returns that TRB's segment.  Otherwise it
1368  * returns NULL.
1369  */
1370 struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
1371 		union xhci_trb	*start_trb,
1372 		union xhci_trb	*end_trb,
1373 		dma_addr_t	suspect_dma)
1374 {
1375 	dma_addr_t start_dma;
1376 	dma_addr_t end_seg_dma;
1377 	dma_addr_t end_trb_dma;
1378 	struct xhci_segment *cur_seg;
1379 
1380 	start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
1381 	cur_seg = start_seg;
1382 
1383 	do {
1384 		if (start_dma == 0)
1385 			return NULL;
1386 		/* We may get an event for a Link TRB in the middle of a TD */
1387 		end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
1388 				&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
1389 		/* If the end TRB isn't in this segment, this is set to 0 */
1390 		end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
1391 
1392 		if (end_trb_dma > 0) {
1393 			/* The end TRB is in this segment, so suspect should be here */
1394 			if (start_dma <= end_trb_dma) {
1395 				if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
1396 					return cur_seg;
1397 			} else {
1398 				/* Case for one segment with
1399 				 * a TD wrapped around to the top
1400 				 */
1401 				if ((suspect_dma >= start_dma &&
1402 							suspect_dma <= end_seg_dma) ||
1403 						(suspect_dma >= cur_seg->dma &&
1404 						 suspect_dma <= end_trb_dma))
1405 					return cur_seg;
1406 			}
1407 			return NULL;
1408 		} else {
1409 			/* Might still be somewhere in this segment */
1410 			if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
1411 				return cur_seg;
1412 		}
1413 		cur_seg = cur_seg->next;
1414 		start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
1415 	} while (cur_seg != start_seg);
1416 
1417 	return NULL;
1418 }
1419 
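/*
 * Editorial sketch: how a transfer-event handler can use trb_in_td() to
 * decide whether an event's DMA pointer landed inside a given TD (the
 * real lookup lives in the transfer event handling later in this file).
 */
static bool __maybe_unused example_event_hit_td(struct xhci_td *td,
		dma_addr_t event_dma)
{
	return trb_in_td(td->start_seg, td->first_trb, td->last_trb,
			event_dma) != NULL;
}
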
1420 static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
1421 		unsigned int slot_id, unsigned int ep_index,
1422 		unsigned int stream_id,
1423 		struct xhci_td *td, union xhci_trb *event_trb)
1424 {
1425 	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
1426 	ep->ep_state |= EP_HALTED;
1427 	ep->stopped_td = td;
1428 	ep->stopped_trb = event_trb;
1429 	ep->stopped_stream = stream_id;
1430 
1431 	xhci_queue_reset_ep(xhci, slot_id, ep_index);
1432 	xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
1433 
1434 	ep->stopped_td = NULL;
1435 	ep->stopped_trb = NULL;
1436 	ep->stopped_stream = 0;
1437 
1438 	xhci_ring_cmd_db(xhci);
1439 }
1440 
1441 /* Check if an error has halted the endpoint ring.  The class driver will
1442  * clean up the halt for a non-default control endpoint if we indicate a stall.
1443  * However, a babble and other errors also halt the endpoint ring, and the class
1444  * driver won't clear the halt in that case, so we need to issue a Set Transfer
1445  * Ring Dequeue Pointer command manually.
1446  */
1447 static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
1448 		struct xhci_ep_ctx *ep_ctx,
1449 		unsigned int trb_comp_code)
1450 {
1451 	/* TRB completion codes that may require a manual halt cleanup */
1452 	if (trb_comp_code == COMP_TX_ERR ||
1453 			trb_comp_code == COMP_BABBLE ||
1454 			trb_comp_code == COMP_SPLIT_ERR)
1455 		/* The 0.95 spec says a babbling control endpoint
1456 		 * is not halted. The 0.96 spec says it is.  Some HW
1457 		 * claims to be 0.95 compliant, but it halts the control
1458 		 * endpoint anyway.  Check if a babble halted the
1459 		 * endpoint.
1460 		 */
1461 		if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) == EP_STATE_HALTED)
1462 			return 1;
1463 
1464 	return 0;
1465 }
1466 
1467 int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
1468 {
1469 	if (trb_comp_code >= 224 && trb_comp_code <= 255) {
1470 		/* Vendor defined "informational" completion code,
1471 		 * treat as not-an-error.
1472 		 */
1473 		xhci_dbg(xhci, "Vendor defined info completion code %u\n",
1474 				trb_comp_code);
1475 		xhci_dbg(xhci, "Treating code as success.\n");
1476 		return 1;
1477 	}
1478 	return 0;
1479 }
1480 
1481 /*
1482  * Finish the TD processing and remove the TD from the endpoint's TD list.
1483  * Returns 1 if the URB can be given back.
1484  */
1485 static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
1486 	union xhci_trb *event_trb, struct xhci_transfer_event *event,
1487 	struct xhci_virt_ep *ep, int *status, bool skip)
1488 {
1489 	struct xhci_virt_device *xdev;
1490 	struct xhci_ring *ep_ring;
1491 	unsigned int slot_id;
1492 	int ep_index;
1493 	struct urb *urb = NULL;
1494 	struct xhci_ep_ctx *ep_ctx;
1495 	int ret = 0;
1496 	struct urb_priv	*urb_priv;
1497 	u32 trb_comp_code;
1498 
1499 	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1500 	xdev = xhci->devs[slot_id];
1501 	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
1502 	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1503 	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
1504 	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1505 
1506 	if (skip)
1507 		goto td_cleanup;
1508 
1509 	if (trb_comp_code == COMP_STOP_INVAL ||
1510 			trb_comp_code == COMP_STOP) {
1511 		/* The Endpoint Stop Command completion will take care of any
1512 		 * stopped TDs.  A stopped TD may be restarted, so don't update
1513 		 * the ring dequeue pointer or take this TD off any lists yet.
1514 		 */
1515 		ep->stopped_td = td;
1516 		ep->stopped_trb = event_trb;
1517 		return 0;
1518 	} else {
1519 		if (trb_comp_code == COMP_STALL) {
1520 			/* The transfer is completed from the driver's
1521 			 * perspective, but we need to issue a set dequeue
1522 			 * command for this stalled endpoint to move the dequeue
1523 			 * pointer past the TD.  We can't do that here because
1524 			 * the halt condition must be cleared first.  Let the
1525 			 * USB class driver clear the stall later.
1526 			 */
1527 			ep->stopped_td = td;
1528 			ep->stopped_trb = event_trb;
1529 			ep->stopped_stream = ep_ring->stream_id;
1530 		} else if (xhci_requires_manual_halt_cleanup(xhci,
1531 					ep_ctx, trb_comp_code)) {
1532 			/* Other types of errors halt the endpoint, but the
1533 			 * class driver doesn't call usb_reset_endpoint() unless
1534 			 * the error is -EPIPE.  Clear the halted status in the
1535 			 * xHCI hardware manually.
1536 			 */
1537 			xhci_cleanup_halted_endpoint(xhci,
1538 					slot_id, ep_index, ep_ring->stream_id,
1539 					td, event_trb);
1540 		} else {
1541 			/* Update ring dequeue pointer */
1542 			while (ep_ring->dequeue != td->last_trb)
1543 				inc_deq(xhci, ep_ring, false);
1544 			inc_deq(xhci, ep_ring, false);
1545 		}
1546 
1547 td_cleanup:
1548 		/* Clean up the endpoint's TD list */
1549 		urb = td->urb;
1550 		urb_priv = urb->hcpriv;
1551 
1552 		/* Do one last check of the actual transfer length.
1553 		 * If the host controller said we transferred more data than
1554 		 * the buffer length, urb->actual_length will be a very big
1555 		 * number (since it's unsigned).  Play it safe and say we didn't
1556 		 * transfer anything.
1557 		 */
1558 		if (urb->actual_length > urb->transfer_buffer_length) {
1559 			xhci_warn(xhci, "URB transfer length is wrong, "
1560 					"xHC issue? req. len = %u, "
1561 					"act. len = %u\n",
1562 					urb->transfer_buffer_length,
1563 					urb->actual_length);
1564 			urb->actual_length = 0;
1565 			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1566 				*status = -EREMOTEIO;
1567 			else
1568 				*status = 0;
1569 		}
1570 		list_del(&td->td_list);
1571 		/* Was this TD slated to be cancelled but completed anyway? */
1572 		if (!list_empty(&td->cancelled_td_list))
1573 			list_del(&td->cancelled_td_list);
1574 
1575 		urb_priv->td_cnt++;
1576 		/* Give back the urb when all the tds are completed */
1577 		if (urb_priv->td_cnt == urb_priv->length) {
1578 			ret = 1;
1579 			if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
1580 				xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
1581 				if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs
1582 					== 0) {
1583 					if (xhci->quirks & XHCI_AMD_PLL_FIX)
1584 						usb_amd_quirk_pll_enable();
1585 				}
1586 			}
1587 		}
1588 	}
1589 
1590 	return ret;
1591 }
1592 
1593 /*
1594  * Process control tds, update urb status and actual_length.
1595  */
1596 static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
1597 	union xhci_trb *event_trb, struct xhci_transfer_event *event,
1598 	struct xhci_virt_ep *ep, int *status)
1599 {
1600 	struct xhci_virt_device *xdev;
1601 	struct xhci_ring *ep_ring;
1602 	unsigned int slot_id;
1603 	int ep_index;
1604 	struct xhci_ep_ctx *ep_ctx;
1605 	u32 trb_comp_code;
1606 
1607 	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1608 	xdev = xhci->devs[slot_id];
1609 	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
1610 	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1611 	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
1612 	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1613 
1614 	xhci_debug_trb(xhci, xhci->event_ring->dequeue);
1615 	switch (trb_comp_code) {
1616 	case COMP_SUCCESS:
1617 		if (event_trb == ep_ring->dequeue) {
1618 			xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
1619 					"without IOC set??\n");
1620 			*status = -ESHUTDOWN;
1621 		} else if (event_trb != td->last_trb) {
1622 			xhci_warn(xhci, "WARN: Success on ctrl data TRB "
1623 					"without IOC set??\n");
1624 			*status = -ESHUTDOWN;
1625 		} else {
1626 			*status = 0;
1627 		}
1628 		break;
1629 	case COMP_SHORT_TX:
1630 		xhci_warn(xhci, "WARN: short transfer on control ep\n");
1631 		if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1632 			*status = -EREMOTEIO;
1633 		else
1634 			*status = 0;
1635 		break;
1636 	case COMP_STOP_INVAL:
1637 	case COMP_STOP:
1638 		return finish_td(xhci, td, event_trb, event, ep, status, false);
1639 	default:
1640 		if (!xhci_requires_manual_halt_cleanup(xhci,
1641 					ep_ctx, trb_comp_code))
1642 			break;
1643 		xhci_dbg(xhci, "TRB error code %u, "
1644 				"halted endpoint index = %u\n",
1645 				trb_comp_code, ep_index);
1646 		/* else fall through */
1647 	case COMP_STALL:
1648 		/* Did we transfer part of the data (middle) phase? */
1649 		if (event_trb != ep_ring->dequeue &&
1650 				event_trb != td->last_trb)
1651 			td->urb->actual_length =
1652 				td->urb->transfer_buffer_length
1653 				- TRB_LEN(le32_to_cpu(event->transfer_len));
1654 		else
1655 			td->urb->actual_length = 0;
1656 
1657 		xhci_cleanup_halted_endpoint(xhci,
1658 			slot_id, ep_index, 0, td, event_trb);
1659 		return finish_td(xhci, td, event_trb, event, ep, status, true);
1660 	}
1661 	/*
1662 	 * Did we transfer any data, despite the errors that might have
1663 	 * happened?  I.e. did we get past the setup stage?
1664 	 */
1665 	if (event_trb != ep_ring->dequeue) {
1666 		/* The event was for the status stage */
1667 		if (event_trb == td->last_trb) {
1668 			if (td->urb->actual_length != 0) {
1669 				/* Don't overwrite a previously set error code
1670 				 */
1671 				if ((*status == -EINPROGRESS || *status == 0) &&
1672 						(td->urb->transfer_flags
1673 						 & URB_SHORT_NOT_OK))
1674 					/* Did we already see a short data
1675 					 * stage? */
1676 					*status = -EREMOTEIO;
1677 			} else {
1678 				td->urb->actual_length =
1679 					td->urb->transfer_buffer_length;
1680 			}
1681 		} else {
1682 		/* Maybe the event was for the data stage? */
1683 			td->urb->actual_length =
1684 				td->urb->transfer_buffer_length -
1685 				TRB_LEN(le32_to_cpu(event->transfer_len));
1686 			xhci_dbg(xhci, "Waiting for status "
1687 					"stage event\n");
1688 			return 0;
1689 		}
1690 	}
1691 
1692 	return finish_td(xhci, td, event_trb, event, ep, status, false);
1693 }
1694 
1695 /*
1696  * Process isochronous tds, update urb packet status and actual_length.
1697  */
1698 static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
1699 	union xhci_trb *event_trb, struct xhci_transfer_event *event,
1700 	struct xhci_virt_ep *ep, int *status)
1701 {
1702 	struct xhci_ring *ep_ring;
1703 	struct urb_priv *urb_priv;
1704 	int idx;
1705 	int len = 0;
1706 	union xhci_trb *cur_trb;
1707 	struct xhci_segment *cur_seg;
1708 	struct usb_iso_packet_descriptor *frame;
1709 	u32 trb_comp_code;
1710 	bool skip_td = false;
1711 
1712 	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1713 	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1714 	urb_priv = td->urb->hcpriv;
1715 	idx = urb_priv->td_cnt;
1716 	frame = &td->urb->iso_frame_desc[idx];
1717 
1718 	/* handle completion code */
1719 	switch (trb_comp_code) {
1720 	case COMP_SUCCESS:
1721 		frame->status = 0;
1722 		break;
1723 	case COMP_SHORT_TX:
1724 		frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
1725 				-EREMOTEIO : 0;
1726 		break;
1727 	case COMP_BW_OVER:
1728 		frame->status = -ECOMM;
1729 		skip_td = true;
1730 		break;
1731 	case COMP_BUFF_OVER:
1732 	case COMP_BABBLE:
1733 		frame->status = -EOVERFLOW;
1734 		skip_td = true;
1735 		break;
1736 	case COMP_DEV_ERR:
1737 	case COMP_STALL:
1738 		frame->status = -EPROTO;
1739 		skip_td = true;
1740 		break;
1741 	case COMP_STOP:
1742 	case COMP_STOP_INVAL:
1743 		break;
1744 	default:
1745 		frame->status = -1;
1746 		break;
1747 	}
1748 
1749 	if (trb_comp_code == COMP_SUCCESS || skip_td) {
1750 		frame->actual_length = frame->length;
1751 		td->urb->actual_length += frame->length;
1752 	} else {
1753 		for (cur_trb = ep_ring->dequeue,
1754 		     cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
1755 		     next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
1756 			if ((le32_to_cpu(cur_trb->generic.field[3]) &
1757 			 TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
1758 			    (le32_to_cpu(cur_trb->generic.field[3]) &
1759 			 TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
1760 				len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
1761 		}
1762 		len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
1763 			TRB_LEN(le32_to_cpu(event->transfer_len));
1764 
1765 		if (trb_comp_code != COMP_STOP_INVAL) {
1766 			frame->actual_length = len;
1767 			td->urb->actual_length += len;
1768 		}
1769 	}
1770 
1771 	return finish_td(xhci, td, event_trb, event, ep, status, false);
1772 }
1773 
1774 static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
1775 			struct xhci_transfer_event *event,
1776 			struct xhci_virt_ep *ep, int *status)
1777 {
1778 	struct xhci_ring *ep_ring;
1779 	struct urb_priv *urb_priv;
1780 	struct usb_iso_packet_descriptor *frame;
1781 	int idx;
1782 
1783 	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1784 	urb_priv = td->urb->hcpriv;
1785 	idx = urb_priv->td_cnt;
1786 	frame = &td->urb->iso_frame_desc[idx];
1787 
1788 	/* The transfer is partly done. */
1789 	frame->status = -EXDEV;
1790 
1791 	/* calc actual length */
1792 	frame->actual_length = 0;
1793 
1794 	/* Update ring dequeue pointer */
1795 	while (ep_ring->dequeue != td->last_trb)
1796 		inc_deq(xhci, ep_ring, false);
1797 	inc_deq(xhci, ep_ring, false);
1798 
1799 	return finish_td(xhci, td, NULL, event, ep, status, true);
1800 }
1801 
1802 /*
1803  * Process bulk and interrupt tds, update urb status and actual_length.
1804  */
1805 static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
1806 	union xhci_trb *event_trb, struct xhci_transfer_event *event,
1807 	struct xhci_virt_ep *ep, int *status)
1808 {
1809 	struct xhci_ring *ep_ring;
1810 	union xhci_trb *cur_trb;
1811 	struct xhci_segment *cur_seg;
1812 	u32 trb_comp_code;
1813 
1814 	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1815 	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1816 
1817 	switch (trb_comp_code) {
1818 	case COMP_SUCCESS:
1819 		/* Double check that the HW transferred everything. */
1820 		if (event_trb != td->last_trb) {
1821 			xhci_warn(xhci, "WARN Successful completion "
1822 					"on short TX\n");
1823 			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1824 				*status = -EREMOTEIO;
1825 			else
1826 				*status = 0;
1827 		} else {
1828 			*status = 0;
1829 		}
1830 		break;
1831 	case COMP_SHORT_TX:
1832 		if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1833 			*status = -EREMOTEIO;
1834 		else
1835 			*status = 0;
1836 		break;
1837 	default:
1838 		/* Others already handled above */
1839 		break;
1840 	}
1841 	if (trb_comp_code == COMP_SHORT_TX)
1842 		xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
1843 				"%d bytes untransferred\n",
1844 				td->urb->ep->desc.bEndpointAddress,
1845 				td->urb->transfer_buffer_length,
1846 				TRB_LEN(le32_to_cpu(event->transfer_len)));
1847 	/* Fast path - was this the last TRB in the TD for this URB? */
1848 	if (event_trb == td->last_trb) {
1849 		if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
1850 			td->urb->actual_length =
1851 				td->urb->transfer_buffer_length -
1852 				TRB_LEN(le32_to_cpu(event->transfer_len));
1853 			if (td->urb->transfer_buffer_length <
1854 					td->urb->actual_length) {
1855 				xhci_warn(xhci, "HC gave bad length "
1856 						"of %d bytes left\n",
1857 					  TRB_LEN(le32_to_cpu(event->transfer_len)));
1858 				td->urb->actual_length = 0;
1859 				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1860 					*status = -EREMOTEIO;
1861 				else
1862 					*status = 0;
1863 			}
1864 			/* Don't overwrite a previously set error code */
1865 			if (*status == -EINPROGRESS) {
1866 				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1867 					*status = -EREMOTEIO;
1868 				else
1869 					*status = 0;
1870 			}
1871 		} else {
1872 			td->urb->actual_length =
1873 				td->urb->transfer_buffer_length;
1874 			/* Ignore a short packet completion if the
1875 			 * untransferred length was zero.
1876 			 */
1877 			if (*status == -EREMOTEIO)
1878 				*status = 0;
1879 		}
1880 	} else {
1881 		/* Slow path - walk the list, starting from the dequeue
1882 		 * pointer, to get the actual length transferred.
1883 		 */
1884 		td->urb->actual_length = 0;
1885 		for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
1886 				cur_trb != event_trb;
1887 				next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
1888 			if ((le32_to_cpu(cur_trb->generic.field[3]) &
1889 			 TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
1890 			    (le32_to_cpu(cur_trb->generic.field[3]) &
1891 			 TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
1892 				td->urb->actual_length +=
1893 					TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
1894 		}
1895 		/* If the ring didn't stop on a Link or No-op TRB, add
1896 		 * in the actual bytes transferred from the Normal TRB
1897 		 */
1898 		if (trb_comp_code != COMP_STOP_INVAL)
1899 			td->urb->actual_length +=
1900 				TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
1901 				TRB_LEN(le32_to_cpu(event->transfer_len));
1902 	}
1903 
1904 	return finish_td(xhci, td, event_trb, event, ep, status, false);
1905 }
1906 
1907 /*
1908  * If this function returns an error condition, it means it got a Transfer
1909  * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
1910  * At this point, the host controller is probably hosed and should be reset.
1911  */
1912 static int handle_tx_event(struct xhci_hcd *xhci,
1913 		struct xhci_transfer_event *event)
1914 {
1915 	struct xhci_virt_device *xdev;
1916 	struct xhci_virt_ep *ep;
1917 	struct xhci_ring *ep_ring;
1918 	unsigned int slot_id;
1919 	int ep_index;
1920 	struct xhci_td *td = NULL;
1921 	dma_addr_t event_dma;
1922 	struct xhci_segment *event_seg;
1923 	union xhci_trb *event_trb;
1924 	struct urb *urb = NULL;
1925 	int status = -EINPROGRESS;
1926 	struct urb_priv *urb_priv;
1927 	struct xhci_ep_ctx *ep_ctx;
1928 	u32 trb_comp_code;
1929 	int ret = 0;
1930 
1931 	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1932 	xdev = xhci->devs[slot_id];
1933 	if (!xdev) {
1934 		xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
1935 		return -ENODEV;
1936 	}
1937 
1938 	/* Endpoint ID is 1 based, our index is zero based */
1939 	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
1940 	ep = &xdev->eps[ep_index];
1941 	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1942 	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
1943 	if (!ep_ring ||
1944 	    (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
1945 	    EP_STATE_DISABLED) {
1946 		xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
1947 				"or incorrect stream ring\n");
1948 		return -ENODEV;
1949 	}
1950 
1951 	event_dma = le64_to_cpu(event->buffer);
1952 	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1953 	/* Look for common error cases */
1954 	switch (trb_comp_code) {
1955 	/* Skip codes that require special handling depending on
1956 	 * transfer type
1957 	 */
1958 	case COMP_SUCCESS:
1959 	case COMP_SHORT_TX:
1960 		break;
1961 	case COMP_STOP:
1962 		xhci_dbg(xhci, "Stopped on Transfer TRB\n");
1963 		break;
1964 	case COMP_STOP_INVAL:
1965 		xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
1966 		break;
1967 	case COMP_STALL:
1968 		xhci_warn(xhci, "WARN: Stalled endpoint\n");
1969 		ep->ep_state |= EP_HALTED;
1970 		status = -EPIPE;
1971 		break;
1972 	case COMP_TRB_ERR:
1973 		xhci_warn(xhci, "WARN: TRB error on endpoint\n");
1974 		status = -EILSEQ;
1975 		break;
1976 	case COMP_SPLIT_ERR:
1977 	case COMP_TX_ERR:
1978 		xhci_warn(xhci, "WARN: transfer error on endpoint\n");
1979 		status = -EPROTO;
1980 		break;
1981 	case COMP_BABBLE:
1982 		xhci_warn(xhci, "WARN: babble error on endpoint\n");
1983 		status = -EOVERFLOW;
1984 		break;
1985 	case COMP_DB_ERR:
1986 		xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
1987 		status = -ENOSR;
1988 		break;
1989 	case COMP_BW_OVER:
1990 		xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
1991 		break;
1992 	case COMP_BUFF_OVER:
1993 		xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
1994 		break;
1995 	case COMP_UNDERRUN:
1996 		/*
1997 		 * When the Isoch ring is empty, the xHC will generate
1998 		 * a Ring Overrun Event for IN Isoch endpoint or Ring
1999 		 * Underrun Event for OUT Isoch endpoint.
2000 		 */
2001 		xhci_dbg(xhci, "underrun event on endpoint\n");
2002 		if (!list_empty(&ep_ring->td_list))
2003 			xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
2004 					"still with TDs queued?\n",
2005 				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2006 				 ep_index);
2007 		goto cleanup;
2008 	case COMP_OVERRUN:
2009 		xhci_dbg(xhci, "overrun event on endpoint\n");
2010 		if (!list_empty(&ep_ring->td_list))
2011 			xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
2012 					"still with TDs queued?\n",
2013 				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2014 				 ep_index);
2015 		goto cleanup;
2016 	case COMP_DEV_ERR:
2017 		xhci_warn(xhci, "WARN: detected an incompatible device\n");
2018 		status = -EPROTO;
2019 		break;
2020 	case COMP_MISSED_INT:
2021 		/*
2022 		 * When a missed service error is encountered, one or more isoc
2023 		 * tds may have been missed by the xHC.
2024 		 * Set the skip flag of the ep_ring; complete the missed tds as
2025 		 * short transfers the next time the ep_ring is processed.
2026 		 */
2027 		ep->skip = true;
2028 		xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
2029 		goto cleanup;
2030 	default:
2031 		if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
2032 			status = 0;
2033 			break;
2034 		}
2035 		xhci_warn(xhci, "ERROR Unknown event condition, HC probably "
2036 				"busted\n");
2037 		goto cleanup;
2038 	}
2039 
2040 	do {
2041 		/* This TRB should be in the TD at the head of this ring's
2042 		 * TD list.
2043 		 */
2044 		if (list_empty(&ep_ring->td_list)) {
2045 			xhci_warn(xhci, "WARN Event TRB for slot %d ep %d "
2046 					"with no TDs queued?\n",
2047 				  TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2048 				  ep_index);
2049 			xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
2050 				 (unsigned int) (le32_to_cpu(event->flags)
2051 						 & TRB_TYPE_BITMASK)>>10);
2052 			xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
2053 			if (ep->skip) {
2054 				ep->skip = false;
2055 				xhci_dbg(xhci, "td_list is empty while skip "
2056 						"flag set. Clear skip flag.\n");
2057 			}
2058 			ret = 0;
2059 			goto cleanup;
2060 		}
2061 
2062 		td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
2063 
2064 		/* Is this a TRB in the currently executing TD? */
2065 		event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
2066 				td->last_trb, event_dma);
2067 
2068 		/*
2069 		 * Skip the Force Stopped Event.  The event_trb (event_dma) of an
2070 		 * FSE is not in the current TD pointed to by ep_ring->dequeue,
2071 		 * because the hardware dequeue pointer is still at the TRB before
2072 		 * the current TD.  That TRB may be a Link TRB or the last TRB of
2073 		 * the previous TD.  The command completion handler will take care
2074 		 * of the rest.
2075 		 */
2076 		if (!event_seg && trb_comp_code == COMP_STOP_INVAL) {
2077 			ret = 0;
2078 			goto cleanup;
2079 		}
2080 
2081 		if (!event_seg) {
2082 			if (!ep->skip ||
2083 			    !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
2084 				/* Some host controllers give a spurious
2085 				 * successful event after a short transfer.
2086 				 * Ignore it.
2087 				 */
2088 				if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
2089 						ep_ring->last_td_was_short) {
2090 					ep_ring->last_td_was_short = false;
2091 					ret = 0;
2092 					goto cleanup;
2093 				}
2094 				/* HC is busted, give up! */
2095 				xhci_err(xhci,
2096 					"ERROR Transfer event TRB DMA ptr not "
2097 					"part of current TD\n");
2098 				return -ESHUTDOWN;
2099 			}
2100 
2101 			ret = skip_isoc_td(xhci, td, event, ep, &status);
2102 			goto cleanup;
2103 		}
2104 		if (trb_comp_code == COMP_SHORT_TX)
2105 			ep_ring->last_td_was_short = true;
2106 		else
2107 			ep_ring->last_td_was_short = false;
2108 
2109 		if (ep->skip) {
2110 			xhci_dbg(xhci, "Found td. Clear skip flag.\n");
2111 			ep->skip = false;
2112 		}
2113 
2114 		event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
2115 						sizeof(*event_trb)];
2116 		/*
2117 		 * No-op TRB should not trigger interrupts.
2118 		 * If event_trb is a no-op TRB, it means the
2119 		 * corresponding TD has been cancelled. Just ignore
2120 		 * the TD.
2121 		 */
2122 		if ((le32_to_cpu(event_trb->generic.field[3])
2123 			     & TRB_TYPE_BITMASK)
2124 				 == TRB_TYPE(TRB_TR_NOOP)) {
2125 			xhci_dbg(xhci,
2126 				 "event_trb is a no-op TRB. Skip it\n");
2127 			goto cleanup;
2128 		}
2129 
2130 		/* Now update the urb's actual_length and give back to
2131 		 * the core
2132 		 */
2133 		if (usb_endpoint_xfer_control(&td->urb->ep->desc))
2134 			ret = process_ctrl_td(xhci, td, event_trb, event, ep,
2135 						 &status);
2136 		else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
2137 			ret = process_isoc_td(xhci, td, event_trb, event, ep,
2138 						 &status);
2139 		else
2140 			ret = process_bulk_intr_td(xhci, td, event_trb, event,
2141 						 ep, &status);
2142 
2143 cleanup:
2144 		/*
2145 		 * Do not update event ring dequeue pointer if ep->skip is set.
2146 		 * We will roll back to it to continue processing missed tds.
2147 		 */
2148 		if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
2149 			inc_deq(xhci, xhci->event_ring, true);
2150 		}
2151 
2152 		if (ret) {
2153 			urb = td->urb;
2154 			urb_priv = urb->hcpriv;
2155 			/* Leave the TD around for the reset endpoint function
2156 			 * to use (but only if it's not a control endpoint,
2157 			 * since we already queued the Set TR dequeue pointer
2158 			 * command for stalled control endpoints).
2159 			 */
2160 			if (usb_endpoint_xfer_control(&urb->ep->desc) ||
2161 				(trb_comp_code != COMP_STALL &&
2162 					trb_comp_code != COMP_BABBLE))
2163 				xhci_urb_free_priv(xhci, urb_priv);
2164 
2165 			usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
2166 			if ((urb->actual_length != urb->transfer_buffer_length &&
2167 						(urb->transfer_flags &
2168 						 URB_SHORT_NOT_OK)) ||
2169 					status != 0)
2170 				xhci_dbg(xhci, "Giveback URB %p, len = %d, "
2171 						"expected = %x, status = %d\n",
2172 						urb, urb->actual_length,
2173 						urb->transfer_buffer_length,
2174 						status);
2175 			spin_unlock(&xhci->lock);
2176 			/* EHCI, UHCI, and OHCI always unconditionally set the
2177 			 * urb->status of an isochronous endpoint to 0.
2178 			 */
2179 			if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
2180 				status = 0;
2181 			usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
2182 			spin_lock(&xhci->lock);
2183 		}
2184 
2185 	/*
2186 	 * If ep->skip is set, it means there are missed tds on the
2187 	 * endpoint ring that need to be taken care of.
2188 	 * Process them as short transfers until we reach the td pointed
2189 	 * to by the event.
2190 	 */
2191 	} while (ep->skip && trb_comp_code != COMP_MISSED_INT);
2192 
2193 	return 0;
2194 }
2195 
2196 /*
2197  * This function handles all OS-owned events on the event ring.  It may drop
2198  * xhci->lock between event processing (e.g. to pass up port status changes).
2199  * Returns >0 for "possibly more events to process" (caller should call again),
2200  * otherwise 0 if done.  In the future, <0 returns should indicate an error code.
2201  */
2202 static int xhci_handle_event(struct xhci_hcd *xhci)
2203 {
2204 	union xhci_trb *event;
2205 	int update_ptrs = 1;
2206 	int ret;
2207 
2208 	if (!xhci->event_ring || !xhci->event_ring->dequeue) {
2209 		xhci->error_bitmask |= 1 << 1;
2210 		return 0;
2211 	}
2212 
2213 	event = xhci->event_ring->dequeue;
2214 	/* Does the HC or OS own the TRB? */
2215 	if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
2216 	    xhci->event_ring->cycle_state) {
2217 		xhci->error_bitmask |= 1 << 2;
2218 		return 0;
2219 	}
2220 
2221 	/*
2222 	 * Barrier between reading the TRB_CYCLE (valid) flag above and any
2223 	 * speculative reads of the event's flags/data below.
2224 	 */
2225 	rmb();
2226 	/* FIXME: Handle more event types. */
2227 	switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
2228 	case TRB_TYPE(TRB_COMPLETION):
2229 		handle_cmd_completion(xhci, &event->event_cmd);
2230 		break;
2231 	case TRB_TYPE(TRB_PORT_STATUS):
2232 		handle_port_status(xhci, event);
2233 		update_ptrs = 0;
2234 		break;
2235 	case TRB_TYPE(TRB_TRANSFER):
2236 		ret = handle_tx_event(xhci, &event->trans_event);
2237 		if (ret < 0)
2238 			xhci->error_bitmask |= 1 << 9;
2239 		else
2240 			update_ptrs = 0;
2241 		break;
2242 	default:
2243 		if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
2244 		    TRB_TYPE(48))
2245 			handle_vendor_event(xhci, event);
2246 		else
2247 			xhci->error_bitmask |= 1 << 3;
2248 	}
2249 	/* Any of the above functions may drop and re-acquire the lock, so check
2250 	 * to make sure a watchdog timer didn't mark the host as non-responsive.
2251 	 */
2252 	if (xhci->xhc_state & XHCI_STATE_DYING) {
2253 		xhci_dbg(xhci, "xHCI host dying, returning from "
2254 				"event handler.\n");
2255 		return 0;
2256 	}
2257 
2258 	if (update_ptrs)
2259 		/* Update SW event ring dequeue pointer */
2260 		inc_deq(xhci, xhci->event_ring, true);
2261 
2262 	/* Are there more items on the event ring?  Caller will call us again to
2263 	 * check.
2264 	 */
2265 	return 1;
2266 }
2267 
2268 /*
2269  * xHCI spec says we can get an interrupt, and if the HC has an error condition,
2270  * we might get bad data out of the event ring.  Section 4.10.2.7 has a list of
2271  * indicators of an event TRB error, but we check the status *first* to be safe.
2272  */
2273 irqreturn_t xhci_irq(struct usb_hcd *hcd)
2274 {
2275 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2276 	u32 status;
2277 	union xhci_trb *trb;
2278 	u64 temp_64;
2279 	union xhci_trb *event_ring_deq;
2280 	dma_addr_t deq;
2281 
2282 	spin_lock(&xhci->lock);
2283 	trb = xhci->event_ring->dequeue;
2284 	/* Check if the xHC generated the interrupt, or the irq is shared */
2285 	status = xhci_readl(xhci, &xhci->op_regs->status);
2286 	if (status == 0xffffffff)
2287 		goto hw_died;
2288 
2289 	if (!(status & STS_EINT)) {
2290 		spin_unlock(&xhci->lock);
2291 		return IRQ_NONE;
2292 	}
2293 	if (status & STS_FATAL) {
2294 		xhci_warn(xhci, "WARNING: Host System Error\n");
2295 		xhci_halt(xhci);
2296 hw_died:
2297 		spin_unlock(&xhci->lock);
2298 		return -ESHUTDOWN;
2299 	}
2300 
2301 	/*
2302 	 * Clear the op reg interrupt status first,
2303 	 * so we can receive interrupts from other MSI-X interrupters.
2304 	 * Write 1 to clear the interrupt status.
2305 	 */
2306 	status |= STS_EINT;
2307 	xhci_writel(xhci, status, &xhci->op_regs->status);
2308 	/* FIXME when MSI-X is supported and there are multiple vectors */
2309 	/* Clear the MSI-X event interrupt status */
2310 
2311 	if (hcd->irq != -1) {
2312 		u32 irq_pending;
2313 		/* Acknowledge the PCI interrupt */
2314 		irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
2315 		irq_pending |= 0x3;
2316 		xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending);
2317 	}
2318 
2319 	if (xhci->xhc_state & XHCI_STATE_DYING) {
2320 		xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
2321 				"Shouldn't IRQs be disabled?\n");
2322 		/* Clear the event handler busy flag (RW1C);
2323 		 * the event ring should be empty.
2324 		 */
2325 		temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2326 		xhci_write_64(xhci, temp_64 | ERST_EHB,
2327 				&xhci->ir_set->erst_dequeue);
2328 		spin_unlock(&xhci->lock);
2329 
2330 		return IRQ_HANDLED;
2331 	}
2332 
2333 	event_ring_deq = xhci->event_ring->dequeue;
2334 	/* FIXME this should be a delayed service routine
2335 	 * that clears the EHB.
2336 	 */
2337 	while (xhci_handle_event(xhci) > 0) {}
2338 
2339 	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2340 	/* If necessary, update the HW's version of the event ring deq ptr. */
2341 	if (event_ring_deq != xhci->event_ring->dequeue) {
2342 		deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
2343 				xhci->event_ring->dequeue);
2344 		if (deq == 0)
2345 			xhci_warn(xhci, "WARN something wrong with SW event "
2346 					"ring dequeue ptr.\n");
2347 		/* Update HC event ring dequeue pointer */
2348 		temp_64 &= ERST_PTR_MASK;
2349 		temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
2350 	}
2351 
2352 	/* Clear the event handler busy flag (RW1C); event ring is empty. */
2353 	temp_64 |= ERST_EHB;
2354 	xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
2355 
2356 	spin_unlock(&xhci->lock);
2357 
2358 	return IRQ_HANDLED;
2359 }
2360 
2361 irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
2362 {
2363 	irqreturn_t ret;
2364 	struct xhci_hcd *xhci;
2365 
2366 	xhci = hcd_to_xhci(hcd);
2367 	set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
2368 	if (xhci->shared_hcd)
2369 		set_bit(HCD_FLAG_SAW_IRQ, &xhci->shared_hcd->flags);
2370 
2371 	ret = xhci_irq(hcd);
2372 
2373 	return ret;
2374 }
2375 
2376 /****		Endpoint Ring Operations	****/
2377 
2378 /*
2379  * Generic function for queueing a TRB on a ring.
2380  * The caller must have checked to make sure there's room on the ring.
2381  *
2382  * @more_trbs_coming:	Will you enqueue more TRBs before calling
2383  *			prepare_transfer()?
2384  */
2385 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
2386 		bool consumer, bool more_trbs_coming,
2387 		u32 field1, u32 field2, u32 field3, u32 field4)
2388 {
2389 	struct xhci_generic_trb *trb;
2390 
2391 	trb = &ring->enqueue->generic;
2392 	trb->field[0] = cpu_to_le32(field1);
2393 	trb->field[1] = cpu_to_le32(field2);
2394 	trb->field[2] = cpu_to_le32(field3);
2395 	trb->field[3] = cpu_to_le32(field4);
2396 	inc_enq(xhci, ring, consumer, more_trbs_coming);
2397 }
2398 
2399 /*
2400  * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
2401  * FIXME allocate segments if the ring is full.
2402  */
2403 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
2404 		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
2405 {
2406 	/* Make sure the endpoint has been added to xHC schedule */
2407 	switch (ep_state) {
2408 	case EP_STATE_DISABLED:
2409 		/*
2410 		 * USB core changed config/interfaces without notifying us,
2411 		 * or hardware is reporting the wrong state.
2412 		 */
2413 		xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
2414 		return -ENOENT;
2415 	case EP_STATE_ERROR:
2416 		xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
2417 		/* FIXME event handling code for error needs to clear it */
2418 		/* XXX not sure if this should be -ENOENT or not */
2419 		return -EINVAL;
2420 	case EP_STATE_HALTED:
2421 		xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
2422 	case EP_STATE_STOPPED:
2423 	case EP_STATE_RUNNING:
2424 		break;
2425 	default:
2426 		xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
2427 		/*
2428 		 * FIXME issue Configure Endpoint command to try to get the HC
2429 		 * back into a known state.
2430 		 */
2431 		return -EINVAL;
2432 	}
2433 	if (!room_on_ring(xhci, ep_ring, num_trbs)) {
2434 		/* FIXME allocate more room */
2435 		xhci_err(xhci, "ERROR no room on ep ring\n");
2436 		return -ENOMEM;
2437 	}
2438 
2439 	if (enqueue_is_link_trb(ep_ring)) {
2440 		struct xhci_ring *ring = ep_ring;
2441 		union xhci_trb *next;
2442 
2443 		next = ring->enqueue;
2444 
2445 		while (last_trb(xhci, ring, ring->enq_seg, next)) {
2446 			/* If we're not dealing with 0.95 hardware,
2447 			 * clear the chain bit.
2448 			 */
2449 			if (!xhci_link_trb_quirk(xhci))
2450 				next->link.control &= cpu_to_le32(~TRB_CHAIN);
2451 			else
2452 				next->link.control |= cpu_to_le32(TRB_CHAIN);
2453 
2454 			wmb();
2455 			next->link.control ^= cpu_to_le32((u32) TRB_CYCLE);
2456 
2457 			/* Toggle the cycle bit after the last ring segment. */
2458 			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
2459 				ring->cycle_state = (ring->cycle_state ? 0 : 1);
2460 				if (!in_interrupt()) {
2461 					xhci_dbg(xhci, "queue_trb: Toggle cycle "
2462 						"state for ring %p = %i\n",
2463 						ring, (unsigned int)ring->cycle_state);
2464 				}
2465 			}
2466 			ring->enq_seg = ring->enq_seg->next;
2467 			ring->enqueue = ring->enq_seg->trbs;
2468 			next = ring->enqueue;
2469 		}
2470 	}
2471 
2472 	return 0;
2473 }
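
/*
 * Worked example of the link-TRB handling above (hypothetical two-segment
 * ring, for illustration only): if the enqueue pointer sits on the link TRB
 * of the last segment, prepare_ring() clears its chain bit (unless the 0.95
 * link quirk applies), flips its cycle bit so the hardware now owns it,
 * toggles ring->cycle_state because that link TRB carries the toggle bit,
 * and moves the enqueue pointer to the first TRB of segment 0.
 */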
2474 
2475 static int prepare_transfer(struct xhci_hcd *xhci,
2476 		struct xhci_virt_device *xdev,
2477 		unsigned int ep_index,
2478 		unsigned int stream_id,
2479 		unsigned int num_trbs,
2480 		struct urb *urb,
2481 		unsigned int td_index,
2482 		gfp_t mem_flags)
2483 {
2484 	int ret;
2485 	struct urb_priv *urb_priv;
2486 	struct xhci_td	*td;
2487 	struct xhci_ring *ep_ring;
2488 	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
2489 
2490 	ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
2491 	if (!ep_ring) {
2492 		xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
2493 				stream_id);
2494 		return -EINVAL;
2495 	}
2496 
2497 	ret = prepare_ring(xhci, ep_ring,
2498 			   le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
2499 			   num_trbs, mem_flags);
2500 	if (ret)
2501 		return ret;
2502 
2503 	urb_priv = urb->hcpriv;
2504 	td = urb_priv->td[td_index];
2505 
2506 	INIT_LIST_HEAD(&td->td_list);
2507 	INIT_LIST_HEAD(&td->cancelled_td_list);
2508 
2509 	if (td_index == 0) {
2510 		ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
2511 		if (unlikely(ret)) {
2512 			xhci_urb_free_priv(xhci, urb_priv);
2513 			urb->hcpriv = NULL;
2514 			return ret;
2515 		}
2516 	}
2517 
2518 	td->urb = urb;
2519 	/* Add this TD to the tail of the endpoint ring's TD list */
2520 	list_add_tail(&td->td_list, &ep_ring->td_list);
2521 	td->start_seg = ep_ring->enq_seg;
2522 	td->first_trb = ep_ring->enqueue;
2523 
2524 	urb_priv->td[td_index] = td;
2525 
2526 	return 0;
2527 }
2528 
2529 static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
2530 {
2531 	int num_sgs, num_trbs, running_total, temp, i;
2532 	struct scatterlist *sg;
2533 
2534 	sg = NULL;
2535 	num_sgs = urb->num_sgs;
2536 	temp = urb->transfer_buffer_length;
2537 
2538 	xhci_dbg(xhci, "count sg list trbs:\n");
2539 	num_trbs = 0;
2540 	for_each_sg(urb->sg, sg, num_sgs, i) {
2541 		unsigned int previous_total_trbs = num_trbs;
2542 		unsigned int len = sg_dma_len(sg);
2543 
2544 		/* Scatter gather list entries may cross 64KB boundaries */
2545 		running_total = TRB_MAX_BUFF_SIZE -
2546 			(sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
2547 		running_total &= TRB_MAX_BUFF_SIZE - 1;
2548 		if (running_total != 0)
2549 			num_trbs++;
2550 
2551 		/* How many more 64KB chunks to transfer, how many more TRBs? */
2552 		while (running_total < sg_dma_len(sg) && running_total < temp) {
2553 			num_trbs++;
2554 			running_total += TRB_MAX_BUFF_SIZE;
2555 		}
2556 		xhci_dbg(xhci, " sg #%d: dma = %#llx, len = %#x (%d), num_trbs = %d\n",
2557 				i, (unsigned long long)sg_dma_address(sg),
2558 				len, len, num_trbs - previous_total_trbs);
2559 
2560 		len = min_t(int, len, temp);
2561 		temp -= len;
2562 		if (temp == 0)
2563 			break;
2564 	}
2565 	xhci_dbg(xhci, "\n");
2566 	if (!in_interrupt())
2567 		xhci_dbg(xhci, "ep %#x - urb len = %d, sglist used, "
2568 				"num_trbs = %d\n",
2569 				urb->ep->desc.bEndpointAddress,
2570 				urb->transfer_buffer_length,
2571 				num_trbs);
2572 	return num_trbs;
2573 }
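
/*
 * Worked example of the count above (hypothetical values, for illustration
 * only): one sg entry with dma = 0x1f800 and len = 0x1000.  A TRB buffer may
 * not cross a 64KB boundary, so the first TRB covers only the 0x800 bytes up
 * to 0x20000 (0x10000 - 0xf800), making num_trbs 1; the remaining 0x800
 * bytes take a second TRB, so this entry costs 2 TRBs in total.
 */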
2574 
2575 static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
2576 {
2577 	if (num_trbs != 0)
2578 		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
2579 				"TRBs, %d left\n", __func__,
2580 				urb->ep->desc.bEndpointAddress, num_trbs);
2581 	if (running_total != urb->transfer_buffer_length)
2582 		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
2583 				"queued %#x (%d), asked for %#x (%d)\n",
2584 				__func__,
2585 				urb->ep->desc.bEndpointAddress,
2586 				running_total, running_total,
2587 				urb->transfer_buffer_length,
2588 				urb->transfer_buffer_length);
2589 }
2590 
2591 static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
2592 		unsigned int ep_index, unsigned int stream_id, int start_cycle,
2593 		struct xhci_generic_trb *start_trb)
2594 {
2595 	/*
2596 	 * Pass all the TRBs to the hardware at once and make sure this write
2597 	 * isn't reordered.
2598 	 */
2599 	wmb();
2600 	if (start_cycle)
2601 		start_trb->field[3] |= cpu_to_le32(start_cycle);
2602 	else
2603 		start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
2604 	xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
2605 }
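
/*
 * Typical call sequence (a sketch mirroring the queueing functions below):
 *
 *	start_trb = &ep_ring->enqueue->generic;
 *	start_cycle = ep_ring->cycle_state;
 *	... queue_trb() once per TRB in the TD ...
 *	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
 *			start_cycle, start_trb);
 *
 * Until the final call, the first TRB carries the wrong cycle bit, so the
 * hardware ignores the whole TD even if it polls the ring early.
 */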
2606 
2607 /*
2608  * xHCI uses normal TRBs for both bulk and interrupt.  When the interrupt
2609  * endpoint is to be serviced, the xHC will consume (at most) one TD.  A TD
2610  * (comprised of sg list entries) can take several service intervals to
2611  * transmit.
2612  */
2613 int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2614 		struct urb *urb, int slot_id, unsigned int ep_index)
2615 {
2616 	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci,
2617 			xhci->devs[slot_id]->out_ctx, ep_index);
2618 	int xhci_interval;
2619 	int ep_interval;
2620 
2621 	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
2622 	ep_interval = urb->interval;
2623 	/* Convert to microframes */
2624 	if (urb->dev->speed == USB_SPEED_LOW ||
2625 			urb->dev->speed == USB_SPEED_FULL)
2626 		ep_interval *= 8;
2627 	/* FIXME change this to a warning and a suggestion to use the new API
2628 	 * to set the polling interval (once the API is added).
2629 	 */
2630 	if (xhci_interval != ep_interval) {
2631 		if (printk_ratelimit())
2632 			dev_dbg(&urb->dev->dev, "Driver uses different interval"
2633 					" (%d microframe%s) than xHCI "
2634 					"(%d microframe%s)\n",
2635 					ep_interval,
2636 					ep_interval == 1 ? "" : "s",
2637 					xhci_interval,
2638 					xhci_interval == 1 ? "" : "s");
2639 		urb->interval = xhci_interval;
2640 		/* Convert back to frames for LS/FS devices */
2641 		if (urb->dev->speed == USB_SPEED_LOW ||
2642 				urb->dev->speed == USB_SPEED_FULL)
2643 			urb->interval /= 8;
2644 	}
2645 	return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
2646 }
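
/*
 * Worked example of the interval fixup above (hypothetical values, for
 * illustration only): a full-speed interrupt endpoint whose context encodes
 * an interval of 64 microframes, submitted with urb->interval = 4 frames.
 * Converting to microframes gives 32 != 64, so the driver warns, sets
 * urb->interval to 64, then converts back to 8 frames for the FS device.
 */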
2647 
2648 /*
2649  * The TD size is the number of bytes remaining in the TD (including this TRB),
2650  * right shifted by 10.
2651  * It must fit in bits 21:17, so it can't be bigger than 31.
2652  */
2653 static u32 xhci_td_remainder(unsigned int remainder)
2654 {
2655 	u32 max = (1 << (21 - 17 + 1)) - 1;
2656 
2657 	if ((remainder >> 10) >= max)
2658 		return max << 17;
2659 	else
2660 		return (remainder >> 10) << 17;
2661 }
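
/*
 * Worked examples (hypothetical byte counts, for illustration only):
 *
 *	xhci_td_remainder(70000) == 31 << 17	(70000 >> 10 == 68, clamped)
 *	xhci_td_remainder(20480) == 20 << 17	(20480 >> 10 == 20)
 *	xhci_td_remainder(512)   == 0		(512 >> 10 == 0)
 */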
2662 
2663 /*
2664  * For xHCI 1.0 host controllers, TD size is the number of packets remaining in
2665  * the TD (*not* including this TRB).
2666  *
2667  * Total TD packet count = total_packet_count =
2668  *     roundup(TD size in bytes / wMaxPacketSize)
2669  *
2670  * Packets transferred up to and including this TRB = packets_transferred =
2671  *     rounddown(total bytes transferred including this TRB / wMaxPacketSize)
2672  *
2673  * TD size = total_packet_count - packets_transferred
2674  *
2675  * It must fit in bits 21:17, so it can't be bigger than 31.
2676  */
2677 
2678 static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
2679 		unsigned int total_packet_count, struct urb *urb)
2680 {
2681 	int packets_transferred;
2682 
2683 	/* None of the TRB queueing functions count the current TRB in
2684 	 * running_total.
2685 	 */
2686 	packets_transferred = (running_total + trb_buff_len) /
2687 		le16_to_cpu(urb->ep->desc.wMaxPacketSize);
2688 
2689 	return xhci_td_remainder(total_packet_count - packets_transferred);
2690 }
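
/*
 * Worked example of the formula above (hypothetical values, for
 * illustration only): wMaxPacketSize = 512 and a 3000-byte TD give
 * total_packet_count = DIV_ROUND_UP(3000, 512) = 6.  After a first TRB of
 * 1024 bytes, packets_transferred = 1024 / 512 = 2, so the TD size field
 * for the next TRB should encode 6 - 2 = 4 packets remaining.
 */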
2691 
2692 static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2693 		struct urb *urb, int slot_id, unsigned int ep_index)
2694 {
2695 	struct xhci_ring *ep_ring;
2696 	unsigned int num_trbs;
2697 	struct urb_priv *urb_priv;
2698 	struct xhci_td *td;
2699 	struct scatterlist *sg;
2700 	int num_sgs;
2701 	int trb_buff_len, this_sg_len, running_total;
2702 	unsigned int total_packet_count;
2703 	bool first_trb;
2704 	u64 addr;
2705 	bool more_trbs_coming;
2706 
2707 	struct xhci_generic_trb *start_trb;
2708 	int start_cycle;
2709 
2710 	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
2711 	if (!ep_ring)
2712 		return -EINVAL;
2713 
2714 	num_trbs = count_sg_trbs_needed(xhci, urb);
2715 	num_sgs = urb->num_sgs;
2716 	/* roundup() would yield bytes, not packets; we need a packet count */
2717 	total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
2718 			le16_to_cpu(urb->ep->desc.wMaxPacketSize));
2718 
2719 	trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
2720 			ep_index, urb->stream_id,
2721 			num_trbs, urb, 0, mem_flags);
2722 	if (trb_buff_len < 0)
2723 		return trb_buff_len;
2724 
2725 	urb_priv = urb->hcpriv;
2726 	td = urb_priv->td[0];
2727 
2728 	/*
2729 	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
2730 	 * until we've finished creating all the other TRBs.  The ring's cycle
2731 	 * state may change as we enqueue the other TRBs, so save it too.
2732 	 */
2733 	start_trb = &ep_ring->enqueue->generic;
2734 	start_cycle = ep_ring->cycle_state;
2735 
2736 	running_total = 0;
2737 	/*
2738 	 * How much data is in the first TRB?
2739 	 *
2740 	 * There are three forces at work for TRB buffer pointers and lengths:
2741 	 * 1. We don't want to walk off the end of this sg-list entry buffer.
2742 	 * 2. The transfer length that the driver requested may be smaller than
2743 	 *    the amount of memory allocated for this scatter-gather list.
2744 	 * 3. TRB buffers can't cross 64KB boundaries.
2745 	 */
2746 	sg = urb->sg;
2747 	addr = (u64) sg_dma_address(sg);
2748 	this_sg_len = sg_dma_len(sg);
2749 	trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
2750 	trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
2751 	if (trb_buff_len > urb->transfer_buffer_length)
2752 		trb_buff_len = urb->transfer_buffer_length;
2753 	xhci_dbg(xhci, "First length to xfer from 1st sglist entry = %u\n",
2754 			trb_buff_len);
2755 
2756 	first_trb = true;
2757 	/* Queue the first TRB, even if it's zero-length */
2758 	do {
2759 		u32 field = 0;
2760 		u32 length_field = 0;
2761 		u32 remainder = 0;
2762 
2763 		/* Don't change the cycle bit of the first TRB until later */
2764 		if (first_trb) {
2765 			first_trb = false;
2766 			if (start_cycle == 0)
2767 				field |= 0x1;
2768 		} else
2769 			field |= ep_ring->cycle_state;
2770 
2771 		/* Chain all the TRBs together; clear the chain bit in the last
2772 		 * TRB to indicate it's the last TRB in the chain.
2773 		 */
2774 		if (num_trbs > 1) {
2775 			field |= TRB_CHAIN;
2776 		} else {
2777 			/* FIXME - add check for ZERO_PACKET flag before this */
2778 			td->last_trb = ep_ring->enqueue;
2779 			field |= TRB_IOC;
2780 		}
2781 
2782 		/* Only set interrupt on short packet for IN endpoints */
2783 		if (usb_urb_dir_in(urb))
2784 			field |= TRB_ISP;
2785 
2786 		xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), "
2787 				"64KB boundary at %#x, end dma = %#x\n",
2788 				(unsigned int) addr, trb_buff_len, trb_buff_len,
2789 				(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
2790 				(unsigned int) addr + trb_buff_len);
2791 		if (TRB_MAX_BUFF_SIZE -
2792 				(addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
2793 			xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
2794 			xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
2795 					(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
2796 					(unsigned int) addr + trb_buff_len);
2797 		}
2798 
2799 		/* Set the TRB length, TD size, and interrupter fields. */
2800 		if (xhci->hci_version < 0x100) {
2801 			remainder = xhci_td_remainder(
2802 					urb->transfer_buffer_length -
2803 					running_total);
2804 		} else {
2805 			remainder = xhci_v1_0_td_remainder(running_total,
2806 					trb_buff_len, total_packet_count, urb);
2807 		}
2808 		length_field = TRB_LEN(trb_buff_len) |
2809 			remainder |
2810 			TRB_INTR_TARGET(0);
2811 
2812 		if (num_trbs > 1)
2813 			more_trbs_coming = true;
2814 		else
2815 			more_trbs_coming = false;
2816 		queue_trb(xhci, ep_ring, false, more_trbs_coming,
2817 				lower_32_bits(addr),
2818 				upper_32_bits(addr),
2819 				length_field,
2820 				field | TRB_TYPE(TRB_NORMAL));
2821 		--num_trbs;
2822 		running_total += trb_buff_len;
2823 
2824 		/* Calculate length for next transfer --
2825 		 * Are we done queueing all the TRBs for this sg entry?
2826 		 */
2827 		this_sg_len -= trb_buff_len;
2828 		if (this_sg_len == 0) {
2829 			--num_sgs;
2830 			if (num_sgs == 0)
2831 				break;
2832 			sg = sg_next(sg);
2833 			addr = (u64) sg_dma_address(sg);
2834 			this_sg_len = sg_dma_len(sg);
2835 		} else {
2836 			addr += trb_buff_len;
2837 		}
2838 
2839 		trb_buff_len = TRB_MAX_BUFF_SIZE -
2840 			(addr & (TRB_MAX_BUFF_SIZE - 1));
2841 		trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
2842 		if (running_total + trb_buff_len > urb->transfer_buffer_length)
2843 			trb_buff_len =
2844 				urb->transfer_buffer_length - running_total;
2845 	} while (running_total < urb->transfer_buffer_length);
2846 
2847 	check_trb_math(urb, num_trbs, running_total);
2848 	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
2849 			start_cycle, start_trb);
2850 	return 0;
2851 }
2852 
2853 /* This is very similar to what ehci-q.c qtd_fill() does */
2854 int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2855 		struct urb *urb, int slot_id, unsigned int ep_index)
2856 {
2857 	struct xhci_ring *ep_ring;
2858 	struct urb_priv *urb_priv;
2859 	struct xhci_td *td;
2860 	int num_trbs;
2861 	struct xhci_generic_trb *start_trb;
2862 	bool first_trb;
2863 	bool more_trbs_coming;
2864 	int start_cycle;
2865 	u32 field, length_field;
2866 
2867 	int running_total, trb_buff_len, ret;
2868 	unsigned int total_packet_count;
2869 	u64 addr;
2870 
2871 	if (urb->num_sgs)
2872 		return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
2873 
2874 	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
2875 	if (!ep_ring)
2876 		return -EINVAL;
2877 
2878 	num_trbs = 0;
2879 	/* How much data is (potentially) left before the 64KB boundary? */
2880 	running_total = TRB_MAX_BUFF_SIZE -
2881 		(urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
2882 	running_total &= TRB_MAX_BUFF_SIZE - 1;
2883 
2884 	/* If there's some data on this 64KB chunk, or we have to send a
2885 	 * zero-length transfer, we need at least one TRB
2886 	 */
2887 	if (running_total != 0 || urb->transfer_buffer_length == 0)
2888 		num_trbs++;
2889 	/* How many more 64KB chunks to transfer, how many more TRBs? */
2890 	while (running_total < urb->transfer_buffer_length) {
2891 		num_trbs++;
2892 		running_total += TRB_MAX_BUFF_SIZE;
2893 	}
2894 	/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
2895 
2896 	if (!in_interrupt())
2897 		xhci_dbg(xhci, "ep %#x - urb len = %#x (%d), "
2898 				"addr = %#llx, num_trbs = %d\n",
2899 				urb->ep->desc.bEndpointAddress,
2900 				urb->transfer_buffer_length,
2901 				urb->transfer_buffer_length,
2902 				(unsigned long long)urb->transfer_dma,
2903 				num_trbs);
2904 
2905 	ret = prepare_transfer(xhci, xhci->devs[slot_id],
2906 			ep_index, urb->stream_id,
2907 			num_trbs, urb, 0, mem_flags);
2908 	if (ret < 0)
2909 		return ret;
2910 
2911 	urb_priv = urb->hcpriv;
2912 	td = urb_priv->td[0];
2913 
2914 	/*
2915 	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
2916 	 * until we've finished creating all the other TRBs.  The ring's cycle
2917 	 * state may change as we enqueue the other TRBs, so save it too.
2918 	 */
2919 	start_trb = &ep_ring->enqueue->generic;
2920 	start_cycle = ep_ring->cycle_state;
2921 
2922 	running_total = 0;
2923 	/* roundup() would yield bytes, not packets; we need a packet count */
2924 	total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
2925 			le16_to_cpu(urb->ep->desc.wMaxPacketSize));
2925 	/* How much data is in the first TRB? */
2926 	addr = (u64) urb->transfer_dma;
2927 	trb_buff_len = TRB_MAX_BUFF_SIZE -
2928 		(urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
2929 	if (trb_buff_len > urb->transfer_buffer_length)
2930 		trb_buff_len = urb->transfer_buffer_length;
2931 
2932 	first_trb = true;
2933 
2934 	/* Queue the first TRB, even if it's zero-length */
2935 	do {
2936 		u32 remainder = 0;
2937 		field = 0;
2938 
2939 		/* Don't change the cycle bit of the first TRB until later */
2940 		if (first_trb) {
2941 			first_trb = false;
2942 			if (start_cycle == 0)
2943 				field |= 0x1;
2944 		} else
2945 			field |= ep_ring->cycle_state;
2946 
2947 		/* Chain all the TRBs together; clear the chain bit in the last
2948 		 * TRB to indicate it's the last TRB in the chain.
2949 		 */
2950 		if (num_trbs > 1) {
2951 			field |= TRB_CHAIN;
2952 		} else {
2953 			/* FIXME - add check for ZERO_PACKET flag before this */
2954 			td->last_trb = ep_ring->enqueue;
2955 			field |= TRB_IOC;
2956 		}
2957 
2958 		/* Only set interrupt on short packet for IN endpoints */
2959 		if (usb_urb_dir_in(urb))
2960 			field |= TRB_ISP;
2961 
2962 		/* Set the TRB length, TD size, and interrupter fields. */
2963 		if (xhci->hci_version < 0x100) {
2964 			remainder = xhci_td_remainder(
2965 					urb->transfer_buffer_length -
2966 					running_total);
2967 		} else {
2968 			remainder = xhci_v1_0_td_remainder(running_total,
2969 					trb_buff_len, total_packet_count, urb);
2970 		}
2971 		length_field = TRB_LEN(trb_buff_len) |
2972 			remainder |
2973 			TRB_INTR_TARGET(0);
2974 
2975 		if (num_trbs > 1)
2976 			more_trbs_coming = true;
2977 		else
2978 			more_trbs_coming = false;
2979 		queue_trb(xhci, ep_ring, false, more_trbs_coming,
2980 				lower_32_bits(addr),
2981 				upper_32_bits(addr),
2982 				length_field,
2983 				field | TRB_TYPE(TRB_NORMAL));
2984 		--num_trbs;
2985 		running_total += trb_buff_len;
2986 
2987 		/* Calculate length for next transfer */
2988 		addr += trb_buff_len;
2989 		trb_buff_len = urb->transfer_buffer_length - running_total;
2990 		if (trb_buff_len > TRB_MAX_BUFF_SIZE)
2991 			trb_buff_len = TRB_MAX_BUFF_SIZE;
2992 	} while (running_total < urb->transfer_buffer_length);
2993 
2994 	check_trb_math(urb, num_trbs, running_total);
2995 	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
2996 			start_cycle, start_trb);
2997 	return 0;
2998 }
2999 
3000 /* Caller must have locked xhci->lock */
3001 int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3002 		struct urb *urb, int slot_id, unsigned int ep_index)
3003 {
3004 	struct xhci_ring *ep_ring;
3005 	int num_trbs;
3006 	int ret;
3007 	struct usb_ctrlrequest *setup;
3008 	struct xhci_generic_trb *start_trb;
3009 	int start_cycle;
3010 	u32 field, length_field;
3011 	struct urb_priv *urb_priv;
3012 	struct xhci_td *td;
3013 
3014 	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
3015 	if (!ep_ring)
3016 		return -EINVAL;
3017 
3018 	/*
3019 	 * Need to copy setup packet into setup TRB, so we can't use the setup
3020 	 * DMA address.
3021 	 */
3022 	if (!urb->setup_packet)
3023 		return -EINVAL;
3024 
3025 	if (!in_interrupt())
3026 		xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n",
3027 				slot_id, ep_index);
3028 	/* 1 TRB for setup, 1 for status */
3029 	num_trbs = 2;
3030 	/*
3031 	 * Don't need to check if we need additional event data and normal TRBs,
3032 	 * since data in control transfers will never get bigger than 16MB
3033 	 * XXX: can we get a buffer that crosses 64KB boundaries?
3034 	 */
3035 	if (urb->transfer_buffer_length > 0)
3036 		num_trbs++;
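
	/*
	 * Example (hypothetical, for illustration only): a GET_DESCRIPTOR
	 * request with wLength = 18 needs three TRBs here: the 8-byte setup
	 * TRB (sent as immediate data), one IN data TRB for the 18 bytes,
	 * and the OUT status TRB.
	 */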
3037 	ret = prepare_transfer(xhci, xhci->devs[slot_id],
3038 			ep_index, urb->stream_id,
3039 			num_trbs, urb, 0, mem_flags);
3040 	if (ret < 0)
3041 		return ret;
3042 
3043 	urb_priv = urb->hcpriv;
3044 	td = urb_priv->td[0];
3045 
3046 	/*
3047 	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
3048 	 * until we've finished creating all the other TRBs.  The ring's cycle
3049 	 * state may change as we enqueue the other TRBs, so save it too.
3050 	 */
3051 	start_trb = &ep_ring->enqueue->generic;
3052 	start_cycle = ep_ring->cycle_state;
3053 
3054 	/* Queue setup TRB - see section 6.4.1.2.1 */
3055 	/* FIXME better way to translate setup_packet into two u32 fields? */
3056 	setup = (struct usb_ctrlrequest *) urb->setup_packet;
3057 	field = 0;
3058 	field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
3059 	if (start_cycle == 0)
3060 		field |= 0x1;
3061 
3062 	/* xHCI 1.0 6.4.1.2.1: Transfer Type field */
3063 	if (xhci->hci_version == 0x100) {
3064 		if (urb->transfer_buffer_length > 0) {
3065 			if (setup->bRequestType & USB_DIR_IN)
3066 				field |= TRB_TX_TYPE(TRB_DATA_IN);
3067 			else
3068 				field |= TRB_TX_TYPE(TRB_DATA_OUT);
3069 		}
3070 	}
3071 
3072 	queue_trb(xhci, ep_ring, false, true,
3073 		  setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
3074 		  le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
3075 		  TRB_LEN(8) | TRB_INTR_TARGET(0),
3076 		  /* Immediate data in pointer */
3077 		  field);
3078 
3079 	/* If there's data, queue data TRBs */
3080 	/* Only set interrupt on short packet for IN endpoints */
3081 	if (usb_urb_dir_in(urb))
3082 		field = TRB_ISP | TRB_TYPE(TRB_DATA);
3083 	else
3084 		field = TRB_TYPE(TRB_DATA);
3085 
3086 	length_field = TRB_LEN(urb->transfer_buffer_length) |
3087 		xhci_td_remainder(urb->transfer_buffer_length) |
3088 		TRB_INTR_TARGET(0);
3089 	if (urb->transfer_buffer_length > 0) {
3090 		if (setup->bRequestType & USB_DIR_IN)
3091 			field |= TRB_DIR_IN;
3092 		queue_trb(xhci, ep_ring, false, true,
3093 				lower_32_bits(urb->transfer_dma),
3094 				upper_32_bits(urb->transfer_dma),
3095 				length_field,
3096 				field | ep_ring->cycle_state);
3097 	}
3098 
3099 	/* Save the DMA address of the last TRB in the TD */
3100 	td->last_trb = ep_ring->enqueue;
3101 
3102 	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
3103 	/* If the device sent data, the status stage is an OUT transfer */
3104 	if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
3105 		field = 0;
3106 	else
3107 		field = TRB_DIR_IN;
3108 	queue_trb(xhci, ep_ring, false, false,
3109 			0,
3110 			0,
3111 			TRB_INTR_TARGET(0),
3112 			/* Event on completion */
3113 			field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
3114 
3115 	giveback_first_trb(xhci, slot_id, ep_index, 0,
3116 			start_cycle, start_trb);
3117 	return 0;
3118 }
3119 
3120 static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
3121 		struct urb *urb, int i)
3122 {
3123 	int num_trbs = 0;
3124 	u64 addr, td_len, running_total;
3125 
3126 	addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
3127 	td_len = urb->iso_frame_desc[i].length;
3128 
3129 	running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
3130 	running_total &= TRB_MAX_BUFF_SIZE - 1;
3131 	if (running_total != 0)
3132 		num_trbs++;
3133 
3134 	while (running_total < td_len) {
3135 		num_trbs++;
3136 		running_total += TRB_MAX_BUFF_SIZE;
3137 	}
3138 
3139 	return num_trbs;
3140 }
3141 
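/*
 * Worked example for the counting above, with hypothetical numbers and
 * TRB_MAX_BUFF_SIZE = 64K: a frame whose buffer starts 0xF800 bytes
 * into a 64K region and carries td_len = 3072 bytes yields
 *
 *	running_total = 0x10000 - 0xF800 = 0x800	(first, partial TRB)
 *	0x800 < 3072, so one more TRB is counted
 *
 * for a total of two TRBs: 2048 bytes up to the 64K boundary, then the
 * remaining 1024 bytes.
 */
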
3142 /*
3143  * The transfer burst count field of the isochronous TRB defines the number of
3144  * bursts that are required to move all packets in this TD.  Only SuperSpeed
3145  * devices can burst up to bMaxBurst number of packets per service interval.
3146  * This field is zero based, meaning a value of zero in the field means one
3147  * burst.  Basically, for everything but SuperSpeed devices, this field will be
3148  * zero.  Only xHCI 1.0 host controllers support this field.
3149  */
3150 static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
3151 		struct usb_device *udev,
3152 		struct urb *urb, unsigned int total_packet_count)
3153 {
3154 	unsigned int max_burst;
3155 
3156 	if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER)
3157 		return 0;
3158 
3159 	max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3160 	return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
3161 }
3162 
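/*
 * Example with hypothetical values: a SuperSpeed isoc endpoint with
 * bMaxBurst = 3 moves up to four packets per burst, so a TD of 10
 * packets needs DIV_ROUND_UP(10, 4) = 3 bursts and the zero-based TBC
 * field is written as 2.
 */
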
3163 /*
3164  * Returns the number of packets in the last "burst" of packets.  This field is
3165  * valid for all speeds of devices.  USB 2.0 devices can only do one "burst", so
3166  * the last burst packet count is equal to the total number of packets in the
3167  * TD.  SuperSpeed endpoints can have up to 3 bursts.  All but the last burst
3168  * must contain (bMaxBurst + 1) number of packets, but the last burst can
3169  * contain 1 to (bMaxBurst + 1) packets.
3170  */
3171 static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
3172 		struct usb_device *udev,
3173 		struct urb *urb, unsigned int total_packet_count)
3174 {
3175 	unsigned int max_burst;
3176 	unsigned int residue;
3177 
3178 	if (xhci->hci_version < 0x100)
3179 		return 0;
3180 
3181 	switch (udev->speed) {
3182 	case USB_SPEED_SUPER:
3183 		/* bMaxBurst is zero based: 0 means 1 packet per burst */
3184 		max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3185 		residue = total_packet_count % (max_burst + 1);
3186 		/* If residue is zero, the last burst contains (max_burst + 1)
3187 		 * number of packets, but the TLBPC field is zero-based.
3188 		 */
3189 		if (residue == 0)
3190 			return max_burst;
3191 		return residue - 1;
3192 	default:
3193 		if (total_packet_count == 0)
3194 			return 0;
3195 		return total_packet_count - 1;
3196 	}
3197 }
3198 
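/*
 * Continuing the hypothetical example above: 10 packets in bursts of
 * four leave 10 % 4 = 2 packets for the last burst, so TLBPC is
 * written as 2 - 1 = 1.  Had the packet count been a multiple of four,
 * the last burst would be full and TLBPC would equal bMaxBurst.
 */
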
3199 /* Queue TRBs for an isochronous transfer: one TD per iso_frame_desc entry */
3200 static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3201 		struct urb *urb, int slot_id, unsigned int ep_index)
3202 {
3203 	struct xhci_ring *ep_ring;
3204 	struct urb_priv *urb_priv;
3205 	struct xhci_td *td;
3206 	int num_tds, trbs_per_td;
3207 	struct xhci_generic_trb *start_trb;
3208 	bool first_trb;
3209 	int start_cycle;
3210 	u32 field, length_field;
3211 	int running_total, trb_buff_len, td_len, td_remain_len, ret;
3212 	u64 start_addr, addr;
3213 	int i, j;
3214 	bool more_trbs_coming;
3215 
3216 	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
3217 
3218 	num_tds = urb->number_of_packets;
3219 	if (num_tds < 1) {
3220 		xhci_dbg(xhci, "Isoc URB with zero packets?\n");
3221 		return -EINVAL;
3222 	}
3223 
3224 	if (!in_interrupt())
3225 		xhci_dbg(xhci, "ep %#x - urb len = %#x (%d),"
3226 				" addr = %#llx, num_tds = %d\n",
3227 				urb->ep->desc.bEndpointAddress,
3228 				urb->transfer_buffer_length,
3229 				urb->transfer_buffer_length,
3230 				(unsigned long long)urb->transfer_dma,
3231 				num_tds);
3232 
3233 	start_addr = (u64) urb->transfer_dma;
3234 	start_trb = &ep_ring->enqueue->generic;
3235 	start_cycle = ep_ring->cycle_state;
3236 
3237 	/* Queue the first TRB, even if it's zero-length */
3238 	for (i = 0; i < num_tds; i++) {
3239 		unsigned int total_packet_count;
3240 		unsigned int burst_count;
3241 		unsigned int residue;
3242 
3243 		first_trb = true;
3244 		running_total = 0;
3245 		addr = start_addr + urb->iso_frame_desc[i].offset;
3246 		td_len = urb->iso_frame_desc[i].length;
3247 		td_remain_len = td_len;
3248 		/* FIXME: Ignoring zero-length packets, can those happen? */
3249 		total_packet_count = DIV_ROUND_UP(td_len,
3250 				le16_to_cpu(urb->ep->desc.wMaxPacketSize));
3251 		burst_count = xhci_get_burst_count(xhci, urb->dev, urb,
3252 				total_packet_count);
3253 		residue = xhci_get_last_burst_packet_count(xhci,
3254 				urb->dev, urb, total_packet_count);
3255 
3256 		trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
3257 
3258 		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
3259 				urb->stream_id, trbs_per_td, urb, i, mem_flags);
3260 		if (ret < 0)
3261 			return ret;
3262 
3263 		urb_priv = urb->hcpriv;
3264 		td = urb_priv->td[i];
3265 
3266 		for (j = 0; j < trbs_per_td; j++) {
3267 			u32 remainder = 0;
3268 			field = TRB_TBC(burst_count) | TRB_TLBPC(residue);
3269 
3270 			if (first_trb) {
3271 				/* Queue the isoc TRB */
3272 				field |= TRB_TYPE(TRB_ISOC);
3273 				/* Assume URB_ISO_ASAP is set */
3274 				field |= TRB_SIA;
3275 				if (i == 0) {
3276 					if (start_cycle == 0)
3277 						field |= 0x1;
3278 				} else
3279 					field |= ep_ring->cycle_state;
3280 				first_trb = false;
3281 			} else {
3282 				/* Queue other normal TRBs */
3283 				field |= TRB_TYPE(TRB_NORMAL);
3284 				field |= ep_ring->cycle_state;
3285 			}
3286 
3287 			/* Only set interrupt on short packet for IN EPs */
3288 			if (usb_urb_dir_in(urb))
3289 				field |= TRB_ISP;
3290 
3291 			/* Chain all the TRBs together; clear the chain bit in
3292 			 * the last TRB to indicate it's the last TRB in the
3293 			 * chain.
3294 			 */
3295 			if (j < trbs_per_td - 1) {
3296 				field |= TRB_CHAIN;
3297 				more_trbs_coming = true;
3298 			} else {
3299 				td->last_trb = ep_ring->enqueue;
3300 				field |= TRB_IOC;
3301 				if (xhci->hci_version == 0x100) {
3302 					/* Set the BEI bit on all but the last TD */
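					/* BEI still queues the transfer
					 * event but blocks the interrupt,
					 * so the driver takes one interrupt
					 * per URB rather than one per TD.
					 */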
3303 					if (i < num_tds - 1)
3304 						field |= TRB_BEI;
3305 				}
3306 				more_trbs_coming = false;
3307 			}
3308 
3309 			/* Calculate TRB length */
3310 			trb_buff_len = TRB_MAX_BUFF_SIZE -
3311 				(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
3312 			if (trb_buff_len > td_remain_len)
3313 				trb_buff_len = td_remain_len;
3314 
3315 			/* Set the TRB length, TD size, & interrupter fields. */
3316 			if (xhci->hci_version < 0x100) {
3317 				remainder = xhci_td_remainder(
3318 						td_len - running_total);
3319 			} else {
3320 				remainder = xhci_v1_0_td_remainder(
3321 						running_total, trb_buff_len,
3322 						total_packet_count, urb);
3323 			}
3324 			length_field = TRB_LEN(trb_buff_len) |
3325 				remainder |
3326 				TRB_INTR_TARGET(0);
3327 
3328 			queue_trb(xhci, ep_ring, false, more_trbs_coming,
3329 				lower_32_bits(addr),
3330 				upper_32_bits(addr),
3331 				length_field,
3332 				field);
3333 			running_total += trb_buff_len;
3334 
3335 			addr += trb_buff_len;
3336 			td_remain_len -= trb_buff_len;
3337 		}
3338 
3339 		/* Check TD length */
3340 		if (running_total != td_len) {
3341 			xhci_err(xhci, "ISOC TD length mismatch\n");
3342 			return -EINVAL;
3343 		}
3344 	}
3345 
3346 	if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
3347 		if (xhci->quirks & XHCI_AMD_PLL_FIX)
3348 			usb_amd_quirk_pll_disable();
3349 	}
3350 	xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;
3351 
3352 	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3353 			start_cycle, start_trb);
3354 	return 0;
3355 }
3356 
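/*
 * Usage sketch of the deferred-cycle-bit pattern above (the same one
 * the control and bulk paths use):
 *
 *	start_trb = &ep_ring->enqueue->generic;
 *	start_cycle = ep_ring->cycle_state;
 *	... queue every TRB of every TD ...
 *	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
 *			start_cycle, start_trb);
 *
 * The first TRB is written with an inverted cycle bit and flipped only
 * in giveback_first_trb(), so the HC cannot start consuming a
 * half-built multi-TD transfer.
 */
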
3357 /*
3358  * Check the transfer ring to guarantee there is enough room for the URB.
3359  * Update the ISO URB's start_frame and interval.
3360  * The interval is adjusted the same way xhci_queue_intr_tx does it; for now,
3361  * just use the xHCI microframe index to set urb->start_frame.
3362  * Always assume URB_ISO_ASAP is set, and NEVER use urb->start_frame as input.
3363  */
3364 int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
3365 		struct urb *urb, int slot_id, unsigned int ep_index)
3366 {
3367 	struct xhci_virt_device *xdev;
3368 	struct xhci_ring *ep_ring;
3369 	struct xhci_ep_ctx *ep_ctx;
3370 	int start_frame;
3371 	int xhci_interval;
3372 	int ep_interval;
3373 	int num_tds, num_trbs, i;
3374 	int ret;
3375 
3376 	xdev = xhci->devs[slot_id];
3377 	ep_ring = xdev->eps[ep_index].ring;
3378 	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
3379 
3380 	num_trbs = 0;
3381 	num_tds = urb->number_of_packets;
3382 	for (i = 0; i < num_tds; i++)
3383 		num_trbs += count_isoc_trbs_needed(xhci, urb, i);
3384 
3385 	/* Check the ring to guarantee there is enough room for the whole URB.
3386 	 * Do not insert any TD of the URB into the ring if the check fails.
3387 	 */
3388 	ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
3389 			   num_trbs, mem_flags);
3390 	if (ret)
3391 		return ret;
3392 
3393 	start_frame = xhci_readl(xhci, &xhci->run_regs->microframe_index);
3394 	start_frame &= 0x3fff;
3395 
3396 	urb->start_frame = start_frame;
3397 	if (urb->dev->speed == USB_SPEED_LOW ||
3398 			urb->dev->speed == USB_SPEED_FULL)
3399 		urb->start_frame >>= 3;
3400 
3401 	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
3402 	ep_interval = urb->interval;
3403 	/* Convert to microframes */
3404 	if (urb->dev->speed == USB_SPEED_LOW ||
3405 			urb->dev->speed == USB_SPEED_FULL)
3406 		ep_interval *= 8;
3407 	/* FIXME change this to a warning and a suggestion to use the new API
3408 	 * to set the polling interval (once the API is added).
3409 	 */
3410 	if (xhci_interval != ep_interval) {
3411 		if (printk_ratelimit())
3412 			dev_dbg(&urb->dev->dev, "Driver uses different interval"
3413 					" (%d microframe%s) than xHCI "
3414 					"(%d microframe%s)\n",
3415 					ep_interval,
3416 					ep_interval == 1 ? "" : "s",
3417 					xhci_interval,
3418 					xhci_interval == 1 ? "" : "s");
3419 		urb->interval = xhci_interval;
3420 		/* Convert back to frames for LS/FS devices */
3421 		if (urb->dev->speed == USB_SPEED_LOW ||
3422 				urb->dev->speed == USB_SPEED_FULL)
3423 			urb->interval /= 8;
3424 	}
3425 	return xhci_queue_isoc_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
3426 }
3427 
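/*
 * Worked example for the interval check above, with hypothetical
 * values: a full-speed device whose driver set urb->interval = 4
 * frames is compared as 4 * 8 = 32 microframes against the endpoint
 * context.  If the context stores 16 microframes, urb->interval is
 * rewritten to 16 and then divided back to 2 frames before queueing.
 */
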
3428 /****		Command Ring Operations		****/
3429 
3430 /* Generic function for queueing a command TRB on the command ring.
3431  * Check to make sure there's room on the command ring for one command TRB.
3432  * Also check that there's room reserved for commands that must not fail.
3433  * If this is a command that must not fail, meaning command_must_succeed = TRUE,
3434  * then only check for the number of reserved spots.
3435  * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
3436  * because the command event handler may want to resubmit a failed command.
3437  */
3438 static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
3439 		u32 field3, u32 field4, bool command_must_succeed)
3440 {
3441 	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
3442 	int ret;
3443 
3444 	if (!command_must_succeed)
3445 		reserved_trbs++;
3446 
3447 	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
3448 			reserved_trbs, GFP_ATOMIC);
3449 	if (ret < 0) {
3450 		xhci_err(xhci, "ERR: No room for command on command ring\n");
3451 		if (command_must_succeed)
3452 			xhci_err(xhci, "ERR: Reserved TRB counting for "
3453 					"unfailable commands failed.\n");
3454 		return ret;
3455 	}
3456 	queue_trb(xhci, xhci->cmd_ring, false, false, field1, field2, field3,
3457 			field4 | xhci->cmd_ring->cycle_state);
3458 	return 0;
3459 }
3460 
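/*
 * Example of the reservation rule above, assuming a hypothetical
 * cmd_ring_reserved_trbs = 2: an ordinary command asks prepare_ring()
 * for 2 + 1 = 3 free TRBs and is rejected once only the reserved slots
 * remain, while a command_must_succeed command asks for just 2 and may
 * consume one of them.
 */
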
3461 /* Queue a slot enable or disable request on the command ring */
3462 int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
3463 {
3464 	return queue_command(xhci, 0, 0, 0,
3465 			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
3466 }
3467 
3468 /* Queue an address device command TRB */
3469 int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
3470 		u32 slot_id)
3471 {
3472 	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
3473 			upper_32_bits(in_ctx_ptr), 0,
3474 			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id),
3475 			false);
3476 }
3477 
3478 int xhci_queue_vendor_command(struct xhci_hcd *xhci,
3479 		u32 field1, u32 field2, u32 field3, u32 field4)
3480 {
3481 	return queue_command(xhci, field1, field2, field3, field4, false);
3482 }
3483 
3484 /* Queue a reset device command TRB */
3485 int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
3486 {
3487 	return queue_command(xhci, 0, 0, 0,
3488 			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
3489 			false);
3490 }
3491 
3492 /* Queue a configure endpoint command TRB */
3493 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
3494 		u32 slot_id, bool command_must_succeed)
3495 {
3496 	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
3497 			upper_32_bits(in_ctx_ptr), 0,
3498 			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
3499 			command_must_succeed);
3500 }
3501 
3502 /* Queue an evaluate context command TRB */
3503 int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
3504 		u32 slot_id)
3505 {
3506 	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
3507 			upper_32_bits(in_ctx_ptr), 0,
3508 			TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
3509 			false);
3510 }
3511 
3512 /*
3513  * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
3514  * activity on an endpoint that is about to be suspended.
3515  */
3516 int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
3517 		unsigned int ep_index, int suspend)
3518 {
3519 	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
3520 	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
3521 	u32 type = TRB_TYPE(TRB_STOP_RING);
3522 	u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);
3523 
3524 	return queue_command(xhci, 0, 0, 0,
3525 			trb_slot_id | trb_ep_index | type | trb_suspend, false);
3526 }
3527 
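/*
 * Sketch of the resulting TRB for a hypothetical slot 1, ep_index 2:
 * the first three dwords stay zero and the last packs
 * SLOT_ID_FOR_TRB(1), EP_ID_FOR_TRB(2) (the endpoint ID field holds
 * ep_index + 1), TRB_TYPE(TRB_STOP_RING) and, when suspending,
 * SUSPEND_PORT_FOR_TRB(1).
 */
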
3528 /* Set Transfer Ring Dequeue Pointer command.
3529  * This should not be used for endpoints that have streams enabled.
3530  */
3531 static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
3532 		unsigned int ep_index, unsigned int stream_id,
3533 		struct xhci_segment *deq_seg,
3534 		union xhci_trb *deq_ptr, u32 cycle_state)
3535 {
3536 	dma_addr_t addr;
3537 	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
3538 	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
3539 	u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
3540 	u32 type = TRB_TYPE(TRB_SET_DEQ);
3541 	struct xhci_virt_ep *ep;
3542 
3543 	addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
3544 	if (addr == 0) {
3545 		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
3546 		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
3547 				deq_seg, deq_ptr);
3548 		return 0;
3549 	}
3550 	ep = &xhci->devs[slot_id]->eps[ep_index];
3551 	if ((ep->ep_state & SET_DEQ_PENDING)) {
3552 		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
3553 		xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
3554 		return 0;
3555 	}
3556 	ep->queued_deq_seg = deq_seg;
3557 	ep->queued_deq_ptr = deq_ptr;
3558 	return queue_command(xhci, lower_32_bits(addr) | cycle_state,
3559 			upper_32_bits(addr), trb_stream_id,
3560 			trb_slot_id | trb_ep_index | type, false);
3561 }
3562 
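/*
 * Note on the encoding above: the new dequeue pointer and the consumer
 * cycle state share the first dword (lower_32_bits(addr) | cycle_state).
 * This works because TRBs are 16-byte aligned, so the low bits of a
 * valid TRB address are always zero.
 */
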
3563 int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
3564 		unsigned int ep_index)
3565 {
3566 	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
3567 	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
3568 	u32 type = TRB_TYPE(TRB_RESET_EP);
3569 
3570 	return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,
3571 			false);
3572 }
3573