xref: /linux/drivers/usb/host/xhci-ring.c (revision a1087ef6abedf0bfd60e5e3fddf33192cb2c1325)
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue.  This means there will always be at
 *    least one free TRB in the ring.  This is useful if you want to turn that
 *    into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB.  If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules).  You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify consumer.  If SW is the producer, it rings the doorbell for command
 *    and endpoint rings.  HC is the producer for the event ring, and it
 *    generates an interrupt according to interrupt modulation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you.  If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer.
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer.  SW is the consumer for the event ring, and it
 *    updates the event ring dequeue pointer.  HC is the consumer for the
 *    command and endpoint rings; it generates events on the event ring for these.
 */
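
/*
 * Illustrative sketch (not part of the driver, compiled out): a minimal
 * consumer loop following the cycle bit and consumer rules above.
 * example_handle_event() is a hypothetical placeholder; the real logic
 * lives in inc_deq() and the event handlers later in this file.
 */
#if 0
static void example_consume_events(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring = xhci->event_ring;

	/* Consumer rule 1: the TRB is ours while its cycle bit matches
	 * our Consumer Cycle State.
	 */
	while ((ring->dequeue->event_cmd.flags & TRB_CYCLE) ==
			ring->cycle_state) {
		example_handle_event(xhci, ring->dequeue);
		/* Consumer rule 2: advancing past a link TRB with the
		 * toggle bit set flips ring->cycle_state.
		 */
		inc_deq(xhci, ring, true);
	}
	/* Consumer rule 3: write the new dequeue pointer to the HW event
	 * ring dequeue register to notify the producer.
	 */
}
#endif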

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "xhci.h"

static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct xhci_event_cmd *event);

/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
		union xhci_trb *trb)
{
	unsigned long segment_offset;

	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset in TRBs; valid offsets are 0 .. TRBS_PER_SEGMENT - 1 */
	segment_offset = trb - seg->trbs;
	if (segment_offset >= TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
}
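
/*
 * Worked example: TRBs are 16 bytes, so with seg->dma == 0x1000 the third
 * TRB (&seg->trbs[2]) maps to 0x1000 + 2 * 16 = 0x1020.  A minimal caller
 * sketch, compiled out since it is not driver code:
 */
#if 0
static void example_print_trb_dma(struct xhci_hcd *xhci,
		struct xhci_segment *seg)
{
	union xhci_trb *trb = &seg->trbs[2];
	dma_addr_t dma = xhci_trb_virt_to_dma(seg, trb);

	if (dma)	/* zero means the TRB isn't in this segment */
		xhci_dbg(xhci, "TRB %p = 0x%llx (DMA)\n", trb,
				(unsigned long long) dma);
}
#endif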

/* Does this link TRB point to the first segment in a ring,
 * or was the previous TRB the last TRB on the last segment in the ERST?
 */
static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
			(seg->next == xhci->event_ring->first_seg);
	else
		return trb->link.control & LINK_TOGGLE;
}

/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
 * segment?  I.e. would the updated event TRB pointer step off the end of the
 * event seg?
 */
static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return trb == &seg->trbs[TRBS_PER_SEGMENT];
	else
		return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
}

static inline int enqueue_is_link_trb(struct xhci_ring *ring)
{
	struct xhci_link_trb *link = &ring->enqueue->link;
	return ((link->control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK));
}

/* Updates trb to point to the next TRB in the ring, and updates seg if the next
 * TRB is in a new segment.  This does not skip over link TRBs, and it does not
 * affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
		struct xhci_ring *ring,
		struct xhci_segment **seg,
		union xhci_trb **trb)
{
	if (last_trb(xhci, ring, *seg, *trb)) {
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 */
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
{
	union xhci_trb *next = ++(ring->dequeue);
	unsigned long long addr;

	ring->deq_updates++;
	/* Update the dequeue pointer further if that was a link TRB or we're at
	 * the end of an event ring segment (which doesn't have link TRBs)
	 */
	while (last_trb(xhci, ring, ring->deq_seg, next)) {
		if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
			ring->cycle_state = (ring->cycle_state ? 0 : 1);
			if (!in_interrupt())
				xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
						ring,
						(unsigned int) ring->cycle_state);
		}
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		next = ring->dequeue;
	}
	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
	if (ring == xhci->event_ring)
		xhci_dbg(xhci, "Event ring deq = 0x%llx (DMA)\n", addr);
	else if (ring == xhci->cmd_ring)
		xhci_dbg(xhci, "Command ring deq = 0x%llx (DMA)\n", addr);
	else
		xhci_dbg(xhci, "Ring deq = 0x%llx (DMA)\n", addr);
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set.  This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 *
 * @more_trbs_coming:	Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
		bool consumer, bool more_trbs_coming)
{
	u32 chain;
	union xhci_trb *next;
	unsigned long long addr;

	chain = ring->enqueue->generic.field[3] & TRB_CHAIN;
	next = ++(ring->enqueue);

	ring->enq_updates++;
	/* Update the enqueue pointer further if that was a link TRB or we're at
	 * the end of an event ring segment (which doesn't have link TRBs)
	 */
	while (last_trb(xhci, ring, ring->enq_seg, next)) {
		if (!consumer) {
			if (ring != xhci->event_ring) {
				/*
				 * If the caller doesn't plan on enqueueing more
				 * TDs before ringing the doorbell, then we
				 * don't want to give the link TRB to the
				 * hardware just yet.  We'll give the link TRB
				 * back in prepare_ring() just before we enqueue
				 * the TD at the top of the ring.
				 */
				if (!chain && !more_trbs_coming)
					break;

				/* If we're not dealing with 0.95 hardware,
				 * carry over the chain bit of the previous TRB
				 * (which may mean the chain bit is cleared).
				 */
				if (!xhci_link_trb_quirk(xhci)) {
					next->link.control &= ~TRB_CHAIN;
					next->link.control |= chain;
				}
				/* Give this link TRB to the hardware */
				wmb();
				next->link.control ^= TRB_CYCLE;
			}
			/* Toggle the cycle bit after the last ring segment. */
			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
				ring->cycle_state = (ring->cycle_state ? 0 : 1);
				if (!in_interrupt())
					xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
							ring,
							(unsigned int) ring->cycle_state);
			}
		}
		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}
	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
	if (ring == xhci->event_ring)
		xhci_dbg(xhci, "Event ring enq = 0x%llx (DMA)\n", addr);
	else if (ring == xhci->cmd_ring)
		xhci_dbg(xhci, "Command ring enq = 0x%llx (DMA)\n", addr);
	else
		xhci_dbg(xhci, "Ring enq = 0x%llx (DMA)\n", addr);
}
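
/*
 * Illustrative sketch (compiled out): the producer rules from the header
 * comment, as a caller of inc_enq() would follow them.  The raw field
 * writes are schematic; real queueing in this driver goes through
 * prepare_ring() and a queue_trb()-style helper.
 */
#if 0
static void example_enqueue_one(struct xhci_hcd *xhci, struct xhci_ring *ring,
		u32 field0, u32 field1, u32 field2, u32 field3)
{
	struct xhci_generic_trb *trb = &ring->enqueue->generic;

	trb->field[0] = field0;
	trb->field[1] = field1;
	trb->field[2] = field2;
	/* Producer rule 2: write our Producer Cycle State into the cycle
	 * bit last, handing the TRB to the consumer.
	 */
	trb->field[3] = (field3 & ~TRB_CYCLE) | ring->cycle_state;
	inc_enq(xhci, ring, false, true);	/* may toggle cycle state */
	/* Producer rule 3: the caller then rings the doorbell. */
}
#endif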

/*
 * Check to see if there's room to enqueue num_trbs on the ring.  See rules
 * above.
 * FIXME: this would be simpler and faster if we just kept track of the number
 * of free TRBs in a ring.
 */
static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_trbs)
{
	int i;
	union xhci_trb *enq = ring->enqueue;
	struct xhci_segment *enq_seg = ring->enq_seg;
	struct xhci_segment *cur_seg;
	unsigned int left_on_ring;

	/* If we are currently pointing to a link TRB, advance the
	 * enqueue pointer before checking for space */
	while (last_trb(xhci, ring, enq_seg, enq)) {
		enq_seg = enq_seg->next;
		enq = enq_seg->trbs;
	}

	/* Check if ring is empty */
	if (enq == ring->dequeue) {
		/* Can't use link trbs */
		left_on_ring = TRBS_PER_SEGMENT - 1;
		for (cur_seg = enq_seg->next; cur_seg != enq_seg;
				cur_seg = cur_seg->next)
			left_on_ring += TRBS_PER_SEGMENT - 1;

		/* Always need one TRB free in the ring. */
		left_on_ring -= 1;
		if (num_trbs > left_on_ring) {
			xhci_warn(xhci, "Not enough room on ring; "
					"need %u TRBs, %u TRBs left\n",
					num_trbs, left_on_ring);
			return 0;
		}
		return 1;
	}
	/* Make sure there's an extra empty TRB available */
	for (i = 0; i <= num_trbs; ++i) {
		if (enq == ring->dequeue)
			return 0;
		enq++;
		while (last_trb(xhci, ring, enq_seg, enq)) {
			enq_seg = enq_seg->next;
			enq = enq_seg->trbs;
		}
	}
	return 1;
}
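
/*
 * Worked example for the arithmetic above: with TRBS_PER_SEGMENT == 64 and
 * a two-segment ring, each segment contributes 63 usable TRBs (the link TRB
 * can't hold work), and one more TRB must stay free so that enqueue !=
 * dequeue on a full ring: 2 * 63 - 1 = 125 enqueueable TRBs when the ring
 * is empty.
 */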

/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
	u32 temp;

	xhci_dbg(xhci, "// Ding dong!\n");
	temp = xhci_readl(xhci, &xhci->dba->doorbell[0]) & DB_MASK;
	xhci_writel(xhci, temp | DB_TARGET_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	xhci_readl(xhci, &xhci->dba->doorbell[0]);
}

void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep;
	unsigned int ep_state;
	u32 field;
	__u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];

	ep = &xhci->devs[slot_id]->eps[ep_index];
	ep_state = ep->ep_state;
	/* Don't ring the doorbell for this endpoint if there are pending
	 * cancellations because we don't want to interrupt processing.
	 * We don't want to restart any stream rings if there's a set dequeue
	 * pointer command pending because the device can choose to start any
	 * stream once the endpoint is on the HW schedule.
	 * FIXME - check all the stream rings for pending cancellations.
	 */
	if (!(ep_state & EP_HALT_PENDING) && !(ep_state & SET_DEQ_PENDING)
			&& !(ep_state & EP_HALTED)) {
		field = xhci_readl(xhci, db_addr) & DB_MASK;
		field |= EPI_TO_DB(ep_index) | STREAM_ID_TO_DB(stream_id);
		xhci_writel(xhci, field, db_addr);
	}
}
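
/*
 * Usage sketch (compiled out): after queueing TDs, a caller kicks the
 * endpoint like this.  The slot ID, endpoint index, and stream ID below
 * are hypothetical values.
 */
#if 0
static void example_kick_endpoint(struct xhci_hcd *xhci)
{
	/* Slot 1, endpoint index 2, stream 1 */
	xhci_ring_ep_doorbell(xhci, 1, 2, 1);
}
#endif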

/* Ring the doorbell for any rings with pending URBs */
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index)
{
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];

	/* A ring has pending URBs if its TD list is not empty */
	if (!(ep->ep_state & EP_HAS_STREAMS)) {
		if (!(list_empty(&ep->ring->td_list)))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
		return;
	}

	for (stream_id = 1; stream_id < ep->stream_info->num_streams;
			stream_id++) {
		struct xhci_stream_info *stream_info = ep->stream_info;
		if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
						stream_id);
	}
}

/*
 * Find the segment that trb is in.  Start searching in start_seg.
 * If we must move past a segment that has a link TRB with a toggle cycle state
 * bit set, then we will toggle the value pointed at by cycle_state.
 */
static struct xhci_segment *find_trb_seg(
		struct xhci_segment *start_seg,
		union xhci_trb	*trb, int *cycle_state)
{
	struct xhci_segment *cur_seg = start_seg;
	struct xhci_generic_trb *generic_trb;

	while (cur_seg->trbs > trb ||
			&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
		generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
		if ((generic_trb->field[3] & TRB_TYPE_BITMASK) ==
				TRB_TYPE(TRB_LINK) &&
				(generic_trb->field[3] & LINK_TOGGLE))
			*cycle_state = ~(*cycle_state) & 0x1;
		cur_seg = cur_seg->next;
		if (cur_seg == start_seg)
			/* Looped over the entire list.  Oops! */
			return NULL;
	}
	return cur_seg;
}

static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}

/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
		xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
}

/*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
 * dequeue pointer, and new consumer cycle state in state.
 * Update our internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 *  - First we update our new ring state to be the same as when the xHC stopped.
 *  - Then we traverse the ring to find the segment that contains
 *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
 *    any link TRBs with the toggle cycle bit set.
 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 *    if we've moved it past a link TRB with the toggle cycle bit set.
 */
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id, struct xhci_td *cur_td,
		struct xhci_dequeue_state *state)
{
	struct xhci_virt_device *dev = xhci->devs[slot_id];
	struct xhci_ring *ep_ring;
	struct xhci_generic_trb *trb;
	struct xhci_ep_ctx *ep_ctx;
	dma_addr_t addr;

	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
			ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN can't find new dequeue state "
				"for invalid stream ID %u.\n",
				stream_id);
		return;
	}
	state->new_cycle_state = 0;
	xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
	state->new_deq_seg = find_trb_seg(cur_td->start_seg,
			dev->eps[ep_index].stopped_trb,
			&state->new_cycle_state);
	if (!state->new_deq_seg)
		BUG();
	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
	xhci_dbg(xhci, "Finding endpoint context\n");
	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	state->new_cycle_state = 0x1 & ep_ctx->deq;

	state->new_deq_ptr = cur_td->last_trb;
	xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
	state->new_deq_seg = find_trb_seg(state->new_deq_seg,
			state->new_deq_ptr,
			&state->new_cycle_state);
	if (!state->new_deq_seg)
		BUG();

	trb = &state->new_deq_ptr->generic;
	if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
				(trb->field[3] & LINK_TOGGLE))
		state->new_cycle_state = ~(state->new_cycle_state) & 0x1;
	next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);

	/* Don't update the ring cycle state for the producer (us). */
	xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
			state->new_deq_seg);
	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
	xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
			(unsigned long long) addr);
	xhci_dbg(xhci, "Setting dequeue pointer in internal ring state.\n");
	ep_ring->dequeue = state->new_deq_ptr;
	ep_ring->deq_seg = state->new_deq_seg;
}

static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		struct xhci_td *cur_td)
{
	struct xhci_segment *cur_seg;
	union xhci_trb *cur_trb;

	for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
			true;
			next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
		if ((cur_trb->generic.field[3] & TRB_TYPE_BITMASK) ==
				TRB_TYPE(TRB_LINK)) {
			/* Unchain any chained Link TRBs, but
			 * leave the pointers intact.
			 */
			cur_trb->generic.field[3] &= ~TRB_CHAIN;
			xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
			xhci_dbg(xhci, "Address = %p (0x%llx dma); "
					"in seg %p (0x%llx dma)\n",
					cur_trb,
					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
					cur_seg,
					(unsigned long long)cur_seg->dma);
		} else {
			cur_trb->generic.field[0] = 0;
			cur_trb->generic.field[1] = 0;
			cur_trb->generic.field[2] = 0;
			/* Preserve only the cycle bit of this TRB */
			cur_trb->generic.field[3] &= TRB_CYCLE;
			cur_trb->generic.field[3] |= TRB_TYPE(TRB_TR_NOOP);
			xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
					"in seg %p (0x%llx dma)\n",
					cur_trb,
					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
					cur_seg,
					(unsigned long long)cur_seg->dma);
		}
		if (cur_trb == cur_td->last_trb)
			break;
	}
}

static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id,
		struct xhci_segment *deq_seg,
		union xhci_trb *deq_ptr, u32 cycle_state);

void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_dequeue_state *deq_state)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];

	xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
			"new deq ptr = %p (0x%llx dma), new cycle = %u\n",
			deq_state->new_deq_seg,
			(unsigned long long)deq_state->new_deq_seg->dma,
			deq_state->new_deq_ptr,
			(unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
			deq_state->new_cycle_state);
	queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
			deq_state->new_deq_seg,
			deq_state->new_deq_ptr,
			(u32) deq_state->new_cycle_state);
	/* Stop the TD queueing code from ringing the doorbell until
	 * this command completes.  The HC won't set the dequeue pointer
	 * if the ring is running, and ringing the doorbell starts the
	 * ring running.
	 */
	ep->ep_state |= SET_DEQ_PENDING;
}

static inline void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	ep->ep_state &= ~EP_HALT_PENDING;
	/* Can't del_timer_sync in interrupt, so we attempt to cancel.  If the
	 * timer is running on another CPU, we don't decrement stop_cmds_pending
	 * (since we didn't successfully stop the watchdog timer).
	 */
	if (del_timer(&ep->stop_cmd_timer))
		ep->stop_cmds_pending--;
}

/* Must be called with xhci->lock held in interrupt context */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
		struct xhci_td *cur_td, int status, char *adjective)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct urb	*urb;
	struct urb_priv	*urb_priv;

	urb = cur_td->urb;
	urb_priv = urb->hcpriv;
	urb_priv->td_cnt++;

	/* Only give back the URB when this is the last TD in the URB */
	if (urb_priv->td_cnt == urb_priv->length) {
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, urb);

		spin_unlock(&xhci->lock);
		usb_hcd_giveback_urb(hcd, urb, status);
		xhci_urb_free_priv(xhci, urb_priv);
		spin_lock(&xhci->lock);
		xhci_dbg(xhci, "%s URB given back\n", adjective);
	}
}

/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 *
 *  1. If the HW was in the middle of processing the TD that needs to be
 *     cancelled, then we must move the ring's dequeue pointer past the last TRB
 *     in the TD with a Set Dequeue Pointer Command.
 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
 *     bit cleared) so that the HW will skip over them.
 */
static void handle_stopped_endpoint(struct xhci_hcd *xhci,
		union xhci_trb *trb, struct xhci_event_cmd *event)
{
	unsigned int slot_id;
	unsigned int ep_index;
	struct xhci_virt_device *virt_dev;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct list_head *entry;
	struct xhci_td *cur_td = NULL;
	struct xhci_td *last_unlinked_td;

	struct xhci_dequeue_state deq_state;

	if (unlikely(TRB_TO_SUSPEND_PORT(
			xhci->cmd_ring->dequeue->generic.field[3]))) {
		slot_id = TRB_TO_SLOT_ID(
			xhci->cmd_ring->dequeue->generic.field[3]);
		virt_dev = xhci->devs[slot_id];
		if (virt_dev)
			handle_cmd_in_cmd_wait_list(xhci, virt_dev,
				event);
		else
			xhci_warn(xhci, "Stop endpoint command "
				"completion for disabled slot %u\n",
				slot_id);
		return;
	}

	memset(&deq_state, 0, sizeof(deq_state));
	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
	ep = &xhci->devs[slot_id]->eps[ep_index];

	if (list_empty(&ep->cancelled_td_list)) {
		xhci_stop_watchdog_timer_in_irq(xhci, ep);
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}

	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
	 * We have the xHCI lock, so nothing can modify this list until we drop
	 * it.  We're also in the event handler, so we can't get re-interrupted
	 * if another Stop Endpoint command completes.
	 */
	list_for_each(entry, &ep->cancelled_td_list) {
		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
		xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
				cur_td->first_trb,
				(unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
		if (!ep_ring) {
			/* This shouldn't happen unless a driver is mucking
			 * with the stream ID after submission.  This will
			 * leave the TD on the hardware ring, and the hardware
			 * will try to execute it, and may access a buffer
			 * that has already been freed.  In the best case, the
			 * hardware will execute it, and the event handler will
			 * ignore the completion event for that TD, since it was
			 * removed from the td_list for that endpoint.  In
			 * short, don't muck with the stream ID after
			 * submission.
			 */
			xhci_warn(xhci, "WARN Cancelled URB %p "
					"has invalid stream ID %u.\n",
					cur_td->urb,
					cur_td->urb->stream_id);
			goto remove_finished_td;
		}
		/*
		 * If we stopped on the TD we need to cancel, then we have to
		 * move the xHC endpoint ring dequeue pointer past this TD.
		 */
		if (cur_td == ep->stopped_td)
			xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
					cur_td->urb->stream_id,
					cur_td, &deq_state);
		else
			td_to_noop(xhci, ep_ring, cur_td);
remove_finished_td:
		/*
		 * The event handler won't see a completion for this TD anymore,
		 * so remove it from the endpoint ring's TD list.  Keep it in
		 * the cancelled TD list for URB completion later.
		 */
		list_del(&cur_td->td_list);
	}
	last_unlinked_td = cur_td;
	xhci_stop_watchdog_timer_in_irq(xhci, ep);

	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
		xhci_queue_new_dequeue_state(xhci,
				slot_id, ep_index,
				ep->stopped_td->urb->stream_id,
				&deq_state);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Otherwise ring the doorbell(s) to restart queued transfers */
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}
	ep->stopped_td = NULL;
	ep->stopped_trb = NULL;

	/*
	 * Drop the lock and complete the URBs in the cancelled TD list.
	 * New TDs to be cancelled might be added to the end of the list before
	 * we can complete all the URBs for the TDs we already unlinked.
	 * So stop when we've completed the URB for the last TD we unlinked.
	 */
	do {
		cur_td = list_entry(ep->cancelled_td_list.next,
				struct xhci_td, cancelled_td_list);
		list_del(&cur_td->cancelled_td_list);

		/* Clean up the cancelled URB */
		/* Doesn't matter what we pass for status, since the core will
		 * just overwrite it (because the URB has been unlinked).
		 */
		xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled");

		/* Stop processing the cancelled list if the watchdog timer is
		 * running.
		 */
		if (xhci->xhc_state & XHCI_STATE_DYING)
			return;
	} while (cur_td != last_unlinked_td);

	/* Return to the event handler with xhci->lock re-acquired */
}

/* Watchdog timer function for when a stop endpoint command fails to complete.
 * In this case, we assume the host controller is broken or dying or dead.  The
 * host may still be completing some other events, so we have to be careful to
 * let the event ring handler and the URB dequeueing/enqueueing functions know
 * through xhci->xhc_state.
 *
 * The timer may also fire if the host takes a very long time to respond to the
 * command, and the stop endpoint command completion handler cannot delete the
 * timer before the timer function is called.  Another endpoint cancellation may
 * sneak in before the timer function can grab the lock, and that may queue
 * another stop endpoint command and add the timer back.  So we cannot use a
 * simple flag to say whether there is a pending stop endpoint command for a
 * particular endpoint.
 *
 * Instead we use a combination of that flag and a counter for the number of
 * pending stop endpoint commands.  If the timer is the tail end of the last
 * stop endpoint command, and the endpoint's command is still pending, we assume
 * the host is dying.
 */
void xhci_stop_endpoint_command_watchdog(unsigned long arg)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_ep *ep;
	struct xhci_virt_ep *temp_ep;
	struct xhci_ring *ring;
	struct xhci_td *cur_td;
	int ret, i, j;

	ep = (struct xhci_virt_ep *) arg;
	xhci = ep->xhci;

	spin_lock(&xhci->lock);

	ep->stop_cmds_pending--;
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
				"xHCI as DYING, exiting.\n");
		spin_unlock(&xhci->lock);
		return;
	}
	if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
		xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
				"exiting.\n");
		spin_unlock(&xhci->lock);
		return;
	}

	xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
	xhci_warn(xhci, "Assuming host is dying, halting host.\n");
	/* Oops, HC is dead or dying or at least not responding to the stop
	 * endpoint command.
	 */
	xhci->xhc_state |= XHCI_STATE_DYING;
	/* Disable interrupts from the host controller and start halting it */
	xhci_quiesce(xhci);
	spin_unlock(&xhci->lock);

	ret = xhci_halt(xhci);

	spin_lock(&xhci->lock);
	if (ret < 0) {
		/* This is bad; the host is not responding to commands and it's
		 * not allowing itself to be halted.  At least interrupts are
		 * disabled, so we can set HC_STATE_HALT and notify the
		 * USB core.  But if we call usb_hc_died(), it will attempt to
		 * disconnect all device drivers under this host.  Those
		 * disconnect() methods will wait for all URBs to be unlinked,
		 * so we must complete them.
		 */
		xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
		xhci_warn(xhci, "Completing active URBs anyway.\n");
		/* We could turn all TDs on the rings to no-ops.  This won't
		 * help if the host has cached part of the ring, and is slow if
		 * we want to preserve the cycle bit.  Skip it and hope the host
		 * doesn't touch the memory.
		 */
	}
	for (i = 0; i < MAX_HC_SLOTS; i++) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; j++) {
			temp_ep = &xhci->devs[i]->eps[j];
			ring = temp_ep->ring;
			if (!ring)
				continue;
			xhci_dbg(xhci, "Killing URBs for slot ID %u, "
					"ep index %u\n", i, j);
			while (!list_empty(&ring->td_list)) {
				cur_td = list_first_entry(&ring->td_list,
						struct xhci_td,
						td_list);
				list_del(&cur_td->td_list);
				if (!list_empty(&cur_td->cancelled_td_list))
					list_del(&cur_td->cancelled_td_list);
				xhci_giveback_urb_in_irq(xhci, cur_td,
						-ESHUTDOWN, "killed");
			}
			while (!list_empty(&temp_ep->cancelled_td_list)) {
				cur_td = list_first_entry(
						&temp_ep->cancelled_td_list,
						struct xhci_td,
						cancelled_td_list);
				list_del(&cur_td->cancelled_td_list);
				xhci_giveback_urb_in_irq(xhci, cur_td,
						-ESHUTDOWN, "killed");
			}
		}
	}
	spin_unlock(&xhci->lock);
	xhci_to_hcd(xhci)->state = HC_STATE_HALT;
	xhci_dbg(xhci, "Calling usb_hc_died()\n");
	usb_hc_died(xhci_to_hcd(xhci));
	xhci_dbg(xhci, "xHCI host controller is dead.\n");
}

/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so that
 * the TD queueing code can ring the doorbell again.  We also need to ring the
 * endpoint doorbell to restart the ring, but only if there aren't more
 * cancellations pending.
 */
static void handle_set_deq_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event,
		union xhci_trb *trb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_ring *ep_ring;
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;

	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
	stream_id = TRB_TO_STREAM_ID(trb->generic.field[2]);
	dev = xhci->devs[slot_id];

	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN Set TR deq ptr command for "
				"freed stream ID %u\n",
				stream_id);
		/* XXX: Harmless??? */
		dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
		return;
	}

	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);

	if (GET_COMP_CODE(event->status) != COMP_SUCCESS) {
		unsigned int ep_state;
		unsigned int slot_state;

		switch (GET_COMP_CODE(event->status)) {
		case COMP_TRB_ERR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
					"of stream ID configuration\n");
			break;
		case COMP_CTX_STATE:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
					"to incorrect slot or ep state.\n");
			ep_state = ep_ctx->ep_info;
			ep_state &= EP_STATE_MASK;
			slot_state = slot_ctx->dev_state;
			slot_state = GET_SLOT_STATE(slot_state);
			xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
					slot_state, ep_state);
			break;
		case COMP_EBADSLT:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
					"slot %u was not enabled.\n", slot_id);
			break;
		default:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
					"completion code of %u.\n",
					GET_COMP_CODE(event->status));
			break;
		}
		/* OK what do we do now?  The endpoint state is hosed, and we
		 * should never get to this point if the synchronization between
		 * queueing and endpoint state is correct.  This might happen
		 * if the device gets disconnected after we've finished
		 * cancelling URBs, which might not be an error...
		 */
	} else {
		xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
				ep_ctx->deq);
	}

	dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
	/* Restart any rings with pending URBs */
	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static void handle_reset_ep_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event,
		union xhci_trb *trb)
{
	int slot_id;
	unsigned int ep_index;

	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
	/* This command will only fail if the endpoint wasn't halted,
	 * but we don't care.
	 */
	xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
			(unsigned int) GET_COMP_CODE(event->status));

	/* HW with the reset endpoint quirk needs to have a configure endpoint
	 * command complete before the endpoint can be used.  Queue that here
	 * because the HW can't handle two commands being queued in a row.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
		xhci_dbg(xhci, "Queueing configure endpoint command\n");
		xhci_queue_configure_endpoint(xhci,
				xhci->devs[slot_id]->in_ctx->dma, slot_id,
				false);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Clear our internal halted state and restart the ring(s) */
		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}
}

/* Check to see if a command in the device's command queue matches this one.
 * Signal the completion or free the command, and return 1.  Return 0 if the
 * completed command isn't at the head of the command list.
 */
static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct xhci_event_cmd *event)
{
	struct xhci_command *command;

	if (list_empty(&virt_dev->cmd_list))
		return 0;

	command = list_entry(virt_dev->cmd_list.next,
			struct xhci_command, cmd_list);
	if (xhci->cmd_ring->dequeue != command->command_trb)
		return 0;

	command->status =
		GET_COMP_CODE(event->status);
	list_del(&command->cmd_list);
	if (command->completion)
		complete(command->completion);
	else
		xhci_free_command(xhci, command);
	return 1;
}

static void handle_cmd_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	int slot_id = TRB_TO_SLOT_ID(event->flags);
	u64 cmd_dma;
	dma_addr_t cmd_dequeue_dma;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_virt_device *virt_dev;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	unsigned int ep_state;

	cmd_dma = event->cmd_trb;
	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			xhci->cmd_ring->dequeue);
	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
	if (cmd_dequeue_dma == 0) {
		xhci->error_bitmask |= 1 << 4;
		return;
	}
	/* Does the DMA address match our internal dequeue pointer address? */
	if (cmd_dma != (u64) cmd_dequeue_dma) {
		xhci->error_bitmask |= 1 << 5;
		return;
	}
	switch (xhci->cmd_ring->dequeue->generic.field[3] & TRB_TYPE_BITMASK) {
	case TRB_TYPE(TRB_ENABLE_SLOT):
		if (GET_COMP_CODE(event->status) == COMP_SUCCESS)
			xhci->slot_id = slot_id;
		else
			xhci->slot_id = 0;
		complete(&xhci->addr_dev);
		break;
	case TRB_TYPE(TRB_DISABLE_SLOT):
		if (xhci->devs[slot_id])
			xhci_free_virt_device(xhci, slot_id);
		break;
	case TRB_TYPE(TRB_CONFIG_EP):
		virt_dev = xhci->devs[slot_id];
		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
			break;
		/*
		 * Configure endpoint commands can come from the USB core
		 * configuration or alt setting changes, or because the HW
		 * needed an extra configure endpoint command after a reset
		 * endpoint command or streams were being configured.
		 * If the command was for a halted endpoint, the xHCI driver
		 * is not waiting on the configure endpoint command.
		 */
		ctrl_ctx = xhci_get_input_control_ctx(xhci,
				virt_dev->in_ctx);
		/* Input ctx add_flags are the endpoint index plus one */
		ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1;
		/* A usb_set_interface() call directly after clearing a halted
		 * condition may race on this quirky hardware.  Not worth
		 * worrying about, since this is prototype hardware.  Not sure
		 * if this will work for streams, but streams support was
		 * untested on this prototype.
		 */
		if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
				ep_index != (unsigned int) -1 &&
				ctrl_ctx->add_flags - SLOT_FLAG ==
					ctrl_ctx->drop_flags) {
			ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
			ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
			if (!(ep_state & EP_HALTED))
				goto bandwidth_change;
			xhci_dbg(xhci, "Completed config ep cmd - "
					"last ep index = %d, state = %d\n",
					ep_index, ep_state);
			/* Clear internal halted state and restart ring(s) */
			xhci->devs[slot_id]->eps[ep_index].ep_state &=
				~EP_HALTED;
			ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
			break;
		}
bandwidth_change:
		xhci_dbg(xhci, "Completed config ep cmd\n");
		xhci->devs[slot_id]->cmd_status =
			GET_COMP_CODE(event->status);
		complete(&xhci->devs[slot_id]->cmd_completion);
		break;
	case TRB_TYPE(TRB_EVAL_CONTEXT):
		virt_dev = xhci->devs[slot_id];
		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
			break;
		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
		complete(&xhci->devs[slot_id]->cmd_completion);
		break;
	case TRB_TYPE(TRB_ADDR_DEV):
		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
		complete(&xhci->addr_dev);
		break;
	case TRB_TYPE(TRB_STOP_RING):
		handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue, event);
		break;
	case TRB_TYPE(TRB_SET_DEQ):
		handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
		break;
	case TRB_TYPE(TRB_CMD_NOOP):
		++xhci->noops_handled;
		break;
	case TRB_TYPE(TRB_RESET_EP):
		handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
		break;
	case TRB_TYPE(TRB_RESET_DEV):
		xhci_dbg(xhci, "Completed reset device command.\n");
		slot_id = TRB_TO_SLOT_ID(
				xhci->cmd_ring->dequeue->generic.field[3]);
		virt_dev = xhci->devs[slot_id];
		if (virt_dev)
			handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
		else
			xhci_warn(xhci, "Reset device command completion "
					"for disabled slot %u\n", slot_id);
		break;
	case TRB_TYPE(TRB_NEC_GET_FW):
		if (!(xhci->quirks & XHCI_NEC_HOST)) {
			xhci->error_bitmask |= 1 << 6;
			break;
		}
		xhci_dbg(xhci, "NEC firmware version %2x.%02x\n",
				NEC_FW_MAJOR(event->status),
				NEC_FW_MINOR(event->status));
		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci->error_bitmask |= 1 << 6;
		break;
	}
	inc_deq(xhci, xhci->cmd_ring, false);
}

static void handle_vendor_event(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 trb_type;

	trb_type = TRB_FIELD_TO_TYPE(event->generic.field[3]);
	xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
	if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
		handle_cmd_completion(xhci, &event->event_cmd);
}

static void handle_port_status(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 port_id;
	u32 temp, temp1;
	u32 __iomem *addr;
	int ports;
	int slot_id;

	/* Port status change events always have a successful completion code */
	if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) {
		xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
		xhci->error_bitmask |= 1 << 8;
	}
	port_id = GET_PORT_ID(event->generic.field[0]);
	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);

	ports = HCS_MAX_PORTS(xhci->hcs_params1);
	if ((port_id <= 0) || (port_id > ports)) {
		xhci_warn(xhci, "Invalid port id %d\n", port_id);
		goto cleanup;
	}

	addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS * (port_id - 1);
	temp = xhci_readl(xhci, addr);
	if ((temp & PORT_CONNECT) && (hcd->state == HC_STATE_SUSPENDED)) {
		xhci_dbg(xhci, "resume root hub\n");
		usb_hcd_resume_root_hub(hcd);
	}

	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
		xhci_dbg(xhci, "port resume event for port %d\n", port_id);

		temp1 = xhci_readl(xhci, &xhci->op_regs->command);
		if (!(temp1 & CMD_RUN)) {
			xhci_warn(xhci, "xHC is not running.\n");
			goto cleanup;
		}

		if (DEV_SUPERSPEED(temp)) {
			xhci_dbg(xhci, "resume SS port %d\n", port_id);
			temp = xhci_port_state_to_neutral(temp);
			temp &= ~PORT_PLS_MASK;
			temp |= PORT_LINK_STROBE | XDEV_U0;
			xhci_writel(xhci, temp, addr);
			slot_id = xhci_find_slot_id_by_port(xhci, port_id);
			if (!slot_id) {
				xhci_dbg(xhci, "slot_id is zero\n");
				goto cleanup;
			}
			xhci_ring_device(xhci, slot_id);
			xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
			/* Clear PORT_PLC */
			temp = xhci_readl(xhci, addr);
			temp = xhci_port_state_to_neutral(temp);
			temp |= PORT_PLC;
			xhci_writel(xhci, temp, addr);
		} else {
			xhci_dbg(xhci, "resume HS port %d\n", port_id);
			xhci->resume_done[port_id - 1] = jiffies +
				msecs_to_jiffies(20);
			mod_timer(&hcd->rh_timer,
				  xhci->resume_done[port_id - 1]);
			/* Do the rest in GetPortStatus */
		}
	}

cleanup:
	/* Update event ring dequeue pointer before dropping the lock */
	inc_deq(xhci, xhci->event_ring, true);

	spin_unlock(&xhci->lock);
	/* Pass this up to the core */
	usb_hcd_poll_rh_status(xhci_to_hcd(xhci));
	spin_lock(&xhci->lock);
}

/*
 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
 * at end_trb, which may be in another segment.  If the suspect DMA address is a
 * TRB in this TD, this function returns that TRB's segment.  Otherwise it
 * returns NULL.
 */
struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
		union xhci_trb	*start_trb,
		union xhci_trb	*end_trb,
		dma_addr_t	suspect_dma)
{
	dma_addr_t start_dma;
	dma_addr_t end_seg_dma;
	dma_addr_t end_trb_dma;
	struct xhci_segment *cur_seg;

	start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
	cur_seg = start_seg;

	do {
		if (start_dma == 0)
			return NULL;
		/* We may get an event for a Link TRB in the middle of a TD */
		end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
				&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
		/* If the end TRB isn't in this segment, this is set to 0 */
		end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);

		if (end_trb_dma > 0) {
			/* The end TRB is in this segment, so suspect should be here */
			if (start_dma <= end_trb_dma) {
				if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
					return cur_seg;
			} else {
				/* Case for one segment with
				 * a TD wrapped around to the top
				 */
				if ((suspect_dma >= start_dma &&
							suspect_dma <= end_seg_dma) ||
						(suspect_dma >= cur_seg->dma &&
						 suspect_dma <= end_trb_dma))
					return cur_seg;
			}
			return NULL;
		} else {
			/* Might still be somewhere in this segment */
			if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
				return cur_seg;
		}
		cur_seg = cur_seg->next;
		start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
	} while (cur_seg != start_seg);

	return NULL;
}
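
/*
 * Worked example for the wrapped case above: on a one-segment ring of 64
 * 16-byte TRBs at DMA 0x1000, the segment spans 0x1000-0x13f0.  If a TD
 * starts at 0x13d0 and wraps so that its end TRB is at 0x1020, suspect_dma
 * matches when it lies in either 0x13d0-0x13f0 (start TRB to end of
 * segment) or 0x1000-0x1020 (top of segment to end TRB).
 */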

static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_td *td, union xhci_trb *event_trb)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	ep->ep_state |= EP_HALTED;
	ep->stopped_td = td;
	ep->stopped_trb = event_trb;
	ep->stopped_stream = stream_id;

	xhci_queue_reset_ep(xhci, slot_id, ep_index);
	xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);

	ep->stopped_td = NULL;
	ep->stopped_trb = NULL;
	ep->stopped_stream = 0;

	xhci_ring_cmd_db(xhci);
}

/* Check if an error has halted the endpoint ring.  The class driver will
 * cleanup the halt for a non-default control endpoint if we indicate a stall.
 * However, a babble and other errors also halt the endpoint ring, and the class
 * driver won't clear the halt in that case, so we need to issue a Set Transfer
 * Ring Dequeue Pointer command manually.
 */
static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		unsigned int trb_comp_code)
{
	/* TRB completion codes that may require a manual halt cleanup */
	if (trb_comp_code == COMP_TX_ERR ||
			trb_comp_code == COMP_BABBLE ||
			trb_comp_code == COMP_SPLIT_ERR)
		/* The 0.95 spec says a babbling control endpoint
		 * is not halted.  The 0.96 spec says it is.  Some HW
		 * claims to be 0.95 compliant, but it halts the control
		 * endpoint anyway.  Check if a babble halted the
		 * endpoint.
		 */
		if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_HALTED)
			return 1;

	return 0;
}

int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
{
	if (trb_comp_code >= 224 && trb_comp_code <= 255) {
		/* Vendor defined "informational" completion code,
		 * treat as not-an-error.
		 */
		xhci_dbg(xhci, "Vendor defined info completion code %u\n",
				trb_comp_code);
		xhci_dbg(xhci, "Treating code as success.\n");
		return 1;
	}
	return 0;
}

/*
 * Finish TD processing: remove the TD from the endpoint's TD list.
 * Return 1 if the URB can be given back.
 */
static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status, bool skip)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct urb *urb = NULL;
	struct xhci_ep_ctx *ep_ctx;
	int ret = 0;
	struct urb_priv	*urb_priv;
	u32 trb_comp_code;

	slot_id = TRB_TO_SLOT_ID(event->flags);
	xdev = xhci->devs[slot_id];
	ep_index = TRB_TO_EP_ID(event->flags) - 1;
	ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	trb_comp_code = GET_COMP_CODE(event->transfer_len);

	if (skip)
		goto td_cleanup;

	if (trb_comp_code == COMP_STOP_INVAL ||
			trb_comp_code == COMP_STOP) {
		/* The Endpoint Stop Command completion will take care of any
		 * stopped TDs.  A stopped TD may be restarted, so don't update
		 * the ring dequeue pointer or take this TD off any lists yet.
		 */
		ep->stopped_td = td;
		ep->stopped_trb = event_trb;
		return 0;
	} else {
		if (trb_comp_code == COMP_STALL) {
			/* The transfer is completed from the driver's
			 * perspective, but we need to issue a set dequeue
			 * command for this stalled endpoint to move the dequeue
			 * pointer past the TD.  We can't do that here because
			 * the halt condition must be cleared first.  Let the
			 * USB class driver clear the stall later.
			 */
			ep->stopped_td = td;
			ep->stopped_trb = event_trb;
			ep->stopped_stream = ep_ring->stream_id;
		} else if (xhci_requires_manual_halt_cleanup(xhci,
					ep_ctx, trb_comp_code)) {
			/* Other types of errors halt the endpoint, but the
			 * class driver doesn't call usb_reset_endpoint() unless
			 * the error is -EPIPE.  Clear the halted status in the
			 * xHCI hardware manually.
			 */
			xhci_cleanup_halted_endpoint(xhci,
					slot_id, ep_index, ep_ring->stream_id,
					td, event_trb);
		} else {
			/* Update ring dequeue pointer */
			while (ep_ring->dequeue != td->last_trb)
				inc_deq(xhci, ep_ring, false);
			inc_deq(xhci, ep_ring, false);
		}

td_cleanup:
		/* Clean up the endpoint's TD list */
		urb = td->urb;
		urb_priv = urb->hcpriv;

		/* Do one last check of the actual transfer length.
		 * If the host controller said we transferred more data than
		 * the buffer length, urb->actual_length will be a very big
		 * number (since it's unsigned).  Play it safe and say we didn't
		 * transfer anything.
		 */
		if (urb->actual_length > urb->transfer_buffer_length) {
			xhci_warn(xhci, "URB transfer length is wrong, "
					"xHC issue? req. len = %u, "
					"act. len = %u\n",
					urb->transfer_buffer_length,
					urb->actual_length);
			urb->actual_length = 0;
			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
				*status = -EREMOTEIO;
			else
				*status = 0;
		}
		list_del(&td->td_list);
		/* Was this TD slated to be cancelled but completed anyway? */
		if (!list_empty(&td->cancelled_td_list))
			list_del(&td->cancelled_td_list);

		urb_priv->td_cnt++;
		/* Give back the URB when all the TDs are completed */
		if (urb_priv->td_cnt == urb_priv->length)
			ret = 1;
	}

	return ret;
}
1462 
1463 /*
1464  * Process control tds, update urb status and actual_length.
1465  */
1466 static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
1467 	union xhci_trb *event_trb, struct xhci_transfer_event *event,
1468 	struct xhci_virt_ep *ep, int *status)
1469 {
1470 	struct xhci_virt_device *xdev;
1471 	struct xhci_ring *ep_ring;
1472 	unsigned int slot_id;
1473 	int ep_index;
1474 	struct xhci_ep_ctx *ep_ctx;
1475 	u32 trb_comp_code;
1476 
1477 	slot_id = TRB_TO_SLOT_ID(event->flags);
1478 	xdev = xhci->devs[slot_id];
1479 	ep_index = TRB_TO_EP_ID(event->flags) - 1;
1480 	ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
1481 	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
1482 	trb_comp_code = GET_COMP_CODE(event->transfer_len);
1483 
1484 	xhci_debug_trb(xhci, xhci->event_ring->dequeue);
1485 	switch (trb_comp_code) {
1486 	case COMP_SUCCESS:
1487 		if (event_trb == ep_ring->dequeue) {
1488 			xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
1489 					"without IOC set??\n");
1490 			*status = -ESHUTDOWN;
1491 		} else if (event_trb != td->last_trb) {
1492 			xhci_warn(xhci, "WARN: Success on ctrl data TRB "
1493 					"without IOC set??\n");
1494 			*status = -ESHUTDOWN;
1495 		} else {
1496 			xhci_dbg(xhci, "Successful control transfer!\n");
1497 			*status = 0;
1498 		}
1499 		break;
1500 	case COMP_SHORT_TX:
1501 		xhci_warn(xhci, "WARN: short transfer on control ep\n");
1502 		if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1503 			*status = -EREMOTEIO;
1504 		else
1505 			*status = 0;
1506 		break;
1507 	default:
1508 		if (!xhci_requires_manual_halt_cleanup(xhci,
1509 					ep_ctx, trb_comp_code))
1510 			break;
1511 		xhci_dbg(xhci, "TRB error code %u, "
1512 				"halted endpoint index = %u\n",
1513 				trb_comp_code, ep_index);
1514 		/* else fall through */
1515 	case COMP_STALL:
1516 		/* Did we transfer part of the data (middle) phase? */
1517 		if (event_trb != ep_ring->dequeue &&
1518 				event_trb != td->last_trb)
1519 			td->urb->actual_length =
1520 				td->urb->transfer_buffer_length
1521 				- TRB_LEN(event->transfer_len);
1522 		else
1523 			td->urb->actual_length = 0;
1524 
1525 		xhci_cleanup_halted_endpoint(xhci,
1526 			slot_id, ep_index, 0, td, event_trb);
1527 		return finish_td(xhci, td, event_trb, event, ep, status, true);
1528 	}
1529 	/*
1530 	 * Did we transfer any data, despite the errors that might have
1531 	 * happened?  I.e. did we get past the setup stage?
1532 	 */
1533 	if (event_trb != ep_ring->dequeue) {
1534 		/* The event was for the status stage */
1535 		if (event_trb == td->last_trb) {
1536 			if (td->urb->actual_length != 0) {
1537 				/* Don't overwrite a previously set error code
1538 				 */
1539 				if ((*status == -EINPROGRESS || *status == 0) &&
1540 						(td->urb->transfer_flags
1541 						 & URB_SHORT_NOT_OK))
1542 					/* Did we already see a short data
1543 					 * stage? */
1544 					*status = -EREMOTEIO;
1545 			} else {
1546 				td->urb->actual_length =
1547 					td->urb->transfer_buffer_length;
1548 			}
1549 		} else {
1550 			/* Maybe the event was for the data stage? */
1551 			if (trb_comp_code != COMP_STOP_INVAL) {
1552 				/* We didn't stop on a link TRB in the middle */
1553 				td->urb->actual_length =
1554 					td->urb->transfer_buffer_length -
1555 					TRB_LEN(event->transfer_len);
1556 				xhci_dbg(xhci, "Waiting for status "
1557 						"stage event\n");
1558 				return 0;
1559 			}
1560 		}
1561 	}
1562 
1563 	return finish_td(xhci, td, event_trb, event, ep, status, false);
1564 }
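
/*
 * A rough worked example of the stage checks above (illustrative
 * numbers only): a control IN transfer such as GET_DESCRIPTOR with
 * wLength = 18 queues three TRBs -- setup, data, and status.  The
 * setup TRB sits at the start of the TD (event_trb == ep_ring->dequeue),
 * the status TRB is td->last_trb, and anything in between is the data
 * stage.  If the data stage event reports 4 bytes untransferred in
 * TRB_LEN(event->transfer_len), actual_length becomes 18 - 4 = 14 and
 * we return early to wait for the status stage event.
 */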
1565 
1566 /*
1567  * Process isochronous tds, update urb packet status and actual_length.
1568  */
1569 static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
1570 	union xhci_trb *event_trb, struct xhci_transfer_event *event,
1571 	struct xhci_virt_ep *ep, int *status)
1572 {
1573 	struct xhci_ring *ep_ring;
1574 	struct urb_priv *urb_priv;
1575 	int idx;
1576 	int len = 0;
1577 	int skip_td = 0;
1578 	union xhci_trb *cur_trb;
1579 	struct xhci_segment *cur_seg;
1580 	u32 trb_comp_code;
1581 
1582 	ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
1583 	trb_comp_code = GET_COMP_CODE(event->transfer_len);
1584 	urb_priv = td->urb->hcpriv;
1585 	idx = urb_priv->td_cnt;
1586 
1587 	if (ep->skip) {
1588 		/* The transfer is partly done */
1589 		*status = -EXDEV;
1590 		td->urb->iso_frame_desc[idx].status = -EXDEV;
1591 	} else {
1592 		/* handle completion code */
1593 		switch (trb_comp_code) {
1594 		case COMP_SUCCESS:
1595 			td->urb->iso_frame_desc[idx].status = 0;
1596 			xhci_dbg(xhci, "Successful isoc transfer!\n");
1597 			break;
1598 		case COMP_SHORT_TX:
1599 			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1600 				td->urb->iso_frame_desc[idx].status =
1601 					 -EREMOTEIO;
1602 			else
1603 				td->urb->iso_frame_desc[idx].status = 0;
1604 			break;
1605 		case COMP_BW_OVER:
1606 			td->urb->iso_frame_desc[idx].status = -ECOMM;
1607 			skip_td = 1;
1608 			break;
1609 		case COMP_BUFF_OVER:
1610 		case COMP_BABBLE:
1611 			td->urb->iso_frame_desc[idx].status = -EOVERFLOW;
1612 			skip_td = 1;
1613 			break;
1614 		case COMP_STALL:
1615 			td->urb->iso_frame_desc[idx].status = -EPROTO;
1616 			skip_td = 1;
1617 			break;
1618 		case COMP_STOP:
1619 		case COMP_STOP_INVAL:
1620 			break;
1621 		default:
1622 			td->urb->iso_frame_desc[idx].status = -1;
1623 			break;
1624 		}
1625 	}
1626 
1627 	/* Calculate the actual transfer length */
1628 	if (ep->skip) {
1629 		td->urb->iso_frame_desc[idx].actual_length = 0;
1630 		/* Update ring dequeue pointer */
1631 		while (ep_ring->dequeue != td->last_trb)
1632 			inc_deq(xhci, ep_ring, false);
1633 		inc_deq(xhci, ep_ring, false);
1634 		return finish_td(xhci, td, event_trb, event, ep, status, true);
1635 	}
1636 
1637 	if (trb_comp_code == COMP_SUCCESS || skip_td == 1) {
1638 		td->urb->iso_frame_desc[idx].actual_length =
1639 			td->urb->iso_frame_desc[idx].length;
1640 		td->urb->actual_length +=
1641 			td->urb->iso_frame_desc[idx].length;
1642 	} else {
1643 		for (cur_trb = ep_ring->dequeue,
1644 		     cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
1645 		     next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
1646 			if ((cur_trb->generic.field[3] &
1647 			 TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
1648 			    (cur_trb->generic.field[3] &
1649 			 TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
1650 				len +=
1651 				    TRB_LEN(cur_trb->generic.field[2]);
1652 		}
1653 		len += TRB_LEN(cur_trb->generic.field[2]) -
1654 			TRB_LEN(event->transfer_len);
1655 
1656 		if (trb_comp_code != COMP_STOP_INVAL) {
1657 			td->urb->iso_frame_desc[idx].actual_length = len;
1658 			td->urb->actual_length += len;
1659 		}
1660 	}
1661 
1662 	if ((idx == urb_priv->length - 1) && *status == -EINPROGRESS)
1663 		*status = 0;
1664 
1665 	return finish_td(xhci, td, event_trb, event, ep, status, false);
1666 }
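
/*
 * Illustrative numbers for the isoc accounting above: if frame idx has
 * iso_frame_desc[idx].length = 1024 queued in a single TRB and the
 * frame completes short with TRB_LEN(event->transfer_len) = 24 bytes
 * untransferred, the walk from the dequeue pointer adds nothing and
 * the final adjustment credits 1024 - 24 = 1000 bytes to both the
 * frame's actual_length and urb->actual_length.
 */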
1667 
1668 /*
1669  * Process bulk and interrupt tds, update urb status and actual_length.
1670  */
1671 static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
1672 	union xhci_trb *event_trb, struct xhci_transfer_event *event,
1673 	struct xhci_virt_ep *ep, int *status)
1674 {
1675 	struct xhci_ring *ep_ring;
1676 	union xhci_trb *cur_trb;
1677 	struct xhci_segment *cur_seg;
1678 	u32 trb_comp_code;
1679 
1680 	ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
1681 	trb_comp_code = GET_COMP_CODE(event->transfer_len);
1682 
1683 	switch (trb_comp_code) {
1684 	case COMP_SUCCESS:
1685 		/* Double check that the HW transferred everything. */
1686 		if (event_trb != td->last_trb) {
1687 			xhci_warn(xhci, "WARN Successful completion "
1688 					"on short TX\n");
1689 			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1690 				*status = -EREMOTEIO;
1691 			else
1692 				*status = 0;
1693 		} else {
1694 			if (usb_endpoint_xfer_bulk(&td->urb->ep->desc))
1695 				xhci_dbg(xhci, "Successful bulk "
1696 						"transfer!\n");
1697 			else
1698 				xhci_dbg(xhci, "Successful interrupt "
1699 						"transfer!\n");
1700 			*status = 0;
1701 		}
1702 		break;
1703 	case COMP_SHORT_TX:
1704 		if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1705 			*status = -EREMOTEIO;
1706 		else
1707 			*status = 0;
1708 		break;
1709 	default:
1710 		/* Others already handled above */
1711 		break;
1712 	}
1713 	dev_dbg(&td->urb->dev->dev,
1714 			"ep %#x - asked for %d bytes, "
1715 			"%d bytes untransferred\n",
1716 			td->urb->ep->desc.bEndpointAddress,
1717 			td->urb->transfer_buffer_length,
1718 			TRB_LEN(event->transfer_len));
1719 	/* Fast path - was this the last TRB in the TD for this URB? */
1720 	if (event_trb == td->last_trb) {
1721 		if (TRB_LEN(event->transfer_len) != 0) {
1722 			td->urb->actual_length =
1723 				td->urb->transfer_buffer_length -
1724 				TRB_LEN(event->transfer_len);
1725 			if (td->urb->transfer_buffer_length <
1726 					td->urb->actual_length) {
1727 				xhci_warn(xhci, "HC gave bad length "
1728 						"of %d bytes left\n",
1729 						TRB_LEN(event->transfer_len));
1730 				td->urb->actual_length = 0;
1731 				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1732 					*status = -EREMOTEIO;
1733 				else
1734 					*status = 0;
1735 			}
1736 			/* Don't overwrite a previously set error code */
1737 			if (*status == -EINPROGRESS) {
1738 				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1739 					*status = -EREMOTEIO;
1740 				else
1741 					*status = 0;
1742 			}
1743 		} else {
1744 			td->urb->actual_length =
1745 				td->urb->transfer_buffer_length;
1746 			/* Ignore a short packet completion if the
1747 			 * untransferred length was zero.
1748 			 */
1749 			if (*status == -EREMOTEIO)
1750 				*status = 0;
1751 		}
1752 	} else {
1753 		/* Slow path - walk the list, starting from the dequeue
1754 		 * pointer, to get the actual length transferred.
1755 		 */
1756 		td->urb->actual_length = 0;
1757 		for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
1758 				cur_trb != event_trb;
1759 				next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
1760 			if ((cur_trb->generic.field[3] &
1761 			 TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
1762 			    (cur_trb->generic.field[3] &
1763 			 TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
1764 				td->urb->actual_length +=
1765 					TRB_LEN(cur_trb->generic.field[2]);
1766 		}
1767 		/* If the ring didn't stop on a Link or No-op TRB, add
1768 		 * in the actual bytes transferred from the Normal TRB
1769 		 */
1770 		if (trb_comp_code != COMP_STOP_INVAL)
1771 			td->urb->actual_length +=
1772 				TRB_LEN(cur_trb->generic.field[2]) -
1773 				TRB_LEN(event->transfer_len);
1774 	}
1775 
1776 	return finish_td(xhci, td, event_trb, event, ep, status, false);
1777 }
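
/*
 * A sketch of the fast-path residue math above, assuming a 512-byte
 * bulk IN URB: a short-transfer event on td->last_trb with
 * TRB_LEN(event->transfer_len) = 112 bytes untransferred yields
 * actual_length = 512 - 112 = 400, and *status is -EREMOTEIO only if
 * the driver set URB_SHORT_NOT_OK.
 */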
1778 
1779 /*
1780  * If this function returns an error condition, it means it got a Transfer
1781  * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
1782  * At this point, the host controller is probably hosed and should be reset.
1783  */
1784 static int handle_tx_event(struct xhci_hcd *xhci,
1785 		struct xhci_transfer_event *event)
1786 {
1787 	struct xhci_virt_device *xdev;
1788 	struct xhci_virt_ep *ep;
1789 	struct xhci_ring *ep_ring;
1790 	unsigned int slot_id;
1791 	int ep_index;
1792 	struct xhci_td *td = NULL;
1793 	dma_addr_t event_dma;
1794 	struct xhci_segment *event_seg;
1795 	union xhci_trb *event_trb;
1796 	struct urb *urb = NULL;
1797 	int status = -EINPROGRESS;
1798 	struct urb_priv *urb_priv;
1799 	struct xhci_ep_ctx *ep_ctx;
1800 	u32 trb_comp_code;
1801 	int ret = 0;
1802 
1803 	slot_id = TRB_TO_SLOT_ID(event->flags);
1804 	xdev = xhci->devs[slot_id];
1805 	if (!xdev) {
1806 		xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
1807 		return -ENODEV;
1808 	}
1809 
1810 	/* Endpoint ID is 1 based, our index is zero based */
1811 	ep_index = TRB_TO_EP_ID(event->flags) - 1;
1812 	xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
1813 	ep = &xdev->eps[ep_index];
1814 	ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
1815 	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
1816 	if (!ep_ring ||
1817 		(ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
1818 		xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
1819 				"or incorrect stream ring\n");
1820 		return -ENODEV;
1821 	}
1822 
1823 	event_dma = event->buffer;
1824 	trb_comp_code = GET_COMP_CODE(event->transfer_len);
1825 	/* Look for common error cases */
1826 	switch (trb_comp_code) {
1827 	/* Skip codes that require special handling depending on
1828 	 * transfer type
1829 	 */
1830 	case COMP_SUCCESS:
1831 	case COMP_SHORT_TX:
1832 		break;
1833 	case COMP_STOP:
1834 		xhci_dbg(xhci, "Stopped on Transfer TRB\n");
1835 		break;
1836 	case COMP_STOP_INVAL:
1837 		xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
1838 		break;
1839 	case COMP_STALL:
1840 		xhci_warn(xhci, "WARN: Stalled endpoint\n");
1841 		ep->ep_state |= EP_HALTED;
1842 		status = -EPIPE;
1843 		break;
1844 	case COMP_TRB_ERR:
1845 		xhci_warn(xhci, "WARN: TRB error on endpoint\n");
1846 		status = -EILSEQ;
1847 		break;
1848 	case COMP_SPLIT_ERR:
1849 	case COMP_TX_ERR:
1850 		xhci_warn(xhci, "WARN: transfer error on endpoint\n");
1851 		status = -EPROTO;
1852 		break;
1853 	case COMP_BABBLE:
1854 		xhci_warn(xhci, "WARN: babble error on endpoint\n");
1855 		status = -EOVERFLOW;
1856 		break;
1857 	case COMP_DB_ERR:
1858 		xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
1859 		status = -ENOSR;
1860 		break;
1861 	case COMP_BW_OVER:
1862 		xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
1863 		break;
1864 	case COMP_BUFF_OVER:
1865 		xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
1866 		break;
1867 	case COMP_UNDERRUN:
1868 		/*
1869 		 * When the Isoch ring is empty, the xHC will generate
1870 		 * a Ring Overrun Event for an IN Isoch endpoint or a Ring
1871 		 * Underrun Event for an OUT Isoch endpoint.
1872 		 */
1873 		xhci_dbg(xhci, "underrun event on endpoint\n");
1874 		if (!list_empty(&ep_ring->td_list))
1875 			xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
1876 					"still with TDs queued?\n",
1877 				TRB_TO_SLOT_ID(event->flags), ep_index);
1878 		goto cleanup;
1879 	case COMP_OVERRUN:
1880 		xhci_dbg(xhci, "overrun event on endpoint\n");
1881 		if (!list_empty(&ep_ring->td_list))
1882 			xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
1883 					"still with TDs queued?\n",
1884 				TRB_TO_SLOT_ID(event->flags), ep_index);
1885 		goto cleanup;
1886 	case COMP_MISSED_INT:
1887 		/*
1888 		 * When we encounter a Missed Service Error, the xHC may have
1889 		 * skipped one or more isoc TDs.
1890 		 * Set the skip flag of the endpoint; complete the missed TDs
1891 		 * as short transfers the next time we process the ring.
1892 		 */
1893 		ep->skip = true;
1894 		xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
1895 		goto cleanup;
1896 	default:
1897 		if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
1898 			status = 0;
1899 			break;
1900 		}
1901 		xhci_warn(xhci, "ERROR Unknown event condition, HC probably "
1902 				"busted\n");
1903 		goto cleanup;
1904 	}
1905 
1906 	do {
1907 		/* This TRB should be in the TD at the head of this ring's
1908 		 * TD list.
1909 		 */
1910 		if (list_empty(&ep_ring->td_list)) {
1911 			xhci_warn(xhci, "WARN Event TRB for slot %d ep %d "
1912 					"with no TDs queued?\n",
1913 				  TRB_TO_SLOT_ID(event->flags), ep_index);
1914 			xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
1915 			  (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
1916 			xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
1917 			if (ep->skip) {
1918 				ep->skip = false;
1919 				xhci_dbg(xhci, "td_list is empty while skip "
1920 						"flag set. Clear skip flag.\n");
1921 			}
1922 			ret = 0;
1923 			goto cleanup;
1924 		}
1925 
1926 		td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
1927 		/* Is this a TRB in the currently executing TD? */
1928 		event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
1929 				td->last_trb, event_dma);
1930 		if (event_seg && ep->skip) {
1931 			xhci_dbg(xhci, "Found td. Clear skip flag.\n");
1932 			ep->skip = false;
1933 		}
1934 		if (!event_seg &&
1935 		   (!ep->skip || !usb_endpoint_xfer_isoc(&td->urb->ep->desc))) {
1936 			/* HC is busted, give up! */
1937 			xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not "
1938 					"part of current TD\n");
1939 			return -ESHUTDOWN;
1940 		}
1941 
1942 		if (event_seg) {
1943 			event_trb = &event_seg->trbs[(event_dma -
1944 					 event_seg->dma) / sizeof(*event_trb)];
1945 			/*
1946 			 * No-op TRB should not trigger interrupts.
1947 			 * If event_trb is a no-op TRB, it means the
1948 			 * corresponding TD has been cancelled. Just ignore
1949 			 * the TD.
1950 			 */
1951 			if ((event_trb->generic.field[3] & TRB_TYPE_BITMASK)
1952 					 == TRB_TYPE(TRB_TR_NOOP)) {
1953 				xhci_dbg(xhci, "event_trb is a no-op TRB. "
1954 						"Skip it\n");
1955 				goto cleanup;
1956 			}
1957 		}
1958 
1959 		/* Now update the urb's actual_length and give back to
1960 		 * the core
1961 		 */
1962 		if (usb_endpoint_xfer_control(&td->urb->ep->desc))
1963 			ret = process_ctrl_td(xhci, td, event_trb, event, ep,
1964 						 &status);
1965 		else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
1966 			ret = process_isoc_td(xhci, td, event_trb, event, ep,
1967 						 &status);
1968 		else
1969 			ret = process_bulk_intr_td(xhci, td, event_trb, event,
1970 						 ep, &status);
1971 
1972 cleanup:
1973 		/*
1974 	 * Do not update the event ring dequeue pointer if ep->skip is set;
1975 	 * we will roll back to continue processing the missed TDs.
1976 		 */
1977 		if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
1978 			inc_deq(xhci, xhci->event_ring, true);
1979 		}
1980 
1981 		if (ret) {
1982 			urb = td->urb;
1983 			urb_priv = urb->hcpriv;
1984 			/* Leave the TD around for the reset endpoint function
1985 			 * to use (but only if it's not a control endpoint,
1986 			 * since we already queued the Set TR dequeue pointer
1987 			 * command for stalled control endpoints).
1988 			 */
1989 			if (usb_endpoint_xfer_control(&urb->ep->desc) ||
1990 				(trb_comp_code != COMP_STALL &&
1991 					trb_comp_code != COMP_BABBLE))
1992 				xhci_urb_free_priv(xhci, urb_priv);
1993 
1994 			usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
1995 			xhci_dbg(xhci, "Giveback URB %p, len = %d, "
1996 					"status = %d\n",
1997 					urb, urb->actual_length, status);
1998 			spin_unlock(&xhci->lock);
1999 			usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
2000 			spin_lock(&xhci->lock);
2001 		}
2002 
2003 	/*
2004 	 * If ep->skip is set, it means there are missed TDs on the
2005 	 * endpoint ring that we need to take care of.
2006 	 * Process them as short transfers until we reach the TD pointed
2007 	 * to by the event.
2008 	 */
2009 	} while (ep->skip && trb_comp_code != COMP_MISSED_INT);
2010 
2011 	return 0;
2012 }
2013 
2014 /*
2015  * This function handles all OS-owned events on the event ring.  It may drop
2016  * xhci->lock between event processing (e.g. to pass up port status changes).
2017  */
2018 static void xhci_handle_event(struct xhci_hcd *xhci)
2019 {
2020 	union xhci_trb *event;
2021 	int update_ptrs = 1;
2022 	int ret;
2023 
2024 	xhci_dbg(xhci, "In %s\n", __func__);
2025 	if (!xhci->event_ring || !xhci->event_ring->dequeue) {
2026 		xhci->error_bitmask |= 1 << 1;
2027 		return;
2028 	}
2029 
2030 	event = xhci->event_ring->dequeue;
2031 	/* Does the HC or OS own the TRB? */
2032 	if ((event->event_cmd.flags & TRB_CYCLE) !=
2033 			xhci->event_ring->cycle_state) {
2034 		xhci->error_bitmask |= 1 << 2;
2035 		return;
2036 	}
2037 	xhci_dbg(xhci, "%s - OS owns TRB\n", __func__);
2038 
2039 	/* FIXME: Handle more event types. */
2040 	switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) {
2041 	case TRB_TYPE(TRB_COMPLETION):
2042 		xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__);
2043 		handle_cmd_completion(xhci, &event->event_cmd);
2044 		xhci_dbg(xhci, "%s - returned from handle_cmd_completion\n", __func__);
2045 		break;
2046 	case TRB_TYPE(TRB_PORT_STATUS):
2047 		xhci_dbg(xhci, "%s - calling handle_port_status\n", __func__);
2048 		handle_port_status(xhci, event);
2049 		xhci_dbg(xhci, "%s - returned from handle_port_status\n", __func__);
2050 		update_ptrs = 0;
2051 		break;
2052 	case TRB_TYPE(TRB_TRANSFER):
2053 		xhci_dbg(xhci, "%s - calling handle_tx_event\n", __func__);
2054 		ret = handle_tx_event(xhci, &event->trans_event);
2055 		xhci_dbg(xhci, "%s - returned from handle_tx_event\n", __func__);
2056 		if (ret < 0)
2057 			xhci->error_bitmask |= 1 << 9;
2058 		else
2059 			update_ptrs = 0;
2060 		break;
2061 	default:
2062 		if ((event->event_cmd.flags & TRB_TYPE_BITMASK) >= TRB_TYPE(48))
2063 			handle_vendor_event(xhci, event);
2064 		else
2065 			xhci->error_bitmask |= 1 << 3;
2066 	}
2067 	/* Any of the above functions may drop and re-acquire the lock, so check
2068 	 * to make sure a watchdog timer didn't mark the host as non-responsive.
2069 	 */
2070 	if (xhci->xhc_state & XHCI_STATE_DYING) {
2071 		xhci_dbg(xhci, "xHCI host dying, returning from "
2072 				"event handler.\n");
2073 		return;
2074 	}
2075 
2076 	if (update_ptrs)
2077 		/* Update SW event ring dequeue pointer */
2078 		inc_deq(xhci, xhci->event_ring, true);
2079 
2080 	/* Are there more items on the event ring? */
2081 	xhci_handle_event(xhci);
2082 }
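
/*
 * Note that the tail call above effectively loops: xhci_handle_event()
 * keeps consuming event TRBs until the ownership check at the top finds
 * a TRB whose cycle bit no longer matches the event ring's cycle state,
 * i.e. a TRB the HC still owns.
 */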
2083 
2084 /*
2085  * xHCI spec says we can get an interrupt, and if the HC has an error condition,
2086  * we might get bad data out of the event ring.  Section 4.10.2.7 has a list of
2087  * indicators of an event TRB error, but we check the status *first* to be safe.
2088  */
2089 irqreturn_t xhci_irq(struct usb_hcd *hcd)
2090 {
2091 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2092 	u32 status;
2093 	union xhci_trb *trb;
2094 	u64 temp_64;
2095 	union xhci_trb *event_ring_deq;
2096 	dma_addr_t deq;
2097 
2098 	spin_lock(&xhci->lock);
2099 	trb = xhci->event_ring->dequeue;
2100 	/* Check if the xHC generated the interrupt, or the irq is shared */
2101 	status = xhci_readl(xhci, &xhci->op_regs->status);
2102 	if (status == 0xffffffff)
2103 		goto hw_died;
2104 
2105 	if (!(status & STS_EINT)) {
2106 		spin_unlock(&xhci->lock);
2107 		xhci_warn(xhci, "Spurious interrupt.\n");
2108 		return IRQ_NONE;
2109 	}
2110 	xhci_dbg(xhci, "op reg status = %08x\n", status);
2111 	xhci_dbg(xhci, "Event ring dequeue ptr:\n");
2112 	xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
2113 			(unsigned long long)
2114 			xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
2115 			lower_32_bits(trb->link.segment_ptr),
2116 			upper_32_bits(trb->link.segment_ptr),
2117 			(unsigned int) trb->link.intr_target,
2118 			(unsigned int) trb->link.control);
2119 
2120 	if (status & STS_FATAL) {
2121 		xhci_warn(xhci, "WARNING: Host System Error\n");
2122 		xhci_halt(xhci);
2123 hw_died:
2124 		xhci_to_hcd(xhci)->state = HC_STATE_HALT;
2125 		spin_unlock(&xhci->lock);
2126 		return -ESHUTDOWN;
2127 	}
2128 
2129 	/*
2130 	 * Clear the op reg interrupt status first,
2131 	 * so we can receive interrupts from other MSI-X interrupters.
2132 	 * Write 1 to clear the interrupt status.
2133 	 */
2134 	status |= STS_EINT;
2135 	xhci_writel(xhci, status, &xhci->op_regs->status);
2136 	/* FIXME when MSI-X is supported and there are multiple vectors */
2137 	/* Clear the MSI-X event interrupt status */
2138 
2139 	if (hcd->irq != -1) {
2140 		u32 irq_pending;
2141 		/* Acknowledge the PCI interrupt */
2142 		irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
2143 		irq_pending |= 0x3;
2144 		xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending);
2145 	}
2146 
2147 	if (xhci->xhc_state & XHCI_STATE_DYING) {
2148 		xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
2149 				"Shouldn't IRQs be disabled?\n");
2150 		/* Clear the event handler busy flag (RW1C);
2151 		 * the event ring should be empty.
2152 		 */
2153 		temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2154 		xhci_write_64(xhci, temp_64 | ERST_EHB,
2155 				&xhci->ir_set->erst_dequeue);
2156 		spin_unlock(&xhci->lock);
2157 
2158 		return IRQ_HANDLED;
2159 	}
2160 
2161 	event_ring_deq = xhci->event_ring->dequeue;
2162 	/* FIXME this should be a delayed service routine
2163 	 * that clears the EHB.
2164 	 */
2165 	xhci_handle_event(xhci);
2166 
2167 	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2168 	/* If necessary, update the HW's version of the event ring deq ptr. */
2169 	if (event_ring_deq != xhci->event_ring->dequeue) {
2170 		deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
2171 				xhci->event_ring->dequeue);
2172 		if (deq == 0)
2173 			xhci_warn(xhci, "WARN something wrong with SW event "
2174 					"ring dequeue ptr.\n");
2175 		/* Update HC event ring dequeue pointer */
2176 		temp_64 &= ERST_PTR_MASK;
2177 		temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
2178 	}
2179 
2180 	/* Clear the event handler busy flag (RW1C); event ring is empty. */
2181 	temp_64 |= ERST_EHB;
2182 	xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
2183 
2184 	spin_unlock(&xhci->lock);
2185 
2186 	return IRQ_HANDLED;
2187 }
2188 
2189 irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
2190 {
2191 	irqreturn_t ret;
2192 
2193 	set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
2194 
2195 	ret = xhci_irq(hcd);
2196 
2197 	return ret;
2198 }
2199 
2200 /****		Endpoint Ring Operations	****/
2201 
2202 /*
2203  * Generic function for queueing a TRB on a ring.
2204  * The caller must have checked to make sure there's room on the ring.
2205  *
2206  * @more_trbs_coming:	Will you enqueue more TRBs before calling
2207  *			prepare_transfer()?
2208  */
2209 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
2210 		bool consumer, bool more_trbs_coming,
2211 		u32 field1, u32 field2, u32 field3, u32 field4)
2212 {
2213 	struct xhci_generic_trb *trb;
2214 
2215 	trb = &ring->enqueue->generic;
2216 	trb->field[0] = field1;
2217 	trb->field[1] = field2;
2218 	trb->field[2] = field3;
2219 	trb->field[3] = field4;
2220 	inc_enq(xhci, ring, consumer, more_trbs_coming);
2221 }
2222 
2223 /*
2224  * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
2225  * FIXME allocate segments if the ring is full.
2226  */
2227 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
2228 		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
2229 {
2230 	/* Make sure the endpoint has been added to xHC schedule */
2231 	xhci_dbg(xhci, "Endpoint state = 0x%x\n", ep_state);
2232 	switch (ep_state) {
2233 	case EP_STATE_DISABLED:
2234 		/*
2235 		 * USB core changed config/interfaces without notifying us,
2236 		 * or hardware is reporting the wrong state.
2237 		 */
2238 		xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
2239 		return -ENOENT;
2240 	case EP_STATE_ERROR:
2241 		xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
2242 		/* FIXME event handling code for error needs to clear it */
2243 		/* XXX not sure if this should be -ENOENT or not */
2244 		return -EINVAL;
2245 	case EP_STATE_HALTED:
2246 		xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
2247 	case EP_STATE_STOPPED:
2248 	case EP_STATE_RUNNING:
2249 		break;
2250 	default:
2251 		xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
2252 		/*
2253 		 * FIXME issue Configure Endpoint command to try to get the HC
2254 		 * back into a known state.
2255 		 */
2256 		return -EINVAL;
2257 	}
2258 	if (!room_on_ring(xhci, ep_ring, num_trbs)) {
2259 		/* FIXME allocate more room */
2260 		xhci_err(xhci, "ERROR no room on ep ring\n");
2261 		return -ENOMEM;
2262 	}
2263 
2264 	if (enqueue_is_link_trb(ep_ring)) {
2265 		struct xhci_ring *ring = ep_ring;
2266 		union xhci_trb *next;
2267 
2268 		xhci_dbg(xhci, "prepare_ring: pointing to link trb\n");
2269 		next = ring->enqueue;
2270 
2271 		while (last_trb(xhci, ring, ring->enq_seg, next)) {
2272 
2273 			/* If we're not dealing with 0.95 hardware,
2274 			 * clear the chain bit.
2275 			 */
2276 			if (!xhci_link_trb_quirk(xhci))
2277 				next->link.control &= ~TRB_CHAIN;
2278 			else
2279 				next->link.control |= TRB_CHAIN;
2280 
2281 			wmb();
2282 			next->link.control ^= (u32) TRB_CYCLE;
2283 
2284 			/* Toggle the cycle bit after the last ring segment. */
2285 			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
2286 				ring->cycle_state = (ring->cycle_state ? 0 : 1);
2287 				if (!in_interrupt()) {
2288 					xhci_dbg(xhci, "queue_trb: Toggle cycle "
2289 						"state for ring %p = %i\n",
2290 						ring, (unsigned int)ring->cycle_state);
2291 				}
2292 			}
2293 			ring->enq_seg = ring->enq_seg->next;
2294 			ring->enqueue = ring->enq_seg->trbs;
2295 			next = ring->enqueue;
2296 		}
2297 	}
2298 
2299 	return 0;
2300 }
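
/*
 * A sketch of the link-TRB walk above: with a two-segment ring whose
 * enqueue pointer sits on the last segment's link TRB and
 * cycle_state = 1, the loop flips the link TRB's cycle bit to hand it
 * to the HC, notices that this link TRB closes the last segment,
 * toggles cycle_state to 0, and leaves enqueue at the first TRB of the
 * first segment.
 */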
2301 
2302 static int prepare_transfer(struct xhci_hcd *xhci,
2303 		struct xhci_virt_device *xdev,
2304 		unsigned int ep_index,
2305 		unsigned int stream_id,
2306 		unsigned int num_trbs,
2307 		struct urb *urb,
2308 		unsigned int td_index,
2309 		gfp_t mem_flags)
2310 {
2311 	int ret;
2312 	struct urb_priv *urb_priv;
2313 	struct xhci_td	*td;
2314 	struct xhci_ring *ep_ring;
2315 	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
2316 
2317 	ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
2318 	if (!ep_ring) {
2319 		xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
2320 				stream_id);
2321 		return -EINVAL;
2322 	}
2323 
2324 	ret = prepare_ring(xhci, ep_ring,
2325 			ep_ctx->ep_info & EP_STATE_MASK,
2326 			num_trbs, mem_flags);
2327 	if (ret)
2328 		return ret;
2329 
2330 	urb_priv = urb->hcpriv;
2331 	td = urb_priv->td[td_index];
2332 
2333 	INIT_LIST_HEAD(&td->td_list);
2334 	INIT_LIST_HEAD(&td->cancelled_td_list);
2335 
2336 	if (td_index == 0) {
2337 		ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
2338 		if (unlikely(ret)) {
2339 			xhci_urb_free_priv(xhci, urb_priv);
2340 			urb->hcpriv = NULL;
2341 			return ret;
2342 		}
2343 	}
2344 
2345 	td->urb = urb;
2346 	/* Add this TD to the tail of the endpoint ring's TD list */
2347 	list_add_tail(&td->td_list, &ep_ring->td_list);
2348 	td->start_seg = ep_ring->enq_seg;
2349 	td->first_trb = ep_ring->enqueue;
2350 
2351 	urb_priv->td[td_index] = td;
2352 
2353 	return 0;
2354 }
2355 
2356 static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
2357 {
2358 	int num_sgs, num_trbs, running_total, temp, i;
2359 	struct scatterlist *sg;
2360 
2361 	sg = NULL;
2362 	num_sgs = urb->num_sgs;
2363 	temp = urb->transfer_buffer_length;
2364 
2365 	xhci_dbg(xhci, "count sg list trbs: \n");
2366 	num_trbs = 0;
2367 	for_each_sg(urb->sg, sg, num_sgs, i) {
2368 		unsigned int previous_total_trbs = num_trbs;
2369 		unsigned int len = sg_dma_len(sg);
2370 
2371 		/* Scatter gather list entries may cross 64KB boundaries */
2372 		running_total = TRB_MAX_BUFF_SIZE -
2373 			(sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
2374 		if (running_total != 0)
2375 			num_trbs++;
2376 
2377 		/* How many more 64KB chunks to transfer, how many more TRBs? */
2378 		while (running_total < sg_dma_len(sg)) {
2379 			num_trbs++;
2380 			running_total += TRB_MAX_BUFF_SIZE;
2381 		}
2382 		xhci_dbg(xhci, " sg #%d: dma = %#llx, len = %#x (%d), num_trbs = %d\n",
2383 				i, (unsigned long long)sg_dma_address(sg),
2384 				len, len, num_trbs - previous_total_trbs);
2385 
2386 		len = min_t(int, len, temp);
2387 		temp -= len;
2388 		if (temp == 0)
2389 			break;
2390 	}
2391 	xhci_dbg(xhci, "\n");
2392 	if (!in_interrupt())
2393 		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %d, sglist used, num_trbs = %d\n",
2394 				urb->ep->desc.bEndpointAddress,
2395 				urb->transfer_buffer_length,
2396 				num_trbs);
2397 	return num_trbs;
2398 }
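
/*
 * Worked example for the counting loop above, with TRB_MAX_BUFF_SIZE
 * at its usual 64KB: an sg entry of length 0x20000 whose DMA address
 * lies 0x100 bytes below a 64KB boundary needs one TRB for that
 * 0x100-byte head plus two more for the remaining 64KB chunks, i.e.
 * three TRBs in total.
 */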
2399 
2400 static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
2401 {
2402 	if (num_trbs != 0)
2403 		dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
2404 				"TRBs, %d left\n", __func__,
2405 				urb->ep->desc.bEndpointAddress, num_trbs);
2406 	if (running_total != urb->transfer_buffer_length)
2407 		dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
2408 				"queued %#x (%d), asked for %#x (%d)\n",
2409 				__func__,
2410 				urb->ep->desc.bEndpointAddress,
2411 				running_total, running_total,
2412 				urb->transfer_buffer_length,
2413 				urb->transfer_buffer_length);
2414 }
2415 
2416 static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
2417 		unsigned int ep_index, unsigned int stream_id, int start_cycle,
2418 		struct xhci_generic_trb *start_trb, struct xhci_td *td)
2419 {
2420 	/*
2421 	 * Pass all the TRBs to the hardware at once and make sure this write
2422 	 * isn't reordered.
2423 	 */
2424 	wmb();
2425 	start_trb->field[3] |= start_cycle;
2426 	xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
2427 }
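
/*
 * TRBs after the first are queued with their cycle bits already set,
 * while the first TRB is held back (see the "Don't give the first TRB
 * to the hardware" comments at the call sites).  The first TRB's cycle
 * bit is what hands the whole chain to the hardware, so it is written
 * last, after the wmb(), just before the doorbell rings.
 */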
2428 
2429 /*
2430  * xHCI uses normal TRBs for both bulk and interrupt.  When the interrupt
2431  * endpoint is to be serviced, the xHC will consume (at most) one TD.  A TD
2432  * (comprised of sg list entries) can take several service intervals to
2433  * transmit.
2434  */
2435 int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2436 		struct urb *urb, int slot_id, unsigned int ep_index)
2437 {
2438 	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci,
2439 			xhci->devs[slot_id]->out_ctx, ep_index);
2440 	int xhci_interval;
2441 	int ep_interval;
2442 
2443 	xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info);
2444 	ep_interval = urb->interval;
2445 	/* Convert to microframes */
2446 	if (urb->dev->speed == USB_SPEED_LOW ||
2447 			urb->dev->speed == USB_SPEED_FULL)
2448 		ep_interval *= 8;
2449 	/* FIXME change this to a warning and a suggestion to use the new API
2450 	 * to set the polling interval (once the API is added).
2451 	 */
2452 	if (xhci_interval != ep_interval) {
2453 		if (printk_ratelimit())
2454 			dev_dbg(&urb->dev->dev, "Driver uses different interval"
2455 					" (%d microframe%s) than xHCI "
2456 					"(%d microframe%s)\n",
2457 					ep_interval,
2458 					ep_interval == 1 ? "" : "s",
2459 					xhci_interval,
2460 					xhci_interval == 1 ? "" : "s");
2461 		urb->interval = xhci_interval;
2462 		/* Convert back to frames for LS/FS devices */
2463 		if (urb->dev->speed == USB_SPEED_LOW ||
2464 				urb->dev->speed == USB_SPEED_FULL)
2465 			urb->interval /= 8;
2466 	}
2467 	return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
2468 }
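
/*
 * Illustrative numbers for the interval fixup above: a full-speed
 * interrupt endpoint with urb->interval = 32 frames is converted to
 * 256 microframes for the comparison.  If the endpoint context
 * encodes, say, 128 microframes instead, urb->interval is overridden
 * to 128 and then divided back by 8, leaving the driver-visible
 * interval at 16 frames.
 */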
2469 
2470 /*
2471  * The TD size is the number of bytes remaining in the TD (including this TRB),
2472  * right shifted by 10.
2473  * It must fit in bits 21:17, so it can't be bigger than 31.
2474  */
2475 static u32 xhci_td_remainder(unsigned int remainder)
2476 {
2477 	u32 max = (1 << (21 - 17 + 1)) - 1;
2478 
2479 	if ((remainder >> 10) >= max)
2480 		return max << 17;
2481 	else
2482 		return (remainder >> 10) << 17;
2483 }
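
/*
 * A quick check of the encoding above: a remainder of 20480 bytes
 * encodes as (20480 >> 10) << 17 = 20 << 17, while anything of
 * 31 * 1024 bytes or more saturates at the field maximum, 31 << 17.
 */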
2484 
2485 static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2486 		struct urb *urb, int slot_id, unsigned int ep_index)
2487 {
2488 	struct xhci_ring *ep_ring;
2489 	unsigned int num_trbs;
2490 	struct urb_priv *urb_priv;
2491 	struct xhci_td *td;
2492 	struct scatterlist *sg;
2493 	int num_sgs;
2494 	int trb_buff_len, this_sg_len, running_total;
2495 	bool first_trb;
2496 	u64 addr;
2497 	bool more_trbs_coming;
2498 
2499 	struct xhci_generic_trb *start_trb;
2500 	int start_cycle;
2501 
2502 	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
2503 	if (!ep_ring)
2504 		return -EINVAL;
2505 
2506 	num_trbs = count_sg_trbs_needed(xhci, urb);
2507 	num_sgs = urb->num_sgs;
2508 
2509 	trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
2510 			ep_index, urb->stream_id,
2511 			num_trbs, urb, 0, mem_flags);
2512 	if (trb_buff_len < 0)
2513 		return trb_buff_len;
2514 
2515 	urb_priv = urb->hcpriv;
2516 	td = urb_priv->td[0];
2517 
2518 	/*
2519 	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
2520 	 * until we've finished creating all the other TRBs.  The ring's cycle
2521 	 * state may change as we enqueue the other TRBs, so save it too.
2522 	 */
2523 	start_trb = &ep_ring->enqueue->generic;
2524 	start_cycle = ep_ring->cycle_state;
2525 
2526 	running_total = 0;
2527 	/*
2528 	 * How much data is in the first TRB?
2529 	 *
2530 	 * There are three forces at work for TRB buffer pointers and lengths:
2531 	 * 1. We don't want to walk off the end of this sg-list entry buffer.
2532 	 * 2. The transfer length that the driver requested may be smaller than
2533 	 *    the amount of memory allocated for this scatter-gather list.
2534 	 * 3. TRB buffers can't cross 64KB boundaries.
2535 	 */
2536 	sg = urb->sg;
2537 	addr = (u64) sg_dma_address(sg);
2538 	this_sg_len = sg_dma_len(sg);
2539 	trb_buff_len = TRB_MAX_BUFF_SIZE -
2540 		(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
2541 	trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
2542 	if (trb_buff_len > urb->transfer_buffer_length)
2543 		trb_buff_len = urb->transfer_buffer_length;
2544 	xhci_dbg(xhci, "First length to xfer from 1st sglist entry = %u\n",
2545 			trb_buff_len);
2546 
2547 	first_trb = true;
2548 	/* Queue the first TRB, even if it's zero-length */
2549 	do {
2550 		u32 field = 0;
2551 		u32 length_field = 0;
2552 		u32 remainder = 0;
2553 
2554 		/* Don't change the cycle bit of the first TRB until later */
2555 		if (first_trb)
2556 			first_trb = false;
2557 		else
2558 			field |= ep_ring->cycle_state;
2559 
2560 		/* Chain all the TRBs together; clear the chain bit in the last
2561 		 * TRB to indicate it's the last TRB in the chain.
2562 		 */
2563 		if (num_trbs > 1) {
2564 			field |= TRB_CHAIN;
2565 		} else {
2566 			/* FIXME - add check for ZERO_PACKET flag before this */
2567 			td->last_trb = ep_ring->enqueue;
2568 			field |= TRB_IOC;
2569 		}
2570 		xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), "
2571 				"64KB boundary at %#x, end dma = %#x\n",
2572 				(unsigned int) addr, trb_buff_len, trb_buff_len,
2573 				(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
2574 				(unsigned int) addr + trb_buff_len);
2575 		if (TRB_MAX_BUFF_SIZE -
2576 				(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) {
2577 			xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
2578 			xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
2579 					(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
2580 					(unsigned int) addr + trb_buff_len);
2581 		}
2582 		remainder = xhci_td_remainder(urb->transfer_buffer_length -
2583 				running_total);
2584 		length_field = TRB_LEN(trb_buff_len) |
2585 			remainder |
2586 			TRB_INTR_TARGET(0);
2587 		if (num_trbs > 1)
2588 			more_trbs_coming = true;
2589 		else
2590 			more_trbs_coming = false;
2591 		queue_trb(xhci, ep_ring, false, more_trbs_coming,
2592 				lower_32_bits(addr),
2593 				upper_32_bits(addr),
2594 				length_field,
2595 				/* We always want to know if the TRB was short,
2596 				 * or we won't get an event when it completes.
2597 				 * (Unless we use event data TRBs, which are a
2598 				 * waste of space and HC resources.)
2599 				 */
2600 				field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
2601 		--num_trbs;
2602 		running_total += trb_buff_len;
2603 
2604 		/* Calculate length for next transfer --
2605 		 * Are we done queueing all the TRBs for this sg entry?
2606 		 */
2607 		this_sg_len -= trb_buff_len;
2608 		if (this_sg_len == 0) {
2609 			--num_sgs;
2610 			if (num_sgs == 0)
2611 				break;
2612 			sg = sg_next(sg);
2613 			addr = (u64) sg_dma_address(sg);
2614 			this_sg_len = sg_dma_len(sg);
2615 		} else {
2616 			addr += trb_buff_len;
2617 		}
2618 
2619 		trb_buff_len = TRB_MAX_BUFF_SIZE -
2620 			(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
2621 		trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
2622 		if (running_total + trb_buff_len > urb->transfer_buffer_length)
2623 			trb_buff_len =
2624 				urb->transfer_buffer_length - running_total;
2625 	} while (running_total < urb->transfer_buffer_length);
2626 
2627 	check_trb_math(urb, num_trbs, running_total);
2628 	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
2629 			start_cycle, start_trb, td);
2630 	return 0;
2631 }
2632 
2633 /* This is very similar to what ehci-q.c qtd_fill() does */
2634 int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2635 		struct urb *urb, int slot_id, unsigned int ep_index)
2636 {
2637 	struct xhci_ring *ep_ring;
2638 	struct urb_priv *urb_priv;
2639 	struct xhci_td *td;
2640 	int num_trbs;
2641 	struct xhci_generic_trb *start_trb;
2642 	bool first_trb;
2643 	bool more_trbs_coming;
2644 	int start_cycle;
2645 	u32 field, length_field;
2646 
2647 	int running_total, trb_buff_len, ret;
2648 	u64 addr;
2649 
2650 	if (urb->num_sgs)
2651 		return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
2652 
2653 	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
2654 	if (!ep_ring)
2655 		return -EINVAL;
2656 
2657 	num_trbs = 0;
2658 	/* How much data is (potentially) left before the 64KB boundary? */
2659 	running_total = TRB_MAX_BUFF_SIZE -
2660 		(urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
2661 
2662 	/* If there's some data on this 64KB chunk, or we have to send a
2663 	 * zero-length transfer, we need at least one TRB
2664 	 */
2665 	if (running_total != 0 || urb->transfer_buffer_length == 0)
2666 		num_trbs++;
2667 	/* How many more 64KB chunks to transfer, how many more TRBs? */
2668 	while (running_total < urb->transfer_buffer_length) {
2669 		num_trbs++;
2670 		running_total += TRB_MAX_BUFF_SIZE;
2671 	}
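	/* Note: a zero-length bulk URB still gets one TRB counted above,
	 * so the do-while below always queues at least one (possibly
	 * zero-length) TRB.
	 */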
2672 	/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
2673 
2674 	if (!in_interrupt())
2675 		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d), addr = %#llx, num_trbs = %d\n",
2676 				urb->ep->desc.bEndpointAddress,
2677 				urb->transfer_buffer_length,
2678 				urb->transfer_buffer_length,
2679 				(unsigned long long)urb->transfer_dma,
2680 				num_trbs);
2681 
2682 	ret = prepare_transfer(xhci, xhci->devs[slot_id],
2683 			ep_index, urb->stream_id,
2684 			num_trbs, urb, 0, mem_flags);
2685 	if (ret < 0)
2686 		return ret;
2687 
2688 	urb_priv = urb->hcpriv;
2689 	td = urb_priv->td[0];
2690 
2691 	/*
2692 	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
2693 	 * until we've finished creating all the other TRBs.  The ring's cycle
2694 	 * state may change as we enqueue the other TRBs, so save it too.
2695 	 */
2696 	start_trb = &ep_ring->enqueue->generic;
2697 	start_cycle = ep_ring->cycle_state;
2698 
2699 	running_total = 0;
2700 	/* How much data is in the first TRB? */
2701 	addr = (u64) urb->transfer_dma;
2702 	trb_buff_len = TRB_MAX_BUFF_SIZE -
2703 		(urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
2704 	if (urb->transfer_buffer_length < trb_buff_len)
2705 		trb_buff_len = urb->transfer_buffer_length;
2706 
2707 	first_trb = true;
2708 
2709 	/* Queue the first TRB, even if it's zero-length */
2710 	do {
2711 		u32 remainder = 0;
2712 		field = 0;
2713 
2714 		/* Don't change the cycle bit of the first TRB until later */
2715 		if (first_trb)
2716 			first_trb = false;
2717 		else
2718 			field |= ep_ring->cycle_state;
2719 
2720 		/* Chain all the TRBs together; clear the chain bit in the last
2721 		 * TRB to indicate it's the last TRB in the chain.
2722 		 */
2723 		if (num_trbs > 1) {
2724 			field |= TRB_CHAIN;
2725 		} else {
2726 			/* FIXME - add check for ZERO_PACKET flag before this */
2727 			td->last_trb = ep_ring->enqueue;
2728 			field |= TRB_IOC;
2729 		}
2730 		remainder = xhci_td_remainder(urb->transfer_buffer_length -
2731 				running_total);
2732 		length_field = TRB_LEN(trb_buff_len) |
2733 			remainder |
2734 			TRB_INTR_TARGET(0);
2735 		if (num_trbs > 1)
2736 			more_trbs_coming = true;
2737 		else
2738 			more_trbs_coming = false;
2739 		queue_trb(xhci, ep_ring, false, more_trbs_coming,
2740 				lower_32_bits(addr),
2741 				upper_32_bits(addr),
2742 				length_field,
2743 				/* We always want to know if the TRB was short,
2744 				 * or we won't get an event when it completes.
2745 				 * (Unless we use event data TRBs, which are a
2746 				 * waste of space and HC resources.)
2747 				 */
2748 				field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
2749 		--num_trbs;
2750 		running_total += trb_buff_len;
2751 
2752 		/* Calculate length for next transfer */
2753 		addr += trb_buff_len;
2754 		trb_buff_len = urb->transfer_buffer_length - running_total;
2755 		if (trb_buff_len > TRB_MAX_BUFF_SIZE)
2756 			trb_buff_len = TRB_MAX_BUFF_SIZE;
2757 	} while (running_total < urb->transfer_buffer_length);
2758 
2759 	check_trb_math(urb, num_trbs, running_total);
2760 	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
2761 			start_cycle, start_trb, td);
2762 	return 0;
2763 }
2764 
2765 /* Caller must have locked xhci->lock */
2766 int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2767 		struct urb *urb, int slot_id, unsigned int ep_index)
2768 {
2769 	struct xhci_ring *ep_ring;
2770 	int num_trbs;
2771 	int ret;
2772 	struct usb_ctrlrequest *setup;
2773 	struct xhci_generic_trb *start_trb;
2774 	int start_cycle;
2775 	u32 field, length_field;
2776 	struct urb_priv *urb_priv;
2777 	struct xhci_td *td;
2778 
2779 	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
2780 	if (!ep_ring)
2781 		return -EINVAL;
2782 
2783 	/*
2784 	 * Need to copy setup packet into setup TRB, so we can't use the setup
2785 	 * DMA address.
2786 	 */
2787 	if (!urb->setup_packet)
2788 		return -EINVAL;
2789 
2790 	if (!in_interrupt())
2791 		xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n",
2792 				slot_id, ep_index);
2793 	/* 1 TRB for setup, 1 for status */
2794 	num_trbs = 2;
2795 	/*
2796 	 * Don't need to check if we need additional event data and normal TRBs,
2797 	 * since data in control transfers will never get bigger than 16MB
2798 	 * XXX: can we get a buffer that crosses 64KB boundaries?
2799 	 */
2800 	if (urb->transfer_buffer_length > 0)
2801 		num_trbs++;
2802 	ret = prepare_transfer(xhci, xhci->devs[slot_id],
2803 			ep_index, urb->stream_id,
2804 			num_trbs, urb, 0, mem_flags);
2805 	if (ret < 0)
2806 		return ret;
2807 
2808 	urb_priv = urb->hcpriv;
2809 	td = urb_priv->td[0];
2810 
2811 	/*
2812 	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
2813 	 * until we've finished creating all the other TRBs.  The ring's cycle
2814 	 * state may change as we enqueue the other TRBs, so save it too.
2815 	 */
2816 	start_trb = &ep_ring->enqueue->generic;
2817 	start_cycle = ep_ring->cycle_state;
2818 
2819 	/* Queue setup TRB - see section 6.4.1.2.1 */
2820 	/* FIXME better way to translate setup_packet into two u32 fields? */
2821 	setup = (struct usb_ctrlrequest *) urb->setup_packet;
2822 	queue_trb(xhci, ep_ring, false, true,
2823 			/* FIXME endianness is probably going to bite my ass here. */
2824 			setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16,
2825 			setup->wIndex | setup->wLength << 16,
2826 			TRB_LEN(8) | TRB_INTR_TARGET(0),
2827 			/* Immediate data in pointer */
2828 			TRB_IDT | TRB_TYPE(TRB_SETUP));
2829 
2830 	/* If there's data, queue data TRBs */
2831 	field = 0;
2832 	length_field = TRB_LEN(urb->transfer_buffer_length) |
2833 		xhci_td_remainder(urb->transfer_buffer_length) |
2834 		TRB_INTR_TARGET(0);
2835 	if (urb->transfer_buffer_length > 0) {
2836 		if (setup->bRequestType & USB_DIR_IN)
2837 			field |= TRB_DIR_IN;
2838 		queue_trb(xhci, ep_ring, false, true,
2839 				lower_32_bits(urb->transfer_dma),
2840 				upper_32_bits(urb->transfer_dma),
2841 				length_field,
2842 				/* Event on short tx */
2843 				field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state);
2844 	}
2845 
2846 	/* Save the DMA address of the last TRB in the TD */
2847 	td->last_trb = ep_ring->enqueue;
2848 
2849 	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
2850 	/* If the device sent data, the status stage is an OUT transfer */
2851 	if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
2852 		field = 0;
2853 	else
2854 		field = TRB_DIR_IN;
2855 	queue_trb(xhci, ep_ring, false, false,
2856 			0,
2857 			0,
2858 			TRB_INTR_TARGET(0),
2859 			/* Event on completion */
2860 			field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
2861 
2862 	giveback_first_trb(xhci, slot_id, ep_index, 0,
2863 			start_cycle, start_trb, td);
2864 	return 0;
2865 }
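
/*
 * Illustrative field packing for the setup TRB above, taking the
 * arithmetic at face value on a little-endian host (see the endianness
 * FIXME): a GET_DESCRIPTOR(Device) setup packet with bRequestType = 0x80,
 * bRequest = 6, wValue = 0x0100, wIndex = 0 and wLength = 18 packs into
 * field1 = 0x01000680 and field2 = 0x00120000, followed by one data
 * stage TRB for the 18 bytes and an OUT status TRB.
 */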
2866 
2867 static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
2868 		struct urb *urb, int i)
2869 {
2870 	int num_trbs = 0;
2871 	u64 addr, td_len, running_total;
2872 
2873 	addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
2874 	td_len = urb->iso_frame_desc[i].length;
2875 
2876 	running_total = TRB_MAX_BUFF_SIZE -
2877 			(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
2878 	if (running_total != 0)
2879 		num_trbs++;
2880 
2881 	while (running_total < td_len) {
2882 		num_trbs++;
2883 		running_total += TRB_MAX_BUFF_SIZE;
2884 	}
2885 
2886 	return num_trbs;
2887 }
2888 
2889 /* This is for isoc transfer */
2890 static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2891 		struct urb *urb, int slot_id, unsigned int ep_index)
2892 {
2893 	struct xhci_ring *ep_ring;
2894 	struct urb_priv *urb_priv;
2895 	struct xhci_td *td;
2896 	int num_tds, trbs_per_td;
2897 	struct xhci_generic_trb *start_trb;
2898 	bool first_trb;
2899 	int start_cycle;
2900 	u32 field, length_field;
2901 	int running_total, trb_buff_len, td_len, td_remain_len, ret;
2902 	u64 start_addr, addr;
2903 	int i, j;
2904 
2905 	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
2906 
2907 	num_tds = urb->number_of_packets;
2908 	if (num_tds < 1) {
2909 		xhci_dbg(xhci, "Isoc URB with zero packets?\n");
2910 		return -EINVAL;
2911 	}
2912 
2913 	if (!in_interrupt())
2914 		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d),"
2915 				" addr = %#llx, num_tds = %d\n",
2916 				urb->ep->desc.bEndpointAddress,
2917 				urb->transfer_buffer_length,
2918 				urb->transfer_buffer_length,
2919 				(unsigned long long)urb->transfer_dma,
2920 				num_tds);
2921 
2922 	start_addr = (u64) urb->transfer_dma;
2923 	start_trb = &ep_ring->enqueue->generic;
2924 	start_cycle = ep_ring->cycle_state;
2925 
2926 	/* Queue the first TRB, even if it's zero-length */
2927 	for (i = 0; i < num_tds; i++) {
2928 		first_trb = true;
2929 
2930 		running_total = 0;
2931 		addr = start_addr + urb->iso_frame_desc[i].offset;
2932 		td_len = urb->iso_frame_desc[i].length;
2933 		td_remain_len = td_len;
2934 
2935 		trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
2936 
2937 		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
2938 				urb->stream_id, trbs_per_td, urb, i, mem_flags);
2939 		if (ret < 0)
2940 			return ret;
2941 
2942 		urb_priv = urb->hcpriv;
2943 		td = urb_priv->td[i];
2944 
2945 		for (j = 0; j < trbs_per_td; j++) {
2946 			u32 remainder = 0;
2947 			field = 0;
2948 
2949 			if (first_trb) {
2950 				/* Queue the isoc TRB */
2951 				field |= TRB_TYPE(TRB_ISOC);
2952 				/* Assume URB_ISO_ASAP is set */
2953 				field |= TRB_SIA;
2954 				if (i > 0)
2955 					field |= ep_ring->cycle_state;
2956 				first_trb = false;
2957 			} else {
2958 				/* Queue other normal TRBs */
2959 				field |= TRB_TYPE(TRB_NORMAL);
2960 				field |= ep_ring->cycle_state;
2961 			}
2962 
2963 			/* Chain all the TRBs together; clear the chain bit in
2964 			 * the last TRB to indicate it's the last TRB in the
2965 			 * chain.
2966 			 */
2967 			if (j < trbs_per_td - 1) {
2968 				field |= TRB_CHAIN;
2969 			} else {
2970 				td->last_trb = ep_ring->enqueue;
2971 				field |= TRB_IOC;
2972 			}
2973 
2974 			/* Calculate TRB length */
2975 			trb_buff_len = TRB_MAX_BUFF_SIZE -
2976 				(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
2977 			if (trb_buff_len > td_remain_len)
2978 				trb_buff_len = td_remain_len;
2979 
2980 			remainder = xhci_td_remainder(td_len - running_total);
2981 			length_field = TRB_LEN(trb_buff_len) |
2982 				remainder |
2983 				TRB_INTR_TARGET(0);
2984 			queue_trb(xhci, ep_ring, false, false,
2985 				lower_32_bits(addr),
2986 				upper_32_bits(addr),
2987 				length_field,
2988 				/* We always want to know if the TRB was short,
2989 				 * or we won't get an event when it completes.
2990 				 * (Unless we use event data TRBs, which are a
2991 				 * waste of space and HC resources.)
2992 				 */
2993 				field | TRB_ISP);
2994 			running_total += trb_buff_len;
2995 
2996 			addr += trb_buff_len;
2997 			td_remain_len -= trb_buff_len;
2998 		}
2999 
3000 		/* Check TD length */
3001 		if (running_total != td_len) {
3002 			xhci_err(xhci, "ISOC TD length unmatch\n");
3003 			return -EINVAL;
3004 		}
3005 	}
3006 
3007 	wmb();
3008 	start_trb->field[3] |= start_cycle;
3009 
3010 	xhci_ring_ep_doorbell(xhci, slot_id, ep_index, urb->stream_id);
3011 	return 0;
3012 }
3013 
3014 /*
3015  * Check transfer ring to guarantee there is enough room for the urb.
3016  * Update ISO URB start_frame and interval.
3017  * Update the interval as xhci_queue_intr_tx does.  For now, just use the
3018  * xHCI frame_index to update urb->start_frame.
3019  * Always assume URB_ISO_ASAP set, and NEVER use urb->start_frame as input.
3020  */
3021 int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
3022 		struct urb *urb, int slot_id, unsigned int ep_index)
3023 {
3024 	struct xhci_virt_device *xdev;
3025 	struct xhci_ring *ep_ring;
3026 	struct xhci_ep_ctx *ep_ctx;
3027 	int start_frame;
3028 	int xhci_interval;
3029 	int ep_interval;
3030 	int num_tds, num_trbs, i;
3031 	int ret;
3032 
3033 	xdev = xhci->devs[slot_id];
3034 	ep_ring = xdev->eps[ep_index].ring;
3035 	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
3036 
3037 	num_trbs = 0;
3038 	num_tds = urb->number_of_packets;
3039 	for (i = 0; i < num_tds; i++)
3040 		num_trbs += count_isoc_trbs_needed(xhci, urb, i);
3041 
3042 	/* Check the ring to guarantee there is enough room for the whole urb.
3043 	 * Do not insert any TD of the URB into the ring if the check fails.
3044 	 */
3045 	ret = prepare_ring(xhci, ep_ring, ep_ctx->ep_info & EP_STATE_MASK,
3046 				num_trbs, mem_flags);
3047 	if (ret)
3048 		return ret;
3049 
3050 	start_frame = xhci_readl(xhci, &xhci->run_regs->microframe_index);
3051 	start_frame &= 0x3fff;
3052 
3053 	urb->start_frame = start_frame;
3054 	if (urb->dev->speed == USB_SPEED_LOW ||
3055 			urb->dev->speed == USB_SPEED_FULL)
3056 		urb->start_frame >>= 3;
3057 
3058 	xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info);
3059 	ep_interval = urb->interval;
3060 	/* Convert to microframes */
3061 	if (urb->dev->speed == USB_SPEED_LOW ||
3062 			urb->dev->speed == USB_SPEED_FULL)
3063 		ep_interval *= 8;
3064 	/* FIXME change this to a warning and a suggestion to use the new API
3065 	 * to set the polling interval (once the API is added).
3066 	 */
3067 	if (xhci_interval != ep_interval) {
3068 		if (printk_ratelimit())
3069 			dev_dbg(&urb->dev->dev, "Driver uses different interval"
3070 					" (%d microframe%s) than xHCI "
3071 					"(%d microframe%s)\n",
3072 					ep_interval,
3073 					ep_interval == 1 ? "" : "s",
3074 					xhci_interval,
3075 					xhci_interval == 1 ? "" : "s");
3076 		urb->interval = xhci_interval;
3077 		/* Convert back to frames for LS/FS devices */
3078 		if (urb->dev->speed == USB_SPEED_LOW ||
3079 				urb->dev->speed == USB_SPEED_FULL)
3080 			urb->interval /= 8;
3081 	}
3082 	return xhci_queue_isoc_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
3083 }
3084 
3085 /****		Command Ring Operations		****/
3086 
3087 /* Generic function for queueing a command TRB on the command ring.
3088  * Check to make sure there's room on the command ring for one command TRB.
3089  * Also check that there's room reserved for commands that must not fail.
3090  * If this is a command that must not fail, meaning command_must_succeed = TRUE,
3091  * then only check for the number of reserved spots.
3092  * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
3093  * because the command event handler may want to resubmit a failed command.
3094  */
3095 static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
3096 		u32 field3, u32 field4, bool command_must_succeed)
3097 {
3098 	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
3099 	int ret;
3100 
3101 	if (!command_must_succeed)
3102 		reserved_trbs++;
3103 
3104 	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
3105 			reserved_trbs, GFP_ATOMIC);
3106 	if (ret < 0) {
3107 		xhci_err(xhci, "ERR: No room for command on command ring\n");
3108 		if (command_must_succeed)
3109 			xhci_err(xhci, "ERR: Reserved TRB counting for "
3110 					"unfailable commands failed.\n");
3111 		return ret;
3112 	}
3113 	queue_trb(xhci, xhci->cmd_ring, false, false, field1, field2, field3,
3114 			field4 | xhci->cmd_ring->cycle_state);
3115 	return 0;
3116 }
3117 
3118 /* Queue a no-op command on the command ring */
3119 static int queue_cmd_noop(struct xhci_hcd *xhci)
3120 {
3121 	return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP), false);
3122 }
3123 
3124 /*
3125  * Place a no-op command on the command ring to test the command and
3126  * event ring.
3127  */
3128 void *xhci_setup_one_noop(struct xhci_hcd *xhci)
3129 {
3130 	if (queue_cmd_noop(xhci) < 0)
3131 		return NULL;
3132 	xhci->noops_submitted++;
3133 	return xhci_ring_cmd_db;
3134 }
3135 
3136 /* Queue a slot enable or disable request on the command ring */
3137 int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
3138 {
3139 	return queue_command(xhci, 0, 0, 0,
3140 			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
3141 }
3142 
3143 /* Queue an address device command TRB */
3144 int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
3145 		u32 slot_id)
3146 {
3147 	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
3148 			upper_32_bits(in_ctx_ptr), 0,
3149 			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id),
3150 			false);
3151 }
3152 
3153 int xhci_queue_vendor_command(struct xhci_hcd *xhci,
3154 		u32 field1, u32 field2, u32 field3, u32 field4)
3155 {
3156 	return queue_command(xhci, field1, field2, field3, field4, false);
3157 }
3158 
3159 /* Queue a reset device command TRB */
3160 int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
3161 {
3162 	return queue_command(xhci, 0, 0, 0,
3163 			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
3164 			false);
3165 }
3166 
3167 /* Queue a configure endpoint command TRB */
3168 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
3169 		u32 slot_id, bool command_must_succeed)
3170 {
3171 	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
3172 			upper_32_bits(in_ctx_ptr), 0,
3173 			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
3174 			command_must_succeed);
3175 }
3176 
3177 /* Queue an evaluate context command TRB */
3178 int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
3179 		u32 slot_id)
3180 {
3181 	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
3182 			upper_32_bits(in_ctx_ptr), 0,
3183 			TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
3184 			false);
3185 }
3186 
3187 /*
3188  * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
3189  * activity on an endpoint that is about to be suspended.
3190  */
3191 int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
3192 		unsigned int ep_index, int suspend)
3193 {
3194 	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
3195 	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
3196 	u32 type = TRB_TYPE(TRB_STOP_RING);
3197 	u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);
3198 
3199 	return queue_command(xhci, 0, 0, 0,
3200 			trb_slot_id | trb_ep_index | type | trb_suspend, false);
3201 }
3202 
3203 /* Set Transfer Ring Dequeue Pointer command.
3204  * This should not be used for endpoints that have streams enabled.
3205  */
3206 static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
3207 		unsigned int ep_index, unsigned int stream_id,
3208 		struct xhci_segment *deq_seg,
3209 		union xhci_trb *deq_ptr, u32 cycle_state)
3210 {
3211 	dma_addr_t addr;
3212 	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
3213 	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
3214 	u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
3215 	u32 type = TRB_TYPE(TRB_SET_DEQ);
3216 
3217 	addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
3218 	if (addr == 0) {
3219 		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
3220 		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
3221 				deq_seg, deq_ptr);
3222 		return 0;
3223 	}
3224 	return queue_command(xhci, lower_32_bits(addr) | cycle_state,
3225 			upper_32_bits(addr), trb_stream_id,
3226 			trb_slot_id | trb_ep_index | type, false);
3227 }
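
/*
 * OR-ing cycle_state into the low bits of the address works because
 * TRBs are 16 bytes long and ring segments are at least 16-byte
 * aligned, so the low four bits of a valid TRB address are zero;
 * bit 0 of the Set TR Dequeue Pointer command carries the dequeue
 * cycle state.
 */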
3228 
3229 int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
3230 		unsigned int ep_index)
3231 {
3232 	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
3233 	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
3234 	u32 type = TRB_TYPE(TRB_RESET_EP);
3235 
3236 	return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,
3237 			false);
3238 }
3239