/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue.  This means there will always be
 *    at least one free TRB in the ring.  This is useful if you want to turn
 *    that into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB.  If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules).  You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify consumer.  If SW is producer, it rings the doorbell for command
 *    and endpoint rings.  If HC is the producer for the event ring, it
 *    generates an interrupt according to interrupt moderation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you.  If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer.
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer.  SW is the consumer for the event ring, and it
 *    updates the event ring dequeue pointer.  HC is the consumer for the
 *    command and endpoint rings; it generates events on the event ring
 *    for these.
 */

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "xhci.h"
#include "xhci-trace.h"
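/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * consumer-side ownership test from the cycle bit rules above.  A TRB belongs
 * to the consumer while its cycle bit matches the ring's cycle state; the
 * driver open-codes this test in its event handlers.
 */
static inline bool example_trb_owned_by_consumer(struct xhci_ring *ring,
		union xhci_trb *trb)
{
	/* Cycle bit == ring cycle state means the producer handed it over */
	return (le32_to_cpu(trb->generic.field[3]) & TRB_CYCLE) ==
		ring->cycle_state;
}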
/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
		union xhci_trb *trb)
{
	unsigned long segment_offset;

	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset in TRBs; valid offsets are 0 .. TRBS_PER_SEGMENT - 1 */
	segment_offset = trb - seg->trbs;
	if (segment_offset >= TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
}

/* Does this link TRB point to the first segment in a ring,
 * or was the previous TRB the last TRB on the last segment in the ERST?
 */
static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
			(seg->next == xhci->event_ring->first_seg);
	else
		return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}

/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
 * segment?  I.e. would the updated event TRB pointer step off the end of the
 * event seg?
 */
static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return trb == &seg->trbs[TRBS_PER_SEGMENT];
	else
		return TRB_TYPE_LINK_LE32(trb->link.control);
}

static int enqueue_is_link_trb(struct xhci_ring *ring)
{
	struct xhci_link_trb *link = &ring->enqueue->link;
	return TRB_TYPE_LINK_LE32(link->control);
}

/* Updates trb to point to the next TRB in the ring, and updates seg if the
 * next TRB is in a new segment.  This does not skip over link TRBs, and it
 * does not affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
		struct xhci_ring *ring,
		struct xhci_segment **seg,
		union xhci_trb **trb)
{
	if (last_trb(xhci, ring, *seg, *trb)) {
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}

/*
 * See Cycle bit rules.  SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 */
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	ring->deq_updates++;

	/*
	 * If this is not event ring, and the dequeue pointer
	 * is not on a link TRB, there is one more usable TRB
	 */
	if (ring->type != TYPE_EVENT &&
			!last_trb(xhci, ring, ring->deq_seg, ring->dequeue))
		ring->num_trbs_free++;

	do {
		/*
		 * Update the dequeue pointer further if that was a link TRB or
		 * we're at the end of an event ring segment (which doesn't have
		 * link TRBs)
		 */
		if (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) {
			if (ring->type == TYPE_EVENT &&
					last_trb_on_last_seg(xhci, ring,
						ring->deq_seg, ring->dequeue)) {
				ring->cycle_state ^= 1;
			}
			ring->deq_seg = ring->deq_seg->next;
			ring->dequeue = ring->deq_seg->trbs;
		} else {
			ring->dequeue++;
		}
	} while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue));
}
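/*
 * Illustrative sketch (hypothetical, not the driver's real event loop): a
 * minimal event ring consumer built from the rules above.  TRBs are consumed
 * while their cycle bit matches our ring cycle state; inc_deq() handles
 * segment crossings and cycle-state toggles.  The real handler also writes
 * the new dequeue pointer back to the ERDP register afterwards.
 */
static void example_drain_event_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring = xhci->event_ring;

	while ((le32_to_cpu(ring->dequeue->event_cmd.flags) & TRB_CYCLE) ==
			ring->cycle_state) {
		/* ... process the event TRB at ring->dequeue here ... */
		inc_deq(xhci, ring);
	}
}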
/*
 * See Cycle bit rules.  SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set.  This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 *
 * @more_trbs_coming:	Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
		bool more_trbs_coming)
{
	u32 chain;
	union xhci_trb *next;

	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
	/* If this is not event ring, there is one less usable TRB */
	if (ring->type != TYPE_EVENT &&
			!last_trb(xhci, ring, ring->enq_seg, ring->enqueue))
		ring->num_trbs_free--;
	next = ++(ring->enqueue);

	ring->enq_updates++;
	/* Update the enqueue pointer further if that was a link TRB or we're at
	 * the end of an event ring segment (which doesn't have link TRBs)
	 */
	while (last_trb(xhci, ring, ring->enq_seg, next)) {
		if (ring->type != TYPE_EVENT) {
			/*
			 * If the caller doesn't plan on enqueueing more
			 * TDs before ringing the doorbell, then we
			 * don't want to give the link TRB to the
			 * hardware just yet.  We'll give the link TRB
			 * back in prepare_ring() just before we enqueue
			 * the TD at the top of the ring.
			 */
			if (!chain && !more_trbs_coming)
				break;

			/* If we're not dealing with 0.95 hardware or
			 * isoc rings on AMD 0.96 host,
			 * carry over the chain bit of the previous TRB
			 * (which may mean the chain bit is cleared).
			 */
			if (!(ring->type == TYPE_ISOC &&
					(xhci->quirks & XHCI_AMD_0x96_HOST))
					&& !xhci_link_trb_quirk(xhci)) {
				next->link.control &=
					cpu_to_le32(~TRB_CHAIN);
				next->link.control |=
					cpu_to_le32(chain);
			}
			/* Give this link TRB to the hardware */
			wmb();
			next->link.control ^= cpu_to_le32(TRB_CYCLE);

			/* Toggle the cycle bit after the last ring segment. */
			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
				ring->cycle_state = (ring->cycle_state ? 0 : 1);
			}
		}
		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}
}

/*
 * Check to see if there's room to enqueue num_trbs on the ring and make sure
 * enqueue pointer will not advance into dequeue segment.  See rules above.
 */
static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_trbs)
{
	int num_trbs_in_deq_seg;

	if (ring->num_trbs_free < num_trbs)
		return 0;

	if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
		num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
		if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
			return 0;
	}

	return 1;
}
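/*
 * Illustrative sketch (hypothetical, assuming a caller that already holds
 * xhci->lock): the producer rules above in miniature.  Check for room, fill
 * in the TRB, write the cycle bit with the final word so the hardware never
 * sees a half-written TRB, then advance the enqueue pointer.  The driver's
 * real equivalents are prepare_ring() and queue_trb().
 */
static int example_queue_noop_trb(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	struct xhci_generic_trb *trb;

	if (!room_on_ring(xhci, ring, 1))
		return -ENOMEM;

	trb = &ring->enqueue->generic;
	trb->field[0] = 0;
	trb->field[1] = 0;
	trb->field[2] = 0;
	/* Hand the TRB over with the last write: type plus our cycle state */
	wmb();
	trb->field[3] = cpu_to_le32(TRB_TYPE(TRB_TR_NOOP) | ring->cycle_state);
	inc_enq(xhci, ring, false);
	return 0;
}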
/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
	if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
		return;

	xhci_dbg(xhci, "// Ding dong!\n");
	writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	readl(&xhci->dba->doorbell[0]);
}

static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
{
	u64 temp_64;
	int ret;

	xhci_dbg(xhci, "Abort command ring\n");

	temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
	xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
			&xhci->op_regs->cmd_ring);

	/* Section 4.6.1.2 of xHCI 1.0 spec says software should
	 * time the completion of all xHCI commands, including
	 * the Command Abort operation.  If software doesn't see
	 * CRR negated in a timely manner (e.g. longer than 5
	 * seconds), then it should assume that there are
	 * larger problems with the xHC and assert HCRST.
	 */
	ret = xhci_handshake(xhci, &xhci->op_regs->cmd_ring,
			CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
	if (ret < 0) {
		xhci_err(xhci, "Stopping the command ring failed, "
				"maybe the host is dead\n");
		xhci->xhc_state |= XHCI_STATE_DYING;
		xhci_quiesce(xhci);
		xhci_halt(xhci);
		return -ESHUTDOWN;
	}

	return 0;
}

void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index,
		unsigned int stream_id)
{
	__le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	unsigned int ep_state = ep->ep_state;

	/* Don't ring the doorbell for this endpoint if there are pending
	 * cancellations because we don't want to interrupt processing.
	 * We don't want to restart any stream rings if there's a set dequeue
	 * pointer command pending because the device can choose to start any
	 * stream once the endpoint is on the HW schedule.
	 * FIXME - check all the stream rings for pending cancellations.
	 */
	if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
			(ep_state & EP_HALTED))
		return;
	writel(DB_VALUE(ep_index, stream_id), db_addr);
	/* The CPU has better things to do at this point than wait for a
	 * write-posting flush.  It'll get there soon enough.
	 */
}
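/*
 * Illustrative usage sketch (hypothetical): after TRBs for one or more TDs
 * have been enqueued on a transfer ring, the producer notifies the hardware
 * once.  Non-stream endpoints pass stream_id 0; the command ring is kicked
 * through doorbell 0 via xhci_ring_cmd_db() instead.
 */
static void example_kick_endpoint(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index)
{
	/* ... TDs were queued on the endpoint ring beforehand ... */
	xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
}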
/* Ring the doorbell for any rings with pending URBs */
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index)
{
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];

	/* A ring has pending URBs if its TD list is not empty */
	if (!(ep->ep_state & EP_HAS_STREAMS)) {
		if (ep->ring && !(list_empty(&ep->ring->td_list)))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
		return;
	}

	for (stream_id = 1; stream_id < ep->stream_info->num_streams;
			stream_id++) {
		struct xhci_stream_info *stream_info = ep->stream_info;
		if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
					stream_id);
	}
}

/*
 * Find the segment that trb is in.  Start searching in start_seg.
 * If we must move past a segment that has a link TRB with a toggle cycle state
 * bit set, then we will toggle the value pointed at by cycle_state.
 */
static struct xhci_segment *find_trb_seg(
		struct xhci_segment *start_seg,
		union xhci_trb *trb, int *cycle_state)
{
	struct xhci_segment *cur_seg = start_seg;
	struct xhci_generic_trb *generic_trb;

	while (cur_seg->trbs > trb ||
			&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
		generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
		if (generic_trb->field[3] & cpu_to_le32(LINK_TOGGLE))
			*cycle_state ^= 0x1;
		cur_seg = cur_seg->next;
		if (cur_seg == start_seg)
			/* Looped over the entire list.  Oops! */
			return NULL;
	}
	return cur_seg;
}


static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}

/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
		xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
}
/*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
 * dequeue pointer, and new consumer cycle state in state.
 * Update our internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 *  - First we update our new ring state to be the same as when the xHC stopped.
 *  - Then we traverse the ring to find the segment that contains
 *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
 *    any link TRBs with the toggle cycle bit set.
 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 *    if we've moved it past a link TRB with the toggle cycle bit set.
 *
 * Some of the uses of xhci_generic_trb are grotty, but if they're done
 * with correct __le32 accesses they should work fine.  Only users of this are
 * in here.
 */
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id, struct xhci_td *cur_td,
		struct xhci_dequeue_state *state)
{
	struct xhci_virt_device *dev = xhci->devs[slot_id];
	struct xhci_virt_ep *ep = &dev->eps[ep_index];
	struct xhci_ring *ep_ring;
	struct xhci_generic_trb *trb;
	dma_addr_t addr;
	u64 hw_dequeue;

	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
			ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN can't find new dequeue state "
				"for invalid stream ID %u.\n",
				stream_id);
		return;
	}

	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Finding endpoint context");
	/* 4.6.9 the css flag is written to the stream context for streams */
	if (ep->ep_state & EP_HAS_STREAMS) {
		struct xhci_stream_ctx *ctx =
			&ep->stream_info->stream_ctx_array[stream_id];
		hw_dequeue = le64_to_cpu(ctx->stream_ring);
	} else {
		struct xhci_ep_ctx *ep_ctx
			= xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
		hw_dequeue = le64_to_cpu(ep_ctx->deq);
	}

	/* Find virtual address and segment of hardware dequeue pointer */
	state->new_deq_seg = ep_ring->deq_seg;
	state->new_deq_ptr = ep_ring->dequeue;
	while (xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr)
			!= (dma_addr_t)(hw_dequeue & ~0xf)) {
		next_trb(xhci, ep_ring, &state->new_deq_seg,
				&state->new_deq_ptr);
		if (state->new_deq_ptr == ep_ring->dequeue) {
			WARN_ON(1);
			return;
		}
	}
	/*
	 * Find cycle state for last_trb, starting at old cycle state of
	 * hw_dequeue.  If there is only one segment in the ring, find_trb_seg()
	 * will return immediately and cannot toggle the cycle state if this
	 * search wraps around, so add one more toggle manually in that case.
	 */
	state->new_cycle_state = hw_dequeue & 0x1;
	if (ep_ring->first_seg == ep_ring->first_seg->next &&
			cur_td->last_trb < state->new_deq_ptr)
		state->new_cycle_state ^= 0x1;

	state->new_deq_ptr = cur_td->last_trb;
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Finding segment containing last TRB in TD.");
	state->new_deq_seg = find_trb_seg(state->new_deq_seg,
			state->new_deq_ptr, &state->new_cycle_state);
	if (!state->new_deq_seg) {
		WARN_ON(1);
		return;
	}

	/* Increment to find next TRB after last_trb.  Cycle if appropriate. */
	trb = &state->new_deq_ptr->generic;
	if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
			(trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
		state->new_cycle_state ^= 0x1;
	next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);

	/* Don't update the ring cycle state for the producer (us). */
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Cycle state = 0x%x", state->new_cycle_state);

	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"New dequeue segment = %p (virtual)",
			state->new_deq_seg);
	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"New dequeue pointer = 0x%llx (DMA)",
			(unsigned long long) addr);
}
/* flip_cycle means flip the cycle bit of all but the first and last TRB.
 * (The last TRB actually points to the ring enqueue pointer, which is not part
 * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring.
 */
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		struct xhci_td *cur_td, bool flip_cycle)
{
	struct xhci_segment *cur_seg;
	union xhci_trb *cur_trb;

	for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
			true;
			next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
		if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) {
			/* Unchain any chained Link TRBs, but
			 * leave the pointers intact.
			 */
			cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
			/* Flip the cycle bit (link TRBs can't be the first
			 * or last TRB).
			 */
			if (flip_cycle)
				cur_trb->generic.field[3] ^=
					cpu_to_le32(TRB_CYCLE);
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Cancel (unchain) link TRB");
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Address = %p (0x%llx dma); "
					"in seg %p (0x%llx dma)",
					cur_trb,
					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
					cur_seg,
					(unsigned long long)cur_seg->dma);
		} else {
			cur_trb->generic.field[0] = 0;
			cur_trb->generic.field[1] = 0;
			cur_trb->generic.field[2] = 0;
			/* Preserve only the cycle bit of this TRB */
			cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
			/* Flip the cycle bit except on the first or last TRB */
			if (flip_cycle && cur_trb != cur_td->first_trb &&
					cur_trb != cur_td->last_trb)
				cur_trb->generic.field[3] ^=
					cpu_to_le32(TRB_CYCLE);
			cur_trb->generic.field[3] |= cpu_to_le32(
				TRB_TYPE(TRB_TR_NOOP));
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"TRB to noop at offset 0x%llx",
					(unsigned long long)
					xhci_trb_virt_to_dma(cur_seg, cur_trb));
		}
		if (cur_trb == cur_td->last_trb)
			break;
	}
}

static int queue_set_tr_deq(struct xhci_hcd *xhci,
		struct xhci_command *cmd, int slot_id,
		unsigned int ep_index, unsigned int stream_id,
		struct xhci_segment *deq_seg,
		union xhci_trb *deq_ptr, u32 cycle_state);

void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
		struct xhci_command *cmd,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_dequeue_state *deq_state)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];

	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
			"new deq ptr = %p (0x%llx dma), new cycle = %u",
			deq_state->new_deq_seg,
			(unsigned long long)deq_state->new_deq_seg->dma,
			deq_state->new_deq_ptr,
			(unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
			deq_state->new_cycle_state);
	queue_set_tr_deq(xhci, cmd, slot_id, ep_index, stream_id,
			deq_state->new_deq_seg,
			deq_state->new_deq_ptr,
			(u32) deq_state->new_cycle_state);
	/* Stop the TD queueing code from ringing the doorbell until
	 * this command completes.  The HC won't set the dequeue pointer
	 * if the ring is running, and ringing the doorbell starts the
	 * ring running.
	 */
	ep->ep_state |= SET_DEQ_PENDING;
}
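/*
 * Illustrative call sequence (a sketch mirroring xhci_handle_cmd_stop_ep()
 * below, not a new driver entry point): once a stopped endpoint's cancelled
 * TD has been located, compute the new dequeue state, queue a Set TR Dequeue
 * Pointer command, and ring the command doorbell.  The Set TR Deq completion
 * handler then brings our software ring back in sync.
 */
static void example_skip_cancelled_td(struct xhci_hcd *xhci,
		struct xhci_command *cmd, unsigned int slot_id,
		unsigned int ep_index, struct xhci_td *td)
{
	struct xhci_dequeue_state deq_state;

	memset(&deq_state, 0, sizeof(deq_state));
	xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
			td->urb->stream_id, td, &deq_state);
	xhci_queue_new_dequeue_state(xhci, cmd, slot_id, ep_index,
			td->urb->stream_id, &deq_state);
	xhci_ring_cmd_db(xhci);
}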
static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	ep->ep_state &= ~EP_HALT_PENDING;
	/* Can't del_timer_sync in interrupt, so we attempt to cancel.  If the
	 * timer is running on another CPU, we don't decrement stop_cmds_pending
	 * (since we didn't successfully stop the watchdog timer).
	 */
	if (del_timer(&ep->stop_cmd_timer))
		ep->stop_cmds_pending--;
}

/* Must be called with xhci->lock held in interrupt context */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
		struct xhci_td *cur_td, int status)
{
	struct usb_hcd *hcd;
	struct urb *urb;
	struct urb_priv *urb_priv;

	urb = cur_td->urb;
	urb_priv = urb->hcpriv;
	urb_priv->td_cnt++;
	hcd = bus_to_hcd(urb->dev->bus);

	/* Only give back the URB when this is the last TD in the URB */
	if (urb_priv->td_cnt == urb_priv->length) {
		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
			xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
			if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
				if (xhci->quirks & XHCI_AMD_PLL_FIX)
					usb_amd_quirk_pll_enable();
			}
		}
		usb_hcd_unlink_urb_from_ep(hcd, urb);

		spin_unlock(&xhci->lock);
		usb_hcd_giveback_urb(hcd, urb, status);
		xhci_urb_free_priv(xhci, urb_priv);
		spin_lock(&xhci->lock);
	}
}

/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 *
 *  1. If the HW was in the middle of processing the TD that needs to be
 *     cancelled, then we must move the ring's dequeue pointer past the last
 *     TRB in the TD with a Set Dequeue Pointer Command.
 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the
 *     chain bit cleared) so that the HW will skip over them.
 */
static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, struct xhci_event_cmd *event)
{
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct list_head *entry;
	struct xhci_td *cur_td = NULL;
	struct xhci_td *last_unlinked_td;

	struct xhci_dequeue_state deq_state;

	if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
		if (!xhci->devs[slot_id])
			xhci_warn(xhci, "Stop endpoint command "
					"completion for disabled slot %u\n",
					slot_id);
		return;
	}

	memset(&deq_state, 0, sizeof(deq_state));
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	ep = &xhci->devs[slot_id]->eps[ep_index];

	if (list_empty(&ep->cancelled_td_list)) {
		xhci_stop_watchdog_timer_in_irq(xhci, ep);
		ep->stopped_td = NULL;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}
	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
	 * We have the xHCI lock, so nothing can modify this list until we drop
	 * it.  We're also in the event handler, so we can't get re-interrupted
	 * if another Stop Endpoint command completes.
	 */
	list_for_each(entry, &ep->cancelled_td_list) {
		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Removing canceled TD starting at 0x%llx (dma).",
				(unsigned long long)xhci_trb_virt_to_dma(
					cur_td->start_seg, cur_td->first_trb));
		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
		if (!ep_ring) {
			/* This shouldn't happen unless a driver is mucking
			 * with the stream ID after submission.  This will
			 * leave the TD on the hardware ring, and the hardware
			 * will try to execute it, and may access a buffer
			 * that has already been freed.  In the best case, the
			 * hardware will execute it, and the event handler will
			 * ignore the completion event for that TD, since it was
			 * removed from the td_list for that endpoint.  In
			 * short, don't muck with the stream ID after
			 * submission.
			 */
			xhci_warn(xhci, "WARN Cancelled URB %p "
					"has invalid stream ID %u.\n",
					cur_td->urb,
					cur_td->urb->stream_id);
			goto remove_finished_td;
		}
		/*
		 * If we stopped on the TD we need to cancel, then we have to
		 * move the xHC endpoint ring dequeue pointer past this TD.
		 */
		if (cur_td == ep->stopped_td)
			xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
					cur_td->urb->stream_id,
					cur_td, &deq_state);
		else
			td_to_noop(xhci, ep_ring, cur_td, false);
remove_finished_td:
		/*
		 * The event handler won't see a completion for this TD anymore,
		 * so remove it from the endpoint ring's TD list.  Keep it in
		 * the cancelled TD list for URB completion later.
		 */
		list_del_init(&cur_td->td_list);
	}
	last_unlinked_td = cur_td;
	xhci_stop_watchdog_timer_in_irq(xhci, ep);

	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
		struct xhci_command *command;
		command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
		xhci_queue_new_dequeue_state(xhci, command,
				slot_id, ep_index,
				ep->stopped_td->urb->stream_id,
				&deq_state);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Otherwise ring the doorbell(s) to restart queued transfers */
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}

	/* Clear stopped_td if endpoint is not halted */
	if (!(ep->ep_state & EP_HALTED))
		ep->stopped_td = NULL;

	/*
	 * Drop the lock and complete the URBs in the cancelled TD list.
	 * New TDs to be cancelled might be added to the end of the list before
	 * we can complete all the URBs for the TDs we already unlinked.
	 * So stop when we've completed the URB for the last TD we unlinked.
	 */
	do {
		cur_td = list_entry(ep->cancelled_td_list.next,
				struct xhci_td, cancelled_td_list);
		list_del_init(&cur_td->cancelled_td_list);

		/* Clean up the cancelled URB */
		/* Doesn't matter what we pass for status, since the core will
		 * just overwrite it (because the URB has been unlinked).
		 */
		xhci_giveback_urb_in_irq(xhci, cur_td, 0);

		/* Stop processing the cancelled list if the watchdog timer is
		 * running.
		 */
		if (xhci->xhc_state & XHCI_STATE_DYING)
			return;
	} while (cur_td != last_unlinked_td);

	/* Return to the event handler with xhci->lock re-acquired */
}
static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	struct xhci_td *cur_td;

	while (!list_empty(&ring->td_list)) {
		cur_td = list_first_entry(&ring->td_list,
				struct xhci_td, td_list);
		list_del_init(&cur_td->td_list);
		if (!list_empty(&cur_td->cancelled_td_list))
			list_del_init(&cur_td->cancelled_td_list);
		xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
	}
}

static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
		int slot_id, int ep_index)
{
	struct xhci_td *cur_td;
	struct xhci_virt_ep *ep;
	struct xhci_ring *ring;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	if ((ep->ep_state & EP_HAS_STREAMS) ||
			(ep->ep_state & EP_GETTING_NO_STREAMS)) {
		int stream_id;

		for (stream_id = 0; stream_id < ep->stream_info->num_streams;
				stream_id++) {
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Killing URBs for slot ID %u, ep index %u, stream %u",
					slot_id, ep_index, stream_id + 1);
			xhci_kill_ring_urbs(xhci,
					ep->stream_info->stream_rings[stream_id]);
		}
	} else {
		ring = ep->ring;
		if (!ring)
			return;
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Killing URBs for slot ID %u, ep index %u",
				slot_id, ep_index);
		xhci_kill_ring_urbs(xhci, ring);
	}
	while (!list_empty(&ep->cancelled_td_list)) {
		cur_td = list_first_entry(&ep->cancelled_td_list,
				struct xhci_td, cancelled_td_list);
		list_del_init(&cur_td->cancelled_td_list);
		xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
	}
}
/* Watchdog timer function for when a stop endpoint command fails to complete.
 * In this case, we assume the host controller is broken or dying or dead.  The
 * host may still be completing some other events, so we have to be careful to
 * let the event ring handler and the URB dequeueing/enqueueing functions know
 * through xhci->state.
 *
 * The timer may also fire if the host takes a very long time to respond to the
 * command, and the stop endpoint command completion handler cannot delete the
 * timer before the timer function is called.  Another endpoint cancellation may
 * sneak in before the timer function can grab the lock, and that may queue
 * another stop endpoint command and add the timer back.  So we cannot use a
 * simple flag to say whether there is a pending stop endpoint command for a
 * particular endpoint.
 *
 * Instead we use a combination of that flag and a counter for the number of
 * pending stop endpoint commands.  If the timer is the tail end of the last
 * stop endpoint command, and the endpoint's command is still pending, we
 * assume the host is dying.
 */
void xhci_stop_endpoint_command_watchdog(unsigned long arg)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_ep *ep;
	int ret, i, j;
	unsigned long flags;

	ep = (struct xhci_virt_ep *) arg;
	xhci = ep->xhci;

	spin_lock_irqsave(&xhci->lock, flags);

	ep->stop_cmds_pending--;
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Stop EP timer ran, but another timer marked "
				"xHCI as DYING, exiting.");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}
	if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Stop EP timer ran, but no command pending, "
				"exiting.");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
	xhci_warn(xhci, "Assuming host is dying, halting host.\n");
	/* Oops, HC is dead or dying or at least not responding to the stop
	 * endpoint command.
	 */
	xhci->xhc_state |= XHCI_STATE_DYING;
	/* Disable interrupts from the host controller and start halting it */
	xhci_quiesce(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	ret = xhci_halt(xhci);

	spin_lock_irqsave(&xhci->lock, flags);
	if (ret < 0) {
		/* This is bad; the host is not responding to commands and it's
		 * not allowing itself to be halted.  At least interrupts are
		 * disabled.  If we call usb_hc_died(), it will attempt to
		 * disconnect all device drivers under this host.  Those
		 * disconnect() methods will wait for all URBs to be unlinked,
		 * so we must complete them.
		 */
		xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
		xhci_warn(xhci, "Completing active URBs anyway.\n");
		/* We could turn all TDs on the rings to no-ops.  This won't
		 * help if the host has cached part of the ring, and is slow if
		 * we want to preserve the cycle bit.  Skip it and hope the host
		 * doesn't touch the memory.
		 */
	}
	for (i = 0; i < MAX_HC_SLOTS; i++) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; j++)
			xhci_kill_endpoint_urbs(xhci, i, j);
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Calling usb_hc_died()");
	usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"xHCI host controller is dead.");
}
static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
		struct xhci_virt_device *dev,
		struct xhci_ring *ep_ring,
		unsigned int ep_index)
{
	union xhci_trb *dequeue_temp;
	int num_trbs_free_temp;
	bool revert = false;

	num_trbs_free_temp = ep_ring->num_trbs_free;
	dequeue_temp = ep_ring->dequeue;

	/* If we get two back-to-back stalls, and the first stalled transfer
	 * ends just before a link TRB, the dequeue pointer will be left on
	 * the link TRB by the code in the while loop.  So we have to update
	 * the dequeue pointer one segment further, or we'll jump off
	 * the segment into la-la-land.
	 */
	if (last_trb(xhci, ep_ring, ep_ring->deq_seg, ep_ring->dequeue)) {
		ep_ring->deq_seg = ep_ring->deq_seg->next;
		ep_ring->dequeue = ep_ring->deq_seg->trbs;
	}

	while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
		/* We have more usable TRBs */
		ep_ring->num_trbs_free++;
		ep_ring->dequeue++;
		if (last_trb(xhci, ep_ring, ep_ring->deq_seg,
				ep_ring->dequeue)) {
			if (ep_ring->dequeue ==
					dev->eps[ep_index].queued_deq_ptr)
				break;
			ep_ring->deq_seg = ep_ring->deq_seg->next;
			ep_ring->dequeue = ep_ring->deq_seg->trbs;
		}
		if (ep_ring->dequeue == dequeue_temp) {
			revert = true;
			break;
		}
	}

	if (revert) {
		xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
		ep_ring->num_trbs_free = num_trbs_free_temp;
	}
}
/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so that
 * the TD queueing code can ring the doorbell again.  We also need to ring the
 * endpoint doorbell to restart the ring, but only if there aren't more
 * cancellations pending.
 */
static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, u32 cmd_comp_code)
{
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_ring *ep_ring;
	struct xhci_virt_device *dev;
	struct xhci_virt_ep *ep;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;

	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
	dev = xhci->devs[slot_id];
	ep = &dev->eps[ep_index];

	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
				stream_id);
		/* XXX: Harmless??? */
		dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
		return;
	}

	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);

	if (cmd_comp_code != COMP_SUCCESS) {
		unsigned int ep_state;
		unsigned int slot_state;

		switch (cmd_comp_code) {
		case COMP_TRB_ERR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
			break;
		case COMP_CTX_STATE:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
			ep_state = le32_to_cpu(ep_ctx->ep_info);
			ep_state &= EP_STATE_MASK;
			slot_state = le32_to_cpu(slot_ctx->dev_state);
			slot_state = GET_SLOT_STATE(slot_state);
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Slot state = %u, EP state = %u",
					slot_state, ep_state);
			break;
		case COMP_EBADSLT:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
					slot_id);
			break;
		default:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
					cmd_comp_code);
			break;
		}
		/* OK what do we do now?  The endpoint state is hosed, and we
		 * should never get to this point if the synchronization between
		 * queueing, and endpoint state are correct.  This might happen
		 * if the device gets disconnected after we've finished
		 * cancelling URBs, which might not be an error...
		 */
	} else {
		u64 deq;
		/* 4.6.10 deq ptr is written to the stream ctx for streams */
		if (ep->ep_state & EP_HAS_STREAMS) {
			struct xhci_stream_ctx *ctx =
				&ep->stream_info->stream_ctx_array[stream_id];
			deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
		} else {
			deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
		}
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
		if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
				ep->queued_deq_ptr) == deq) {
			/* Update the ring's dequeue segment and dequeue pointer
			 * to reflect the new position.
			 */
			update_ring_for_set_deq_completion(xhci, dev,
					ep_ring, ep_index);
		} else {
			xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
			xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
					ep->queued_deq_seg, ep->queued_deq_ptr);
		}
	}

	dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
	dev->eps[ep_index].queued_deq_seg = NULL;
	dev->eps[ep_index].queued_deq_ptr = NULL;
	/* Restart any rings with pending URBs */
	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, u32 cmd_comp_code)
{
	unsigned int ep_index;

	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	/* This command will only fail if the endpoint wasn't halted,
	 * but we don't care.
	 */
	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
		"Ignoring reset ep completion code of %u", cmd_comp_code);

	/* HW with the reset endpoint quirk needs to have a configure endpoint
	 * command complete before the endpoint can be used.  Queue that here
	 * because the HW can't handle two commands being queued in a row.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
		struct xhci_command *command;
		command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Queueing configure endpoint command");
		xhci_queue_configure_endpoint(xhci, command,
				xhci->devs[slot_id]->in_ctx->dma, slot_id,
				false);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Clear our internal halted state and restart the ring(s) */
		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}
}

static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
		u32 cmd_comp_code)
{
	if (cmd_comp_code == COMP_SUCCESS)
		xhci->slot_id = slot_id;
	else
		xhci->slot_id = 0;
}

static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *virt_dev;

	virt_dev = xhci->devs[slot_id];
	if (!virt_dev)
		return;
	if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
		/* Delete default control endpoint resources */
		xhci_free_device_endpoint_resources(xhci, virt_dev, true);
	xhci_free_virt_device(xhci, slot_id);
}
static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
		struct xhci_event_cmd *event, u32 cmd_comp_code)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	unsigned int ep_state;
	u32 add_flags, drop_flags;

	/*
	 * Configure endpoint commands can come from the USB core
	 * configuration or alt setting changes, or because the HW
	 * needed an extra configure endpoint command after a reset
	 * endpoint command or streams were being configured.
	 * If the command was for a halted endpoint, the xHCI driver
	 * is not waiting on the configure endpoint command.
	 */
	virt_dev = xhci->devs[slot_id];
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "Could not get input context, bad type.\n");
		return;
	}

	add_flags = le32_to_cpu(ctrl_ctx->add_flags);
	drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
	/* Input ctx add_flags are the endpoint index plus one */
	ep_index = xhci_last_valid_endpoint(add_flags) - 1;

	/* A usb_set_interface() call directly after clearing a halted
	 * condition may race on this quirky hardware.  Not worth
	 * worrying about, since this is prototype hardware.  Not sure
	 * if this will work for streams, but streams support was
	 * untested on this prototype.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
			ep_index != (unsigned int) -1 &&
			add_flags - SLOT_FLAG == drop_flags) {
		ep_state = virt_dev->eps[ep_index].ep_state;
		if (!(ep_state & EP_HALTED))
			return;
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Completed config ep cmd - "
				"last ep index = %d, state = %d",
				ep_index, ep_state);
		/* Clear internal halted state and restart ring(s) */
		virt_dev->eps[ep_index].ep_state &= ~EP_HALTED;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}
	return;
}

static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
		struct xhci_event_cmd *event)
{
	xhci_dbg(xhci, "Completed reset device command.\n");
	if (!xhci->devs[slot_id])
		xhci_warn(xhci, "Reset device command completion "
				"for disabled slot %u\n", slot_id);
}

static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	if (!(xhci->quirks & XHCI_NEC_HOST)) {
		xhci->error_bitmask |= 1 << 6;
		return;
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"NEC firmware version %2x.%02x",
			NEC_FW_MAJOR(le32_to_cpu(event->status)),
			NEC_FW_MINOR(le32_to_cpu(event->status)));
}

static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
{
	list_del(&cmd->cmd_list);

	if (cmd->completion) {
		cmd->status = status;
		complete(cmd->completion);
	} else {
		kfree(cmd);
	}
}

void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
{
	struct xhci_command *cur_cmd, *tmp_cmd;
	list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
		xhci_complete_del_and_free_cmd(cur_cmd, COMP_CMD_ABORT);
}
/*
 * Turn all commands on the command ring with status set to "aborted" into
 * no-op TRBs.  If there are other commands waiting, then restart the ring and
 * kick the timer.  This must be called with the command ring stopped and
 * xhci->lock held.
 */
static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
		struct xhci_command *cur_cmd)
{
	struct xhci_command *i_cmd, *tmp_cmd;
	u32 cycle_state;

	/* Turn all aborted commands in list to no-ops, then restart */
	list_for_each_entry_safe(i_cmd, tmp_cmd, &xhci->cmd_list,
			cmd_list) {

		if (i_cmd->status != COMP_CMD_ABORT)
			continue;

		i_cmd->status = COMP_CMD_STOP;

		xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
				i_cmd->command_trb);
		/* get cycle state from the original cmd trb */
		cycle_state = le32_to_cpu(
			i_cmd->command_trb->generic.field[3]) & TRB_CYCLE;
		/* modify the command trb to no-op command */
		i_cmd->command_trb->generic.field[0] = 0;
		i_cmd->command_trb->generic.field[1] = 0;
		i_cmd->command_trb->generic.field[2] = 0;
		i_cmd->command_trb->generic.field[3] = cpu_to_le32(
			TRB_TYPE(TRB_CMD_NOOP) | cycle_state);

		/*
		 * caller waiting for completion is called when command
		 * completion event is received for these no-op commands
		 */
	}

	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	/* ring command ring doorbell to restart the command ring */
	if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
			!(xhci->xhc_state & XHCI_STATE_DYING)) {
		xhci->current_cmd = cur_cmd;
		mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
		xhci_ring_cmd_db(xhci);
	}
	return;
}


void xhci_handle_command_timeout(unsigned long data)
{
	struct xhci_hcd *xhci;
	int ret;
	unsigned long flags;
	u64 hw_ring_state;
	struct xhci_command *cur_cmd = NULL;
	xhci = (struct xhci_hcd *) data;

	/* mark this command to be cancelled */
	spin_lock_irqsave(&xhci->lock, flags);
	if (xhci->current_cmd) {
		cur_cmd = xhci->current_cmd;
		cur_cmd->status = COMP_CMD_ABORT;
	}


	/* Make sure command ring is running before aborting it */
	hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
			(hw_ring_state & CMD_RING_RUNNING)) {

		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "Command timeout\n");
		ret = xhci_abort_cmd_ring(xhci);
		if (unlikely(ret == -ESHUTDOWN)) {
			xhci_err(xhci, "Abort command ring failed\n");
			xhci_cleanup_command_queue(xhci);
			usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
			xhci_dbg(xhci, "xHCI host controller is dead.\n");
		}
		return;
	}
	/* command timeout on stopped ring, ring can't be aborted */
	xhci_dbg(xhci, "Command timeout on stopped ring\n");
	xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
	spin_unlock_irqrestore(&xhci->lock, flags);
	return;
}
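/*
 * Illustrative submit-and-wait sketch (hypothetical helper, not part of the
 * driver; assumes a sleepable context and a valid slot): commands allocated
 * with a completion are woken by xhci_complete_del_and_free_cmd() once
 * handle_cmd_completion() below processes their completion event.  Configure
 * Endpoint is used as an example; any queued command works the same way.
 */
static int example_configure_ep_and_wait(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_command *cmd;
	unsigned long flags;
	int ret;

	/* false = no input context of its own, true = allocate a completion */
	cmd = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_configure_endpoint(xhci, cmd,
			xhci->devs[slot_id]->in_ctx->dma, slot_id, false);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_free_command(xhci, cmd);
		return ret;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	wait_for_completion(cmd->completion);
	ret = (cmd->status == COMP_SUCCESS) ? 0 : -EIO;
	xhci_free_command(xhci, cmd);
	return ret;
}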
static void handle_cmd_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	u64 cmd_dma;
	dma_addr_t cmd_dequeue_dma;
	u32 cmd_comp_code;
	union xhci_trb *cmd_trb;
	struct xhci_command *cmd;
	u32 cmd_type;

	cmd_dma = le64_to_cpu(event->cmd_trb);
	cmd_trb = xhci->cmd_ring->dequeue;
	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			cmd_trb);
	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
	if (cmd_dequeue_dma == 0) {
		xhci->error_bitmask |= 1 << 4;
		return;
	}
	/* Does the DMA address match our internal dequeue pointer address? */
	if (cmd_dma != (u64) cmd_dequeue_dma) {
		xhci->error_bitmask |= 1 << 5;
		return;
	}

	cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list);

	if (cmd->command_trb != xhci->cmd_ring->dequeue) {
		xhci_err(xhci,
			 "Command completion event does not match command\n");
		return;
	}

	del_timer(&xhci->cmd_timer);

	trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);

	cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));

	/* If CMD ring stopped we own the trbs between enqueue and dequeue */
	if (cmd_comp_code == COMP_CMD_STOP) {
		xhci_handle_stopped_cmd_ring(xhci, cmd);
		return;
	}
	/*
	 * Host aborted the command ring, check if the current command was
	 * supposed to be aborted, otherwise continue normally.
	 * The command ring is stopped now, but the xHC will issue a Command
	 * Ring Stopped event which will cause us to restart it.
	 */
	if (cmd_comp_code == COMP_CMD_ABORT) {
		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
		if (cmd->status == COMP_CMD_ABORT)
			goto event_handled;
	}

	cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
	switch (cmd_type) {
	case TRB_ENABLE_SLOT:
		xhci_handle_cmd_enable_slot(xhci, slot_id, cmd_comp_code);
		break;
	case TRB_DISABLE_SLOT:
		xhci_handle_cmd_disable_slot(xhci, slot_id);
		break;
	case TRB_CONFIG_EP:
		if (!cmd->completion)
			xhci_handle_cmd_config_ep(xhci, slot_id, event,
					cmd_comp_code);
		break;
	case TRB_EVAL_CONTEXT:
		break;
	case TRB_ADDR_DEV:
		break;
	case TRB_STOP_RING:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event);
		break;
	case TRB_SET_DEQ:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
		break;
	case TRB_CMD_NOOP:
		/* Is this an aborted command turned to NO-OP? */
		if (cmd->status == COMP_CMD_STOP)
			cmd_comp_code = COMP_CMD_STOP;
		break;
	case TRB_RESET_EP:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
		break;
	case TRB_RESET_DEV:
		/* SLOT_ID field in reset device cmd completion event TRB is 0.
		 * Use the SLOT_ID from the command TRB instead (xhci 4.6.11).
		 */
		slot_id = TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3]));
		xhci_handle_cmd_reset_dev(xhci, slot_id, event);
		break;
	case TRB_NEC_GET_FW:
		xhci_handle_cmd_nec_get_fw(xhci, event);
		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci->error_bitmask |= 1 << 6;
		break;
	}

	/* restart timer if this wasn't the last command */
	if (cmd->cmd_list.next != &xhci->cmd_list) {
		xhci->current_cmd = list_entry(cmd->cmd_list.next,
				struct xhci_command, cmd_list);
		mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
	}

event_handled:
	xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);

	inc_deq(xhci, xhci->cmd_ring);
}
static void handle_vendor_event(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 trb_type;

	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
	xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
	if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
		handle_cmd_completion(xhci, &event->event_cmd);
}

/* @port_id: the one-based port ID from the hardware (indexed from array of all
 * port registers -- USB 3.0 and USB 2.0).
 *
 * Returns a zero-based port number, which is suitable for indexing into each
 * of the split roothubs' port arrays and bus state arrays.
 * Add one to it in order to call xhci_find_slot_id_by_port.
 */
static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
		struct xhci_hcd *xhci, u32 port_id)
{
	unsigned int i;
	unsigned int num_similar_speed_ports = 0;

	/* port_id from the hardware is 1-based, but port_array[], usb3_ports[],
	 * and usb2_ports are 0-based indexes.  Count the number of similar
	 * speed ports, up to 1 port before this port.
	 */
	for (i = 0; i < (port_id - 1); i++) {
		u8 port_speed = xhci->port_array[i];

		/*
		 * Skip ports that don't have known speeds, or have duplicate
		 * Extended Capabilities port speed entries.
		 */
		if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
			continue;

		/*
		 * USB 3.0 ports are always under a USB 3.0 hub.  USB 2.0 and
		 * 1.1 ports are under the USB 2.0 hub.  If the port speed
		 * matches the device speed, it's a similar speed port.
		 */
		if ((port_speed == 0x03) == (hcd->speed == HCD_USB3))
			num_similar_speed_ports++;
	}
	return num_similar_speed_ports;
}
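/*
 * Worked example (hypothetical port layout): if port_array[] holds the major
 * revisions { 0x02, 0x03, 0x02 } for hardware ports 1-3, then an event with
 * port_id = 3 on the USB 2.0 roothub counts one earlier similar-speed port
 * (hw port 1) and returns faked port index 1, i.e. the second entry of the
 * USB 2.0 roothub's zero-based port array.
 */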
static void handle_device_notification(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 slot_id;
	struct usb_device *udev;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
	if (!xhci->devs[slot_id]) {
		xhci_warn(xhci, "Device Notification event for "
				"unused slot %u\n", slot_id);
		return;
	}

	xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
			slot_id);
	udev = xhci->devs[slot_id]->udev;
	if (udev && udev->parent)
		usb_wakeup_notification(udev->parent, udev->portnum);
}

static void handle_port_status(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	struct usb_hcd *hcd;
	u32 port_id;
	u32 temp, temp1;
	int max_ports;
	int slot_id;
	unsigned int faked_port_index;
	u8 major_revision;
	struct xhci_bus_state *bus_state;
	__le32 __iomem **port_array;
	bool bogus_port_status = false;

	/* Port status change events always have a successful completion code */
	if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
		xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
		xhci->error_bitmask |= 1 << 8;
	}
	port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);

	max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	if ((port_id <= 0) || (port_id > max_ports)) {
		xhci_warn(xhci, "Invalid port id %d\n", port_id);
		inc_deq(xhci, xhci->event_ring);
		return;
	}

	/* Figure out which usb_hcd this port is attached to:
	 * is it a USB 3.0 port or a USB 2.0/1.1 port?
	 */
	major_revision = xhci->port_array[port_id - 1];

	/* Find the right roothub. */
	hcd = xhci_to_hcd(xhci);
	if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
		hcd = xhci->shared_hcd;

	if (major_revision == 0) {
		xhci_warn(xhci, "Event for port %u not in "
				"Extended Capabilities, ignoring.\n",
				port_id);
		bogus_port_status = true;
		goto cleanup;
	}
	if (major_revision == DUPLICATE_ENTRY) {
		xhci_warn(xhci, "Event for port %u duplicated in "
				"Extended Capabilities, ignoring.\n",
				port_id);
		bogus_port_status = true;
		goto cleanup;
	}

	/*
	 * Hardware port IDs reported by a Port Status Change Event include USB
	 * 3.0 and USB 2.0 ports.  We want to check if the port has reported a
	 * resume event, but we first need to translate the hardware port ID
	 * into the index into the ports on the correct split roothub, and the
	 * correct bus_state structure.
	 */
*/ 1595 bus_state = &xhci->bus_state[hcd_index(hcd)]; 1596 if (hcd->speed == HCD_USB3) 1597 port_array = xhci->usb3_ports; 1598 else 1599 port_array = xhci->usb2_ports; 1600 /* Find the faked port hub number */ 1601 faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci, 1602 port_id); 1603 1604 temp = readl(port_array[faked_port_index]); 1605 if (hcd->state == HC_STATE_SUSPENDED) { 1606 xhci_dbg(xhci, "resume root hub\n"); 1607 usb_hcd_resume_root_hub(hcd); 1608 } 1609 1610 if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) { 1611 xhci_dbg(xhci, "port resume event for port %d\n", port_id); 1612 1613 temp1 = readl(&xhci->op_regs->command); 1614 if (!(temp1 & CMD_RUN)) { 1615 xhci_warn(xhci, "xHC is not running.\n"); 1616 goto cleanup; 1617 } 1618 1619 if (DEV_SUPERSPEED(temp)) { 1620 xhci_dbg(xhci, "remote wake SS port %d\n", port_id); 1621 /* Set a flag to say the port signaled remote wakeup, 1622 * so we can tell the difference between the end of 1623 * device and host initiated resume. 1624 */ 1625 bus_state->port_remote_wakeup |= 1 << faked_port_index; 1626 xhci_test_and_clear_bit(xhci, port_array, 1627 faked_port_index, PORT_PLC); 1628 xhci_set_link_state(xhci, port_array, faked_port_index, 1629 XDEV_U0); 1630 /* Need to wait until the next link state change 1631 * indicates the device is actually in U0. 1632 */ 1633 bogus_port_status = true; 1634 goto cleanup; 1635 } else { 1636 xhci_dbg(xhci, "resume HS port %d\n", port_id); 1637 bus_state->resume_done[faked_port_index] = jiffies + 1638 msecs_to_jiffies(20); 1639 set_bit(faked_port_index, &bus_state->resuming_ports); 1640 mod_timer(&hcd->rh_timer, 1641 bus_state->resume_done[faked_port_index]); 1642 /* Do the rest in GetPortStatus */ 1643 } 1644 } 1645 1646 if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_U0 && 1647 DEV_SUPERSPEED(temp)) { 1648 xhci_dbg(xhci, "resume SS port %d finished\n", port_id); 1649 /* We've just brought the device into U0 through either the 1650 * Resume state after a device remote wakeup, or through the 1651 * U3Exit state after a host-initiated resume. If it's a device 1652 * initiated remote wake, don't pass up the link state change, 1653 * so the roothub behavior is consistent with external 1654 * USB 3.0 hub behavior. 1655 */ 1656 slot_id = xhci_find_slot_id_by_port(hcd, xhci, 1657 faked_port_index + 1); 1658 if (slot_id && xhci->devs[slot_id]) 1659 xhci_ring_device(xhci, slot_id); 1660 if (bus_state->port_remote_wakeup & (1 << faked_port_index)) { 1661 bus_state->port_remote_wakeup &= 1662 ~(1 << faked_port_index); 1663 xhci_test_and_clear_bit(xhci, port_array, 1664 faked_port_index, PORT_PLC); 1665 usb_wakeup_notification(hcd->self.root_hub, 1666 faked_port_index + 1); 1667 bogus_port_status = true; 1668 goto cleanup; 1669 } 1670 } 1671 1672 /* 1673 * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or 1674 * RExit to a disconnect state). If so, let the driver know it's 1675 * out of the RExit state.
1676 */ 1677 if (!DEV_SUPERSPEED(temp) && 1678 test_and_clear_bit(faked_port_index, 1679 &bus_state->rexit_ports)) { 1680 complete(&bus_state->rexit_done[faked_port_index]); 1681 bogus_port_status = true; 1682 goto cleanup; 1683 } 1684 1685 if (hcd->speed != HCD_USB3) 1686 xhci_test_and_clear_bit(xhci, port_array, faked_port_index, 1687 PORT_PLC); 1688 1689 cleanup: 1690 /* Update event ring dequeue pointer before dropping the lock */ 1691 inc_deq(xhci, xhci->event_ring); 1692 1693 /* Don't make the USB core poll the roothub if we got a bad port status 1694 * change event. Besides, at that point we can't tell which roothub 1695 * (USB 2.0 or USB 3.0) to kick. 1696 */ 1697 if (bogus_port_status) 1698 return; 1699 1700 /* 1701 * xHCI port-status-change events occur when the "or" of all the 1702 * status-change bits in the portsc register changes from 0 to 1. 1703 * New status changes won't cause an event if any other change 1704 * bits are still set. When an event occurs, switch over to 1705 * polling to avoid losing status changes. 1706 */ 1707 xhci_dbg(xhci, "%s: starting port polling.\n", __func__); 1708 set_bit(HCD_FLAG_POLL_RH, &hcd->flags); 1709 spin_unlock(&xhci->lock); 1710 /* Pass this up to the core */ 1711 usb_hcd_poll_rh_status(hcd); 1712 spin_lock(&xhci->lock); 1713 } 1714 1715 /* 1716 * This TD is defined by the TRBs starting at start_trb in start_seg and ending 1717 * at end_trb, which may be in another segment. If the suspect DMA address is a 1718 * TRB in this TD, this function returns that TRB's segment. Otherwise it 1719 * returns 0. 1720 */ 1721 struct xhci_segment *trb_in_td(struct xhci_segment *start_seg, 1722 union xhci_trb *start_trb, 1723 union xhci_trb *end_trb, 1724 dma_addr_t suspect_dma) 1725 { 1726 dma_addr_t start_dma; 1727 dma_addr_t end_seg_dma; 1728 dma_addr_t end_trb_dma; 1729 struct xhci_segment *cur_seg; 1730 1731 start_dma = xhci_trb_virt_to_dma(start_seg, start_trb); 1732 cur_seg = start_seg; 1733 1734 do { 1735 if (start_dma == 0) 1736 return NULL; 1737 /* We may get an event for a Link TRB in the middle of a TD */ 1738 end_seg_dma = xhci_trb_virt_to_dma(cur_seg, 1739 &cur_seg->trbs[TRBS_PER_SEGMENT - 1]); 1740 /* If the end TRB isn't in this segment, this is set to 0 */ 1741 end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb); 1742 1743 if (end_trb_dma > 0) { 1744 /* The end TRB is in this segment, so suspect should be here */ 1745 if (start_dma <= end_trb_dma) { 1746 if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma) 1747 return cur_seg; 1748 } else { 1749 /* Case for one segment with 1750 * a TD wrapped around to the top 1751 */ 1752 if ((suspect_dma >= start_dma && 1753 suspect_dma <= end_seg_dma) || 1754 (suspect_dma >= cur_seg->dma && 1755 suspect_dma <= end_trb_dma)) 1756 return cur_seg; 1757 } 1758 return NULL; 1759 } else { 1760 /* Might still be somewhere in this segment */ 1761 if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma) 1762 return cur_seg; 1763 } 1764 cur_seg = cur_seg->next; 1765 start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]); 1766 } while (cur_seg != start_seg); 1767 1768 return NULL; 1769 } 1770 1771 static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci, 1772 unsigned int slot_id, unsigned int ep_index, 1773 unsigned int stream_id, 1774 struct xhci_td *td, union xhci_trb *event_trb) 1775 { 1776 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; 1777 struct xhci_command *command; 1778 command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC); 1779 if (!command) 1780 return; 
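	/*
	 * Halt-recovery sketch (matching the calls below): mark the endpoint
	 * halted, queue a Reset Endpoint command, move the dequeue pointer
	 * past the stalled TD via xhci_cleanup_stalled_ring(), then ring the
	 * command doorbell so the xHC processes the queued commands.
	 */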
1781 1782 ep->ep_state |= EP_HALTED; 1783 ep->stopped_td = td; 1784 ep->stopped_stream = stream_id; 1785 1786 xhci_queue_reset_ep(xhci, command, slot_id, ep_index); 1787 xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index); 1788 1789 ep->stopped_td = NULL; 1790 ep->stopped_stream = 0; 1791 1792 xhci_ring_cmd_db(xhci); 1793 } 1794 1795 /* Check if an error has halted the endpoint ring. The class driver will 1796 * clean up the halt for a non-default control endpoint if we indicate a stall. 1797 * However, babble and other errors also halt the endpoint ring, and the class 1798 * driver won't clear the halt in that case, so we need to issue a Set Transfer 1799 * Ring Dequeue Pointer command manually. 1800 */ 1801 static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci, 1802 struct xhci_ep_ctx *ep_ctx, 1803 unsigned int trb_comp_code) 1804 { 1805 /* TRB completion codes that may require a manual halt cleanup */ 1806 if (trb_comp_code == COMP_TX_ERR || 1807 trb_comp_code == COMP_BABBLE || 1808 trb_comp_code == COMP_SPLIT_ERR) 1809 /* The 0.95 spec says a babbling control endpoint 1810 * is not halted. The 0.96 spec says it is. Some HW 1811 * claims to be 0.95 compliant, but it halts the control 1812 * endpoint anyway. Check if a babble halted the 1813 * endpoint. 1814 */ 1815 if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) == 1816 cpu_to_le32(EP_STATE_HALTED)) 1817 return 1; 1818 1819 return 0; 1820 } 1821 1822 int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code) 1823 { 1824 if (trb_comp_code >= 224 && trb_comp_code <= 255) { 1825 /* Vendor defined "informational" completion code, 1826 * treat as not-an-error. 1827 */ 1828 xhci_dbg(xhci, "Vendor defined info completion code %u\n", 1829 trb_comp_code); 1830 xhci_dbg(xhci, "Treating code as success.\n"); 1831 return 1; 1832 } 1833 return 0; 1834 } 1835 1836 /* 1837 * Finish the td processing: remove the td from the td list and 1838 * return 1 if the urb can be given back. 1839 */ 1840 static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, 1841 union xhci_trb *event_trb, struct xhci_transfer_event *event, 1842 struct xhci_virt_ep *ep, int *status, bool skip) 1843 { 1844 struct xhci_virt_device *xdev; 1845 struct xhci_ring *ep_ring; 1846 unsigned int slot_id; 1847 int ep_index; 1848 struct urb *urb = NULL; 1849 struct xhci_ep_ctx *ep_ctx; 1850 int ret = 0; 1851 struct urb_priv *urb_priv; 1852 u32 trb_comp_code; 1853 1854 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); 1855 xdev = xhci->devs[slot_id]; 1856 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; 1857 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); 1858 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 1859 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); 1860 1861 if (skip) 1862 goto td_cleanup; 1863 1864 if (trb_comp_code == COMP_STOP_INVAL || 1865 trb_comp_code == COMP_STOP) { 1866 /* The Endpoint Stop Command completion will take care of any 1867 * stopped TDs. A stopped TD may be restarted, so don't update 1868 * the ring dequeue pointer or take this TD off any lists yet. 1869 */ 1870 ep->stopped_td = td; 1871 return 0; 1872 } else { 1873 if (trb_comp_code == COMP_STALL) { 1874 /* The transfer is completed from the driver's 1875 * perspective, but we need to issue a set dequeue 1876 * command for this stalled endpoint to move the dequeue 1877 * pointer past the TD. We can't do that here because 1878 * the halt condition must be cleared first.
Let the 1879 * USB class driver clear the stall later. 1880 */ 1881 ep->stopped_td = td; 1882 ep->stopped_stream = ep_ring->stream_id; 1883 } else if (xhci_requires_manual_halt_cleanup(xhci, 1884 ep_ctx, trb_comp_code)) { 1885 /* Other types of errors halt the endpoint, but the 1886 * class driver doesn't call usb_reset_endpoint() unless 1887 * the error is -EPIPE. Clear the halted status in the 1888 * xHCI hardware manually. 1889 */ 1890 xhci_cleanup_halted_endpoint(xhci, 1891 slot_id, ep_index, ep_ring->stream_id, 1892 td, event_trb); 1893 } else { 1894 /* Update ring dequeue pointer */ 1895 while (ep_ring->dequeue != td->last_trb) 1896 inc_deq(xhci, ep_ring); 1897 inc_deq(xhci, ep_ring); 1898 } 1899 1900 td_cleanup: 1901 /* Clean up the endpoint's TD list */ 1902 urb = td->urb; 1903 urb_priv = urb->hcpriv; 1904 1905 /* Do one last check of the actual transfer length. 1906 * If the host controller said we transferred more data than 1907 * the buffer length, urb->actual_length will be a very big 1908 * number (since it's unsigned). Play it safe and say we didn't 1909 * transfer anything. 1910 */ 1911 if (urb->actual_length > urb->transfer_buffer_length) { 1912 xhci_warn(xhci, "URB transfer length is wrong, " 1913 "xHC issue? req. len = %u, " 1914 "act. len = %u\n", 1915 urb->transfer_buffer_length, 1916 urb->actual_length); 1917 urb->actual_length = 0; 1918 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 1919 *status = -EREMOTEIO; 1920 else 1921 *status = 0; 1922 } 1923 list_del_init(&td->td_list); 1924 /* Was this TD slated to be cancelled but completed anyway? */ 1925 if (!list_empty(&td->cancelled_td_list)) 1926 list_del_init(&td->cancelled_td_list); 1927 1928 urb_priv->td_cnt++; 1929 /* Giveback the urb when all the tds are completed */ 1930 if (urb_priv->td_cnt == urb_priv->length) { 1931 ret = 1; 1932 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { 1933 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--; 1934 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs 1935 == 0) { 1936 if (xhci->quirks & XHCI_AMD_PLL_FIX) 1937 usb_amd_quirk_pll_enable(); 1938 } 1939 } 1940 } 1941 } 1942 1943 return ret; 1944 } 1945 1946 /* 1947 * Process control tds, update urb status and actual_length. 
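 *
 * A control TD is at most three TRBs: Setup, an optional Data stage, and
 * Status. In the checks below, an event on ep_ring->dequeue is for the
 * Setup TRB, one on td->last_trb is for the Status TRB, and anything in
 * between belongs to the Data stage.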
1948 */ 1949 static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, 1950 union xhci_trb *event_trb, struct xhci_transfer_event *event, 1951 struct xhci_virt_ep *ep, int *status) 1952 { 1953 struct xhci_virt_device *xdev; 1954 struct xhci_ring *ep_ring; 1955 unsigned int slot_id; 1956 int ep_index; 1957 struct xhci_ep_ctx *ep_ctx; 1958 u32 trb_comp_code; 1959 1960 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); 1961 xdev = xhci->devs[slot_id]; 1962 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; 1963 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); 1964 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 1965 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); 1966 1967 switch (trb_comp_code) { 1968 case COMP_SUCCESS: 1969 if (event_trb == ep_ring->dequeue) { 1970 xhci_warn(xhci, "WARN: Success on ctrl setup TRB " 1971 "without IOC set??\n"); 1972 *status = -ESHUTDOWN; 1973 } else if (event_trb != td->last_trb) { 1974 xhci_warn(xhci, "WARN: Success on ctrl data TRB " 1975 "without IOC set??\n"); 1976 *status = -ESHUTDOWN; 1977 } else { 1978 *status = 0; 1979 } 1980 break; 1981 case COMP_SHORT_TX: 1982 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 1983 *status = -EREMOTEIO; 1984 else 1985 *status = 0; 1986 break; 1987 case COMP_STOP_INVAL: 1988 case COMP_STOP: 1989 return finish_td(xhci, td, event_trb, event, ep, status, false); 1990 default: 1991 if (!xhci_requires_manual_halt_cleanup(xhci, 1992 ep_ctx, trb_comp_code)) 1993 break; 1994 xhci_dbg(xhci, "TRB error code %u, " 1995 "halted endpoint index = %u\n", 1996 trb_comp_code, ep_index); 1997 /* else fall through */ 1998 case COMP_STALL: 1999 /* Did we transfer part of the data (middle) phase? */ 2000 if (event_trb != ep_ring->dequeue && 2001 event_trb != td->last_trb) 2002 td->urb->actual_length = 2003 td->urb->transfer_buffer_length - 2004 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); 2005 else 2006 td->urb->actual_length = 0; 2007 2008 xhci_cleanup_halted_endpoint(xhci, 2009 slot_id, ep_index, 0, td, event_trb); 2010 return finish_td(xhci, td, event_trb, event, ep, status, true); 2011 } 2012 /* 2013 * Did we transfer any data, despite the errors that might have 2014 * happened? I.e. did we get past the setup stage? 2015 */ 2016 if (event_trb != ep_ring->dequeue) { 2017 /* The event was for the status stage */ 2018 if (event_trb == td->last_trb) { 2019 if (td->urb->actual_length != 0) { 2020 /* Don't overwrite a previously set error code 2021 */ 2022 if ((*status == -EINPROGRESS || *status == 0) && 2023 (td->urb->transfer_flags 2024 & URB_SHORT_NOT_OK)) 2025 /* Did we already see a short data 2026 * stage? */ 2027 *status = -EREMOTEIO; 2028 } else { 2029 td->urb->actual_length = 2030 td->urb->transfer_buffer_length; 2031 } 2032 } else { 2033 /* Maybe the event was for the data stage? */ 2034 td->urb->actual_length = 2035 td->urb->transfer_buffer_length - 2036 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); 2037 xhci_dbg(xhci, "Waiting for status " 2038 "stage event\n"); 2039 return 0; 2040 } 2041 } 2042 2043 return finish_td(xhci, td, event_trb, event, ep, status, false); 2044 } 2045 2046 /* 2047 * Process isochronous tds, update urb packet status and actual_length. 
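 *
 * Per-frame status mapping used below: a bandwidth overrun maps to -ECOMM,
 * babble or buffer overrun to -EOVERFLOW, stall/transaction/device errors
 * to -EPROTO, and an unrecognized completion code to -1; a frame skipped
 * after a Missed Service Error is completed with -EXDEV (see skip_isoc_td()).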
2048 */ 2049 static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, 2050 union xhci_trb *event_trb, struct xhci_transfer_event *event, 2051 struct xhci_virt_ep *ep, int *status) 2052 { 2053 struct xhci_ring *ep_ring; 2054 struct urb_priv *urb_priv; 2055 int idx; 2056 int len = 0; 2057 union xhci_trb *cur_trb; 2058 struct xhci_segment *cur_seg; 2059 struct usb_iso_packet_descriptor *frame; 2060 u32 trb_comp_code; 2061 bool skip_td = false; 2062 2063 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); 2064 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); 2065 urb_priv = td->urb->hcpriv; 2066 idx = urb_priv->td_cnt; 2067 frame = &td->urb->iso_frame_desc[idx]; 2068 2069 /* handle completion code */ 2070 switch (trb_comp_code) { 2071 case COMP_SUCCESS: 2072 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) { 2073 frame->status = 0; 2074 break; 2075 } 2076 if ((xhci->quirks & XHCI_TRUST_TX_LENGTH)) 2077 trb_comp_code = COMP_SHORT_TX; 2078 case COMP_SHORT_TX: 2079 frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ? 2080 -EREMOTEIO : 0; 2081 break; 2082 case COMP_BW_OVER: 2083 frame->status = -ECOMM; 2084 skip_td = true; 2085 break; 2086 case COMP_BUFF_OVER: 2087 case COMP_BABBLE: 2088 frame->status = -EOVERFLOW; 2089 skip_td = true; 2090 break; 2091 case COMP_DEV_ERR: 2092 case COMP_STALL: 2093 case COMP_TX_ERR: 2094 frame->status = -EPROTO; 2095 skip_td = true; 2096 break; 2097 case COMP_STOP: 2098 case COMP_STOP_INVAL: 2099 break; 2100 default: 2101 frame->status = -1; 2102 break; 2103 } 2104 2105 if (trb_comp_code == COMP_SUCCESS || skip_td) { 2106 frame->actual_length = frame->length; 2107 td->urb->actual_length += frame->length; 2108 } else { 2109 for (cur_trb = ep_ring->dequeue, 2110 cur_seg = ep_ring->deq_seg; cur_trb != event_trb; 2111 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { 2112 if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) && 2113 !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) 2114 len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])); 2115 } 2116 len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - 2117 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); 2118 2119 if (trb_comp_code != COMP_STOP_INVAL) { 2120 frame->actual_length = len; 2121 td->urb->actual_length += len; 2122 } 2123 } 2124 2125 return finish_td(xhci, td, event_trb, event, ep, status, false); 2126 } 2127 2128 static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, 2129 struct xhci_transfer_event *event, 2130 struct xhci_virt_ep *ep, int *status) 2131 { 2132 struct xhci_ring *ep_ring; 2133 struct urb_priv *urb_priv; 2134 struct usb_iso_packet_descriptor *frame; 2135 int idx; 2136 2137 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); 2138 urb_priv = td->urb->hcpriv; 2139 idx = urb_priv->td_cnt; 2140 frame = &td->urb->iso_frame_desc[idx]; 2141 2142 /* The transfer is partly done. */ 2143 frame->status = -EXDEV; 2144 2145 /* calc actual length */ 2146 frame->actual_length = 0; 2147 2148 /* Update ring dequeue pointer */ 2149 while (ep_ring->dequeue != td->last_trb) 2150 inc_deq(xhci, ep_ring); 2151 inc_deq(xhci, ep_ring); 2152 2153 return finish_td(xhci, td, NULL, event, ep, status, true); 2154 } 2155 2156 /* 2157 * Process bulk and interrupt tds, update urb status and actual_length. 
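 *
 * Fast path below: when the event lands on td->last_trb, actual_length is
 * transfer_buffer_length minus the residue reported in EVENT_TRB_LEN.
 * Slow path: walk the TRBs from the ring's dequeue pointer and sum their
 * TRB_LEN fields instead.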
2158 */ 2159 static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, 2160 union xhci_trb *event_trb, struct xhci_transfer_event *event, 2161 struct xhci_virt_ep *ep, int *status) 2162 { 2163 struct xhci_ring *ep_ring; 2164 union xhci_trb *cur_trb; 2165 struct xhci_segment *cur_seg; 2166 u32 trb_comp_code; 2167 2168 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); 2169 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); 2170 2171 switch (trb_comp_code) { 2172 case COMP_SUCCESS: 2173 /* Double check that the HW transferred everything. */ 2174 if (event_trb != td->last_trb || 2175 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { 2176 xhci_warn(xhci, "WARN Successful completion " 2177 "on short TX\n"); 2178 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 2179 *status = -EREMOTEIO; 2180 else 2181 *status = 0; 2182 if ((xhci->quirks & XHCI_TRUST_TX_LENGTH)) 2183 trb_comp_code = COMP_SHORT_TX; 2184 } else { 2185 *status = 0; 2186 } 2187 break; 2188 case COMP_SHORT_TX: 2189 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 2190 *status = -EREMOTEIO; 2191 else 2192 *status = 0; 2193 break; 2194 default: 2195 /* Others already handled above */ 2196 break; 2197 } 2198 if (trb_comp_code == COMP_SHORT_TX) 2199 xhci_dbg(xhci, "ep %#x - asked for %d bytes, " 2200 "%d bytes untransferred\n", 2201 td->urb->ep->desc.bEndpointAddress, 2202 td->urb->transfer_buffer_length, 2203 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len))); 2204 /* Fast path - was this the last TRB in the TD for this URB? */ 2205 if (event_trb == td->last_trb) { 2206 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { 2207 td->urb->actual_length = 2208 td->urb->transfer_buffer_length - 2209 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); 2210 if (td->urb->transfer_buffer_length < 2211 td->urb->actual_length) { 2212 xhci_warn(xhci, "HC gave bad length " 2213 "of %d bytes left\n", 2214 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len))); 2215 td->urb->actual_length = 0; 2216 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 2217 *status = -EREMOTEIO; 2218 else 2219 *status = 0; 2220 } 2221 /* Don't overwrite a previously set error code */ 2222 if (*status == -EINPROGRESS) { 2223 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 2224 *status = -EREMOTEIO; 2225 else 2226 *status = 0; 2227 } 2228 } else { 2229 td->urb->actual_length = 2230 td->urb->transfer_buffer_length; 2231 /* Ignore a short packet completion if the 2232 * untransferred length was zero. 2233 */ 2234 if (*status == -EREMOTEIO) 2235 *status = 0; 2236 } 2237 } else { 2238 /* Slow path - walk the list, starting from the dequeue 2239 * pointer, to get the actual length transferred. 
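		 * For example (hypothetical numbers): a TD of three 1024-byte
		 * Normal TRBs that stops on the third with 200 bytes left
		 * untransferred yields 1024 + 1024 + (1024 - 200) = 2872 bytes.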
2240 */ 2241 td->urb->actual_length = 0; 2242 for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg; 2243 cur_trb != event_trb; 2244 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { 2245 if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) && 2246 !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) 2247 td->urb->actual_length += 2248 TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])); 2249 } 2250 /* If the ring didn't stop on a Link or No-op TRB, add 2251 * in the actual bytes transferred from the Normal TRB 2252 */ 2253 if (trb_comp_code != COMP_STOP_INVAL) 2254 td->urb->actual_length += 2255 TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - 2256 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); 2257 } 2258 2259 return finish_td(xhci, td, event_trb, event, ep, status, false); 2260 } 2261 2262 /* 2263 * If this function returns an error condition, it means it got a Transfer 2264 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address. 2265 * At this point, the host controller is probably hosed and should be reset. 2266 */ 2267 static int handle_tx_event(struct xhci_hcd *xhci, 2268 struct xhci_transfer_event *event) 2269 __releases(&xhci->lock) 2270 __acquires(&xhci->lock) 2271 { 2272 struct xhci_virt_device *xdev; 2273 struct xhci_virt_ep *ep; 2274 struct xhci_ring *ep_ring; 2275 unsigned int slot_id; 2276 int ep_index; 2277 struct xhci_td *td = NULL; 2278 dma_addr_t event_dma; 2279 struct xhci_segment *event_seg; 2280 union xhci_trb *event_trb; 2281 struct urb *urb = NULL; 2282 int status = -EINPROGRESS; 2283 struct urb_priv *urb_priv; 2284 struct xhci_ep_ctx *ep_ctx; 2285 struct list_head *tmp; 2286 u32 trb_comp_code; 2287 int ret = 0; 2288 int td_num = 0; 2289 2290 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); 2291 xdev = xhci->devs[slot_id]; 2292 if (!xdev) { 2293 xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n"); 2294 xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n", 2295 (unsigned long long) xhci_trb_virt_to_dma( 2296 xhci->event_ring->deq_seg, 2297 xhci->event_ring->dequeue), 2298 lower_32_bits(le64_to_cpu(event->buffer)), 2299 upper_32_bits(le64_to_cpu(event->buffer)), 2300 le32_to_cpu(event->transfer_len), 2301 le32_to_cpu(event->flags)); 2302 xhci_dbg(xhci, "Event ring:\n"); 2303 xhci_debug_segment(xhci, xhci->event_ring->deq_seg); 2304 return -ENODEV; 2305 } 2306 2307 /* Endpoint ID is 1 based, our index is zero based */ 2308 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; 2309 ep = &xdev->eps[ep_index]; 2310 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); 2311 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 2312 if (!ep_ring || 2313 (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) == 2314 EP_STATE_DISABLED) { 2315 xhci_err(xhci, "ERROR Transfer event for disabled endpoint " 2316 "or incorrect stream ring\n"); 2317 xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n", 2318 (unsigned long long) xhci_trb_virt_to_dma( 2319 xhci->event_ring->deq_seg, 2320 xhci->event_ring->dequeue), 2321 lower_32_bits(le64_to_cpu(event->buffer)), 2322 upper_32_bits(le64_to_cpu(event->buffer)), 2323 le32_to_cpu(event->transfer_len), 2324 le32_to_cpu(event->flags)); 2325 xhci_dbg(xhci, "Event ring:\n"); 2326 xhci_debug_segment(xhci, xhci->event_ring->deq_seg); 2327 return -ENODEV; 2328 } 2329 2330 /* Count current td numbers if ep->skip is set */ 2331 if (ep->skip) { 2332 list_for_each(tmp, &ep_ring->td_list) 2333 td_num++; 2334 } 2335 2336 event_dma = le64_to_cpu(event->buffer); 2337 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); 
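	/*
	 * Layout note (xHCI spec 6.4.2.1): the transfer event's transfer_len
	 * dword packs the untransferred residue in bits 23:0 and the
	 * completion code in bits 31:24; EVENT_TRB_LEN() and GET_COMP_CODE()
	 * extract those two fields.
	 */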
2338 /* Look for common error cases */ 2339 switch (trb_comp_code) { 2340 /* Skip codes that require special handling depending on 2341 * transfer type 2342 */ 2343 case COMP_SUCCESS: 2344 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) 2345 break; 2346 if (xhci->quirks & XHCI_TRUST_TX_LENGTH) 2347 trb_comp_code = COMP_SHORT_TX; 2348 else 2349 xhci_warn_ratelimited(xhci, 2350 "WARN Successful completion on short TX: needs XHCI_TRUST_TX_LENGTH quirk?\n"); /* fall through */ 2351 case COMP_SHORT_TX: 2352 break; 2353 case COMP_STOP: 2354 xhci_dbg(xhci, "Stopped on Transfer TRB\n"); 2355 break; 2356 case COMP_STOP_INVAL: 2357 xhci_dbg(xhci, "Stopped on No-op or Link TRB\n"); 2358 break; 2359 case COMP_STALL: 2360 xhci_dbg(xhci, "Stalled endpoint\n"); 2361 ep->ep_state |= EP_HALTED; 2362 status = -EPIPE; 2363 break; 2364 case COMP_TRB_ERR: 2365 xhci_warn(xhci, "WARN: TRB error on endpoint\n"); 2366 status = -EILSEQ; 2367 break; 2368 case COMP_SPLIT_ERR: 2369 case COMP_TX_ERR: 2370 xhci_dbg(xhci, "Transfer error on endpoint\n"); 2371 status = -EPROTO; 2372 break; 2373 case COMP_BABBLE: 2374 xhci_dbg(xhci, "Babble error on endpoint\n"); 2375 status = -EOVERFLOW; 2376 break; 2377 case COMP_DB_ERR: 2378 xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n"); 2379 status = -ENOSR; 2380 break; 2381 case COMP_BW_OVER: 2382 xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n"); 2383 break; 2384 case COMP_BUFF_OVER: 2385 xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n"); 2386 break; 2387 case COMP_UNDERRUN: 2388 /* 2389 * When the Isoch ring is empty, the xHC will generate 2390 * a Ring Overrun Event for an IN Isoch endpoint or a Ring 2391 * Underrun Event for an OUT Isoch endpoint. 2392 */ 2393 xhci_dbg(xhci, "underrun event on endpoint\n"); 2394 if (!list_empty(&ep_ring->td_list)) 2395 xhci_dbg(xhci, "Underrun Event for slot %d ep %d " 2396 "still with TDs queued?\n", 2397 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), 2398 ep_index); 2399 goto cleanup; 2400 case COMP_OVERRUN: 2401 xhci_dbg(xhci, "overrun event on endpoint\n"); 2402 if (!list_empty(&ep_ring->td_list)) 2403 xhci_dbg(xhci, "Overrun Event for slot %d ep %d " 2404 "still with TDs queued?\n", 2405 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), 2406 ep_index); 2407 goto cleanup; 2408 case COMP_DEV_ERR: 2409 xhci_warn(xhci, "WARN: detected an incompatible device\n"); 2410 status = -EPROTO; 2411 break; 2412 case COMP_MISSED_INT: 2413 /* 2414 * When a Missed Service Error is encountered, one or more 2415 * isoc tds may have been missed by the xHC. 2416 * Set the skip flag on the ep_ring; the missed tds will be 2417 * completed as short transfers the next time the ep_ring is processed. 2418 */ 2419 ep->skip = true; 2420 xhci_dbg(xhci, "Miss service interval error, set skip flag\n"); 2421 goto cleanup; 2422 default: 2423 if (xhci_is_vendor_info_code(xhci, trb_comp_code)) { 2424 status = 0; 2425 break; 2426 } 2427 xhci_warn(xhci, "ERROR Unknown event condition, HC probably " 2428 "busted\n"); 2429 goto cleanup; 2430 } 2431 2432 do { 2433 /* This TRB should be in the TD at the head of this ring's 2434 * TD list. 2435 */ 2436 if (list_empty(&ep_ring->td_list)) { 2437 /* 2438 * A stopped endpoint may generate an extra completion 2439 * event if the device was suspended. Don't print 2440 * warnings.
*/ 2442 if (!(trb_comp_code == COMP_STOP || 2443 trb_comp_code == COMP_STOP_INVAL)) { 2444 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n", 2445 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), 2446 ep_index); 2447 xhci_dbg(xhci, "Event TRB with TRB type ID %u\n", 2448 (le32_to_cpu(event->flags) & 2449 TRB_TYPE_BITMASK)>>10); 2450 xhci_print_trb_offsets(xhci, (union xhci_trb *) event); 2451 } 2452 if (ep->skip) { 2453 ep->skip = false; 2454 xhci_dbg(xhci, "td_list is empty while skip " 2455 "flag set. Clear skip flag.\n"); 2456 } 2457 ret = 0; 2458 goto cleanup; 2459 } 2460 2461 /* We've skipped all the TDs on the ep ring when ep->skip is set */ 2462 if (ep->skip && td_num == 0) { 2463 ep->skip = false; 2464 xhci_dbg(xhci, "All tds on the ep_ring skipped. " 2465 "Clear skip flag.\n"); 2466 ret = 0; 2467 goto cleanup; 2468 } 2469 2470 td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list); 2471 if (ep->skip) 2472 td_num--; 2473 2474 /* Is this a TRB in the currently executing TD? */ 2475 event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue, 2476 td->last_trb, event_dma); 2477 2478 /* 2479 * Skip the Force Stopped Event. The event_trb (event_dma) of an FSE 2480 * is not in the current TD pointed to by ep_ring->dequeue, because 2481 * the hardware dequeue pointer is still at the previous TRB of the 2482 * current TD. That previous TRB may be a Link TRB or the last TRB 2483 * of the previous TD. The Stop Endpoint command completion handler 2484 * will take care of the rest. 2485 */ 2486 if (!event_seg && trb_comp_code == COMP_STOP_INVAL) { 2487 ret = 0; 2488 goto cleanup; 2489 } 2490 2491 if (!event_seg) { 2492 if (!ep->skip || 2493 !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) { 2494 /* Some host controllers give a spurious 2495 * successful event after a short transfer. 2496 * Ignore it. 2497 */ 2498 if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) && 2499 ep_ring->last_td_was_short) { 2500 ep_ring->last_td_was_short = false; 2501 ret = 0; 2502 goto cleanup; 2503 } 2504 /* HC is busted, give up! */ 2505 xhci_err(xhci, 2506 "ERROR Transfer event TRB DMA ptr not " 2507 "part of current TD\n"); 2508 return -ESHUTDOWN; 2509 } 2510 2511 ret = skip_isoc_td(xhci, td, event, ep, &status); 2512 goto cleanup; 2513 } 2514 if (trb_comp_code == COMP_SHORT_TX) 2515 ep_ring->last_td_was_short = true; 2516 else 2517 ep_ring->last_td_was_short = false; 2518 2519 if (ep->skip) { 2520 xhci_dbg(xhci, "Found td. Clear skip flag.\n"); 2521 ep->skip = false; 2522 } 2523 2524 event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / 2525 sizeof(*event_trb)]; 2526 /* 2527 * No-op TRB should not trigger interrupts. 2528 * If event_trb is a no-op TRB, it means the 2529 * corresponding TD has been cancelled. Just ignore 2530 * the TD. 2531 */ 2532 if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) { 2533 xhci_dbg(xhci, 2534 "event_trb is a no-op TRB. Skip it\n"); 2535 goto cleanup; 2536 } 2537 2538 /* Now update the urb's actual_length and give back to 2539 * the core 2540 */ 2541 if (usb_endpoint_xfer_control(&td->urb->ep->desc)) 2542 ret = process_ctrl_td(xhci, td, event_trb, event, ep, 2543 &status); 2544 else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc)) 2545 ret = process_isoc_td(xhci, td, event_trb, event, ep, 2546 &status); 2547 else 2548 ret = process_bulk_intr_td(xhci, td, event_trb, event, 2549 ep, &status); 2550 2551 cleanup: 2552 /* 2553 * Do not update the event ring dequeue pointer if ep->skip is set; 2554 * we will roll back and continue processing the missed tds.
*/ 2556 if (trb_comp_code == COMP_MISSED_INT || !ep->skip) { 2557 inc_deq(xhci, xhci->event_ring); 2558 } 2559 2560 if (ret) { 2561 urb = td->urb; 2562 urb_priv = urb->hcpriv; 2563 /* Leave the TD around for the reset endpoint function 2564 * to use (but only if it's not a control endpoint, 2565 * since we already queued the Set TR dequeue pointer 2566 * command for stalled control endpoints). 2567 */ 2568 if (usb_endpoint_xfer_control(&urb->ep->desc) || 2569 (trb_comp_code != COMP_STALL && 2570 trb_comp_code != COMP_BABBLE)) 2571 xhci_urb_free_priv(xhci, urb_priv); 2572 else 2573 kfree(urb_priv); 2574 2575 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb); 2576 if ((urb->actual_length != urb->transfer_buffer_length && 2577 (urb->transfer_flags & 2578 URB_SHORT_NOT_OK)) || 2579 (status != 0 && 2580 !usb_endpoint_xfer_isoc(&urb->ep->desc))) 2581 xhci_dbg(xhci, "Giveback URB %p, len = %d, " 2582 "expected = %d, status = %d\n", 2583 urb, urb->actual_length, 2584 urb->transfer_buffer_length, 2585 status); 2586 spin_unlock(&xhci->lock); 2587 /* EHCI, UHCI, and OHCI always unconditionally set the 2588 * urb->status of an isochronous endpoint to 0. 2589 */ 2590 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) 2591 status = 0; 2592 usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status); 2593 spin_lock(&xhci->lock); 2594 } 2595 2596 /* 2597 * If ep->skip is set, there are missed tds on the 2598 * endpoint ring that we need to take care of. 2599 * Process them as short transfers until we reach the td 2600 * pointed to by the event. 2601 */ 2602 } while (ep->skip && trb_comp_code != COMP_MISSED_INT); 2603 2604 return 0; 2605 } 2606 2607 /* 2608 * This function handles all OS-owned events on the event ring. It may drop 2609 * xhci->lock between event processing (e.g. to pass up port status changes). 2610 * Returns >0 for "possibly more events to process" (caller should call again), 2611 * otherwise 0 if done. In the future, <0 returns should indicate an error code. 2612 */ 2613 static int xhci_handle_event(struct xhci_hcd *xhci) 2614 { 2615 union xhci_trb *event; 2616 int update_ptrs = 1; 2617 int ret; 2618 2619 if (!xhci->event_ring || !xhci->event_ring->dequeue) { 2620 xhci->error_bitmask |= 1 << 1; 2621 return 0; 2622 } 2623 2624 event = xhci->event_ring->dequeue; 2625 /* Does the HC or OS own the TRB? */ 2626 if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) != 2627 xhci->event_ring->cycle_state) { 2628 xhci->error_bitmask |= 1 << 2; 2629 return 0; 2630 } 2631 2632 /* 2633 * Barrier between reading the TRB_CYCLE (valid) flag above and any 2634 * speculative reads of the event's flags/data below. 2635 */ 2636 rmb(); 2637 /* FIXME: Handle more event types.
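	 * (TRB_TYPE_BITMASK covers bits 15:10 of the flags dword, so each
	 * case below matches on TRB_TYPE(x), i.e. x << 10.)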
*/ 2638 switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) { 2639 case TRB_TYPE(TRB_COMPLETION): 2640 handle_cmd_completion(xhci, &event->event_cmd); 2641 break; 2642 case TRB_TYPE(TRB_PORT_STATUS): 2643 handle_port_status(xhci, event); 2644 update_ptrs = 0; 2645 break; 2646 case TRB_TYPE(TRB_TRANSFER): 2647 ret = handle_tx_event(xhci, &event->trans_event); 2648 if (ret < 0) 2649 xhci->error_bitmask |= 1 << 9; 2650 else 2651 update_ptrs = 0; 2652 break; 2653 case TRB_TYPE(TRB_DEV_NOTE): 2654 handle_device_notification(xhci, event); 2655 break; 2656 default: 2657 if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >= 2658 TRB_TYPE(48)) 2659 handle_vendor_event(xhci, event); 2660 else 2661 xhci->error_bitmask |= 1 << 3; 2662 } 2663 /* Any of the above functions may drop and re-acquire the lock, so check 2664 * to make sure a watchdog timer didn't mark the host as non-responsive. 2665 */ 2666 if (xhci->xhc_state & XHCI_STATE_DYING) { 2667 xhci_dbg(xhci, "xHCI host dying, returning from " 2668 "event handler.\n"); 2669 return 0; 2670 } 2671 2672 if (update_ptrs) 2673 /* Update SW event ring dequeue pointer */ 2674 inc_deq(xhci, xhci->event_ring); 2675 2676 /* Are there more items on the event ring? Caller will call us again to 2677 * check. 2678 */ 2679 return 1; 2680 } 2681 2682 /* 2683 * xHCI spec says we can get an interrupt, and if the HC has an error condition, 2684 * we might get bad data out of the event ring. Section 4.10.2.7 has a list of 2685 * indicators of an event TRB error, but we check the status *first* to be safe. 2686 */ 2687 irqreturn_t xhci_irq(struct usb_hcd *hcd) 2688 { 2689 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 2690 u32 status; 2691 u64 temp_64; 2692 union xhci_trb *event_ring_deq; 2693 dma_addr_t deq; 2694 2695 spin_lock(&xhci->lock); 2696 /* Check if the xHC generated the interrupt, or the irq is shared */ 2697 status = readl(&xhci->op_regs->status); 2698 if (status == 0xffffffff) 2699 goto hw_died; 2700 2701 if (!(status & STS_EINT)) { 2702 spin_unlock(&xhci->lock); 2703 return IRQ_NONE; 2704 } 2705 if (status & STS_FATAL) { 2706 xhci_warn(xhci, "WARNING: Host System Error\n"); 2707 xhci_halt(xhci); 2708 hw_died: 2709 spin_unlock(&xhci->lock); 2710 return -ESHUTDOWN; 2711 } 2712 2713 /* 2714 * Clear the op reg interrupt status first, 2715 * so we can receive interrupts from other MSI-X interrupters. 2716 * Write 1 to clear the interrupt status. 2717 */ 2718 status |= STS_EINT; 2719 writel(status, &xhci->op_regs->status); 2720 /* FIXME when MSI-X is supported and there are multiple vectors */ 2721 /* Clear the MSI-X event interrupt status */ 2722 2723 if (hcd->irq) { 2724 u32 irq_pending; 2725 /* Acknowledge the PCI interrupt */ 2726 irq_pending = readl(&xhci->ir_set->irq_pending); 2727 irq_pending |= IMAN_IP; 2728 writel(irq_pending, &xhci->ir_set->irq_pending); 2729 } 2730 2731 if (xhci->xhc_state & XHCI_STATE_DYING) { 2732 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. " 2733 "Shouldn't IRQs be disabled?\n"); 2734 /* Clear the event handler busy flag (RW1C); 2735 * the event ring should be empty. 2736 */ 2737 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); 2738 xhci_write_64(xhci, temp_64 | ERST_EHB, 2739 &xhci->ir_set->erst_dequeue); 2740 spin_unlock(&xhci->lock); 2741 2742 return IRQ_HANDLED; 2743 } 2744 2745 event_ring_deq = xhci->event_ring->dequeue; 2746 /* FIXME this should be a delayed service routine 2747 * that clears the EHB. 
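	 * (For now the loop below drains the event ring inline; once it is
	 * empty, the ERST dequeue register is rewritten with ERST_EHB set,
	 * a write-1-to-clear bit, to clear the Event Handler Busy flag.)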
2748 */ 2749 while (xhci_handle_event(xhci) > 0) {} 2750 2751 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); 2752 /* If necessary, update the HW's version of the event ring deq ptr. */ 2753 if (event_ring_deq != xhci->event_ring->dequeue) { 2754 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, 2755 xhci->event_ring->dequeue); 2756 if (deq == 0) 2757 xhci_warn(xhci, "WARN something wrong with SW event " 2758 "ring dequeue ptr.\n"); 2759 /* Update HC event ring dequeue pointer */ 2760 temp_64 &= ERST_PTR_MASK; 2761 temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK); 2762 } 2763 2764 /* Clear the event handler busy flag (RW1C); event ring is empty. */ 2765 temp_64 |= ERST_EHB; 2766 xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue); 2767 2768 spin_unlock(&xhci->lock); 2769 2770 return IRQ_HANDLED; 2771 } 2772 2773 irqreturn_t xhci_msi_irq(int irq, void *hcd) 2774 { 2775 return xhci_irq(hcd); 2776 } 2777 2778 /**** Endpoint Ring Operations ****/ 2779 2780 /* 2781 * Generic function for queueing a TRB on a ring. 2782 * The caller must have checked to make sure there's room on the ring. 2783 * 2784 * @more_trbs_coming: Will you enqueue more TRBs before calling 2785 * prepare_transfer()? 2786 */ 2787 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, 2788 bool more_trbs_coming, 2789 u32 field1, u32 field2, u32 field3, u32 field4) 2790 { 2791 struct xhci_generic_trb *trb; 2792 2793 trb = &ring->enqueue->generic; 2794 trb->field[0] = cpu_to_le32(field1); 2795 trb->field[1] = cpu_to_le32(field2); 2796 trb->field[2] = cpu_to_le32(field3); 2797 trb->field[3] = cpu_to_le32(field4); 2798 inc_enq(xhci, ring, more_trbs_coming); 2799 } 2800 2801 /* 2802 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs. 2803 * FIXME allocate segments if the ring is full. 2804 */ 2805 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, 2806 u32 ep_state, unsigned int num_trbs, gfp_t mem_flags) 2807 { 2808 unsigned int num_trbs_needed; 2809 2810 /* Make sure the endpoint has been added to xHC schedule */ 2811 switch (ep_state) { 2812 case EP_STATE_DISABLED: 2813 /* 2814 * USB core changed config/interfaces without notifying us, 2815 * or hardware is reporting the wrong state. 2816 */ 2817 xhci_warn(xhci, "WARN urb submitted to disabled ep\n"); 2818 return -ENOENT; 2819 case EP_STATE_ERROR: 2820 xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n"); 2821 /* FIXME event handling code for error needs to clear it */ 2822 /* XXX not sure if this should be -ENOENT or not */ 2823 return -EINVAL; 2824 case EP_STATE_HALTED: 2825 xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n"); 2826 case EP_STATE_STOPPED: 2827 case EP_STATE_RUNNING: 2828 break; 2829 default: 2830 xhci_err(xhci, "ERROR unknown endpoint state for ep\n"); 2831 /* 2832 * FIXME issue Configure Endpoint command to try to get the HC 2833 * back into a known state. 
2834 */ 2835 return -EINVAL; 2836 } 2837 2838 while (1) { 2839 if (room_on_ring(xhci, ep_ring, num_trbs)) 2840 break; 2841 2842 if (ep_ring == xhci->cmd_ring) { 2843 xhci_err(xhci, "Do not support expand command ring\n"); 2844 return -ENOMEM; 2845 } 2846 2847 xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion, 2848 "ERROR no room on ep ring, try ring expansion"); 2849 num_trbs_needed = num_trbs - ep_ring->num_trbs_free; 2850 if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed, 2851 mem_flags)) { 2852 xhci_err(xhci, "Ring expansion failed\n"); 2853 return -ENOMEM; 2854 } 2855 } 2856 2857 if (enqueue_is_link_trb(ep_ring)) { 2858 struct xhci_ring *ring = ep_ring; 2859 union xhci_trb *next; 2860 2861 next = ring->enqueue; 2862 2863 while (last_trb(xhci, ring, ring->enq_seg, next)) { 2864 /* If we're not dealing with 0.95 hardware or isoc rings 2865 * on AMD 0.96 host, clear the chain bit. 2866 */ 2867 if (!xhci_link_trb_quirk(xhci) && 2868 !(ring->type == TYPE_ISOC && 2869 (xhci->quirks & XHCI_AMD_0x96_HOST))) 2870 next->link.control &= cpu_to_le32(~TRB_CHAIN); 2871 else 2872 next->link.control |= cpu_to_le32(TRB_CHAIN); 2873 2874 wmb(); 2875 next->link.control ^= cpu_to_le32(TRB_CYCLE); 2876 2877 /* Toggle the cycle bit after the last ring segment. */ 2878 if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) { 2879 ring->cycle_state = (ring->cycle_state ? 0 : 1); 2880 } 2881 ring->enq_seg = ring->enq_seg->next; 2882 ring->enqueue = ring->enq_seg->trbs; 2883 next = ring->enqueue; 2884 } 2885 } 2886 2887 return 0; 2888 } 2889 2890 static int prepare_transfer(struct xhci_hcd *xhci, 2891 struct xhci_virt_device *xdev, 2892 unsigned int ep_index, 2893 unsigned int stream_id, 2894 unsigned int num_trbs, 2895 struct urb *urb, 2896 unsigned int td_index, 2897 gfp_t mem_flags) 2898 { 2899 int ret; 2900 struct urb_priv *urb_priv; 2901 struct xhci_td *td; 2902 struct xhci_ring *ep_ring; 2903 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 2904 2905 ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id); 2906 if (!ep_ring) { 2907 xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n", 2908 stream_id); 2909 return -EINVAL; 2910 } 2911 2912 ret = prepare_ring(xhci, ep_ring, 2913 le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK, 2914 num_trbs, mem_flags); 2915 if (ret) 2916 return ret; 2917 2918 urb_priv = urb->hcpriv; 2919 td = urb_priv->td[td_index]; 2920 2921 INIT_LIST_HEAD(&td->td_list); 2922 INIT_LIST_HEAD(&td->cancelled_td_list); 2923 2924 if (td_index == 0) { 2925 ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb); 2926 if (unlikely(ret)) 2927 return ret; 2928 } 2929 2930 td->urb = urb; 2931 /* Add this TD to the tail of the endpoint ring's TD list */ 2932 list_add_tail(&td->td_list, &ep_ring->td_list); 2933 td->start_seg = ep_ring->enq_seg; 2934 td->first_trb = ep_ring->enqueue; 2935 2936 urb_priv->td[td_index] = td; 2937 2938 return 0; 2939 } 2940 2941 static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb) 2942 { 2943 int num_sgs, num_trbs, running_total, temp, i; 2944 struct scatterlist *sg; 2945 2946 sg = NULL; 2947 num_sgs = urb->num_mapped_sgs; 2948 temp = urb->transfer_buffer_length; 2949 2950 num_trbs = 0; 2951 for_each_sg(urb->sg, sg, num_sgs, i) { 2952 unsigned int len = sg_dma_len(sg); 2953 2954 /* Scatter gather list entries may cross 64KB boundaries */ 2955 running_total = TRB_MAX_BUFF_SIZE - 2956 (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1)); 2957 running_total &= TRB_MAX_BUFF_SIZE - 1; 2958 if 
(running_total != 0) 2959 num_trbs++; 2960 2961 /* How many more 64KB chunks to transfer, how many more TRBs? */ 2962 while (running_total < sg_dma_len(sg) && running_total < temp) { 2963 num_trbs++; 2964 running_total += TRB_MAX_BUFF_SIZE; 2965 } 2966 len = min_t(int, len, temp); 2967 temp -= len; 2968 if (temp == 0) 2969 break; 2970 } 2971 return num_trbs; 2972 } 2973 2974 static void check_trb_math(struct urb *urb, int num_trbs, int running_total) 2975 { 2976 if (num_trbs != 0) 2977 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of " 2978 "TRBs, %d left\n", __func__, 2979 urb->ep->desc.bEndpointAddress, num_trbs); 2980 if (running_total != urb->transfer_buffer_length) 2981 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, " 2982 "queued %#x (%d), asked for %#x (%d)\n", 2983 __func__, 2984 urb->ep->desc.bEndpointAddress, 2985 running_total, running_total, 2986 urb->transfer_buffer_length, 2987 urb->transfer_buffer_length); 2988 } 2989 2990 static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id, 2991 unsigned int ep_index, unsigned int stream_id, int start_cycle, 2992 struct xhci_generic_trb *start_trb) 2993 { 2994 /* 2995 * Pass all the TRBs to the hardware at once and make sure this write 2996 * isn't reordered. 2997 */ 2998 wmb(); 2999 if (start_cycle) 3000 start_trb->field[3] |= cpu_to_le32(start_cycle); 3001 else 3002 start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE); 3003 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id); 3004 } 3005 3006 /* 3007 * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt 3008 * endpoint is to be serviced, the xHC will consume (at most) one TD. A TD 3009 * (comprised of sg list entries) can take several service intervals to 3010 * transmit. 3011 */ 3012 int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 3013 struct urb *urb, int slot_id, unsigned int ep_index) 3014 { 3015 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, 3016 xhci->devs[slot_id]->out_ctx, ep_index); 3017 int xhci_interval; 3018 int ep_interval; 3019 3020 xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info)); 3021 ep_interval = urb->interval; 3022 /* Convert to microframes */ 3023 if (urb->dev->speed == USB_SPEED_LOW || 3024 urb->dev->speed == USB_SPEED_FULL) 3025 ep_interval *= 8; 3026 /* FIXME change this to a warning and a suggestion to use the new API 3027 * to set the polling interval (once the API is added). 3028 */ 3029 if (xhci_interval != ep_interval) { 3030 dev_dbg_ratelimited(&urb->dev->dev, 3031 "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n", 3032 ep_interval, ep_interval == 1 ? "" : "s", 3033 xhci_interval, xhci_interval == 1 ? "" : "s"); 3034 urb->interval = xhci_interval; 3035 /* Convert back to frames for LS/FS devices */ 3036 if (urb->dev->speed == USB_SPEED_LOW || 3037 urb->dev->speed == USB_SPEED_FULL) 3038 urb->interval /= 8; 3039 } 3040 return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index); 3041 } 3042 3043 /* 3044 * The TD size is the number of bytes remaining in the TD (including this TRB), 3045 * right shifted by 10. 3046 * It must fit in bits 21:17, so it can't be bigger than 31. 
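 *
 * Worked example (hypothetical lengths): with 70000 bytes remaining,
 * 70000 >> 10 = 68 exceeds the five-bit maximum, so the field is clamped
 * to 31 << 17; with 20480 bytes remaining the encoding is
 * (20480 >> 10) << 17 = 20 << 17.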
3047 */ 3048 static u32 xhci_td_remainder(unsigned int remainder) 3049 { 3050 u32 max = (1 << (21 - 17 + 1)) - 1; 3051 3052 if ((remainder >> 10) >= max) 3053 return max << 17; 3054 else 3055 return (remainder >> 10) << 17; 3056 } 3057 3058 /* 3059 * For xHCI 1.0 host controllers, TD size is the number of max packet sized 3060 * packets remaining in the TD (*not* including this TRB). 3061 * 3062 * Total TD packet count = total_packet_count = 3063 * DIV_ROUND_UP(TD size in bytes / wMaxPacketSize) 3064 * 3065 * Packets transferred up to and including this TRB = packets_transferred = 3066 * rounddown(total bytes transferred including this TRB / wMaxPacketSize) 3067 * 3068 * TD size = total_packet_count - packets_transferred 3069 * 3070 * It must fit in bits 21:17, so it can't be bigger than 31. 3071 * The last TRB in a TD must have the TD size set to zero. 3072 */ 3073 static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len, 3074 unsigned int total_packet_count, struct urb *urb, 3075 unsigned int num_trbs_left) 3076 { 3077 int packets_transferred; 3078 3079 /* One TRB with a zero-length data packet. */ 3080 if (num_trbs_left == 0 || (running_total == 0 && trb_buff_len == 0)) 3081 return 0; 3082 3083 /* All the TRB queueing functions don't count the current TRB in 3084 * running_total. 3085 */ 3086 packets_transferred = (running_total + trb_buff_len) / 3087 GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc)); 3088 3089 if ((total_packet_count - packets_transferred) > 31) 3090 return 31 << 17; 3091 return (total_packet_count - packets_transferred) << 17; 3092 } 3093 3094 static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 3095 struct urb *urb, int slot_id, unsigned int ep_index) 3096 { 3097 struct xhci_ring *ep_ring; 3098 unsigned int num_trbs; 3099 struct urb_priv *urb_priv; 3100 struct xhci_td *td; 3101 struct scatterlist *sg; 3102 int num_sgs; 3103 int trb_buff_len, this_sg_len, running_total; 3104 unsigned int total_packet_count; 3105 bool first_trb; 3106 u64 addr; 3107 bool more_trbs_coming; 3108 3109 struct xhci_generic_trb *start_trb; 3110 int start_cycle; 3111 3112 ep_ring = xhci_urb_to_transfer_ring(xhci, urb); 3113 if (!ep_ring) 3114 return -EINVAL; 3115 3116 num_trbs = count_sg_trbs_needed(xhci, urb); 3117 num_sgs = urb->num_mapped_sgs; 3118 total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length, 3119 usb_endpoint_maxp(&urb->ep->desc)); 3120 3121 trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id], 3122 ep_index, urb->stream_id, 3123 num_trbs, urb, 0, mem_flags); 3124 if (trb_buff_len < 0) 3125 return trb_buff_len; 3126 3127 urb_priv = urb->hcpriv; 3128 td = urb_priv->td[0]; 3129 3130 /* 3131 * Don't give the first TRB to the hardware (by toggling the cycle bit) 3132 * until we've finished creating all the other TRBs. The ring's cycle 3133 * state may change as we enqueue the other TRBs, so save it too. 3134 */ 3135 start_trb = &ep_ring->enqueue->generic; 3136 start_cycle = ep_ring->cycle_state; 3137 3138 running_total = 0; 3139 /* 3140 * How much data is in the first TRB? 3141 * 3142 * There are three forces at work for TRB buffer pointers and lengths: 3143 * 1. We don't want to walk off the end of this sg-list entry buffer. 3144 * 2. The transfer length that the driver requested may be smaller than 3145 * the amount of memory allocated for this scatter-gather list. 3146 * 3. TRBs buffers can't cross 64KB boundaries. 
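	 *
	 * For example (hypothetical address): an sg entry mapped at DMA
	 * address 0x12345000 has addr & (TRB_MAX_BUFF_SIZE - 1) = 0x5000,
	 * so the first TRB can carry at most 0xB000 bytes before hitting
	 * the 64KB boundary, further capped by the sg entry and URB lengths.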
3147 */ 3148 sg = urb->sg; 3149 addr = (u64) sg_dma_address(sg); 3150 this_sg_len = sg_dma_len(sg); 3151 trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1)); 3152 trb_buff_len = min_t(int, trb_buff_len, this_sg_len); 3153 if (trb_buff_len > urb->transfer_buffer_length) 3154 trb_buff_len = urb->transfer_buffer_length; 3155 3156 first_trb = true; 3157 /* Queue the first TRB, even if it's zero-length */ 3158 do { 3159 u32 field = 0; 3160 u32 length_field = 0; 3161 u32 remainder = 0; 3162 3163 /* Don't change the cycle bit of the first TRB until later */ 3164 if (first_trb) { 3165 first_trb = false; 3166 if (start_cycle == 0) 3167 field |= 0x1; 3168 } else 3169 field |= ep_ring->cycle_state; 3170 3171 /* Chain all the TRBs together; clear the chain bit in the last 3172 * TRB to indicate it's the last TRB in the chain. 3173 */ 3174 if (num_trbs > 1) { 3175 field |= TRB_CHAIN; 3176 } else { 3177 /* FIXME - add check for ZERO_PACKET flag before this */ 3178 td->last_trb = ep_ring->enqueue; 3179 field |= TRB_IOC; 3180 } 3181 3182 /* Only set interrupt on short packet for IN endpoints */ 3183 if (usb_urb_dir_in(urb)) 3184 field |= TRB_ISP; 3185 3186 if (TRB_MAX_BUFF_SIZE - 3187 (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) { 3188 xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n"); 3189 xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n", 3190 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), 3191 (unsigned int) addr + trb_buff_len); 3192 } 3193 3194 /* Set the TRB length, TD size, and interrupter fields. */ 3195 if (xhci->hci_version < 0x100) { 3196 remainder = xhci_td_remainder( 3197 urb->transfer_buffer_length - 3198 running_total); 3199 } else { 3200 remainder = xhci_v1_0_td_remainder(running_total, 3201 trb_buff_len, total_packet_count, urb, 3202 num_trbs - 1); 3203 } 3204 length_field = TRB_LEN(trb_buff_len) | 3205 remainder | 3206 TRB_INTR_TARGET(0); 3207 3208 if (num_trbs > 1) 3209 more_trbs_coming = true; 3210 else 3211 more_trbs_coming = false; 3212 queue_trb(xhci, ep_ring, more_trbs_coming, 3213 lower_32_bits(addr), 3214 upper_32_bits(addr), 3215 length_field, 3216 field | TRB_TYPE(TRB_NORMAL)); 3217 --num_trbs; 3218 running_total += trb_buff_len; 3219 3220 /* Calculate length for next transfer -- 3221 * Are we done queueing all the TRBs for this sg entry? 
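	 * (If this_sg_len reaches zero we advance to the next sg entry;
	 * otherwise we continue within the same entry at addr + trb_buff_len.)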
3222 */ 3223 this_sg_len -= trb_buff_len; 3224 if (this_sg_len == 0) { 3225 --num_sgs; 3226 if (num_sgs == 0) 3227 break; 3228 sg = sg_next(sg); 3229 addr = (u64) sg_dma_address(sg); 3230 this_sg_len = sg_dma_len(sg); 3231 } else { 3232 addr += trb_buff_len; 3233 } 3234 3235 trb_buff_len = TRB_MAX_BUFF_SIZE - 3236 (addr & (TRB_MAX_BUFF_SIZE - 1)); 3237 trb_buff_len = min_t(int, trb_buff_len, this_sg_len); 3238 if (running_total + trb_buff_len > urb->transfer_buffer_length) 3239 trb_buff_len = 3240 urb->transfer_buffer_length - running_total; 3241 } while (running_total < urb->transfer_buffer_length); 3242 3243 check_trb_math(urb, num_trbs, running_total); 3244 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, 3245 start_cycle, start_trb); 3246 return 0; 3247 } 3248 3249 /* This is very similar to what ehci-q.c qtd_fill() does */ 3250 int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 3251 struct urb *urb, int slot_id, unsigned int ep_index) 3252 { 3253 struct xhci_ring *ep_ring; 3254 struct urb_priv *urb_priv; 3255 struct xhci_td *td; 3256 int num_trbs; 3257 struct xhci_generic_trb *start_trb; 3258 bool first_trb; 3259 bool more_trbs_coming; 3260 int start_cycle; 3261 u32 field, length_field; 3262 3263 int running_total, trb_buff_len, ret; 3264 unsigned int total_packet_count; 3265 u64 addr; 3266 3267 if (urb->num_sgs) 3268 return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index); 3269 3270 ep_ring = xhci_urb_to_transfer_ring(xhci, urb); 3271 if (!ep_ring) 3272 return -EINVAL; 3273 3274 num_trbs = 0; 3275 /* How much data is (potentially) left before the 64KB boundary? */ 3276 running_total = TRB_MAX_BUFF_SIZE - 3277 (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1)); 3278 running_total &= TRB_MAX_BUFF_SIZE - 1; 3279 3280 /* If there's some data on this 64KB chunk, or we have to send a 3281 * zero-length transfer, we need at least one TRB 3282 */ 3283 if (running_total != 0 || urb->transfer_buffer_length == 0) 3284 num_trbs++; 3285 /* How many more 64KB chunks to transfer, how many more TRBs? */ 3286 while (running_total < urb->transfer_buffer_length) { 3287 num_trbs++; 3288 running_total += TRB_MAX_BUFF_SIZE; 3289 } 3290 /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */ 3291 3292 ret = prepare_transfer(xhci, xhci->devs[slot_id], 3293 ep_index, urb->stream_id, 3294 num_trbs, urb, 0, mem_flags); 3295 if (ret < 0) 3296 return ret; 3297 3298 urb_priv = urb->hcpriv; 3299 td = urb_priv->td[0]; 3300 3301 /* 3302 * Don't give the first TRB to the hardware (by toggling the cycle bit) 3303 * until we've finished creating all the other TRBs. The ring's cycle 3304 * state may change as we enqueue the other TRBs, so save it too. 3305 */ 3306 start_trb = &ep_ring->enqueue->generic; 3307 start_cycle = ep_ring->cycle_state; 3308 3309 running_total = 0; 3310 total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length, 3311 usb_endpoint_maxp(&urb->ep->desc)); 3312 /* How much data is in the first TRB? 
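	 * (At most TRB_MAX_BUFF_SIZE minus transfer_dma's offset into its
	 * 64KB chunk, capped by the total transfer length below.)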
*/ 3313 addr = (u64) urb->transfer_dma; 3314 trb_buff_len = TRB_MAX_BUFF_SIZE - 3315 (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1)); 3316 if (trb_buff_len > urb->transfer_buffer_length) 3317 trb_buff_len = urb->transfer_buffer_length; 3318 3319 first_trb = true; 3320 3321 /* Queue the first TRB, even if it's zero-length */ 3322 do { 3323 u32 remainder = 0; 3324 field = 0; 3325 3326 /* Don't change the cycle bit of the first TRB until later */ 3327 if (first_trb) { 3328 first_trb = false; 3329 if (start_cycle == 0) 3330 field |= 0x1; 3331 } else 3332 field |= ep_ring->cycle_state; 3333 3334 /* Chain all the TRBs together; clear the chain bit in the last 3335 * TRB to indicate it's the last TRB in the chain. 3336 */ 3337 if (num_trbs > 1) { 3338 field |= TRB_CHAIN; 3339 } else { 3340 /* FIXME - add check for ZERO_PACKET flag before this */ 3341 td->last_trb = ep_ring->enqueue; 3342 field |= TRB_IOC; 3343 } 3344 3345 /* Only set interrupt on short packet for IN endpoints */ 3346 if (usb_urb_dir_in(urb)) 3347 field |= TRB_ISP; 3348 3349 /* Set the TRB length, TD size, and interrupter fields. */ 3350 if (xhci->hci_version < 0x100) { 3351 remainder = xhci_td_remainder( 3352 urb->transfer_buffer_length - 3353 running_total); 3354 } else { 3355 remainder = xhci_v1_0_td_remainder(running_total, 3356 trb_buff_len, total_packet_count, urb, 3357 num_trbs - 1); 3358 } 3359 length_field = TRB_LEN(trb_buff_len) | 3360 remainder | 3361 TRB_INTR_TARGET(0); 3362 3363 if (num_trbs > 1) 3364 more_trbs_coming = true; 3365 else 3366 more_trbs_coming = false; 3367 queue_trb(xhci, ep_ring, more_trbs_coming, 3368 lower_32_bits(addr), 3369 upper_32_bits(addr), 3370 length_field, 3371 field | TRB_TYPE(TRB_NORMAL)); 3372 --num_trbs; 3373 running_total += trb_buff_len; 3374 3375 /* Calculate length for next transfer */ 3376 addr += trb_buff_len; 3377 trb_buff_len = urb->transfer_buffer_length - running_total; 3378 if (trb_buff_len > TRB_MAX_BUFF_SIZE) 3379 trb_buff_len = TRB_MAX_BUFF_SIZE; 3380 } while (running_total < urb->transfer_buffer_length); 3381 3382 check_trb_math(urb, num_trbs, running_total); 3383 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, 3384 start_cycle, start_trb); 3385 return 0; 3386 } 3387 3388 /* Caller must have locked xhci->lock */ 3389 int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 3390 struct urb *urb, int slot_id, unsigned int ep_index) 3391 { 3392 struct xhci_ring *ep_ring; 3393 int num_trbs; 3394 int ret; 3395 struct usb_ctrlrequest *setup; 3396 struct xhci_generic_trb *start_trb; 3397 int start_cycle; 3398 u32 field, length_field; 3399 struct urb_priv *urb_priv; 3400 struct xhci_td *td; 3401 3402 ep_ring = xhci_urb_to_transfer_ring(xhci, urb); 3403 if (!ep_ring) 3404 return -EINVAL; 3405 3406 /* 3407 * Need to copy setup packet into setup TRB, so we can't use the setup 3408 * DMA address. 3409 */ 3410 if (!urb->setup_packet) 3411 return -EINVAL; 3412 3413 /* 1 TRB for setup, 1 for status */ 3414 num_trbs = 2; 3415 /* 3416 * Don't need to check if we need additional event data and normal TRBs, 3417 * since data in control transfers will never get bigger than 16MB 3418 * XXX: can we get a buffer that crosses 64KB boundaries? 
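	 * The TD is therefore two or three TRBs: a Setup TRB carrying the
	 * eight setup bytes as immediate data (TRB_IDT), an optional Data
	 * Stage TRB, and a Status TRB whose direction is IN unless an IN
	 * data stage was queued.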
/* Caller must have locked xhci->lock */
int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	int num_trbs;
	int ret;
	struct usb_ctrlrequest *setup;
	struct xhci_generic_trb *start_trb;
	int start_cycle;
	u32 field, length_field;
	struct urb_priv *urb_priv;
	struct xhci_td *td;

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	/*
	 * Need to copy setup packet into setup TRB, so we can't use the setup
	 * DMA address.
	 */
	if (!urb->setup_packet)
		return -EINVAL;

	/* 1 TRB for setup, 1 for status */
	num_trbs = 2;
	/*
	 * Don't need to check if we need additional event data and normal TRBs,
	 * since data in control transfers will never get bigger than 16MB
	 * XXX: can we get a buffer that crosses 64KB boundaries?
	 */
	if (urb->transfer_buffer_length > 0)
		num_trbs++;
	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, mem_flags);
	if (ret < 0)
		return ret;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs. The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	/* Queue setup TRB - see section 6.4.1.2.1 */
	/* FIXME better way to translate setup_packet into two u32 fields? */
	setup = (struct usb_ctrlrequest *) urb->setup_packet;
	field = 0;
	field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
	if (start_cycle == 0)
		field |= 0x1;

	/* xHCI 1.0 6.4.1.2.1: Transfer Type field */
	if (xhci->hci_version == 0x100) {
		if (urb->transfer_buffer_length > 0) {
			if (setup->bRequestType & USB_DIR_IN)
				field |= TRB_TX_TYPE(TRB_DATA_IN);
			else
				field |= TRB_TX_TYPE(TRB_DATA_OUT);
		}
	}

	queue_trb(xhci, ep_ring, true,
			setup->bRequestType | setup->bRequest << 8 |
				le16_to_cpu(setup->wValue) << 16,
			le16_to_cpu(setup->wIndex) |
				le16_to_cpu(setup->wLength) << 16,
			TRB_LEN(8) | TRB_INTR_TARGET(0),
			/* Immediate data in pointer */
			field);

	/* If there's data, queue data TRBs */
	/* Only set interrupt on short packet for IN endpoints */
	if (usb_urb_dir_in(urb))
		field = TRB_ISP | TRB_TYPE(TRB_DATA);
	else
		field = TRB_TYPE(TRB_DATA);

	length_field = TRB_LEN(urb->transfer_buffer_length) |
		xhci_td_remainder(urb->transfer_buffer_length) |
		TRB_INTR_TARGET(0);
	if (urb->transfer_buffer_length > 0) {
		if (setup->bRequestType & USB_DIR_IN)
			field |= TRB_DIR_IN;
		queue_trb(xhci, ep_ring, true,
				lower_32_bits(urb->transfer_dma),
				upper_32_bits(urb->transfer_dma),
				length_field,
				field | ep_ring->cycle_state);
	}

	/* Save the DMA address of the last TRB in the TD */
	td->last_trb = ep_ring->enqueue;

	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
	/* If the device sent data, the status stage is an OUT transfer */
	if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
		field = 0;
	else
		field = TRB_DIR_IN;
	queue_trb(xhci, ep_ring, false,
			0,
			0,
			TRB_INTR_TARGET(0),
			/* Event on completion */
			field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);

	giveback_first_trb(xhci, slot_id, ep_index, 0,
			start_cycle, start_trb);
	return 0;
}
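/*
 * Illustrative sketch (not part of the driver): the status-stage direction
 * rule used above. If there was a data stage and it was IN, the status
 * stage is a zero-length OUT transfer; otherwise the status stage is IN.
 */
static inline bool example_status_stage_is_in(const struct usb_ctrlrequest *setup,
		u32 transfer_buffer_length)
{
	return !(transfer_buffer_length > 0 &&
		 (setup->bRequestType & USB_DIR_IN));
}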
static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
		struct urb *urb, int i)
{
	int num_trbs = 0;
	u64 addr, td_len;

	addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
	td_len = urb->iso_frame_desc[i].length;

	num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
			TRB_MAX_BUFF_SIZE);
	if (num_trbs == 0)
		num_trbs++;

	return num_trbs;
}

/*
 * The transfer burst count field of the isochronous TRB defines the number of
 * bursts that are required to move all packets in this TD. Only SuperSpeed
 * devices can burst up to bMaxBurst number of packets per service interval.
 * This field is zero based, meaning a value of zero in the field means one
 * burst. Basically, for everything but SuperSpeed devices, this field will be
 * zero. Only xHCI 1.0 host controllers support this field.
 */
static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct urb *urb, unsigned int total_packet_count)
{
	unsigned int max_burst;

	if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER)
		return 0;

	max_burst = urb->ep->ss_ep_comp.bMaxBurst;
	return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
}

/*
 * Returns the number of packets in the last "burst" of packets. This field is
 * valid for all speeds of devices. USB 2.0 devices can only do one "burst", so
 * the last burst packet count is equal to the total number of packets in the
 * TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst
 * must contain (bMaxBurst + 1) number of packets, but the last burst can
 * contain 1 to (bMaxBurst + 1) packets.
 */
static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct urb *urb, unsigned int total_packet_count)
{
	unsigned int max_burst;
	unsigned int residue;

	if (xhci->hci_version < 0x100)
		return 0;

	switch (udev->speed) {
	case USB_SPEED_SUPER:
		/* bMaxBurst is zero based: 0 means 1 packet per burst */
		max_burst = urb->ep->ss_ep_comp.bMaxBurst;
		residue = total_packet_count % (max_burst + 1);
		/* If residue is zero, the last burst contains (max_burst + 1)
		 * number of packets, but the TLBPC field is zero-based.
		 */
		if (residue == 0)
			return max_burst;
		return residue - 1;
	default:
		if (total_packet_count == 0)
			return 0;
		return total_packet_count - 1;
	}
}
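/*
 * Illustrative sketch (not part of the driver): the two helpers above
 * combined, with a worked example. For a SuperSpeed endpoint with
 * bMaxBurst = 3 (bursts of up to 4 packets) and a TD of 11 packets, the TD
 * needs DIV_ROUND_UP(11, 4) = 3 bursts, so the zero-based TBC is 2; the
 * last burst holds 11 % 4 = 3 packets, so the zero-based TLBPC is 2.
 */
static inline void example_isoc_burst_fields(unsigned int total_packets,
		unsigned int max_burst,
		unsigned int *tbc, unsigned int *tlbpc)
{
	unsigned int residue = total_packets % (max_burst + 1);

	*tbc = DIV_ROUND_UP(total_packets, max_burst + 1) - 1;
	*tlbpc = residue ? residue - 1 : max_burst;
}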
/* This is for isoc transfer */
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	int num_tds, trbs_per_td;
	struct xhci_generic_trb *start_trb;
	bool first_trb;
	int start_cycle;
	u32 field, length_field;
	int running_total, trb_buff_len, td_len, td_remain_len, ret;
	u64 start_addr, addr;
	int i, j;
	bool more_trbs_coming;

	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;

	num_tds = urb->number_of_packets;
	if (num_tds < 1) {
		xhci_dbg(xhci, "Isoc URB with zero packets?\n");
		return -EINVAL;
	}

	start_addr = (u64) urb->transfer_dma;
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	urb_priv = urb->hcpriv;
	/* Queue the first TRB, even if it's zero-length */
	for (i = 0; i < num_tds; i++) {
		unsigned int total_packet_count;
		unsigned int burst_count;
		unsigned int residue;

		first_trb = true;
		running_total = 0;
		addr = start_addr + urb->iso_frame_desc[i].offset;
		td_len = urb->iso_frame_desc[i].length;
		td_remain_len = td_len;
		total_packet_count = DIV_ROUND_UP(td_len,
				GET_MAX_PACKET(
					usb_endpoint_maxp(&urb->ep->desc)));
		/* A zero-length transfer still involves at least one packet. */
		if (total_packet_count == 0)
			total_packet_count++;
		burst_count = xhci_get_burst_count(xhci, urb->dev, urb,
				total_packet_count);
		residue = xhci_get_last_burst_packet_count(xhci,
				urb->dev, urb, total_packet_count);

		trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);

		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
				urb->stream_id, trbs_per_td, urb, i, mem_flags);
		if (ret < 0) {
			if (i == 0)
				return ret;
			goto cleanup;
		}

		td = urb_priv->td[i];
		for (j = 0; j < trbs_per_td; j++) {
			u32 remainder = 0;
			field = 0;

			if (first_trb) {
				field = TRB_TBC(burst_count) |
					TRB_TLBPC(residue);
				/* Queue the isoc TRB */
				field |= TRB_TYPE(TRB_ISOC);
				/* Assume URB_ISO_ASAP is set */
				field |= TRB_SIA;
				if (i == 0) {
					if (start_cycle == 0)
						field |= 0x1;
				} else
					field |= ep_ring->cycle_state;
				first_trb = false;
			} else {
				/* Queue other normal TRBs */
				field |= TRB_TYPE(TRB_NORMAL);
				field |= ep_ring->cycle_state;
			}

			/* Only set interrupt on short packet for IN EPs */
			if (usb_urb_dir_in(urb))
				field |= TRB_ISP;

			/* Chain all the TRBs together; clear the chain bit in
			 * the last TRB to indicate it's the last TRB in the
			 * chain.
			 */
			if (j < trbs_per_td - 1) {
				field |= TRB_CHAIN;
				more_trbs_coming = true;
			} else {
				td->last_trb = ep_ring->enqueue;
				field |= TRB_IOC;
				if (xhci->hci_version == 0x100 &&
						!(xhci->quirks & XHCI_AVOID_BEI)) {
					/* Set BEI bit except for the last td */
					if (i < num_tds - 1)
						field |= TRB_BEI;
				}
				more_trbs_coming = false;
			}

			/* Calculate TRB length */
			trb_buff_len = TRB_MAX_BUFF_SIZE -
				(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
			if (trb_buff_len > td_remain_len)
				trb_buff_len = td_remain_len;

			/* Set the TRB length, TD size, & interrupter fields. */
			if (xhci->hci_version < 0x100) {
				remainder = xhci_td_remainder(
						td_len - running_total);
			} else {
				remainder = xhci_v1_0_td_remainder(
						running_total, trb_buff_len,
						total_packet_count, urb,
						(trbs_per_td - j - 1));
			}
			length_field = TRB_LEN(trb_buff_len) |
				remainder |
				TRB_INTR_TARGET(0);

			queue_trb(xhci, ep_ring, more_trbs_coming,
					lower_32_bits(addr),
					upper_32_bits(addr),
					length_field,
					field);
			running_total += trb_buff_len;

			addr += trb_buff_len;
			td_remain_len -= trb_buff_len;
		}

		/* Check TD length */
		if (running_total != td_len) {
			xhci_err(xhci, "ISOC TD length mismatch\n");
			ret = -EINVAL;
			goto cleanup;
		}
	}

	if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
		if (xhci->quirks & XHCI_AMD_PLL_FIX)
			usb_amd_quirk_pll_disable();
	}
	xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;

	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
cleanup:
	/* Clean up a partially enqueued isoc transfer. */

	for (i--; i >= 0; i--)
		list_del_init(&urb_priv->td[i]->td_list);

	/* Use the first TD as a temporary variable to turn the TDs we've queued
	 * into No-ops with a software-owned cycle bit. That way the hardware
	 * won't accidentally start executing bogus TDs when we partially
	 * overwrite them. td->first_trb and td->start_seg are already set.
	 */
	urb_priv->td[0]->last_trb = ep_ring->enqueue;
	/* Every TRB except the first & last will have its cycle bit flipped. */
	td_to_noop(xhci, ep_ring, urb_priv->td[0], true);

	/* Reset the ring enqueue back to the first TRB and its cycle bit. */
	ep_ring->enqueue = urb_priv->td[0]->first_trb;
	ep_ring->enq_seg = urb_priv->td[0]->start_seg;
	ep_ring->cycle_state = start_cycle;
	ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
	usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
	return ret;
}
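/*
 * Illustrative sketch (not part of the driver): the BEI decision in the
 * loop above, for xHCI 1.0 hosts without the XHCI_AVOID_BEI quirk. Every
 * TD's last TRB gets IOC, but all TDs except the final one also get BEI,
 * so the controller posts an event per TD while raising only a single
 * interrupt for the whole URB.
 */
static inline u32 example_isoc_completion_flags(int td_index, int num_tds)
{
	u32 field = TRB_IOC;

	if (td_index < num_tds - 1)
		field |= TRB_BEI;
	return field;
}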
3747 */ 3748 urb_priv->td[0]->last_trb = ep_ring->enqueue; 3749 /* Every TRB except the first & last will have its cycle bit flipped. */ 3750 td_to_noop(xhci, ep_ring, urb_priv->td[0], true); 3751 3752 /* Reset the ring enqueue back to the first TRB and its cycle bit. */ 3753 ep_ring->enqueue = urb_priv->td[0]->first_trb; 3754 ep_ring->enq_seg = urb_priv->td[0]->start_seg; 3755 ep_ring->cycle_state = start_cycle; 3756 ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp; 3757 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb); 3758 return ret; 3759 } 3760 3761 /* 3762 * Check transfer ring to guarantee there is enough room for the urb. 3763 * Update ISO URB start_frame and interval. 3764 * Update interval as xhci_queue_intr_tx does. Just use xhci frame_index to 3765 * update the urb->start_frame by now. 3766 * Always assume URB_ISO_ASAP set, and NEVER use urb->start_frame as input. 3767 */ 3768 int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, 3769 struct urb *urb, int slot_id, unsigned int ep_index) 3770 { 3771 struct xhci_virt_device *xdev; 3772 struct xhci_ring *ep_ring; 3773 struct xhci_ep_ctx *ep_ctx; 3774 int start_frame; 3775 int xhci_interval; 3776 int ep_interval; 3777 int num_tds, num_trbs, i; 3778 int ret; 3779 3780 xdev = xhci->devs[slot_id]; 3781 ep_ring = xdev->eps[ep_index].ring; 3782 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 3783 3784 num_trbs = 0; 3785 num_tds = urb->number_of_packets; 3786 for (i = 0; i < num_tds; i++) 3787 num_trbs += count_isoc_trbs_needed(xhci, urb, i); 3788 3789 /* Check the ring to guarantee there is enough room for the whole urb. 3790 * Do not insert any td of the urb to the ring if the check failed. 3791 */ 3792 ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK, 3793 num_trbs, mem_flags); 3794 if (ret) 3795 return ret; 3796 3797 start_frame = readl(&xhci->run_regs->microframe_index); 3798 start_frame &= 0x3fff; 3799 3800 urb->start_frame = start_frame; 3801 if (urb->dev->speed == USB_SPEED_LOW || 3802 urb->dev->speed == USB_SPEED_FULL) 3803 urb->start_frame >>= 3; 3804 3805 xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info)); 3806 ep_interval = urb->interval; 3807 /* Convert to microframes */ 3808 if (urb->dev->speed == USB_SPEED_LOW || 3809 urb->dev->speed == USB_SPEED_FULL) 3810 ep_interval *= 8; 3811 /* FIXME change this to a warning and a suggestion to use the new API 3812 * to set the polling interval (once the API is added). 3813 */ 3814 if (xhci_interval != ep_interval) { 3815 dev_dbg_ratelimited(&urb->dev->dev, 3816 "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n", 3817 ep_interval, ep_interval == 1 ? "" : "s", 3818 xhci_interval, xhci_interval == 1 ? "" : "s"); 3819 urb->interval = xhci_interval; 3820 /* Convert back to frames for LS/FS devices */ 3821 if (urb->dev->speed == USB_SPEED_LOW || 3822 urb->dev->speed == USB_SPEED_FULL) 3823 urb->interval /= 8; 3824 } 3825 ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free; 3826 3827 return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index); 3828 } 3829 3830 /**** Command Ring Operations ****/ 3831 3832 /* Generic function for queueing a command TRB on the command ring. 3833 * Check to make sure there's room on the command ring for one command TRB. 3834 * Also check that there's room reserved for commands that must not fail. 3835 * If this is a command that must not fail, meaning command_must_succeed = TRUE, 3836 * then only check for the number of reserved spots. 
/****		Command Ring Operations		****/

/* Generic function for queueing a command TRB on the command ring.
 * Check to make sure there's room on the command ring for one command TRB.
 * Also check that there's room reserved for commands that must not fail.
 * If this is a command that must not fail, meaning command_must_succeed = TRUE,
 * then only check for the number of reserved spots.
 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
 * because the command event handler may want to resubmit a failed command.
 */
static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 field1, u32 field2,
		u32 field3, u32 field4, bool command_must_succeed)
{
	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
	int ret;

	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ESHUTDOWN;

	if (!command_must_succeed)
		reserved_trbs++;

	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
			reserved_trbs, GFP_ATOMIC);
	if (ret < 0) {
		xhci_err(xhci, "ERR: No room for command on command ring\n");
		if (command_must_succeed)
			xhci_err(xhci, "ERR: Reserved TRB counting for unfailable commands failed.\n");
		return ret;
	}

	cmd->command_trb = xhci->cmd_ring->enqueue;
	list_add_tail(&cmd->cmd_list, &xhci->cmd_list);

	/* if there are no other commands queued we start the timeout timer */
	if (xhci->cmd_list.next == &cmd->cmd_list &&
			!timer_pending(&xhci->cmd_timer)) {
		xhci->current_cmd = cmd;
		mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
	}

	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
			field4 | xhci->cmd_ring->cycle_state);
	return 0;
}
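/*
 * Illustrative sketch (not part of the driver): the pattern the wrappers
 * below all follow, shown for a No Op command, assuming TRB_CMD_NOOP is
 * the No Op Command TRB type from xhci.h. Fields 1-3 are unused for this
 * command; field 4 carries the TRB type, and queue_command() ORs in the
 * ring's cycle state. The caller still has to ring the command doorbell.
 */
static inline int example_queue_noop(struct xhci_hcd *xhci,
		struct xhci_command *cmd)
{
	return queue_command(xhci, cmd, 0, 0, 0,
			TRB_TYPE(TRB_CMD_NOOP), false);
}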
/* Queue a slot enable or disable request on the command ring */
int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 trb_type, u32 slot_id)
{
	return queue_command(xhci, cmd, 0, 0, 0,
			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
}

/* Queue an address device command TRB */
int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
		dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
			| (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
}

int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	return queue_command(xhci, cmd, field1, field2, field3, field4, false);
}

/* Queue a reset device command TRB */
int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 slot_id)
{
	return queue_command(xhci, cmd, 0, 0, 0,
			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
		struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
		u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/* Queue an evaluate context command TRB */
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
		dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/*
 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
 * activity on an endpoint that is about to be suspended.
 */
int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
		int slot_id, unsigned int ep_index, int suspend)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_STOP_RING);
	u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);

	return queue_command(xhci, cmd, 0, 0, 0,
			trb_slot_id | trb_ep_index | type | trb_suspend, false);
}
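/*
 * Illustrative usage sketch (not part of the driver), assuming the
 * xhci_alloc_command()/xhci_free_command() helpers from xhci-mem.c and
 * xhci_ring_cmd_db() from xhci.c: allocate a command, queue a Stop
 * Endpoint ahead of a suspend, and ring the command doorbell. The caller
 * is assumed to hold xhci->lock, as queue_command() requires.
 */
static inline int example_stop_ep_for_suspend(struct xhci_hcd *xhci,
		int slot_id, unsigned int ep_index)
{
	struct xhci_command *cmd;
	int ret;

	cmd = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
	if (!cmd)
		return -ENOMEM;

	ret = xhci_queue_stop_endpoint(xhci, cmd, slot_id, ep_index, 1);
	if (ret)
		xhci_free_command(xhci, cmd);
	else
		xhci_ring_cmd_db(xhci);
	return ret;
}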
3949 */ 3950 static int queue_set_tr_deq(struct xhci_hcd *xhci, struct xhci_command *cmd, 3951 int slot_id, 3952 unsigned int ep_index, unsigned int stream_id, 3953 struct xhci_segment *deq_seg, 3954 union xhci_trb *deq_ptr, u32 cycle_state) 3955 { 3956 dma_addr_t addr; 3957 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id); 3958 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); 3959 u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id); 3960 u32 trb_sct = 0; 3961 u32 type = TRB_TYPE(TRB_SET_DEQ); 3962 struct xhci_virt_ep *ep; 3963 3964 addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr); 3965 if (addr == 0) { 3966 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n"); 3967 xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n", 3968 deq_seg, deq_ptr); 3969 return 0; 3970 } 3971 ep = &xhci->devs[slot_id]->eps[ep_index]; 3972 if ((ep->ep_state & SET_DEQ_PENDING)) { 3973 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n"); 3974 xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n"); 3975 return 0; 3976 } 3977 ep->queued_deq_seg = deq_seg; 3978 ep->queued_deq_ptr = deq_ptr; 3979 if (stream_id) 3980 trb_sct = SCT_FOR_TRB(SCT_PRI_TR); 3981 return queue_command(xhci, cmd, 3982 lower_32_bits(addr) | trb_sct | cycle_state, 3983 upper_32_bits(addr), trb_stream_id, 3984 trb_slot_id | trb_ep_index | type, false); 3985 } 3986 3987 int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd, 3988 int slot_id, unsigned int ep_index) 3989 { 3990 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id); 3991 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); 3992 u32 type = TRB_TYPE(TRB_RESET_EP); 3993 3994 return queue_command(xhci, cmd, 0, 0, 0, 3995 trb_slot_id | trb_ep_index | type, false); 3996 } 3997