1 /* 2 * xHCI host controller driver 3 * 4 * Copyright (C) 2008 Intel Corp. 5 * 6 * Author: Sarah Sharp 7 * Some code borrowed from the Linux EHCI driver. 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License version 2 as 11 * published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, but 14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY 15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 16 * for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program; if not, write to the Free Software Foundation, 20 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 21 */ 22 23 /* 24 * Ring initialization rules: 25 * 1. Each segment is initialized to zero, except for link TRBs. 26 * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or 27 * Consumer Cycle State (CCS), depending on ring function. 28 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment. 29 * 30 * Ring behavior rules: 31 * 1. A ring is empty if enqueue == dequeue. This means there will always be at 32 * least one free TRB in the ring. This is useful if you want to turn that 33 * into a link TRB and expand the ring. 34 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a 35 * link TRB, then load the pointer with the address in the link TRB. If the 36 * link TRB had its toggle bit set, you may need to update the ring cycle 37 * state (see cycle bit rules). You may have to do this multiple times 38 * until you reach a non-link TRB. 39 * 3. A ring is full if enqueue++ (for the definition of increment above) 40 * equals the dequeue pointer. 41 * 42 * Cycle bit rules: 43 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit 44 * in a link TRB, it must toggle the ring cycle state. 45 * 2. When a producer increments an enqueue pointer and encounters a toggle bit 46 * in a link TRB, it must toggle the ring cycle state. 47 * 48 * Producer rules: 49 * 1. Check if ring is full before you enqueue. 50 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing. 51 * Update enqueue pointer between each write (which may update the ring 52 * cycle state). 53 * 3. Notify consumer. If SW is producer, it rings the doorbell for command 54 * and endpoint rings. If HC is the producer for the event ring, 55 * and it generates an interrupt according to interrupt modulation rules. 56 * 57 * Consumer rules: 58 * 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state, 59 * the TRB is owned by the consumer. 60 * 2. Update dequeue pointer (which may update the ring cycle state) and 61 * continue processing TRBs until you reach a TRB which is not owned by you. 62 * 3. Notify the producer. SW is the consumer for the event ring, and it 63 * updates event ring dequeue pointer. HC is the consumer for the command and 64 * endpoint rings; it generates events on the event ring for these. 65 */ 66 67 #include <linux/scatterlist.h> 68 #include <linux/slab.h> 69 #include "xhci.h" 70 71 static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci, 72 struct xhci_virt_device *virt_dev, 73 struct xhci_event_cmd *event); 74 75 /* 76 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA 77 * address of the TRB. 78 */ 79 dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, 80 union xhci_trb *trb) 81 { 82 unsigned long segment_offset; 83 84 if (!seg || !trb || trb < seg->trbs) 85 return 0; 86 /* offset in TRBs */ 87 segment_offset = trb - seg->trbs; 88 if (segment_offset > TRBS_PER_SEGMENT) 89 return 0; 90 return seg->dma + (segment_offset * sizeof(*trb)); 91 } 92 93 /* Does this link TRB point to the first segment in a ring, 94 * or was the previous TRB the last TRB on the last segment in the ERST? 95 */ 96 static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring, 97 struct xhci_segment *seg, union xhci_trb *trb) 98 { 99 if (ring == xhci->event_ring) 100 return (trb == &seg->trbs[TRBS_PER_SEGMENT]) && 101 (seg->next == xhci->event_ring->first_seg); 102 else 103 return le32_to_cpu(trb->link.control) & LINK_TOGGLE; 104 } 105 106 /* Is this TRB a link TRB or was the last TRB the last TRB in this event ring 107 * segment? I.e. would the updated event TRB pointer step off the end of the 108 * event seg? 109 */ 110 static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, 111 struct xhci_segment *seg, union xhci_trb *trb) 112 { 113 if (ring == xhci->event_ring) 114 return trb == &seg->trbs[TRBS_PER_SEGMENT]; 115 else 116 return TRB_TYPE_LINK_LE32(trb->link.control); 117 } 118 119 static int enqueue_is_link_trb(struct xhci_ring *ring) 120 { 121 struct xhci_link_trb *link = &ring->enqueue->link; 122 return TRB_TYPE_LINK_LE32(link->control); 123 } 124 125 /* Updates trb to point to the next TRB in the ring, and updates seg if the next 126 * TRB is in a new segment. This does not skip over link TRBs, and it does not 127 * effect the ring dequeue or enqueue pointers. 128 */ 129 static void next_trb(struct xhci_hcd *xhci, 130 struct xhci_ring *ring, 131 struct xhci_segment **seg, 132 union xhci_trb **trb) 133 { 134 if (last_trb(xhci, ring, *seg, *trb)) { 135 *seg = (*seg)->next; 136 *trb = ((*seg)->trbs); 137 } else { 138 (*trb)++; 139 } 140 } 141 142 /* 143 * See Cycle bit rules. SW is the consumer for the event ring only. 144 * Don't make a ring full of link TRBs. That would be dumb and this would loop. 145 */ 146 static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer) 147 { 148 union xhci_trb *next = ++(ring->dequeue); 149 unsigned long long addr; 150 151 ring->deq_updates++; 152 /* Update the dequeue pointer further if that was a link TRB or we're at 153 * the end of an event ring segment (which doesn't have link TRBS) 154 */ 155 while (last_trb(xhci, ring, ring->deq_seg, next)) { 156 if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) { 157 ring->cycle_state = (ring->cycle_state ? 0 : 1); 158 if (!in_interrupt()) 159 xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n", 160 ring, 161 (unsigned int) ring->cycle_state); 162 } 163 ring->deq_seg = ring->deq_seg->next; 164 ring->dequeue = ring->deq_seg->trbs; 165 next = ring->dequeue; 166 } 167 addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue); 168 } 169 170 /* 171 * See Cycle bit rules. SW is the consumer for the event ring only. 172 * Don't make a ring full of link TRBs. That would be dumb and this would loop. 173 * 174 * If we've just enqueued a TRB that is in the middle of a TD (meaning the 175 * chain bit is set), then set the chain bit in all the following link TRBs. 176 * If we've enqueued the last TRB in a TD, make sure the following link TRBs 177 * have their chain bit cleared (so that each Link TRB is a separate TD). 178 * 179 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit 180 * set, but other sections talk about dealing with the chain bit set. This was 181 * fixed in the 0.96 specification errata, but we have to assume that all 0.95 182 * xHCI hardware can't handle the chain bit being cleared on a link TRB. 183 * 184 * @more_trbs_coming: Will you enqueue more TRBs before calling 185 * prepare_transfer()? 186 */ 187 static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, 188 bool consumer, bool more_trbs_coming) 189 { 190 u32 chain; 191 union xhci_trb *next; 192 unsigned long long addr; 193 194 chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN; 195 next = ++(ring->enqueue); 196 197 ring->enq_updates++; 198 /* Update the dequeue pointer further if that was a link TRB or we're at 199 * the end of an event ring segment (which doesn't have link TRBS) 200 */ 201 while (last_trb(xhci, ring, ring->enq_seg, next)) { 202 if (!consumer) { 203 if (ring != xhci->event_ring) { 204 /* 205 * If the caller doesn't plan on enqueueing more 206 * TDs before ringing the doorbell, then we 207 * don't want to give the link TRB to the 208 * hardware just yet. We'll give the link TRB 209 * back in prepare_ring() just before we enqueue 210 * the TD at the top of the ring. 211 */ 212 if (!chain && !more_trbs_coming) 213 break; 214 215 /* If we're not dealing with 0.95 hardware, 216 * carry over the chain bit of the previous TRB 217 * (which may mean the chain bit is cleared). 218 */ 219 if (!xhci_link_trb_quirk(xhci)) { 220 next->link.control &= 221 cpu_to_le32(~TRB_CHAIN); 222 next->link.control |= 223 cpu_to_le32(chain); 224 } 225 /* Give this link TRB to the hardware */ 226 wmb(); 227 next->link.control ^= cpu_to_le32(TRB_CYCLE); 228 } 229 /* Toggle the cycle bit after the last ring segment. */ 230 if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) { 231 ring->cycle_state = (ring->cycle_state ? 0 : 1); 232 if (!in_interrupt()) 233 xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n", 234 ring, 235 (unsigned int) ring->cycle_state); 236 } 237 } 238 ring->enq_seg = ring->enq_seg->next; 239 ring->enqueue = ring->enq_seg->trbs; 240 next = ring->enqueue; 241 } 242 addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue); 243 } 244 245 /* 246 * Check to see if there's room to enqueue num_trbs on the ring. See rules 247 * above. 248 * FIXME: this would be simpler and faster if we just kept track of the number 249 * of free TRBs in a ring. 250 */ 251 static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring, 252 unsigned int num_trbs) 253 { 254 int i; 255 union xhci_trb *enq = ring->enqueue; 256 struct xhci_segment *enq_seg = ring->enq_seg; 257 struct xhci_segment *cur_seg; 258 unsigned int left_on_ring; 259 260 /* If we are currently pointing to a link TRB, advance the 261 * enqueue pointer before checking for space */ 262 while (last_trb(xhci, ring, enq_seg, enq)) { 263 enq_seg = enq_seg->next; 264 enq = enq_seg->trbs; 265 } 266 267 /* Check if ring is empty */ 268 if (enq == ring->dequeue) { 269 /* Can't use link trbs */ 270 left_on_ring = TRBS_PER_SEGMENT - 1; 271 for (cur_seg = enq_seg->next; cur_seg != enq_seg; 272 cur_seg = cur_seg->next) 273 left_on_ring += TRBS_PER_SEGMENT - 1; 274 275 /* Always need one TRB free in the ring. */ 276 left_on_ring -= 1; 277 if (num_trbs > left_on_ring) { 278 xhci_warn(xhci, "Not enough room on ring; " 279 "need %u TRBs, %u TRBs left\n", 280 num_trbs, left_on_ring); 281 return 0; 282 } 283 return 1; 284 } 285 /* Make sure there's an extra empty TRB available */ 286 for (i = 0; i <= num_trbs; ++i) { 287 if (enq == ring->dequeue) 288 return 0; 289 enq++; 290 while (last_trb(xhci, ring, enq_seg, enq)) { 291 enq_seg = enq_seg->next; 292 enq = enq_seg->trbs; 293 } 294 } 295 return 1; 296 } 297 298 /* Ring the host controller doorbell after placing a command on the ring */ 299 void xhci_ring_cmd_db(struct xhci_hcd *xhci) 300 { 301 xhci_dbg(xhci, "// Ding dong!\n"); 302 xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]); 303 /* Flush PCI posted writes */ 304 xhci_readl(xhci, &xhci->dba->doorbell[0]); 305 } 306 307 void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, 308 unsigned int slot_id, 309 unsigned int ep_index, 310 unsigned int stream_id) 311 { 312 __le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id]; 313 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; 314 unsigned int ep_state = ep->ep_state; 315 316 /* Don't ring the doorbell for this endpoint if there are pending 317 * cancellations because we don't want to interrupt processing. 318 * We don't want to restart any stream rings if there's a set dequeue 319 * pointer command pending because the device can choose to start any 320 * stream once the endpoint is on the HW schedule. 321 * FIXME - check all the stream rings for pending cancellations. 322 */ 323 if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) || 324 (ep_state & EP_HALTED)) 325 return; 326 xhci_writel(xhci, DB_VALUE(ep_index, stream_id), db_addr); 327 /* The CPU has better things to do at this point than wait for a 328 * write-posting flush. It'll get there soon enough. 329 */ 330 } 331 332 /* Ring the doorbell for any rings with pending URBs */ 333 static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci, 334 unsigned int slot_id, 335 unsigned int ep_index) 336 { 337 unsigned int stream_id; 338 struct xhci_virt_ep *ep; 339 340 ep = &xhci->devs[slot_id]->eps[ep_index]; 341 342 /* A ring has pending URBs if its TD list is not empty */ 343 if (!(ep->ep_state & EP_HAS_STREAMS)) { 344 if (!(list_empty(&ep->ring->td_list))) 345 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0); 346 return; 347 } 348 349 for (stream_id = 1; stream_id < ep->stream_info->num_streams; 350 stream_id++) { 351 struct xhci_stream_info *stream_info = ep->stream_info; 352 if (!list_empty(&stream_info->stream_rings[stream_id]->td_list)) 353 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 354 stream_id); 355 } 356 } 357 358 /* 359 * Find the segment that trb is in. Start searching in start_seg. 360 * If we must move past a segment that has a link TRB with a toggle cycle state 361 * bit set, then we will toggle the value pointed at by cycle_state. 362 */ 363 static struct xhci_segment *find_trb_seg( 364 struct xhci_segment *start_seg, 365 union xhci_trb *trb, int *cycle_state) 366 { 367 struct xhci_segment *cur_seg = start_seg; 368 struct xhci_generic_trb *generic_trb; 369 370 while (cur_seg->trbs > trb || 371 &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) { 372 generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic; 373 if (generic_trb->field[3] & cpu_to_le32(LINK_TOGGLE)) 374 *cycle_state ^= 0x1; 375 cur_seg = cur_seg->next; 376 if (cur_seg == start_seg) 377 /* Looped over the entire list. Oops! */ 378 return NULL; 379 } 380 return cur_seg; 381 } 382 383 384 static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci, 385 unsigned int slot_id, unsigned int ep_index, 386 unsigned int stream_id) 387 { 388 struct xhci_virt_ep *ep; 389 390 ep = &xhci->devs[slot_id]->eps[ep_index]; 391 /* Common case: no streams */ 392 if (!(ep->ep_state & EP_HAS_STREAMS)) 393 return ep->ring; 394 395 if (stream_id == 0) { 396 xhci_warn(xhci, 397 "WARN: Slot ID %u, ep index %u has streams, " 398 "but URB has no stream ID.\n", 399 slot_id, ep_index); 400 return NULL; 401 } 402 403 if (stream_id < ep->stream_info->num_streams) 404 return ep->stream_info->stream_rings[stream_id]; 405 406 xhci_warn(xhci, 407 "WARN: Slot ID %u, ep index %u has " 408 "stream IDs 1 to %u allocated, " 409 "but stream ID %u is requested.\n", 410 slot_id, ep_index, 411 ep->stream_info->num_streams - 1, 412 stream_id); 413 return NULL; 414 } 415 416 /* Get the right ring for the given URB. 417 * If the endpoint supports streams, boundary check the URB's stream ID. 418 * If the endpoint doesn't support streams, return the singular endpoint ring. 419 */ 420 static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci, 421 struct urb *urb) 422 { 423 return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id, 424 xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id); 425 } 426 427 /* 428 * Move the xHC's endpoint ring dequeue pointer past cur_td. 429 * Record the new state of the xHC's endpoint ring dequeue segment, 430 * dequeue pointer, and new consumer cycle state in state. 431 * Update our internal representation of the ring's dequeue pointer. 432 * 433 * We do this in three jumps: 434 * - First we update our new ring state to be the same as when the xHC stopped. 435 * - Then we traverse the ring to find the segment that contains 436 * the last TRB in the TD. We toggle the xHC's new cycle state when we pass 437 * any link TRBs with the toggle cycle bit set. 438 * - Finally we move the dequeue state one TRB further, toggling the cycle bit 439 * if we've moved it past a link TRB with the toggle cycle bit set. 440 * 441 * Some of the uses of xhci_generic_trb are grotty, but if they're done 442 * with correct __le32 accesses they should work fine. Only users of this are 443 * in here. 444 */ 445 void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, 446 unsigned int slot_id, unsigned int ep_index, 447 unsigned int stream_id, struct xhci_td *cur_td, 448 struct xhci_dequeue_state *state) 449 { 450 struct xhci_virt_device *dev = xhci->devs[slot_id]; 451 struct xhci_ring *ep_ring; 452 struct xhci_generic_trb *trb; 453 struct xhci_ep_ctx *ep_ctx; 454 dma_addr_t addr; 455 456 ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id, 457 ep_index, stream_id); 458 if (!ep_ring) { 459 xhci_warn(xhci, "WARN can't find new dequeue state " 460 "for invalid stream ID %u.\n", 461 stream_id); 462 return; 463 } 464 state->new_cycle_state = 0; 465 xhci_dbg(xhci, "Finding segment containing stopped TRB.\n"); 466 state->new_deq_seg = find_trb_seg(cur_td->start_seg, 467 dev->eps[ep_index].stopped_trb, 468 &state->new_cycle_state); 469 if (!state->new_deq_seg) { 470 WARN_ON(1); 471 return; 472 } 473 474 /* Dig out the cycle state saved by the xHC during the stop ep cmd */ 475 xhci_dbg(xhci, "Finding endpoint context\n"); 476 ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); 477 state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq); 478 479 state->new_deq_ptr = cur_td->last_trb; 480 xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n"); 481 state->new_deq_seg = find_trb_seg(state->new_deq_seg, 482 state->new_deq_ptr, 483 &state->new_cycle_state); 484 if (!state->new_deq_seg) { 485 WARN_ON(1); 486 return; 487 } 488 489 trb = &state->new_deq_ptr->generic; 490 if (TRB_TYPE_LINK_LE32(trb->field[3]) && 491 (trb->field[3] & cpu_to_le32(LINK_TOGGLE))) 492 state->new_cycle_state ^= 0x1; 493 next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr); 494 495 /* 496 * If there is only one segment in a ring, find_trb_seg()'s while loop 497 * will not run, and it will return before it has a chance to see if it 498 * needs to toggle the cycle bit. It can't tell if the stalled transfer 499 * ended just before the link TRB on a one-segment ring, or if the TD 500 * wrapped around the top of the ring, because it doesn't have the TD in 501 * question. Look for the one-segment case where stalled TRB's address 502 * is greater than the new dequeue pointer address. 503 */ 504 if (ep_ring->first_seg == ep_ring->first_seg->next && 505 state->new_deq_ptr < dev->eps[ep_index].stopped_trb) 506 state->new_cycle_state ^= 0x1; 507 xhci_dbg(xhci, "Cycle state = 0x%x\n", state->new_cycle_state); 508 509 /* Don't update the ring cycle state for the producer (us). */ 510 xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n", 511 state->new_deq_seg); 512 addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr); 513 xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n", 514 (unsigned long long) addr); 515 } 516 517 static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, 518 struct xhci_td *cur_td) 519 { 520 struct xhci_segment *cur_seg; 521 union xhci_trb *cur_trb; 522 523 for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb; 524 true; 525 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { 526 if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) { 527 /* Unchain any chained Link TRBs, but 528 * leave the pointers intact. 529 */ 530 cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN); 531 xhci_dbg(xhci, "Cancel (unchain) link TRB\n"); 532 xhci_dbg(xhci, "Address = %p (0x%llx dma); " 533 "in seg %p (0x%llx dma)\n", 534 cur_trb, 535 (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb), 536 cur_seg, 537 (unsigned long long)cur_seg->dma); 538 } else { 539 cur_trb->generic.field[0] = 0; 540 cur_trb->generic.field[1] = 0; 541 cur_trb->generic.field[2] = 0; 542 /* Preserve only the cycle bit of this TRB */ 543 cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE); 544 cur_trb->generic.field[3] |= cpu_to_le32( 545 TRB_TYPE(TRB_TR_NOOP)); 546 xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) " 547 "in seg %p (0x%llx dma)\n", 548 cur_trb, 549 (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb), 550 cur_seg, 551 (unsigned long long)cur_seg->dma); 552 } 553 if (cur_trb == cur_td->last_trb) 554 break; 555 } 556 } 557 558 static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, 559 unsigned int ep_index, unsigned int stream_id, 560 struct xhci_segment *deq_seg, 561 union xhci_trb *deq_ptr, u32 cycle_state); 562 563 void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, 564 unsigned int slot_id, unsigned int ep_index, 565 unsigned int stream_id, 566 struct xhci_dequeue_state *deq_state) 567 { 568 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; 569 570 xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), " 571 "new deq ptr = %p (0x%llx dma), new cycle = %u\n", 572 deq_state->new_deq_seg, 573 (unsigned long long)deq_state->new_deq_seg->dma, 574 deq_state->new_deq_ptr, 575 (unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr), 576 deq_state->new_cycle_state); 577 queue_set_tr_deq(xhci, slot_id, ep_index, stream_id, 578 deq_state->new_deq_seg, 579 deq_state->new_deq_ptr, 580 (u32) deq_state->new_cycle_state); 581 /* Stop the TD queueing code from ringing the doorbell until 582 * this command completes. The HC won't set the dequeue pointer 583 * if the ring is running, and ringing the doorbell starts the 584 * ring running. 585 */ 586 ep->ep_state |= SET_DEQ_PENDING; 587 } 588 589 static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci, 590 struct xhci_virt_ep *ep) 591 { 592 ep->ep_state &= ~EP_HALT_PENDING; 593 /* Can't del_timer_sync in interrupt, so we attempt to cancel. If the 594 * timer is running on another CPU, we don't decrement stop_cmds_pending 595 * (since we didn't successfully stop the watchdog timer). 596 */ 597 if (del_timer(&ep->stop_cmd_timer)) 598 ep->stop_cmds_pending--; 599 } 600 601 /* Must be called with xhci->lock held in interrupt context */ 602 static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci, 603 struct xhci_td *cur_td, int status, char *adjective) 604 { 605 struct usb_hcd *hcd; 606 struct urb *urb; 607 struct urb_priv *urb_priv; 608 609 urb = cur_td->urb; 610 urb_priv = urb->hcpriv; 611 urb_priv->td_cnt++; 612 hcd = bus_to_hcd(urb->dev->bus); 613 614 /* Only giveback urb when this is the last td in urb */ 615 if (urb_priv->td_cnt == urb_priv->length) { 616 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { 617 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--; 618 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { 619 if (xhci->quirks & XHCI_AMD_PLL_FIX) 620 usb_amd_quirk_pll_enable(); 621 } 622 } 623 usb_hcd_unlink_urb_from_ep(hcd, urb); 624 625 spin_unlock(&xhci->lock); 626 usb_hcd_giveback_urb(hcd, urb, status); 627 xhci_urb_free_priv(xhci, urb_priv); 628 spin_lock(&xhci->lock); 629 } 630 } 631 632 /* 633 * When we get a command completion for a Stop Endpoint Command, we need to 634 * unlink any cancelled TDs from the ring. There are two ways to do that: 635 * 636 * 1. If the HW was in the middle of processing the TD that needs to be 637 * cancelled, then we must move the ring's dequeue pointer past the last TRB 638 * in the TD with a Set Dequeue Pointer Command. 639 * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain 640 * bit cleared) so that the HW will skip over them. 641 */ 642 static void handle_stopped_endpoint(struct xhci_hcd *xhci, 643 union xhci_trb *trb, struct xhci_event_cmd *event) 644 { 645 unsigned int slot_id; 646 unsigned int ep_index; 647 struct xhci_virt_device *virt_dev; 648 struct xhci_ring *ep_ring; 649 struct xhci_virt_ep *ep; 650 struct list_head *entry; 651 struct xhci_td *cur_td = NULL; 652 struct xhci_td *last_unlinked_td; 653 654 struct xhci_dequeue_state deq_state; 655 656 if (unlikely(TRB_TO_SUSPEND_PORT( 657 le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])))) { 658 slot_id = TRB_TO_SLOT_ID( 659 le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])); 660 virt_dev = xhci->devs[slot_id]; 661 if (virt_dev) 662 handle_cmd_in_cmd_wait_list(xhci, virt_dev, 663 event); 664 else 665 xhci_warn(xhci, "Stop endpoint command " 666 "completion for disabled slot %u\n", 667 slot_id); 668 return; 669 } 670 671 memset(&deq_state, 0, sizeof(deq_state)); 672 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3])); 673 ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); 674 ep = &xhci->devs[slot_id]->eps[ep_index]; 675 676 if (list_empty(&ep->cancelled_td_list)) { 677 xhci_stop_watchdog_timer_in_irq(xhci, ep); 678 ep->stopped_td = NULL; 679 ep->stopped_trb = NULL; 680 ring_doorbell_for_active_rings(xhci, slot_id, ep_index); 681 return; 682 } 683 684 /* Fix up the ep ring first, so HW stops executing cancelled TDs. 685 * We have the xHCI lock, so nothing can modify this list until we drop 686 * it. We're also in the event handler, so we can't get re-interrupted 687 * if another Stop Endpoint command completes 688 */ 689 list_for_each(entry, &ep->cancelled_td_list) { 690 cur_td = list_entry(entry, struct xhci_td, cancelled_td_list); 691 xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n", 692 cur_td->first_trb, 693 (unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb)); 694 ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb); 695 if (!ep_ring) { 696 /* This shouldn't happen unless a driver is mucking 697 * with the stream ID after submission. This will 698 * leave the TD on the hardware ring, and the hardware 699 * will try to execute it, and may access a buffer 700 * that has already been freed. In the best case, the 701 * hardware will execute it, and the event handler will 702 * ignore the completion event for that TD, since it was 703 * removed from the td_list for that endpoint. In 704 * short, don't muck with the stream ID after 705 * submission. 706 */ 707 xhci_warn(xhci, "WARN Cancelled URB %p " 708 "has invalid stream ID %u.\n", 709 cur_td->urb, 710 cur_td->urb->stream_id); 711 goto remove_finished_td; 712 } 713 /* 714 * If we stopped on the TD we need to cancel, then we have to 715 * move the xHC endpoint ring dequeue pointer past this TD. 716 */ 717 if (cur_td == ep->stopped_td) 718 xhci_find_new_dequeue_state(xhci, slot_id, ep_index, 719 cur_td->urb->stream_id, 720 cur_td, &deq_state); 721 else 722 td_to_noop(xhci, ep_ring, cur_td); 723 remove_finished_td: 724 /* 725 * The event handler won't see a completion for this TD anymore, 726 * so remove it from the endpoint ring's TD list. Keep it in 727 * the cancelled TD list for URB completion later. 728 */ 729 list_del(&cur_td->td_list); 730 } 731 last_unlinked_td = cur_td; 732 xhci_stop_watchdog_timer_in_irq(xhci, ep); 733 734 /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */ 735 if (deq_state.new_deq_ptr && deq_state.new_deq_seg) { 736 xhci_queue_new_dequeue_state(xhci, 737 slot_id, ep_index, 738 ep->stopped_td->urb->stream_id, 739 &deq_state); 740 xhci_ring_cmd_db(xhci); 741 } else { 742 /* Otherwise ring the doorbell(s) to restart queued transfers */ 743 ring_doorbell_for_active_rings(xhci, slot_id, ep_index); 744 } 745 ep->stopped_td = NULL; 746 ep->stopped_trb = NULL; 747 748 /* 749 * Drop the lock and complete the URBs in the cancelled TD list. 750 * New TDs to be cancelled might be added to the end of the list before 751 * we can complete all the URBs for the TDs we already unlinked. 752 * So stop when we've completed the URB for the last TD we unlinked. 753 */ 754 do { 755 cur_td = list_entry(ep->cancelled_td_list.next, 756 struct xhci_td, cancelled_td_list); 757 list_del(&cur_td->cancelled_td_list); 758 759 /* Clean up the cancelled URB */ 760 /* Doesn't matter what we pass for status, since the core will 761 * just overwrite it (because the URB has been unlinked). 762 */ 763 xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled"); 764 765 /* Stop processing the cancelled list if the watchdog timer is 766 * running. 767 */ 768 if (xhci->xhc_state & XHCI_STATE_DYING) 769 return; 770 } while (cur_td != last_unlinked_td); 771 772 /* Return to the event handler with xhci->lock re-acquired */ 773 } 774 775 /* Watchdog timer function for when a stop endpoint command fails to complete. 776 * In this case, we assume the host controller is broken or dying or dead. The 777 * host may still be completing some other events, so we have to be careful to 778 * let the event ring handler and the URB dequeueing/enqueueing functions know 779 * through xhci->state. 780 * 781 * The timer may also fire if the host takes a very long time to respond to the 782 * command, and the stop endpoint command completion handler cannot delete the 783 * timer before the timer function is called. Another endpoint cancellation may 784 * sneak in before the timer function can grab the lock, and that may queue 785 * another stop endpoint command and add the timer back. So we cannot use a 786 * simple flag to say whether there is a pending stop endpoint command for a 787 * particular endpoint. 788 * 789 * Instead we use a combination of that flag and a counter for the number of 790 * pending stop endpoint commands. If the timer is the tail end of the last 791 * stop endpoint command, and the endpoint's command is still pending, we assume 792 * the host is dying. 793 */ 794 void xhci_stop_endpoint_command_watchdog(unsigned long arg) 795 { 796 struct xhci_hcd *xhci; 797 struct xhci_virt_ep *ep; 798 struct xhci_virt_ep *temp_ep; 799 struct xhci_ring *ring; 800 struct xhci_td *cur_td; 801 int ret, i, j; 802 803 ep = (struct xhci_virt_ep *) arg; 804 xhci = ep->xhci; 805 806 spin_lock(&xhci->lock); 807 808 ep->stop_cmds_pending--; 809 if (xhci->xhc_state & XHCI_STATE_DYING) { 810 xhci_dbg(xhci, "Stop EP timer ran, but another timer marked " 811 "xHCI as DYING, exiting.\n"); 812 spin_unlock(&xhci->lock); 813 return; 814 } 815 if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) { 816 xhci_dbg(xhci, "Stop EP timer ran, but no command pending, " 817 "exiting.\n"); 818 spin_unlock(&xhci->lock); 819 return; 820 } 821 822 xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n"); 823 xhci_warn(xhci, "Assuming host is dying, halting host.\n"); 824 /* Oops, HC is dead or dying or at least not responding to the stop 825 * endpoint command. 826 */ 827 xhci->xhc_state |= XHCI_STATE_DYING; 828 /* Disable interrupts from the host controller and start halting it */ 829 xhci_quiesce(xhci); 830 spin_unlock(&xhci->lock); 831 832 ret = xhci_halt(xhci); 833 834 spin_lock(&xhci->lock); 835 if (ret < 0) { 836 /* This is bad; the host is not responding to commands and it's 837 * not allowing itself to be halted. At least interrupts are 838 * disabled. If we call usb_hc_died(), it will attempt to 839 * disconnect all device drivers under this host. Those 840 * disconnect() methods will wait for all URBs to be unlinked, 841 * so we must complete them. 842 */ 843 xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n"); 844 xhci_warn(xhci, "Completing active URBs anyway.\n"); 845 /* We could turn all TDs on the rings to no-ops. This won't 846 * help if the host has cached part of the ring, and is slow if 847 * we want to preserve the cycle bit. Skip it and hope the host 848 * doesn't touch the memory. 849 */ 850 } 851 for (i = 0; i < MAX_HC_SLOTS; i++) { 852 if (!xhci->devs[i]) 853 continue; 854 for (j = 0; j < 31; j++) { 855 temp_ep = &xhci->devs[i]->eps[j]; 856 ring = temp_ep->ring; 857 if (!ring) 858 continue; 859 xhci_dbg(xhci, "Killing URBs for slot ID %u, " 860 "ep index %u\n", i, j); 861 while (!list_empty(&ring->td_list)) { 862 cur_td = list_first_entry(&ring->td_list, 863 struct xhci_td, 864 td_list); 865 list_del(&cur_td->td_list); 866 if (!list_empty(&cur_td->cancelled_td_list)) 867 list_del(&cur_td->cancelled_td_list); 868 xhci_giveback_urb_in_irq(xhci, cur_td, 869 -ESHUTDOWN, "killed"); 870 } 871 while (!list_empty(&temp_ep->cancelled_td_list)) { 872 cur_td = list_first_entry( 873 &temp_ep->cancelled_td_list, 874 struct xhci_td, 875 cancelled_td_list); 876 list_del(&cur_td->cancelled_td_list); 877 xhci_giveback_urb_in_irq(xhci, cur_td, 878 -ESHUTDOWN, "killed"); 879 } 880 } 881 } 882 spin_unlock(&xhci->lock); 883 xhci_dbg(xhci, "Calling usb_hc_died()\n"); 884 usb_hc_died(xhci_to_hcd(xhci)->primary_hcd); 885 xhci_dbg(xhci, "xHCI host controller is dead.\n"); 886 } 887 888 /* 889 * When we get a completion for a Set Transfer Ring Dequeue Pointer command, 890 * we need to clear the set deq pending flag in the endpoint ring state, so that 891 * the TD queueing code can ring the doorbell again. We also need to ring the 892 * endpoint doorbell to restart the ring, but only if there aren't more 893 * cancellations pending. 894 */ 895 static void handle_set_deq_completion(struct xhci_hcd *xhci, 896 struct xhci_event_cmd *event, 897 union xhci_trb *trb) 898 { 899 unsigned int slot_id; 900 unsigned int ep_index; 901 unsigned int stream_id; 902 struct xhci_ring *ep_ring; 903 struct xhci_virt_device *dev; 904 struct xhci_ep_ctx *ep_ctx; 905 struct xhci_slot_ctx *slot_ctx; 906 907 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3])); 908 ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); 909 stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2])); 910 dev = xhci->devs[slot_id]; 911 912 ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id); 913 if (!ep_ring) { 914 xhci_warn(xhci, "WARN Set TR deq ptr command for " 915 "freed stream ID %u\n", 916 stream_id); 917 /* XXX: Harmless??? */ 918 dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING; 919 return; 920 } 921 922 ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); 923 slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx); 924 925 if (GET_COMP_CODE(le32_to_cpu(event->status)) != COMP_SUCCESS) { 926 unsigned int ep_state; 927 unsigned int slot_state; 928 929 switch (GET_COMP_CODE(le32_to_cpu(event->status))) { 930 case COMP_TRB_ERR: 931 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because " 932 "of stream ID configuration\n"); 933 break; 934 case COMP_CTX_STATE: 935 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due " 936 "to incorrect slot or ep state.\n"); 937 ep_state = le32_to_cpu(ep_ctx->ep_info); 938 ep_state &= EP_STATE_MASK; 939 slot_state = le32_to_cpu(slot_ctx->dev_state); 940 slot_state = GET_SLOT_STATE(slot_state); 941 xhci_dbg(xhci, "Slot state = %u, EP state = %u\n", 942 slot_state, ep_state); 943 break; 944 case COMP_EBADSLT: 945 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because " 946 "slot %u was not enabled.\n", slot_id); 947 break; 948 default: 949 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown " 950 "completion code of %u.\n", 951 GET_COMP_CODE(le32_to_cpu(event->status))); 952 break; 953 } 954 /* OK what do we do now? The endpoint state is hosed, and we 955 * should never get to this point if the synchronization between 956 * queueing, and endpoint state are correct. This might happen 957 * if the device gets disconnected after we've finished 958 * cancelling URBs, which might not be an error... 959 */ 960 } else { 961 xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n", 962 le64_to_cpu(ep_ctx->deq)); 963 if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg, 964 dev->eps[ep_index].queued_deq_ptr) == 965 (le64_to_cpu(ep_ctx->deq) & ~(EP_CTX_CYCLE_MASK))) { 966 /* Update the ring's dequeue segment and dequeue pointer 967 * to reflect the new position. 968 */ 969 ep_ring->deq_seg = dev->eps[ep_index].queued_deq_seg; 970 ep_ring->dequeue = dev->eps[ep_index].queued_deq_ptr; 971 } else { 972 xhci_warn(xhci, "Mismatch between completed Set TR Deq " 973 "Ptr command & xHCI internal state.\n"); 974 xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n", 975 dev->eps[ep_index].queued_deq_seg, 976 dev->eps[ep_index].queued_deq_ptr); 977 } 978 } 979 980 dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING; 981 dev->eps[ep_index].queued_deq_seg = NULL; 982 dev->eps[ep_index].queued_deq_ptr = NULL; 983 /* Restart any rings with pending URBs */ 984 ring_doorbell_for_active_rings(xhci, slot_id, ep_index); 985 } 986 987 static void handle_reset_ep_completion(struct xhci_hcd *xhci, 988 struct xhci_event_cmd *event, 989 union xhci_trb *trb) 990 { 991 int slot_id; 992 unsigned int ep_index; 993 994 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3])); 995 ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); 996 /* This command will only fail if the endpoint wasn't halted, 997 * but we don't care. 998 */ 999 xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n", 1000 GET_COMP_CODE(le32_to_cpu(event->status))); 1001 1002 /* HW with the reset endpoint quirk needs to have a configure endpoint 1003 * command complete before the endpoint can be used. Queue that here 1004 * because the HW can't handle two commands being queued in a row. 1005 */ 1006 if (xhci->quirks & XHCI_RESET_EP_QUIRK) { 1007 xhci_dbg(xhci, "Queueing configure endpoint command\n"); 1008 xhci_queue_configure_endpoint(xhci, 1009 xhci->devs[slot_id]->in_ctx->dma, slot_id, 1010 false); 1011 xhci_ring_cmd_db(xhci); 1012 } else { 1013 /* Clear our internal halted state and restart the ring(s) */ 1014 xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED; 1015 ring_doorbell_for_active_rings(xhci, slot_id, ep_index); 1016 } 1017 } 1018 1019 /* Check to see if a command in the device's command queue matches this one. 1020 * Signal the completion or free the command, and return 1. Return 0 if the 1021 * completed command isn't at the head of the command list. 1022 */ 1023 static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci, 1024 struct xhci_virt_device *virt_dev, 1025 struct xhci_event_cmd *event) 1026 { 1027 struct xhci_command *command; 1028 1029 if (list_empty(&virt_dev->cmd_list)) 1030 return 0; 1031 1032 command = list_entry(virt_dev->cmd_list.next, 1033 struct xhci_command, cmd_list); 1034 if (xhci->cmd_ring->dequeue != command->command_trb) 1035 return 0; 1036 1037 command->status = GET_COMP_CODE(le32_to_cpu(event->status)); 1038 list_del(&command->cmd_list); 1039 if (command->completion) 1040 complete(command->completion); 1041 else 1042 xhci_free_command(xhci, command); 1043 return 1; 1044 } 1045 1046 static void handle_cmd_completion(struct xhci_hcd *xhci, 1047 struct xhci_event_cmd *event) 1048 { 1049 int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); 1050 u64 cmd_dma; 1051 dma_addr_t cmd_dequeue_dma; 1052 struct xhci_input_control_ctx *ctrl_ctx; 1053 struct xhci_virt_device *virt_dev; 1054 unsigned int ep_index; 1055 struct xhci_ring *ep_ring; 1056 unsigned int ep_state; 1057 1058 cmd_dma = le64_to_cpu(event->cmd_trb); 1059 cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, 1060 xhci->cmd_ring->dequeue); 1061 /* Is the command ring deq ptr out of sync with the deq seg ptr? */ 1062 if (cmd_dequeue_dma == 0) { 1063 xhci->error_bitmask |= 1 << 4; 1064 return; 1065 } 1066 /* Does the DMA address match our internal dequeue pointer address? */ 1067 if (cmd_dma != (u64) cmd_dequeue_dma) { 1068 xhci->error_bitmask |= 1 << 5; 1069 return; 1070 } 1071 switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]) 1072 & TRB_TYPE_BITMASK) { 1073 case TRB_TYPE(TRB_ENABLE_SLOT): 1074 if (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_SUCCESS) 1075 xhci->slot_id = slot_id; 1076 else 1077 xhci->slot_id = 0; 1078 complete(&xhci->addr_dev); 1079 break; 1080 case TRB_TYPE(TRB_DISABLE_SLOT): 1081 if (xhci->devs[slot_id]) { 1082 if (xhci->quirks & XHCI_EP_LIMIT_QUIRK) 1083 /* Delete default control endpoint resources */ 1084 xhci_free_device_endpoint_resources(xhci, 1085 xhci->devs[slot_id], true); 1086 xhci_free_virt_device(xhci, slot_id); 1087 } 1088 break; 1089 case TRB_TYPE(TRB_CONFIG_EP): 1090 virt_dev = xhci->devs[slot_id]; 1091 if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event)) 1092 break; 1093 /* 1094 * Configure endpoint commands can come from the USB core 1095 * configuration or alt setting changes, or because the HW 1096 * needed an extra configure endpoint command after a reset 1097 * endpoint command or streams were being configured. 1098 * If the command was for a halted endpoint, the xHCI driver 1099 * is not waiting on the configure endpoint command. 1100 */ 1101 ctrl_ctx = xhci_get_input_control_ctx(xhci, 1102 virt_dev->in_ctx); 1103 /* Input ctx add_flags are the endpoint index plus one */ 1104 ep_index = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags)) - 1; 1105 /* A usb_set_interface() call directly after clearing a halted 1106 * condition may race on this quirky hardware. Not worth 1107 * worrying about, since this is prototype hardware. Not sure 1108 * if this will work for streams, but streams support was 1109 * untested on this prototype. 1110 */ 1111 if (xhci->quirks & XHCI_RESET_EP_QUIRK && 1112 ep_index != (unsigned int) -1 && 1113 le32_to_cpu(ctrl_ctx->add_flags) - SLOT_FLAG == 1114 le32_to_cpu(ctrl_ctx->drop_flags)) { 1115 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; 1116 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; 1117 if (!(ep_state & EP_HALTED)) 1118 goto bandwidth_change; 1119 xhci_dbg(xhci, "Completed config ep cmd - " 1120 "last ep index = %d, state = %d\n", 1121 ep_index, ep_state); 1122 /* Clear internal halted state and restart ring(s) */ 1123 xhci->devs[slot_id]->eps[ep_index].ep_state &= 1124 ~EP_HALTED; 1125 ring_doorbell_for_active_rings(xhci, slot_id, ep_index); 1126 break; 1127 } 1128 bandwidth_change: 1129 xhci_dbg(xhci, "Completed config ep cmd\n"); 1130 xhci->devs[slot_id]->cmd_status = 1131 GET_COMP_CODE(le32_to_cpu(event->status)); 1132 complete(&xhci->devs[slot_id]->cmd_completion); 1133 break; 1134 case TRB_TYPE(TRB_EVAL_CONTEXT): 1135 virt_dev = xhci->devs[slot_id]; 1136 if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event)) 1137 break; 1138 xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status)); 1139 complete(&xhci->devs[slot_id]->cmd_completion); 1140 break; 1141 case TRB_TYPE(TRB_ADDR_DEV): 1142 xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status)); 1143 complete(&xhci->addr_dev); 1144 break; 1145 case TRB_TYPE(TRB_STOP_RING): 1146 handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue, event); 1147 break; 1148 case TRB_TYPE(TRB_SET_DEQ): 1149 handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue); 1150 break; 1151 case TRB_TYPE(TRB_CMD_NOOP): 1152 break; 1153 case TRB_TYPE(TRB_RESET_EP): 1154 handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue); 1155 break; 1156 case TRB_TYPE(TRB_RESET_DEV): 1157 xhci_dbg(xhci, "Completed reset device command.\n"); 1158 slot_id = TRB_TO_SLOT_ID( 1159 le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])); 1160 virt_dev = xhci->devs[slot_id]; 1161 if (virt_dev) 1162 handle_cmd_in_cmd_wait_list(xhci, virt_dev, event); 1163 else 1164 xhci_warn(xhci, "Reset device command completion " 1165 "for disabled slot %u\n", slot_id); 1166 break; 1167 case TRB_TYPE(TRB_NEC_GET_FW): 1168 if (!(xhci->quirks & XHCI_NEC_HOST)) { 1169 xhci->error_bitmask |= 1 << 6; 1170 break; 1171 } 1172 xhci_dbg(xhci, "NEC firmware version %2x.%02x\n", 1173 NEC_FW_MAJOR(le32_to_cpu(event->status)), 1174 NEC_FW_MINOR(le32_to_cpu(event->status))); 1175 break; 1176 default: 1177 /* Skip over unknown commands on the event ring */ 1178 xhci->error_bitmask |= 1 << 6; 1179 break; 1180 } 1181 inc_deq(xhci, xhci->cmd_ring, false); 1182 } 1183 1184 static void handle_vendor_event(struct xhci_hcd *xhci, 1185 union xhci_trb *event) 1186 { 1187 u32 trb_type; 1188 1189 trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3])); 1190 xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type); 1191 if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST)) 1192 handle_cmd_completion(xhci, &event->event_cmd); 1193 } 1194 1195 /* @port_id: the one-based port ID from the hardware (indexed from array of all 1196 * port registers -- USB 3.0 and USB 2.0). 1197 * 1198 * Returns a zero-based port number, which is suitable for indexing into each of 1199 * the split roothubs' port arrays and bus state arrays. 1200 */ 1201 static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd, 1202 struct xhci_hcd *xhci, u32 port_id) 1203 { 1204 unsigned int i; 1205 unsigned int num_similar_speed_ports = 0; 1206 1207 /* port_id from the hardware is 1-based, but port_array[], usb3_ports[], 1208 * and usb2_ports are 0-based indexes. Count the number of similar 1209 * speed ports, up to 1 port before this port. 1210 */ 1211 for (i = 0; i < (port_id - 1); i++) { 1212 u8 port_speed = xhci->port_array[i]; 1213 1214 /* 1215 * Skip ports that don't have known speeds, or have duplicate 1216 * Extended Capabilities port speed entries. 1217 */ 1218 if (port_speed == 0 || port_speed == DUPLICATE_ENTRY) 1219 continue; 1220 1221 /* 1222 * USB 3.0 ports are always under a USB 3.0 hub. USB 2.0 and 1223 * 1.1 ports are under the USB 2.0 hub. If the port speed 1224 * matches the device speed, it's a similar speed port. 1225 */ 1226 if ((port_speed == 0x03) == (hcd->speed == HCD_USB3)) 1227 num_similar_speed_ports++; 1228 } 1229 return num_similar_speed_ports; 1230 } 1231 1232 static void handle_port_status(struct xhci_hcd *xhci, 1233 union xhci_trb *event) 1234 { 1235 struct usb_hcd *hcd; 1236 u32 port_id; 1237 u32 temp, temp1; 1238 int max_ports; 1239 int slot_id; 1240 unsigned int faked_port_index; 1241 u8 major_revision; 1242 struct xhci_bus_state *bus_state; 1243 __le32 __iomem **port_array; 1244 bool bogus_port_status = false; 1245 1246 /* Port status change events always have a successful completion code */ 1247 if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) { 1248 xhci_warn(xhci, "WARN: xHC returned failed port status event\n"); 1249 xhci->error_bitmask |= 1 << 8; 1250 } 1251 port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0])); 1252 xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id); 1253 1254 max_ports = HCS_MAX_PORTS(xhci->hcs_params1); 1255 if ((port_id <= 0) || (port_id > max_ports)) { 1256 xhci_warn(xhci, "Invalid port id %d\n", port_id); 1257 bogus_port_status = true; 1258 goto cleanup; 1259 } 1260 1261 /* Figure out which usb_hcd this port is attached to: 1262 * is it a USB 3.0 port or a USB 2.0/1.1 port? 1263 */ 1264 major_revision = xhci->port_array[port_id - 1]; 1265 if (major_revision == 0) { 1266 xhci_warn(xhci, "Event for port %u not in " 1267 "Extended Capabilities, ignoring.\n", 1268 port_id); 1269 bogus_port_status = true; 1270 goto cleanup; 1271 } 1272 if (major_revision == DUPLICATE_ENTRY) { 1273 xhci_warn(xhci, "Event for port %u duplicated in" 1274 "Extended Capabilities, ignoring.\n", 1275 port_id); 1276 bogus_port_status = true; 1277 goto cleanup; 1278 } 1279 1280 /* 1281 * Hardware port IDs reported by a Port Status Change Event include USB 1282 * 3.0 and USB 2.0 ports. We want to check if the port has reported a 1283 * resume event, but we first need to translate the hardware port ID 1284 * into the index into the ports on the correct split roothub, and the 1285 * correct bus_state structure. 1286 */ 1287 /* Find the right roothub. */ 1288 hcd = xhci_to_hcd(xhci); 1289 if ((major_revision == 0x03) != (hcd->speed == HCD_USB3)) 1290 hcd = xhci->shared_hcd; 1291 bus_state = &xhci->bus_state[hcd_index(hcd)]; 1292 if (hcd->speed == HCD_USB3) 1293 port_array = xhci->usb3_ports; 1294 else 1295 port_array = xhci->usb2_ports; 1296 /* Find the faked port hub number */ 1297 faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci, 1298 port_id); 1299 1300 temp = xhci_readl(xhci, port_array[faked_port_index]); 1301 if (hcd->state == HC_STATE_SUSPENDED) { 1302 xhci_dbg(xhci, "resume root hub\n"); 1303 usb_hcd_resume_root_hub(hcd); 1304 } 1305 1306 if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) { 1307 xhci_dbg(xhci, "port resume event for port %d\n", port_id); 1308 1309 temp1 = xhci_readl(xhci, &xhci->op_regs->command); 1310 if (!(temp1 & CMD_RUN)) { 1311 xhci_warn(xhci, "xHC is not running.\n"); 1312 goto cleanup; 1313 } 1314 1315 if (DEV_SUPERSPEED(temp)) { 1316 xhci_dbg(xhci, "resume SS port %d\n", port_id); 1317 temp = xhci_port_state_to_neutral(temp); 1318 temp &= ~PORT_PLS_MASK; 1319 temp |= PORT_LINK_STROBE | XDEV_U0; 1320 xhci_writel(xhci, temp, port_array[faked_port_index]); 1321 slot_id = xhci_find_slot_id_by_port(hcd, xhci, 1322 faked_port_index); 1323 if (!slot_id) { 1324 xhci_dbg(xhci, "slot_id is zero\n"); 1325 goto cleanup; 1326 } 1327 xhci_ring_device(xhci, slot_id); 1328 xhci_dbg(xhci, "resume SS port %d finished\n", port_id); 1329 /* Clear PORT_PLC */ 1330 temp = xhci_readl(xhci, port_array[faked_port_index]); 1331 temp = xhci_port_state_to_neutral(temp); 1332 temp |= PORT_PLC; 1333 xhci_writel(xhci, temp, port_array[faked_port_index]); 1334 } else { 1335 xhci_dbg(xhci, "resume HS port %d\n", port_id); 1336 bus_state->resume_done[faked_port_index] = jiffies + 1337 msecs_to_jiffies(20); 1338 mod_timer(&hcd->rh_timer, 1339 bus_state->resume_done[faked_port_index]); 1340 /* Do the rest in GetPortStatus */ 1341 } 1342 } 1343 1344 cleanup: 1345 /* Update event ring dequeue pointer before dropping the lock */ 1346 inc_deq(xhci, xhci->event_ring, true); 1347 1348 /* Don't make the USB core poll the roothub if we got a bad port status 1349 * change event. Besides, at that point we can't tell which roothub 1350 * (USB 2.0 or USB 3.0) to kick. 1351 */ 1352 if (bogus_port_status) 1353 return; 1354 1355 spin_unlock(&xhci->lock); 1356 /* Pass this up to the core */ 1357 usb_hcd_poll_rh_status(hcd); 1358 spin_lock(&xhci->lock); 1359 } 1360 1361 /* 1362 * This TD is defined by the TRBs starting at start_trb in start_seg and ending 1363 * at end_trb, which may be in another segment. If the suspect DMA address is a 1364 * TRB in this TD, this function returns that TRB's segment. Otherwise it 1365 * returns 0. 1366 */ 1367 struct xhci_segment *trb_in_td(struct xhci_segment *start_seg, 1368 union xhci_trb *start_trb, 1369 union xhci_trb *end_trb, 1370 dma_addr_t suspect_dma) 1371 { 1372 dma_addr_t start_dma; 1373 dma_addr_t end_seg_dma; 1374 dma_addr_t end_trb_dma; 1375 struct xhci_segment *cur_seg; 1376 1377 start_dma = xhci_trb_virt_to_dma(start_seg, start_trb); 1378 cur_seg = start_seg; 1379 1380 do { 1381 if (start_dma == 0) 1382 return NULL; 1383 /* We may get an event for a Link TRB in the middle of a TD */ 1384 end_seg_dma = xhci_trb_virt_to_dma(cur_seg, 1385 &cur_seg->trbs[TRBS_PER_SEGMENT - 1]); 1386 /* If the end TRB isn't in this segment, this is set to 0 */ 1387 end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb); 1388 1389 if (end_trb_dma > 0) { 1390 /* The end TRB is in this segment, so suspect should be here */ 1391 if (start_dma <= end_trb_dma) { 1392 if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma) 1393 return cur_seg; 1394 } else { 1395 /* Case for one segment with 1396 * a TD wrapped around to the top 1397 */ 1398 if ((suspect_dma >= start_dma && 1399 suspect_dma <= end_seg_dma) || 1400 (suspect_dma >= cur_seg->dma && 1401 suspect_dma <= end_trb_dma)) 1402 return cur_seg; 1403 } 1404 return NULL; 1405 } else { 1406 /* Might still be somewhere in this segment */ 1407 if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma) 1408 return cur_seg; 1409 } 1410 cur_seg = cur_seg->next; 1411 start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]); 1412 } while (cur_seg != start_seg); 1413 1414 return NULL; 1415 } 1416 1417 static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci, 1418 unsigned int slot_id, unsigned int ep_index, 1419 unsigned int stream_id, 1420 struct xhci_td *td, union xhci_trb *event_trb) 1421 { 1422 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; 1423 ep->ep_state |= EP_HALTED; 1424 ep->stopped_td = td; 1425 ep->stopped_trb = event_trb; 1426 ep->stopped_stream = stream_id; 1427 1428 xhci_queue_reset_ep(xhci, slot_id, ep_index); 1429 xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index); 1430 1431 ep->stopped_td = NULL; 1432 ep->stopped_trb = NULL; 1433 ep->stopped_stream = 0; 1434 1435 xhci_ring_cmd_db(xhci); 1436 } 1437 1438 /* Check if an error has halted the endpoint ring. The class driver will 1439 * cleanup the halt for a non-default control endpoint if we indicate a stall. 1440 * However, a babble and other errors also halt the endpoint ring, and the class 1441 * driver won't clear the halt in that case, so we need to issue a Set Transfer 1442 * Ring Dequeue Pointer command manually. 1443 */ 1444 static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci, 1445 struct xhci_ep_ctx *ep_ctx, 1446 unsigned int trb_comp_code) 1447 { 1448 /* TRB completion codes that may require a manual halt cleanup */ 1449 if (trb_comp_code == COMP_TX_ERR || 1450 trb_comp_code == COMP_BABBLE || 1451 trb_comp_code == COMP_SPLIT_ERR) 1452 /* The 0.96 spec says a babbling control endpoint 1453 * is not halted. The 0.96 spec says it is. Some HW 1454 * claims to be 0.95 compliant, but it halts the control 1455 * endpoint anyway. Check if a babble halted the 1456 * endpoint. 1457 */ 1458 if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) == 1459 cpu_to_le32(EP_STATE_HALTED)) 1460 return 1; 1461 1462 return 0; 1463 } 1464 1465 int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code) 1466 { 1467 if (trb_comp_code >= 224 && trb_comp_code <= 255) { 1468 /* Vendor defined "informational" completion code, 1469 * treat as not-an-error. 1470 */ 1471 xhci_dbg(xhci, "Vendor defined info completion code %u\n", 1472 trb_comp_code); 1473 xhci_dbg(xhci, "Treating code as success.\n"); 1474 return 1; 1475 } 1476 return 0; 1477 } 1478 1479 /* 1480 * Finish the td processing, remove the td from td list; 1481 * Return 1 if the urb can be given back. 1482 */ 1483 static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, 1484 union xhci_trb *event_trb, struct xhci_transfer_event *event, 1485 struct xhci_virt_ep *ep, int *status, bool skip) 1486 { 1487 struct xhci_virt_device *xdev; 1488 struct xhci_ring *ep_ring; 1489 unsigned int slot_id; 1490 int ep_index; 1491 struct urb *urb = NULL; 1492 struct xhci_ep_ctx *ep_ctx; 1493 int ret = 0; 1494 struct urb_priv *urb_priv; 1495 u32 trb_comp_code; 1496 1497 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); 1498 xdev = xhci->devs[slot_id]; 1499 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; 1500 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); 1501 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 1502 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); 1503 1504 if (skip) 1505 goto td_cleanup; 1506 1507 if (trb_comp_code == COMP_STOP_INVAL || 1508 trb_comp_code == COMP_STOP) { 1509 /* The Endpoint Stop Command completion will take care of any 1510 * stopped TDs. A stopped TD may be restarted, so don't update 1511 * the ring dequeue pointer or take this TD off any lists yet. 1512 */ 1513 ep->stopped_td = td; 1514 ep->stopped_trb = event_trb; 1515 return 0; 1516 } else { 1517 if (trb_comp_code == COMP_STALL) { 1518 /* The transfer is completed from the driver's 1519 * perspective, but we need to issue a set dequeue 1520 * command for this stalled endpoint to move the dequeue 1521 * pointer past the TD. We can't do that here because 1522 * the halt condition must be cleared first. Let the 1523 * USB class driver clear the stall later. 1524 */ 1525 ep->stopped_td = td; 1526 ep->stopped_trb = event_trb; 1527 ep->stopped_stream = ep_ring->stream_id; 1528 } else if (xhci_requires_manual_halt_cleanup(xhci, 1529 ep_ctx, trb_comp_code)) { 1530 /* Other types of errors halt the endpoint, but the 1531 * class driver doesn't call usb_reset_endpoint() unless 1532 * the error is -EPIPE. Clear the halted status in the 1533 * xHCI hardware manually. 1534 */ 1535 xhci_cleanup_halted_endpoint(xhci, 1536 slot_id, ep_index, ep_ring->stream_id, 1537 td, event_trb); 1538 } else { 1539 /* Update ring dequeue pointer */ 1540 while (ep_ring->dequeue != td->last_trb) 1541 inc_deq(xhci, ep_ring, false); 1542 inc_deq(xhci, ep_ring, false); 1543 } 1544 1545 td_cleanup: 1546 /* Clean up the endpoint's TD list */ 1547 urb = td->urb; 1548 urb_priv = urb->hcpriv; 1549 1550 /* Do one last check of the actual transfer length. 1551 * If the host controller said we transferred more data than 1552 * the buffer length, urb->actual_length will be a very big 1553 * number (since it's unsigned). Play it safe and say we didn't 1554 * transfer anything. 1555 */ 1556 if (urb->actual_length > urb->transfer_buffer_length) { 1557 xhci_warn(xhci, "URB transfer length is wrong, " 1558 "xHC issue? req. len = %u, " 1559 "act. len = %u\n", 1560 urb->transfer_buffer_length, 1561 urb->actual_length); 1562 urb->actual_length = 0; 1563 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 1564 *status = -EREMOTEIO; 1565 else 1566 *status = 0; 1567 } 1568 list_del(&td->td_list); 1569 /* Was this TD slated to be cancelled but completed anyway? */ 1570 if (!list_empty(&td->cancelled_td_list)) 1571 list_del(&td->cancelled_td_list); 1572 1573 urb_priv->td_cnt++; 1574 /* Giveback the urb when all the tds are completed */ 1575 if (urb_priv->td_cnt == urb_priv->length) { 1576 ret = 1; 1577 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { 1578 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--; 1579 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs 1580 == 0) { 1581 if (xhci->quirks & XHCI_AMD_PLL_FIX) 1582 usb_amd_quirk_pll_enable(); 1583 } 1584 } 1585 } 1586 } 1587 1588 return ret; 1589 } 1590 1591 /* 1592 * Process control tds, update urb status and actual_length. 1593 */ 1594 static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, 1595 union xhci_trb *event_trb, struct xhci_transfer_event *event, 1596 struct xhci_virt_ep *ep, int *status) 1597 { 1598 struct xhci_virt_device *xdev; 1599 struct xhci_ring *ep_ring; 1600 unsigned int slot_id; 1601 int ep_index; 1602 struct xhci_ep_ctx *ep_ctx; 1603 u32 trb_comp_code; 1604 1605 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); 1606 xdev = xhci->devs[slot_id]; 1607 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; 1608 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); 1609 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 1610 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); 1611 1612 xhci_debug_trb(xhci, xhci->event_ring->dequeue); 1613 switch (trb_comp_code) { 1614 case COMP_SUCCESS: 1615 if (event_trb == ep_ring->dequeue) { 1616 xhci_warn(xhci, "WARN: Success on ctrl setup TRB " 1617 "without IOC set??\n"); 1618 *status = -ESHUTDOWN; 1619 } else if (event_trb != td->last_trb) { 1620 xhci_warn(xhci, "WARN: Success on ctrl data TRB " 1621 "without IOC set??\n"); 1622 *status = -ESHUTDOWN; 1623 } else { 1624 *status = 0; 1625 } 1626 break; 1627 case COMP_SHORT_TX: 1628 xhci_warn(xhci, "WARN: short transfer on control ep\n"); 1629 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 1630 *status = -EREMOTEIO; 1631 else 1632 *status = 0; 1633 break; 1634 case COMP_STOP_INVAL: 1635 case COMP_STOP: 1636 return finish_td(xhci, td, event_trb, event, ep, status, false); 1637 default: 1638 if (!xhci_requires_manual_halt_cleanup(xhci, 1639 ep_ctx, trb_comp_code)) 1640 break; 1641 xhci_dbg(xhci, "TRB error code %u, " 1642 "halted endpoint index = %u\n", 1643 trb_comp_code, ep_index); 1644 /* else fall through */ 1645 case COMP_STALL: 1646 /* Did we transfer part of the data (middle) phase? */ 1647 if (event_trb != ep_ring->dequeue && 1648 event_trb != td->last_trb) 1649 td->urb->actual_length = 1650 td->urb->transfer_buffer_length 1651 - TRB_LEN(le32_to_cpu(event->transfer_len)); 1652 else 1653 td->urb->actual_length = 0; 1654 1655 xhci_cleanup_halted_endpoint(xhci, 1656 slot_id, ep_index, 0, td, event_trb); 1657 return finish_td(xhci, td, event_trb, event, ep, status, true); 1658 } 1659 /* 1660 * Did we transfer any data, despite the errors that might have 1661 * happened? I.e. did we get past the setup stage? 1662 */ 1663 if (event_trb != ep_ring->dequeue) { 1664 /* The event was for the status stage */ 1665 if (event_trb == td->last_trb) { 1666 if (td->urb->actual_length != 0) { 1667 /* Don't overwrite a previously set error code 1668 */ 1669 if ((*status == -EINPROGRESS || *status == 0) && 1670 (td->urb->transfer_flags 1671 & URB_SHORT_NOT_OK)) 1672 /* Did we already see a short data 1673 * stage? */ 1674 *status = -EREMOTEIO; 1675 } else { 1676 td->urb->actual_length = 1677 td->urb->transfer_buffer_length; 1678 } 1679 } else { 1680 /* Maybe the event was for the data stage? */ 1681 td->urb->actual_length = 1682 td->urb->transfer_buffer_length - 1683 TRB_LEN(le32_to_cpu(event->transfer_len)); 1684 xhci_dbg(xhci, "Waiting for status " 1685 "stage event\n"); 1686 return 0; 1687 } 1688 } 1689 1690 return finish_td(xhci, td, event_trb, event, ep, status, false); 1691 } 1692 1693 /* 1694 * Process isochronous tds, update urb packet status and actual_length. 1695 */ 1696 static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, 1697 union xhci_trb *event_trb, struct xhci_transfer_event *event, 1698 struct xhci_virt_ep *ep, int *status) 1699 { 1700 struct xhci_ring *ep_ring; 1701 struct urb_priv *urb_priv; 1702 int idx; 1703 int len = 0; 1704 union xhci_trb *cur_trb; 1705 struct xhci_segment *cur_seg; 1706 struct usb_iso_packet_descriptor *frame; 1707 u32 trb_comp_code; 1708 bool skip_td = false; 1709 1710 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); 1711 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); 1712 urb_priv = td->urb->hcpriv; 1713 idx = urb_priv->td_cnt; 1714 frame = &td->urb->iso_frame_desc[idx]; 1715 1716 /* handle completion code */ 1717 switch (trb_comp_code) { 1718 case COMP_SUCCESS: 1719 frame->status = 0; 1720 break; 1721 case COMP_SHORT_TX: 1722 frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ? 1723 -EREMOTEIO : 0; 1724 break; 1725 case COMP_BW_OVER: 1726 frame->status = -ECOMM; 1727 skip_td = true; 1728 break; 1729 case COMP_BUFF_OVER: 1730 case COMP_BABBLE: 1731 frame->status = -EOVERFLOW; 1732 skip_td = true; 1733 break; 1734 case COMP_DEV_ERR: 1735 case COMP_STALL: 1736 frame->status = -EPROTO; 1737 skip_td = true; 1738 break; 1739 case COMP_STOP: 1740 case COMP_STOP_INVAL: 1741 break; 1742 default: 1743 frame->status = -1; 1744 break; 1745 } 1746 1747 if (trb_comp_code == COMP_SUCCESS || skip_td) { 1748 frame->actual_length = frame->length; 1749 td->urb->actual_length += frame->length; 1750 } else { 1751 for (cur_trb = ep_ring->dequeue, 1752 cur_seg = ep_ring->deq_seg; cur_trb != event_trb; 1753 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { 1754 if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) && 1755 !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) 1756 len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])); 1757 } 1758 len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - 1759 TRB_LEN(le32_to_cpu(event->transfer_len)); 1760 1761 if (trb_comp_code != COMP_STOP_INVAL) { 1762 frame->actual_length = len; 1763 td->urb->actual_length += len; 1764 } 1765 } 1766 1767 return finish_td(xhci, td, event_trb, event, ep, status, false); 1768 } 1769 1770 static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, 1771 struct xhci_transfer_event *event, 1772 struct xhci_virt_ep *ep, int *status) 1773 { 1774 struct xhci_ring *ep_ring; 1775 struct urb_priv *urb_priv; 1776 struct usb_iso_packet_descriptor *frame; 1777 int idx; 1778 1779 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); 1780 urb_priv = td->urb->hcpriv; 1781 idx = urb_priv->td_cnt; 1782 frame = &td->urb->iso_frame_desc[idx]; 1783 1784 /* The transfer is partly done. */ 1785 frame->status = -EXDEV; 1786 1787 /* calc actual length */ 1788 frame->actual_length = 0; 1789 1790 /* Update ring dequeue pointer */ 1791 while (ep_ring->dequeue != td->last_trb) 1792 inc_deq(xhci, ep_ring, false); 1793 inc_deq(xhci, ep_ring, false); 1794 1795 return finish_td(xhci, td, NULL, event, ep, status, true); 1796 } 1797 1798 /* 1799 * Process bulk and interrupt tds, update urb status and actual_length. 1800 */ 1801 static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, 1802 union xhci_trb *event_trb, struct xhci_transfer_event *event, 1803 struct xhci_virt_ep *ep, int *status) 1804 { 1805 struct xhci_ring *ep_ring; 1806 union xhci_trb *cur_trb; 1807 struct xhci_segment *cur_seg; 1808 u32 trb_comp_code; 1809 1810 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); 1811 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); 1812 1813 switch (trb_comp_code) { 1814 case COMP_SUCCESS: 1815 /* Double check that the HW transferred everything. */ 1816 if (event_trb != td->last_trb) { 1817 xhci_warn(xhci, "WARN Successful completion " 1818 "on short TX\n"); 1819 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 1820 *status = -EREMOTEIO; 1821 else 1822 *status = 0; 1823 } else { 1824 *status = 0; 1825 } 1826 break; 1827 case COMP_SHORT_TX: 1828 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 1829 *status = -EREMOTEIO; 1830 else 1831 *status = 0; 1832 break; 1833 default: 1834 /* Others already handled above */ 1835 break; 1836 } 1837 if (trb_comp_code == COMP_SHORT_TX) 1838 xhci_dbg(xhci, "ep %#x - asked for %d bytes, " 1839 "%d bytes untransferred\n", 1840 td->urb->ep->desc.bEndpointAddress, 1841 td->urb->transfer_buffer_length, 1842 TRB_LEN(le32_to_cpu(event->transfer_len))); 1843 /* Fast path - was this the last TRB in the TD for this URB? */ 1844 if (event_trb == td->last_trb) { 1845 if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { 1846 td->urb->actual_length = 1847 td->urb->transfer_buffer_length - 1848 TRB_LEN(le32_to_cpu(event->transfer_len)); 1849 if (td->urb->transfer_buffer_length < 1850 td->urb->actual_length) { 1851 xhci_warn(xhci, "HC gave bad length " 1852 "of %d bytes left\n", 1853 TRB_LEN(le32_to_cpu(event->transfer_len))); 1854 td->urb->actual_length = 0; 1855 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 1856 *status = -EREMOTEIO; 1857 else 1858 *status = 0; 1859 } 1860 /* Don't overwrite a previously set error code */ 1861 if (*status == -EINPROGRESS) { 1862 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 1863 *status = -EREMOTEIO; 1864 else 1865 *status = 0; 1866 } 1867 } else { 1868 td->urb->actual_length = 1869 td->urb->transfer_buffer_length; 1870 /* Ignore a short packet completion if the 1871 * untransferred length was zero. 1872 */ 1873 if (*status == -EREMOTEIO) 1874 *status = 0; 1875 } 1876 } else { 1877 /* Slow path - walk the list, starting from the dequeue 1878 * pointer, to get the actual length transferred. 1879 */ 1880 td->urb->actual_length = 0; 1881 for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg; 1882 cur_trb != event_trb; 1883 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { 1884 if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) && 1885 !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) 1886 td->urb->actual_length += 1887 TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])); 1888 } 1889 /* If the ring didn't stop on a Link or No-op TRB, add 1890 * in the actual bytes transferred from the Normal TRB 1891 */ 1892 if (trb_comp_code != COMP_STOP_INVAL) 1893 td->urb->actual_length += 1894 TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - 1895 TRB_LEN(le32_to_cpu(event->transfer_len)); 1896 } 1897 1898 return finish_td(xhci, td, event_trb, event, ep, status, false); 1899 } 1900 1901 /* 1902 * If this function returns an error condition, it means it got a Transfer 1903 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address. 1904 * At this point, the host controller is probably hosed and should be reset. 1905 */ 1906 static int handle_tx_event(struct xhci_hcd *xhci, 1907 struct xhci_transfer_event *event) 1908 { 1909 struct xhci_virt_device *xdev; 1910 struct xhci_virt_ep *ep; 1911 struct xhci_ring *ep_ring; 1912 unsigned int slot_id; 1913 int ep_index; 1914 struct xhci_td *td = NULL; 1915 dma_addr_t event_dma; 1916 struct xhci_segment *event_seg; 1917 union xhci_trb *event_trb; 1918 struct urb *urb = NULL; 1919 int status = -EINPROGRESS; 1920 struct urb_priv *urb_priv; 1921 struct xhci_ep_ctx *ep_ctx; 1922 u32 trb_comp_code; 1923 int ret = 0; 1924 1925 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); 1926 xdev = xhci->devs[slot_id]; 1927 if (!xdev) { 1928 xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n"); 1929 return -ENODEV; 1930 } 1931 1932 /* Endpoint ID is 1 based, our index is zero based */ 1933 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; 1934 ep = &xdev->eps[ep_index]; 1935 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); 1936 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 1937 if (!ep_ring || 1938 (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) == 1939 EP_STATE_DISABLED) { 1940 xhci_err(xhci, "ERROR Transfer event for disabled endpoint " 1941 "or incorrect stream ring\n"); 1942 return -ENODEV; 1943 } 1944 1945 event_dma = le64_to_cpu(event->buffer); 1946 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); 1947 /* Look for common error cases */ 1948 switch (trb_comp_code) { 1949 /* Skip codes that require special handling depending on 1950 * transfer type 1951 */ 1952 case COMP_SUCCESS: 1953 case COMP_SHORT_TX: 1954 break; 1955 case COMP_STOP: 1956 xhci_dbg(xhci, "Stopped on Transfer TRB\n"); 1957 break; 1958 case COMP_STOP_INVAL: 1959 xhci_dbg(xhci, "Stopped on No-op or Link TRB\n"); 1960 break; 1961 case COMP_STALL: 1962 xhci_warn(xhci, "WARN: Stalled endpoint\n"); 1963 ep->ep_state |= EP_HALTED; 1964 status = -EPIPE; 1965 break; 1966 case COMP_TRB_ERR: 1967 xhci_warn(xhci, "WARN: TRB error on endpoint\n"); 1968 status = -EILSEQ; 1969 break; 1970 case COMP_SPLIT_ERR: 1971 case COMP_TX_ERR: 1972 xhci_warn(xhci, "WARN: transfer error on endpoint\n"); 1973 status = -EPROTO; 1974 break; 1975 case COMP_BABBLE: 1976 xhci_warn(xhci, "WARN: babble error on endpoint\n"); 1977 status = -EOVERFLOW; 1978 break; 1979 case COMP_DB_ERR: 1980 xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n"); 1981 status = -ENOSR; 1982 break; 1983 case COMP_BW_OVER: 1984 xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n"); 1985 break; 1986 case COMP_BUFF_OVER: 1987 xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n"); 1988 break; 1989 case COMP_UNDERRUN: 1990 /* 1991 * When the Isoch ring is empty, the xHC will generate 1992 * a Ring Overrun Event for IN Isoch endpoint or Ring 1993 * Underrun Event for OUT Isoch endpoint. 1994 */ 1995 xhci_dbg(xhci, "underrun event on endpoint\n"); 1996 if (!list_empty(&ep_ring->td_list)) 1997 xhci_dbg(xhci, "Underrun Event for slot %d ep %d " 1998 "still with TDs queued?\n", 1999 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), 2000 ep_index); 2001 goto cleanup; 2002 case COMP_OVERRUN: 2003 xhci_dbg(xhci, "overrun event on endpoint\n"); 2004 if (!list_empty(&ep_ring->td_list)) 2005 xhci_dbg(xhci, "Overrun Event for slot %d ep %d " 2006 "still with TDs queued?\n", 2007 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), 2008 ep_index); 2009 goto cleanup; 2010 case COMP_DEV_ERR: 2011 xhci_warn(xhci, "WARN: detect an incompatible device"); 2012 status = -EPROTO; 2013 break; 2014 case COMP_MISSED_INT: 2015 /* 2016 * When encounter missed service error, one or more isoc tds 2017 * may be missed by xHC. 2018 * Set skip flag of the ep_ring; Complete the missed tds as 2019 * short transfer when process the ep_ring next time. 2020 */ 2021 ep->skip = true; 2022 xhci_dbg(xhci, "Miss service interval error, set skip flag\n"); 2023 goto cleanup; 2024 default: 2025 if (xhci_is_vendor_info_code(xhci, trb_comp_code)) { 2026 status = 0; 2027 break; 2028 } 2029 xhci_warn(xhci, "ERROR Unknown event condition, HC probably " 2030 "busted\n"); 2031 goto cleanup; 2032 } 2033 2034 do { 2035 /* This TRB should be in the TD at the head of this ring's 2036 * TD list. 2037 */ 2038 if (list_empty(&ep_ring->td_list)) { 2039 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d " 2040 "with no TDs queued?\n", 2041 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), 2042 ep_index); 2043 xhci_dbg(xhci, "Event TRB with TRB type ID %u\n", 2044 (le32_to_cpu(event->flags) & 2045 TRB_TYPE_BITMASK)>>10); 2046 xhci_print_trb_offsets(xhci, (union xhci_trb *) event); 2047 if (ep->skip) { 2048 ep->skip = false; 2049 xhci_dbg(xhci, "td_list is empty while skip " 2050 "flag set. Clear skip flag.\n"); 2051 } 2052 ret = 0; 2053 goto cleanup; 2054 } 2055 2056 td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list); 2057 2058 /* Is this a TRB in the currently executing TD? */ 2059 event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue, 2060 td->last_trb, event_dma); 2061 2062 /* 2063 * Skip the Force Stopped Event. The event_trb(event_dma) of FSE 2064 * is not in the current TD pointed by ep_ring->dequeue because 2065 * that the hardware dequeue pointer still at the previous TRB 2066 * of the current TD. The previous TRB maybe a Link TD or the 2067 * last TRB of the previous TD. The command completion handle 2068 * will take care the rest. 2069 */ 2070 if (!event_seg && trb_comp_code == COMP_STOP_INVAL) { 2071 ret = 0; 2072 goto cleanup; 2073 } 2074 2075 if (!event_seg) { 2076 if (!ep->skip || 2077 !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) { 2078 /* Some host controllers give a spurious 2079 * successful event after a short transfer. 2080 * Ignore it. 2081 */ 2082 if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) && 2083 ep_ring->last_td_was_short) { 2084 ep_ring->last_td_was_short = false; 2085 ret = 0; 2086 goto cleanup; 2087 } 2088 /* HC is busted, give up! */ 2089 xhci_err(xhci, 2090 "ERROR Transfer event TRB DMA ptr not " 2091 "part of current TD\n"); 2092 return -ESHUTDOWN; 2093 } 2094 2095 ret = skip_isoc_td(xhci, td, event, ep, &status); 2096 goto cleanup; 2097 } 2098 if (trb_comp_code == COMP_SHORT_TX) 2099 ep_ring->last_td_was_short = true; 2100 else 2101 ep_ring->last_td_was_short = false; 2102 2103 if (ep->skip) { 2104 xhci_dbg(xhci, "Found td. Clear skip flag.\n"); 2105 ep->skip = false; 2106 } 2107 2108 event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / 2109 sizeof(*event_trb)]; 2110 /* 2111 * No-op TRB should not trigger interrupts. 2112 * If event_trb is a no-op TRB, it means the 2113 * corresponding TD has been cancelled. Just ignore 2114 * the TD. 2115 */ 2116 if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) { 2117 xhci_dbg(xhci, 2118 "event_trb is a no-op TRB. Skip it\n"); 2119 goto cleanup; 2120 } 2121 2122 /* Now update the urb's actual_length and give back to 2123 * the core 2124 */ 2125 if (usb_endpoint_xfer_control(&td->urb->ep->desc)) 2126 ret = process_ctrl_td(xhci, td, event_trb, event, ep, 2127 &status); 2128 else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc)) 2129 ret = process_isoc_td(xhci, td, event_trb, event, ep, 2130 &status); 2131 else 2132 ret = process_bulk_intr_td(xhci, td, event_trb, event, 2133 ep, &status); 2134 2135 cleanup: 2136 /* 2137 * Do not update event ring dequeue pointer if ep->skip is set. 2138 * Will roll back to continue process missed tds. 2139 */ 2140 if (trb_comp_code == COMP_MISSED_INT || !ep->skip) { 2141 inc_deq(xhci, xhci->event_ring, true); 2142 } 2143 2144 if (ret) { 2145 urb = td->urb; 2146 urb_priv = urb->hcpriv; 2147 /* Leave the TD around for the reset endpoint function 2148 * to use(but only if it's not a control endpoint, 2149 * since we already queued the Set TR dequeue pointer 2150 * command for stalled control endpoints). 2151 */ 2152 if (usb_endpoint_xfer_control(&urb->ep->desc) || 2153 (trb_comp_code != COMP_STALL && 2154 trb_comp_code != COMP_BABBLE)) 2155 xhci_urb_free_priv(xhci, urb_priv); 2156 2157 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb); 2158 if ((urb->actual_length != urb->transfer_buffer_length && 2159 (urb->transfer_flags & 2160 URB_SHORT_NOT_OK)) || 2161 status != 0) 2162 xhci_dbg(xhci, "Giveback URB %p, len = %d, " 2163 "expected = %x, status = %d\n", 2164 urb, urb->actual_length, 2165 urb->transfer_buffer_length, 2166 status); 2167 spin_unlock(&xhci->lock); 2168 /* EHCI, UHCI, and OHCI always unconditionally set the 2169 * urb->status of an isochronous endpoint to 0. 2170 */ 2171 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) 2172 status = 0; 2173 usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status); 2174 spin_lock(&xhci->lock); 2175 } 2176 2177 /* 2178 * If ep->skip is set, it means there are missed tds on the 2179 * endpoint ring need to take care of. 2180 * Process them as short transfer until reach the td pointed by 2181 * the event. 2182 */ 2183 } while (ep->skip && trb_comp_code != COMP_MISSED_INT); 2184 2185 return 0; 2186 } 2187 2188 /* 2189 * This function handles all OS-owned events on the event ring. It may drop 2190 * xhci->lock between event processing (e.g. to pass up port status changes). 2191 * Returns >0 for "possibly more events to process" (caller should call again), 2192 * otherwise 0 if done. In future, <0 returns should indicate error code. 2193 */ 2194 static int xhci_handle_event(struct xhci_hcd *xhci) 2195 { 2196 union xhci_trb *event; 2197 int update_ptrs = 1; 2198 int ret; 2199 2200 if (!xhci->event_ring || !xhci->event_ring->dequeue) { 2201 xhci->error_bitmask |= 1 << 1; 2202 return 0; 2203 } 2204 2205 event = xhci->event_ring->dequeue; 2206 /* Does the HC or OS own the TRB? */ 2207 if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) != 2208 xhci->event_ring->cycle_state) { 2209 xhci->error_bitmask |= 1 << 2; 2210 return 0; 2211 } 2212 2213 /* 2214 * Barrier between reading the TRB_CYCLE (valid) flag above and any 2215 * speculative reads of the event's flags/data below. 2216 */ 2217 rmb(); 2218 /* FIXME: Handle more event types. */ 2219 switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) { 2220 case TRB_TYPE(TRB_COMPLETION): 2221 handle_cmd_completion(xhci, &event->event_cmd); 2222 break; 2223 case TRB_TYPE(TRB_PORT_STATUS): 2224 handle_port_status(xhci, event); 2225 update_ptrs = 0; 2226 break; 2227 case TRB_TYPE(TRB_TRANSFER): 2228 ret = handle_tx_event(xhci, &event->trans_event); 2229 if (ret < 0) 2230 xhci->error_bitmask |= 1 << 9; 2231 else 2232 update_ptrs = 0; 2233 break; 2234 default: 2235 if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >= 2236 TRB_TYPE(48)) 2237 handle_vendor_event(xhci, event); 2238 else 2239 xhci->error_bitmask |= 1 << 3; 2240 } 2241 /* Any of the above functions may drop and re-acquire the lock, so check 2242 * to make sure a watchdog timer didn't mark the host as non-responsive. 2243 */ 2244 if (xhci->xhc_state & XHCI_STATE_DYING) { 2245 xhci_dbg(xhci, "xHCI host dying, returning from " 2246 "event handler.\n"); 2247 return 0; 2248 } 2249 2250 if (update_ptrs) 2251 /* Update SW event ring dequeue pointer */ 2252 inc_deq(xhci, xhci->event_ring, true); 2253 2254 /* Are there more items on the event ring? Caller will call us again to 2255 * check. 2256 */ 2257 return 1; 2258 } 2259 2260 /* 2261 * xHCI spec says we can get an interrupt, and if the HC has an error condition, 2262 * we might get bad data out of the event ring. Section 4.10.2.7 has a list of 2263 * indicators of an event TRB error, but we check the status *first* to be safe. 2264 */ 2265 irqreturn_t xhci_irq(struct usb_hcd *hcd) 2266 { 2267 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 2268 u32 status; 2269 union xhci_trb *trb; 2270 u64 temp_64; 2271 union xhci_trb *event_ring_deq; 2272 dma_addr_t deq; 2273 2274 spin_lock(&xhci->lock); 2275 trb = xhci->event_ring->dequeue; 2276 /* Check if the xHC generated the interrupt, or the irq is shared */ 2277 status = xhci_readl(xhci, &xhci->op_regs->status); 2278 if (status == 0xffffffff) 2279 goto hw_died; 2280 2281 if (!(status & STS_EINT)) { 2282 spin_unlock(&xhci->lock); 2283 return IRQ_NONE; 2284 } 2285 if (status & STS_FATAL) { 2286 xhci_warn(xhci, "WARNING: Host System Error\n"); 2287 xhci_halt(xhci); 2288 hw_died: 2289 spin_unlock(&xhci->lock); 2290 return -ESHUTDOWN; 2291 } 2292 2293 /* 2294 * Clear the op reg interrupt status first, 2295 * so we can receive interrupts from other MSI-X interrupters. 2296 * Write 1 to clear the interrupt status. 2297 */ 2298 status |= STS_EINT; 2299 xhci_writel(xhci, status, &xhci->op_regs->status); 2300 /* FIXME when MSI-X is supported and there are multiple vectors */ 2301 /* Clear the MSI-X event interrupt status */ 2302 2303 if (hcd->irq != -1) { 2304 u32 irq_pending; 2305 /* Acknowledge the PCI interrupt */ 2306 irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending); 2307 irq_pending |= 0x3; 2308 xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending); 2309 } 2310 2311 if (xhci->xhc_state & XHCI_STATE_DYING) { 2312 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. " 2313 "Shouldn't IRQs be disabled?\n"); 2314 /* Clear the event handler busy flag (RW1C); 2315 * the event ring should be empty. 2316 */ 2317 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); 2318 xhci_write_64(xhci, temp_64 | ERST_EHB, 2319 &xhci->ir_set->erst_dequeue); 2320 spin_unlock(&xhci->lock); 2321 2322 return IRQ_HANDLED; 2323 } 2324 2325 event_ring_deq = xhci->event_ring->dequeue; 2326 /* FIXME this should be a delayed service routine 2327 * that clears the EHB. 2328 */ 2329 while (xhci_handle_event(xhci) > 0) {} 2330 2331 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); 2332 /* If necessary, update the HW's version of the event ring deq ptr. */ 2333 if (event_ring_deq != xhci->event_ring->dequeue) { 2334 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, 2335 xhci->event_ring->dequeue); 2336 if (deq == 0) 2337 xhci_warn(xhci, "WARN something wrong with SW event " 2338 "ring dequeue ptr.\n"); 2339 /* Update HC event ring dequeue pointer */ 2340 temp_64 &= ERST_PTR_MASK; 2341 temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK); 2342 } 2343 2344 /* Clear the event handler busy flag (RW1C); event ring is empty. */ 2345 temp_64 |= ERST_EHB; 2346 xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue); 2347 2348 spin_unlock(&xhci->lock); 2349 2350 return IRQ_HANDLED; 2351 } 2352 2353 irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd) 2354 { 2355 irqreturn_t ret; 2356 struct xhci_hcd *xhci; 2357 2358 xhci = hcd_to_xhci(hcd); 2359 set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags); 2360 if (xhci->shared_hcd) 2361 set_bit(HCD_FLAG_SAW_IRQ, &xhci->shared_hcd->flags); 2362 2363 ret = xhci_irq(hcd); 2364 2365 return ret; 2366 } 2367 2368 /**** Endpoint Ring Operations ****/ 2369 2370 /* 2371 * Generic function for queueing a TRB on a ring. 2372 * The caller must have checked to make sure there's room on the ring. 2373 * 2374 * @more_trbs_coming: Will you enqueue more TRBs before calling 2375 * prepare_transfer()? 2376 */ 2377 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, 2378 bool consumer, bool more_trbs_coming, 2379 u32 field1, u32 field2, u32 field3, u32 field4) 2380 { 2381 struct xhci_generic_trb *trb; 2382 2383 trb = &ring->enqueue->generic; 2384 trb->field[0] = cpu_to_le32(field1); 2385 trb->field[1] = cpu_to_le32(field2); 2386 trb->field[2] = cpu_to_le32(field3); 2387 trb->field[3] = cpu_to_le32(field4); 2388 inc_enq(xhci, ring, consumer, more_trbs_coming); 2389 } 2390 2391 /* 2392 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs. 2393 * FIXME allocate segments if the ring is full. 2394 */ 2395 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, 2396 u32 ep_state, unsigned int num_trbs, gfp_t mem_flags) 2397 { 2398 /* Make sure the endpoint has been added to xHC schedule */ 2399 switch (ep_state) { 2400 case EP_STATE_DISABLED: 2401 /* 2402 * USB core changed config/interfaces without notifying us, 2403 * or hardware is reporting the wrong state. 2404 */ 2405 xhci_warn(xhci, "WARN urb submitted to disabled ep\n"); 2406 return -ENOENT; 2407 case EP_STATE_ERROR: 2408 xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n"); 2409 /* FIXME event handling code for error needs to clear it */ 2410 /* XXX not sure if this should be -ENOENT or not */ 2411 return -EINVAL; 2412 case EP_STATE_HALTED: 2413 xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n"); 2414 case EP_STATE_STOPPED: 2415 case EP_STATE_RUNNING: 2416 break; 2417 default: 2418 xhci_err(xhci, "ERROR unknown endpoint state for ep\n"); 2419 /* 2420 * FIXME issue Configure Endpoint command to try to get the HC 2421 * back into a known state. 2422 */ 2423 return -EINVAL; 2424 } 2425 if (!room_on_ring(xhci, ep_ring, num_trbs)) { 2426 /* FIXME allocate more room */ 2427 xhci_err(xhci, "ERROR no room on ep ring\n"); 2428 return -ENOMEM; 2429 } 2430 2431 if (enqueue_is_link_trb(ep_ring)) { 2432 struct xhci_ring *ring = ep_ring; 2433 union xhci_trb *next; 2434 2435 next = ring->enqueue; 2436 2437 while (last_trb(xhci, ring, ring->enq_seg, next)) { 2438 /* If we're not dealing with 0.95 hardware, 2439 * clear the chain bit. 2440 */ 2441 if (!xhci_link_trb_quirk(xhci)) 2442 next->link.control &= cpu_to_le32(~TRB_CHAIN); 2443 else 2444 next->link.control |= cpu_to_le32(TRB_CHAIN); 2445 2446 wmb(); 2447 next->link.control ^= cpu_to_le32(TRB_CYCLE); 2448 2449 /* Toggle the cycle bit after the last ring segment. */ 2450 if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) { 2451 ring->cycle_state = (ring->cycle_state ? 0 : 1); 2452 if (!in_interrupt()) { 2453 xhci_dbg(xhci, "queue_trb: Toggle cycle " 2454 "state for ring %p = %i\n", 2455 ring, (unsigned int)ring->cycle_state); 2456 } 2457 } 2458 ring->enq_seg = ring->enq_seg->next; 2459 ring->enqueue = ring->enq_seg->trbs; 2460 next = ring->enqueue; 2461 } 2462 } 2463 2464 return 0; 2465 } 2466 2467 static int prepare_transfer(struct xhci_hcd *xhci, 2468 struct xhci_virt_device *xdev, 2469 unsigned int ep_index, 2470 unsigned int stream_id, 2471 unsigned int num_trbs, 2472 struct urb *urb, 2473 unsigned int td_index, 2474 gfp_t mem_flags) 2475 { 2476 int ret; 2477 struct urb_priv *urb_priv; 2478 struct xhci_td *td; 2479 struct xhci_ring *ep_ring; 2480 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 2481 2482 ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id); 2483 if (!ep_ring) { 2484 xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n", 2485 stream_id); 2486 return -EINVAL; 2487 } 2488 2489 ret = prepare_ring(xhci, ep_ring, 2490 le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK, 2491 num_trbs, mem_flags); 2492 if (ret) 2493 return ret; 2494 2495 urb_priv = urb->hcpriv; 2496 td = urb_priv->td[td_index]; 2497 2498 INIT_LIST_HEAD(&td->td_list); 2499 INIT_LIST_HEAD(&td->cancelled_td_list); 2500 2501 if (td_index == 0) { 2502 ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb); 2503 if (unlikely(ret)) { 2504 xhci_urb_free_priv(xhci, urb_priv); 2505 urb->hcpriv = NULL; 2506 return ret; 2507 } 2508 } 2509 2510 td->urb = urb; 2511 /* Add this TD to the tail of the endpoint ring's TD list */ 2512 list_add_tail(&td->td_list, &ep_ring->td_list); 2513 td->start_seg = ep_ring->enq_seg; 2514 td->first_trb = ep_ring->enqueue; 2515 2516 urb_priv->td[td_index] = td; 2517 2518 return 0; 2519 } 2520 2521 static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb) 2522 { 2523 int num_sgs, num_trbs, running_total, temp, i; 2524 struct scatterlist *sg; 2525 2526 sg = NULL; 2527 num_sgs = urb->num_sgs; 2528 temp = urb->transfer_buffer_length; 2529 2530 xhci_dbg(xhci, "count sg list trbs: \n"); 2531 num_trbs = 0; 2532 for_each_sg(urb->sg, sg, num_sgs, i) { 2533 unsigned int previous_total_trbs = num_trbs; 2534 unsigned int len = sg_dma_len(sg); 2535 2536 /* Scatter gather list entries may cross 64KB boundaries */ 2537 running_total = TRB_MAX_BUFF_SIZE - 2538 (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1)); 2539 running_total &= TRB_MAX_BUFF_SIZE - 1; 2540 if (running_total != 0) 2541 num_trbs++; 2542 2543 /* How many more 64KB chunks to transfer, how many more TRBs? */ 2544 while (running_total < sg_dma_len(sg) && running_total < temp) { 2545 num_trbs++; 2546 running_total += TRB_MAX_BUFF_SIZE; 2547 } 2548 xhci_dbg(xhci, " sg #%d: dma = %#llx, len = %#x (%d), num_trbs = %d\n", 2549 i, (unsigned long long)sg_dma_address(sg), 2550 len, len, num_trbs - previous_total_trbs); 2551 2552 len = min_t(int, len, temp); 2553 temp -= len; 2554 if (temp == 0) 2555 break; 2556 } 2557 xhci_dbg(xhci, "\n"); 2558 if (!in_interrupt()) 2559 xhci_dbg(xhci, "ep %#x - urb len = %d, sglist used, " 2560 "num_trbs = %d\n", 2561 urb->ep->desc.bEndpointAddress, 2562 urb->transfer_buffer_length, 2563 num_trbs); 2564 return num_trbs; 2565 } 2566 2567 static void check_trb_math(struct urb *urb, int num_trbs, int running_total) 2568 { 2569 if (num_trbs != 0) 2570 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of " 2571 "TRBs, %d left\n", __func__, 2572 urb->ep->desc.bEndpointAddress, num_trbs); 2573 if (running_total != urb->transfer_buffer_length) 2574 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, " 2575 "queued %#x (%d), asked for %#x (%d)\n", 2576 __func__, 2577 urb->ep->desc.bEndpointAddress, 2578 running_total, running_total, 2579 urb->transfer_buffer_length, 2580 urb->transfer_buffer_length); 2581 } 2582 2583 static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id, 2584 unsigned int ep_index, unsigned int stream_id, int start_cycle, 2585 struct xhci_generic_trb *start_trb) 2586 { 2587 /* 2588 * Pass all the TRBs to the hardware at once and make sure this write 2589 * isn't reordered. 2590 */ 2591 wmb(); 2592 if (start_cycle) 2593 start_trb->field[3] |= cpu_to_le32(start_cycle); 2594 else 2595 start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE); 2596 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id); 2597 } 2598 2599 /* 2600 * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt 2601 * endpoint is to be serviced, the xHC will consume (at most) one TD. A TD 2602 * (comprised of sg list entries) can take several service intervals to 2603 * transmit. 2604 */ 2605 int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 2606 struct urb *urb, int slot_id, unsigned int ep_index) 2607 { 2608 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, 2609 xhci->devs[slot_id]->out_ctx, ep_index); 2610 int xhci_interval; 2611 int ep_interval; 2612 2613 xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info)); 2614 ep_interval = urb->interval; 2615 /* Convert to microframes */ 2616 if (urb->dev->speed == USB_SPEED_LOW || 2617 urb->dev->speed == USB_SPEED_FULL) 2618 ep_interval *= 8; 2619 /* FIXME change this to a warning and a suggestion to use the new API 2620 * to set the polling interval (once the API is added). 2621 */ 2622 if (xhci_interval != ep_interval) { 2623 if (printk_ratelimit()) 2624 dev_dbg(&urb->dev->dev, "Driver uses different interval" 2625 " (%d microframe%s) than xHCI " 2626 "(%d microframe%s)\n", 2627 ep_interval, 2628 ep_interval == 1 ? "" : "s", 2629 xhci_interval, 2630 xhci_interval == 1 ? "" : "s"); 2631 urb->interval = xhci_interval; 2632 /* Convert back to frames for LS/FS devices */ 2633 if (urb->dev->speed == USB_SPEED_LOW || 2634 urb->dev->speed == USB_SPEED_FULL) 2635 urb->interval /= 8; 2636 } 2637 return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index); 2638 } 2639 2640 /* 2641 * The TD size is the number of bytes remaining in the TD (including this TRB), 2642 * right shifted by 10. 2643 * It must fit in bits 21:17, so it can't be bigger than 31. 2644 */ 2645 static u32 xhci_td_remainder(unsigned int remainder) 2646 { 2647 u32 max = (1 << (21 - 17 + 1)) - 1; 2648 2649 if ((remainder >> 10) >= max) 2650 return max << 17; 2651 else 2652 return (remainder >> 10) << 17; 2653 } 2654 2655 /* 2656 * For xHCI 1.0 host controllers, TD size is the number of packets remaining in 2657 * the TD (*not* including this TRB). 2658 * 2659 * Total TD packet count = total_packet_count = 2660 * roundup(TD size in bytes / wMaxPacketSize) 2661 * 2662 * Packets transferred up to and including this TRB = packets_transferred = 2663 * rounddown(total bytes transferred including this TRB / wMaxPacketSize) 2664 * 2665 * TD size = total_packet_count - packets_transferred 2666 * 2667 * It must fit in bits 21:17, so it can't be bigger than 31. 2668 */ 2669 2670 static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len, 2671 unsigned int total_packet_count, struct urb *urb) 2672 { 2673 int packets_transferred; 2674 2675 /* All the TRB queueing functions don't count the current TRB in 2676 * running_total. 2677 */ 2678 packets_transferred = (running_total + trb_buff_len) / 2679 le16_to_cpu(urb->ep->desc.wMaxPacketSize); 2680 2681 return xhci_td_remainder(total_packet_count - packets_transferred); 2682 } 2683 2684 static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 2685 struct urb *urb, int slot_id, unsigned int ep_index) 2686 { 2687 struct xhci_ring *ep_ring; 2688 unsigned int num_trbs; 2689 struct urb_priv *urb_priv; 2690 struct xhci_td *td; 2691 struct scatterlist *sg; 2692 int num_sgs; 2693 int trb_buff_len, this_sg_len, running_total; 2694 unsigned int total_packet_count; 2695 bool first_trb; 2696 u64 addr; 2697 bool more_trbs_coming; 2698 2699 struct xhci_generic_trb *start_trb; 2700 int start_cycle; 2701 2702 ep_ring = xhci_urb_to_transfer_ring(xhci, urb); 2703 if (!ep_ring) 2704 return -EINVAL; 2705 2706 num_trbs = count_sg_trbs_needed(xhci, urb); 2707 num_sgs = urb->num_sgs; 2708 total_packet_count = roundup(urb->transfer_buffer_length, 2709 le16_to_cpu(urb->ep->desc.wMaxPacketSize)); 2710 2711 trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id], 2712 ep_index, urb->stream_id, 2713 num_trbs, urb, 0, mem_flags); 2714 if (trb_buff_len < 0) 2715 return trb_buff_len; 2716 2717 urb_priv = urb->hcpriv; 2718 td = urb_priv->td[0]; 2719 2720 /* 2721 * Don't give the first TRB to the hardware (by toggling the cycle bit) 2722 * until we've finished creating all the other TRBs. The ring's cycle 2723 * state may change as we enqueue the other TRBs, so save it too. 2724 */ 2725 start_trb = &ep_ring->enqueue->generic; 2726 start_cycle = ep_ring->cycle_state; 2727 2728 running_total = 0; 2729 /* 2730 * How much data is in the first TRB? 2731 * 2732 * There are three forces at work for TRB buffer pointers and lengths: 2733 * 1. We don't want to walk off the end of this sg-list entry buffer. 2734 * 2. The transfer length that the driver requested may be smaller than 2735 * the amount of memory allocated for this scatter-gather list. 2736 * 3. TRBs buffers can't cross 64KB boundaries. 2737 */ 2738 sg = urb->sg; 2739 addr = (u64) sg_dma_address(sg); 2740 this_sg_len = sg_dma_len(sg); 2741 trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1)); 2742 trb_buff_len = min_t(int, trb_buff_len, this_sg_len); 2743 if (trb_buff_len > urb->transfer_buffer_length) 2744 trb_buff_len = urb->transfer_buffer_length; 2745 xhci_dbg(xhci, "First length to xfer from 1st sglist entry = %u\n", 2746 trb_buff_len); 2747 2748 first_trb = true; 2749 /* Queue the first TRB, even if it's zero-length */ 2750 do { 2751 u32 field = 0; 2752 u32 length_field = 0; 2753 u32 remainder = 0; 2754 2755 /* Don't change the cycle bit of the first TRB until later */ 2756 if (first_trb) { 2757 first_trb = false; 2758 if (start_cycle == 0) 2759 field |= 0x1; 2760 } else 2761 field |= ep_ring->cycle_state; 2762 2763 /* Chain all the TRBs together; clear the chain bit in the last 2764 * TRB to indicate it's the last TRB in the chain. 2765 */ 2766 if (num_trbs > 1) { 2767 field |= TRB_CHAIN; 2768 } else { 2769 /* FIXME - add check for ZERO_PACKET flag before this */ 2770 td->last_trb = ep_ring->enqueue; 2771 field |= TRB_IOC; 2772 } 2773 2774 /* Only set interrupt on short packet for IN endpoints */ 2775 if (usb_urb_dir_in(urb)) 2776 field |= TRB_ISP; 2777 2778 xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), " 2779 "64KB boundary at %#x, end dma = %#x\n", 2780 (unsigned int) addr, trb_buff_len, trb_buff_len, 2781 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), 2782 (unsigned int) addr + trb_buff_len); 2783 if (TRB_MAX_BUFF_SIZE - 2784 (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) { 2785 xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n"); 2786 xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n", 2787 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), 2788 (unsigned int) addr + trb_buff_len); 2789 } 2790 2791 /* Set the TRB length, TD size, and interrupter fields. */ 2792 if (xhci->hci_version < 0x100) { 2793 remainder = xhci_td_remainder( 2794 urb->transfer_buffer_length - 2795 running_total); 2796 } else { 2797 remainder = xhci_v1_0_td_remainder(running_total, 2798 trb_buff_len, total_packet_count, urb); 2799 } 2800 length_field = TRB_LEN(trb_buff_len) | 2801 remainder | 2802 TRB_INTR_TARGET(0); 2803 2804 if (num_trbs > 1) 2805 more_trbs_coming = true; 2806 else 2807 more_trbs_coming = false; 2808 queue_trb(xhci, ep_ring, false, more_trbs_coming, 2809 lower_32_bits(addr), 2810 upper_32_bits(addr), 2811 length_field, 2812 field | TRB_TYPE(TRB_NORMAL)); 2813 --num_trbs; 2814 running_total += trb_buff_len; 2815 2816 /* Calculate length for next transfer -- 2817 * Are we done queueing all the TRBs for this sg entry? 2818 */ 2819 this_sg_len -= trb_buff_len; 2820 if (this_sg_len == 0) { 2821 --num_sgs; 2822 if (num_sgs == 0) 2823 break; 2824 sg = sg_next(sg); 2825 addr = (u64) sg_dma_address(sg); 2826 this_sg_len = sg_dma_len(sg); 2827 } else { 2828 addr += trb_buff_len; 2829 } 2830 2831 trb_buff_len = TRB_MAX_BUFF_SIZE - 2832 (addr & (TRB_MAX_BUFF_SIZE - 1)); 2833 trb_buff_len = min_t(int, trb_buff_len, this_sg_len); 2834 if (running_total + trb_buff_len > urb->transfer_buffer_length) 2835 trb_buff_len = 2836 urb->transfer_buffer_length - running_total; 2837 } while (running_total < urb->transfer_buffer_length); 2838 2839 check_trb_math(urb, num_trbs, running_total); 2840 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, 2841 start_cycle, start_trb); 2842 return 0; 2843 } 2844 2845 /* This is very similar to what ehci-q.c qtd_fill() does */ 2846 int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 2847 struct urb *urb, int slot_id, unsigned int ep_index) 2848 { 2849 struct xhci_ring *ep_ring; 2850 struct urb_priv *urb_priv; 2851 struct xhci_td *td; 2852 int num_trbs; 2853 struct xhci_generic_trb *start_trb; 2854 bool first_trb; 2855 bool more_trbs_coming; 2856 int start_cycle; 2857 u32 field, length_field; 2858 2859 int running_total, trb_buff_len, ret; 2860 unsigned int total_packet_count; 2861 u64 addr; 2862 2863 if (urb->num_sgs) 2864 return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index); 2865 2866 ep_ring = xhci_urb_to_transfer_ring(xhci, urb); 2867 if (!ep_ring) 2868 return -EINVAL; 2869 2870 num_trbs = 0; 2871 /* How much data is (potentially) left before the 64KB boundary? */ 2872 running_total = TRB_MAX_BUFF_SIZE - 2873 (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1)); 2874 running_total &= TRB_MAX_BUFF_SIZE - 1; 2875 2876 /* If there's some data on this 64KB chunk, or we have to send a 2877 * zero-length transfer, we need at least one TRB 2878 */ 2879 if (running_total != 0 || urb->transfer_buffer_length == 0) 2880 num_trbs++; 2881 /* How many more 64KB chunks to transfer, how many more TRBs? */ 2882 while (running_total < urb->transfer_buffer_length) { 2883 num_trbs++; 2884 running_total += TRB_MAX_BUFF_SIZE; 2885 } 2886 /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */ 2887 2888 if (!in_interrupt()) 2889 xhci_dbg(xhci, "ep %#x - urb len = %#x (%d), " 2890 "addr = %#llx, num_trbs = %d\n", 2891 urb->ep->desc.bEndpointAddress, 2892 urb->transfer_buffer_length, 2893 urb->transfer_buffer_length, 2894 (unsigned long long)urb->transfer_dma, 2895 num_trbs); 2896 2897 ret = prepare_transfer(xhci, xhci->devs[slot_id], 2898 ep_index, urb->stream_id, 2899 num_trbs, urb, 0, mem_flags); 2900 if (ret < 0) 2901 return ret; 2902 2903 urb_priv = urb->hcpriv; 2904 td = urb_priv->td[0]; 2905 2906 /* 2907 * Don't give the first TRB to the hardware (by toggling the cycle bit) 2908 * until we've finished creating all the other TRBs. The ring's cycle 2909 * state may change as we enqueue the other TRBs, so save it too. 2910 */ 2911 start_trb = &ep_ring->enqueue->generic; 2912 start_cycle = ep_ring->cycle_state; 2913 2914 running_total = 0; 2915 total_packet_count = roundup(urb->transfer_buffer_length, 2916 le16_to_cpu(urb->ep->desc.wMaxPacketSize)); 2917 /* How much data is in the first TRB? */ 2918 addr = (u64) urb->transfer_dma; 2919 trb_buff_len = TRB_MAX_BUFF_SIZE - 2920 (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1)); 2921 if (trb_buff_len > urb->transfer_buffer_length) 2922 trb_buff_len = urb->transfer_buffer_length; 2923 2924 first_trb = true; 2925 2926 /* Queue the first TRB, even if it's zero-length */ 2927 do { 2928 u32 remainder = 0; 2929 field = 0; 2930 2931 /* Don't change the cycle bit of the first TRB until later */ 2932 if (first_trb) { 2933 first_trb = false; 2934 if (start_cycle == 0) 2935 field |= 0x1; 2936 } else 2937 field |= ep_ring->cycle_state; 2938 2939 /* Chain all the TRBs together; clear the chain bit in the last 2940 * TRB to indicate it's the last TRB in the chain. 2941 */ 2942 if (num_trbs > 1) { 2943 field |= TRB_CHAIN; 2944 } else { 2945 /* FIXME - add check for ZERO_PACKET flag before this */ 2946 td->last_trb = ep_ring->enqueue; 2947 field |= TRB_IOC; 2948 } 2949 2950 /* Only set interrupt on short packet for IN endpoints */ 2951 if (usb_urb_dir_in(urb)) 2952 field |= TRB_ISP; 2953 2954 /* Set the TRB length, TD size, and interrupter fields. */ 2955 if (xhci->hci_version < 0x100) { 2956 remainder = xhci_td_remainder( 2957 urb->transfer_buffer_length - 2958 running_total); 2959 } else { 2960 remainder = xhci_v1_0_td_remainder(running_total, 2961 trb_buff_len, total_packet_count, urb); 2962 } 2963 length_field = TRB_LEN(trb_buff_len) | 2964 remainder | 2965 TRB_INTR_TARGET(0); 2966 2967 if (num_trbs > 1) 2968 more_trbs_coming = true; 2969 else 2970 more_trbs_coming = false; 2971 queue_trb(xhci, ep_ring, false, more_trbs_coming, 2972 lower_32_bits(addr), 2973 upper_32_bits(addr), 2974 length_field, 2975 field | TRB_TYPE(TRB_NORMAL)); 2976 --num_trbs; 2977 running_total += trb_buff_len; 2978 2979 /* Calculate length for next transfer */ 2980 addr += trb_buff_len; 2981 trb_buff_len = urb->transfer_buffer_length - running_total; 2982 if (trb_buff_len > TRB_MAX_BUFF_SIZE) 2983 trb_buff_len = TRB_MAX_BUFF_SIZE; 2984 } while (running_total < urb->transfer_buffer_length); 2985 2986 check_trb_math(urb, num_trbs, running_total); 2987 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, 2988 start_cycle, start_trb); 2989 return 0; 2990 } 2991 2992 /* Caller must have locked xhci->lock */ 2993 int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 2994 struct urb *urb, int slot_id, unsigned int ep_index) 2995 { 2996 struct xhci_ring *ep_ring; 2997 int num_trbs; 2998 int ret; 2999 struct usb_ctrlrequest *setup; 3000 struct xhci_generic_trb *start_trb; 3001 int start_cycle; 3002 u32 field, length_field; 3003 struct urb_priv *urb_priv; 3004 struct xhci_td *td; 3005 3006 ep_ring = xhci_urb_to_transfer_ring(xhci, urb); 3007 if (!ep_ring) 3008 return -EINVAL; 3009 3010 /* 3011 * Need to copy setup packet into setup TRB, so we can't use the setup 3012 * DMA address. 3013 */ 3014 if (!urb->setup_packet) 3015 return -EINVAL; 3016 3017 if (!in_interrupt()) 3018 xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n", 3019 slot_id, ep_index); 3020 /* 1 TRB for setup, 1 for status */ 3021 num_trbs = 2; 3022 /* 3023 * Don't need to check if we need additional event data and normal TRBs, 3024 * since data in control transfers will never get bigger than 16MB 3025 * XXX: can we get a buffer that crosses 64KB boundaries? 3026 */ 3027 if (urb->transfer_buffer_length > 0) 3028 num_trbs++; 3029 ret = prepare_transfer(xhci, xhci->devs[slot_id], 3030 ep_index, urb->stream_id, 3031 num_trbs, urb, 0, mem_flags); 3032 if (ret < 0) 3033 return ret; 3034 3035 urb_priv = urb->hcpriv; 3036 td = urb_priv->td[0]; 3037 3038 /* 3039 * Don't give the first TRB to the hardware (by toggling the cycle bit) 3040 * until we've finished creating all the other TRBs. The ring's cycle 3041 * state may change as we enqueue the other TRBs, so save it too. 3042 */ 3043 start_trb = &ep_ring->enqueue->generic; 3044 start_cycle = ep_ring->cycle_state; 3045 3046 /* Queue setup TRB - see section 6.4.1.2.1 */ 3047 /* FIXME better way to translate setup_packet into two u32 fields? */ 3048 setup = (struct usb_ctrlrequest *) urb->setup_packet; 3049 field = 0; 3050 field |= TRB_IDT | TRB_TYPE(TRB_SETUP); 3051 if (start_cycle == 0) 3052 field |= 0x1; 3053 3054 /* xHCI 1.0 6.4.1.2.1: Transfer Type field */ 3055 if (xhci->hci_version == 0x100) { 3056 if (urb->transfer_buffer_length > 0) { 3057 if (setup->bRequestType & USB_DIR_IN) 3058 field |= TRB_TX_TYPE(TRB_DATA_IN); 3059 else 3060 field |= TRB_TX_TYPE(TRB_DATA_OUT); 3061 } 3062 } 3063 3064 queue_trb(xhci, ep_ring, false, true, 3065 setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16, 3066 le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16, 3067 TRB_LEN(8) | TRB_INTR_TARGET(0), 3068 /* Immediate data in pointer */ 3069 field); 3070 3071 /* If there's data, queue data TRBs */ 3072 /* Only set interrupt on short packet for IN endpoints */ 3073 if (usb_urb_dir_in(urb)) 3074 field = TRB_ISP | TRB_TYPE(TRB_DATA); 3075 else 3076 field = TRB_TYPE(TRB_DATA); 3077 3078 length_field = TRB_LEN(urb->transfer_buffer_length) | 3079 xhci_td_remainder(urb->transfer_buffer_length) | 3080 TRB_INTR_TARGET(0); 3081 if (urb->transfer_buffer_length > 0) { 3082 if (setup->bRequestType & USB_DIR_IN) 3083 field |= TRB_DIR_IN; 3084 queue_trb(xhci, ep_ring, false, true, 3085 lower_32_bits(urb->transfer_dma), 3086 upper_32_bits(urb->transfer_dma), 3087 length_field, 3088 field | ep_ring->cycle_state); 3089 } 3090 3091 /* Save the DMA address of the last TRB in the TD */ 3092 td->last_trb = ep_ring->enqueue; 3093 3094 /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */ 3095 /* If the device sent data, the status stage is an OUT transfer */ 3096 if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN) 3097 field = 0; 3098 else 3099 field = TRB_DIR_IN; 3100 queue_trb(xhci, ep_ring, false, false, 3101 0, 3102 0, 3103 TRB_INTR_TARGET(0), 3104 /* Event on completion */ 3105 field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state); 3106 3107 giveback_first_trb(xhci, slot_id, ep_index, 0, 3108 start_cycle, start_trb); 3109 return 0; 3110 } 3111 3112 static int count_isoc_trbs_needed(struct xhci_hcd *xhci, 3113 struct urb *urb, int i) 3114 { 3115 int num_trbs = 0; 3116 u64 addr, td_len, running_total; 3117 3118 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset); 3119 td_len = urb->iso_frame_desc[i].length; 3120 3121 running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1)); 3122 running_total &= TRB_MAX_BUFF_SIZE - 1; 3123 if (running_total != 0) 3124 num_trbs++; 3125 3126 while (running_total < td_len) { 3127 num_trbs++; 3128 running_total += TRB_MAX_BUFF_SIZE; 3129 } 3130 3131 return num_trbs; 3132 } 3133 3134 /* 3135 * The transfer burst count field of the isochronous TRB defines the number of 3136 * bursts that are required to move all packets in this TD. Only SuperSpeed 3137 * devices can burst up to bMaxBurst number of packets per service interval. 3138 * This field is zero based, meaning a value of zero in the field means one 3139 * burst. Basically, for everything but SuperSpeed devices, this field will be 3140 * zero. Only xHCI 1.0 host controllers support this field. 3141 */ 3142 static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci, 3143 struct usb_device *udev, 3144 struct urb *urb, unsigned int total_packet_count) 3145 { 3146 unsigned int max_burst; 3147 3148 if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER) 3149 return 0; 3150 3151 max_burst = urb->ep->ss_ep_comp.bMaxBurst; 3152 return roundup(total_packet_count, max_burst + 1) - 1; 3153 } 3154 3155 /* 3156 * Returns the number of packets in the last "burst" of packets. This field is 3157 * valid for all speeds of devices. USB 2.0 devices can only do one "burst", so 3158 * the last burst packet count is equal to the total number of packets in the 3159 * TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst 3160 * must contain (bMaxBurst + 1) number of packets, but the last burst can 3161 * contain 1 to (bMaxBurst + 1) packets. 3162 */ 3163 static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci, 3164 struct usb_device *udev, 3165 struct urb *urb, unsigned int total_packet_count) 3166 { 3167 unsigned int max_burst; 3168 unsigned int residue; 3169 3170 if (xhci->hci_version < 0x100) 3171 return 0; 3172 3173 switch (udev->speed) { 3174 case USB_SPEED_SUPER: 3175 /* bMaxBurst is zero based: 0 means 1 packet per burst */ 3176 max_burst = urb->ep->ss_ep_comp.bMaxBurst; 3177 residue = total_packet_count % (max_burst + 1); 3178 /* If residue is zero, the last burst contains (max_burst + 1) 3179 * number of packets, but the TLBPC field is zero-based. 3180 */ 3181 if (residue == 0) 3182 return max_burst; 3183 return residue - 1; 3184 default: 3185 if (total_packet_count == 0) 3186 return 0; 3187 return total_packet_count - 1; 3188 } 3189 } 3190 3191 /* This is for isoc transfer */ 3192 static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 3193 struct urb *urb, int slot_id, unsigned int ep_index) 3194 { 3195 struct xhci_ring *ep_ring; 3196 struct urb_priv *urb_priv; 3197 struct xhci_td *td; 3198 int num_tds, trbs_per_td; 3199 struct xhci_generic_trb *start_trb; 3200 bool first_trb; 3201 int start_cycle; 3202 u32 field, length_field; 3203 int running_total, trb_buff_len, td_len, td_remain_len, ret; 3204 u64 start_addr, addr; 3205 int i, j; 3206 bool more_trbs_coming; 3207 3208 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; 3209 3210 num_tds = urb->number_of_packets; 3211 if (num_tds < 1) { 3212 xhci_dbg(xhci, "Isoc URB with zero packets?\n"); 3213 return -EINVAL; 3214 } 3215 3216 if (!in_interrupt()) 3217 xhci_dbg(xhci, "ep %#x - urb len = %#x (%d)," 3218 " addr = %#llx, num_tds = %d\n", 3219 urb->ep->desc.bEndpointAddress, 3220 urb->transfer_buffer_length, 3221 urb->transfer_buffer_length, 3222 (unsigned long long)urb->transfer_dma, 3223 num_tds); 3224 3225 start_addr = (u64) urb->transfer_dma; 3226 start_trb = &ep_ring->enqueue->generic; 3227 start_cycle = ep_ring->cycle_state; 3228 3229 /* Queue the first TRB, even if it's zero-length */ 3230 for (i = 0; i < num_tds; i++) { 3231 unsigned int total_packet_count; 3232 unsigned int burst_count; 3233 unsigned int residue; 3234 3235 first_trb = true; 3236 running_total = 0; 3237 addr = start_addr + urb->iso_frame_desc[i].offset; 3238 td_len = urb->iso_frame_desc[i].length; 3239 td_remain_len = td_len; 3240 /* FIXME: Ignoring zero-length packets, can those happen? */ 3241 total_packet_count = roundup(td_len, 3242 le16_to_cpu(urb->ep->desc.wMaxPacketSize)); 3243 burst_count = xhci_get_burst_count(xhci, urb->dev, urb, 3244 total_packet_count); 3245 residue = xhci_get_last_burst_packet_count(xhci, 3246 urb->dev, urb, total_packet_count); 3247 3248 trbs_per_td = count_isoc_trbs_needed(xhci, urb, i); 3249 3250 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, 3251 urb->stream_id, trbs_per_td, urb, i, mem_flags); 3252 if (ret < 0) 3253 return ret; 3254 3255 urb_priv = urb->hcpriv; 3256 td = urb_priv->td[i]; 3257 3258 for (j = 0; j < trbs_per_td; j++) { 3259 u32 remainder = 0; 3260 field = TRB_TBC(burst_count) | TRB_TLBPC(residue); 3261 3262 if (first_trb) { 3263 /* Queue the isoc TRB */ 3264 field |= TRB_TYPE(TRB_ISOC); 3265 /* Assume URB_ISO_ASAP is set */ 3266 field |= TRB_SIA; 3267 if (i == 0) { 3268 if (start_cycle == 0) 3269 field |= 0x1; 3270 } else 3271 field |= ep_ring->cycle_state; 3272 first_trb = false; 3273 } else { 3274 /* Queue other normal TRBs */ 3275 field |= TRB_TYPE(TRB_NORMAL); 3276 field |= ep_ring->cycle_state; 3277 } 3278 3279 /* Only set interrupt on short packet for IN EPs */ 3280 if (usb_urb_dir_in(urb)) 3281 field |= TRB_ISP; 3282 3283 /* Chain all the TRBs together; clear the chain bit in 3284 * the last TRB to indicate it's the last TRB in the 3285 * chain. 3286 */ 3287 if (j < trbs_per_td - 1) { 3288 field |= TRB_CHAIN; 3289 more_trbs_coming = true; 3290 } else { 3291 td->last_trb = ep_ring->enqueue; 3292 field |= TRB_IOC; 3293 if (xhci->hci_version == 0x100) { 3294 /* Set BEI bit except for the last td */ 3295 if (i < num_tds - 1) 3296 field |= TRB_BEI; 3297 } 3298 more_trbs_coming = false; 3299 } 3300 3301 /* Calculate TRB length */ 3302 trb_buff_len = TRB_MAX_BUFF_SIZE - 3303 (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); 3304 if (trb_buff_len > td_remain_len) 3305 trb_buff_len = td_remain_len; 3306 3307 /* Set the TRB length, TD size, & interrupter fields. */ 3308 if (xhci->hci_version < 0x100) { 3309 remainder = xhci_td_remainder( 3310 td_len - running_total); 3311 } else { 3312 remainder = xhci_v1_0_td_remainder( 3313 running_total, trb_buff_len, 3314 total_packet_count, urb); 3315 } 3316 length_field = TRB_LEN(trb_buff_len) | 3317 remainder | 3318 TRB_INTR_TARGET(0); 3319 3320 queue_trb(xhci, ep_ring, false, more_trbs_coming, 3321 lower_32_bits(addr), 3322 upper_32_bits(addr), 3323 length_field, 3324 field); 3325 running_total += trb_buff_len; 3326 3327 addr += trb_buff_len; 3328 td_remain_len -= trb_buff_len; 3329 } 3330 3331 /* Check TD length */ 3332 if (running_total != td_len) { 3333 xhci_err(xhci, "ISOC TD length unmatch\n"); 3334 return -EINVAL; 3335 } 3336 } 3337 3338 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { 3339 if (xhci->quirks & XHCI_AMD_PLL_FIX) 3340 usb_amd_quirk_pll_disable(); 3341 } 3342 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++; 3343 3344 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, 3345 start_cycle, start_trb); 3346 return 0; 3347 } 3348 3349 /* 3350 * Check transfer ring to guarantee there is enough room for the urb. 3351 * Update ISO URB start_frame and interval. 3352 * Update interval as xhci_queue_intr_tx does. Just use xhci frame_index to 3353 * update the urb->start_frame by now. 3354 * Always assume URB_ISO_ASAP set, and NEVER use urb->start_frame as input. 3355 */ 3356 int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, 3357 struct urb *urb, int slot_id, unsigned int ep_index) 3358 { 3359 struct xhci_virt_device *xdev; 3360 struct xhci_ring *ep_ring; 3361 struct xhci_ep_ctx *ep_ctx; 3362 int start_frame; 3363 int xhci_interval; 3364 int ep_interval; 3365 int num_tds, num_trbs, i; 3366 int ret; 3367 3368 xdev = xhci->devs[slot_id]; 3369 ep_ring = xdev->eps[ep_index].ring; 3370 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 3371 3372 num_trbs = 0; 3373 num_tds = urb->number_of_packets; 3374 for (i = 0; i < num_tds; i++) 3375 num_trbs += count_isoc_trbs_needed(xhci, urb, i); 3376 3377 /* Check the ring to guarantee there is enough room for the whole urb. 3378 * Do not insert any td of the urb to the ring if the check failed. 3379 */ 3380 ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK, 3381 num_trbs, mem_flags); 3382 if (ret) 3383 return ret; 3384 3385 start_frame = xhci_readl(xhci, &xhci->run_regs->microframe_index); 3386 start_frame &= 0x3fff; 3387 3388 urb->start_frame = start_frame; 3389 if (urb->dev->speed == USB_SPEED_LOW || 3390 urb->dev->speed == USB_SPEED_FULL) 3391 urb->start_frame >>= 3; 3392 3393 xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info)); 3394 ep_interval = urb->interval; 3395 /* Convert to microframes */ 3396 if (urb->dev->speed == USB_SPEED_LOW || 3397 urb->dev->speed == USB_SPEED_FULL) 3398 ep_interval *= 8; 3399 /* FIXME change this to a warning and a suggestion to use the new API 3400 * to set the polling interval (once the API is added). 3401 */ 3402 if (xhci_interval != ep_interval) { 3403 if (printk_ratelimit()) 3404 dev_dbg(&urb->dev->dev, "Driver uses different interval" 3405 " (%d microframe%s) than xHCI " 3406 "(%d microframe%s)\n", 3407 ep_interval, 3408 ep_interval == 1 ? "" : "s", 3409 xhci_interval, 3410 xhci_interval == 1 ? "" : "s"); 3411 urb->interval = xhci_interval; 3412 /* Convert back to frames for LS/FS devices */ 3413 if (urb->dev->speed == USB_SPEED_LOW || 3414 urb->dev->speed == USB_SPEED_FULL) 3415 urb->interval /= 8; 3416 } 3417 return xhci_queue_isoc_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index); 3418 } 3419 3420 /**** Command Ring Operations ****/ 3421 3422 /* Generic function for queueing a command TRB on the command ring. 3423 * Check to make sure there's room on the command ring for one command TRB. 3424 * Also check that there's room reserved for commands that must not fail. 3425 * If this is a command that must not fail, meaning command_must_succeed = TRUE, 3426 * then only check for the number of reserved spots. 3427 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB 3428 * because the command event handler may want to resubmit a failed command. 3429 */ 3430 static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, 3431 u32 field3, u32 field4, bool command_must_succeed) 3432 { 3433 int reserved_trbs = xhci->cmd_ring_reserved_trbs; 3434 int ret; 3435 3436 if (!command_must_succeed) 3437 reserved_trbs++; 3438 3439 ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING, 3440 reserved_trbs, GFP_ATOMIC); 3441 if (ret < 0) { 3442 xhci_err(xhci, "ERR: No room for command on command ring\n"); 3443 if (command_must_succeed) 3444 xhci_err(xhci, "ERR: Reserved TRB counting for " 3445 "unfailable commands failed.\n"); 3446 return ret; 3447 } 3448 queue_trb(xhci, xhci->cmd_ring, false, false, field1, field2, field3, 3449 field4 | xhci->cmd_ring->cycle_state); 3450 return 0; 3451 } 3452 3453 /* Queue a slot enable or disable request on the command ring */ 3454 int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id) 3455 { 3456 return queue_command(xhci, 0, 0, 0, 3457 TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false); 3458 } 3459 3460 /* Queue an address device command TRB */ 3461 int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, 3462 u32 slot_id) 3463 { 3464 return queue_command(xhci, lower_32_bits(in_ctx_ptr), 3465 upper_32_bits(in_ctx_ptr), 0, 3466 TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id), 3467 false); 3468 } 3469 3470 int xhci_queue_vendor_command(struct xhci_hcd *xhci, 3471 u32 field1, u32 field2, u32 field3, u32 field4) 3472 { 3473 return queue_command(xhci, field1, field2, field3, field4, false); 3474 } 3475 3476 /* Queue a reset device command TRB */ 3477 int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id) 3478 { 3479 return queue_command(xhci, 0, 0, 0, 3480 TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id), 3481 false); 3482 } 3483 3484 /* Queue a configure endpoint command TRB */ 3485 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, 3486 u32 slot_id, bool command_must_succeed) 3487 { 3488 return queue_command(xhci, lower_32_bits(in_ctx_ptr), 3489 upper_32_bits(in_ctx_ptr), 0, 3490 TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id), 3491 command_must_succeed); 3492 } 3493 3494 /* Queue an evaluate context command TRB */ 3495 int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, 3496 u32 slot_id) 3497 { 3498 return queue_command(xhci, lower_32_bits(in_ctx_ptr), 3499 upper_32_bits(in_ctx_ptr), 0, 3500 TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id), 3501 false); 3502 } 3503 3504 /* 3505 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop 3506 * activity on an endpoint that is about to be suspended. 3507 */ 3508 int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id, 3509 unsigned int ep_index, int suspend) 3510 { 3511 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id); 3512 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); 3513 u32 type = TRB_TYPE(TRB_STOP_RING); 3514 u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend); 3515 3516 return queue_command(xhci, 0, 0, 0, 3517 trb_slot_id | trb_ep_index | type | trb_suspend, false); 3518 } 3519 3520 /* Set Transfer Ring Dequeue Pointer command. 3521 * This should not be used for endpoints that have streams enabled. 3522 */ 3523 static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, 3524 unsigned int ep_index, unsigned int stream_id, 3525 struct xhci_segment *deq_seg, 3526 union xhci_trb *deq_ptr, u32 cycle_state) 3527 { 3528 dma_addr_t addr; 3529 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id); 3530 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); 3531 u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id); 3532 u32 type = TRB_TYPE(TRB_SET_DEQ); 3533 struct xhci_virt_ep *ep; 3534 3535 addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr); 3536 if (addr == 0) { 3537 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n"); 3538 xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n", 3539 deq_seg, deq_ptr); 3540 return 0; 3541 } 3542 ep = &xhci->devs[slot_id]->eps[ep_index]; 3543 if ((ep->ep_state & SET_DEQ_PENDING)) { 3544 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n"); 3545 xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n"); 3546 return 0; 3547 } 3548 ep->queued_deq_seg = deq_seg; 3549 ep->queued_deq_ptr = deq_ptr; 3550 return queue_command(xhci, lower_32_bits(addr) | cycle_state, 3551 upper_32_bits(addr), trb_stream_id, 3552 trb_slot_id | trb_ep_index | type, false); 3553 } 3554 3555 int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id, 3556 unsigned int ep_index) 3557 { 3558 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id); 3559 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); 3560 u32 type = TRB_TYPE(TRB_RESET_EP); 3561 3562 return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type, 3563 false); 3564 } 3565