/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue.  This means there will always be
 *    at least one free TRB in the ring.  This is useful if you want to turn
 *    that into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB.  If
 *    the link TRB had its toggle bit set, you may need to update the ring
 *    cycle state (see cycle bit rules).  You may have to do this multiple
 *    times until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle
 *    bit in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify consumer.  If SW is producer, it rings the doorbell for command
 *    and endpoint rings.  HC is the producer for the event ring, and it
 *    generates an interrupt according to interrupt modulation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you.  If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer.
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by
 *    you.
 * 3. Notify the producer.  SW is the consumer for the event ring, and it
 *    updates the event ring dequeue pointer.  HC is the consumer for the
 *    command and endpoint rings; it generates events on the event ring for
 *    these.
 */

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "xhci.h"

static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
                struct xhci_virt_device *virt_dev,
                struct xhci_event_cmd *event);
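/*
 * Illustration only (not part of this driver): per the consumer rules above,
 * a consumer decides whether a TRB is ready for processing by comparing the
 * TRB's cycle bit against its own ring cycle state.  A minimal sketch,
 * assuming the hypothetical helper name trb_owned_by_consumer():
 *
 *      static bool trb_owned_by_consumer(struct xhci_ring *ring,
 *                      union xhci_trb *trb)
 *      {
 *              u32 control = le32_to_cpu(trb->generic.field[3]);
 *
 *              return (control & TRB_CYCLE) == ring->cycle_state;
 *      }
 *
 * The event handlers below open-code this comparison rather than calling a
 * helper.
 */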
/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
                union xhci_trb *trb)
{
        unsigned long segment_offset;

        if (!seg || !trb || trb < seg->trbs)
                return 0;
        /* offset in TRBs */
        segment_offset = trb - seg->trbs;
        if (segment_offset >= TRBS_PER_SEGMENT)
                return 0;
        return seg->dma + (segment_offset * sizeof(*trb));
}

/* Does this link TRB point to the first segment in a ring,
 * or was the previous TRB the last TRB on the last segment in the ERST?
 */
static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
                struct xhci_segment *seg, union xhci_trb *trb)
{
        if (ring == xhci->event_ring)
                return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
                        (seg->next == xhci->event_ring->first_seg);
        else
                return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}

/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
 * segment?  I.e. would the updated event TRB pointer step off the end of the
 * event seg?
 */
static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
                struct xhci_segment *seg, union xhci_trb *trb)
{
        if (ring == xhci->event_ring)
                return trb == &seg->trbs[TRBS_PER_SEGMENT];
        else
                return TRB_TYPE_LINK_LE32(trb->link.control);
}

static int enqueue_is_link_trb(struct xhci_ring *ring)
{
        struct xhci_link_trb *link = &ring->enqueue->link;
        return TRB_TYPE_LINK_LE32(link->control);
}

/* Updates trb to point to the next TRB in the ring, and updates seg if the
 * next TRB is in a new segment.  This does not skip over link TRBs, and it
 * does not affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
                struct xhci_ring *ring,
                struct xhci_segment **seg,
                union xhci_trb **trb)
{
        if (last_trb(xhci, ring, *seg, *trb)) {
                *seg = (*seg)->next;
                *trb = ((*seg)->trbs);
        } else {
                (*trb)++;
        }
}

/*
 * See Cycle bit rules.  SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would
 * loop.
 */
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring,
                bool consumer)
{
        union xhci_trb *next = ++(ring->dequeue);
        unsigned long long addr;

        ring->deq_updates++;
        /* Update the dequeue pointer further if that was a link TRB or we're
         * at the end of an event ring segment (which doesn't have link TRBs)
         */
        while (last_trb(xhci, ring, ring->deq_seg, next)) {
                if (consumer && last_trb_on_last_seg(xhci, ring,
                                        ring->deq_seg, next)) {
                        ring->cycle_state = (ring->cycle_state ? 0 : 1);
                        if (!in_interrupt())
                                xhci_dbg(xhci, "Toggle cycle state "
                                                "for ring %p = %i\n",
                                                ring,
                                                (unsigned int)
                                                ring->cycle_state);
                }
                ring->deq_seg = ring->deq_seg->next;
                ring->dequeue = ring->deq_seg->trbs;
                next = ring->dequeue;
        }
        addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg,
                        ring->dequeue);
}
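/*
 * Illustration only (not part of this driver): the "ring behavior rules"
 * above reduce to two simple predicates.  A minimal sketch, where the
 * hypothetical enq_next(ring) stands for the enqueue pointer advanced past
 * any link TRBs:
 *
 *      ring is empty:  ring->enqueue == ring->dequeue
 *      ring is full:   enq_next(ring) == ring->dequeue
 *
 * Because "full" is declared one TRB early, one TRB always stays unused;
 * room_on_ring() below counts free TRBs with the same convention.
 */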
/*
 * See Cycle bit rules.  SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would
 * loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set.  This
 * was fixed in the 0.96 specification errata, but we have to assume that all
 * 0.95 xHCI hardware can't handle the chain bit being cleared on a link TRB.
 *
 * @more_trbs_coming:   Will you enqueue more TRBs before calling
 *                      prepare_transfer()?
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
                bool consumer, bool more_trbs_coming, bool isoc)
{
        u32 chain;
        union xhci_trb *next;
        unsigned long long addr;

        chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
        next = ++(ring->enqueue);

        ring->enq_updates++;
        /* Update the enqueue pointer further if that was a link TRB or we're
         * at the end of an event ring segment (which doesn't have link TRBs)
         */
        while (last_trb(xhci, ring, ring->enq_seg, next)) {
                if (!consumer) {
                        if (ring != xhci->event_ring) {
                                /*
                                 * If the caller doesn't plan on enqueueing
                                 * more TDs before ringing the doorbell, then
                                 * we don't want to give the link TRB to the
                                 * hardware just yet.  We'll give the link TRB
                                 * back in prepare_ring() just before we
                                 * enqueue the TD at the top of the ring.
                                 */
                                if (!chain && !more_trbs_coming)
                                        break;

                                /* If we're not dealing with 0.95 hardware or
                                 * isoc rings on AMD 0.96 host,
                                 * carry over the chain bit of the previous
                                 * TRB (which may mean the chain bit is
                                 * cleared).
                                 */
                                if (!(isoc &&
                                        (xhci->quirks & XHCI_AMD_0x96_HOST))
                                                && !xhci_link_trb_quirk(xhci)) {
                                        next->link.control &=
                                                cpu_to_le32(~TRB_CHAIN);
                                        next->link.control |=
                                                cpu_to_le32(chain);
                                }
                                /* Give this link TRB to the hardware */
                                wmb();
                                next->link.control ^= cpu_to_le32(TRB_CYCLE);
                        }
                        /* Toggle the cycle bit after the last ring segment. */
                        if (last_trb_on_last_seg(xhci, ring,
                                                ring->enq_seg, next)) {
                                ring->cycle_state =
                                        (ring->cycle_state ? 0 : 1);
                                if (!in_interrupt())
                                        xhci_dbg(xhci, "Toggle cycle state "
                                                        "for ring %p = %i\n",
                                                        ring,
                                                        (unsigned int)
                                                        ring->cycle_state);
                        }
                }
                ring->enq_seg = ring->enq_seg->next;
                ring->enqueue = ring->enq_seg->trbs;
                next = ring->enqueue;
        }
        addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg,
                        ring->enqueue);
}
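/*
 * Illustration only (not part of this driver): the wmb() in inc_enq() is
 * what makes the cycle-bit handoff safe.  The producer must fully initialize
 * a TRB before flipping its cycle bit, because the flip is the single store
 * that transfers ownership to the hardware:
 *
 *      trb->generic.field[0] = cpu_to_le32(...);
 *      trb->generic.field[1] = cpu_to_le32(...);
 *      trb->generic.field[2] = cpu_to_le32(...);
 *      wmb();
 *      trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);
 *
 * The first three stores fill in the payload; the barrier orders them before
 * the ownership flip.  Without it, the hardware could observe the new cycle
 * bit while the other fields still hold stale data.
 */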
/*
 * Check to see if there's room to enqueue num_trbs on the ring.  See rules
 * above.
 * FIXME: this would be simpler and faster if we just kept track of the number
 * of free TRBs in a ring.
 */
static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
                unsigned int num_trbs)
{
        int i;
        union xhci_trb *enq = ring->enqueue;
        struct xhci_segment *enq_seg = ring->enq_seg;
        struct xhci_segment *cur_seg;
        unsigned int left_on_ring;

        /* If we are currently pointing to a link TRB, advance the
         * enqueue pointer before checking for space */
        while (last_trb(xhci, ring, enq_seg, enq)) {
                enq_seg = enq_seg->next;
                enq = enq_seg->trbs;
        }

        /* Check if ring is empty */
        if (enq == ring->dequeue) {
                /* Can't use link trbs */
                left_on_ring = TRBS_PER_SEGMENT - 1;
                for (cur_seg = enq_seg->next; cur_seg != enq_seg;
                                cur_seg = cur_seg->next)
                        left_on_ring += TRBS_PER_SEGMENT - 1;

                /* Always need one TRB free in the ring. */
                left_on_ring -= 1;
                if (num_trbs > left_on_ring) {
                        xhci_warn(xhci, "Not enough room on ring; "
                                        "need %u TRBs, %u TRBs left\n",
                                        num_trbs, left_on_ring);
                        return 0;
                }
                return 1;
        }
        /* Make sure there's an extra empty TRB available */
        for (i = 0; i <= num_trbs; ++i) {
                if (enq == ring->dequeue)
                        return 0;
                enq++;
                while (last_trb(xhci, ring, enq_seg, enq)) {
                        enq_seg = enq_seg->next;
                        enq = enq_seg->trbs;
                }
        }
        return 1;
}
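/*
 * Illustration only (not part of this driver): the FIXME above could be
 * addressed by maintaining a running count of free TRBs instead of walking
 * the ring on every enqueue.  A minimal sketch, assuming a hypothetical
 * num_trbs_free field kept in sync by the enqueue and dequeue paths:
 *
 *      static int room_on_ring_counted(struct xhci_ring *ring,
 *                      unsigned int num_trbs)
 *      {
 *              return ring->num_trbs_free >= num_trbs;
 *      }
 *
 * Newer revisions of this driver take roughly that approach; here the walk
 * above is kept as-is.
 */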
/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
        xhci_dbg(xhci, "// Ding dong!\n");
        xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
        /* Flush PCI posted writes */
        xhci_readl(xhci, &xhci->dba->doorbell[0]);
}

void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
                unsigned int slot_id,
                unsigned int ep_index,
                unsigned int stream_id)
{
        __le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
        struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
        unsigned int ep_state = ep->ep_state;

        /* Don't ring the doorbell for this endpoint if there are pending
         * cancellations because we don't want to interrupt processing.
         * We don't want to restart any stream rings if there's a set dequeue
         * pointer command pending because the device can choose to start any
         * stream once the endpoint is on the HW schedule.
         * FIXME - check all the stream rings for pending cancellations.
         */
        if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
                        (ep_state & EP_HALTED))
                return;
        xhci_writel(xhci, DB_VALUE(ep_index, stream_id), db_addr);
        /* The CPU has better things to do at this point than wait for a
         * write-posting flush.  It'll get there soon enough.
         */
}

/* Ring the doorbell for any rings with pending URBs */
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
                unsigned int slot_id,
                unsigned int ep_index)
{
        unsigned int stream_id;
        struct xhci_virt_ep *ep;

        ep = &xhci->devs[slot_id]->eps[ep_index];

        /* A ring has pending URBs if its TD list is not empty */
        if (!(ep->ep_state & EP_HAS_STREAMS)) {
                if (!(list_empty(&ep->ring->td_list)))
                        xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
                return;
        }

        for (stream_id = 1; stream_id < ep->stream_info->num_streams;
                        stream_id++) {
                struct xhci_stream_info *stream_info = ep->stream_info;
                if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
                        xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
                                        stream_id);
        }
}

/*
 * Find the segment that trb is in.  Start searching in start_seg.
 * If we must move past a segment that has a link TRB with a toggle cycle
 * state bit set, then we will toggle the value pointed at by cycle_state.
 */
static struct xhci_segment *find_trb_seg(
                struct xhci_segment *start_seg,
                union xhci_trb *trb, int *cycle_state)
{
        struct xhci_segment *cur_seg = start_seg;
        struct xhci_generic_trb *generic_trb;

        while (cur_seg->trbs > trb ||
                        &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
                generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
                if (generic_trb->field[3] & cpu_to_le32(LINK_TOGGLE))
                        *cycle_state ^= 0x1;
                cur_seg = cur_seg->next;
                if (cur_seg == start_seg)
                        /* Looped over the entire list.  Oops! */
                        return NULL;
        }
        return cur_seg;
}


static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
                unsigned int slot_id, unsigned int ep_index,
                unsigned int stream_id)
{
        struct xhci_virt_ep *ep;

        ep = &xhci->devs[slot_id]->eps[ep_index];
        /* Common case: no streams */
        if (!(ep->ep_state & EP_HAS_STREAMS))
                return ep->ring;

        if (stream_id == 0) {
                xhci_warn(xhci,
                                "WARN: Slot ID %u, ep index %u has streams, "
                                "but URB has no stream ID.\n",
                                slot_id, ep_index);
                return NULL;
        }

        if (stream_id < ep->stream_info->num_streams)
                return ep->stream_info->stream_rings[stream_id];

        xhci_warn(xhci,
                        "WARN: Slot ID %u, ep index %u has "
                        "stream IDs 1 to %u allocated, "
                        "but stream ID %u is requested.\n",
                        slot_id, ep_index,
                        ep->stream_info->num_streams - 1,
                        stream_id);
        return NULL;
}

/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
                struct urb *urb)
{
        return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
                xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
}
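/*
 * Illustration only (not part of this driver): for a streams-capable bulk
 * endpoint with num_streams == 4, the (slot, ep, stream) triad lookup above
 * behaves like this:
 *
 *      stream_id == 0          ->  NULL (URB must carry a stream ID)
 *      stream_id in [1, 3]     ->  ep->stream_info->stream_rings[stream_id]
 *      stream_id >= 4          ->  NULL (outside the allocated range)
 *
 * Endpoints without streams ignore the stream ID and always use ep->ring.
 */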
/*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
 * dequeue pointer, and new consumer cycle state in state.
 * Update our internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 *  - First we update our new ring state to be the same as when the xHC
 *    stopped.
 *  - Then we traverse the ring to find the segment that contains
 *    the last TRB in the TD.  We toggle the xHC's new cycle state when we
 *    pass any link TRBs with the toggle cycle bit set.
 *  - Finally we move the dequeue state one TRB further, toggling the cycle
 *    bit if we've moved it past a link TRB with the toggle cycle bit set.
 *
 * Some of the uses of xhci_generic_trb are grotty, but if they're done
 * with correct __le32 accesses they should work fine.  Only users of this
 * are in here.
 */
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
                unsigned int slot_id, unsigned int ep_index,
                unsigned int stream_id, struct xhci_td *cur_td,
                struct xhci_dequeue_state *state)
{
        struct xhci_virt_device *dev = xhci->devs[slot_id];
        struct xhci_ring *ep_ring;
        struct xhci_generic_trb *trb;
        struct xhci_ep_ctx *ep_ctx;
        dma_addr_t addr;

        ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
                        ep_index, stream_id);
        if (!ep_ring) {
                xhci_warn(xhci, "WARN can't find new dequeue state "
                                "for invalid stream ID %u.\n",
                                stream_id);
                return;
        }
        state->new_cycle_state = 0;
        xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
        state->new_deq_seg = find_trb_seg(cur_td->start_seg,
                        dev->eps[ep_index].stopped_trb,
                        &state->new_cycle_state);
        if (!state->new_deq_seg) {
                WARN_ON(1);
                return;
        }

        /* Dig out the cycle state saved by the xHC during the stop ep cmd */
        xhci_dbg(xhci, "Finding endpoint context\n");
        ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
        state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);

        state->new_deq_ptr = cur_td->last_trb;
        xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
        state->new_deq_seg = find_trb_seg(state->new_deq_seg,
                        state->new_deq_ptr,
                        &state->new_cycle_state);
        if (!state->new_deq_seg) {
                WARN_ON(1);
                return;
        }

        trb = &state->new_deq_ptr->generic;
        if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
                        (trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
                state->new_cycle_state ^= 0x1;
        next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);

        /*
         * If there is only one segment in a ring, find_trb_seg()'s while loop
         * will not run, and it will return before it has a chance to see if
         * it needs to toggle the cycle bit.  It can't tell if the stalled
         * transfer ended just before the link TRB on a one-segment ring, or
         * if the TD wrapped around the top of the ring, because it doesn't
         * have the TD in question.  Look for the one-segment case where the
         * stalled TRB's address is greater than the new dequeue pointer
         * address.
         */
        if (ep_ring->first_seg == ep_ring->first_seg->next &&
                        state->new_deq_ptr < dev->eps[ep_index].stopped_trb)
                state->new_cycle_state ^= 0x1;
        xhci_dbg(xhci, "Cycle state = 0x%x\n", state->new_cycle_state);

        /* Don't update the ring cycle state for the producer (us). */
        xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
                        state->new_deq_seg);
        addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
        xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
                        (unsigned long long) addr);
}
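/*
 * Illustration only (not part of this driver): a worked example of the three
 * jumps above on a two-segment ring whose second segment's link TRB has the
 * toggle bit set, with the xHC stopped mid-TD:
 *
 *      1. new state := stopped state (segment of the stopped TRB; CCS comes
 *         from bit 0 of ep_ctx->deq)
 *      2. walk forward to the TD's last TRB; crossing the toggle link TRB
 *         flips new_cycle_state
 *      3. step one TRB past the last TRB; if that step also crosses a toggle
 *         link TRB, flip new_cycle_state again
 *
 * The result is the (segment, pointer, cycle) triple handed to the Set TR
 * Dequeue Pointer command below.
 */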
535 */ 536 cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN); 537 /* Flip the cycle bit (link TRBs can't be the first 538 * or last TRB). 539 */ 540 if (flip_cycle) 541 cur_trb->generic.field[3] ^= 542 cpu_to_le32(TRB_CYCLE); 543 xhci_dbg(xhci, "Cancel (unchain) link TRB\n"); 544 xhci_dbg(xhci, "Address = %p (0x%llx dma); " 545 "in seg %p (0x%llx dma)\n", 546 cur_trb, 547 (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb), 548 cur_seg, 549 (unsigned long long)cur_seg->dma); 550 } else { 551 cur_trb->generic.field[0] = 0; 552 cur_trb->generic.field[1] = 0; 553 cur_trb->generic.field[2] = 0; 554 /* Preserve only the cycle bit of this TRB */ 555 cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE); 556 /* Flip the cycle bit except on the first or last TRB */ 557 if (flip_cycle && cur_trb != cur_td->first_trb && 558 cur_trb != cur_td->last_trb) 559 cur_trb->generic.field[3] ^= 560 cpu_to_le32(TRB_CYCLE); 561 cur_trb->generic.field[3] |= cpu_to_le32( 562 TRB_TYPE(TRB_TR_NOOP)); 563 xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) " 564 "in seg %p (0x%llx dma)\n", 565 cur_trb, 566 (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb), 567 cur_seg, 568 (unsigned long long)cur_seg->dma); 569 } 570 if (cur_trb == cur_td->last_trb) 571 break; 572 } 573 } 574 575 static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, 576 unsigned int ep_index, unsigned int stream_id, 577 struct xhci_segment *deq_seg, 578 union xhci_trb *deq_ptr, u32 cycle_state); 579 580 void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, 581 unsigned int slot_id, unsigned int ep_index, 582 unsigned int stream_id, 583 struct xhci_dequeue_state *deq_state) 584 { 585 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; 586 587 xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), " 588 "new deq ptr = %p (0x%llx dma), new cycle = %u\n", 589 deq_state->new_deq_seg, 590 (unsigned long long)deq_state->new_deq_seg->dma, 591 deq_state->new_deq_ptr, 592 (unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr), 593 deq_state->new_cycle_state); 594 queue_set_tr_deq(xhci, slot_id, ep_index, stream_id, 595 deq_state->new_deq_seg, 596 deq_state->new_deq_ptr, 597 (u32) deq_state->new_cycle_state); 598 /* Stop the TD queueing code from ringing the doorbell until 599 * this command completes. The HC won't set the dequeue pointer 600 * if the ring is running, and ringing the doorbell starts the 601 * ring running. 602 */ 603 ep->ep_state |= SET_DEQ_PENDING; 604 } 605 606 static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci, 607 struct xhci_virt_ep *ep) 608 { 609 ep->ep_state &= ~EP_HALT_PENDING; 610 /* Can't del_timer_sync in interrupt, so we attempt to cancel. If the 611 * timer is running on another CPU, we don't decrement stop_cmds_pending 612 * (since we didn't successfully stop the watchdog timer). 
613 */ 614 if (del_timer(&ep->stop_cmd_timer)) 615 ep->stop_cmds_pending--; 616 } 617 618 /* Must be called with xhci->lock held in interrupt context */ 619 static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci, 620 struct xhci_td *cur_td, int status, char *adjective) 621 { 622 struct usb_hcd *hcd; 623 struct urb *urb; 624 struct urb_priv *urb_priv; 625 626 urb = cur_td->urb; 627 urb_priv = urb->hcpriv; 628 urb_priv->td_cnt++; 629 hcd = bus_to_hcd(urb->dev->bus); 630 631 /* Only giveback urb when this is the last td in urb */ 632 if (urb_priv->td_cnt == urb_priv->length) { 633 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { 634 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--; 635 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { 636 if (xhci->quirks & XHCI_AMD_PLL_FIX) 637 usb_amd_quirk_pll_enable(); 638 } 639 } 640 usb_hcd_unlink_urb_from_ep(hcd, urb); 641 642 spin_unlock(&xhci->lock); 643 usb_hcd_giveback_urb(hcd, urb, status); 644 xhci_urb_free_priv(xhci, urb_priv); 645 spin_lock(&xhci->lock); 646 } 647 } 648 649 /* 650 * When we get a command completion for a Stop Endpoint Command, we need to 651 * unlink any cancelled TDs from the ring. There are two ways to do that: 652 * 653 * 1. If the HW was in the middle of processing the TD that needs to be 654 * cancelled, then we must move the ring's dequeue pointer past the last TRB 655 * in the TD with a Set Dequeue Pointer Command. 656 * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain 657 * bit cleared) so that the HW will skip over them. 658 */ 659 static void handle_stopped_endpoint(struct xhci_hcd *xhci, 660 union xhci_trb *trb, struct xhci_event_cmd *event) 661 { 662 unsigned int slot_id; 663 unsigned int ep_index; 664 struct xhci_virt_device *virt_dev; 665 struct xhci_ring *ep_ring; 666 struct xhci_virt_ep *ep; 667 struct list_head *entry; 668 struct xhci_td *cur_td = NULL; 669 struct xhci_td *last_unlinked_td; 670 671 struct xhci_dequeue_state deq_state; 672 673 if (unlikely(TRB_TO_SUSPEND_PORT( 674 le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])))) { 675 slot_id = TRB_TO_SLOT_ID( 676 le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])); 677 virt_dev = xhci->devs[slot_id]; 678 if (virt_dev) 679 handle_cmd_in_cmd_wait_list(xhci, virt_dev, 680 event); 681 else 682 xhci_warn(xhci, "Stop endpoint command " 683 "completion for disabled slot %u\n", 684 slot_id); 685 return; 686 } 687 688 memset(&deq_state, 0, sizeof(deq_state)); 689 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3])); 690 ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); 691 ep = &xhci->devs[slot_id]->eps[ep_index]; 692 693 if (list_empty(&ep->cancelled_td_list)) { 694 xhci_stop_watchdog_timer_in_irq(xhci, ep); 695 ep->stopped_td = NULL; 696 ep->stopped_trb = NULL; 697 ring_doorbell_for_active_rings(xhci, slot_id, ep_index); 698 return; 699 } 700 701 /* Fix up the ep ring first, so HW stops executing cancelled TDs. 702 * We have the xHCI lock, so nothing can modify this list until we drop 703 * it. 
         * We're also in the event handler, so we can't get re-interrupted if
         * another Stop Endpoint command completes.
         */
        list_for_each(entry, &ep->cancelled_td_list) {
                cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
                xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
                                cur_td->first_trb,
                                (unsigned long long)
                                xhci_trb_virt_to_dma(cur_td->start_seg,
                                        cur_td->first_trb));
                ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
                if (!ep_ring) {
                        /* This shouldn't happen unless a driver is mucking
                         * with the stream ID after submission.  This will
                         * leave the TD on the hardware ring, and the hardware
                         * will try to execute it, and may access a buffer
                         * that has already been freed.  In the best case, the
                         * hardware will execute it, and the event handler
                         * will ignore the completion event for that TD, since
                         * it was removed from the td_list for that endpoint.
                         * In short, don't muck with the stream ID after
                         * submission.
                         */
                        xhci_warn(xhci, "WARN Cancelled URB %p "
                                        "has invalid stream ID %u.\n",
                                        cur_td->urb,
                                        cur_td->urb->stream_id);
                        goto remove_finished_td;
                }
                /*
                 * If we stopped on the TD we need to cancel, then we have to
                 * move the xHC endpoint ring dequeue pointer past this TD.
                 */
                if (cur_td == ep->stopped_td)
                        xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
                                        cur_td->urb->stream_id,
                                        cur_td, &deq_state);
                else
                        td_to_noop(xhci, ep_ring, cur_td, false);
remove_finished_td:
                /*
                 * The event handler won't see a completion for this TD
                 * anymore, so remove it from the endpoint ring's TD list.
                 * Keep it in the cancelled TD list for URB completion later.
                 */
                list_del_init(&cur_td->td_list);
        }
        last_unlinked_td = cur_td;
        xhci_stop_watchdog_timer_in_irq(xhci, ep);

        /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
        if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
                xhci_queue_new_dequeue_state(xhci,
                                slot_id, ep_index,
                                ep->stopped_td->urb->stream_id,
                                &deq_state);
                xhci_ring_cmd_db(xhci);
        } else {
                /* Otherwise ring the doorbell(s) to restart queued transfers
                 */
                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
        }
        ep->stopped_td = NULL;
        ep->stopped_trb = NULL;

        /*
         * Drop the lock and complete the URBs in the cancelled TD list.
         * New TDs to be cancelled might be added to the end of the list
         * before we can complete all the URBs for the TDs we already
         * unlinked.  So stop when we've completed the URB for the last TD we
         * unlinked.
         */
        do {
                cur_td = list_entry(ep->cancelled_td_list.next,
                                struct xhci_td, cancelled_td_list);
                list_del_init(&cur_td->cancelled_td_list);

                /* Clean up the cancelled URB */
                /* Doesn't matter what we pass for status, since the core will
                 * just overwrite it (because the URB has been unlinked).
                 */
                xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled");

                /* Stop processing the cancelled list if the watchdog timer is
                 * running.
                 */
                if (xhci->xhc_state & XHCI_STATE_DYING)
                        return;
        } while (cur_td != last_unlinked_td);

        /* Return to the event handler with xhci->lock re-acquired */
}

/* Watchdog timer function for when a stop endpoint command fails to complete.
 * In this case, we assume the host controller is broken or dying or dead.
 * The host may still be completing some other events, so we have to be
 * careful to let the event ring handler and the URB dequeueing/enqueueing
 * functions know through xhci->state.
 *
 * The timer may also fire if the host takes a very long time to respond to
 * the command, and the stop endpoint command completion handler cannot delete
 * the timer before the timer function is called.  Another endpoint
 * cancellation may sneak in before the timer function can grab the lock, and
 * that may queue another stop endpoint command and add the timer back.  So we
 * cannot use a simple flag to say whether there is a pending stop endpoint
 * command for a particular endpoint.
 *
 * Instead we use a combination of that flag and a counter for the number of
 * pending stop endpoint commands.  If the timer is the tail end of the last
 * stop endpoint command, and the endpoint's command is still pending, we
 * assume the host is dying.
 */
void xhci_stop_endpoint_command_watchdog(unsigned long arg)
{
        struct xhci_hcd *xhci;
        struct xhci_virt_ep *ep;
        struct xhci_virt_ep *temp_ep;
        struct xhci_ring *ring;
        struct xhci_td *cur_td;
        int ret, i, j;
        unsigned long flags;

        ep = (struct xhci_virt_ep *) arg;
        xhci = ep->xhci;

        spin_lock_irqsave(&xhci->lock, flags);

        ep->stop_cmds_pending--;
        if (xhci->xhc_state & XHCI_STATE_DYING) {
                xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
                                "xHCI as DYING, exiting.\n");
                spin_unlock_irqrestore(&xhci->lock, flags);
                return;
        }
        if (!(ep->stop_cmds_pending == 0 &&
                                (ep->ep_state & EP_HALT_PENDING))) {
                xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
                                "exiting.\n");
                spin_unlock_irqrestore(&xhci->lock, flags);
                return;
        }

        xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
        xhci_warn(xhci, "Assuming host is dying, halting host.\n");
        /* Oops, HC is dead or dying or at least not responding to the stop
         * endpoint command.
         */
        xhci->xhc_state |= XHCI_STATE_DYING;
        /* Disable interrupts from the host controller and start halting it */
        xhci_quiesce(xhci);
        spin_unlock_irqrestore(&xhci->lock, flags);

        ret = xhci_halt(xhci);

        spin_lock_irqsave(&xhci->lock, flags);
        if (ret < 0) {
                /* This is bad; the host is not responding to commands and
                 * it's not allowing itself to be halted.  At least interrupts
                 * are disabled.  If we call usb_hc_died(), it will attempt to
                 * disconnect all device drivers under this host.  Those
                 * disconnect() methods will wait for all URBs to be unlinked,
                 * so we must complete them.
                 */
                xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
                xhci_warn(xhci, "Completing active URBs anyway.\n");
                /* We could turn all TDs on the rings to no-ops.  This won't
                 * help if the host has cached part of the ring, and is slow
                 * if we want to preserve the cycle bit.  Skip it and hope the
                 * host doesn't touch the memory.
867 */ 868 } 869 for (i = 0; i < MAX_HC_SLOTS; i++) { 870 if (!xhci->devs[i]) 871 continue; 872 for (j = 0; j < 31; j++) { 873 temp_ep = &xhci->devs[i]->eps[j]; 874 ring = temp_ep->ring; 875 if (!ring) 876 continue; 877 xhci_dbg(xhci, "Killing URBs for slot ID %u, " 878 "ep index %u\n", i, j); 879 while (!list_empty(&ring->td_list)) { 880 cur_td = list_first_entry(&ring->td_list, 881 struct xhci_td, 882 td_list); 883 list_del_init(&cur_td->td_list); 884 if (!list_empty(&cur_td->cancelled_td_list)) 885 list_del_init(&cur_td->cancelled_td_list); 886 xhci_giveback_urb_in_irq(xhci, cur_td, 887 -ESHUTDOWN, "killed"); 888 } 889 while (!list_empty(&temp_ep->cancelled_td_list)) { 890 cur_td = list_first_entry( 891 &temp_ep->cancelled_td_list, 892 struct xhci_td, 893 cancelled_td_list); 894 list_del_init(&cur_td->cancelled_td_list); 895 xhci_giveback_urb_in_irq(xhci, cur_td, 896 -ESHUTDOWN, "killed"); 897 } 898 } 899 } 900 spin_unlock_irqrestore(&xhci->lock, flags); 901 xhci_dbg(xhci, "Calling usb_hc_died()\n"); 902 usb_hc_died(xhci_to_hcd(xhci)->primary_hcd); 903 xhci_dbg(xhci, "xHCI host controller is dead.\n"); 904 } 905 906 /* 907 * When we get a completion for a Set Transfer Ring Dequeue Pointer command, 908 * we need to clear the set deq pending flag in the endpoint ring state, so that 909 * the TD queueing code can ring the doorbell again. We also need to ring the 910 * endpoint doorbell to restart the ring, but only if there aren't more 911 * cancellations pending. 912 */ 913 static void handle_set_deq_completion(struct xhci_hcd *xhci, 914 struct xhci_event_cmd *event, 915 union xhci_trb *trb) 916 { 917 unsigned int slot_id; 918 unsigned int ep_index; 919 unsigned int stream_id; 920 struct xhci_ring *ep_ring; 921 struct xhci_virt_device *dev; 922 struct xhci_ep_ctx *ep_ctx; 923 struct xhci_slot_ctx *slot_ctx; 924 925 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3])); 926 ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); 927 stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2])); 928 dev = xhci->devs[slot_id]; 929 930 ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id); 931 if (!ep_ring) { 932 xhci_warn(xhci, "WARN Set TR deq ptr command for " 933 "freed stream ID %u\n", 934 stream_id); 935 /* XXX: Harmless??? */ 936 dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING; 937 return; 938 } 939 940 ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); 941 slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx); 942 943 if (GET_COMP_CODE(le32_to_cpu(event->status)) != COMP_SUCCESS) { 944 unsigned int ep_state; 945 unsigned int slot_state; 946 947 switch (GET_COMP_CODE(le32_to_cpu(event->status))) { 948 case COMP_TRB_ERR: 949 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because " 950 "of stream ID configuration\n"); 951 break; 952 case COMP_CTX_STATE: 953 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due " 954 "to incorrect slot or ep state.\n"); 955 ep_state = le32_to_cpu(ep_ctx->ep_info); 956 ep_state &= EP_STATE_MASK; 957 slot_state = le32_to_cpu(slot_ctx->dev_state); 958 slot_state = GET_SLOT_STATE(slot_state); 959 xhci_dbg(xhci, "Slot state = %u, EP state = %u\n", 960 slot_state, ep_state); 961 break; 962 case COMP_EBADSLT: 963 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because " 964 "slot %u was not enabled.\n", slot_id); 965 break; 966 default: 967 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown " 968 "completion code of %u.\n", 969 GET_COMP_CODE(le32_to_cpu(event->status))); 970 break; 971 } 972 /* OK what do we do now? 
/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so
 * that the TD queueing code can ring the doorbell again.  We also need to
 * ring the endpoint doorbell to restart the ring, but only if there aren't
 * more cancellations pending.
 */
static void handle_set_deq_completion(struct xhci_hcd *xhci,
                struct xhci_event_cmd *event,
                union xhci_trb *trb)
{
        unsigned int slot_id;
        unsigned int ep_index;
        unsigned int stream_id;
        struct xhci_ring *ep_ring;
        struct xhci_virt_device *dev;
        struct xhci_ep_ctx *ep_ctx;
        struct xhci_slot_ctx *slot_ctx;

        slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
        ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
        stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
        dev = xhci->devs[slot_id];

        ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
        if (!ep_ring) {
                xhci_warn(xhci, "WARN Set TR deq ptr command for "
                                "freed stream ID %u\n",
                                stream_id);
                /* XXX: Harmless??? */
                dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
                return;
        }

        ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
        slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);

        if (GET_COMP_CODE(le32_to_cpu(event->status)) != COMP_SUCCESS) {
                unsigned int ep_state;
                unsigned int slot_state;

                switch (GET_COMP_CODE(le32_to_cpu(event->status))) {
                case COMP_TRB_ERR:
                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid "
                                        "because of stream ID configuration\n");
                        break;
                case COMP_CTX_STATE:
                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
                                        "to incorrect slot or ep state.\n");
                        ep_state = le32_to_cpu(ep_ctx->ep_info);
                        ep_state &= EP_STATE_MASK;
                        slot_state = le32_to_cpu(slot_ctx->dev_state);
                        slot_state = GET_SLOT_STATE(slot_state);
                        xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
                                        slot_state, ep_state);
                        break;
                case COMP_EBADSLT:
                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed "
                                        "because slot %u was not enabled.\n",
                                        slot_id);
                        break;
                default:
                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
                                        "completion code of %u.\n",
                                        GET_COMP_CODE(le32_to_cpu(event->status)));
                        break;
                }
                /* OK what do we do now?  The endpoint state is hosed, and we
                 * should never get to this point if the synchronization
                 * between queueing and endpoint state were correct.  This
                 * might happen if the device gets disconnected after we've
                 * finished cancelling URBs, which might not be an error...
                 */
        } else {
                xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
                                le64_to_cpu(ep_ctx->deq));
                if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg,
                                        dev->eps[ep_index].queued_deq_ptr) ==
                                (le64_to_cpu(ep_ctx->deq) &
                                 ~(EP_CTX_CYCLE_MASK))) {
                        /* Update the ring's dequeue segment and dequeue
                         * pointer to reflect the new position.
                         */
                        ep_ring->deq_seg = dev->eps[ep_index].queued_deq_seg;
                        ep_ring->dequeue = dev->eps[ep_index].queued_deq_ptr;
                } else {
                        xhci_warn(xhci, "Mismatch between completed Set TR "
                                        "Deq Ptr command & xHCI internal "
                                        "state.\n");
                        xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
                                        dev->eps[ep_index].queued_deq_seg,
                                        dev->eps[ep_index].queued_deq_ptr);
                }
        }

        dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
        dev->eps[ep_index].queued_deq_seg = NULL;
        dev->eps[ep_index].queued_deq_ptr = NULL;
        /* Restart any rings with pending URBs */
        ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static void handle_reset_ep_completion(struct xhci_hcd *xhci,
                struct xhci_event_cmd *event,
                union xhci_trb *trb)
{
        int slot_id;
        unsigned int ep_index;

        slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
        ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
        /* This command will only fail if the endpoint wasn't halted,
         * but we don't care.
         */
        xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
                        GET_COMP_CODE(le32_to_cpu(event->status)));

        /* HW with the reset endpoint quirk needs to have a configure endpoint
         * command complete before the endpoint can be used.  Queue that here
         * because the HW can't handle two commands being queued in a row.
         */
        if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
                xhci_dbg(xhci, "Queueing configure endpoint command\n");
                xhci_queue_configure_endpoint(xhci,
                                xhci->devs[slot_id]->in_ctx->dma, slot_id,
                                false);
                xhci_ring_cmd_db(xhci);
        } else {
                /* Clear our internal halted state and restart the ring(s) */
                xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
        }
}

/* Check to see if a command in the device's command queue matches this one.
 * Signal the completion or free the command, and return 1.  Return 0 if the
 * completed command isn't at the head of the command list.
 */
static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
                struct xhci_virt_device *virt_dev,
                struct xhci_event_cmd *event)
{
        struct xhci_command *command;

        if (list_empty(&virt_dev->cmd_list))
                return 0;

        command = list_entry(virt_dev->cmd_list.next,
                        struct xhci_command, cmd_list);
        if (xhci->cmd_ring->dequeue != command->command_trb)
                return 0;

        command->status = GET_COMP_CODE(le32_to_cpu(event->status));
        list_del(&command->cmd_list);
        if (command->completion)
                complete(command->completion);
        else
                xhci_free_command(xhci, command);
        return 1;
}
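/*
 * Illustration only (not part of this driver): the wait-list handshake above
 * pairs with a submitter that allocates a struct xhci_command, remembers the
 * command TRB, and sleeps on the completion.  A rough sketch of that pattern
 * (locking and error handling omitted):
 *
 *      command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
 *      command->command_trb = xhci->cmd_ring->enqueue;
 *      list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
 *      ... queue the command TRB and ring the command doorbell ...
 *      wait_for_completion(command->completion);
 *      ... command->status now holds the completion code ...
 *
 * handle_cmd_in_cmd_wait_list() is the interrupt-side half that fills in
 * command->status and signals the completion.
 */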
static void handle_cmd_completion(struct xhci_hcd *xhci,
                struct xhci_event_cmd *event)
{
        int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
        u64 cmd_dma;
        dma_addr_t cmd_dequeue_dma;
        struct xhci_input_control_ctx *ctrl_ctx;
        struct xhci_virt_device *virt_dev;
        unsigned int ep_index;
        struct xhci_ring *ep_ring;
        unsigned int ep_state;

        cmd_dma = le64_to_cpu(event->cmd_trb);
        cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
                        xhci->cmd_ring->dequeue);
        /* Is the command ring deq ptr out of sync with the deq seg ptr? */
        if (cmd_dequeue_dma == 0) {
                xhci->error_bitmask |= 1 << 4;
                return;
        }
        /* Does the DMA address match our internal dequeue pointer address? */
        if (cmd_dma != (u64) cmd_dequeue_dma) {
                xhci->error_bitmask |= 1 << 5;
                return;
        }
        switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
                        & TRB_TYPE_BITMASK) {
        case TRB_TYPE(TRB_ENABLE_SLOT):
                if (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_SUCCESS)
                        xhci->slot_id = slot_id;
                else
                        xhci->slot_id = 0;
                complete(&xhci->addr_dev);
                break;
        case TRB_TYPE(TRB_DISABLE_SLOT):
                if (xhci->devs[slot_id]) {
                        if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
                                /* Delete default control endpoint resources */
                                xhci_free_device_endpoint_resources(xhci,
                                                xhci->devs[slot_id], true);
                        xhci_free_virt_device(xhci, slot_id);
                }
                break;
        case TRB_TYPE(TRB_CONFIG_EP):
                virt_dev = xhci->devs[slot_id];
                if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
                        break;
                /*
                 * Configure endpoint commands can come from the USB core
                 * configuration or alt setting changes, or because the HW
                 * needed an extra configure endpoint command after a reset
                 * endpoint command or streams were being configured.
                 * If the command was for a halted endpoint, the xHCI driver
                 * is not waiting on the configure endpoint command.
                 */
                ctrl_ctx = xhci_get_input_control_ctx(xhci,
                                virt_dev->in_ctx);
                /* Input ctx add_flags are the endpoint index plus one */
                ep_index = xhci_last_valid_endpoint(
                                le32_to_cpu(ctrl_ctx->add_flags)) - 1;
                /* A usb_set_interface() call directly after clearing a halted
                 * condition may race on this quirky hardware.  Not worth
                 * worrying about, since this is prototype hardware.  Not sure
                 * if this will work for streams, but streams support was
                 * untested on this prototype.
                 */
                if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
                                ep_index != (unsigned int) -1 &&
                                le32_to_cpu(ctrl_ctx->add_flags) - SLOT_FLAG ==
                                le32_to_cpu(ctrl_ctx->drop_flags)) {
                        ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
                        ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
                        if (!(ep_state & EP_HALTED))
                                goto bandwidth_change;
                        xhci_dbg(xhci, "Completed config ep cmd - "
                                        "last ep index = %d, state = %d\n",
                                        ep_index, ep_state);
                        /* Clear internal halted state and restart ring(s) */
                        xhci->devs[slot_id]->eps[ep_index].ep_state &=
                                ~EP_HALTED;
                        ring_doorbell_for_active_rings(xhci, slot_id,
                                        ep_index);
                        break;
                }
bandwidth_change:
                xhci_dbg(xhci, "Completed config ep cmd\n");
                xhci->devs[slot_id]->cmd_status =
                        GET_COMP_CODE(le32_to_cpu(event->status));
                complete(&xhci->devs[slot_id]->cmd_completion);
                break;
        case TRB_TYPE(TRB_EVAL_CONTEXT):
                virt_dev = xhci->devs[slot_id];
                if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
                        break;
                xhci->devs[slot_id]->cmd_status =
                        GET_COMP_CODE(le32_to_cpu(event->status));
                complete(&xhci->devs[slot_id]->cmd_completion);
                break;
        case TRB_TYPE(TRB_ADDR_DEV):
                xhci->devs[slot_id]->cmd_status =
                        GET_COMP_CODE(le32_to_cpu(event->status));
                complete(&xhci->addr_dev);
                break;
        case TRB_TYPE(TRB_STOP_RING):
                handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue, event);
                break;
        case TRB_TYPE(TRB_SET_DEQ):
                handle_set_deq_completion(xhci, event,
                                xhci->cmd_ring->dequeue);
                break;
        case TRB_TYPE(TRB_CMD_NOOP):
                break;
        case TRB_TYPE(TRB_RESET_EP):
                handle_reset_ep_completion(xhci, event,
                                xhci->cmd_ring->dequeue);
                break;
        case TRB_TYPE(TRB_RESET_DEV):
                xhci_dbg(xhci, "Completed reset device command.\n");
                slot_id = TRB_TO_SLOT_ID(
                        le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
                virt_dev = xhci->devs[slot_id];
                if (virt_dev)
                        handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
                else
                        xhci_warn(xhci, "Reset device command completion "
                                        "for disabled slot %u\n", slot_id);
                break;
        case TRB_TYPE(TRB_NEC_GET_FW):
                if (!(xhci->quirks & XHCI_NEC_HOST)) {
                        xhci->error_bitmask |= 1 << 6;
                        break;
                }
                xhci_dbg(xhci, "NEC firmware version %2x.%02x\n",
                                NEC_FW_MAJOR(le32_to_cpu(event->status)),
                                NEC_FW_MINOR(le32_to_cpu(event->status)));
                break;
        default:
                /* Skip over unknown commands on the event ring */
                xhci->error_bitmask |= 1 << 6;
                break;
        }
        inc_deq(xhci, xhci->cmd_ring, false);
}

static void handle_vendor_event(struct xhci_hcd *xhci,
                union xhci_trb *event)
{
        u32 trb_type;

        trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
        xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
        if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
                handle_cmd_completion(xhci, &event->event_cmd);
}
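/*
 * Illustration only (not part of this driver): the port translation done by
 * find_faked_portnum_from_hw_portnum() below, for a hypothetical host whose
 * port_array (indexed by hardware port ID - 1) reads
 * { USB3, USB3, USB2, USB2 }:
 *
 *      hw port 1 (USB3)  ->  faked index 0 on the USB 3.0 roothub
 *      hw port 2 (USB3)  ->  faked index 1 on the USB 3.0 roothub
 *      hw port 3 (USB2)  ->  faked index 0 on the USB 2.0 roothub
 *      hw port 4 (USB2)  ->  faked index 1 on the USB 2.0 roothub
 *
 * Only ports of the same speed as the roothub's hcd are counted, which is
 * why the result can index each split roothub's own port array.
 */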
/* @port_id: the one-based port ID from the hardware (indexed from array of
 * all port registers -- USB 3.0 and USB 2.0).
 *
 * Returns a zero-based port number, which is suitable for indexing into each
 * of the split roothubs' port arrays and bus state arrays.
 */
static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
                struct xhci_hcd *xhci, u32 port_id)
{
        unsigned int i;
        unsigned int num_similar_speed_ports = 0;

        /* port_id from the hardware is 1-based, but port_array[],
         * usb3_ports[], and usb2_ports are 0-based indexes.  Count the
         * number of similar speed ports, up to 1 port before this port.
         */
        for (i = 0; i < (port_id - 1); i++) {
                u8 port_speed = xhci->port_array[i];

                /*
                 * Skip ports that don't have known speeds, or have duplicate
                 * Extended Capabilities port speed entries.
                 */
                if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
                        continue;

                /*
                 * USB 3.0 ports are always under a USB 3.0 hub.  USB 2.0 and
                 * 1.1 ports are under the USB 2.0 hub.  If the port speed
                 * matches the device speed, it's a similar speed port.
                 */
                if ((port_speed == 0x03) == (hcd->speed == HCD_USB3))
                        num_similar_speed_ports++;
        }
        return num_similar_speed_ports;
}

static void handle_port_status(struct xhci_hcd *xhci,
                union xhci_trb *event)
{
        struct usb_hcd *hcd;
        u32 port_id;
        u32 temp, temp1;
        int max_ports;
        int slot_id;
        unsigned int faked_port_index;
        u8 major_revision;
        struct xhci_bus_state *bus_state;
        __le32 __iomem **port_array;
        bool bogus_port_status = false;

        /* Port status change events always have a successful completion code
         */
        if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) !=
                        COMP_SUCCESS) {
                xhci_warn(xhci,
                                "WARN: xHC returned failed port status event\n");
                xhci->error_bitmask |= 1 << 8;
        }
        port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
        xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);

        max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
        if ((port_id <= 0) || (port_id > max_ports)) {
                xhci_warn(xhci, "Invalid port id %d\n", port_id);
                bogus_port_status = true;
                goto cleanup;
        }

        /* Figure out which usb_hcd this port is attached to:
         * is it a USB 3.0 port or a USB 2.0/1.1 port?
         */
        major_revision = xhci->port_array[port_id - 1];
        if (major_revision == 0) {
                xhci_warn(xhci, "Event for port %u not in "
                                "Extended Capabilities, ignoring.\n",
                                port_id);
                bogus_port_status = true;
                goto cleanup;
        }
        if (major_revision == DUPLICATE_ENTRY) {
                xhci_warn(xhci, "Event for port %u duplicated in "
                                "Extended Capabilities, ignoring.\n",
                                port_id);
                bogus_port_status = true;
                goto cleanup;
        }

        /*
         * Hardware port IDs reported by a Port Status Change Event include
         * USB 3.0 and USB 2.0 ports.  We want to check if the port has
         * reported a resume event, but we first need to translate the
         * hardware port ID into the index into the ports on the correct
         * split roothub, and the correct bus_state structure.
         */
        /* Find the right roothub.
         */
        hcd = xhci_to_hcd(xhci);
        if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
                hcd = xhci->shared_hcd;
        bus_state = &xhci->bus_state[hcd_index(hcd)];
        if (hcd->speed == HCD_USB3)
                port_array = xhci->usb3_ports;
        else
                port_array = xhci->usb2_ports;
        /* Find the faked port hub number */
        faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
                        port_id);

        temp = xhci_readl(xhci, port_array[faked_port_index]);
        if (hcd->state == HC_STATE_SUSPENDED) {
                xhci_dbg(xhci, "resume root hub\n");
                usb_hcd_resume_root_hub(hcd);
        }

        if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
                xhci_dbg(xhci, "port resume event for port %d\n", port_id);

                temp1 = xhci_readl(xhci, &xhci->op_regs->command);
                if (!(temp1 & CMD_RUN)) {
                        xhci_warn(xhci, "xHC is not running.\n");
                        goto cleanup;
                }

                if (DEV_SUPERSPEED(temp)) {
                        xhci_dbg(xhci, "resume SS port %d\n", port_id);
                        xhci_set_link_state(xhci, port_array,
                                        faked_port_index, XDEV_U0);
                        slot_id = xhci_find_slot_id_by_port(hcd, xhci,
                                        faked_port_index);
                        if (!slot_id) {
                                xhci_dbg(xhci, "slot_id is zero\n");
                                goto cleanup;
                        }
                        xhci_ring_device(xhci, slot_id);
                        xhci_dbg(xhci, "resume SS port %d finished\n",
                                        port_id);
                        /* Clear PORT_PLC */
                        xhci_test_and_clear_bit(xhci, port_array,
                                        faked_port_index, PORT_PLC);
                } else {
                        xhci_dbg(xhci, "resume HS port %d\n", port_id);
                        bus_state->resume_done[faked_port_index] = jiffies +
                                msecs_to_jiffies(20);
                        mod_timer(&hcd->rh_timer,
                                        bus_state->resume_done[faked_port_index]);
                        /* Do the rest in GetPortStatus */
                }
        }

        if (hcd->speed != HCD_USB3)
                xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
                                PORT_PLC);

cleanup:
        /* Update event ring dequeue pointer before dropping the lock */
        inc_deq(xhci, xhci->event_ring, true);

        /* Don't make the USB core poll the roothub if we got a bad port
         * status change event.  Besides, at that point we can't tell which
         * roothub (USB 2.0 or USB 3.0) to kick.
         */
        if (bogus_port_status)
                return;

        spin_unlock(&xhci->lock);
        /* Pass this up to the core */
        usb_hcd_poll_rh_status(hcd);
        spin_lock(&xhci->lock);
}
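/*
 * Illustration only (not part of this driver): trb_in_td() below is a DMA
 * range-containment walk.  Within one segment there are two cases, depending
 * on whether the TD wraps past the segment end:
 *
 *      no wrap:   [start_dma ........ end_trb_dma]
 *                 hit if start_dma <= suspect_dma <= end_trb_dma
 *
 *      wrapped:   [seg->dma .. end_trb_dma]  ...  [start_dma .. end_seg_dma]
 *                 hit if suspect_dma falls in either piece
 *
 * where end_seg_dma is the DMA address of the segment's last TRB.
 */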
/*
 * This TD is defined by the TRBs starting at start_trb in start_seg and
 * ending at end_trb, which may be in another segment.  If the suspect DMA
 * address is a TRB in this TD, this function returns that TRB's segment.
 * Otherwise it returns 0.
 */
struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
                union xhci_trb *start_trb,
                union xhci_trb *end_trb,
                dma_addr_t suspect_dma)
{
        dma_addr_t start_dma;
        dma_addr_t end_seg_dma;
        dma_addr_t end_trb_dma;
        struct xhci_segment *cur_seg;

        start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
        cur_seg = start_seg;

        do {
                if (start_dma == 0)
                        return NULL;
                /* We may get an event for a Link TRB in the middle of a TD */
                end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
                                &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
                /* If the end TRB isn't in this segment, this is set to 0 */
                end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);

                if (end_trb_dma > 0) {
                        /* The end TRB is in this segment, so suspect should
                         * be here
                         */
                        if (start_dma <= end_trb_dma) {
                                if (suspect_dma >= start_dma &&
                                                suspect_dma <= end_trb_dma)
                                        return cur_seg;
                        } else {
                                /* Case for one segment with
                                 * a TD wrapped around to the top
                                 */
                                if ((suspect_dma >= start_dma &&
                                                suspect_dma <= end_seg_dma) ||
                                                (suspect_dma >= cur_seg->dma &&
                                                 suspect_dma <= end_trb_dma))
                                        return cur_seg;
                        }
                        return NULL;
                } else {
                        /* Might still be somewhere in this segment */
                        if (suspect_dma >= start_dma &&
                                        suspect_dma <= end_seg_dma)
                                return cur_seg;
                }
                cur_seg = cur_seg->next;
                start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
        } while (cur_seg != start_seg);

        return NULL;
}

static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
                unsigned int slot_id, unsigned int ep_index,
                unsigned int stream_id,
                struct xhci_td *td, union xhci_trb *event_trb)
{
        struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
        ep->ep_state |= EP_HALTED;
        ep->stopped_td = td;
        ep->stopped_trb = event_trb;
        ep->stopped_stream = stream_id;

        xhci_queue_reset_ep(xhci, slot_id, ep_index);
        xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);

        ep->stopped_td = NULL;
        ep->stopped_trb = NULL;
        ep->stopped_stream = 0;

        xhci_ring_cmd_db(xhci);
}

/* Check if an error has halted the endpoint ring.  The class driver will
 * clean up the halt for a non-default control endpoint if we indicate a
 * stall.  However, a babble and other errors also halt the endpoint ring,
 * and the class driver won't clear the halt in that case, so we need to
 * issue a Set Transfer Ring Dequeue Pointer command manually.
 */
static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
                struct xhci_ep_ctx *ep_ctx,
                unsigned int trb_comp_code)
{
        /* TRB completion codes that may require a manual halt cleanup */
        if (trb_comp_code == COMP_TX_ERR ||
                        trb_comp_code == COMP_BABBLE ||
                        trb_comp_code == COMP_SPLIT_ERR)
                /* The 0.95 spec says a babbling control endpoint
                 * is not halted.  The 0.96 spec says it is.  Some HW
                 * claims to be 0.95 compliant, but it halts the control
                 * endpoint anyway.  Check if a babble halted the
                 * endpoint.
                 */
                if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
                                cpu_to_le32(EP_STATE_HALTED))
                        return 1;

        return 0;
}

int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
{
        if (trb_comp_code >= 224 && trb_comp_code <= 255) {
                /* Vendor defined "informational" completion code,
                 * treat as not-an-error.
                 */
                xhci_dbg(xhci, "Vendor defined info completion code %u\n",
                                trb_comp_code);
                xhci_dbg(xhci, "Treating code as success.\n");
                return 1;
        }
        return 0;
}

/*
 * Finish the td processing, remove the td from td list;
 * Return 1 if the urb can be given back.
 */
static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
                union xhci_trb *event_trb, struct xhci_transfer_event *event,
                struct xhci_virt_ep *ep, int *status, bool skip)
{
        struct xhci_virt_device *xdev;
        struct xhci_ring *ep_ring;
        unsigned int slot_id;
        int ep_index;
        struct urb *urb = NULL;
        struct xhci_ep_ctx *ep_ctx;
        int ret = 0;
        struct urb_priv *urb_priv;
        u32 trb_comp_code;

        slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
        xdev = xhci->devs[slot_id];
        ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
        ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
        ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
        trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

        if (skip)
                goto td_cleanup;

        if (trb_comp_code == COMP_STOP_INVAL ||
                        trb_comp_code == COMP_STOP) {
                /* The Endpoint Stop Command completion will take care of any
                 * stopped TDs.  A stopped TD may be restarted, so don't
                 * update the ring dequeue pointer or take this TD off any
                 * lists yet.
                 */
                ep->stopped_td = td;
                ep->stopped_trb = event_trb;
                return 0;
        } else {
                if (trb_comp_code == COMP_STALL) {
                        /* The transfer is completed from the driver's
                         * perspective, but we need to issue a set dequeue
                         * command for this stalled endpoint to move the
                         * dequeue pointer past the TD.  We can't do that here
                         * because the halt condition must be cleared first.
                         * Let the USB class driver clear the stall later.
                         */
                        ep->stopped_td = td;
                        ep->stopped_trb = event_trb;
                        ep->stopped_stream = ep_ring->stream_id;
                } else if (xhci_requires_manual_halt_cleanup(xhci,
                                        ep_ctx, trb_comp_code)) {
                        /* Other types of errors halt the endpoint, but the
                         * class driver doesn't call usb_reset_endpoint()
                         * unless the error is -EPIPE.  Clear the halted
                         * status in the xHCI hardware manually.
                         */
                        xhci_cleanup_halted_endpoint(xhci,
                                        slot_id, ep_index, ep_ring->stream_id,
                                        td, event_trb);
                } else {
                        /* Update ring dequeue pointer */
                        while (ep_ring->dequeue != td->last_trb)
                                inc_deq(xhci, ep_ring, false);
                        inc_deq(xhci, ep_ring, false);
                }

td_cleanup:
                /* Clean up the endpoint's TD list */
                urb = td->urb;
                urb_priv = urb->hcpriv;

                /* Do one last check of the actual transfer length.
                 * If the host controller said we transferred more data than
                 * the buffer length, urb->actual_length will be a very big
                 * number (since it's unsigned).  Play it safe and say we
                 * didn't transfer anything.
                 */
                if (urb->actual_length > urb->transfer_buffer_length) {
len = %u, " 1577 "act. len = %u\n", 1578 urb->transfer_buffer_length, 1579 urb->actual_length); 1580 urb->actual_length = 0; 1581 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 1582 *status = -EREMOTEIO; 1583 else 1584 *status = 0; 1585 } 1586 list_del_init(&td->td_list); 1587 /* Was this TD slated to be cancelled but completed anyway? */ 1588 if (!list_empty(&td->cancelled_td_list)) 1589 list_del_init(&td->cancelled_td_list); 1590 1591 urb_priv->td_cnt++; 1592 /* Giveback the urb when all the tds are completed */ 1593 if (urb_priv->td_cnt == urb_priv->length) { 1594 ret = 1; 1595 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { 1596 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--; 1597 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs 1598 == 0) { 1599 if (xhci->quirks & XHCI_AMD_PLL_FIX) 1600 usb_amd_quirk_pll_enable(); 1601 } 1602 } 1603 } 1604 } 1605 1606 return ret; 1607 } 1608 1609 /* 1610 * Process control tds, update urb status and actual_length. 1611 */ 1612 static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, 1613 union xhci_trb *event_trb, struct xhci_transfer_event *event, 1614 struct xhci_virt_ep *ep, int *status) 1615 { 1616 struct xhci_virt_device *xdev; 1617 struct xhci_ring *ep_ring; 1618 unsigned int slot_id; 1619 int ep_index; 1620 struct xhci_ep_ctx *ep_ctx; 1621 u32 trb_comp_code; 1622 1623 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); 1624 xdev = xhci->devs[slot_id]; 1625 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; 1626 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); 1627 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 1628 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); 1629 1630 xhci_debug_trb(xhci, xhci->event_ring->dequeue); 1631 switch (trb_comp_code) { 1632 case COMP_SUCCESS: 1633 if (event_trb == ep_ring->dequeue) { 1634 xhci_warn(xhci, "WARN: Success on ctrl setup TRB " 1635 "without IOC set??\n"); 1636 *status = -ESHUTDOWN; 1637 } else if (event_trb != td->last_trb) { 1638 xhci_warn(xhci, "WARN: Success on ctrl data TRB " 1639 "without IOC set??\n"); 1640 *status = -ESHUTDOWN; 1641 } else { 1642 *status = 0; 1643 } 1644 break; 1645 case COMP_SHORT_TX: 1646 xhci_warn(xhci, "WARN: short transfer on control ep\n"); 1647 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 1648 *status = -EREMOTEIO; 1649 else 1650 *status = 0; 1651 break; 1652 case COMP_STOP_INVAL: 1653 case COMP_STOP: 1654 return finish_td(xhci, td, event_trb, event, ep, status, false); 1655 default: 1656 if (!xhci_requires_manual_halt_cleanup(xhci, 1657 ep_ctx, trb_comp_code)) 1658 break; 1659 xhci_dbg(xhci, "TRB error code %u, " 1660 "halted endpoint index = %u\n", 1661 trb_comp_code, ep_index); 1662 /* else fall through */ 1663 case COMP_STALL: 1664 /* Did we transfer part of the data (middle) phase? */ 1665 if (event_trb != ep_ring->dequeue && 1666 event_trb != td->last_trb) 1667 td->urb->actual_length = 1668 td->urb->transfer_buffer_length 1669 - TRB_LEN(le32_to_cpu(event->transfer_len)); 1670 else 1671 td->urb->actual_length = 0; 1672 1673 xhci_cleanup_halted_endpoint(xhci, 1674 slot_id, ep_index, 0, td, event_trb); 1675 return finish_td(xhci, td, event_trb, event, ep, status, true); 1676 } 1677 /* 1678 * Did we transfer any data, despite the errors that might have 1679 * happened? I.e. did we get past the setup stage? 
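* (If event_trb == ep_ring->dequeue here, the event was for the setup-stage TRB itself, so no data stage completed; compare the COMP_SUCCESS "ctrl setup TRB" warning above.)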
1680 */ 1681 if (event_trb != ep_ring->dequeue) { 1682 /* The event was for the status stage */ 1683 if (event_trb == td->last_trb) { 1684 if (td->urb->actual_length != 0) { 1685 /* Don't overwrite a previously set error code 1686 */ 1687 if ((*status == -EINPROGRESS || *status == 0) && 1688 (td->urb->transfer_flags 1689 & URB_SHORT_NOT_OK)) 1690 /* Did we already see a short data 1691 * stage? */ 1692 *status = -EREMOTEIO; 1693 } else { 1694 td->urb->actual_length = 1695 td->urb->transfer_buffer_length; 1696 } 1697 } else { 1698 /* Maybe the event was for the data stage? */ 1699 td->urb->actual_length = 1700 td->urb->transfer_buffer_length - 1701 TRB_LEN(le32_to_cpu(event->transfer_len)); 1702 xhci_dbg(xhci, "Waiting for status " 1703 "stage event\n"); 1704 return 0; 1705 } 1706 } 1707 1708 return finish_td(xhci, td, event_trb, event, ep, status, false); 1709 } 1710 1711 /* 1712 * Process isochronous tds, update urb packet status and actual_length. 1713 */ 1714 static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, 1715 union xhci_trb *event_trb, struct xhci_transfer_event *event, 1716 struct xhci_virt_ep *ep, int *status) 1717 { 1718 struct xhci_ring *ep_ring; 1719 struct urb_priv *urb_priv; 1720 int idx; 1721 int len = 0; 1722 union xhci_trb *cur_trb; 1723 struct xhci_segment *cur_seg; 1724 struct usb_iso_packet_descriptor *frame; 1725 u32 trb_comp_code; 1726 bool skip_td = false; 1727 1728 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); 1729 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); 1730 urb_priv = td->urb->hcpriv; 1731 idx = urb_priv->td_cnt; 1732 frame = &td->urb->iso_frame_desc[idx]; 1733 1734 /* handle completion code */ 1735 switch (trb_comp_code) { 1736 case COMP_SUCCESS: 1737 frame->status = 0; 1738 break; 1739 case COMP_SHORT_TX: 1740 frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ? 
1741 -EREMOTEIO : 0; 1742 break; 1743 case COMP_BW_OVER: 1744 frame->status = -ECOMM; 1745 skip_td = true; 1746 break; 1747 case COMP_BUFF_OVER: 1748 case COMP_BABBLE: 1749 frame->status = -EOVERFLOW; 1750 skip_td = true; 1751 break; 1752 case COMP_DEV_ERR: 1753 case COMP_STALL: 1754 frame->status = -EPROTO; 1755 skip_td = true; 1756 break; 1757 case COMP_STOP: 1758 case COMP_STOP_INVAL: 1759 break; 1760 default: 1761 frame->status = -1; 1762 break; 1763 } 1764 1765 if (trb_comp_code == COMP_SUCCESS || skip_td) { 1766 frame->actual_length = frame->length; 1767 td->urb->actual_length += frame->length; 1768 } else { 1769 for (cur_trb = ep_ring->dequeue, 1770 cur_seg = ep_ring->deq_seg; cur_trb != event_trb; 1771 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { 1772 if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) && 1773 !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) 1774 len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])); 1775 } 1776 len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - 1777 TRB_LEN(le32_to_cpu(event->transfer_len)); 1778 1779 if (trb_comp_code != COMP_STOP_INVAL) { 1780 frame->actual_length = len; 1781 td->urb->actual_length += len; 1782 } 1783 } 1784 1785 return finish_td(xhci, td, event_trb, event, ep, status, false); 1786 } 1787 1788 static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, 1789 struct xhci_transfer_event *event, 1790 struct xhci_virt_ep *ep, int *status) 1791 { 1792 struct xhci_ring *ep_ring; 1793 struct urb_priv *urb_priv; 1794 struct usb_iso_packet_descriptor *frame; 1795 int idx; 1796 1797 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); 1798 urb_priv = td->urb->hcpriv; 1799 idx = urb_priv->td_cnt; 1800 frame = &td->urb->iso_frame_desc[idx]; 1801 1802 /* The transfer is partly done. */ 1803 frame->status = -EXDEV; 1804 1805 /* calc actual length */ 1806 frame->actual_length = 0; 1807 1808 /* Update ring dequeue pointer */ 1809 while (ep_ring->dequeue != td->last_trb) 1810 inc_deq(xhci, ep_ring, false); 1811 inc_deq(xhci, ep_ring, false); 1812 1813 return finish_td(xhci, td, NULL, event, ep, status, true); 1814 } 1815 1816 /* 1817 * Process bulk and interrupt tds, update urb status and actual_length. 1818 */ 1819 static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, 1820 union xhci_trb *event_trb, struct xhci_transfer_event *event, 1821 struct xhci_virt_ep *ep, int *status) 1822 { 1823 struct xhci_ring *ep_ring; 1824 union xhci_trb *cur_trb; 1825 struct xhci_segment *cur_seg; 1826 u32 trb_comp_code; 1827 1828 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); 1829 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); 1830 1831 switch (trb_comp_code) { 1832 case COMP_SUCCESS: 1833 /* Double check that the HW transferred everything. 
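* A success event that does not point at the TD's last TRB means the controller completed the TD early; it is treated as a short transfer below.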
*/ 1834 if (event_trb != td->last_trb) { 1835 xhci_warn(xhci, "WARN Successful completion " 1836 "on short TX\n"); 1837 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 1838 *status = -EREMOTEIO; 1839 else 1840 *status = 0; 1841 } else { 1842 *status = 0; 1843 } 1844 break; 1845 case COMP_SHORT_TX: 1846 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 1847 *status = -EREMOTEIO; 1848 else 1849 *status = 0; 1850 break; 1851 default: 1852 /* Others already handled above */ 1853 break; 1854 } 1855 if (trb_comp_code == COMP_SHORT_TX) 1856 xhci_dbg(xhci, "ep %#x - asked for %d bytes, " 1857 "%d bytes untransferred\n", 1858 td->urb->ep->desc.bEndpointAddress, 1859 td->urb->transfer_buffer_length, 1860 TRB_LEN(le32_to_cpu(event->transfer_len))); 1861 /* Fast path - was this the last TRB in the TD for this URB? */ 1862 if (event_trb == td->last_trb) { 1863 if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { 1864 td->urb->actual_length = 1865 td->urb->transfer_buffer_length - 1866 TRB_LEN(le32_to_cpu(event->transfer_len)); 1867 if (td->urb->transfer_buffer_length < 1868 td->urb->actual_length) { 1869 xhci_warn(xhci, "HC gave bad length " 1870 "of %d bytes left\n", 1871 TRB_LEN(le32_to_cpu(event->transfer_len))); 1872 td->urb->actual_length = 0; 1873 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 1874 *status = -EREMOTEIO; 1875 else 1876 *status = 0; 1877 } 1878 /* Don't overwrite a previously set error code */ 1879 if (*status == -EINPROGRESS) { 1880 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 1881 *status = -EREMOTEIO; 1882 else 1883 *status = 0; 1884 } 1885 } else { 1886 td->urb->actual_length = 1887 td->urb->transfer_buffer_length; 1888 /* Ignore a short packet completion if the 1889 * untransferred length was zero. 1890 */ 1891 if (*status == -EREMOTEIO) 1892 *status = 0; 1893 } 1894 } else { 1895 /* Slow path - walk the list, starting from the dequeue 1896 * pointer, to get the actual length transferred. 1897 */ 1898 td->urb->actual_length = 0; 1899 for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg; 1900 cur_trb != event_trb; 1901 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { 1902 if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) && 1903 !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) 1904 td->urb->actual_length += 1905 TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])); 1906 } 1907 /* If the ring didn't stop on a Link or No-op TRB, add 1908 * in the actual bytes transferred from the Normal TRB 1909 */ 1910 if (trb_comp_code != COMP_STOP_INVAL) 1911 td->urb->actual_length += 1912 TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - 1913 TRB_LEN(le32_to_cpu(event->transfer_len)); 1914 } 1915 1916 return finish_td(xhci, td, event_trb, event, ep, status, false); 1917 } 1918 1919 /* 1920 * If this function returns an error condition, it means it got a Transfer 1921 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address. 1922 * At this point, the host controller is probably hosed and should be reset. 
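* A zero return means the event was consumed (including skipped isoc TDs); the caller only records a negative return in xhci->error_bitmask.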
1923 */ 1924 static int handle_tx_event(struct xhci_hcd *xhci, 1925 struct xhci_transfer_event *event) 1926 { 1927 struct xhci_virt_device *xdev; 1928 struct xhci_virt_ep *ep; 1929 struct xhci_ring *ep_ring; 1930 unsigned int slot_id; 1931 int ep_index; 1932 struct xhci_td *td = NULL; 1933 dma_addr_t event_dma; 1934 struct xhci_segment *event_seg; 1935 union xhci_trb *event_trb; 1936 struct urb *urb = NULL; 1937 int status = -EINPROGRESS; 1938 struct urb_priv *urb_priv; 1939 struct xhci_ep_ctx *ep_ctx; 1940 struct list_head *tmp; 1941 u32 trb_comp_code; 1942 int ret = 0; 1943 int td_num = 0; 1944 1945 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); 1946 xdev = xhci->devs[slot_id]; 1947 if (!xdev) { 1948 xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n"); 1949 return -ENODEV; 1950 } 1951 1952 /* Endpoint ID is 1 based, our index is zero based */ 1953 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; 1954 ep = &xdev->eps[ep_index]; 1955 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); 1956 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 1957 if (!ep_ring || 1958 (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) == 1959 EP_STATE_DISABLED) { 1960 xhci_err(xhci, "ERROR Transfer event for disabled endpoint " 1961 "or incorrect stream ring\n"); 1962 return -ENODEV; 1963 } 1964 1965 /* Count current td numbers if ep->skip is set */ 1966 if (ep->skip) { 1967 list_for_each(tmp, &ep_ring->td_list) 1968 td_num++; 1969 } 1970 1971 event_dma = le64_to_cpu(event->buffer); 1972 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); 1973 /* Look for common error cases */ 1974 switch (trb_comp_code) { 1975 /* Skip codes that require special handling depending on 1976 * transfer type 1977 */ 1978 case COMP_SUCCESS: 1979 case COMP_SHORT_TX: 1980 break; 1981 case COMP_STOP: 1982 xhci_dbg(xhci, "Stopped on Transfer TRB\n"); 1983 break; 1984 case COMP_STOP_INVAL: 1985 xhci_dbg(xhci, "Stopped on No-op or Link TRB\n"); 1986 break; 1987 case COMP_STALL: 1988 xhci_warn(xhci, "WARN: Stalled endpoint\n"); 1989 ep->ep_state |= EP_HALTED; 1990 status = -EPIPE; 1991 break; 1992 case COMP_TRB_ERR: 1993 xhci_warn(xhci, "WARN: TRB error on endpoint\n"); 1994 status = -EILSEQ; 1995 break; 1996 case COMP_SPLIT_ERR: 1997 case COMP_TX_ERR: 1998 xhci_warn(xhci, "WARN: transfer error on endpoint\n"); 1999 status = -EPROTO; 2000 break; 2001 case COMP_BABBLE: 2002 xhci_warn(xhci, "WARN: babble error on endpoint\n"); 2003 status = -EOVERFLOW; 2004 break; 2005 case COMP_DB_ERR: 2006 xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n"); 2007 status = -ENOSR; 2008 break; 2009 case COMP_BW_OVER: 2010 xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n"); 2011 break; 2012 case COMP_BUFF_OVER: 2013 xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n"); 2014 break; 2015 case COMP_UNDERRUN: 2016 /* 2017 * When the Isoch ring is empty, the xHC will generate 2018 * a Ring Overrun Event for IN Isoch endpoint or Ring 2019 * Underrun Event for OUT Isoch endpoint. 
2020 */ 2021 xhci_dbg(xhci, "underrun event on endpoint\n"); 2022 if (!list_empty(&ep_ring->td_list)) 2023 xhci_dbg(xhci, "Underrun Event for slot %d ep %d " 2024 "still with TDs queued?\n", 2025 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), 2026 ep_index); 2027 goto cleanup; 2028 case COMP_OVERRUN: 2029 xhci_dbg(xhci, "overrun event on endpoint\n"); 2030 if (!list_empty(&ep_ring->td_list)) 2031 xhci_dbg(xhci, "Overrun Event for slot %d ep %d " 2032 "still with TDs queued?\n", 2033 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), 2034 ep_index); 2035 goto cleanup; 2036 case COMP_DEV_ERR: 2037 xhci_warn(xhci, "WARN: detected an incompatible device\n"); 2038 status = -EPROTO; 2039 break; 2040 case COMP_MISSED_INT: 2041 /* 2042 * When a Missed Service Error is encountered, the xHC may have 2043 * missed one or more isoc TDs. 2044 * Set the skip flag of the ep_ring; the missed TDs will be 2045 * completed as short transfers the next time the ring is processed. 2046 */ 2047 ep->skip = true; 2048 xhci_dbg(xhci, "Missed service interval error, set skip flag\n"); 2049 goto cleanup; 2050 default: 2051 if (xhci_is_vendor_info_code(xhci, trb_comp_code)) { 2052 status = 0; 2053 break; 2054 } 2055 xhci_warn(xhci, "ERROR Unknown event condition, HC probably " 2056 "busted\n"); 2057 goto cleanup; 2058 } 2059 2060 do { 2061 /* This TRB should be in the TD at the head of this ring's 2062 * TD list. 2063 */ 2064 if (list_empty(&ep_ring->td_list)) { 2065 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d " 2066 "with no TDs queued?\n", 2067 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), 2068 ep_index); 2069 xhci_dbg(xhci, "Event TRB with TRB type ID %u\n", 2070 (le32_to_cpu(event->flags) & 2071 TRB_TYPE_BITMASK)>>10); 2072 xhci_print_trb_offsets(xhci, (union xhci_trb *) event); 2073 if (ep->skip) { 2074 ep->skip = false; 2075 xhci_dbg(xhci, "td_list is empty while skip " 2076 "flag set. Clear skip flag.\n"); 2077 } 2078 ret = 0; 2079 goto cleanup; 2080 } 2081 2082 /* We've skipped all the TDs on the ep ring when ep->skip is set */ 2083 if (ep->skip && td_num == 0) { 2084 ep->skip = false; 2085 xhci_dbg(xhci, "All tds on the ep_ring skipped. " 2086 "Clear skip flag.\n"); 2087 ret = 0; 2088 goto cleanup; 2089 } 2090 2091 td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list); 2092 if (ep->skip) 2093 td_num--; 2094 2095 /* Is this a TRB in the currently executing TD? */ 2096 event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue, 2097 td->last_trb, event_dma); 2098 2099 /* 2100 * Skip the Force Stopped Event. The event_trb (event_dma) of an FSE 2101 * is not in the current TD pointed to by ep_ring->dequeue, because 2102 * the hardware dequeue pointer is still at the previous TRB 2103 * of the current TD. The previous TRB may be a Link TRB or the 2104 * last TRB of the previous TD. The command completion handler 2105 * will take care of the rest. 2106 */ 2107 if (!event_seg && trb_comp_code == COMP_STOP_INVAL) { 2108 ret = 0; 2109 goto cleanup; 2110 } 2111 2112 if (!event_seg) { 2113 if (!ep->skip || 2114 !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) { 2115 /* Some host controllers give a spurious 2116 * successful event after a short transfer. 2117 * Ignore it. 2118 */ 2119 if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) && 2120 ep_ring->last_td_was_short) { 2121 ep_ring->last_td_was_short = false; 2122 ret = 0; 2123 goto cleanup; 2124 } 2125 /* HC is busted, give up!
*/ 2126 xhci_err(xhci, 2127 "ERROR Transfer event TRB DMA ptr not " 2128 "part of current TD\n"); 2129 return -ESHUTDOWN; 2130 } 2131 2132 ret = skip_isoc_td(xhci, td, event, ep, &status); 2133 goto cleanup; 2134 } 2135 if (trb_comp_code == COMP_SHORT_TX) 2136 ep_ring->last_td_was_short = true; 2137 else 2138 ep_ring->last_td_was_short = false; 2139 2140 if (ep->skip) { 2141 xhci_dbg(xhci, "Found td. Clear skip flag.\n"); 2142 ep->skip = false; 2143 } 2144 2145 event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / 2146 sizeof(*event_trb)]; 2147 /* 2148 * No-op TRBs should not trigger interrupts. 2149 * If event_trb is a no-op TRB, it means the 2150 * corresponding TD has been cancelled. Just ignore 2151 * the TD. 2152 */ 2153 if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) { 2154 xhci_dbg(xhci, 2155 "event_trb is a no-op TRB. Skip it\n"); 2156 goto cleanup; 2157 } 2158 2159 /* Now update the urb's actual_length and give it back to 2160 * the core 2161 */ 2162 if (usb_endpoint_xfer_control(&td->urb->ep->desc)) 2163 ret = process_ctrl_td(xhci, td, event_trb, event, ep, 2164 &status); 2165 else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc)) 2166 ret = process_isoc_td(xhci, td, event_trb, event, ep, 2167 &status); 2168 else 2169 ret = process_bulk_intr_td(xhci, td, event_trb, event, 2170 ep, &status); 2171 2172 cleanup: 2173 /* 2174 * Do not update event ring dequeue pointer if ep->skip is set. 2175 * We will roll back and continue processing the missed TDs. 2176 */ 2177 if (trb_comp_code == COMP_MISSED_INT || !ep->skip) { 2178 inc_deq(xhci, xhci->event_ring, true); 2179 } 2180 2181 if (ret) { 2182 urb = td->urb; 2183 urb_priv = urb->hcpriv; 2184 /* Leave the TD around for the reset endpoint function 2185 * to use (but only if it's not a control endpoint, 2186 * since we already queued the Set TR dequeue pointer 2187 * command for stalled control endpoints). 2188 */ 2189 if (usb_endpoint_xfer_control(&urb->ep->desc) || 2190 (trb_comp_code != COMP_STALL && 2191 trb_comp_code != COMP_BABBLE)) 2192 xhci_urb_free_priv(xhci, urb_priv); 2193 2194 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb); 2195 if ((urb->actual_length != urb->transfer_buffer_length && 2196 (urb->transfer_flags & 2197 URB_SHORT_NOT_OK)) || 2198 (status != 0 && 2199 !usb_endpoint_xfer_isoc(&urb->ep->desc))) 2200 xhci_dbg(xhci, "Giveback URB %p, len = %d, " 2201 "expected = %x, status = %d\n", 2202 urb, urb->actual_length, 2203 urb->transfer_buffer_length, 2204 status); 2205 spin_unlock(&xhci->lock); 2206 /* EHCI, UHCI, and OHCI always unconditionally set the 2207 * urb->status of an isochronous endpoint to 0. 2208 */ 2209 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) 2210 status = 0; 2211 usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status); 2212 spin_lock(&xhci->lock); 2213 } 2214 2215 /* 2216 * If ep->skip is set, it means there are missed TDs on the 2217 * endpoint ring that we need to take care of. 2218 * Process them as short transfers until we reach the TD pointed 2219 * to by the event. 2220 */ 2221 } while (ep->skip && trb_comp_code != COMP_MISSED_INT); 2222 2223 return 0; 2224 } 2225 2226 /* 2227 * This function handles all OS-owned events on the event ring. It may drop 2228 * xhci->lock between event processing (e.g. to pass up port status changes). 2229 * Returns >0 for "possibly more events to process" (caller should call again), 2230 * otherwise 0 if done. In future, <0 returns should indicate error code.
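* (xhci_irq() below simply drains the ring with: while (xhci_handle_event(xhci) > 0) {} )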
2231 */ 2232 static int xhci_handle_event(struct xhci_hcd *xhci) 2233 { 2234 union xhci_trb *event; 2235 int update_ptrs = 1; 2236 int ret; 2237 2238 if (!xhci->event_ring || !xhci->event_ring->dequeue) { 2239 xhci->error_bitmask |= 1 << 1; 2240 return 0; 2241 } 2242 2243 event = xhci->event_ring->dequeue; 2244 /* Does the HC or OS own the TRB? */ 2245 if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) != 2246 xhci->event_ring->cycle_state) { 2247 xhci->error_bitmask |= 1 << 2; 2248 return 0; 2249 } 2250 2251 /* 2252 * Barrier between reading the TRB_CYCLE (valid) flag above and any 2253 * speculative reads of the event's flags/data below. 2254 */ 2255 rmb(); 2256 /* FIXME: Handle more event types. */ 2257 switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) { 2258 case TRB_TYPE(TRB_COMPLETION): 2259 handle_cmd_completion(xhci, &event->event_cmd); 2260 break; 2261 case TRB_TYPE(TRB_PORT_STATUS): 2262 handle_port_status(xhci, event); 2263 update_ptrs = 0; 2264 break; 2265 case TRB_TYPE(TRB_TRANSFER): 2266 ret = handle_tx_event(xhci, &event->trans_event); 2267 if (ret < 0) 2268 xhci->error_bitmask |= 1 << 9; 2269 else 2270 update_ptrs = 0; 2271 break; 2272 default: 2273 if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >= 2274 TRB_TYPE(48)) 2275 handle_vendor_event(xhci, event); 2276 else 2277 xhci->error_bitmask |= 1 << 3; 2278 } 2279 /* Any of the above functions may drop and re-acquire the lock, so check 2280 * to make sure a watchdog timer didn't mark the host as non-responsive. 2281 */ 2282 if (xhci->xhc_state & XHCI_STATE_DYING) { 2283 xhci_dbg(xhci, "xHCI host dying, returning from " 2284 "event handler.\n"); 2285 return 0; 2286 } 2287 2288 if (update_ptrs) 2289 /* Update SW event ring dequeue pointer */ 2290 inc_deq(xhci, xhci->event_ring, true); 2291 2292 /* Are there more items on the event ring? Caller will call us again to 2293 * check. 2294 */ 2295 return 1; 2296 } 2297 2298 /* 2299 * xHCI spec says we can get an interrupt, and if the HC has an error condition, 2300 * we might get bad data out of the event ring. Section 4.10.2.7 has a list of 2301 * indicators of an event TRB error, but we check the status *first* to be safe. 2302 */ 2303 irqreturn_t xhci_irq(struct usb_hcd *hcd) 2304 { 2305 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 2306 u32 status; 2307 union xhci_trb *trb; 2308 u64 temp_64; 2309 union xhci_trb *event_ring_deq; 2310 dma_addr_t deq; 2311 2312 spin_lock(&xhci->lock); 2313 trb = xhci->event_ring->dequeue; 2314 /* Check if the xHC generated the interrupt, or the irq is shared */ 2315 status = xhci_readl(xhci, &xhci->op_regs->status); 2316 if (status == 0xffffffff) 2317 goto hw_died; 2318 2319 if (!(status & STS_EINT)) { 2320 spin_unlock(&xhci->lock); 2321 return IRQ_NONE; 2322 } 2323 if (status & STS_FATAL) { 2324 xhci_warn(xhci, "WARNING: Host System Error\n"); 2325 xhci_halt(xhci); 2326 hw_died: 2327 spin_unlock(&xhci->lock); 2328 return -ESHUTDOWN; 2329 } 2330 2331 /* 2332 * Clear the op reg interrupt status first, 2333 * so we can receive interrupts from other MSI-X interrupters. 2334 * Write 1 to clear the interrupt status. 
2335 */ 2336 status |= STS_EINT; 2337 xhci_writel(xhci, status, &xhci->op_regs->status); 2338 /* FIXME when MSI-X is supported and there are multiple vectors */ 2339 /* Clear the MSI-X event interrupt status */ 2340 2341 if (hcd->irq != -1) { 2342 u32 irq_pending; 2343 /* Acknowledge the PCI interrupt */ 2344 irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending); 2345 irq_pending |= 0x3; 2346 xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending); 2347 } 2348 2349 if (xhci->xhc_state & XHCI_STATE_DYING) { 2350 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. " 2351 "Shouldn't IRQs be disabled?\n"); 2352 /* Clear the event handler busy flag (RW1C); 2353 * the event ring should be empty. 2354 */ 2355 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); 2356 xhci_write_64(xhci, temp_64 | ERST_EHB, 2357 &xhci->ir_set->erst_dequeue); 2358 spin_unlock(&xhci->lock); 2359 2360 return IRQ_HANDLED; 2361 } 2362 2363 event_ring_deq = xhci->event_ring->dequeue; 2364 /* FIXME this should be a delayed service routine 2365 * that clears the EHB. 2366 */ 2367 while (xhci_handle_event(xhci) > 0) {} 2368 2369 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); 2370 /* If necessary, update the HW's version of the event ring deq ptr. */ 2371 if (event_ring_deq != xhci->event_ring->dequeue) { 2372 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, 2373 xhci->event_ring->dequeue); 2374 if (deq == 0) 2375 xhci_warn(xhci, "WARN something wrong with SW event " 2376 "ring dequeue ptr.\n"); 2377 /* Update HC event ring dequeue pointer */ 2378 temp_64 &= ERST_PTR_MASK; 2379 temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK); 2380 } 2381 2382 /* Clear the event handler busy flag (RW1C); event ring is empty. */ 2383 temp_64 |= ERST_EHB; 2384 xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue); 2385 2386 spin_unlock(&xhci->lock); 2387 2388 return IRQ_HANDLED; 2389 } 2390 2391 irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd) 2392 { 2393 irqreturn_t ret; 2394 struct xhci_hcd *xhci; 2395 2396 xhci = hcd_to_xhci(hcd); 2397 set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags); 2398 if (xhci->shared_hcd) 2399 set_bit(HCD_FLAG_SAW_IRQ, &xhci->shared_hcd->flags); 2400 2401 ret = xhci_irq(hcd); 2402 2403 return ret; 2404 } 2405 2406 /**** Endpoint Ring Operations ****/ 2407 2408 /* 2409 * Generic function for queueing a TRB on a ring. 2410 * The caller must have checked to make sure there's room on the ring. 2411 * 2412 * @more_trbs_coming: Will you enqueue more TRBs before calling 2413 * prepare_transfer()? 2414 */ 2415 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, 2416 bool consumer, bool more_trbs_coming, bool isoc, 2417 u32 field1, u32 field2, u32 field3, u32 field4) 2418 { 2419 struct xhci_generic_trb *trb; 2420 2421 trb = &ring->enqueue->generic; 2422 trb->field[0] = cpu_to_le32(field1); 2423 trb->field[1] = cpu_to_le32(field2); 2424 trb->field[2] = cpu_to_le32(field3); 2425 trb->field[3] = cpu_to_le32(field4); 2426 inc_enq(xhci, ring, consumer, more_trbs_coming, isoc); 2427 } 2428 2429 /* 2430 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs. 2431 * FIXME allocate segments if the ring is full. 
2432 */ 2433 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, 2434 u32 ep_state, unsigned int num_trbs, bool isoc, gfp_t mem_flags) 2435 { 2436 /* Make sure the endpoint has been added to xHC schedule */ 2437 switch (ep_state) { 2438 case EP_STATE_DISABLED: 2439 /* 2440 * USB core changed config/interfaces without notifying us, 2441 * or hardware is reporting the wrong state. 2442 */ 2443 xhci_warn(xhci, "WARN urb submitted to disabled ep\n"); 2444 return -ENOENT; 2445 case EP_STATE_ERROR: 2446 xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n"); 2447 /* FIXME event handling code for error needs to clear it */ 2448 /* XXX not sure if this should be -ENOENT or not */ 2449 return -EINVAL; 2450 case EP_STATE_HALTED: 2451 xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n"); 2452 case EP_STATE_STOPPED: 2453 case EP_STATE_RUNNING: 2454 break; 2455 default: 2456 xhci_err(xhci, "ERROR unknown endpoint state for ep\n"); 2457 /* 2458 * FIXME issue Configure Endpoint command to try to get the HC 2459 * back into a known state. 2460 */ 2461 return -EINVAL; 2462 } 2463 if (!room_on_ring(xhci, ep_ring, num_trbs)) { 2464 /* FIXME allocate more room */ 2465 xhci_err(xhci, "ERROR no room on ep ring\n"); 2466 return -ENOMEM; 2467 } 2468 2469 if (enqueue_is_link_trb(ep_ring)) { 2470 struct xhci_ring *ring = ep_ring; 2471 union xhci_trb *next; 2472 2473 next = ring->enqueue; 2474 2475 while (last_trb(xhci, ring, ring->enq_seg, next)) { 2476 /* If we're not dealing with 0.95 hardware or isoc rings 2477 * on AMD 0.96 host, clear the chain bit. 2478 */ 2479 if (!xhci_link_trb_quirk(xhci) && !(isoc && 2480 (xhci->quirks & XHCI_AMD_0x96_HOST))) 2481 next->link.control &= cpu_to_le32(~TRB_CHAIN); 2482 else 2483 next->link.control |= cpu_to_le32(TRB_CHAIN); 2484 2485 wmb(); 2486 next->link.control ^= cpu_to_le32(TRB_CYCLE); 2487 2488 /* Toggle the cycle bit after the last ring segment. */ 2489 if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) { 2490 ring->cycle_state = (ring->cycle_state ? 
0 : 1); 2491 if (!in_interrupt()) { 2492 xhci_dbg(xhci, "queue_trb: Toggle cycle " 2493 "state for ring %p = %i\n", 2494 ring, (unsigned int)ring->cycle_state); 2495 } 2496 } 2497 ring->enq_seg = ring->enq_seg->next; 2498 ring->enqueue = ring->enq_seg->trbs; 2499 next = ring->enqueue; 2500 } 2501 } 2502 2503 return 0; 2504 } 2505 2506 static int prepare_transfer(struct xhci_hcd *xhci, 2507 struct xhci_virt_device *xdev, 2508 unsigned int ep_index, 2509 unsigned int stream_id, 2510 unsigned int num_trbs, 2511 struct urb *urb, 2512 unsigned int td_index, 2513 bool isoc, 2514 gfp_t mem_flags) 2515 { 2516 int ret; 2517 struct urb_priv *urb_priv; 2518 struct xhci_td *td; 2519 struct xhci_ring *ep_ring; 2520 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 2521 2522 ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id); 2523 if (!ep_ring) { 2524 xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n", 2525 stream_id); 2526 return -EINVAL; 2527 } 2528 2529 ret = prepare_ring(xhci, ep_ring, 2530 le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK, 2531 num_trbs, isoc, mem_flags); 2532 if (ret) 2533 return ret; 2534 2535 urb_priv = urb->hcpriv; 2536 td = urb_priv->td[td_index]; 2537 2538 INIT_LIST_HEAD(&td->td_list); 2539 INIT_LIST_HEAD(&td->cancelled_td_list); 2540 2541 if (td_index == 0) { 2542 ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb); 2543 if (unlikely(ret)) 2544 return ret; 2545 } 2546 2547 td->urb = urb; 2548 /* Add this TD to the tail of the endpoint ring's TD list */ 2549 list_add_tail(&td->td_list, &ep_ring->td_list); 2550 td->start_seg = ep_ring->enq_seg; 2551 td->first_trb = ep_ring->enqueue; 2552 2553 urb_priv->td[td_index] = td; 2554 2555 return 0; 2556 } 2557 2558 static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb) 2559 { 2560 int num_sgs, num_trbs, running_total, temp, i; 2561 struct scatterlist *sg; 2562 2563 sg = NULL; 2564 num_sgs = urb->num_sgs; 2565 temp = urb->transfer_buffer_length; 2566 2567 xhci_dbg(xhci, "count sg list trbs: \n"); 2568 num_trbs = 0; 2569 for_each_sg(urb->sg, sg, num_sgs, i) { 2570 unsigned int previous_total_trbs = num_trbs; 2571 unsigned int len = sg_dma_len(sg); 2572 2573 /* Scatter gather list entries may cross 64KB boundaries */ 2574 running_total = TRB_MAX_BUFF_SIZE - 2575 (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1)); 2576 running_total &= TRB_MAX_BUFF_SIZE - 1; 2577 if (running_total != 0) 2578 num_trbs++; 2579 2580 /* How many more 64KB chunks to transfer, how many more TRBs? 
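* For example, an 80KB sg entry that starts 1KB below a 64KB boundary takes one 1KB TRB up to the boundary, then two more TRBs (64KB + 15KB), so three TRBs in total.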
*/ 2581 while (running_total < sg_dma_len(sg) && running_total < temp) { 2582 num_trbs++; 2583 running_total += TRB_MAX_BUFF_SIZE; 2584 } 2585 xhci_dbg(xhci, " sg #%d: dma = %#llx, len = %#x (%d), num_trbs = %d\n", 2586 i, (unsigned long long)sg_dma_address(sg), 2587 len, len, num_trbs - previous_total_trbs); 2588 2589 len = min_t(int, len, temp); 2590 temp -= len; 2591 if (temp == 0) 2592 break; 2593 } 2594 xhci_dbg(xhci, "\n"); 2595 if (!in_interrupt()) 2596 xhci_dbg(xhci, "ep %#x - urb len = %d, sglist used, " 2597 "num_trbs = %d\n", 2598 urb->ep->desc.bEndpointAddress, 2599 urb->transfer_buffer_length, 2600 num_trbs); 2601 return num_trbs; 2602 } 2603 2604 static void check_trb_math(struct urb *urb, int num_trbs, int running_total) 2605 { 2606 if (num_trbs != 0) 2607 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of " 2608 "TRBs, %d left\n", __func__, 2609 urb->ep->desc.bEndpointAddress, num_trbs); 2610 if (running_total != urb->transfer_buffer_length) 2611 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, " 2612 "queued %#x (%d), asked for %#x (%d)\n", 2613 __func__, 2614 urb->ep->desc.bEndpointAddress, 2615 running_total, running_total, 2616 urb->transfer_buffer_length, 2617 urb->transfer_buffer_length); 2618 } 2619 2620 static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id, 2621 unsigned int ep_index, unsigned int stream_id, int start_cycle, 2622 struct xhci_generic_trb *start_trb) 2623 { 2624 /* 2625 * Pass all the TRBs to the hardware at once and make sure this write 2626 * isn't reordered. 2627 */ 2628 wmb(); 2629 if (start_cycle) 2630 start_trb->field[3] |= cpu_to_le32(start_cycle); 2631 else 2632 start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE); 2633 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id); 2634 } 2635 2636 /* 2637 * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt 2638 * endpoint is to be serviced, the xHC will consume (at most) one TD. A TD 2639 * (comprised of sg list entries) can take several service intervals to 2640 * transmit. 2641 */ 2642 int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 2643 struct urb *urb, int slot_id, unsigned int ep_index) 2644 { 2645 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, 2646 xhci->devs[slot_id]->out_ctx, ep_index); 2647 int xhci_interval; 2648 int ep_interval; 2649 2650 xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info)); 2651 ep_interval = urb->interval; 2652 /* Convert to microframes */ 2653 if (urb->dev->speed == USB_SPEED_LOW || 2654 urb->dev->speed == USB_SPEED_FULL) 2655 ep_interval *= 8; 2656 /* FIXME change this to a warning and a suggestion to use the new API 2657 * to set the polling interval (once the API is added). 2658 */ 2659 if (xhci_interval != ep_interval) { 2660 if (printk_ratelimit()) 2661 dev_dbg(&urb->dev->dev, "Driver uses different interval" 2662 " (%d microframe%s) than xHCI " 2663 "(%d microframe%s)\n", 2664 ep_interval, 2665 ep_interval == 1 ? "" : "s", 2666 xhci_interval, 2667 xhci_interval == 1 ? "" : "s"); 2668 urb->interval = xhci_interval; 2669 /* Convert back to frames for LS/FS devices */ 2670 if (urb->dev->speed == USB_SPEED_LOW || 2671 urb->dev->speed == USB_SPEED_FULL) 2672 urb->interval /= 8; 2673 } 2674 return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index); 2675 } 2676 2677 /* 2678 * The TD size is the number of bytes remaining in the TD (including this TRB), 2679 * right shifted by 10. 2680 * It must fit in bits 21:17, so it can't be bigger than 31. 
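* For example, with 20480 bytes remaining the field is (20480 >> 10) << 17 = 20 << 17; with 32KB or more remaining, 32 >= 31, so the field is capped at 31 << 17.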
2681 */ 2682 static u32 xhci_td_remainder(unsigned int remainder) 2683 { 2684 u32 max = (1 << (21 - 17 + 1)) - 1; 2685 2686 if ((remainder >> 10) >= max) 2687 return max << 17; 2688 else 2689 return (remainder >> 10) << 17; 2690 } 2691 2692 /* 2693 * For xHCI 1.0 host controllers, TD size is the number of packets remaining in 2694 * the TD (*not* including this TRB). 2695 * 2696 * Total TD packet count = total_packet_count = 2697 * DIV_ROUND_UP(TD size in bytes, wMaxPacketSize) 2698 * 2699 * Packets transferred up to and including this TRB = packets_transferred = 2700 * rounddown(total bytes transferred including this TRB / wMaxPacketSize) 2701 * 2702 * TD size = total_packet_count - packets_transferred 2703 * 2704 * It must fit in bits 21:17, so it can't be bigger than 31. 2705 */ 2706 2707 static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len, 2708 unsigned int total_packet_count, struct urb *urb) 2709 { 2710 int packets_transferred; 2711 2712 /* One TRB with a zero-length data packet. */ 2713 if (running_total == 0 && trb_buff_len == 0) 2714 return 0; 2715 2716 /* All the TRB queueing functions don't count the current TRB in 2717 * running_total. 2718 */ 2719 packets_transferred = (running_total + trb_buff_len) / 2720 usb_endpoint_maxp(&urb->ep->desc); 2721 2722 return min_t(u32, total_packet_count - packets_transferred, 31) << 17; 2723 } 2724 2725 static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 2726 struct urb *urb, int slot_id, unsigned int ep_index) 2727 { 2728 struct xhci_ring *ep_ring; 2729 unsigned int num_trbs; 2730 struct urb_priv *urb_priv; 2731 struct xhci_td *td; 2732 struct scatterlist *sg; 2733 int num_sgs; 2734 int trb_buff_len, this_sg_len, running_total; 2735 unsigned int total_packet_count; 2736 bool first_trb; 2737 u64 addr; 2738 bool more_trbs_coming; 2739 2740 struct xhci_generic_trb *start_trb; 2741 int start_cycle; 2742 2743 ep_ring = xhci_urb_to_transfer_ring(xhci, urb); 2744 if (!ep_ring) 2745 return -EINVAL; 2746 2747 num_trbs = count_sg_trbs_needed(xhci, urb); 2748 num_sgs = urb->num_sgs; 2749 total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length, 2750 usb_endpoint_maxp(&urb->ep->desc)); 2751 2752 trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id], 2753 ep_index, urb->stream_id, 2754 num_trbs, urb, 0, false, mem_flags); 2755 if (trb_buff_len < 0) 2756 return trb_buff_len; 2757 2758 urb_priv = urb->hcpriv; 2759 td = urb_priv->td[0]; 2760 2761 /* 2762 * Don't give the first TRB to the hardware (by toggling the cycle bit) 2763 * until we've finished creating all the other TRBs. The ring's cycle 2764 * state may change as we enqueue the other TRBs, so save it too. 2765 */ 2766 start_trb = &ep_ring->enqueue->generic; 2767 start_cycle = ep_ring->cycle_state; 2768 2769 running_total = 0; 2770 /* 2771 * How much data is in the first TRB? 2772 * 2773 * There are three forces at work for TRB buffer pointers and lengths: 2774 * 1. We don't want to walk off the end of this sg-list entry buffer. 2775 * 2. The transfer length that the driver requested may be smaller than 2776 * the amount of memory allocated for this scatter-gather list. 2777 * 3. TRB buffers can't cross 64KB boundaries.
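* For example, an sg entry mapped at DMA address 0xFF00 with length 0x400 yields a first TRB of only 0x100 bytes (up to the 64KB boundary at 0x10000); the remaining 0x300 bytes go into the next TRB.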
2778 */ 2779 sg = urb->sg; 2780 addr = (u64) sg_dma_address(sg); 2781 this_sg_len = sg_dma_len(sg); 2782 trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1)); 2783 trb_buff_len = min_t(int, trb_buff_len, this_sg_len); 2784 if (trb_buff_len > urb->transfer_buffer_length) 2785 trb_buff_len = urb->transfer_buffer_length; 2786 xhci_dbg(xhci, "First length to xfer from 1st sglist entry = %u\n", 2787 trb_buff_len); 2788 2789 first_trb = true; 2790 /* Queue the first TRB, even if it's zero-length */ 2791 do { 2792 u32 field = 0; 2793 u32 length_field = 0; 2794 u32 remainder = 0; 2795 2796 /* Don't change the cycle bit of the first TRB until later */ 2797 if (first_trb) { 2798 first_trb = false; 2799 if (start_cycle == 0) 2800 field |= 0x1; 2801 } else 2802 field |= ep_ring->cycle_state; 2803 2804 /* Chain all the TRBs together; clear the chain bit in the last 2805 * TRB to indicate it's the last TRB in the chain. 2806 */ 2807 if (num_trbs > 1) { 2808 field |= TRB_CHAIN; 2809 } else { 2810 /* FIXME - add check for ZERO_PACKET flag before this */ 2811 td->last_trb = ep_ring->enqueue; 2812 field |= TRB_IOC; 2813 } 2814 2815 /* Only set interrupt on short packet for IN endpoints */ 2816 if (usb_urb_dir_in(urb)) 2817 field |= TRB_ISP; 2818 2819 xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), " 2820 "64KB boundary at %#x, end dma = %#x\n", 2821 (unsigned int) addr, trb_buff_len, trb_buff_len, 2822 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), 2823 (unsigned int) addr + trb_buff_len); 2824 if (TRB_MAX_BUFF_SIZE - 2825 (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) { 2826 xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n"); 2827 xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n", 2828 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), 2829 (unsigned int) addr + trb_buff_len); 2830 } 2831 2832 /* Set the TRB length, TD size, and interrupter fields. */ 2833 if (xhci->hci_version < 0x100) { 2834 remainder = xhci_td_remainder( 2835 urb->transfer_buffer_length - 2836 running_total); 2837 } else { 2838 remainder = xhci_v1_0_td_remainder(running_total, 2839 trb_buff_len, total_packet_count, urb); 2840 } 2841 length_field = TRB_LEN(trb_buff_len) | 2842 remainder | 2843 TRB_INTR_TARGET(0); 2844 2845 if (num_trbs > 1) 2846 more_trbs_coming = true; 2847 else 2848 more_trbs_coming = false; 2849 queue_trb(xhci, ep_ring, false, more_trbs_coming, false, 2850 lower_32_bits(addr), 2851 upper_32_bits(addr), 2852 length_field, 2853 field | TRB_TYPE(TRB_NORMAL)); 2854 --num_trbs; 2855 running_total += trb_buff_len; 2856 2857 /* Calculate length for next transfer -- 2858 * Are we done queueing all the TRBs for this sg entry? 
*/ 2860 this_sg_len -= trb_buff_len; 2861 if (this_sg_len == 0) { 2862 --num_sgs; 2863 if (num_sgs == 0) 2864 break; 2865 sg = sg_next(sg); 2866 addr = (u64) sg_dma_address(sg); 2867 this_sg_len = sg_dma_len(sg); 2868 } else { 2869 addr += trb_buff_len; 2870 } 2871 2872 trb_buff_len = TRB_MAX_BUFF_SIZE - 2873 (addr & (TRB_MAX_BUFF_SIZE - 1)); 2874 trb_buff_len = min_t(int, trb_buff_len, this_sg_len); 2875 if (running_total + trb_buff_len > urb->transfer_buffer_length) 2876 trb_buff_len = 2877 urb->transfer_buffer_length - running_total; 2878 } while (running_total < urb->transfer_buffer_length); 2879 2880 check_trb_math(urb, num_trbs, running_total); 2881 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, 2882 start_cycle, start_trb); 2883 return 0; 2884 } 2885 2886 /* This is very similar to what ehci-q.c qtd_fill() does */ 2887 int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 2888 struct urb *urb, int slot_id, unsigned int ep_index) 2889 { 2890 struct xhci_ring *ep_ring; 2891 struct urb_priv *urb_priv; 2892 struct xhci_td *td; 2893 int num_trbs; 2894 struct xhci_generic_trb *start_trb; 2895 bool first_trb; 2896 bool more_trbs_coming; 2897 int start_cycle; 2898 u32 field, length_field; 2899 2900 int running_total, trb_buff_len, ret; 2901 unsigned int total_packet_count; 2902 u64 addr; 2903 2904 if (urb->num_sgs) 2905 return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index); 2906 2907 ep_ring = xhci_urb_to_transfer_ring(xhci, urb); 2908 if (!ep_ring) 2909 return -EINVAL; 2910 2911 num_trbs = 0; 2912 /* How much data is (potentially) left before the 64KB boundary? */ 2913 running_total = TRB_MAX_BUFF_SIZE - 2914 (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1)); 2915 running_total &= TRB_MAX_BUFF_SIZE - 1; 2916 2917 /* If there's some data on this 64KB chunk, or we have to send a 2918 * zero-length transfer, we need at least one TRB 2919 */ 2920 if (running_total != 0 || urb->transfer_buffer_length == 0) 2921 num_trbs++; 2922 /* How many more 64KB chunks to transfer, how many more TRBs? */ 2923 while (running_total < urb->transfer_buffer_length) { 2924 num_trbs++; 2925 running_total += TRB_MAX_BUFF_SIZE; 2926 } 2927 /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */ 2928 2929 if (!in_interrupt()) 2930 xhci_dbg(xhci, "ep %#x - urb len = %#x (%d), " 2931 "addr = %#llx, num_trbs = %d\n", 2932 urb->ep->desc.bEndpointAddress, 2933 urb->transfer_buffer_length, 2934 urb->transfer_buffer_length, 2935 (unsigned long long)urb->transfer_dma, 2936 num_trbs); 2937 2938 ret = prepare_transfer(xhci, xhci->devs[slot_id], 2939 ep_index, urb->stream_id, 2940 num_trbs, urb, 0, false, mem_flags); 2941 if (ret < 0) 2942 return ret; 2943 2944 urb_priv = urb->hcpriv; 2945 td = urb_priv->td[0]; 2946 2947 /* 2948 * Don't give the first TRB to the hardware (by toggling the cycle bit) 2949 * until we've finished creating all the other TRBs. The ring's cycle 2950 * state may change as we enqueue the other TRBs, so save it too. 2951 */ 2952 start_trb = &ep_ring->enqueue->generic; 2953 start_cycle = ep_ring->cycle_state; 2954 2955 running_total = 0; 2956 total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length, 2957 usb_endpoint_maxp(&urb->ep->desc)); 2958 /* How much data is in the first TRB?
*/ 2959 addr = (u64) urb->transfer_dma; 2960 trb_buff_len = TRB_MAX_BUFF_SIZE - 2961 (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1)); 2962 if (trb_buff_len > urb->transfer_buffer_length) 2963 trb_buff_len = urb->transfer_buffer_length; 2964 2965 first_trb = true; 2966 2967 /* Queue the first TRB, even if it's zero-length */ 2968 do { 2969 u32 remainder = 0; 2970 field = 0; 2971 2972 /* Don't change the cycle bit of the first TRB until later */ 2973 if (first_trb) { 2974 first_trb = false; 2975 if (start_cycle == 0) 2976 field |= 0x1; 2977 } else 2978 field |= ep_ring->cycle_state; 2979 2980 /* Chain all the TRBs together; clear the chain bit in the last 2981 * TRB to indicate it's the last TRB in the chain. 2982 */ 2983 if (num_trbs > 1) { 2984 field |= TRB_CHAIN; 2985 } else { 2986 /* FIXME - add check for ZERO_PACKET flag before this */ 2987 td->last_trb = ep_ring->enqueue; 2988 field |= TRB_IOC; 2989 } 2990 2991 /* Only set interrupt on short packet for IN endpoints */ 2992 if (usb_urb_dir_in(urb)) 2993 field |= TRB_ISP; 2994 2995 /* Set the TRB length, TD size, and interrupter fields. */ 2996 if (xhci->hci_version < 0x100) { 2997 remainder = xhci_td_remainder( 2998 urb->transfer_buffer_length - 2999 running_total); 3000 } else { 3001 remainder = xhci_v1_0_td_remainder(running_total, 3002 trb_buff_len, total_packet_count, urb); 3003 } 3004 length_field = TRB_LEN(trb_buff_len) | 3005 remainder | 3006 TRB_INTR_TARGET(0); 3007 3008 if (num_trbs > 1) 3009 more_trbs_coming = true; 3010 else 3011 more_trbs_coming = false; 3012 queue_trb(xhci, ep_ring, false, more_trbs_coming, false, 3013 lower_32_bits(addr), 3014 upper_32_bits(addr), 3015 length_field, 3016 field | TRB_TYPE(TRB_NORMAL)); 3017 --num_trbs; 3018 running_total += trb_buff_len; 3019 3020 /* Calculate length for next transfer */ 3021 addr += trb_buff_len; 3022 trb_buff_len = urb->transfer_buffer_length - running_total; 3023 if (trb_buff_len > TRB_MAX_BUFF_SIZE) 3024 trb_buff_len = TRB_MAX_BUFF_SIZE; 3025 } while (running_total < urb->transfer_buffer_length); 3026 3027 check_trb_math(urb, num_trbs, running_total); 3028 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, 3029 start_cycle, start_trb); 3030 return 0; 3031 } 3032 3033 /* Caller must have locked xhci->lock */ 3034 int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 3035 struct urb *urb, int slot_id, unsigned int ep_index) 3036 { 3037 struct xhci_ring *ep_ring; 3038 int num_trbs; 3039 int ret; 3040 struct usb_ctrlrequest *setup; 3041 struct xhci_generic_trb *start_trb; 3042 int start_cycle; 3043 u32 field, length_field; 3044 struct urb_priv *urb_priv; 3045 struct xhci_td *td; 3046 3047 ep_ring = xhci_urb_to_transfer_ring(xhci, urb); 3048 if (!ep_ring) 3049 return -EINVAL; 3050 3051 /* 3052 * Need to copy setup packet into setup TRB, so we can't use the setup 3053 * DMA address. 3054 */ 3055 if (!urb->setup_packet) 3056 return -EINVAL; 3057 3058 if (!in_interrupt()) 3059 xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n", 3060 slot_id, ep_index); 3061 /* 1 TRB for setup, 1 for status */ 3062 num_trbs = 2; 3063 /* 3064 * Don't need to check if we need additional event data and normal TRBs, 3065 * since data in control transfers will never get bigger than 16MB 3066 * XXX: can we get a buffer that crosses 64KB boundaries? 
3067 */ 3068 if (urb->transfer_buffer_length > 0) 3069 num_trbs++; 3070 ret = prepare_transfer(xhci, xhci->devs[slot_id], 3071 ep_index, urb->stream_id, 3072 num_trbs, urb, 0, false, mem_flags); 3073 if (ret < 0) 3074 return ret; 3075 3076 urb_priv = urb->hcpriv; 3077 td = urb_priv->td[0]; 3078 3079 /* 3080 * Don't give the first TRB to the hardware (by toggling the cycle bit) 3081 * until we've finished creating all the other TRBs. The ring's cycle 3082 * state may change as we enqueue the other TRBs, so save it too. 3083 */ 3084 start_trb = &ep_ring->enqueue->generic; 3085 start_cycle = ep_ring->cycle_state; 3086 3087 /* Queue setup TRB - see section 6.4.1.2.1 */ 3088 /* FIXME better way to translate setup_packet into two u32 fields? */ 3089 setup = (struct usb_ctrlrequest *) urb->setup_packet; 3090 field = 0; 3091 field |= TRB_IDT | TRB_TYPE(TRB_SETUP); 3092 if (start_cycle == 0) 3093 field |= 0x1; 3094 3095 /* xHCI 1.0 6.4.1.2.1: Transfer Type field */ 3096 if (xhci->hci_version == 0x100) { 3097 if (urb->transfer_buffer_length > 0) { 3098 if (setup->bRequestType & USB_DIR_IN) 3099 field |= TRB_TX_TYPE(TRB_DATA_IN); 3100 else 3101 field |= TRB_TX_TYPE(TRB_DATA_OUT); 3102 } 3103 } 3104 3105 queue_trb(xhci, ep_ring, false, true, false, 3106 setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16, 3107 le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16, 3108 TRB_LEN(8) | TRB_INTR_TARGET(0), 3109 /* Immediate data in pointer */ 3110 field); 3111 3112 /* If there's data, queue data TRBs */ 3113 /* Only set interrupt on short packet for IN endpoints */ 3114 if (usb_urb_dir_in(urb)) 3115 field = TRB_ISP | TRB_TYPE(TRB_DATA); 3116 else 3117 field = TRB_TYPE(TRB_DATA); 3118 3119 length_field = TRB_LEN(urb->transfer_buffer_length) | 3120 xhci_td_remainder(urb->transfer_buffer_length) | 3121 TRB_INTR_TARGET(0); 3122 if (urb->transfer_buffer_length > 0) { 3123 if (setup->bRequestType & USB_DIR_IN) 3124 field |= TRB_DIR_IN; 3125 queue_trb(xhci, ep_ring, false, true, false, 3126 lower_32_bits(urb->transfer_dma), 3127 upper_32_bits(urb->transfer_dma), 3128 length_field, 3129 field | ep_ring->cycle_state); 3130 } 3131 3132 /* Save the DMA address of the last TRB in the TD */ 3133 td->last_trb = ep_ring->enqueue; 3134 3135 /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */ 3136 /* If the device sent data, the status stage is an OUT transfer */ 3137 if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN) 3138 field = 0; 3139 else 3140 field = TRB_DIR_IN; 3141 queue_trb(xhci, ep_ring, false, false, false, 3142 0, 3143 0, 3144 TRB_INTR_TARGET(0), 3145 /* Event on completion */ 3146 field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state); 3147 3148 giveback_first_trb(xhci, slot_id, ep_index, 0, 3149 start_cycle, start_trb); 3150 return 0; 3151 } 3152 3153 static int count_isoc_trbs_needed(struct xhci_hcd *xhci, 3154 struct urb *urb, int i) 3155 { 3156 int num_trbs = 0; 3157 u64 addr, td_len; 3158 3159 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset); 3160 td_len = urb->iso_frame_desc[i].length; 3161 3162 num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)), 3163 TRB_MAX_BUFF_SIZE); 3164 if (num_trbs == 0) 3165 num_trbs++; 3166 3167 return num_trbs; 3168 } 3169 3170 /* 3171 * The transfer burst count field of the isochronous TRB defines the number of 3172 * bursts that are required to move all packets in this TD. 
Only SuperSpeed 3173 * devices can burst up to bMaxBurst number of packets per service interval. 3174 * This field is zero-based, meaning a value of zero in the field means one 3175 * burst. Basically, for everything but SuperSpeed devices, this field will be 3176 * zero. Only xHCI 1.0 host controllers support this field. 3177 */ 3178 static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci, 3179 struct usb_device *udev, 3180 struct urb *urb, unsigned int total_packet_count) 3181 { 3182 unsigned int max_burst; 3183 3184 if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER) 3185 return 0; 3186 3187 max_burst = urb->ep->ss_ep_comp.bMaxBurst; 3188 return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1; 3189 } 3190 3191 /* 3192 * Returns the number of packets in the last "burst" of packets. This field is 3193 * valid for all speeds of devices. USB 2.0 devices can only do one "burst", so 3194 * the last burst packet count is equal to the total number of packets in the 3195 * TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst 3196 * must contain (bMaxBurst + 1) number of packets, but the last burst can 3197 * contain 1 to (bMaxBurst + 1) packets. 3198 */ 3199 static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci, 3200 struct usb_device *udev, 3201 struct urb *urb, unsigned int total_packet_count) 3202 { 3203 unsigned int max_burst; 3204 unsigned int residue; 3205 3206 if (xhci->hci_version < 0x100) 3207 return 0; 3208 3209 switch (udev->speed) { 3210 case USB_SPEED_SUPER: 3211 /* bMaxBurst is zero based: 0 means 1 packet per burst */ 3212 max_burst = urb->ep->ss_ep_comp.bMaxBurst; 3213 residue = total_packet_count % (max_burst + 1); 3214 /* If residue is zero, the last burst contains (max_burst + 1) 3215 * number of packets, but the TLBPC field is zero-based.
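* For example, with bMaxBurst = 1 (two packets per burst) and a 5-packet TD: DIV_ROUND_UP(5, 2) = 3 bursts, so TBC = 2; the last burst holds 5 % 2 = 1 packet, so TLBPC = 0.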
3216 */ 3217 if (residue == 0) 3218 return max_burst; 3219 return residue - 1; 3220 default: 3221 if (total_packet_count == 0) 3222 return 0; 3223 return total_packet_count - 1; 3224 } 3225 } 3226 3227 /* This is for isoc transfers */ 3228 static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 3229 struct urb *urb, int slot_id, unsigned int ep_index) 3230 { 3231 struct xhci_ring *ep_ring; 3232 struct urb_priv *urb_priv; 3233 struct xhci_td *td; 3234 int num_tds, trbs_per_td; 3235 struct xhci_generic_trb *start_trb; 3236 bool first_trb; 3237 int start_cycle; 3238 u32 field, length_field; 3239 int running_total, trb_buff_len, td_len, td_remain_len, ret; 3240 u64 start_addr, addr; 3241 int i, j; 3242 bool more_trbs_coming; 3243 3244 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; 3245 3246 num_tds = urb->number_of_packets; 3247 if (num_tds < 1) { 3248 xhci_dbg(xhci, "Isoc URB with zero packets?\n"); 3249 return -EINVAL; 3250 } 3251 3252 if (!in_interrupt()) 3253 xhci_dbg(xhci, "ep %#x - urb len = %#x (%d)," 3254 " addr = %#llx, num_tds = %d\n", 3255 urb->ep->desc.bEndpointAddress, 3256 urb->transfer_buffer_length, 3257 urb->transfer_buffer_length, 3258 (unsigned long long)urb->transfer_dma, 3259 num_tds); 3260 3261 start_addr = (u64) urb->transfer_dma; 3262 start_trb = &ep_ring->enqueue->generic; 3263 start_cycle = ep_ring->cycle_state; 3264 3265 urb_priv = urb->hcpriv; 3266 /* Queue the first TRB, even if it's zero-length */ 3267 for (i = 0; i < num_tds; i++) { 3268 unsigned int total_packet_count; 3269 unsigned int burst_count; 3270 unsigned int residue; 3271 3272 first_trb = true; 3273 running_total = 0; 3274 addr = start_addr + urb->iso_frame_desc[i].offset; 3275 td_len = urb->iso_frame_desc[i].length; 3276 td_remain_len = td_len; 3277 total_packet_count = DIV_ROUND_UP(td_len, 3278 usb_endpoint_maxp(&urb->ep->desc)); 3279 /* A zero-length transfer still involves at least one packet. */ 3280 if (total_packet_count == 0) 3281 total_packet_count++; 3282 burst_count = xhci_get_burst_count(xhci, urb->dev, urb, 3283 total_packet_count); 3284 residue = xhci_get_last_burst_packet_count(xhci, 3285 urb->dev, urb, total_packet_count); 3286 3287 trbs_per_td = count_isoc_trbs_needed(xhci, urb, i); 3288 3289 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, 3290 urb->stream_id, trbs_per_td, urb, i, true, 3291 mem_flags); 3292 if (ret < 0) { 3293 if (i == 0) 3294 return ret; 3295 goto cleanup; 3296 } 3297 3298 td = urb_priv->td[i]; 3299 for (j = 0; j < trbs_per_td; j++) { 3300 u32 remainder = 0; 3301 field = TRB_TBC(burst_count) | TRB_TLBPC(residue); 3302 3303 if (first_trb) { 3304 /* Queue the isoc TRB */ 3305 field |= TRB_TYPE(TRB_ISOC); 3306 /* Assume URB_ISO_ASAP is set */ 3307 field |= TRB_SIA; 3308 if (i == 0) { 3309 if (start_cycle == 0) 3310 field |= 0x1; 3311 } else 3312 field |= ep_ring->cycle_state; 3313 first_trb = false; 3314 } else { 3315 /* Queue other normal TRBs */ 3316 field |= TRB_TYPE(TRB_NORMAL); 3317 field |= ep_ring->cycle_state; 3318 } 3319 3320 /* Only set interrupt on short packet for IN EPs */ 3321 if (usb_urb_dir_in(urb)) 3322 field |= TRB_ISP; 3323 3324 /* Chain all the TRBs together; clear the chain bit in 3325 * the last TRB to indicate it's the last TRB in the 3326 * chain.
*/ 3328 if (j < trbs_per_td - 1) { 3329 field |= TRB_CHAIN; 3330 more_trbs_coming = true; 3331 } else { 3332 td->last_trb = ep_ring->enqueue; 3333 field |= TRB_IOC; 3334 if (xhci->hci_version == 0x100) { 3335 /* Set BEI bit except for the last td */ 3336 if (i < num_tds - 1) 3337 field |= TRB_BEI; 3338 } 3339 more_trbs_coming = false; 3340 } 3341 3342 /* Calculate TRB length */ 3343 trb_buff_len = TRB_MAX_BUFF_SIZE - 3344 (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); 3345 if (trb_buff_len > td_remain_len) 3346 trb_buff_len = td_remain_len; 3347 3348 /* Set the TRB length, TD size, & interrupter fields. */ 3349 if (xhci->hci_version < 0x100) { 3350 remainder = xhci_td_remainder( 3351 td_len - running_total); 3352 } else { 3353 remainder = xhci_v1_0_td_remainder( 3354 running_total, trb_buff_len, 3355 total_packet_count, urb); 3356 } 3357 length_field = TRB_LEN(trb_buff_len) | 3358 remainder | 3359 TRB_INTR_TARGET(0); 3360 3361 queue_trb(xhci, ep_ring, false, more_trbs_coming, true, 3362 lower_32_bits(addr), 3363 upper_32_bits(addr), 3364 length_field, 3365 field); 3366 running_total += trb_buff_len; 3367 3368 addr += trb_buff_len; 3369 td_remain_len -= trb_buff_len; 3370 } 3371 3372 /* Check TD length */ 3373 if (running_total != td_len) { 3374 xhci_err(xhci, "ISOC TD length mismatch\n"); 3375 return -EINVAL; 3376 } 3377 } 3378 3379 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { 3380 if (xhci->quirks & XHCI_AMD_PLL_FIX) 3381 usb_amd_quirk_pll_disable(); 3382 } 3383 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++; 3384 3385 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, 3386 start_cycle, start_trb); 3387 return 0; 3388 cleanup: 3389 /* Clean up a partially enqueued isoc transfer. */ 3390 3391 for (i--; i >= 0; i--) 3392 list_del_init(&urb_priv->td[i]->td_list); 3393 3394 /* Use the first TD as a temporary variable to turn the TDs we've queued 3395 * into No-ops with a software-owned cycle bit. That way the hardware 3396 * won't accidentally start executing bogus TDs when we partially 3397 * overwrite them. td->first_trb and td->start_seg are already set. 3398 */ 3399 urb_priv->td[0]->last_trb = ep_ring->enqueue; 3400 /* Every TRB except the first & last will have its cycle bit flipped. */ 3401 td_to_noop(xhci, ep_ring, urb_priv->td[0], true); 3402 3403 /* Reset the ring enqueue back to the first TRB and its cycle bit. */ 3404 ep_ring->enqueue = urb_priv->td[0]->first_trb; 3405 ep_ring->enq_seg = urb_priv->td[0]->start_seg; 3406 ep_ring->cycle_state = start_cycle; 3407 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb); 3408 return ret; 3409 } 3410 3411 /* 3412 * Check the transfer ring to guarantee there is enough room for the whole URB. 3413 * Update ISO URB start_frame and interval. 3414 * Update the interval as xhci_queue_intr_tx does. For now, just use the xHC 3415 * frame_index to update urb->start_frame. 3416 * Always assume URB_ISO_ASAP is set, and NEVER use urb->start_frame as input.
/*
 * Check the transfer ring to guarantee there is enough room for the URB.
 * Update ISO URB start_frame and interval.
 * Update the interval as xhci_queue_intr_tx does.  For now, simply use the
 * xHCI frame_index register to set urb->start_frame.
 * Always assume URB_ISO_ASAP is set, and never use urb->start_frame as input.
 */
int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx;
	int start_frame;
	int xhci_interval;
	int ep_interval;
	int num_tds, num_trbs, i;
	int ret;

	xdev = xhci->devs[slot_id];
	ep_ring = xdev->eps[ep_index].ring;
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	num_trbs = 0;
	num_tds = urb->number_of_packets;
	for (i = 0; i < num_tds; i++)
		num_trbs += count_isoc_trbs_needed(xhci, urb, i);

	/* Check the ring to guarantee there is enough room for the whole URB.
	 * Do not insert any TDs from this URB into the ring if the check fails.
	 */
	ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
			num_trbs, true, mem_flags);
	if (ret)
		return ret;

	start_frame = xhci_readl(xhci, &xhci->run_regs->microframe_index);
	start_frame &= 0x3fff;

	urb->start_frame = start_frame;
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		urb->start_frame >>= 3;

	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
	ep_interval = urb->interval;
	/* Convert to microframes */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		ep_interval *= 8;
	/* FIXME change this to a warning and a suggestion to use the new API
	 * to set the polling interval (once the API is added).
	 */
	if (xhci_interval != ep_interval) {
		if (printk_ratelimit())
			dev_dbg(&urb->dev->dev, "Driver uses different interval"
					" (%d microframe%s) than xHCI "
					"(%d microframe%s)\n",
					ep_interval,
					ep_interval == 1 ? "" : "s",
					xhci_interval,
					xhci_interval == 1 ? "" : "s");
		urb->interval = xhci_interval;
		/* Convert back to frames for LS/FS devices */
		if (urb->dev->speed == USB_SPEED_LOW ||
				urb->dev->speed == USB_SPEED_FULL)
			urb->interval /= 8;
	}
	return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
}
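/*
 * Interval bookkeeping sketch for the function above (numbers are
 * hypothetical): a full-speed device with urb->interval == 4 frames gives
 * ep_interval == 4 * 8 == 32 microframes.  If the endpoint context instead
 * encodes 64 microframes, the rate-limited debug message fires and
 * urb->interval is rewritten to 64 / 8 == 8 frames, so the URB and the
 * hardware schedule agree from then on.  MFINDEX counts microframes, hence
 * the >> 3 when start_frame is reported in frames for LS/FS devices.
 */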
/****		Command Ring Operations		****/

/* Generic function for queueing a command TRB on the command ring.
 * Check to make sure there's room on the command ring for one command TRB.
 * Also check that there's room reserved for commands that must not fail.
 * If this is a command that must not fail (command_must_succeed = TRUE),
 * then only check for the number of reserved spots.
 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
 * because the command event handler may want to resubmit a failed command.
 */
static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
		u32 field3, u32 field4, bool command_must_succeed)
{
	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
	int ret;

	if (!command_must_succeed)
		reserved_trbs++;

	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
			reserved_trbs, false, GFP_ATOMIC);
	if (ret < 0) {
		xhci_err(xhci, "ERR: No room for command on command ring\n");
		if (command_must_succeed)
			xhci_err(xhci, "ERR: Reserved TRB counting for "
					"unfailable commands failed.\n");
		return ret;
	}
	queue_trb(xhci, xhci->cmd_ring, false, false, false, field1, field2,
			field3, field4 | xhci->cmd_ring->cycle_state);
	return 0;
}

/* Queue a slot enable or disable request on the command ring */
int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
{
	return queue_command(xhci, 0, 0, 0,
			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
}

/* Queue an address device command TRB */
int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id)
{
	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

int xhci_queue_vendor_command(struct xhci_hcd *xhci,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	return queue_command(xhci, field1, field2, field3, field4, false);
}

/* Queue a reset device command TRB */
int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
{
	return queue_command(xhci, 0, 0, 0,
			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/* Queue an evaluate context command TRB */
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id)
{
	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

/*
 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
 * activity on an endpoint that is about to be suspended.
 */
int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, int suspend)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_STOP_RING);
	u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);

	return queue_command(xhci, 0, 0, 0,
			trb_slot_id | trb_ep_index | type | trb_suspend, false);
}
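/*
 * Composition sketch for the wrappers above (slot and endpoint numbers are
 * made up): a Stop Endpoint command for slot 1, endpoint index 2 with
 * suspend == 0 builds its control dword as
 *
 *	SLOT_ID_FOR_TRB(1) | EP_ID_FOR_TRB(2) | TRB_TYPE(TRB_STOP_RING)
 *
 * Each macro shifts its argument into the corresponding field of the TRB's
 * final dword; queue_command() then ORs in the ring's current cycle bit,
 * which is what makes the TRB visible to the hardware once the command
 * doorbell is rung.
 */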
/* Set Transfer Ring Dequeue Pointer command.
 * This should not be used for endpoints that have streams enabled.
 */
static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id,
		struct xhci_segment *deq_seg,
		union xhci_trb *deq_ptr, u32 cycle_state)
{
	dma_addr_t addr;
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
	u32 type = TRB_TYPE(TRB_SET_DEQ);
	struct xhci_virt_ep *ep;

	addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
				deq_seg, deq_ptr);
		return 0;
	}
	ep = &xhci->devs[slot_id]->eps[ep_index];
	if ((ep->ep_state & SET_DEQ_PENDING)) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
		return 0;
	}
	ep->queued_deq_seg = deq_seg;
	ep->queued_deq_ptr = deq_ptr;
	return queue_command(xhci, lower_32_bits(addr) | cycle_state,
			upper_32_bits(addr), trb_stream_id,
			trb_slot_id | trb_ep_index | type, false);
}

int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_RESET_EP);

	return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,
			false);
}
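/*
 * Pointer packing sketch for queue_set_tr_deq() (the address is made up):
 * for a new dequeue TRB at DMA address 0x12345670 with cycle_state == 1,
 * field1 becomes lower_32_bits(0x12345670) | 1 == 0x12345671.  TRBs are
 * 16-byte aligned, so the low four bits of the address are always zero;
 * bit 0 is reused to carry the Dequeue Cycle State (DCS), telling the HC
 * which cycle bit value to expect at the new dequeue position.
 */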