/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue. This means there will always be
 *    at least one free TRB in the ring. This is useful if you want to turn
 *    that into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB. If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules). You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if the ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update the enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify the consumer. If SW is the producer, it rings the doorbell for
 *    command and endpoint rings. If HC is the producer for the event ring,
 *    it generates an interrupt according to interrupt modulation rules.
 *
 * Consumer rules:
 * 1. Check if the TRB belongs to you. If the cycle bit == your ring cycle
 *    state, the TRB is owned by the consumer.
 * 2. Update the dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer. SW is the consumer for the event ring, and it
 *    updates the event ring dequeue pointer. HC is the consumer for the
 *    command and endpoint rings; it generates events on the event ring for
 *    these.
 */

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "xhci.h"
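/*
 * Illustrative sketch (not part of the driver): a consumer applying the cycle
 * bit rules above to claim event ring TRBs. Field names match this file;
 * process_trb() is a hypothetical placeholder. The TRB is owned by the
 * consumer only while its cycle bit matches the ring cycle state, and
 * inc_deq() may toggle that state at a segment boundary.
 *
 *	while ((ring->dequeue->event_cmd.flags & TRB_CYCLE) ==
 *			ring->cycle_state) {
 *		process_trb(ring->dequeue);
 *		inc_deq(xhci, ring, true);
 *	}
 */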
/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
		union xhci_trb *trb)
{
	unsigned long segment_offset;

	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset in TRBs */
	segment_offset = trb - seg->trbs;
	if (segment_offset >= TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
}

/* Does this link TRB point to the first segment in a ring,
 * or was the previous TRB the last TRB on the last segment in the ERST?
 */
static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci,
		struct xhci_ring *ring, struct xhci_segment *seg,
		union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
			(seg->next == xhci->event_ring->first_seg);
	else
		return trb->link.control & LINK_TOGGLE;
}

/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
 * segment? I.e. would the updated event TRB pointer step off the end of the
 * event seg?
 */
static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return trb == &seg->trbs[TRBS_PER_SEGMENT];
	else
		return (trb->link.control & TRB_TYPE_BITMASK) ==
			TRB_TYPE(TRB_LINK);
}

/* Updates trb to point to the next TRB in the ring, and updates seg if the
 * next TRB is in a new segment. This does not skip over link TRBs, and it
 * does not affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
		struct xhci_ring *ring,
		struct xhci_segment **seg,
		union xhci_trb **trb)
{
	if (last_trb(xhci, ring, *seg, *trb)) {
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
 */
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring,
		bool consumer)
{
	union xhci_trb *next = ++(ring->dequeue);
	unsigned long long addr;

	ring->deq_updates++;
	/* Update the dequeue pointer further if that was a link TRB or we're
	 * at the end of an event ring segment (which doesn't have link TRBs)
	 */
	while (last_trb(xhci, ring, ring->deq_seg, next)) {
		if (consumer &&
		    last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
			ring->cycle_state = (ring->cycle_state ? 0 : 1);
			if (!in_interrupt())
				xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
						ring,
						(unsigned int) ring->cycle_state);
		}
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		next = ring->dequeue;
	}
	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg,
			ring->dequeue);
	if (ring == xhci->event_ring)
		xhci_dbg(xhci, "Event ring deq = 0x%llx (DMA)\n", addr);
	else if (ring == xhci->cmd_ring)
		xhci_dbg(xhci, "Command ring deq = 0x%llx (DMA)\n", addr);
	else
		xhci_dbg(xhci, "Ring deq = 0x%llx (DMA)\n", addr);
}
/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set. This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
		bool consumer)
{
	u32 chain;
	union xhci_trb *next;
	unsigned long long addr;

	chain = ring->enqueue->generic.field[3] & TRB_CHAIN;
	next = ++(ring->enqueue);

	ring->enq_updates++;
	/* Update the enqueue pointer further if that was a link TRB or we're
	 * at the end of an event ring segment (which doesn't have link TRBs)
	 */
	while (last_trb(xhci, ring, ring->enq_seg, next)) {
		if (!consumer) {
			if (ring != xhci->event_ring) {
				/* If we're not dealing with 0.95 hardware,
				 * carry over the chain bit of the previous TRB
				 * (which may mean the chain bit is cleared).
				 */
				if (!xhci_link_trb_quirk(xhci)) {
					next->link.control &= ~TRB_CHAIN;
					next->link.control |= chain;
				}
				/* Give this link TRB to the hardware */
				wmb();
				if (next->link.control & TRB_CYCLE)
					next->link.control &= (u32) ~TRB_CYCLE;
				else
					next->link.control |= (u32) TRB_CYCLE;
			}
			/* Toggle the cycle bit after the last ring segment. */
			if (last_trb_on_last_seg(xhci, ring,
						ring->enq_seg, next)) {
				ring->cycle_state = (ring->cycle_state ? 0 : 1);
				if (!in_interrupt())
					xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
							ring,
							(unsigned int) ring->cycle_state);
			}
		}
		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}
	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg,
			ring->enqueue);
	if (ring == xhci->event_ring)
		xhci_dbg(xhci, "Event ring enq = 0x%llx (DMA)\n", addr);
	else if (ring == xhci->cmd_ring)
		xhci_dbg(xhci, "Command ring enq = 0x%llx (DMA)\n", addr);
	else
		xhci_dbg(xhci, "Ring enq = 0x%llx (DMA)\n", addr);
}
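/*
 * Illustrative sketch of producer rule 2 plus the chain bit handling above:
 * a two-TRB TD queued with queue_trb() (defined later in this file). The
 * buffer and length values are hypothetical; the point is that both TRBs
 * carry the current ring cycle state, only the first has TRB_CHAIN set, and
 * any link TRB crossed between the two writes inherits that chain bit, so
 * the HC treats the TD as one unit even across a segment boundary.
 *
 *	queue_trb(xhci, ring, false, buf_lo, buf_hi, length_field,
 *			TRB_CHAIN | TRB_TYPE(TRB_NORMAL) | ring->cycle_state);
 *	queue_trb(xhci, ring, false, buf2_lo, buf2_hi, length_field2,
 *			TRB_IOC | TRB_TYPE(TRB_NORMAL) | ring->cycle_state);
 */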
/*
 * Check to see if there's room to enqueue num_trbs on the ring. See rules
 * above.
 * FIXME: this would be simpler and faster if we just kept track of the number
 * of free TRBs in a ring.
 */
static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_trbs)
{
	int i;
	union xhci_trb *enq = ring->enqueue;
	struct xhci_segment *enq_seg = ring->enq_seg;

	/* Check if ring is empty */
	if (enq == ring->dequeue)
		return 1;
	/* Make sure there's an extra empty TRB available */
	for (i = 0; i <= num_trbs; ++i) {
		if (enq == ring->dequeue)
			return 0;
		enq++;
		while (last_trb(xhci, ring, enq_seg, enq)) {
			enq_seg = enq_seg->next;
			enq = enq_seg->trbs;
		}
	}
	return 1;
}
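/*
 * Worked example of the ring rules behind room_on_ring(): each non-event
 * segment ends in a link TRB, and one more TRB must always stay free so that
 * enqueue == dequeue still means "empty". A two-segment endpoint ring can
 * therefore hold at most 2 * (TRBS_PER_SEGMENT - 1) - 1 transfer TRBs.
 */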
void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
{
	u64 temp;
	dma_addr_t deq;

	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
			xhci->event_ring->dequeue);
	if (deq == 0 && !in_interrupt())
		xhci_warn(xhci, "WARN something wrong with SW event ring "
				"dequeue ptr.\n");
	/* Update HC event ring dequeue pointer */
	temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp &= ERST_PTR_MASK;
	/* Don't clear the EHB bit (which is RW1C) because
	 * there might be more events to service.
	 */
	temp &= ~ERST_EHB;
	xhci_dbg(xhci, "// Write event ring dequeue pointer, preserving EHB bit\n");
	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
			&xhci->ir_set->erst_dequeue);
}

/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
	u32 temp;

	xhci_dbg(xhci, "// Ding dong!\n");
	temp = xhci_readl(xhci, &xhci->dba->doorbell[0]) & DB_MASK;
	xhci_writel(xhci, temp | DB_TARGET_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	xhci_readl(xhci, &xhci->dba->doorbell[0]);
}

static void ring_ep_doorbell(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index)
{
	struct xhci_virt_ep *ep;
	unsigned int ep_state;
	u32 field;
	__u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];

	ep = &xhci->devs[slot_id]->eps[ep_index];
	ep_state = ep->ep_state;
	/* Don't ring the doorbell for this endpoint if there are pending
	 * cancellations, because we don't want to interrupt processing.
	 */
	if (!(ep_state & EP_HALT_PENDING) && !(ep_state & SET_DEQ_PENDING)
			&& !(ep_state & EP_HALTED)) {
		field = xhci_readl(xhci, db_addr) & DB_MASK;
		xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr);
		/* Flush PCI posted writes - FIXME Matthew Wilcox says this
		 * isn't time-critical and we shouldn't make the CPU wait for
		 * the flush.
		 */
		xhci_readl(xhci, db_addr);
	}
}

/*
 * Find the segment that trb is in. Start searching in start_seg.
 * If we must move past a segment that has a link TRB with a toggle cycle state
 * bit set, then we will toggle the value pointed at by cycle_state.
 */
static struct xhci_segment *find_trb_seg(
		struct xhci_segment *start_seg,
		union xhci_trb *trb, int *cycle_state)
{
	struct xhci_segment *cur_seg = start_seg;
	struct xhci_generic_trb *generic_trb;

	while (cur_seg->trbs > trb ||
			&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
		generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
		if ((generic_trb->field[3] & TRB_TYPE_BITMASK) ==
				TRB_TYPE(TRB_LINK) &&
				(generic_trb->field[3] & LINK_TOGGLE))
			*cycle_state = ~(*cycle_state) & 0x1;
		cur_seg = cur_seg->next;
		if (cur_seg == start_seg)
			/* Looped over the entire list. Oops! */
			return 0;
	}
	return cur_seg;
}

/*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
 * dequeue pointer, and new consumer cycle state in state.
 * Update our internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 *  - First we update our new ring state to be the same as when the xHC stopped.
 *  - Then we traverse the ring to find the segment that contains
 *    the last TRB in the TD. We toggle the xHC's new cycle state when we pass
 *    any link TRBs with the toggle cycle bit set.
 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 *    if we've moved it past a link TRB with the toggle cycle bit set.
 */
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		struct xhci_td *cur_td, struct xhci_dequeue_state *state)
{
	struct xhci_virt_device *dev = xhci->devs[slot_id];
	struct xhci_ring *ep_ring = dev->eps[ep_index].ring;
	struct xhci_generic_trb *trb;
	struct xhci_ep_ctx *ep_ctx;
	dma_addr_t addr;

	state->new_cycle_state = 0;
	xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
	state->new_deq_seg = find_trb_seg(cur_td->start_seg,
			dev->eps[ep_index].stopped_trb,
			&state->new_cycle_state);
	if (!state->new_deq_seg)
		BUG();
	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
	xhci_dbg(xhci, "Finding endpoint context\n");
	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	state->new_cycle_state = 0x1 & ep_ctx->deq;

	state->new_deq_ptr = cur_td->last_trb;
	xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
	state->new_deq_seg = find_trb_seg(state->new_deq_seg,
			state->new_deq_ptr,
			&state->new_cycle_state);
	if (!state->new_deq_seg)
		BUG();

	trb = &state->new_deq_ptr->generic;
	if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
			(trb->field[3] & LINK_TOGGLE))
		state->new_cycle_state = ~(state->new_cycle_state) & 0x1;
	next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);

	/* Don't update the ring cycle state for the producer (us). */
	xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
			state->new_deq_seg);
	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
	xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
			(unsigned long long) addr);
	xhci_dbg(xhci, "Setting dequeue pointer in internal ring state.\n");
	ep_ring->dequeue = state->new_deq_ptr;
	ep_ring->deq_seg = state->new_deq_seg;
}
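/*
 * Worked example for the three jumps above: suppose the xHC stopped in
 * segment A and cur_td->last_trb lives in segment B, with the link TRB
 * between them having its toggle bit set. The second find_trb_seg() call
 * flips new_cycle_state once while walking from the stopped TRB to last_trb,
 * and the final step past last_trb flips it again only if last_trb sits just
 * before another toggling link TRB, leaving exactly the state the xHC will
 * expect when it resumes at the TRB after the TD.
 */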
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		struct xhci_td *cur_td)
{
	struct xhci_segment *cur_seg;
	union xhci_trb *cur_trb;

	for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
			true;
			next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
		if ((cur_trb->generic.field[3] & TRB_TYPE_BITMASK) ==
				TRB_TYPE(TRB_LINK)) {
			/* Unchain any chained Link TRBs, but
			 * leave the pointers intact.
			 */
			cur_trb->generic.field[3] &= ~TRB_CHAIN;
			xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
			xhci_dbg(xhci, "Address = %p (0x%llx dma); "
					"in seg %p (0x%llx dma)\n",
					cur_trb,
					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
					cur_seg,
					(unsigned long long)cur_seg->dma);
		} else {
			cur_trb->generic.field[0] = 0;
			cur_trb->generic.field[1] = 0;
			cur_trb->generic.field[2] = 0;
			/* Preserve only the cycle bit of this TRB */
			cur_trb->generic.field[3] &= TRB_CYCLE;
			cur_trb->generic.field[3] |= TRB_TYPE(TRB_TR_NOOP);
			xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
					"in seg %p (0x%llx dma)\n",
					cur_trb,
					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
					cur_seg,
					(unsigned long long)cur_seg->dma);
		}
		if (cur_trb == cur_td->last_trb)
			break;
	}
}

static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, struct xhci_segment *deq_seg,
		union xhci_trb *deq_ptr, u32 cycle_state);

void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		struct xhci_dequeue_state *deq_state)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];

	xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
			"new deq ptr = %p (0x%llx dma), new cycle = %u\n",
			deq_state->new_deq_seg,
			(unsigned long long)deq_state->new_deq_seg->dma,
			deq_state->new_deq_ptr,
			(unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
			deq_state->new_cycle_state);
	queue_set_tr_deq(xhci, slot_id, ep_index,
			deq_state->new_deq_seg,
			deq_state->new_deq_ptr,
			(u32) deq_state->new_cycle_state);
	/* Stop the TD queueing code from ringing the doorbell until
	 * this command completes. The HC won't set the dequeue pointer
	 * if the ring is running, and ringing the doorbell starts the
	 * ring running.
	 */
	ep->ep_state |= SET_DEQ_PENDING;
}

static inline void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	ep->ep_state &= ~EP_HALT_PENDING;
	/* Can't del_timer_sync in interrupt, so we attempt to cancel. If the
	 * timer is running on another CPU, we don't decrement stop_cmds_pending
	 * (since we didn't successfully stop the watchdog timer).
	 */
	if (del_timer(&ep->stop_cmd_timer))
		ep->stop_cmds_pending--;
}

/* Must be called with xhci->lock held in interrupt context */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
		struct xhci_td *cur_td, int status, char *adjective)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);

	cur_td->urb->hcpriv = NULL;
	usb_hcd_unlink_urb_from_ep(hcd, cur_td->urb);
	xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, cur_td->urb);

	spin_unlock(&xhci->lock);
	usb_hcd_giveback_urb(hcd, cur_td->urb, status);
	kfree(cur_td);
	spin_lock(&xhci->lock);
	xhci_dbg(xhci, "%s URB given back\n", adjective);
}
/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring. There are two ways to do that:
 *
 * 1. If the HW was in the middle of processing the TD that needs to be
 *    cancelled, then we must move the ring's dequeue pointer past the last
 *    TRB in the TD with a Set Dequeue Pointer Command.
 * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the
 *    chain bit cleared) so that the HW will skip over them.
 */
static void handle_stopped_endpoint(struct xhci_hcd *xhci,
		union xhci_trb *trb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct list_head *entry;
	struct xhci_td *cur_td = NULL;
	struct xhci_td *last_unlinked_td;

	struct xhci_dequeue_state deq_state;

	memset(&deq_state, 0, sizeof(deq_state));
	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
	ep = &xhci->devs[slot_id]->eps[ep_index];
	ep_ring = ep->ring;

	if (list_empty(&ep->cancelled_td_list)) {
		xhci_stop_watchdog_timer_in_irq(xhci, ep);
		ring_ep_doorbell(xhci, slot_id, ep_index);
		return;
	}

	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
	 * We have the xHCI lock, so nothing can modify this list until we drop
	 * it. We're also in the event handler, so we can't get re-interrupted
	 * if another Stop Endpoint command completes.
	 */
	list_for_each(entry, &ep->cancelled_td_list) {
		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
		xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
				cur_td->first_trb,
				(unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
		/*
		 * If we stopped on the TD we need to cancel, then we have to
		 * move the xHC endpoint ring dequeue pointer past this TD.
		 */
		if (cur_td == ep->stopped_td)
			xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
					cur_td, &deq_state);
		else
			td_to_noop(xhci, ep_ring, cur_td);
		/*
		 * The event handler won't see a completion for this TD anymore,
		 * so remove it from the endpoint ring's TD list. Keep it in
		 * the cancelled TD list for URB completion later.
		 */
		list_del(&cur_td->td_list);
	}
	last_unlinked_td = cur_td;
	xhci_stop_watchdog_timer_in_irq(xhci, ep);

	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
		xhci_queue_new_dequeue_state(xhci,
				slot_id, ep_index, &deq_state);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Otherwise just ring the doorbell to restart the ring */
		ring_ep_doorbell(xhci, slot_id, ep_index);
	}

	/*
	 * Drop the lock and complete the URBs in the cancelled TD list.
	 * New TDs to be cancelled might be added to the end of the list before
	 * we can complete all the URBs for the TDs we already unlinked.
	 * So stop when we've completed the URB for the last TD we unlinked.
	 */
	do {
		cur_td = list_entry(ep->cancelled_td_list.next,
				struct xhci_td, cancelled_td_list);
		list_del(&cur_td->cancelled_td_list);

		/* Clean up the cancelled URB */
		/* Doesn't matter what we pass for status, since the core will
		 * just overwrite it (because the URB has been unlinked).
		 */
		xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled");

		/* Stop processing the cancelled list if the watchdog timer is
		 * running.
		 */
		if (xhci->xhc_state & XHCI_STATE_DYING)
			return;
	} while (cur_td != last_unlinked_td);

	/* Return to the event handler with xhci->lock re-acquired */
}
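/*
 * For context, a sketch of the path that feeds the handler above (see
 * xhci_urb_dequeue() in xhci.c for the real version): the URB dequeue path
 * adds the TD to ep->cancelled_td_list, then queues the Stop Endpoint
 * command whose completion lands in handle_stopped_endpoint().
 *
 *	list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
 *	xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
 *	xhci_ring_cmd_db(xhci);
 */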
/* Watchdog timer function for when a stop endpoint command fails to complete.
 * In this case, we assume the host controller is broken or dying or dead. The
 * host may still be completing some other events, so we have to be careful to
 * let the event ring handler and the URB dequeueing/enqueueing functions know
 * through xhci->xhc_state.
 *
 * The timer may also fire if the host takes a very long time to respond to the
 * command, and the stop endpoint command completion handler cannot delete the
 * timer before the timer function is called. Another endpoint cancellation may
 * sneak in before the timer function can grab the lock, and that may queue
 * another stop endpoint command and add the timer back. So we cannot use a
 * simple flag to say whether there is a pending stop endpoint command for a
 * particular endpoint.
 *
 * Instead we use a combination of that flag and a counter for the number of
 * pending stop endpoint commands. If the timer is the tail end of the last
 * stop endpoint command, and the endpoint's command is still pending, we
 * assume the host is dying.
 */
void xhci_stop_endpoint_command_watchdog(unsigned long arg)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_ep *ep;
	struct xhci_virt_ep *temp_ep;
	struct xhci_ring *ring;
	struct xhci_td *cur_td;
	int ret, i, j;

	ep = (struct xhci_virt_ep *) arg;
	xhci = ep->xhci;

	spin_lock(&xhci->lock);

	ep->stop_cmds_pending--;
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
				"xHCI as DYING, exiting.\n");
		spin_unlock(&xhci->lock);
		return;
	}
	if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
		xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
				"exiting.\n");
		spin_unlock(&xhci->lock);
		return;
	}

	xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
	xhci_warn(xhci, "Assuming host is dying, halting host.\n");
	/* Oops, HC is dead or dying or at least not responding to the stop
	 * endpoint command.
	 */
	xhci->xhc_state |= XHCI_STATE_DYING;
	/* Disable interrupts from the host controller and start halting it */
	xhci_quiesce(xhci);
	spin_unlock(&xhci->lock);

	ret = xhci_halt(xhci);

	spin_lock(&xhci->lock);
	if (ret < 0) {
		/* This is bad; the host is not responding to commands and it's
		 * not allowing itself to be halted. At least interrupts are
		 * disabled, so we can set HC_STATE_HALT and notify the
		 * USB core. But if we call usb_hc_died(), it will attempt to
		 * disconnect all device drivers under this host. Those
		 * disconnect() methods will wait for all URBs to be unlinked,
		 * so we must complete them.
		 */
		xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
		xhci_warn(xhci, "Completing active URBs anyway.\n");
		/* We could turn all TDs on the rings to no-ops. This won't
		 * help if the host has cached part of the ring, and is slow if
		 * we want to preserve the cycle bit. Skip it and hope the host
		 * doesn't touch the memory.
		 */
	}
684 */ 685 } 686 for (i = 0; i < MAX_HC_SLOTS; i++) { 687 if (!xhci->devs[i]) 688 continue; 689 for (j = 0; j < 31; j++) { 690 temp_ep = &xhci->devs[i]->eps[j]; 691 ring = temp_ep->ring; 692 if (!ring) 693 continue; 694 xhci_dbg(xhci, "Killing URBs for slot ID %u, " 695 "ep index %u\n", i, j); 696 while (!list_empty(&ring->td_list)) { 697 cur_td = list_first_entry(&ring->td_list, 698 struct xhci_td, 699 td_list); 700 list_del(&cur_td->td_list); 701 if (!list_empty(&cur_td->cancelled_td_list)) 702 list_del(&cur_td->cancelled_td_list); 703 xhci_giveback_urb_in_irq(xhci, cur_td, 704 -ESHUTDOWN, "killed"); 705 } 706 while (!list_empty(&temp_ep->cancelled_td_list)) { 707 cur_td = list_first_entry( 708 &temp_ep->cancelled_td_list, 709 struct xhci_td, 710 cancelled_td_list); 711 list_del(&cur_td->cancelled_td_list); 712 xhci_giveback_urb_in_irq(xhci, cur_td, 713 -ESHUTDOWN, "killed"); 714 } 715 } 716 } 717 spin_unlock(&xhci->lock); 718 xhci_to_hcd(xhci)->state = HC_STATE_HALT; 719 xhci_dbg(xhci, "Calling usb_hc_died()\n"); 720 usb_hc_died(xhci_to_hcd(xhci)); 721 xhci_dbg(xhci, "xHCI host controller is dead.\n"); 722 } 723 724 /* 725 * When we get a completion for a Set Transfer Ring Dequeue Pointer command, 726 * we need to clear the set deq pending flag in the endpoint ring state, so that 727 * the TD queueing code can ring the doorbell again. We also need to ring the 728 * endpoint doorbell to restart the ring, but only if there aren't more 729 * cancellations pending. 730 */ 731 static void handle_set_deq_completion(struct xhci_hcd *xhci, 732 struct xhci_event_cmd *event, 733 union xhci_trb *trb) 734 { 735 unsigned int slot_id; 736 unsigned int ep_index; 737 struct xhci_ring *ep_ring; 738 struct xhci_virt_device *dev; 739 struct xhci_ep_ctx *ep_ctx; 740 struct xhci_slot_ctx *slot_ctx; 741 742 slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); 743 ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); 744 dev = xhci->devs[slot_id]; 745 ep_ring = dev->eps[ep_index].ring; 746 ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); 747 slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx); 748 749 if (GET_COMP_CODE(event->status) != COMP_SUCCESS) { 750 unsigned int ep_state; 751 unsigned int slot_state; 752 753 switch (GET_COMP_CODE(event->status)) { 754 case COMP_TRB_ERR: 755 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because " 756 "of stream ID configuration\n"); 757 break; 758 case COMP_CTX_STATE: 759 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due " 760 "to incorrect slot or ep state.\n"); 761 ep_state = ep_ctx->ep_info; 762 ep_state &= EP_STATE_MASK; 763 slot_state = slot_ctx->dev_state; 764 slot_state = GET_SLOT_STATE(slot_state); 765 xhci_dbg(xhci, "Slot state = %u, EP state = %u\n", 766 slot_state, ep_state); 767 break; 768 case COMP_EBADSLT: 769 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because " 770 "slot %u was not enabled.\n", slot_id); 771 break; 772 default: 773 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown " 774 "completion code of %u.\n", 775 GET_COMP_CODE(event->status)); 776 break; 777 } 778 /* OK what do we do now? The endpoint state is hosed, and we 779 * should never get to this point if the synchronization between 780 * queueing, and endpoint state are correct. This might happen 781 * if the device gets disconnected after we've finished 782 * cancelling URBs, which might not be an error... 
783 */ 784 } else { 785 xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n", 786 ep_ctx->deq); 787 } 788 789 dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING; 790 ring_ep_doorbell(xhci, slot_id, ep_index); 791 } 792 793 static void handle_reset_ep_completion(struct xhci_hcd *xhci, 794 struct xhci_event_cmd *event, 795 union xhci_trb *trb) 796 { 797 int slot_id; 798 unsigned int ep_index; 799 struct xhci_ring *ep_ring; 800 801 slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); 802 ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); 803 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; 804 /* This command will only fail if the endpoint wasn't halted, 805 * but we don't care. 806 */ 807 xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n", 808 (unsigned int) GET_COMP_CODE(event->status)); 809 810 /* HW with the reset endpoint quirk needs to have a configure endpoint 811 * command complete before the endpoint can be used. Queue that here 812 * because the HW can't handle two commands being queued in a row. 813 */ 814 if (xhci->quirks & XHCI_RESET_EP_QUIRK) { 815 xhci_dbg(xhci, "Queueing configure endpoint command\n"); 816 xhci_queue_configure_endpoint(xhci, 817 xhci->devs[slot_id]->in_ctx->dma, slot_id, 818 false); 819 xhci_ring_cmd_db(xhci); 820 } else { 821 /* Clear our internal halted state and restart the ring */ 822 xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED; 823 ring_ep_doorbell(xhci, slot_id, ep_index); 824 } 825 } 826 827 /* Check to see if a command in the device's command queue matches this one. 828 * Signal the completion or free the command, and return 1. Return 0 if the 829 * completed command isn't at the head of the command list. 830 */ 831 static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci, 832 struct xhci_virt_device *virt_dev, 833 struct xhci_event_cmd *event) 834 { 835 struct xhci_command *command; 836 837 if (list_empty(&virt_dev->cmd_list)) 838 return 0; 839 840 command = list_entry(virt_dev->cmd_list.next, 841 struct xhci_command, cmd_list); 842 if (xhci->cmd_ring->dequeue != command->command_trb) 843 return 0; 844 845 command->status = 846 GET_COMP_CODE(event->status); 847 list_del(&command->cmd_list); 848 if (command->completion) 849 complete(command->completion); 850 else 851 xhci_free_command(xhci, command); 852 return 1; 853 } 854 855 static void handle_cmd_completion(struct xhci_hcd *xhci, 856 struct xhci_event_cmd *event) 857 { 858 int slot_id = TRB_TO_SLOT_ID(event->flags); 859 u64 cmd_dma; 860 dma_addr_t cmd_dequeue_dma; 861 struct xhci_input_control_ctx *ctrl_ctx; 862 struct xhci_virt_device *virt_dev; 863 unsigned int ep_index; 864 struct xhci_ring *ep_ring; 865 unsigned int ep_state; 866 867 cmd_dma = event->cmd_trb; 868 cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, 869 xhci->cmd_ring->dequeue); 870 /* Is the command ring deq ptr out of sync with the deq seg ptr? */ 871 if (cmd_dequeue_dma == 0) { 872 xhci->error_bitmask |= 1 << 4; 873 return; 874 } 875 /* Does the DMA address match our internal dequeue pointer address? 
static void handle_cmd_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	int slot_id = TRB_TO_SLOT_ID(event->flags);
	u64 cmd_dma;
	dma_addr_t cmd_dequeue_dma;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_virt_device *virt_dev;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	unsigned int ep_state;

	cmd_dma = event->cmd_trb;
	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			xhci->cmd_ring->dequeue);
	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
	if (cmd_dequeue_dma == 0) {
		xhci->error_bitmask |= 1 << 4;
		return;
	}
	/* Does the DMA address match our internal dequeue pointer address? */
	if (cmd_dma != (u64) cmd_dequeue_dma) {
		xhci->error_bitmask |= 1 << 5;
		return;
	}
	switch (xhci->cmd_ring->dequeue->generic.field[3] & TRB_TYPE_BITMASK) {
	case TRB_TYPE(TRB_ENABLE_SLOT):
		if (GET_COMP_CODE(event->status) == COMP_SUCCESS)
			xhci->slot_id = slot_id;
		else
			xhci->slot_id = 0;
		complete(&xhci->addr_dev);
		break;
	case TRB_TYPE(TRB_DISABLE_SLOT):
		if (xhci->devs[slot_id])
			xhci_free_virt_device(xhci, slot_id);
		break;
	case TRB_TYPE(TRB_CONFIG_EP):
		virt_dev = xhci->devs[slot_id];
		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
			break;
		/*
		 * Configure endpoint commands can come from the USB core
		 * configuration or alt setting changes, or because the HW
		 * needed an extra configure endpoint command after a reset
		 * endpoint command. In the latter case, the xHCI driver is
		 * not waiting on the configure endpoint command.
		 */
		ctrl_ctx = xhci_get_input_control_ctx(xhci,
				virt_dev->in_ctx);
		/* Input ctx add_flags are the endpoint index plus one */
		ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1;
		/* A usb_set_interface() call directly after clearing a halted
		 * condition may race on this quirky hardware.
		 * Not worth worrying about, since this is prototype hardware.
		 */
		if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
				ep_index != (unsigned int) -1 &&
				ctrl_ctx->add_flags - SLOT_FLAG ==
					ctrl_ctx->drop_flags) {
			ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
			ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
			if (!(ep_state & EP_HALTED))
				goto bandwidth_change;
			xhci_dbg(xhci, "Completed config ep cmd - "
					"last ep index = %d, state = %d\n",
					ep_index, ep_state);
			/* Clear our internal halted state and restart ring */
			xhci->devs[slot_id]->eps[ep_index].ep_state &=
				~EP_HALTED;
			ring_ep_doorbell(xhci, slot_id, ep_index);
			break;
		}
bandwidth_change:
		xhci_dbg(xhci, "Completed config ep cmd\n");
		xhci->devs[slot_id]->cmd_status =
			GET_COMP_CODE(event->status);
		complete(&xhci->devs[slot_id]->cmd_completion);
		break;
	case TRB_TYPE(TRB_EVAL_CONTEXT):
		virt_dev = xhci->devs[slot_id];
		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
			break;
		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
		complete(&xhci->devs[slot_id]->cmd_completion);
		break;
	case TRB_TYPE(TRB_ADDR_DEV):
		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
		complete(&xhci->addr_dev);
		break;
	case TRB_TYPE(TRB_STOP_RING):
		handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue);
		break;
	case TRB_TYPE(TRB_SET_DEQ):
		handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
		break;
	case TRB_TYPE(TRB_CMD_NOOP):
		++xhci->noops_handled;
		break;
	case TRB_TYPE(TRB_RESET_EP):
		handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
		break;
	case TRB_TYPE(TRB_RESET_DEV):
		xhci_dbg(xhci, "Completed reset device command.\n");
		slot_id = TRB_TO_SLOT_ID(
				xhci->cmd_ring->dequeue->generic.field[3]);
		virt_dev = xhci->devs[slot_id];
		if (virt_dev)
			handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
		else
			xhci_warn(xhci, "Reset device command completion "
					"for disabled slot %u\n", slot_id);
		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci->error_bitmask |= 1 << 6;
		break;
	}
	inc_deq(xhci, xhci->cmd_ring, false);
}
static void handle_port_status(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 port_id;

	/* Port status change events always have a successful completion code */
	if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) {
		xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
		xhci->error_bitmask |= 1 << 8;
	}
	/* FIXME: core doesn't care about all port link state changes yet */
	port_id = GET_PORT_ID(event->generic.field[0]);
	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);

	/* Update event ring dequeue pointer before dropping the lock */
	inc_deq(xhci, xhci->event_ring, true);
	xhci_set_hc_event_deq(xhci);

	spin_unlock(&xhci->lock);
	/* Pass this up to the core */
	usb_hcd_poll_rh_status(xhci_to_hcd(xhci));
	spin_lock(&xhci->lock);
}

/*
 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
 * at end_trb, which may be in another segment. If the suspect DMA address is a
 * TRB in this TD, this function returns that TRB's segment. Otherwise it
 * returns 0.
 */
struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
		union xhci_trb *start_trb,
		union xhci_trb *end_trb,
		dma_addr_t suspect_dma)
{
	dma_addr_t start_dma;
	dma_addr_t end_seg_dma;
	dma_addr_t end_trb_dma;
	struct xhci_segment *cur_seg;

	start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
	cur_seg = start_seg;

	do {
		if (start_dma == 0)
			return 0;
		/* We may get an event for a Link TRB in the middle of a TD */
		end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
				&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
		/* If the end TRB isn't in this segment, this is set to 0 */
		end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);

		if (end_trb_dma > 0) {
			/* The end TRB is in this segment, so suspect should be here */
			if (start_dma <= end_trb_dma) {
				if (suspect_dma >= start_dma &&
						suspect_dma <= end_trb_dma)
					return cur_seg;
			} else {
				/* Case for one segment with
				 * a TD wrapped around to the top
				 */
				if ((suspect_dma >= start_dma &&
						suspect_dma <= end_seg_dma) ||
						(suspect_dma >= cur_seg->dma &&
						 suspect_dma <= end_trb_dma))
					return cur_seg;
			}
			return 0;
		} else {
			/* Might still be somewhere in this segment */
			if (suspect_dma >= start_dma &&
					suspect_dma <= end_seg_dma)
				return cur_seg;
		}
		cur_seg = cur_seg->next;
		start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
	} while (cur_seg != start_seg);

	return 0;
}
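/*
 * Worked example for the wrap case above: on a single-segment ring, a TD can
 * start near the end of the segment and wrap through the link TRB back to
 * the top, so the TD occupies [start_dma, end_seg_dma] plus
 * [cur_seg->dma, end_trb_dma]. A suspect DMA address in either interval is
 * part of the TD; anything in the gap between end_trb and start_trb is not.
 */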
static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		struct xhci_td *td, union xhci_trb *event_trb)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];

	ep->ep_state |= EP_HALTED;
	ep->stopped_td = td;
	ep->stopped_trb = event_trb;
	xhci_queue_reset_ep(xhci, slot_id, ep_index);
	xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
	xhci_ring_cmd_db(xhci);
}

/* Check if an error has halted the endpoint ring. The class driver will
 * clean up the halt for a non-default control endpoint if we indicate a
 * stall. However, a babble and other errors also halt the endpoint ring, and
 * the class driver won't clear the halt in that case, so we need to issue a
 * Set Transfer Ring Dequeue Pointer command manually.
 */
static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		unsigned int trb_comp_code)
{
	/* TRB completion codes that may require a manual halt cleanup */
	if (trb_comp_code == COMP_TX_ERR ||
			trb_comp_code == COMP_BABBLE ||
			trb_comp_code == COMP_SPLIT_ERR)
		/* The 0.95 spec says a babbling control endpoint
		 * is not halted. The 0.96 spec says it is. Some HW
		 * claims to be 0.95 compliant, but it halts the control
		 * endpoint anyway. Check if a babble halted the
		 * endpoint.
		 */
		if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_HALTED)
			return 1;

	return 0;
}

int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
{
	if (trb_comp_code >= 224 && trb_comp_code <= 255) {
		/* Vendor defined "informational" completion code,
		 * treat as not-an-error.
		 */
		xhci_dbg(xhci, "Vendor defined info completion code %u\n",
				trb_comp_code);
		xhci_dbg(xhci, "Treating code as success.\n");
		return 1;
	}
	return 0;
}
/*
 * If this function returns an error condition, it means it got a Transfer
 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
 * At this point, the host controller is probably hosed and should be reset.
 */
static int handle_tx_event(struct xhci_hcd *xhci,
		struct xhci_transfer_event *event)
{
	struct xhci_virt_device *xdev;
	struct xhci_virt_ep *ep;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct xhci_td *td = NULL;
	dma_addr_t event_dma;
	struct xhci_segment *event_seg;
	union xhci_trb *event_trb;
	struct urb *urb = NULL;
	int status = -EINPROGRESS;
	struct xhci_ep_ctx *ep_ctx;
	u32 trb_comp_code;

	xhci_dbg(xhci, "In %s\n", __func__);
	slot_id = TRB_TO_SLOT_ID(event->flags);
	xdev = xhci->devs[slot_id];
	if (!xdev) {
		xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
		return -ENODEV;
	}

	/* Endpoint ID is 1 based, our index is zero based */
	ep_index = TRB_TO_EP_ID(event->flags) - 1;
	xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
	ep = &xdev->eps[ep_index];
	ep_ring = ep->ring;
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
		xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n");
		return -ENODEV;
	}

	event_dma = event->buffer;
	/* This TRB should be in the TD at the head of this ring's TD list */
	xhci_dbg(xhci, "%s - checking for list empty\n", __func__);
	if (list_empty(&ep_ring->td_list)) {
		xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
				TRB_TO_SLOT_ID(event->flags), ep_index);
		xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
				(unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
		xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
		urb = NULL;
		goto cleanup;
	}
	xhci_dbg(xhci, "%s - getting list entry\n", __func__);
	td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);

	/* Is this a TRB in the currently executing TD? */
	xhci_dbg(xhci, "%s - looking for TD\n", __func__);
	event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
			td->last_trb, event_dma);
	xhci_dbg(xhci, "%s - found event_seg = %p\n", __func__, event_seg);
	if (!event_seg) {
		/* HC is busted, give up! */
		xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n");
		return -ESHUTDOWN;
	}
	event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)];
	xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
			(unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
	xhci_dbg(xhci, "Offset 0x00 (buffer lo) = 0x%x\n",
			lower_32_bits(event->buffer));
	xhci_dbg(xhci, "Offset 0x04 (buffer hi) = 0x%x\n",
			upper_32_bits(event->buffer));
	xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n",
			(unsigned int) event->transfer_len);
	xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n",
			(unsigned int) event->flags);

	/* Look for common error cases */
	trb_comp_code = GET_COMP_CODE(event->transfer_len);
	switch (trb_comp_code) {
	/* Skip codes that require special handling depending on
	 * transfer type
	 */
	case COMP_SUCCESS:
	case COMP_SHORT_TX:
		break;
	case COMP_STOP:
		xhci_dbg(xhci, "Stopped on Transfer TRB\n");
		break;
	case COMP_STOP_INVAL:
		xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
		break;
	case COMP_STALL:
		xhci_warn(xhci, "WARN: Stalled endpoint\n");
		ep->ep_state |= EP_HALTED;
		status = -EPIPE;
		break;
	case COMP_TRB_ERR:
		xhci_warn(xhci, "WARN: TRB error on endpoint\n");
		status = -EILSEQ;
		break;
	case COMP_SPLIT_ERR:
	case COMP_TX_ERR:
		xhci_warn(xhci, "WARN: transfer error on endpoint\n");
		status = -EPROTO;
		break;
	case COMP_BABBLE:
		xhci_warn(xhci, "WARN: babble error on endpoint\n");
		status = -EOVERFLOW;
		break;
	case COMP_DB_ERR:
		xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
		status = -ENOSR;
		break;
	default:
		if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
			status = 0;
			break;
		}
		xhci_warn(xhci, "ERROR Unknown event condition, HC probably busted\n");
		urb = NULL;
		goto cleanup;
	}
	/* Now update the urb's actual_length and give back to the core */
	/* Was this a control transfer? */
	if (usb_endpoint_xfer_control(&td->urb->ep->desc)) {
		xhci_debug_trb(xhci, xhci->event_ring->dequeue);
		switch (trb_comp_code) {
		case COMP_SUCCESS:
			if (event_trb == ep_ring->dequeue) {
				xhci_warn(xhci, "WARN: Success on ctrl setup TRB without IOC set??\n");
				status = -ESHUTDOWN;
			} else if (event_trb != td->last_trb) {
				xhci_warn(xhci, "WARN: Success on ctrl data TRB without IOC set??\n");
				status = -ESHUTDOWN;
			} else {
				xhci_dbg(xhci, "Successful control transfer!\n");
				status = 0;
			}
			break;
		case COMP_SHORT_TX:
			xhci_warn(xhci, "WARN: short transfer on control ep\n");
			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
				status = -EREMOTEIO;
			else
				status = 0;
			break;

		default:
			if (!xhci_requires_manual_halt_cleanup(xhci,
						ep_ctx, trb_comp_code))
				break;
			xhci_dbg(xhci, "TRB error code %u, "
					"halted endpoint index = %u\n",
					trb_comp_code, ep_index);
			/* else fall through */
		case COMP_STALL:
			/* Did we transfer part of the data (middle) phase? */
			if (event_trb != ep_ring->dequeue &&
					event_trb != td->last_trb)
				td->urb->actual_length =
					td->urb->transfer_buffer_length
					- TRB_LEN(event->transfer_len);
			else
				td->urb->actual_length = 0;

			xhci_cleanup_halted_endpoint(xhci,
					slot_id, ep_index, td, event_trb);
			goto td_cleanup;
		}
		/*
		 * Did we transfer any data, despite the errors that might have
		 * happened? I.e. did we get past the setup stage?
		 */
		if (event_trb != ep_ring->dequeue) {
			/* The event was for the status stage */
			if (event_trb == td->last_trb) {
				if (td->urb->actual_length != 0) {
					/* Don't overwrite a previously set error code */
					if ((status == -EINPROGRESS ||
								status == 0) &&
							(td->urb->transfer_flags
							 & URB_SHORT_NOT_OK))
						/* Did we already see a short data stage? */
						status = -EREMOTEIO;
				} else {
					td->urb->actual_length =
						td->urb->transfer_buffer_length;
				}
			} else {
				/* Maybe the event was for the data stage? */
				if (trb_comp_code != COMP_STOP_INVAL) {
					/* We didn't stop on a link TRB in the middle */
					td->urb->actual_length =
						td->urb->transfer_buffer_length -
						TRB_LEN(event->transfer_len);
					xhci_dbg(xhci, "Waiting for status stage event\n");
					urb = NULL;
					goto cleanup;
				}
			}
		}
	} else {
		switch (trb_comp_code) {
		case COMP_SUCCESS:
			/* Double check that the HW transferred everything. */
			if (event_trb != td->last_trb) {
				xhci_warn(xhci, "WARN Successful completion "
						"on short TX\n");
				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
					status = -EREMOTEIO;
				else
					status = 0;
			} else {
				if (usb_endpoint_xfer_bulk(&td->urb->ep->desc))
					xhci_dbg(xhci, "Successful bulk "
							"transfer!\n");
				else
					xhci_dbg(xhci, "Successful interrupt "
							"transfer!\n");
				status = 0;
			}
			break;
		case COMP_SHORT_TX:
			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
				status = -EREMOTEIO;
			else
				status = 0;
			break;
		default:
			/* Others already handled above */
			break;
		}
		dev_dbg(&td->urb->dev->dev,
				"ep %#x - asked for %d bytes, "
				"%d bytes untransferred\n",
				td->urb->ep->desc.bEndpointAddress,
				td->urb->transfer_buffer_length,
				TRB_LEN(event->transfer_len));
		/* Fast path - was this the last TRB in the TD for this URB? */
		if (event_trb == td->last_trb) {
			if (TRB_LEN(event->transfer_len) != 0) {
				td->urb->actual_length =
					td->urb->transfer_buffer_length -
					TRB_LEN(event->transfer_len);
				if (td->urb->transfer_buffer_length <
						td->urb->actual_length) {
					xhci_warn(xhci, "HC gave bad length "
							"of %d bytes left\n",
							TRB_LEN(event->transfer_len));
					td->urb->actual_length = 0;
					if (td->urb->transfer_flags &
							URB_SHORT_NOT_OK)
						status = -EREMOTEIO;
					else
						status = 0;
				}
				/* Don't overwrite a previously set error code */
				if (status == -EINPROGRESS) {
					if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
						status = -EREMOTEIO;
					else
						status = 0;
				}
			} else {
				td->urb->actual_length =
					td->urb->transfer_buffer_length;
				/* Ignore a short packet completion if the
				 * untransferred length was zero.
				 */
				if (status == -EREMOTEIO)
					status = 0;
			}
		} else {
			/* Slow path - walk the list, starting from the dequeue
			 * pointer, to get the actual length transferred.
			 */
			union xhci_trb *cur_trb;
			struct xhci_segment *cur_seg;

			td->urb->actual_length = 0;
			for (cur_trb = ep_ring->dequeue,
					cur_seg = ep_ring->deq_seg;
					cur_trb != event_trb;
					next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
				if ((cur_trb->generic.field[3] & TRB_TYPE_BITMASK) !=
						TRB_TYPE(TRB_TR_NOOP) &&
						(cur_trb->generic.field[3] & TRB_TYPE_BITMASK) !=
						TRB_TYPE(TRB_LINK))
					td->urb->actual_length +=
						TRB_LEN(cur_trb->generic.field[2]);
			}
			/* If the ring didn't stop on a Link or No-op TRB, add
			 * in the actual bytes transferred from the Normal TRB
			 */
			if (trb_comp_code != COMP_STOP_INVAL)
				td->urb->actual_length +=
					TRB_LEN(cur_trb->generic.field[2]) -
					TRB_LEN(event->transfer_len);
		}
	}
	if (trb_comp_code == COMP_STOP_INVAL ||
			trb_comp_code == COMP_STOP) {
		/* The Endpoint Stop Command completion will take care of any
		 * stopped TDs. A stopped TD may be restarted, so don't update
		 * the ring dequeue pointer or take this TD off any lists yet.
		 */
		ep->stopped_td = td;
		ep->stopped_trb = event_trb;
	} else {
		if (trb_comp_code == COMP_STALL) {
			/* The transfer is completed from the driver's
			 * perspective, but we need to issue a set dequeue
			 * command for this stalled endpoint to move the dequeue
			 * pointer past the TD. We can't do that here because
			 * the halt condition must be cleared first. Let the
			 * USB class driver clear the stall later.
			 */
			ep->stopped_td = td;
			ep->stopped_trb = event_trb;
		} else if (xhci_requires_manual_halt_cleanup(xhci,
					ep_ctx, trb_comp_code)) {
			/* Other types of errors halt the endpoint, but the
			 * class driver doesn't call usb_reset_endpoint() unless
			 * the error is -EPIPE. Clear the halted status in the
			 * xHCI hardware manually.
			 */
			xhci_cleanup_halted_endpoint(xhci,
					slot_id, ep_index, td, event_trb);
		} else {
			/* Update ring dequeue pointer */
			while (ep_ring->dequeue != td->last_trb)
				inc_deq(xhci, ep_ring, false);
			inc_deq(xhci, ep_ring, false);
		}

td_cleanup:
		/* Clean up the endpoint's TD list */
		urb = td->urb;
		/* Do one last check of the actual transfer length.
		 * If the host controller said we transferred more data than
		 * the buffer length, urb->actual_length will be a very big
		 * number (since it's unsigned). Play it safe and say we didn't
		 * transfer anything.
		 */
		if (urb->actual_length > urb->transfer_buffer_length) {
			xhci_warn(xhci, "URB transfer length is wrong, "
					"xHC issue? req. len = %u, "
					"act. len = %u\n",
					urb->transfer_buffer_length,
					urb->actual_length);
			urb->actual_length = 0;
			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
				status = -EREMOTEIO;
			else
				status = 0;
		}
		list_del(&td->td_list);
		/* Was this TD slated to be cancelled but completed anyway? */
		if (!list_empty(&td->cancelled_td_list))
			list_del(&td->cancelled_td_list);

		/* Leave the TD around for the reset endpoint function to use
		 * (but only if it's not a control endpoint, since we already
		 * queued the Set TR dequeue pointer command for stalled
		 * control endpoints).
		 */
		if (usb_endpoint_xfer_control(&urb->ep->desc) ||
				(trb_comp_code != COMP_STALL &&
				 trb_comp_code != COMP_BABBLE)) {
			kfree(td);
		}
		urb->hcpriv = NULL;
	}
cleanup:
	inc_deq(xhci, xhci->event_ring, true);
	xhci_set_hc_event_deq(xhci);

	/* FIXME for multi-TD URBs (who have buffers bigger than 64MB) */
	if (urb) {
		usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
		xhci_dbg(xhci, "Giveback URB %p, len = %d, status = %d\n",
				urb, urb->actual_length, status);
		spin_unlock(&xhci->lock);
		usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
		spin_lock(&xhci->lock);
	}
	return 0;
}
/*
 * This function handles all OS-owned events on the event ring. It may drop
 * xhci->lock between event processing (e.g. to pass up port status changes).
 */
void xhci_handle_event(struct xhci_hcd *xhci)
{
	union xhci_trb *event;
	int update_ptrs = 1;
	int ret;

	xhci_dbg(xhci, "In %s\n", __func__);
	if (!xhci->event_ring || !xhci->event_ring->dequeue) {
		xhci->error_bitmask |= 1 << 1;
		return;
	}

	event = xhci->event_ring->dequeue;
	/* Does the HC or OS own the TRB? */
	if ((event->event_cmd.flags & TRB_CYCLE) !=
			xhci->event_ring->cycle_state) {
		xhci->error_bitmask |= 1 << 2;
		return;
	}
	xhci_dbg(xhci, "%s - OS owns TRB\n", __func__);

	/* FIXME: Handle more event types. */
	switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) {
	case TRB_TYPE(TRB_COMPLETION):
		xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__);
		handle_cmd_completion(xhci, &event->event_cmd);
		xhci_dbg(xhci, "%s - returned from handle_cmd_completion\n", __func__);
		break;
	case TRB_TYPE(TRB_PORT_STATUS):
		xhci_dbg(xhci, "%s - calling handle_port_status\n", __func__);
		handle_port_status(xhci, event);
		xhci_dbg(xhci, "%s - returned from handle_port_status\n", __func__);
		update_ptrs = 0;
		break;
	case TRB_TYPE(TRB_TRANSFER):
		xhci_dbg(xhci, "%s - calling handle_tx_event\n", __func__);
		ret = handle_tx_event(xhci, &event->trans_event);
		xhci_dbg(xhci, "%s - returned from handle_tx_event\n", __func__);
		if (ret < 0)
			xhci->error_bitmask |= 1 << 9;
		else
			update_ptrs = 0;
		break;
	default:
		xhci->error_bitmask |= 1 << 3;
	}
	/* Any of the above functions may drop and re-acquire the lock, so check
	 * to make sure a watchdog timer didn't mark the host as non-responsive.
	 */
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "xHCI host dying, returning from "
				"event handler.\n");
		return;
	}

	if (update_ptrs) {
		/* Update SW and HC event ring dequeue pointer */
		inc_deq(xhci, xhci->event_ring, true);
		xhci_set_hc_event_deq(xhci);
	}
	/* Are there more items on the event ring? */
	xhci_handle_event(xhci);
}

/****		Endpoint Ring Operations	****/
/*
 * Generic function for queueing a TRB on a ring.
 * The caller must have checked to make sure there's room on the ring.
 */
static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		bool consumer,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	struct xhci_generic_trb *trb;

	trb = &ring->enqueue->generic;
	trb->field[0] = field1;
	trb->field[1] = field2;
	trb->field[2] = field3;
	trb->field[3] = field4;
	inc_enq(xhci, ring, consumer);
}

/*
 * Does various checks on the endpoint ring, and makes it ready to queue
 * num_trbs.
 * FIXME allocate segments if the ring is full.
 */
static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
	/* Make sure the endpoint has been added to xHC schedule */
	xhci_dbg(xhci, "Endpoint state = 0x%x\n", ep_state);
	switch (ep_state) {
	case EP_STATE_DISABLED:
		/*
		 * USB core changed config/interfaces without notifying us,
		 * or hardware is reporting the wrong state.
		 */
		xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
		return -ENOENT;
	case EP_STATE_ERROR:
		xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
		/* FIXME event handling code for error needs to clear it */
		/* XXX not sure if this should be -ENOENT or not */
		return -EINVAL;
	case EP_STATE_HALTED:
		xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
	case EP_STATE_STOPPED:
	case EP_STATE_RUNNING:
		break;
	default:
		xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
		/*
		 * FIXME issue Configure Endpoint command to try to get the HC
		 * back into a known state.
		 */
		return -EINVAL;
	}
	if (!room_on_ring(xhci, ep_ring, num_trbs)) {
		/* FIXME allocate more room */
		xhci_err(xhci, "ERROR no room on ep ring\n");
		return -ENOMEM;
	}
	return 0;
}

static int prepare_transfer(struct xhci_hcd *xhci,
		struct xhci_virt_device *xdev,
		unsigned int ep_index,
		unsigned int num_trbs,
		struct urb *urb,
		struct xhci_td **td,
		gfp_t mem_flags)
{
	int ret;
	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	ret = prepare_ring(xhci, xdev->eps[ep_index].ring,
			ep_ctx->ep_info & EP_STATE_MASK,
			num_trbs, mem_flags);
	if (ret)
		return ret;
	*td = kzalloc(sizeof(struct xhci_td), mem_flags);
	if (!*td)
		return -ENOMEM;
	INIT_LIST_HEAD(&(*td)->td_list);
	INIT_LIST_HEAD(&(*td)->cancelled_td_list);

	ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
	if (unlikely(ret)) {
		kfree(*td);
		return ret;
	}

	(*td)->urb = urb;
	urb->hcpriv = (void *) (*td);
	/* Add this TD to the tail of the endpoint ring's TD list */
	list_add_tail(&(*td)->td_list, &xdev->eps[ep_index].ring->td_list);
	(*td)->start_seg = xdev->eps[ep_index].ring->enq_seg;
	(*td)->first_trb = xdev->eps[ep_index].ring->enqueue;

	return 0;
}

static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
{
	int num_sgs, num_trbs, running_total, temp, i;
	struct scatterlist *sg;

	sg = NULL;
	num_sgs = urb->num_sgs;
	temp = urb->transfer_buffer_length;

	xhci_dbg(xhci, "count sg list trbs:\n");
	num_trbs = 0;
	for_each_sg(urb->sg->sg, sg, num_sgs, i) {
		unsigned int previous_total_trbs = num_trbs;
		unsigned int len = sg_dma_len(sg);

		/* Scatter gather list entries may cross 64KB boundaries */
		running_total = TRB_MAX_BUFF_SIZE -
			(sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
		if (running_total != 0)
			num_trbs++;

		/* How many more 64KB chunks to transfer, how many more TRBs? */
		while (running_total < sg_dma_len(sg)) {
			num_trbs++;
			running_total += TRB_MAX_BUFF_SIZE;
		}
		xhci_dbg(xhci, " sg #%d: dma = %#llx, len = %#x (%d), num_trbs = %d\n",
				i, (unsigned long long)sg_dma_address(sg),
				len, len, num_trbs - previous_total_trbs);

		len = min_t(int, len, temp);
		temp -= len;
		if (temp == 0)
			break;
	}
	xhci_dbg(xhci, "\n");
	if (!in_interrupt())
		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %d, sglist used, num_trbs = %d\n",
				urb->ep->desc.bEndpointAddress,
				urb->transfer_buffer_length,
				num_trbs);
	return num_trbs;
}
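/*
 * Worked example of the counting above: an sg entry of 70000 bytes whose DMA
 * address ends in 0xf000 needs three TRBs, since TRB buffers can't cross a
 * 64KB boundary: 0x1000 bytes up to the first boundary, one full 64KB TRB,
 * and a final TRB for the remaining 368 bytes.
 */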
may cross 64KB boundaries */ 1680 running_total = TRB_MAX_BUFF_SIZE - 1681 (sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); 1682 if (running_total != 0) 1683 num_trbs++; 1684 1685 /* How many more 64KB chunks to transfer, how many more TRBs? */ 1686 while (running_total < sg_dma_len(sg)) { 1687 num_trbs++; 1688 running_total += TRB_MAX_BUFF_SIZE; 1689 } 1690 xhci_dbg(xhci, " sg #%d: dma = %#llx, len = %#x (%d), num_trbs = %d\n", 1691 i, (unsigned long long)sg_dma_address(sg), 1692 len, len, num_trbs - previous_total_trbs); 1693 1694 len = min_t(int, len, temp); 1695 temp -= len; 1696 if (temp == 0) 1697 break; 1698 } 1699 xhci_dbg(xhci, "\n"); 1700 if (!in_interrupt()) 1701 dev_dbg(&urb->dev->dev, "ep %#x - urb len = %d, sglist used, num_trbs = %d\n", 1702 urb->ep->desc.bEndpointAddress, 1703 urb->transfer_buffer_length, 1704 num_trbs); 1705 return num_trbs; 1706 } 1707 1708 static void check_trb_math(struct urb *urb, int num_trbs, int running_total) 1709 { 1710 if (num_trbs != 0) 1711 dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of " 1712 "TRBs, %d left\n", __func__, 1713 urb->ep->desc.bEndpointAddress, num_trbs); 1714 if (running_total != urb->transfer_buffer_length) 1715 dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, " 1716 "queued %#x (%d), asked for %#x (%d)\n", 1717 __func__, 1718 urb->ep->desc.bEndpointAddress, 1719 running_total, running_total, 1720 urb->transfer_buffer_length, 1721 urb->transfer_buffer_length); 1722 } 1723 1724 static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id, 1725 unsigned int ep_index, int start_cycle, 1726 struct xhci_generic_trb *start_trb, struct xhci_td *td) 1727 { 1728 /* 1729 * Pass all the TRBs to the hardware at once and make sure this write 1730 * isn't reordered. 1731 */ 1732 wmb(); 1733 start_trb->field[3] |= start_cycle; 1734 ring_ep_doorbell(xhci, slot_id, ep_index); 1735 } 1736 1737 /* 1738 * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt 1739 * endpoint is to be serviced, the xHC will consume (at most) one TD. A TD 1740 * (comprised of sg list entries) can take several service intervals to 1741 * transmit. 1742 */ 1743 int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 1744 struct urb *urb, int slot_id, unsigned int ep_index) 1745 { 1746 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, 1747 xhci->devs[slot_id]->out_ctx, ep_index); 1748 int xhci_interval; 1749 int ep_interval; 1750 1751 xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info); 1752 ep_interval = urb->interval; 1753 /* Convert to microframes */ 1754 if (urb->dev->speed == USB_SPEED_LOW || 1755 urb->dev->speed == USB_SPEED_FULL) 1756 ep_interval *= 8; 1757 /* FIXME change this to a warning and a suggestion to use the new API 1758 * to set the polling interval (once the API is added). 1759 */ 1760 if (xhci_interval != ep_interval) { 1761 if (!printk_ratelimit()) 1762 dev_dbg(&urb->dev->dev, "Driver uses different interval" 1763 " (%d microframe%s) than xHCI " 1764 "(%d microframe%s)\n", 1765 ep_interval, 1766 ep_interval == 1 ? "" : "s", 1767 xhci_interval, 1768 xhci_interval == 1 ? 
"" : "s"); 1769 urb->interval = xhci_interval; 1770 /* Convert back to frames for LS/FS devices */ 1771 if (urb->dev->speed == USB_SPEED_LOW || 1772 urb->dev->speed == USB_SPEED_FULL) 1773 urb->interval /= 8; 1774 } 1775 return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index); 1776 } 1777 1778 /* 1779 * The TD size is the number of bytes remaining in the TD (including this TRB), 1780 * right shifted by 10. 1781 * It must fit in bits 21:17, so it can't be bigger than 31. 1782 */ 1783 static u32 xhci_td_remainder(unsigned int remainder) 1784 { 1785 u32 max = (1 << (21 - 17 + 1)) - 1; 1786 1787 if ((remainder >> 10) >= max) 1788 return max << 17; 1789 else 1790 return (remainder >> 10) << 17; 1791 } 1792 1793 static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 1794 struct urb *urb, int slot_id, unsigned int ep_index) 1795 { 1796 struct xhci_ring *ep_ring; 1797 unsigned int num_trbs; 1798 struct xhci_td *td; 1799 struct scatterlist *sg; 1800 int num_sgs; 1801 int trb_buff_len, this_sg_len, running_total; 1802 bool first_trb; 1803 u64 addr; 1804 1805 struct xhci_generic_trb *start_trb; 1806 int start_cycle; 1807 1808 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; 1809 num_trbs = count_sg_trbs_needed(xhci, urb); 1810 num_sgs = urb->num_sgs; 1811 1812 trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id], 1813 ep_index, num_trbs, urb, &td, mem_flags); 1814 if (trb_buff_len < 0) 1815 return trb_buff_len; 1816 /* 1817 * Don't give the first TRB to the hardware (by toggling the cycle bit) 1818 * until we've finished creating all the other TRBs. The ring's cycle 1819 * state may change as we enqueue the other TRBs, so save it too. 1820 */ 1821 start_trb = &ep_ring->enqueue->generic; 1822 start_cycle = ep_ring->cycle_state; 1823 1824 running_total = 0; 1825 /* 1826 * How much data is in the first TRB? 1827 * 1828 * There are three forces at work for TRB buffer pointers and lengths: 1829 * 1. We don't want to walk off the end of this sg-list entry buffer. 1830 * 2. The transfer length that the driver requested may be smaller than 1831 * the amount of memory allocated for this scatter-gather list. 1832 * 3. TRBs buffers can't cross 64KB boundaries. 1833 */ 1834 sg = urb->sg->sg; 1835 addr = (u64) sg_dma_address(sg); 1836 this_sg_len = sg_dma_len(sg); 1837 trb_buff_len = TRB_MAX_BUFF_SIZE - 1838 (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); 1839 trb_buff_len = min_t(int, trb_buff_len, this_sg_len); 1840 if (trb_buff_len > urb->transfer_buffer_length) 1841 trb_buff_len = urb->transfer_buffer_length; 1842 xhci_dbg(xhci, "First length to xfer from 1st sglist entry = %u\n", 1843 trb_buff_len); 1844 1845 first_trb = true; 1846 /* Queue the first TRB, even if it's zero-length */ 1847 do { 1848 u32 field = 0; 1849 u32 length_field = 0; 1850 u32 remainder = 0; 1851 1852 /* Don't change the cycle bit of the first TRB until later */ 1853 if (first_trb) 1854 first_trb = false; 1855 else 1856 field |= ep_ring->cycle_state; 1857 1858 /* Chain all the TRBs together; clear the chain bit in the last 1859 * TRB to indicate it's the last TRB in the chain. 
1860 */ 1861 if (num_trbs > 1) { 1862 field |= TRB_CHAIN; 1863 } else { 1864 /* FIXME - add check for ZERO_PACKET flag before this */ 1865 td->last_trb = ep_ring->enqueue; 1866 field |= TRB_IOC; 1867 } 1868 xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), " 1869 "64KB boundary at %#x, end dma = %#x\n", 1870 (unsigned int) addr, trb_buff_len, trb_buff_len, 1871 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), 1872 (unsigned int) addr + trb_buff_len); 1873 if (TRB_MAX_BUFF_SIZE - 1874 (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) { 1875 xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n"); 1876 xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n", 1877 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), 1878 (unsigned int) addr + trb_buff_len); 1879 } 1880 remainder = xhci_td_remainder(urb->transfer_buffer_length - 1881 running_total) ; 1882 length_field = TRB_LEN(trb_buff_len) | 1883 remainder | 1884 TRB_INTR_TARGET(0); 1885 queue_trb(xhci, ep_ring, false, 1886 lower_32_bits(addr), 1887 upper_32_bits(addr), 1888 length_field, 1889 /* We always want to know if the TRB was short, 1890 * or we won't get an event when it completes. 1891 * (Unless we use event data TRBs, which are a 1892 * waste of space and HC resources.) 1893 */ 1894 field | TRB_ISP | TRB_TYPE(TRB_NORMAL)); 1895 --num_trbs; 1896 running_total += trb_buff_len; 1897 1898 /* Calculate length for next transfer -- 1899 * Are we done queueing all the TRBs for this sg entry? 1900 */ 1901 this_sg_len -= trb_buff_len; 1902 if (this_sg_len == 0) { 1903 --num_sgs; 1904 if (num_sgs == 0) 1905 break; 1906 sg = sg_next(sg); 1907 addr = (u64) sg_dma_address(sg); 1908 this_sg_len = sg_dma_len(sg); 1909 } else { 1910 addr += trb_buff_len; 1911 } 1912 1913 trb_buff_len = TRB_MAX_BUFF_SIZE - 1914 (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); 1915 trb_buff_len = min_t(int, trb_buff_len, this_sg_len); 1916 if (running_total + trb_buff_len > urb->transfer_buffer_length) 1917 trb_buff_len = 1918 urb->transfer_buffer_length - running_total; 1919 } while (running_total < urb->transfer_buffer_length); 1920 1921 check_trb_math(urb, num_trbs, running_total); 1922 giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td); 1923 return 0; 1924 } 1925 1926 /* This is very similar to what ehci-q.c qtd_fill() does */ 1927 int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 1928 struct urb *urb, int slot_id, unsigned int ep_index) 1929 { 1930 struct xhci_ring *ep_ring; 1931 struct xhci_td *td; 1932 int num_trbs; 1933 struct xhci_generic_trb *start_trb; 1934 bool first_trb; 1935 int start_cycle; 1936 u32 field, length_field; 1937 1938 int running_total, trb_buff_len, ret; 1939 u64 addr; 1940 1941 if (urb->sg) 1942 return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index); 1943 1944 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; 1945 1946 num_trbs = 0; 1947 /* How much data is (potentially) left before the 64KB boundary? */ 1948 running_total = TRB_MAX_BUFF_SIZE - 1949 (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); 1950 1951 /* If there's some data on this 64KB chunk, or we have to send a 1952 * zero-length transfer, we need at least one TRB 1953 */ 1954 if (running_total != 0 || urb->transfer_buffer_length == 0) 1955 num_trbs++; 1956 /* How many more 64KB chunks to transfer, how many more TRBs? 
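	 *
	 * Worked example (illustrative): a 100KB transfer whose DMA address
	 * sits 1KB below a 64KB boundary needs one TRB for that first 1KB,
	 * then two more for the remaining 64KB + 35KB, i.e. three TRBs total.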
*/ 1957 while (running_total < urb->transfer_buffer_length) { 1958 num_trbs++; 1959 running_total += TRB_MAX_BUFF_SIZE; 1960 } 1961 /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */ 1962 1963 if (!in_interrupt()) 1964 dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d), addr = %#llx, num_trbs = %d\n", 1965 urb->ep->desc.bEndpointAddress, 1966 urb->transfer_buffer_length, 1967 urb->transfer_buffer_length, 1968 (unsigned long long)urb->transfer_dma, 1969 num_trbs); 1970 1971 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, 1972 num_trbs, urb, &td, mem_flags); 1973 if (ret < 0) 1974 return ret; 1975 1976 /* 1977 * Don't give the first TRB to the hardware (by toggling the cycle bit) 1978 * until we've finished creating all the other TRBs. The ring's cycle 1979 * state may change as we enqueue the other TRBs, so save it too. 1980 */ 1981 start_trb = &ep_ring->enqueue->generic; 1982 start_cycle = ep_ring->cycle_state; 1983 1984 running_total = 0; 1985 /* How much data is in the first TRB? */ 1986 addr = (u64) urb->transfer_dma; 1987 trb_buff_len = TRB_MAX_BUFF_SIZE - 1988 (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); 1989 if (urb->transfer_buffer_length < trb_buff_len) 1990 trb_buff_len = urb->transfer_buffer_length; 1991 1992 first_trb = true; 1993 1994 /* Queue the first TRB, even if it's zero-length */ 1995 do { 1996 u32 remainder = 0; 1997 field = 0; 1998 1999 /* Don't change the cycle bit of the first TRB until later */ 2000 if (first_trb) 2001 first_trb = false; 2002 else 2003 field |= ep_ring->cycle_state; 2004 2005 /* Chain all the TRBs together; clear the chain bit in the last 2006 * TRB to indicate it's the last TRB in the chain. 2007 */ 2008 if (num_trbs > 1) { 2009 field |= TRB_CHAIN; 2010 } else { 2011 /* FIXME - add check for ZERO_PACKET flag before this */ 2012 td->last_trb = ep_ring->enqueue; 2013 field |= TRB_IOC; 2014 } 2015 remainder = xhci_td_remainder(urb->transfer_buffer_length - 2016 running_total); 2017 length_field = TRB_LEN(trb_buff_len) | 2018 remainder | 2019 TRB_INTR_TARGET(0); 2020 queue_trb(xhci, ep_ring, false, 2021 lower_32_bits(addr), 2022 upper_32_bits(addr), 2023 length_field, 2024 /* We always want to know if the TRB was short, 2025 * or we won't get an event when it completes. 2026 * (Unless we use event data TRBs, which are a 2027 * waste of space and HC resources.) 2028 */ 2029 field | TRB_ISP | TRB_TYPE(TRB_NORMAL)); 2030 --num_trbs; 2031 running_total += trb_buff_len; 2032 2033 /* Calculate length for next transfer */ 2034 addr += trb_buff_len; 2035 trb_buff_len = urb->transfer_buffer_length - running_total; 2036 if (trb_buff_len > TRB_MAX_BUFF_SIZE) 2037 trb_buff_len = TRB_MAX_BUFF_SIZE; 2038 } while (running_total < urb->transfer_buffer_length); 2039 2040 check_trb_math(urb, num_trbs, running_total); 2041 giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td); 2042 return 0; 2043 } 2044 2045 /* Caller must have locked xhci->lock */ 2046 int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 2047 struct urb *urb, int slot_id, unsigned int ep_index) 2048 { 2049 struct xhci_ring *ep_ring; 2050 int num_trbs; 2051 int ret; 2052 struct usb_ctrlrequest *setup; 2053 struct xhci_generic_trb *start_trb; 2054 int start_cycle; 2055 u32 field, length_field; 2056 struct xhci_td *td; 2057 2058 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; 2059 2060 /* 2061 * Need to copy setup packet into setup TRB, so we can't use the setup 2062 * DMA address. 
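	 * Instead the eight setup bytes are packed straight into the first
	 * two 32-bit fields of the setup TRB and flagged with TRB_IDT
	 * (immediate data), as done below.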
2063 */ 2064 if (!urb->setup_packet) 2065 return -EINVAL; 2066 2067 if (!in_interrupt()) 2068 xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n", 2069 slot_id, ep_index); 2070 /* 1 TRB for setup, 1 for status */ 2071 num_trbs = 2; 2072 /* 2073 * Don't need to check if we need additional event data and normal TRBs, 2074 * since data in control transfers will never get bigger than 16MB 2075 * XXX: can we get a buffer that crosses 64KB boundaries? 2076 */ 2077 if (urb->transfer_buffer_length > 0) 2078 num_trbs++; 2079 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, num_trbs, 2080 urb, &td, mem_flags); 2081 if (ret < 0) 2082 return ret; 2083 2084 /* 2085 * Don't give the first TRB to the hardware (by toggling the cycle bit) 2086 * until we've finished creating all the other TRBs. The ring's cycle 2087 * state may change as we enqueue the other TRBs, so save it too. 2088 */ 2089 start_trb = &ep_ring->enqueue->generic; 2090 start_cycle = ep_ring->cycle_state; 2091 2092 /* Queue setup TRB - see section 6.4.1.2.1 */ 2093 /* FIXME better way to translate setup_packet into two u32 fields? */ 2094 setup = (struct usb_ctrlrequest *) urb->setup_packet; 2095 queue_trb(xhci, ep_ring, false, 2096 /* FIXME endianness is probably going to bite my ass here. */ 2097 setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16, 2098 setup->wIndex | setup->wLength << 16, 2099 TRB_LEN(8) | TRB_INTR_TARGET(0), 2100 /* Immediate data in pointer */ 2101 TRB_IDT | TRB_TYPE(TRB_SETUP)); 2102 2103 /* If there's data, queue data TRBs */ 2104 field = 0; 2105 length_field = TRB_LEN(urb->transfer_buffer_length) | 2106 xhci_td_remainder(urb->transfer_buffer_length) | 2107 TRB_INTR_TARGET(0); 2108 if (urb->transfer_buffer_length > 0) { 2109 if (setup->bRequestType & USB_DIR_IN) 2110 field |= TRB_DIR_IN; 2111 queue_trb(xhci, ep_ring, false, 2112 lower_32_bits(urb->transfer_dma), 2113 upper_32_bits(urb->transfer_dma), 2114 length_field, 2115 /* Event on short tx */ 2116 field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state); 2117 } 2118 2119 /* Save the DMA address of the last TRB in the TD */ 2120 td->last_trb = ep_ring->enqueue; 2121 2122 /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */ 2123 /* If the device sent data, the status stage is an OUT transfer */ 2124 if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN) 2125 field = 0; 2126 else 2127 field = TRB_DIR_IN; 2128 queue_trb(xhci, ep_ring, false, 2129 0, 2130 0, 2131 TRB_INTR_TARGET(0), 2132 /* Event on completion */ 2133 field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state); 2134 2135 giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td); 2136 return 0; 2137 } 2138 2139 /**** Command Ring Operations ****/ 2140 2141 /* Generic function for queueing a command TRB on the command ring. 2142 * Check to make sure there's room on the command ring for one command TRB. 2143 * Also check that there's room reserved for commands that must not fail. 2144 * If this is a command that must not fail, meaning command_must_succeed = TRUE, 2145 * then only check for the number of reserved spots. 2146 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB 2147 * because the command event handler may want to resubmit a failed command. 
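 * In other words: an ordinary command asks room_on_ring() for
 * cmd_ring_reserved_trbs + 1 slots (its own TRB plus the untouched
 * reserve), while a must-succeed command may dip into the reserve and
 * asks for cmd_ring_reserved_trbs only.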
2148 */ 2149 static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, 2150 u32 field3, u32 field4, bool command_must_succeed) 2151 { 2152 int reserved_trbs = xhci->cmd_ring_reserved_trbs; 2153 if (!command_must_succeed) 2154 reserved_trbs++; 2155 2156 if (!room_on_ring(xhci, xhci->cmd_ring, reserved_trbs)) { 2157 if (!in_interrupt()) 2158 xhci_err(xhci, "ERR: No room for command on command ring\n"); 2159 if (command_must_succeed) 2160 xhci_err(xhci, "ERR: Reserved TRB counting for " 2161 "unfailable commands failed.\n"); 2162 return -ENOMEM; 2163 } 2164 queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3, 2165 field4 | xhci->cmd_ring->cycle_state); 2166 return 0; 2167 } 2168 2169 /* Queue a no-op command on the command ring */ 2170 static int queue_cmd_noop(struct xhci_hcd *xhci) 2171 { 2172 return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP), false); 2173 } 2174 2175 /* 2176 * Place a no-op command on the command ring to test the command and 2177 * event ring. 2178 */ 2179 void *xhci_setup_one_noop(struct xhci_hcd *xhci) 2180 { 2181 if (queue_cmd_noop(xhci) < 0) 2182 return NULL; 2183 xhci->noops_submitted++; 2184 return xhci_ring_cmd_db; 2185 } 2186 2187 /* Queue a slot enable or disable request on the command ring */ 2188 int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id) 2189 { 2190 return queue_command(xhci, 0, 0, 0, 2191 TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false); 2192 } 2193 2194 /* Queue an address device command TRB */ 2195 int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, 2196 u32 slot_id) 2197 { 2198 return queue_command(xhci, lower_32_bits(in_ctx_ptr), 2199 upper_32_bits(in_ctx_ptr), 0, 2200 TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id), 2201 false); 2202 } 2203 2204 /* Queue a reset device command TRB */ 2205 int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id) 2206 { 2207 return queue_command(xhci, 0, 0, 0, 2208 TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id), 2209 false); 2210 } 2211 2212 /* Queue a configure endpoint command TRB */ 2213 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, 2214 u32 slot_id, bool command_must_succeed) 2215 { 2216 return queue_command(xhci, lower_32_bits(in_ctx_ptr), 2217 upper_32_bits(in_ctx_ptr), 0, 2218 TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id), 2219 command_must_succeed); 2220 } 2221 2222 /* Queue an evaluate context command TRB */ 2223 int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, 2224 u32 slot_id) 2225 { 2226 return queue_command(xhci, lower_32_bits(in_ctx_ptr), 2227 upper_32_bits(in_ctx_ptr), 0, 2228 TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id), 2229 false); 2230 } 2231 2232 int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id, 2233 unsigned int ep_index) 2234 { 2235 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id); 2236 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); 2237 u32 type = TRB_TYPE(TRB_STOP_RING); 2238 2239 return queue_command(xhci, 0, 0, 0, 2240 trb_slot_id | trb_ep_index | type, false); 2241 } 2242 2243 /* Set Transfer Ring Dequeue Pointer command. 2244 * This should not be used for endpoints that have streams enabled. 
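 * The new dequeue pointer is handed to the HC as a 64-bit DMA address
 * with the desired cycle state folded into its low-order bits, as
 * assembled in the queue_command() call below.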
2245 */ 2246 static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, 2247 unsigned int ep_index, struct xhci_segment *deq_seg, 2248 union xhci_trb *deq_ptr, u32 cycle_state) 2249 { 2250 dma_addr_t addr; 2251 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id); 2252 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); 2253 u32 type = TRB_TYPE(TRB_SET_DEQ); 2254 2255 addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr); 2256 if (addr == 0) { 2257 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n"); 2258 xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n", 2259 deq_seg, deq_ptr); 2260 return 0; 2261 } 2262 return queue_command(xhci, lower_32_bits(addr) | cycle_state, 2263 upper_32_bits(addr), 0, 2264 trb_slot_id | trb_ep_index | type, false); 2265 } 2266 2267 int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id, 2268 unsigned int ep_index) 2269 { 2270 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id); 2271 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); 2272 u32 type = TRB_TYPE(TRB_RESET_EP); 2273 2274 return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type, 2275 false); 2276 } 2277
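
/*
 * Illustrative sketch, not part of the driver: how a caller typically
 * drives the command helpers above.  A queued command sits on the
 * command ring until the doorbell is rung; this assumes
 * xhci_ring_cmd_db() takes just the xhci pointer, as its use via
 * xhci_setup_one_noop() above suggests, and that the caller holds
 * xhci->lock like the other queueing paths.  Hypothetical helper;
 * nothing in the driver calls it.
 */
static inline int xhci_sketch_reset_ep_and_ring_db(struct xhci_hcd *xhci,
		int slot_id, unsigned int ep_index)
{
	int ret;

	ret = xhci_queue_reset_ep(xhci, slot_id, ep_index);
	if (ret < 0)
		return ret;	/* no room on the command ring */
	/* Notify the HC that a new command TRB is ready */
	xhci_ring_cmd_db(xhci);
	return 0;
}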