/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue.  This means there will always be
 *    at least one free TRB in the ring.  This is useful if you want to turn
 *    that into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB.  If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules).  You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify consumer.  If SW is producer, it rings the doorbell for command
 *    and endpoint rings.  If HC is the producer for the event ring, it
 *    generates an interrupt according to interrupt modulation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you.  If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer.
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer.  SW is the consumer for the event ring, and it
 *    updates the event ring dequeue pointer.  HC is the consumer for the
 *    command and endpoint rings; it generates events on the event ring for
 *    these.
 */
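
/*
 * Illustrative note (a restatement of the rules above, not of the xHCI spec):
 * if the producer's ring cycle state is 1, it writes each queued TRB with the
 * cycle bit set to 1; once the enqueue pointer passes a link TRB with the
 * toggle bit, the ring cycle state flips to 0 and subsequent TRBs are written
 * with cycle = 0.  The consumer applies the same rule to its dequeue pointer,
 * so a TRB whose cycle bit matches the consumer's cycle state is known to
 * have been handed over by the producer.
 */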

#include <linux/scatterlist.h>
#include "xhci.h"

/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
		union xhci_trb *trb)
{
	unsigned long segment_offset;

	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset in TRBs */
	segment_offset = trb - seg->trbs;
	if (segment_offset >= TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
}

/* Does this link TRB point to the first segment in a ring,
 * or was the previous TRB the last TRB on the last segment in the ERST?
 */
static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
			(seg->next == xhci->event_ring->first_seg);
	else
		return trb->link.control & LINK_TOGGLE;
}

/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
 * segment?  I.e. would the updated event TRB pointer step off the end of the
 * event seg?
 */
static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return trb == &seg->trbs[TRBS_PER_SEGMENT];
	else
		return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
}

/* Updates trb to point to the next TRB in the ring, and updates seg if the
 * next TRB is in a new segment.  This does not skip over link TRBs, and it
 * does not affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
		struct xhci_ring *ring,
		struct xhci_segment **seg,
		union xhci_trb **trb)
{
	if (last_trb(xhci, ring, *seg, *trb)) {
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 */
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
{
	union xhci_trb *next = ++(ring->dequeue);

	ring->deq_updates++;
	/* Update the dequeue pointer further if that was a link TRB or we're at
	 * the end of an event ring segment (which doesn't have link TRBs)
	 */
	while (last_trb(xhci, ring, ring->deq_seg, next)) {
		if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
			ring->cycle_state = (ring->cycle_state ? 0 : 1);
			if (!in_interrupt())
				xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
						ring,
						(unsigned int) ring->cycle_state);
		}
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		next = ring->dequeue;
	}
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set.
 * Assume section 6.4.4.1 is wrong, and the chain bit can be set in a Link TRB.
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
{
	u32 chain;
	union xhci_trb *next;

	chain = ring->enqueue->generic.field[3] & TRB_CHAIN;
	next = ++(ring->enqueue);

	ring->enq_updates++;
	/* Update the enqueue pointer further if that was a link TRB or we're at
	 * the end of an event ring segment (which doesn't have link TRBs)
	 */
	while (last_trb(xhci, ring, ring->enq_seg, next)) {
		if (!consumer) {
			if (ring != xhci->event_ring) {
				next->link.control &= ~TRB_CHAIN;
				next->link.control |= chain;
				/* Give this link TRB to the hardware */
				wmb();
				if (next->link.control & TRB_CYCLE)
					next->link.control &= (u32) ~TRB_CYCLE;
				else
					next->link.control |= (u32) TRB_CYCLE;
			}
			/* Toggle the cycle bit after the last ring segment. */
			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
				ring->cycle_state = (ring->cycle_state ? 0 : 1);
				if (!in_interrupt())
					xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
							ring,
							(unsigned int) ring->cycle_state);
			}
		}
		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}
}
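
/*
 * Note: since a ring is considered empty when enqueue == dequeue, one TRB is
 * always left unused.  room_on_ring() below accounts for that extra TRB and
 * walks a copy of the enqueue pointer, so it never modifies the ring itself.
 */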

/*
 * Check to see if there's room to enqueue num_trbs on the ring.  See rules
 * above.
 * FIXME: this would be simpler and faster if we just kept track of the number
 * of free TRBs in a ring.
 */
static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_trbs)
{
	int i;
	union xhci_trb *enq = ring->enqueue;
	struct xhci_segment *enq_seg = ring->enq_seg;

	/* Check if ring is empty */
	if (enq == ring->dequeue)
		return 1;
	/* Make sure there's an extra empty TRB available */
	for (i = 0; i <= num_trbs; ++i) {
		if (enq == ring->dequeue)
			return 0;
		enq++;
		while (last_trb(xhci, ring, enq_seg, enq)) {
			enq_seg = enq_seg->next;
			enq = enq_seg->trbs;
		}
	}
	return 1;
}

void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
{
	u32 temp;
	dma_addr_t deq;

	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
			xhci->event_ring->dequeue);
	if (deq == 0 && !in_interrupt())
		xhci_warn(xhci, "WARN something wrong with SW event ring "
				"dequeue ptr.\n");
	/* Update HC event ring dequeue pointer */
	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
	temp &= ERST_PTR_MASK;
	if (!in_interrupt())
		xhci_dbg(xhci, "// Write event ring dequeue pointer\n");
	xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
	xhci_writel(xhci, (deq & ~ERST_PTR_MASK) | temp,
			&xhci->ir_set->erst_dequeue[0]);
}

/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
	u32 temp;

	xhci_dbg(xhci, "// Ding dong!\n");
	temp = xhci_readl(xhci, &xhci->dba->doorbell[0]) & DB_MASK;
	xhci_writel(xhci, temp | DB_TARGET_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	xhci_readl(xhci, &xhci->dba->doorbell[0]);
}
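
/*
 * Ringing an endpoint doorbell is what (re)starts the xHC fetching TRBs from
 * that endpoint's ring, so it is deliberately skipped while cancellations or
 * a Set TR Dequeue Pointer command are still pending for the endpoint.
 */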

static void ring_ep_doorbell(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	u32 field;
	__u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];

	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
	/* Don't ring the doorbell for this endpoint if there are pending
	 * cancellations because we don't want to interrupt processing.
	 */
	if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING)) {
		field = xhci_readl(xhci, db_addr) & DB_MASK;
		xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr);
		/* Flush PCI posted writes - FIXME Matthew Wilcox says this
		 * isn't time-critical and we shouldn't make the CPU wait for
		 * the flush.
		 */
		xhci_readl(xhci, db_addr);
	}
}

/*
 * Find the segment that trb is in.  Start searching in start_seg.
 * If we must move past a segment that has a link TRB with a toggle cycle state
 * bit set, then we will toggle the value pointed at by cycle_state.
 */
static struct xhci_segment *find_trb_seg(
		struct xhci_segment *start_seg,
		union xhci_trb *trb, int *cycle_state)
{
	struct xhci_segment *cur_seg = start_seg;
	struct xhci_generic_trb *generic_trb;

	while (cur_seg->trbs > trb ||
			&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
		generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
		if ((generic_trb->field[3] & TRB_TYPE_BITMASK) ==
					TRB_TYPE(TRB_LINK) &&
				(generic_trb->field[3] & LINK_TOGGLE))
			*cycle_state = ~(*cycle_state) & 0x1;
		cur_seg = cur_seg->next;
		if (cur_seg == start_seg)
			/* Looped over the entire list.  Oops! */
			return 0;
	}
	return cur_seg;
}

struct dequeue_state {
	struct xhci_segment *new_deq_seg;
	union xhci_trb *new_deq_ptr;
	int new_cycle_state;
};

/*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
 * dequeue pointer, and new consumer cycle state in state.
 * Update our internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 *  - First we update our new ring state to be the same as when the xHC stopped.
 *  - Then we traverse the ring to find the segment that contains
 *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
 *    any link TRBs with the toggle cycle bit set.
 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 *    if we've moved it past a link TRB with the toggle cycle bit set.
 */
static void find_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		struct xhci_td *cur_td, struct dequeue_state *state)
{
	struct xhci_virt_device *dev = xhci->devs[slot_id];
	struct xhci_ring *ep_ring = dev->ep_rings[ep_index];
	struct xhci_generic_trb *trb;

	state->new_cycle_state = 0;
	state->new_deq_seg = find_trb_seg(cur_td->start_seg,
			ep_ring->stopped_trb,
			&state->new_cycle_state);
	if (!state->new_deq_seg)
		BUG();
	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
	state->new_cycle_state = 0x1 & dev->out_ctx->ep[ep_index].deq[0];

	state->new_deq_ptr = cur_td->last_trb;
	state->new_deq_seg = find_trb_seg(state->new_deq_seg,
			state->new_deq_ptr,
			&state->new_cycle_state);
	if (!state->new_deq_seg)
		BUG();

	trb = &state->new_deq_ptr->generic;
	if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
			(trb->field[3] & LINK_TOGGLE))
		state->new_cycle_state = ~(state->new_cycle_state) & 0x1;
	next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);

	/* Don't update the ring cycle state for the producer (us). */
	ep_ring->dequeue = state->new_deq_ptr;
	ep_ring->deq_seg = state->new_deq_seg;
}
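
/*
 * td_to_noop() below neutralizes a cancelled TD in place: chained link TRBs
 * are unchained but keep their segment pointers, and every other TRB in the
 * TD keeps only its cycle bit and becomes a transfer no-op that the hardware
 * will simply skip over.
 */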

static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		struct xhci_td *cur_td)
{
	struct xhci_segment *cur_seg;
	union xhci_trb *cur_trb;

	for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
			true;
			next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
		if ((cur_trb->generic.field[3] & TRB_TYPE_BITMASK) ==
				TRB_TYPE(TRB_LINK)) {
			/* Unchain any chained Link TRBs, but
			 * leave the pointers intact.
			 */
			cur_trb->generic.field[3] &= ~TRB_CHAIN;
			xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
			xhci_dbg(xhci, "Address = %p (0x%llx dma); "
					"in seg %p (0x%llx dma)\n",
					cur_trb,
					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
					cur_seg,
					(unsigned long long)cur_seg->dma);
		} else {
			cur_trb->generic.field[0] = 0;
			cur_trb->generic.field[1] = 0;
			cur_trb->generic.field[2] = 0;
			/* Preserve only the cycle bit of this TRB */
			cur_trb->generic.field[3] &= TRB_CYCLE;
			cur_trb->generic.field[3] |= TRB_TYPE(TRB_TR_NOOP);
			xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
					"in seg %p (0x%llx dma)\n",
					cur_trb,
					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
					cur_seg,
					(unsigned long long)cur_seg->dma);
		}
		if (cur_trb == cur_td->last_trb)
			break;
	}
}

static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, struct xhci_segment *deq_seg,
		union xhci_trb *deq_ptr, u32 cycle_state);

/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 *
 *  1. If the HW was in the middle of processing the TD that needs to be
 *     cancelled, then we must move the ring's dequeue pointer past the last
 *     TRB in the TD with a Set Dequeue Pointer Command.
 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the
 *     chain bit cleared) so that the HW will skip over them.
 */
static void handle_stopped_endpoint(struct xhci_hcd *xhci,
		union xhci_trb *trb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct list_head *entry;
	struct xhci_td *cur_td = 0;
	struct xhci_td *last_unlinked_td;

	struct dequeue_state deq_state;
#ifdef CONFIG_USB_HCD_STAT
	ktime_t stop_time = ktime_get();
#endif

	memset(&deq_state, 0, sizeof(deq_state));
	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];

	if (list_empty(&ep_ring->cancelled_td_list))
		return;

	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
	 * We have the xHCI lock, so nothing can modify this list until we drop
	 * it.  We're also in the event handler, so we can't get re-interrupted
	 * if another Stop Endpoint command completes
	 */
	list_for_each(entry, &ep_ring->cancelled_td_list) {
		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
		xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
				cur_td->first_trb,
				(unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
		/*
		 * If we stopped on the TD we need to cancel, then we have to
		 * move the xHC endpoint ring dequeue pointer past this TD.
		 */
		if (cur_td == ep_ring->stopped_td)
			find_new_dequeue_state(xhci, slot_id, ep_index, cur_td,
					&deq_state);
		else
			td_to_noop(xhci, ep_ring, cur_td);
		/*
		 * The event handler won't see a completion for this TD anymore,
		 * so remove it from the endpoint ring's TD list.  Keep it in
		 * the cancelled TD list for URB completion later.
		 */
		list_del(&cur_td->td_list);
		ep_ring->cancels_pending--;
	}
	last_unlinked_td = cur_td;

	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
		xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
				"new deq ptr = %p (0x%llx dma), new cycle = %u\n",
				deq_state.new_deq_seg,
				(unsigned long long)deq_state.new_deq_seg->dma,
				deq_state.new_deq_ptr,
				(unsigned long long)xhci_trb_virt_to_dma(deq_state.new_deq_seg, deq_state.new_deq_ptr),
				deq_state.new_cycle_state);
		queue_set_tr_deq(xhci, slot_id, ep_index,
				deq_state.new_deq_seg,
				deq_state.new_deq_ptr,
				(u32) deq_state.new_cycle_state);
		/* Stop the TD queueing code from ringing the doorbell until
		 * this command completes.  The HC won't set the dequeue pointer
		 * if the ring is running, and ringing the doorbell starts the
		 * ring running.
		 */
		ep_ring->state |= SET_DEQ_PENDING;
		xhci_ring_cmd_db(xhci);
	} else {
		/* Otherwise just ring the doorbell to restart the ring */
		ring_ep_doorbell(xhci, slot_id, ep_index);
	}

	/*
	 * Drop the lock and complete the URBs in the cancelled TD list.
	 * New TDs to be cancelled might be added to the end of the list before
	 * we can complete all the URBs for the TDs we already unlinked.
	 * So stop when we've completed the URB for the last TD we unlinked.
	 */
	do {
		cur_td = list_entry(ep_ring->cancelled_td_list.next,
				struct xhci_td, cancelled_td_list);
		list_del(&cur_td->cancelled_td_list);

		/* Clean up the cancelled URB */
#ifdef CONFIG_USB_HCD_STAT
		hcd_stat_update(xhci->tp_stat, cur_td->urb->actual_length,
				ktime_sub(stop_time, cur_td->start_time));
#endif
		cur_td->urb->hcpriv = NULL;
		usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), cur_td->urb);

		xhci_dbg(xhci, "Giveback cancelled URB %p\n", cur_td->urb);
		spin_unlock(&xhci->lock);
		/* Doesn't matter what we pass for status, since the core will
		 * just overwrite it (because the URB has been unlinked).
		 */
		usb_hcd_giveback_urb(xhci_to_hcd(xhci), cur_td->urb, 0);
		kfree(cur_td);

		spin_lock(&xhci->lock);
	} while (cur_td != last_unlinked_td);

	/* Return to the event handler with xhci->lock re-acquired */
}

/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so
 * that the TD queueing code can ring the doorbell again.  We also need to
 * ring the endpoint doorbell to restart the ring, but only if there aren't
 * more cancellations pending.
 */
static void handle_set_deq_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event,
		union xhci_trb *trb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_device *dev;

	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
	dev = xhci->devs[slot_id];
	ep_ring = dev->ep_rings[ep_index];

	if (GET_COMP_CODE(event->status) != COMP_SUCCESS) {
		unsigned int ep_state;
		unsigned int slot_state;

		switch (GET_COMP_CODE(event->status)) {
		case COMP_TRB_ERR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
					"of stream ID configuration\n");
			break;
		case COMP_CTX_STATE:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
					"to incorrect slot or ep state.\n");
			ep_state = dev->out_ctx->ep[ep_index].ep_info;
			ep_state &= EP_STATE_MASK;
			slot_state = dev->out_ctx->slot.dev_state;
			slot_state = GET_SLOT_STATE(slot_state);
			xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
					slot_state, ep_state);
			break;
		case COMP_EBADSLT:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
					"slot %u was not enabled.\n", slot_id);
			break;
		default:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
					"completion code of %u.\n",
					GET_COMP_CODE(event->status));
			break;
		}
		/* OK what do we do now?  The endpoint state is hosed, and we
		 * should never get to this point if the synchronization between
		 * queueing and endpoint state is correct.  This might happen
		 * if the device gets disconnected after we've finished
		 * cancelling URBs, which might not be an error...
		 */
	} else {
		xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq[0] = 0x%x, "
				"deq[1] = 0x%x.\n",
				dev->out_ctx->ep[ep_index].deq[0],
				dev->out_ctx->ep[ep_index].deq[1]);
	}

	ep_ring->state &= ~SET_DEQ_PENDING;
	ring_ep_doorbell(xhci, slot_id, ep_index);
}
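
/*
 * A Command Completion Event carries the DMA address of the command TRB that
 * finished; handle_cmd_completion() below checks that this matches the DMA
 * address of the software command ring dequeue pointer before dispatching on
 * the command TRB's type.
 */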

static void handle_cmd_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	int slot_id = TRB_TO_SLOT_ID(event->flags);
	u64 cmd_dma;
	dma_addr_t cmd_dequeue_dma;

	cmd_dma = (((u64) event->cmd_trb[1]) << 32) + event->cmd_trb[0];
	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			xhci->cmd_ring->dequeue);
	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
	if (cmd_dequeue_dma == 0) {
		xhci->error_bitmask |= 1 << 4;
		return;
	}
	/* Does the DMA address match our internal dequeue pointer address? */
	if (cmd_dma != (u64) cmd_dequeue_dma) {
		xhci->error_bitmask |= 1 << 5;
		return;
	}
	switch (xhci->cmd_ring->dequeue->generic.field[3] & TRB_TYPE_BITMASK) {
	case TRB_TYPE(TRB_ENABLE_SLOT):
		if (GET_COMP_CODE(event->status) == COMP_SUCCESS)
			xhci->slot_id = slot_id;
		else
			xhci->slot_id = 0;
		complete(&xhci->addr_dev);
		break;
	case TRB_TYPE(TRB_DISABLE_SLOT):
		if (xhci->devs[slot_id])
			xhci_free_virt_device(xhci, slot_id);
		break;
	case TRB_TYPE(TRB_CONFIG_EP):
		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
		complete(&xhci->devs[slot_id]->cmd_completion);
		break;
	case TRB_TYPE(TRB_ADDR_DEV):
		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
		complete(&xhci->addr_dev);
		break;
	case TRB_TYPE(TRB_STOP_RING):
		handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue);
		break;
	case TRB_TYPE(TRB_SET_DEQ):
		handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
		break;
	case TRB_TYPE(TRB_CMD_NOOP):
		++xhci->noops_handled;
		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci->error_bitmask |= 1 << 6;
		break;
	}
	inc_deq(xhci, xhci->cmd_ring, false);
}

static void handle_port_status(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 port_id;

	/* Port status change events always have a successful completion code */
	if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) {
		xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
		xhci->error_bitmask |= 1 << 8;
	}
	/* FIXME: core doesn't care about all port link state changes yet */
	port_id = GET_PORT_ID(event->generic.field[0]);
	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);

	/* Update event ring dequeue pointer before dropping the lock */
	inc_deq(xhci, xhci->event_ring, true);
	xhci_set_hc_event_deq(xhci);

	spin_unlock(&xhci->lock);
	/* Pass this up to the core */
	usb_hcd_poll_rh_status(xhci_to_hcd(xhci));
	spin_lock(&xhci->lock);
}
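
/*
 * A TD may wrap from the end of a segment back around to the top of that
 * segment (via its link TRB), so when the start DMA address is above the end
 * TRB's DMA address, trb_in_td() below checks two windows: from the start TRB
 * to the end of the segment, and from the top of the segment to the end TRB.
 */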

/*
 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
 * at end_trb, which may be in another segment.  If the suspect DMA address is a
 * TRB in this TD, this function returns that TRB's segment.  Otherwise it
 * returns 0.
 */
static struct xhci_segment *trb_in_td(
		struct xhci_segment *start_seg,
		union xhci_trb *start_trb,
		union xhci_trb *end_trb,
		dma_addr_t suspect_dma)
{
	dma_addr_t start_dma;
	dma_addr_t end_seg_dma;
	dma_addr_t end_trb_dma;
	struct xhci_segment *cur_seg;

	start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
	cur_seg = start_seg;

	do {
		/* We may get an event for a Link TRB in the middle of a TD */
		end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
				&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
		/* If the end TRB isn't in this segment, this is set to 0 */
		end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);

		if (end_trb_dma > 0) {
			/* The end TRB is in this segment, so suspect should be here */
			if (start_dma <= end_trb_dma) {
				if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
					return cur_seg;
			} else {
				/* Case for one segment with
				 * a TD wrapped around to the top
				 */
				if ((suspect_dma >= start_dma &&
						suspect_dma <= end_seg_dma) ||
						(suspect_dma >= cur_seg->dma &&
						 suspect_dma <= end_trb_dma))
					return cur_seg;
			}
			return 0;
		} else {
			/* Might still be somewhere in this segment */
			if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
				return cur_seg;
		}
		cur_seg = cur_seg->next;
		start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
	} while (1);

}
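
/*
 * For the transfer events handled below, event->buffer holds the DMA address
 * of the transfer TRB that generated the event (the TRB-queueing code in this
 * file avoids event data TRBs), so the address can be mapped back to a TD on
 * the endpoint ring.
 */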

/*
 * If this function returns an error condition, it means it got a Transfer
 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
 * At this point, the host controller is probably hosed and should be reset.
 */
static int handle_tx_event(struct xhci_hcd *xhci,
		struct xhci_transfer_event *event)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	int ep_index;
	struct xhci_td *td = 0;
	dma_addr_t event_dma;
	struct xhci_segment *event_seg;
	union xhci_trb *event_trb;
	struct urb *urb = 0;
	int status = -EINPROGRESS;

	xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)];
	if (!xdev) {
		xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
		return -ENODEV;
	}

	/* Endpoint ID is 1 based, our index is zero based */
	ep_index = TRB_TO_EP_ID(event->flags) - 1;
	ep_ring = xdev->ep_rings[ep_index];
	if (!ep_ring || (xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
		xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n");
		return -ENODEV;
	}

	event_dma = event->buffer[0];
	if (event->buffer[1] != 0)
		xhci_warn(xhci, "WARN ignoring upper 32-bits of 64-bit TRB dma address\n");

	/* This TRB should be in the TD at the head of this ring's TD list */
	if (list_empty(&ep_ring->td_list)) {
		xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
				TRB_TO_SLOT_ID(event->flags), ep_index);
		xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
				(unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
		xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
		urb = NULL;
		goto cleanup;
	}
	td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);

	/* Is this a TRB in the currently executing TD? */
	event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
			td->last_trb, event_dma);
	if (!event_seg) {
		/* HC is busted, give up! */
		xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n");
		return -ESHUTDOWN;
	}
	event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)];
	xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
			(unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
	xhci_dbg(xhci, "Offset 0x00 (buffer[0]) = 0x%x\n",
			(unsigned int) event->buffer[0]);
	xhci_dbg(xhci, "Offset 0x04 (buffer[1]) = 0x%x\n",
			(unsigned int) event->buffer[1]);
	xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n",
			(unsigned int) event->transfer_len);
	xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n",
			(unsigned int) event->flags);

	/* Look for common error cases */
	switch (GET_COMP_CODE(event->transfer_len)) {
	/* Skip codes that require special handling depending on
	 * transfer type
	 */
	case COMP_SUCCESS:
	case COMP_SHORT_TX:
		break;
	case COMP_STOP:
		xhci_dbg(xhci, "Stopped on Transfer TRB\n");
		break;
	case COMP_STOP_INVAL:
		xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
		break;
	case COMP_STALL:
		xhci_warn(xhci, "WARN: Stalled endpoint\n");
		status = -EPIPE;
		break;
	case COMP_TRB_ERR:
		xhci_warn(xhci, "WARN: TRB error on endpoint\n");
		status = -EILSEQ;
		break;
	case COMP_TX_ERR:
		xhci_warn(xhci, "WARN: transfer error on endpoint\n");
		status = -EPROTO;
		break;
	case COMP_DB_ERR:
		xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
		status = -ENOSR;
		break;
	default:
		xhci_warn(xhci, "ERROR Unknown event condition, HC probably busted\n");
		urb = NULL;
		goto cleanup;
	}
	/* Now update the urb's actual_length and give back to the core */
	/* Was this a control transfer? */
	if (usb_endpoint_xfer_control(&td->urb->ep->desc)) {
		xhci_debug_trb(xhci, xhci->event_ring->dequeue);
		switch (GET_COMP_CODE(event->transfer_len)) {
		case COMP_SUCCESS:
			if (event_trb == ep_ring->dequeue) {
				xhci_warn(xhci, "WARN: Success on ctrl setup TRB without IOC set??\n");
				status = -ESHUTDOWN;
			} else if (event_trb != td->last_trb) {
				xhci_warn(xhci, "WARN: Success on ctrl data TRB without IOC set??\n");
				status = -ESHUTDOWN;
			} else {
				xhci_dbg(xhci, "Successful control transfer!\n");
				status = 0;
			}
			break;
		case COMP_SHORT_TX:
			xhci_warn(xhci, "WARN: short transfer on control ep\n");
			status = -EREMOTEIO;
			break;
		default:
			/* Others already handled above */
			break;
		}
		/*
		 * Did we transfer any data, despite the errors that might have
		 * happened?  I.e. did we get past the setup stage?
		 */
		if (event_trb != ep_ring->dequeue) {
			/* The event was for the status stage */
			if (event_trb == td->last_trb) {
				td->urb->actual_length =
					td->urb->transfer_buffer_length;
			} else {
				/* Maybe the event was for the data stage? */
				if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL)
					/* We didn't stop on a link TRB in the middle */
					td->urb->actual_length =
						td->urb->transfer_buffer_length -
						TRB_LEN(event->transfer_len);
			}
		}
	} else {
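		/* Bulk/interrupt transfer: derive actual_length either from the
		 * untransferred length reported in the event (fast path, when
		 * the event is for the TD's last TRB) or by summing the TRB
		 * lengths up to the event TRB (slow path).
		 */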
		switch (GET_COMP_CODE(event->transfer_len)) {
		case COMP_SUCCESS:
			/* Double check that the HW transferred everything. */
			if (event_trb != td->last_trb) {
				xhci_warn(xhci, "WARN Successful completion "
						"on short TX\n");
				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
					status = -EREMOTEIO;
				else
					status = 0;
			} else {
				xhci_dbg(xhci, "Successful bulk transfer!\n");
				status = 0;
			}
			break;
		case COMP_SHORT_TX:
			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
				status = -EREMOTEIO;
			else
				status = 0;
			break;
		default:
			/* Others already handled above */
			break;
		}
		dev_dbg(&td->urb->dev->dev,
				"ep %#x - asked for %d bytes, "
				"%d bytes untransferred\n",
				td->urb->ep->desc.bEndpointAddress,
				td->urb->transfer_buffer_length,
				TRB_LEN(event->transfer_len));
		/* Fast path - was this the last TRB in the TD for this URB? */
		if (event_trb == td->last_trb) {
			if (TRB_LEN(event->transfer_len) != 0) {
				if (TRB_LEN(event->transfer_len) >
						td->urb->transfer_buffer_length) {
					xhci_warn(xhci, "HC gave bad length "
							"of %d bytes left\n",
							TRB_LEN(event->transfer_len));
					td->urb->actual_length = 0;
				} else {
					td->urb->actual_length =
						td->urb->transfer_buffer_length -
						TRB_LEN(event->transfer_len);
				}
				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
					status = -EREMOTEIO;
				else
					status = 0;
			} else {
				td->urb->actual_length = td->urb->transfer_buffer_length;
				/* Ignore a short packet completion if the
				 * untransferred length was zero.
				 */
				status = 0;
			}
		} else {
			/* Slow path - walk the list, starting from the dequeue
			 * pointer, to get the actual length transferred.
			 */
			union xhci_trb *cur_trb;
			struct xhci_segment *cur_seg;

			td->urb->actual_length = 0;
			for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
					cur_trb != event_trb;
					next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
				if ((cur_trb->generic.field[3] & TRB_TYPE_BITMASK) !=
						TRB_TYPE(TRB_TR_NOOP) &&
				    (cur_trb->generic.field[3] & TRB_TYPE_BITMASK) !=
						TRB_TYPE(TRB_LINK))
					td->urb->actual_length +=
						TRB_LEN(cur_trb->generic.field[2]);
			}
			/* If the ring didn't stop on a Link or No-op TRB, add
			 * in the actual bytes transferred from the Normal TRB
			 */
			if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL)
				td->urb->actual_length +=
					TRB_LEN(cur_trb->generic.field[2]) -
					TRB_LEN(event->transfer_len);
		}
	}
	/* The Endpoint Stop Command completion will take care of
	 * any stopped TDs.  A stopped TD may be restarted, so don't update the
	 * ring dequeue pointer or take this TD off any lists yet.
	 */
	if (GET_COMP_CODE(event->transfer_len) == COMP_STOP_INVAL ||
			GET_COMP_CODE(event->transfer_len) == COMP_STOP) {
		ep_ring->stopped_td = td;
		ep_ring->stopped_trb = event_trb;
	} else {
		/* Update ring dequeue pointer */
		while (ep_ring->dequeue != td->last_trb)
			inc_deq(xhci, ep_ring, false);
		inc_deq(xhci, ep_ring, false);

		/* Clean up the endpoint's TD list */
		urb = td->urb;
		list_del(&td->td_list);
		/* Was this TD slated to be cancelled but completed anyway? */
		if (!list_empty(&td->cancelled_td_list)) {
			list_del(&td->cancelled_td_list);
			ep_ring->cancels_pending--;
		}
		kfree(td);
		urb->hcpriv = NULL;
	}
cleanup:
	inc_deq(xhci, xhci->event_ring, true);
	xhci_set_hc_event_deq(xhci);

	/* FIXME for multi-TD URBs (who have buffers bigger than 64MB) */
	if (urb) {
		usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
		spin_unlock(&xhci->lock);
		usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
		spin_lock(&xhci->lock);
	}
	return 0;
}

/*
 * This function handles all OS-owned events on the event ring.  It may drop
 * xhci->lock between event processing (e.g. to pass up port status changes).
 */
void xhci_handle_event(struct xhci_hcd *xhci)
{
	union xhci_trb *event;
	int update_ptrs = 1;
	int ret;

	if (!xhci->event_ring || !xhci->event_ring->dequeue) {
		xhci->error_bitmask |= 1 << 1;
		return;
	}

	event = xhci->event_ring->dequeue;
	/* Does the HC or OS own the TRB? */
	if ((event->event_cmd.flags & TRB_CYCLE) !=
			xhci->event_ring->cycle_state) {
		xhci->error_bitmask |= 1 << 2;
		return;
	}

	/* FIXME: Handle more event types. */
	switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) {
	case TRB_TYPE(TRB_COMPLETION):
		handle_cmd_completion(xhci, &event->event_cmd);
		break;
	case TRB_TYPE(TRB_PORT_STATUS):
		handle_port_status(xhci, event);
		update_ptrs = 0;
		break;
	case TRB_TYPE(TRB_TRANSFER):
		ret = handle_tx_event(xhci, &event->trans_event);
		if (ret < 0)
			xhci->error_bitmask |= 1 << 9;
		else
			update_ptrs = 0;
		break;
	default:
		xhci->error_bitmask |= 1 << 3;
	}

	if (update_ptrs) {
		/* Update SW and HC event ring dequeue pointer */
		inc_deq(xhci, xhci->event_ring, true);
		xhci_set_hc_event_deq(xhci);
	}
	/* Are there more items on the event ring? */
	xhci_handle_event(xhci);
}

/****		Endpoint Ring Operations	****/

/*
 * Generic function for queueing a TRB on a ring.
 * The caller must have checked to make sure there's room on the ring.
 */
static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		bool consumer,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	struct xhci_generic_trb *trb;

	trb = &ring->enqueue->generic;
	trb->field[0] = field1;
	trb->field[1] = field2;
	trb->field[2] = field3;
	trb->field[3] = field4;
	inc_enq(xhci, ring, consumer);
}

/*
 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
 * FIXME allocate segments if the ring is full.
 */
static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
	/* Make sure the endpoint has been added to xHC schedule */
	xhci_dbg(xhci, "Endpoint state = 0x%x\n", ep_state);
	switch (ep_state) {
	case EP_STATE_DISABLED:
		/*
		 * USB core changed config/interfaces without notifying us,
		 * or hardware is reporting the wrong state.
		 */
		xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
		return -ENOENT;
	case EP_STATE_HALTED:
	case EP_STATE_ERROR:
		xhci_warn(xhci, "WARN waiting for halt or error on ep "
				"to be cleared\n");
		/* FIXME event handling code for error needs to clear it */
		/* XXX not sure if this should be -ENOENT or not */
		return -EINVAL;
	case EP_STATE_STOPPED:
	case EP_STATE_RUNNING:
		break;
	default:
		xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
		/*
		 * FIXME issue Configure Endpoint command to try to get the HC
		 * back into a known state.
		 */
		return -EINVAL;
	}
	if (!room_on_ring(xhci, ep_ring, num_trbs)) {
		/* FIXME allocate more room */
		xhci_err(xhci, "ERROR no room on ep ring\n");
		return -ENOMEM;
	}
	return 0;
}

static int prepare_transfer(struct xhci_hcd *xhci,
		struct xhci_virt_device *xdev,
		unsigned int ep_index,
		unsigned int num_trbs,
		struct urb *urb,
		struct xhci_td **td,
		gfp_t mem_flags)
{
	int ret;

	ret = prepare_ring(xhci, xdev->ep_rings[ep_index],
			xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK,
			num_trbs, mem_flags);
	if (ret)
		return ret;
	*td = kzalloc(sizeof(struct xhci_td), mem_flags);
	if (!*td)
		return -ENOMEM;
	INIT_LIST_HEAD(&(*td)->td_list);
	INIT_LIST_HEAD(&(*td)->cancelled_td_list);

	ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
	if (unlikely(ret)) {
		kfree(*td);
		return ret;
	}

	(*td)->urb = urb;
	urb->hcpriv = (void *) (*td);
	/* Add this TD to the tail of the endpoint ring's TD list */
	list_add_tail(&(*td)->td_list, &xdev->ep_rings[ep_index]->td_list);
	(*td)->start_seg = xdev->ep_rings[ep_index]->enq_seg;
	(*td)->first_trb = xdev->ep_rings[ep_index]->enqueue;

	return 0;
}
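
/*
 * Worked example for the TRB counting below (assuming TRB_MAX_BUFF_SIZE is
 * 64KB, i.e. 1 << TRB_MAX_BUFF_SHIFT): an sg entry of 0x11000 bytes whose DMA
 * address ends in 0xF000 needs one TRB for the 0x1000 bytes up to the 64KB
 * boundary and one more TRB for the remaining 0x10000 bytes, so two TRBs in
 * total.
 */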
static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
{
	int num_sgs, num_trbs, running_total, temp, i;
	struct scatterlist *sg;

	sg = NULL;
	num_sgs = urb->num_sgs;
	temp = urb->transfer_buffer_length;

	xhci_dbg(xhci, "count sg list trbs: \n");
	num_trbs = 0;
	for_each_sg(urb->sg->sg, sg, num_sgs, i) {
		unsigned int previous_total_trbs = num_trbs;
		unsigned int len = sg_dma_len(sg);

		/* Scatter gather list entries may cross 64KB boundaries */
		running_total = TRB_MAX_BUFF_SIZE -
			(sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
		if (running_total != 0)
			num_trbs++;

		/* How many more 64KB chunks to transfer, how many more TRBs? */
		while (running_total < sg_dma_len(sg)) {
			num_trbs++;
			running_total += TRB_MAX_BUFF_SIZE;
		}
		xhci_dbg(xhci, " sg #%d: dma = %#llx, len = %#x (%d), num_trbs = %d\n",
				i, (unsigned long long)sg_dma_address(sg),
				len, len, num_trbs - previous_total_trbs);

		len = min_t(int, len, temp);
		temp -= len;
		if (temp == 0)
			break;
	}
	xhci_dbg(xhci, "\n");
	if (!in_interrupt())
		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %d, sglist used, num_trbs = %d\n",
				urb->ep->desc.bEndpointAddress,
				urb->transfer_buffer_length,
				num_trbs);
	return num_trbs;
}
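
/* Sanity check after queueing a TD: every TRB we counted must have been
 * consumed, and the total bytes queued must equal the URB's transfer length.
 */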
static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
{
	if (num_trbs != 0)
		dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
				"TRBs, %d left\n", __func__,
				urb->ep->desc.bEndpointAddress, num_trbs);
	if (running_total != urb->transfer_buffer_length)
		dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
				"queued %#x (%d), asked for %#x (%d)\n",
				__func__,
				urb->ep->desc.bEndpointAddress,
				running_total, running_total,
				urb->transfer_buffer_length,
				urb->transfer_buffer_length);
}

static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, int start_cycle,
		struct xhci_generic_trb *start_trb, struct xhci_td *td)
{
	/*
	 * Pass all the TRBs to the hardware at once and make sure this write
	 * isn't reordered.
	 */
	wmb();
	start_trb->field[3] |= start_cycle;
	ring_ep_doorbell(xhci, slot_id, ep_index);
}

static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	unsigned int num_trbs;
	struct xhci_td *td;
	struct scatterlist *sg;
	int num_sgs;
	int trb_buff_len, this_sg_len, running_total;
	bool first_trb;
	u64 addr;

	struct xhci_generic_trb *start_trb;
	int start_cycle;

	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
	num_trbs = count_sg_trbs_needed(xhci, urb);
	num_sgs = urb->num_sgs;

	trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, num_trbs, urb, &td, mem_flags);
	if (trb_buff_len < 0)
		return trb_buff_len;
	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	running_total = 0;
	/*
	 * How much data is in the first TRB?
	 *
	 * There are three forces at work for TRB buffer pointers and lengths:
	 * 1. We don't want to walk off the end of this sg-list entry buffer.
	 * 2. The transfer length that the driver requested may be smaller than
	 *    the amount of memory allocated for this scatter-gather list.
	 * 3. TRB buffers can't cross 64KB boundaries.
	 */
	sg = urb->sg->sg;
	addr = (u64) sg_dma_address(sg);
	this_sg_len = sg_dma_len(sg);
	trb_buff_len = TRB_MAX_BUFF_SIZE -
		(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
	trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
	if (trb_buff_len > urb->transfer_buffer_length)
		trb_buff_len = urb->transfer_buffer_length;
	xhci_dbg(xhci, "First length to xfer from 1st sglist entry = %u\n",
			trb_buff_len);

	first_trb = true;
	/* Queue the first TRB, even if it's zero-length */
	do {
		u32 field = 0;

		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb)
			first_trb = false;
		else
			field |= ep_ring->cycle_state;

		/* Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (num_trbs > 1) {
			field |= TRB_CHAIN;
		} else {
			/* FIXME - add check for ZERO_PACKET flag before this */
			td->last_trb = ep_ring->enqueue;
			field |= TRB_IOC;
		}
		xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), "
				"64KB boundary at %#x, end dma = %#x\n",
				(unsigned int) addr, trb_buff_len, trb_buff_len,
				(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
				(unsigned int) addr + trb_buff_len);
		if (TRB_MAX_BUFF_SIZE -
				(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) {
			xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
			xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
					(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
					(unsigned int) addr + trb_buff_len);
		}
		queue_trb(xhci, ep_ring, false,
				(u32) addr,
				(u32) ((u64) addr >> 32),
				TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0),
				/* We always want to know if the TRB was short,
				 * or we won't get an event when it completes.
				 * (Unless we use event data TRBs, which are a
				 * waste of space and HC resources.)
				 */
				field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
		--num_trbs;
		running_total += trb_buff_len;

		/* Calculate length for next transfer --
		 * Are we done queueing all the TRBs for this sg entry?
		 */
		this_sg_len -= trb_buff_len;
		if (this_sg_len == 0) {
			--num_sgs;
			if (num_sgs == 0)
				break;
			sg = sg_next(sg);
			addr = (u64) sg_dma_address(sg);
			this_sg_len = sg_dma_len(sg);
		} else {
			addr += trb_buff_len;
		}

		trb_buff_len = TRB_MAX_BUFF_SIZE -
			(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
		trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
		if (running_total + trb_buff_len > urb->transfer_buffer_length)
			trb_buff_len =
				urb->transfer_buffer_length - running_total;
	} while (running_total < urb->transfer_buffer_length);

	check_trb_math(urb, num_trbs, running_total);
	giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
	return 0;
}
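
/*
 * Note: the bulk path below queues at least one (possibly zero-length) Normal
 * TRB even when the URB carries no data, so the HC still generates a
 * completion event for the transfer.
 */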

/* This is very similar to what ehci-q.c qtd_fill() does */
int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	struct xhci_td *td;
	int num_trbs;
	struct xhci_generic_trb *start_trb;
	bool first_trb;
	int start_cycle;
	u32 field;

	int running_total, trb_buff_len, ret;
	u64 addr;

	if (urb->sg)
		return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);

	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];

	num_trbs = 0;
	/* How much data is (potentially) left before the 64KB boundary? */
	running_total = TRB_MAX_BUFF_SIZE -
		(urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));

	/* If there's some data on this 64KB chunk, or we have to send a
	 * zero-length transfer, we need at least one TRB
	 */
	if (running_total != 0 || urb->transfer_buffer_length == 0)
		num_trbs++;
	/* How many more 64KB chunks to transfer, how many more TRBs? */
	while (running_total < urb->transfer_buffer_length) {
		num_trbs++;
		running_total += TRB_MAX_BUFF_SIZE;
	}
	/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */

	if (!in_interrupt())
		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d), addr = %#llx, num_trbs = %d\n",
				urb->ep->desc.bEndpointAddress,
				urb->transfer_buffer_length,
				urb->transfer_buffer_length,
				(unsigned long long)urb->transfer_dma,
				num_trbs);

	ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
			num_trbs, urb, &td, mem_flags);
	if (ret < 0)
		return ret;

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	running_total = 0;
	/* How much data is in the first TRB? */
	addr = (u64) urb->transfer_dma;
	trb_buff_len = TRB_MAX_BUFF_SIZE -
		(urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
	if (urb->transfer_buffer_length < trb_buff_len)
		trb_buff_len = urb->transfer_buffer_length;

	first_trb = true;

	/* Queue the first TRB, even if it's zero-length */
	do {
		field = 0;

		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb)
			first_trb = false;
		else
			field |= ep_ring->cycle_state;

		/* Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (num_trbs > 1) {
			field |= TRB_CHAIN;
		} else {
			/* FIXME - add check for ZERO_PACKET flag before this */
			td->last_trb = ep_ring->enqueue;
			field |= TRB_IOC;
		}
		queue_trb(xhci, ep_ring, false,
				(u32) addr,
				(u32) ((u64) addr >> 32),
				TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0),
				/* We always want to know if the TRB was short,
				 * or we won't get an event when it completes.
				 * (Unless we use event data TRBs, which are a
				 * waste of space and HC resources.)
				 */
				field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
		--num_trbs;
		running_total += trb_buff_len;

		/* Calculate length for next transfer */
		addr += trb_buff_len;
		trb_buff_len = urb->transfer_buffer_length - running_total;
		if (trb_buff_len > TRB_MAX_BUFF_SIZE)
			trb_buff_len = TRB_MAX_BUFF_SIZE;
	} while (running_total < urb->transfer_buffer_length);

	check_trb_math(urb, num_trbs, running_total);
	giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
	return 0;
}
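
/*
 * A control transfer is queued as one TD of two or three TRBs: a Setup Stage
 * TRB carrying the 8-byte request as immediate data, an optional Data Stage
 * TRB, and a Status Stage TRB whose direction is the opposite of the data
 * stage (IN status for no-data or OUT-data requests).
 */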

/* Caller must have locked xhci->lock */
int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	int num_trbs;
	int ret;
	struct usb_ctrlrequest *setup;
	struct xhci_generic_trb *start_trb;
	int start_cycle;
	u32 field;
	struct xhci_td *td;

	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];

	/*
	 * Need to copy setup packet into setup TRB, so we can't use the setup
	 * DMA address.
	 */
	if (!urb->setup_packet)
		return -EINVAL;

	if (!in_interrupt())
		xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n",
				slot_id, ep_index);
	/* 1 TRB for setup, 1 for status */
	num_trbs = 2;
	/*
	 * Don't need to check if we need additional event data and normal TRBs,
	 * since data in control transfers will never get bigger than 16MB
	 * XXX: can we get a buffer that crosses 64KB boundaries?
	 */
	if (urb->transfer_buffer_length > 0)
		num_trbs++;
	ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, num_trbs,
			urb, &td, mem_flags);
	if (ret < 0)
		return ret;

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	/* Queue setup TRB - see section 6.4.1.2.1 */
	/* FIXME better way to translate setup_packet into two u32 fields? */
	setup = (struct usb_ctrlrequest *) urb->setup_packet;
	queue_trb(xhci, ep_ring, false,
			/* FIXME endianness is probably going to bite my ass here. */
			setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16,
			setup->wIndex | setup->wLength << 16,
			TRB_LEN(8) | TRB_INTR_TARGET(0),
			/* Immediate data in pointer */
			TRB_IDT | TRB_TYPE(TRB_SETUP));

	/* If there's data, queue data TRBs */
	field = 0;
	if (urb->transfer_buffer_length > 0) {
		if (setup->bRequestType & USB_DIR_IN)
			field |= TRB_DIR_IN;
		queue_trb(xhci, ep_ring, false,
				lower_32_bits(urb->transfer_dma),
				upper_32_bits(urb->transfer_dma),
				TRB_LEN(urb->transfer_buffer_length) | TRB_INTR_TARGET(0),
				/* Event on short tx */
				field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state);
	}

	/* Save the DMA address of the last TRB in the TD */
	td->last_trb = ep_ring->enqueue;

	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
	/* If the device sent data, the status stage is an OUT transfer */
	if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
		field = 0;
	else
		field = TRB_DIR_IN;
	queue_trb(xhci, ep_ring, false,
			0,
			0,
			TRB_INTR_TARGET(0),
			/* Event on completion */
			field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);

	giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
	return 0;
}

/****		Command Ring Operations		****/
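
/*
 * Commands are queued one TRB at a time: queue_command() below checks that a
 * single slot is free on the command ring and stamps the ring's current cycle
 * state into the command TRB's control field.
 */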

/* Generic function for queueing a command TRB on the command ring */
static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, u32 field3, u32 field4)
{
	if (!room_on_ring(xhci, xhci->cmd_ring, 1)) {
		if (!in_interrupt())
			xhci_err(xhci, "ERR: No room for command on command ring\n");
		return -ENOMEM;
	}
	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
			field4 | xhci->cmd_ring->cycle_state);
	return 0;
}

/* Queue a no-op command on the command ring */
static int queue_cmd_noop(struct xhci_hcd *xhci)
{
	return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP));
}
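
/*
 * Note that xhci_setup_one_noop() below only queues the no-op; it does not
 * ring the command ring doorbell itself, but hands back xhci_ring_cmd_db so
 * the caller can ring it once it is ready for the command to be executed.
 */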

/*
 * Place a no-op command on the command ring to test the command and
 * event ring.
 */
void *xhci_setup_one_noop(struct xhci_hcd *xhci)
{
	if (queue_cmd_noop(xhci) < 0)
		return NULL;
	xhci->noops_submitted++;
	return xhci_ring_cmd_db;
}

/* Queue a slot enable or disable request on the command ring */
int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
{
	return queue_command(xhci, 0, 0, 0,
			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id));
}

/* Queue an address device command TRB */
int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id)
{
	return queue_command(xhci, in_ctx_ptr, 0, 0,
			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id));
}

/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id)
{
	return queue_command(xhci, in_ctx_ptr, 0, 0,
			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id));
}

int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_STOP_RING);

	return queue_command(xhci, 0, 0, 0,
			trb_slot_id | trb_ep_index | type);
}

/* Set Transfer Ring Dequeue Pointer command.
 * This should not be used for endpoints that have streams enabled.
 */
static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, struct xhci_segment *deq_seg,
		union xhci_trb *deq_ptr, u32 cycle_state)
{
	dma_addr_t addr;
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_SET_DEQ);

	addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
				deq_seg, deq_ptr);
		return 0;
	}
	return queue_command(xhci, (u32) addr | cycle_state, 0, 0,
			trb_slot_id | trb_ep_index | type);
}