1 /* 2 * xHCI host controller driver 3 * 4 * Copyright (C) 2008 Intel Corp. 5 * 6 * Author: Sarah Sharp 7 * Some code borrowed from the Linux EHCI driver. 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License version 2 as 11 * published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, but 14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY 15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 16 * for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program; if not, write to the Free Software Foundation, 20 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 21 */ 22 23 #include <linux/usb.h> 24 #include <linux/pci.h> 25 #include <linux/slab.h> 26 #include <linux/dmapool.h> 27 28 #include "xhci.h" 29 30 /* 31 * Allocates a generic ring segment from the ring pool, sets the dma address, 32 * initializes the segment to zero, and sets the private next pointer to NULL. 33 * 34 * Section 4.11.1.1: 35 * "All components of all Command and Transfer TRBs shall be initialized to '0'" 36 */ 37 static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags) 38 { 39 struct xhci_segment *seg; 40 dma_addr_t dma; 41 42 seg = kzalloc(sizeof *seg, flags); 43 if (!seg) 44 return NULL; 45 xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg); 46 47 seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma); 48 if (!seg->trbs) { 49 kfree(seg); 50 return NULL; 51 } 52 xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n", 53 seg->trbs, (unsigned long long)dma); 54 55 memset(seg->trbs, 0, SEGMENT_SIZE); 56 seg->dma = dma; 57 seg->next = NULL; 58 59 return seg; 60 } 61 62 static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg) 63 { 64 if (seg->trbs) { 65 xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n", 66 seg->trbs, (unsigned long long)seg->dma); 67 dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma); 68 seg->trbs = NULL; 69 } 70 xhci_dbg(xhci, "Freeing priv segment structure at %p\n", seg); 71 kfree(seg); 72 } 73 74 /* 75 * Make the prev segment point to the next segment. 76 * 77 * Change the last TRB in the prev segment to be a Link TRB which points to the 78 * DMA address of the next segment. The caller needs to set any Link TRB 79 * related flags, such as End TRB, Toggle Cycle, and no snoop. 80 */ 81 static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev, 82 struct xhci_segment *next, bool link_trbs, bool isoc) 83 { 84 u32 val; 85 86 if (!prev || !next) 87 return; 88 prev->next = next; 89 if (link_trbs) { 90 prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = 91 cpu_to_le64(next->dma); 92 93 /* Set the last TRB in the segment to have a TRB type ID of Link TRB */ 94 val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control); 95 val &= ~TRB_TYPE_BITMASK; 96 val |= TRB_TYPE(TRB_LINK); 97 /* Always set the chain bit with 0.95 hardware */ 98 /* Set chain bit for isoc rings on AMD 0.96 host */ 99 if (xhci_link_trb_quirk(xhci) || 100 (isoc && (xhci->quirks & XHCI_AMD_0x96_HOST))) 101 val |= TRB_CHAIN; 102 prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val); 103 } 104 xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n", 105 (unsigned long long)prev->dma, 106 (unsigned long long)next->dma); 107 } 108 109 /* XXX: Do we need the hcd structure in all these functions? */ 110 void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring) 111 { 112 struct xhci_segment *seg; 113 struct xhci_segment *first_seg; 114 115 if (!ring) 116 return; 117 if (ring->first_seg) { 118 first_seg = ring->first_seg; 119 seg = first_seg->next; 120 xhci_dbg(xhci, "Freeing ring at %p\n", ring); 121 while (seg != first_seg) { 122 struct xhci_segment *next = seg->next; 123 xhci_segment_free(xhci, seg); 124 seg = next; 125 } 126 xhci_segment_free(xhci, first_seg); 127 ring->first_seg = NULL; 128 } 129 kfree(ring); 130 } 131 132 static void xhci_initialize_ring_info(struct xhci_ring *ring) 133 { 134 /* The ring is empty, so the enqueue pointer == dequeue pointer */ 135 ring->enqueue = ring->first_seg->trbs; 136 ring->enq_seg = ring->first_seg; 137 ring->dequeue = ring->enqueue; 138 ring->deq_seg = ring->first_seg; 139 /* The ring is initialized to 0. The producer must write 1 to the cycle 140 * bit to handover ownership of the TRB, so PCS = 1. The consumer must 141 * compare CCS to the cycle bit to check ownership, so CCS = 1. 142 */ 143 ring->cycle_state = 1; 144 /* Not necessary for new rings, but needed for re-initialized rings */ 145 ring->enq_updates = 0; 146 ring->deq_updates = 0; 147 } 148 149 /** 150 * Create a new ring with zero or more segments. 151 * 152 * Link each segment together into a ring. 153 * Set the end flag and the cycle toggle bit on the last segment. 154 * See section 4.9.1 and figures 15 and 16. 155 */ 156 static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, 157 unsigned int num_segs, bool link_trbs, bool isoc, gfp_t flags) 158 { 159 struct xhci_ring *ring; 160 struct xhci_segment *prev; 161 162 ring = kzalloc(sizeof *(ring), flags); 163 xhci_dbg(xhci, "Allocating ring at %p\n", ring); 164 if (!ring) 165 return NULL; 166 167 INIT_LIST_HEAD(&ring->td_list); 168 if (num_segs == 0) 169 return ring; 170 171 ring->first_seg = xhci_segment_alloc(xhci, flags); 172 if (!ring->first_seg) 173 goto fail; 174 num_segs--; 175 176 prev = ring->first_seg; 177 while (num_segs > 0) { 178 struct xhci_segment *next; 179 180 next = xhci_segment_alloc(xhci, flags); 181 if (!next) 182 goto fail; 183 xhci_link_segments(xhci, prev, next, link_trbs, isoc); 184 185 prev = next; 186 num_segs--; 187 } 188 xhci_link_segments(xhci, prev, ring->first_seg, link_trbs, isoc); 189 190 if (link_trbs) { 191 /* See section 4.9.2.1 and 6.4.4.1 */ 192 prev->trbs[TRBS_PER_SEGMENT-1].link.control |= 193 cpu_to_le32(LINK_TOGGLE); 194 xhci_dbg(xhci, "Wrote link toggle flag to" 195 " segment %p (virtual), 0x%llx (DMA)\n", 196 prev, (unsigned long long)prev->dma); 197 } 198 xhci_initialize_ring_info(ring); 199 return ring; 200 201 fail: 202 xhci_ring_free(xhci, ring); 203 return NULL; 204 } 205 206 void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci, 207 struct xhci_virt_device *virt_dev, 208 unsigned int ep_index) 209 { 210 int rings_cached; 211 212 rings_cached = virt_dev->num_rings_cached; 213 if (rings_cached < XHCI_MAX_RINGS_CACHED) { 214 virt_dev->ring_cache[rings_cached] = 215 virt_dev->eps[ep_index].ring; 216 virt_dev->num_rings_cached++; 217 xhci_dbg(xhci, "Cached old ring, " 218 "%d ring%s cached\n", 219 virt_dev->num_rings_cached, 220 (virt_dev->num_rings_cached > 1) ? "s" : ""); 221 } else { 222 xhci_ring_free(xhci, virt_dev->eps[ep_index].ring); 223 xhci_dbg(xhci, "Ring cache full (%d rings), " 224 "freeing ring\n", 225 virt_dev->num_rings_cached); 226 } 227 virt_dev->eps[ep_index].ring = NULL; 228 } 229 230 /* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue 231 * pointers to the beginning of the ring. 232 */ 233 static void xhci_reinit_cached_ring(struct xhci_hcd *xhci, 234 struct xhci_ring *ring, bool isoc) 235 { 236 struct xhci_segment *seg = ring->first_seg; 237 do { 238 memset(seg->trbs, 0, 239 sizeof(union xhci_trb)*TRBS_PER_SEGMENT); 240 /* All endpoint rings have link TRBs */ 241 xhci_link_segments(xhci, seg, seg->next, 1, isoc); 242 seg = seg->next; 243 } while (seg != ring->first_seg); 244 xhci_initialize_ring_info(ring); 245 /* td list should be empty since all URBs have been cancelled, 246 * but just in case... 247 */ 248 INIT_LIST_HEAD(&ring->td_list); 249 } 250 251 #define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32) 252 253 static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, 254 int type, gfp_t flags) 255 { 256 struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags); 257 if (!ctx) 258 return NULL; 259 260 BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT)); 261 ctx->type = type; 262 ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024; 263 if (type == XHCI_CTX_TYPE_INPUT) 264 ctx->size += CTX_SIZE(xhci->hcc_params); 265 266 ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma); 267 memset(ctx->bytes, 0, ctx->size); 268 return ctx; 269 } 270 271 static void xhci_free_container_ctx(struct xhci_hcd *xhci, 272 struct xhci_container_ctx *ctx) 273 { 274 if (!ctx) 275 return; 276 dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma); 277 kfree(ctx); 278 } 279 280 struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci, 281 struct xhci_container_ctx *ctx) 282 { 283 BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT); 284 return (struct xhci_input_control_ctx *)ctx->bytes; 285 } 286 287 struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, 288 struct xhci_container_ctx *ctx) 289 { 290 if (ctx->type == XHCI_CTX_TYPE_DEVICE) 291 return (struct xhci_slot_ctx *)ctx->bytes; 292 293 return (struct xhci_slot_ctx *) 294 (ctx->bytes + CTX_SIZE(xhci->hcc_params)); 295 } 296 297 struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, 298 struct xhci_container_ctx *ctx, 299 unsigned int ep_index) 300 { 301 /* increment ep index by offset of start of ep ctx array */ 302 ep_index++; 303 if (ctx->type == XHCI_CTX_TYPE_INPUT) 304 ep_index++; 305 306 return (struct xhci_ep_ctx *) 307 (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params))); 308 } 309 310 311 /***************** Streams structures manipulation *************************/ 312 313 static void xhci_free_stream_ctx(struct xhci_hcd *xhci, 314 unsigned int num_stream_ctxs, 315 struct xhci_stream_ctx *stream_ctx, dma_addr_t dma) 316 { 317 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); 318 319 if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE) 320 dma_free_coherent(&pdev->dev, 321 sizeof(struct xhci_stream_ctx)*num_stream_ctxs, 322 stream_ctx, dma); 323 else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE) 324 return dma_pool_free(xhci->small_streams_pool, 325 stream_ctx, dma); 326 else 327 return dma_pool_free(xhci->medium_streams_pool, 328 stream_ctx, dma); 329 } 330 331 /* 332 * The stream context array for each endpoint with bulk streams enabled can 333 * vary in size, based on: 334 * - how many streams the endpoint supports, 335 * - the maximum primary stream array size the host controller supports, 336 * - and how many streams the device driver asks for. 337 * 338 * The stream context array must be a power of 2, and can be as small as 339 * 64 bytes or as large as 1MB. 340 */ 341 static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci, 342 unsigned int num_stream_ctxs, dma_addr_t *dma, 343 gfp_t mem_flags) 344 { 345 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); 346 347 if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE) 348 return dma_alloc_coherent(&pdev->dev, 349 sizeof(struct xhci_stream_ctx)*num_stream_ctxs, 350 dma, mem_flags); 351 else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE) 352 return dma_pool_alloc(xhci->small_streams_pool, 353 mem_flags, dma); 354 else 355 return dma_pool_alloc(xhci->medium_streams_pool, 356 mem_flags, dma); 357 } 358 359 struct xhci_ring *xhci_dma_to_transfer_ring( 360 struct xhci_virt_ep *ep, 361 u64 address) 362 { 363 if (ep->ep_state & EP_HAS_STREAMS) 364 return radix_tree_lookup(&ep->stream_info->trb_address_map, 365 address >> SEGMENT_SHIFT); 366 return ep->ring; 367 } 368 369 /* Only use this when you know stream_info is valid */ 370 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING 371 static struct xhci_ring *dma_to_stream_ring( 372 struct xhci_stream_info *stream_info, 373 u64 address) 374 { 375 return radix_tree_lookup(&stream_info->trb_address_map, 376 address >> SEGMENT_SHIFT); 377 } 378 #endif /* CONFIG_USB_XHCI_HCD_DEBUGGING */ 379 380 struct xhci_ring *xhci_stream_id_to_ring( 381 struct xhci_virt_device *dev, 382 unsigned int ep_index, 383 unsigned int stream_id) 384 { 385 struct xhci_virt_ep *ep = &dev->eps[ep_index]; 386 387 if (stream_id == 0) 388 return ep->ring; 389 if (!ep->stream_info) 390 return NULL; 391 392 if (stream_id > ep->stream_info->num_streams) 393 return NULL; 394 return ep->stream_info->stream_rings[stream_id]; 395 } 396 397 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING 398 static int xhci_test_radix_tree(struct xhci_hcd *xhci, 399 unsigned int num_streams, 400 struct xhci_stream_info *stream_info) 401 { 402 u32 cur_stream; 403 struct xhci_ring *cur_ring; 404 u64 addr; 405 406 for (cur_stream = 1; cur_stream < num_streams; cur_stream++) { 407 struct xhci_ring *mapped_ring; 408 int trb_size = sizeof(union xhci_trb); 409 410 cur_ring = stream_info->stream_rings[cur_stream]; 411 for (addr = cur_ring->first_seg->dma; 412 addr < cur_ring->first_seg->dma + SEGMENT_SIZE; 413 addr += trb_size) { 414 mapped_ring = dma_to_stream_ring(stream_info, addr); 415 if (cur_ring != mapped_ring) { 416 xhci_warn(xhci, "WARN: DMA address 0x%08llx " 417 "didn't map to stream ID %u; " 418 "mapped to ring %p\n", 419 (unsigned long long) addr, 420 cur_stream, 421 mapped_ring); 422 return -EINVAL; 423 } 424 } 425 /* One TRB after the end of the ring segment shouldn't return a 426 * pointer to the current ring (although it may be a part of a 427 * different ring). 428 */ 429 mapped_ring = dma_to_stream_ring(stream_info, addr); 430 if (mapped_ring != cur_ring) { 431 /* One TRB before should also fail */ 432 addr = cur_ring->first_seg->dma - trb_size; 433 mapped_ring = dma_to_stream_ring(stream_info, addr); 434 } 435 if (mapped_ring == cur_ring) { 436 xhci_warn(xhci, "WARN: Bad DMA address 0x%08llx " 437 "mapped to valid stream ID %u; " 438 "mapped ring = %p\n", 439 (unsigned long long) addr, 440 cur_stream, 441 mapped_ring); 442 return -EINVAL; 443 } 444 } 445 return 0; 446 } 447 #endif /* CONFIG_USB_XHCI_HCD_DEBUGGING */ 448 449 /* 450 * Change an endpoint's internal structure so it supports stream IDs. The 451 * number of requested streams includes stream 0, which cannot be used by device 452 * drivers. 453 * 454 * The number of stream contexts in the stream context array may be bigger than 455 * the number of streams the driver wants to use. This is because the number of 456 * stream context array entries must be a power of two. 457 * 458 * We need a radix tree for mapping physical addresses of TRBs to which stream 459 * ID they belong to. We need to do this because the host controller won't tell 460 * us which stream ring the TRB came from. We could store the stream ID in an 461 * event data TRB, but that doesn't help us for the cancellation case, since the 462 * endpoint may stop before it reaches that event data TRB. 463 * 464 * The radix tree maps the upper portion of the TRB DMA address to a ring 465 * segment that has the same upper portion of DMA addresses. For example, say I 466 * have segments of size 1KB, that are always 64-byte aligned. A segment may 467 * start at 0x10c91000 and end at 0x10c913f0. If I use the upper 10 bits, the 468 * key to the stream ID is 0x43244. I can use the DMA address of the TRB to 469 * pass the radix tree a key to get the right stream ID: 470 * 471 * 0x10c90fff >> 10 = 0x43243 472 * 0x10c912c0 >> 10 = 0x43244 473 * 0x10c91400 >> 10 = 0x43245 474 * 475 * Obviously, only those TRBs with DMA addresses that are within the segment 476 * will make the radix tree return the stream ID for that ring. 477 * 478 * Caveats for the radix tree: 479 * 480 * The radix tree uses an unsigned long as a key pair. On 32-bit systems, an 481 * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be 482 * 64-bits. Since we only request 32-bit DMA addresses, we can use that as the 483 * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit 484 * PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit 485 * extended systems (where the DMA address can be bigger than 32-bits), 486 * if we allow the PCI dma mask to be bigger than 32-bits. So don't do that. 487 */ 488 struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci, 489 unsigned int num_stream_ctxs, 490 unsigned int num_streams, gfp_t mem_flags) 491 { 492 struct xhci_stream_info *stream_info; 493 u32 cur_stream; 494 struct xhci_ring *cur_ring; 495 unsigned long key; 496 u64 addr; 497 int ret; 498 499 xhci_dbg(xhci, "Allocating %u streams and %u " 500 "stream context array entries.\n", 501 num_streams, num_stream_ctxs); 502 if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) { 503 xhci_dbg(xhci, "Command ring has no reserved TRBs available\n"); 504 return NULL; 505 } 506 xhci->cmd_ring_reserved_trbs++; 507 508 stream_info = kzalloc(sizeof(struct xhci_stream_info), mem_flags); 509 if (!stream_info) 510 goto cleanup_trbs; 511 512 stream_info->num_streams = num_streams; 513 stream_info->num_stream_ctxs = num_stream_ctxs; 514 515 /* Initialize the array of virtual pointers to stream rings. */ 516 stream_info->stream_rings = kzalloc( 517 sizeof(struct xhci_ring *)*num_streams, 518 mem_flags); 519 if (!stream_info->stream_rings) 520 goto cleanup_info; 521 522 /* Initialize the array of DMA addresses for stream rings for the HW. */ 523 stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci, 524 num_stream_ctxs, &stream_info->ctx_array_dma, 525 mem_flags); 526 if (!stream_info->stream_ctx_array) 527 goto cleanup_ctx; 528 memset(stream_info->stream_ctx_array, 0, 529 sizeof(struct xhci_stream_ctx)*num_stream_ctxs); 530 531 /* Allocate everything needed to free the stream rings later */ 532 stream_info->free_streams_command = 533 xhci_alloc_command(xhci, true, true, mem_flags); 534 if (!stream_info->free_streams_command) 535 goto cleanup_ctx; 536 537 INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC); 538 539 /* Allocate rings for all the streams that the driver will use, 540 * and add their segment DMA addresses to the radix tree. 541 * Stream 0 is reserved. 542 */ 543 for (cur_stream = 1; cur_stream < num_streams; cur_stream++) { 544 stream_info->stream_rings[cur_stream] = 545 xhci_ring_alloc(xhci, 1, true, false, mem_flags); 546 cur_ring = stream_info->stream_rings[cur_stream]; 547 if (!cur_ring) 548 goto cleanup_rings; 549 cur_ring->stream_id = cur_stream; 550 /* Set deq ptr, cycle bit, and stream context type */ 551 addr = cur_ring->first_seg->dma | 552 SCT_FOR_CTX(SCT_PRI_TR) | 553 cur_ring->cycle_state; 554 stream_info->stream_ctx_array[cur_stream].stream_ring = 555 cpu_to_le64(addr); 556 xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n", 557 cur_stream, (unsigned long long) addr); 558 559 key = (unsigned long) 560 (cur_ring->first_seg->dma >> SEGMENT_SHIFT); 561 ret = radix_tree_insert(&stream_info->trb_address_map, 562 key, cur_ring); 563 if (ret) { 564 xhci_ring_free(xhci, cur_ring); 565 stream_info->stream_rings[cur_stream] = NULL; 566 goto cleanup_rings; 567 } 568 } 569 /* Leave the other unused stream ring pointers in the stream context 570 * array initialized to zero. This will cause the xHC to give us an 571 * error if the device asks for a stream ID we don't have setup (if it 572 * was any other way, the host controller would assume the ring is 573 * "empty" and wait forever for data to be queued to that stream ID). 574 */ 575 #if XHCI_DEBUG 576 /* Do a little test on the radix tree to make sure it returns the 577 * correct values. 578 */ 579 if (xhci_test_radix_tree(xhci, num_streams, stream_info)) 580 goto cleanup_rings; 581 #endif 582 583 return stream_info; 584 585 cleanup_rings: 586 for (cur_stream = 1; cur_stream < num_streams; cur_stream++) { 587 cur_ring = stream_info->stream_rings[cur_stream]; 588 if (cur_ring) { 589 addr = cur_ring->first_seg->dma; 590 radix_tree_delete(&stream_info->trb_address_map, 591 addr >> SEGMENT_SHIFT); 592 xhci_ring_free(xhci, cur_ring); 593 stream_info->stream_rings[cur_stream] = NULL; 594 } 595 } 596 xhci_free_command(xhci, stream_info->free_streams_command); 597 cleanup_ctx: 598 kfree(stream_info->stream_rings); 599 cleanup_info: 600 kfree(stream_info); 601 cleanup_trbs: 602 xhci->cmd_ring_reserved_trbs--; 603 return NULL; 604 } 605 /* 606 * Sets the MaxPStreams field and the Linear Stream Array field. 607 * Sets the dequeue pointer to the stream context array. 608 */ 609 void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci, 610 struct xhci_ep_ctx *ep_ctx, 611 struct xhci_stream_info *stream_info) 612 { 613 u32 max_primary_streams; 614 /* MaxPStreams is the number of stream context array entries, not the 615 * number we're actually using. Must be in 2^(MaxPstreams + 1) format. 616 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 2, fls(0x100) = 3, etc. 617 */ 618 max_primary_streams = fls(stream_info->num_stream_ctxs) - 2; 619 xhci_dbg(xhci, "Setting number of stream ctx array entries to %u\n", 620 1 << (max_primary_streams + 1)); 621 ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK); 622 ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams) 623 | EP_HAS_LSA); 624 ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma); 625 } 626 627 /* 628 * Sets the MaxPStreams field and the Linear Stream Array field to 0. 629 * Reinstalls the "normal" endpoint ring (at its previous dequeue mark, 630 * not at the beginning of the ring). 631 */ 632 void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci, 633 struct xhci_ep_ctx *ep_ctx, 634 struct xhci_virt_ep *ep) 635 { 636 dma_addr_t addr; 637 ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA)); 638 addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue); 639 ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state); 640 } 641 642 /* Frees all stream contexts associated with the endpoint, 643 * 644 * Caller should fix the endpoint context streams fields. 645 */ 646 void xhci_free_stream_info(struct xhci_hcd *xhci, 647 struct xhci_stream_info *stream_info) 648 { 649 int cur_stream; 650 struct xhci_ring *cur_ring; 651 dma_addr_t addr; 652 653 if (!stream_info) 654 return; 655 656 for (cur_stream = 1; cur_stream < stream_info->num_streams; 657 cur_stream++) { 658 cur_ring = stream_info->stream_rings[cur_stream]; 659 if (cur_ring) { 660 addr = cur_ring->first_seg->dma; 661 radix_tree_delete(&stream_info->trb_address_map, 662 addr >> SEGMENT_SHIFT); 663 xhci_ring_free(xhci, cur_ring); 664 stream_info->stream_rings[cur_stream] = NULL; 665 } 666 } 667 xhci_free_command(xhci, stream_info->free_streams_command); 668 xhci->cmd_ring_reserved_trbs--; 669 if (stream_info->stream_ctx_array) 670 xhci_free_stream_ctx(xhci, 671 stream_info->num_stream_ctxs, 672 stream_info->stream_ctx_array, 673 stream_info->ctx_array_dma); 674 675 if (stream_info) 676 kfree(stream_info->stream_rings); 677 kfree(stream_info); 678 } 679 680 681 /***************** Device context manipulation *************************/ 682 683 static void xhci_init_endpoint_timer(struct xhci_hcd *xhci, 684 struct xhci_virt_ep *ep) 685 { 686 init_timer(&ep->stop_cmd_timer); 687 ep->stop_cmd_timer.data = (unsigned long) ep; 688 ep->stop_cmd_timer.function = xhci_stop_endpoint_command_watchdog; 689 ep->xhci = xhci; 690 } 691 692 static void xhci_free_tt_info(struct xhci_hcd *xhci, 693 struct xhci_virt_device *virt_dev, 694 int slot_id) 695 { 696 struct list_head *tt; 697 struct list_head *tt_list_head; 698 struct list_head *tt_next; 699 struct xhci_tt_bw_info *tt_info; 700 701 /* If the device never made it past the Set Address stage, 702 * it may not have the real_port set correctly. 703 */ 704 if (virt_dev->real_port == 0 || 705 virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) { 706 xhci_dbg(xhci, "Bad real port.\n"); 707 return; 708 } 709 710 tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts); 711 if (list_empty(tt_list_head)) 712 return; 713 714 list_for_each(tt, tt_list_head) { 715 tt_info = list_entry(tt, struct xhci_tt_bw_info, tt_list); 716 if (tt_info->slot_id == slot_id) 717 break; 718 } 719 /* Cautionary measure in case the hub was disconnected before we 720 * stored the TT information. 721 */ 722 if (tt_info->slot_id != slot_id) 723 return; 724 725 tt_next = tt->next; 726 tt_info = list_entry(tt, struct xhci_tt_bw_info, 727 tt_list); 728 /* Multi-TT hubs will have more than one entry */ 729 do { 730 list_del(tt); 731 kfree(tt_info); 732 tt = tt_next; 733 if (list_empty(tt_list_head)) 734 break; 735 tt_next = tt->next; 736 tt_info = list_entry(tt, struct xhci_tt_bw_info, 737 tt_list); 738 } while (tt_info->slot_id == slot_id); 739 } 740 741 int xhci_alloc_tt_info(struct xhci_hcd *xhci, 742 struct xhci_virt_device *virt_dev, 743 struct usb_device *hdev, 744 struct usb_tt *tt, gfp_t mem_flags) 745 { 746 struct xhci_tt_bw_info *tt_info; 747 unsigned int num_ports; 748 int i, j; 749 750 if (!tt->multi) 751 num_ports = 1; 752 else 753 num_ports = hdev->maxchild; 754 755 for (i = 0; i < num_ports; i++, tt_info++) { 756 struct xhci_interval_bw_table *bw_table; 757 758 tt_info = kzalloc(sizeof(*tt_info), mem_flags); 759 if (!tt_info) 760 goto free_tts; 761 INIT_LIST_HEAD(&tt_info->tt_list); 762 list_add(&tt_info->tt_list, 763 &xhci->rh_bw[virt_dev->real_port - 1].tts); 764 tt_info->slot_id = virt_dev->udev->slot_id; 765 if (tt->multi) 766 tt_info->ttport = i+1; 767 bw_table = &tt_info->bw_table; 768 for (j = 0; j < XHCI_MAX_INTERVAL; j++) 769 INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints); 770 } 771 return 0; 772 773 free_tts: 774 xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id); 775 return -ENOMEM; 776 } 777 778 779 /* All the xhci_tds in the ring's TD list should be freed at this point. 780 * Should be called with xhci->lock held if there is any chance the TT lists 781 * will be manipulated by the configure endpoint, allocate device, or update 782 * hub functions while this function is removing the TT entries from the list. 783 */ 784 void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) 785 { 786 struct xhci_virt_device *dev; 787 int i; 788 int old_active_eps = 0; 789 790 /* Slot ID 0 is reserved */ 791 if (slot_id == 0 || !xhci->devs[slot_id]) 792 return; 793 794 dev = xhci->devs[slot_id]; 795 xhci->dcbaa->dev_context_ptrs[slot_id] = 0; 796 if (!dev) 797 return; 798 799 if (dev->tt_info) 800 old_active_eps = dev->tt_info->active_eps; 801 802 for (i = 0; i < 31; ++i) { 803 if (dev->eps[i].ring) 804 xhci_ring_free(xhci, dev->eps[i].ring); 805 if (dev->eps[i].stream_info) 806 xhci_free_stream_info(xhci, 807 dev->eps[i].stream_info); 808 /* Endpoints on the TT/root port lists should have been removed 809 * when usb_disable_device() was called for the device. 810 * We can't drop them anyway, because the udev might have gone 811 * away by this point, and we can't tell what speed it was. 812 */ 813 if (!list_empty(&dev->eps[i].bw_endpoint_list)) 814 xhci_warn(xhci, "Slot %u endpoint %u " 815 "not removed from BW list!\n", 816 slot_id, i); 817 } 818 /* If this is a hub, free the TT(s) from the TT list */ 819 xhci_free_tt_info(xhci, dev, slot_id); 820 /* If necessary, update the number of active TTs on this root port */ 821 xhci_update_tt_active_eps(xhci, dev, old_active_eps); 822 823 if (dev->ring_cache) { 824 for (i = 0; i < dev->num_rings_cached; i++) 825 xhci_ring_free(xhci, dev->ring_cache[i]); 826 kfree(dev->ring_cache); 827 } 828 829 if (dev->in_ctx) 830 xhci_free_container_ctx(xhci, dev->in_ctx); 831 if (dev->out_ctx) 832 xhci_free_container_ctx(xhci, dev->out_ctx); 833 834 kfree(xhci->devs[slot_id]); 835 xhci->devs[slot_id] = NULL; 836 } 837 838 int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, 839 struct usb_device *udev, gfp_t flags) 840 { 841 struct xhci_virt_device *dev; 842 int i; 843 844 /* Slot ID 0 is reserved */ 845 if (slot_id == 0 || xhci->devs[slot_id]) { 846 xhci_warn(xhci, "Bad Slot ID %d\n", slot_id); 847 return 0; 848 } 849 850 xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags); 851 if (!xhci->devs[slot_id]) 852 return 0; 853 dev = xhci->devs[slot_id]; 854 855 /* Allocate the (output) device context that will be used in the HC. */ 856 dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags); 857 if (!dev->out_ctx) 858 goto fail; 859 860 xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id, 861 (unsigned long long)dev->out_ctx->dma); 862 863 /* Allocate the (input) device context for address device command */ 864 dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags); 865 if (!dev->in_ctx) 866 goto fail; 867 868 xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id, 869 (unsigned long long)dev->in_ctx->dma); 870 871 /* Initialize the cancellation list and watchdog timers for each ep */ 872 for (i = 0; i < 31; i++) { 873 xhci_init_endpoint_timer(xhci, &dev->eps[i]); 874 INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list); 875 INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list); 876 } 877 878 /* Allocate endpoint 0 ring */ 879 dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, false, flags); 880 if (!dev->eps[0].ring) 881 goto fail; 882 883 /* Allocate pointers to the ring cache */ 884 dev->ring_cache = kzalloc( 885 sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED, 886 flags); 887 if (!dev->ring_cache) 888 goto fail; 889 dev->num_rings_cached = 0; 890 891 init_completion(&dev->cmd_completion); 892 INIT_LIST_HEAD(&dev->cmd_list); 893 dev->udev = udev; 894 895 /* Point to output device context in dcbaa. */ 896 xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma); 897 xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n", 898 slot_id, 899 &xhci->dcbaa->dev_context_ptrs[slot_id], 900 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id])); 901 902 return 1; 903 fail: 904 xhci_free_virt_device(xhci, slot_id); 905 return 0; 906 } 907 908 void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci, 909 struct usb_device *udev) 910 { 911 struct xhci_virt_device *virt_dev; 912 struct xhci_ep_ctx *ep0_ctx; 913 struct xhci_ring *ep_ring; 914 915 virt_dev = xhci->devs[udev->slot_id]; 916 ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0); 917 ep_ring = virt_dev->eps[0].ring; 918 /* 919 * FIXME we don't keep track of the dequeue pointer very well after a 920 * Set TR dequeue pointer, so we're setting the dequeue pointer of the 921 * host to our enqueue pointer. This should only be called after a 922 * configured device has reset, so all control transfers should have 923 * been completed or cancelled before the reset. 924 */ 925 ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg, 926 ep_ring->enqueue) 927 | ep_ring->cycle_state); 928 } 929 930 /* 931 * The xHCI roothub may have ports of differing speeds in any order in the port 932 * status registers. xhci->port_array provides an array of the port speed for 933 * each offset into the port status registers. 934 * 935 * The xHCI hardware wants to know the roothub port number that the USB device 936 * is attached to (or the roothub port its ancestor hub is attached to). All we 937 * know is the index of that port under either the USB 2.0 or the USB 3.0 938 * roothub, but that doesn't give us the real index into the HW port status 939 * registers. Scan through the xHCI roothub port array, looking for the Nth 940 * entry of the correct port speed. Return the port number of that entry. 941 */ 942 static u32 xhci_find_real_port_number(struct xhci_hcd *xhci, 943 struct usb_device *udev) 944 { 945 struct usb_device *top_dev; 946 unsigned int num_similar_speed_ports; 947 unsigned int faked_port_num; 948 int i; 949 950 for (top_dev = udev; top_dev->parent && top_dev->parent->parent; 951 top_dev = top_dev->parent) 952 /* Found device below root hub */; 953 faked_port_num = top_dev->portnum; 954 for (i = 0, num_similar_speed_ports = 0; 955 i < HCS_MAX_PORTS(xhci->hcs_params1); i++) { 956 u8 port_speed = xhci->port_array[i]; 957 958 /* 959 * Skip ports that don't have known speeds, or have duplicate 960 * Extended Capabilities port speed entries. 961 */ 962 if (port_speed == 0 || port_speed == DUPLICATE_ENTRY) 963 continue; 964 965 /* 966 * USB 3.0 ports are always under a USB 3.0 hub. USB 2.0 and 967 * 1.1 ports are under the USB 2.0 hub. If the port speed 968 * matches the device speed, it's a similar speed port. 969 */ 970 if ((port_speed == 0x03) == (udev->speed == USB_SPEED_SUPER)) 971 num_similar_speed_ports++; 972 if (num_similar_speed_ports == faked_port_num) 973 /* Roothub ports are numbered from 1 to N */ 974 return i+1; 975 } 976 return 0; 977 } 978 979 /* Setup an xHCI virtual device for a Set Address command */ 980 int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev) 981 { 982 struct xhci_virt_device *dev; 983 struct xhci_ep_ctx *ep0_ctx; 984 struct xhci_slot_ctx *slot_ctx; 985 struct xhci_input_control_ctx *ctrl_ctx; 986 u32 port_num; 987 struct usb_device *top_dev; 988 989 dev = xhci->devs[udev->slot_id]; 990 /* Slot ID 0 is reserved */ 991 if (udev->slot_id == 0 || !dev) { 992 xhci_warn(xhci, "Slot ID %d is not assigned to this device\n", 993 udev->slot_id); 994 return -EINVAL; 995 } 996 ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0); 997 ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx); 998 slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx); 999 1000 /* 2) New slot context and endpoint 0 context are valid*/ 1001 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG); 1002 1003 /* 3) Only the control endpoint is valid - one endpoint context */ 1004 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route); 1005 switch (udev->speed) { 1006 case USB_SPEED_SUPER: 1007 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS); 1008 break; 1009 case USB_SPEED_HIGH: 1010 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS); 1011 break; 1012 case USB_SPEED_FULL: 1013 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS); 1014 break; 1015 case USB_SPEED_LOW: 1016 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS); 1017 break; 1018 case USB_SPEED_WIRELESS: 1019 xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); 1020 return -EINVAL; 1021 break; 1022 default: 1023 /* Speed was set earlier, this shouldn't happen. */ 1024 BUG(); 1025 } 1026 /* Find the root hub port this device is under */ 1027 port_num = xhci_find_real_port_number(xhci, udev); 1028 if (!port_num) 1029 return -EINVAL; 1030 slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num)); 1031 /* Set the port number in the virtual_device to the faked port number */ 1032 for (top_dev = udev; top_dev->parent && top_dev->parent->parent; 1033 top_dev = top_dev->parent) 1034 /* Found device below root hub */; 1035 dev->fake_port = top_dev->portnum; 1036 dev->real_port = port_num; 1037 xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num); 1038 xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port); 1039 1040 /* Find the right bandwidth table that this device will be a part of. 1041 * If this is a full speed device attached directly to a root port (or a 1042 * decendent of one), it counts as a primary bandwidth domain, not a 1043 * secondary bandwidth domain under a TT. An xhci_tt_info structure 1044 * will never be created for the HS root hub. 1045 */ 1046 if (!udev->tt || !udev->tt->hub->parent) { 1047 dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table; 1048 } else { 1049 struct xhci_root_port_bw_info *rh_bw; 1050 struct xhci_tt_bw_info *tt_bw; 1051 1052 rh_bw = &xhci->rh_bw[port_num - 1]; 1053 /* Find the right TT. */ 1054 list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) { 1055 if (tt_bw->slot_id != udev->tt->hub->slot_id) 1056 continue; 1057 1058 if (!dev->udev->tt->multi || 1059 (udev->tt->multi && 1060 tt_bw->ttport == dev->udev->ttport)) { 1061 dev->bw_table = &tt_bw->bw_table; 1062 dev->tt_info = tt_bw; 1063 break; 1064 } 1065 } 1066 if (!dev->tt_info) 1067 xhci_warn(xhci, "WARN: Didn't find a matching TT\n"); 1068 } 1069 1070 /* Is this a LS/FS device under an external HS hub? */ 1071 if (udev->tt && udev->tt->hub->parent) { 1072 slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id | 1073 (udev->ttport << 8)); 1074 if (udev->tt->multi) 1075 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT); 1076 } 1077 xhci_dbg(xhci, "udev->tt = %p\n", udev->tt); 1078 xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport); 1079 1080 /* Step 4 - ring already allocated */ 1081 /* Step 5 */ 1082 ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP)); 1083 /* 1084 * XXX: Not sure about wireless USB devices. 1085 */ 1086 switch (udev->speed) { 1087 case USB_SPEED_SUPER: 1088 ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(512)); 1089 break; 1090 case USB_SPEED_HIGH: 1091 /* USB core guesses at a 64-byte max packet first for FS devices */ 1092 case USB_SPEED_FULL: 1093 ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(64)); 1094 break; 1095 case USB_SPEED_LOW: 1096 ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(8)); 1097 break; 1098 case USB_SPEED_WIRELESS: 1099 xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); 1100 return -EINVAL; 1101 break; 1102 default: 1103 /* New speed? */ 1104 BUG(); 1105 } 1106 /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */ 1107 ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3)); 1108 1109 ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma | 1110 dev->eps[0].ring->cycle_state); 1111 1112 /* Steps 7 and 8 were done in xhci_alloc_virt_device() */ 1113 1114 return 0; 1115 } 1116 1117 /* 1118 * Convert interval expressed as 2^(bInterval - 1) == interval into 1119 * straight exponent value 2^n == interval. 1120 * 1121 */ 1122 static unsigned int xhci_parse_exponent_interval(struct usb_device *udev, 1123 struct usb_host_endpoint *ep) 1124 { 1125 unsigned int interval; 1126 1127 interval = clamp_val(ep->desc.bInterval, 1, 16) - 1; 1128 if (interval != ep->desc.bInterval - 1) 1129 dev_warn(&udev->dev, 1130 "ep %#x - rounding interval to %d %sframes\n", 1131 ep->desc.bEndpointAddress, 1132 1 << interval, 1133 udev->speed == USB_SPEED_FULL ? "" : "micro"); 1134 1135 if (udev->speed == USB_SPEED_FULL) { 1136 /* 1137 * Full speed isoc endpoints specify interval in frames, 1138 * not microframes. We are using microframes everywhere, 1139 * so adjust accordingly. 1140 */ 1141 interval += 3; /* 1 frame = 2^3 uframes */ 1142 } 1143 1144 return interval; 1145 } 1146 1147 /* 1148 * Convert bInterval expressed in frames (in 1-255 range) to exponent of 1149 * microframes, rounded down to nearest power of 2. 1150 */ 1151 static unsigned int xhci_parse_frame_interval(struct usb_device *udev, 1152 struct usb_host_endpoint *ep) 1153 { 1154 unsigned int interval; 1155 1156 interval = fls(8 * ep->desc.bInterval) - 1; 1157 interval = clamp_val(interval, 3, 10); 1158 if ((1 << interval) != 8 * ep->desc.bInterval) 1159 dev_warn(&udev->dev, 1160 "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n", 1161 ep->desc.bEndpointAddress, 1162 1 << interval, 1163 8 * ep->desc.bInterval); 1164 1165 return interval; 1166 } 1167 1168 /* Return the polling or NAK interval. 1169 * 1170 * The polling interval is expressed in "microframes". If xHCI's Interval field 1171 * is set to N, it will service the endpoint every 2^(Interval)*125us. 1172 * 1173 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval 1174 * is set to 0. 1175 */ 1176 static unsigned int xhci_get_endpoint_interval(struct usb_device *udev, 1177 struct usb_host_endpoint *ep) 1178 { 1179 unsigned int interval = 0; 1180 1181 switch (udev->speed) { 1182 case USB_SPEED_HIGH: 1183 /* Max NAK rate */ 1184 if (usb_endpoint_xfer_control(&ep->desc) || 1185 usb_endpoint_xfer_bulk(&ep->desc)) { 1186 interval = ep->desc.bInterval; 1187 break; 1188 } 1189 /* Fall through - SS and HS isoc/int have same decoding */ 1190 1191 case USB_SPEED_SUPER: 1192 if (usb_endpoint_xfer_int(&ep->desc) || 1193 usb_endpoint_xfer_isoc(&ep->desc)) { 1194 interval = xhci_parse_exponent_interval(udev, ep); 1195 } 1196 break; 1197 1198 case USB_SPEED_FULL: 1199 if (usb_endpoint_xfer_isoc(&ep->desc)) { 1200 interval = xhci_parse_exponent_interval(udev, ep); 1201 break; 1202 } 1203 /* 1204 * Fall through for interrupt endpoint interval decoding 1205 * since it uses the same rules as low speed interrupt 1206 * endpoints. 1207 */ 1208 1209 case USB_SPEED_LOW: 1210 if (usb_endpoint_xfer_int(&ep->desc) || 1211 usb_endpoint_xfer_isoc(&ep->desc)) { 1212 1213 interval = xhci_parse_frame_interval(udev, ep); 1214 } 1215 break; 1216 1217 default: 1218 BUG(); 1219 } 1220 return EP_INTERVAL(interval); 1221 } 1222 1223 /* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps. 1224 * High speed endpoint descriptors can define "the number of additional 1225 * transaction opportunities per microframe", but that goes in the Max Burst 1226 * endpoint context field. 1227 */ 1228 static u32 xhci_get_endpoint_mult(struct usb_device *udev, 1229 struct usb_host_endpoint *ep) 1230 { 1231 if (udev->speed != USB_SPEED_SUPER || 1232 !usb_endpoint_xfer_isoc(&ep->desc)) 1233 return 0; 1234 return ep->ss_ep_comp.bmAttributes; 1235 } 1236 1237 static u32 xhci_get_endpoint_type(struct usb_device *udev, 1238 struct usb_host_endpoint *ep) 1239 { 1240 int in; 1241 u32 type; 1242 1243 in = usb_endpoint_dir_in(&ep->desc); 1244 if (usb_endpoint_xfer_control(&ep->desc)) { 1245 type = EP_TYPE(CTRL_EP); 1246 } else if (usb_endpoint_xfer_bulk(&ep->desc)) { 1247 if (in) 1248 type = EP_TYPE(BULK_IN_EP); 1249 else 1250 type = EP_TYPE(BULK_OUT_EP); 1251 } else if (usb_endpoint_xfer_isoc(&ep->desc)) { 1252 if (in) 1253 type = EP_TYPE(ISOC_IN_EP); 1254 else 1255 type = EP_TYPE(ISOC_OUT_EP); 1256 } else if (usb_endpoint_xfer_int(&ep->desc)) { 1257 if (in) 1258 type = EP_TYPE(INT_IN_EP); 1259 else 1260 type = EP_TYPE(INT_OUT_EP); 1261 } else { 1262 BUG(); 1263 } 1264 return type; 1265 } 1266 1267 /* Return the maximum endpoint service interval time (ESIT) payload. 1268 * Basically, this is the maxpacket size, multiplied by the burst size 1269 * and mult size. 1270 */ 1271 static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci, 1272 struct usb_device *udev, 1273 struct usb_host_endpoint *ep) 1274 { 1275 int max_burst; 1276 int max_packet; 1277 1278 /* Only applies for interrupt or isochronous endpoints */ 1279 if (usb_endpoint_xfer_control(&ep->desc) || 1280 usb_endpoint_xfer_bulk(&ep->desc)) 1281 return 0; 1282 1283 if (udev->speed == USB_SPEED_SUPER) 1284 return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval); 1285 1286 max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)); 1287 max_burst = (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11; 1288 /* A 0 in max burst means 1 transfer per ESIT */ 1289 return max_packet * (max_burst + 1); 1290 } 1291 1292 /* Set up an endpoint with one ring segment. Do not allocate stream rings. 1293 * Drivers will have to call usb_alloc_streams() to do that. 1294 */ 1295 int xhci_endpoint_init(struct xhci_hcd *xhci, 1296 struct xhci_virt_device *virt_dev, 1297 struct usb_device *udev, 1298 struct usb_host_endpoint *ep, 1299 gfp_t mem_flags) 1300 { 1301 unsigned int ep_index; 1302 struct xhci_ep_ctx *ep_ctx; 1303 struct xhci_ring *ep_ring; 1304 unsigned int max_packet; 1305 unsigned int max_burst; 1306 u32 max_esit_payload; 1307 1308 ep_index = xhci_get_endpoint_index(&ep->desc); 1309 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); 1310 1311 /* Set up the endpoint ring */ 1312 /* 1313 * Isochronous endpoint ring needs bigger size because one isoc URB 1314 * carries multiple packets and it will insert multiple tds to the 1315 * ring. 1316 * This should be replaced with dynamic ring resizing in the future. 1317 */ 1318 if (usb_endpoint_xfer_isoc(&ep->desc)) 1319 virt_dev->eps[ep_index].new_ring = 1320 xhci_ring_alloc(xhci, 8, true, true, mem_flags); 1321 else 1322 virt_dev->eps[ep_index].new_ring = 1323 xhci_ring_alloc(xhci, 1, true, false, mem_flags); 1324 if (!virt_dev->eps[ep_index].new_ring) { 1325 /* Attempt to use the ring cache */ 1326 if (virt_dev->num_rings_cached == 0) 1327 return -ENOMEM; 1328 virt_dev->eps[ep_index].new_ring = 1329 virt_dev->ring_cache[virt_dev->num_rings_cached]; 1330 virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL; 1331 virt_dev->num_rings_cached--; 1332 xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring, 1333 usb_endpoint_xfer_isoc(&ep->desc) ? true : false); 1334 } 1335 virt_dev->eps[ep_index].skip = false; 1336 ep_ring = virt_dev->eps[ep_index].new_ring; 1337 ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma | ep_ring->cycle_state); 1338 1339 ep_ctx->ep_info = cpu_to_le32(xhci_get_endpoint_interval(udev, ep) 1340 | EP_MULT(xhci_get_endpoint_mult(udev, ep))); 1341 1342 /* FIXME dig Mult and streams info out of ep companion desc */ 1343 1344 /* Allow 3 retries for everything but isoc; 1345 * CErr shall be set to 0 for Isoch endpoints. 1346 */ 1347 if (!usb_endpoint_xfer_isoc(&ep->desc)) 1348 ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(3)); 1349 else 1350 ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(0)); 1351 1352 ep_ctx->ep_info2 |= cpu_to_le32(xhci_get_endpoint_type(udev, ep)); 1353 1354 /* Set the max packet size and max burst */ 1355 switch (udev->speed) { 1356 case USB_SPEED_SUPER: 1357 max_packet = usb_endpoint_maxp(&ep->desc); 1358 ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet)); 1359 /* dig out max burst from ep companion desc */ 1360 max_packet = ep->ss_ep_comp.bMaxBurst; 1361 ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_packet)); 1362 break; 1363 case USB_SPEED_HIGH: 1364 /* bits 11:12 specify the number of additional transaction 1365 * opportunities per microframe (USB 2.0, section 9.6.6) 1366 */ 1367 if (usb_endpoint_xfer_isoc(&ep->desc) || 1368 usb_endpoint_xfer_int(&ep->desc)) { 1369 max_burst = (usb_endpoint_maxp(&ep->desc) 1370 & 0x1800) >> 11; 1371 ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_burst)); 1372 } 1373 /* Fall through */ 1374 case USB_SPEED_FULL: 1375 case USB_SPEED_LOW: 1376 max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)); 1377 ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet)); 1378 break; 1379 default: 1380 BUG(); 1381 } 1382 max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep); 1383 ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload)); 1384 1385 /* 1386 * XXX no idea how to calculate the average TRB buffer length for bulk 1387 * endpoints, as the driver gives us no clue how big each scatter gather 1388 * list entry (or buffer) is going to be. 1389 * 1390 * For isochronous and interrupt endpoints, we set it to the max 1391 * available, until we have new API in the USB core to allow drivers to 1392 * declare how much bandwidth they actually need. 1393 * 1394 * Normally, it would be calculated by taking the total of the buffer 1395 * lengths in the TD and then dividing by the number of TRBs in a TD, 1396 * including link TRBs, No-op TRBs, and Event data TRBs. Since we don't 1397 * use Event Data TRBs, and we don't chain in a link TRB on short 1398 * transfers, we're basically dividing by 1. 1399 * 1400 * xHCI 1.0 specification indicates that the Average TRB Length should 1401 * be set to 8 for control endpoints. 1402 */ 1403 if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100) 1404 ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8)); 1405 else 1406 ep_ctx->tx_info |= 1407 cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(max_esit_payload)); 1408 1409 /* FIXME Debug endpoint context */ 1410 return 0; 1411 } 1412 1413 void xhci_endpoint_zero(struct xhci_hcd *xhci, 1414 struct xhci_virt_device *virt_dev, 1415 struct usb_host_endpoint *ep) 1416 { 1417 unsigned int ep_index; 1418 struct xhci_ep_ctx *ep_ctx; 1419 1420 ep_index = xhci_get_endpoint_index(&ep->desc); 1421 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); 1422 1423 ep_ctx->ep_info = 0; 1424 ep_ctx->ep_info2 = 0; 1425 ep_ctx->deq = 0; 1426 ep_ctx->tx_info = 0; 1427 /* Don't free the endpoint ring until the set interface or configuration 1428 * request succeeds. 1429 */ 1430 } 1431 1432 void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info) 1433 { 1434 bw_info->ep_interval = 0; 1435 bw_info->mult = 0; 1436 bw_info->num_packets = 0; 1437 bw_info->max_packet_size = 0; 1438 bw_info->type = 0; 1439 bw_info->max_esit_payload = 0; 1440 } 1441 1442 void xhci_update_bw_info(struct xhci_hcd *xhci, 1443 struct xhci_container_ctx *in_ctx, 1444 struct xhci_input_control_ctx *ctrl_ctx, 1445 struct xhci_virt_device *virt_dev) 1446 { 1447 struct xhci_bw_info *bw_info; 1448 struct xhci_ep_ctx *ep_ctx; 1449 unsigned int ep_type; 1450 int i; 1451 1452 for (i = 1; i < 31; ++i) { 1453 bw_info = &virt_dev->eps[i].bw_info; 1454 1455 /* We can't tell what endpoint type is being dropped, but 1456 * unconditionally clearing the bandwidth info for non-periodic 1457 * endpoints should be harmless because the info will never be 1458 * set in the first place. 1459 */ 1460 if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) { 1461 /* Dropped endpoint */ 1462 xhci_clear_endpoint_bw_info(bw_info); 1463 continue; 1464 } 1465 1466 if (EP_IS_ADDED(ctrl_ctx, i)) { 1467 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i); 1468 ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2)); 1469 1470 /* Ignore non-periodic endpoints */ 1471 if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP && 1472 ep_type != ISOC_IN_EP && 1473 ep_type != INT_IN_EP) 1474 continue; 1475 1476 /* Added or changed endpoint */ 1477 bw_info->ep_interval = CTX_TO_EP_INTERVAL( 1478 le32_to_cpu(ep_ctx->ep_info)); 1479 /* Number of packets and mult are zero-based in the 1480 * input context, but we want one-based for the 1481 * interval table. 1482 */ 1483 bw_info->mult = CTX_TO_EP_MULT( 1484 le32_to_cpu(ep_ctx->ep_info)) + 1; 1485 bw_info->num_packets = CTX_TO_MAX_BURST( 1486 le32_to_cpu(ep_ctx->ep_info2)) + 1; 1487 bw_info->max_packet_size = MAX_PACKET_DECODED( 1488 le32_to_cpu(ep_ctx->ep_info2)); 1489 bw_info->type = ep_type; 1490 bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD( 1491 le32_to_cpu(ep_ctx->tx_info)); 1492 } 1493 } 1494 } 1495 1496 /* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy. 1497 * Useful when you want to change one particular aspect of the endpoint and then 1498 * issue a configure endpoint command. 1499 */ 1500 void xhci_endpoint_copy(struct xhci_hcd *xhci, 1501 struct xhci_container_ctx *in_ctx, 1502 struct xhci_container_ctx *out_ctx, 1503 unsigned int ep_index) 1504 { 1505 struct xhci_ep_ctx *out_ep_ctx; 1506 struct xhci_ep_ctx *in_ep_ctx; 1507 1508 out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); 1509 in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); 1510 1511 in_ep_ctx->ep_info = out_ep_ctx->ep_info; 1512 in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2; 1513 in_ep_ctx->deq = out_ep_ctx->deq; 1514 in_ep_ctx->tx_info = out_ep_ctx->tx_info; 1515 } 1516 1517 /* Copy output xhci_slot_ctx to the input xhci_slot_ctx. 1518 * Useful when you want to change one particular aspect of the endpoint and then 1519 * issue a configure endpoint command. Only the context entries field matters, 1520 * but we'll copy the whole thing anyway. 1521 */ 1522 void xhci_slot_copy(struct xhci_hcd *xhci, 1523 struct xhci_container_ctx *in_ctx, 1524 struct xhci_container_ctx *out_ctx) 1525 { 1526 struct xhci_slot_ctx *in_slot_ctx; 1527 struct xhci_slot_ctx *out_slot_ctx; 1528 1529 in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx); 1530 out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx); 1531 1532 in_slot_ctx->dev_info = out_slot_ctx->dev_info; 1533 in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2; 1534 in_slot_ctx->tt_info = out_slot_ctx->tt_info; 1535 in_slot_ctx->dev_state = out_slot_ctx->dev_state; 1536 } 1537 1538 /* Set up the scratchpad buffer array and scratchpad buffers, if needed. */ 1539 static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags) 1540 { 1541 int i; 1542 struct device *dev = xhci_to_hcd(xhci)->self.controller; 1543 int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2); 1544 1545 xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp); 1546 1547 if (!num_sp) 1548 return 0; 1549 1550 xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags); 1551 if (!xhci->scratchpad) 1552 goto fail_sp; 1553 1554 xhci->scratchpad->sp_array = dma_alloc_coherent(dev, 1555 num_sp * sizeof(u64), 1556 &xhci->scratchpad->sp_dma, flags); 1557 if (!xhci->scratchpad->sp_array) 1558 goto fail_sp2; 1559 1560 xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags); 1561 if (!xhci->scratchpad->sp_buffers) 1562 goto fail_sp3; 1563 1564 xhci->scratchpad->sp_dma_buffers = 1565 kzalloc(sizeof(dma_addr_t) * num_sp, flags); 1566 1567 if (!xhci->scratchpad->sp_dma_buffers) 1568 goto fail_sp4; 1569 1570 xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma); 1571 for (i = 0; i < num_sp; i++) { 1572 dma_addr_t dma; 1573 void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma, 1574 flags); 1575 if (!buf) 1576 goto fail_sp5; 1577 1578 xhci->scratchpad->sp_array[i] = dma; 1579 xhci->scratchpad->sp_buffers[i] = buf; 1580 xhci->scratchpad->sp_dma_buffers[i] = dma; 1581 } 1582 1583 return 0; 1584 1585 fail_sp5: 1586 for (i = i - 1; i >= 0; i--) { 1587 dma_free_coherent(dev, xhci->page_size, 1588 xhci->scratchpad->sp_buffers[i], 1589 xhci->scratchpad->sp_dma_buffers[i]); 1590 } 1591 kfree(xhci->scratchpad->sp_dma_buffers); 1592 1593 fail_sp4: 1594 kfree(xhci->scratchpad->sp_buffers); 1595 1596 fail_sp3: 1597 dma_free_coherent(dev, num_sp * sizeof(u64), 1598 xhci->scratchpad->sp_array, 1599 xhci->scratchpad->sp_dma); 1600 1601 fail_sp2: 1602 kfree(xhci->scratchpad); 1603 xhci->scratchpad = NULL; 1604 1605 fail_sp: 1606 return -ENOMEM; 1607 } 1608 1609 static void scratchpad_free(struct xhci_hcd *xhci) 1610 { 1611 int num_sp; 1612 int i; 1613 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); 1614 1615 if (!xhci->scratchpad) 1616 return; 1617 1618 num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2); 1619 1620 for (i = 0; i < num_sp; i++) { 1621 dma_free_coherent(&pdev->dev, xhci->page_size, 1622 xhci->scratchpad->sp_buffers[i], 1623 xhci->scratchpad->sp_dma_buffers[i]); 1624 } 1625 kfree(xhci->scratchpad->sp_dma_buffers); 1626 kfree(xhci->scratchpad->sp_buffers); 1627 dma_free_coherent(&pdev->dev, num_sp * sizeof(u64), 1628 xhci->scratchpad->sp_array, 1629 xhci->scratchpad->sp_dma); 1630 kfree(xhci->scratchpad); 1631 xhci->scratchpad = NULL; 1632 } 1633 1634 struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci, 1635 bool allocate_in_ctx, bool allocate_completion, 1636 gfp_t mem_flags) 1637 { 1638 struct xhci_command *command; 1639 1640 command = kzalloc(sizeof(*command), mem_flags); 1641 if (!command) 1642 return NULL; 1643 1644 if (allocate_in_ctx) { 1645 command->in_ctx = 1646 xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, 1647 mem_flags); 1648 if (!command->in_ctx) { 1649 kfree(command); 1650 return NULL; 1651 } 1652 } 1653 1654 if (allocate_completion) { 1655 command->completion = 1656 kzalloc(sizeof(struct completion), mem_flags); 1657 if (!command->completion) { 1658 xhci_free_container_ctx(xhci, command->in_ctx); 1659 kfree(command); 1660 return NULL; 1661 } 1662 init_completion(command->completion); 1663 } 1664 1665 command->status = 0; 1666 INIT_LIST_HEAD(&command->cmd_list); 1667 return command; 1668 } 1669 1670 void xhci_urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv) 1671 { 1672 if (urb_priv) { 1673 kfree(urb_priv->td[0]); 1674 kfree(urb_priv); 1675 } 1676 } 1677 1678 void xhci_free_command(struct xhci_hcd *xhci, 1679 struct xhci_command *command) 1680 { 1681 xhci_free_container_ctx(xhci, 1682 command->in_ctx); 1683 kfree(command->completion); 1684 kfree(command); 1685 } 1686 1687 void xhci_mem_cleanup(struct xhci_hcd *xhci) 1688 { 1689 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); 1690 struct dev_info *dev_info, *next; 1691 unsigned long flags; 1692 int size; 1693 int i; 1694 1695 /* Free the Event Ring Segment Table and the actual Event Ring */ 1696 if (xhci->ir_set) { 1697 xhci_writel(xhci, 0, &xhci->ir_set->erst_size); 1698 xhci_write_64(xhci, 0, &xhci->ir_set->erst_base); 1699 xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue); 1700 } 1701 size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); 1702 if (xhci->erst.entries) 1703 dma_free_coherent(&pdev->dev, size, 1704 xhci->erst.entries, xhci->erst.erst_dma_addr); 1705 xhci->erst.entries = NULL; 1706 xhci_dbg(xhci, "Freed ERST\n"); 1707 if (xhci->event_ring) 1708 xhci_ring_free(xhci, xhci->event_ring); 1709 xhci->event_ring = NULL; 1710 xhci_dbg(xhci, "Freed event ring\n"); 1711 1712 xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring); 1713 if (xhci->cmd_ring) 1714 xhci_ring_free(xhci, xhci->cmd_ring); 1715 xhci->cmd_ring = NULL; 1716 xhci_dbg(xhci, "Freed command ring\n"); 1717 1718 for (i = 1; i < MAX_HC_SLOTS; ++i) 1719 xhci_free_virt_device(xhci, i); 1720 1721 if (xhci->segment_pool) 1722 dma_pool_destroy(xhci->segment_pool); 1723 xhci->segment_pool = NULL; 1724 xhci_dbg(xhci, "Freed segment pool\n"); 1725 1726 if (xhci->device_pool) 1727 dma_pool_destroy(xhci->device_pool); 1728 xhci->device_pool = NULL; 1729 xhci_dbg(xhci, "Freed device context pool\n"); 1730 1731 if (xhci->small_streams_pool) 1732 dma_pool_destroy(xhci->small_streams_pool); 1733 xhci->small_streams_pool = NULL; 1734 xhci_dbg(xhci, "Freed small stream array pool\n"); 1735 1736 if (xhci->medium_streams_pool) 1737 dma_pool_destroy(xhci->medium_streams_pool); 1738 xhci->medium_streams_pool = NULL; 1739 xhci_dbg(xhci, "Freed medium stream array pool\n"); 1740 1741 xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr); 1742 if (xhci->dcbaa) 1743 dma_free_coherent(&pdev->dev, sizeof(*xhci->dcbaa), 1744 xhci->dcbaa, xhci->dcbaa->dma); 1745 xhci->dcbaa = NULL; 1746 1747 scratchpad_free(xhci); 1748 1749 spin_lock_irqsave(&xhci->lock, flags); 1750 list_for_each_entry_safe(dev_info, next, &xhci->lpm_failed_devs, list) { 1751 list_del(&dev_info->list); 1752 kfree(dev_info); 1753 } 1754 spin_unlock_irqrestore(&xhci->lock, flags); 1755 1756 xhci->num_usb2_ports = 0; 1757 xhci->num_usb3_ports = 0; 1758 kfree(xhci->usb2_ports); 1759 kfree(xhci->usb3_ports); 1760 kfree(xhci->port_array); 1761 kfree(xhci->rh_bw); 1762 1763 xhci->page_size = 0; 1764 xhci->page_shift = 0; 1765 xhci->bus_state[0].bus_suspended = 0; 1766 xhci->bus_state[1].bus_suspended = 0; 1767 } 1768 1769 static int xhci_test_trb_in_td(struct xhci_hcd *xhci, 1770 struct xhci_segment *input_seg, 1771 union xhci_trb *start_trb, 1772 union xhci_trb *end_trb, 1773 dma_addr_t input_dma, 1774 struct xhci_segment *result_seg, 1775 char *test_name, int test_number) 1776 { 1777 unsigned long long start_dma; 1778 unsigned long long end_dma; 1779 struct xhci_segment *seg; 1780 1781 start_dma = xhci_trb_virt_to_dma(input_seg, start_trb); 1782 end_dma = xhci_trb_virt_to_dma(input_seg, end_trb); 1783 1784 seg = trb_in_td(input_seg, start_trb, end_trb, input_dma); 1785 if (seg != result_seg) { 1786 xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n", 1787 test_name, test_number); 1788 xhci_warn(xhci, "Tested TRB math w/ seg %p and " 1789 "input DMA 0x%llx\n", 1790 input_seg, 1791 (unsigned long long) input_dma); 1792 xhci_warn(xhci, "starting TRB %p (0x%llx DMA), " 1793 "ending TRB %p (0x%llx DMA)\n", 1794 start_trb, start_dma, 1795 end_trb, end_dma); 1796 xhci_warn(xhci, "Expected seg %p, got seg %p\n", 1797 result_seg, seg); 1798 return -1; 1799 } 1800 return 0; 1801 } 1802 1803 /* TRB math checks for xhci_trb_in_td(), using the command and event rings. */ 1804 static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags) 1805 { 1806 struct { 1807 dma_addr_t input_dma; 1808 struct xhci_segment *result_seg; 1809 } simple_test_vector [] = { 1810 /* A zeroed DMA field should fail */ 1811 { 0, NULL }, 1812 /* One TRB before the ring start should fail */ 1813 { xhci->event_ring->first_seg->dma - 16, NULL }, 1814 /* One byte before the ring start should fail */ 1815 { xhci->event_ring->first_seg->dma - 1, NULL }, 1816 /* Starting TRB should succeed */ 1817 { xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg }, 1818 /* Ending TRB should succeed */ 1819 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16, 1820 xhci->event_ring->first_seg }, 1821 /* One byte after the ring end should fail */ 1822 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL }, 1823 /* One TRB after the ring end should fail */ 1824 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL }, 1825 /* An address of all ones should fail */ 1826 { (dma_addr_t) (~0), NULL }, 1827 }; 1828 struct { 1829 struct xhci_segment *input_seg; 1830 union xhci_trb *start_trb; 1831 union xhci_trb *end_trb; 1832 dma_addr_t input_dma; 1833 struct xhci_segment *result_seg; 1834 } complex_test_vector [] = { 1835 /* Test feeding a valid DMA address from a different ring */ 1836 { .input_seg = xhci->event_ring->first_seg, 1837 .start_trb = xhci->event_ring->first_seg->trbs, 1838 .end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], 1839 .input_dma = xhci->cmd_ring->first_seg->dma, 1840 .result_seg = NULL, 1841 }, 1842 /* Test feeding a valid end TRB from a different ring */ 1843 { .input_seg = xhci->event_ring->first_seg, 1844 .start_trb = xhci->event_ring->first_seg->trbs, 1845 .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], 1846 .input_dma = xhci->cmd_ring->first_seg->dma, 1847 .result_seg = NULL, 1848 }, 1849 /* Test feeding a valid start and end TRB from a different ring */ 1850 { .input_seg = xhci->event_ring->first_seg, 1851 .start_trb = xhci->cmd_ring->first_seg->trbs, 1852 .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], 1853 .input_dma = xhci->cmd_ring->first_seg->dma, 1854 .result_seg = NULL, 1855 }, 1856 /* TRB in this ring, but after this TD */ 1857 { .input_seg = xhci->event_ring->first_seg, 1858 .start_trb = &xhci->event_ring->first_seg->trbs[0], 1859 .end_trb = &xhci->event_ring->first_seg->trbs[3], 1860 .input_dma = xhci->event_ring->first_seg->dma + 4*16, 1861 .result_seg = NULL, 1862 }, 1863 /* TRB in this ring, but before this TD */ 1864 { .input_seg = xhci->event_ring->first_seg, 1865 .start_trb = &xhci->event_ring->first_seg->trbs[3], 1866 .end_trb = &xhci->event_ring->first_seg->trbs[6], 1867 .input_dma = xhci->event_ring->first_seg->dma + 2*16, 1868 .result_seg = NULL, 1869 }, 1870 /* TRB in this ring, but after this wrapped TD */ 1871 { .input_seg = xhci->event_ring->first_seg, 1872 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3], 1873 .end_trb = &xhci->event_ring->first_seg->trbs[1], 1874 .input_dma = xhci->event_ring->first_seg->dma + 2*16, 1875 .result_seg = NULL, 1876 }, 1877 /* TRB in this ring, but before this wrapped TD */ 1878 { .input_seg = xhci->event_ring->first_seg, 1879 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3], 1880 .end_trb = &xhci->event_ring->first_seg->trbs[1], 1881 .input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16, 1882 .result_seg = NULL, 1883 }, 1884 /* TRB not in this ring, and we have a wrapped TD */ 1885 { .input_seg = xhci->event_ring->first_seg, 1886 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3], 1887 .end_trb = &xhci->event_ring->first_seg->trbs[1], 1888 .input_dma = xhci->cmd_ring->first_seg->dma + 2*16, 1889 .result_seg = NULL, 1890 }, 1891 }; 1892 1893 unsigned int num_tests; 1894 int i, ret; 1895 1896 num_tests = ARRAY_SIZE(simple_test_vector); 1897 for (i = 0; i < num_tests; i++) { 1898 ret = xhci_test_trb_in_td(xhci, 1899 xhci->event_ring->first_seg, 1900 xhci->event_ring->first_seg->trbs, 1901 &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], 1902 simple_test_vector[i].input_dma, 1903 simple_test_vector[i].result_seg, 1904 "Simple", i); 1905 if (ret < 0) 1906 return ret; 1907 } 1908 1909 num_tests = ARRAY_SIZE(complex_test_vector); 1910 for (i = 0; i < num_tests; i++) { 1911 ret = xhci_test_trb_in_td(xhci, 1912 complex_test_vector[i].input_seg, 1913 complex_test_vector[i].start_trb, 1914 complex_test_vector[i].end_trb, 1915 complex_test_vector[i].input_dma, 1916 complex_test_vector[i].result_seg, 1917 "Complex", i); 1918 if (ret < 0) 1919 return ret; 1920 } 1921 xhci_dbg(xhci, "TRB math tests passed.\n"); 1922 return 0; 1923 } 1924 1925 static void xhci_set_hc_event_deq(struct xhci_hcd *xhci) 1926 { 1927 u64 temp; 1928 dma_addr_t deq; 1929 1930 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, 1931 xhci->event_ring->dequeue); 1932 if (deq == 0 && !in_interrupt()) 1933 xhci_warn(xhci, "WARN something wrong with SW event ring " 1934 "dequeue ptr.\n"); 1935 /* Update HC event ring dequeue pointer */ 1936 temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); 1937 temp &= ERST_PTR_MASK; 1938 /* Don't clear the EHB bit (which is RW1C) because 1939 * there might be more events to service. 1940 */ 1941 temp &= ~ERST_EHB; 1942 xhci_dbg(xhci, "// Write event ring dequeue pointer, " 1943 "preserving EHB bit\n"); 1944 xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp, 1945 &xhci->ir_set->erst_dequeue); 1946 } 1947 1948 static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, 1949 __le32 __iomem *addr, u8 major_revision) 1950 { 1951 u32 temp, port_offset, port_count; 1952 int i; 1953 1954 if (major_revision > 0x03) { 1955 xhci_warn(xhci, "Ignoring unknown port speed, " 1956 "Ext Cap %p, revision = 0x%x\n", 1957 addr, major_revision); 1958 /* Ignoring port protocol we can't understand. FIXME */ 1959 return; 1960 } 1961 1962 /* Port offset and count in the third dword, see section 7.2 */ 1963 temp = xhci_readl(xhci, addr + 2); 1964 port_offset = XHCI_EXT_PORT_OFF(temp); 1965 port_count = XHCI_EXT_PORT_COUNT(temp); 1966 xhci_dbg(xhci, "Ext Cap %p, port offset = %u, " 1967 "count = %u, revision = 0x%x\n", 1968 addr, port_offset, port_count, major_revision); 1969 /* Port count includes the current port offset */ 1970 if (port_offset == 0 || (port_offset + port_count - 1) > num_ports) 1971 /* WTF? "Valid values are ‘1’ to MaxPorts" */ 1972 return; 1973 1974 /* Check the host's USB2 LPM capability */ 1975 if ((xhci->hci_version == 0x96) && (major_revision != 0x03) && 1976 (temp & XHCI_L1C)) { 1977 xhci_dbg(xhci, "xHCI 0.96: support USB2 software lpm\n"); 1978 xhci->sw_lpm_support = 1; 1979 } 1980 1981 if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) { 1982 xhci_dbg(xhci, "xHCI 1.0: support USB2 software lpm\n"); 1983 xhci->sw_lpm_support = 1; 1984 if (temp & XHCI_HLC) { 1985 xhci_dbg(xhci, "xHCI 1.0: support USB2 hardware lpm\n"); 1986 xhci->hw_lpm_support = 1; 1987 } 1988 } 1989 1990 port_offset--; 1991 for (i = port_offset; i < (port_offset + port_count); i++) { 1992 /* Duplicate entry. Ignore the port if the revisions differ. */ 1993 if (xhci->port_array[i] != 0) { 1994 xhci_warn(xhci, "Duplicate port entry, Ext Cap %p," 1995 " port %u\n", addr, i); 1996 xhci_warn(xhci, "Port was marked as USB %u, " 1997 "duplicated as USB %u\n", 1998 xhci->port_array[i], major_revision); 1999 /* Only adjust the roothub port counts if we haven't 2000 * found a similar duplicate. 2001 */ 2002 if (xhci->port_array[i] != major_revision && 2003 xhci->port_array[i] != DUPLICATE_ENTRY) { 2004 if (xhci->port_array[i] == 0x03) 2005 xhci->num_usb3_ports--; 2006 else 2007 xhci->num_usb2_ports--; 2008 xhci->port_array[i] = DUPLICATE_ENTRY; 2009 } 2010 /* FIXME: Should we disable the port? */ 2011 continue; 2012 } 2013 xhci->port_array[i] = major_revision; 2014 if (major_revision == 0x03) 2015 xhci->num_usb3_ports++; 2016 else 2017 xhci->num_usb2_ports++; 2018 } 2019 /* FIXME: Should we disable ports not in the Extended Capabilities? */ 2020 } 2021 2022 /* 2023 * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that 2024 * specify what speeds each port is supposed to be. We can't count on the port 2025 * speed bits in the PORTSC register being correct until a device is connected, 2026 * but we need to set up the two fake roothubs with the correct number of USB 2027 * 3.0 and USB 2.0 ports at host controller initialization time. 2028 */ 2029 static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags) 2030 { 2031 __le32 __iomem *addr; 2032 u32 offset; 2033 unsigned int num_ports; 2034 int i, j, port_index; 2035 2036 addr = &xhci->cap_regs->hcc_params; 2037 offset = XHCI_HCC_EXT_CAPS(xhci_readl(xhci, addr)); 2038 if (offset == 0) { 2039 xhci_err(xhci, "No Extended Capability registers, " 2040 "unable to set up roothub.\n"); 2041 return -ENODEV; 2042 } 2043 2044 num_ports = HCS_MAX_PORTS(xhci->hcs_params1); 2045 xhci->port_array = kzalloc(sizeof(*xhci->port_array)*num_ports, flags); 2046 if (!xhci->port_array) 2047 return -ENOMEM; 2048 2049 xhci->rh_bw = kzalloc(sizeof(*xhci->rh_bw)*num_ports, flags); 2050 if (!xhci->rh_bw) 2051 return -ENOMEM; 2052 for (i = 0; i < num_ports; i++) { 2053 struct xhci_interval_bw_table *bw_table; 2054 2055 INIT_LIST_HEAD(&xhci->rh_bw[i].tts); 2056 bw_table = &xhci->rh_bw[i].bw_table; 2057 for (j = 0; j < XHCI_MAX_INTERVAL; j++) 2058 INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints); 2059 } 2060 2061 /* 2062 * For whatever reason, the first capability offset is from the 2063 * capability register base, not from the HCCPARAMS register. 2064 * See section 5.3.6 for offset calculation. 2065 */ 2066 addr = &xhci->cap_regs->hc_capbase + offset; 2067 while (1) { 2068 u32 cap_id; 2069 2070 cap_id = xhci_readl(xhci, addr); 2071 if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL) 2072 xhci_add_in_port(xhci, num_ports, addr, 2073 (u8) XHCI_EXT_PORT_MAJOR(cap_id)); 2074 offset = XHCI_EXT_CAPS_NEXT(cap_id); 2075 if (!offset || (xhci->num_usb2_ports + xhci->num_usb3_ports) 2076 == num_ports) 2077 break; 2078 /* 2079 * Once you're into the Extended Capabilities, the offset is 2080 * always relative to the register holding the offset. 2081 */ 2082 addr += offset; 2083 } 2084 2085 if (xhci->num_usb2_ports == 0 && xhci->num_usb3_ports == 0) { 2086 xhci_warn(xhci, "No ports on the roothubs?\n"); 2087 return -ENODEV; 2088 } 2089 xhci_dbg(xhci, "Found %u USB 2.0 ports and %u USB 3.0 ports.\n", 2090 xhci->num_usb2_ports, xhci->num_usb3_ports); 2091 2092 /* Place limits on the number of roothub ports so that the hub 2093 * descriptors aren't longer than the USB core will allocate. 2094 */ 2095 if (xhci->num_usb3_ports > 15) { 2096 xhci_dbg(xhci, "Limiting USB 3.0 roothub ports to 15.\n"); 2097 xhci->num_usb3_ports = 15; 2098 } 2099 if (xhci->num_usb2_ports > USB_MAXCHILDREN) { 2100 xhci_dbg(xhci, "Limiting USB 2.0 roothub ports to %u.\n", 2101 USB_MAXCHILDREN); 2102 xhci->num_usb2_ports = USB_MAXCHILDREN; 2103 } 2104 2105 /* 2106 * Note we could have all USB 3.0 ports, or all USB 2.0 ports. 2107 * Not sure how the USB core will handle a hub with no ports... 2108 */ 2109 if (xhci->num_usb2_ports) { 2110 xhci->usb2_ports = kmalloc(sizeof(*xhci->usb2_ports)* 2111 xhci->num_usb2_ports, flags); 2112 if (!xhci->usb2_ports) 2113 return -ENOMEM; 2114 2115 port_index = 0; 2116 for (i = 0; i < num_ports; i++) { 2117 if (xhci->port_array[i] == 0x03 || 2118 xhci->port_array[i] == 0 || 2119 xhci->port_array[i] == DUPLICATE_ENTRY) 2120 continue; 2121 2122 xhci->usb2_ports[port_index] = 2123 &xhci->op_regs->port_status_base + 2124 NUM_PORT_REGS*i; 2125 xhci_dbg(xhci, "USB 2.0 port at index %u, " 2126 "addr = %p\n", i, 2127 xhci->usb2_ports[port_index]); 2128 port_index++; 2129 if (port_index == xhci->num_usb2_ports) 2130 break; 2131 } 2132 } 2133 if (xhci->num_usb3_ports) { 2134 xhci->usb3_ports = kmalloc(sizeof(*xhci->usb3_ports)* 2135 xhci->num_usb3_ports, flags); 2136 if (!xhci->usb3_ports) 2137 return -ENOMEM; 2138 2139 port_index = 0; 2140 for (i = 0; i < num_ports; i++) 2141 if (xhci->port_array[i] == 0x03) { 2142 xhci->usb3_ports[port_index] = 2143 &xhci->op_regs->port_status_base + 2144 NUM_PORT_REGS*i; 2145 xhci_dbg(xhci, "USB 3.0 port at index %u, " 2146 "addr = %p\n", i, 2147 xhci->usb3_ports[port_index]); 2148 port_index++; 2149 if (port_index == xhci->num_usb3_ports) 2150 break; 2151 } 2152 } 2153 return 0; 2154 } 2155 2156 int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) 2157 { 2158 dma_addr_t dma; 2159 struct device *dev = xhci_to_hcd(xhci)->self.controller; 2160 unsigned int val, val2; 2161 u64 val_64; 2162 struct xhci_segment *seg; 2163 u32 page_size; 2164 int i; 2165 2166 page_size = xhci_readl(xhci, &xhci->op_regs->page_size); 2167 xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size); 2168 for (i = 0; i < 16; i++) { 2169 if ((0x1 & page_size) != 0) 2170 break; 2171 page_size = page_size >> 1; 2172 } 2173 if (i < 16) 2174 xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024); 2175 else 2176 xhci_warn(xhci, "WARN: no supported page size\n"); 2177 /* Use 4K pages, since that's common and the minimum the HC supports */ 2178 xhci->page_shift = 12; 2179 xhci->page_size = 1 << xhci->page_shift; 2180 xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024); 2181 2182 /* 2183 * Program the Number of Device Slots Enabled field in the CONFIG 2184 * register with the max value of slots the HC can handle. 2185 */ 2186 val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1)); 2187 xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n", 2188 (unsigned int) val); 2189 val2 = xhci_readl(xhci, &xhci->op_regs->config_reg); 2190 val |= (val2 & ~HCS_SLOTS_MASK); 2191 xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n", 2192 (unsigned int) val); 2193 xhci_writel(xhci, val, &xhci->op_regs->config_reg); 2194 2195 /* 2196 * Section 5.4.8 - doorbell array must be 2197 * "physically contiguous and 64-byte (cache line) aligned". 2198 */ 2199 xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma, 2200 GFP_KERNEL); 2201 if (!xhci->dcbaa) 2202 goto fail; 2203 memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa)); 2204 xhci->dcbaa->dma = dma; 2205 xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n", 2206 (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa); 2207 xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr); 2208 2209 /* 2210 * Initialize the ring segment pool. The ring must be a contiguous 2211 * structure comprised of TRBs. The TRBs must be 16 byte aligned, 2212 * however, the command ring segment needs 64-byte aligned segments, 2213 * so we pick the greater alignment need. 2214 */ 2215 xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, 2216 SEGMENT_SIZE, 64, xhci->page_size); 2217 2218 /* See Table 46 and Note on Figure 55 */ 2219 xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev, 2220 2112, 64, xhci->page_size); 2221 if (!xhci->segment_pool || !xhci->device_pool) 2222 goto fail; 2223 2224 /* Linear stream context arrays don't have any boundary restrictions, 2225 * and only need to be 16-byte aligned. 2226 */ 2227 xhci->small_streams_pool = 2228 dma_pool_create("xHCI 256 byte stream ctx arrays", 2229 dev, SMALL_STREAM_ARRAY_SIZE, 16, 0); 2230 xhci->medium_streams_pool = 2231 dma_pool_create("xHCI 1KB stream ctx arrays", 2232 dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0); 2233 /* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE 2234 * will be allocated with dma_alloc_coherent() 2235 */ 2236 2237 if (!xhci->small_streams_pool || !xhci->medium_streams_pool) 2238 goto fail; 2239 2240 /* Set up the command ring to have one segments for now. */ 2241 xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, false, flags); 2242 if (!xhci->cmd_ring) 2243 goto fail; 2244 xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring); 2245 xhci_dbg(xhci, "First segment DMA is 0x%llx\n", 2246 (unsigned long long)xhci->cmd_ring->first_seg->dma); 2247 2248 /* Set the address in the Command Ring Control register */ 2249 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); 2250 val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | 2251 (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) | 2252 xhci->cmd_ring->cycle_state; 2253 xhci_dbg(xhci, "// Setting command ring address to 0x%x\n", val); 2254 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); 2255 xhci_dbg_cmd_ptrs(xhci); 2256 2257 val = xhci_readl(xhci, &xhci->cap_regs->db_off); 2258 val &= DBOFF_MASK; 2259 xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x" 2260 " from cap regs base addr\n", val); 2261 xhci->dba = (void __iomem *) xhci->cap_regs + val; 2262 xhci_dbg_regs(xhci); 2263 xhci_print_run_regs(xhci); 2264 /* Set ir_set to interrupt register set 0 */ 2265 xhci->ir_set = &xhci->run_regs->ir_set[0]; 2266 2267 /* 2268 * Event ring setup: Allocate a normal ring, but also setup 2269 * the event ring segment table (ERST). Section 4.9.3. 2270 */ 2271 xhci_dbg(xhci, "// Allocating event ring\n"); 2272 xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, false, 2273 flags); 2274 if (!xhci->event_ring) 2275 goto fail; 2276 if (xhci_check_trb_in_td_math(xhci, flags) < 0) 2277 goto fail; 2278 2279 xhci->erst.entries = dma_alloc_coherent(dev, 2280 sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma, 2281 GFP_KERNEL); 2282 if (!xhci->erst.entries) 2283 goto fail; 2284 xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n", 2285 (unsigned long long)dma); 2286 2287 memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS); 2288 xhci->erst.num_entries = ERST_NUM_SEGS; 2289 xhci->erst.erst_dma_addr = dma; 2290 xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n", 2291 xhci->erst.num_entries, 2292 xhci->erst.entries, 2293 (unsigned long long)xhci->erst.erst_dma_addr); 2294 2295 /* set ring base address and size for each segment table entry */ 2296 for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) { 2297 struct xhci_erst_entry *entry = &xhci->erst.entries[val]; 2298 entry->seg_addr = cpu_to_le64(seg->dma); 2299 entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT); 2300 entry->rsvd = 0; 2301 seg = seg->next; 2302 } 2303 2304 /* set ERST count with the number of entries in the segment table */ 2305 val = xhci_readl(xhci, &xhci->ir_set->erst_size); 2306 val &= ERST_SIZE_MASK; 2307 val |= ERST_NUM_SEGS; 2308 xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n", 2309 val); 2310 xhci_writel(xhci, val, &xhci->ir_set->erst_size); 2311 2312 xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n"); 2313 /* set the segment table base address */ 2314 xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n", 2315 (unsigned long long)xhci->erst.erst_dma_addr); 2316 val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base); 2317 val_64 &= ERST_PTR_MASK; 2318 val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK); 2319 xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base); 2320 2321 /* Set the event ring dequeue address */ 2322 xhci_set_hc_event_deq(xhci); 2323 xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n"); 2324 xhci_print_ir_set(xhci, 0); 2325 2326 /* 2327 * XXX: Might need to set the Interrupter Moderation Register to 2328 * something other than the default (~1ms minimum between interrupts). 2329 * See section 5.5.1.2. 2330 */ 2331 init_completion(&xhci->addr_dev); 2332 for (i = 0; i < MAX_HC_SLOTS; ++i) 2333 xhci->devs[i] = NULL; 2334 for (i = 0; i < USB_MAXCHILDREN; ++i) { 2335 xhci->bus_state[0].resume_done[i] = 0; 2336 xhci->bus_state[1].resume_done[i] = 0; 2337 } 2338 2339 if (scratchpad_alloc(xhci, flags)) 2340 goto fail; 2341 if (xhci_setup_port_arrays(xhci, flags)) 2342 goto fail; 2343 2344 INIT_LIST_HEAD(&xhci->lpm_failed_devs); 2345 2346 return 0; 2347 2348 fail: 2349 xhci_warn(xhci, "Couldn't initialize memory\n"); 2350 xhci_mem_cleanup(xhci); 2351 return -ENOMEM; 2352 } 2353