1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * xHCI host controller driver 4 * 5 * Copyright (C) 2008 Intel Corp. 6 * 7 * Author: Sarah Sharp 8 * Some code borrowed from the Linux EHCI driver. 9 */ 10 11 #include <linux/usb.h> 12 #include <linux/overflow.h> 13 #include <linux/pci.h> 14 #include <linux/slab.h> 15 #include <linux/dmapool.h> 16 #include <linux/dma-mapping.h> 17 18 #include "xhci.h" 19 #include "xhci-trace.h" 20 #include "xhci-debugfs.h" 21 22 /* 23 * Allocates a generic ring segment from the ring pool, sets the dma address, 24 * initializes the segment to zero, and sets the private next pointer to NULL. 25 * 26 * Section 4.11.1.1: 27 * "All components of all Command and Transfer TRBs shall be initialized to '0'" 28 */ 29 static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, 30 unsigned int max_packet, 31 unsigned int num, 32 gfp_t flags) 33 { 34 struct xhci_segment *seg; 35 dma_addr_t dma; 36 struct device *dev = xhci_to_hcd(xhci)->self.sysdev; 37 38 seg = kzalloc_node(sizeof(*seg), flags, dev_to_node(dev)); 39 if (!seg) 40 return NULL; 41 42 seg->trbs = dma_pool_zalloc(xhci->segment_pool, flags, &dma); 43 if (!seg->trbs) { 44 kfree(seg); 45 return NULL; 46 } 47 48 if (max_packet) { 49 seg->bounce_buf = kzalloc_node(max_packet, flags, 50 dev_to_node(dev)); 51 if (!seg->bounce_buf) { 52 dma_pool_free(xhci->segment_pool, seg->trbs, dma); 53 kfree(seg); 54 return NULL; 55 } 56 } 57 seg->num = num; 58 seg->dma = dma; 59 seg->next = NULL; 60 61 return seg; 62 } 63 64 static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg) 65 { 66 if (seg->trbs) { 67 dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma); 68 seg->trbs = NULL; 69 } 70 kfree(seg->bounce_buf); 71 kfree(seg); 72 } 73 74 static void xhci_ring_segments_free(struct xhci_hcd *xhci, struct xhci_ring *ring) 75 { 76 struct xhci_segment *seg, *next; 77 78 ring->last_seg->next = NULL; 79 seg = ring->first_seg; 80 81 while (seg) { 82 next = seg->next; 83 xhci_segment_free(xhci, seg); 84 seg = next; 85 } 86 } 87 88 /* 89 * Only for transfer and command rings where driver is the producer, not for 90 * event rings. 91 * 92 * Change the last TRB in the segment to be a Link TRB which points to the 93 * DMA address of the next segment. The caller needs to set any Link TRB 94 * related flags, such as End TRB, Toggle Cycle, and no snoop. 95 */ 96 static void xhci_set_link_trb(struct xhci_segment *seg, bool chain_links) 97 { 98 union xhci_trb *trb; 99 u32 val; 100 101 if (!seg || !seg->next) 102 return; 103 104 trb = &seg->trbs[TRBS_PER_SEGMENT - 1]; 105 106 /* Set the last TRB in the segment to have a TRB type ID of Link TRB */ 107 val = le32_to_cpu(trb->link.control); 108 val &= ~TRB_TYPE_BITMASK; 109 val |= TRB_TYPE(TRB_LINK); 110 if (chain_links) 111 val |= TRB_CHAIN; 112 trb->link.control = cpu_to_le32(val); 113 trb->link.segment_ptr = cpu_to_le64(seg->next->dma); 114 } 115 116 static void xhci_initialize_ring_segments(struct xhci_hcd *xhci, struct xhci_ring *ring) 117 { 118 struct xhci_segment *seg; 119 bool chain_links; 120 121 if (ring->type == TYPE_EVENT) 122 return; 123 124 chain_links = xhci_link_chain_quirk(xhci, ring->type); 125 xhci_for_each_ring_seg(ring->first_seg, seg) 126 xhci_set_link_trb(seg, chain_links); 127 128 /* See section 4.9.2.1 and 6.4.4.1 */ 129 ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |= cpu_to_le32(LINK_TOGGLE); 130 } 131 132 /* 133 * Link the src ring segments to the dst ring. 134 * Set Toggle Cycle for the new ring if needed. 
135 */ 136 static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *src, struct xhci_ring *dst) 137 { 138 struct xhci_segment *seg; 139 bool chain_links; 140 141 if (!src || !dst) 142 return; 143 144 /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */ 145 if (dst->cycle_state == 0) { 146 xhci_for_each_ring_seg(src->first_seg, seg) { 147 for (int i = 0; i < TRBS_PER_SEGMENT; i++) 148 seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE); 149 } 150 } 151 152 src->last_seg->next = dst->enq_seg->next; 153 dst->enq_seg->next = src->first_seg; 154 if (dst->type != TYPE_EVENT) { 155 chain_links = xhci_link_chain_quirk(xhci, dst->type); 156 xhci_set_link_trb(dst->enq_seg, chain_links); 157 xhci_set_link_trb(src->last_seg, chain_links); 158 } 159 dst->num_segs += src->num_segs; 160 161 if (dst->enq_seg == dst->last_seg) { 162 if (dst->type != TYPE_EVENT) 163 dst->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control 164 &= ~cpu_to_le32(LINK_TOGGLE); 165 166 dst->last_seg = src->last_seg; 167 } else if (dst->type != TYPE_EVENT) { 168 src->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control &= ~cpu_to_le32(LINK_TOGGLE); 169 } 170 171 for (seg = dst->enq_seg; seg != dst->last_seg; seg = seg->next) 172 seg->next->num = seg->num + 1; 173 } 174 175 /* 176 * We need a radix tree for mapping physical addresses of TRBs to which stream 177 * ID they belong to. We need to do this because the host controller won't tell 178 * us which stream ring the TRB came from. We could store the stream ID in an 179 * event data TRB, but that doesn't help us for the cancellation case, since the 180 * endpoint may stop before it reaches that event data TRB. 181 * 182 * The radix tree maps the upper portion of the TRB DMA address to a ring 183 * segment that has the same upper portion of DMA addresses. For example, say I 184 * have segments of size 1KB, that are always 1KB aligned. A segment may 185 * start at 0x10c91000 and end at 0x10c913f0. If I use the upper 10 bits, the 186 * key to the stream ID is 0x43244. I can use the DMA address of the TRB to 187 * pass the radix tree a key to get the right stream ID: 188 * 189 * 0x10c90fff >> 10 = 0x43243 190 * 0x10c912c0 >> 10 = 0x43244 191 * 0x10c91400 >> 10 = 0x43245 192 * 193 * Obviously, only those TRBs with DMA addresses that are within the segment 194 * will make the radix tree return the stream ID for that ring. 195 * 196 * Caveats for the radix tree: 197 * 198 * The radix tree uses an unsigned long as a key pair. On 32-bit systems, an 199 * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be 200 * 64-bits. Since we only request 32-bit DMA addresses, we can use that as the 201 * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit 202 * PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit 203 * extended systems (where the DMA address can be bigger than 32-bits), 204 * if we allow the PCI dma mask to be bigger than 32-bits. So don't do that. 205 */ 206 static int xhci_insert_segment_mapping(struct radix_tree_root *trb_address_map, 207 struct xhci_ring *ring, 208 struct xhci_segment *seg, 209 gfp_t mem_flags) 210 { 211 unsigned long key; 212 int ret; 213 214 key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT); 215 /* Skip any segments that were already added. 
*/ 216 if (radix_tree_lookup(trb_address_map, key)) 217 return 0; 218 219 ret = radix_tree_maybe_preload(mem_flags); 220 if (ret) 221 return ret; 222 ret = radix_tree_insert(trb_address_map, 223 key, ring); 224 radix_tree_preload_end(); 225 return ret; 226 } 227 228 static void xhci_remove_segment_mapping(struct radix_tree_root *trb_address_map, 229 struct xhci_segment *seg) 230 { 231 unsigned long key; 232 233 key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT); 234 if (radix_tree_lookup(trb_address_map, key)) 235 radix_tree_delete(trb_address_map, key); 236 } 237 238 static int xhci_update_stream_segment_mapping( 239 struct radix_tree_root *trb_address_map, 240 struct xhci_ring *ring, 241 struct xhci_segment *first_seg, 242 gfp_t mem_flags) 243 { 244 struct xhci_segment *seg; 245 struct xhci_segment *failed_seg; 246 int ret; 247 248 if (WARN_ON_ONCE(trb_address_map == NULL)) 249 return 0; 250 251 xhci_for_each_ring_seg(first_seg, seg) { 252 ret = xhci_insert_segment_mapping(trb_address_map, 253 ring, seg, mem_flags); 254 if (ret) 255 goto remove_streams; 256 } 257 258 return 0; 259 260 remove_streams: 261 failed_seg = seg; 262 xhci_for_each_ring_seg(first_seg, seg) { 263 xhci_remove_segment_mapping(trb_address_map, seg); 264 if (seg == failed_seg) 265 return ret; 266 } 267 268 return ret; 269 } 270 271 static void xhci_remove_stream_mapping(struct xhci_ring *ring) 272 { 273 struct xhci_segment *seg; 274 275 if (WARN_ON_ONCE(ring->trb_address_map == NULL)) 276 return; 277 278 xhci_for_each_ring_seg(ring->first_seg, seg) 279 xhci_remove_segment_mapping(ring->trb_address_map, seg); 280 } 281 282 static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags) 283 { 284 return xhci_update_stream_segment_mapping(ring->trb_address_map, ring, 285 ring->first_seg, mem_flags); 286 } 287 288 /* XXX: Do we need the hcd structure in all these functions? */ 289 void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring) 290 { 291 if (!ring) 292 return; 293 294 trace_xhci_ring_free(ring); 295 296 if (ring->first_seg) { 297 if (ring->type == TYPE_STREAM) 298 xhci_remove_stream_mapping(ring); 299 xhci_ring_segments_free(xhci, ring); 300 } 301 302 kfree(ring); 303 } 304 305 void xhci_initialize_ring_info(struct xhci_ring *ring) 306 { 307 /* The ring is empty, so the enqueue pointer == dequeue pointer */ 308 ring->enqueue = ring->first_seg->trbs; 309 ring->enq_seg = ring->first_seg; 310 ring->dequeue = ring->enqueue; 311 ring->deq_seg = ring->first_seg; 312 /* The ring is initialized to 0. The producer must write 1 to the cycle 313 * bit to handover ownership of the TRB, so PCS = 1. The consumer must 314 * compare CCS to the cycle bit to check ownership, so CCS = 1. 315 * 316 * New rings are initialized with cycle state equal to 1; if we are 317 * handling ring expansion, set the cycle state equal to the old ring. 
318 */ 319 ring->cycle_state = 1; 320 321 /* 322 * Each segment has a link TRB, and leave an extra TRB for SW 323 * accounting purpose 324 */ 325 ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1; 326 } 327 EXPORT_SYMBOL_GPL(xhci_initialize_ring_info); 328 329 /* Allocate segments and link them for a ring */ 330 static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci, struct xhci_ring *ring, gfp_t flags) 331 { 332 struct xhci_segment *prev; 333 unsigned int num = 0; 334 335 prev = xhci_segment_alloc(xhci, ring->bounce_buf_len, num, flags); 336 if (!prev) 337 return -ENOMEM; 338 num++; 339 340 ring->first_seg = prev; 341 while (num < ring->num_segs) { 342 struct xhci_segment *next; 343 344 next = xhci_segment_alloc(xhci, ring->bounce_buf_len, num, flags); 345 if (!next) 346 goto free_segments; 347 348 prev->next = next; 349 prev = next; 350 num++; 351 } 352 ring->last_seg = prev; 353 354 ring->last_seg->next = ring->first_seg; 355 return 0; 356 357 free_segments: 358 ring->last_seg = prev; 359 xhci_ring_segments_free(xhci, ring); 360 return -ENOMEM; 361 } 362 363 /* 364 * Create a new ring with zero or more segments. 365 * 366 * Link each segment together into a ring. 367 * Set the end flag and the cycle toggle bit on the last segment. 368 * See section 4.9.1 and figures 15 and 16. 369 */ 370 struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, unsigned int num_segs, 371 enum xhci_ring_type type, unsigned int max_packet, gfp_t flags) 372 { 373 struct xhci_ring *ring; 374 int ret; 375 struct device *dev = xhci_to_hcd(xhci)->self.sysdev; 376 377 ring = kzalloc_node(sizeof(*ring), flags, dev_to_node(dev)); 378 if (!ring) 379 return NULL; 380 381 ring->num_segs = num_segs; 382 ring->bounce_buf_len = max_packet; 383 INIT_LIST_HEAD(&ring->td_list); 384 ring->type = type; 385 if (num_segs == 0) 386 return ring; 387 388 ret = xhci_alloc_segments_for_ring(xhci, ring, flags); 389 if (ret) 390 goto fail; 391 392 xhci_initialize_ring_segments(xhci, ring); 393 xhci_initialize_ring_info(ring); 394 trace_xhci_ring_alloc(ring); 395 return ring; 396 397 fail: 398 kfree(ring); 399 return NULL; 400 } 401 402 void xhci_free_endpoint_ring(struct xhci_hcd *xhci, 403 struct xhci_virt_device *virt_dev, 404 unsigned int ep_index) 405 { 406 xhci_ring_free(xhci, virt_dev->eps[ep_index].ring); 407 virt_dev->eps[ep_index].ring = NULL; 408 } 409 410 /* 411 * Expand an existing ring. 412 * Allocate a new ring which has same segment numbers and link the two rings. 
413 */ 414 int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring, 415 unsigned int num_new_segs, gfp_t flags) 416 { 417 struct xhci_ring new_ring; 418 int ret; 419 420 if (num_new_segs == 0) 421 return 0; 422 423 new_ring.num_segs = num_new_segs; 424 new_ring.bounce_buf_len = ring->bounce_buf_len; 425 new_ring.type = ring->type; 426 ret = xhci_alloc_segments_for_ring(xhci, &new_ring, flags); 427 if (ret) 428 return -ENOMEM; 429 430 xhci_initialize_ring_segments(xhci, &new_ring); 431 432 if (ring->type == TYPE_STREAM) { 433 ret = xhci_update_stream_segment_mapping(ring->trb_address_map, ring, 434 new_ring.first_seg, flags); 435 if (ret) 436 goto free_segments; 437 } 438 439 xhci_link_rings(xhci, ring, &new_ring); 440 trace_xhci_ring_expansion(ring); 441 xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion, 442 "ring expansion succeed, now has %d segments", 443 ring->num_segs); 444 445 return 0; 446 447 free_segments: 448 xhci_ring_segments_free(xhci, &new_ring); 449 return ret; 450 } 451 452 struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, 453 int type, gfp_t flags) 454 { 455 struct xhci_container_ctx *ctx; 456 struct device *dev = xhci_to_hcd(xhci)->self.sysdev; 457 458 if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT)) 459 return NULL; 460 461 ctx = kzalloc_node(sizeof(*ctx), flags, dev_to_node(dev)); 462 if (!ctx) 463 return NULL; 464 465 ctx->type = type; 466 ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024; 467 if (type == XHCI_CTX_TYPE_INPUT) 468 ctx->size += CTX_SIZE(xhci->hcc_params); 469 470 ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma); 471 if (!ctx->bytes) { 472 kfree(ctx); 473 return NULL; 474 } 475 return ctx; 476 } 477 478 void xhci_free_container_ctx(struct xhci_hcd *xhci, 479 struct xhci_container_ctx *ctx) 480 { 481 if (!ctx) 482 return; 483 dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma); 484 kfree(ctx); 485 } 486 487 struct xhci_input_control_ctx *xhci_get_input_control_ctx( 488 struct xhci_container_ctx *ctx) 489 { 490 if (ctx->type != XHCI_CTX_TYPE_INPUT) 491 return NULL; 492 493 return (struct xhci_input_control_ctx *)ctx->bytes; 494 } 495 496 struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, 497 struct xhci_container_ctx *ctx) 498 { 499 if (ctx->type == XHCI_CTX_TYPE_DEVICE) 500 return (struct xhci_slot_ctx *)ctx->bytes; 501 502 return (struct xhci_slot_ctx *) 503 (ctx->bytes + CTX_SIZE(xhci->hcc_params)); 504 } 505 506 struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, 507 struct xhci_container_ctx *ctx, 508 unsigned int ep_index) 509 { 510 /* increment ep index by offset of start of ep ctx array */ 511 ep_index++; 512 if (ctx->type == XHCI_CTX_TYPE_INPUT) 513 ep_index++; 514 515 return (struct xhci_ep_ctx *) 516 (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params))); 517 } 518 EXPORT_SYMBOL_GPL(xhci_get_ep_ctx); 519 520 /***************** Streams structures manipulation *************************/ 521 522 static void xhci_free_stream_ctx(struct xhci_hcd *xhci, 523 unsigned int num_stream_ctxs, 524 struct xhci_stream_ctx *stream_ctx, dma_addr_t dma) 525 { 526 struct device *dev = xhci_to_hcd(xhci)->self.sysdev; 527 size_t size = array_size(sizeof(struct xhci_stream_ctx), num_stream_ctxs); 528 529 if (size > MEDIUM_STREAM_ARRAY_SIZE) 530 dma_free_coherent(dev, size, stream_ctx, dma); 531 else if (size > SMALL_STREAM_ARRAY_SIZE) 532 dma_pool_free(xhci->medium_streams_pool, stream_ctx, dma); 533 else 534 dma_pool_free(xhci->small_streams_pool, 
stream_ctx, dma); 535 } 536 537 /* 538 * The stream context array for each endpoint with bulk streams enabled can 539 * vary in size, based on: 540 * - how many streams the endpoint supports, 541 * - the maximum primary stream array size the host controller supports, 542 * - and how many streams the device driver asks for. 543 * 544 * The stream context array must be a power of 2, and can be as small as 545 * 64 bytes or as large as 1MB. 546 */ 547 static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci, 548 unsigned int num_stream_ctxs, dma_addr_t *dma, 549 gfp_t mem_flags) 550 { 551 struct device *dev = xhci_to_hcd(xhci)->self.sysdev; 552 size_t size = array_size(sizeof(struct xhci_stream_ctx), num_stream_ctxs); 553 554 if (size > MEDIUM_STREAM_ARRAY_SIZE) 555 return dma_alloc_coherent(dev, size, dma, mem_flags); 556 if (size > SMALL_STREAM_ARRAY_SIZE) 557 return dma_pool_zalloc(xhci->medium_streams_pool, mem_flags, dma); 558 else 559 return dma_pool_zalloc(xhci->small_streams_pool, mem_flags, dma); 560 } 561 562 struct xhci_ring *xhci_dma_to_transfer_ring( 563 struct xhci_virt_ep *ep, 564 u64 address) 565 { 566 if (ep->ep_state & EP_HAS_STREAMS) 567 return radix_tree_lookup(&ep->stream_info->trb_address_map, 568 address >> TRB_SEGMENT_SHIFT); 569 return ep->ring; 570 } 571 572 /* 573 * Change an endpoint's internal structure so it supports stream IDs. The 574 * number of requested streams includes stream 0, which cannot be used by device 575 * drivers. 576 * 577 * The number of stream contexts in the stream context array may be bigger than 578 * the number of streams the driver wants to use. This is because the number of 579 * stream context array entries must be a power of two. 580 */ 581 struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci, 582 unsigned int num_stream_ctxs, 583 unsigned int num_streams, 584 unsigned int max_packet, gfp_t mem_flags) 585 { 586 struct xhci_stream_info *stream_info; 587 u32 cur_stream; 588 struct xhci_ring *cur_ring; 589 u64 addr; 590 int ret; 591 struct device *dev = xhci_to_hcd(xhci)->self.sysdev; 592 593 xhci_dbg(xhci, "Allocating %u streams and %u stream context array entries.\n", 594 num_streams, num_stream_ctxs); 595 if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) { 596 xhci_dbg(xhci, "Command ring has no reserved TRBs available\n"); 597 return NULL; 598 } 599 xhci->cmd_ring_reserved_trbs++; 600 601 stream_info = kzalloc_node(sizeof(*stream_info), mem_flags, 602 dev_to_node(dev)); 603 if (!stream_info) 604 goto cleanup_trbs; 605 606 stream_info->num_streams = num_streams; 607 stream_info->num_stream_ctxs = num_stream_ctxs; 608 609 /* Initialize the array of virtual pointers to stream rings. */ 610 stream_info->stream_rings = kcalloc_node( 611 num_streams, sizeof(struct xhci_ring *), mem_flags, 612 dev_to_node(dev)); 613 if (!stream_info->stream_rings) 614 goto cleanup_info; 615 616 /* Initialize the array of DMA addresses for stream rings for the HW. 
*/ 617 stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci, 618 num_stream_ctxs, &stream_info->ctx_array_dma, 619 mem_flags); 620 if (!stream_info->stream_ctx_array) 621 goto cleanup_ring_array; 622 623 /* Allocate everything needed to free the stream rings later */ 624 stream_info->free_streams_command = 625 xhci_alloc_command_with_ctx(xhci, true, mem_flags); 626 if (!stream_info->free_streams_command) 627 goto cleanup_ctx; 628 629 INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC); 630 631 /* Allocate rings for all the streams that the driver will use, 632 * and add their segment DMA addresses to the radix tree. 633 * Stream 0 is reserved. 634 */ 635 636 for (cur_stream = 1; cur_stream < num_streams; cur_stream++) { 637 stream_info->stream_rings[cur_stream] = 638 xhci_ring_alloc(xhci, 2, TYPE_STREAM, max_packet, mem_flags); 639 cur_ring = stream_info->stream_rings[cur_stream]; 640 if (!cur_ring) 641 goto cleanup_rings; 642 cur_ring->stream_id = cur_stream; 643 cur_ring->trb_address_map = &stream_info->trb_address_map; 644 /* Set deq ptr, cycle bit, and stream context type */ 645 addr = cur_ring->first_seg->dma | 646 SCT_FOR_CTX(SCT_PRI_TR) | 647 cur_ring->cycle_state; 648 stream_info->stream_ctx_array[cur_stream].stream_ring = 649 cpu_to_le64(addr); 650 xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n", cur_stream, addr); 651 652 ret = xhci_update_stream_mapping(cur_ring, mem_flags); 653 654 trace_xhci_alloc_stream_info_ctx(stream_info, cur_stream); 655 if (ret) { 656 xhci_ring_free(xhci, cur_ring); 657 stream_info->stream_rings[cur_stream] = NULL; 658 goto cleanup_rings; 659 } 660 } 661 /* Leave the other unused stream ring pointers in the stream context 662 * array initialized to zero. This will cause the xHC to give us an 663 * error if the device asks for a stream ID we don't have setup (if it 664 * was any other way, the host controller would assume the ring is 665 * "empty" and wait forever for data to be queued to that stream ID). 666 */ 667 668 return stream_info; 669 670 cleanup_rings: 671 for (cur_stream = 1; cur_stream < num_streams; cur_stream++) { 672 cur_ring = stream_info->stream_rings[cur_stream]; 673 if (cur_ring) { 674 xhci_ring_free(xhci, cur_ring); 675 stream_info->stream_rings[cur_stream] = NULL; 676 } 677 } 678 xhci_free_command(xhci, stream_info->free_streams_command); 679 cleanup_ctx: 680 xhci_free_stream_ctx(xhci, 681 stream_info->num_stream_ctxs, 682 stream_info->stream_ctx_array, 683 stream_info->ctx_array_dma); 684 cleanup_ring_array: 685 kfree(stream_info->stream_rings); 686 cleanup_info: 687 kfree(stream_info); 688 cleanup_trbs: 689 xhci->cmd_ring_reserved_trbs--; 690 return NULL; 691 } 692 /* 693 * Sets the MaxPStreams field and the Linear Stream Array field. 694 * Sets the dequeue pointer to the stream context array. 695 */ 696 void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci, 697 struct xhci_ep_ctx *ep_ctx, 698 struct xhci_stream_info *stream_info) 699 { 700 u32 max_primary_streams; 701 /* MaxPStreams is the number of stream context array entries, not the 702 * number we're actually using. Must be in 2^(MaxPstreams + 1) format. 703 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 2, fls(0x100) = 3, etc. 
704 */ 705 max_primary_streams = fls(stream_info->num_stream_ctxs) - 2; 706 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, 707 "Setting number of stream ctx array entries to %u", 708 1 << (max_primary_streams + 1)); 709 ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK); 710 ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams) 711 | EP_HAS_LSA); 712 ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma); 713 } 714 715 /* 716 * Sets the MaxPStreams field and the Linear Stream Array field to 0. 717 * Reinstalls the "normal" endpoint ring (at its previous dequeue mark, 718 * not at the beginning of the ring). 719 */ 720 void xhci_setup_no_streams_ep_input_ctx(struct xhci_ep_ctx *ep_ctx, 721 struct xhci_virt_ep *ep) 722 { 723 dma_addr_t addr; 724 ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA)); 725 addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue); 726 ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state); 727 } 728 729 /* Frees all stream contexts associated with the endpoint, 730 * 731 * Caller should fix the endpoint context streams fields. 732 */ 733 void xhci_free_stream_info(struct xhci_hcd *xhci, 734 struct xhci_stream_info *stream_info) 735 { 736 int cur_stream; 737 struct xhci_ring *cur_ring; 738 739 if (!stream_info) 740 return; 741 742 for (cur_stream = 1; cur_stream < stream_info->num_streams; 743 cur_stream++) { 744 cur_ring = stream_info->stream_rings[cur_stream]; 745 if (cur_ring) { 746 xhci_ring_free(xhci, cur_ring); 747 stream_info->stream_rings[cur_stream] = NULL; 748 } 749 } 750 xhci_free_command(xhci, stream_info->free_streams_command); 751 xhci->cmd_ring_reserved_trbs--; 752 if (stream_info->stream_ctx_array) 753 xhci_free_stream_ctx(xhci, 754 stream_info->num_stream_ctxs, 755 stream_info->stream_ctx_array, 756 stream_info->ctx_array_dma); 757 758 kfree(stream_info->stream_rings); 759 kfree(stream_info); 760 } 761 762 763 /***************** Device context manipulation *************************/ 764 765 static void xhci_free_tt_info(struct xhci_hcd *xhci, 766 struct xhci_virt_device *virt_dev, 767 int slot_id) 768 { 769 struct list_head *tt_list_head; 770 struct xhci_tt_bw_info *tt_info, *next; 771 bool slot_found = false; 772 773 /* If the device never made it past the Set Address stage, 774 * it may not have the root hub port pointer set correctly. 
775 */ 776 if (!virt_dev->rhub_port) { 777 xhci_dbg(xhci, "Bad rhub port.\n"); 778 return; 779 } 780 781 tt_list_head = &(xhci->rh_bw[virt_dev->rhub_port->hw_portnum].tts); 782 list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) { 783 /* Multi-TT hubs will have more than one entry */ 784 if (tt_info->slot_id == slot_id) { 785 slot_found = true; 786 list_del(&tt_info->tt_list); 787 kfree(tt_info); 788 } else if (slot_found) { 789 break; 790 } 791 } 792 } 793 794 int xhci_alloc_tt_info(struct xhci_hcd *xhci, 795 struct xhci_virt_device *virt_dev, 796 struct usb_device *hdev, 797 struct usb_tt *tt, gfp_t mem_flags) 798 { 799 struct xhci_tt_bw_info *tt_info; 800 unsigned int num_ports; 801 int i, j; 802 struct device *dev = xhci_to_hcd(xhci)->self.sysdev; 803 804 if (!tt->multi) 805 num_ports = 1; 806 else 807 num_ports = hdev->maxchild; 808 809 for (i = 0; i < num_ports; i++, tt_info++) { 810 struct xhci_interval_bw_table *bw_table; 811 812 tt_info = kzalloc_node(sizeof(*tt_info), mem_flags, 813 dev_to_node(dev)); 814 if (!tt_info) 815 goto free_tts; 816 INIT_LIST_HEAD(&tt_info->tt_list); 817 list_add(&tt_info->tt_list, 818 &xhci->rh_bw[virt_dev->rhub_port->hw_portnum].tts); 819 tt_info->slot_id = virt_dev->udev->slot_id; 820 if (tt->multi) 821 tt_info->ttport = i+1; 822 bw_table = &tt_info->bw_table; 823 for (j = 0; j < XHCI_MAX_INTERVAL; j++) 824 INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints); 825 } 826 return 0; 827 828 free_tts: 829 xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id); 830 return -ENOMEM; 831 } 832 833 834 /* All the xhci_tds in the ring's TD list should be freed at this point. 835 * Should be called with xhci->lock held if there is any chance the TT lists 836 * will be manipulated by the configure endpoint, allocate device, or update 837 * hub functions while this function is removing the TT entries from the list. 838 */ 839 void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) 840 { 841 struct xhci_virt_device *dev; 842 int i; 843 int old_active_eps = 0; 844 845 /* Slot ID 0 is reserved */ 846 if (slot_id == 0 || !xhci->devs[slot_id]) 847 return; 848 849 dev = xhci->devs[slot_id]; 850 851 xhci->dcbaa->dev_context_ptrs[slot_id] = 0; 852 if (!dev) 853 return; 854 855 trace_xhci_free_virt_device(dev); 856 857 if (dev->tt_info) 858 old_active_eps = dev->tt_info->active_eps; 859 860 for (i = 0; i < 31; i++) { 861 if (dev->eps[i].ring) 862 xhci_ring_free(xhci, dev->eps[i].ring); 863 if (dev->eps[i].stream_info) 864 xhci_free_stream_info(xhci, 865 dev->eps[i].stream_info); 866 /* 867 * Endpoints are normally deleted from the bandwidth list when 868 * endpoints are dropped, before device is freed. 869 * If host is dying or being removed then endpoints aren't 870 * dropped cleanly, so delete the endpoint from list here. 871 * Only applicable for hosts with software bandwidth checking. 
872 */ 873 874 if (!list_empty(&dev->eps[i].bw_endpoint_list)) { 875 list_del_init(&dev->eps[i].bw_endpoint_list); 876 xhci_dbg(xhci, "Slot %u endpoint %u not removed from BW list!\n", 877 slot_id, i); 878 } 879 } 880 /* If this is a hub, free the TT(s) from the TT list */ 881 xhci_free_tt_info(xhci, dev, slot_id); 882 /* If necessary, update the number of active TTs on this root port */ 883 xhci_update_tt_active_eps(xhci, dev, old_active_eps); 884 885 if (dev->in_ctx) 886 xhci_free_container_ctx(xhci, dev->in_ctx); 887 if (dev->out_ctx) 888 xhci_free_container_ctx(xhci, dev->out_ctx); 889 890 if (dev->udev && dev->udev->slot_id) 891 dev->udev->slot_id = 0; 892 if (dev->rhub_port && dev->rhub_port->slot_id == slot_id) 893 dev->rhub_port->slot_id = 0; 894 kfree(xhci->devs[slot_id]); 895 xhci->devs[slot_id] = NULL; 896 } 897 898 /* 899 * Free a virt_device structure. 900 * If the virt_device added a tt_info (a hub) and has children pointing to 901 * that tt_info, then free the child first. Recursive. 902 * We can't rely on udev at this point to find child-parent relationships. 903 */ 904 static void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id) 905 { 906 struct xhci_virt_device *vdev; 907 struct list_head *tt_list_head; 908 struct xhci_tt_bw_info *tt_info, *next; 909 int i; 910 911 vdev = xhci->devs[slot_id]; 912 if (!vdev) 913 return; 914 915 if (!vdev->rhub_port) { 916 xhci_dbg(xhci, "Bad rhub port.\n"); 917 goto out; 918 } 919 920 tt_list_head = &(xhci->rh_bw[vdev->rhub_port->hw_portnum].tts); 921 list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) { 922 /* is this a hub device that added a tt_info to the tts list */ 923 if (tt_info->slot_id == slot_id) { 924 /* are any devices using this tt_info? */ 925 for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) { 926 vdev = xhci->devs[i]; 927 if (vdev && (vdev->tt_info == tt_info)) 928 xhci_free_virt_devices_depth_first( 929 xhci, i); 930 } 931 } 932 } 933 out: 934 /* we are now at a leaf device */ 935 xhci_debugfs_remove_slot(xhci, slot_id); 936 xhci_free_virt_device(xhci, slot_id); 937 } 938 939 int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, 940 struct usb_device *udev, gfp_t flags) 941 { 942 struct xhci_virt_device *dev; 943 int i; 944 945 /* Slot ID 0 is reserved */ 946 if (slot_id == 0 || xhci->devs[slot_id]) { 947 xhci_warn(xhci, "Bad Slot ID %d\n", slot_id); 948 return 0; 949 } 950 951 dev = kzalloc(sizeof(*dev), flags); 952 if (!dev) 953 return 0; 954 955 dev->slot_id = slot_id; 956 957 /* Allocate the (output) device context that will be used in the HC. 
*/ 958 dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags); 959 if (!dev->out_ctx) 960 goto fail; 961 962 xhci_dbg(xhci, "Slot %d output ctx = 0x%pad (dma)\n", slot_id, &dev->out_ctx->dma); 963 964 /* Allocate the (input) device context for address device command */ 965 dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags); 966 if (!dev->in_ctx) 967 goto fail; 968 969 xhci_dbg(xhci, "Slot %d input ctx = 0x%pad (dma)\n", slot_id, &dev->in_ctx->dma); 970 971 /* Initialize the cancellation and bandwidth list for each ep */ 972 for (i = 0; i < 31; i++) { 973 dev->eps[i].ep_index = i; 974 dev->eps[i].vdev = dev; 975 dev->eps[i].xhci = xhci; 976 INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list); 977 INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list); 978 } 979 980 /* Allocate endpoint 0 ring */ 981 dev->eps[0].ring = xhci_ring_alloc(xhci, 2, TYPE_CTRL, 0, flags); 982 if (!dev->eps[0].ring) 983 goto fail; 984 985 dev->udev = udev; 986 987 /* Point to output device context in dcbaa. */ 988 xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma); 989 xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n", 990 slot_id, 991 &xhci->dcbaa->dev_context_ptrs[slot_id], 992 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id])); 993 994 trace_xhci_alloc_virt_device(dev); 995 996 xhci->devs[slot_id] = dev; 997 998 return 1; 999 fail: 1000 1001 if (dev->in_ctx) 1002 xhci_free_container_ctx(xhci, dev->in_ctx); 1003 if (dev->out_ctx) 1004 xhci_free_container_ctx(xhci, dev->out_ctx); 1005 kfree(dev); 1006 1007 return 0; 1008 } 1009 1010 void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci, 1011 struct usb_device *udev) 1012 { 1013 struct xhci_virt_device *virt_dev; 1014 struct xhci_ep_ctx *ep0_ctx; 1015 struct xhci_ring *ep_ring; 1016 1017 virt_dev = xhci->devs[udev->slot_id]; 1018 ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0); 1019 ep_ring = virt_dev->eps[0].ring; 1020 /* 1021 * FIXME we don't keep track of the dequeue pointer very well after a 1022 * Set TR dequeue pointer, so we're setting the dequeue pointer of the 1023 * host to our enqueue pointer. This should only be called after a 1024 * configured device has reset, so all control transfers should have 1025 * been completed or cancelled before the reset. 1026 */ 1027 ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg, 1028 ep_ring->enqueue) 1029 | ep_ring->cycle_state); 1030 } 1031 1032 /* 1033 * The xHCI roothub may have ports of differing speeds in any order in the port 1034 * status registers. 1035 * 1036 * The xHCI hardware wants to know the roothub port that the USB device 1037 * is attached to (or the roothub port its ancestor hub is attached to). All we 1038 * know is the index of that port under either the USB 2.0 or the USB 3.0 1039 * roothub, but that doesn't give us the real index into the HW port status 1040 * registers. 
1041 */ 1042 static struct xhci_port *xhci_find_rhub_port(struct xhci_hcd *xhci, struct usb_device *udev) 1043 { 1044 struct usb_device *top_dev; 1045 struct xhci_hub *rhub; 1046 struct usb_hcd *hcd; 1047 1048 if (udev->speed >= USB_SPEED_SUPER) 1049 hcd = xhci_get_usb3_hcd(xhci); 1050 else 1051 hcd = xhci->main_hcd; 1052 1053 for (top_dev = udev; top_dev->parent && top_dev->parent->parent; 1054 top_dev = top_dev->parent) 1055 /* Found device below root hub */; 1056 1057 rhub = xhci_get_rhub(hcd); 1058 return rhub->ports[top_dev->portnum - 1]; 1059 } 1060 1061 /* Setup an xHCI virtual device for a Set Address command */ 1062 int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev) 1063 { 1064 struct xhci_virt_device *dev; 1065 struct xhci_ep_ctx *ep0_ctx; 1066 struct xhci_slot_ctx *slot_ctx; 1067 u32 max_packets; 1068 1069 dev = xhci->devs[udev->slot_id]; 1070 /* Slot ID 0 is reserved */ 1071 if (udev->slot_id == 0 || !dev) { 1072 xhci_warn(xhci, "Slot ID %d is not assigned to this device\n", 1073 udev->slot_id); 1074 return -EINVAL; 1075 } 1076 ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0); 1077 slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx); 1078 1079 /* 3) Only the control endpoint is valid - one endpoint context */ 1080 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route); 1081 switch (udev->speed) { 1082 case USB_SPEED_SUPER_PLUS: 1083 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SSP); 1084 max_packets = MAX_PACKET(512); 1085 break; 1086 case USB_SPEED_SUPER: 1087 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS); 1088 max_packets = MAX_PACKET(512); 1089 break; 1090 case USB_SPEED_HIGH: 1091 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS); 1092 max_packets = MAX_PACKET(64); 1093 break; 1094 /* USB core guesses at a 64-byte max packet first for FS devices */ 1095 case USB_SPEED_FULL: 1096 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS); 1097 max_packets = MAX_PACKET(64); 1098 break; 1099 case USB_SPEED_LOW: 1100 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS); 1101 max_packets = MAX_PACKET(8); 1102 break; 1103 default: 1104 /* Speed was set earlier, this shouldn't happen. */ 1105 return -EINVAL; 1106 } 1107 /* Find the root hub port this device is under */ 1108 dev->rhub_port = xhci_find_rhub_port(xhci, udev); 1109 if (!dev->rhub_port) 1110 return -EINVAL; 1111 /* Slot ID is set to the device directly below the root hub */ 1112 if (!udev->parent->parent) 1113 dev->rhub_port->slot_id = udev->slot_id; 1114 slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(dev->rhub_port->hw_portnum + 1)); 1115 xhci_dbg(xhci, "Slot ID %d: HW portnum %d, hcd portnum %d\n", 1116 udev->slot_id, dev->rhub_port->hw_portnum, dev->rhub_port->hcd_portnum); 1117 1118 /* Find the right bandwidth table that this device will be a part of. 1119 * If this is a full speed device attached directly to a root port (or a 1120 * decendent of one), it counts as a primary bandwidth domain, not a 1121 * secondary bandwidth domain under a TT. An xhci_tt_info structure 1122 * will never be created for the HS root hub. 1123 */ 1124 if (!udev->tt || !udev->tt->hub->parent) { 1125 dev->bw_table = &xhci->rh_bw[dev->rhub_port->hw_portnum].bw_table; 1126 } else { 1127 struct xhci_root_port_bw_info *rh_bw; 1128 struct xhci_tt_bw_info *tt_bw; 1129 1130 rh_bw = &xhci->rh_bw[dev->rhub_port->hw_portnum]; 1131 /* Find the right TT. 
*/ 1132 list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) { 1133 if (tt_bw->slot_id != udev->tt->hub->slot_id) 1134 continue; 1135 1136 if (!dev->udev->tt->multi || 1137 (udev->tt->multi && 1138 tt_bw->ttport == dev->udev->ttport)) { 1139 dev->bw_table = &tt_bw->bw_table; 1140 dev->tt_info = tt_bw; 1141 break; 1142 } 1143 } 1144 if (!dev->tt_info) 1145 xhci_warn(xhci, "WARN: Didn't find a matching TT\n"); 1146 } 1147 1148 /* Is this a LS/FS device under an external HS hub? */ 1149 if (udev->tt && udev->tt->hub->parent) { 1150 slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id | 1151 (udev->ttport << 8)); 1152 if (udev->tt->multi) 1153 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT); 1154 } 1155 xhci_dbg(xhci, "udev->tt = %p\n", udev->tt); 1156 xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport); 1157 1158 /* Step 4 - ring already allocated */ 1159 /* Step 5 */ 1160 ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP)); 1161 1162 /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */ 1163 ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) | 1164 max_packets); 1165 1166 ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma | 1167 dev->eps[0].ring->cycle_state); 1168 1169 trace_xhci_setup_addressable_virt_device(dev); 1170 1171 /* Steps 7 and 8 were done in xhci_alloc_virt_device() */ 1172 1173 return 0; 1174 } 1175 1176 /* 1177 * Convert interval expressed as 2^(bInterval - 1) == interval into 1178 * straight exponent value 2^n == interval. 1179 * 1180 */ 1181 static unsigned int xhci_parse_exponent_interval(struct usb_device *udev, 1182 struct usb_host_endpoint *ep) 1183 { 1184 unsigned int interval; 1185 1186 interval = clamp_val(ep->desc.bInterval, 1, 16) - 1; 1187 if (interval != ep->desc.bInterval - 1) 1188 dev_warn(&udev->dev, 1189 "ep %#x - rounding interval to %d %sframes\n", 1190 ep->desc.bEndpointAddress, 1191 1 << interval, 1192 udev->speed == USB_SPEED_FULL ? "" : "micro"); 1193 1194 if (udev->speed == USB_SPEED_FULL) { 1195 /* 1196 * Full speed isoc endpoints specify interval in frames, 1197 * not microframes. We are using microframes everywhere, 1198 * so adjust accordingly. 1199 */ 1200 interval += 3; /* 1 frame = 2^3 uframes */ 1201 } 1202 1203 return interval; 1204 } 1205 1206 /* 1207 * Convert bInterval expressed in microframes (in 1-255 range) to exponent of 1208 * microframes, rounded down to nearest power of 2. 1209 */ 1210 static unsigned int xhci_microframes_to_exponent(struct usb_device *udev, 1211 struct usb_host_endpoint *ep, unsigned int desc_interval, 1212 unsigned int min_exponent, unsigned int max_exponent) 1213 { 1214 unsigned int interval; 1215 1216 interval = fls(desc_interval) - 1; 1217 interval = clamp_val(interval, min_exponent, max_exponent); 1218 if ((1 << interval) != desc_interval) 1219 dev_dbg(&udev->dev, 1220 "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n", 1221 ep->desc.bEndpointAddress, 1222 1 << interval, 1223 desc_interval); 1224 1225 return interval; 1226 } 1227 1228 static unsigned int xhci_parse_microframe_interval(struct usb_device *udev, 1229 struct usb_host_endpoint *ep) 1230 { 1231 if (ep->desc.bInterval == 0) 1232 return 0; 1233 return xhci_microframes_to_exponent(udev, ep, 1234 ep->desc.bInterval, 0, 15); 1235 } 1236 1237 1238 static unsigned int xhci_parse_frame_interval(struct usb_device *udev, 1239 struct usb_host_endpoint *ep) 1240 { 1241 return xhci_microframes_to_exponent(udev, ep, 1242 ep->desc.bInterval * 8, 3, 10); 1243 } 1244 1245 /* Return the polling or NAK interval. 
1246 * 1247 * The polling interval is expressed in "microframes". If xHCI's Interval field 1248 * is set to N, it will service the endpoint every 2^(Interval)*125us. 1249 * 1250 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval 1251 * is set to 0. 1252 */ 1253 static unsigned int xhci_get_endpoint_interval(struct usb_device *udev, 1254 struct usb_host_endpoint *ep) 1255 { 1256 unsigned int interval = 0; 1257 1258 switch (udev->speed) { 1259 case USB_SPEED_HIGH: 1260 /* Max NAK rate */ 1261 if (usb_endpoint_xfer_control(&ep->desc) || 1262 usb_endpoint_xfer_bulk(&ep->desc)) { 1263 interval = xhci_parse_microframe_interval(udev, ep); 1264 break; 1265 } 1266 fallthrough; /* SS and HS isoc/int have same decoding */ 1267 1268 case USB_SPEED_SUPER_PLUS: 1269 case USB_SPEED_SUPER: 1270 if (usb_endpoint_xfer_int(&ep->desc) || 1271 usb_endpoint_xfer_isoc(&ep->desc)) { 1272 interval = xhci_parse_exponent_interval(udev, ep); 1273 } 1274 break; 1275 1276 case USB_SPEED_FULL: 1277 if (usb_endpoint_xfer_isoc(&ep->desc)) { 1278 interval = xhci_parse_exponent_interval(udev, ep); 1279 break; 1280 } 1281 /* 1282 * Fall through for interrupt endpoint interval decoding 1283 * since it uses the same rules as low speed interrupt 1284 * endpoints. 1285 */ 1286 fallthrough; 1287 1288 case USB_SPEED_LOW: 1289 if (usb_endpoint_xfer_int(&ep->desc) || 1290 usb_endpoint_xfer_isoc(&ep->desc)) { 1291 1292 interval = xhci_parse_frame_interval(udev, ep); 1293 } 1294 break; 1295 1296 default: 1297 BUG(); 1298 } 1299 return interval; 1300 } 1301 1302 /* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps. 1303 * High speed endpoint descriptors can define "the number of additional 1304 * transaction opportunities per microframe", but that goes in the Max Burst 1305 * endpoint context field. 1306 */ 1307 static u32 xhci_get_endpoint_mult(struct usb_device *udev, 1308 struct usb_host_endpoint *ep) 1309 { 1310 if (udev->speed < USB_SPEED_SUPER || 1311 !usb_endpoint_xfer_isoc(&ep->desc)) 1312 return 0; 1313 return ep->ss_ep_comp.bmAttributes; 1314 } 1315 1316 static u32 xhci_get_endpoint_max_burst(struct usb_device *udev, 1317 struct usb_host_endpoint *ep) 1318 { 1319 /* Super speed and Plus have max burst in ep companion desc */ 1320 if (udev->speed >= USB_SPEED_SUPER) 1321 return ep->ss_ep_comp.bMaxBurst; 1322 1323 if (udev->speed == USB_SPEED_HIGH && 1324 (usb_endpoint_xfer_isoc(&ep->desc) || 1325 usb_endpoint_xfer_int(&ep->desc))) 1326 return usb_endpoint_maxp_mult(&ep->desc) - 1; 1327 1328 return 0; 1329 } 1330 1331 static u32 xhci_get_endpoint_type(struct usb_host_endpoint *ep) 1332 { 1333 int in; 1334 1335 in = usb_endpoint_dir_in(&ep->desc); 1336 1337 switch (usb_endpoint_type(&ep->desc)) { 1338 case USB_ENDPOINT_XFER_CONTROL: 1339 return CTRL_EP; 1340 case USB_ENDPOINT_XFER_BULK: 1341 return in ? BULK_IN_EP : BULK_OUT_EP; 1342 case USB_ENDPOINT_XFER_ISOC: 1343 return in ? ISOC_IN_EP : ISOC_OUT_EP; 1344 case USB_ENDPOINT_XFER_INT: 1345 return in ? INT_IN_EP : INT_OUT_EP; 1346 } 1347 return 0; 1348 } 1349 1350 /* Return the maximum endpoint service interval time (ESIT) payload. 1351 * Basically, this is the maxpacket size, multiplied by the burst size 1352 * and mult size. 
1353 */ 1354 static u32 xhci_get_max_esit_payload(struct usb_device *udev, 1355 struct usb_host_endpoint *ep) 1356 { 1357 int max_burst; 1358 int max_packet; 1359 1360 /* Only applies for interrupt or isochronous endpoints */ 1361 if (usb_endpoint_xfer_control(&ep->desc) || 1362 usb_endpoint_xfer_bulk(&ep->desc)) 1363 return 0; 1364 1365 /* SuperSpeedPlus Isoc ep sending over 48k per esit */ 1366 if ((udev->speed >= USB_SPEED_SUPER_PLUS) && 1367 USB_SS_SSP_ISOC_COMP(ep->ss_ep_comp.bmAttributes)) 1368 return le32_to_cpu(ep->ssp_isoc_ep_comp.dwBytesPerInterval); 1369 1370 /* SuperSpeed or SuperSpeedPlus Isoc ep with less than 48k per esit */ 1371 if (udev->speed >= USB_SPEED_SUPER) 1372 return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval); 1373 1374 max_packet = usb_endpoint_maxp(&ep->desc); 1375 max_burst = usb_endpoint_maxp_mult(&ep->desc); 1376 /* A 0 in max burst means 1 transfer per ESIT */ 1377 return max_packet * max_burst; 1378 } 1379 1380 /* Set up an endpoint with one ring segment. Do not allocate stream rings. 1381 * Drivers will have to call usb_alloc_streams() to do that. 1382 */ 1383 int xhci_endpoint_init(struct xhci_hcd *xhci, 1384 struct xhci_virt_device *virt_dev, 1385 struct usb_device *udev, 1386 struct usb_host_endpoint *ep, 1387 gfp_t mem_flags) 1388 { 1389 unsigned int ep_index; 1390 struct xhci_ep_ctx *ep_ctx; 1391 struct xhci_ring *ep_ring; 1392 unsigned int max_packet; 1393 enum xhci_ring_type ring_type; 1394 u32 max_esit_payload; 1395 u32 endpoint_type; 1396 unsigned int max_burst; 1397 unsigned int interval; 1398 unsigned int mult; 1399 unsigned int avg_trb_len; 1400 unsigned int err_count = 0; 1401 1402 ep_index = xhci_get_endpoint_index(&ep->desc); 1403 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); 1404 1405 endpoint_type = xhci_get_endpoint_type(ep); 1406 if (!endpoint_type) 1407 return -EINVAL; 1408 1409 ring_type = usb_endpoint_type(&ep->desc); 1410 1411 /* 1412 * Get values to fill the endpoint context, mostly from ep descriptor. 1413 * The average TRB buffer lengt for bulk endpoints is unclear as we 1414 * have no clue on scatter gather list entry size. For Isoc and Int, 1415 * set it to max available. See xHCI 1.1 spec 4.14.1.1 for details. 
1416 */ 1417 max_esit_payload = xhci_get_max_esit_payload(udev, ep); 1418 interval = xhci_get_endpoint_interval(udev, ep); 1419 1420 /* Periodic endpoint bInterval limit quirk */ 1421 if (usb_endpoint_xfer_int(&ep->desc) || 1422 usb_endpoint_xfer_isoc(&ep->desc)) { 1423 if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_7) && 1424 udev->speed >= USB_SPEED_HIGH && 1425 interval >= 7) { 1426 interval = 6; 1427 } 1428 } 1429 1430 mult = xhci_get_endpoint_mult(udev, ep); 1431 max_packet = usb_endpoint_maxp(&ep->desc); 1432 max_burst = xhci_get_endpoint_max_burst(udev, ep); 1433 avg_trb_len = max_esit_payload; 1434 1435 /* FIXME dig Mult and streams info out of ep companion desc */ 1436 1437 /* Allow 3 retries for everything but isoc, set CErr = 3 */ 1438 if (!usb_endpoint_xfer_isoc(&ep->desc)) 1439 err_count = 3; 1440 /* HS bulk max packet should be 512, FS bulk supports 8, 16, 32 or 64 */ 1441 if (usb_endpoint_xfer_bulk(&ep->desc)) { 1442 if (udev->speed == USB_SPEED_HIGH) 1443 max_packet = 512; 1444 if (udev->speed == USB_SPEED_FULL) { 1445 max_packet = rounddown_pow_of_two(max_packet); 1446 max_packet = clamp_val(max_packet, 8, 64); 1447 } 1448 } 1449 /* xHCI 1.0 and 1.1 indicates that ctrl ep avg TRB Length should be 8 */ 1450 if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100) 1451 avg_trb_len = 8; 1452 /* xhci 1.1 with LEC support doesn't use mult field, use RsvdZ */ 1453 if ((xhci->hci_version > 0x100) && HCC2_LEC(xhci->hcc_params2)) 1454 mult = 0; 1455 1456 /* Set up the endpoint ring */ 1457 virt_dev->eps[ep_index].new_ring = 1458 xhci_ring_alloc(xhci, 2, ring_type, max_packet, mem_flags); 1459 if (!virt_dev->eps[ep_index].new_ring) 1460 return -ENOMEM; 1461 1462 virt_dev->eps[ep_index].skip = false; 1463 ep_ring = virt_dev->eps[ep_index].new_ring; 1464 1465 /* Fill the endpoint context */ 1466 ep_ctx->ep_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload) | 1467 EP_INTERVAL(interval) | 1468 EP_MULT(mult)); 1469 ep_ctx->ep_info2 = cpu_to_le32(EP_TYPE(endpoint_type) | 1470 MAX_PACKET(max_packet) | 1471 MAX_BURST(max_burst) | 1472 ERROR_COUNT(err_count)); 1473 ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma | 1474 ep_ring->cycle_state); 1475 1476 ep_ctx->tx_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_LO(max_esit_payload) | 1477 EP_AVG_TRB_LENGTH(avg_trb_len)); 1478 1479 return 0; 1480 } 1481 1482 void xhci_endpoint_zero(struct xhci_hcd *xhci, 1483 struct xhci_virt_device *virt_dev, 1484 struct usb_host_endpoint *ep) 1485 { 1486 unsigned int ep_index; 1487 struct xhci_ep_ctx *ep_ctx; 1488 1489 ep_index = xhci_get_endpoint_index(&ep->desc); 1490 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); 1491 1492 ep_ctx->ep_info = 0; 1493 ep_ctx->ep_info2 = 0; 1494 ep_ctx->deq = 0; 1495 ep_ctx->tx_info = 0; 1496 /* Don't free the endpoint ring until the set interface or configuration 1497 * request succeeds. 
1498 */ 1499 } 1500 1501 void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info) 1502 { 1503 bw_info->ep_interval = 0; 1504 bw_info->mult = 0; 1505 bw_info->num_packets = 0; 1506 bw_info->max_packet_size = 0; 1507 bw_info->type = 0; 1508 bw_info->max_esit_payload = 0; 1509 } 1510 1511 void xhci_update_bw_info(struct xhci_hcd *xhci, 1512 struct xhci_container_ctx *in_ctx, 1513 struct xhci_input_control_ctx *ctrl_ctx, 1514 struct xhci_virt_device *virt_dev) 1515 { 1516 struct xhci_bw_info *bw_info; 1517 struct xhci_ep_ctx *ep_ctx; 1518 unsigned int ep_type; 1519 int i; 1520 1521 for (i = 1; i < 31; i++) { 1522 bw_info = &virt_dev->eps[i].bw_info; 1523 1524 /* We can't tell what endpoint type is being dropped, but 1525 * unconditionally clearing the bandwidth info for non-periodic 1526 * endpoints should be harmless because the info will never be 1527 * set in the first place. 1528 */ 1529 if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) { 1530 /* Dropped endpoint */ 1531 xhci_clear_endpoint_bw_info(bw_info); 1532 continue; 1533 } 1534 1535 if (EP_IS_ADDED(ctrl_ctx, i)) { 1536 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i); 1537 ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2)); 1538 1539 /* Ignore non-periodic endpoints */ 1540 if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP && 1541 ep_type != ISOC_IN_EP && 1542 ep_type != INT_IN_EP) 1543 continue; 1544 1545 /* Added or changed endpoint */ 1546 bw_info->ep_interval = CTX_TO_EP_INTERVAL( 1547 le32_to_cpu(ep_ctx->ep_info)); 1548 /* Number of packets and mult are zero-based in the 1549 * input context, but we want one-based for the 1550 * interval table. 1551 */ 1552 bw_info->mult = CTX_TO_EP_MULT( 1553 le32_to_cpu(ep_ctx->ep_info)) + 1; 1554 bw_info->num_packets = CTX_TO_MAX_BURST( 1555 le32_to_cpu(ep_ctx->ep_info2)) + 1; 1556 bw_info->max_packet_size = MAX_PACKET_DECODED( 1557 le32_to_cpu(ep_ctx->ep_info2)); 1558 bw_info->type = ep_type; 1559 bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD( 1560 le32_to_cpu(ep_ctx->tx_info)); 1561 } 1562 } 1563 } 1564 1565 /* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy. 1566 * Useful when you want to change one particular aspect of the endpoint and then 1567 * issue a configure endpoint command. 1568 */ 1569 void xhci_endpoint_copy(struct xhci_hcd *xhci, 1570 struct xhci_container_ctx *in_ctx, 1571 struct xhci_container_ctx *out_ctx, 1572 unsigned int ep_index) 1573 { 1574 struct xhci_ep_ctx *out_ep_ctx; 1575 struct xhci_ep_ctx *in_ep_ctx; 1576 1577 out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); 1578 in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); 1579 1580 in_ep_ctx->ep_info = out_ep_ctx->ep_info; 1581 in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2; 1582 in_ep_ctx->deq = out_ep_ctx->deq; 1583 in_ep_ctx->tx_info = out_ep_ctx->tx_info; 1584 if (xhci->quirks & XHCI_MTK_HOST) { 1585 in_ep_ctx->reserved[0] = out_ep_ctx->reserved[0]; 1586 in_ep_ctx->reserved[1] = out_ep_ctx->reserved[1]; 1587 } 1588 } 1589 1590 /* Copy output xhci_slot_ctx to the input xhci_slot_ctx. 1591 * Useful when you want to change one particular aspect of the endpoint and then 1592 * issue a configure endpoint command. Only the context entries field matters, 1593 * but we'll copy the whole thing anyway. 
1594 */ 1595 void xhci_slot_copy(struct xhci_hcd *xhci, 1596 struct xhci_container_ctx *in_ctx, 1597 struct xhci_container_ctx *out_ctx) 1598 { 1599 struct xhci_slot_ctx *in_slot_ctx; 1600 struct xhci_slot_ctx *out_slot_ctx; 1601 1602 in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx); 1603 out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx); 1604 1605 in_slot_ctx->dev_info = out_slot_ctx->dev_info; 1606 in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2; 1607 in_slot_ctx->tt_info = out_slot_ctx->tt_info; 1608 in_slot_ctx->dev_state = out_slot_ctx->dev_state; 1609 } 1610 1611 /* Set up the scratchpad buffer array and scratchpad buffers, if needed. */ 1612 static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags) 1613 { 1614 int i; 1615 struct device *dev = xhci_to_hcd(xhci)->self.sysdev; 1616 int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2); 1617 1618 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 1619 "Allocating %d scratchpad buffers", num_sp); 1620 1621 if (!num_sp) 1622 return 0; 1623 1624 xhci->scratchpad = kzalloc_node(sizeof(*xhci->scratchpad), flags, 1625 dev_to_node(dev)); 1626 if (!xhci->scratchpad) 1627 goto fail_sp; 1628 1629 xhci->scratchpad->sp_array = dma_alloc_coherent(dev, 1630 array_size(sizeof(u64), num_sp), 1631 &xhci->scratchpad->sp_dma, flags); 1632 if (!xhci->scratchpad->sp_array) 1633 goto fail_sp2; 1634 1635 xhci->scratchpad->sp_buffers = kcalloc_node(num_sp, sizeof(void *), 1636 flags, dev_to_node(dev)); 1637 if (!xhci->scratchpad->sp_buffers) 1638 goto fail_sp3; 1639 1640 xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma); 1641 for (i = 0; i < num_sp; i++) { 1642 dma_addr_t dma; 1643 void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma, 1644 flags); 1645 if (!buf) 1646 goto fail_sp4; 1647 1648 xhci->scratchpad->sp_array[i] = dma; 1649 xhci->scratchpad->sp_buffers[i] = buf; 1650 } 1651 1652 return 0; 1653 1654 fail_sp4: 1655 while (i--) 1656 dma_free_coherent(dev, xhci->page_size, 1657 xhci->scratchpad->sp_buffers[i], 1658 xhci->scratchpad->sp_array[i]); 1659 1660 kfree(xhci->scratchpad->sp_buffers); 1661 1662 fail_sp3: 1663 dma_free_coherent(dev, array_size(sizeof(u64), num_sp), 1664 xhci->scratchpad->sp_array, 1665 xhci->scratchpad->sp_dma); 1666 1667 fail_sp2: 1668 kfree(xhci->scratchpad); 1669 xhci->scratchpad = NULL; 1670 1671 fail_sp: 1672 return -ENOMEM; 1673 } 1674 1675 static void scratchpad_free(struct xhci_hcd *xhci) 1676 { 1677 int num_sp; 1678 int i; 1679 struct device *dev = xhci_to_hcd(xhci)->self.sysdev; 1680 1681 if (!xhci->scratchpad) 1682 return; 1683 1684 num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2); 1685 1686 for (i = 0; i < num_sp; i++) { 1687 dma_free_coherent(dev, xhci->page_size, 1688 xhci->scratchpad->sp_buffers[i], 1689 xhci->scratchpad->sp_array[i]); 1690 } 1691 kfree(xhci->scratchpad->sp_buffers); 1692 dma_free_coherent(dev, array_size(sizeof(u64), num_sp), 1693 xhci->scratchpad->sp_array, 1694 xhci->scratchpad->sp_dma); 1695 kfree(xhci->scratchpad); 1696 xhci->scratchpad = NULL; 1697 } 1698 1699 struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci, 1700 bool allocate_completion, gfp_t mem_flags) 1701 { 1702 struct xhci_command *command; 1703 struct device *dev = xhci_to_hcd(xhci)->self.sysdev; 1704 1705 command = kzalloc_node(sizeof(*command), mem_flags, dev_to_node(dev)); 1706 if (!command) 1707 return NULL; 1708 1709 if (allocate_completion) { 1710 command->completion = 1711 kzalloc_node(sizeof(struct completion), mem_flags, 1712 dev_to_node(dev)); 1713 if (!command->completion) { 1714 kfree(command); 
1715 return NULL; 1716 } 1717 init_completion(command->completion); 1718 } 1719 1720 command->status = 0; 1721 /* set default timeout to 5000 ms */ 1722 command->timeout_ms = XHCI_CMD_DEFAULT_TIMEOUT; 1723 INIT_LIST_HEAD(&command->cmd_list); 1724 return command; 1725 } 1726 1727 struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci, 1728 bool allocate_completion, gfp_t mem_flags) 1729 { 1730 struct xhci_command *command; 1731 1732 command = xhci_alloc_command(xhci, allocate_completion, mem_flags); 1733 if (!command) 1734 return NULL; 1735 1736 command->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, 1737 mem_flags); 1738 if (!command->in_ctx) { 1739 kfree(command->completion); 1740 kfree(command); 1741 return NULL; 1742 } 1743 return command; 1744 } 1745 1746 void xhci_urb_free_priv(struct urb_priv *urb_priv) 1747 { 1748 kfree(urb_priv); 1749 } 1750 1751 void xhci_free_command(struct xhci_hcd *xhci, 1752 struct xhci_command *command) 1753 { 1754 xhci_free_container_ctx(xhci, 1755 command->in_ctx); 1756 kfree(command->completion); 1757 kfree(command); 1758 } 1759 1760 static int xhci_alloc_erst(struct xhci_hcd *xhci, 1761 struct xhci_ring *evt_ring, 1762 struct xhci_erst *erst, 1763 gfp_t flags) 1764 { 1765 size_t size; 1766 unsigned int val; 1767 struct xhci_segment *seg; 1768 struct xhci_erst_entry *entry; 1769 1770 size = array_size(sizeof(struct xhci_erst_entry), evt_ring->num_segs); 1771 erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev, 1772 size, &erst->erst_dma_addr, flags); 1773 if (!erst->entries) 1774 return -ENOMEM; 1775 1776 erst->num_entries = evt_ring->num_segs; 1777 1778 seg = evt_ring->first_seg; 1779 for (val = 0; val < evt_ring->num_segs; val++) { 1780 entry = &erst->entries[val]; 1781 entry->seg_addr = cpu_to_le64(seg->dma); 1782 entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT); 1783 entry->rsvd = 0; 1784 seg = seg->next; 1785 } 1786 1787 return 0; 1788 } 1789 1790 static void 1791 xhci_remove_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir) 1792 { 1793 u32 tmp; 1794 1795 if (!ir) 1796 return; 1797 1798 /* 1799 * Clean out interrupter registers except ERSTBA. Clearing either the 1800 * low or high 32 bits of ERSTBA immediately causes the controller to 1801 * dereference the partially cleared 64 bit address, causing IOMMU error. 
1802 */ 1803 if (ir->ir_set) { 1804 tmp = readl(&ir->ir_set->erst_size); 1805 tmp &= ERST_SIZE_MASK; 1806 writel(tmp, &ir->ir_set->erst_size); 1807 1808 xhci_write_64(xhci, ERST_EHB, &ir->ir_set->erst_dequeue); 1809 } 1810 } 1811 1812 static void 1813 xhci_free_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir) 1814 { 1815 struct device *dev = xhci_to_hcd(xhci)->self.sysdev; 1816 size_t erst_size; 1817 1818 if (!ir) 1819 return; 1820 1821 erst_size = array_size(sizeof(struct xhci_erst_entry), ir->erst.num_entries); 1822 if (ir->erst.entries) 1823 dma_free_coherent(dev, erst_size, 1824 ir->erst.entries, 1825 ir->erst.erst_dma_addr); 1826 ir->erst.entries = NULL; 1827 1828 /* free interrupter event ring */ 1829 if (ir->event_ring) 1830 xhci_ring_free(xhci, ir->event_ring); 1831 1832 ir->event_ring = NULL; 1833 1834 kfree(ir); 1835 } 1836 1837 void xhci_remove_secondary_interrupter(struct usb_hcd *hcd, struct xhci_interrupter *ir) 1838 { 1839 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 1840 unsigned int intr_num; 1841 1842 spin_lock_irq(&xhci->lock); 1843 1844 /* interrupter 0 is primary interrupter, don't touch it */ 1845 if (!ir || !ir->intr_num || ir->intr_num >= xhci->max_interrupters) { 1846 xhci_dbg(xhci, "Invalid secondary interrupter, can't remove\n"); 1847 spin_unlock_irq(&xhci->lock); 1848 return; 1849 } 1850 1851 intr_num = ir->intr_num; 1852 1853 xhci_remove_interrupter(xhci, ir); 1854 xhci->interrupters[intr_num] = NULL; 1855 1856 spin_unlock_irq(&xhci->lock); 1857 1858 xhci_free_interrupter(xhci, ir); 1859 } 1860 EXPORT_SYMBOL_GPL(xhci_remove_secondary_interrupter); 1861 1862 void xhci_mem_cleanup(struct xhci_hcd *xhci) 1863 { 1864 struct device *dev = xhci_to_hcd(xhci)->self.sysdev; 1865 int i, j, num_ports; 1866 1867 cancel_delayed_work_sync(&xhci->cmd_timer); 1868 1869 for (i = 0; xhci->interrupters && i < xhci->max_interrupters; i++) { 1870 if (xhci->interrupters[i]) { 1871 xhci_remove_interrupter(xhci, xhci->interrupters[i]); 1872 xhci_free_interrupter(xhci, xhci->interrupters[i]); 1873 xhci->interrupters[i] = NULL; 1874 } 1875 } 1876 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed interrupters"); 1877 1878 if (xhci->cmd_ring) 1879 xhci_ring_free(xhci, xhci->cmd_ring); 1880 xhci->cmd_ring = NULL; 1881 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring"); 1882 xhci_cleanup_command_queue(xhci); 1883 1884 num_ports = HCS_MAX_PORTS(xhci->hcs_params1); 1885 for (i = 0; i < num_ports && xhci->rh_bw; i++) { 1886 struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table; 1887 for (j = 0; j < XHCI_MAX_INTERVAL; j++) { 1888 struct list_head *ep = &bwt->interval_bw[j].endpoints; 1889 while (!list_empty(ep)) 1890 list_del_init(ep->next); 1891 } 1892 } 1893 1894 for (i = HCS_MAX_SLOTS(xhci->hcs_params1); i > 0; i--) 1895 xhci_free_virt_devices_depth_first(xhci, i); 1896 1897 dma_pool_destroy(xhci->segment_pool); 1898 xhci->segment_pool = NULL; 1899 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool"); 1900 1901 dma_pool_destroy(xhci->device_pool); 1902 xhci->device_pool = NULL; 1903 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool"); 1904 1905 dma_pool_destroy(xhci->small_streams_pool); 1906 xhci->small_streams_pool = NULL; 1907 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 1908 "Freed small stream array pool"); 1909 1910 dma_pool_destroy(xhci->medium_streams_pool); 1911 xhci->medium_streams_pool = NULL; 1912 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 1913 "Freed medium stream array pool"); 1914 1915 if (xhci->dcbaa) 1916 
dma_free_coherent(dev, sizeof(*xhci->dcbaa), 1917 xhci->dcbaa, xhci->dcbaa->dma); 1918 xhci->dcbaa = NULL; 1919 1920 scratchpad_free(xhci); 1921 1922 if (!xhci->rh_bw) 1923 goto no_bw; 1924 1925 for (i = 0; i < num_ports; i++) { 1926 struct xhci_tt_bw_info *tt, *n; 1927 list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) { 1928 list_del(&tt->tt_list); 1929 kfree(tt); 1930 } 1931 } 1932 1933 no_bw: 1934 xhci->cmd_ring_reserved_trbs = 0; 1935 xhci->usb2_rhub.num_ports = 0; 1936 xhci->usb3_rhub.num_ports = 0; 1937 xhci->num_active_eps = 0; 1938 kfree(xhci->usb2_rhub.ports); 1939 kfree(xhci->usb3_rhub.ports); 1940 kfree(xhci->hw_ports); 1941 kfree(xhci->rh_bw); 1942 for (i = 0; i < xhci->num_port_caps; i++) 1943 kfree(xhci->port_caps[i].psi); 1944 kfree(xhci->port_caps); 1945 kfree(xhci->interrupters); 1946 xhci->num_port_caps = 0; 1947 1948 xhci->usb2_rhub.ports = NULL; 1949 xhci->usb3_rhub.ports = NULL; 1950 xhci->hw_ports = NULL; 1951 xhci->rh_bw = NULL; 1952 xhci->port_caps = NULL; 1953 xhci->interrupters = NULL; 1954 1955 xhci->page_size = 0; 1956 xhci->page_shift = 0; 1957 xhci->usb2_rhub.bus_state.bus_suspended = 0; 1958 xhci->usb3_rhub.bus_state.bus_suspended = 0; 1959 } 1960 1961 static void xhci_set_hc_event_deq(struct xhci_hcd *xhci, struct xhci_interrupter *ir) 1962 { 1963 dma_addr_t deq; 1964 1965 deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg, 1966 ir->event_ring->dequeue); 1967 if (!deq) 1968 xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr.\n"); 1969 /* Update HC event ring dequeue pointer */ 1970 /* Don't clear the EHB bit (which is RW1C) because 1971 * there might be more events to service. 1972 */ 1973 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 1974 "// Write event ring dequeue pointer, preserving EHB bit"); 1975 xhci_write_64(xhci, deq & ERST_PTR_MASK, &ir->ir_set->erst_dequeue); 1976 } 1977 1978 static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, 1979 __le32 __iomem *addr, int max_caps) 1980 { 1981 u32 temp, port_offset, port_count; 1982 int i; 1983 u8 major_revision, minor_revision, tmp_minor_revision; 1984 struct xhci_hub *rhub; 1985 struct device *dev = xhci_to_hcd(xhci)->self.sysdev; 1986 struct xhci_port_cap *port_cap; 1987 1988 temp = readl(addr); 1989 major_revision = XHCI_EXT_PORT_MAJOR(temp); 1990 minor_revision = XHCI_EXT_PORT_MINOR(temp); 1991 1992 if (major_revision == 0x03) { 1993 rhub = &xhci->usb3_rhub; 1994 /* 1995 * Some hosts incorrectly use sub-minor version for minor 1996 * version (i.e. 0x02 instead of 0x20 for bcdUSB 0x320 and 0x01 1997 * for bcdUSB 0x310). Since there is no USB release with 1998 * bcdUSB 0x301 to 0x309, we can assume that they are 1999 * incorrect and fix it here. 2000 */ 2001 if (minor_revision > 0x00 && minor_revision < 0x10) 2002 minor_revision <<= 4; 2003 /* 2004 * Some Zhaoxin xHCI controllers follow the USB 3.1 spec 2005 * but only support Gen1 speeds. 2006 */ 2007 if (xhci->quirks & XHCI_ZHAOXIN_HOST) { 2008 tmp_minor_revision = minor_revision; 2009 minor_revision = 0; 2010 } 2011 2012 } else if (major_revision <= 0x02) { 2013 rhub = &xhci->usb2_rhub; 2014 } else { 2015 xhci_warn(xhci, "Ignoring unknown port speed, Ext Cap %p, revision = 0x%x\n", 2016 addr, major_revision); 2017 /* Ignoring port protocol we can't understand.
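Returning here only skips this one Supported Protocol Capability; the caller in xhci_setup_port_arrays() keeps walking the remaining extended capabilities.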
FIXME */ 2018 return; 2019 } 2020 2021 /* Port offset and count in the third dword, see section 7.2 */ 2022 temp = readl(addr + 2); 2023 port_offset = XHCI_EXT_PORT_OFF(temp); 2024 port_count = XHCI_EXT_PORT_COUNT(temp); 2025 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2026 "Ext Cap %p, port offset = %u, count = %u, revision = 0x%x", 2027 addr, port_offset, port_count, major_revision); 2028 /* Port count includes the current port offset */ 2029 if (port_offset == 0 || (port_offset + port_count - 1) > num_ports) 2030 /* WTF? "Valid values are ‘1’ to MaxPorts" */ 2031 return; 2032 2033 port_cap = &xhci->port_caps[xhci->num_port_caps++]; 2034 if (xhci->num_port_caps > max_caps) 2035 return; 2036 2037 port_cap->psi_count = XHCI_EXT_PORT_PSIC(temp); 2038 2039 if (port_cap->psi_count) { 2040 port_cap->psi = kcalloc_node(port_cap->psi_count, 2041 sizeof(*port_cap->psi), 2042 GFP_KERNEL, dev_to_node(dev)); 2043 if (!port_cap->psi) 2044 port_cap->psi_count = 0; 2045 2046 port_cap->psi_uid_count++; 2047 for (i = 0; i < port_cap->psi_count; i++) { 2048 port_cap->psi[i] = readl(addr + 4 + i); 2049 2050 /* count unique ID values, two consecutive entries can 2051 * have the same ID if link is asymmetric 2052 */ 2053 if (i && (XHCI_EXT_PORT_PSIV(port_cap->psi[i]) != 2054 XHCI_EXT_PORT_PSIV(port_cap->psi[i - 1]))) 2055 port_cap->psi_uid_count++; 2056 2057 if (xhci->quirks & XHCI_ZHAOXIN_HOST && 2058 major_revision == 0x03 && 2059 XHCI_EXT_PORT_PSIV(port_cap->psi[i]) >= 5) 2060 minor_revision = tmp_minor_revision; 2061 2062 xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n", 2063 XHCI_EXT_PORT_PSIV(port_cap->psi[i]), 2064 XHCI_EXT_PORT_PSIE(port_cap->psi[i]), 2065 XHCI_EXT_PORT_PLT(port_cap->psi[i]), 2066 XHCI_EXT_PORT_PFD(port_cap->psi[i]), 2067 XHCI_EXT_PORT_LP(port_cap->psi[i]), 2068 XHCI_EXT_PORT_PSIM(port_cap->psi[i])); 2069 } 2070 } 2071 2072 rhub->maj_rev = major_revision; 2073 2074 if (rhub->min_rev < minor_revision) 2075 rhub->min_rev = minor_revision; 2076 2077 port_cap->maj_rev = major_revision; 2078 port_cap->min_rev = minor_revision; 2079 port_cap->protocol_caps = temp; 2080 2081 if ((xhci->hci_version >= 0x100) && (major_revision != 0x03) && 2082 (temp & XHCI_HLC)) { 2083 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2084 "xHCI 1.0: support USB2 hardware lpm"); 2085 xhci->hw_lpm_support = 1; 2086 } 2087 2088 port_offset--; 2089 for (i = port_offset; i < (port_offset + port_count); i++) { 2090 struct xhci_port *hw_port = &xhci->hw_ports[i]; 2091 /* Duplicate entry. Ignore the port if the revisions differ. */ 2092 if (hw_port->rhub) { 2093 xhci_warn(xhci, "Duplicate port entry, Ext Cap %p, port %u\n", addr, i); 2094 xhci_warn(xhci, "Port was marked as USB %u, duplicated as USB %u\n", 2095 hw_port->rhub->maj_rev, major_revision); 2096 /* Only adjust the roothub port counts if we haven't 2097 * found a similar duplicate. 2098 */ 2099 if (hw_port->rhub != rhub && 2100 hw_port->hcd_portnum != DUPLICATE_ENTRY) { 2101 hw_port->rhub->num_ports--; 2102 hw_port->hcd_portnum = DUPLICATE_ENTRY; 2103 } 2104 continue; 2105 } 2106 hw_port->rhub = rhub; 2107 hw_port->port_cap = port_cap; 2108 rhub->num_ports++; 2109 } 2110 /* FIXME: Should we disable ports not in the Extended Capabilities?
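For now such ports simply keep hw_port->rhub == NULL, so xhci_create_rhub_port_array() skips them and they are never exposed to the USB core.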
*/ 2111 } 2112 2113 static void xhci_create_rhub_port_array(struct xhci_hcd *xhci, 2114 struct xhci_hub *rhub, gfp_t flags) 2115 { 2116 int port_index = 0; 2117 int i; 2118 struct device *dev = xhci_to_hcd(xhci)->self.sysdev; 2119 2120 if (!rhub->num_ports) 2121 return; 2122 rhub->ports = kcalloc_node(rhub->num_ports, sizeof(*rhub->ports), 2123 flags, dev_to_node(dev)); 2124 if (!rhub->ports) 2125 return; 2126 2127 for (i = 0; i < HCS_MAX_PORTS(xhci->hcs_params1); i++) { 2128 if (xhci->hw_ports[i].rhub != rhub || 2129 xhci->hw_ports[i].hcd_portnum == DUPLICATE_ENTRY) 2130 continue; 2131 xhci->hw_ports[i].hcd_portnum = port_index; 2132 rhub->ports[port_index] = &xhci->hw_ports[i]; 2133 port_index++; 2134 if (port_index == rhub->num_ports) 2135 break; 2136 } 2137 } 2138 2139 /* 2140 * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that 2141 * specify what speeds each port is supposed to be. We can't count on the port 2142 * speed bits in the PORTSC register being correct until a device is connected, 2143 * but we need to set up the two fake roothubs with the correct number of USB 2144 * 3.0 and USB 2.0 ports at host controller initialization time. 2145 */ 2146 static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags) 2147 { 2148 void __iomem *base; 2149 u32 offset; 2150 unsigned int num_ports; 2151 int i, j; 2152 int cap_count = 0; 2153 u32 cap_start; 2154 struct device *dev = xhci_to_hcd(xhci)->self.sysdev; 2155 2156 num_ports = HCS_MAX_PORTS(xhci->hcs_params1); 2157 xhci->hw_ports = kcalloc_node(num_ports, sizeof(*xhci->hw_ports), 2158 flags, dev_to_node(dev)); 2159 if (!xhci->hw_ports) 2160 return -ENOMEM; 2161 2162 for (i = 0; i < num_ports; i++) { 2163 xhci->hw_ports[i].addr = &xhci->op_regs->port_status_base + 2164 NUM_PORT_REGS * i; 2165 xhci->hw_ports[i].hw_portnum = i; 2166 2167 init_completion(&xhci->hw_ports[i].rexit_done); 2168 init_completion(&xhci->hw_ports[i].u3exit_done); 2169 } 2170 2171 xhci->rh_bw = kcalloc_node(num_ports, sizeof(*xhci->rh_bw), flags, 2172 dev_to_node(dev)); 2173 if (!xhci->rh_bw) 2174 return -ENOMEM; 2175 for (i = 0; i < num_ports; i++) { 2176 struct xhci_interval_bw_table *bw_table; 2177 2178 INIT_LIST_HEAD(&xhci->rh_bw[i].tts); 2179 bw_table = &xhci->rh_bw[i].bw_table; 2180 for (j = 0; j < XHCI_MAX_INTERVAL; j++) 2181 INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints); 2182 } 2183 base = &xhci->cap_regs->hc_capbase; 2184 2185 cap_start = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_PROTOCOL); 2186 if (!cap_start) { 2187 xhci_err(xhci, "No Extended Capability registers, unable to set up roothub\n"); 2188 return -ENODEV; 2189 } 2190 2191 offset = cap_start; 2192 /* count extended protocol capability entries for later caching */ 2193 while (offset) { 2194 cap_count++; 2195 offset = xhci_find_next_ext_cap(base, offset, 2196 XHCI_EXT_CAPS_PROTOCOL); 2197 } 2198 2199 xhci->port_caps = kcalloc_node(cap_count, sizeof(*xhci->port_caps), 2200 flags, dev_to_node(dev)); 2201 if (!xhci->port_caps) 2202 return -ENOMEM; 2203 2204 offset = cap_start; 2205 2206 while (offset) { 2207 xhci_add_in_port(xhci, num_ports, base + offset, cap_count); 2208 if (xhci->usb2_rhub.num_ports + xhci->usb3_rhub.num_ports == 2209 num_ports) 2210 break; 2211 offset = xhci_find_next_ext_cap(base, offset, 2212 XHCI_EXT_CAPS_PROTOCOL); 2213 } 2214 if (xhci->usb2_rhub.num_ports == 0 && xhci->usb3_rhub.num_ports == 0) { 2215 xhci_warn(xhci, "No ports on the roothubs?\n"); 2216 return -ENODEV; 2217 } 2218 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2219 
"Found %u USB 2.0 ports and %u USB 3.0 ports.", 2220 xhci->usb2_rhub.num_ports, xhci->usb3_rhub.num_ports); 2221 2222 /* Place limits on the number of roothub ports so that the hub 2223 * descriptors aren't longer than the USB core will allocate. 2224 */ 2225 if (xhci->usb3_rhub.num_ports > USB_SS_MAXPORTS) { 2226 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2227 "Limiting USB 3.0 roothub ports to %u.", 2228 USB_SS_MAXPORTS); 2229 xhci->usb3_rhub.num_ports = USB_SS_MAXPORTS; 2230 } 2231 if (xhci->usb2_rhub.num_ports > USB_MAXCHILDREN) { 2232 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2233 "Limiting USB 2.0 roothub ports to %u.", 2234 USB_MAXCHILDREN); 2235 xhci->usb2_rhub.num_ports = USB_MAXCHILDREN; 2236 } 2237 2238 if (!xhci->usb2_rhub.num_ports) 2239 xhci_info(xhci, "USB2 root hub has no ports\n"); 2240 2241 if (!xhci->usb3_rhub.num_ports) 2242 xhci_info(xhci, "USB3 root hub has no ports\n"); 2243 2244 xhci_create_rhub_port_array(xhci, &xhci->usb2_rhub, flags); 2245 xhci_create_rhub_port_array(xhci, &xhci->usb3_rhub, flags); 2246 2247 return 0; 2248 } 2249 2250 static struct xhci_interrupter * 2251 xhci_alloc_interrupter(struct xhci_hcd *xhci, unsigned int segs, gfp_t flags) 2252 { 2253 struct device *dev = xhci_to_hcd(xhci)->self.sysdev; 2254 struct xhci_interrupter *ir; 2255 unsigned int max_segs; 2256 int ret; 2257 2258 if (!segs) 2259 segs = ERST_DEFAULT_SEGS; 2260 2261 max_segs = BIT(HCS_ERST_MAX(xhci->hcs_params2)); 2262 segs = min(segs, max_segs); 2263 2264 ir = kzalloc_node(sizeof(*ir), flags, dev_to_node(dev)); 2265 if (!ir) 2266 return NULL; 2267 2268 ir->event_ring = xhci_ring_alloc(xhci, segs, TYPE_EVENT, 0, flags); 2269 if (!ir->event_ring) { 2270 xhci_warn(xhci, "Failed to allocate interrupter event ring\n"); 2271 kfree(ir); 2272 return NULL; 2273 } 2274 2275 ret = xhci_alloc_erst(xhci, ir->event_ring, &ir->erst, flags); 2276 if (ret) { 2277 xhci_warn(xhci, "Failed to allocate interrupter erst\n"); 2278 xhci_ring_free(xhci, ir->event_ring); 2279 kfree(ir); 2280 return NULL; 2281 } 2282 2283 return ir; 2284 } 2285 2286 static int 2287 xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir, 2288 unsigned int intr_num) 2289 { 2290 u64 erst_base; 2291 u32 erst_size; 2292 2293 if (intr_num >= xhci->max_interrupters) { 2294 xhci_warn(xhci, "Can't add interrupter %d, max interrupters %d\n", 2295 intr_num, xhci->max_interrupters); 2296 return -EINVAL; 2297 } 2298 2299 if (xhci->interrupters[intr_num]) { 2300 xhci_warn(xhci, "Interrupter %d\n already set up", intr_num); 2301 return -EINVAL; 2302 } 2303 2304 xhci->interrupters[intr_num] = ir; 2305 ir->intr_num = intr_num; 2306 ir->ir_set = &xhci->run_regs->ir_set[intr_num]; 2307 2308 /* set ERST count with the number of entries in the segment table */ 2309 erst_size = readl(&ir->ir_set->erst_size); 2310 erst_size &= ERST_SIZE_MASK; 2311 erst_size |= ir->event_ring->num_segs; 2312 writel(erst_size, &ir->ir_set->erst_size); 2313 2314 erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base); 2315 erst_base &= ERST_BASE_RSVDP; 2316 erst_base |= ir->erst.erst_dma_addr & ~ERST_BASE_RSVDP; 2317 if (xhci->quirks & XHCI_WRITE_64_HI_LO) 2318 hi_lo_writeq(erst_base, &ir->ir_set->erst_base); 2319 else 2320 xhci_write_64(xhci, erst_base, &ir->ir_set->erst_base); 2321 2322 /* Set the event ring dequeue address of this interrupter */ 2323 xhci_set_hc_event_deq(xhci, ir); 2324 2325 return 0; 2326 } 2327 2328 struct xhci_interrupter * 2329 xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs, 2330 u32 imod_interval) 
2331 { 2332 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 2333 struct xhci_interrupter *ir; 2334 unsigned int i; 2335 int err = -ENOSPC; 2336 2337 if (!xhci->interrupters || xhci->max_interrupters <= 1) 2338 return NULL; 2339 2340 ir = xhci_alloc_interrupter(xhci, segs, GFP_KERNEL); 2341 if (!ir) 2342 return NULL; 2343 2344 spin_lock_irq(&xhci->lock); 2345 2346 /* Find available secondary interrupter, interrupter 0 is reserved for primary */ 2347 for (i = 1; i < xhci->max_interrupters; i++) { 2348 if (xhci->interrupters[i] == NULL) { 2349 err = xhci_add_interrupter(xhci, ir, i); 2350 break; 2351 } 2352 } 2353 2354 spin_unlock_irq(&xhci->lock); 2355 2356 if (err) { 2357 xhci_warn(xhci, "Failed to add secondary interrupter, max interrupters %d\n", 2358 xhci->max_interrupters); 2359 xhci_free_interrupter(xhci, ir); 2360 return NULL; 2361 } 2362 2363 err = xhci_set_interrupter_moderation(ir, imod_interval); 2364 if (err) 2365 xhci_warn(xhci, "Failed to set interrupter %d moderation to %uns\n", 2366 i, imod_interval); 2367 2368 xhci_dbg(xhci, "Add secondary interrupter %d, max interrupters %d\n", 2369 i, xhci->max_interrupters); 2370 2371 return ir; 2372 } 2373 EXPORT_SYMBOL_GPL(xhci_create_secondary_interrupter); 2374 2375 int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) 2376 { 2377 struct xhci_interrupter *ir; 2378 struct device *dev = xhci_to_hcd(xhci)->self.sysdev; 2379 dma_addr_t dma; 2380 unsigned int val, val2; 2381 u64 val_64; 2382 u32 page_size, temp; 2383 int i; 2384 2385 INIT_LIST_HEAD(&xhci->cmd_list); 2386 2387 /* init command timeout work */ 2388 INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout); 2389 init_completion(&xhci->cmd_ring_stop_completion); 2390 2391 page_size = readl(&xhci->op_regs->page_size); 2392 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2393 "Supported page size register = 0x%x", page_size); 2394 i = ffs(page_size); 2395 if (i < 16) 2396 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2397 "Supported page size of %iK", (1 << (i+12)) / 1024); 2398 else 2399 xhci_warn(xhci, "WARN: no supported page size\n"); 2400 /* Use 4K pages, since that's common and the minimum the HC supports */ 2401 xhci->page_shift = 12; 2402 xhci->page_size = 1 << xhci->page_shift; 2403 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2404 "HCD page size set to %iK", xhci->page_size / 1024); 2405 2406 /* 2407 * Program the Number of Device Slots Enabled field in the CONFIG 2408 * register with the max value of slots the HC can handle. 2409 */ 2410 val = HCS_MAX_SLOTS(readl(&xhci->cap_regs->hcs_params1)); 2411 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2412 "// xHC can handle at most %d device slots.", val); 2413 val2 = readl(&xhci->op_regs->config_reg); 2414 val |= (val2 & ~HCS_SLOTS_MASK); 2415 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2416 "// Setting Max device slots reg = 0x%x.", val); 2417 writel(val, &xhci->op_regs->config_reg); 2418 2419 /* 2420 * xHCI section 5.4.6 - Device Context array must be 2421 * "physically contiguous and 64-byte (cache line) aligned". 2422 */ 2423 xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma, 2424 flags); 2425 if (!xhci->dcbaa) 2426 goto fail; 2427 xhci->dcbaa->dma = dma; 2428 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2429 "// Device context base array address = 0x%pad (DMA), %p (virt)", 2430 &xhci->dcbaa->dma, xhci->dcbaa); 2431 xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr); 2432 2433 /* 2434 * Initialize the ring segment pool. The ring must be a contiguous 2435 * structure comprised of TRBs. 
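 * Each segment holds TRBS_PER_SEGMENT 16-byte TRBs, i.e. TRB_SEGMENT_SIZE
 * bytes of pool memory.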
The TRBs must be 16 byte aligned, 2436 * however, the command ring segment needs 64-byte aligned segments 2437 * and our use of dma addresses in the trb_address_map radix tree needs 2438 * TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need. 2439 */ 2440 if (xhci->quirks & XHCI_ZHAOXIN_TRB_FETCH) 2441 xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, 2442 TRB_SEGMENT_SIZE * 2, TRB_SEGMENT_SIZE * 2, xhci->page_size * 2); 2443 else 2444 xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, 2445 TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size); 2446 2447 /* See Table 46 and Note on Figure 55 */ 2448 xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev, 2449 2112, 64, xhci->page_size); 2450 if (!xhci->segment_pool || !xhci->device_pool) 2451 goto fail; 2452 2453 /* Linear stream context arrays don't have any boundary restrictions, 2454 * and only need to be 16-byte aligned. 2455 */ 2456 xhci->small_streams_pool = 2457 dma_pool_create("xHCI 256 byte stream ctx arrays", 2458 dev, SMALL_STREAM_ARRAY_SIZE, 16, 0); 2459 xhci->medium_streams_pool = 2460 dma_pool_create("xHCI 1KB stream ctx arrays", 2461 dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0); 2462 /* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE 2463 * will be allocated with dma_alloc_coherent() 2464 */ 2465 2466 if (!xhci->small_streams_pool || !xhci->medium_streams_pool) 2467 goto fail; 2468 2469 /* Set up the command ring to have one segment for now. */ 2470 xhci->cmd_ring = xhci_ring_alloc(xhci, 1, TYPE_COMMAND, 0, flags); 2471 if (!xhci->cmd_ring) 2472 goto fail; 2473 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2474 "Allocated command ring at %p", xhci->cmd_ring); 2475 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%pad", 2476 &xhci->cmd_ring->first_seg->dma); 2477 2478 /* Set the address in the Command Ring Control register */ 2479 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); 2480 val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | 2481 (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) | 2482 xhci->cmd_ring->cycle_state; 2483 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2484 "// Setting command ring address to 0x%016llx", val_64); 2485 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); 2486 2487 /* Reserve one command ring TRB for disabling LPM. 2488 * Since the USB core grabs the shared usb_bus bandwidth mutex before 2489 * disabling LPM, we only need to reserve one TRB for all devices. 2490 */ 2491 xhci->cmd_ring_reserved_trbs++; 2492 2493 val = readl(&xhci->cap_regs->db_off); 2494 val &= DBOFF_MASK; 2495 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2496 "// Doorbell array is located at offset 0x%x from cap regs base addr", 2497 val); 2498 xhci->dba = (void __iomem *) xhci->cap_regs + val; 2499 2500 /* Allocate and set up primary interrupter 0 with an event ring.
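 * The interrupters array is sized for max_interrupters; slot 0 is claimed by
 * the primary interrupter here, and secondary interrupters are added on
 * demand via xhci_create_secondary_interrupter() (see the usage sketch at
 * the end of this file).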
 */ 2501 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2502 "Allocating primary event ring"); 2503 xhci->interrupters = kcalloc_node(xhci->max_interrupters, sizeof(*xhci->interrupters), 2504 flags, dev_to_node(dev)); if (!xhci->interrupters) goto fail; 2505 2506 ir = xhci_alloc_interrupter(xhci, 0, flags); 2507 if (!ir) 2508 goto fail; 2509 2510 if (xhci_add_interrupter(xhci, ir, 0)) 2511 goto fail; 2512 2513 ir->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX; 2514 2515 for (i = 0; i < MAX_HC_SLOTS; i++) 2516 xhci->devs[i] = NULL; 2517 2518 if (scratchpad_alloc(xhci, flags)) 2519 goto fail; 2520 if (xhci_setup_port_arrays(xhci, flags)) 2521 goto fail; 2522 2523 /* Enable USB 3.0 device notifications for function remote wake, which 2524 * is necessary for allowing USB 3.0 devices to do remote wakeup from 2525 * U3 (device suspend). 2526 */ 2527 temp = readl(&xhci->op_regs->dev_notification); 2528 temp &= ~DEV_NOTE_MASK; 2529 temp |= DEV_NOTE_FWAKE; 2530 writel(temp, &xhci->op_regs->dev_notification); 2531 2532 return 0; 2533 2534 fail: 2535 xhci_halt(xhci); 2536 xhci_reset(xhci, XHCI_RESET_SHORT_USEC); 2537 xhci_mem_cleanup(xhci); 2538 return -ENOMEM; 2539 } 2540
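
/*
 * Usage sketch (illustrative only, not part of this driver): a client that
 * holds a usb_hcd pointer, such as an audio sideband/offload driver, could
 * set up and tear down a secondary interrupter roughly as follows. The
 * two-segment event ring and the 0 ns moderation interval are arbitrary
 * example values, not recommendations:
 *
 *	struct xhci_interrupter *ir;
 *
 *	ir = xhci_create_secondary_interrupter(hcd, 2, 0);
 *	if (!ir)
 *		return -ENOMEM;
 *
 *	... hand ir->intr_num and the event ring segments to the offload
 *	engine, which will consume events on this interrupter ...
 *
 *	xhci_remove_secondary_interrupter(hcd, ir);
 */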