// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - switch/port utility functions
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/hex.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>

#include "tb.h"

/* Switch NVM support */

/* Cached NVM authentication result for one router, keyed by its UUID */
struct nvm_auth_status {
	struct list_head list;
	uuid_t uuid;
	u32 status;
};

/*
 * Hold NVM authentication failure status per switch. This information
 * needs to stay around even when the switch gets power cycled so we
 * keep it separately.
 */
static LIST_HEAD(nvm_auth_status_cache);
static DEFINE_MUTEX(nvm_auth_status_lock);

/* Look up the cache entry for @sw. Caller must hold nvm_auth_status_lock. */
static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	list_for_each_entry(st, &nvm_auth_status_cache, list) {
		if (uuid_equal(&st->uuid, sw->uuid))
			return st;
	}

	return NULL;
}

/* Return the cached authentication status for @sw (%0 if none recorded) */
static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	mutex_unlock(&nvm_auth_status_lock);

	*status = st ? st->status : 0;
}

/* Record @status for @sw, allocating a new cache entry on first failure */
static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
{
	struct nvm_auth_status *st;

	if (WARN_ON(!sw->uuid))
		return;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);

	if (!st) {
		st = kzalloc_obj(*st);
		if (!st)
			goto unlock;

		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
		INIT_LIST_HEAD(&st->list);
		list_add_tail(&st->list, &nvm_auth_status_cache);
	}

	st->status = status;
unlock:
	mutex_unlock(&nvm_auth_status_lock);
}

/* Drop the cached authentication status (if any) for @sw */
static void nvm_clear_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	if (st) {
		list_del(&st->list);
		kfree(st);
	}
	mutex_unlock(&nvm_auth_status_lock);
}

/* Validate the cached NVM image and flash it to the non-active bank */
static int nvm_validate_and_write(struct tb_switch *sw)
{
	unsigned int image_size;
	const u8 *buf;
	int ret;

	ret = tb_nvm_validate(sw->nvm);
	if (ret)
		return ret;

	ret = tb_nvm_write_headers(sw->nvm);
	if (ret)
		return ret;

	buf = sw->nvm->buf_data_start;
	image_size = sw->nvm->buf_data_size;

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
	else
		ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
	if (ret)
		return ret;

	sw->nvm->flushed = true;
	return 0;
}

static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
{
	int ret = 0;

	/*
	 * Root switch NVM upgrade requires that we disconnect the
	 * existing paths first (in case it is not in safe mode
	 * already).
	 */
	if (!sw->safe_mode) {
		u32 status;

		ret = tb_domain_disconnect_all_paths(sw->tb);
		if (ret)
			return ret;
		/*
		 * The host controller goes away pretty soon after this if
		 * everything goes well so getting timeout is expected.
		 */
		ret = dma_port_flash_update_auth(sw->dma_port);
		if (!ret || ret == -ETIMEDOUT)
			return 0;

		/*
		 * Any error from update auth operation requires power
		 * cycling of the host router.
		 */
		tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
		if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
			nvm_set_auth_status(sw, status);
	}

	/*
	 * From safe mode we can get out by just power cycling the
	 * switch.
	 */
	dma_port_power_cycle(sw->dma_port);
	return ret;
}

static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
{
	int ret, retries = 10;

	ret = dma_port_flash_update_auth(sw->dma_port);
	switch (ret) {
	case 0:
	case -ETIMEDOUT:
	case -EACCES:
	case -EINVAL:
		/* Power cycle is required */
		break;
	default:
		return ret;
	}

	/*
	 * Poll here for the authentication status. It takes some time
	 * for the device to respond (we get timeout for a while). Once
	 * we get response the device needs to be power cycled in order
	 * to the new NVM to be taken into use.
	 */
	do {
		u32 status;

		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
		if (ret < 0 && ret != -ETIMEDOUT)
			return ret;
		if (ret > 0) {
			if (status) {
				tb_sw_warn(sw, "failed to authenticate NVM\n");
				nvm_set_auth_status(sw, status);
			}

			tb_sw_info(sw, "power cycling the switch now\n");
			dma_port_power_cycle(sw->dma_port);
			return 0;
		}

		msleep(500);
	} while (--retries);

	return -ETIMEDOUT;
}

static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	/*
	 * During host router NVM upgrade we should not allow root port to
	 * go into D3cold because some root ports cannot trigger PME
	 * itself. To be on the safe side keep the root port in D0 during
	 * the whole upgrade process.
	 */
	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_get_noresume(&root_port->dev);
}

/* Drop the runtime PM reference taken by nvm_authenticate_start_dma_port() */
static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_put(&root_port->dev);
}

static inline bool nvm_readable(struct tb_switch *sw)
{
	if (tb_switch_is_usb4(sw)) {
		/*
		 * USB4 devices must support NVM operations but it is
		 * optional for hosts. Therefore we query the NVM sector
		 * size here and if it is supported assume NVM
		 * operations are implemented.
		 */
		return usb4_switch_nvm_sector_size(sw) > 0;
	}

	/* Thunderbolt 2 and 3 devices support NVM through DMA port */
	return !!sw->dma_port;
}

/* NVM upgrade is possible only when the NVM is readable and not locked down */
static inline bool nvm_upgradeable(struct tb_switch *sw)
{
	if (sw->no_nvm_upgrade)
		return false;
	return nvm_readable(sw);
}

/*
 * Trigger NVM authentication. @auth_only authenticates the currently
 * flashed image without writing a new one (USB4 routers only).
 */
static int nvm_authenticate(struct tb_switch *sw, bool auth_only)
{
	int ret;

	if (tb_switch_is_usb4(sw)) {
		if (auth_only) {
			ret = usb4_switch_nvm_set_offset(sw, 0);
			if (ret)
				return ret;
		}
		sw->nvm->authenticating = true;
		return usb4_switch_nvm_authenticate(sw);
	}
	if (auth_only)
		return -EOPNOTSUPP;

	sw->nvm->authenticating = true;
	if (!tb_route(sw)) {
		nvm_authenticate_start_dma_port(sw);
		ret = nvm_authenticate_host_dma_port(sw);
	} else {
		ret = nvm_authenticate_device_dma_port(sw);
	}

	return ret;
}

/**
 * tb_switch_nvm_read() - Read router NVM
 * @sw: Router whose NVM to read
 * @address: Start address on the NVM
 * @buf: Buffer where the read data is copied
 * @size: Size of the buffer in bytes
 *
 * Reads from router NVM and returns the requested data in @buf. Locking
 * is up to the caller.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
		       size_t size)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_nvm_read(sw, address, buf, size);
	return dma_port_flash_read(sw->dma_port, address, buf, size);
}

/* NVMem read callback for the active NVM portion */
static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	int ret;

	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto out;
	}

	ret = tb_switch_nvm_read(sw, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}

/* NVMem write callback for the non-active NVM portion */
static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	int ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	/*
	 * Since writing the NVM image might require some special steps,
	 * for example when CSS headers are written, we cache the image
	 * locally here and handle the special cases when the user asks
	 * us to authenticate the image.
	 */
	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

	return ret;
}

/* Allocate the NVM structure and read the current version from hardware */
static int tb_switch_nvm_init(struct tb_switch *sw)
{
	struct tb_nvm *nvm;
	int ret;

	if (!nvm_readable(sw))
		return 0;

	nvm = tb_nvm_alloc(&sw->dev);
	if (IS_ERR(nvm)) {
		/* -EOPNOTSUPP simply means NVM upgrade is not supported */
		ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm);
		goto err_nvm;
	}

	ret = tb_nvm_read_version(nvm);
	if (ret)
		goto err_nvm;

	sw->nvm = nvm;
	return 0;

err_nvm:
	tb_sw_dbg(sw, "NVM upgrade disabled\n");
	sw->no_nvm_upgrade = true;
	if (!IS_ERR(nvm))
		tb_nvm_free(nvm);

	return ret;
}

/* Register the NVMem devices (active and/or non-active) for @sw */
static int tb_switch_nvm_add(struct tb_switch *sw)
{
	struct tb_nvm *nvm = sw->nvm;
	int ret;

	if (!nvm)
		return 0;

	/*
	 * If the switch is in safe-mode the only accessible portion of
	 * the NVM is the non-active one where userspace is expected to
	 * write new functional NVM.
	 */
	if (!sw->safe_mode) {
		ret = tb_nvm_add_active(nvm, nvm_read);
		if (ret)
			goto err_nvm;
		tb_sw_dbg(sw, "NVM version %x.%x\n", nvm->major, nvm->minor);
	}

	if (!sw->no_nvm_upgrade) {
		ret = tb_nvm_add_non_active(nvm, nvm_write);
		if (ret)
			goto err_nvm;
	}

	return 0;

err_nvm:
	tb_sw_dbg(sw, "NVM upgrade disabled\n");
	sw->no_nvm_upgrade = true;
	tb_nvm_free(nvm);

	return ret;
}

/* Tear down NVM support for @sw, clearing cached auth status on unplug */
static void tb_switch_nvm_remove(struct tb_switch *sw)
{
	struct tb_nvm *nvm;

	nvm = sw->nvm;
	sw->nvm = NULL;

	if (!nvm)
		return;

	/* Remove authentication status in case the switch is unplugged */
	if (!nvm->authenticating)
		nvm_clear_auth_status(sw);

	tb_nvm_free(nvm);
}

/* port utility functions */

/* Human readable adapter type for debug output */
static const char *tb_port_type(const struct tb_regs_port_header *port)
{
	switch (port->type >> 16) {
	case 0:
		switch ((u8) port->type) {
		case 0:
			return "Inactive";
		case 1:
			return "Port";
		case 2:
			return "NHI";
		default:
			return "unknown";
		}
	case 0x2:
		return "Ethernet";
	case 0x8:
		return "SATA";
	case 0xe:
		return "DP/HDMI";
	case 0x10:
		return "PCIe";
	case 0x20:
		return "USB";
	default:
		return "unknown";
	}
}

/* Dump the port config space header for debugging */
static void tb_dump_port(struct tb *tb, const struct tb_port *port)
{
	const struct tb_regs_port_header *regs = &port->config;

	tb_dbg(tb,
	       " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
	       regs->port_number, regs->vendor_id, regs->device_id,
	       regs->revision, regs->thunderbolt_version, tb_port_type(regs),
	       regs->type);
	tb_dbg(tb, "  Max hop id (in/out): %d/%d\n",
	       regs->max_in_hop_id, regs->max_out_hop_id);
	tb_dbg(tb, "  Max counters: %d\n", regs->max_counters);
	tb_dbg(tb, "  NFC Credits: %#x\n", regs->nfc_credits);
	tb_dbg(tb, "  Credits (total/control): %u/%u\n", port->total_credits,
	       port->ctl_credits);
}

/**
 * tb_port_state() - get connectedness state of a port
 * @port: the port to check
 *
 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 *
 * Return: &enum tb_port_state or negative error code on failure.
 */
int tb_port_state(struct tb_port *port)
{
	struct tb_cap_phy phy;
	int res;

	if (port->cap_phy == 0) {
		tb_port_WARN(port, "does not have a PHY\n");
		return -EINVAL;
	}
	res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
	if (res)
		return res;
	return phy.state;
}

/**
 * tb_wait_for_port() - wait for a port to become ready
 * @port: Port to wait
 * @wait_if_unplugged: Wait also when port is unplugged
 *
 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 * wait_if_unplugged is set then we also wait if the port is in state
 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 * switch resume). Otherwise we only wait if a device is registered but the link
 * has not yet been established.
 *
 * Return:
 * * %0 - If the port is not connected or failed to reach
 *   state %TB_PORT_UP within one second.
 * * %1 - If the port is connected and in state %TB_PORT_UP.
 * * Negative errno - An error occurred.
 */
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
{
	int retries = 10;
	int state;

	if (!port->cap_phy) {
		tb_port_WARN(port, "does not have PHY\n");
		return -EINVAL;
	}
	if (tb_is_upstream_port(port)) {
		tb_port_WARN(port, "is the upstream port\n");
		return -EINVAL;
	}

	/* 10 x 100 ms ~= the documented 1 second upper bound */
	while (retries--) {
		state = tb_port_state(port);
		switch (state) {
		case TB_PORT_DISABLED:
			tb_port_dbg(port, "is disabled (state: 0)\n");
			return 0;

		case TB_PORT_UNPLUGGED:
			if (wait_if_unplugged) {
				/* used during resume */
				tb_port_dbg(port,
					    "is unplugged (state: 7), retrying...\n");
				msleep(100);
				break;
			}
			tb_port_dbg(port, "is unplugged (state: 7)\n");
			return 0;

		case TB_PORT_UP:
		case TB_PORT_TX_CL0S:
		case TB_PORT_RX_CL0S:
		case TB_PORT_CL1:
		case TB_PORT_CL2:
			tb_port_dbg(port, "is connected, link is up (state: %d)\n", state);
			return 1;

		default:
			if (state < 0)
				return state;

			/*
			 * After plug-in the state is TB_PORT_CONNECTING. Give it some
			 * time.
			 */
			tb_port_dbg(port,
				    "is connected, link is not up (state: %d), retrying...\n",
				    state);
			msleep(100);
		}

	}
	tb_port_warn(port,
		     "failed to reach state TB_PORT_UP. Ignoring port...\n");
	return 0;
}

/**
 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
 * @port: Port to add/remove NFC credits
 * @credits: Credits to add/remove
 *
 * Change the number of NFC credits allocated to @port by @credits. To remove
 * NFC credits pass a negative amount of credits.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_port_add_nfc_credits(struct tb_port *port, int credits)
{
	u32 nfc_credits;

	if (credits == 0 || port->sw->is_unplugged)
		return 0;

	/*
	 * USB4 restricts programming NFC buffers to lane adapters only
	 * so skip other ports.
	 */
	if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
		return 0;

	nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
	if (credits < 0)
		/* Never remove more credits than are currently allocated */
		credits = max_t(int, -nfc_credits, credits);

	nfc_credits += credits;

	tb_port_dbg(port, "adding %d NFC credits to %lu", credits,
		    port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);

	port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
	port->config.nfc_credits |= nfc_credits;

	return tb_port_write(port, &port->config.nfc_credits,
			     TB_CFG_PORT, ADP_CS_4, 1);
}

/**
 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 * @port: Port whose counters to clear
 * @counter: Counter index to clear
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_port_clear_counter(struct tb_port *port, int counter)
{
	u32 zero[3] = { 0, 0, 0 };

	tb_port_dbg(port, "clearing counter %d\n", counter);
	/* Each counter occupies three dwords in the counters config space */
	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
}

/**
 * tb_port_unlock() - Unlock downstream port
 * @port: Port to unlock
 *
 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
 * downstream router accessible for CM.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_port_unlock(struct tb_port *port)
{
	if (tb_switch_is_icm(port->sw))
		return 0;
	if (!tb_port_is_null(port))
		return -EINVAL;
	if (tb_switch_is_usb4(port->sw))
		return usb4_port_unlock(port);
	return 0;
}

/* Set or clear the lane disable bit (LANE_ADP_CS_1_LD) of a lane adapter */
static int __tb_port_enable(struct tb_port *port, bool enable)
{
	int ret;
	u32 phy;

	if (!tb_port_is_null(port))
		return -EINVAL;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (enable)
		phy &= ~LANE_ADP_CS_1_LD;
	else
		phy |= LANE_ADP_CS_1_LD;

	ret = tb_port_write(port, &phy, TB_CFG_PORT,
			    port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	tb_port_dbg(port, "lane %s\n", str_enabled_disabled(enable));
	return 0;
}

/**
 * tb_port_enable() - Enable lane adapter
 * @port: Port to enable (can be %NULL)
 *
 * This is used for lane 0 and 1 adapters to enable it.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_port_enable(struct tb_port *port)
{
	return __tb_port_enable(port, true);
}

/**
 * tb_port_disable() - Disable lane adapter
 * @port: Port to disable (can be %NULL)
 *
 * This is used for lane 0 and 1 adapters to disable it.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_port_disable(struct tb_port *port)
{
	return __tb_port_enable(port, false);
}

/* Issue a downstream port reset using the USB4 or legacy LC method */
static int tb_port_reset(struct tb_port *port)
{
	if (tb_switch_is_usb4(port->sw))
		return port->cap_usb4 ? usb4_port_reset(port) : 0;
	return tb_lc_reset_port(port);
}

/*
 * tb_init_port() - initialize a port
 *
 * This is a helper method for tb_switch_alloc. Does not check or initialize
 * any downstream switches.
 *
 * Return: %0 on success, negative errno otherwise.
 */
static int tb_init_port(struct tb_port *port)
{
	int res;
	int cap;

	INIT_LIST_HEAD(&port->list);

	/* Control adapter does not have configuration space */
	if (!port->port)
		return 0;

	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
	if (res) {
		if (res == -ENODEV) {
			tb_dbg(port->sw->tb, " Port %d: not implemented\n",
			       port->port);
			port->disabled = true;
			return 0;
		}
		return res;
	}

	/* Port 0 is the switch itself and has no PHY. */
	if (port->config.type == TB_TYPE_PORT) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);

		if (cap > 0)
			port->cap_phy = cap;
		else
			tb_port_WARN(port, "non switch port without a PHY\n");

		cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
		if (cap > 0)
			port->cap_usb4 = cap;

		/*
		 * USB4 port buffers allocated for the control path
		 * can be read from the path config space. Legacy
		 * devices use hard-coded value.
		 */
		if (port->cap_usb4) {
			struct tb_regs_hop hop;

			if (!tb_port_read(port, &hop, TB_CFG_HOPS, 0, 2))
				port->ctl_credits = hop.initial_credits;
		}
		if (!port->ctl_credits)
			port->ctl_credits = 2;

	} else {
		cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
		if (cap > 0)
			port->cap_adap = cap;
	}

	port->total_credits =
		(port->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
		ADP_CS_4_TOTAL_BUFFERS_SHIFT;

	tb_dump_port(port->sw->tb, port);
	return 0;
}

/* Allocate a HopID from @port's input (@in true) or output HopID space */
static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
			       int max_hopid)
{
	int port_max_hopid;
	struct ida *ida;

	if (in) {
		port_max_hopid = port->config.max_in_hop_id;
		ida = &port->in_hopids;
	} else {
		port_max_hopid = port->config.max_out_hop_id;
		ida = &port->out_hopids;
	}

	/*
	 * NHI can use HopIDs 1-max for other adapters HopIDs 0-7 are
	 * reserved.
	 */
	if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
		min_hopid = TB_PATH_MIN_HOPID;

	if (max_hopid < 0 || max_hopid > port_max_hopid)
		max_hopid = port_max_hopid;

	return ida_alloc_range(ida, min_hopid, max_hopid, GFP_KERNEL);
}

/**
 * tb_port_alloc_in_hopid() - Allocate input HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable input HopID
 * @max_hopid: Maximum acceptable input HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
}

/**
 * tb_port_alloc_out_hopid() - Allocate output HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable output HopID
 * @max_hopid: Maximum acceptable output HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
}

/**
 * tb_port_release_in_hopid() - Release allocated input HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_in_hopid(struct tb_port *port, int hopid)
{
	ida_free(&port->in_hopids, hopid);
}

/**
 * tb_port_release_out_hopid() - Release allocated output HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_out_hopid(struct tb_port *port, int hopid)
{
	ida_free(&port->out_hopids, hopid);
}

/*
 * True if @sw is in the subtree below @parent, determined by comparing
 * the route strings up to @parent's depth (one byte per hop).
 */
static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
					  const struct tb_switch *sw)
{
	u64 mask = (1ULL << parent->config.depth * 8) - 1;

	return (tb_route(parent) & mask) == (tb_route(sw) & mask);
}

/**
 * tb_next_port_on_path() - Return next port for given port on a path
 * @start: Start port of the walk
 * @end: End port of the walk
 * @prev: Previous port (%NULL if this is the first)
 *
 * This function can be used to walk from one port to another if they
 * are connected through zero or more switches. If the @prev is dual
 * link port, the function follows that link and returns another end on
 * that same link.
 *
 * Domain tb->lock must be held when this function is called.
 *
 * Return: Pointer to &struct tb_port, %NULL if the @end port has been reached.
 */
struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
				     struct tb_port *prev)
{
	struct tb_port *next;

	if (!prev)
		return start;

	if (prev->sw == end->sw) {
		if (prev == end)
			return NULL;
		return end;
	}

	if (tb_switch_is_reachable(prev->sw, end->sw)) {
		next = tb_port_at(tb_route(end->sw), prev->sw);
		/* Walk down the topology if next == prev */
		if (prev->remote &&
		    (next == prev || next->dual_link_port == prev))
			next = prev->remote;
	} else {
		if (tb_is_upstream_port(prev)) {
			next = prev->remote;
		} else {
			next = tb_upstream_port(prev->sw);
			/*
			 * Keep the same link if prev and next are both
			 * dual link ports.
			 */
			if (next->dual_link_port &&
			    next->link_nr != prev->link_nr) {
				next = next->dual_link_port;
			}
		}
	}

	return next != prev ? next : NULL;
}

/**
 * tb_port_get_link_speed() - Get current link speed
 * @port: Port to check (USB4 or CIO)
 *
 * Return: Link speed in Gb/s or negative errno in case of failure.
 */
int tb_port_get_link_speed(struct tb_port *port)
{
	u32 val, speed;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
		LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;

	switch (speed) {
	case LANE_ADP_CS_1_CURRENT_SPEED_GEN4:
		return 40;
	case LANE_ADP_CS_1_CURRENT_SPEED_GEN3:
		return 20;
	default:
		return 10;
	}
}

/**
 * tb_port_get_link_generation() - Returns link generation
 * @port: Lane adapter
 *
 * Return: Link generation as a number or negative errno in case of
 * failure.
 *
 * Does not distinguish between Thunderbolt 1 and Thunderbolt 2
 * links so for those always returns %2.
 */
int tb_port_get_link_generation(struct tb_port *port)
{
	int ret;

	ret = tb_port_get_link_speed(port);
	if (ret < 0)
		return ret;

	/* Map link speed in Gb/s to the USB4/Thunderbolt generation */
	switch (ret) {
	case 40:
		return 4;
	case 20:
		return 3;
	default:
		return 2;
	}
}

/**
 * tb_port_get_link_width() - Get current link width
 * @port: Port to check (USB4 or CIO)
 *
 * Return: Link width encoded in &enum tb_link_width or
 * negative errno in case of failure.
 */
int tb_port_get_link_width(struct tb_port *port)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	/* Matches the values in enum tb_link_width */
	return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
		LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
}

/**
 * tb_port_width_supported() - Is the given link width supported
 * @port: Port to check
 * @width: Widths to check (bitmask)
 *
 * Can be called to any lane adapter. Checks if given @width is
 * supported by the hardware.
 *
 * Return: %true if link width is supported, %false otherwise.
 */
bool tb_port_width_supported(struct tb_port *port, unsigned int width)
{
	u32 phy, widths;
	int ret;

	if (!port->cap_phy)
		return false;

	/* Asymmetric widths need a Gen 4 link with asymmetry support */
	if (width & (TB_LINK_WIDTH_ASYM_TX | TB_LINK_WIDTH_ASYM_RX)) {
		if (tb_port_get_link_generation(port) < 4 ||
		    !usb4_port_asym_supported(port))
			return false;
	}

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_0, 1);
	if (ret)
		return false;

	/*
	 * The field encoding is the same as &enum tb_link_width (which is
	 * passed to @width).
	 */
	widths = FIELD_GET(LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK, phy);
	return widths & width;
}

/**
 * tb_port_set_link_width() - Set target link width of the lane adapter
 * @port: Lane adapter
 * @width: Target link width
 *
 * Sets the target link width of the lane adapter to @width. Does not
 * enable/disable lane bonding. For that call tb_port_set_lane_bonding().
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
	switch (width) {
	case TB_LINK_WIDTH_SINGLE:
		/* Gen 4 link cannot be single */
		if (tb_port_get_link_generation(port) >= 4)
			return -EOPNOTSUPP;
		val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
		break;

	case TB_LINK_WIDTH_DUAL:
		/* Gen 4 links are configured through the USB4 side band */
		if (tb_port_get_link_generation(port) >= 4)
			return usb4_port_asym_set_link_width(port, width);
		val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
		break;

	case TB_LINK_WIDTH_ASYM_TX:
	case TB_LINK_WIDTH_ASYM_RX:
		return usb4_port_asym_set_link_width(port, width);

	default:
		return -EINVAL;
	}

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

/**
 * tb_port_set_lane_bonding() - Enable/disable lane bonding
 * @port: Lane adapter
 * @bonding: enable/disable bonding
 *
 * Enables or disables lane bonding. This should be called after target
 * link width has been set (tb_port_set_link_width()). Note in most
 * cases one should use tb_port_lane_bonding_enable() instead to enable
 * lane bonding.
 *
 * Return: %0 on success, negative errno otherwise.
 */
static int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (bonding)
		val |= LANE_ADP_CS_1_LB;
	else
		val &= ~LANE_ADP_CS_1_LB;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

/**
 * tb_port_lane_bonding_enable() - Enable bonding on port
 * @port: port to enable
 *
 * Enable bonding by setting the link width of the port and the other
 * port in case of dual link port. Does not wait for the link to
 * actually reach the bonded state so caller needs to call
 * tb_port_wait_for_link_width() before enabling any paths through the
 * link to make sure the link is in expected state.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_port_lane_bonding_enable(struct tb_port *port)
{
	enum tb_link_width width;
	int ret;

	/*
	 * Enable lane bonding for both links if not already enabled by
	 * for example the boot firmware.
	 */
	width = tb_port_get_link_width(port);
	if (width == TB_LINK_WIDTH_SINGLE) {
		ret = tb_port_set_link_width(port, TB_LINK_WIDTH_DUAL);
		if (ret)
			goto err_lane0;
	}

	width = tb_port_get_link_width(port->dual_link_port);
	if (width == TB_LINK_WIDTH_SINGLE) {
		ret = tb_port_set_link_width(port->dual_link_port,
					     TB_LINK_WIDTH_DUAL);
		if (ret)
			goto err_lane1;
	}

	/*
	 * Only set bonding if the link was not already bonded. This
	 * avoids the lane adapter to re-enter bonding state.
	 */
	if (width == TB_LINK_WIDTH_SINGLE && !tb_is_upstream_port(port)) {
		ret = tb_port_set_lane_bonding(port, true);
		if (ret)
			goto err_lane1;
	}

	/*
	 * When lane 0 bonding is set it will affect lane 1 too so
	 * update both.
	 */
	port->bonded = true;
	port->dual_link_port->bonded = true;

	return 0;

err_lane1:
	tb_port_set_link_width(port->dual_link_port, TB_LINK_WIDTH_SINGLE);
err_lane0:
	tb_port_set_link_width(port, TB_LINK_WIDTH_SINGLE);

	return ret;
}

/**
 * tb_port_lane_bonding_disable() - Disable bonding on port
 * @port: port to disable
 *
 * Disable bonding by setting the link width of the port and the
 * other port in case of dual link port.
 */
void tb_port_lane_bonding_disable(struct tb_port *port)
{
	tb_port_set_lane_bonding(port, false);
	tb_port_set_link_width(port->dual_link_port, TB_LINK_WIDTH_SINGLE);
	tb_port_set_link_width(port, TB_LINK_WIDTH_SINGLE);
	port->dual_link_port->bonded = false;
	port->bonded = false;
}

/**
 * tb_port_wait_for_link_width() - Wait until link reaches specific width
 * @port: Port to wait for
 * @width: Expected link width (bitmask)
 * @timeout_msec: Timeout in ms how long to wait
 *
 * Should be used after both ends of the link have been bonded (or
 * bonding has been disabled) to wait until the link actually reaches
 * the expected state.
 *
 * Can be passed a mask of expected widths.
 *
 * Return:
 * * %0 - If link reaches any of the specified widths.
 * * %-ETIMEDOUT - If link does not reach specified width.
 * * Negative errno - Another error occurred.
 */
int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width,
				int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
	int ret;

	/* Gen 4 link does not support single lane */
	if ((width & TB_LINK_WIDTH_SINGLE) &&
	    tb_port_get_link_generation(port) >= 4)
		return -EOPNOTSUPP;

	do {
		ret = tb_port_get_link_width(port);
		if (ret < 0) {
			/*
			 * Sometimes we get port locked error when
			 * polling the lanes so we can ignore it and
			 * retry.
			 */
			if (ret != -EACCES)
				return ret;
		} else if (ret & width) {
			return 0;
		}

		usleep_range(1000, 2000);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

/*
 * Re-reads ADP_CS_4 and refreshes the cached total buffer (credit)
 * count of the adapter if it changed.
 */
static int tb_port_do_update_credits(struct tb_port *port)
{
	u32 nfc_credits;
	int ret;

	ret = tb_port_read(port, &nfc_credits, TB_CFG_PORT, ADP_CS_4, 1);
	if (ret)
		return ret;

	if (nfc_credits != port->config.nfc_credits) {
		u32 total;

		total = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
			ADP_CS_4_TOTAL_BUFFERS_SHIFT;

		tb_port_dbg(port, "total credits changed %u -> %u\n",
			    port->total_credits, total);

		port->config.nfc_credits = nfc_credits;
		port->total_credits = total;
	}

	return 0;
}

/**
 * tb_port_update_credits() - Re-read port total credits
 * @port: Port to update
 *
 * After the link is bonded (or bonding was disabled) the port total
 * credits may change, so this function needs to be called to re-read
 * the credits. Updates also the second lane adapter.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_port_update_credits(struct tb_port *port)
{
	int ret;

	ret = tb_port_do_update_credits(port);
	if (ret)
		return ret;

	if (!port->dual_link_port)
		return 0;
	return tb_port_do_update_credits(port->dual_link_port);
}

/*
 * Kicks off lane initialization through the link controller. No-op for
 * USB4 routers; -EINVAL from the LC is treated as success.
 */
static int tb_port_start_lane_initialization(struct tb_port *port)
{
	int ret;

	if (tb_switch_is_usb4(port->sw))
		return 0;

	ret = tb_lc_start_lane_initialization(port);
	return ret == -EINVAL ? 0 : ret;
}

/*
 * Returns true if the port had something (router, XDomain) connected
 * before suspend.
 */
static bool tb_port_resume(struct tb_port *port)
{
	bool has_remote = tb_port_has_remote(port);

	if (port->usb4) {
		usb4_port_device_resume(port->usb4);
	} else if (!has_remote) {
		/*
		 * For disconnected downstream lane adapters start lane
		 * initialization now so we detect future connects.
		 *
		 * For XDomain start the lane initialization now so the
		 * link gets re-established.
		 *
		 * This is only needed for non-USB4 ports.
		 */
		if (!tb_is_upstream_port(port) || port->xdomain)
			tb_port_start_lane_initialization(port);
	}

	return has_remote || port->xdomain;
}

/**
 * tb_port_is_enabled() - Is the adapter port enabled
 * @port: Port to check
 *
 * Return: %true if port is enabled, %false otherwise.
 */
bool tb_port_is_enabled(struct tb_port *port)
{
	/* Dispatch on adapter type to the protocol-specific check */
	switch (port->config.type) {
	case TB_TYPE_PCIE_UP:
	case TB_TYPE_PCIE_DOWN:
		return tb_pci_port_is_enabled(port);

	case TB_TYPE_DP_HDMI_IN:
	case TB_TYPE_DP_HDMI_OUT:
		return tb_dp_port_is_enabled(port);

	case TB_TYPE_USB3_UP:
	case TB_TYPE_USB3_DOWN:
		return tb_usb3_port_is_enabled(port);

	default:
		return false;
	}
}

/**
 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
 * @port: USB3 adapter port to check
 *
 * Return: %true if port is enabled, %false otherwise.
 */
bool tb_usb3_port_is_enabled(struct tb_port *port)
{
	u32 data;

	if (tb_port_read(port, &data, TB_CFG_PORT,
			 port->cap_adap + ADP_USB3_CS_0, 1))
		return false;

	return !!(data & ADP_USB3_CS_0_PE);
}

/**
 * tb_usb3_port_enable() - Enable USB3 adapter port
 * @port: USB3 adapter port to enable
 * @enable: Enable/disable the USB3 adapter
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_usb3_port_enable(struct tb_port *port, bool enable)
{
	/* Valid bit (V) is kept set in both directions; only PE toggles */
	u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
			  : ADP_USB3_CS_0_V;

	if (!port->cap_adap)
		return -ENXIO;
	return tb_port_write(port, &word, TB_CFG_PORT,
			     port->cap_adap + ADP_USB3_CS_0, 1);
}

/**
 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
 * @port: PCIe port to check
 *
 * Return: %true if port is enabled, %false otherwise.
 */
bool tb_pci_port_is_enabled(struct tb_port *port)
{
	u32 data;

	if (tb_port_read(port, &data, TB_CFG_PORT,
			 port->cap_adap + ADP_PCIE_CS_0, 1))
		return false;

	return !!(data & ADP_PCIE_CS_0_PE);
}

/**
 * tb_pci_port_enable() - Enable PCIe adapter port
 * @port: PCIe port to enable
 * @enable: Enable/disable the PCIe adapter
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_pci_port_enable(struct tb_port *port, bool enable)
{
	u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;
	if (!port->cap_adap)
		return -ENXIO;
	return tb_port_write(port, &word, TB_CFG_PORT,
			     port->cap_adap + ADP_PCIE_CS_0, 1);
}

/**
 * tb_dp_port_hpd_is_active() - Is HPD already active
 * @port: DP out port to check
 *
 * Checks if the DP OUT adapter port has HPD bit already set.
 *
 * Return: %1 if HPD is active, %0 otherwise.
 */
int tb_dp_port_hpd_is_active(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	return !!(data & ADP_DP_CS_2_HPD);
}

/**
 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
 * @port: Port to clear HPD
 *
 * If the DP IN port has HPD set, this function can be used to clear it.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_dp_port_hpd_clear(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_3, 1);
	if (ret)
		return ret;

	/* Writing HPDC clears the pending HPD */
	data |= ADP_DP_CS_3_HPDC;
	return tb_port_write(port, &data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_3, 1);
}

/**
 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
 * @port: DP IN/OUT port to set hops
 * @video: Video Hop ID
 * @aux_tx: AUX TX Hop ID
 * @aux_rx: AUX RX Hop ID
 *
 * Programs specified Hop IDs for DP IN/OUT port. Can be called for USB4
 * router DP adapters too but does not program the values as the fields
 * are read-only.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
			unsigned int aux_tx, unsigned int aux_rx)
{
	u32 data[2];
	int ret;

	if (tb_switch_is_usb4(port->sw))
		return 0;

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	/* Clear the three Hop ID fields before inserting the new values */
	data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
		ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
		ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}

/**
 * tb_dp_port_is_enabled() - Is DP adapter port enabled
 * @port: DP adapter port to check
 *
 * Return: %true if DP port is enabled, %false otherwise.
 */
bool tb_dp_port_is_enabled(struct tb_port *port)
{
	u32 data[2];

	if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
			 ARRAY_SIZE(data)))
		return false;

	/* Enabled if either video (VE) or AUX (AE) path is on */
	return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
}

/**
 * tb_dp_port_enable() - Enables/disables DP paths of a port
 * @port: DP IN/OUT port
 * @enable: Enable/disable DP path
 *
 * Once Hop IDs are programmed DP paths can be enabled or disabled by
 * calling this function.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_dp_port_enable(struct tb_port *port, bool enable)
{
	u32 data[2];
	int ret;

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	if (enable)
		data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
	else
		data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}

/* switch utility functions */

/* Maps sw->generation to a human readable name for log output */
static const char *tb_switch_generation_name(const struct tb_switch *sw)
{
	switch (sw->generation) {
	case 1:
		return "Thunderbolt 1";
	case 2:
		return "Thunderbolt 2";
	case 3:
		return "Thunderbolt 3";
	case 4:
		return "USB4";
	default:
		return "Unknown";
	}
}

/* Dumps the router config space header to the debug log */
static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
{
	const struct tb_regs_switch_header *regs = &sw->config;

	tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
	       tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
	       regs->revision, regs->thunderbolt_version);
	tb_dbg(tb, "  Max Port Number: %d\n", regs->max_port_number);
	tb_dbg(tb, "  Config:\n");
	tb_dbg(tb,
		"   Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
	       regs->upstream_port_number, regs->depth,
	       (((u64) regs->route_hi) << 32) | regs->route_lo,
	       regs->enabled, regs->plug_events_delay);
	tb_dbg(tb, "   unknown1: %#x unknown4: %#x\n",
	       regs->__unknown1, regs->__unknown4);
}

/*
 * Resets the host router: resets downstream lane adapters, disables
 * protocol adapters and cleans up their path config spaces. Thunderbolt 1
 * hardware uses the dedicated "reset" config space packet instead.
 */
static int tb_switch_reset_host(struct tb_switch *sw)
{
	if (sw->generation > 1) {
		struct tb_port *port;

		tb_switch_for_each_port(sw, port) {
			int i, ret;

			/*
			 * For lane adapters we issue downstream port
			 * reset and clear up path config spaces.
			 *
			 * For protocol adapters we disable the path and
			 * clear path config space one by one (from 8 to
			 * Max Input HopID of the adapter).
			 */
			if (tb_port_is_null(port) && !tb_is_upstream_port(port)) {
				ret = tb_port_reset(port);
				if (ret)
					return ret;
			} else if (tb_port_is_usb3_down(port) ||
				   tb_port_is_usb3_up(port)) {
				tb_usb3_port_enable(port, false);
			} else if (tb_port_is_dpin(port) ||
				   tb_port_is_dpout(port)) {
				tb_dp_port_enable(port, false);
			} else if (tb_port_is_pcie_down(port) ||
				   tb_port_is_pcie_up(port)) {
				tb_pci_port_enable(port, false);
			} else {
				continue;
			}

			/* Cleanup path config space of protocol adapter */
			for (i = TB_PATH_MIN_HOPID;
			     i <= port->config.max_in_hop_id; i++) {
				ret = tb_path_deactivate_hop(port, i);
				if (ret)
					return ret;
			}
		}
	} else {
		struct tb_cfg_result res;

		/* Thunderbolt 1 uses the "reset" config space packet */
		res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
				      TB_CFG_SWITCH, 2, 2);
		if (res.err)
			return res.err;
		res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
		if (res.err > 0)
			return -EIO;
		else if (res.err < 0)
			return res.err;
	}

	return 0;
}

/* Device routers are reset through their parent's downstream port */
static int tb_switch_reset_device(struct tb_switch *sw)
{
	return tb_port_reset(tb_switch_downstream_port(sw));
}

/* Returns true if the router has been enumerated (valid bit set) */
static bool tb_switch_enumerated(struct tb_switch *sw)
{
	u32 val;
	int ret;

	/*
	 * Read directly from the hardware because we use this also
	 * during system sleep where sw->config.enabled is already set
	 * by us.
	 */
	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_3, 1);
	if (ret)
		return false;

	return !!(val & ROUTER_CS_3_V);
}

/**
 * tb_switch_reset() - Perform reset to the router
 * @sw: Router to reset
 *
 * Issues reset to the router @sw. Can be used for any router. For host
 * routers, resets all the downstream ports and cleans up path config
 * spaces accordingly. For device routers issues downstream port reset
 * through the parent router, so as side effect there will be unplug
 * soon after this is finished.
 *
 * If the router is not enumerated does nothing.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_switch_reset(struct tb_switch *sw)
{
	int ret;

	/*
	 * We cannot access the port config spaces unless the router is
	 * already enumerated. If the router is not enumerated it is
	 * equal to being reset so we can skip that here.
	 */
	if (!tb_switch_enumerated(sw))
		return 0;

	tb_sw_dbg(sw, "resetting\n");

	if (tb_route(sw))
		ret = tb_switch_reset_device(sw);
	else
		ret = tb_switch_reset_host(sw);

	if (ret)
		tb_sw_warn(sw, "failed to reset\n");

	return ret;
}

/**
 * tb_switch_wait_for_bit() - Wait for specified value of bits in offset
 * @sw: Router to read the offset value from
 * @offset: Offset in the router config space to read from
 * @bit: Bit mask in the offset to wait for
 * @value: Value of the bits to wait for
 * @timeout_msec: Timeout in ms how long to wait
 *
 * Wait till the specified bits in specified offset reach specified value.
 *
 * Return:
 * * %0 - On success.
 * * %-ETIMEDOUT - If the @value was not reached within
 *		   the given timeout.
 * * Negative errno - In case of failure.
 */
int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
			   u32 value, int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		u32 val;
		int ret;

		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
		if (ret)
			return ret;

		if ((val & bit) == value)
			return 0;

		usleep_range(50, 100);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

/*
 * tb_plug_events_active() - enable/disable plug events on a switch
 *
 * Also configures a sane plug_events_delay of 255ms.
 *
 * Return: %0 on success, negative errno otherwise.
 */
static int tb_plug_events_active(struct tb_switch *sw, bool active)
{
	u32 data;
	int res;

	/* ICM firmware and USB4 routers handle plug events themselves */
	if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
		return 0;

	sw->config.plug_events_delay = 0xff;
	res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
	if (res)
		return res;

	res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
	if (res)
		return res;

	if (active) {
		/*
		 * NOTE(review): 0xFFFFFF83 clears the event-disable bits;
		 * mask values predate symbolic names — confirm against the
		 * cap_plug_events register layout.
		 */
		data = data & 0xFFFFFF83;
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
			break;
		default:
			/*
			 * Skip Alpine Ridge, it needs to have vendor
			 * specific USB hotplug event enabled for the
			 * internal xHCI to work.
			 */
			if (!tb_switch_is_alpine_ridge(sw))
				data |= TB_PLUG_EVENTS_USB_DISABLE;
		}
	} else {
		data = data | 0x7c;
	}
	return tb_sw_write(sw, &data, TB_CFG_SWITCH,
			   sw->cap_plug_events + 1, 1);
}

static ssize_t authorized_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%u\n", sw->authorized);
}

/*
 * De-authorizes @dev (if it is an authorized router) and, depth first,
 * all of its child routers. Emits AUTHORIZED=0 uevent for each.
 */
static int disapprove_switch(struct device *dev, void *not_used)
{
	char *envp[] = { "AUTHORIZED=0", NULL };
	struct tb_switch *sw;

	sw = tb_to_switch(dev);
	if (sw && sw->authorized) {
		int ret;

		/* First children */
		ret = device_for_each_child_reverse(&sw->dev, NULL, disapprove_switch);
		if (ret)
			return ret;

		ret = tb_domain_disapprove_switch(sw->tb, sw);
		if (ret)
			return ret;

		sw->authorized = 0;
		kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
	}

	return 0;
}

/*
 * Changes the authorization state of @sw: 0 = disapprove, 1 = approve
 * (with key if one is set), 2 = challenge with the stored key.
 */
static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
{
	char envp_string[13];
	int ret = -EINVAL;
	char *envp[] = { envp_string, NULL };

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	/* Nothing to do if the state does not actually change */
	if (!!sw->authorized == !!val)
		goto unlock;

	switch (val) {
	/* Disapprove switch */
	case 0:
		if (tb_route(sw)) {
			ret = disapprove_switch(&sw->dev, NULL);
			goto unlock;
		}
		break;

	/* Approve switch */
	case 1:
		if (sw->key)
			ret = tb_domain_approve_switch_key(sw->tb, sw);
		else
			ret = tb_domain_approve_switch(sw->tb, sw);
		break;

	/* Challenge switch */
	case 2:
		if (sw->key)
			ret = tb_domain_challenge_switch_key(sw->tb, sw);
		break;

	default:
		break;
	}

	if (!ret) {
		sw->authorized = val;
		/*
		 * Notify status change to the userspace, informing the new
		 * value of /sys/bus/thunderbolt/devices/.../authorized.
		 */
		sprintf(envp_string, "AUTHORIZED=%u", sw->authorized);
		kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
	}

unlock:
	mutex_unlock(&sw->tb->lock);
	return ret;
}

static ssize_t authorized_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	unsigned int val;
	ssize_t ret;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;
	if (val > 2)
		return -EINVAL;

	/* Keep the router runtime-resumed while changing authorization */
	pm_runtime_get_sync(&sw->dev);
	ret = tb_switch_set_authorized(sw, val);
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret ? ret : count;
}
static DEVICE_ATTR_RW(authorized);

static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%u\n", sw->boot);
}
static DEVICE_ATTR_RO(boot);

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%#x\n", sw->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%s\n", sw->device_name ?: "");
}
static DEVICE_ATTR_RO(device_name);

static ssize_t
generation_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%u\n", sw->generation);
}
static DEVICE_ATTR_RO(generation);

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	ssize_t ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->key)
		ret = sysfs_emit(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
	else
		ret = sysfs_emit(buf, "\n");

	mutex_unlock(&sw->tb->lock);
	return ret;
}

static ssize_t key_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u8 key[TB_SWITCH_KEY_SIZE];
	ssize_t ret = count;
	bool clear = false;

	/* Writing a bare newline clears the key */
	if (!strcmp(buf, "\n"))
		clear = true;
	else if (hex2bin(key, buf, sizeof(key)))
		return -EINVAL;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->authorized) {
		/* Key cannot be changed while the router is authorized */
		ret = -EBUSY;
	} else {
		kfree(sw->key);
		if (clear) {
			sw->key = NULL;
		} else {
			sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
			if (!sw->key)
				ret = -ENOMEM;
		}
	}

	mutex_unlock(&sw->tb->lock);
	return ret;
}
static DEVICE_ATTR(key, 0600, key_show, key_store);

static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%u.0 Gb/s\n", sw->link_speed);
}

/*
 * Currently all lanes must run at the same speed but we expose here
 * both directions to allow possible asymmetric links in the future.
 */
static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);

static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	unsigned int width;

	switch (sw->link_width) {
	case TB_LINK_WIDTH_SINGLE:
	case TB_LINK_WIDTH_ASYM_TX:
		width = 1;
		break;
	case TB_LINK_WIDTH_DUAL:
		width = 2;
		break;
	case TB_LINK_WIDTH_ASYM_RX:
		width = 3;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	return sysfs_emit(buf, "%u\n", width);
}
static DEVICE_ATTR(rx_lanes, 0444, rx_lanes_show, NULL);

static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	unsigned int width;

	switch (sw->link_width) {
	case TB_LINK_WIDTH_SINGLE:
	case TB_LINK_WIDTH_ASYM_RX:
		width = 1;
		break;
	case TB_LINK_WIDTH_DUAL:
		width = 2;
		break;
	case TB_LINK_WIDTH_ASYM_TX:
		width = 3;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	return sysfs_emit(buf, "%u\n", width);
}
static DEVICE_ATTR(tx_lanes, 0444, tx_lanes_show, NULL);

static ssize_t nvm_authenticate_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u32 status;

	nvm_get_auth_status(sw, &status);
	return sysfs_emit(buf, "%#x\n", status);
}

/*
 * Common implementation for the nvm_authenticate and
 * nvm_authenticate_on_disconnect attributes. @buf selects the
 * operation: WRITE_ONLY, WRITE_AND_AUTHENTICATE or AUTHENTICATE_ONLY.
 */
static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
				      bool disconnect)
{
	struct tb_switch *sw = tb_to_switch(dev);
	int val, ret;

	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto exit_rpm;
	}

	if (sw->no_nvm_upgrade) {
		ret = -EOPNOTSUPP;
		goto exit_unlock;
	}

	/* If NVMem devices are not yet added */
	if (!sw->nvm) {
		ret = -EAGAIN;
		goto exit_unlock;
	}

	ret = kstrtoint(buf, 10, &val);
	if (ret)
		goto exit_unlock;

	/* Always clear the authentication status */
	nvm_clear_auth_status(sw);

	if (val > 0) {
		if (val == AUTHENTICATE_ONLY) {
			if (disconnect)
				ret = -EINVAL;
			else
				ret = nvm_authenticate(sw, true);
		} else {
			if (!sw->nvm->flushed) {
				if (!sw->nvm->buf) {
					ret = -EINVAL;
					goto exit_unlock;
				}

				ret = nvm_validate_and_write(sw);
				if (ret || val == WRITE_ONLY)
					goto exit_unlock;
			}
			if (val == WRITE_AND_AUTHENTICATE) {
				if (disconnect)
					ret = tb_lc_force_power(sw);
				else
					ret = nvm_authenticate(sw, false);
			}
		}
	}

exit_unlock:
	mutex_unlock(&sw->tb->lock);
exit_rpm:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}

static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	int ret = nvm_authenticate_sysfs(dev, buf, false);
	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);

static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return nvm_authenticate_show(dev, attr, buf);
}

static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	int ret;

	ret = nvm_authenticate_sysfs(dev, buf, true);
	return ret ? ret : count;
}
static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect);

static ssize_t nvm_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	int ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->safe_mode)
		ret = -ENODATA;
	else if (!sw->nvm)
		ret = -EAGAIN;
	else
		ret = sysfs_emit(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);

	mutex_unlock(&sw->tb->lock);

	return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%#x\n", sw->vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%s\n", sw->vendor_name ?: "");
}
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%pUb\n", sw->uuid);
}
static DEVICE_ATTR_RO(unique_id);

static struct attribute *switch_attrs[] = {
	&dev_attr_authorized.attr,
	&dev_attr_boot.attr,
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_generation.attr,
	&dev_attr_key.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_authenticate_on_disconnect.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_rx_speed.attr,
	&dev_attr_rx_lanes.attr,
	&dev_attr_tx_speed.attr,
	&dev_attr_tx_lanes.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	&dev_attr_unique_id.attr,
	NULL,
};

/*
 * Hides attributes that do not apply to this router: security policy,
 * missing identification fields, NVM capabilities and link attributes
 * are all checked per attribute. In safe mode everything else is hidden.
 */
static umode_t switch_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct tb_switch *sw = tb_to_switch(dev);

	if (attr == &dev_attr_authorized.attr) {
		if (sw->tb->security_level == TB_SECURITY_NOPCIE ||
		    sw->tb->security_level == TB_SECURITY_DPONLY)
			return 0;
	} else if (attr == &dev_attr_device.attr) {
		if (!sw->device)
			return 0;
	} else if (attr == &dev_attr_device_name.attr) {
		if (!sw->device_name)
			return 0;
	} else if (attr == &dev_attr_vendor.attr)  {
		if (!sw->vendor)
			return 0;
	} else if (attr == &dev_attr_vendor_name.attr)  {
		if (!sw->vendor_name)
			return 0;
	} else if (attr == &dev_attr_key.attr) {
		if (tb_route(sw) &&
		    sw->tb->security_level == TB_SECURITY_SECURE &&
		    sw->security_level == TB_SECURITY_SECURE)
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_rx_speed.attr ||
		   attr == &dev_attr_rx_lanes.attr ||
		   attr == &dev_attr_tx_speed.attr ||
		   attr == &dev_attr_tx_lanes.attr) {
		if (tb_route(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_authenticate.attr) {
		if (nvm_upgradeable(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_version.attr) {
		if (nvm_readable(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_boot.attr) {
		if (tb_route(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) {
		if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
			return attr->mode;
		return 0;
	}

	return sw->safe_mode ? 0 : attr->mode;
}

static const struct attribute_group switch_group = {
	.is_visible = switch_attr_is_visible,
	.attrs = switch_attrs,
};

static const struct attribute_group *switch_groups[] = {
	&switch_group,
	NULL,
};

/* Device release callback: frees everything owned by the router */
static void tb_switch_release(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	struct tb_port *port;

	dma_port_free(sw->dma_port);

	tb_switch_for_each_port(sw, port) {
		ida_destroy(&port->in_hopids);
		ida_destroy(&port->out_hopids);
	}

	kfree(sw->uuid);
	kfree(sw->device_name);
	kfree(sw->vendor_name);
	kfree(sw->ports);
	kfree(sw->drom);
	kfree(sw->key);
	kfree(sw);
}

/* Adds USB4_VERSION and USB4_TYPE (host/hub/device) uevent variables */
static int tb_switch_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct tb_switch *sw = tb_to_switch(dev);
	const char *type;

	if (tb_switch_is_usb4(sw)) {
		if (add_uevent_var(env, "USB4_VERSION=%u.0",
				   usb4_switch_version(sw)))
			return -ENOMEM;
	}

	if (!tb_route(sw)) {
		type = "host";
	} else {
		const struct tb_port *port;
		bool hub = false;

		/* Device is hub if it has any downstream ports */
		tb_switch_for_each_port(sw, port) {
			if (!port->disabled && !tb_is_upstream_port(port) &&
			     tb_port_is_null(port)) {
				hub = true;
				break;
			}
		}

		type = hub ? "hub" : "device";
	}

	if (add_uevent_var(env, "USB4_TYPE=%s", type))
		return -ENOMEM;
	return 0;
}

/*
 * Currently only need to provide the callbacks. Everything else is handled
 * in the connection manager.
 */
static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;

	if (cm_ops->runtime_suspend_switch)
		return cm_ops->runtime_suspend_switch(sw);

	return 0;
}

static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;

	if (cm_ops->runtime_resume_switch)
		return cm_ops->runtime_resume_switch(sw);
	return 0;
}

static const struct dev_pm_ops tb_switch_pm_ops = {
	SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
			   NULL)
};

const struct device_type tb_switch_type = {
	.name = "thunderbolt_device",
	.release = tb_switch_release,
	.uevent = tb_switch_uevent,
	.pm = &tb_switch_pm_ops,
};

/*
 * Determines the Thunderbolt generation (1-3) or USB4 (4) from the
 * device ID. Unknown devices default to generation 1.
 */
static int tb_switch_get_generation(struct tb_switch *sw)
{
	if (tb_switch_is_usb4(sw))
		return 4;

	if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
		case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
		case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
		case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
		case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
			return 1;

		case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
			return 2;

		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
		case PCI_DEVICE_ID_INTEL_ICL_NHI0:
		case PCI_DEVICE_ID_INTEL_ICL_NHI1:
			return 3;
		}
	}

	/*
	 * For unknown switches assume generation to be 1 to be on the
	 * safe side.
	 */
	tb_sw_warn(sw, "unsupported switch device id %#x\n",
		   sw->config.device_id);
	return 1;
}

/* Returns true if @depth exceeds the topology limit of the domain */
static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
{
	int max_depth;

	if (tb_switch_is_usb4(sw) ||
	    (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
		max_depth = USB4_SWITCH_MAX_DEPTH;
	else
		max_depth = TB_SWITCH_MAX_DEPTH;

	return depth > max_depth;
}

/**
 * tb_switch_alloc() - allocate a switch
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * Allocates and initializes a switch. Will not upload configuration to
 * the switch. For that you need to call tb_switch_configure()
 * separately. The returned switch should be released by calling
 * tb_switch_put().
 *
 * Return: Pointer to &struct tb_switch or ERR_PTR() in case of failure.
 */
struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
				  u64 route)
{
	struct tb_switch *sw;
	int upstream_port;
	int i, ret, depth;

	/* Unlock the downstream port so we can access the switch below */
	if (route) {
		struct tb_switch *parent_sw = tb_to_switch(parent);
		struct tb_port *down;

		down = tb_port_at(route, parent_sw);
		tb_port_unlock(down);
	}

	depth = tb_route_length(route);

	upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
	if (upstream_port < 0)
		return ERR_PTR(upstream_port);

	sw = kzalloc_obj(*sw);
	if (!sw)
		return ERR_PTR(-ENOMEM);

	sw->tb = tb;
	/* Read the first 5 dwords of the router config space */
	ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
	if (ret)
		goto err_free_sw_ports;

	sw->generation = tb_switch_get_generation(sw);

	tb_dbg(tb, "current switch config:\n");
	tb_dump_switch(tb, sw);

	/* configure switch */
	sw->config.upstream_port_number = upstream_port;
	sw->config.depth = depth;
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->config.enabled = 0;

	/* Make sure we do not exceed maximum topology limit */
	if (tb_switch_exceeds_max_depth(sw, depth)) {
		ret = -EADDRNOTAVAIL;
		goto err_free_sw_ports;
	}

	/* initialize ports (port 0 is the control port of the router) */
	sw->ports = kzalloc_objs(*sw->ports, sw->config.max_port_number + 1);
	if (!sw->ports) {
		ret = -ENOMEM;
		goto err_free_sw_ports;
	}

	for (i = 0; i <= sw->config.max_port_number; i++) {
		/* minimum setup for tb_find_cap and tb_drom_read to work */
		sw->ports[i].sw = sw;
		sw->ports[i].port = i;

		/* Control port does not need HopID allocation */
		if (i) {
			ida_init(&sw->ports[i].in_hopids);
			ida_init(&sw->ports[i].out_hopids);
		}
	}

	/* Cache vendor specific capability offsets; 0 means not present */
	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
	if (ret > 0)
		sw->cap_plug_events = ret;

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_TIME2);
	if (ret > 0)
		sw->cap_vsec_tmu = ret;

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
	if (ret > 0)
		sw->cap_lc = ret;

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_CP_LP);
	if (ret > 0)
		sw->cap_lp = ret;

	/* Root switch is always authorized */
	if (!route)
		sw->authorized = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;

err_free_sw_ports:
	/* No HopIDs have been allocated yet so freeing the arrays suffices */
	kfree(sw->ports);
	kfree(sw);

	return ERR_PTR(ret);
}

/**
 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * This creates a switch in safe mode. This means the switch pretty much
 * lacks all capabilities except DMA configuration port before it is
 * flashed with a valid NVM firmware.
 *
 * The returned switch must be released by calling tb_switch_put().
 *
 * Return: Pointer to &struct tb_switch or ERR_PTR() in case of failure.
 */
struct tb_switch *
tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
{
	struct tb_switch *sw;

	sw = kzalloc_obj(*sw);
	if (!sw)
		return ERR_PTR(-ENOMEM);

	/* Only the minimal fields needed to address the DMA port */
	sw->tb = tb;
	sw->config.depth = tb_route_length(route);
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->safe_mode = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;
}

/**
 * tb_switch_configure() - Uploads configuration to the switch
 * @sw: Switch to configure
 *
 * Call this function before the switch is added to the system. It will
 * upload configuration to the switch and makes it available for the
 * connection manager to use. Can be called to the switch again after
 * resume from low power states to re-initialize it.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_switch_configure(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	u64 route;
	int ret;

	route = tb_route(sw);

	tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
	       sw->config.enabled ? "restoring" : "initializing", route,
	       tb_route_length(route), sw->config.upstream_port_number);

	sw->config.enabled = 1;

	if (tb_switch_is_usb4(sw)) {
		/*
		 * For USB4 devices, we need to program the CM version
		 * accordingly so that it knows to expose all the
		 * additional capabilities. Program it according to USB4
		 * version to avoid changing existing (v1) routers behaviour.
		 */
		if (usb4_switch_version(sw) < 2)
			sw->config.cmuv = ROUTER_CS_4_CMUV_V1;
		else
			sw->config.cmuv = ROUTER_CS_4_CMUV_V2;
		sw->config.plug_events_delay = 0xa;

		/*
		 * Enumerate the switch. The "+ 1" skips the first config
		 * dword; the write starts at ROUTER_CS_1 (4 dwords for USB4).
		 */
		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
				  ROUTER_CS_1, 4);
		if (ret)
			return ret;

		ret = usb4_switch_setup(sw);
	} else {
		if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
			tb_sw_warn(sw, "unknown switch vendor id %#x\n",
				   sw->config.vendor_id);

		if (!sw->cap_plug_events) {
			tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
			return -ENODEV;
		}

		/* Enumerate the switch (3 dwords for legacy routers) */
		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
				  ROUTER_CS_1, 3);
	}
	if (ret)
		return ret;

	return tb_plug_events_active(sw, true);
}

/**
 * tb_switch_configuration_valid() - Set the tunneling configuration to be valid
 * @sw: Router to configure
 *
 * Needs to be called before any tunnels can be setup through the
 * router. Can be called to any router.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_switch_configuration_valid(struct tb_switch *sw)
{
	/* Only USB4 routers need an explicit "configuration valid" step */
	if (tb_switch_is_usb4(sw))
		return usb4_switch_configuration_valid(sw);
	return 0;
}

/*
 * Populate sw->uuid. USB4 routers and controllers without a fused UUID
 * get a UUID derived from the UID (ICM compatible); otherwise the fused
 * UUID is read from the link controller registers.
 */
static int tb_switch_set_uuid(struct tb_switch *sw)
{
	bool uid = false;
	u32 uuid[4];
	int ret;

	if (sw->uuid)
		return 0;

	if (tb_switch_is_usb4(sw)) {
		ret = usb4_switch_read_uid(sw, &sw->uid);
		if (ret)
			return ret;
		uid = true;
	} else {
		/*
		 * The newer controllers include fused UUID as part of
		 * link controller specific registers
		 */
		ret = tb_lc_read_uuid(sw, uuid);
		if (ret) {
			if (ret != -EINVAL)
				return ret;
			/* -EINVAL: no fused UUID, fall back to UID */
			uid = true;
		}
	}

	if (uid) {
		/*
		 * ICM generates UUID based on UID and fills the upper
		 * two words with ones. This is not strictly following
		 * UUID format but we want to be compatible with it so
		 * we do the same here.
		 */
		uuid[0] = sw->uid & 0xffffffff;
		uuid[1] = (sw->uid >> 32) & 0xffffffff;
		uuid[2] = 0xffffffff;
		uuid[3] = 0xffffffff;
	}

	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
	if (!sw->uuid)
		return -ENOMEM;
	return 0;
}

/*
 * Set up the DMA (NVM upgrade) port and check the result of a possible
 * previous NVM authentication. May power cycle the router and return
 * -ESHUTDOWN if a pending authentication requires it.
 */
static int tb_switch_add_dma_port(struct tb_switch *sw)
{
	u32 status;
	int ret;

	switch (sw->generation) {
	case 2:
		/* Only root switch can be upgraded */
		if (tb_route(sw))
			return 0;

		fallthrough;
	case 3:
	case 4:
		/* nvm_set_auth_status() below requires the UUID */
		ret = tb_switch_set_uuid(sw);
		if (ret)
			return ret;
		break;

	default:
		/*
		 * DMA port is the only thing available when the switch
		 * is in safe mode.
		 */
		if (!sw->safe_mode)
			return 0;
		break;
	}

	if (sw->no_nvm_upgrade)
		return 0;

	if (tb_switch_is_usb4(sw)) {
		/* USB4 routers report the status directly, no DMA port */
		ret = usb4_switch_nvm_authenticate_status(sw, &status);
		if (ret)
			return ret;

		if (status) {
			tb_sw_info(sw, "switch flash authentication failed\n");
			nvm_set_auth_status(sw, status);
		}

		return 0;
	}

	/* Root switch DMA port requires running firmware */
	if (!tb_route(sw) && !tb_switch_is_icm(sw))
		return 0;

	sw->dma_port = dma_port_alloc(sw);
	if (!sw->dma_port)
		return 0;

	/*
	 * If there is status already set then authentication failed
	 * when the dma_port_flash_update_auth() returned. Power cycling
	 * is not needed (it was done already) so only thing we do here
	 * is to unblock runtime PM of the root port.
	 */
	nvm_get_auth_status(sw, &status);
	if (status) {
		if (!tb_route(sw))
			nvm_authenticate_complete_dma_port(sw);
		return 0;
	}

	/*
	 * Check status of the previous flash authentication. If there
	 * is one we need to power cycle the switch in any case to make
	 * it functional again.
	 */
	ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
	if (ret <= 0)
		return ret;

	/* Now we can allow root port to suspend again */
	if (!tb_route(sw))
		nvm_authenticate_complete_dma_port(sw);

	if (status) {
		tb_sw_info(sw, "switch flash authentication failed\n");
		nvm_set_auth_status(sw, status);
	}

	tb_sw_info(sw, "power cycling the switch now\n");
	dma_port_power_cycle(sw->dma_port);

	/*
	 * We return error here which causes the switch adding failure.
	 * It should appear back after power cycle is complete.
	 */
	return -ESHUTDOWN;
}

/*
 * Pair adjacent lane adapters into dual-link (lane 0 / lane 1) port
 * pairs unless the DROM already described the pairing.
 */
static void tb_switch_default_link_ports(struct tb_switch *sw)
{
	int i;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];
		struct tb_port *subordinate;

		if (!tb_port_is_null(port))
			continue;

		/* Check for the subordinate port */
		if (i == sw->config.max_port_number ||
		    !tb_port_is_null(&sw->ports[i + 1]))
			continue;

		/* Link them if not already done so (by DROM) */
		subordinate = &sw->ports[i + 1];
		if (!port->dual_link_port && !subordinate->dual_link_port) {
			port->link_nr = 0;
			port->dual_link_port = subordinate;
			subordinate->link_nr = 1;
			subordinate->dual_link_port = port;

			tb_sw_dbg(sw, "linked ports %d <-> %d\n",
				  port->port, subordinate->port);
		}
	}
}

/* Both ends of the upstream link must support bonding for it to work */
static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
{
	const struct tb_port *up = tb_upstream_port(sw);

	if (!up->dual_link_port || !up->dual_link_port->remote)
		return false;

	if (tb_switch_is_usb4(sw))
		return usb4_switch_lane_bonding_possible(sw);
	return tb_lc_lane_bonding_possible(sw);
}

/*
 * Re-read upstream link speed and width into sw->link_speed /
 * sw->link_width and send a uevent if either changed.
 */
static int tb_switch_update_link_attributes(struct tb_switch *sw)
{
	struct tb_port *up;
	bool change = false;
	int ret;

	/* Host routers and ICM managed routers are not tracked here */
	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return 0;

	up = tb_upstream_port(sw);

	ret = tb_port_get_link_speed(up);
	if (ret < 0)
		return ret;
	if (sw->link_speed != ret)
		change = true;
	sw->link_speed = ret;

	ret = tb_port_get_link_width(up);
	if (ret < 0)
		return ret;
	if (sw->link_width != ret)
		change = true;
	sw->link_width = ret;

	/* Notify userspace that there is possible link attribute change */
	if (device_is_registered(&sw->dev) && change)
		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);

	return 0;
}

/* Must be called after tb_switch_update_link_attributes() */
static void tb_switch_link_init(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	bool bonded;

	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return;

	tb_sw_dbg(sw, "current link speed %u.0 Gb/s\n", sw->link_speed);
	tb_sw_dbg(sw, "current link width %s\n", tb_width_name(sw->link_width));

	bonded = sw->link_width >= TB_LINK_WIDTH_DUAL;

	/*
	 * Gen 4 links come up as bonded so update the port structures
	 * accordingly.
	 */
	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	up->bonded = bonded;
	if (up->dual_link_port)
		up->dual_link_port->bonded = bonded;
	tb_port_update_credits(up);

	down->bonded = bonded;
	if (down->dual_link_port)
		down->dual_link_port->bonded = bonded;
	tb_port_update_credits(down);

	if (tb_port_get_link_generation(up) < 4)
		return;

	/*
	 * Set the Gen 4 preferred link width. This is what the router
	 * prefers when the link is brought up. If the router does not
	 * support asymmetric link configuration, this also will be set
	 * to TB_LINK_WIDTH_DUAL.
	 */
	sw->preferred_link_width = sw->link_width;
	tb_sw_dbg(sw, "preferred link width %s\n",
		  tb_width_name(sw->preferred_link_width));
}

/**
 * tb_switch_lane_bonding_enable() - Enable lane bonding
 * @sw: Switch to enable lane bonding
 *
 * Connection manager can call this function to enable lane bonding of a
 * switch. If conditions are correct and both switches support the feature,
 * lanes are bonded. It is safe to call this to any switch.
 *
 * Return: %0 on success, negative errno otherwise.
 */
static int tb_switch_lane_bonding_enable(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	unsigned int width;
	int ret;

	if (!tb_switch_lane_bonding_possible(sw))
		return 0;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	if (!tb_port_width_supported(up, TB_LINK_WIDTH_DUAL) ||
	    !tb_port_width_supported(down, TB_LINK_WIDTH_DUAL))
		return 0;

	/*
	 * Both lanes need to be in CL0. Here we assume lane 0 already be in
	 * CL0 and check just for lane 1.
	 */
	if (tb_wait_for_port(down->dual_link_port, false) <= 0)
		return -ENOTCONN;

	ret = tb_port_lane_bonding_enable(up);
	if (ret) {
		tb_port_warn(up, "failed to enable lane bonding\n");
		return ret;
	}

	ret = tb_port_lane_bonding_enable(down);
	if (ret) {
		tb_port_warn(down, "failed to enable lane bonding\n");
		/* Roll back the upstream side so both ends stay in sync */
		tb_port_lane_bonding_disable(up);
		return ret;
	}

	/* Any of the widths are all bonded */
	width = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX |
		TB_LINK_WIDTH_ASYM_RX;

	return tb_port_wait_for_link_width(down, width, 100);
}

/**
 * tb_switch_lane_bonding_disable() - Disable lane bonding
 * @sw: Switch whose lane bonding to disable
 *
 * Disables lane bonding between @sw and parent. This can be called even
 * if lanes were not bonded originally.
 *
 * Return: %0 on success, negative errno otherwise.
 */
static int tb_switch_lane_bonding_disable(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	if (!up->bonded)
		return 0;

	/*
	 * If the link is Gen 4 there is no way to switch the link to
	 * two single lane links so avoid that here. Also don't bother
	 * if the link is not up anymore (sw is unplugged).
	 */
	ret = tb_port_get_link_generation(up);
	if (ret < 0)
		return ret;
	if (ret >= 4)
		return -EOPNOTSUPP;

	down = tb_switch_downstream_port(sw);
	tb_port_lane_bonding_disable(up);
	tb_port_lane_bonding_disable(down);

	/*
	 * It is fine if we get other errors as the router might have
	 * been unplugged.
	 */
	return tb_port_wait_for_link_width(down, TB_LINK_WIDTH_SINGLE, 100);
}

/* Note updating sw->link_width done in tb_switch_update_link_attributes() */
static int tb_switch_asym_enable(struct tb_switch *sw, enum tb_link_width width)
{
	struct tb_port *up, *down, *port;
	enum tb_link_width down_width;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	/*
	 * The downstream end is programmed with the mirrored width; the
	 * router that gives up a TX lane is the one that starts the change.
	 */
	if (width == TB_LINK_WIDTH_ASYM_TX) {
		down_width = TB_LINK_WIDTH_ASYM_RX;
		port = down;
	} else {
		down_width = TB_LINK_WIDTH_ASYM_TX;
		port = up;
	}

	ret = tb_port_set_link_width(up, width);
	if (ret)
		return ret;

	ret = tb_port_set_link_width(down, down_width);
	if (ret)
		return ret;

	/*
	 * Initiate the change in the router that one of its TX lanes is
	 * changing to RX but do so only if there is an actual change.
	 */
	if (sw->link_width != width) {
		ret = usb4_port_asym_start(port);
		if (ret)
			return ret;

		ret = tb_port_wait_for_link_width(up, width, 100);
		if (ret)
			return ret;
	}

	return 0;
}

/* Note updating sw->link_width done in tb_switch_update_link_attributes() */
static int tb_switch_asym_disable(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	/* Program both ends back to symmetric dual-lane */
	ret = tb_port_set_link_width(up, TB_LINK_WIDTH_DUAL);
	if (ret)
		return ret;

	ret = tb_port_set_link_width(down, TB_LINK_WIDTH_DUAL);
	if (ret)
		return ret;

	/*
	 * Initiate the change in the router that has three TX lanes and
	 * is changing one of its TX lanes to RX but only if there is a
	 * change in the link width.
	 */
	if (sw->link_width > TB_LINK_WIDTH_DUAL) {
		if (sw->link_width == TB_LINK_WIDTH_ASYM_TX)
			ret = usb4_port_asym_start(up);
		else
			ret = usb4_port_asym_start(down);
		if (ret)
			return ret;

		ret = tb_port_wait_for_link_width(up, TB_LINK_WIDTH_DUAL, 100);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_switch_set_link_width() - Configure router link width
 * @sw: Router to configure
 * @width: The new link width
 *
 * Set device router link width to @width from router upstream port
 * perspective. Supports also asymmetric links if the routers both side
 * of the link supports it.
 *
 * Does nothing for host router.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_switch_set_link_width(struct tb_switch *sw, enum tb_link_width width)
{
	struct tb_port *up, *down;
	int ret = 0;

	/* Host router has no upstream link */
	if (!tb_route(sw))
		return 0;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	switch (width) {
	case TB_LINK_WIDTH_SINGLE:
		ret = tb_switch_lane_bonding_disable(sw);
		break;

	case TB_LINK_WIDTH_DUAL:
		/* Leave asymmetric mode first if we are in it */
		if (sw->link_width == TB_LINK_WIDTH_ASYM_TX ||
		    sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
			ret = tb_switch_asym_disable(sw);
			if (ret)
				break;
		}
		ret = tb_switch_lane_bonding_enable(sw);
		break;

	case TB_LINK_WIDTH_ASYM_TX:
	case TB_LINK_WIDTH_ASYM_RX:
		ret = tb_switch_asym_enable(sw, width);
		break;
	}

	/* Expected errors are reported quietly, others with a debug note */
	switch (ret) {
	case 0:
		break;

	case -ETIMEDOUT:
		tb_sw_warn(sw, "timeout changing link width\n");
		return ret;

	case -ENOTCONN:
	case -EOPNOTSUPP:
	case -ENODEV:
		return ret;

	default:
		tb_sw_dbg(sw, "failed to change link width: %d\n", ret);
		return ret;
	}

	tb_port_update_credits(down);
	tb_port_update_credits(up);

	tb_switch_update_link_attributes(sw);

	tb_sw_dbg(sw, "link width set to %s\n", tb_width_name(width));
	return ret;
}

/**
 * tb_switch_configure_link() - Set link configured
 * @sw: Switch whose link is configured
 *
 * Sets the link upstream from @sw configured (from both ends) so that
 * it will not be disconnected when the domain exits sleep. Can be
 * called for any switch.
 *
 * It is recommended that this is called after lane bonding is enabled.
 *
 * Return: %0 on success and negative errno otherwise.
 */
int tb_switch_configure_link(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return 0;

	up = tb_upstream_port(sw);
	if (tb_switch_is_usb4(up->sw))
		ret = usb4_port_configure(up);
	else
		ret = tb_lc_configure_port(up);
	if (ret)
		return ret;

	/* Then the other end of the same link */
	down = up->remote;
	if (tb_switch_is_usb4(down->sw))
		return usb4_port_configure(down);
	return tb_lc_configure_port(down);
}

/**
 * tb_switch_unconfigure_link() - Unconfigure link
 * @sw: Switch whose link is unconfigured
 *
 * Sets the link unconfigured so the @sw will be disconnected if the
 * domain exits sleep.
 */
void tb_switch_unconfigure_link(struct tb_switch *sw)
{
	struct tb_port *up, *down;

	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return;

	/*
	 * Unconfigure downstream port so that wake-on-connect can be
	 * configured after router unplug. No need to unconfigure upstream port
	 * since its router is unplugged.
	 */
	up = tb_upstream_port(sw);
	down = up->remote;
	if (tb_switch_is_usb4(down->sw))
		usb4_port_unconfigure(down);
	else
		tb_lc_unconfigure_port(down);

	if (sw->is_unplugged)
		return;

	/* NOTE(review): redundant re-read; up was already fetched above */
	up = tb_upstream_port(sw);
	if (tb_switch_is_usb4(up->sw))
		usb4_port_unconfigure(up);
	else
		tb_lc_unconfigure_port(up);
}

/* Ask a USB4 router for its preferred buffer (credit) allocation */
static void tb_switch_credits_init(struct tb_switch *sw)
{
	if (tb_switch_is_icm(sw))
		return;
	if (!tb_switch_is_usb4(sw))
		return;
	if (usb4_switch_credits_init(sw))
		tb_sw_info(sw, "failed to determine preferred buffer allocation, using defaults\n");
}

/* Enable hotplug events on every USB4 port of the router */
static int tb_switch_port_hotplug_enable(struct tb_switch *sw)
{
	struct tb_port *port;

	if (tb_switch_is_icm(sw))
		return 0;

	tb_switch_for_each_port(sw, port) {
		int res;

		if (!port->cap_usb4)
			continue;

		res = usb4_port_hotplug_enable(port);
		if (res)
			return res;
	}
	return 0;
}

/**
 * tb_switch_add() - Add a switch to the domain
 * @sw: Switch to add
 *
 * This is the last step in adding switch to the domain. It will read
 * identification information from DROM and initializes ports so that
 * they can be used to connect other switches. The switch will be
 * exposed to the userspace when this function successfully returns. To
 * remove and release the switch, call tb_switch_remove().
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_switch_add(struct tb_switch *sw)
{
	int i, ret;

	/*
	 * Initialize DMA control port now before we read DROM. Recent
	 * host controllers have more complete DROM on NVM that includes
	 * vendor and model identification strings which we then expose
	 * to the userspace. NVM can be accessed through DMA
	 * configuration based mailbox.
	 */
	ret = tb_switch_add_dma_port(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add DMA port\n");
		return ret;
	}

	ret = tb_switch_nvm_init(sw);
	if (ret)
		return ret;

	if (!sw->safe_mode) {
		tb_switch_credits_init(sw);

		/* read drom */
		ret = tb_drom_read(sw);
		if (ret)
			/* DROM failure is not fatal, continue with defaults */
			dev_warn(&sw->dev, "reading DROM failed: %d\n", ret);
		tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);

		ret = tb_switch_set_uuid(sw);
		if (ret) {
			dev_err(&sw->dev, "failed to set UUID\n");
			return ret;
		}

		for (i = 0; i <= sw->config.max_port_number; i++) {
			if (sw->ports[i].disabled) {
				tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
				continue;
			}
			ret = tb_init_port(&sw->ports[i]);
			if (ret) {
				dev_err(&sw->dev, "failed to initialize port %d\n", i);
				return ret;
			}
		}

		tb_check_quirks(sw);

		tb_switch_default_link_ports(sw);

		ret = tb_switch_update_link_attributes(sw);
		if (ret)
			return ret;

		/* Requires up-to-date link attributes, see above */
		tb_switch_link_init(sw);

		ret = tb_switch_clx_init(sw);
		if (ret)
			return ret;

		ret = tb_switch_tmu_init(sw);
		if (ret)
			return ret;
	}

	ret = tb_switch_port_hotplug_enable(sw);
	if (ret)
		return ret;

	ret = device_add(&sw->dev);
	if (ret) {
		dev_err(&sw->dev, "failed to add device: %d\n", ret);
		return ret;
	}

	if (tb_route(sw)) {
		dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
			 sw->vendor, sw->device);
		if (sw->vendor_name && sw->device_name)
			dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
				 sw->device_name);
	}

	ret = usb4_switch_add_ports(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add USB4 ports\n");
		goto err_del;
	}

	ret = tb_switch_nvm_add(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add NVM devices\n");
		goto err_ports;
	}

	/*
	 * Thunderbolt routers do not generate wakeups themselves but
	 * they forward wakeups from tunneled protocols, so enable it
	 * here.
	 */
	device_init_wakeup(&sw->dev, true);

	pm_runtime_set_active(&sw->dev);
	if (sw->rpm) {
		pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
		pm_runtime_use_autosuspend(&sw->dev);
		pm_runtime_mark_last_busy(&sw->dev);
		pm_runtime_enable(&sw->dev);
		pm_request_autosuspend(&sw->dev);
	}

	tb_switch_debugfs_init(sw);
	return 0;

err_ports:
	usb4_switch_remove_ports(sw);
err_del:
	device_del(&sw->dev);

	return ret;
}

/**
 * tb_switch_remove() - Remove and release a switch
 * @sw: Switch to remove
 *
 * This will remove the switch from the domain and release it after last
 * reference count drops to zero. If there are switches connected below
 * this switch, they will be removed as well.
 */
void tb_switch_remove(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_debugfs_remove(sw);

	if (sw->rpm) {
		pm_runtime_get_sync(&sw->dev);
		pm_runtime_disable(&sw->dev);
	}

	/* port 0 is the switch itself and never has a remote */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port)) {
			/* Depth-first removal of everything below */
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
		} else if (port->xdomain) {
			port->xdomain->is_unplugged = true;
			tb_xdomain_remove(port->xdomain);
			port->xdomain = NULL;
		}

		/* Remove any downstream retimers */
		tb_retimer_remove_all(port);
	}

	if (!sw->is_unplugged)
		tb_plug_events_active(sw, false);

	tb_switch_nvm_remove(sw);
	usb4_switch_remove_ports(sw);

	if (tb_route(sw))
		dev_info(&sw->dev, "device disconnected\n");
	device_unregister(&sw->dev);
}

/**
 * tb_sw_set_unplugged() - set is_unplugged on
switch and downstream switches 3486 * @sw: Router to mark unplugged 3487 */ 3488 void tb_sw_set_unplugged(struct tb_switch *sw) 3489 { 3490 struct tb_port *port; 3491 3492 if (sw == sw->tb->root_switch) { 3493 tb_sw_WARN(sw, "cannot unplug root switch\n"); 3494 return; 3495 } 3496 if (sw->is_unplugged) { 3497 tb_sw_WARN(sw, "is_unplugged already set\n"); 3498 return; 3499 } 3500 sw->is_unplugged = true; 3501 tb_switch_for_each_port(sw, port) { 3502 if (tb_port_has_remote(port)) 3503 tb_sw_set_unplugged(port->remote->sw); 3504 else if (port->xdomain) 3505 port->xdomain->is_unplugged = true; 3506 } 3507 } 3508 3509 static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags, bool runtime) 3510 { 3511 if (flags) 3512 tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags); 3513 else 3514 tb_sw_dbg(sw, "disabling wakeup\n"); 3515 3516 if (tb_switch_is_usb4(sw)) 3517 return usb4_switch_set_wake(sw, flags, runtime); 3518 return tb_lc_set_wake(sw, flags); 3519 } 3520 3521 static void tb_switch_check_wakes(struct tb_switch *sw) 3522 { 3523 if (device_may_wakeup(&sw->dev)) { 3524 if (tb_switch_is_usb4(sw)) 3525 usb4_switch_check_wakes(sw); 3526 } 3527 } 3528 3529 /** 3530 * tb_switch_resume() - Resume a switch after sleep 3531 * @sw: Switch to resume 3532 * @runtime: Is this resume from runtime suspend or system sleep 3533 * 3534 * Resumes and re-enumerates router (and all its children), if still plugged 3535 * after suspend. Don't enumerate device router whose UID was changed during 3536 * suspend. If this is resume from system sleep, notifies PM core about the 3537 * wakes occurred during suspend. Disables all wakes, except USB4 wake of 3538 * upstream port for USB4 routers that shall be always enabled. 3539 * 3540 * Return: %0 on success, negative errno otherwise. 
 */
int tb_switch_resume(struct tb_switch *sw, bool runtime)
{
	struct tb_port *port;
	int err;

	tb_sw_dbg(sw, "resuming switch\n");

	/*
	 * Check for UID of the connected switches except for root
	 * switch which we assume cannot be removed.
	 */
	if (tb_route(sw)) {
		u64 uid;

		/*
		 * Check first that we can still read the switch config
		 * space. It may be that there is now another domain
		 * connected.
		 */
		err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
		if (err < 0) {
			tb_sw_info(sw, "switch not present anymore\n");
			return err;
		}

		/* We don't have any way to confirm this was the same device */
		if (!sw->uid)
			return -ENODEV;

		if (tb_switch_is_usb4(sw))
			err = usb4_switch_read_uid(sw, &uid);
		else
			err = tb_drom_read_uid_only(sw, &uid);
		if (err) {
			tb_sw_warn(sw, "uid read failed\n");
			return err;
		}
		if (sw->uid != uid) {
			tb_sw_info(sw,
				"changed while suspended (uid %#llx -> %#llx)\n",
				sw->uid, uid);
			return -ENODEV;
		}
	}

	err = tb_switch_configure(sw);
	if (err)
		return err;

	/* Only report wakes to the PM core when resuming from system sleep */
	if (!runtime)
		tb_switch_check_wakes(sw);

	/* Disable wakes */
	tb_switch_set_wake(sw, 0, true);

	err = tb_switch_tmu_init(sw);
	if (err)
		return err;

	/* check for surviving downstream switches */
	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_null(port))
			continue;

		/*
		 * NOTE(review): a zero return from tb_port_resume() skips the
		 * port — presumably meaning nothing further to re-check here;
		 * confirm against tb_port_resume()'s contract.
		 */
		if (!tb_port_resume(port))
			continue;

		if (tb_wait_for_port(port, true) <= 0) {
			tb_port_warn(port,
				     "lost during suspend, disconnecting\n");
			if (tb_port_has_remote(port))
				tb_sw_set_unplugged(port->remote->sw);
			else if (port->xdomain)
				port->xdomain->is_unplugged = true;
		} else {
			/*
			 * Always unlock the port so the downstream
			 * switch/domain is accessible.
			 */
			if (tb_port_unlock(port))
				tb_port_warn(port, "failed to unlock port\n");
			if (port->remote &&
			    tb_switch_resume(port->remote->sw, runtime)) {
				tb_port_warn(port,
					     "lost during suspend, disconnecting\n");
				tb_sw_set_unplugged(port->remote->sw);
			}
		}
	}
	return 0;
}

/**
 * tb_switch_suspend() - Put a switch to sleep
 * @sw: Switch to suspend
 * @runtime: Is this runtime suspend or system sleep
 *
 * Suspends router and all its children. Enables wakes according to
 * value of @runtime and then sets sleep bit for the router. If @sw is
 * host router the domain is ready to go to sleep once this function
 * returns.
 */
void tb_switch_suspend(struct tb_switch *sw, bool runtime)
{
	unsigned int flags = 0;
	struct tb_port *port;
	int err;

	tb_sw_dbg(sw, "suspending switch\n");

	/*
	 * Actually only needed for Titan Ridge but for simplicity can be
	 * done for USB4 device too as CLx is re-enabled at resume.
3655 */ 3656 tb_switch_clx_disable(sw); 3657 3658 err = tb_plug_events_active(sw, false); 3659 if (err) 3660 return; 3661 3662 tb_switch_for_each_port(sw, port) { 3663 if (tb_port_has_remote(port)) 3664 tb_switch_suspend(port->remote->sw, runtime); 3665 } 3666 3667 if (runtime) { 3668 /* Trigger wake when something is plugged in/out */ 3669 flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT; 3670 flags |= TB_WAKE_ON_USB4; 3671 flags |= TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE | TB_WAKE_ON_DP; 3672 } else if (device_may_wakeup(&sw->dev)) { 3673 flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT; 3674 flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE; 3675 } 3676 3677 tb_switch_set_wake(sw, flags, runtime); 3678 3679 if (tb_switch_is_usb4(sw)) 3680 usb4_switch_set_sleep(sw); 3681 else 3682 tb_lc_set_sleep(sw); 3683 } 3684 3685 /** 3686 * tb_switch_query_dp_resource() - Query availability of DP resource 3687 * @sw: Switch whose DP resource is queried 3688 * @in: DP IN port 3689 * 3690 * Queries availability of DP resource for DP tunneling using switch 3691 * specific means. 3692 * 3693 * Return: %true if resource is available, %false otherwise. 3694 */ 3695 bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in) 3696 { 3697 if (tb_switch_is_usb4(sw)) 3698 return usb4_switch_query_dp_resource(sw, in); 3699 return tb_lc_dp_sink_query(sw, in); 3700 } 3701 3702 /** 3703 * tb_switch_alloc_dp_resource() - Allocate available DP resource 3704 * @sw: Switch whose DP resource is allocated 3705 * @in: DP IN port 3706 * 3707 * Allocates DP resource for DP tunneling. The resource must be 3708 * available for this to succeed (see tb_switch_query_dp_resource()). 3709 * 3710 * Return: %0 on success, negative errno otherwise. 
3711 */ 3712 int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in) 3713 { 3714 int ret; 3715 3716 if (tb_switch_is_usb4(sw)) 3717 ret = usb4_switch_alloc_dp_resource(sw, in); 3718 else 3719 ret = tb_lc_dp_sink_alloc(sw, in); 3720 3721 if (ret) 3722 tb_sw_warn(sw, "failed to allocate DP resource for port %d\n", 3723 in->port); 3724 else 3725 tb_sw_dbg(sw, "allocated DP resource for port %d\n", in->port); 3726 3727 return ret; 3728 } 3729 3730 /** 3731 * tb_switch_dealloc_dp_resource() - De-allocate DP resource 3732 * @sw: Switch whose DP resource is de-allocated 3733 * @in: DP IN port 3734 * 3735 * De-allocates DP resource that was previously allocated for DP 3736 * tunneling. 3737 */ 3738 void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in) 3739 { 3740 int ret; 3741 3742 if (tb_switch_is_usb4(sw)) 3743 ret = usb4_switch_dealloc_dp_resource(sw, in); 3744 else 3745 ret = tb_lc_dp_sink_dealloc(sw, in); 3746 3747 if (ret) 3748 tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n", 3749 in->port); 3750 else 3751 tb_sw_dbg(sw, "released DP resource for port %d\n", in->port); 3752 } 3753 3754 struct tb_sw_lookup { 3755 struct tb *tb; 3756 u8 link; 3757 u8 depth; 3758 const uuid_t *uuid; 3759 u64 route; 3760 }; 3761 3762 static int tb_switch_match(struct device *dev, const void *data) 3763 { 3764 struct tb_switch *sw = tb_to_switch(dev); 3765 const struct tb_sw_lookup *lookup = data; 3766 3767 if (!sw) 3768 return 0; 3769 if (sw->tb != lookup->tb) 3770 return 0; 3771 3772 if (lookup->uuid) 3773 return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid)); 3774 3775 if (lookup->route) { 3776 return sw->config.route_lo == lower_32_bits(lookup->route) && 3777 sw->config.route_hi == upper_32_bits(lookup->route); 3778 } 3779 3780 /* Root switch is matched only by depth */ 3781 if (!lookup->depth) 3782 return !sw->depth; 3783 3784 return sw->link == lookup->link && sw->depth == lookup->depth; 3785 } 3786 3787 /** 3788 * 
tb_switch_find_by_link_depth() - Find switch by link and depth 3789 * @tb: Domain the switch belongs 3790 * @link: Link number the switch is connected 3791 * @depth: Depth of the switch in link 3792 * 3793 * Returned switch has reference count increased so the caller needs to 3794 * call tb_switch_put() when done with the switch. 3795 * 3796 * Return: Pointer to &struct tb_switch, %NULL if not found. 3797 */ 3798 struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth) 3799 { 3800 struct tb_sw_lookup lookup; 3801 struct device *dev; 3802 3803 memset(&lookup, 0, sizeof(lookup)); 3804 lookup.tb = tb; 3805 lookup.link = link; 3806 lookup.depth = depth; 3807 3808 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match); 3809 if (dev) 3810 return tb_to_switch(dev); 3811 3812 return NULL; 3813 } 3814 3815 /** 3816 * tb_switch_find_by_uuid() - Find switch by UUID 3817 * @tb: Domain the switch belongs 3818 * @uuid: UUID to look for 3819 * 3820 * Returned switch has reference count increased so the caller needs to 3821 * call tb_switch_put() when done with the switch. 3822 * 3823 * Return: Pointer to &struct tb_switch, %NULL if not found. 3824 */ 3825 struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid) 3826 { 3827 struct tb_sw_lookup lookup; 3828 struct device *dev; 3829 3830 memset(&lookup, 0, sizeof(lookup)); 3831 lookup.tb = tb; 3832 lookup.uuid = uuid; 3833 3834 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match); 3835 if (dev) 3836 return tb_to_switch(dev); 3837 3838 return NULL; 3839 } 3840 3841 /** 3842 * tb_switch_find_by_route() - Find switch by route string 3843 * @tb: Domain the switch belongs 3844 * @route: Route string to look for 3845 * 3846 * Returned switch has reference count increased so the caller needs to 3847 * call tb_switch_put() when done with the switch. 3848 * 3849 * Return: Pointer to &struct tb_switch, %NULL if not found. 
3850 */ 3851 struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route) 3852 { 3853 struct tb_sw_lookup lookup; 3854 struct device *dev; 3855 3856 if (!route) 3857 return tb_switch_get(tb->root_switch); 3858 3859 memset(&lookup, 0, sizeof(lookup)); 3860 lookup.tb = tb; 3861 lookup.route = route; 3862 3863 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match); 3864 if (dev) 3865 return tb_to_switch(dev); 3866 3867 return NULL; 3868 } 3869 3870 /** 3871 * tb_switch_find_port() - return the first port of @type on @sw or NULL 3872 * @sw: Switch to find the port from 3873 * @type: Port type to look for 3874 * 3875 * Return: Pointer to &struct tb_port, %NULL if not found. 3876 */ 3877 struct tb_port *tb_switch_find_port(struct tb_switch *sw, 3878 enum tb_port_type type) 3879 { 3880 struct tb_port *port; 3881 3882 tb_switch_for_each_port(sw, port) { 3883 if (port->config.type == type) 3884 return port; 3885 } 3886 3887 return NULL; 3888 } 3889 3890 /* 3891 * Can be used for read/write a specified PCIe bridge for any Thunderbolt 3 3892 * device. For now used only for Titan Ridge. 
3893 */ 3894 static int tb_switch_pcie_bridge_write(struct tb_switch *sw, unsigned int bridge, 3895 unsigned int pcie_offset, u32 value) 3896 { 3897 u32 offset, command, val; 3898 int ret; 3899 3900 if (sw->generation != 3) 3901 return -EOPNOTSUPP; 3902 3903 offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_WR_DATA; 3904 ret = tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1); 3905 if (ret) 3906 return ret; 3907 3908 command = pcie_offset & TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK; 3909 command |= BIT(bridge + TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT); 3910 command |= TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK; 3911 command |= TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL 3912 << TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT; 3913 command |= TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK; 3914 3915 offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_CMD; 3916 3917 ret = tb_sw_write(sw, &command, TB_CFG_SWITCH, offset, 1); 3918 if (ret) 3919 return ret; 3920 3921 ret = tb_switch_wait_for_bit(sw, offset, 3922 TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK, 0, 100); 3923 if (ret) 3924 return ret; 3925 3926 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1); 3927 if (ret) 3928 return ret; 3929 3930 if (val & TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK) 3931 return -ETIMEDOUT; 3932 3933 return 0; 3934 } 3935 3936 /** 3937 * tb_switch_pcie_l1_enable() - Enable PCIe link to enter L1 state 3938 * @sw: Router to enable PCIe L1 3939 * 3940 * For Titan Ridge switch to enter CLx state, its PCIe bridges shall enable 3941 * entry to PCIe L1 state. Shall be called after the upstream PCIe tunnel 3942 * was configured. Due to Intel platforms limitation, shall be called only 3943 * for first hop switch. 3944 * 3945 * Return: %0 on success, negative errno otherwise. 
3946 */ 3947 int tb_switch_pcie_l1_enable(struct tb_switch *sw) 3948 { 3949 struct tb_switch *parent = tb_switch_parent(sw); 3950 int ret; 3951 3952 if (!tb_route(sw)) 3953 return 0; 3954 3955 if (!tb_switch_is_titan_ridge(sw)) 3956 return 0; 3957 3958 /* Enable PCIe L1 enable only for first hop router (depth = 1) */ 3959 if (tb_route(parent)) 3960 return 0; 3961 3962 /* Write to downstream PCIe bridge #5 aka Dn4 */ 3963 ret = tb_switch_pcie_bridge_write(sw, 5, 0x143, 0x0c7806b1); 3964 if (ret) 3965 return ret; 3966 3967 /* Write to Upstream PCIe bridge #0 aka Up0 */ 3968 return tb_switch_pcie_bridge_write(sw, 0, 0x143, 0x0c5806b1); 3969 } 3970 3971 /** 3972 * tb_switch_xhci_connect() - Connect internal xHCI 3973 * @sw: Router whose xHCI to connect 3974 * 3975 * Can be called to any router. For Alpine Ridge and Titan Ridge 3976 * performs special flows that bring the xHCI functional for any device 3977 * connected to the type-C port. Call only after PCIe tunnel has been 3978 * established. The function only does the connect if not done already 3979 * so can be called several times for the same router. 3980 * 3981 * Return: %0 on success, negative errno otherwise. 
3982 */ 3983 int tb_switch_xhci_connect(struct tb_switch *sw) 3984 { 3985 struct tb_port *port1, *port3; 3986 int ret; 3987 3988 if (sw->generation != 3) 3989 return 0; 3990 3991 port1 = &sw->ports[1]; 3992 port3 = &sw->ports[3]; 3993 3994 if (tb_switch_is_alpine_ridge(sw)) { 3995 bool usb_port1, usb_port3, xhci_port1, xhci_port3; 3996 3997 usb_port1 = tb_lc_is_usb_plugged(port1); 3998 usb_port3 = tb_lc_is_usb_plugged(port3); 3999 xhci_port1 = tb_lc_is_xhci_connected(port1); 4000 xhci_port3 = tb_lc_is_xhci_connected(port3); 4001 4002 /* Figure out correct USB port to connect */ 4003 if (usb_port1 && !xhci_port1) { 4004 ret = tb_lc_xhci_connect(port1); 4005 if (ret) 4006 return ret; 4007 } 4008 if (usb_port3 && !xhci_port3) 4009 return tb_lc_xhci_connect(port3); 4010 } else if (tb_switch_is_titan_ridge(sw)) { 4011 ret = tb_lc_xhci_connect(port1); 4012 if (ret) 4013 return ret; 4014 return tb_lc_xhci_connect(port3); 4015 } 4016 4017 return 0; 4018 } 4019 4020 /** 4021 * tb_switch_xhci_disconnect() - Disconnect internal xHCI 4022 * @sw: Router whose xHCI to disconnect 4023 * 4024 * The opposite of tb_switch_xhci_connect(). Disconnects xHCI on both 4025 * ports. 4026 */ 4027 void tb_switch_xhci_disconnect(struct tb_switch *sw) 4028 { 4029 if (sw->generation == 3) { 4030 struct tb_port *port1 = &sw->ports[1]; 4031 struct tb_port *port3 = &sw->ports[3]; 4032 4033 tb_lc_xhci_disconnect(port1); 4034 tb_port_dbg(port1, "disconnected xHCI\n"); 4035 tb_lc_xhci_disconnect(port3); 4036 tb_port_dbg(port3, "disconnected xHCI\n"); 4037 } 4038 } 4039