// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - switch/port utility functions
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/hex.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>

#include "tb.h"

/* Switch NVM support */

struct nvm_auth_status {
	struct list_head list;
	uuid_t uuid;
	u32 status;
};

/*
 * Hold NVM authentication failure status per switch. This information
 * needs to stay around even when the switch gets power cycled so we
 * keep it separately.
 */
static LIST_HEAD(nvm_auth_status_cache);
static DEFINE_MUTEX(nvm_auth_status_lock);

static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	list_for_each_entry(st, &nvm_auth_status_cache, list) {
		if (uuid_equal(&st->uuid, sw->uuid))
			return st;
	}

	return NULL;
}

static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	mutex_unlock(&nvm_auth_status_lock);

	*status = st ? st->status : 0;
}

static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
{
	struct nvm_auth_status *st;

	if (WARN_ON(!sw->uuid))
		return;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);

	if (!st) {
		st = kzalloc(sizeof(*st), GFP_KERNEL);
		if (!st)
			goto unlock;

		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
		INIT_LIST_HEAD(&st->list);
		list_add_tail(&st->list, &nvm_auth_status_cache);
	}

	st->status = status;
unlock:
	mutex_unlock(&nvm_auth_status_lock);
}

static void nvm_clear_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	if (st) {
		list_del(&st->list);
		kfree(st);
	}
	mutex_unlock(&nvm_auth_status_lock);
}

static int nvm_validate_and_write(struct tb_switch *sw)
{
	unsigned int image_size;
	const u8 *buf;
	int ret;

	ret = tb_nvm_validate(sw->nvm);
	if (ret)
		return ret;

	ret = tb_nvm_write_headers(sw->nvm);
	if (ret)
		return ret;

	buf = sw->nvm->buf_data_start;
	image_size = sw->nvm->buf_data_size;

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
	else
		ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
	if (ret)
		return ret;

	sw->nvm->flushed = true;
	return 0;
}

static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
{
	int ret = 0;

	/*
	 * Root switch NVM upgrade requires that we disconnect the
	 * existing paths first (in case it is not in safe mode
	 * already).
	 */
	if (!sw->safe_mode) {
		u32 status;

		ret = tb_domain_disconnect_all_paths(sw->tb);
		if (ret)
			return ret;
		/*
		 * The host controller goes away pretty soon after this if
		 * everything goes well so getting timeout is expected.
		 */
		ret = dma_port_flash_update_auth(sw->dma_port);
		if (!ret || ret == -ETIMEDOUT)
			return 0;

		/*
		 * Any error from update auth operation requires power
		 * cycling of the host router.
		 */
		tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
		if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
			nvm_set_auth_status(sw, status);
	}

	/*
	 * From safe mode we can get out by just power cycling the
	 * switch.
	 */
	dma_port_power_cycle(sw->dma_port);
	return ret;
}

static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
{
	int ret, retries = 10;

	ret = dma_port_flash_update_auth(sw->dma_port);
	switch (ret) {
	case 0:
	case -ETIMEDOUT:
	case -EACCES:
	case -EINVAL:
		/* Power cycle is required */
		break;
	default:
		return ret;
	}

	/*
	 * Poll here for the authentication status. It takes some time
	 * for the device to respond (we get timeout for a while). Once
	 * we get response the device needs to be power cycled in order
	 * for the new NVM to be taken into use.
	 */
	do {
		u32 status;

		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
		if (ret < 0 && ret != -ETIMEDOUT)
			return ret;
		if (ret > 0) {
			if (status) {
				tb_sw_warn(sw, "failed to authenticate NVM\n");
				nvm_set_auth_status(sw, status);
			}

			tb_sw_info(sw, "power cycling the switch now\n");
			dma_port_power_cycle(sw->dma_port);
			return 0;
		}

		msleep(500);
	} while (--retries);

	return -ETIMEDOUT;
}

static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	/*
	 * During host router NVM upgrade we should not allow root port to
	 * go into D3cold because some root ports cannot trigger PME
	 * themselves. To be on the safe side keep the root port in D0 during
	 * the whole upgrade process.
	 */
	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_get_noresume(&root_port->dev);
}

static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_put(&root_port->dev);
}

static inline bool nvm_readable(struct tb_switch *sw)
{
	if (tb_switch_is_usb4(sw)) {
		/*
		 * USB4 devices must support NVM operations but it is
		 * optional for hosts. Therefore we query the NVM sector
		 * size here and if it is supported assume NVM
		 * operations are implemented.
		 */
		return usb4_switch_nvm_sector_size(sw) > 0;
	}

	/* Thunderbolt 2 and 3 devices support NVM through DMA port */
	return !!sw->dma_port;
}

static inline bool nvm_upgradeable(struct tb_switch *sw)
{
	if (sw->no_nvm_upgrade)
		return false;
	return nvm_readable(sw);
}

static int nvm_authenticate(struct tb_switch *sw, bool auth_only)
{
	int ret;

	if (tb_switch_is_usb4(sw)) {
		if (auth_only) {
			ret = usb4_switch_nvm_set_offset(sw, 0);
			if (ret)
				return ret;
		}
		sw->nvm->authenticating = true;
		return usb4_switch_nvm_authenticate(sw);
	}
	if (auth_only)
		return -EOPNOTSUPP;

	sw->nvm->authenticating = true;
	if (!tb_route(sw)) {
		nvm_authenticate_start_dma_port(sw);
		ret = nvm_authenticate_host_dma_port(sw);
	} else {
		ret = nvm_authenticate_device_dma_port(sw);
	}

	return ret;
}

/**
 * tb_switch_nvm_read() - Read router NVM
 * @sw: Router whose NVM to read
 * @address: Start address on the NVM
 * @buf: Buffer where the read data is copied
 * @size: Size of the buffer in bytes
 *
 * Reads from router NVM and returns the requested data in @buf. Locking
 * is up to the caller.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
		       size_t size)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_nvm_read(sw, address, buf, size);
	return dma_port_flash_read(sw->dma_port, address, buf, size);
}

static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	int ret;

	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto out;
	}

	ret = tb_switch_nvm_read(sw, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}

static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	int ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	/*
	 * Since writing the NVM image might require some special steps,
	 * for example when CSS headers are written, we cache the image
	 * locally here and handle the special cases when the user asks
	 * us to authenticate the image.
	 */
	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

	return ret;
}

static int tb_switch_nvm_add(struct tb_switch *sw)
{
	struct tb_nvm *nvm;
	int ret;

	if (!nvm_readable(sw))
		return 0;

	nvm = tb_nvm_alloc(&sw->dev);
	if (IS_ERR(nvm)) {
		ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm);
		goto err_nvm;
	}

	ret = tb_nvm_read_version(nvm);
	if (ret)
		goto err_nvm;

	/*
	 * If the switch is in safe-mode the only accessible portion of
	 * the NVM is the non-active one where userspace is expected to
	 * write new functional NVM.
	 */
	if (!sw->safe_mode) {
		ret = tb_nvm_add_active(nvm, nvm_read);
		if (ret)
			goto err_nvm;
		tb_sw_dbg(sw, "NVM version %x.%x\n", nvm->major, nvm->minor);
	}

	if (!sw->no_nvm_upgrade) {
		ret = tb_nvm_add_non_active(nvm, nvm_write);
		if (ret)
			goto err_nvm;
	}

	sw->nvm = nvm;
	return 0;

err_nvm:
	tb_sw_dbg(sw, "NVM upgrade disabled\n");
	sw->no_nvm_upgrade = true;
	if (!IS_ERR(nvm))
		tb_nvm_free(nvm);

	return ret;
}

static void tb_switch_nvm_remove(struct tb_switch *sw)
{
	struct tb_nvm *nvm;

	nvm = sw->nvm;
	sw->nvm = NULL;

	if (!nvm)
		return;

	/* Remove authentication status in case the switch is unplugged */
	if (!nvm->authenticating)
		nvm_clear_auth_status(sw);

	tb_nvm_free(nvm);
}

/* port utility functions */

static const char *tb_port_type(const struct tb_regs_port_header *port)
{
	switch (port->type >> 16) {
	case 0:
		switch ((u8) port->type) {
		case 0:
			return "Inactive";
		case 1:
			return "Port";
		case 2:
			return "NHI";
		default:
			return "unknown";
		}
	case 0x2:
		return "Ethernet";
	case 0x8:
		return "SATA";
	case 0xe:
		return "DP/HDMI";
	case 0x10:
		return "PCIe";
	case 0x20:
		return "USB";
	default:
		return "unknown";
	}
}

static void tb_dump_port(struct tb *tb, const struct tb_port *port)
{
	const struct tb_regs_port_header *regs = &port->config;

	tb_dbg(tb,
	       " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
	       regs->port_number, regs->vendor_id, regs->device_id,
	       regs->revision, regs->thunderbolt_version, tb_port_type(regs),
	       regs->type);
	tb_dbg(tb, " Max hop id (in/out): %d/%d\n",
	       regs->max_in_hop_id, regs->max_out_hop_id);
	tb_dbg(tb, " Max counters: %d\n", regs->max_counters);
	tb_dbg(tb, " NFC Credits: %#x\n", regs->nfc_credits);
	tb_dbg(tb, " Credits (total/control): %u/%u\n", port->total_credits,
	       port->ctl_credits);
}

/**
 * tb_port_state() - get connectedness state of a port
 * @port: the port to check
 *
 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 *
 * Return: &enum tb_port_state or negative error code on failure.
 */
int tb_port_state(struct tb_port *port)
{
	struct tb_cap_phy phy;
	int res;
	if (port->cap_phy == 0) {
		tb_port_WARN(port, "does not have a PHY\n");
		return -EINVAL;
	}
	res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
	if (res)
		return res;
	return phy.state;
}

/**
 * tb_wait_for_port() - wait for a port to become ready
 * @port: Port to wait
 * @wait_if_unplugged: Wait also when port is unplugged
 *
 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 * wait_if_unplugged is set then we also wait if the port is in state
 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 * switch resume). Otherwise we only wait if a device is registered but the link
 * has not yet been established.
 *
 * Return:
 * * %0 - If the port is not connected or failed to reach
 *	  state %TB_PORT_UP within one second.
 * * %1 - If the port is connected and in state %TB_PORT_UP.
 * * Negative errno - An error occurred.
 */
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
{
	int retries = 10;
	int state;
	if (!port->cap_phy) {
		tb_port_WARN(port, "does not have PHY\n");
		return -EINVAL;
	}
	if (tb_is_upstream_port(port)) {
		tb_port_WARN(port, "is the upstream port\n");
		return -EINVAL;
	}

	while (retries--) {
		state = tb_port_state(port);
		switch (state) {
		case TB_PORT_DISABLED:
			tb_port_dbg(port, "is disabled (state: 0)\n");
			return 0;

		case TB_PORT_UNPLUGGED:
			if (wait_if_unplugged) {
				/* used during resume */
				tb_port_dbg(port,
					    "is unplugged (state: 7), retrying...\n");
				msleep(100);
				break;
			}
			tb_port_dbg(port, "is unplugged (state: 7)\n");
			return 0;

		case TB_PORT_UP:
		case TB_PORT_TX_CL0S:
		case TB_PORT_RX_CL0S:
		case TB_PORT_CL1:
		case TB_PORT_CL2:
			tb_port_dbg(port, "is connected, link is up (state: %d)\n", state);
			return 1;

		default:
			if (state < 0)
				return state;

			/*
			 * After plug-in the state is TB_PORT_CONNECTING. Give it some
			 * time.
			 */
			tb_port_dbg(port,
				    "is connected, link is not up (state: %d), retrying...\n",
				    state);
			msleep(100);
		}

	}
	tb_port_warn(port,
		     "failed to reach state TB_PORT_UP. Ignoring port...\n");
	return 0;
}

/**
 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
 * @port: Port to add/remove NFC credits
 * @credits: Credits to add/remove
 *
 * Change the number of NFC credits allocated to @port by @credits. To remove
 * NFC credits pass a negative amount of credits.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_port_add_nfc_credits(struct tb_port *port, int credits)
{
	u32 nfc_credits;

	if (credits == 0 || port->sw->is_unplugged)
		return 0;

	/*
	 * USB4 restricts programming NFC buffers to lane adapters only
	 * so skip other ports.
	 */
	if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
		return 0;

	nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
	if (credits < 0)
		credits = max_t(int, -nfc_credits, credits);

	nfc_credits += credits;

	tb_port_dbg(port, "adding %d NFC credits to %lu", credits,
		    port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);

	port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
	port->config.nfc_credits |= nfc_credits;

	return tb_port_write(port, &port->config.nfc_credits,
			     TB_CFG_PORT, ADP_CS_4, 1);
}

/**
 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 * @port: Port whose counters to clear
 * @counter: Counter index to clear
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_port_clear_counter(struct tb_port *port, int counter)
{
	u32 zero[3] = { 0, 0, 0 };
	tb_port_dbg(port, "clearing counter %d\n", counter);
	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
}

/**
 * tb_port_unlock() - Unlock downstream port
 * @port: Port to unlock
 *
 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
 * downstream router accessible for CM.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_port_unlock(struct tb_port *port)
{
	if (tb_switch_is_icm(port->sw))
		return 0;
	if (!tb_port_is_null(port))
		return -EINVAL;
	if (tb_switch_is_usb4(port->sw))
		return usb4_port_unlock(port);
	return 0;
}

static int __tb_port_enable(struct tb_port *port, bool enable)
{
	int ret;
	u32 phy;

	if (!tb_port_is_null(port))
		return -EINVAL;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (enable)
		phy &= ~LANE_ADP_CS_1_LD;
	else
		phy |= LANE_ADP_CS_1_LD;

	ret = tb_port_write(port, &phy, TB_CFG_PORT,
			    port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	tb_port_dbg(port, "lane %s\n", str_enabled_disabled(enable));
	return 0;
}

/**
 * tb_port_enable() - Enable lane adapter
 * @port: Port to enable (can be %NULL)
 *
 * This is used for lane 0 and 1 adapters to enable them.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_port_enable(struct tb_port *port)
{
	return __tb_port_enable(port, true);
}

/**
 * tb_port_disable() - Disable lane adapter
 * @port: Port to disable (can be %NULL)
 *
 * This is used for lane 0 and 1 adapters to disable them.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_port_disable(struct tb_port *port)
{
	return __tb_port_enable(port, false);
}

static int tb_port_reset(struct tb_port *port)
{
	if (tb_switch_is_usb4(port->sw))
		return port->cap_usb4 ? usb4_port_reset(port) : 0;
	return tb_lc_reset_port(port);
}

/*
 * tb_init_port() - initialize a port
 *
 * This is a helper method for tb_switch_alloc. Does not check or initialize
 * any downstream switches.
 *
 * Return: %0 on success, negative errno otherwise.
 */
static int tb_init_port(struct tb_port *port)
{
	int res;
	int cap;

	INIT_LIST_HEAD(&port->list);

	/* Control adapter does not have configuration space */
	if (!port->port)
		return 0;

	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
	if (res) {
		if (res == -ENODEV) {
			tb_dbg(port->sw->tb, " Port %d: not implemented\n",
			       port->port);
			port->disabled = true;
			return 0;
		}
		return res;
	}

	/* Port 0 is the switch itself and has no PHY. */
	if (port->config.type == TB_TYPE_PORT) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);

		if (cap > 0)
			port->cap_phy = cap;
		else
			tb_port_WARN(port, "non switch port without a PHY\n");

		cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
		if (cap > 0)
			port->cap_usb4 = cap;

		/*
		 * USB4 port buffers allocated for the control path
		 * can be read from the path config space. Legacy
		 * devices use hard-coded value.
		 */
		if (port->cap_usb4) {
			struct tb_regs_hop hop;

			if (!tb_port_read(port, &hop, TB_CFG_HOPS, 0, 2))
				port->ctl_credits = hop.initial_credits;
		}
		if (!port->ctl_credits)
			port->ctl_credits = 2;

	} else {
		cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
		if (cap > 0)
			port->cap_adap = cap;
	}

	port->total_credits =
		(port->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
		ADP_CS_4_TOTAL_BUFFERS_SHIFT;

	tb_dump_port(port->sw->tb, port);
	return 0;
}

static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
			       int max_hopid)
{
	int port_max_hopid;
	struct ida *ida;

	if (in) {
		port_max_hopid = port->config.max_in_hop_id;
		ida = &port->in_hopids;
	} else {
		port_max_hopid = port->config.max_out_hop_id;
		ida = &port->out_hopids;
	}

	/*
	 * NHI can use HopIDs 1-max. For other adapters HopIDs 0-7 are
	 * reserved.
	 */
	if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
		min_hopid = TB_PATH_MIN_HOPID;

	if (max_hopid < 0 || max_hopid > port_max_hopid)
		max_hopid = port_max_hopid;

	return ida_alloc_range(ida, min_hopid, max_hopid, GFP_KERNEL);
}

/**
 * tb_port_alloc_in_hopid() - Allocate input HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable input HopID
 * @max_hopid: Maximum acceptable input HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
}

/**
 * tb_port_alloc_out_hopid() - Allocate output HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable output HopID
 * @max_hopid: Maximum acceptable output HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
}

/**
 * tb_port_release_in_hopid() - Release allocated input HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_in_hopid(struct tb_port *port, int hopid)
{
	ida_free(&port->in_hopids, hopid);
}

/**
 * tb_port_release_out_hopid() - Release allocated output HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_out_hopid(struct tb_port *port, int hopid)
{
	ida_free(&port->out_hopids, hopid);
}

static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
					  const struct tb_switch *sw)
{
	u64 mask = (1ULL << parent->config.depth * 8) - 1;
	return (tb_route(parent) & mask) == (tb_route(sw) & mask);
}

/**
 * tb_next_port_on_path() - Return next port for given port on a path
 * @start: Start port of the walk
 * @end: End port of the walk
 * @prev: Previous port (%NULL if this is the first)
 *
 * This function can be used to walk from one port to another if they
 * are connected through zero or more switches. If the @prev is dual
 * link port, the function follows that link and returns another end on
 * that same link.
 *
 * Domain tb->lock must be held when this function is called.
 *
 * Return: Pointer to &struct tb_port, %NULL if the @end port has been reached.
 */
struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
				     struct tb_port *prev)
{
	struct tb_port *next;

	if (!prev)
		return start;

	if (prev->sw == end->sw) {
		if (prev == end)
			return NULL;
		return end;
	}

	if (tb_switch_is_reachable(prev->sw, end->sw)) {
		next = tb_port_at(tb_route(end->sw), prev->sw);
		/* Walk down the topology if next == prev */
		if (prev->remote &&
		    (next == prev || next->dual_link_port == prev))
			next = prev->remote;
	} else {
		if (tb_is_upstream_port(prev)) {
			next = prev->remote;
		} else {
			next = tb_upstream_port(prev->sw);
			/*
			 * Keep the same link if prev and next are both
			 * dual link ports.
			 */
			if (next->dual_link_port &&
			    next->link_nr != prev->link_nr) {
				next = next->dual_link_port;
			}
		}
	}

	return next != prev ? next : NULL;
}

/**
 * tb_port_get_link_speed() - Get current link speed
 * @port: Port to check (USB4 or CIO)
 *
 * Return: Link speed in Gb/s or negative errno in case of failure.
 */
int tb_port_get_link_speed(struct tb_port *port)
{
	u32 val, speed;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
		LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;

	switch (speed) {
	case LANE_ADP_CS_1_CURRENT_SPEED_GEN4:
		return 40;
	case LANE_ADP_CS_1_CURRENT_SPEED_GEN3:
		return 20;
	default:
		return 10;
	}
}

/**
 * tb_port_get_link_generation() - Returns link generation
 * @port: Lane adapter
 *
 * Return: Link generation as a number or negative errno in case of
 * failure.
 *
 * Does not distinguish between Thunderbolt 1 and Thunderbolt 2
 * links so for those always returns %2.
 */
int tb_port_get_link_generation(struct tb_port *port)
{
	int ret;

	ret = tb_port_get_link_speed(port);
	if (ret < 0)
		return ret;

	switch (ret) {
	case 40:
		return 4;
	case 20:
		return 3;
	default:
		return 2;
	}
}

/**
 * tb_port_get_link_width() - Get current link width
 * @port: Port to check (USB4 or CIO)
 *
 * Return: Link width encoded in &enum tb_link_width or
 * negative errno in case of failure.
 */
int tb_port_get_link_width(struct tb_port *port)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	/* Matches the values in enum tb_link_width */
	return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
		LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
}

/**
 * tb_port_width_supported() - Is the given link width supported
 * @port: Port to check
 * @width: Widths to check (bitmask)
 *
 * Can be called for any lane adapter. Checks if given @width is
 * supported by the hardware.
 *
 * Return: %true if link width is supported, %false otherwise.
 */
bool tb_port_width_supported(struct tb_port *port, unsigned int width)
{
	u32 phy, widths;
	int ret;

	if (!port->cap_phy)
		return false;

	if (width & (TB_LINK_WIDTH_ASYM_TX | TB_LINK_WIDTH_ASYM_RX)) {
		if (tb_port_get_link_generation(port) < 4 ||
		    !usb4_port_asym_supported(port))
			return false;
	}

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_0, 1);
	if (ret)
		return false;

	/*
	 * The field encoding is the same as &enum tb_link_width (which is
	 * passed to @width).
	 */
	widths = FIELD_GET(LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK, phy);
	return widths & width;
}

/**
 * tb_port_set_link_width() - Set target link width of the lane adapter
 * @port: Lane adapter
 * @width: Target link width
 *
 * Sets the target link width of the lane adapter to @width. Does not
 * enable/disable lane bonding. For that call tb_port_set_lane_bonding().
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
	switch (width) {
	case TB_LINK_WIDTH_SINGLE:
		/* Gen 4 link cannot be single */
		if (tb_port_get_link_generation(port) >= 4)
			return -EOPNOTSUPP;
		val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
		break;

	case TB_LINK_WIDTH_DUAL:
		if (tb_port_get_link_generation(port) >= 4)
			return usb4_port_asym_set_link_width(port, width);
		val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
		break;

	case TB_LINK_WIDTH_ASYM_TX:
	case TB_LINK_WIDTH_ASYM_RX:
		return usb4_port_asym_set_link_width(port, width);

	default:
		return -EINVAL;
	}

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

/**
 * tb_port_set_lane_bonding() - Enable/disable lane bonding
 * @port: Lane adapter
 * @bonding: enable/disable bonding
 *
 * Enables or disables lane bonding. This should be called after target
 * link width has been set (tb_port_set_link_width()). Note in most
 * cases one should use tb_port_lane_bonding_enable() instead to enable
 * lane bonding.
 *
 * Return: %0 on success, negative errno otherwise.
 */
static int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (bonding)
		val |= LANE_ADP_CS_1_LB;
	else
		val &= ~LANE_ADP_CS_1_LB;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}
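
/*
 * Illustrative sketch only (not part of the driver flow; the variable
 * name "up" and the 100 ms timeout are assumptions): a caller bonding a
 * link typically enables bonding, waits for the width change and then
 * refreshes the credits, roughly:
 *
 *	ret = tb_port_lane_bonding_enable(up);
 *	if (!ret)
 *		ret = tb_port_wait_for_link_width(up, TB_LINK_WIDTH_DUAL, 100);
 *	if (!ret)
 *		ret = tb_port_update_credits(up);
 */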

/**
 * tb_port_lane_bonding_enable() - Enable bonding on port
 * @port: port to enable
 *
 * Enable bonding by setting the link width of the port and the other
 * port in case of dual link port. Does not wait for the link to
 * actually reach the bonded state so caller needs to call
 * tb_port_wait_for_link_width() before enabling any paths through the
 * link to make sure the link is in expected state.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_port_lane_bonding_enable(struct tb_port *port)
{
	enum tb_link_width width;
	int ret;

	/*
	 * Enable lane bonding for both links if not already enabled by
	 * for example the boot firmware.
	 */
	width = tb_port_get_link_width(port);
	if (width == TB_LINK_WIDTH_SINGLE) {
		ret = tb_port_set_link_width(port, TB_LINK_WIDTH_DUAL);
		if (ret)
			goto err_lane0;
	}

	width = tb_port_get_link_width(port->dual_link_port);
	if (width == TB_LINK_WIDTH_SINGLE) {
		ret = tb_port_set_link_width(port->dual_link_port,
					     TB_LINK_WIDTH_DUAL);
		if (ret)
			goto err_lane1;
	}

	/*
	 * Only set bonding if the link was not already bonded. This
	 * avoids the lane adapter re-entering bonding state.
	 */
	if (width == TB_LINK_WIDTH_SINGLE && !tb_is_upstream_port(port)) {
		ret = tb_port_set_lane_bonding(port, true);
		if (ret)
			goto err_lane1;
	}

	/*
	 * When lane 0 bonding is set it will affect lane 1 too so
	 * update both.
	 */
	port->bonded = true;
	port->dual_link_port->bonded = true;

	return 0;

err_lane1:
	tb_port_set_link_width(port->dual_link_port, TB_LINK_WIDTH_SINGLE);
err_lane0:
	tb_port_set_link_width(port, TB_LINK_WIDTH_SINGLE);

	return ret;
}

/**
 * tb_port_lane_bonding_disable() - Disable bonding on port
 * @port: port to disable
 *
 * Disable bonding by setting the link width of the port and the
 * other port in case of dual link port.
 */
void tb_port_lane_bonding_disable(struct tb_port *port)
{
	tb_port_set_lane_bonding(port, false);
	tb_port_set_link_width(port->dual_link_port, TB_LINK_WIDTH_SINGLE);
	tb_port_set_link_width(port, TB_LINK_WIDTH_SINGLE);
	port->dual_link_port->bonded = false;
	port->bonded = false;
}

/**
 * tb_port_wait_for_link_width() - Wait until link reaches specific width
 * @port: Port to wait for
 * @width: Expected link width (bitmask)
 * @timeout_msec: Timeout in ms how long to wait
 *
 * Should be used after both ends of the link have been bonded (or
 * bonding has been disabled) to wait until the link actually reaches
 * the expected state.
 *
 * Can be passed a mask of expected widths.
 *
 * Return:
 * * %0 - If link reaches any of the specified widths.
 * * %-ETIMEDOUT - If link does not reach specified width.
 * * Negative errno - Another error occurred.
 */
int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width,
				int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
	int ret;

	/* Gen 4 link does not support single lane */
	if ((width & TB_LINK_WIDTH_SINGLE) &&
	    tb_port_get_link_generation(port) >= 4)
		return -EOPNOTSUPP;

	do {
		ret = tb_port_get_link_width(port);
		if (ret < 0) {
			/*
			 * Sometimes we get port locked error when
			 * polling the lanes so we can ignore it and
			 * retry.
			 */
			if (ret != -EACCES)
				return ret;
		} else if (ret & width) {
			return 0;
		}

		usleep_range(1000, 2000);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

static int tb_port_do_update_credits(struct tb_port *port)
{
	u32 nfc_credits;
	int ret;

	ret = tb_port_read(port, &nfc_credits, TB_CFG_PORT, ADP_CS_4, 1);
	if (ret)
		return ret;

	if (nfc_credits != port->config.nfc_credits) {
		u32 total;

		total = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
			ADP_CS_4_TOTAL_BUFFERS_SHIFT;

		tb_port_dbg(port, "total credits changed %u -> %u\n",
			    port->total_credits, total);

		port->config.nfc_credits = nfc_credits;
		port->total_credits = total;
	}

	return 0;
}

/**
 * tb_port_update_credits() - Re-read port total credits
 * @port: Port to update
 *
 * After the link is bonded (or bonding was disabled) the port total
 * credits may change, so this function needs to be called to re-read
 * the credits. Updates also the second lane adapter.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_port_update_credits(struct tb_port *port)
{
	int ret;

	ret = tb_port_do_update_credits(port);
	if (ret)
		return ret;

	if (!port->dual_link_port)
		return 0;
	return tb_port_do_update_credits(port->dual_link_port);
}

static int tb_port_start_lane_initialization(struct tb_port *port)
{
	int ret;

	if (tb_switch_is_usb4(port->sw))
		return 0;

	ret = tb_lc_start_lane_initialization(port);
	return ret == -EINVAL ? 0 : ret;
}

/*
 * Returns true if the port had something (router, XDomain) connected
 * before suspend.
 */
static bool tb_port_resume(struct tb_port *port)
{
	bool has_remote = tb_port_has_remote(port);

	if (port->usb4) {
		usb4_port_device_resume(port->usb4);
	} else if (!has_remote) {
		/*
		 * For disconnected downstream lane adapters start lane
		 * initialization now so we detect future connects.
		 *
		 * For XDomain start the lane initialization now so the
		 * link gets re-established.
		 *
		 * This is only needed for non-USB4 ports.
		 */
		if (!tb_is_upstream_port(port) || port->xdomain)
			tb_port_start_lane_initialization(port);
	}

	return has_remote || port->xdomain;
}

/**
 * tb_port_is_enabled() - Is the adapter port enabled
 * @port: Port to check
 *
 * Return: %true if port is enabled, %false otherwise.
 */
bool tb_port_is_enabled(struct tb_port *port)
{
	switch (port->config.type) {
	case TB_TYPE_PCIE_UP:
	case TB_TYPE_PCIE_DOWN:
		return tb_pci_port_is_enabled(port);

	case TB_TYPE_DP_HDMI_IN:
	case TB_TYPE_DP_HDMI_OUT:
		return tb_dp_port_is_enabled(port);

	case TB_TYPE_USB3_UP:
	case TB_TYPE_USB3_DOWN:
		return tb_usb3_port_is_enabled(port);

	default:
		return false;
	}
}

/**
 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
 * @port: USB3 adapter port to check
 *
 * Return: %true if port is enabled, %false otherwise.
 */
bool tb_usb3_port_is_enabled(struct tb_port *port)
{
	u32 data;

	if (tb_port_read(port, &data, TB_CFG_PORT,
			 port->cap_adap + ADP_USB3_CS_0, 1))
		return false;

	return !!(data & ADP_USB3_CS_0_PE);
}

/**
 * tb_usb3_port_enable() - Enable USB3 adapter port
 * @port: USB3 adapter port to enable
 * @enable: Enable/disable the USB3 adapter
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_usb3_port_enable(struct tb_port *port, bool enable)
{
	u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
			  : ADP_USB3_CS_0_V;

	if (!port->cap_adap)
		return -ENXIO;
	return tb_port_write(port, &word, TB_CFG_PORT,
			     port->cap_adap + ADP_USB3_CS_0, 1);
}

/**
 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
 * @port: PCIe port to check
 *
 * Return: %true if port is enabled, %false otherwise.
 */
bool tb_pci_port_is_enabled(struct tb_port *port)
{
	u32 data;

	if (tb_port_read(port, &data, TB_CFG_PORT,
			 port->cap_adap + ADP_PCIE_CS_0, 1))
		return false;

	return !!(data & ADP_PCIE_CS_0_PE);
}

/**
 * tb_pci_port_enable() - Enable PCIe adapter port
 * @port: PCIe port to enable
 * @enable: Enable/disable the PCIe adapter
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_pci_port_enable(struct tb_port *port, bool enable)
{
	u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;
	if (!port->cap_adap)
		return -ENXIO;
	return tb_port_write(port, &word, TB_CFG_PORT,
			     port->cap_adap + ADP_PCIE_CS_0, 1);
}

/**
 * tb_dp_port_hpd_is_active() - Is HPD already active
 * @port: DP out port to check
 *
 * Checks if the DP OUT adapter port has HPD bit already set.
 *
 * Return: %1 if HPD is active, %0 if not, negative errno in case of
 * failure.
 */
int tb_dp_port_hpd_is_active(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	return !!(data & ADP_DP_CS_2_HPD);
}

/**
 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
 * @port: Port to clear HPD
 *
 * If the DP IN port has HPD set, this function can be used to clear it.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_dp_port_hpd_clear(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_3, 1);
	if (ret)
		return ret;

	data |= ADP_DP_CS_3_HPDC;
	return tb_port_write(port, &data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_3, 1);
}

/**
 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
 * @port: DP IN/OUT port to set hops
 * @video: Video Hop ID
 * @aux_tx: AUX TX Hop ID
 * @aux_rx: AUX RX Hop ID
 *
 * Programs specified Hop IDs for DP IN/OUT port. Can be called for USB4
 * router DP adapters too but does not program the values as the fields
 * are read-only.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
			unsigned int aux_tx, unsigned int aux_rx)
{
	u32 data[2];
	int ret;

	if (tb_switch_is_usb4(port->sw))
		return 0;

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
		ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
		ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}

/**
 * tb_dp_port_is_enabled() - Is DP adapter port enabled
 * @port: DP adapter port to check
 *
 * Return: %true if DP port is enabled, %false otherwise.
 */
bool tb_dp_port_is_enabled(struct tb_port *port)
{
	u32 data[2];

	if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
			 ARRAY_SIZE(data)))
		return false;

	return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
}

/**
 * tb_dp_port_enable() - Enables/disables DP paths of a port
 * @port: DP IN/OUT port
 * @enable: Enable/disable DP path
 *
 * Once Hop IDs are programmed DP paths can be enabled or disabled by
 * calling this function.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_dp_port_enable(struct tb_port *port, bool enable)
{
	u32 data[2];
	int ret;

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	if (enable)
		data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
	else
		data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}

/* switch utility functions */

static const char *tb_switch_generation_name(const struct tb_switch *sw)
{
	switch (sw->generation) {
	case 1:
		return "Thunderbolt 1";
	case 2:
		return "Thunderbolt 2";
	case 3:
		return "Thunderbolt 3";
	case 4:
		return "USB4";
	default:
		return "Unknown";
	}
}

static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
{
	const struct tb_regs_switch_header *regs = &sw->config;

	tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
	       tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
	       regs->revision, regs->thunderbolt_version);
	tb_dbg(tb, " Max Port Number: %d\n", regs->max_port_number);
	tb_dbg(tb, " Config:\n");
	tb_dbg(tb,
	       " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
	       regs->upstream_port_number, regs->depth,
	       (((u64) regs->route_hi) << 32) | regs->route_lo,
	       regs->enabled, regs->plug_events_delay);
	tb_dbg(tb, " unknown1: %#x unknown4: %#x\n",
	       regs->__unknown1, regs->__unknown4);
}

static int tb_switch_reset_host(struct tb_switch *sw)
{
	if (sw->generation > 1) {
		struct tb_port *port;

		tb_switch_for_each_port(sw, port) {
			int i, ret;

			/*
			 * For lane adapters we issue downstream port
			 * reset and clear up path config spaces.
			 *
			 * For protocol adapters we disable the path and
			 * clear path config space one by one (from 8 to
			 * Max Input HopID of the adapter).
			 */
			if (tb_port_is_null(port) && !tb_is_upstream_port(port)) {
				ret = tb_port_reset(port);
				if (ret)
					return ret;
			} else if (tb_port_is_usb3_down(port) ||
				   tb_port_is_usb3_up(port)) {
				tb_usb3_port_enable(port, false);
			} else if (tb_port_is_dpin(port) ||
				   tb_port_is_dpout(port)) {
				tb_dp_port_enable(port, false);
			} else if (tb_port_is_pcie_down(port) ||
				   tb_port_is_pcie_up(port)) {
				tb_pci_port_enable(port, false);
			} else {
				continue;
			}

			/* Cleanup path config space of protocol adapter */
			for (i = TB_PATH_MIN_HOPID;
			     i <= port->config.max_in_hop_id; i++) {
				ret = tb_path_deactivate_hop(port, i);
				if (ret)
					return ret;
			}
		}
	} else {
		struct tb_cfg_result res;

		/* Thunderbolt 1 uses the "reset" config space packet */
		res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
				      TB_CFG_SWITCH, 2, 2);
		if (res.err)
			return res.err;
		res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
		if (res.err > 0)
			return -EIO;
		else if (res.err < 0)
			return res.err;
	}

	return 0;
}

static int tb_switch_reset_device(struct tb_switch *sw)
{
	return tb_port_reset(tb_switch_downstream_port(sw));
}

static bool tb_switch_enumerated(struct tb_switch *sw)
{
	u32 val;
	int ret;

	/*
	 * Read directly from the hardware because we use this also
	 * during system sleep where sw->config.enabled is already set
	 * by us.
	 */
	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_3, 1);
	if (ret)
		return false;

	return !!(val & ROUTER_CS_3_V);
}

/**
 * tb_switch_reset() - Perform reset to the router
 * @sw: Router to reset
 *
 * Issues reset to the router @sw. Can be used for any router. For host
 * routers, resets all the downstream ports and cleans up path config
 * spaces accordingly. For device routers, issues downstream port reset
 * through the parent router, so as a side effect there will be an unplug
 * soon after this is finished.
 *
 * If the router is not enumerated, does nothing.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_switch_reset(struct tb_switch *sw)
{
	int ret;

	/*
	 * We cannot access the port config spaces unless the router is
	 * already enumerated. If the router is not enumerated it is
	 * equal to being reset so we can skip that here.
	 */
	if (!tb_switch_enumerated(sw))
		return 0;

	tb_sw_dbg(sw, "resetting\n");

	if (tb_route(sw))
		ret = tb_switch_reset_device(sw);
	else
		ret = tb_switch_reset_host(sw);

	if (ret)
		tb_sw_warn(sw, "failed to reset\n");

	return ret;
}

/**
 * tb_switch_wait_for_bit() - Wait for specified value of bits in offset
 * @sw: Router to read the offset value from
 * @offset: Offset in the router config space to read from
 * @bit: Bit mask in the offset to wait for
 * @value: Value of the bits to wait for
 * @timeout_msec: Timeout in ms how long to wait
 *
 * Wait until the specified bits in the specified offset reach the
 * specified value.
 *
 * Return:
 * * %0 - On success.
 * * %-ETIMEDOUT - If the @value was not reached within
 *		   the given timeout.
 * * Negative errno - In case of failure.
 */
int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
			   u32 value, int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		u32 val;
		int ret;

		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
		if (ret)
			return ret;

		if ((val & bit) == value)
			return 0;

		usleep_range(50, 100);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

/*
 * tb_plug_events_active() - enable/disable plug events on a switch
 *
 * Also configures a sane plug_events_delay of 255ms.
 *
 * Return: %0 on success, negative errno otherwise.
 */
static int tb_plug_events_active(struct tb_switch *sw, bool active)
{
	u32 data;
	int res;

	if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
		return 0;

	sw->config.plug_events_delay = 0xff;
	res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
	if (res)
		return res;

	res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
	if (res)
		return res;

	if (active) {
		data = data & 0xFFFFFF83;
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
			break;
		default:
			/*
			 * Skip Alpine Ridge, it needs to have vendor
			 * specific USB hotplug event enabled for the
			 * internal xHCI to work.
			 */
			if (!tb_switch_is_alpine_ridge(sw))
				data |= TB_PLUG_EVENTS_USB_DISABLE;
		}
	} else {
		data = data | 0x7c;
	}
	return tb_sw_write(sw, &data, TB_CFG_SWITCH,
			   sw->cap_plug_events + 1, 1);
}

static ssize_t authorized_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%u\n", sw->authorized);
}

static int disapprove_switch(struct device *dev, void *not_used)
{
	char *envp[] = { "AUTHORIZED=0", NULL };
	struct tb_switch *sw;

	sw = tb_to_switch(dev);
	if (sw && sw->authorized) {
		int ret;

		/* First children */
		ret = device_for_each_child_reverse(&sw->dev, NULL, disapprove_switch);
		if (ret)
			return ret;

		ret = tb_domain_disapprove_switch(sw->tb, sw);
		if (ret)
			return ret;

		sw->authorized = 0;
		kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
	}

	return 0;
}

static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
{
	char envp_string[13];
	int ret = -EINVAL;
	char *envp[] = { envp_string, NULL };

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (!!sw->authorized == !!val)
		goto unlock;

	switch (val) {
	/* Disapprove switch */
	case 0:
		if (tb_route(sw)) {
			ret = disapprove_switch(&sw->dev, NULL);
			goto unlock;
		}
		break;

	/* Approve switch */
	case 1:
		if (sw->key)
			ret = tb_domain_approve_switch_key(sw->tb, sw);
		else
			ret = tb_domain_approve_switch(sw->tb, sw);
		break;

	/* Challenge switch */
	case 2:
		if (sw->key)
			ret = tb_domain_challenge_switch_key(sw->tb, sw);
		break;

	default:
		break;
	}

	if (!ret) {
		sw->authorized = val;
		/*
		 * Notify status change to the userspace, informing the new
		 * value of /sys/bus/thunderbolt/devices/.../authorized.
		 */
		sprintf(envp_string, "AUTHORIZED=%u", sw->authorized);
		kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
	}

unlock:
	mutex_unlock(&sw->tb->lock);
	return ret;
}

static ssize_t authorized_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	unsigned int val;
	ssize_t ret;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;
	if (val > 2)
		return -EINVAL;

	pm_runtime_get_sync(&sw->dev);
	ret = tb_switch_set_authorized(sw, val);
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret ? ret : count;
}
static DEVICE_ATTR_RW(authorized);

static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%u\n", sw->boot);
}
static DEVICE_ATTR_RO(boot);

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%#x\n", sw->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%s\n", sw->device_name ?: "");
}
static DEVICE_ATTR_RO(device_name);

static ssize_t
generation_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%u\n", sw->generation);
}
static DEVICE_ATTR_RO(generation);

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	ssize_t ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->key)
		ret = sysfs_emit(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
	else
		ret = sysfs_emit(buf, "\n");

	mutex_unlock(&sw->tb->lock);
	return ret;
}

static ssize_t key_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u8 key[TB_SWITCH_KEY_SIZE];
	ssize_t ret = count;
	bool clear = false;

	if (!strcmp(buf, "\n"))
		clear = true;
	else if (hex2bin(key, buf, sizeof(key)))
		return -EINVAL;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->authorized) {
		ret = -EBUSY;
	} else {
		kfree(sw->key);
		if (clear) {
			sw->key = NULL;
		} else {
			sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
			if (!sw->key)
				ret = -ENOMEM;
		}
	}

	mutex_unlock(&sw->tb->lock);
	return ret;
}
static DEVICE_ATTR(key, 0600, key_show, key_store);
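
/*
 * Example usage of the attributes above (sketch only; the device name
 * "0-1" is hypothetical): with the secure connect security level,
 * userspace can store a key (TB_SWITCH_KEY_SIZE bytes as hex) and then
 * challenge the device by writing 2 to the authorized attribute:
 *
 *	# echo <hex key> > /sys/bus/thunderbolt/devices/0-1/key
 *	# echo 2 > /sys/bus/thunderbolt/devices/0-1/authorized
 */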

static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%u.0 Gb/s\n", sw->link_speed);
}

/*
 * Currently all lanes must run at the same speed but we expose here
 * both directions to allow possible asymmetric links in the future.
 */
static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);

static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	unsigned int width;

	switch (sw->link_width) {
	case TB_LINK_WIDTH_SINGLE:
	case TB_LINK_WIDTH_ASYM_TX:
		width = 1;
		break;
	case TB_LINK_WIDTH_DUAL:
		width = 2;
		break;
	case TB_LINK_WIDTH_ASYM_RX:
		width = 3;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	return sysfs_emit(buf, "%u\n", width);
}
static DEVICE_ATTR(rx_lanes, 0444, rx_lanes_show, NULL);

static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	unsigned int width;

	switch (sw->link_width) {
	case TB_LINK_WIDTH_SINGLE:
	case TB_LINK_WIDTH_ASYM_RX:
		width = 1;
		break;
	case TB_LINK_WIDTH_DUAL:
		width = 2;
		break;
	case TB_LINK_WIDTH_ASYM_TX:
		width = 3;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	return sysfs_emit(buf, "%u\n", width);
}
static DEVICE_ATTR(tx_lanes, 0444, tx_lanes_show, NULL);

static ssize_t nvm_authenticate_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u32 status;

	nvm_get_auth_status(sw, &status);
	return sysfs_emit(buf, "%#x\n", status);
}

static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
				      bool disconnect)
{
	struct tb_switch *sw = tb_to_switch(dev);
	int val, ret;

	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto exit_rpm;
	}

	if (sw->no_nvm_upgrade) {
		ret = -EOPNOTSUPP;
		goto exit_unlock;
	}

	/* If NVMem devices are not yet added */
	if (!sw->nvm) {
		ret = -EAGAIN;
		goto exit_unlock;
	}

	ret = kstrtoint(buf, 10, &val);
	if (ret)
		goto exit_unlock;

	/* Always clear the authentication status */
	nvm_clear_auth_status(sw);

	if (val > 0) {
		if (val == AUTHENTICATE_ONLY) {
			if (disconnect)
				ret = -EINVAL;
			else
				ret = nvm_authenticate(sw, true);
		} else {
			if (!sw->nvm->flushed) {
				if (!sw->nvm->buf) {
					ret = -EINVAL;
					goto exit_unlock;
				}

				ret = nvm_validate_and_write(sw);
				if (ret || val == WRITE_ONLY)
					goto exit_unlock;
			}
			if (val == WRITE_AND_AUTHENTICATE) {
				if (disconnect)
					ret = tb_lc_force_power(sw);
				else
					ret = nvm_authenticate(sw, false);
			}
		}
	}

exit_unlock:
	mutex_unlock(&sw->tb->lock);
exit_rpm:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}

static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	int ret = nvm_authenticate_sysfs(dev, buf, false);
	if (ret)
		return ret;
2138 return count; 2139 } 2140 static DEVICE_ATTR_RW(nvm_authenticate); 2141 2142 static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev, 2143 struct device_attribute *attr, char *buf) 2144 { 2145 return nvm_authenticate_show(dev, attr, buf); 2146 } 2147 2148 static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev, 2149 struct device_attribute *attr, const char *buf, size_t count) 2150 { 2151 int ret; 2152 2153 ret = nvm_authenticate_sysfs(dev, buf, true); 2154 return ret ? ret : count; 2155 } 2156 static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect); 2157 2158 static ssize_t nvm_version_show(struct device *dev, 2159 struct device_attribute *attr, char *buf) 2160 { 2161 struct tb_switch *sw = tb_to_switch(dev); 2162 int ret; 2163 2164 if (!mutex_trylock(&sw->tb->lock)) 2165 return restart_syscall(); 2166 2167 if (sw->safe_mode) 2168 ret = -ENODATA; 2169 else if (!sw->nvm) 2170 ret = -EAGAIN; 2171 else 2172 ret = sysfs_emit(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor); 2173 2174 mutex_unlock(&sw->tb->lock); 2175 2176 return ret; 2177 } 2178 static DEVICE_ATTR_RO(nvm_version); 2179 2180 static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, 2181 char *buf) 2182 { 2183 struct tb_switch *sw = tb_to_switch(dev); 2184 2185 return sysfs_emit(buf, "%#x\n", sw->vendor); 2186 } 2187 static DEVICE_ATTR_RO(vendor); 2188 2189 static ssize_t 2190 vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf) 2191 { 2192 struct tb_switch *sw = tb_to_switch(dev); 2193 2194 return sysfs_emit(buf, "%s\n", sw->vendor_name ?: ""); 2195 } 2196 static DEVICE_ATTR_RO(vendor_name); 2197 2198 static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr, 2199 char *buf) 2200 { 2201 struct tb_switch *sw = tb_to_switch(dev); 2202 2203 return sysfs_emit(buf, "%pUb\n", sw->uuid); 2204 } 2205 static DEVICE_ATTR_RO(unique_id); 2206 2207 static struct attribute *switch_attrs[] = { 2208 &dev_attr_authorized.attr, 2209 &dev_attr_boot.attr, 2210 &dev_attr_device.attr, 2211 &dev_attr_device_name.attr, 2212 &dev_attr_generation.attr, 2213 &dev_attr_key.attr, 2214 &dev_attr_nvm_authenticate.attr, 2215 &dev_attr_nvm_authenticate_on_disconnect.attr, 2216 &dev_attr_nvm_version.attr, 2217 &dev_attr_rx_speed.attr, 2218 &dev_attr_rx_lanes.attr, 2219 &dev_attr_tx_speed.attr, 2220 &dev_attr_tx_lanes.attr, 2221 &dev_attr_vendor.attr, 2222 &dev_attr_vendor_name.attr, 2223 &dev_attr_unique_id.attr, 2224 NULL, 2225 }; 2226 2227 static umode_t switch_attr_is_visible(struct kobject *kobj, 2228 struct attribute *attr, int n) 2229 { 2230 struct device *dev = kobj_to_dev(kobj); 2231 struct tb_switch *sw = tb_to_switch(dev); 2232 2233 if (attr == &dev_attr_authorized.attr) { 2234 if (sw->tb->security_level == TB_SECURITY_NOPCIE || 2235 sw->tb->security_level == TB_SECURITY_DPONLY) 2236 return 0; 2237 } else if (attr == &dev_attr_device.attr) { 2238 if (!sw->device) 2239 return 0; 2240 } else if (attr == &dev_attr_device_name.attr) { 2241 if (!sw->device_name) 2242 return 0; 2243 } else if (attr == &dev_attr_vendor.attr) { 2244 if (!sw->vendor) 2245 return 0; 2246 } else if (attr == &dev_attr_vendor_name.attr) { 2247 if (!sw->vendor_name) 2248 return 0; 2249 } else if (attr == &dev_attr_key.attr) { 2250 if (tb_route(sw) && 2251 sw->tb->security_level == TB_SECURITY_SECURE && 2252 sw->security_level == TB_SECURITY_SECURE) 2253 return attr->mode; 2254 return 0; 2255 } else if (attr == &dev_attr_rx_speed.attr || 2256 attr == &dev_attr_rx_lanes.attr || 2257 
attr == &dev_attr_tx_speed.attr || 2258 attr == &dev_attr_tx_lanes.attr) { 2259 if (tb_route(sw)) 2260 return attr->mode; 2261 return 0; 2262 } else if (attr == &dev_attr_nvm_authenticate.attr) { 2263 if (nvm_upgradeable(sw)) 2264 return attr->mode; 2265 return 0; 2266 } else if (attr == &dev_attr_nvm_version.attr) { 2267 if (nvm_readable(sw)) 2268 return attr->mode; 2269 return 0; 2270 } else if (attr == &dev_attr_boot.attr) { 2271 if (tb_route(sw)) 2272 return attr->mode; 2273 return 0; 2274 } else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) { 2275 if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER) 2276 return attr->mode; 2277 return 0; 2278 } 2279 2280 return sw->safe_mode ? 0 : attr->mode; 2281 } 2282 2283 static const struct attribute_group switch_group = { 2284 .is_visible = switch_attr_is_visible, 2285 .attrs = switch_attrs, 2286 }; 2287 2288 static const struct attribute_group *switch_groups[] = { 2289 &switch_group, 2290 NULL, 2291 }; 2292 2293 static void tb_switch_release(struct device *dev) 2294 { 2295 struct tb_switch *sw = tb_to_switch(dev); 2296 struct tb_port *port; 2297 2298 dma_port_free(sw->dma_port); 2299 2300 tb_switch_for_each_port(sw, port) { 2301 ida_destroy(&port->in_hopids); 2302 ida_destroy(&port->out_hopids); 2303 } 2304 2305 kfree(sw->uuid); 2306 kfree(sw->device_name); 2307 kfree(sw->vendor_name); 2308 kfree(sw->ports); 2309 kfree(sw->drom); 2310 kfree(sw->key); 2311 kfree(sw); 2312 } 2313 2314 static int tb_switch_uevent(const struct device *dev, struct kobj_uevent_env *env) 2315 { 2316 const struct tb_switch *sw = tb_to_switch(dev); 2317 const char *type; 2318 2319 if (tb_switch_is_usb4(sw)) { 2320 if (add_uevent_var(env, "USB4_VERSION=%u.0", 2321 usb4_switch_version(sw))) 2322 return -ENOMEM; 2323 } 2324 2325 if (!tb_route(sw)) { 2326 type = "host"; 2327 } else { 2328 const struct tb_port *port; 2329 bool hub = false; 2330 2331 /* Device is hub if it has any downstream ports */ 2332 tb_switch_for_each_port(sw, port) { 2333 if (!port->disabled && !tb_is_upstream_port(port) && 2334 tb_port_is_null(port)) { 2335 hub = true; 2336 break; 2337 } 2338 } 2339 2340 type = hub ? "hub" : "device"; 2341 } 2342 2343 if (add_uevent_var(env, "USB4_TYPE=%s", type)) 2344 return -ENOMEM; 2345 return 0; 2346 } 2347 2348 /* 2349 * Currently only need to provide the callbacks. Everything else is handled 2350 * in the connection manager. 
2351 */ 2352 static int __maybe_unused tb_switch_runtime_suspend(struct device *dev) 2353 { 2354 struct tb_switch *sw = tb_to_switch(dev); 2355 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops; 2356 2357 if (cm_ops->runtime_suspend_switch) 2358 return cm_ops->runtime_suspend_switch(sw); 2359 2360 return 0; 2361 } 2362 2363 static int __maybe_unused tb_switch_runtime_resume(struct device *dev) 2364 { 2365 struct tb_switch *sw = tb_to_switch(dev); 2366 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops; 2367 2368 if (cm_ops->runtime_resume_switch) 2369 return cm_ops->runtime_resume_switch(sw); 2370 return 0; 2371 } 2372 2373 static const struct dev_pm_ops tb_switch_pm_ops = { 2374 SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume, 2375 NULL) 2376 }; 2377 2378 const struct device_type tb_switch_type = { 2379 .name = "thunderbolt_device", 2380 .release = tb_switch_release, 2381 .uevent = tb_switch_uevent, 2382 .pm = &tb_switch_pm_ops, 2383 }; 2384 2385 static int tb_switch_get_generation(struct tb_switch *sw) 2386 { 2387 if (tb_switch_is_usb4(sw)) 2388 return 4; 2389 2390 if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) { 2391 switch (sw->config.device_id) { 2392 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE: 2393 case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE: 2394 case PCI_DEVICE_ID_INTEL_LIGHT_PEAK: 2395 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C: 2396 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C: 2397 case PCI_DEVICE_ID_INTEL_PORT_RIDGE: 2398 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE: 2399 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE: 2400 return 1; 2401 2402 case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE: 2403 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE: 2404 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE: 2405 return 2; 2406 2407 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE: 2408 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE: 2409 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE: 2410 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE: 2411 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE: 2412 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE: 2413 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE: 2414 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE: 2415 case PCI_DEVICE_ID_INTEL_ICL_NHI0: 2416 case PCI_DEVICE_ID_INTEL_ICL_NHI1: 2417 return 3; 2418 } 2419 } 2420 2421 /* 2422 * For unknown switches assume generation to be 1 to be on the 2423 * safe side. 2424 */ 2425 tb_sw_warn(sw, "unsupported switch device id %#x\n", 2426 sw->config.device_id); 2427 return 1; 2428 } 2429 2430 static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth) 2431 { 2432 int max_depth; 2433 2434 if (tb_switch_is_usb4(sw) || 2435 (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch))) 2436 max_depth = USB4_SWITCH_MAX_DEPTH; 2437 else 2438 max_depth = TB_SWITCH_MAX_DEPTH; 2439 2440 return depth > max_depth; 2441 } 2442 2443 /** 2444 * tb_switch_alloc() - allocate a switch 2445 * @tb: Pointer to the owning domain 2446 * @parent: Parent device for this switch 2447 * @route: Route string for this switch 2448 * 2449 * Allocates and initializes a switch. Will not upload configuration to 2450 * the switch. For that you need to call tb_switch_configure() 2451 * separately. The returned switch should be released by calling 2452 * tb_switch_put(). 2453 * 2454 * Return: Pointer to &struct tb_switch or ERR_PTR() in case of failure. 
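 *
 * A minimal usage sketch (hedged; error handling is trimmed and the
 * parent_sw and route names are illustrative only, mirroring how a
 * connection manager might drive the sequence):
 *
 *	sw = tb_switch_alloc(tb, &parent_sw->dev, route);
 *	if (!IS_ERR(sw)) {
 *		if (tb_switch_configure(sw) || tb_switch_add(sw))
 *			tb_switch_put(sw);
 *	}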
2455 */ 2456 struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent, 2457 u64 route) 2458 { 2459 struct tb_switch *sw; 2460 int upstream_port; 2461 int i, ret, depth; 2462 2463 /* Unlock the downstream port so we can access the switch below */ 2464 if (route) { 2465 struct tb_switch *parent_sw = tb_to_switch(parent); 2466 struct tb_port *down; 2467 2468 down = tb_port_at(route, parent_sw); 2469 tb_port_unlock(down); 2470 } 2471 2472 depth = tb_route_length(route); 2473 2474 upstream_port = tb_cfg_get_upstream_port(tb->ctl, route); 2475 if (upstream_port < 0) 2476 return ERR_PTR(upstream_port); 2477 2478 sw = kzalloc(sizeof(*sw), GFP_KERNEL); 2479 if (!sw) 2480 return ERR_PTR(-ENOMEM); 2481 2482 sw->tb = tb; 2483 ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5); 2484 if (ret) 2485 goto err_free_sw_ports; 2486 2487 sw->generation = tb_switch_get_generation(sw); 2488 2489 tb_dbg(tb, "current switch config:\n"); 2490 tb_dump_switch(tb, sw); 2491 2492 /* configure switch */ 2493 sw->config.upstream_port_number = upstream_port; 2494 sw->config.depth = depth; 2495 sw->config.route_hi = upper_32_bits(route); 2496 sw->config.route_lo = lower_32_bits(route); 2497 sw->config.enabled = 0; 2498 2499 /* Make sure we do not exceed maximum topology limit */ 2500 if (tb_switch_exceeds_max_depth(sw, depth)) { 2501 ret = -EADDRNOTAVAIL; 2502 goto err_free_sw_ports; 2503 } 2504 2505 /* initialize ports */ 2506 sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports), 2507 GFP_KERNEL); 2508 if (!sw->ports) { 2509 ret = -ENOMEM; 2510 goto err_free_sw_ports; 2511 } 2512 2513 for (i = 0; i <= sw->config.max_port_number; i++) { 2514 /* minimum setup for tb_find_cap and tb_drom_read to work */ 2515 sw->ports[i].sw = sw; 2516 sw->ports[i].port = i; 2517 2518 /* Control port does not need HopID allocation */ 2519 if (i) { 2520 ida_init(&sw->ports[i].in_hopids); 2521 ida_init(&sw->ports[i].out_hopids); 2522 } 2523 } 2524 2525 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS); 2526 if (ret > 0) 2527 sw->cap_plug_events = ret; 2528 2529 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_TIME2); 2530 if (ret > 0) 2531 sw->cap_vsec_tmu = ret; 2532 2533 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER); 2534 if (ret > 0) 2535 sw->cap_lc = ret; 2536 2537 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_CP_LP); 2538 if (ret > 0) 2539 sw->cap_lp = ret; 2540 2541 /* Root switch is always authorized */ 2542 if (!route) 2543 sw->authorized = true; 2544 2545 device_initialize(&sw->dev); 2546 sw->dev.parent = parent; 2547 sw->dev.bus = &tb_bus_type; 2548 sw->dev.type = &tb_switch_type; 2549 sw->dev.groups = switch_groups; 2550 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw)); 2551 2552 return sw; 2553 2554 err_free_sw_ports: 2555 kfree(sw->ports); 2556 kfree(sw); 2557 2558 return ERR_PTR(ret); 2559 } 2560 2561 /** 2562 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode 2563 * @tb: Pointer to the owning domain 2564 * @parent: Parent device for this switch 2565 * @route: Route string for this switch 2566 * 2567 * This creates a switch in safe mode. This means the switch pretty much 2568 * lacks all capabilities except DMA configuration port before it is 2569 * flashed with a valid NVM firmware. 2570 * 2571 * The returned switch must be released by calling tb_switch_put(). 2572 * 2573 * Return: Pointer to &struct tb_switch or ERR_PTR() in case of failure. 
2574 */ 2575 struct tb_switch * 2576 tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route) 2577 { 2578 struct tb_switch *sw; 2579 2580 sw = kzalloc(sizeof(*sw), GFP_KERNEL); 2581 if (!sw) 2582 return ERR_PTR(-ENOMEM); 2583 2584 sw->tb = tb; 2585 sw->config.depth = tb_route_length(route); 2586 sw->config.route_hi = upper_32_bits(route); 2587 sw->config.route_lo = lower_32_bits(route); 2588 sw->safe_mode = true; 2589 2590 device_initialize(&sw->dev); 2591 sw->dev.parent = parent; 2592 sw->dev.bus = &tb_bus_type; 2593 sw->dev.type = &tb_switch_type; 2594 sw->dev.groups = switch_groups; 2595 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw)); 2596 2597 return sw; 2598 } 2599 2600 /** 2601 * tb_switch_configure() - Uploads configuration to the switch 2602 * @sw: Switch to configure 2603 * 2604 * Call this function before the switch is added to the system. It 2605 * uploads the configuration to the switch and makes it available for the 2606 * connection manager to use. Can be called for the same switch again after 2607 * resume from low power states to re-initialize it. 2608 * 2609 * Return: %0 on success, negative errno otherwise. 2610 */ 2611 int tb_switch_configure(struct tb_switch *sw) 2612 { 2613 struct tb *tb = sw->tb; 2614 u64 route; 2615 int ret; 2616 2617 route = tb_route(sw); 2618 2619 tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n", 2620 sw->config.enabled ? "restoring" : "initializing", route, 2621 tb_route_length(route), sw->config.upstream_port_number); 2622 2623 sw->config.enabled = 1; 2624 2625 if (tb_switch_is_usb4(sw)) { 2626 /* 2627 * For USB4 devices, we need to program the CM version 2628 * accordingly so that it knows to expose all the 2629 * additional capabilities. Program it according to USB4 2630 * version to avoid changing existing (v1) routers' behaviour. 2631 */ 2632 if (usb4_switch_version(sw) < 2) 2633 sw->config.cmuv = ROUTER_CS_4_CMUV_V1; 2634 else 2635 sw->config.cmuv = ROUTER_CS_4_CMUV_V2; 2636 sw->config.plug_events_delay = 0xa; 2637 2638 /* Enumerate the switch */ 2639 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH, 2640 ROUTER_CS_1, 4); 2641 if (ret) 2642 return ret; 2643 2644 ret = usb4_switch_setup(sw); 2645 } else { 2646 if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL) 2647 tb_sw_warn(sw, "unknown switch vendor id %#x\n", 2648 sw->config.vendor_id); 2649 2650 if (!sw->cap_plug_events) { 2651 tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS, aborting\n"); 2652 return -ENODEV; 2653 } 2654 2655 /* Enumerate the switch */ 2656 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH, 2657 ROUTER_CS_1, 3); 2658 } 2659 if (ret) 2660 return ret; 2661 2662 return tb_plug_events_active(sw, true); 2663 } 2664 2665 /** 2666 * tb_switch_configuration_valid() - Set the tunneling configuration to be valid 2667 * @sw: Router to configure 2668 * 2669 * Needs to be called before any tunnels can be set up through the 2670 * router. Can be called for any router. 2671 * 2672 * Return: %0 on success, negative errno otherwise.
2673 */ 2674 int tb_switch_configuration_valid(struct tb_switch *sw) 2675 { 2676 if (tb_switch_is_usb4(sw)) 2677 return usb4_switch_configuration_valid(sw); 2678 return 0; 2679 } 2680 2681 static int tb_switch_set_uuid(struct tb_switch *sw) 2682 { 2683 bool uid = false; 2684 u32 uuid[4]; 2685 int ret; 2686 2687 if (sw->uuid) 2688 return 0; 2689 2690 if (tb_switch_is_usb4(sw)) { 2691 ret = usb4_switch_read_uid(sw, &sw->uid); 2692 if (ret) 2693 return ret; 2694 uid = true; 2695 } else { 2696 /* 2697 * The newer controllers include fused UUID as part of 2698 * link controller specific registers 2699 */ 2700 ret = tb_lc_read_uuid(sw, uuid); 2701 if (ret) { 2702 if (ret != -EINVAL) 2703 return ret; 2704 uid = true; 2705 } 2706 } 2707 2708 if (uid) { 2709 /* 2710 * ICM generates UUID based on UID and fills the upper 2711 * two words with ones. This is not strictly following 2712 * UUID format but we want to be compatible with it so 2713 * we do the same here. 2714 */ 2715 uuid[0] = sw->uid & 0xffffffff; 2716 uuid[1] = (sw->uid >> 32) & 0xffffffff; 2717 uuid[2] = 0xffffffff; 2718 uuid[3] = 0xffffffff; 2719 } 2720 2721 sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL); 2722 if (!sw->uuid) 2723 return -ENOMEM; 2724 return 0; 2725 } 2726 2727 static int tb_switch_add_dma_port(struct tb_switch *sw) 2728 { 2729 u32 status; 2730 int ret; 2731 2732 switch (sw->generation) { 2733 case 2: 2734 /* Only root switch can be upgraded */ 2735 if (tb_route(sw)) 2736 return 0; 2737 2738 fallthrough; 2739 case 3: 2740 case 4: 2741 ret = tb_switch_set_uuid(sw); 2742 if (ret) 2743 return ret; 2744 break; 2745 2746 default: 2747 /* 2748 * DMA port is the only thing available when the switch 2749 * is in safe mode. 2750 */ 2751 if (!sw->safe_mode) 2752 return 0; 2753 break; 2754 } 2755 2756 if (sw->no_nvm_upgrade) 2757 return 0; 2758 2759 if (tb_switch_is_usb4(sw)) { 2760 ret = usb4_switch_nvm_authenticate_status(sw, &status); 2761 if (ret) 2762 return ret; 2763 2764 if (status) { 2765 tb_sw_info(sw, "switch flash authentication failed\n"); 2766 nvm_set_auth_status(sw, status); 2767 } 2768 2769 return 0; 2770 } 2771 2772 /* Root switch DMA port requires running firmware */ 2773 if (!tb_route(sw) && !tb_switch_is_icm(sw)) 2774 return 0; 2775 2776 sw->dma_port = dma_port_alloc(sw); 2777 if (!sw->dma_port) 2778 return 0; 2779 2780 /* 2781 * If there is status already set then authentication failed 2782 * when the dma_port_flash_update_auth() returned. Power cycling 2783 * is not needed (it was done already) so only thing we do here 2784 * is to unblock runtime PM of the root port. 2785 */ 2786 nvm_get_auth_status(sw, &status); 2787 if (status) { 2788 if (!tb_route(sw)) 2789 nvm_authenticate_complete_dma_port(sw); 2790 return 0; 2791 } 2792 2793 /* 2794 * Check status of the previous flash authentication. If there 2795 * is one we need to power cycle the switch in any case to make 2796 * it functional again. 2797 */ 2798 ret = dma_port_flash_update_auth_status(sw->dma_port, &status); 2799 if (ret <= 0) 2800 return ret; 2801 2802 /* Now we can allow root port to suspend again */ 2803 if (!tb_route(sw)) 2804 nvm_authenticate_complete_dma_port(sw); 2805 2806 if (status) { 2807 tb_sw_info(sw, "switch flash authentication failed\n"); 2808 nvm_set_auth_status(sw, status); 2809 } 2810 2811 tb_sw_info(sw, "power cycling the switch now\n"); 2812 dma_port_power_cycle(sw->dma_port); 2813 2814 /* 2815 * We return error here which causes the switch adding failure. 2816 * It should appear back after power cycle is complete. 
2817 */ 2818 return -ESHUTDOWN; 2819 } 2820 2821 static void tb_switch_default_link_ports(struct tb_switch *sw) 2822 { 2823 int i; 2824 2825 for (i = 1; i <= sw->config.max_port_number; i++) { 2826 struct tb_port *port = &sw->ports[i]; 2827 struct tb_port *subordinate; 2828 2829 if (!tb_port_is_null(port)) 2830 continue; 2831 2832 /* Check for the subordinate port */ 2833 if (i == sw->config.max_port_number || 2834 !tb_port_is_null(&sw->ports[i + 1])) 2835 continue; 2836 2837 /* Link them if not already done so (by DROM) */ 2838 subordinate = &sw->ports[i + 1]; 2839 if (!port->dual_link_port && !subordinate->dual_link_port) { 2840 port->link_nr = 0; 2841 port->dual_link_port = subordinate; 2842 subordinate->link_nr = 1; 2843 subordinate->dual_link_port = port; 2844 2845 tb_sw_dbg(sw, "linked ports %d <-> %d\n", 2846 port->port, subordinate->port); 2847 } 2848 } 2849 } 2850 2851 static bool tb_switch_lane_bonding_possible(struct tb_switch *sw) 2852 { 2853 const struct tb_port *up = tb_upstream_port(sw); 2854 2855 if (!up->dual_link_port || !up->dual_link_port->remote) 2856 return false; 2857 2858 if (tb_switch_is_usb4(sw)) 2859 return usb4_switch_lane_bonding_possible(sw); 2860 return tb_lc_lane_bonding_possible(sw); 2861 } 2862 2863 static int tb_switch_update_link_attributes(struct tb_switch *sw) 2864 { 2865 struct tb_port *up; 2866 bool change = false; 2867 int ret; 2868 2869 if (!tb_route(sw) || tb_switch_is_icm(sw)) 2870 return 0; 2871 2872 up = tb_upstream_port(sw); 2873 2874 ret = tb_port_get_link_speed(up); 2875 if (ret < 0) 2876 return ret; 2877 if (sw->link_speed != ret) 2878 change = true; 2879 sw->link_speed = ret; 2880 2881 ret = tb_port_get_link_width(up); 2882 if (ret < 0) 2883 return ret; 2884 if (sw->link_width != ret) 2885 change = true; 2886 sw->link_width = ret; 2887 2888 /* Notify userspace that there is possible link attribute change */ 2889 if (device_is_registered(&sw->dev) && change) 2890 kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE); 2891 2892 return 0; 2893 } 2894 2895 /* Must be called after tb_switch_update_link_attributes() */ 2896 static void tb_switch_link_init(struct tb_switch *sw) 2897 { 2898 struct tb_port *up, *down; 2899 bool bonded; 2900 2901 if (!tb_route(sw) || tb_switch_is_icm(sw)) 2902 return; 2903 2904 tb_sw_dbg(sw, "current link speed %u.0 Gb/s\n", sw->link_speed); 2905 tb_sw_dbg(sw, "current link width %s\n", tb_width_name(sw->link_width)); 2906 2907 bonded = sw->link_width >= TB_LINK_WIDTH_DUAL; 2908 2909 /* 2910 * Gen 4 links come up as bonded so update the port structures 2911 * accordingly. 2912 */ 2913 up = tb_upstream_port(sw); 2914 down = tb_switch_downstream_port(sw); 2915 2916 up->bonded = bonded; 2917 if (up->dual_link_port) 2918 up->dual_link_port->bonded = bonded; 2919 tb_port_update_credits(up); 2920 2921 down->bonded = bonded; 2922 if (down->dual_link_port) 2923 down->dual_link_port->bonded = bonded; 2924 tb_port_update_credits(down); 2925 2926 if (tb_port_get_link_generation(up) < 4) 2927 return; 2928 2929 /* 2930 * Set the Gen 4 preferred link width. This is what the router 2931 * prefers when the link is brought up. If the router does not 2932 * support asymmetric link configuration, this also will be set 2933 * to TB_LINK_WIDTH_DUAL. 
2934 */ 2935 sw->preferred_link_width = sw->link_width; 2936 tb_sw_dbg(sw, "preferred link width %s\n", 2937 tb_width_name(sw->preferred_link_width)); 2938 } 2939 2940 /** 2941 * tb_switch_lane_bonding_enable() - Enable lane bonding 2942 * @sw: Switch to enable lane bonding 2943 * 2944 * The connection manager can call this function to enable lane bonding of a 2945 * switch. If conditions are correct and both switches support the feature, 2946 * the lanes are bonded. It is safe to call this for any switch. 2947 * 2948 * Return: %0 on success, negative errno otherwise. 2949 */ 2950 static int tb_switch_lane_bonding_enable(struct tb_switch *sw) 2951 { 2952 struct tb_port *up, *down; 2953 unsigned int width; 2954 int ret; 2955 2956 if (!tb_switch_lane_bonding_possible(sw)) 2957 return 0; 2958 2959 up = tb_upstream_port(sw); 2960 down = tb_switch_downstream_port(sw); 2961 2962 if (!tb_port_width_supported(up, TB_LINK_WIDTH_DUAL) || 2963 !tb_port_width_supported(down, TB_LINK_WIDTH_DUAL)) 2964 return 0; 2965 2966 /* 2967 * Both lanes need to be in CL0. Here we assume lane 0 is already in 2968 * CL0 and check just for lane 1. 2969 */ 2970 if (tb_wait_for_port(down->dual_link_port, false) <= 0) 2971 return -ENOTCONN; 2972 2973 ret = tb_port_lane_bonding_enable(up); 2974 if (ret) { 2975 tb_port_warn(up, "failed to enable lane bonding\n"); 2976 return ret; 2977 } 2978 2979 ret = tb_port_lane_bonding_enable(down); 2980 if (ret) { 2981 tb_port_warn(down, "failed to enable lane bonding\n"); 2982 tb_port_lane_bonding_disable(up); 2983 return ret; 2984 } 2985 2986 /* Any of these widths means the link is bonded */ 2987 width = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX | 2988 TB_LINK_WIDTH_ASYM_RX; 2989 2990 return tb_port_wait_for_link_width(down, width, 100); 2991 } 2992 2993 /** 2994 * tb_switch_lane_bonding_disable() - Disable lane bonding 2995 * @sw: Switch whose lane bonding to disable 2996 * 2997 * Disables lane bonding between @sw and its parent. This can be called even 2998 * if the lanes were not bonded originally. 2999 * 3000 * Return: %0 on success, negative errno otherwise. 3001 */ 3002 static int tb_switch_lane_bonding_disable(struct tb_switch *sw) 3003 { 3004 struct tb_port *up, *down; 3005 int ret; 3006 3007 up = tb_upstream_port(sw); 3008 if (!up->bonded) 3009 return 0; 3010 3011 /* 3012 * If the link is Gen 4 there is no way to switch the link to 3013 * two single lane links so avoid that here. Also don't bother 3014 * if the link is not up anymore (sw is unplugged). 3015 */ 3016 ret = tb_port_get_link_generation(up); 3017 if (ret < 0) 3018 return ret; 3019 if (ret >= 4) 3020 return -EOPNOTSUPP; 3021 3022 down = tb_switch_downstream_port(sw); 3023 tb_port_lane_bonding_disable(up); 3024 tb_port_lane_bonding_disable(down); 3025 3026 /* 3027 * It is fine if we get other errors as the router might have 3028 * been unplugged.
3029 */ 3030 return tb_port_wait_for_link_width(down, TB_LINK_WIDTH_SINGLE, 100); 3031 } 3032 3033 /* Note: updating sw->link_width is done in tb_switch_update_link_attributes() */ 3034 static int tb_switch_asym_enable(struct tb_switch *sw, enum tb_link_width width) 3035 { 3036 struct tb_port *up, *down, *port; 3037 enum tb_link_width down_width; 3038 int ret; 3039 3040 up = tb_upstream_port(sw); 3041 down = tb_switch_downstream_port(sw); 3042 3043 if (width == TB_LINK_WIDTH_ASYM_TX) { 3044 down_width = TB_LINK_WIDTH_ASYM_RX; 3045 port = down; 3046 } else { 3047 down_width = TB_LINK_WIDTH_ASYM_TX; 3048 port = up; 3049 } 3050 3051 ret = tb_port_set_link_width(up, width); 3052 if (ret) 3053 return ret; 3054 3055 ret = tb_port_set_link_width(down, down_width); 3056 if (ret) 3057 return ret; 3058 3059 /* 3060 * Initiate the change in the router that one of its TX lanes is 3061 * changing to RX but do so only if there is an actual change. 3062 */ 3063 if (sw->link_width != width) { 3064 ret = usb4_port_asym_start(port); 3065 if (ret) 3066 return ret; 3067 3068 ret = tb_port_wait_for_link_width(up, width, 100); 3069 if (ret) 3070 return ret; 3071 } 3072 3073 return 0; 3074 } 3075 3076 /* Note: updating sw->link_width is done in tb_switch_update_link_attributes() */ 3077 static int tb_switch_asym_disable(struct tb_switch *sw) 3078 { 3079 struct tb_port *up, *down; 3080 int ret; 3081 3082 up = tb_upstream_port(sw); 3083 down = tb_switch_downstream_port(sw); 3084 3085 ret = tb_port_set_link_width(up, TB_LINK_WIDTH_DUAL); 3086 if (ret) 3087 return ret; 3088 3089 ret = tb_port_set_link_width(down, TB_LINK_WIDTH_DUAL); 3090 if (ret) 3091 return ret; 3092 3093 /* 3094 * Initiate the change in the router that has three TX lanes and 3095 * is changing one of its TX lanes to RX but only if there is a 3096 * change in the link width. 3097 */ 3098 if (sw->link_width > TB_LINK_WIDTH_DUAL) { 3099 if (sw->link_width == TB_LINK_WIDTH_ASYM_TX) 3100 ret = usb4_port_asym_start(up); 3101 else 3102 ret = usb4_port_asym_start(down); 3103 if (ret) 3104 return ret; 3105 3106 ret = tb_port_wait_for_link_width(up, TB_LINK_WIDTH_DUAL, 100); 3107 if (ret) 3108 return ret; 3109 } 3110 3111 return 0; 3112 } 3113 3114 /** 3115 * tb_switch_set_link_width() - Configure router link width 3116 * @sw: Router to configure 3117 * @width: The new link width 3118 * 3119 * Sets the device router link width to @width from the router upstream port 3120 * perspective. Also supports asymmetric links if the routers on both sides 3121 * of the link support it. 3122 * 3123 * Does nothing for the host router. 3124 * 3125 * Return: %0 on success, negative errno otherwise.
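 *
 * A minimal sketch (illustrative only, not the only valid call sequence)
 * of a connection manager asking for a bonded symmetric link:
 *
 *	ret = tb_switch_set_link_width(sw, TB_LINK_WIDTH_DUAL);
 *	if (ret)
 *		tb_sw_warn(sw, "failed to set link width: %d\n", ret);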
3126 */ 3127 int tb_switch_set_link_width(struct tb_switch *sw, enum tb_link_width width) 3128 { 3129 struct tb_port *up, *down; 3130 int ret = 0; 3131 3132 if (!tb_route(sw)) 3133 return 0; 3134 3135 up = tb_upstream_port(sw); 3136 down = tb_switch_downstream_port(sw); 3137 3138 switch (width) { 3139 case TB_LINK_WIDTH_SINGLE: 3140 ret = tb_switch_lane_bonding_disable(sw); 3141 break; 3142 3143 case TB_LINK_WIDTH_DUAL: 3144 if (sw->link_width == TB_LINK_WIDTH_ASYM_TX || 3145 sw->link_width == TB_LINK_WIDTH_ASYM_RX) { 3146 ret = tb_switch_asym_disable(sw); 3147 if (ret) 3148 break; 3149 } 3150 ret = tb_switch_lane_bonding_enable(sw); 3151 break; 3152 3153 case TB_LINK_WIDTH_ASYM_TX: 3154 case TB_LINK_WIDTH_ASYM_RX: 3155 ret = tb_switch_asym_enable(sw, width); 3156 break; 3157 } 3158 3159 switch (ret) { 3160 case 0: 3161 break; 3162 3163 case -ETIMEDOUT: 3164 tb_sw_warn(sw, "timeout changing link width\n"); 3165 return ret; 3166 3167 case -ENOTCONN: 3168 case -EOPNOTSUPP: 3169 case -ENODEV: 3170 return ret; 3171 3172 default: 3173 tb_sw_dbg(sw, "failed to change link width: %d\n", ret); 3174 return ret; 3175 } 3176 3177 tb_port_update_credits(down); 3178 tb_port_update_credits(up); 3179 3180 tb_switch_update_link_attributes(sw); 3181 3182 tb_sw_dbg(sw, "link width set to %s\n", tb_width_name(width)); 3183 return ret; 3184 } 3185 3186 /** 3187 * tb_switch_configure_link() - Set link configured 3188 * @sw: Switch whose link is configured 3189 * 3190 * Sets the link upstream from @sw configured (from both ends) so that 3191 * it will not be disconnected when the domain exits sleep. Can be 3192 * called for any switch. 3193 * 3194 * It is recommended that this is called after lane bonding is enabled. 3195 * 3196 * Return: %0 on success and negative errno otherwise. 3197 */ 3198 int tb_switch_configure_link(struct tb_switch *sw) 3199 { 3200 struct tb_port *up, *down; 3201 int ret; 3202 3203 if (!tb_route(sw) || tb_switch_is_icm(sw)) 3204 return 0; 3205 3206 up = tb_upstream_port(sw); 3207 if (tb_switch_is_usb4(up->sw)) 3208 ret = usb4_port_configure(up); 3209 else 3210 ret = tb_lc_configure_port(up); 3211 if (ret) 3212 return ret; 3213 3214 down = up->remote; 3215 if (tb_switch_is_usb4(down->sw)) 3216 return usb4_port_configure(down); 3217 return tb_lc_configure_port(down); 3218 } 3219 3220 /** 3221 * tb_switch_unconfigure_link() - Unconfigure link 3222 * @sw: Switch whose link is unconfigured 3223 * 3224 * Sets the link unconfigured so the @sw will be disconnected if the 3225 * domain exits sleep. 3226 */ 3227 void tb_switch_unconfigure_link(struct tb_switch *sw) 3228 { 3229 struct tb_port *up, *down; 3230 3231 if (!tb_route(sw) || tb_switch_is_icm(sw)) 3232 return; 3233 3234 /* 3235 * Unconfigure downstream port so that wake-on-connect can be 3236 * configured after router unplug. No need to unconfigure upstream port 3237 * since its router is unplugged. 
3238 */ 3239 up = tb_upstream_port(sw); 3240 down = up->remote; 3241 if (tb_switch_is_usb4(down->sw)) 3242 usb4_port_unconfigure(down); 3243 else 3244 tb_lc_unconfigure_port(down); 3245 3246 if (sw->is_unplugged) 3247 return; 3248 3249 up = tb_upstream_port(sw); 3250 if (tb_switch_is_usb4(up->sw)) 3251 usb4_port_unconfigure(up); 3252 else 3253 tb_lc_unconfigure_port(up); 3254 } 3255 3256 static void tb_switch_credits_init(struct tb_switch *sw) 3257 { 3258 if (tb_switch_is_icm(sw)) 3259 return; 3260 if (!tb_switch_is_usb4(sw)) 3261 return; 3262 if (usb4_switch_credits_init(sw)) 3263 tb_sw_info(sw, "failed to determine preferred buffer allocation, using defaults\n"); 3264 } 3265 3266 static int tb_switch_port_hotplug_enable(struct tb_switch *sw) 3267 { 3268 struct tb_port *port; 3269 3270 if (tb_switch_is_icm(sw)) 3271 return 0; 3272 3273 tb_switch_for_each_port(sw, port) { 3274 int res; 3275 3276 if (!port->cap_usb4) 3277 continue; 3278 3279 res = usb4_port_hotplug_enable(port); 3280 if (res) 3281 return res; 3282 } 3283 return 0; 3284 } 3285 3286 /** 3287 * tb_switch_add() - Add a switch to the domain 3288 * @sw: Switch to add 3289 * 3290 * This is the last step in adding a switch to the domain. It reads 3291 * identification information from the DROM and initializes the ports so that 3292 * they can be used to connect other switches. The switch is 3293 * exposed to userspace when this function returns successfully. To 3294 * remove and release the switch, call tb_switch_remove(). 3295 * 3296 * Return: %0 on success, negative errno otherwise. 3297 */ 3298 int tb_switch_add(struct tb_switch *sw) 3299 { 3300 int i, ret; 3301 3302 /* 3303 * Initialize the DMA control port now before we read the DROM. Recent 3304 * host controllers have a more complete DROM in NVM that includes 3305 * vendor and model identification strings which we then expose 3306 * to userspace. The NVM can be accessed through the DMA 3307 * configuration based mailbox.
3308 */ 3309 ret = tb_switch_add_dma_port(sw); 3310 if (ret) { 3311 dev_err(&sw->dev, "failed to add DMA port\n"); 3312 return ret; 3313 } 3314 3315 if (!sw->safe_mode) { 3316 tb_switch_credits_init(sw); 3317 3318 /* read drom */ 3319 ret = tb_drom_read(sw); 3320 if (ret) 3321 dev_warn(&sw->dev, "reading DROM failed: %d\n", ret); 3322 tb_sw_dbg(sw, "uid: %#llx\n", sw->uid); 3323 3324 ret = tb_switch_set_uuid(sw); 3325 if (ret) { 3326 dev_err(&sw->dev, "failed to set UUID\n"); 3327 return ret; 3328 } 3329 3330 for (i = 0; i <= sw->config.max_port_number; i++) { 3331 if (sw->ports[i].disabled) { 3332 tb_port_dbg(&sw->ports[i], "disabled by eeprom\n"); 3333 continue; 3334 } 3335 ret = tb_init_port(&sw->ports[i]); 3336 if (ret) { 3337 dev_err(&sw->dev, "failed to initialize port %d\n", i); 3338 return ret; 3339 } 3340 } 3341 3342 tb_check_quirks(sw); 3343 3344 tb_switch_default_link_ports(sw); 3345 3346 ret = tb_switch_update_link_attributes(sw); 3347 if (ret) 3348 return ret; 3349 3350 tb_switch_link_init(sw); 3351 3352 ret = tb_switch_clx_init(sw); 3353 if (ret) 3354 return ret; 3355 3356 ret = tb_switch_tmu_init(sw); 3357 if (ret) 3358 return ret; 3359 } 3360 3361 ret = tb_switch_port_hotplug_enable(sw); 3362 if (ret) 3363 return ret; 3364 3365 ret = device_add(&sw->dev); 3366 if (ret) { 3367 dev_err(&sw->dev, "failed to add device: %d\n", ret); 3368 return ret; 3369 } 3370 3371 if (tb_route(sw)) { 3372 dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n", 3373 sw->vendor, sw->device); 3374 if (sw->vendor_name && sw->device_name) 3375 dev_info(&sw->dev, "%s %s\n", sw->vendor_name, 3376 sw->device_name); 3377 } 3378 3379 ret = usb4_switch_add_ports(sw); 3380 if (ret) { 3381 dev_err(&sw->dev, "failed to add USB4 ports\n"); 3382 goto err_del; 3383 } 3384 3385 ret = tb_switch_nvm_add(sw); 3386 if (ret) { 3387 dev_err(&sw->dev, "failed to add NVM devices\n"); 3388 goto err_ports; 3389 } 3390 3391 /* 3392 * Thunderbolt routers do not generate wakeups themselves but 3393 * they forward wakeups from tunneled protocols, so enable it 3394 * here. 3395 */ 3396 device_init_wakeup(&sw->dev, true); 3397 3398 pm_runtime_set_active(&sw->dev); 3399 if (sw->rpm) { 3400 pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY); 3401 pm_runtime_use_autosuspend(&sw->dev); 3402 pm_runtime_mark_last_busy(&sw->dev); 3403 pm_runtime_enable(&sw->dev); 3404 pm_request_autosuspend(&sw->dev); 3405 } 3406 3407 tb_switch_debugfs_init(sw); 3408 return 0; 3409 3410 err_ports: 3411 usb4_switch_remove_ports(sw); 3412 err_del: 3413 device_del(&sw->dev); 3414 3415 return ret; 3416 } 3417 3418 /** 3419 * tb_switch_remove() - Remove and release a switch 3420 * @sw: Switch to remove 3421 * 3422 * This will remove the switch from the domain and release it after last 3423 * reference count drops to zero. If there are switches connected below 3424 * this switch, they will be removed as well. 
3425 */ 3426 void tb_switch_remove(struct tb_switch *sw) 3427 { 3428 struct tb_port *port; 3429 3430 tb_switch_debugfs_remove(sw); 3431 3432 if (sw->rpm) { 3433 pm_runtime_get_sync(&sw->dev); 3434 pm_runtime_disable(&sw->dev); 3435 } 3436 3437 /* port 0 is the switch itself and never has a remote */ 3438 tb_switch_for_each_port(sw, port) { 3439 if (tb_port_has_remote(port)) { 3440 tb_switch_remove(port->remote->sw); 3441 port->remote = NULL; 3442 } else if (port->xdomain) { 3443 port->xdomain->is_unplugged = true; 3444 tb_xdomain_remove(port->xdomain); 3445 port->xdomain = NULL; 3446 } 3447 3448 /* Remove any downstream retimers */ 3449 tb_retimer_remove_all(port); 3450 } 3451 3452 if (!sw->is_unplugged) 3453 tb_plug_events_active(sw, false); 3454 3455 tb_switch_nvm_remove(sw); 3456 usb4_switch_remove_ports(sw); 3457 3458 if (tb_route(sw)) 3459 dev_info(&sw->dev, "device disconnected\n"); 3460 device_unregister(&sw->dev); 3461 } 3462 3463 /** 3464 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches 3465 * @sw: Router to mark unplugged 3466 */ 3467 void tb_sw_set_unplugged(struct tb_switch *sw) 3468 { 3469 struct tb_port *port; 3470 3471 if (sw == sw->tb->root_switch) { 3472 tb_sw_WARN(sw, "cannot unplug root switch\n"); 3473 return; 3474 } 3475 if (sw->is_unplugged) { 3476 tb_sw_WARN(sw, "is_unplugged already set\n"); 3477 return; 3478 } 3479 sw->is_unplugged = true; 3480 tb_switch_for_each_port(sw, port) { 3481 if (tb_port_has_remote(port)) 3482 tb_sw_set_unplugged(port->remote->sw); 3483 else if (port->xdomain) 3484 port->xdomain->is_unplugged = true; 3485 } 3486 } 3487 3488 static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags, bool runtime) 3489 { 3490 if (flags) 3491 tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags); 3492 else 3493 tb_sw_dbg(sw, "disabling wakeup\n"); 3494 3495 if (tb_switch_is_usb4(sw)) 3496 return usb4_switch_set_wake(sw, flags, runtime); 3497 return tb_lc_set_wake(sw, flags); 3498 } 3499 3500 static void tb_switch_check_wakes(struct tb_switch *sw) 3501 { 3502 if (device_may_wakeup(&sw->dev)) { 3503 if (tb_switch_is_usb4(sw)) 3504 usb4_switch_check_wakes(sw); 3505 } 3506 } 3507 3508 /** 3509 * tb_switch_resume() - Resume a switch after sleep 3510 * @sw: Switch to resume 3511 * @runtime: Is this resume from runtime suspend or system sleep 3512 * 3513 * Resumes and re-enumerates the router (and all its children), if it is still 3514 * plugged after suspend. Does not enumerate a device router whose UID changed 3515 * during suspend. If this is a resume from system sleep, notifies the PM core 3516 * about the wakes that occurred during suspend. Disables all wakes, except the 3517 * USB4 wake of the upstream port for USB4 routers, which shall always be enabled. 3518 * 3519 * Return: %0 on success, negative errno otherwise. 3520 */ 3521 int tb_switch_resume(struct tb_switch *sw, bool runtime) 3522 { 3523 struct tb_port *port; 3524 int err; 3525 3526 tb_sw_dbg(sw, "resuming switch\n"); 3527 3528 /* 3529 * Check the UID of the connected switches except for the root 3530 * switch which we assume cannot be removed. 3531 */ 3532 if (tb_route(sw)) { 3533 u64 uid; 3534 3535 /* 3536 * Check first that we can still read the switch config 3537 * space. It may be that there is now another domain 3538 * connected.
3539 */ 3540 err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw)); 3541 if (err < 0) { 3542 tb_sw_info(sw, "switch not present anymore\n"); 3543 return err; 3544 } 3545 3546 /* We don't have any way to confirm this was the same device */ 3547 if (!sw->uid) 3548 return -ENODEV; 3549 3550 if (tb_switch_is_usb4(sw)) 3551 err = usb4_switch_read_uid(sw, &uid); 3552 else 3553 err = tb_drom_read_uid_only(sw, &uid); 3554 if (err) { 3555 tb_sw_warn(sw, "uid read failed\n"); 3556 return err; 3557 } 3558 if (sw->uid != uid) { 3559 tb_sw_info(sw, 3560 "changed while suspended (uid %#llx -> %#llx)\n", 3561 sw->uid, uid); 3562 return -ENODEV; 3563 } 3564 } 3565 3566 err = tb_switch_configure(sw); 3567 if (err) 3568 return err; 3569 3570 if (!runtime) 3571 tb_switch_check_wakes(sw); 3572 3573 /* Disable wakes */ 3574 tb_switch_set_wake(sw, 0, true); 3575 3576 err = tb_switch_tmu_init(sw); 3577 if (err) 3578 return err; 3579 3580 /* check for surviving downstream switches */ 3581 tb_switch_for_each_port(sw, port) { 3582 if (!tb_port_is_null(port)) 3583 continue; 3584 3585 if (!tb_port_resume(port)) 3586 continue; 3587 3588 if (tb_wait_for_port(port, true) <= 0) { 3589 tb_port_warn(port, 3590 "lost during suspend, disconnecting\n"); 3591 if (tb_port_has_remote(port)) 3592 tb_sw_set_unplugged(port->remote->sw); 3593 else if (port->xdomain) 3594 port->xdomain->is_unplugged = true; 3595 } else { 3596 /* 3597 * Always unlock the port so the downstream 3598 * switch/domain is accessible. 3599 */ 3600 if (tb_port_unlock(port)) 3601 tb_port_warn(port, "failed to unlock port\n"); 3602 if (port->remote && 3603 tb_switch_resume(port->remote->sw, runtime)) { 3604 tb_port_warn(port, 3605 "lost during suspend, disconnecting\n"); 3606 tb_sw_set_unplugged(port->remote->sw); 3607 } 3608 } 3609 } 3610 return 0; 3611 } 3612 3613 /** 3614 * tb_switch_suspend() - Put a switch to sleep 3615 * @sw: Switch to suspend 3616 * @runtime: Is this runtime suspend or system sleep 3617 * 3618 * Suspends router and all its children. Enables wakes according to 3619 * value of @runtime and then sets sleep bit for the router. If @sw is 3620 * host router the domain is ready to go to sleep once this function 3621 * returns. 3622 */ 3623 void tb_switch_suspend(struct tb_switch *sw, bool runtime) 3624 { 3625 unsigned int flags = 0; 3626 struct tb_port *port; 3627 int err; 3628 3629 tb_sw_dbg(sw, "suspending switch\n"); 3630 3631 /* 3632 * Actually only needed for Titan Ridge but for simplicity can be 3633 * done for USB4 device too as CLx is re-enabled at resume. 
3634 */ 3635 tb_switch_clx_disable(sw); 3636 3637 err = tb_plug_events_active(sw, false); 3638 if (err) 3639 return; 3640 3641 tb_switch_for_each_port(sw, port) { 3642 if (tb_port_has_remote(port)) 3643 tb_switch_suspend(port->remote->sw, runtime); 3644 } 3645 3646 if (runtime) { 3647 /* Trigger wake when something is plugged in/out */ 3648 flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT; 3649 flags |= TB_WAKE_ON_USB4; 3650 flags |= TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE | TB_WAKE_ON_DP; 3651 } else if (device_may_wakeup(&sw->dev)) { 3652 flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT; 3653 flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE; 3654 } 3655 3656 tb_switch_set_wake(sw, flags, runtime); 3657 3658 if (tb_switch_is_usb4(sw)) 3659 usb4_switch_set_sleep(sw); 3660 else 3661 tb_lc_set_sleep(sw); 3662 } 3663 3664 /** 3665 * tb_switch_query_dp_resource() - Query availability of DP resource 3666 * @sw: Switch whose DP resource is queried 3667 * @in: DP IN port 3668 * 3669 * Queries availability of DP resource for DP tunneling using switch 3670 * specific means. 3671 * 3672 * Return: %true if resource is available, %false otherwise. 3673 */ 3674 bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in) 3675 { 3676 if (tb_switch_is_usb4(sw)) 3677 return usb4_switch_query_dp_resource(sw, in); 3678 return tb_lc_dp_sink_query(sw, in); 3679 } 3680 3681 /** 3682 * tb_switch_alloc_dp_resource() - Allocate available DP resource 3683 * @sw: Switch whose DP resource is allocated 3684 * @in: DP IN port 3685 * 3686 * Allocates DP resource for DP tunneling. The resource must be 3687 * available for this to succeed (see tb_switch_query_dp_resource()). 3688 * 3689 * Return: %0 on success, negative errno otherwise. 3690 */ 3691 int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in) 3692 { 3693 int ret; 3694 3695 if (tb_switch_is_usb4(sw)) 3696 ret = usb4_switch_alloc_dp_resource(sw, in); 3697 else 3698 ret = tb_lc_dp_sink_alloc(sw, in); 3699 3700 if (ret) 3701 tb_sw_warn(sw, "failed to allocate DP resource for port %d\n", 3702 in->port); 3703 else 3704 tb_sw_dbg(sw, "allocated DP resource for port %d\n", in->port); 3705 3706 return ret; 3707 } 3708 3709 /** 3710 * tb_switch_dealloc_dp_resource() - De-allocate DP resource 3711 * @sw: Switch whose DP resource is de-allocated 3712 * @in: DP IN port 3713 * 3714 * De-allocates DP resource that was previously allocated for DP 3715 * tunneling. 
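 *
 * A rough lifecycle sketch tying the three DP resource helpers together
 * (illustrative only; error handling and the actual tunnel setup are elided):
 *
 *	if (tb_switch_query_dp_resource(sw, in) &&
 *	    !tb_switch_alloc_dp_resource(sw, in)) {
 *		... set up the DP tunnel, and once it is torn down ...
 *		tb_switch_dealloc_dp_resource(sw, in);
 *	}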
3716 */ 3717 void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in) 3718 { 3719 int ret; 3720 3721 if (tb_switch_is_usb4(sw)) 3722 ret = usb4_switch_dealloc_dp_resource(sw, in); 3723 else 3724 ret = tb_lc_dp_sink_dealloc(sw, in); 3725 3726 if (ret) 3727 tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n", 3728 in->port); 3729 else 3730 tb_sw_dbg(sw, "released DP resource for port %d\n", in->port); 3731 } 3732 3733 struct tb_sw_lookup { 3734 struct tb *tb; 3735 u8 link; 3736 u8 depth; 3737 const uuid_t *uuid; 3738 u64 route; 3739 }; 3740 3741 static int tb_switch_match(struct device *dev, const void *data) 3742 { 3743 struct tb_switch *sw = tb_to_switch(dev); 3744 const struct tb_sw_lookup *lookup = data; 3745 3746 if (!sw) 3747 return 0; 3748 if (sw->tb != lookup->tb) 3749 return 0; 3750 3751 if (lookup->uuid) 3752 return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid)); 3753 3754 if (lookup->route) { 3755 return sw->config.route_lo == lower_32_bits(lookup->route) && 3756 sw->config.route_hi == upper_32_bits(lookup->route); 3757 } 3758 3759 /* Root switch is matched only by depth */ 3760 if (!lookup->depth) 3761 return !sw->depth; 3762 3763 return sw->link == lookup->link && sw->depth == lookup->depth; 3764 } 3765 3766 /** 3767 * tb_switch_find_by_link_depth() - Find switch by link and depth 3768 * @tb: Domain the switch belongs 3769 * @link: Link number the switch is connected 3770 * @depth: Depth of the switch in link 3771 * 3772 * Returned switch has reference count increased so the caller needs to 3773 * call tb_switch_put() when done with the switch. 3774 * 3775 * Return: Pointer to &struct tb_switch, %NULL if not found. 3776 */ 3777 struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth) 3778 { 3779 struct tb_sw_lookup lookup; 3780 struct device *dev; 3781 3782 memset(&lookup, 0, sizeof(lookup)); 3783 lookup.tb = tb; 3784 lookup.link = link; 3785 lookup.depth = depth; 3786 3787 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match); 3788 if (dev) 3789 return tb_to_switch(dev); 3790 3791 return NULL; 3792 } 3793 3794 /** 3795 * tb_switch_find_by_uuid() - Find switch by UUID 3796 * @tb: Domain the switch belongs 3797 * @uuid: UUID to look for 3798 * 3799 * Returned switch has reference count increased so the caller needs to 3800 * call tb_switch_put() when done with the switch. 3801 * 3802 * Return: Pointer to &struct tb_switch, %NULL if not found. 3803 */ 3804 struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid) 3805 { 3806 struct tb_sw_lookup lookup; 3807 struct device *dev; 3808 3809 memset(&lookup, 0, sizeof(lookup)); 3810 lookup.tb = tb; 3811 lookup.uuid = uuid; 3812 3813 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match); 3814 if (dev) 3815 return tb_to_switch(dev); 3816 3817 return NULL; 3818 } 3819 3820 /** 3821 * tb_switch_find_by_route() - Find switch by route string 3822 * @tb: Domain the switch belongs 3823 * @route: Route string to look for 3824 * 3825 * Returned switch has reference count increased so the caller needs to 3826 * call tb_switch_put() when done with the switch. 3827 * 3828 * Return: Pointer to &struct tb_switch, %NULL if not found. 
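 *
 * A minimal lookup sketch (illustrative only):
 *
 *	sw = tb_switch_find_by_route(tb, route);
 *	if (sw) {
 *		... use the switch ...
 *		tb_switch_put(sw);
 *	}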
3829 */ 3830 struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route) 3831 { 3832 struct tb_sw_lookup lookup; 3833 struct device *dev; 3834 3835 if (!route) 3836 return tb_switch_get(tb->root_switch); 3837 3838 memset(&lookup, 0, sizeof(lookup)); 3839 lookup.tb = tb; 3840 lookup.route = route; 3841 3842 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match); 3843 if (dev) 3844 return tb_to_switch(dev); 3845 3846 return NULL; 3847 } 3848 3849 /** 3850 * tb_switch_find_port() - return the first port of @type on @sw or NULL 3851 * @sw: Switch to find the port from 3852 * @type: Port type to look for 3853 * 3854 * Return: Pointer to &struct tb_port, %NULL if not found. 3855 */ 3856 struct tb_port *tb_switch_find_port(struct tb_switch *sw, 3857 enum tb_port_type type) 3858 { 3859 struct tb_port *port; 3860 3861 tb_switch_for_each_port(sw, port) { 3862 if (port->config.type == type) 3863 return port; 3864 } 3865 3866 return NULL; 3867 } 3868 3869 /* 3870 * Can be used to read/write a specified PCIe bridge of any Thunderbolt 3 3871 * device. For now used only for Titan Ridge. 3872 */ 3873 static int tb_switch_pcie_bridge_write(struct tb_switch *sw, unsigned int bridge, 3874 unsigned int pcie_offset, u32 value) 3875 { 3876 u32 offset, command, val; 3877 int ret; 3878 3879 if (sw->generation != 3) 3880 return -EOPNOTSUPP; 3881 3882 offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_WR_DATA; 3883 ret = tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1); 3884 if (ret) 3885 return ret; 3886 3887 command = pcie_offset & TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK; 3888 command |= BIT(bridge + TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT); 3889 command |= TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK; 3890 command |= TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL 3891 << TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT; 3892 command |= TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK; 3893 3894 offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_CMD; 3895 3896 ret = tb_sw_write(sw, &command, TB_CFG_SWITCH, offset, 1); 3897 if (ret) 3898 return ret; 3899 3900 ret = tb_switch_wait_for_bit(sw, offset, 3901 TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK, 0, 100); 3902 if (ret) 3903 return ret; 3904 3905 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1); 3906 if (ret) 3907 return ret; 3908 3909 if (val & TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK) 3910 return -ETIMEDOUT; 3911 3912 return 0; 3913 } 3914 3915 /** 3916 * tb_switch_pcie_l1_enable() - Enable PCIe link to enter L1 state 3917 * @sw: Router to enable PCIe L1 3918 * 3919 * For a Titan Ridge switch to enter a CLx state, its PCIe bridges shall enable 3920 * entry to the PCIe L1 state. Shall be called after the upstream PCIe tunnel 3921 * has been configured. Due to an Intel platform limitation, this shall be called 3922 * only for the first hop switch. 3923 * 3924 * Return: %0 on success, negative errno otherwise.
3925 */ 3926 int tb_switch_pcie_l1_enable(struct tb_switch *sw) 3927 { 3928 struct tb_switch *parent = tb_switch_parent(sw); 3929 int ret; 3930 3931 if (!tb_route(sw)) 3932 return 0; 3933 3934 if (!tb_switch_is_titan_ridge(sw)) 3935 return 0; 3936 3937 /* Enable PCIe L1 enable only for first hop router (depth = 1) */ 3938 if (tb_route(parent)) 3939 return 0; 3940 3941 /* Write to downstream PCIe bridge #5 aka Dn4 */ 3942 ret = tb_switch_pcie_bridge_write(sw, 5, 0x143, 0x0c7806b1); 3943 if (ret) 3944 return ret; 3945 3946 /* Write to Upstream PCIe bridge #0 aka Up0 */ 3947 return tb_switch_pcie_bridge_write(sw, 0, 0x143, 0x0c5806b1); 3948 } 3949 3950 /** 3951 * tb_switch_xhci_connect() - Connect internal xHCI 3952 * @sw: Router whose xHCI to connect 3953 * 3954 * Can be called to any router. For Alpine Ridge and Titan Ridge 3955 * performs special flows that bring the xHCI functional for any device 3956 * connected to the type-C port. Call only after PCIe tunnel has been 3957 * established. The function only does the connect if not done already 3958 * so can be called several times for the same router. 3959 * 3960 * Return: %0 on success, negative errno otherwise. 3961 */ 3962 int tb_switch_xhci_connect(struct tb_switch *sw) 3963 { 3964 struct tb_port *port1, *port3; 3965 int ret; 3966 3967 if (sw->generation != 3) 3968 return 0; 3969 3970 port1 = &sw->ports[1]; 3971 port3 = &sw->ports[3]; 3972 3973 if (tb_switch_is_alpine_ridge(sw)) { 3974 bool usb_port1, usb_port3, xhci_port1, xhci_port3; 3975 3976 usb_port1 = tb_lc_is_usb_plugged(port1); 3977 usb_port3 = tb_lc_is_usb_plugged(port3); 3978 xhci_port1 = tb_lc_is_xhci_connected(port1); 3979 xhci_port3 = tb_lc_is_xhci_connected(port3); 3980 3981 /* Figure out correct USB port to connect */ 3982 if (usb_port1 && !xhci_port1) { 3983 ret = tb_lc_xhci_connect(port1); 3984 if (ret) 3985 return ret; 3986 } 3987 if (usb_port3 && !xhci_port3) 3988 return tb_lc_xhci_connect(port3); 3989 } else if (tb_switch_is_titan_ridge(sw)) { 3990 ret = tb_lc_xhci_connect(port1); 3991 if (ret) 3992 return ret; 3993 return tb_lc_xhci_connect(port3); 3994 } 3995 3996 return 0; 3997 } 3998 3999 /** 4000 * tb_switch_xhci_disconnect() - Disconnect internal xHCI 4001 * @sw: Router whose xHCI to disconnect 4002 * 4003 * The opposite of tb_switch_xhci_connect(). Disconnects xHCI on both 4004 * ports. 4005 */ 4006 void tb_switch_xhci_disconnect(struct tb_switch *sw) 4007 { 4008 if (sw->generation == 3) { 4009 struct tb_port *port1 = &sw->ports[1]; 4010 struct tb_port *port3 = &sw->ports[3]; 4011 4012 tb_lc_xhci_disconnect(port1); 4013 tb_port_dbg(port1, "disconnected xHCI\n"); 4014 tb_lc_xhci_disconnect(port3); 4015 tb_port_dbg(port3, "disconnected xHCI\n"); 4016 } 4017 } 4018