// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - switch/port utility functions
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/nvmem-provider.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#include "tb.h"

/* Switch NVM support */

#define NVM_CSS			0x10

struct nvm_auth_status {
	struct list_head list;
	uuid_t uuid;
	u32 status;
};

enum nvm_write_ops {
	WRITE_AND_AUTHENTICATE = 1,
	WRITE_ONLY = 2,
};

/*
 * Hold NVM authentication failure status per switch. This information
 * needs to stay around even when the switch gets power cycled so we
 * keep it separately.
 */
static LIST_HEAD(nvm_auth_status_cache);
static DEFINE_MUTEX(nvm_auth_status_lock);

static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	list_for_each_entry(st, &nvm_auth_status_cache, list) {
		if (uuid_equal(&st->uuid, sw->uuid))
			return st;
	}

	return NULL;
}

static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	mutex_unlock(&nvm_auth_status_lock);

	*status = st ? st->status : 0;
}

static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
{
	struct nvm_auth_status *st;

	if (WARN_ON(!sw->uuid))
		return;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);

	if (!st) {
		st = kzalloc(sizeof(*st), GFP_KERNEL);
		if (!st)
			goto unlock;

		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
		INIT_LIST_HEAD(&st->list);
		list_add_tail(&st->list, &nvm_auth_status_cache);
	}

	st->status = status;
unlock:
	mutex_unlock(&nvm_auth_status_lock);
}

static void nvm_clear_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	if (st) {
		list_del(&st->list);
		kfree(st);
	}
	mutex_unlock(&nvm_auth_status_lock);
}

static int nvm_validate_and_write(struct tb_switch *sw)
{
	unsigned int image_size, hdr_size;
	const u8 *buf = sw->nvm->buf;
	u16 ds_size;
	int ret;

	if (!buf)
		return -EINVAL;

	image_size = sw->nvm->buf_data_size;
	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
		return -EINVAL;

	/*
	 * FARB pointer must point inside the image and must at least
	 * contain parts of the digital section we will be reading here.
	 */
	hdr_size = (*(u32 *)buf) & 0xffffff;
	if (hdr_size + NVM_DEVID + 2 >= image_size)
		return -EINVAL;

	/* Digital section start should be aligned to 4k page */
	if (!IS_ALIGNED(hdr_size, SZ_4K))
		return -EINVAL;

	/*
	 * Read digital section size and check that it also fits inside
	 * the image.
	 */
	ds_size = *(u16 *)(buf + hdr_size);
	if (ds_size >= image_size)
		return -EINVAL;

	if (!sw->safe_mode) {
		u16 device_id;

		/*
		 * Make sure the device ID in the image matches the one
		 * we read from the switch config space.
		 */
		device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
		if (device_id != sw->config.device_id)
			return -EINVAL;

		if (sw->generation < 3) {
			/* Write CSS headers first */
			ret = dma_port_flash_write(sw->dma_port,
				DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
				DMA_PORT_CSS_MAX_SIZE);
			if (ret)
				return ret;
		}

		/* Skip headers in the image */
		buf += hdr_size;
		image_size -= hdr_size;
	}

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
	else
		ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
	if (!ret)
		sw->nvm->flushed = true;
	return ret;
}

static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
{
	int ret = 0;

	/*
	 * Root switch NVM upgrade requires that we disconnect the
	 * existing paths first (in case it is not in safe mode
	 * already).
	 */
	if (!sw->safe_mode) {
		u32 status;

		ret = tb_domain_disconnect_all_paths(sw->tb);
		if (ret)
			return ret;
		/*
		 * The host controller goes away pretty soon after this if
		 * everything goes well so getting timeout is expected.
		 */
		ret = dma_port_flash_update_auth(sw->dma_port);
		if (!ret || ret == -ETIMEDOUT)
			return 0;

		/*
		 * Any error from update auth operation requires power
		 * cycling of the host router.
		 */
		tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
		if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
			nvm_set_auth_status(sw, status);
	}

	/*
	 * From safe mode we can get out by just power cycling the
	 * switch.
	 */
	dma_port_power_cycle(sw->dma_port);
	return ret;
}

static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
{
	int ret, retries = 10;

	ret = dma_port_flash_update_auth(sw->dma_port);
	switch (ret) {
	case 0:
	case -ETIMEDOUT:
	case -EACCES:
	case -EINVAL:
		/* Power cycle is required */
		break;
	default:
		return ret;
	}

	/*
	 * Poll here for the authentication status. It takes some time
	 * for the device to respond (we get timeout for a while). Once
	 * we get response the device needs to be power cycled in order
	 * for the new NVM to be taken into use.
	 */
	do {
		u32 status;

		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
		if (ret < 0 && ret != -ETIMEDOUT)
			return ret;
		if (ret > 0) {
			if (status) {
				tb_sw_warn(sw, "failed to authenticate NVM\n");
				nvm_set_auth_status(sw, status);
			}

			tb_sw_info(sw, "power cycling the switch now\n");
			dma_port_power_cycle(sw->dma_port);
			return 0;
		}

		msleep(500);
	} while (--retries);

	return -ETIMEDOUT;
}

static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	/*
	 * During host router NVM upgrade we should not allow root port to
	 * go into D3cold because some root ports cannot trigger PME
	 * themselves. To be on the safe side keep the root port in D0 during
	 * the whole upgrade process.
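	 * The matching pm_runtime_put() is done in
	 * nvm_authenticate_complete_dma_port().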
265 */ 266 root_port = pcie_find_root_port(sw->tb->nhi->pdev); 267 if (root_port) 268 pm_runtime_get_noresume(&root_port->dev); 269 } 270 271 static void nvm_authenticate_complete_dma_port(struct tb_switch *sw) 272 { 273 struct pci_dev *root_port; 274 275 root_port = pcie_find_root_port(sw->tb->nhi->pdev); 276 if (root_port) 277 pm_runtime_put(&root_port->dev); 278 } 279 280 static inline bool nvm_readable(struct tb_switch *sw) 281 { 282 if (tb_switch_is_usb4(sw)) { 283 /* 284 * USB4 devices must support NVM operations but it is 285 * optional for hosts. Therefore we query the NVM sector 286 * size here and if it is supported assume NVM 287 * operations are implemented. 288 */ 289 return usb4_switch_nvm_sector_size(sw) > 0; 290 } 291 292 /* Thunderbolt 2 and 3 devices support NVM through DMA port */ 293 return !!sw->dma_port; 294 } 295 296 static inline bool nvm_upgradeable(struct tb_switch *sw) 297 { 298 if (sw->no_nvm_upgrade) 299 return false; 300 return nvm_readable(sw); 301 } 302 303 static inline int nvm_read(struct tb_switch *sw, unsigned int address, 304 void *buf, size_t size) 305 { 306 if (tb_switch_is_usb4(sw)) 307 return usb4_switch_nvm_read(sw, address, buf, size); 308 return dma_port_flash_read(sw->dma_port, address, buf, size); 309 } 310 311 static int nvm_authenticate(struct tb_switch *sw) 312 { 313 int ret; 314 315 if (tb_switch_is_usb4(sw)) 316 return usb4_switch_nvm_authenticate(sw); 317 318 if (!tb_route(sw)) { 319 nvm_authenticate_start_dma_port(sw); 320 ret = nvm_authenticate_host_dma_port(sw); 321 } else { 322 ret = nvm_authenticate_device_dma_port(sw); 323 } 324 325 return ret; 326 } 327 328 static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val, 329 size_t bytes) 330 { 331 struct tb_nvm *nvm = priv; 332 struct tb_switch *sw = tb_to_switch(nvm->dev); 333 int ret; 334 335 pm_runtime_get_sync(&sw->dev); 336 337 if (!mutex_trylock(&sw->tb->lock)) { 338 ret = restart_syscall(); 339 goto out; 340 } 341 342 ret = nvm_read(sw, offset, val, bytes); 343 mutex_unlock(&sw->tb->lock); 344 345 out: 346 pm_runtime_mark_last_busy(&sw->dev); 347 pm_runtime_put_autosuspend(&sw->dev); 348 349 return ret; 350 } 351 352 static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val, 353 size_t bytes) 354 { 355 struct tb_nvm *nvm = priv; 356 struct tb_switch *sw = tb_to_switch(nvm->dev); 357 int ret; 358 359 if (!mutex_trylock(&sw->tb->lock)) 360 return restart_syscall(); 361 362 /* 363 * Since writing the NVM image might require some special steps, 364 * for example when CSS headers are written, we cache the image 365 * locally here and handle the special cases when the user asks 366 * us to authenticate the image. 367 */ 368 ret = tb_nvm_write_buf(nvm, offset, val, bytes); 369 mutex_unlock(&sw->tb->lock); 370 371 return ret; 372 } 373 374 static int tb_switch_nvm_add(struct tb_switch *sw) 375 { 376 struct tb_nvm *nvm; 377 u32 val; 378 int ret; 379 380 if (!nvm_readable(sw)) 381 return 0; 382 383 /* 384 * The NVM format of non-Intel hardware is not known so 385 * currently restrict NVM upgrade for Intel hardware. We may 386 * relax this in the future when we learn other NVM formats. 
387 */ 388 if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL && 389 sw->config.vendor_id != 0x8087) { 390 dev_info(&sw->dev, 391 "NVM format of vendor %#x is not known, disabling NVM upgrade\n", 392 sw->config.vendor_id); 393 return 0; 394 } 395 396 nvm = tb_nvm_alloc(&sw->dev); 397 if (IS_ERR(nvm)) 398 return PTR_ERR(nvm); 399 400 /* 401 * If the switch is in safe-mode the only accessible portion of 402 * the NVM is the non-active one where userspace is expected to 403 * write new functional NVM. 404 */ 405 if (!sw->safe_mode) { 406 u32 nvm_size, hdr_size; 407 408 ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val)); 409 if (ret) 410 goto err_nvm; 411 412 hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K; 413 nvm_size = (SZ_1M << (val & 7)) / 8; 414 nvm_size = (nvm_size - hdr_size) / 2; 415 416 ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val)); 417 if (ret) 418 goto err_nvm; 419 420 nvm->major = val >> 16; 421 nvm->minor = val >> 8; 422 423 ret = tb_nvm_add_active(nvm, nvm_size, tb_switch_nvm_read); 424 if (ret) 425 goto err_nvm; 426 } 427 428 if (!sw->no_nvm_upgrade) { 429 ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE, 430 tb_switch_nvm_write); 431 if (ret) 432 goto err_nvm; 433 } 434 435 sw->nvm = nvm; 436 return 0; 437 438 err_nvm: 439 tb_nvm_free(nvm); 440 return ret; 441 } 442 443 static void tb_switch_nvm_remove(struct tb_switch *sw) 444 { 445 struct tb_nvm *nvm; 446 447 nvm = sw->nvm; 448 sw->nvm = NULL; 449 450 if (!nvm) 451 return; 452 453 /* Remove authentication status in case the switch is unplugged */ 454 if (!nvm->authenticating) 455 nvm_clear_auth_status(sw); 456 457 tb_nvm_free(nvm); 458 } 459 460 /* port utility functions */ 461 462 static const char *tb_port_type(struct tb_regs_port_header *port) 463 { 464 switch (port->type >> 16) { 465 case 0: 466 switch ((u8) port->type) { 467 case 0: 468 return "Inactive"; 469 case 1: 470 return "Port"; 471 case 2: 472 return "NHI"; 473 default: 474 return "unknown"; 475 } 476 case 0x2: 477 return "Ethernet"; 478 case 0x8: 479 return "SATA"; 480 case 0xe: 481 return "DP/HDMI"; 482 case 0x10: 483 return "PCIe"; 484 case 0x20: 485 return "USB"; 486 default: 487 return "unknown"; 488 } 489 } 490 491 static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port) 492 { 493 tb_dbg(tb, 494 " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n", 495 port->port_number, port->vendor_id, port->device_id, 496 port->revision, port->thunderbolt_version, tb_port_type(port), 497 port->type); 498 tb_dbg(tb, " Max hop id (in/out): %d/%d\n", 499 port->max_in_hop_id, port->max_out_hop_id); 500 tb_dbg(tb, " Max counters: %d\n", port->max_counters); 501 tb_dbg(tb, " NFC Credits: %#x\n", port->nfc_credits); 502 } 503 504 /** 505 * tb_port_state() - get connectedness state of a port 506 * @port: the port to check 507 * 508 * The port must have a TB_CAP_PHY (i.e. it should be a real port). 509 * 510 * Return: Returns an enum tb_port_state on success or an error code on failure. 511 */ 512 int tb_port_state(struct tb_port *port) 513 { 514 struct tb_cap_phy phy; 515 int res; 516 if (port->cap_phy == 0) { 517 tb_port_WARN(port, "does not have a PHY\n"); 518 return -EINVAL; 519 } 520 res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2); 521 if (res) 522 return res; 523 return phy.state; 524 } 525 526 /** 527 * tb_wait_for_port() - wait for a port to become ready 528 * 529 * Wait up to 1 second for a port to reach state TB_PORT_UP. 
If
 * wait_if_unplugged is set then we also wait if the port is in state
 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 * switch resume). Otherwise we only wait if a device is registered but the link
 * has not yet been established.
 *
 * Return: Returns an error code on failure. Returns 0 if the port is not
 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
 * if the port is connected and in state TB_PORT_UP.
 */
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
{
	int retries = 10;
	int state;
	if (!port->cap_phy) {
		tb_port_WARN(port, "does not have PHY\n");
		return -EINVAL;
	}
	if (tb_is_upstream_port(port)) {
		tb_port_WARN(port, "is the upstream port\n");
		return -EINVAL;
	}

	while (retries--) {
		state = tb_port_state(port);
		if (state < 0)
			return state;
		if (state == TB_PORT_DISABLED) {
			tb_port_dbg(port, "is disabled (state: 0)\n");
			return 0;
		}
		if (state == TB_PORT_UNPLUGGED) {
			if (wait_if_unplugged) {
				/* used during resume */
				tb_port_dbg(port,
					    "is unplugged (state: 7), retrying...\n");
				msleep(100);
				continue;
			}
			tb_port_dbg(port, "is unplugged (state: 7)\n");
			return 0;
		}
		if (state == TB_PORT_UP) {
			tb_port_dbg(port, "is connected, link is up (state: 2)\n");
			return 1;
		}

		/*
		 * After plug-in the state is TB_PORT_CONNECTING. Give it some
		 * time.
		 */
		tb_port_dbg(port,
			    "is connected, link is not up (state: %d), retrying...\n",
			    state);
		msleep(100);
	}
	tb_port_warn(port,
		     "failed to reach state TB_PORT_UP. Ignoring port...\n");
	return 0;
}

/**
 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
 *
 * Change the number of NFC credits allocated to @port by @credits. To remove
 * NFC credits pass a negative amount of credits.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_add_nfc_credits(struct tb_port *port, int credits)
{
	u32 nfc_credits;

	if (credits == 0 || port->sw->is_unplugged)
		return 0;

	/*
	 * USB4 restricts programming NFC buffers to lane adapters only
	 * so skip other ports.
	 */
	if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
		return 0;

	nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
	nfc_credits += credits;

	tb_port_dbg(port, "adding %d NFC credits to %lu", credits,
		    port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);

	port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
	port->config.nfc_credits |= nfc_credits;

	return tb_port_write(port, &port->config.nfc_credits,
			     TB_CFG_PORT, ADP_CS_4, 1);
}

/**
 * tb_port_set_initial_credits() - Set initial port link credits allocated
 * @port: Port to set the initial credits
 * @credits: Number of credits to allocate
 *
 * Set initial credits value to be used for ingress shared buffering.
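 *
 * Return: Returns 0 on success or an error code on failure.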
 */
int tb_port_set_initial_credits(struct tb_port *port, u32 credits)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
	if (ret)
		return ret;

	data &= ~ADP_CS_5_LCA_MASK;
	data |= (credits << ADP_CS_5_LCA_SHIFT) & ADP_CS_5_LCA_MASK;

	return tb_port_write(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
}

/**
 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_clear_counter(struct tb_port *port, int counter)
{
	u32 zero[3] = { 0, 0, 0 };
	tb_port_dbg(port, "clearing counter %d\n", counter);
	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
}

/**
 * tb_port_unlock() - Unlock downstream port
 * @port: Port to unlock
 *
 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
 * downstream router accessible for the CM.
 */
int tb_port_unlock(struct tb_port *port)
{
	if (tb_switch_is_icm(port->sw))
		return 0;
	if (!tb_port_is_null(port))
		return -EINVAL;
	if (tb_switch_is_usb4(port->sw))
		return usb4_port_unlock(port);
	return 0;
}

static int __tb_port_enable(struct tb_port *port, bool enable)
{
	int ret;
	u32 phy;

	if (!tb_port_is_null(port))
		return -EINVAL;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (enable)
		phy &= ~LANE_ADP_CS_1_LD;
	else
		phy |= LANE_ADP_CS_1_LD;

	return tb_port_write(port, &phy, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

/**
 * tb_port_enable() - Enable lane adapter
 * @port: Port to enable (can be %NULL)
 *
 * This is used for lane 0 and 1 adapters to enable them.
 */
int tb_port_enable(struct tb_port *port)
{
	return __tb_port_enable(port, true);
}

/**
 * tb_port_disable() - Disable lane adapter
 * @port: Port to disable (can be %NULL)
 *
 * This is used for lane 0 and 1 adapters to disable them.
 */
int tb_port_disable(struct tb_port *port)
{
	return __tb_port_enable(port, false);
}

/**
 * tb_init_port() - initialize a port
 *
 * This is a helper method for tb_switch_alloc. Does not check or initialize
 * any downstream switches.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_init_port(struct tb_port *port)
{
	int res;
	int cap;

	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
	if (res) {
		if (res == -ENODEV) {
			tb_dbg(port->sw->tb, " Port %d: not implemented\n",
			       port->port);
			port->disabled = true;
			return 0;
		}
		return res;
	}

	/* Port 0 is the switch itself and has no PHY. */
	if (port->config.type == TB_TYPE_PORT && port->port != 0) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);

		if (cap > 0)
			port->cap_phy = cap;
		else
			tb_port_WARN(port, "non switch port without a PHY\n");

		cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
		if (cap > 0)
			port->cap_usb4 = cap;
	} else if (port->port != 0) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
		if (cap > 0)
			port->cap_adap = cap;
	}

	tb_dump_port(port->sw->tb, &port->config);

	/* Control port does not need HopID allocation */
	if (port->port) {
		ida_init(&port->in_hopids);
		ida_init(&port->out_hopids);
	}

	INIT_LIST_HEAD(&port->list);
	return 0;

}

static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
			       int max_hopid)
{
	int port_max_hopid;
	struct ida *ida;

	if (in) {
		port_max_hopid = port->config.max_in_hop_id;
		ida = &port->in_hopids;
	} else {
		port_max_hopid = port->config.max_out_hop_id;
		ida = &port->out_hopids;
	}

	/*
	 * NHI can use HopIDs 1-max; for other adapters HopIDs 0-7 are
	 * reserved.
	 */
	if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
		min_hopid = TB_PATH_MIN_HOPID;

	if (max_hopid < 0 || max_hopid > port_max_hopid)
		max_hopid = port_max_hopid;

	return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
}

/**
 * tb_port_alloc_in_hopid() - Allocate input HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable input HopID
 * @max_hopid: Maximum acceptable input HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
}

/**
 * tb_port_alloc_out_hopid() - Allocate output HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable output HopID
 * @max_hopid: Maximum acceptable output HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
825 */ 826 int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid) 827 { 828 return tb_port_alloc_hopid(port, false, min_hopid, max_hopid); 829 } 830 831 /** 832 * tb_port_release_in_hopid() - Release allocated input HopID from port 833 * @port: Port whose HopID to release 834 * @hopid: HopID to release 835 */ 836 void tb_port_release_in_hopid(struct tb_port *port, int hopid) 837 { 838 ida_simple_remove(&port->in_hopids, hopid); 839 } 840 841 /** 842 * tb_port_release_out_hopid() - Release allocated output HopID from port 843 * @port: Port whose HopID to release 844 * @hopid: HopID to release 845 */ 846 void tb_port_release_out_hopid(struct tb_port *port, int hopid) 847 { 848 ida_simple_remove(&port->out_hopids, hopid); 849 } 850 851 static inline bool tb_switch_is_reachable(const struct tb_switch *parent, 852 const struct tb_switch *sw) 853 { 854 u64 mask = (1ULL << parent->config.depth * 8) - 1; 855 return (tb_route(parent) & mask) == (tb_route(sw) & mask); 856 } 857 858 /** 859 * tb_next_port_on_path() - Return next port for given port on a path 860 * @start: Start port of the walk 861 * @end: End port of the walk 862 * @prev: Previous port (%NULL if this is the first) 863 * 864 * This function can be used to walk from one port to another if they 865 * are connected through zero or more switches. If the @prev is dual 866 * link port, the function follows that link and returns another end on 867 * that same link. 868 * 869 * If the @end port has been reached, return %NULL. 870 * 871 * Domain tb->lock must be held when this function is called. 872 */ 873 struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end, 874 struct tb_port *prev) 875 { 876 struct tb_port *next; 877 878 if (!prev) 879 return start; 880 881 if (prev->sw == end->sw) { 882 if (prev == end) 883 return NULL; 884 return end; 885 } 886 887 if (tb_switch_is_reachable(prev->sw, end->sw)) { 888 next = tb_port_at(tb_route(end->sw), prev->sw); 889 /* Walk down the topology if next == prev */ 890 if (prev->remote && 891 (next == prev || next->dual_link_port == prev)) 892 next = prev->remote; 893 } else { 894 if (tb_is_upstream_port(prev)) { 895 next = prev->remote; 896 } else { 897 next = tb_upstream_port(prev->sw); 898 /* 899 * Keep the same link if prev and next are both 900 * dual link ports. 901 */ 902 if (next->dual_link_port && 903 next->link_nr != prev->link_nr) { 904 next = next->dual_link_port; 905 } 906 } 907 } 908 909 return next != prev ? next : NULL; 910 } 911 912 /** 913 * tb_port_get_link_speed() - Get current link speed 914 * @port: Port to check (USB4 or CIO) 915 * 916 * Returns link speed in Gb/s or negative errno in case of failure. 917 */ 918 int tb_port_get_link_speed(struct tb_port *port) 919 { 920 u32 val, speed; 921 int ret; 922 923 if (!port->cap_phy) 924 return -EINVAL; 925 926 ret = tb_port_read(port, &val, TB_CFG_PORT, 927 port->cap_phy + LANE_ADP_CS_1, 1); 928 if (ret) 929 return ret; 930 931 speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >> 932 LANE_ADP_CS_1_CURRENT_SPEED_SHIFT; 933 return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10; 934 } 935 936 /** 937 * tb_port_get_link_width() - Get current link width 938 * @port: Port to check (USB4 or CIO) 939 * 940 * Returns link width. Return values can be 1 (Single-Lane), 2 (Dual-Lane) 941 * or negative errno in case of failure. 
942 */ 943 int tb_port_get_link_width(struct tb_port *port) 944 { 945 u32 val; 946 int ret; 947 948 if (!port->cap_phy) 949 return -EINVAL; 950 951 ret = tb_port_read(port, &val, TB_CFG_PORT, 952 port->cap_phy + LANE_ADP_CS_1, 1); 953 if (ret) 954 return ret; 955 956 return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >> 957 LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT; 958 } 959 960 static bool tb_port_is_width_supported(struct tb_port *port, int width) 961 { 962 u32 phy, widths; 963 int ret; 964 965 if (!port->cap_phy) 966 return false; 967 968 ret = tb_port_read(port, &phy, TB_CFG_PORT, 969 port->cap_phy + LANE_ADP_CS_0, 1); 970 if (ret) 971 return false; 972 973 widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >> 974 LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT; 975 976 return !!(widths & width); 977 } 978 979 static int tb_port_set_link_width(struct tb_port *port, unsigned int width) 980 { 981 u32 val; 982 int ret; 983 984 if (!port->cap_phy) 985 return -EINVAL; 986 987 ret = tb_port_read(port, &val, TB_CFG_PORT, 988 port->cap_phy + LANE_ADP_CS_1, 1); 989 if (ret) 990 return ret; 991 992 val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK; 993 switch (width) { 994 case 1: 995 val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE << 996 LANE_ADP_CS_1_TARGET_WIDTH_SHIFT; 997 break; 998 case 2: 999 val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL << 1000 LANE_ADP_CS_1_TARGET_WIDTH_SHIFT; 1001 break; 1002 default: 1003 return -EINVAL; 1004 } 1005 1006 val |= LANE_ADP_CS_1_LB; 1007 1008 return tb_port_write(port, &val, TB_CFG_PORT, 1009 port->cap_phy + LANE_ADP_CS_1, 1); 1010 } 1011 1012 /** 1013 * tb_port_lane_bonding_enable() - Enable bonding on port 1014 * @port: port to enable 1015 * 1016 * Enable bonding by setting the link width of the port and the 1017 * other port in case of dual link port. 1018 * 1019 * Return: %0 in case of success and negative errno in case of error 1020 */ 1021 int tb_port_lane_bonding_enable(struct tb_port *port) 1022 { 1023 int ret; 1024 1025 /* 1026 * Enable lane bonding for both links if not already enabled by 1027 * for example the boot firmware. 1028 */ 1029 ret = tb_port_get_link_width(port); 1030 if (ret == 1) { 1031 ret = tb_port_set_link_width(port, 2); 1032 if (ret) 1033 return ret; 1034 } 1035 1036 ret = tb_port_get_link_width(port->dual_link_port); 1037 if (ret == 1) { 1038 ret = tb_port_set_link_width(port->dual_link_port, 2); 1039 if (ret) { 1040 tb_port_set_link_width(port, 1); 1041 return ret; 1042 } 1043 } 1044 1045 port->bonded = true; 1046 port->dual_link_port->bonded = true; 1047 1048 return 0; 1049 } 1050 1051 /** 1052 * tb_port_lane_bonding_disable() - Disable bonding on port 1053 * @port: port to disable 1054 * 1055 * Disable bonding by setting the link width of the port and the 1056 * other port in case of dual link port. 
1057 * 1058 */ 1059 void tb_port_lane_bonding_disable(struct tb_port *port) 1060 { 1061 port->dual_link_port->bonded = false; 1062 port->bonded = false; 1063 1064 tb_port_set_link_width(port->dual_link_port, 1); 1065 tb_port_set_link_width(port, 1); 1066 } 1067 1068 /** 1069 * tb_port_is_enabled() - Is the adapter port enabled 1070 * @port: Port to check 1071 */ 1072 bool tb_port_is_enabled(struct tb_port *port) 1073 { 1074 switch (port->config.type) { 1075 case TB_TYPE_PCIE_UP: 1076 case TB_TYPE_PCIE_DOWN: 1077 return tb_pci_port_is_enabled(port); 1078 1079 case TB_TYPE_DP_HDMI_IN: 1080 case TB_TYPE_DP_HDMI_OUT: 1081 return tb_dp_port_is_enabled(port); 1082 1083 case TB_TYPE_USB3_UP: 1084 case TB_TYPE_USB3_DOWN: 1085 return tb_usb3_port_is_enabled(port); 1086 1087 default: 1088 return false; 1089 } 1090 } 1091 1092 /** 1093 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled 1094 * @port: USB3 adapter port to check 1095 */ 1096 bool tb_usb3_port_is_enabled(struct tb_port *port) 1097 { 1098 u32 data; 1099 1100 if (tb_port_read(port, &data, TB_CFG_PORT, 1101 port->cap_adap + ADP_USB3_CS_0, 1)) 1102 return false; 1103 1104 return !!(data & ADP_USB3_CS_0_PE); 1105 } 1106 1107 /** 1108 * tb_usb3_port_enable() - Enable USB3 adapter port 1109 * @port: USB3 adapter port to enable 1110 * @enable: Enable/disable the USB3 adapter 1111 */ 1112 int tb_usb3_port_enable(struct tb_port *port, bool enable) 1113 { 1114 u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V) 1115 : ADP_USB3_CS_0_V; 1116 1117 if (!port->cap_adap) 1118 return -ENXIO; 1119 return tb_port_write(port, &word, TB_CFG_PORT, 1120 port->cap_adap + ADP_USB3_CS_0, 1); 1121 } 1122 1123 /** 1124 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled 1125 * @port: PCIe port to check 1126 */ 1127 bool tb_pci_port_is_enabled(struct tb_port *port) 1128 { 1129 u32 data; 1130 1131 if (tb_port_read(port, &data, TB_CFG_PORT, 1132 port->cap_adap + ADP_PCIE_CS_0, 1)) 1133 return false; 1134 1135 return !!(data & ADP_PCIE_CS_0_PE); 1136 } 1137 1138 /** 1139 * tb_pci_port_enable() - Enable PCIe adapter port 1140 * @port: PCIe port to enable 1141 * @enable: Enable/disable the PCIe adapter 1142 */ 1143 int tb_pci_port_enable(struct tb_port *port, bool enable) 1144 { 1145 u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0; 1146 if (!port->cap_adap) 1147 return -ENXIO; 1148 return tb_port_write(port, &word, TB_CFG_PORT, 1149 port->cap_adap + ADP_PCIE_CS_0, 1); 1150 } 1151 1152 /** 1153 * tb_dp_port_hpd_is_active() - Is HPD already active 1154 * @port: DP out port to check 1155 * 1156 * Checks if the DP OUT adapter port has HDP bit already set. 1157 */ 1158 int tb_dp_port_hpd_is_active(struct tb_port *port) 1159 { 1160 u32 data; 1161 int ret; 1162 1163 ret = tb_port_read(port, &data, TB_CFG_PORT, 1164 port->cap_adap + ADP_DP_CS_2, 1); 1165 if (ret) 1166 return ret; 1167 1168 return !!(data & ADP_DP_CS_2_HDP); 1169 } 1170 1171 /** 1172 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port 1173 * @port: Port to clear HPD 1174 * 1175 * If the DP IN port has HDP set, this function can be used to clear it. 
 */
int tb_dp_port_hpd_clear(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_3, 1);
	if (ret)
		return ret;

	data |= ADP_DP_CS_3_HDPC;
	return tb_port_write(port, &data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_3, 1);
}

/**
 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
 * @port: DP IN/OUT port to set hops
 * @video: Video Hop ID
 * @aux_tx: AUX TX Hop ID
 * @aux_rx: AUX RX Hop ID
 *
 * Programs specified Hop IDs for DP IN/OUT port.
 */
int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
			unsigned int aux_tx, unsigned int aux_rx)
{
	u32 data[2];
	int ret;

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
		ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
		ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}

/**
 * tb_dp_port_is_enabled() - Is DP adapter port enabled
 * @port: DP adapter port to check
 */
bool tb_dp_port_is_enabled(struct tb_port *port)
{
	u32 data[2];

	if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
			 ARRAY_SIZE(data)))
		return false;

	return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
}

/**
 * tb_dp_port_enable() - Enables/disables DP paths of a port
 * @port: DP IN/OUT port
 * @enable: Enable/disable DP path
 *
 * Once Hop IDs are programmed DP paths can be enabled or disabled by
 * calling this function.
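 *
 * Note that the Video Enable (ADP_DP_CS_0_VE) and AUX Enable
 * (ADP_DP_CS_0_AE) bits are set and cleared together.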
 */
int tb_dp_port_enable(struct tb_port *port, bool enable)
{
	u32 data[2];
	int ret;

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	if (enable)
		data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
	else
		data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}

/* switch utility functions */

static const char *tb_switch_generation_name(const struct tb_switch *sw)
{
	switch (sw->generation) {
	case 1:
		return "Thunderbolt 1";
	case 2:
		return "Thunderbolt 2";
	case 3:
		return "Thunderbolt 3";
	case 4:
		return "USB4";
	default:
		return "Unknown";
	}
}

static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
{
	const struct tb_regs_switch_header *regs = &sw->config;

	tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
	       tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
	       regs->revision, regs->thunderbolt_version);
	tb_dbg(tb, " Max Port Number: %d\n", regs->max_port_number);
	tb_dbg(tb, " Config:\n");
	tb_dbg(tb,
	       " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
	       regs->upstream_port_number, regs->depth,
	       (((u64) regs->route_hi) << 32) | regs->route_lo,
	       regs->enabled, regs->plug_events_delay);
	tb_dbg(tb, " unknown1: %#x unknown4: %#x\n",
	       regs->__unknown1, regs->__unknown4);
}

/**
 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
 * @sw: Switch to reset
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_switch_reset(struct tb_switch *sw)
{
	struct tb_cfg_result res;

	if (sw->generation > 1)
		return 0;

	tb_sw_dbg(sw, "resetting switch\n");

	res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
			      TB_CFG_SWITCH, 2, 2);
	if (res.err)
		return res.err;
	res = tb_cfg_reset(sw->tb->ctl, tb_route(sw), TB_CFG_DEFAULT_TIMEOUT);
	if (res.err > 0)
		return -EIO;
	return res.err;
}

/**
 * tb_plug_events_active() - enable/disable plug events on a switch
 *
 * Also configures a sane plug_events_delay of 255ms.
 *
 * Return: Returns 0 on success or an error code on failure.
1335 */ 1336 static int tb_plug_events_active(struct tb_switch *sw, bool active) 1337 { 1338 u32 data; 1339 int res; 1340 1341 if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw)) 1342 return 0; 1343 1344 sw->config.plug_events_delay = 0xff; 1345 res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1); 1346 if (res) 1347 return res; 1348 1349 res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1); 1350 if (res) 1351 return res; 1352 1353 if (active) { 1354 data = data & 0xFFFFFF83; 1355 switch (sw->config.device_id) { 1356 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE: 1357 case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE: 1358 case PCI_DEVICE_ID_INTEL_PORT_RIDGE: 1359 break; 1360 default: 1361 data |= 4; 1362 } 1363 } else { 1364 data = data | 0x7c; 1365 } 1366 return tb_sw_write(sw, &data, TB_CFG_SWITCH, 1367 sw->cap_plug_events + 1, 1); 1368 } 1369 1370 static ssize_t authorized_show(struct device *dev, 1371 struct device_attribute *attr, 1372 char *buf) 1373 { 1374 struct tb_switch *sw = tb_to_switch(dev); 1375 1376 return sprintf(buf, "%u\n", sw->authorized); 1377 } 1378 1379 static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val) 1380 { 1381 int ret = -EINVAL; 1382 1383 if (!mutex_trylock(&sw->tb->lock)) 1384 return restart_syscall(); 1385 1386 if (sw->authorized) 1387 goto unlock; 1388 1389 switch (val) { 1390 /* Approve switch */ 1391 case 1: 1392 if (sw->key) 1393 ret = tb_domain_approve_switch_key(sw->tb, sw); 1394 else 1395 ret = tb_domain_approve_switch(sw->tb, sw); 1396 break; 1397 1398 /* Challenge switch */ 1399 case 2: 1400 if (sw->key) 1401 ret = tb_domain_challenge_switch_key(sw->tb, sw); 1402 break; 1403 1404 default: 1405 break; 1406 } 1407 1408 if (!ret) { 1409 sw->authorized = val; 1410 /* Notify status change to the userspace */ 1411 kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE); 1412 } 1413 1414 unlock: 1415 mutex_unlock(&sw->tb->lock); 1416 return ret; 1417 } 1418 1419 static ssize_t authorized_store(struct device *dev, 1420 struct device_attribute *attr, 1421 const char *buf, size_t count) 1422 { 1423 struct tb_switch *sw = tb_to_switch(dev); 1424 unsigned int val; 1425 ssize_t ret; 1426 1427 ret = kstrtouint(buf, 0, &val); 1428 if (ret) 1429 return ret; 1430 if (val > 2) 1431 return -EINVAL; 1432 1433 pm_runtime_get_sync(&sw->dev); 1434 ret = tb_switch_set_authorized(sw, val); 1435 pm_runtime_mark_last_busy(&sw->dev); 1436 pm_runtime_put_autosuspend(&sw->dev); 1437 1438 return ret ? ret : count; 1439 } 1440 static DEVICE_ATTR_RW(authorized); 1441 1442 static ssize_t boot_show(struct device *dev, struct device_attribute *attr, 1443 char *buf) 1444 { 1445 struct tb_switch *sw = tb_to_switch(dev); 1446 1447 return sprintf(buf, "%u\n", sw->boot); 1448 } 1449 static DEVICE_ATTR_RO(boot); 1450 1451 static ssize_t device_show(struct device *dev, struct device_attribute *attr, 1452 char *buf) 1453 { 1454 struct tb_switch *sw = tb_to_switch(dev); 1455 1456 return sprintf(buf, "%#x\n", sw->device); 1457 } 1458 static DEVICE_ATTR_RO(device); 1459 1460 static ssize_t 1461 device_name_show(struct device *dev, struct device_attribute *attr, char *buf) 1462 { 1463 struct tb_switch *sw = tb_to_switch(dev); 1464 1465 return sprintf(buf, "%s\n", sw->device_name ? 
sw->device_name : ""); 1466 } 1467 static DEVICE_ATTR_RO(device_name); 1468 1469 static ssize_t 1470 generation_show(struct device *dev, struct device_attribute *attr, char *buf) 1471 { 1472 struct tb_switch *sw = tb_to_switch(dev); 1473 1474 return sprintf(buf, "%u\n", sw->generation); 1475 } 1476 static DEVICE_ATTR_RO(generation); 1477 1478 static ssize_t key_show(struct device *dev, struct device_attribute *attr, 1479 char *buf) 1480 { 1481 struct tb_switch *sw = tb_to_switch(dev); 1482 ssize_t ret; 1483 1484 if (!mutex_trylock(&sw->tb->lock)) 1485 return restart_syscall(); 1486 1487 if (sw->key) 1488 ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key); 1489 else 1490 ret = sprintf(buf, "\n"); 1491 1492 mutex_unlock(&sw->tb->lock); 1493 return ret; 1494 } 1495 1496 static ssize_t key_store(struct device *dev, struct device_attribute *attr, 1497 const char *buf, size_t count) 1498 { 1499 struct tb_switch *sw = tb_to_switch(dev); 1500 u8 key[TB_SWITCH_KEY_SIZE]; 1501 ssize_t ret = count; 1502 bool clear = false; 1503 1504 if (!strcmp(buf, "\n")) 1505 clear = true; 1506 else if (hex2bin(key, buf, sizeof(key))) 1507 return -EINVAL; 1508 1509 if (!mutex_trylock(&sw->tb->lock)) 1510 return restart_syscall(); 1511 1512 if (sw->authorized) { 1513 ret = -EBUSY; 1514 } else { 1515 kfree(sw->key); 1516 if (clear) { 1517 sw->key = NULL; 1518 } else { 1519 sw->key = kmemdup(key, sizeof(key), GFP_KERNEL); 1520 if (!sw->key) 1521 ret = -ENOMEM; 1522 } 1523 } 1524 1525 mutex_unlock(&sw->tb->lock); 1526 return ret; 1527 } 1528 static DEVICE_ATTR(key, 0600, key_show, key_store); 1529 1530 static ssize_t speed_show(struct device *dev, struct device_attribute *attr, 1531 char *buf) 1532 { 1533 struct tb_switch *sw = tb_to_switch(dev); 1534 1535 return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed); 1536 } 1537 1538 /* 1539 * Currently all lanes must run at the same speed but we expose here 1540 * both directions to allow possible asymmetric links in the future. 1541 */ 1542 static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL); 1543 static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL); 1544 1545 static ssize_t lanes_show(struct device *dev, struct device_attribute *attr, 1546 char *buf) 1547 { 1548 struct tb_switch *sw = tb_to_switch(dev); 1549 1550 return sprintf(buf, "%u\n", sw->link_width); 1551 } 1552 1553 /* 1554 * Currently link has same amount of lanes both directions (1 or 2) but 1555 * expose them separately to allow possible asymmetric links in the future. 
1556 */ 1557 static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL); 1558 static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL); 1559 1560 static ssize_t nvm_authenticate_show(struct device *dev, 1561 struct device_attribute *attr, char *buf) 1562 { 1563 struct tb_switch *sw = tb_to_switch(dev); 1564 u32 status; 1565 1566 nvm_get_auth_status(sw, &status); 1567 return sprintf(buf, "%#x\n", status); 1568 } 1569 1570 static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf, 1571 bool disconnect) 1572 { 1573 struct tb_switch *sw = tb_to_switch(dev); 1574 int val; 1575 int ret; 1576 1577 pm_runtime_get_sync(&sw->dev); 1578 1579 if (!mutex_trylock(&sw->tb->lock)) { 1580 ret = restart_syscall(); 1581 goto exit_rpm; 1582 } 1583 1584 /* If NVMem devices are not yet added */ 1585 if (!sw->nvm) { 1586 ret = -EAGAIN; 1587 goto exit_unlock; 1588 } 1589 1590 ret = kstrtoint(buf, 10, &val); 1591 if (ret) 1592 goto exit_unlock; 1593 1594 /* Always clear the authentication status */ 1595 nvm_clear_auth_status(sw); 1596 1597 if (val > 0) { 1598 if (!sw->nvm->flushed) { 1599 if (!sw->nvm->buf) { 1600 ret = -EINVAL; 1601 goto exit_unlock; 1602 } 1603 1604 ret = nvm_validate_and_write(sw); 1605 if (ret || val == WRITE_ONLY) 1606 goto exit_unlock; 1607 } 1608 if (val == WRITE_AND_AUTHENTICATE) { 1609 if (disconnect) { 1610 ret = tb_lc_force_power(sw); 1611 } else { 1612 sw->nvm->authenticating = true; 1613 ret = nvm_authenticate(sw); 1614 } 1615 } 1616 } 1617 1618 exit_unlock: 1619 mutex_unlock(&sw->tb->lock); 1620 exit_rpm: 1621 pm_runtime_mark_last_busy(&sw->dev); 1622 pm_runtime_put_autosuspend(&sw->dev); 1623 1624 return ret; 1625 } 1626 1627 static ssize_t nvm_authenticate_store(struct device *dev, 1628 struct device_attribute *attr, const char *buf, size_t count) 1629 { 1630 int ret = nvm_authenticate_sysfs(dev, buf, false); 1631 if (ret) 1632 return ret; 1633 return count; 1634 } 1635 static DEVICE_ATTR_RW(nvm_authenticate); 1636 1637 static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev, 1638 struct device_attribute *attr, char *buf) 1639 { 1640 return nvm_authenticate_show(dev, attr, buf); 1641 } 1642 1643 static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev, 1644 struct device_attribute *attr, const char *buf, size_t count) 1645 { 1646 int ret; 1647 1648 ret = nvm_authenticate_sysfs(dev, buf, true); 1649 return ret ? ret : count; 1650 } 1651 static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect); 1652 1653 static ssize_t nvm_version_show(struct device *dev, 1654 struct device_attribute *attr, char *buf) 1655 { 1656 struct tb_switch *sw = tb_to_switch(dev); 1657 int ret; 1658 1659 if (!mutex_trylock(&sw->tb->lock)) 1660 return restart_syscall(); 1661 1662 if (sw->safe_mode) 1663 ret = -ENODATA; 1664 else if (!sw->nvm) 1665 ret = -EAGAIN; 1666 else 1667 ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor); 1668 1669 mutex_unlock(&sw->tb->lock); 1670 1671 return ret; 1672 } 1673 static DEVICE_ATTR_RO(nvm_version); 1674 1675 static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, 1676 char *buf) 1677 { 1678 struct tb_switch *sw = tb_to_switch(dev); 1679 1680 return sprintf(buf, "%#x\n", sw->vendor); 1681 } 1682 static DEVICE_ATTR_RO(vendor); 1683 1684 static ssize_t 1685 vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf) 1686 { 1687 struct tb_switch *sw = tb_to_switch(dev); 1688 1689 return sprintf(buf, "%s\n", sw->vendor_name ? 
sw->vendor_name : ""); 1690 } 1691 static DEVICE_ATTR_RO(vendor_name); 1692 1693 static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr, 1694 char *buf) 1695 { 1696 struct tb_switch *sw = tb_to_switch(dev); 1697 1698 return sprintf(buf, "%pUb\n", sw->uuid); 1699 } 1700 static DEVICE_ATTR_RO(unique_id); 1701 1702 static struct attribute *switch_attrs[] = { 1703 &dev_attr_authorized.attr, 1704 &dev_attr_boot.attr, 1705 &dev_attr_device.attr, 1706 &dev_attr_device_name.attr, 1707 &dev_attr_generation.attr, 1708 &dev_attr_key.attr, 1709 &dev_attr_nvm_authenticate.attr, 1710 &dev_attr_nvm_authenticate_on_disconnect.attr, 1711 &dev_attr_nvm_version.attr, 1712 &dev_attr_rx_speed.attr, 1713 &dev_attr_rx_lanes.attr, 1714 &dev_attr_tx_speed.attr, 1715 &dev_attr_tx_lanes.attr, 1716 &dev_attr_vendor.attr, 1717 &dev_attr_vendor_name.attr, 1718 &dev_attr_unique_id.attr, 1719 NULL, 1720 }; 1721 1722 static umode_t switch_attr_is_visible(struct kobject *kobj, 1723 struct attribute *attr, int n) 1724 { 1725 struct device *dev = kobj_to_dev(kobj); 1726 struct tb_switch *sw = tb_to_switch(dev); 1727 1728 if (attr == &dev_attr_device.attr) { 1729 if (!sw->device) 1730 return 0; 1731 } else if (attr == &dev_attr_device_name.attr) { 1732 if (!sw->device_name) 1733 return 0; 1734 } else if (attr == &dev_attr_vendor.attr) { 1735 if (!sw->vendor) 1736 return 0; 1737 } else if (attr == &dev_attr_vendor_name.attr) { 1738 if (!sw->vendor_name) 1739 return 0; 1740 } else if (attr == &dev_attr_key.attr) { 1741 if (tb_route(sw) && 1742 sw->tb->security_level == TB_SECURITY_SECURE && 1743 sw->security_level == TB_SECURITY_SECURE) 1744 return attr->mode; 1745 return 0; 1746 } else if (attr == &dev_attr_rx_speed.attr || 1747 attr == &dev_attr_rx_lanes.attr || 1748 attr == &dev_attr_tx_speed.attr || 1749 attr == &dev_attr_tx_lanes.attr) { 1750 if (tb_route(sw)) 1751 return attr->mode; 1752 return 0; 1753 } else if (attr == &dev_attr_nvm_authenticate.attr) { 1754 if (nvm_upgradeable(sw)) 1755 return attr->mode; 1756 return 0; 1757 } else if (attr == &dev_attr_nvm_version.attr) { 1758 if (nvm_readable(sw)) 1759 return attr->mode; 1760 return 0; 1761 } else if (attr == &dev_attr_boot.attr) { 1762 if (tb_route(sw)) 1763 return attr->mode; 1764 return 0; 1765 } else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) { 1766 if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER) 1767 return attr->mode; 1768 return 0; 1769 } 1770 1771 return sw->safe_mode ? 0 : attr->mode; 1772 } 1773 1774 static struct attribute_group switch_group = { 1775 .is_visible = switch_attr_is_visible, 1776 .attrs = switch_attrs, 1777 }; 1778 1779 static const struct attribute_group *switch_groups[] = { 1780 &switch_group, 1781 NULL, 1782 }; 1783 1784 static void tb_switch_release(struct device *dev) 1785 { 1786 struct tb_switch *sw = tb_to_switch(dev); 1787 struct tb_port *port; 1788 1789 dma_port_free(sw->dma_port); 1790 1791 tb_switch_for_each_port(sw, port) { 1792 if (!port->disabled) { 1793 ida_destroy(&port->in_hopids); 1794 ida_destroy(&port->out_hopids); 1795 } 1796 } 1797 1798 kfree(sw->uuid); 1799 kfree(sw->device_name); 1800 kfree(sw->vendor_name); 1801 kfree(sw->ports); 1802 kfree(sw->drom); 1803 kfree(sw->key); 1804 kfree(sw); 1805 } 1806 1807 /* 1808 * Currently only need to provide the callbacks. Everything else is handled 1809 * in the connection manager. 
1810 */ 1811 static int __maybe_unused tb_switch_runtime_suspend(struct device *dev) 1812 { 1813 struct tb_switch *sw = tb_to_switch(dev); 1814 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops; 1815 1816 if (cm_ops->runtime_suspend_switch) 1817 return cm_ops->runtime_suspend_switch(sw); 1818 1819 return 0; 1820 } 1821 1822 static int __maybe_unused tb_switch_runtime_resume(struct device *dev) 1823 { 1824 struct tb_switch *sw = tb_to_switch(dev); 1825 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops; 1826 1827 if (cm_ops->runtime_resume_switch) 1828 return cm_ops->runtime_resume_switch(sw); 1829 return 0; 1830 } 1831 1832 static const struct dev_pm_ops tb_switch_pm_ops = { 1833 SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume, 1834 NULL) 1835 }; 1836 1837 struct device_type tb_switch_type = { 1838 .name = "thunderbolt_device", 1839 .release = tb_switch_release, 1840 .pm = &tb_switch_pm_ops, 1841 }; 1842 1843 static int tb_switch_get_generation(struct tb_switch *sw) 1844 { 1845 switch (sw->config.device_id) { 1846 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE: 1847 case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE: 1848 case PCI_DEVICE_ID_INTEL_LIGHT_PEAK: 1849 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C: 1850 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C: 1851 case PCI_DEVICE_ID_INTEL_PORT_RIDGE: 1852 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE: 1853 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE: 1854 return 1; 1855 1856 case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE: 1857 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE: 1858 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE: 1859 return 2; 1860 1861 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE: 1862 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE: 1863 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE: 1864 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE: 1865 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE: 1866 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE: 1867 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE: 1868 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE: 1869 case PCI_DEVICE_ID_INTEL_ICL_NHI0: 1870 case PCI_DEVICE_ID_INTEL_ICL_NHI1: 1871 return 3; 1872 1873 default: 1874 if (tb_switch_is_usb4(sw)) 1875 return 4; 1876 1877 /* 1878 * For unknown switches assume generation to be 1 to be 1879 * on the safe side. 1880 */ 1881 tb_sw_warn(sw, "unsupported switch device id %#x\n", 1882 sw->config.device_id); 1883 return 1; 1884 } 1885 } 1886 1887 static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth) 1888 { 1889 int max_depth; 1890 1891 if (tb_switch_is_usb4(sw) || 1892 (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch))) 1893 max_depth = USB4_SWITCH_MAX_DEPTH; 1894 else 1895 max_depth = TB_SWITCH_MAX_DEPTH; 1896 1897 return depth > max_depth; 1898 } 1899 1900 /** 1901 * tb_switch_alloc() - allocate a switch 1902 * @tb: Pointer to the owning domain 1903 * @parent: Parent device for this switch 1904 * @route: Route string for this switch 1905 * 1906 * Allocates and initializes a switch. Will not upload configuration to 1907 * the switch. For that you need to call tb_switch_configure() 1908 * separately. The returned switch should be released by calling 1909 * tb_switch_put(). 1910 * 1911 * Return: Pointer to the allocated switch or ERR_PTR() in case of 1912 * failure. 
1913 */ 1914 struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent, 1915 u64 route) 1916 { 1917 struct tb_switch *sw; 1918 int upstream_port; 1919 int i, ret, depth; 1920 1921 /* Unlock the downstream port so we can access the switch below */ 1922 if (route) { 1923 struct tb_switch *parent_sw = tb_to_switch(parent); 1924 struct tb_port *down; 1925 1926 down = tb_port_at(route, parent_sw); 1927 tb_port_unlock(down); 1928 } 1929 1930 depth = tb_route_length(route); 1931 1932 upstream_port = tb_cfg_get_upstream_port(tb->ctl, route); 1933 if (upstream_port < 0) 1934 return ERR_PTR(upstream_port); 1935 1936 sw = kzalloc(sizeof(*sw), GFP_KERNEL); 1937 if (!sw) 1938 return ERR_PTR(-ENOMEM); 1939 1940 sw->tb = tb; 1941 ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5); 1942 if (ret) 1943 goto err_free_sw_ports; 1944 1945 sw->generation = tb_switch_get_generation(sw); 1946 1947 tb_dbg(tb, "current switch config:\n"); 1948 tb_dump_switch(tb, sw); 1949 1950 /* configure switch */ 1951 sw->config.upstream_port_number = upstream_port; 1952 sw->config.depth = depth; 1953 sw->config.route_hi = upper_32_bits(route); 1954 sw->config.route_lo = lower_32_bits(route); 1955 sw->config.enabled = 0; 1956 1957 /* Make sure we do not exceed maximum topology limit */ 1958 if (tb_switch_exceeds_max_depth(sw, depth)) { 1959 ret = -EADDRNOTAVAIL; 1960 goto err_free_sw_ports; 1961 } 1962 1963 /* initialize ports */ 1964 sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports), 1965 GFP_KERNEL); 1966 if (!sw->ports) { 1967 ret = -ENOMEM; 1968 goto err_free_sw_ports; 1969 } 1970 1971 for (i = 0; i <= sw->config.max_port_number; i++) { 1972 /* minimum setup for tb_find_cap and tb_drom_read to work */ 1973 sw->ports[i].sw = sw; 1974 sw->ports[i].port = i; 1975 } 1976 1977 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS); 1978 if (ret > 0) 1979 sw->cap_plug_events = ret; 1980 1981 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER); 1982 if (ret > 0) 1983 sw->cap_lc = ret; 1984 1985 /* Root switch is always authorized */ 1986 if (!route) 1987 sw->authorized = true; 1988 1989 device_initialize(&sw->dev); 1990 sw->dev.parent = parent; 1991 sw->dev.bus = &tb_bus_type; 1992 sw->dev.type = &tb_switch_type; 1993 sw->dev.groups = switch_groups; 1994 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw)); 1995 1996 return sw; 1997 1998 err_free_sw_ports: 1999 kfree(sw->ports); 2000 kfree(sw); 2001 2002 return ERR_PTR(ret); 2003 } 2004 2005 /** 2006 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode 2007 * @tb: Pointer to the owning domain 2008 * @parent: Parent device for this switch 2009 * @route: Route string for this switch 2010 * 2011 * This creates a switch in safe mode. This means the switch pretty much 2012 * lacks all capabilities except DMA configuration port before it is 2013 * flashed with a valid NVM firmware. 2014 * 2015 * The returned switch must be released by calling tb_switch_put(). 
2016 * 2017 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure 2018 */ 2019 struct tb_switch * 2020 tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route) 2021 { 2022 struct tb_switch *sw; 2023 2024 sw = kzalloc(sizeof(*sw), GFP_KERNEL); 2025 if (!sw) 2026 return ERR_PTR(-ENOMEM); 2027 2028 sw->tb = tb; 2029 sw->config.depth = tb_route_length(route); 2030 sw->config.route_hi = upper_32_bits(route); 2031 sw->config.route_lo = lower_32_bits(route); 2032 sw->safe_mode = true; 2033 2034 device_initialize(&sw->dev); 2035 sw->dev.parent = parent; 2036 sw->dev.bus = &tb_bus_type; 2037 sw->dev.type = &tb_switch_type; 2038 sw->dev.groups = switch_groups; 2039 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw)); 2040 2041 return sw; 2042 } 2043 2044 /** 2045 * tb_switch_configure() - Uploads configuration to the switch 2046 * @sw: Switch to configure 2047 * 2048 * Call this function before the switch is added to the system. It will 2049 * upload configuration to the switch and makes it available for the 2050 * connection manager to use. Can be called to the switch again after 2051 * resume from low power states to re-initialize it. 2052 * 2053 * Return: %0 in case of success and negative errno in case of failure 2054 */ 2055 int tb_switch_configure(struct tb_switch *sw) 2056 { 2057 struct tb *tb = sw->tb; 2058 u64 route; 2059 int ret; 2060 2061 route = tb_route(sw); 2062 2063 tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n", 2064 sw->config.enabled ? "restoring" : "initializing", route, 2065 tb_route_length(route), sw->config.upstream_port_number); 2066 2067 sw->config.enabled = 1; 2068 2069 if (tb_switch_is_usb4(sw)) { 2070 /* 2071 * For USB4 devices, we need to program the CM version 2072 * accordingly so that it knows to expose all the 2073 * additional capabilities. 2074 */ 2075 sw->config.cmuv = USB4_VERSION_1_0; 2076 2077 /* Enumerate the switch */ 2078 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH, 2079 ROUTER_CS_1, 4); 2080 if (ret) 2081 return ret; 2082 2083 ret = usb4_switch_setup(sw); 2084 } else { 2085 if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL) 2086 tb_sw_warn(sw, "unknown switch vendor id %#x\n", 2087 sw->config.vendor_id); 2088 2089 if (!sw->cap_plug_events) { 2090 tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n"); 2091 return -ENODEV; 2092 } 2093 2094 /* Enumerate the switch */ 2095 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH, 2096 ROUTER_CS_1, 3); 2097 } 2098 if (ret) 2099 return ret; 2100 2101 return tb_plug_events_active(sw, true); 2102 } 2103 2104 static int tb_switch_set_uuid(struct tb_switch *sw) 2105 { 2106 bool uid = false; 2107 u32 uuid[4]; 2108 int ret; 2109 2110 if (sw->uuid) 2111 return 0; 2112 2113 if (tb_switch_is_usb4(sw)) { 2114 ret = usb4_switch_read_uid(sw, &sw->uid); 2115 if (ret) 2116 return ret; 2117 uid = true; 2118 } else { 2119 /* 2120 * The newer controllers include fused UUID as part of 2121 * link controller specific registers 2122 */ 2123 ret = tb_lc_read_uuid(sw, uuid); 2124 if (ret) { 2125 if (ret != -EINVAL) 2126 return ret; 2127 uid = true; 2128 } 2129 } 2130 2131 if (uid) { 2132 /* 2133 * ICM generates UUID based on UID and fills the upper 2134 * two words with ones. This is not strictly following 2135 * UUID format but we want to be compatible with it so 2136 * we do the same here. 
		 */
		uuid[0] = sw->uid & 0xffffffff;
		uuid[1] = (sw->uid >> 32) & 0xffffffff;
		uuid[2] = 0xffffffff;
		uuid[3] = 0xffffffff;
	}

	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
	if (!sw->uuid)
		return -ENOMEM;
	return 0;
}

static int tb_switch_add_dma_port(struct tb_switch *sw)
{
	u32 status;
	int ret;

	switch (sw->generation) {
	case 2:
		/* Only root switch can be upgraded */
		if (tb_route(sw))
			return 0;

		fallthrough;
	case 3:
	case 4:
		ret = tb_switch_set_uuid(sw);
		if (ret)
			return ret;
		break;

	default:
		/*
		 * DMA port is the only thing available when the switch
		 * is in safe mode.
		 */
		if (!sw->safe_mode)
			return 0;
		break;
	}

	if (sw->no_nvm_upgrade)
		return 0;

	if (tb_switch_is_usb4(sw)) {
		ret = usb4_switch_nvm_authenticate_status(sw, &status);
		if (ret)
			return ret;

		if (status) {
			tb_sw_info(sw, "switch flash authentication failed\n");
			nvm_set_auth_status(sw, status);
		}

		return 0;
	}

	/* Root switch DMA port requires running firmware */
	if (!tb_route(sw) && !tb_switch_is_icm(sw))
		return 0;

	sw->dma_port = dma_port_alloc(sw);
	if (!sw->dma_port)
		return 0;

	/*
	 * If there is status already set then authentication failed
	 * when the dma_port_flash_update_auth() returned. Power cycling
	 * is not needed (it was done already) so the only thing we do
	 * here is to unblock runtime PM of the root port.
	 */
	nvm_get_auth_status(sw, &status);
	if (status) {
		if (!tb_route(sw))
			nvm_authenticate_complete_dma_port(sw);
		return 0;
	}

	/*
	 * Check status of the previous flash authentication. If there
	 * is one we need to power cycle the switch in any case to make
	 * it functional again.
	 */
	ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
	if (ret <= 0)
		return ret;

	/* Now we can allow the root port to suspend again */
	if (!tb_route(sw))
		nvm_authenticate_complete_dma_port(sw);

	if (status) {
		tb_sw_info(sw, "switch flash authentication failed\n");
		nvm_set_auth_status(sw, status);
	}

	tb_sw_info(sw, "power cycling the switch now\n");
	dma_port_power_cycle(sw->dma_port);

	/*
	 * We return an error here which causes the switch adding to
	 * fail. It should appear back after the power cycle is
	 * complete.
	 */
	return -ESHUTDOWN;
}

static void tb_switch_default_link_ports(struct tb_switch *sw)
{
	int i;

	for (i = 1; i <= sw->config.max_port_number; i += 2) {
		struct tb_port *port = &sw->ports[i];
		struct tb_port *subordinate;

		if (!tb_port_is_null(port))
			continue;

		/* Check for the subordinate port */
		if (i == sw->config.max_port_number ||
		    !tb_port_is_null(&sw->ports[i + 1]))
			continue;

		/* Link them if not already done so (by DROM) */
		subordinate = &sw->ports[i + 1];
		if (!port->dual_link_port && !subordinate->dual_link_port) {
			port->link_nr = 0;
			port->dual_link_port = subordinate;
			subordinate->link_nr = 1;
			subordinate->dual_link_port = port;

			tb_sw_dbg(sw, "linked ports %d <-> %d\n",
				  port->port, subordinate->port);
		}
	}
}

static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
{
	const struct tb_port *up = tb_upstream_port(sw);

	if (!up->dual_link_port || !up->dual_link_port->remote)
		return false;

	if (tb_switch_is_usb4(sw))
		return usb4_switch_lane_bonding_possible(sw);
	return tb_lc_lane_bonding_possible(sw);
}

static int tb_switch_update_link_attributes(struct tb_switch *sw)
{
	struct tb_port *up;
	bool change = false;
	int ret;

	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return 0;

	up = tb_upstream_port(sw);

	ret = tb_port_get_link_speed(up);
	if (ret < 0)
		return ret;
	if (sw->link_speed != ret)
		change = true;
	sw->link_speed = ret;

	ret = tb_port_get_link_width(up);
	if (ret < 0)
		return ret;
	if (sw->link_width != ret)
		change = true;
	sw->link_width = ret;

	/* Notify userspace that there is a possible link attribute change */
	if (device_is_registered(&sw->dev) && change)
		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);

	return 0;
}

/**
 * tb_switch_lane_bonding_enable() - Enable lane bonding
 * @sw: Switch to enable lane bonding
 *
 * Connection manager can call this function to enable lane bonding of a
 * switch. If conditions are correct and both switches support the feature,
 * lanes are bonded. It is safe to call this for any switch.
 */
int tb_switch_lane_bonding_enable(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_to_switch(sw->dev.parent);
	struct tb_port *up, *down;
	u64 route = tb_route(sw);
	int ret;

	if (!route)
		return 0;

	if (!tb_switch_lane_bonding_possible(sw))
		return 0;

	up = tb_upstream_port(sw);
	down = tb_port_at(route, parent);

	if (!tb_port_is_width_supported(up, 2) ||
	    !tb_port_is_width_supported(down, 2))
		return 0;

	ret = tb_port_lane_bonding_enable(up);
	if (ret) {
		tb_port_warn(up, "failed to enable lane bonding\n");
		return ret;
	}

	ret = tb_port_lane_bonding_enable(down);
	if (ret) {
		tb_port_warn(down, "failed to enable lane bonding\n");
		tb_port_lane_bonding_disable(up);
		return ret;
	}

	tb_switch_update_link_attributes(sw);

	tb_sw_dbg(sw, "lane bonding enabled\n");
	return ret;
}
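
/*
 * Illustrative only (not part of the driver): a minimal sketch of how a
 * connection manager might pair tb_switch_lane_bonding_enable() with
 * tb_switch_configure_link() (defined below) after a new router has been
 * added. The function name is made up for the example and error handling
 * is simplified.
 */
#if 0	/* example, not compiled */
static void example_bring_up_link(struct tb_switch *sw)
{
	/*
	 * Bonding is attempted first; it is a no-op for the root switch
	 * and for links that do not support two lanes.
	 */
	if (tb_switch_lane_bonding_enable(sw))
		tb_sw_warn(sw, "falling back to single lane\n");

	/*
	 * Mark the upstream link configured so it is not disconnected
	 * when the domain exits sleep.
	 */
	if (tb_switch_configure_link(sw))
		tb_sw_warn(sw, "failed to mark link configured\n");
}
#endif
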
/**
 * tb_switch_lane_bonding_disable() - Disable lane bonding
 * @sw: Switch whose lane bonding to disable
 *
 * Disables lane bonding between @sw and parent. This can be called even
 * if lanes were not bonded originally.
 */
void tb_switch_lane_bonding_disable(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_to_switch(sw->dev.parent);
	struct tb_port *up, *down;

	if (!tb_route(sw))
		return;

	up = tb_upstream_port(sw);
	if (!up->bonded)
		return;

	down = tb_port_at(tb_route(sw), parent);

	tb_port_lane_bonding_disable(up);
	tb_port_lane_bonding_disable(down);

	tb_switch_update_link_attributes(sw);
	tb_sw_dbg(sw, "lane bonding disabled\n");
}

/**
 * tb_switch_configure_link() - Set link configured
 * @sw: Switch whose link is configured
 *
 * Sets the link upstream from @sw configured (from both ends) so that
 * it will not be disconnected when the domain exits sleep. Can be
 * called for any switch.
 *
 * It is recommended that this is called after lane bonding is enabled.
 *
 * Returns %0 on success and negative errno in case of error.
 */
int tb_switch_configure_link(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return 0;

	up = tb_upstream_port(sw);
	if (tb_switch_is_usb4(up->sw))
		ret = usb4_port_configure(up);
	else
		ret = tb_lc_configure_port(up);
	if (ret)
		return ret;

	down = up->remote;
	if (tb_switch_is_usb4(down->sw))
		return usb4_port_configure(down);
	return tb_lc_configure_port(down);
}

/**
 * tb_switch_unconfigure_link() - Unconfigure link
 * @sw: Switch whose link is unconfigured
 *
 * Sets the link unconfigured so @sw will be disconnected if the
 * domain exits sleep.
 */
void tb_switch_unconfigure_link(struct tb_switch *sw)
{
	struct tb_port *up, *down;

	if (sw->is_unplugged)
		return;
	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return;

	up = tb_upstream_port(sw);
	if (tb_switch_is_usb4(up->sw))
		usb4_port_unconfigure(up);
	else
		tb_lc_unconfigure_port(up);

	down = up->remote;
	if (tb_switch_is_usb4(down->sw))
		usb4_port_unconfigure(down);
	else
		tb_lc_unconfigure_port(down);
}

/**
 * tb_switch_add() - Add a switch to the domain
 * @sw: Switch to add
 *
 * This is the last step in adding a switch to the domain. It will read
 * identification information from DROM and initialize ports so that
 * they can be used to connect other switches. The switch will be
 * exposed to userspace when this function successfully returns. To
 * remove and release the switch, call tb_switch_remove().
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_add(struct tb_switch *sw)
{
	int i, ret;

	/*
	 * Initialize the DMA control port now, before we read the DROM.
	 * Recent host controllers have a more complete DROM in NVM that
	 * includes vendor and model identification strings which we then
	 * expose to userspace. NVM can be accessed through the DMA
	 * configuration based mailbox.
	 */
	ret = tb_switch_add_dma_port(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add DMA port\n");
		return ret;
	}

	if (!sw->safe_mode) {
		/* read drom */
		ret = tb_drom_read(sw);
		if (ret) {
			dev_err(&sw->dev, "reading DROM failed\n");
			return ret;
		}
		tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);

		ret = tb_switch_set_uuid(sw);
		if (ret) {
			dev_err(&sw->dev, "failed to set UUID\n");
			return ret;
		}

		for (i = 0; i <= sw->config.max_port_number; i++) {
			if (sw->ports[i].disabled) {
				tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
				continue;
			}
			ret = tb_init_port(&sw->ports[i]);
			if (ret) {
				dev_err(&sw->dev, "failed to initialize port %d\n", i);
				return ret;
			}
		}

		tb_switch_default_link_ports(sw);

		ret = tb_switch_update_link_attributes(sw);
		if (ret)
			return ret;

		ret = tb_switch_tmu_init(sw);
		if (ret)
			return ret;
	}

	ret = device_add(&sw->dev);
	if (ret) {
		dev_err(&sw->dev, "failed to add device: %d\n", ret);
		return ret;
	}

	if (tb_route(sw)) {
		dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
			 sw->vendor, sw->device);
		if (sw->vendor_name && sw->device_name)
			dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
				 sw->device_name);
	}

	ret = tb_switch_nvm_add(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add NVM devices\n");
		device_del(&sw->dev);
		return ret;
	}

	/*
	 * Thunderbolt routers do not generate wakeups themselves but
	 * they forward wakeups from tunneled protocols, so enable it
	 * here.
	 */
	device_init_wakeup(&sw->dev, true);

	pm_runtime_set_active(&sw->dev);
	if (sw->rpm) {
		pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
		pm_runtime_use_autosuspend(&sw->dev);
		pm_runtime_mark_last_busy(&sw->dev);
		pm_runtime_enable(&sw->dev);
		pm_request_autosuspend(&sw->dev);
	}

	tb_switch_debugfs_init(sw);
	return 0;
}
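
/*
 * Illustrative only (not part of the driver): a rough sketch of how
 * tb_sw_set_unplugged() and tb_switch_remove() (both defined below) are
 * used together when a router below a port disappears. The function name
 * is made up for the example and it assumes the port still has a remote.
 */
#if 0	/* example, not compiled */
static void example_handle_unplug(struct tb_port *port)
{
	struct tb_switch *sw = port->remote->sw;

	/*
	 * Mark the subtree unplugged first so the removal path does not
	 * try to talk to hardware that is already gone.
	 */
	tb_sw_set_unplugged(sw);
	tb_switch_remove(sw);
	port->remote = NULL;
}
#endif
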
/**
 * tb_switch_remove() - Remove and release a switch
 * @sw: Switch to remove
 *
 * This will remove the switch from the domain and release it after the
 * last reference count drops to zero. If there are switches connected
 * below this switch, they will be removed as well.
 */
void tb_switch_remove(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_debugfs_remove(sw);

	if (sw->rpm) {
		pm_runtime_get_sync(&sw->dev);
		pm_runtime_disable(&sw->dev);
	}

	/* port 0 is the switch itself and never has a remote */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port)) {
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
		} else if (port->xdomain) {
			tb_xdomain_remove(port->xdomain);
			port->xdomain = NULL;
		}

		/* Remove any downstream retimers */
		tb_retimer_remove_all(port);
	}

	if (!sw->is_unplugged)
		tb_plug_events_active(sw, false);

	tb_switch_nvm_remove(sw);

	if (tb_route(sw))
		dev_info(&sw->dev, "device disconnected\n");
	device_unregister(&sw->dev);
}

/**
 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
 * @sw: Switch to mark as unplugged
 */
void tb_sw_set_unplugged(struct tb_switch *sw)
{
	struct tb_port *port;

	if (sw == sw->tb->root_switch) {
		tb_sw_WARN(sw, "cannot unplug root switch\n");
		return;
	}
	if (sw->is_unplugged) {
		tb_sw_WARN(sw, "is_unplugged already set\n");
		return;
	}
	sw->is_unplugged = true;
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_sw_set_unplugged(port->remote->sw);
		else if (port->xdomain)
			port->xdomain->is_unplugged = true;
	}
}

static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
{
	if (flags)
		tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
	else
		tb_sw_dbg(sw, "disabling wakeup\n");

	if (tb_switch_is_usb4(sw))
		return usb4_switch_set_wake(sw, flags);
	return tb_lc_set_wake(sw, flags);
}

int tb_switch_resume(struct tb_switch *sw)
{
	struct tb_port *port;
	int err;

	tb_sw_dbg(sw, "resuming switch\n");

	/*
	 * Check the UID of the connected switches except for the root
	 * switch which we assume cannot be removed.
	 */
	if (tb_route(sw)) {
		u64 uid;

		/*
		 * Check first that we can still read the switch config
		 * space. It may be that there is now another domain
		 * connected.
		 */
		err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
		if (err < 0) {
			tb_sw_info(sw, "switch not present anymore\n");
			return err;
		}

		if (tb_switch_is_usb4(sw))
			err = usb4_switch_read_uid(sw, &uid);
		else
			err = tb_drom_read_uid_only(sw, &uid);
		if (err) {
			tb_sw_warn(sw, "uid read failed\n");
			return err;
		}
		if (sw->uid != uid) {
			tb_sw_info(sw,
				"changed while suspended (uid %#llx -> %#llx)\n",
				sw->uid, uid);
			return -ENODEV;
		}
	}

	err = tb_switch_configure(sw);
	if (err)
		return err;

	/* Disable wakes */
	tb_switch_set_wake(sw, 0);

	err = tb_switch_tmu_init(sw);
	if (err)
		return err;

	/* check for surviving downstream switches */
	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port) && !port->xdomain)
			continue;

		if (tb_wait_for_port(port, true) <= 0) {
			tb_port_warn(port,
				     "lost during suspend, disconnecting\n");
			if (tb_port_has_remote(port))
				tb_sw_set_unplugged(port->remote->sw);
			else if (port->xdomain)
				port->xdomain->is_unplugged = true;
		} else if (tb_port_has_remote(port) || port->xdomain) {
			/*
			 * Always unlock the port so the downstream
			 * switch/domain is accessible.
			 */
			if (tb_port_unlock(port))
				tb_port_warn(port, "failed to unlock port\n");
			if (port->remote && tb_switch_resume(port->remote->sw)) {
				tb_port_warn(port,
					     "lost during suspend, disconnecting\n");
				tb_sw_set_unplugged(port->remote->sw);
			}
		}
	}
	return 0;
}

/**
 * tb_switch_suspend() - Put a switch to sleep
 * @sw: Switch to suspend
 * @runtime: Is this runtime suspend or system sleep
 *
 * Suspends the router and all its children. Enables wakes according to
 * the value of @runtime and then sets the sleep bit for the router. If
 * @sw is the host router, the domain is ready to go to sleep once this
 * function returns.
 */
void tb_switch_suspend(struct tb_switch *sw, bool runtime)
{
	unsigned int flags = 0;
	struct tb_port *port;
	int err;

	tb_sw_dbg(sw, "suspending switch\n");

	err = tb_plug_events_active(sw, false);
	if (err)
		return;

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_switch_suspend(port->remote->sw, runtime);
	}

	if (runtime) {
		/* Trigger wake when something is plugged in/out */
		flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
		flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
	} else if (device_may_wakeup(&sw->dev)) {
		flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
	}

	tb_switch_set_wake(sw, flags);

	if (tb_switch_is_usb4(sw))
		usb4_switch_set_sleep(sw);
	else
		tb_lc_set_sleep(sw);
}
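
/*
 * Illustrative only (not part of the driver): a rough sketch of how the
 * suspend and resume helpers above pair up at the domain level. The
 * function names are made up for the example; the real domain code does
 * considerably more work than this.
 */
#if 0	/* example, not compiled */
static void example_domain_sleep(struct tb *tb)
{
	/* Enable wakes and set the sleep bit for the whole tree */
	tb_switch_suspend(tb->root_switch, false);
}

static void example_domain_wake(struct tb *tb)
{
	/*
	 * Resume walks the tree, re-uploads configuration and marks
	 * routers that disappeared during sleep as unplugged.
	 */
	if (tb_switch_resume(tb->root_switch))
		tb_sw_warn(tb->root_switch, "failed to resume\n");
}
#endif
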
/**
 * tb_switch_query_dp_resource() - Query availability of DP resource
 * @sw: Switch whose DP resource is queried
 * @in: DP IN port
 *
 * Queries availability of DP resource for DP tunneling using switch
 * specific means. Returns %true if resource is available.
 */
bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_query_dp_resource(sw, in);
	return tb_lc_dp_sink_query(sw, in);
}

/**
 * tb_switch_alloc_dp_resource() - Allocate available DP resource
 * @sw: Switch whose DP resource is allocated
 * @in: DP IN port
 *
 * Allocates DP resource for DP tunneling. The resource must be
 * available for this to succeed (see tb_switch_query_dp_resource()).
 * Returns %0 in case of success and negative errno otherwise.
 */
int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_alloc_dp_resource(sw, in);
	return tb_lc_dp_sink_alloc(sw, in);
}

/**
 * tb_switch_dealloc_dp_resource() - De-allocate DP resource
 * @sw: Switch whose DP resource is de-allocated
 * @in: DP IN port
 *
 * De-allocates DP resource that was previously allocated for DP
 * tunneling.
 */
void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	int ret;

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_dealloc_dp_resource(sw, in);
	else
		ret = tb_lc_dp_sink_dealloc(sw, in);

	if (ret)
		tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
			   in->port);
}
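
/*
 * Illustrative only (not part of the driver): a minimal sketch of the
 * intended query/allocate/de-allocate pattern for the DP resource
 * helpers above. The function names are made up for the example; the
 * real connection manager does this as part of DP tunnel setup and
 * teardown.
 */
#if 0	/* example, not compiled */
static int example_dp_tunnel_setup(struct tb_switch *sw, struct tb_port *in)
{
	/* The resource must be available before it can be allocated */
	if (!tb_switch_query_dp_resource(sw, in))
		return -EBUSY;

	return tb_switch_alloc_dp_resource(sw, in);
}

static void example_dp_tunnel_teardown(struct tb_switch *sw, struct tb_port *in)
{
	/* Release the resource when the DP tunnel goes away */
	tb_switch_dealloc_dp_resource(sw, in);
}
#endif
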
struct tb_sw_lookup {
	struct tb *tb;
	u8 link;
	u8 depth;
	const uuid_t *uuid;
	u64 route;
};

static int tb_switch_match(struct device *dev, const void *data)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_sw_lookup *lookup = data;

	if (!sw)
		return 0;
	if (sw->tb != lookup->tb)
		return 0;

	if (lookup->uuid)
		return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));

	if (lookup->route) {
		return sw->config.route_lo == lower_32_bits(lookup->route) &&
		       sw->config.route_hi == upper_32_bits(lookup->route);
	}

	/* Root switch is matched only by depth */
	if (!lookup->depth)
		return !sw->depth;

	return sw->link == lookup->link && sw->depth == lookup->depth;
}

/**
 * tb_switch_find_by_link_depth() - Find switch by link and depth
 * @tb: Domain the switch belongs to
 * @link: Link number the switch is connected to
 * @depth: Depth of the switch in the link
 *
 * Returned switch has its reference count increased so the caller needs
 * to call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.link = link;
	lookup.depth = depth;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_by_uuid() - Find switch by UUID
 * @tb: Domain the switch belongs to
 * @uuid: UUID to look for
 *
 * Returned switch has its reference count increased so the caller needs
 * to call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.uuid = uuid;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_by_route() - Find switch by route string
 * @tb: Domain the switch belongs to
 * @route: Route string to look for
 *
 * Returned switch has its reference count increased so the caller needs
 * to call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	if (!route)
		return tb_switch_get(tb->root_switch);

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.route = route;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_port() - return the first port of @type on @sw or NULL
 * @sw: Switch to find the port from
 * @type: Port type to look for
 */
struct tb_port *tb_switch_find_port(struct tb_switch *sw,
				    enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (port->config.type == type)
			return port;
	}

	return NULL;
}
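
/*
 * Illustrative only (not part of the driver): a minimal sketch of the
 * reference counting contract of the lookup helpers above. The function
 * name is made up for the example.
 */
#if 0	/* example, not compiled */
static void example_lookup(struct tb *tb, u64 route)
{
	struct tb_switch *sw;

	sw = tb_switch_find_by_route(tb, route);
	if (!sw)
		return;

	tb_sw_dbg(sw, "found switch at route %llx\n", tb_route(sw));

	/* Every successful lookup must be balanced with tb_switch_put() */
	tb_switch_put(sw);
}
#endif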