// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - switch/port utility functions
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/nvmem-provider.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#include "tb.h"

/* Switch NVM support */

#define NVM_CSS			0x10

struct nvm_auth_status {
	struct list_head list;
	uuid_t uuid;
	u32 status;
};

enum nvm_write_ops {
	WRITE_AND_AUTHENTICATE = 1,
	WRITE_ONLY = 2,
};

/*
 * Hold NVM authentication failure status per switch. This information
 * needs to stay around even when the switch gets power cycled so we
 * keep it separately.
 */
static LIST_HEAD(nvm_auth_status_cache);
static DEFINE_MUTEX(nvm_auth_status_lock);

static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	list_for_each_entry(st, &nvm_auth_status_cache, list) {
		if (uuid_equal(&st->uuid, sw->uuid))
			return st;
	}

	return NULL;
}

static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	mutex_unlock(&nvm_auth_status_lock);

	*status = st ? st->status : 0;
}

static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
{
	struct nvm_auth_status *st;

	if (WARN_ON(!sw->uuid))
		return;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);

	if (!st) {
		st = kzalloc(sizeof(*st), GFP_KERNEL);
		if (!st)
			goto unlock;

		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
		INIT_LIST_HEAD(&st->list);
		list_add_tail(&st->list, &nvm_auth_status_cache);
	}

	st->status = status;
unlock:
	mutex_unlock(&nvm_auth_status_lock);
}

static void nvm_clear_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	if (st) {
		list_del(&st->list);
		kfree(st);
	}
	mutex_unlock(&nvm_auth_status_lock);
}
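
/*
 * Sketch of the NVM image layout that nvm_validate_and_write() below
 * assumes. This is reconstructed from the checks in the function
 * itself, not from a specification:
 *
 *   offset 0          FARB pointer; the low 24 bits give the size of
 *                     the header section (hdr_size), which must be
 *                     aligned to a 4k page
 *   offset NVM_CSS    CSS headers (written separately on generation
 *                     < 3 hardware)
 *   offset hdr_size   digital section; starts with a u16 section size
 *                     and holds the device ID at hdr_size + NVM_DEVID
 */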

static int nvm_validate_and_write(struct tb_switch *sw)
{
	unsigned int image_size, hdr_size;
	const u8 *buf = sw->nvm->buf;
	u16 ds_size;
	int ret;

	if (!buf)
		return -EINVAL;

	image_size = sw->nvm->buf_data_size;
	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
		return -EINVAL;

	/*
	 * FARB pointer must point inside the image and must at least
	 * contain parts of the digital section we will be reading here.
	 */
	hdr_size = (*(u32 *)buf) & 0xffffff;
	if (hdr_size + NVM_DEVID + 2 >= image_size)
		return -EINVAL;

	/* Digital section start should be aligned to 4k page */
	if (!IS_ALIGNED(hdr_size, SZ_4K))
		return -EINVAL;

	/*
	 * Read digital section size and check that it also fits inside
	 * the image.
	 */
	ds_size = *(u16 *)(buf + hdr_size);
	if (ds_size >= image_size)
		return -EINVAL;

	if (!sw->safe_mode) {
		u16 device_id;

		/*
		 * Make sure the device ID in the image matches the one
		 * we read from the switch config space.
		 */
		device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
		if (device_id != sw->config.device_id)
			return -EINVAL;

		if (sw->generation < 3) {
			/* Write CSS headers first */
			ret = dma_port_flash_write(sw->dma_port,
				DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
				DMA_PORT_CSS_MAX_SIZE);
			if (ret)
				return ret;
		}

		/* Skip headers in the image */
		buf += hdr_size;
		image_size -= hdr_size;
	}

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
	else
		ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
	if (!ret)
		sw->nvm->flushed = true;
	return ret;
}

static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
{
	int ret = 0;

	/*
	 * Root switch NVM upgrade requires that we disconnect the
	 * existing paths first (in case it is not in safe mode
	 * already).
	 */
	if (!sw->safe_mode) {
		u32 status;

		ret = tb_domain_disconnect_all_paths(sw->tb);
		if (ret)
			return ret;
		/*
		 * The host controller goes away pretty soon after this if
		 * everything goes well so getting a timeout is expected.
		 */
		ret = dma_port_flash_update_auth(sw->dma_port);
		if (!ret || ret == -ETIMEDOUT)
			return 0;

		/*
		 * Any error from update auth operation requires power
		 * cycling of the host router.
		 */
		tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
		if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
			nvm_set_auth_status(sw, status);
	}

	/*
	 * From safe mode we can get out by just power cycling the
	 * switch.
	 */
	dma_port_power_cycle(sw->dma_port);
	return ret;
}

static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
{
	int ret, retries = 10;

	ret = dma_port_flash_update_auth(sw->dma_port);
	switch (ret) {
	case 0:
	case -ETIMEDOUT:
	case -EACCES:
	case -EINVAL:
		/* Power cycle is required */
		break;
	default:
		return ret;
	}

	/*
	 * Poll here for the authentication status. It takes some time
	 * for the device to respond (we get a timeout for a while).
	 * Once we get a response the device needs to be power cycled
	 * in order for the new NVM to be taken into use.
	 */
	do {
		u32 status;

		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
		if (ret < 0 && ret != -ETIMEDOUT)
			return ret;
		if (ret > 0) {
			if (status) {
				tb_sw_warn(sw, "failed to authenticate NVM\n");
				nvm_set_auth_status(sw, status);
			}

			tb_sw_info(sw, "power cycling the switch now\n");
			dma_port_power_cycle(sw->dma_port);
			return 0;
		}

		msleep(500);
	} while (--retries);

	return -ETIMEDOUT;
}

static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	/*
	 * During host router NVM upgrade we should not allow root port to
	 * go into D3cold because some root ports cannot trigger PME by
	 * themselves. To be on the safe side keep the root port in D0
	 * during the whole upgrade process.
	 */
	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_get_noresume(&root_port->dev);
}
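
/*
 * The runtime PM reference taken in nvm_authenticate_start_dma_port()
 * above is dropped by nvm_authenticate_complete_dma_port() below once
 * the authentication status has been read back (see
 * tb_switch_add_dma_port()). Roughly, for a host router upgrade
 * (illustrative sequence only):
 *
 *   nvm_authenticate_start_dma_port(sw);
 *   ret = nvm_authenticate_host_dma_port(sw);
 *   ...                                  (power cycle happens here)
 *   nvm_authenticate_complete_dma_port(sw);
 */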

static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_put(&root_port->dev);
}

static inline bool nvm_readable(struct tb_switch *sw)
{
	if (tb_switch_is_usb4(sw)) {
		/*
		 * USB4 devices must support NVM operations but it is
		 * optional for hosts. Therefore we query the NVM sector
		 * size here and if it is supported assume NVM
		 * operations are implemented.
		 */
		return usb4_switch_nvm_sector_size(sw) > 0;
	}

	/* Thunderbolt 2 and 3 devices support NVM through DMA port */
	return !!sw->dma_port;
}

static inline bool nvm_upgradeable(struct tb_switch *sw)
{
	if (sw->no_nvm_upgrade)
		return false;
	return nvm_readable(sw);
}

static inline int nvm_read(struct tb_switch *sw, unsigned int address,
			   void *buf, size_t size)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_nvm_read(sw, address, buf, size);
	return dma_port_flash_read(sw->dma_port, address, buf, size);
}

static int nvm_authenticate(struct tb_switch *sw)
{
	int ret;

	if (tb_switch_is_usb4(sw))
		return usb4_switch_nvm_authenticate(sw);

	if (!tb_route(sw)) {
		nvm_authenticate_start_dma_port(sw);
		ret = nvm_authenticate_host_dma_port(sw);
	} else {
		ret = nvm_authenticate_device_dma_port(sw);
	}

	return ret;
}

static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
			      size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	int ret;

	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto out;
	}

	ret = nvm_read(sw, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}

static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
			       size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	int ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	/*
	 * Since writing the NVM image might require some special steps,
	 * for example when CSS headers are written, we cache the image
	 * locally here and handle the special cases when the user asks
	 * us to authenticate the image.
	 */
	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

	return ret;
}
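
/*
 * With the callbacks above in place, a typical NVM upgrade driven from
 * userspace looks roughly like this (illustrative sysfs paths, the
 * device name "0-1" and nvmem id "0" are examples):
 *
 *   # cat /sys/bus/thunderbolt/devices/0-1/nvm_version
 *   # dd if=image.nvm of=/sys/bus/thunderbolt/devices/0-1/nvm_non_active0/nvmem
 *   # echo 1 > /sys/bus/thunderbolt/devices/0-1/nvm_authenticate
 *
 * The image write is only buffered by tb_switch_nvm_write() above; it
 * is validated and flushed to the hardware when nvm_authenticate is
 * written (see nvm_authenticate_sysfs() further down).
 */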

static int tb_switch_nvm_add(struct tb_switch *sw)
{
	struct tb_nvm *nvm;
	u32 val;
	int ret;

	if (!nvm_readable(sw))
		return 0;

	/*
	 * The NVM format of non-Intel hardware is not known so
	 * currently restrict NVM upgrade for Intel hardware. We may
	 * relax this in the future when we learn other NVM formats.
	 */
	if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL &&
	    sw->config.vendor_id != 0x8087) {
		dev_info(&sw->dev,
			 "NVM format of vendor %#x is not known, disabling NVM upgrade\n",
			 sw->config.vendor_id);
		return 0;
	}

	nvm = tb_nvm_alloc(&sw->dev);
	if (IS_ERR(nvm))
		return PTR_ERR(nvm);

	/*
	 * If the switch is in safe-mode the only accessible portion of
	 * the NVM is the non-active one where userspace is expected to
	 * write new functional NVM.
	 */
	if (!sw->safe_mode) {
		u32 nvm_size, hdr_size;

		ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val));
		if (ret)
			goto err_nvm;

		hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
		nvm_size = (SZ_1M << (val & 7)) / 8;
		nvm_size = (nvm_size - hdr_size) / 2;

		ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val));
		if (ret)
			goto err_nvm;

		nvm->major = val >> 16;
		nvm->minor = val >> 8;

		ret = tb_nvm_add_active(nvm, nvm_size, tb_switch_nvm_read);
		if (ret)
			goto err_nvm;
	}

	if (!sw->no_nvm_upgrade) {
		ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE,
					    tb_switch_nvm_write);
		if (ret)
			goto err_nvm;
	}

	sw->nvm = nvm;
	return 0;

err_nvm:
	tb_nvm_free(nvm);
	return ret;
}
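
/*
 * Worked example for the size computation in tb_switch_nvm_add() above
 * (interpretation assumed from the code, not from a datasheet): the low
 * three bits of NVM_FLASH_SIZE appear to give the flash size as a
 * power-of-two number of Mbit, so val & 7 == 5 means
 * (SZ_1M << 5) / 8 = 4 MB of flash. With a generation 3 header of
 * SZ_16K the active region size is then (4M - 16K) / 2, i.e. the
 * remaining flash is split evenly between the active and non-active
 * halves.
 */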

static void tb_switch_nvm_remove(struct tb_switch *sw)
{
	struct tb_nvm *nvm;

	nvm = sw->nvm;
	sw->nvm = NULL;

	if (!nvm)
		return;

	/* Remove authentication status in case the switch is unplugged */
	if (!nvm->authenticating)
		nvm_clear_auth_status(sw);

	tb_nvm_free(nvm);
}

/* port utility functions */

static const char *tb_port_type(struct tb_regs_port_header *port)
{
	switch (port->type >> 16) {
	case 0:
		switch ((u8)port->type) {
		case 0:
			return "Inactive";
		case 1:
			return "Port";
		case 2:
			return "NHI";
		default:
			return "unknown";
		}
	case 0x2:
		return "Ethernet";
	case 0x8:
		return "SATA";
	case 0xe:
		return "DP/HDMI";
	case 0x10:
		return "PCIe";
	case 0x20:
		return "USB";
	default:
		return "unknown";
	}
}

static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
{
	tb_dbg(tb,
	       " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
	       port->port_number, port->vendor_id, port->device_id,
	       port->revision, port->thunderbolt_version, tb_port_type(port),
	       port->type);
	tb_dbg(tb, "  Max hop id (in/out): %d/%d\n",
	       port->max_in_hop_id, port->max_out_hop_id);
	tb_dbg(tb, "  Max counters: %d\n", port->max_counters);
	tb_dbg(tb, "  NFC Credits: %#x\n", port->nfc_credits);
}

/**
 * tb_port_state() - get connectedness state of a port
 *
 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 *
 * Return: Returns an enum tb_port_state on success or an error code on failure.
 */
static int tb_port_state(struct tb_port *port)
{
	struct tb_cap_phy phy;
	int res;

	if (port->cap_phy == 0) {
		tb_port_WARN(port, "does not have a PHY\n");
		return -EINVAL;
	}
	res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
	if (res)
		return res;
	return phy.state;
}

/**
 * tb_wait_for_port() - wait for a port to become ready
 *
 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 * wait_if_unplugged is set then we also wait if the port is in state
 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 * switch resume). Otherwise we only wait if a device is registered but the
 * link has not yet been established.
 *
 * Return: Returns an error code on failure. Returns 0 if the port is not
 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
 * if the port is connected and in state TB_PORT_UP.
 */
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
{
	int retries = 10;
	int state;

	if (!port->cap_phy) {
		tb_port_WARN(port, "does not have PHY\n");
		return -EINVAL;
	}
	if (tb_is_upstream_port(port)) {
		tb_port_WARN(port, "is the upstream port\n");
		return -EINVAL;
	}

	while (retries--) {
		state = tb_port_state(port);
		if (state < 0)
			return state;
		if (state == TB_PORT_DISABLED) {
			tb_port_dbg(port, "is disabled (state: 0)\n");
			return 0;
		}
		if (state == TB_PORT_UNPLUGGED) {
			if (wait_if_unplugged) {
				/* used during resume */
				tb_port_dbg(port,
					    "is unplugged (state: 7), retrying...\n");
				msleep(100);
				continue;
			}
			tb_port_dbg(port, "is unplugged (state: 7)\n");
			return 0;
		}
		if (state == TB_PORT_UP) {
			tb_port_dbg(port, "is connected, link is up (state: 2)\n");
			return 1;
		}

		/*
		 * After plug-in the state is TB_PORT_CONNECTING. Give it some
		 * time.
		 */
		tb_port_dbg(port,
			    "is connected, link is not up (state: %d), retrying...\n",
			    state);
		msleep(100);
	}
	tb_port_warn(port,
		     "failed to reach state TB_PORT_UP. Ignoring port...\n");
	return 0;
}

/**
 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
 *
 * Change the number of NFC credits allocated to @port by @credits. To remove
 * NFC credits pass a negative amount of credits.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_add_nfc_credits(struct tb_port *port, int credits)
{
	u32 nfc_credits;

	if (credits == 0 || port->sw->is_unplugged)
		return 0;

	nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
	nfc_credits += credits;

	tb_port_dbg(port, "adding %d NFC credits to %lu", credits,
		    port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);

	port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
	port->config.nfc_credits |= nfc_credits;

	return tb_port_write(port, &port->config.nfc_credits,
			     TB_CFG_PORT, ADP_CS_4, 1);
}

/**
 * tb_port_set_initial_credits() - Set initial port link credits allocated
 * @port: Port to set the initial credits
 * @credits: Number of credits to allocate
 *
 * Set initial credits value to be used for ingress shared buffering.
 */
int tb_port_set_initial_credits(struct tb_port *port, u32 credits)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
	if (ret)
		return ret;

	data &= ~ADP_CS_5_LCA_MASK;
	data |= (credits << ADP_CS_5_LCA_SHIFT) & ADP_CS_5_LCA_MASK;

	return tb_port_write(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
}
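
/*
 * Illustrative (not taken from a real caller) pairing for the credit
 * helpers above: a connection manager that reserved extra buffers for
 * a path would release them with a negative amount on teardown:
 *
 *   tb_port_add_nfc_credits(port, 4);	  (path activate)
 *   ...
 *   tb_port_add_nfc_credits(port, -4);  (path deactivate)
 */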

/**
 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_clear_counter(struct tb_port *port, int counter)
{
	u32 zero[3] = { 0, 0, 0 };

	tb_port_dbg(port, "clearing counter %d\n", counter);
	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
}

/**
 * tb_port_unlock() - Unlock downstream port
 * @port: Port to unlock
 *
 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
 * downstream router accessible for CM.
 */
int tb_port_unlock(struct tb_port *port)
{
	if (tb_switch_is_icm(port->sw))
		return 0;
	if (!tb_port_is_null(port))
		return -EINVAL;
	if (tb_switch_is_usb4(port->sw))
		return usb4_port_unlock(port);
	return 0;
}

/**
 * tb_init_port() - initialize a port
 *
 * This is a helper method for tb_switch_alloc. Does not check or initialize
 * any downstream switches.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_init_port(struct tb_port *port)
{
	int res;
	int cap;

	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
	if (res) {
		if (res == -ENODEV) {
			tb_dbg(port->sw->tb, " Port %d: not implemented\n",
			       port->port);
			port->disabled = true;
			return 0;
		}
		return res;
	}

	/* Port 0 is the switch itself and has no PHY. */
	if (port->config.type == TB_TYPE_PORT && port->port != 0) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);

		if (cap > 0)
			port->cap_phy = cap;
		else
			tb_port_WARN(port, "non switch port without a PHY\n");

		cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
		if (cap > 0)
			port->cap_usb4 = cap;
	} else if (port->port != 0) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
		if (cap > 0)
			port->cap_adap = cap;
	}

	tb_dump_port(port->sw->tb, &port->config);

	/* Control port does not need HopID allocation */
	if (port->port) {
		ida_init(&port->in_hopids);
		ida_init(&port->out_hopids);
	}

	INIT_LIST_HEAD(&port->list);
	return 0;
}

static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
			       int max_hopid)
{
	int port_max_hopid;
	struct ida *ida;

	if (in) {
		port_max_hopid = port->config.max_in_hop_id;
		ida = &port->in_hopids;
	} else {
		port_max_hopid = port->config.max_out_hop_id;
		ida = &port->out_hopids;
	}

	/*
	 * NHI can use HopIDs 1-max; for other adapters HopIDs 0-7 are
	 * reserved.
	 */
	if (port->config.type != TB_TYPE_NHI && min_hopid < TB_PATH_MIN_HOPID)
		min_hopid = TB_PATH_MIN_HOPID;

	if (max_hopid < 0 || max_hopid > port_max_hopid)
		max_hopid = port_max_hopid;

	return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
}
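
/*
 * The wrappers below pair with tb_port_release_in_hopid() and
 * tb_port_release_out_hopid(). A minimal (illustrative) caller:
 *
 *   hopid = tb_port_alloc_in_hopid(port, TB_PATH_MIN_HOPID, -1);
 *   if (hopid < 0)
 *           return hopid;
 *   ...
 *   tb_port_release_in_hopid(port, hopid);
 *
 * Passing a negative max_hopid selects the port maximum (see
 * tb_port_alloc_hopid() above).
 */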

/**
 * tb_port_alloc_in_hopid() - Allocate input HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable input HopID
 * @max_hopid: Maximum acceptable input HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
}

/**
 * tb_port_alloc_out_hopid() - Allocate output HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable output HopID
 * @max_hopid: Maximum acceptable output HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
}

/**
 * tb_port_release_in_hopid() - Release allocated input HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_in_hopid(struct tb_port *port, int hopid)
{
	ida_simple_remove(&port->in_hopids, hopid);
}

/**
 * tb_port_release_out_hopid() - Release allocated output HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_out_hopid(struct tb_port *port, int hopid)
{
	ida_simple_remove(&port->out_hopids, hopid);
}

static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
					  const struct tb_switch *sw)
{
	u64 mask = (1ULL << parent->config.depth * 8) - 1;

	return (tb_route(parent) & mask) == (tb_route(sw) & mask);
}

/**
 * tb_next_port_on_path() - Return next port for given port on a path
 * @start: Start port of the walk
 * @end: End port of the walk
 * @prev: Previous port (%NULL if this is the first)
 *
 * This function can be used to walk from one port to another if they
 * are connected through zero or more switches. If @prev is a dual
 * link port, the function follows that link and returns the other end
 * of that same link.
 *
 * If the @end port has been reached, return %NULL.
 *
 * Domain tb->lock must be held when this function is called.
 */
struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
				     struct tb_port *prev)
{
	struct tb_port *next;

	if (!prev)
		return start;

	if (prev->sw == end->sw) {
		if (prev == end)
			return NULL;
		return end;
	}

	if (tb_switch_is_reachable(prev->sw, end->sw)) {
		next = tb_port_at(tb_route(end->sw), prev->sw);
		/* Walk down the topology if next == prev */
		if (prev->remote &&
		    (next == prev || next->dual_link_port == prev))
			next = prev->remote;
	} else {
		if (tb_is_upstream_port(prev)) {
			next = prev->remote;
		} else {
			next = tb_upstream_port(prev->sw);
			/*
			 * Keep the same link if prev and next are both
			 * dual link ports.
			 */
			if (next->dual_link_port &&
			    next->link_nr != prev->link_nr) {
				next = next->dual_link_port;
			}
		}
	}

	return next != prev ? next : NULL;
}
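
/*
 * A minimal (illustrative) walk using tb_next_port_on_path() above:
 *
 *   struct tb_port *p = NULL;
 *
 *   while ((p = tb_next_port_on_path(src, dst, p)))
 *           tb_port_dbg(p, "on path\n");
 *
 * The walk yields @src first and terminates with NULL once @dst has
 * been returned.
 */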

/**
 * tb_port_get_link_speed() - Get current link speed
 * @port: Port to check (USB4 or CIO)
 *
 * Returns link speed in Gb/s or negative errno in case of failure.
 */
int tb_port_get_link_speed(struct tb_port *port)
{
	u32 val, speed;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
		LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;
	return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10;
}

static int tb_port_get_link_width(struct tb_port *port)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
		LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
}

static bool tb_port_is_width_supported(struct tb_port *port, int width)
{
	u32 phy, widths;
	int ret;

	if (!port->cap_phy)
		return false;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_0, 1);
	if (ret)
		return false;

	widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
		LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;

	return !!(widths & width);
}

static int tb_port_set_link_width(struct tb_port *port, unsigned int width)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
	switch (width) {
	case 1:
		val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
		break;
	case 2:
		val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
		break;
	default:
		return -EINVAL;
	}

	val |= LANE_ADP_CS_1_LB;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}
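
/*
 * Note on the raw width values (an inference from the code, not a
 * spec citation): tb_port_get_link_width() returns the CURRENT_WIDTH
 * field of LANE_ADP_CS_1 as-is, which the callers below treat as 1 for
 * a single lane and 2 for a bonded dual link, matching the
 * LANE_ADP_CS_1_TARGET_WIDTH_* values written by
 * tb_port_set_link_width() above.
 */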

static int tb_port_lane_bonding_enable(struct tb_port *port)
{
	int ret;

	/*
	 * Enable lane bonding for both links if not already enabled by,
	 * for example, the boot firmware.
	 */
	ret = tb_port_get_link_width(port);
	if (ret == 1) {
		ret = tb_port_set_link_width(port, 2);
		if (ret)
			return ret;
	}

	ret = tb_port_get_link_width(port->dual_link_port);
	if (ret == 1) {
		ret = tb_port_set_link_width(port->dual_link_port, 2);
		if (ret) {
			tb_port_set_link_width(port, 1);
			return ret;
		}
	}

	port->bonded = true;
	port->dual_link_port->bonded = true;

	return 0;
}

static void tb_port_lane_bonding_disable(struct tb_port *port)
{
	port->dual_link_port->bonded = false;
	port->bonded = false;

	tb_port_set_link_width(port->dual_link_port, 1);
	tb_port_set_link_width(port, 1);
}

/**
 * tb_port_is_enabled() - Is the adapter port enabled
 * @port: Port to check
 */
bool tb_port_is_enabled(struct tb_port *port)
{
	switch (port->config.type) {
	case TB_TYPE_PCIE_UP:
	case TB_TYPE_PCIE_DOWN:
		return tb_pci_port_is_enabled(port);

	case TB_TYPE_DP_HDMI_IN:
	case TB_TYPE_DP_HDMI_OUT:
		return tb_dp_port_is_enabled(port);

	case TB_TYPE_USB3_UP:
	case TB_TYPE_USB3_DOWN:
		return tb_usb3_port_is_enabled(port);

	default:
		return false;
	}
}

/**
 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
 * @port: USB3 adapter port to check
 */
bool tb_usb3_port_is_enabled(struct tb_port *port)
{
	u32 data;

	if (tb_port_read(port, &data, TB_CFG_PORT,
			 port->cap_adap + ADP_USB3_CS_0, 1))
		return false;

	return !!(data & ADP_USB3_CS_0_PE);
}

/**
 * tb_usb3_port_enable() - Enable USB3 adapter port
 * @port: USB3 adapter port to enable
 * @enable: Enable/disable the USB3 adapter
 */
int tb_usb3_port_enable(struct tb_port *port, bool enable)
{
	u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
			  : ADP_USB3_CS_0_V;

	if (!port->cap_adap)
		return -ENXIO;
	return tb_port_write(port, &word, TB_CFG_PORT,
			     port->cap_adap + ADP_USB3_CS_0, 1);
}

/**
 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
 * @port: PCIe port to check
 */
bool tb_pci_port_is_enabled(struct tb_port *port)
{
	u32 data;

	if (tb_port_read(port, &data, TB_CFG_PORT,
			 port->cap_adap + ADP_PCIE_CS_0, 1))
		return false;

	return !!(data & ADP_PCIE_CS_0_PE);
}

/**
 * tb_pci_port_enable() - Enable PCIe adapter port
 * @port: PCIe port to enable
 * @enable: Enable/disable the PCIe adapter
 */
int tb_pci_port_enable(struct tb_port *port, bool enable)
{
	u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;

	if (!port->cap_adap)
		return -ENXIO;
	return tb_port_write(port, &word, TB_CFG_PORT,
			     port->cap_adap + ADP_PCIE_CS_0, 1);
}

/**
 * tb_dp_port_hpd_is_active() - Is HPD already active
 * @port: DP out port to check
 *
 * Checks if the DP OUT adapter port has HDP bit already set.
 */
int tb_dp_port_hpd_is_active(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	return !!(data & ADP_DP_CS_2_HDP);
}

/**
 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
 * @port: Port to clear HPD
 *
 * If the DP IN port has HDP set, this function can be used to clear it.
 */
int tb_dp_port_hpd_clear(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_3, 1);
	if (ret)
		return ret;

	data |= ADP_DP_CS_3_HDPC;
	return tb_port_write(port, &data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_3, 1);
}

/**
 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
 * @port: DP IN/OUT port to set hops
 * @video: Video Hop ID
 * @aux_tx: AUX TX Hop ID
 * @aux_rx: AUX RX Hop ID
 *
 * Programs specified Hop IDs for DP IN/OUT port.
 */
int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
			unsigned int aux_tx, unsigned int aux_rx)
{
	u32 data[2];
	int ret;

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
		ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
		ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}

/**
 * tb_dp_port_is_enabled() - Is DP adapter port enabled
 * @port: DP adapter port to check
 */
bool tb_dp_port_is_enabled(struct tb_port *port)
{
	u32 data[2];

	if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
			 ARRAY_SIZE(data)))
		return false;

	return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
}
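
/*
 * Putting the DP adapter helpers together, a typical (illustrative,
 * not a verbatim caller) bring-up of a DP tunnel endpoint would be:
 *
 *   if (tb_dp_port_hpd_is_active(port))
 *           tb_dp_port_hpd_clear(port);
 *   tb_dp_port_set_hops(port, video, aux_tx, aux_rx);
 *   tb_dp_port_enable(port, true);
 *
 * as the kernel-doc for tb_dp_port_enable() below notes, the Hop IDs
 * must be programmed before the paths are enabled.
 */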

/**
 * tb_dp_port_enable() - Enables/disables DP paths of a port
 * @port: DP IN/OUT port
 * @enable: Enable/disable DP path
 *
 * Once Hop IDs are programmed DP paths can be enabled or disabled by
 * calling this function.
 */
int tb_dp_port_enable(struct tb_port *port, bool enable)
{
	u32 data[2];
	int ret;

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	if (enable)
		data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
	else
		data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}

/* switch utility functions */

static const char *tb_switch_generation_name(const struct tb_switch *sw)
{
	switch (sw->generation) {
	case 1:
		return "Thunderbolt 1";
	case 2:
		return "Thunderbolt 2";
	case 3:
		return "Thunderbolt 3";
	case 4:
		return "USB4";
	default:
		return "Unknown";
	}
}

static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
{
	const struct tb_regs_switch_header *regs = &sw->config;

	tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
	       tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
	       regs->revision, regs->thunderbolt_version);
	tb_dbg(tb, "  Max Port Number: %d\n", regs->max_port_number);
	tb_dbg(tb, "  Config:\n");
	tb_dbg(tb,
	       "   Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
	       regs->upstream_port_number, regs->depth,
	       (((u64)regs->route_hi) << 32) | regs->route_lo,
	       regs->enabled, regs->plug_events_delay);
	tb_dbg(tb, "   unknown1: %#x unknown4: %#x\n",
	       regs->__unknown1, regs->__unknown4);
}

/**
 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_switch_reset(struct tb *tb, u64 route)
{
	struct tb_cfg_result res;
	struct tb_regs_switch_header header = {
		.route_hi = route >> 32,
		.route_lo = route,
		.enabled = true,
	};

	tb_dbg(tb, "resetting switch at %llx\n", route);
	res.err = tb_cfg_write(tb->ctl, ((u32 *)&header) + 2, route,
			       0, 2, 2, 2);
	if (res.err)
		return res.err;
	res = tb_cfg_reset(tb->ctl, route, TB_CFG_DEFAULT_TIMEOUT);
	if (res.err > 0)
		return -EIO;
	return res.err;
}
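
/*
 * Note on the write in tb_switch_reset() above (assuming the layout of
 * struct tb_regs_switch_header, where dwords 2-3 hold route_lo and
 * route_hi plus the enabled bit): ((u32 *)&header) + 2 together with
 * offset/length 2 targets exactly those two config space dwords, so the
 * route string is reprogrammed and the switch re-enabled before the
 * reset packet is sent.
 */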

/**
 * tb_plug_events_active() - enable/disable plug events on a switch
 *
 * Also configures a sane plug_events_delay of 255ms.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_plug_events_active(struct tb_switch *sw, bool active)
{
	u32 data;
	int res;

	if (tb_switch_is_icm(sw))
		return 0;

	sw->config.plug_events_delay = 0xff;
	res = tb_sw_write(sw, ((u32 *)&sw->config) + 4, TB_CFG_SWITCH, 4, 1);
	if (res)
		return res;

	/* Plug events are always enabled in USB4 */
	if (tb_switch_is_usb4(sw))
		return 0;

	res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
	if (res)
		return res;

	if (active) {
		data = data & 0xFFFFFF83;
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
			break;
		default:
			data |= 4;
		}
	} else {
		data = data | 0x7c;
	}
	return tb_sw_write(sw, &data, TB_CFG_SWITCH,
			   sw->cap_plug_events + 1, 1);
}

static ssize_t authorized_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->authorized);
}

static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
{
	int ret = -EINVAL;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->authorized)
		goto unlock;

	switch (val) {
	/* Approve switch */
	case 1:
		if (sw->key)
			ret = tb_domain_approve_switch_key(sw->tb, sw);
		else
			ret = tb_domain_approve_switch(sw->tb, sw);
		break;

	/* Challenge switch */
	case 2:
		if (sw->key)
			ret = tb_domain_challenge_switch_key(sw->tb, sw);
		break;

	default:
		break;
	}

	if (!ret) {
		sw->authorized = val;
		/* Notify status change to the userspace */
		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
	}

unlock:
	mutex_unlock(&sw->tb->lock);
	return ret;
}

static ssize_t authorized_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	unsigned int val;
	ssize_t ret;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;
	if (val > 2)
		return -EINVAL;

	pm_runtime_get_sync(&sw->dev);
	ret = tb_switch_set_authorized(sw, val);
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret ? ret : count;
}
static DEVICE_ATTR_RW(authorized);
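
/*
 * The authorized attribute above accepts (illustrative shell usage):
 *
 *   echo 1 > .../authorized    # approve, with key if one is stored
 *   echo 2 > .../authorized    # challenge using the stored key
 *
 * Once a switch has been authorized the value cannot be changed back;
 * tb_switch_set_authorized() rejects further writes.
 */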

static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->boot);
}
static DEVICE_ATTR_RO(boot);

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%#x\n", sw->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
}
static DEVICE_ATTR_RO(device_name);

static ssize_t
generation_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->generation);
}
static DEVICE_ATTR_RO(generation);

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	ssize_t ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->key)
		ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
	else
		ret = sprintf(buf, "\n");

	mutex_unlock(&sw->tb->lock);
	return ret;
}

static ssize_t key_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u8 key[TB_SWITCH_KEY_SIZE];
	ssize_t ret = count;
	bool clear = false;

	if (!strcmp(buf, "\n"))
		clear = true;
	else if (hex2bin(key, buf, sizeof(key)))
		return -EINVAL;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->authorized) {
		ret = -EBUSY;
	} else {
		kfree(sw->key);
		if (clear) {
			sw->key = NULL;
		} else {
			sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
			if (!sw->key)
				ret = -ENOMEM;
		}
	}

	mutex_unlock(&sw->tb->lock);
	return ret;
}
static DEVICE_ATTR(key, 0600, key_show, key_store);

static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed);
}

/*
 * Currently all lanes must run at the same speed but we expose here
 * both directions to allow possible asymmetric links in the future.
 */
static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);

static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->link_width);
}

/*
 * Currently the link has the same number of lanes in both directions
 * (1 or 2) but we expose them separately to allow possible asymmetric
 * links in the future.
 */
static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);

static ssize_t nvm_authenticate_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u32 status;

	nvm_get_auth_status(sw, &status);
	return sprintf(buf, "%#x\n", status);
}

static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
				      bool disconnect)
{
	struct tb_switch *sw = tb_to_switch(dev);
	int val;
	int ret;

	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto exit_rpm;
	}

	/* If NVMem devices are not yet added */
	if (!sw->nvm) {
		ret = -EAGAIN;
		goto exit_unlock;
	}

	ret = kstrtoint(buf, 10, &val);
	if (ret)
		goto exit_unlock;

	/* Always clear the authentication status */
	nvm_clear_auth_status(sw);

	if (val > 0) {
		if (!sw->nvm->flushed) {
			if (!sw->nvm->buf) {
				ret = -EINVAL;
				goto exit_unlock;
			}

			ret = nvm_validate_and_write(sw);
			if (ret || val == WRITE_ONLY)
				goto exit_unlock;
		}
		if (val == WRITE_AND_AUTHENTICATE) {
			if (disconnect) {
				ret = tb_lc_force_power(sw);
			} else {
				sw->nvm->authenticating = true;
				ret = nvm_authenticate(sw);
			}
		}
	}

exit_unlock:
	mutex_unlock(&sw->tb->lock);
exit_rpm:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}

static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	int ret = nvm_authenticate_sysfs(dev, buf, false);

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);

static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return nvm_authenticate_show(dev, attr, buf);
}

static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	int ret;

	ret = nvm_authenticate_sysfs(dev, buf, true);
	return ret ? ret : count;
}
static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect);

static ssize_t nvm_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	int ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->safe_mode)
		ret = -ENODATA;
	else if (!sw->nvm)
		ret = -EAGAIN;
	else
		ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);

	mutex_unlock(&sw->tb->lock);

	return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%#x\n", sw->vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
}
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%pUb\n", sw->uuid);
}
static DEVICE_ATTR_RO(unique_id);

static struct attribute *switch_attrs[] = {
	&dev_attr_authorized.attr,
	&dev_attr_boot.attr,
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_generation.attr,
	&dev_attr_key.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_authenticate_on_disconnect.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_rx_speed.attr,
	&dev_attr_rx_lanes.attr,
	&dev_attr_tx_speed.attr,
	&dev_attr_tx_lanes.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	&dev_attr_unique_id.attr,
	NULL,
};

static umode_t switch_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct tb_switch *sw = tb_to_switch(dev);

	if (attr == &dev_attr_device.attr) {
		if (!sw->device)
			return 0;
	} else if (attr == &dev_attr_device_name.attr) {
		if (!sw->device_name)
			return 0;
	} else if (attr == &dev_attr_vendor.attr) {
		if (!sw->vendor)
			return 0;
	} else if (attr == &dev_attr_vendor_name.attr) {
		if (!sw->vendor_name)
			return 0;
	} else if (attr == &dev_attr_key.attr) {
		if (tb_route(sw) &&
		    sw->tb->security_level == TB_SECURITY_SECURE &&
		    sw->security_level == TB_SECURITY_SECURE)
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_rx_speed.attr ||
		   attr == &dev_attr_rx_lanes.attr ||
		   attr == &dev_attr_tx_speed.attr ||
		   attr == &dev_attr_tx_lanes.attr) {
		if (tb_route(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_authenticate.attr) {
		if (nvm_upgradeable(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_version.attr) {
		if (nvm_readable(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_boot.attr) {
		if (tb_route(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) {
		if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
			return attr->mode;
		return 0;
	}

	return sw->safe_mode ? 0 : attr->mode;
}

static struct attribute_group switch_group = {
	.is_visible = switch_attr_is_visible,
	.attrs = switch_attrs,
};

static const struct attribute_group *switch_groups[] = {
	&switch_group,
	NULL,
};

static void tb_switch_release(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	struct tb_port *port;

	dma_port_free(sw->dma_port);

	tb_switch_for_each_port(sw, port) {
		if (!port->disabled) {
			ida_destroy(&port->in_hopids);
			ida_destroy(&port->out_hopids);
		}
	}

	kfree(sw->uuid);
	kfree(sw->device_name);
	kfree(sw->vendor_name);
	kfree(sw->ports);
	kfree(sw->drom);
	kfree(sw->key);
	kfree(sw);
}

/*
 * Currently only need to provide the callbacks. Everything else is handled
 * in the connection manager.
 */
static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;

	if (cm_ops->runtime_suspend_switch)
		return cm_ops->runtime_suspend_switch(sw);

	return 0;
}

static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;

	if (cm_ops->runtime_resume_switch)
		return cm_ops->runtime_resume_switch(sw);
	return 0;
}

static const struct dev_pm_ops tb_switch_pm_ops = {
	SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
			   NULL)
};

struct device_type tb_switch_type = {
	.name = "thunderbolt_device",
	.release = tb_switch_release,
	.pm = &tb_switch_pm_ops,
};

static int tb_switch_get_generation(struct tb_switch *sw)
{
	switch (sw->config.device_id) {
	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
	case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
	case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
	case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
		return 1;

	case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
		return 2;

	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ICL_NHI0:
	case PCI_DEVICE_ID_INTEL_ICL_NHI1:
		return 3;

	default:
		if (tb_switch_is_usb4(sw))
			return 4;

		/*
		 * For unknown switches assume generation to be 1 to be
		 * on the safe side.
		 */
		tb_sw_warn(sw, "unsupported switch device id %#x\n",
			   sw->config.device_id);
		return 1;
	}
}

static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
{
	int max_depth;

	if (tb_switch_is_usb4(sw) ||
	    (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
		max_depth = USB4_SWITCH_MAX_DEPTH;
	else
		max_depth = TB_SWITCH_MAX_DEPTH;

	return depth > max_depth;
}
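
/*
 * A typical (illustrative) lifecycle for the allocation functions
 * below, following their kernel-doc:
 *
 *   sw = tb_switch_alloc(tb, parent, route);
 *   if (IS_ERR(sw))
 *           return PTR_ERR(sw);
 *   ret = tb_switch_configure(sw);
 *   if (!ret)
 *           ret = tb_switch_add(sw);
 *   if (ret)
 *           tb_switch_put(sw);
 */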

/**
 * tb_switch_alloc() - allocate a switch
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * Allocates and initializes a switch. Will not upload configuration to
 * the switch. For that you need to call tb_switch_configure()
 * separately. The returned switch should be released by calling
 * tb_switch_put().
 *
 * Return: Pointer to the allocated switch or ERR_PTR() in case of
 * failure.
 */
struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
				  u64 route)
{
	struct tb_switch *sw;
	int upstream_port;
	int i, ret, depth;

	/* Unlock the downstream port so we can access the switch below */
	if (route) {
		struct tb_switch *parent_sw = tb_to_switch(parent);
		struct tb_port *down;

		down = tb_port_at(route, parent_sw);
		tb_port_unlock(down);
	}

	depth = tb_route_length(route);

	upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
	if (upstream_port < 0)
		return ERR_PTR(upstream_port);

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return ERR_PTR(-ENOMEM);

	sw->tb = tb;
	ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
	if (ret)
		goto err_free_sw_ports;

	sw->generation = tb_switch_get_generation(sw);

	tb_dbg(tb, "current switch config:\n");
	tb_dump_switch(tb, sw);

	/* configure switch */
	sw->config.upstream_port_number = upstream_port;
	sw->config.depth = depth;
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->config.enabled = 0;

	/* Make sure we do not exceed maximum topology limit */
	if (tb_switch_exceeds_max_depth(sw, depth)) {
		ret = -EADDRNOTAVAIL;
		goto err_free_sw_ports;
	}

	/* initialize ports */
	sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
			    GFP_KERNEL);
	if (!sw->ports) {
		ret = -ENOMEM;
		goto err_free_sw_ports;
	}

	for (i = 0; i <= sw->config.max_port_number; i++) {
		/* minimum setup for tb_find_cap and tb_drom_read to work */
		sw->ports[i].sw = sw;
		sw->ports[i].port = i;
	}

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
	if (ret > 0)
		sw->cap_plug_events = ret;

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
	if (ret > 0)
		sw->cap_lc = ret;

	/* Root switch is always authorized */
	if (!route)
		sw->authorized = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;

err_free_sw_ports:
	kfree(sw->ports);
	kfree(sw);

	return ERR_PTR(ret);
}

/**
 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * This creates a switch in safe mode. This means the switch pretty much
 * lacks all capabilities except DMA configuration port before it is
 * flashed with a valid NVM firmware.
 *
 * The returned switch must be released by calling tb_switch_put().
 *
 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
 */
struct tb_switch *
tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
{
	struct tb_switch *sw;

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return ERR_PTR(-ENOMEM);

	sw->tb = tb;
	sw->config.depth = tb_route_length(route);
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->safe_mode = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;
}

/**
 * tb_switch_configure() - Uploads configuration to the switch
 * @sw: Switch to configure
 *
 * Call this function before the switch is added to the system. It will
 * upload the configuration to the switch and make it available for the
 * connection manager to use. Can be called for the switch again after
 * resuming from low power states to re-initialize it.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_configure(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	u64 route;
	int ret;

	route = tb_route(sw);

	tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
	       sw->config.enabled ? "restoring" : "initializing", route,
	       tb_route_length(route), sw->config.upstream_port_number);

	sw->config.enabled = 1;

	if (tb_switch_is_usb4(sw)) {
		/*
		 * For USB4 devices, we need to program the CM version
		 * accordingly so that it knows to expose all the
		 * additional capabilities.
		 */
		sw->config.cmuv = USB4_VERSION_1_0;

		/* Enumerate the switch */
		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
				  ROUTER_CS_1, 4);
		if (ret)
			return ret;

		ret = usb4_switch_setup(sw);
		if (ret)
			return ret;

		ret = usb4_switch_configure_link(sw);
	} else {
		if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
			tb_sw_warn(sw, "unknown switch vendor id %#x\n",
				   sw->config.vendor_id);

		if (!sw->cap_plug_events) {
			tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
			return -ENODEV;
		}

		/* Enumerate the switch */
		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
				  ROUTER_CS_1, 3);
		if (ret)
			return ret;

		ret = tb_lc_configure_link(sw);
	}
	if (ret)
		return ret;

	return tb_plug_events_active(sw, true);
}
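
/*
 * UUID selection in tb_switch_set_uuid() below, in priority order (as
 * implemented here, not taken from a specification): a fused UUID read
 * from the link controller registers is used when available; otherwise
 * one is synthesized from the UID the same way ICM does (UID in the
 * low two words, 0xffffffff in the high two) so that both enumeration
 * paths agree on the device identity.
 */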

static int tb_switch_set_uuid(struct tb_switch *sw)
{
	bool uid = false;
	u32 uuid[4];
	int ret;

	if (sw->uuid)
		return 0;

	if (tb_switch_is_usb4(sw)) {
		ret = usb4_switch_read_uid(sw, &sw->uid);
		if (ret)
			return ret;
		uid = true;
	} else {
		/*
		 * The newer controllers include fused UUID as part of
		 * link controller specific registers
		 */
		ret = tb_lc_read_uuid(sw, uuid);
		if (ret) {
			if (ret != -EINVAL)
				return ret;
			uid = true;
		}
	}

	if (uid) {
		/*
		 * ICM generates UUID based on UID and fills the upper
		 * two words with ones. This is not strictly following
		 * UUID format but we want to be compatible with it so
		 * we do the same here.
		 */
		uuid[0] = sw->uid & 0xffffffff;
		uuid[1] = (sw->uid >> 32) & 0xffffffff;
		uuid[2] = 0xffffffff;
		uuid[3] = 0xffffffff;
	}

	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
	if (!sw->uuid)
		return -ENOMEM;
	return 0;
}

static int tb_switch_add_dma_port(struct tb_switch *sw)
{
	u32 status;
	int ret;

	switch (sw->generation) {
	case 2:
		/* Only root switch can be upgraded */
		if (tb_route(sw))
			return 0;

		fallthrough;
	case 3:
		ret = tb_switch_set_uuid(sw);
		if (ret)
			return ret;
		break;

	default:
		/*
		 * DMA port is the only thing available when the switch
		 * is in safe mode.
		 */
		if (!sw->safe_mode)
			return 0;
		break;
	}

	/* Root switch DMA port requires running firmware */
	if (!tb_route(sw) && !tb_switch_is_icm(sw))
		return 0;

	sw->dma_port = dma_port_alloc(sw);
	if (!sw->dma_port)
		return 0;

	if (sw->no_nvm_upgrade)
		return 0;

	/*
	 * If there is status already set then authentication failed
	 * when the dma_port_flash_update_auth() returned. Power cycling
	 * is not needed (it was done already) so the only thing we do
	 * here is unblock runtime PM of the root port.
	 */
	nvm_get_auth_status(sw, &status);
	if (status) {
		if (!tb_route(sw))
			nvm_authenticate_complete_dma_port(sw);
		return 0;
	}

	/*
	 * Check status of the previous flash authentication. If there
	 * is one we need to power cycle the switch in any case to make
	 * it functional again.
	 */
	ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
	if (ret <= 0)
		return ret;

	/* Now we can allow root port to suspend again */
	if (!tb_route(sw))
		nvm_authenticate_complete_dma_port(sw);

	if (status) {
		tb_sw_info(sw, "switch flash authentication failed\n");
		nvm_set_auth_status(sw, status);
	}

	tb_sw_info(sw, "power cycling the switch now\n");
	dma_port_power_cycle(sw->dma_port);

	/*
	 * We return an error here which causes the switch adding to
	 * fail. It should appear back after the power cycle is
	 * complete.
	 */
	return -ESHUTDOWN;
}

static int tb_switch_add_dma_port(struct tb_switch *sw)
{
	u32 status;
	int ret;

	switch (sw->generation) {
	case 2:
		/* Only root switch can be upgraded */
		if (tb_route(sw))
			return 0;

		fallthrough;
	case 3:
		ret = tb_switch_set_uuid(sw);
		if (ret)
			return ret;
		break;

	default:
		/*
		 * DMA port is the only thing available when the switch
		 * is in safe mode.
		 */
		if (!sw->safe_mode)
			return 0;
		break;
	}

	/* Root switch DMA port requires running firmware */
	if (!tb_route(sw) && !tb_switch_is_icm(sw))
		return 0;

	sw->dma_port = dma_port_alloc(sw);
	if (!sw->dma_port)
		return 0;

	if (sw->no_nvm_upgrade)
		return 0;

	/*
	 * If the status is already set then authentication failed when
	 * dma_port_flash_update_auth() returned. Power cycling is not
	 * needed (it was done already) so the only thing we do here is
	 * to unblock runtime PM of the root port.
	 */
	nvm_get_auth_status(sw, &status);
	if (status) {
		if (!tb_route(sw))
			nvm_authenticate_complete_dma_port(sw);
		return 0;
	}

	/*
	 * Check the status of the previous flash authentication. If
	 * there is one we need to power cycle the switch in any case to
	 * make it functional again.
	 */
	ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
	if (ret <= 0)
		return ret;

	/* Now we can allow root port to suspend again */
	if (!tb_route(sw))
		nvm_authenticate_complete_dma_port(sw);

	if (status) {
		tb_sw_info(sw, "switch flash authentication failed\n");
		nvm_set_auth_status(sw, status);
	}

	tb_sw_info(sw, "power cycling the switch now\n");
	dma_port_power_cycle(sw->dma_port);

	/*
	 * We return an error here, which causes adding the switch to
	 * fail. It should appear back after the power cycle is
	 * complete.
	 */
	return -ESHUTDOWN;
}

static void tb_switch_default_link_ports(struct tb_switch *sw)
{
	int i;

	for (i = 1; i <= sw->config.max_port_number; i += 2) {
		struct tb_port *port = &sw->ports[i];
		struct tb_port *subordinate;

		if (!tb_port_is_null(port))
			continue;

		/* Check for the subordinate port */
		if (i == sw->config.max_port_number ||
		    !tb_port_is_null(&sw->ports[i + 1]))
			continue;

		/* Link them if not already linked (e.g. by the DROM) */
		subordinate = &sw->ports[i + 1];
		if (!port->dual_link_port && !subordinate->dual_link_port) {
			port->link_nr = 0;
			port->dual_link_port = subordinate;
			subordinate->link_nr = 1;
			subordinate->dual_link_port = port;

			tb_sw_dbg(sw, "linked ports %d <-> %d\n",
				  port->port, subordinate->port);
		}
	}
}
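
/*
 * For example (hypothetical router): if ports 1 and 2 are both null
 * (lane) adapters and the DROM did not pair them already,
 * tb_switch_default_link_ports() marks port 1 as lane 0 and port 2 as
 * lane 1 of the same dual-link pair, so afterwards
 *
 *	sw->ports[1].link_nr == 0, sw->ports[1].dual_link_port == &sw->ports[2]
 *	sw->ports[2].link_nr == 1, sw->ports[2].dual_link_port == &sw->ports[1]
 */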

static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
{
	const struct tb_port *up = tb_upstream_port(sw);

	if (!up->dual_link_port || !up->dual_link_port->remote)
		return false;

	if (tb_switch_is_usb4(sw))
		return usb4_switch_lane_bonding_possible(sw);
	return tb_lc_lane_bonding_possible(sw);
}

static int tb_switch_update_link_attributes(struct tb_switch *sw)
{
	struct tb_port *up;
	bool change = false;
	int ret;

	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return 0;

	up = tb_upstream_port(sw);

	ret = tb_port_get_link_speed(up);
	if (ret < 0)
		return ret;
	if (sw->link_speed != ret)
		change = true;
	sw->link_speed = ret;

	ret = tb_port_get_link_width(up);
	if (ret < 0)
		return ret;
	if (sw->link_width != ret)
		change = true;
	sw->link_width = ret;

	/* Notify userspace that there is possible link attribute change */
	if (device_is_registered(&sw->dev) && change)
		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);

	return 0;
}

/**
 * tb_switch_lane_bonding_enable() - Enable lane bonding
 * @sw: Switch to enable lane bonding
 *
 * The connection manager can call this function to enable lane bonding
 * of a switch. If the conditions are correct and both switches support
 * the feature, the lanes are bonded. It is safe to call this for any
 * switch.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_lane_bonding_enable(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_to_switch(sw->dev.parent);
	struct tb_port *up, *down;
	u64 route = tb_route(sw);
	int ret;

	if (!route)
		return 0;

	if (!tb_switch_lane_bonding_possible(sw))
		return 0;

	up = tb_upstream_port(sw);
	down = tb_port_at(route, parent);

	if (!tb_port_is_width_supported(up, 2) ||
	    !tb_port_is_width_supported(down, 2))
		return 0;

	ret = tb_port_lane_bonding_enable(up);
	if (ret) {
		tb_port_warn(up, "failed to enable lane bonding\n");
		return ret;
	}

	ret = tb_port_lane_bonding_enable(down);
	if (ret) {
		tb_port_warn(down, "failed to enable lane bonding\n");
		tb_port_lane_bonding_disable(up);
		return ret;
	}

	tb_switch_update_link_attributes(sw);

	tb_sw_dbg(sw, "lane bonding enabled\n");
	return ret;
}

/**
 * tb_switch_lane_bonding_disable() - Disable lane bonding
 * @sw: Switch whose lane bonding to disable
 *
 * Disables lane bonding between @sw and its parent switch. This can be
 * called even if the lanes were not bonded originally.
 */
void tb_switch_lane_bonding_disable(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_to_switch(sw->dev.parent);
	struct tb_port *up, *down;

	if (!tb_route(sw))
		return;

	up = tb_upstream_port(sw);
	if (!up->bonded)
		return;

	down = tb_port_at(tb_route(sw), parent);

	tb_port_lane_bonding_disable(up);
	tb_port_lane_bonding_disable(down);

	tb_switch_update_link_attributes(sw);
	tb_sw_dbg(sw, "lane bonding disabled\n");
}
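
/*
 * Illustrative usage (sketch, not part of the driver): a connection
 * manager typically tries to enable bonding right after it has
 * enumerated a device router and disables it when the link is being
 * torn down. Both calls are safe even when bonding is not possible or
 * was never enabled.
 *
 *	if (tb_switch_lane_bonding_enable(sw))
 *		tb_sw_warn(sw, "failed to enable lane bonding\n");
 *	...
 *	tb_switch_lane_bonding_disable(sw);
 */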

/**
 * tb_switch_add() - Add a switch to the domain
 * @sw: Switch to add
 *
 * This is the last step in adding a switch to the domain. It reads
 * identification information from the DROM and initializes the ports
 * so that they can be used to connect other switches. The switch is
 * exposed to userspace when this function returns successfully. To
 * remove and release the switch, call tb_switch_remove().
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_add(struct tb_switch *sw)
{
	int i, ret;

	/*
	 * Initialize the DMA control port now, before we read the DROM.
	 * Recent host controllers have a more complete DROM on NVM that
	 * includes vendor and model identification strings, which we
	 * then expose to userspace. The NVM can be accessed through the
	 * DMA configuration based mailbox.
	 */
	ret = tb_switch_add_dma_port(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add DMA port\n");
		return ret;
	}

	if (!sw->safe_mode) {
		/* Read DROM */
		ret = tb_drom_read(sw);
		if (ret) {
			dev_err(&sw->dev, "reading DROM failed\n");
			return ret;
		}
		tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);

		ret = tb_switch_set_uuid(sw);
		if (ret) {
			dev_err(&sw->dev, "failed to set UUID\n");
			return ret;
		}

		for (i = 0; i <= sw->config.max_port_number; i++) {
			if (sw->ports[i].disabled) {
				tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
				continue;
			}
			ret = tb_init_port(&sw->ports[i]);
			if (ret) {
				dev_err(&sw->dev, "failed to initialize port %d\n", i);
				return ret;
			}
		}

		tb_switch_default_link_ports(sw);

		ret = tb_switch_update_link_attributes(sw);
		if (ret)
			return ret;

		ret = tb_switch_tmu_init(sw);
		if (ret)
			return ret;
	}

	ret = device_add(&sw->dev);
	if (ret) {
		dev_err(&sw->dev, "failed to add device: %d\n", ret);
		return ret;
	}

	if (tb_route(sw)) {
		dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
			 sw->vendor, sw->device);
		if (sw->vendor_name && sw->device_name)
			dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
				 sw->device_name);
	}

	ret = tb_switch_nvm_add(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add NVM devices\n");
		device_del(&sw->dev);
		return ret;
	}

	pm_runtime_set_active(&sw->dev);
	if (sw->rpm) {
		pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
		pm_runtime_use_autosuspend(&sw->dev);
		pm_runtime_mark_last_busy(&sw->dev);
		pm_runtime_enable(&sw->dev);
		pm_request_autosuspend(&sw->dev);
	}

	return 0;
}

/**
 * tb_switch_remove() - Remove and release a switch
 * @sw: Switch to remove
 *
 * This removes the switch from the domain and releases it once the
 * last reference to it is dropped. If there are switches connected
 * below this switch, they will be removed as well.
 */
void tb_switch_remove(struct tb_switch *sw)
{
	struct tb_port *port;

	if (sw->rpm) {
		pm_runtime_get_sync(&sw->dev);
		pm_runtime_disable(&sw->dev);
	}

	/* port 0 is the switch itself and never has a remote */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port)) {
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
		} else if (port->xdomain) {
			tb_xdomain_remove(port->xdomain);
			port->xdomain = NULL;
		}

		/* Remove any downstream retimers */
		tb_retimer_remove_all(port);
	}

	if (!sw->is_unplugged)
		tb_plug_events_active(sw, false);

	if (tb_switch_is_usb4(sw))
		usb4_switch_unconfigure_link(sw);
	else
		tb_lc_unconfigure_link(sw);

	tb_switch_nvm_remove(sw);

	if (tb_route(sw))
		dev_info(&sw->dev, "device disconnected\n");
	device_unregister(&sw->dev);
}
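
/*
 * Illustrative teardown sketch (assumes a hot-unplug handler): when a
 * router disappears, the connection manager first marks the subtree
 * unplugged with tb_sw_set_unplugged() (below) so that
 * tb_switch_remove() skips talking to hardware that is already gone.
 *
 *	tb_sw_set_unplugged(port->remote->sw);
 *	tb_switch_remove(port->remote->sw);
 *	port->remote = NULL;
 */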

/**
 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
 * @sw: Switch to mark unplugged
 */
void tb_sw_set_unplugged(struct tb_switch *sw)
{
	struct tb_port *port;

	if (sw == sw->tb->root_switch) {
		tb_sw_WARN(sw, "cannot unplug root switch\n");
		return;
	}
	if (sw->is_unplugged) {
		tb_sw_WARN(sw, "is_unplugged already set\n");
		return;
	}
	sw->is_unplugged = true;
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_sw_set_unplugged(port->remote->sw);
		else if (port->xdomain)
			port->xdomain->is_unplugged = true;
	}
}

int tb_switch_resume(struct tb_switch *sw)
{
	struct tb_port *port;
	int err;

	tb_sw_dbg(sw, "resuming switch\n");

	/*
	 * Check the UID of the connected switches, except for the root
	 * switch which we assume cannot be removed.
	 */
	if (tb_route(sw)) {
		u64 uid;

		/*
		 * Check first that we can still read the switch config
		 * space. It may be that there is now another domain
		 * connected.
		 */
		err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
		if (err < 0) {
			tb_sw_info(sw, "switch not present anymore\n");
			return err;
		}

		if (tb_switch_is_usb4(sw))
			err = usb4_switch_read_uid(sw, &uid);
		else
			err = tb_drom_read_uid_only(sw, &uid);
		if (err) {
			tb_sw_warn(sw, "uid read failed\n");
			return err;
		}
		if (sw->uid != uid) {
			tb_sw_info(sw,
				"changed while suspended (uid %#llx -> %#llx)\n",
				sw->uid, uid);
			return -ENODEV;
		}
	}

	err = tb_switch_configure(sw);
	if (err)
		return err;

	/* Check for surviving downstream switches */
	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port) && !port->xdomain)
			continue;

		if (tb_wait_for_port(port, true) <= 0) {
			tb_port_warn(port,
				     "lost during suspend, disconnecting\n");
			if (tb_port_has_remote(port))
				tb_sw_set_unplugged(port->remote->sw);
			else if (port->xdomain)
				port->xdomain->is_unplugged = true;
		} else if (tb_port_has_remote(port) || port->xdomain) {
			/*
			 * Always unlock the port so the downstream
			 * switch/domain is accessible.
			 */
			if (tb_port_unlock(port))
				tb_port_warn(port, "failed to unlock port\n");
			if (port->remote && tb_switch_resume(port->remote->sw)) {
				tb_port_warn(port,
					     "lost during suspend, disconnecting\n");
				tb_sw_set_unplugged(port->remote->sw);
			}
		}
	}
	return 0;
}

void tb_switch_suspend(struct tb_switch *sw)
{
	struct tb_port *port;
	int err;

	err = tb_plug_events_active(sw, false);
	if (err)
		return;

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_switch_suspend(port->remote->sw);
	}

	if (tb_switch_is_usb4(sw))
		usb4_switch_set_sleep(sw);
	else
		tb_lc_set_sleep(sw);
}
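
/*
 * Sketch of the expected pairing (illustrative, mirroring how a
 * connection manager drives these from its domain suspend/resume
 * hooks): the whole tree is suspended starting from the root switch
 * and walked back up on resume.
 *
 *	tb_switch_suspend(tb->root_switch);
 *	...
 *	tb_switch_resume(tb->root_switch);
 *
 * tb_switch_resume() returns a negative errno if the topology changed
 * while suspended.
 */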

/**
 * tb_switch_query_dp_resource() - Query availability of DP resource
 * @sw: Switch whose DP resource is queried
 * @in: DP IN port
 *
 * Queries availability of DP resource for DP tunneling using switch
 * specific means. Returns %true if the resource is available.
 */
bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_query_dp_resource(sw, in);
	return tb_lc_dp_sink_query(sw, in);
}

/**
 * tb_switch_alloc_dp_resource() - Allocate available DP resource
 * @sw: Switch whose DP resource is allocated
 * @in: DP IN port
 *
 * Allocates DP resource for DP tunneling. The resource must be
 * available for this to succeed (see tb_switch_query_dp_resource()).
 * Returns %0 in case of success and negative errno otherwise.
 */
int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_alloc_dp_resource(sw, in);
	return tb_lc_dp_sink_alloc(sw, in);
}

/**
 * tb_switch_dealloc_dp_resource() - De-allocate DP resource
 * @sw: Switch whose DP resource is de-allocated
 * @in: DP IN port
 *
 * De-allocates DP resource that was previously allocated for DP
 * tunneling.
 */
void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	int ret;

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_dealloc_dp_resource(sw, in);
	else
		ret = tb_lc_dp_sink_dealloc(sw, in);

	if (ret)
		tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
			   in->port);
}
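
/*
 * Illustrative DP tunneling sketch (assumes @in is a DP IN adapter on
 * @sw and hypothetical error handling): the resource is queried first,
 * held for the lifetime of the tunnel and released when the tunnel is
 * torn down.
 *
 *	if (!tb_switch_query_dp_resource(sw, in))
 *		return -EBUSY;
 *	ret = tb_switch_alloc_dp_resource(sw, in);
 *	if (ret)
 *		return ret;
 *	...
 *	tb_switch_dealloc_dp_resource(sw, in);
 */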

struct tb_sw_lookup {
	struct tb *tb;
	u8 link;
	u8 depth;
	const uuid_t *uuid;
	u64 route;
};

static int tb_switch_match(struct device *dev, const void *data)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_sw_lookup *lookup = data;

	if (!sw)
		return 0;
	if (sw->tb != lookup->tb)
		return 0;

	if (lookup->uuid)
		return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));

	if (lookup->route) {
		return sw->config.route_lo == lower_32_bits(lookup->route) &&
		       sw->config.route_hi == upper_32_bits(lookup->route);
	}

	/* Root switch is matched only by depth */
	if (!lookup->depth)
		return !sw->depth;

	return sw->link == lookup->link && sw->depth == lookup->depth;
}

/**
 * tb_switch_find_by_link_depth() - Find switch by link and depth
 * @tb: Domain the switch belongs to
 * @link: Link number the switch is connected to
 * @depth: Depth of the switch in the link
 *
 * The returned switch has its reference count increased, so the caller
 * needs to call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.link = link;
	lookup.depth = depth;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_by_uuid() - Find switch by UUID
 * @tb: Domain the switch belongs to
 * @uuid: UUID to look for
 *
 * The returned switch has its reference count increased, so the caller
 * needs to call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.uuid = uuid;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_by_route() - Find switch by route string
 * @tb: Domain the switch belongs to
 * @route: Route string to look for
 *
 * The returned switch has its reference count increased, so the caller
 * needs to call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	if (!route)
		return tb_switch_get(tb->root_switch);

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.route = route;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_port() - return the first port of @type on @sw or NULL
 * @sw: Switch to find the port from
 * @type: Port type to look for
 */
struct tb_port *tb_switch_find_port(struct tb_switch *sw,
				    enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (port->config.type == type)
			return port;
	}

	return NULL;
}
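
/*
 * Illustrative lookup usage (sketch): all of the find helpers above
 * return a referenced switch, so the result must be released with
 * tb_switch_put(). TB_TYPE_DP_HDMI_IN is assumed here to be the DP IN
 * adapter type from enum tb_port_type.
 *
 *	sw = tb_switch_find_by_route(tb, route);
 *	if (sw) {
 *		struct tb_port *in;
 *
 *		in = tb_switch_find_port(sw, TB_TYPE_DP_HDMI_IN);
 *		if (in)
 *			tb_port_dbg(in, "found DP IN adapter\n");
 *		tb_switch_put(sw);
 *	}
 */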