// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - Tunneling support
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/ktime.h>
#include <linux/string_helpers.h>

#include "tunnel.h"
#include "tb.h"

/* PCIe adapters always use HopID 8 for both directions */
#define TB_PCI_HOPID			8

#define TB_PCI_PATH_DOWN		0
#define TB_PCI_PATH_UP			1

#define TB_PCI_PRIORITY			3
#define TB_PCI_WEIGHT			1

/* USB3 adapters always use HopID 8 for both directions */
#define TB_USB3_HOPID			8

#define TB_USB3_PATH_DOWN		0
#define TB_USB3_PATH_UP			1

#define TB_USB3_PRIORITY		3
#define TB_USB3_WEIGHT			2

/* DP adapters use HopID 8 for AUX and 9 for Video */
#define TB_DP_AUX_TX_HOPID		8
#define TB_DP_AUX_RX_HOPID		8
#define TB_DP_VIDEO_HOPID		9

#define TB_DP_VIDEO_PATH_OUT		0
#define TB_DP_AUX_PATH_OUT		1
#define TB_DP_AUX_PATH_IN		2

#define TB_DP_VIDEO_PRIORITY		1
#define TB_DP_VIDEO_WEIGHT		1

#define TB_DP_AUX_PRIORITY		2
#define TB_DP_AUX_WEIGHT		1

/* Minimum number of credits needed for PCIe path */
#define TB_MIN_PCIE_CREDITS		6U
/*
 * Number of credits we try to allocate for each DMA path if not limited
 * by the host router baMaxHI.
 */
#define TB_DMA_CREDITS			14
/* Minimum number of credits for DMA path */
#define TB_MIN_DMA_CREDITS		1

#define TB_DMA_PRIORITY			5
#define TB_DMA_WEIGHT			1

/*
 * Reserve additional bandwidth for USB 3.x and PCIe bulk traffic
 * according to the USB4 v2 Connection Manager guide. This ends up
 * reserving 1500 Mb/s for PCIe and 3000 Mb/s for USB 3.x taking the
 * weights into account.
 */
#define USB4_V2_PCI_MIN_BANDWIDTH	(1500 * TB_PCI_WEIGHT)
#define USB4_V2_USB3_MIN_BANDWIDTH	(1500 * TB_USB3_WEIGHT)

/*
 * According to the VESA spec, the DPRX negotiation shall complete
 * within 5 seconds after the tunnel is established. Since at least
 * i915 can runtime suspend if there is nothing connected, and it polls
 * any new connections every 10 seconds, we use 12 seconds here.
 *
 * These are in ms.
80 */ 81 #define TB_DPRX_TIMEOUT 12000 82 #define TB_DPRX_WAIT_TIMEOUT 25 83 #define TB_DPRX_POLL_DELAY 50 84 85 static int dprx_timeout = TB_DPRX_TIMEOUT; 86 module_param(dprx_timeout, int, 0444); 87 MODULE_PARM_DESC(dprx_timeout, 88 "DPRX capability read timeout in ms, -1 waits forever (default: " 89 __MODULE_STRING(TB_DPRX_TIMEOUT) ")"); 90 91 static unsigned int dma_credits = TB_DMA_CREDITS; 92 module_param(dma_credits, uint, 0444); 93 MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: " 94 __MODULE_STRING(TB_DMA_CREDITS) ")"); 95 96 static bool bw_alloc_mode = true; 97 module_param(bw_alloc_mode, bool, 0444); 98 MODULE_PARM_DESC(bw_alloc_mode, 99 "enable bandwidth allocation mode if supported (default: true)"); 100 101 static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" }; 102 103 static const char * const tb_event_names[] = { 104 [TB_TUNNEL_ACTIVATED] = "activated", 105 [TB_TUNNEL_CHANGED] = "changed", 106 [TB_TUNNEL_DEACTIVATED] = "deactivated", 107 [TB_TUNNEL_LOW_BANDWIDTH] = "low bandwidth", 108 [TB_TUNNEL_NO_BANDWIDTH] = "insufficient bandwidth", 109 }; 110 111 /* Synchronizes kref_get()/put() of struct tb_tunnel */ 112 static DEFINE_MUTEX(tb_tunnel_lock); 113 114 static inline unsigned int tb_usable_credits(const struct tb_port *port) 115 { 116 return port->total_credits - port->ctl_credits; 117 } 118 119 /** 120 * tb_available_credits() - Available credits for PCIe and DMA 121 * @port: Lane adapter to check 122 * @max_dp_streams: If non-%NULL stores maximum number of simultaneous DP 123 * streams possible through this lane adapter 124 */ 125 static unsigned int tb_available_credits(const struct tb_port *port, 126 size_t *max_dp_streams) 127 { 128 const struct tb_switch *sw = port->sw; 129 int credits, usb3, pcie, spare; 130 size_t ndp; 131 132 usb3 = tb_acpi_may_tunnel_usb3() ? sw->max_usb3_credits : 0; 133 pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0; 134 135 if (tb_acpi_is_xdomain_allowed()) { 136 spare = min_not_zero(sw->max_dma_credits, dma_credits); 137 /* Add some credits for potential second DMA tunnel */ 138 spare += TB_MIN_DMA_CREDITS; 139 } else { 140 spare = 0; 141 } 142 143 credits = tb_usable_credits(port); 144 if (tb_acpi_may_tunnel_dp()) { 145 /* 146 * Maximum number of DP streams possible through the 147 * lane adapter. 148 */ 149 if (sw->min_dp_aux_credits + sw->min_dp_main_credits) 150 ndp = (credits - (usb3 + pcie + spare)) / 151 (sw->min_dp_aux_credits + sw->min_dp_main_credits); 152 else 153 ndp = 0; 154 } else { 155 ndp = 0; 156 } 157 credits -= ndp * (sw->min_dp_aux_credits + sw->min_dp_main_credits); 158 credits -= usb3; 159 160 if (max_dp_streams) 161 *max_dp_streams = ndp; 162 163 return credits > 0 ? 
credits : 0; 164 } 165 166 static void tb_init_pm_support(struct tb_path_hop *hop) 167 { 168 struct tb_port *out_port = hop->out_port; 169 struct tb_port *in_port = hop->in_port; 170 171 if (tb_port_is_null(in_port) && tb_port_is_null(out_port) && 172 usb4_switch_version(in_port->sw) >= 2) 173 hop->pm_support = true; 174 } 175 176 static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths, 177 enum tb_tunnel_type type) 178 { 179 struct tb_tunnel *tunnel; 180 181 tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL); 182 if (!tunnel) 183 return NULL; 184 185 tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL); 186 if (!tunnel->paths) { 187 kfree(tunnel); 188 return NULL; 189 } 190 191 INIT_LIST_HEAD(&tunnel->list); 192 tunnel->tb = tb; 193 tunnel->npaths = npaths; 194 tunnel->type = type; 195 kref_init(&tunnel->kref); 196 197 return tunnel; 198 } 199 200 static void tb_tunnel_get(struct tb_tunnel *tunnel) 201 { 202 mutex_lock(&tb_tunnel_lock); 203 kref_get(&tunnel->kref); 204 mutex_unlock(&tb_tunnel_lock); 205 } 206 207 static void tb_tunnel_destroy(struct kref *kref) 208 { 209 struct tb_tunnel *tunnel = container_of(kref, typeof(*tunnel), kref); 210 int i; 211 212 if (tunnel->destroy) 213 tunnel->destroy(tunnel); 214 215 for (i = 0; i < tunnel->npaths; i++) { 216 if (tunnel->paths[i]) 217 tb_path_free(tunnel->paths[i]); 218 } 219 220 kfree(tunnel->paths); 221 kfree(tunnel); 222 } 223 224 void tb_tunnel_put(struct tb_tunnel *tunnel) 225 { 226 mutex_lock(&tb_tunnel_lock); 227 kref_put(&tunnel->kref, tb_tunnel_destroy); 228 mutex_unlock(&tb_tunnel_lock); 229 } 230 231 /** 232 * tb_tunnel_event() - Notify userspace about tunneling event 233 * @tb: Domain where the event occurred 234 * @event: Event that happened 235 * @type: Type of the tunnel in question 236 * @src_port: Tunnel source port (can be %NULL) 237 * @dst_port: Tunnel destination port (can be %NULL) 238 * 239 * Notifies userspace about tunneling @event in the domain. The tunnel 240 * does not need to exist (e.g the tunnel was not activated because 241 * there is not enough bandwidth). If the @src_port and @dst_port are 242 * given fill in full %TUNNEL_DETAILS environment variable. Otherwise 243 * uses the shorter one (just the tunnel type). 
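 *
 * As an illustration (hypothetical route and adapter numbers), the
 * resulting uevent environment for a DP tunnel between the host router
 * and a directly connected device router could look like:
 *
 *	TUNNEL_EVENT=activated
 *	TUNNEL_DETAILS=0:12 <-> 1:13 (DP)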
 */
void tb_tunnel_event(struct tb *tb, enum tb_tunnel_event event,
		     enum tb_tunnel_type type,
		     const struct tb_port *src_port,
		     const struct tb_port *dst_port)
{
	char *envp[3] = { NULL };

	if (WARN_ON_ONCE(event >= ARRAY_SIZE(tb_event_names)))
		return;
	if (WARN_ON_ONCE(type >= ARRAY_SIZE(tb_tunnel_names)))
		return;

	envp[0] = kasprintf(GFP_KERNEL, "TUNNEL_EVENT=%s", tb_event_names[event]);
	if (!envp[0])
		return;

	if (src_port != NULL && dst_port != NULL) {
		envp[1] = kasprintf(GFP_KERNEL, "TUNNEL_DETAILS=%llx:%u <-> %llx:%u (%s)",
				    tb_route(src_port->sw), src_port->port,
				    tb_route(dst_port->sw), dst_port->port,
				    tb_tunnel_names[type]);
	} else {
		envp[1] = kasprintf(GFP_KERNEL, "TUNNEL_DETAILS=(%s)",
				    tb_tunnel_names[type]);
	}

	if (envp[1])
		tb_domain_event(tb, envp);

	kfree(envp[1]);
	kfree(envp[0]);
}

static inline void tb_tunnel_set_active(struct tb_tunnel *tunnel, bool active)
{
	if (active) {
		tunnel->state = TB_TUNNEL_ACTIVE;
		tb_tunnel_event(tunnel->tb, TB_TUNNEL_ACTIVATED, tunnel->type,
				tunnel->src_port, tunnel->dst_port);
	} else {
		tunnel->state = TB_TUNNEL_INACTIVE;
		tb_tunnel_event(tunnel->tb, TB_TUNNEL_DEACTIVATED, tunnel->type,
				tunnel->src_port, tunnel->dst_port);
	}
}

static inline void tb_tunnel_changed(struct tb_tunnel *tunnel)
{
	tb_tunnel_event(tunnel->tb, TB_TUNNEL_CHANGED, tunnel->type,
			tunnel->src_port, tunnel->dst_port);
}

static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable)
{
	struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
	int ret;

	/* Only supported if both routers are at least USB4 v2 */
	if ((usb4_switch_version(tunnel->src_port->sw) < 2) ||
	    (usb4_switch_version(tunnel->dst_port->sw) < 2))
		return 0;

	if (enable && tb_port_get_link_generation(port) < 4)
		return 0;

	ret = usb4_pci_port_set_ext_encapsulation(tunnel->src_port, enable);
	if (ret)
		return ret;

	/*
	 * The downstream router could be unplugged, so disabling
	 * encapsulation in the upstream router alone is still possible.
	 */
	ret = usb4_pci_port_set_ext_encapsulation(tunnel->dst_port, enable);
	if (ret) {
		if (enable)
			return ret;
		if (ret != -ENODEV)
			return ret;
	}

	tb_tunnel_dbg(tunnel, "extended encapsulation %s\n",
		      str_enabled_disabled(enable));
	return 0;
}

static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	if (activate) {
		res = tb_pci_set_ext_encapsulation(tunnel, activate);
		if (res)
			return res;
	}

	if (activate)
		res = tb_pci_port_enable(tunnel->dst_port, activate);
	else
		res = tb_pci_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (activate) {
		res = tb_pci_port_enable(tunnel->src_port, activate);
		if (res)
			return res;
	} else {
		/* Downstream router could be unplugged */
		tb_pci_port_enable(tunnel->dst_port, activate);
	}

	return activate ?
0 : tb_pci_set_ext_encapsulation(tunnel, activate); 359 } 360 361 static int tb_pci_init_credits(struct tb_path_hop *hop) 362 { 363 struct tb_port *port = hop->in_port; 364 struct tb_switch *sw = port->sw; 365 unsigned int credits; 366 367 if (tb_port_use_credit_allocation(port)) { 368 unsigned int available; 369 370 available = tb_available_credits(port, NULL); 371 credits = min(sw->max_pcie_credits, available); 372 373 if (credits < TB_MIN_PCIE_CREDITS) 374 return -ENOSPC; 375 376 credits = max(TB_MIN_PCIE_CREDITS, credits); 377 } else { 378 if (tb_port_is_null(port)) 379 credits = port->bonded ? 32 : 16; 380 else 381 credits = 7; 382 } 383 384 hop->initial_credits = credits; 385 return 0; 386 } 387 388 static int tb_pci_init_path(struct tb_path *path) 389 { 390 struct tb_path_hop *hop; 391 392 path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL; 393 path->egress_shared_buffer = TB_PATH_NONE; 394 path->ingress_fc_enable = TB_PATH_ALL; 395 path->ingress_shared_buffer = TB_PATH_NONE; 396 path->priority = TB_PCI_PRIORITY; 397 path->weight = TB_PCI_WEIGHT; 398 path->drop_packages = 0; 399 400 tb_path_for_each_hop(path, hop) { 401 int ret; 402 403 ret = tb_pci_init_credits(hop); 404 if (ret) 405 return ret; 406 } 407 408 return 0; 409 } 410 411 /** 412 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels 413 * @tb: Pointer to the domain structure 414 * @down: PCIe downstream adapter 415 * @alloc_hopid: Allocate HopIDs from visited ports 416 * 417 * If @down adapter is active, follows the tunnel to the PCIe upstream 418 * adapter and back. Returns the discovered tunnel or %NULL if there was 419 * no tunnel. 420 */ 421 struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down, 422 bool alloc_hopid) 423 { 424 struct tb_tunnel *tunnel; 425 struct tb_path *path; 426 427 if (!tb_pci_port_is_enabled(down)) 428 return NULL; 429 430 tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI); 431 if (!tunnel) 432 return NULL; 433 434 tunnel->activate = tb_pci_activate; 435 tunnel->src_port = down; 436 437 /* 438 * Discover both paths even if they are not complete. We will 439 * clean them up by calling tb_tunnel_deactivate() below in that 440 * case. 
441 */ 442 path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1, 443 &tunnel->dst_port, "PCIe Up", alloc_hopid); 444 if (!path) { 445 /* Just disable the downstream port */ 446 tb_pci_port_enable(down, false); 447 goto err_free; 448 } 449 tunnel->paths[TB_PCI_PATH_UP] = path; 450 if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP])) 451 goto err_free; 452 453 path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL, 454 "PCIe Down", alloc_hopid); 455 if (!path) 456 goto err_deactivate; 457 tunnel->paths[TB_PCI_PATH_DOWN] = path; 458 if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN])) 459 goto err_deactivate; 460 461 /* Validate that the tunnel is complete */ 462 if (!tb_port_is_pcie_up(tunnel->dst_port)) { 463 tb_port_warn(tunnel->dst_port, 464 "path does not end on a PCIe adapter, cleaning up\n"); 465 goto err_deactivate; 466 } 467 468 if (down != tunnel->src_port) { 469 tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n"); 470 goto err_deactivate; 471 } 472 473 if (!tb_pci_port_is_enabled(tunnel->dst_port)) { 474 tb_tunnel_warn(tunnel, 475 "tunnel is not fully activated, cleaning up\n"); 476 goto err_deactivate; 477 } 478 479 tb_tunnel_dbg(tunnel, "discovered\n"); 480 return tunnel; 481 482 err_deactivate: 483 tb_tunnel_deactivate(tunnel); 484 err_free: 485 tb_tunnel_put(tunnel); 486 487 return NULL; 488 } 489 490 /** 491 * tb_tunnel_alloc_pci() - allocate a pci tunnel 492 * @tb: Pointer to the domain structure 493 * @up: PCIe upstream adapter port 494 * @down: PCIe downstream adapter port 495 * 496 * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and 497 * TB_TYPE_PCIE_DOWN. 498 * 499 * Return: Returns a tb_tunnel on success or NULL on failure. 500 */ 501 struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up, 502 struct tb_port *down) 503 { 504 struct tb_tunnel *tunnel; 505 struct tb_path *path; 506 507 tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI); 508 if (!tunnel) 509 return NULL; 510 511 tunnel->activate = tb_pci_activate; 512 tunnel->src_port = down; 513 tunnel->dst_port = up; 514 515 path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0, 516 "PCIe Down"); 517 if (!path) 518 goto err_free; 519 tunnel->paths[TB_PCI_PATH_DOWN] = path; 520 if (tb_pci_init_path(path)) 521 goto err_free; 522 523 path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0, 524 "PCIe Up"); 525 if (!path) 526 goto err_free; 527 tunnel->paths[TB_PCI_PATH_UP] = path; 528 if (tb_pci_init_path(path)) 529 goto err_free; 530 531 return tunnel; 532 533 err_free: 534 tb_tunnel_put(tunnel); 535 return NULL; 536 } 537 538 /** 539 * tb_tunnel_reserved_pci() - Amount of bandwidth to reserve for PCIe 540 * @port: Lane 0 adapter 541 * @reserved_up: Upstream bandwidth in Mb/s to reserve 542 * @reserved_down: Downstream bandwidth in Mb/s to reserve 543 * 544 * Can be called to any connected lane 0 adapter to find out how much 545 * bandwidth needs to be left in reserve for possible PCIe bulk traffic. 546 * Returns true if there is something to be reserved and writes the 547 * amount to @reserved_down/@reserved_up. Otherwise returns false and 548 * does not touch the parameters. 
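 *
 * With the defaults at the top of this file (TB_PCI_WEIGHT of 1) this
 * amounts to USB4_V2_PCI_MIN_BANDWIDTH = 1500 Mb/s in each direction,
 * and only on USB4 v2 (Gen 4) links; older links reserve nothing.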
549 */ 550 bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up, 551 int *reserved_down) 552 { 553 if (WARN_ON_ONCE(!port->remote)) 554 return false; 555 556 if (!tb_acpi_may_tunnel_pcie()) 557 return false; 558 559 if (tb_port_get_link_generation(port) < 4) 560 return false; 561 562 /* Must have PCIe adapters */ 563 if (tb_is_upstream_port(port)) { 564 if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_UP)) 565 return false; 566 if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_DOWN)) 567 return false; 568 } else { 569 if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_DOWN)) 570 return false; 571 if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_UP)) 572 return false; 573 } 574 575 *reserved_up = USB4_V2_PCI_MIN_BANDWIDTH; 576 *reserved_down = USB4_V2_PCI_MIN_BANDWIDTH; 577 578 tb_port_dbg(port, "reserving %u/%u Mb/s for PCIe\n", *reserved_up, 579 *reserved_down); 580 return true; 581 } 582 583 static bool tb_dp_is_usb4(const struct tb_switch *sw) 584 { 585 /* Titan Ridge DP adapters need the same treatment as USB4 */ 586 return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw); 587 } 588 589 static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out, 590 int timeout_msec) 591 { 592 ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec); 593 u32 val; 594 int ret; 595 596 /* Both ends need to support this */ 597 if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw)) 598 return 0; 599 600 ret = tb_port_read(out, &val, TB_CFG_PORT, 601 out->cap_adap + DP_STATUS_CTRL, 1); 602 if (ret) 603 return ret; 604 605 val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS; 606 607 ret = tb_port_write(out, &val, TB_CFG_PORT, 608 out->cap_adap + DP_STATUS_CTRL, 1); 609 if (ret) 610 return ret; 611 612 do { 613 ret = tb_port_read(out, &val, TB_CFG_PORT, 614 out->cap_adap + DP_STATUS_CTRL, 1); 615 if (ret) 616 return ret; 617 if (!(val & DP_STATUS_CTRL_CMHS)) 618 return 0; 619 usleep_range(100, 150); 620 } while (ktime_before(ktime_get(), timeout)); 621 622 return -ETIMEDOUT; 623 } 624 625 /* 626 * Returns maximum possible rate from capability supporting only DP 2.0 627 * and below. Used when DP BW allocation mode is not enabled. 628 */ 629 static inline u32 tb_dp_cap_get_rate(u32 val) 630 { 631 u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT; 632 633 switch (rate) { 634 case DP_COMMON_CAP_RATE_RBR: 635 return 1620; 636 case DP_COMMON_CAP_RATE_HBR: 637 return 2700; 638 case DP_COMMON_CAP_RATE_HBR2: 639 return 5400; 640 case DP_COMMON_CAP_RATE_HBR3: 641 return 8100; 642 default: 643 return 0; 644 } 645 } 646 647 /* 648 * Returns maximum possible rate from capability supporting DP 2.1 649 * UHBR20, 13.5 and 10 rates as well. Use only when DP BW allocation 650 * mode is enabled. 
651 */ 652 static inline u32 tb_dp_cap_get_rate_ext(u32 val) 653 { 654 if (val & DP_COMMON_CAP_UHBR20) 655 return 20000; 656 else if (val & DP_COMMON_CAP_UHBR13_5) 657 return 13500; 658 else if (val & DP_COMMON_CAP_UHBR10) 659 return 10000; 660 661 return tb_dp_cap_get_rate(val); 662 } 663 664 static inline bool tb_dp_is_uhbr_rate(unsigned int rate) 665 { 666 return rate >= 10000; 667 } 668 669 static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate) 670 { 671 val &= ~DP_COMMON_CAP_RATE_MASK; 672 switch (rate) { 673 default: 674 WARN(1, "invalid rate %u passed, defaulting to 1620 MB/s\n", rate); 675 fallthrough; 676 case 1620: 677 val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT; 678 break; 679 case 2700: 680 val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT; 681 break; 682 case 5400: 683 val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT; 684 break; 685 case 8100: 686 val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT; 687 break; 688 } 689 return val; 690 } 691 692 static inline u32 tb_dp_cap_get_lanes(u32 val) 693 { 694 u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT; 695 696 switch (lanes) { 697 case DP_COMMON_CAP_1_LANE: 698 return 1; 699 case DP_COMMON_CAP_2_LANES: 700 return 2; 701 case DP_COMMON_CAP_4_LANES: 702 return 4; 703 default: 704 return 0; 705 } 706 } 707 708 static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes) 709 { 710 val &= ~DP_COMMON_CAP_LANES_MASK; 711 switch (lanes) { 712 default: 713 WARN(1, "invalid number of lanes %u passed, defaulting to 1\n", 714 lanes); 715 fallthrough; 716 case 1: 717 val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT; 718 break; 719 case 2: 720 val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT; 721 break; 722 case 4: 723 val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT; 724 break; 725 } 726 return val; 727 } 728 729 static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes) 730 { 731 /* Tunneling removes the DP 8b/10b 128/132b encoding */ 732 if (tb_dp_is_uhbr_rate(rate)) 733 return rate * lanes * 128 / 132; 734 return rate * lanes * 8 / 10; 735 } 736 737 static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes, 738 u32 out_rate, u32 out_lanes, u32 *new_rate, 739 u32 *new_lanes) 740 { 741 static const u32 dp_bw[][2] = { 742 /* Mb/s, lanes */ 743 { 8100, 4 }, /* 25920 Mb/s */ 744 { 5400, 4 }, /* 17280 Mb/s */ 745 { 8100, 2 }, /* 12960 Mb/s */ 746 { 2700, 4 }, /* 8640 Mb/s */ 747 { 5400, 2 }, /* 8640 Mb/s */ 748 { 8100, 1 }, /* 6480 Mb/s */ 749 { 1620, 4 }, /* 5184 Mb/s */ 750 { 5400, 1 }, /* 4320 Mb/s */ 751 { 2700, 2 }, /* 4320 Mb/s */ 752 { 1620, 2 }, /* 2592 Mb/s */ 753 { 2700, 1 }, /* 2160 Mb/s */ 754 { 1620, 1 }, /* 1296 Mb/s */ 755 }; 756 unsigned int i; 757 758 /* 759 * Find a combination that can fit into max_bw and does not 760 * exceed the maximum rate and lanes supported by the DP OUT and 761 * DP IN adapters. 
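 *
 * For example (illustrative numbers): if both adapters support HBR3 x4
 * but max_bw is 16000 Mb/s, the 25920 and 17280 Mb/s entries are
 * skipped and the search settles on 8100 Mb/s x2 = 12960 Mb/s.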
762 */ 763 for (i = 0; i < ARRAY_SIZE(dp_bw); i++) { 764 if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes) 765 continue; 766 767 if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes) 768 continue; 769 770 if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) { 771 *new_rate = dp_bw[i][0]; 772 *new_lanes = dp_bw[i][1]; 773 return 0; 774 } 775 } 776 777 return -ENOSR; 778 } 779 780 static int tb_dp_xchg_caps(struct tb_tunnel *tunnel) 781 { 782 u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw; 783 struct tb_port *out = tunnel->dst_port; 784 struct tb_port *in = tunnel->src_port; 785 int ret, max_bw; 786 787 /* 788 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for 789 * newer generation hardware. 790 */ 791 if (in->sw->generation < 2 || out->sw->generation < 2) 792 return 0; 793 794 /* 795 * Perform connection manager handshake between IN and OUT ports 796 * before capabilities exchange can take place. 797 */ 798 ret = tb_dp_cm_handshake(in, out, 3000); 799 if (ret) 800 return ret; 801 802 /* Read both DP_LOCAL_CAP registers */ 803 ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT, 804 in->cap_adap + DP_LOCAL_CAP, 1); 805 if (ret) 806 return ret; 807 808 ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT, 809 out->cap_adap + DP_LOCAL_CAP, 1); 810 if (ret) 811 return ret; 812 813 /* Write IN local caps to OUT remote caps */ 814 ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT, 815 out->cap_adap + DP_REMOTE_CAP, 1); 816 if (ret) 817 return ret; 818 819 in_rate = tb_dp_cap_get_rate(in_dp_cap); 820 in_lanes = tb_dp_cap_get_lanes(in_dp_cap); 821 tb_tunnel_dbg(tunnel, 822 "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", 823 in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes)); 824 825 /* 826 * If the tunnel bandwidth is limited (max_bw is set) then see 827 * if we need to reduce bandwidth to fit there. 828 */ 829 out_rate = tb_dp_cap_get_rate(out_dp_cap); 830 out_lanes = tb_dp_cap_get_lanes(out_dp_cap); 831 bw = tb_dp_bandwidth(out_rate, out_lanes); 832 tb_tunnel_dbg(tunnel, 833 "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", 834 out_rate, out_lanes, bw); 835 836 if (tb_tunnel_direction_downstream(tunnel)) 837 max_bw = tunnel->max_down; 838 else 839 max_bw = tunnel->max_up; 840 841 if (max_bw && bw > max_bw) { 842 u32 new_rate, new_lanes, new_bw; 843 844 ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes, 845 out_rate, out_lanes, &new_rate, 846 &new_lanes); 847 if (ret) { 848 tb_tunnel_info(tunnel, "not enough bandwidth\n"); 849 return ret; 850 } 851 852 new_bw = tb_dp_bandwidth(new_rate, new_lanes); 853 tb_tunnel_dbg(tunnel, 854 "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n", 855 new_rate, new_lanes, new_bw); 856 857 /* 858 * Set new rate and number of lanes before writing it to 859 * the IN port remote caps. 860 */ 861 out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate); 862 out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes); 863 } 864 865 /* 866 * Titan Ridge does not disable AUX timers when it gets 867 * SET_CONFIG with SET_LTTPR_MODE set. This causes problems with 868 * DP tunneling. 
869 */ 870 if (tb_route(out->sw) && tb_switch_is_titan_ridge(out->sw)) { 871 out_dp_cap |= DP_COMMON_CAP_LTTPR_NS; 872 tb_tunnel_dbg(tunnel, "disabling LTTPR\n"); 873 } 874 875 return tb_port_write(in, &out_dp_cap, TB_CFG_PORT, 876 in->cap_adap + DP_REMOTE_CAP, 1); 877 } 878 879 static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel) 880 { 881 int ret, estimated_bw, granularity, tmp; 882 struct tb_port *out = tunnel->dst_port; 883 struct tb_port *in = tunnel->src_port; 884 u32 out_dp_cap, out_rate, out_lanes; 885 u32 in_dp_cap, in_rate, in_lanes; 886 u32 rate, lanes; 887 888 if (!bw_alloc_mode) 889 return 0; 890 891 ret = usb4_dp_port_set_cm_bandwidth_mode_supported(in, true); 892 if (ret) 893 return ret; 894 895 ret = usb4_dp_port_set_group_id(in, in->group->index); 896 if (ret) 897 return ret; 898 899 /* 900 * Get the non-reduced rate and lanes based on the lowest 901 * capability of both adapters. 902 */ 903 ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT, 904 in->cap_adap + DP_LOCAL_CAP, 1); 905 if (ret) 906 return ret; 907 908 ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT, 909 out->cap_adap + DP_LOCAL_CAP, 1); 910 if (ret) 911 return ret; 912 913 in_rate = tb_dp_cap_get_rate(in_dp_cap); 914 in_lanes = tb_dp_cap_get_lanes(in_dp_cap); 915 out_rate = tb_dp_cap_get_rate(out_dp_cap); 916 out_lanes = tb_dp_cap_get_lanes(out_dp_cap); 917 918 rate = min(in_rate, out_rate); 919 lanes = min(in_lanes, out_lanes); 920 tmp = tb_dp_bandwidth(rate, lanes); 921 922 tb_tunnel_dbg(tunnel, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n", 923 rate, lanes, tmp); 924 925 ret = usb4_dp_port_set_nrd(in, rate, lanes); 926 if (ret) 927 return ret; 928 929 /* 930 * Pick up granularity that supports maximum possible bandwidth. 931 * For that we use the UHBR rates too. 932 */ 933 in_rate = tb_dp_cap_get_rate_ext(in_dp_cap); 934 out_rate = tb_dp_cap_get_rate_ext(out_dp_cap); 935 rate = min(in_rate, out_rate); 936 tmp = tb_dp_bandwidth(rate, lanes); 937 938 tb_tunnel_dbg(tunnel, 939 "maximum bandwidth through allocation mode %u Mb/s x%u = %u Mb/s\n", 940 rate, lanes, tmp); 941 942 for (granularity = 250; tmp / granularity > 255 && granularity <= 1000; 943 granularity *= 2) 944 ; 945 946 tb_tunnel_dbg(tunnel, "granularity %d Mb/s\n", granularity); 947 948 /* 949 * Returns -EINVAL if granularity above is outside of the 950 * accepted ranges. 951 */ 952 ret = usb4_dp_port_set_granularity(in, granularity); 953 if (ret) 954 return ret; 955 956 /* 957 * Bandwidth estimation is pretty much what we have in 958 * max_up/down fields. For discovery we just read what the 959 * estimation was set to. 
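 *
 * As a worked example of the granularity selection above (assumed
 * capabilities): UHBR20 x4 is roughly 77575 Mb/s, which does not fit
 * the 8-bit bandwidth fields at 250 Mb/s granularity (77575 / 250 >
 * 255), so the loop settles on 500 Mb/s.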
960 */ 961 if (tb_tunnel_direction_downstream(tunnel)) 962 estimated_bw = tunnel->max_down; 963 else 964 estimated_bw = tunnel->max_up; 965 966 tb_tunnel_dbg(tunnel, "estimated bandwidth %d Mb/s\n", estimated_bw); 967 968 ret = usb4_dp_port_set_estimated_bandwidth(in, estimated_bw); 969 if (ret) 970 return ret; 971 972 /* Initial allocation should be 0 according the spec */ 973 ret = usb4_dp_port_allocate_bandwidth(in, 0); 974 if (ret) 975 return ret; 976 977 tb_tunnel_dbg(tunnel, "bandwidth allocation mode enabled\n"); 978 return 0; 979 } 980 981 static int tb_dp_pre_activate(struct tb_tunnel *tunnel) 982 { 983 struct tb_port *in = tunnel->src_port; 984 struct tb_switch *sw = in->sw; 985 struct tb *tb = in->sw->tb; 986 int ret; 987 988 ret = tb_dp_xchg_caps(tunnel); 989 if (ret) 990 return ret; 991 992 if (!tb_switch_is_usb4(sw)) 993 return 0; 994 995 if (!usb4_dp_port_bandwidth_mode_supported(in)) 996 return 0; 997 998 tb_tunnel_dbg(tunnel, "bandwidth allocation mode supported\n"); 999 1000 ret = usb4_dp_port_set_cm_id(in, tb->index); 1001 if (ret) 1002 return ret; 1003 1004 return tb_dp_bandwidth_alloc_mode_enable(tunnel); 1005 } 1006 1007 static void tb_dp_post_deactivate(struct tb_tunnel *tunnel) 1008 { 1009 struct tb_port *in = tunnel->src_port; 1010 1011 if (!usb4_dp_port_bandwidth_mode_supported(in)) 1012 return; 1013 if (usb4_dp_port_bandwidth_mode_enabled(in)) { 1014 usb4_dp_port_set_cm_bandwidth_mode_supported(in, false); 1015 tb_tunnel_dbg(tunnel, "bandwidth allocation mode disabled\n"); 1016 } 1017 } 1018 1019 static ktime_t dprx_timeout_to_ktime(int timeout_msec) 1020 { 1021 return timeout_msec >= 0 ? 1022 ktime_add_ms(ktime_get(), timeout_msec) : KTIME_MAX; 1023 } 1024 1025 static int tb_dp_wait_dprx(struct tb_tunnel *tunnel, int timeout_msec) 1026 { 1027 ktime_t timeout = dprx_timeout_to_ktime(timeout_msec); 1028 struct tb_port *in = tunnel->src_port; 1029 1030 /* 1031 * Wait for DPRX done. Normally it should be already set for 1032 * active tunnel. 1033 */ 1034 do { 1035 u32 val; 1036 int ret; 1037 1038 ret = tb_port_read(in, &val, TB_CFG_PORT, 1039 in->cap_adap + DP_COMMON_CAP, 1); 1040 if (ret) 1041 return ret; 1042 1043 if (val & DP_COMMON_CAP_DPRX_DONE) 1044 return 0; 1045 1046 usleep_range(100, 150); 1047 } while (ktime_before(ktime_get(), timeout)); 1048 1049 tb_tunnel_dbg(tunnel, "DPRX read timeout\n"); 1050 return -ETIMEDOUT; 1051 } 1052 1053 static void tb_dp_dprx_work(struct work_struct *work) 1054 { 1055 struct tb_tunnel *tunnel = container_of(work, typeof(*tunnel), dprx_work.work); 1056 struct tb *tb = tunnel->tb; 1057 1058 if (!tunnel->dprx_canceled) { 1059 mutex_lock(&tb->lock); 1060 if (tb_dp_is_usb4(tunnel->src_port->sw) && 1061 tb_dp_wait_dprx(tunnel, TB_DPRX_WAIT_TIMEOUT)) { 1062 if (ktime_before(ktime_get(), tunnel->dprx_timeout)) { 1063 queue_delayed_work(tb->wq, &tunnel->dprx_work, 1064 msecs_to_jiffies(TB_DPRX_POLL_DELAY)); 1065 mutex_unlock(&tb->lock); 1066 return; 1067 } 1068 } else { 1069 tb_tunnel_set_active(tunnel, true); 1070 } 1071 mutex_unlock(&tb->lock); 1072 } 1073 1074 if (tunnel->callback) 1075 tunnel->callback(tunnel, tunnel->callback_data); 1076 } 1077 1078 static int tb_dp_dprx_start(struct tb_tunnel *tunnel) 1079 { 1080 /* 1081 * Bump up the reference to keep the tunnel around. It will be 1082 * dropped in tb_dp_dprx_stop() once the tunnel is deactivated. 
 */
	tb_tunnel_get(tunnel);

	tunnel->dprx_started = true;

	if (tunnel->callback) {
		tunnel->dprx_timeout = dprx_timeout_to_ktime(dprx_timeout);
		queue_delayed_work(tunnel->tb->wq, &tunnel->dprx_work, 0);
		return -EINPROGRESS;
	}

	return tb_dp_is_usb4(tunnel->src_port->sw) ?
		tb_dp_wait_dprx(tunnel, dprx_timeout) : 0;
}

static void tb_dp_dprx_stop(struct tb_tunnel *tunnel)
{
	if (tunnel->dprx_started) {
		tunnel->dprx_started = false;
		tunnel->dprx_canceled = true;
		cancel_delayed_work(&tunnel->dprx_work);
		tb_tunnel_put(tunnel);
	}
}

static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
	int ret;

	if (active) {
		struct tb_path **paths;
		int last;

		paths = tunnel->paths;
		last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;

		tb_dp_port_set_hops(tunnel->src_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);

		tb_dp_port_set_hops(tunnel->dst_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
	} else {
		tb_dp_dprx_stop(tunnel);
		tb_dp_port_hpd_clear(tunnel->src_port);
		tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
		if (tb_port_is_dpout(tunnel->dst_port))
			tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
	}

	ret = tb_dp_port_enable(tunnel->src_port, active);
	if (ret)
		return ret;

	if (tb_port_is_dpout(tunnel->dst_port)) {
		ret = tb_dp_port_enable(tunnel->dst_port, active);
		if (ret)
			return ret;
	}

	return active ? tb_dp_dprx_start(tunnel) : 0;
}

/**
 * tb_dp_bandwidth_mode_maximum_bandwidth() - Maximum possible bandwidth
 * @tunnel: DP tunnel to check
 * @max_bw_rounded: Maximum bandwidth in Mb/s rounded up to the next granularity
 *
 * Returns maximum possible bandwidth for this tunnel in Mb/s.
 */
static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
						  int *max_bw_rounded)
{
	struct tb_port *in = tunnel->src_port;
	int ret, rate, lanes, max_bw;
	u32 cap;

	/*
	 * The DP IN adapter DP_LOCAL_CAP gets updated to the lowest
	 * AUX-read parameter values, so we can use it to determine the
	 * maximum possible bandwidth over this link.
	 *
	 * See USB4 v2 spec 1.0 10.4.4.5.
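	 *
	 * For example (assumed capabilities): HBR3 x4 in DP_LOCAL_CAP
	 * gives 8100 * 4 * 8 / 10 = 25920 Mb/s, and with a granularity
	 * of 250 Mb/s @max_bw_rounded becomes 26000 Mb/s.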
1169 */ 1170 ret = tb_port_read(in, &cap, TB_CFG_PORT, 1171 in->cap_adap + DP_LOCAL_CAP, 1); 1172 if (ret) 1173 return ret; 1174 1175 rate = tb_dp_cap_get_rate_ext(cap); 1176 lanes = tb_dp_cap_get_lanes(cap); 1177 1178 max_bw = tb_dp_bandwidth(rate, lanes); 1179 1180 if (max_bw_rounded) { 1181 ret = usb4_dp_port_granularity(in); 1182 if (ret < 0) 1183 return ret; 1184 *max_bw_rounded = roundup(max_bw, ret); 1185 } 1186 1187 return max_bw; 1188 } 1189 1190 static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel, 1191 int *consumed_up, 1192 int *consumed_down) 1193 { 1194 struct tb_port *in = tunnel->src_port; 1195 int ret, allocated_bw, max_bw_rounded; 1196 1197 if (!usb4_dp_port_bandwidth_mode_enabled(in)) 1198 return -EOPNOTSUPP; 1199 1200 if (!tunnel->bw_mode) 1201 return -EOPNOTSUPP; 1202 1203 /* Read what was allocated previously if any */ 1204 ret = usb4_dp_port_allocated_bandwidth(in); 1205 if (ret < 0) 1206 return ret; 1207 allocated_bw = ret; 1208 1209 ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded); 1210 if (ret < 0) 1211 return ret; 1212 if (allocated_bw == max_bw_rounded) 1213 allocated_bw = ret; 1214 1215 if (tb_tunnel_direction_downstream(tunnel)) { 1216 *consumed_up = 0; 1217 *consumed_down = allocated_bw; 1218 } else { 1219 *consumed_up = allocated_bw; 1220 *consumed_down = 0; 1221 } 1222 1223 return 0; 1224 } 1225 1226 static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up, 1227 int *allocated_down) 1228 { 1229 struct tb_port *in = tunnel->src_port; 1230 1231 /* 1232 * If we have already set the allocated bandwidth then use that. 1233 * Otherwise we read it from the DPRX. 1234 */ 1235 if (usb4_dp_port_bandwidth_mode_enabled(in) && tunnel->bw_mode) { 1236 int ret, allocated_bw, max_bw_rounded; 1237 1238 ret = usb4_dp_port_allocated_bandwidth(in); 1239 if (ret < 0) 1240 return ret; 1241 allocated_bw = ret; 1242 1243 ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, 1244 &max_bw_rounded); 1245 if (ret < 0) 1246 return ret; 1247 if (allocated_bw == max_bw_rounded) 1248 allocated_bw = ret; 1249 1250 if (tb_tunnel_direction_downstream(tunnel)) { 1251 *allocated_up = 0; 1252 *allocated_down = allocated_bw; 1253 } else { 1254 *allocated_up = allocated_bw; 1255 *allocated_down = 0; 1256 } 1257 return 0; 1258 } 1259 1260 return tunnel->consumed_bandwidth(tunnel, allocated_up, 1261 allocated_down); 1262 } 1263 1264 static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up, 1265 int *alloc_down) 1266 { 1267 struct tb_port *in = tunnel->src_port; 1268 int max_bw_rounded, ret, tmp; 1269 1270 if (!usb4_dp_port_bandwidth_mode_enabled(in)) 1271 return -EOPNOTSUPP; 1272 1273 ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded); 1274 if (ret < 0) 1275 return ret; 1276 1277 if (tb_tunnel_direction_downstream(tunnel)) { 1278 tmp = min(*alloc_down, max_bw_rounded); 1279 ret = usb4_dp_port_allocate_bandwidth(in, tmp); 1280 if (ret) 1281 return ret; 1282 *alloc_down = tmp; 1283 *alloc_up = 0; 1284 } else { 1285 tmp = min(*alloc_up, max_bw_rounded); 1286 ret = usb4_dp_port_allocate_bandwidth(in, tmp); 1287 if (ret) 1288 return ret; 1289 *alloc_down = 0; 1290 *alloc_up = tmp; 1291 } 1292 1293 /* Now we can use BW mode registers to figure out the bandwidth */ 1294 /* TODO: need to handle discovery too */ 1295 tunnel->bw_mode = true; 1296 return 0; 1297 } 1298 1299 /* Read cap from tunnel DP IN */ 1300 static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate, 1301 u32 *lanes) 
1302 { 1303 struct tb_port *in = tunnel->src_port; 1304 u32 val; 1305 int ret; 1306 1307 switch (cap) { 1308 case DP_LOCAL_CAP: 1309 case DP_REMOTE_CAP: 1310 case DP_COMMON_CAP: 1311 break; 1312 1313 default: 1314 tb_tunnel_WARN(tunnel, "invalid capability index %#x\n", cap); 1315 return -EINVAL; 1316 } 1317 1318 /* 1319 * Read from the copied remote cap so that we take into account 1320 * if capabilities were reduced during exchange. 1321 */ 1322 ret = tb_port_read(in, &val, TB_CFG_PORT, in->cap_adap + cap, 1); 1323 if (ret) 1324 return ret; 1325 1326 *rate = tb_dp_cap_get_rate(val); 1327 *lanes = tb_dp_cap_get_lanes(val); 1328 return 0; 1329 } 1330 1331 static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up, 1332 int *max_down) 1333 { 1334 int ret; 1335 1336 if (!usb4_dp_port_bandwidth_mode_enabled(tunnel->src_port)) 1337 return -EOPNOTSUPP; 1338 1339 ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, NULL); 1340 if (ret < 0) 1341 return ret; 1342 1343 if (tb_tunnel_direction_downstream(tunnel)) { 1344 *max_up = 0; 1345 *max_down = ret; 1346 } else { 1347 *max_up = ret; 1348 *max_down = 0; 1349 } 1350 1351 return 0; 1352 } 1353 1354 static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up, 1355 int *consumed_down) 1356 { 1357 const struct tb_switch *sw = tunnel->src_port->sw; 1358 u32 rate = 0, lanes = 0; 1359 int ret; 1360 1361 if (tb_dp_is_usb4(sw)) { 1362 ret = tb_dp_wait_dprx(tunnel, 0); 1363 if (ret) { 1364 if (ret == -ETIMEDOUT) { 1365 /* 1366 * While we wait for DPRX complete the 1367 * tunnel consumes as much as it had 1368 * been reserved initially. 1369 */ 1370 ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, 1371 &rate, &lanes); 1372 if (ret) 1373 return ret; 1374 } else { 1375 return ret; 1376 } 1377 } else { 1378 /* 1379 * On USB4 routers check if the bandwidth allocation 1380 * mode is enabled first and then read the bandwidth 1381 * through those registers. 
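 * If the allocation mode is not enabled we fall back to the
 * negotiated DP_COMMON_CAP below: for example (assumed link),
 * HBR2 x4 reports 5400 * 4 * 8 / 10 = 17280 Mb/s consumed in
 * the direction of the stream.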
1382 */ 1383 ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up, 1384 consumed_down); 1385 if (ret < 0) { 1386 if (ret != -EOPNOTSUPP) 1387 return ret; 1388 } else if (!ret) { 1389 return 0; 1390 } 1391 ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes); 1392 if (ret) 1393 return ret; 1394 } 1395 } else if (sw->generation >= 2) { 1396 ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, &rate, &lanes); 1397 if (ret) 1398 return ret; 1399 } else { 1400 /* No bandwidth management for legacy devices */ 1401 *consumed_up = 0; 1402 *consumed_down = 0; 1403 return 0; 1404 } 1405 1406 if (tb_tunnel_direction_downstream(tunnel)) { 1407 *consumed_up = 0; 1408 *consumed_down = tb_dp_bandwidth(rate, lanes); 1409 } else { 1410 *consumed_up = tb_dp_bandwidth(rate, lanes); 1411 *consumed_down = 0; 1412 } 1413 1414 return 0; 1415 } 1416 1417 static void tb_dp_init_aux_credits(struct tb_path_hop *hop) 1418 { 1419 struct tb_port *port = hop->in_port; 1420 struct tb_switch *sw = port->sw; 1421 1422 if (tb_port_use_credit_allocation(port)) 1423 hop->initial_credits = sw->min_dp_aux_credits; 1424 else 1425 hop->initial_credits = 1; 1426 } 1427 1428 static void tb_dp_init_aux_path(struct tb_path *path, bool pm_support) 1429 { 1430 struct tb_path_hop *hop; 1431 1432 path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL; 1433 path->egress_shared_buffer = TB_PATH_NONE; 1434 path->ingress_fc_enable = TB_PATH_ALL; 1435 path->ingress_shared_buffer = TB_PATH_NONE; 1436 path->priority = TB_DP_AUX_PRIORITY; 1437 path->weight = TB_DP_AUX_WEIGHT; 1438 1439 tb_path_for_each_hop(path, hop) { 1440 tb_dp_init_aux_credits(hop); 1441 if (pm_support) 1442 tb_init_pm_support(hop); 1443 } 1444 } 1445 1446 static int tb_dp_init_video_credits(struct tb_path_hop *hop) 1447 { 1448 struct tb_port *port = hop->in_port; 1449 struct tb_switch *sw = port->sw; 1450 1451 if (tb_port_use_credit_allocation(port)) { 1452 unsigned int nfc_credits; 1453 size_t max_dp_streams; 1454 1455 tb_available_credits(port, &max_dp_streams); 1456 /* 1457 * Read the number of currently allocated NFC credits 1458 * from the lane adapter. Since we only use them for DP 1459 * tunneling we can use that to figure out how many DP 1460 * tunnels already go through the lane adapter. 
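 * In other words, nfc_credits / min_dp_main_credits below is the
 * number of DP tunnels already set up through this adapter; if
 * that already exceeds max_dp_streams there is no room for
 * another one and -ENOSPC is returned.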
1461 */ 1462 nfc_credits = port->config.nfc_credits & 1463 ADP_CS_4_NFC_BUFFERS_MASK; 1464 if (nfc_credits / sw->min_dp_main_credits > max_dp_streams) 1465 return -ENOSPC; 1466 1467 hop->nfc_credits = sw->min_dp_main_credits; 1468 } else { 1469 hop->nfc_credits = min(port->total_credits - 2, 12U); 1470 } 1471 1472 return 0; 1473 } 1474 1475 static int tb_dp_init_video_path(struct tb_path *path, bool pm_support) 1476 { 1477 struct tb_path_hop *hop; 1478 1479 path->egress_fc_enable = TB_PATH_NONE; 1480 path->egress_shared_buffer = TB_PATH_NONE; 1481 path->ingress_fc_enable = TB_PATH_NONE; 1482 path->ingress_shared_buffer = TB_PATH_NONE; 1483 path->priority = TB_DP_VIDEO_PRIORITY; 1484 path->weight = TB_DP_VIDEO_WEIGHT; 1485 1486 tb_path_for_each_hop(path, hop) { 1487 int ret; 1488 1489 ret = tb_dp_init_video_credits(hop); 1490 if (ret) 1491 return ret; 1492 if (pm_support) 1493 tb_init_pm_support(hop); 1494 } 1495 1496 return 0; 1497 } 1498 1499 static void tb_dp_dump(struct tb_tunnel *tunnel) 1500 { 1501 struct tb_port *in, *out; 1502 u32 dp_cap, rate, lanes; 1503 1504 in = tunnel->src_port; 1505 out = tunnel->dst_port; 1506 1507 if (tb_port_read(in, &dp_cap, TB_CFG_PORT, 1508 in->cap_adap + DP_LOCAL_CAP, 1)) 1509 return; 1510 1511 rate = tb_dp_cap_get_rate(dp_cap); 1512 lanes = tb_dp_cap_get_lanes(dp_cap); 1513 1514 tb_tunnel_dbg(tunnel, 1515 "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", 1516 rate, lanes, tb_dp_bandwidth(rate, lanes)); 1517 1518 if (tb_port_read(out, &dp_cap, TB_CFG_PORT, 1519 out->cap_adap + DP_LOCAL_CAP, 1)) 1520 return; 1521 1522 rate = tb_dp_cap_get_rate(dp_cap); 1523 lanes = tb_dp_cap_get_lanes(dp_cap); 1524 1525 tb_tunnel_dbg(tunnel, 1526 "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", 1527 rate, lanes, tb_dp_bandwidth(rate, lanes)); 1528 1529 if (tb_port_read(in, &dp_cap, TB_CFG_PORT, 1530 in->cap_adap + DP_REMOTE_CAP, 1)) 1531 return; 1532 1533 rate = tb_dp_cap_get_rate(dp_cap); 1534 lanes = tb_dp_cap_get_lanes(dp_cap); 1535 1536 tb_tunnel_dbg(tunnel, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n", 1537 rate, lanes, tb_dp_bandwidth(rate, lanes)); 1538 } 1539 1540 /** 1541 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels 1542 * @tb: Pointer to the domain structure 1543 * @in: DP in adapter 1544 * @alloc_hopid: Allocate HopIDs from visited ports 1545 * 1546 * If @in adapter is active, follows the tunnel to the DP out adapter 1547 * and back. Returns the discovered tunnel or %NULL if there was no 1548 * tunnel. 1549 * 1550 * Return: DP tunnel or %NULL if no tunnel found. 
1551 */ 1552 struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in, 1553 bool alloc_hopid) 1554 { 1555 struct tb_tunnel *tunnel; 1556 struct tb_port *port; 1557 struct tb_path *path; 1558 1559 if (!tb_dp_port_is_enabled(in)) 1560 return NULL; 1561 1562 tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP); 1563 if (!tunnel) 1564 return NULL; 1565 1566 tunnel->pre_activate = tb_dp_pre_activate; 1567 tunnel->activate = tb_dp_activate; 1568 tunnel->post_deactivate = tb_dp_post_deactivate; 1569 tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth; 1570 tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth; 1571 tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth; 1572 tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth; 1573 tunnel->src_port = in; 1574 1575 path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1, 1576 &tunnel->dst_port, "Video", alloc_hopid); 1577 if (!path) { 1578 /* Just disable the DP IN port */ 1579 tb_dp_port_enable(in, false); 1580 goto err_free; 1581 } 1582 tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path; 1583 if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], false)) 1584 goto err_free; 1585 1586 path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX", 1587 alloc_hopid); 1588 if (!path) 1589 goto err_deactivate; 1590 tunnel->paths[TB_DP_AUX_PATH_OUT] = path; 1591 tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT], false); 1592 1593 path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID, 1594 &port, "AUX RX", alloc_hopid); 1595 if (!path) 1596 goto err_deactivate; 1597 tunnel->paths[TB_DP_AUX_PATH_IN] = path; 1598 tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN], false); 1599 1600 /* Validate that the tunnel is complete */ 1601 if (!tb_port_is_dpout(tunnel->dst_port)) { 1602 tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n"); 1603 goto err_deactivate; 1604 } 1605 1606 if (!tb_dp_port_is_enabled(tunnel->dst_port)) 1607 goto err_deactivate; 1608 1609 if (!tb_dp_port_hpd_is_active(tunnel->dst_port)) 1610 goto err_deactivate; 1611 1612 if (port != tunnel->src_port) { 1613 tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n"); 1614 goto err_deactivate; 1615 } 1616 1617 tb_dp_dump(tunnel); 1618 1619 tb_tunnel_dbg(tunnel, "discovered\n"); 1620 return tunnel; 1621 1622 err_deactivate: 1623 tb_tunnel_deactivate(tunnel); 1624 err_free: 1625 tb_tunnel_put(tunnel); 1626 1627 return NULL; 1628 } 1629 1630 /** 1631 * tb_tunnel_alloc_dp() - allocate a Display Port tunnel 1632 * @tb: Pointer to the domain structure 1633 * @in: DP in adapter port 1634 * @out: DP out adapter port 1635 * @link_nr: Preferred lane adapter when the link is not bonded 1636 * @max_up: Maximum available upstream bandwidth for the DP tunnel. 1637 * %0 if no available bandwidth. 1638 * @max_down: Maximum available downstream bandwidth for the DP tunnel. 1639 * %0 if no available bandwidth. 1640 * @callback: Optional callback that is called when the DP tunnel is 1641 * fully activated (or there is an error) 1642 * @callback_data: Optional data for @callback 1643 * 1644 * Allocates a tunnel between @in and @out that is capable of tunneling 1645 * Display Port traffic. If @callback is not %NULL it will be called 1646 * after tb_tunnel_activate() once the tunnel has been fully activated. 1647 * It can call tb_tunnel_is_active() to check if activation was 1648 * successful (or if it returns %false there was some sort of issue). 1649 * The @callback is called without @tb->lock held. 
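 *
 * A minimal callback sketch (hypothetical caller code, the names are
 * examples only):
 *
 *	static void my_dp_activated(struct tb_tunnel *tunnel, void *data)
 *	{
 *		if (!tb_tunnel_is_active(tunnel))
 *			tb_tunnel_warn(tunnel, "DP tunnel failed to activate\n");
 *	}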
1650 * 1651 * Return: Returns a tb_tunnel on success or &NULL on failure. 1652 */ 1653 struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in, 1654 struct tb_port *out, int link_nr, 1655 int max_up, int max_down, 1656 void (*callback)(struct tb_tunnel *, void *), 1657 void *callback_data) 1658 { 1659 struct tb_tunnel *tunnel; 1660 struct tb_path **paths; 1661 struct tb_path *path; 1662 bool pm_support; 1663 1664 if (WARN_ON(!in->cap_adap || !out->cap_adap)) 1665 return NULL; 1666 1667 tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP); 1668 if (!tunnel) 1669 return NULL; 1670 1671 tunnel->pre_activate = tb_dp_pre_activate; 1672 tunnel->activate = tb_dp_activate; 1673 tunnel->post_deactivate = tb_dp_post_deactivate; 1674 tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth; 1675 tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth; 1676 tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth; 1677 tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth; 1678 tunnel->src_port = in; 1679 tunnel->dst_port = out; 1680 tunnel->max_up = max_up; 1681 tunnel->max_down = max_down; 1682 tunnel->callback = callback; 1683 tunnel->callback_data = callback_data; 1684 INIT_DELAYED_WORK(&tunnel->dprx_work, tb_dp_dprx_work); 1685 1686 paths = tunnel->paths; 1687 pm_support = usb4_switch_version(in->sw) >= 2; 1688 1689 path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID, 1690 link_nr, "Video"); 1691 if (!path) 1692 goto err_free; 1693 tb_dp_init_video_path(path, pm_support); 1694 paths[TB_DP_VIDEO_PATH_OUT] = path; 1695 1696 path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out, 1697 TB_DP_AUX_TX_HOPID, link_nr, "AUX TX"); 1698 if (!path) 1699 goto err_free; 1700 tb_dp_init_aux_path(path, pm_support); 1701 paths[TB_DP_AUX_PATH_OUT] = path; 1702 1703 path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in, 1704 TB_DP_AUX_RX_HOPID, link_nr, "AUX RX"); 1705 if (!path) 1706 goto err_free; 1707 tb_dp_init_aux_path(path, pm_support); 1708 paths[TB_DP_AUX_PATH_IN] = path; 1709 1710 return tunnel; 1711 1712 err_free: 1713 tb_tunnel_put(tunnel); 1714 return NULL; 1715 } 1716 1717 static unsigned int tb_dma_available_credits(const struct tb_port *port) 1718 { 1719 const struct tb_switch *sw = port->sw; 1720 int credits; 1721 1722 credits = tb_available_credits(port, NULL); 1723 if (tb_acpi_may_tunnel_pcie()) 1724 credits -= sw->max_pcie_credits; 1725 credits -= port->dma_credits; 1726 1727 return credits > 0 ? credits : 0; 1728 } 1729 1730 static int tb_dma_reserve_credits(struct tb_path_hop *hop, unsigned int credits) 1731 { 1732 struct tb_port *port = hop->in_port; 1733 1734 if (tb_port_use_credit_allocation(port)) { 1735 unsigned int available = tb_dma_available_credits(port); 1736 1737 /* 1738 * Need to have at least TB_MIN_DMA_CREDITS, otherwise 1739 * DMA path cannot be established. 1740 */ 1741 if (available < TB_MIN_DMA_CREDITS) 1742 return -ENOSPC; 1743 1744 while (credits > available) 1745 credits--; 1746 1747 tb_port_dbg(port, "reserving %u credits for DMA path\n", 1748 credits); 1749 1750 port->dma_credits += credits; 1751 } else { 1752 if (tb_port_is_null(port)) 1753 credits = port->bonded ? 
14 : 6; 1754 else 1755 credits = min(port->total_credits, credits); 1756 } 1757 1758 hop->initial_credits = credits; 1759 return 0; 1760 } 1761 1762 /* Path from lane adapter to NHI */ 1763 static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits) 1764 { 1765 struct tb_path_hop *hop; 1766 unsigned int i, tmp; 1767 1768 path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL; 1769 path->ingress_fc_enable = TB_PATH_ALL; 1770 path->egress_shared_buffer = TB_PATH_NONE; 1771 path->ingress_shared_buffer = TB_PATH_NONE; 1772 path->priority = TB_DMA_PRIORITY; 1773 path->weight = TB_DMA_WEIGHT; 1774 path->clear_fc = true; 1775 1776 /* 1777 * First lane adapter is the one connected to the remote host. 1778 * We don't tunnel other traffic over this link so can use all 1779 * the credits (except the ones reserved for control traffic). 1780 */ 1781 hop = &path->hops[0]; 1782 tmp = min(tb_usable_credits(hop->in_port), credits); 1783 hop->initial_credits = tmp; 1784 hop->in_port->dma_credits += tmp; 1785 1786 for (i = 1; i < path->path_length; i++) { 1787 int ret; 1788 1789 ret = tb_dma_reserve_credits(&path->hops[i], credits); 1790 if (ret) 1791 return ret; 1792 } 1793 1794 return 0; 1795 } 1796 1797 /* Path from NHI to lane adapter */ 1798 static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits) 1799 { 1800 struct tb_path_hop *hop; 1801 1802 path->egress_fc_enable = TB_PATH_ALL; 1803 path->ingress_fc_enable = TB_PATH_ALL; 1804 path->egress_shared_buffer = TB_PATH_NONE; 1805 path->ingress_shared_buffer = TB_PATH_NONE; 1806 path->priority = TB_DMA_PRIORITY; 1807 path->weight = TB_DMA_WEIGHT; 1808 path->clear_fc = true; 1809 1810 tb_path_for_each_hop(path, hop) { 1811 int ret; 1812 1813 ret = tb_dma_reserve_credits(hop, credits); 1814 if (ret) 1815 return ret; 1816 } 1817 1818 return 0; 1819 } 1820 1821 static void tb_dma_release_credits(struct tb_path_hop *hop) 1822 { 1823 struct tb_port *port = hop->in_port; 1824 1825 if (tb_port_use_credit_allocation(port)) { 1826 port->dma_credits -= hop->initial_credits; 1827 1828 tb_port_dbg(port, "released %u DMA path credits\n", 1829 hop->initial_credits); 1830 } 1831 } 1832 1833 static void tb_dma_destroy_path(struct tb_path *path) 1834 { 1835 struct tb_path_hop *hop; 1836 1837 tb_path_for_each_hop(path, hop) 1838 tb_dma_release_credits(hop); 1839 } 1840 1841 static void tb_dma_destroy(struct tb_tunnel *tunnel) 1842 { 1843 int i; 1844 1845 for (i = 0; i < tunnel->npaths; i++) { 1846 if (!tunnel->paths[i]) 1847 continue; 1848 tb_dma_destroy_path(tunnel->paths[i]); 1849 } 1850 } 1851 1852 /** 1853 * tb_tunnel_alloc_dma() - allocate a DMA tunnel 1854 * @tb: Pointer to the domain structure 1855 * @nhi: Host controller port 1856 * @dst: Destination null port which the other domain is connected to 1857 * @transmit_path: HopID used for transmitting packets 1858 * @transmit_ring: NHI ring number used to send packets towards the 1859 * other domain. Set to %-1 if TX path is not needed. 1860 * @receive_path: HopID used for receiving packets 1861 * @receive_ring: NHI ring number used to receive packets from the 1862 * other domain. Set to %-1 if RX path is not needed. 1863 * 1864 * Return: Returns a tb_tunnel on success or NULL on failure. 
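 *
 * For example (hypothetical HopIDs and ring numbers), a bidirectional
 * XDomain tunnel could use transmit_path/receive_path of 8 with
 * transmit_ring/receive_ring of 1; passing %-1 for one of the rings
 * leaves that direction out.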
1865 */ 1866 struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi, 1867 struct tb_port *dst, int transmit_path, 1868 int transmit_ring, int receive_path, 1869 int receive_ring) 1870 { 1871 struct tb_tunnel *tunnel; 1872 size_t npaths = 0, i = 0; 1873 struct tb_path *path; 1874 int credits; 1875 1876 /* Ring 0 is reserved for control channel */ 1877 if (WARN_ON(!receive_ring || !transmit_ring)) 1878 return NULL; 1879 1880 if (receive_ring > 0) 1881 npaths++; 1882 if (transmit_ring > 0) 1883 npaths++; 1884 1885 if (WARN_ON(!npaths)) 1886 return NULL; 1887 1888 tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA); 1889 if (!tunnel) 1890 return NULL; 1891 1892 tunnel->src_port = nhi; 1893 tunnel->dst_port = dst; 1894 tunnel->destroy = tb_dma_destroy; 1895 1896 credits = min_not_zero(dma_credits, nhi->sw->max_dma_credits); 1897 1898 if (receive_ring > 0) { 1899 path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0, 1900 "DMA RX"); 1901 if (!path) 1902 goto err_free; 1903 tunnel->paths[i++] = path; 1904 if (tb_dma_init_rx_path(path, credits)) { 1905 tb_tunnel_dbg(tunnel, "not enough buffers for RX path\n"); 1906 goto err_free; 1907 } 1908 } 1909 1910 if (transmit_ring > 0) { 1911 path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0, 1912 "DMA TX"); 1913 if (!path) 1914 goto err_free; 1915 tunnel->paths[i++] = path; 1916 if (tb_dma_init_tx_path(path, credits)) { 1917 tb_tunnel_dbg(tunnel, "not enough buffers for TX path\n"); 1918 goto err_free; 1919 } 1920 } 1921 1922 return tunnel; 1923 1924 err_free: 1925 tb_tunnel_put(tunnel); 1926 return NULL; 1927 } 1928 1929 /** 1930 * tb_tunnel_match_dma() - Match DMA tunnel 1931 * @tunnel: Tunnel to match 1932 * @transmit_path: HopID used for transmitting packets. Pass %-1 to ignore. 1933 * @transmit_ring: NHI ring number used to send packets towards the 1934 * other domain. Pass %-1 to ignore. 1935 * @receive_path: HopID used for receiving packets. Pass %-1 to ignore. 1936 * @receive_ring: NHI ring number used to receive packets from the 1937 * other domain. Pass %-1 to ignore. 1938 * 1939 * This function can be used to match specific DMA tunnel, if there are 1940 * multiple DMA tunnels going through the same XDomain connection. 1941 * Returns true if there is match and false otherwise. 
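 *
 * For example, tb_tunnel_match_dma(tunnel, -1, 3, -1, -1) matches any
 * tunnel whose TX path uses NHI ring 3, regardless of the HopIDs or of
 * the RX side (ring number 3 is just an illustration).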
 */
bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
			 int transmit_ring, int receive_path, int receive_ring)
{
	const struct tb_path *tx_path = NULL, *rx_path = NULL;
	int i;

	if (!receive_ring || !transmit_ring)
		return false;

	for (i = 0; i < tunnel->npaths; i++) {
		const struct tb_path *path = tunnel->paths[i];

		if (!path)
			continue;

		if (tb_port_is_nhi(path->hops[0].in_port))
			tx_path = path;
		else if (tb_port_is_nhi(path->hops[path->path_length - 1].out_port))
			rx_path = path;
	}

	if (transmit_ring > 0 || transmit_path > 0) {
		if (!tx_path)
			return false;
		if (transmit_ring > 0 &&
		    (tx_path->hops[0].in_hop_index != transmit_ring))
			return false;
		if (transmit_path > 0 &&
		    (tx_path->hops[tx_path->path_length - 1].next_hop_index != transmit_path))
			return false;
	}

	if (receive_ring > 0 || receive_path > 0) {
		if (!rx_path)
			return false;
		if (receive_path > 0 &&
		    (rx_path->hops[0].in_hop_index != receive_path))
			return false;
		if (receive_ring > 0 &&
		    (rx_path->hops[rx_path->path_length - 1].next_hop_index != receive_ring))
			return false;
	}

	return true;
}

static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
{
	int ret, up_max_rate, down_max_rate;

	ret = usb4_usb3_port_max_link_rate(up);
	if (ret < 0)
		return ret;
	up_max_rate = ret;

	ret = usb4_usb3_port_max_link_rate(down);
	if (ret < 0)
		return ret;
	down_max_rate = ret;

	return min(up_max_rate, down_max_rate);
}

static int tb_usb3_pre_activate(struct tb_tunnel *tunnel)
{
	tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);

	return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
						 &tunnel->allocated_up,
						 &tunnel->allocated_down);
}

static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	res = tb_usb3_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (tb_port_is_usb3_up(tunnel->dst_port))
		return tb_usb3_port_enable(tunnel->dst_port, activate);

	return 0;
}

static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
				      int *consumed_up, int *consumed_down)
{
	struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
	int pcie_weight = tb_acpi_may_tunnel_pcie() ? TB_PCI_WEIGHT : 0;

	/*
	 * PCIe tunneling, if enabled, affects the USB3 bandwidth, so
	 * take that into account here.
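	 *
	 * For example, with the weights at the top of this file
	 * (TB_USB3_WEIGHT of 2, TB_PCI_WEIGHT of 1) and PCIe tunneling
	 * enabled, an allocation of 4000 Mb/s is reported as
	 * 4000 * (2 + 1) / 2 = 6000 Mb/s consumed.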

static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
{
        int ret, up_max_rate, down_max_rate;

        ret = usb4_usb3_port_max_link_rate(up);
        if (ret < 0)
                return ret;
        up_max_rate = ret;

        ret = usb4_usb3_port_max_link_rate(down);
        if (ret < 0)
                return ret;
        down_max_rate = ret;

        return min(up_max_rate, down_max_rate);
}

static int tb_usb3_pre_activate(struct tb_tunnel *tunnel)
{
        tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
                      tunnel->allocated_up, tunnel->allocated_down);

        return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
                                                 &tunnel->allocated_up,
                                                 &tunnel->allocated_down);
}

static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
{
        int res;

        res = tb_usb3_port_enable(tunnel->src_port, activate);
        if (res)
                return res;

        if (tb_port_is_usb3_up(tunnel->dst_port))
                return tb_usb3_port_enable(tunnel->dst_port, activate);

        return 0;
}

static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
                                      int *consumed_up, int *consumed_down)
{
        struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
        int pcie_weight = tb_acpi_may_tunnel_pcie() ? TB_PCI_WEIGHT : 0;

        /*
         * PCIe tunneling, if enabled, affects the USB3 bandwidth so
         * take that into account here.
         */
        *consumed_up = tunnel->allocated_up *
                (TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
        *consumed_down = tunnel->allocated_down *
                (TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;

        if (tb_port_get_link_generation(port) >= 4) {
                *consumed_up = max(*consumed_up, USB4_V2_USB3_MIN_BANDWIDTH);
                *consumed_down = max(*consumed_down, USB4_V2_USB3_MIN_BANDWIDTH);
        }

        return 0;
}

static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
        int ret;

        ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
                                               &tunnel->allocated_up,
                                               &tunnel->allocated_down);
        if (ret)
                return ret;

        tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
                      tunnel->allocated_up, tunnel->allocated_down);
        return 0;
}

static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
                                                int *available_up,
                                                int *available_down)
{
        int ret, max_rate, allocate_up, allocate_down;

        ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port);
        if (ret < 0) {
                tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
                return;
        }

        /*
         * 90% of the max rate can be allocated for isochronous
         * transfers.
         */
        max_rate = ret * 90 / 100;

        /* No need to reclaim if already at maximum */
        if (tunnel->allocated_up >= max_rate &&
            tunnel->allocated_down >= max_rate)
                return;

        /* Don't go lower than what is already allocated */
        allocate_up = min(max_rate, *available_up);
        if (allocate_up < tunnel->allocated_up)
                allocate_up = tunnel->allocated_up;

        allocate_down = min(max_rate, *available_down);
        if (allocate_down < tunnel->allocated_down)
                allocate_down = tunnel->allocated_down;

        /* If no changes, no need to do more */
        if (allocate_up == tunnel->allocated_up &&
            allocate_down == tunnel->allocated_down)
                return;

        ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
                                                &allocate_down);
        if (ret) {
                tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
                return;
        }

        tunnel->allocated_up = allocate_up;
        *available_up -= tunnel->allocated_up;

        tunnel->allocated_down = allocate_down;
        *available_down -= tunnel->allocated_down;

        tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
                      tunnel->allocated_up, tunnel->allocated_down);
}
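
/*
 * Worked example of the bookkeeping above, using the weights defined at
 * the top of this file (TB_USB3_WEIGHT = 2, TB_PCI_WEIGHT = 1) and an
 * assumed allocation: a USB3 tunnel with 2000 Mb/s allocated reports
 * 2000 * (2 + 1) / 2 = 3000 Mb/s consumed when PCIe tunneling is
 * enabled, because PCIe bulk traffic shares the link in proportion to
 * the path weights. Similarly, since only 90% of the negotiated link
 * rate may be used for isochronous transfers, a 10000 Mb/s USB3 link is
 * never allocated more than 9000 Mb/s by the reclaim path above.
 */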

static void tb_usb3_init_credits(struct tb_path_hop *hop)
{
        struct tb_port *port = hop->in_port;
        struct tb_switch *sw = port->sw;
        unsigned int credits;

        if (tb_port_use_credit_allocation(port)) {
                credits = sw->max_usb3_credits;
        } else {
                if (tb_port_is_null(port))
                        credits = port->bonded ? 32 : 16;
                else
                        credits = 7;
        }

        hop->initial_credits = credits;
}

static void tb_usb3_init_path(struct tb_path *path)
{
        struct tb_path_hop *hop;

        path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
        path->egress_shared_buffer = TB_PATH_NONE;
        path->ingress_fc_enable = TB_PATH_ALL;
        path->ingress_shared_buffer = TB_PATH_NONE;
        path->priority = TB_USB3_PRIORITY;
        path->weight = TB_USB3_WEIGHT;
        path->drop_packages = 0;

        tb_path_for_each_hop(path, hop)
                tb_usb3_init_credits(hop);
}
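
/*
 * Summary of the defaults chosen above: when the connection manager does
 * the buffer (credit) allocation itself, the per-hop buffers come from
 * the router's USB3 credit pool; otherwise the legacy defaults apply,
 * i.e. 32 buffers on a bonded lane adapter, 16 on a single lane and 7
 * on any other adapter type.
 */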

/**
 * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
 * @tb: Pointer to the domain structure
 * @down: USB3 downstream adapter
 * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @down adapter is active, follows the tunnel to the USB3 upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
                                          bool alloc_hopid)
{
        struct tb_tunnel *tunnel;
        struct tb_path *path;

        if (!tb_usb3_port_is_enabled(down))
                return NULL;

        tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
        if (!tunnel)
                return NULL;

        tunnel->activate = tb_usb3_activate;
        tunnel->src_port = down;

        /*
         * Discover both paths even if they are not complete. We will
         * clean them up by calling tb_tunnel_deactivate() below in that
         * case.
         */
        path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
                                &tunnel->dst_port, "USB3 Down", alloc_hopid);
        if (!path) {
                /* Just disable the downstream port */
                tb_usb3_port_enable(down, false);
                goto err_free;
        }
        tunnel->paths[TB_USB3_PATH_DOWN] = path;
        tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);

        path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
                                "USB3 Up", alloc_hopid);
        if (!path)
                goto err_deactivate;
        tunnel->paths[TB_USB3_PATH_UP] = path;
        tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);

        /* Validate that the tunnel is complete */
        if (!tb_port_is_usb3_up(tunnel->dst_port)) {
                tb_port_warn(tunnel->dst_port,
                             "path does not end on an USB3 adapter, cleaning up\n");
                goto err_deactivate;
        }

        if (down != tunnel->src_port) {
                tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
                goto err_deactivate;
        }

        if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
                tb_tunnel_warn(tunnel,
                               "tunnel is not fully activated, cleaning up\n");
                goto err_deactivate;
        }

        if (!tb_route(down->sw)) {
                int ret;

                /*
                 * Read the initial bandwidth allocation for the first
                 * hop tunnel.
                 */
                ret = usb4_usb3_port_allocated_bandwidth(down,
                        &tunnel->allocated_up, &tunnel->allocated_down);
                if (ret)
                        goto err_deactivate;

                tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
                              tunnel->allocated_up, tunnel->allocated_down);

                tunnel->pre_activate = tb_usb3_pre_activate;
                tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
                tunnel->release_unused_bandwidth =
                        tb_usb3_release_unused_bandwidth;
                tunnel->reclaim_available_bandwidth =
                        tb_usb3_reclaim_available_bandwidth;
        }

        tb_tunnel_dbg(tunnel, "discovered\n");
        return tunnel;

err_deactivate:
        tb_tunnel_deactivate(tunnel);
err_free:
        tb_tunnel_put(tunnel);

        return NULL;
}
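
/*
 * Illustrative sketch (assumed caller context): a connection manager
 * taking over a boot-time USB3 tunnel during discovery. The "down"
 * adapter and the "tcm" list the tunnel is added to are assumptions of
 * this example, not something this file defines.
 *
 *      struct tb_tunnel *tunnel;
 *
 *      tunnel = tb_tunnel_discover_usb3(tb, down, true);
 *      if (tunnel)
 *              list_add_tail(&tunnel->list, &tcm->tunnel_list);
 */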

/**
 * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
 * @tb: Pointer to the domain structure
 * @up: USB3 upstream adapter port
 * @down: USB3 downstream adapter port
 * @max_up: Maximum available upstream bandwidth for the USB3 tunnel.
 *          %0 if no available bandwidth.
 * @max_down: Maximum available downstream bandwidth for the USB3 tunnel.
 *            %0 if no available bandwidth.
 *
 * Allocate a USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
 * @TB_TYPE_USB3_DOWN.
 *
 * Return: Returns a tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
                                       struct tb_port *down, int max_up,
                                       int max_down)
{
        struct tb_tunnel *tunnel;
        struct tb_path *path;
        int max_rate = 0;

        if (!tb_route(down->sw) && (max_up > 0 || max_down > 0)) {
                /*
                 * For USB3 isochronous transfers we allow at most 90%
                 * of the maximum bandwidth supported by the USB3
                 * adapters.
                 */
                max_rate = tb_usb3_max_link_rate(down, up);
                if (max_rate < 0)
                        return NULL;

                max_rate = max_rate * 90 / 100;
                tb_port_dbg(up, "maximum required bandwidth for USB3 tunnel %d Mb/s\n",
                            max_rate);
        }

        tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
        if (!tunnel)
                return NULL;

        tunnel->activate = tb_usb3_activate;
        tunnel->src_port = down;
        tunnel->dst_port = up;
        tunnel->max_up = max_up;
        tunnel->max_down = max_down;

        path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
                             "USB3 Down");
        if (!path)
                goto err_free;
        tb_usb3_init_path(path);
        tunnel->paths[TB_USB3_PATH_DOWN] = path;

        path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
                             "USB3 Up");
        if (!path)
                goto err_free;
        tb_usb3_init_path(path);
        tunnel->paths[TB_USB3_PATH_UP] = path;

        if (!tb_route(down->sw)) {
                tunnel->allocated_up = min(max_rate, max_up);
                tunnel->allocated_down = min(max_rate, max_down);

                tunnel->pre_activate = tb_usb3_pre_activate;
                tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
                tunnel->release_unused_bandwidth =
                        tb_usb3_release_unused_bandwidth;
                tunnel->reclaim_available_bandwidth =
                        tb_usb3_reclaim_available_bandwidth;
        }

        return tunnel;

err_free:
        tb_tunnel_put(tunnel);
        return NULL;
}

/**
 * tb_tunnel_is_invalid - check whether an activated path is still valid
 * @tunnel: Tunnel to check
 */
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
{
        int i;

        for (i = 0; i < tunnel->npaths; i++) {
                WARN_ON(!tunnel->paths[i]->activated);
                if (tb_path_is_invalid(tunnel->paths[i]))
                        return true;
        }

        return false;
}
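
/*
 * Illustrative sketch: pairing tb_tunnel_alloc_usb3() with activation.
 * The "up"/"down" adapters and the available_up/available_down limits
 * (in Mb/s) are assumed values provided by the caller.
 *
 *      tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
 *                                    available_down);
 *      if (!tunnel)
 *              return -ENOMEM;
 *
 *      if (tb_tunnel_activate(tunnel)) {
 *              tb_tunnel_put(tunnel);
 *              return -EIO;
 *      }
 */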

/**
 * tb_tunnel_activate() - activate a tunnel
 * @tunnel: Tunnel to activate
 *
 * Return: 0 on success and negative errno in case of failure.
 * Specifically returns %-EINPROGRESS if the tunnel activation is still
 * in progress (this is used by DP tunnels to complete the DPRX
 * capabilities read).
 */
int tb_tunnel_activate(struct tb_tunnel *tunnel)
{
        int res, i;

        tb_tunnel_dbg(tunnel, "activating\n");

        /*
         * Make sure all paths are properly disabled before enabling
         * them again.
         */
        for (i = 0; i < tunnel->npaths; i++) {
                if (tunnel->paths[i]->activated) {
                        tb_path_deactivate(tunnel->paths[i]);
                        tunnel->paths[i]->activated = false;
                }
        }

        tunnel->state = TB_TUNNEL_ACTIVATING;

        if (tunnel->pre_activate) {
                res = tunnel->pre_activate(tunnel);
                if (res)
                        return res;
        }

        for (i = 0; i < tunnel->npaths; i++) {
                res = tb_path_activate(tunnel->paths[i]);
                if (res)
                        goto err;
        }

        if (tunnel->activate) {
                res = tunnel->activate(tunnel, true);
                if (res) {
                        if (res == -EINPROGRESS)
                                return res;
                        goto err;
                }
        }

        tb_tunnel_set_active(tunnel, true);
        return 0;

err:
        tb_tunnel_warn(tunnel, "activation failed\n");
        tb_tunnel_deactivate(tunnel);
        return res;
}

/**
 * tb_tunnel_deactivate() - deactivate a tunnel
 * @tunnel: Tunnel to deactivate
 */
void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
{
        int i;

        tb_tunnel_dbg(tunnel, "deactivating\n");

        if (tunnel->activate)
                tunnel->activate(tunnel, false);

        for (i = 0; i < tunnel->npaths; i++) {
                if (tunnel->paths[i] && tunnel->paths[i]->activated)
                        tb_path_deactivate(tunnel->paths[i]);
        }

        if (tunnel->post_deactivate)
                tunnel->post_deactivate(tunnel);

        tb_tunnel_set_active(tunnel, false);
}

/**
 * tb_tunnel_port_on_path() - Does the tunnel go through port
 * @tunnel: Tunnel to check
 * @port: Port to check
 *
 * Returns true if @tunnel goes through @port (direction does not matter),
 * false otherwise.
 */
bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
                            const struct tb_port *port)
{
        int i;

        for (i = 0; i < tunnel->npaths; i++) {
                if (!tunnel->paths[i])
                        continue;

                if (tb_path_port_on_path(tunnel->paths[i], port))
                        return true;
        }

        return false;
}

/* Is tb_tunnel_activate() called for the tunnel */
static bool tb_tunnel_is_activated(const struct tb_tunnel *tunnel)
{
        return tunnel->state == TB_TUNNEL_ACTIVATING || tb_tunnel_is_active(tunnel);
}

/**
 * tb_tunnel_maximum_bandwidth() - Return maximum possible bandwidth
 * @tunnel: Tunnel to check
 * @max_up: Maximum upstream bandwidth in Mb/s
 * @max_down: Maximum downstream bandwidth in Mb/s
 *
 * Returns maximum possible bandwidth this tunnel can consume if not
 * limited by other bandwidth clients. If the tunnel does not support
 * this returns %-EOPNOTSUPP.
 */
int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
                                int *max_down)
{
        if (!tb_tunnel_is_active(tunnel))
                return -ENOTCONN;

        if (tunnel->maximum_bandwidth)
                return tunnel->maximum_bandwidth(tunnel, max_up, max_down);
        return -EOPNOTSUPP;
}
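
/*
 * Illustrative sketch: querying the ceiling of an active tunnel. A
 * return value of %-EOPNOTSUPP only means the tunnel type does not
 * implement the callback and is typically not treated as an error by
 * the caller.
 *
 *      int ret, max_up, max_down;
 *
 *      ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
 *      if (ret && ret != -EOPNOTSUPP)
 *              return ret;
 */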

/**
 * tb_tunnel_allocated_bandwidth() - Return bandwidth allocated for the tunnel
 * @tunnel: Tunnel to check
 * @allocated_up: Currently allocated upstream bandwidth in Mb/s is stored here
 * @allocated_down: Currently allocated downstream bandwidth in Mb/s is
 *                  stored here
 *
 * Returns the bandwidth allocated for the tunnel. This may be higher
 * than what the tunnel actually consumes.
 */
int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
                                  int *allocated_down)
{
        if (!tb_tunnel_is_active(tunnel))
                return -ENOTCONN;

        if (tunnel->allocated_bandwidth)
                return tunnel->allocated_bandwidth(tunnel, allocated_up,
                                                   allocated_down);
        return -EOPNOTSUPP;
}

/**
 * tb_tunnel_alloc_bandwidth() - Change tunnel bandwidth allocation
 * @tunnel: Tunnel whose bandwidth allocation to change
 * @alloc_up: New upstream bandwidth in Mb/s
 * @alloc_down: New downstream bandwidth in Mb/s
 *
 * Tries to change tunnel bandwidth allocation. If it succeeds, returns
 * %0 and updates @alloc_up and @alloc_down to what was actually
 * allocated (it may not be the same as passed originally). Returns
 * negative errno in case of failure.
 */
int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
                              int *alloc_down)
{
        if (!tb_tunnel_is_active(tunnel))
                return -ENOTCONN;

        if (tunnel->alloc_bandwidth) {
                int ret;

                ret = tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down);
                if (ret)
                        return ret;

                tb_tunnel_changed(tunnel);
                return 0;
        }

        return -EOPNOTSUPP;
}

/**
 * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
 * @tunnel: Tunnel to check
 * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
 *               Can be %NULL.
 * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
 *                 Can be %NULL.
 *
 * Stores the amount of isochronous bandwidth @tunnel consumes in
 * @consumed_up and @consumed_down. In case of success returns %0,
 * negative errno otherwise.
 */
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
                                 int *consumed_down)
{
        int up_bw = 0, down_bw = 0;

        /*
         * Here we need to distinguish an inactive tunnel from tunnels
         * that are either fully active or whose activation has been
         * started. The latter is the case for DP tunnels where we must
         * report the consumed bandwidth as the maximum we gave the
         * tunnel until the DPRX capabilities read is done by the
         * graphics driver.
         */
        if (tb_tunnel_is_activated(tunnel) && tunnel->consumed_bandwidth) {
                int ret;

                ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
                if (ret)
                        return ret;
        }

        if (consumed_up)
                *consumed_up = up_bw;
        if (consumed_down)
                *consumed_down = down_bw;

        tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw, down_bw);
        return 0;
}
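
/*
 * Illustrative sketch: summing up what existing tunnels consume, for
 * example when estimating what is left for a new tunnel. The tunnel
 * list and the used_up/used_down accumulators are assumptions of this
 * example.
 *
 *      list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
 *              int up, down;
 *
 *              if (tb_tunnel_consumed_bandwidth(tunnel, &up, &down))
 *                      continue;
 *              used_up += up;
 *              used_down += down;
 *      }
 */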

/**
 * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
 * @tunnel: Tunnel whose unused bandwidth to release
 *
 * If the tunnel supports dynamic bandwidth management (USB3 tunnels at
 * the moment) this function makes it release all the unused bandwidth.
 *
 * Returns %0 in case of success and negative errno otherwise.
 */
int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
        if (!tb_tunnel_is_active(tunnel))
                return -ENOTCONN;

        if (tunnel->release_unused_bandwidth) {
                int ret;

                ret = tunnel->release_unused_bandwidth(tunnel);
                if (ret)
                        return ret;
        }

        return 0;
}

/**
 * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
 * @tunnel: Tunnel reclaiming available bandwidth
 * @available_up: Available upstream bandwidth (in Mb/s)
 * @available_down: Available downstream bandwidth (in Mb/s)
 *
 * Reclaims bandwidth from @available_up and @available_down and updates
 * the variables accordingly (e.g. decreases both according to what was
 * reclaimed by the tunnel). If nothing was reclaimed the values are
 * kept as is.
 */
void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
                                           int *available_up,
                                           int *available_down)
{
        if (!tb_tunnel_is_active(tunnel))
                return;

        if (tunnel->reclaim_available_bandwidth)
                tunnel->reclaim_available_bandwidth(tunnel, available_up,
                                                    available_down);
}

const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel)
{
        return tb_tunnel_names[tunnel->type];
}
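
/*
 * Illustrative sketch: the usual two-step rebalancing done with the
 * helpers above. Every tunnel is first asked to drop bandwidth it does
 * not use; once the new totals are known, the freed bandwidth is handed
 * back out. The tunnel list and the available_up/available_down
 * bookkeeping are assumptions of this example.
 *
 *      list_for_each_entry(tunnel, &tcm->tunnel_list, list)
 *              tb_tunnel_release_unused_bandwidth(tunnel);
 *
 *      // ...recompute available_up and available_down here...
 *
 *      list_for_each_entry(tunnel, &tcm->tunnel_list, list)
 *              tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up,
 *                                                    &available_down);
 */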