// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - Tunneling support
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>

#include "tunnel.h"
#include "tb.h"

/* PCIe adapters always use HopID of 8 for both directions */
#define TB_PCI_HOPID			8

#define TB_PCI_PATH_DOWN		0
#define TB_PCI_PATH_UP			1

/* USB3 adapters always use HopID of 8 for both directions */
#define TB_USB3_HOPID			8

#define TB_USB3_PATH_DOWN		0
#define TB_USB3_PATH_UP			1

/* DP adapters use HopID 8 for AUX and 9 for Video */
#define TB_DP_AUX_TX_HOPID		8
#define TB_DP_AUX_RX_HOPID		8
#define TB_DP_VIDEO_HOPID		9

#define TB_DP_VIDEO_PATH_OUT		0
#define TB_DP_AUX_PATH_OUT		1
#define TB_DP_AUX_PATH_IN		2

#define TB_DMA_PATH_OUT			0
#define TB_DMA_PATH_IN			1

static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };

#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)			\
	do {								\
		struct tb_tunnel *__tunnel = (tunnel);			\
		level(__tunnel->tb, "%llx:%x <-> %llx:%x (%s): " fmt,	\
		      tb_route(__tunnel->src_port->sw),			\
		      __tunnel->src_port->port,				\
		      tb_route(__tunnel->dst_port->sw),			\
		      __tunnel->dst_port->port,				\
		      tb_tunnel_names[__tunnel->type],			\
		      ## arg);						\
	} while (0)

#define tb_tunnel_WARN(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
#define tb_tunnel_warn(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
#define tb_tunnel_info(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
#define tb_tunnel_dbg(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)

static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
					 enum tb_tunnel_type type)
{
	struct tb_tunnel *tunnel;

	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
	if (!tunnel)
		return NULL;

	tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
	if (!tunnel->paths) {
		tb_tunnel_free(tunnel);
		return NULL;
	}

	INIT_LIST_HEAD(&tunnel->list);
	tunnel->tb = tb;
	tunnel->npaths = npaths;
	tunnel->type = type;

	return tunnel;
}

static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	res = tb_pci_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (tb_port_is_pcie_up(tunnel->dst_port))
		return tb_pci_port_enable(tunnel->dst_port, activate);

	return 0;
}

static int tb_initial_credits(const struct tb_switch *sw)
{
	/* If the path is complete, sw is not NULL */
	if (sw) {
		/* More credits for faster link */
		switch (sw->link_speed * sw->link_width) {
		case 40:
			return 32;
		case 20:
			return 24;
		}
	}

	return 16;
}

static void tb_pci_init_path(struct tb_path *path)
{
	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 3;
	path->weight = 1;
	path->drop_packages = 0;
	path->nfc_credits = 0;
	path->hops[0].initial_credits = 7;
	if (path->path_length > 1)
		path->hops[1].initial_credits =
			tb_initial_credits(path->hops[1].in_port->sw);
}

/**
 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
 * @tb: Pointer to the domain structure
 * @down: PCIe downstream adapter
 *
 * If @down adapter is active, follows the tunnel to the PCIe upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	if (!tb_pci_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
				&tunnel->dst_port, "PCIe Up");
	if (!path) {
		/* Just disable the downstream port */
		tb_pci_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_PCI_PATH_UP] = path;
	tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]);

	path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
				"PCIe Down");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_PCI_PATH_DOWN] = path;
	tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_pcie_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on a PCIe adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_pci() - allocate a PCI tunnel
 * @tb: Pointer to the domain structure
 * @up: PCIe upstream adapter port
 * @down: PCIe downstream adapter port
 *
 * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
 * TB_TYPE_PCIE_DOWN.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
				      struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;
	tunnel->dst_port = up;

	path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
			     "PCIe Down");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_pci_init_path(path);
	tunnel->paths[TB_PCI_PATH_DOWN] = path;

	path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
			     "PCIe Up");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_pci_init_path(path);
	tunnel->paths[TB_PCI_PATH_UP] = path;

	return tunnel;
}

static bool tb_dp_is_usb4(const struct tb_switch *sw)
{
	/* Titan Ridge DP adapters need the same treatment as USB4 */
	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}

static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
{
	int timeout = 10;
	u32 val;
	int ret;

	/* Both ends need to support this */
	if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
		return 0;

	ret = tb_port_read(out, &val, TB_CFG_PORT,
			   out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;

	ret = tb_port_write(out, &val, TB_CFG_PORT,
			    out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	do {
		ret = tb_port_read(out, &val, TB_CFG_PORT,
				   out->cap_adap + DP_STATUS_CTRL, 1);
		if (ret)
			return ret;
		if (!(val & DP_STATUS_CTRL_CMHS))
			return 0;
		usleep_range(10, 100);
	} while (timeout--);

	return -ETIMEDOUT;
}

static inline u32 tb_dp_cap_get_rate(u32 val)
{
	u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;

	switch (rate) {
	case DP_COMMON_CAP_RATE_RBR:
		return 1620;
	case DP_COMMON_CAP_RATE_HBR:
		return 2700;
	case DP_COMMON_CAP_RATE_HBR2:
		return 5400;
	case DP_COMMON_CAP_RATE_HBR3:
		return 8100;
	default:
		return 0;
	}
}

static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
{
	val &= ~DP_COMMON_CAP_RATE_MASK;
	switch (rate) {
	default:
		WARN(1, "invalid rate %u passed, defaulting to 1620 Mb/s\n", rate);
		/* Fallthrough */
	case 1620:
		val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 2700:
		val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 5400:
		val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 8100:
		val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	}
	return val;
}

static inline u32 tb_dp_cap_get_lanes(u32 val)
{
	u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;

	switch (lanes) {
	case DP_COMMON_CAP_1_LANE:
		return 1;
	case DP_COMMON_CAP_2_LANES:
		return 2;
	case DP_COMMON_CAP_4_LANES:
		return 4;
	default:
		return 0;
	}
}

static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
{
	val &= ~DP_COMMON_CAP_LANES_MASK;
	switch (lanes) {
	default:
		WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
		     lanes);
		/* Fallthrough */
	case 1:
		val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 2:
		val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 4:
		val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	}
	return val;
}

static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
{
	/* Tunneling removes the DP 8b/10b encoding */
	return rate * lanes * 8 / 10;
}

static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
				  u32 out_rate, u32 out_lanes, u32 *new_rate,
				  u32 *new_lanes)
{
	static const u32 dp_bw[][2] = {
		/* Mb/s, lanes */
		{ 8100, 4 }, /* 25920 Mb/s */
		{ 5400, 4 }, /* 17280 Mb/s */
		{ 8100, 2 }, /* 12960 Mb/s */
		{ 2700, 4 }, /* 8640 Mb/s */
		{ 5400, 2 }, /* 8640 Mb/s */
		{ 8100, 1 }, /* 6480 Mb/s */
		{ 1620, 4 }, /* 5184 Mb/s */
		{ 5400, 1 }, /* 4320 Mb/s */
		{ 2700, 2 }, /* 4320 Mb/s */
		{ 1620, 2 }, /* 2592 Mb/s */
		{ 2700, 1 }, /* 2160 Mb/s */
		{ 1620, 1 }, /* 1296 Mb/s */
	};
	unsigned int i;

	/*
	 * Find a combination that can fit into max_bw and does not
	 * exceed the maximum rate and lanes supported by the DP OUT and
	 * DP IN adapters.
	 */
	for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
		if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
			continue;

		if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
			continue;

		if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
			*new_rate = dp_bw[i][0];
			*new_lanes = dp_bw[i][1];
			return 0;
		}
	}

	return -ENOSR;
}

static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
{
	u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	int ret, max_bw;

	/*
	 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
	 * newer generation hardware.
	 */
	if (in->sw->generation < 2 || out->sw->generation < 2)
		return 0;

	/*
	 * Perform connection manager handshake between IN and OUT ports
	 * before capabilities exchange can take place.
	 */
	ret = tb_dp_cm_handshake(in, out);
	if (ret)
		return ret;

	/* Read both DP_LOCAL_CAP registers */
	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
			   in->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
			   out->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	/* Write IN local caps to OUT remote caps */
	ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
			    out->cap_adap + DP_REMOTE_CAP, 1);
	if (ret)
		return ret;

	in_rate = tb_dp_cap_get_rate(in_dp_cap);
	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
	tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		    in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));

	/*
	 * If the tunnel bandwidth is limited (max_bw is set) then see
	 * if we need to reduce bandwidth to fit there.
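	 *
	 * For example, if both adapters support HBR2 x4 (17280 Mb/s)
	 * but only 16000 Mb/s is available, tb_dp_reduce_bandwidth()
	 * picks the first dp_bw[] entry that fits: HBR x4 (8640 Mb/s).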
	 */
	out_rate = tb_dp_cap_get_rate(out_dp_cap);
	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
	bw = tb_dp_bandwidth(out_rate, out_lanes);
	tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		    out_rate, out_lanes, bw);

	if (in->sw->config.depth < out->sw->config.depth)
		max_bw = tunnel->max_down;
	else
		max_bw = tunnel->max_up;

	if (max_bw && bw > max_bw) {
		u32 new_rate, new_lanes, new_bw;

		ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
					     out_rate, out_lanes, &new_rate,
					     &new_lanes);
		if (ret) {
			tb_port_info(out, "not enough bandwidth for DP tunnel\n");
			return ret;
		}

		new_bw = tb_dp_bandwidth(new_rate, new_lanes);
		tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
			    new_rate, new_lanes, new_bw);

		/*
		 * Set new rate and number of lanes before writing it to
		 * the IN port remote caps.
		 */
		out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
		out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
	}

	return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
			     in->cap_adap + DP_REMOTE_CAP, 1);
}

static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
	int ret;

	if (active) {
		struct tb_path **paths;
		int last;

		paths = tunnel->paths;
		last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;

		tb_dp_port_set_hops(tunnel->src_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);

		tb_dp_port_set_hops(tunnel->dst_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
	} else {
		tb_dp_port_hpd_clear(tunnel->src_port);
		tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
		if (tb_port_is_dpout(tunnel->dst_port))
			tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
	}

	ret = tb_dp_port_enable(tunnel->src_port, active);
	if (ret)
		return ret;

	if (tb_port_is_dpout(tunnel->dst_port))
		return tb_dp_port_enable(tunnel->dst_port, active);

	return 0;
}

static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
				    int *consumed_down)
{
	struct tb_port *in = tunnel->src_port;
	const struct tb_switch *sw = in->sw;
	u32 val, rate = 0, lanes = 0;
	int ret;

	if (tb_dp_is_usb4(sw)) {
		int timeout = 20;

		/*
		 * Wait for DPRX done. Normally it should already be set
		 * for an active tunnel.
		 */
		do {
			ret = tb_port_read(in, &val, TB_CFG_PORT,
					   in->cap_adap + DP_COMMON_CAP, 1);
			if (ret)
				return ret;

			if (val & DP_COMMON_CAP_DPRX_DONE) {
				rate = tb_dp_cap_get_rate(val);
				lanes = tb_dp_cap_get_lanes(val);
				break;
			}
			msleep(250);
		} while (timeout--);

		if (!timeout)
			return -ETIMEDOUT;
	} else if (sw->generation >= 2) {
		/*
		 * Read from the copied remote cap so that we take into
		 * account if capabilities were reduced during exchange.
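		 * (DP_REMOTE_CAP on the DP IN adapter holds what
		 * tb_dp_xchg_caps() wrote there, possibly with a reduced
		 * rate and lane count.)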
		 */
		ret = tb_port_read(in, &val, TB_CFG_PORT,
				   in->cap_adap + DP_REMOTE_CAP, 1);
		if (ret)
			return ret;

		rate = tb_dp_cap_get_rate(val);
		lanes = tb_dp_cap_get_lanes(val);
	} else {
		/* No bandwidth management for legacy devices */
		*consumed_up = 0;
		*consumed_down = 0;
		return 0;
	}

	if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
		*consumed_up = 0;
		*consumed_down = tb_dp_bandwidth(rate, lanes);
	} else {
		*consumed_up = tb_dp_bandwidth(rate, lanes);
		*consumed_down = 0;
	}

	return 0;
}

static void tb_dp_init_aux_path(struct tb_path *path)
{
	int i;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 2;
	path->weight = 1;

	for (i = 0; i < path->path_length; i++)
		path->hops[i].initial_credits = 1;
}

static void tb_dp_init_video_path(struct tb_path *path, bool discover)
{
	u32 nfc_credits = path->hops[0].in_port->config.nfc_credits;

	path->egress_fc_enable = TB_PATH_NONE;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 1;
	path->weight = 1;

	if (discover) {
		path->nfc_credits = nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
	} else {
		u32 max_credits;

		max_credits = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
			ADP_CS_4_TOTAL_BUFFERS_SHIFT;
		/* Leave some credits for AUX path */
		path->nfc_credits = min(max_credits - 2, 12U);
	}
}

/**
 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
 * @tb: Pointer to the domain structure
 * @in: DP in adapter
 *
 * If @in adapter is active, follows the tunnel to the DP out adapter
 * and back. Returns the discovered tunnel or %NULL if there was no
 * tunnel.
 *
 * Return: DP tunnel or %NULL if no tunnel found.
 */
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
{
	struct tb_tunnel *tunnel;
	struct tb_port *port;
	struct tb_path *path;

	if (!tb_dp_port_is_enabled(in))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_xchg_caps;
	tunnel->activate = tb_dp_activate;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;

	path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
				&tunnel->dst_port, "Video");
	if (!path) {
		/* Just disable the DP IN port */
		tb_dp_port_enable(in, false);
		goto err_free;
	}
	tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
	tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], true);

	path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);

	path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
				&port, "AUX RX");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_IN] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_dpout(tunnel->dst_port)) {
		tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_dp_port_is_enabled(tunnel->dst_port))
		goto err_deactivate;

	if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
		goto err_deactivate;

	if (port != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
 * @tb: Pointer to the domain structure
 * @in: DP in adapter port
 * @out: DP out adapter port
 * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
 *	    if not limited)
 * @max_down: Maximum available downstream bandwidth for the DP tunnel
 *	      (%0 if not limited)
 *
 * Allocates a tunnel between @in and @out that is capable of tunneling
 * Display Port traffic.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
				     struct tb_port *out, int max_up,
				     int max_down)
{
	struct tb_tunnel *tunnel;
	struct tb_path **paths;
	struct tb_path *path;

	if (WARN_ON(!in->cap_adap || !out->cap_adap))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_xchg_caps;
	tunnel->activate = tb_dp_activate;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;
	tunnel->dst_port = out;
	tunnel->max_up = max_up;
	tunnel->max_down = max_down;

	paths = tunnel->paths;

	path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
			     1, "Video");
	if (!path)
		goto err_free;
	tb_dp_init_video_path(path, false);
	paths[TB_DP_VIDEO_PATH_OUT] = path;

	path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
			     TB_DP_AUX_TX_HOPID, 1, "AUX TX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path);
	paths[TB_DP_AUX_PATH_OUT] = path;

	path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
			     TB_DP_AUX_RX_HOPID, 1, "AUX RX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path);
	paths[TB_DP_AUX_PATH_IN] = path;

	return tunnel;

err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}

static u32 tb_dma_credits(struct tb_port *nhi)
{
	u32 max_credits;

	max_credits = (nhi->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
		ADP_CS_4_TOTAL_BUFFERS_SHIFT;
	return min(max_credits, 13U);
}

static int tb_dma_activate(struct tb_tunnel *tunnel, bool active)
{
	struct tb_port *nhi = tunnel->src_port;
	u32 credits;

	credits = active ? tb_dma_credits(nhi) : 0;
	return tb_port_set_initial_credits(nhi, credits);
}

static void tb_dma_init_path(struct tb_path *path, unsigned int isb,
			     unsigned int efc, u32 credits)
{
	int i;

	path->egress_fc_enable = efc;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_shared_buffer = isb;
	path->priority = 5;
	path->weight = 1;
	path->clear_fc = true;

	for (i = 0; i < path->path_length; i++)
		path->hops[i].initial_credits = credits;
}

/**
 * tb_tunnel_alloc_dma() - allocate a DMA tunnel
 * @tb: Pointer to the domain structure
 * @nhi: Host controller port
 * @dst: Destination null port which the other domain is connected to
 * @transmit_ring: NHI ring number used to send packets towards the
 *		   other domain
 * @transmit_path: HopID used for transmitting packets
 * @receive_ring: NHI ring number used to receive packets from the
 *		  other domain
 * @receive_path: HopID used for receiving packets
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
				      struct tb_port *dst, int transmit_ring,
				      int transmit_path, int receive_ring,
				      int receive_path)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;
	u32 credits;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_DMA);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_dma_activate;
	tunnel->src_port = nhi;
	tunnel->dst_port = dst;

	credits = tb_dma_credits(nhi);

	path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0, "DMA RX");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_dma_init_path(path, TB_PATH_NONE, TB_PATH_SOURCE | TB_PATH_INTERNAL,
			 credits);
	tunnel->paths[TB_DMA_PATH_IN] = path;

	path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0, "DMA TX");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_dma_init_path(path, TB_PATH_SOURCE, TB_PATH_ALL, credits);
	tunnel->paths[TB_DMA_PATH_OUT] = path;

	return tunnel;
}

static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
{
	int ret, up_max_rate, down_max_rate;

	ret = usb4_usb3_port_max_link_rate(up);
	if (ret < 0)
		return ret;
	up_max_rate = ret;

	ret = usb4_usb3_port_max_link_rate(down);
	if (ret < 0)
		return ret;
	down_max_rate = ret;

	return min(up_max_rate, down_max_rate);
}

static int tb_usb3_init(struct tb_tunnel *tunnel)
{
	tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);

	return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
						 &tunnel->allocated_up,
						 &tunnel->allocated_down);
}

static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	res = tb_usb3_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (tb_port_is_usb3_up(tunnel->dst_port))
		return tb_usb3_port_enable(tunnel->dst_port, activate);

	return 0;
}

static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
				      int *consumed_up, int *consumed_down)
{
	/*
	 * PCIe tunneling affects the USB3 bandwidth so take that into
	 * account here.
	 */
	*consumed_up = tunnel->allocated_up * (3 + 1) / 3;
	*consumed_down = tunnel->allocated_down * (3 + 1) / 3;
	return 0;
}

static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
	int ret;

	ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
					       &tunnel->allocated_up,
					       &tunnel->allocated_down);
	if (ret)
		return ret;

	tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);
	return 0;
}

static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
						int *available_up,
						int *available_down)
{
	int ret, max_rate, allocate_up, allocate_down;

	ret = usb4_usb3_port_actual_link_rate(tunnel->src_port);
	if (ret <= 0) {
		tb_tunnel_warn(tunnel, "tunnel is not up\n");
		return;
	}
	/*
	 * 90% of the max rate can be allocated for isochronous
	 * transfers.
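	 * For example a 10000 Mb/s link leaves at most 9000 Mb/s for
	 * allocation here.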
	 */
	max_rate = ret * 90 / 100;

	/* No need to reclaim if already at maximum */
	if (tunnel->allocated_up >= max_rate &&
	    tunnel->allocated_down >= max_rate)
		return;

	/* Don't go lower than what is already allocated */
	allocate_up = min(max_rate, *available_up);
	if (allocate_up < tunnel->allocated_up)
		allocate_up = tunnel->allocated_up;

	allocate_down = min(max_rate, *available_down);
	if (allocate_down < tunnel->allocated_down)
		allocate_down = tunnel->allocated_down;

	/* If no changes no need to do more */
	if (allocate_up == tunnel->allocated_up &&
	    allocate_down == tunnel->allocated_down)
		return;

	ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
						&allocate_down);
	if (ret) {
		tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
		return;
	}

	tunnel->allocated_up = allocate_up;
	*available_up -= tunnel->allocated_up;

	tunnel->allocated_down = allocate_down;
	*available_down -= tunnel->allocated_down;

	tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);
}

static void tb_usb3_init_path(struct tb_path *path)
{
	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 3;
	path->weight = 3;
	path->drop_packages = 0;
	path->nfc_credits = 0;
	path->hops[0].initial_credits = 7;
	if (path->path_length > 1)
		path->hops[1].initial_credits =
			tb_initial_credits(path->hops[1].in_port->sw);
}

/**
 * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
 * @tb: Pointer to the domain structure
 * @down: USB3 downstream adapter
 *
 * If @down adapter is active, follows the tunnel to the USB3 upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	if (!tb_usb3_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_usb3_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
				&tunnel->dst_port, "USB3 Down");
	if (!path) {
		/* Just disable the downstream port */
		tb_usb3_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_USB3_PATH_DOWN] = path;
	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);

	path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
				"USB3 Up");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_USB3_PATH_UP] = path;
	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_usb3_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on a USB3 adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_route(down->sw)) {
		int ret;

		/*
		 * Read the initial bandwidth allocation for the first
		 * hop tunnel.
		 */
		ret = usb4_usb3_port_allocated_bandwidth(down,
				&tunnel->allocated_up, &tunnel->allocated_down);
		if (ret)
			goto err_deactivate;

		tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
			      tunnel->allocated_up, tunnel->allocated_down);

		tunnel->init = tb_usb3_init;
		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
		tunnel->release_unused_bandwidth =
			tb_usb3_release_unused_bandwidth;
		tunnel->reclaim_available_bandwidth =
			tb_usb3_reclaim_available_bandwidth;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
 * @tb: Pointer to the domain structure
 * @up: USB3 upstream adapter port
 * @down: USB3 downstream adapter port
 * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0
 *	    if not limited).
 * @max_down: Maximum available downstream bandwidth for the USB3 tunnel
 *	      (%0 if not limited).
 *
 * Allocate a USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
 * @TB_TYPE_USB3_DOWN.
 *
 * Return: Returns a tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
				       struct tb_port *down, int max_up,
				       int max_down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;
	int max_rate = 0;

	/*
	 * Check that we have enough bandwidth available for the new
	 * USB3 tunnel.
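	 * It needs 90% of the slower adapter's maximum link rate to be
	 * available in both directions.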
	 */
	if (max_up > 0 || max_down > 0) {
		max_rate = tb_usb3_max_link_rate(down, up);
		if (max_rate < 0)
			return NULL;

		/* Only 90% can be allocated for USB3 isochronous transfers */
		max_rate = max_rate * 90 / 100;
		tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n",
			    max_rate);

		if (max_rate > max_up || max_rate > max_down) {
			tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n");
			return NULL;
		}
	}

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_usb3_activate;
	tunnel->src_port = down;
	tunnel->dst_port = up;
	tunnel->max_up = max_up;
	tunnel->max_down = max_down;

	path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
			     "USB3 Down");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_usb3_init_path(path);
	tunnel->paths[TB_USB3_PATH_DOWN] = path;

	path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
			     "USB3 Up");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_usb3_init_path(path);
	tunnel->paths[TB_USB3_PATH_UP] = path;

	if (!tb_route(down->sw)) {
		tunnel->allocated_up = max_rate;
		tunnel->allocated_down = max_rate;

		tunnel->init = tb_usb3_init;
		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
		tunnel->release_unused_bandwidth =
			tb_usb3_release_unused_bandwidth;
		tunnel->reclaim_available_bandwidth =
			tb_usb3_reclaim_available_bandwidth;
	}

	return tunnel;
}

/**
 * tb_tunnel_free() - free a tunnel
 * @tunnel: Tunnel to be freed
 *
 * Frees a tunnel. The tunnel does not need to be deactivated.
 */
void tb_tunnel_free(struct tb_tunnel *tunnel)
{
	int i;

	if (!tunnel)
		return;

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i])
			tb_path_free(tunnel->paths[i]);
	}

	kfree(tunnel->paths);
	kfree(tunnel);
}

/**
 * tb_tunnel_is_invalid() - check whether an activated path is still valid
 * @tunnel: Tunnel to check
 */
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		WARN_ON(!tunnel->paths[i]->activated);
		if (tb_path_is_invalid(tunnel->paths[i]))
			return true;
	}

	return false;
}

/**
 * tb_tunnel_restart() - activate a tunnel after a hardware reset
 * @tunnel: Tunnel to restart
 *
 * Return: 0 on success and negative errno in case of failure
 */
int tb_tunnel_restart(struct tb_tunnel *tunnel)
{
	int res, i;

	tb_tunnel_dbg(tunnel, "activating\n");

	/*
	 * Make sure all paths are properly disabled before enabling
	 * them again.
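	 * (tb_path_activate() refuses to activate a path that is
	 * already marked activated.)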
	 */
	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated) {
			tb_path_deactivate(tunnel->paths[i]);
			tunnel->paths[i]->activated = false;
		}
	}

	if (tunnel->init) {
		res = tunnel->init(tunnel);
		if (res)
			return res;
	}

	for (i = 0; i < tunnel->npaths; i++) {
		res = tb_path_activate(tunnel->paths[i]);
		if (res)
			goto err;
	}

	if (tunnel->activate) {
		res = tunnel->activate(tunnel, true);
		if (res)
			goto err;
	}

	return 0;

err:
	tb_tunnel_warn(tunnel, "activation failed\n");
	tb_tunnel_deactivate(tunnel);
	return res;
}

/**
 * tb_tunnel_activate() - activate a tunnel
 * @tunnel: Tunnel to activate
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_tunnel_activate(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated) {
			tb_tunnel_WARN(tunnel,
				       "trying to activate an already activated tunnel\n");
			return -EINVAL;
		}
	}

	return tb_tunnel_restart(tunnel);
}

/**
 * tb_tunnel_deactivate() - deactivate a tunnel
 * @tunnel: Tunnel to deactivate
 */
void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
{
	int i;

	tb_tunnel_dbg(tunnel, "deactivating\n");

	if (tunnel->activate)
		tunnel->activate(tunnel, false);

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i] && tunnel->paths[i]->activated)
			tb_path_deactivate(tunnel->paths[i]);
	}
}

/**
 * tb_tunnel_port_on_path() - Does the tunnel go through port
 * @tunnel: Tunnel to check
 * @port: Port to check
 *
 * Returns true if @tunnel goes through @port (direction does not matter),
 * false otherwise.
 */
bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
			    const struct tb_port *port)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			continue;

		if (tb_path_port_on_path(tunnel->paths[i], port))
			return true;
	}

	return false;
}

static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			return false;
		if (!tunnel->paths[i]->activated)
			return false;
	}

	return true;
}

/**
 * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
 * @tunnel: Tunnel to check
 * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
 *		 Can be %NULL.
 * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
 *		   Can be %NULL.
 *
 * Stores the amount of isochronous bandwidth @tunnel consumes in
 * @consumed_up and @consumed_down. In case of success returns %0,
 * negative errno otherwise.
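 * If the tunnel is not fully activated, %0 is stored in both.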
 */
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
				 int *consumed_down)
{
	int up_bw = 0, down_bw = 0;

	if (!tb_tunnel_is_active(tunnel))
		goto out;

	if (tunnel->consumed_bandwidth) {
		int ret;

		ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
		if (ret)
			return ret;

		tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
			      down_bw);
	}

out:
	if (consumed_up)
		*consumed_up = up_bw;
	if (consumed_down)
		*consumed_down = down_bw;

	return 0;
}

/**
 * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
 * @tunnel: Tunnel whose unused bandwidth to release
 *
 * If tunnel supports dynamic bandwidth management (USB3 tunnels at the
 * moment) this function makes it release all the unused bandwidth.
 *
 * Returns %0 in case of success and negative errno otherwise.
 */
int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
	if (!tb_tunnel_is_active(tunnel))
		return 0;

	if (tunnel->release_unused_bandwidth) {
		int ret;

		ret = tunnel->release_unused_bandwidth(tunnel);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
 * @tunnel: Tunnel reclaiming available bandwidth
 * @available_up: Available upstream bandwidth (in Mb/s)
 * @available_down: Available downstream bandwidth (in Mb/s)
 *
 * Reclaims bandwidth from @available_up and @available_down and updates
 * the variables accordingly (e.g. decreases both according to what was
 * reclaimed by the tunnel). If nothing was reclaimed the values are
 * kept as is.
 */
void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
					   int *available_up,
					   int *available_down)
{
	if (!tb_tunnel_is_active(tunnel))
		return;

	if (tunnel->reclaim_available_bandwidth)
		tunnel->reclaim_available_bandwidth(tunnel, available_up,
						    available_down);
}