1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Thunderbolt driver - Tunneling support
4 *
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6 * Copyright (C) 2019, Intel Corporation
7 */
8
9 #include <linux/delay.h>
10 #include <linux/slab.h>
11 #include <linux/list.h>
12 #include <linux/ktime.h>
13 #include <linux/string_helpers.h>
14
15 #include "tunnel.h"
16 #include "tb.h"
17
18 /* PCIe adapters always use HopID 8 for both directions */
19 #define TB_PCI_HOPID 8
20
21 #define TB_PCI_PATH_DOWN 0
22 #define TB_PCI_PATH_UP 1
23
24 #define TB_PCI_PRIORITY 3
25 #define TB_PCI_WEIGHT 1
26
27 /* USB3 adapters always use HopID 8 for both directions */
28 #define TB_USB3_HOPID 8
29
30 #define TB_USB3_PATH_DOWN 0
31 #define TB_USB3_PATH_UP 1
32
33 #define TB_USB3_PRIORITY 3
34 #define TB_USB3_WEIGHT 2
35
36 /* DP adapters use HopID 8 for AUX and 9 for Video */
37 #define TB_DP_AUX_TX_HOPID 8
38 #define TB_DP_AUX_RX_HOPID 8
39 #define TB_DP_VIDEO_HOPID 9
40
41 #define TB_DP_VIDEO_PATH_OUT 0
42 #define TB_DP_AUX_PATH_OUT 1
43 #define TB_DP_AUX_PATH_IN 2
44
45 #define TB_DP_VIDEO_PRIORITY 1
46 #define TB_DP_VIDEO_WEIGHT 1
47
48 #define TB_DP_AUX_PRIORITY 2
49 #define TB_DP_AUX_WEIGHT 1
50
51 /* Minimum number of credits needed for PCIe path */
52 #define TB_MIN_PCIE_CREDITS 6U
53 /*
54 * Number of credits we try to allocate for each DMA path if not limited
55 * by the host router baMaxHI.
56 */
57 #define TB_DMA_CREDITS 14
58 /* Minimum number of credits for DMA path */
59 #define TB_MIN_DMA_CREDITS 1
60
61 #define TB_DMA_PRIORITY 5
62 #define TB_DMA_WEIGHT 1
63
64 /*
65 * Reserve additional bandwidth for USB 3.x and PCIe bulk traffic
66 * according to USB4 v2 Connection Manager guide. This ends up reserving
67 * 1500 Mb/s for PCIe and 3000 Mb/s for USB 3.x taking weights into
68 * account.
69 */
70 #define USB4_V2_PCI_MIN_BANDWIDTH (1500 * TB_PCI_WEIGHT)
71 #define USB4_V2_USB3_MIN_BANDWIDTH (1500 * TB_USB3_WEIGHT)
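/*
 * Worked example of the arithmetic above (the weights come from the
 * defines earlier in this file): with TB_PCI_WEIGHT = 1 and
 * TB_USB3_WEIGHT = 2 these evaluate to 1500 * 1 = 1500 Mb/s for PCIe
 * and 1500 * 2 = 3000 Mb/s for USB 3.x, matching the figures in the
 * comment.
 */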
72
73 /*
74 * According to the VESA spec, the DPRX negotiation shall complete within
75 * 5 seconds after the tunnel is established. Since at least i915 can
76 * runtime suspend if there is nothing connected, and it polls any new
77 * connections every 10 seconds, we use 12 seconds here.
78 *
79 * These are in ms.
80 */
81 #define TB_DPRX_TIMEOUT 12000
82 #define TB_DPRX_WAIT_TIMEOUT 25
83 #define TB_DPRX_POLL_DELAY 50
84
85 static int dprx_timeout = TB_DPRX_TIMEOUT;
86 module_param(dprx_timeout, int, 0444);
87 MODULE_PARM_DESC(dprx_timeout,
88 "DPRX capability read timeout in ms, -1 waits forever (default: "
89 __MODULE_STRING(TB_DPRX_TIMEOUT) ")");
90
91 static unsigned int dma_credits = TB_DMA_CREDITS;
92 module_param(dma_credits, uint, 0444);
93 MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: "
94 __MODULE_STRING(TB_DMA_CREDITS) ")");
95
96 static bool bw_alloc_mode = true;
97 module_param(bw_alloc_mode, bool, 0444);
98 MODULE_PARM_DESC(bw_alloc_mode,
99 "enable bandwidth allocation mode if supported (default: true)");
100
101 static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
102
103 static const char * const tb_event_names[] = {
104 [TB_TUNNEL_ACTIVATED] = "activated",
105 [TB_TUNNEL_CHANGED] = "changed",
106 [TB_TUNNEL_DEACTIVATED] = "deactivated",
107 [TB_TUNNEL_LOW_BANDWIDTH] = "low bandwidth",
108 [TB_TUNNEL_NO_BANDWIDTH] = "insufficient bandwidth",
109 };
110
111 /* Synchronizes kref_get()/put() of struct tb_tunnel */
112 static DEFINE_MUTEX(tb_tunnel_lock);
113
114 static inline unsigned int tb_usable_credits(const struct tb_port *port)
115 {
116 return port->total_credits - port->ctl_credits;
117 }
118
119 /**
120 * tb_available_credits() - Available credits for PCIe and DMA
121 * @port: Lane adapter to check
122 * @max_dp_streams: If non-%NULL, stores the maximum number of simultaneous DP
123 * streams possible through this lane adapter
124 *
125 * Return: Number of available credits.
126 */
127 static unsigned int tb_available_credits(const struct tb_port *port,
128 size_t *max_dp_streams)
129 {
130 const struct tb_switch *sw = port->sw;
131 int credits, usb3, pcie, spare;
132 size_t ndp;
133
134 usb3 = tb_acpi_may_tunnel_usb3() ? sw->max_usb3_credits : 0;
135 pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0;
136
137 if (tb_acpi_is_xdomain_allowed()) {
138 spare = min_not_zero(sw->max_dma_credits, dma_credits);
139 /* Add some credits for potential second DMA tunnel */
140 spare += TB_MIN_DMA_CREDITS;
141 } else {
142 spare = 0;
143 }
144
145 credits = tb_usable_credits(port);
146 if (tb_acpi_may_tunnel_dp()) {
147 /*
148 * Maximum number of DP streams possible through the
149 * lane adapter.
150 */
151 if (sw->min_dp_aux_credits + sw->min_dp_main_credits)
152 ndp = (credits - (usb3 + pcie + spare)) /
153 (sw->min_dp_aux_credits + sw->min_dp_main_credits);
154 else
155 ndp = 0;
156 } else {
157 ndp = 0;
158 }
159 credits -= ndp * (sw->min_dp_aux_credits + sw->min_dp_main_credits);
160 credits -= usb3;
161
162 if (max_dp_streams)
163 *max_dp_streams = ndp;
164
165 return credits > 0 ? credits : 0;
166 }
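/*
 * Illustrative example of the accounting above (the numbers are made
 * up, not taken from any particular router): with total_credits = 60
 * and ctl_credits = 2 the usable credits are 58. Assuming
 * max_usb3_credits = 14, max_pcie_credits = 6, a DMA spare of
 * 14 + TB_MIN_DMA_CREDITS = 15 and min_dp_aux_credits +
 * min_dp_main_credits = 1 + 18 = 19:
 *
 *   ndp     = (58 - (14 + 6 + 15)) / 19 = 1 DP stream
 *   credits = 58 - 1 * 19 - 14 = 25 credits left for PCIe/DMA
 */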
167
168 static void tb_init_pm_support(struct tb_path_hop *hop)
169 {
170 struct tb_port *out_port = hop->out_port;
171 struct tb_port *in_port = hop->in_port;
172
173 if (tb_port_is_null(in_port) && tb_port_is_null(out_port) &&
174 usb4_switch_version(in_port->sw) >= 2)
175 hop->pm_support = true;
176 }
177
178 static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
179 enum tb_tunnel_type type)
180 {
181 struct tb_tunnel *tunnel;
182
183 tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
184 if (!tunnel)
185 return NULL;
186
187 tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
188 if (!tunnel->paths) {
189 kfree(tunnel);
190 return NULL;
191 }
192
193 INIT_LIST_HEAD(&tunnel->list);
194 tunnel->tb = tb;
195 tunnel->npaths = npaths;
196 tunnel->type = type;
197 kref_init(&tunnel->kref);
198
199 return tunnel;
200 }
201
202 static void tb_tunnel_get(struct tb_tunnel *tunnel)
203 {
204 mutex_lock(&tb_tunnel_lock);
205 kref_get(&tunnel->kref);
206 mutex_unlock(&tb_tunnel_lock);
207 }
208
209 static void tb_tunnel_destroy(struct kref *kref)
210 {
211 struct tb_tunnel *tunnel = container_of(kref, typeof(*tunnel), kref);
212 int i;
213
214 if (tunnel->destroy)
215 tunnel->destroy(tunnel);
216
217 for (i = 0; i < tunnel->npaths; i++) {
218 if (tunnel->paths[i])
219 tb_path_free(tunnel->paths[i]);
220 }
221
222 kfree(tunnel->paths);
223 kfree(tunnel);
224 }
225
226 void tb_tunnel_put(struct tb_tunnel *tunnel)
227 {
228 mutex_lock(&tb_tunnel_lock);
229 kref_put(&tunnel->kref, tb_tunnel_destroy);
230 mutex_unlock(&tb_tunnel_lock);
231 }
232
233 /**
234 * tb_tunnel_event() - Notify userspace about tunneling event
235 * @tb: Domain where the event occurred
236 * @event: Event that happened
237 * @type: Type of the tunnel in question
238 * @src_port: Tunnel source port (can be %NULL)
239 * @dst_port: Tunnel destination port (can be %NULL)
240 *
241 * Notifies userspace about tunneling @event in the domain. The tunnel
242 * does not need to exist (e.g. the tunnel was not activated because
243 * there is not enough bandwidth). If @src_port and @dst_port are
244 * given, the full %TUNNEL_DETAILS environment variable is filled in.
245 * Otherwise the shorter form (just the tunnel type) is used.
246 */
247 void tb_tunnel_event(struct tb *tb, enum tb_tunnel_event event,
248 enum tb_tunnel_type type,
249 const struct tb_port *src_port,
250 const struct tb_port *dst_port)
251 {
252 char *envp[3] = { NULL };
253
254 if (WARN_ON_ONCE(event >= ARRAY_SIZE(tb_event_names)))
255 return;
256 if (WARN_ON_ONCE(type >= ARRAY_SIZE(tb_tunnel_names)))
257 return;
258
259 envp[0] = kasprintf(GFP_KERNEL, "TUNNEL_EVENT=%s", tb_event_names[event]);
260 if (!envp[0])
261 return;
262
263 if (src_port != NULL && dst_port != NULL) {
264 envp[1] = kasprintf(GFP_KERNEL, "TUNNEL_DETAILS=%llx:%u <-> %llx:%u (%s)",
265 tb_route(src_port->sw), src_port->port,
266 tb_route(dst_port->sw), dst_port->port,
267 tb_tunnel_names[type]);
268 } else {
269 envp[1] = kasprintf(GFP_KERNEL, "TUNNEL_DETAILS=(%s)",
270 tb_tunnel_names[type]);
271 }
272
273 if (envp[1])
274 tb_domain_event(tb, envp);
275
276 kfree(envp[1]);
277 kfree(envp[0]);
278 }
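/*
 * For illustration, a DP tunnel activation with both ports passed in
 * results in an uevent environment roughly like the following (the
 * route and adapter numbers here are hypothetical):
 *
 *   TUNNEL_EVENT=activated
 *   TUNNEL_DETAILS=0:13 <-> 1:9 (DP)
 *
 * If either port is %NULL only TUNNEL_DETAILS=(DP) is sent.
 */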
279
280 static inline void tb_tunnel_set_active(struct tb_tunnel *tunnel, bool active)
281 {
282 if (active) {
283 tunnel->state = TB_TUNNEL_ACTIVE;
284 tb_tunnel_event(tunnel->tb, TB_TUNNEL_ACTIVATED, tunnel->type,
285 tunnel->src_port, tunnel->dst_port);
286 } else {
287 tunnel->state = TB_TUNNEL_INACTIVE;
288 tb_tunnel_event(tunnel->tb, TB_TUNNEL_DEACTIVATED, tunnel->type,
289 tunnel->src_port, tunnel->dst_port);
290 }
291 }
292
293 static inline void tb_tunnel_changed(struct tb_tunnel *tunnel)
294 {
295 tb_tunnel_event(tunnel->tb, TB_TUNNEL_CHANGED, tunnel->type,
296 tunnel->src_port, tunnel->dst_port);
297 }
298
299 static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable)
300 {
301 struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
302 int ret;
303
304 /* Only supported if both routers are at least USB4 v2 */
305 if ((usb4_switch_version(tunnel->src_port->sw) < 2) ||
306 (usb4_switch_version(tunnel->dst_port->sw) < 2))
307 return 0;
308
309 if (enable && tb_port_get_link_generation(port) < 4)
310 return 0;
311
312 ret = usb4_pci_port_set_ext_encapsulation(tunnel->src_port, enable);
313 if (ret)
314 return ret;
315
316 /*
317 * The downstream router could be unplugged, so disabling encapsulation
318 * in the upstream router alone must still be possible (tolerate -ENODEV).
319 */
320 ret = usb4_pci_port_set_ext_encapsulation(tunnel->dst_port, enable);
321 if (ret) {
322 if (enable)
323 return ret;
324 if (ret != -ENODEV)
325 return ret;
326 }
327
328 tb_tunnel_dbg(tunnel, "extended encapsulation %s\n",
329 str_enabled_disabled(enable));
330 return 0;
331 }
332
333 static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
334 {
335 int res;
336
337 if (activate) {
338 res = tb_pci_set_ext_encapsulation(tunnel, activate);
339 if (res)
340 return res;
341 }
342
343 if (activate)
344 res = tb_pci_port_enable(tunnel->dst_port, activate);
345 else
346 res = tb_pci_port_enable(tunnel->src_port, activate);
347 if (res)
348 return res;
349
350
351 if (activate) {
352 res = tb_pci_port_enable(tunnel->src_port, activate);
353 if (res)
354 return res;
355 } else {
356 /* Downstream router could be unplugged */
357 tb_pci_port_enable(tunnel->dst_port, activate);
358 }
359
360 return activate ? 0 : tb_pci_set_ext_encapsulation(tunnel, activate);
361 }
362
363 static int tb_pci_init_credits(struct tb_path_hop *hop)
364 {
365 struct tb_port *port = hop->in_port;
366 struct tb_switch *sw = port->sw;
367 unsigned int credits;
368
369 if (tb_port_use_credit_allocation(port)) {
370 unsigned int available;
371
372 available = tb_available_credits(port, NULL);
373 credits = min(sw->max_pcie_credits, available);
374
375 if (credits < TB_MIN_PCIE_CREDITS)
376 return -ENOSPC;
377
378 credits = max(TB_MIN_PCIE_CREDITS, credits);
379 } else {
380 if (tb_port_is_null(port))
381 credits = port->bonded ? 32 : 16;
382 else
383 credits = 7;
384 }
385
386 hop->initial_credits = credits;
387 return 0;
388 }
389
390 static int tb_pci_init_path(struct tb_path *path)
391 {
392 struct tb_path_hop *hop;
393
394 path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
395 path->egress_shared_buffer = TB_PATH_NONE;
396 path->ingress_fc_enable = TB_PATH_ALL;
397 path->ingress_shared_buffer = TB_PATH_NONE;
398 path->priority = TB_PCI_PRIORITY;
399 path->weight = TB_PCI_WEIGHT;
400 path->drop_packages = 0;
401
402 tb_path_for_each_hop(path, hop) {
403 int ret;
404
405 ret = tb_pci_init_credits(hop);
406 if (ret)
407 return ret;
408 }
409
410 return 0;
411 }
412
413 /**
414 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
415 * @tb: Pointer to the domain structure
416 * @down: PCIe downstream adapter
417 * @alloc_hopid: Allocate HopIDs from visited ports
418 *
419 * If @down adapter is active, follows the tunnel to the PCIe upstream
420 * adapter and back.
421 *
422 * Return: Pointer to &struct tb_tunnel or %NULL if there was no tunnel.
423 */
424 struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
425 bool alloc_hopid)
426 {
427 struct tb_tunnel *tunnel;
428 struct tb_path *path;
429
430 if (!tb_pci_port_is_enabled(down))
431 return NULL;
432
433 tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
434 if (!tunnel)
435 return NULL;
436
437 tunnel->activate = tb_pci_activate;
438 tunnel->src_port = down;
439
440 /*
441 * Discover both paths even if they are not complete. We will
442 * clean them up by calling tb_tunnel_deactivate() below in that
443 * case.
444 */
445 path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
446 &tunnel->dst_port, "PCIe Up", alloc_hopid);
447 if (!path) {
448 /* Just disable the downstream port */
449 tb_pci_port_enable(down, false);
450 goto err_free;
451 }
452 tunnel->paths[TB_PCI_PATH_UP] = path;
453 if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]))
454 goto err_free;
455
456 path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
457 "PCIe Down", alloc_hopid);
458 if (!path)
459 goto err_deactivate;
460 tunnel->paths[TB_PCI_PATH_DOWN] = path;
461 if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]))
462 goto err_deactivate;
463
464 /* Validate that the tunnel is complete */
465 if (!tb_port_is_pcie_up(tunnel->dst_port)) {
466 tb_port_warn(tunnel->dst_port,
467 "path does not end on a PCIe adapter, cleaning up\n");
468 goto err_deactivate;
469 }
470
471 if (down != tunnel->src_port) {
472 tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
473 goto err_deactivate;
474 }
475
476 if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
477 tb_tunnel_warn(tunnel,
478 "tunnel is not fully activated, cleaning up\n");
479 goto err_deactivate;
480 }
481
482 tb_tunnel_dbg(tunnel, "discovered\n");
483 return tunnel;
484
485 err_deactivate:
486 tb_tunnel_deactivate(tunnel);
487 err_free:
488 tb_tunnel_put(tunnel);
489
490 return NULL;
491 }
492
493 /**
494 * tb_tunnel_alloc_pci() - Allocate a PCIe tunnel
495 * @tb: Pointer to the domain structure
496 * @up: PCIe upstream adapter port
497 * @down: PCIe downstream adapter port
498 *
499 * Allocate a PCIe tunnel. The ports must be of type %TB_TYPE_PCIE_UP and
500 * %TB_TYPE_PCIE_DOWN.
501 *
502 * Return: Pointer to &struct tb_tunnel or %NULL on failure.
503 */
504 struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
505 struct tb_port *down)
506 {
507 struct tb_tunnel *tunnel;
508 struct tb_path *path;
509
510 tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
511 if (!tunnel)
512 return NULL;
513
514 tunnel->activate = tb_pci_activate;
515 tunnel->src_port = down;
516 tunnel->dst_port = up;
517
518 path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
519 "PCIe Down");
520 if (!path)
521 goto err_free;
522 tunnel->paths[TB_PCI_PATH_DOWN] = path;
523 if (tb_pci_init_path(path))
524 goto err_free;
525
526 path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
527 "PCIe Up");
528 if (!path)
529 goto err_free;
530 tunnel->paths[TB_PCI_PATH_UP] = path;
531 if (tb_pci_init_path(path))
532 goto err_free;
533
534 return tunnel;
535
536 err_free:
537 tb_tunnel_put(tunnel);
538 return NULL;
539 }
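/*
 * Minimal usage sketch (error handling trimmed; the way the adapters
 * are looked up here is an assumption about the caller, not part of
 * this API):
 *
 *   struct tb_port *up, *down;
 *   struct tb_tunnel *tunnel;
 *
 *   up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
 *   down = tb_switch_find_port(parent_sw, TB_TYPE_PCIE_DOWN);
 *   tunnel = tb_tunnel_alloc_pci(tb, up, down);
 *   if (tunnel && tb_tunnel_activate(tunnel))
 *           tb_tunnel_put(tunnel);
 */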
540
541 /**
542 * tb_tunnel_reserved_pci() - Amount of bandwidth to reserve for PCIe
543 * @port: Lane 0 adapter
544 * @reserved_up: Upstream bandwidth in Mb/s to reserve
545 * @reserved_down: Downstream bandwidth in Mb/s to reserve
546 *
547 * Can be called for any connected lane 0 adapter to find out how much
548 * bandwidth needs to be left in reserve for possible PCIe bulk traffic.
549 *
550 * Return:
551 * * %true - If there is something to be reserved. Writes the amount to
552 * @reserved_down/@reserved_up.
553 * * %false - Nothing to be reserved. Leaves @reserved_down/@reserved_up
554 * unmodified.
555 */
556 bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up,
557 int *reserved_down)
558 {
559 if (WARN_ON_ONCE(!port->remote))
560 return false;
561
562 if (!tb_acpi_may_tunnel_pcie())
563 return false;
564
565 if (tb_port_get_link_generation(port) < 4)
566 return false;
567
568 /* Must have PCIe adapters */
569 if (tb_is_upstream_port(port)) {
570 if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_UP))
571 return false;
572 if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_DOWN))
573 return false;
574 } else {
575 if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_DOWN))
576 return false;
577 if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_UP))
578 return false;
579 }
580
581 *reserved_up = USB4_V2_PCI_MIN_BANDWIDTH;
582 *reserved_down = USB4_V2_PCI_MIN_BANDWIDTH;
583
584 tb_port_dbg(port, "reserving %u/%u Mb/s for PCIe\n", *reserved_up,
585 *reserved_down);
586 return true;
587 }
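/*
 * A connection manager could use this roughly as follows when working
 * out how much bandwidth is left for a new tunnel (available_up and
 * available_down are assumed caller-side variables, not part of this
 * API):
 *
 *   int reserved_up = 0, reserved_down = 0;
 *
 *   if (tb_tunnel_reserved_pci(port, &reserved_up, &reserved_down)) {
 *           available_up -= reserved_up;
 *           available_down -= reserved_down;
 *   }
 */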
588
589 static bool tb_dp_is_usb4(const struct tb_switch *sw)
590 {
591 /* Titan Ridge DP adapters need the same treatment as USB4 */
592 return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
593 }
594
595 static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out,
596 int timeout_msec)
597 {
598 ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
599 u32 val;
600 int ret;
601
602 /* Both ends need to support this */
603 if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
604 return 0;
605
606 ret = tb_port_read(out, &val, TB_CFG_PORT,
607 out->cap_adap + DP_STATUS_CTRL, 1);
608 if (ret)
609 return ret;
610
611 val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;
612
613 ret = tb_port_write(out, &val, TB_CFG_PORT,
614 out->cap_adap + DP_STATUS_CTRL, 1);
615 if (ret)
616 return ret;
617
618 do {
619 ret = tb_port_read(out, &val, TB_CFG_PORT,
620 out->cap_adap + DP_STATUS_CTRL, 1);
621 if (ret)
622 return ret;
623 if (!(val & DP_STATUS_CTRL_CMHS))
624 return 0;
625 usleep_range(100, 150);
626 } while (ktime_before(ktime_get(), timeout));
627
628 return -ETIMEDOUT;
629 }
630
631 /*
632 * Returns maximum possible rate from capability supporting only DP 2.0
633 * and below. Used when DP BW allocation mode is not enabled.
634 */
635 static inline u32 tb_dp_cap_get_rate(u32 val)
636 {
637 u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;
638
639 switch (rate) {
640 case DP_COMMON_CAP_RATE_RBR:
641 return 1620;
642 case DP_COMMON_CAP_RATE_HBR:
643 return 2700;
644 case DP_COMMON_CAP_RATE_HBR2:
645 return 5400;
646 case DP_COMMON_CAP_RATE_HBR3:
647 return 8100;
648 default:
649 return 0;
650 }
651 }
652
653 /*
654 * Returns maximum possible rate from capability supporting DP 2.1
655 * UHBR20, 13.5 and 10 rates as well. Use only when DP BW allocation
656 * mode is enabled.
657 */
658 static inline u32 tb_dp_cap_get_rate_ext(u32 val)
659 {
660 if (val & DP_COMMON_CAP_UHBR20)
661 return 20000;
662 else if (val & DP_COMMON_CAP_UHBR13_5)
663 return 13500;
664 else if (val & DP_COMMON_CAP_UHBR10)
665 return 10000;
666
667 return tb_dp_cap_get_rate(val);
668 }
669
670 static inline bool tb_dp_is_uhbr_rate(unsigned int rate)
671 {
672 return rate >= 10000;
673 }
674
675 static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
676 {
677 val &= ~DP_COMMON_CAP_RATE_MASK;
678 switch (rate) {
679 default:
680 WARN(1, "invalid rate %u passed, defaulting to 1620 MB/s\n", rate);
681 fallthrough;
682 case 1620:
683 val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
684 break;
685 case 2700:
686 val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
687 break;
688 case 5400:
689 val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
690 break;
691 case 8100:
692 val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
693 break;
694 }
695 return val;
696 }
697
698 static inline u32 tb_dp_cap_get_lanes(u32 val)
699 {
700 u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;
701
702 switch (lanes) {
703 case DP_COMMON_CAP_1_LANE:
704 return 1;
705 case DP_COMMON_CAP_2_LANES:
706 return 2;
707 case DP_COMMON_CAP_4_LANES:
708 return 4;
709 default:
710 return 0;
711 }
712 }
713
714 static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
715 {
716 val &= ~DP_COMMON_CAP_LANES_MASK;
717 switch (lanes) {
718 default:
719 WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
720 lanes);
721 fallthrough;
722 case 1:
723 val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
724 break;
725 case 2:
726 val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
727 break;
728 case 4:
729 val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
730 break;
731 }
732 return val;
733 }
734
735 static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
736 {
737 /* Tunneling removes the DP 8b/10b or 128b/132b encoding overhead */
738 if (tb_dp_is_uhbr_rate(rate))
739 return rate * lanes * 128 / 132;
740 return rate * lanes * 8 / 10;
741 }
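/*
 * Worked examples of the formula above: HBR3 x4 (non-UHBR) gives
 * 8100 * 4 * 8 / 10 = 25920 Mb/s, and UHBR10 x4 gives
 * 10000 * 4 * 128 / 132 = 38787 Mb/s (integer division).
 */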
742
743 static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
744 u32 out_rate, u32 out_lanes, u32 *new_rate,
745 u32 *new_lanes)
746 {
747 static const u32 dp_bw[][2] = {
748 /* Mb/s, lanes */
749 { 8100, 4 }, /* 25920 Mb/s */
750 { 5400, 4 }, /* 17280 Mb/s */
751 { 8100, 2 }, /* 12960 Mb/s */
752 { 2700, 4 }, /* 8640 Mb/s */
753 { 5400, 2 }, /* 8640 Mb/s */
754 { 8100, 1 }, /* 6480 Mb/s */
755 { 1620, 4 }, /* 5184 Mb/s */
756 { 5400, 1 }, /* 4320 Mb/s */
757 { 2700, 2 }, /* 4320 Mb/s */
758 { 1620, 2 }, /* 2592 Mb/s */
759 { 2700, 1 }, /* 2160 Mb/s */
760 { 1620, 1 }, /* 1296 Mb/s */
761 };
762 unsigned int i;
763
764 /*
765 * Find a combination that can fit into max_bw and does not
766 * exceed the maximum rate and lanes supported by the DP OUT and
767 * DP IN adapters.
768 */
769 for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
770 if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
771 continue;
772
773 if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
774 continue;
775
776 if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
777 *new_rate = dp_bw[i][0];
778 *new_lanes = dp_bw[i][1];
779 return 0;
780 }
781 }
782
783 return -ENOSR;
784 }
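/*
 * For example, if both adapters support HBR3 x4 (25920 Mb/s) but
 * max_bw is 10000 Mb/s, the walk above skips 8100x4, 5400x4 and
 * 8100x2 and settles on 2700x4 (8640 Mb/s), the first entry in the
 * table that fits.
 */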
785
786 static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
787 {
788 u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
789 struct tb_port *out = tunnel->dst_port;
790 struct tb_port *in = tunnel->src_port;
791 int ret, max_bw;
792
793 /*
794 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
795 * newer generation hardware.
796 */
797 if (in->sw->generation < 2 || out->sw->generation < 2)
798 return 0;
799
800 /*
801 * Perform connection manager handshake between IN and OUT ports
802 * before capabilities exchange can take place.
803 */
804 ret = tb_dp_cm_handshake(in, out, 3000);
805 if (ret)
806 return ret;
807
808 /* Read both DP_LOCAL_CAP registers */
809 ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
810 in->cap_adap + DP_LOCAL_CAP, 1);
811 if (ret)
812 return ret;
813
814 ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
815 out->cap_adap + DP_LOCAL_CAP, 1);
816 if (ret)
817 return ret;
818
819 /* Write IN local caps to OUT remote caps */
820 ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
821 out->cap_adap + DP_REMOTE_CAP, 1);
822 if (ret)
823 return ret;
824
825 in_rate = tb_dp_cap_get_rate(in_dp_cap);
826 in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
827 tb_tunnel_dbg(tunnel,
828 "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
829 in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));
830
831 /*
832 * If the tunnel bandwidth is limited (max_bw is set) then see
833 * if we need to reduce bandwidth to fit there.
834 */
835 out_rate = tb_dp_cap_get_rate(out_dp_cap);
836 out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
837 bw = tb_dp_bandwidth(out_rate, out_lanes);
838 tb_tunnel_dbg(tunnel,
839 "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
840 out_rate, out_lanes, bw);
841
842 if (tb_tunnel_direction_downstream(tunnel))
843 max_bw = tunnel->max_down;
844 else
845 max_bw = tunnel->max_up;
846
847 if (max_bw && bw > max_bw) {
848 u32 new_rate, new_lanes, new_bw;
849
850 ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
851 out_rate, out_lanes, &new_rate,
852 &new_lanes);
853 if (ret) {
854 tb_tunnel_info(tunnel, "not enough bandwidth\n");
855 return ret;
856 }
857
858 new_bw = tb_dp_bandwidth(new_rate, new_lanes);
859 tb_tunnel_dbg(tunnel,
860 "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
861 new_rate, new_lanes, new_bw);
862
863 /*
864 * Set new rate and number of lanes before writing it to
865 * the IN port remote caps.
866 */
867 out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
868 out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
869 }
870
871 /*
872 * Titan Ridge does not disable AUX timers when it gets
873 * SET_CONFIG with SET_LTTPR_MODE set. This causes problems with
874 * DP tunneling.
875 */
876 if (tb_route(out->sw) && tb_switch_is_titan_ridge(out->sw)) {
877 out_dp_cap |= DP_COMMON_CAP_LTTPR_NS;
878 tb_tunnel_dbg(tunnel, "disabling LTTPR\n");
879 }
880
881 return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
882 in->cap_adap + DP_REMOTE_CAP, 1);
883 }
884
885 static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
886 {
887 int ret, estimated_bw, granularity, tmp;
888 struct tb_port *out = tunnel->dst_port;
889 struct tb_port *in = tunnel->src_port;
890 u32 out_dp_cap, out_rate, out_lanes;
891 u32 in_dp_cap, in_rate, in_lanes;
892 u32 rate, lanes;
893
894 if (!bw_alloc_mode)
895 return 0;
896
897 ret = usb4_dp_port_set_cm_bandwidth_mode_supported(in, true);
898 if (ret)
899 return ret;
900
901 ret = usb4_dp_port_set_group_id(in, in->group->index);
902 if (ret)
903 return ret;
904
905 /*
906 * Get the non-reduced rate and lanes based on the lowest
907 * capability of both adapters.
908 */
909 ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
910 in->cap_adap + DP_LOCAL_CAP, 1);
911 if (ret)
912 return ret;
913
914 ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
915 out->cap_adap + DP_LOCAL_CAP, 1);
916 if (ret)
917 return ret;
918
919 in_rate = tb_dp_cap_get_rate(in_dp_cap);
920 in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
921 out_rate = tb_dp_cap_get_rate(out_dp_cap);
922 out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
923
924 rate = min(in_rate, out_rate);
925 lanes = min(in_lanes, out_lanes);
926 tmp = tb_dp_bandwidth(rate, lanes);
927
928 tb_tunnel_dbg(tunnel, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
929 rate, lanes, tmp);
930
931 ret = usb4_dp_port_set_nrd(in, rate, lanes);
932 if (ret)
933 return ret;
934
935 /*
936 * Pick up granularity that supports maximum possible bandwidth.
937 * For that we use the UHBR rates too.
938 */
939 in_rate = tb_dp_cap_get_rate_ext(in_dp_cap);
940 out_rate = tb_dp_cap_get_rate_ext(out_dp_cap);
941 rate = min(in_rate, out_rate);
942 tmp = tb_dp_bandwidth(rate, lanes);
943
944 tb_tunnel_dbg(tunnel,
945 "maximum bandwidth through allocation mode %u Mb/s x%u = %u Mb/s\n",
946 rate, lanes, tmp);
947
948 for (granularity = 250; tmp / granularity > 255 && granularity <= 1000;
949 granularity *= 2)
950 ;
951
952 tb_tunnel_dbg(tunnel, "granularity %d Mb/s\n", granularity);
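	/*
	 * For example, UHBR20 x4 gives 20000 * 4 * 128 / 132 = 77575 Mb/s;
	 * 77575 / 250 = 310 exceeds 255 so the loop above doubles the
	 * granularity once and settles on 500 Mb/s (77575 / 500 = 155).
	 */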
953
954 /*
955 * Returns -EINVAL if granularity above is outside of the
956 * accepted ranges.
957 */
958 ret = usb4_dp_port_set_granularity(in, granularity);
959 if (ret)
960 return ret;
961
962 /*
963 * Bandwidth estimation is pretty much what we have in
964 * max_up/down fields. For discovery we just read what the
965 * estimation was set to.
966 */
967 if (tb_tunnel_direction_downstream(tunnel))
968 estimated_bw = tunnel->max_down;
969 else
970 estimated_bw = tunnel->max_up;
971
972 tb_tunnel_dbg(tunnel, "estimated bandwidth %d Mb/s\n", estimated_bw);
973
974 ret = usb4_dp_port_set_estimated_bandwidth(in, estimated_bw);
975 if (ret)
976 return ret;
977
978 /* Initial allocation should be 0 according to the spec */
979 ret = usb4_dp_port_allocate_bandwidth(in, 0);
980 if (ret)
981 return ret;
982
983 tb_tunnel_dbg(tunnel, "bandwidth allocation mode enabled\n");
984 return 0;
985 }
986
987 static int tb_dp_pre_activate(struct tb_tunnel *tunnel)
988 {
989 struct tb_port *in = tunnel->src_port;
990 struct tb_switch *sw = in->sw;
991 struct tb *tb = in->sw->tb;
992 int ret;
993
994 ret = tb_dp_xchg_caps(tunnel);
995 if (ret)
996 return ret;
997
998 if (!tb_switch_is_usb4(sw))
999 return 0;
1000
1001 if (!usb4_dp_port_bandwidth_mode_supported(in))
1002 return 0;
1003
1004 tb_tunnel_dbg(tunnel, "bandwidth allocation mode supported\n");
1005
1006 ret = usb4_dp_port_set_cm_id(in, tb->index);
1007 if (ret)
1008 return ret;
1009
1010 return tb_dp_bandwidth_alloc_mode_enable(tunnel);
1011 }
1012
1013 static void tb_dp_post_deactivate(struct tb_tunnel *tunnel)
1014 {
1015 struct tb_port *in = tunnel->src_port;
1016
1017 if (!usb4_dp_port_bandwidth_mode_supported(in))
1018 return;
1019 if (usb4_dp_port_bandwidth_mode_enabled(in)) {
1020 usb4_dp_port_set_cm_bandwidth_mode_supported(in, false);
1021 tb_tunnel_dbg(tunnel, "bandwidth allocation mode disabled\n");
1022 }
1023 }
1024
1025 static ktime_t dprx_timeout_to_ktime(int timeout_msec)
1026 {
1027 return timeout_msec >= 0 ?
1028 ktime_add_ms(ktime_get(), timeout_msec) : KTIME_MAX;
1029 }
1030
1031 static int tb_dp_wait_dprx(struct tb_tunnel *tunnel, int timeout_msec)
1032 {
1033 ktime_t timeout = dprx_timeout_to_ktime(timeout_msec);
1034 struct tb_port *in = tunnel->src_port;
1035
1036 /*
1037 * Wait for DPRX done. Normally it should already be set for an
1038 * active tunnel.
1039 */
1040 do {
1041 u32 val;
1042 int ret;
1043
1044 ret = tb_port_read(in, &val, TB_CFG_PORT,
1045 in->cap_adap + DP_COMMON_CAP, 1);
1046 if (ret)
1047 return ret;
1048
1049 if (val & DP_COMMON_CAP_DPRX_DONE)
1050 return 0;
1051
1052 usleep_range(100, 150);
1053 } while (ktime_before(ktime_get(), timeout));
1054
1055 tb_tunnel_dbg(tunnel, "DPRX read timeout\n");
1056 return -ETIMEDOUT;
1057 }
1058
1059 static void tb_dp_dprx_work(struct work_struct *work)
1060 {
1061 struct tb_tunnel *tunnel = container_of(work, typeof(*tunnel), dprx_work.work);
1062 struct tb *tb = tunnel->tb;
1063
1064 if (!tunnel->dprx_canceled) {
1065 mutex_lock(&tb->lock);
1066 if (tb_dp_is_usb4(tunnel->src_port->sw) &&
1067 tb_dp_wait_dprx(tunnel, TB_DPRX_WAIT_TIMEOUT)) {
1068 if (ktime_before(ktime_get(), tunnel->dprx_timeout)) {
1069 queue_delayed_work(tb->wq, &tunnel->dprx_work,
1070 msecs_to_jiffies(TB_DPRX_POLL_DELAY));
1071 mutex_unlock(&tb->lock);
1072 return;
1073 }
1074 } else {
1075 tb_tunnel_set_active(tunnel, true);
1076 }
1077 mutex_unlock(&tb->lock);
1078 }
1079
1080 if (tunnel->callback)
1081 tunnel->callback(tunnel, tunnel->callback_data);
1082 tb_tunnel_put(tunnel);
1083 }
1084
1085 static int tb_dp_dprx_start(struct tb_tunnel *tunnel)
1086 {
1087 /*
1088 * Bump up the reference to keep the tunnel around. It will be
1089 * dropped in tb_dp_dprx_stop() once the tunnel is deactivated.
1090 */
1091 tb_tunnel_get(tunnel);
1092
1093 tunnel->dprx_started = true;
1094
1095 if (tunnel->callback) {
1096 tunnel->dprx_timeout = dprx_timeout_to_ktime(dprx_timeout);
1097 queue_delayed_work(tunnel->tb->wq, &tunnel->dprx_work, 0);
1098 return -EINPROGRESS;
1099 }
1100
1101 return tb_dp_is_usb4(tunnel->src_port->sw) ?
1102 tb_dp_wait_dprx(tunnel, dprx_timeout) : 0;
1103 }
1104
1105 static void tb_dp_dprx_stop(struct tb_tunnel *tunnel)
1106 {
1107 if (tunnel->dprx_started) {
1108 tunnel->dprx_started = false;
1109 tunnel->dprx_canceled = true;
1110 if (cancel_delayed_work(&tunnel->dprx_work))
1111 tb_tunnel_put(tunnel);
1112 }
1113 }
1114
1115 static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
1116 {
1117 int ret;
1118
1119 if (active) {
1120 struct tb_path **paths;
1121 int last;
1122
1123 paths = tunnel->paths;
1124 last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;
1125
1126 tb_dp_port_set_hops(tunnel->src_port,
1127 paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
1128 paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
1129 paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);
1130
1131 tb_dp_port_set_hops(tunnel->dst_port,
1132 paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
1133 paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
1134 paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
1135 } else {
1136 tb_dp_dprx_stop(tunnel);
1137 tb_dp_port_hpd_clear(tunnel->src_port);
1138 tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
1139 if (tb_port_is_dpout(tunnel->dst_port))
1140 tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
1141 }
1142
1143 ret = tb_dp_port_enable(tunnel->src_port, active);
1144 if (ret)
1145 return ret;
1146
1147 if (tb_port_is_dpout(tunnel->dst_port)) {
1148 ret = tb_dp_port_enable(tunnel->dst_port, active);
1149 if (ret)
1150 return ret;
1151 }
1152
1153 return active ? tb_dp_dprx_start(tunnel) : 0;
1154 }
1155
1156 /**
1157 * tb_dp_bandwidth_mode_maximum_bandwidth() - Maximum possible bandwidth
1158 * @tunnel: DP tunnel to check
1159 * @max_bw_rounded: Maximum bandwidth in Mb/s rounded up to the next granularity
1160 *
1161 * Return: Maximum possible bandwidth for this tunnel in Mb/s, negative errno
1162 * in case of failure.
1163 */
1164 static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
1165 int *max_bw_rounded)
1166 {
1167 struct tb_port *in = tunnel->src_port;
1168 int ret, rate, lanes, max_bw;
1169 u32 cap;
1170
1171 /*
1172 * DP IN adapter DP_LOCAL_CAP gets updated to the lowest AUX
1173 * read parameter values so we can use this to determine the
1174 * maximum possible bandwidth over this link.
1175 *
1176 * See USB4 v2 spec 1.0 10.4.4.5.
1177 */
1178 ret = tb_port_read(in, &cap, TB_CFG_PORT,
1179 in->cap_adap + DP_LOCAL_CAP, 1);
1180 if (ret)
1181 return ret;
1182
1183 rate = tb_dp_cap_get_rate_ext(cap);
1184 lanes = tb_dp_cap_get_lanes(cap);
1185
1186 max_bw = tb_dp_bandwidth(rate, lanes);
1187
1188 if (max_bw_rounded) {
1189 ret = usb4_dp_port_granularity(in);
1190 if (ret < 0)
1191 return ret;
1192 *max_bw_rounded = roundup(max_bw, ret);
1193 }
1194
1195 return max_bw;
1196 }
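/*
 * Example of the rounding: if DP_LOCAL_CAP reports UHBR10 x4 the raw
 * maximum is 10000 * 4 * 128 / 132 = 38787 Mb/s, and with a 500 Mb/s
 * granularity *max_bw_rounded becomes roundup(38787, 500) = 39000 Mb/s.
 */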
1197
1198 static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
1199 int *consumed_up,
1200 int *consumed_down)
1201 {
1202 struct tb_port *in = tunnel->src_port;
1203 int ret, allocated_bw, max_bw_rounded;
1204
1205 if (!usb4_dp_port_bandwidth_mode_enabled(in))
1206 return -EOPNOTSUPP;
1207
1208 if (!tunnel->bw_mode)
1209 return -EOPNOTSUPP;
1210
1211 /* Read what was allocated previously if any */
1212 ret = usb4_dp_port_allocated_bandwidth(in);
1213 if (ret < 0)
1214 return ret;
1215 allocated_bw = ret;
1216
1217 ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded);
1218 if (ret < 0)
1219 return ret;
1220 if (allocated_bw == max_bw_rounded)
1221 allocated_bw = ret;
1222
1223 if (tb_tunnel_direction_downstream(tunnel)) {
1224 *consumed_up = 0;
1225 *consumed_down = allocated_bw;
1226 } else {
1227 *consumed_up = allocated_bw;
1228 *consumed_down = 0;
1229 }
1230
1231 return 0;
1232 }
1233
1234 static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
1235 int *allocated_down)
1236 {
1237 struct tb_port *in = tunnel->src_port;
1238
1239 /*
1240 * If we have already set the allocated bandwidth then use that.
1241 * Otherwise we read it from the DPRX.
1242 */
1243 if (usb4_dp_port_bandwidth_mode_enabled(in) && tunnel->bw_mode) {
1244 int ret, allocated_bw, max_bw_rounded;
1245
1246 ret = usb4_dp_port_allocated_bandwidth(in);
1247 if (ret < 0)
1248 return ret;
1249 allocated_bw = ret;
1250
1251 ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel,
1252 &max_bw_rounded);
1253 if (ret < 0)
1254 return ret;
1255 if (allocated_bw == max_bw_rounded)
1256 allocated_bw = ret;
1257
1258 if (tb_tunnel_direction_downstream(tunnel)) {
1259 *allocated_up = 0;
1260 *allocated_down = allocated_bw;
1261 } else {
1262 *allocated_up = allocated_bw;
1263 *allocated_down = 0;
1264 }
1265 return 0;
1266 }
1267
1268 return tunnel->consumed_bandwidth(tunnel, allocated_up,
1269 allocated_down);
1270 }
1271
1272 static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
1273 int *alloc_down)
1274 {
1275 struct tb_port *in = tunnel->src_port;
1276 int max_bw_rounded, ret, tmp;
1277
1278 if (!usb4_dp_port_bandwidth_mode_enabled(in))
1279 return -EOPNOTSUPP;
1280
1281 ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded);
1282 if (ret < 0)
1283 return ret;
1284
1285 if (tb_tunnel_direction_downstream(tunnel)) {
1286 tmp = min(*alloc_down, max_bw_rounded);
1287 ret = usb4_dp_port_allocate_bandwidth(in, tmp);
1288 if (ret)
1289 return ret;
1290 *alloc_down = tmp;
1291 *alloc_up = 0;
1292 } else {
1293 tmp = min(*alloc_up, max_bw_rounded);
1294 ret = usb4_dp_port_allocate_bandwidth(in, tmp);
1295 if (ret)
1296 return ret;
1297 *alloc_down = 0;
1298 *alloc_up = tmp;
1299 }
1300
1301 /* Now we can use BW mode registers to figure out the bandwidth */
1302 /* TODO: need to handle discovery too */
1303 tunnel->bw_mode = true;
1304 return 0;
1305 }
1306
1307 /* Read cap from tunnel DP IN */
1308 static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
1309 u32 *lanes)
1310 {
1311 struct tb_port *in = tunnel->src_port;
1312 u32 val;
1313 int ret;
1314
1315 switch (cap) {
1316 case DP_LOCAL_CAP:
1317 case DP_REMOTE_CAP:
1318 case DP_COMMON_CAP:
1319 break;
1320
1321 default:
1322 tb_tunnel_WARN(tunnel, "invalid capability index %#x\n", cap);
1323 return -EINVAL;
1324 }
1325
1326 /*
1327 * Read from the copied remote cap so that we take into account
1328 * if capabilities were reduced during exchange.
1329 */
1330 ret = tb_port_read(in, &val, TB_CFG_PORT, in->cap_adap + cap, 1);
1331 if (ret)
1332 return ret;
1333
1334 *rate = tb_dp_cap_get_rate(val);
1335 *lanes = tb_dp_cap_get_lanes(val);
1336 return 0;
1337 }
1338
1339 static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
1340 int *max_down)
1341 {
1342 int ret;
1343
1344 if (!usb4_dp_port_bandwidth_mode_enabled(tunnel->src_port))
1345 return -EOPNOTSUPP;
1346
1347 ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, NULL);
1348 if (ret < 0)
1349 return ret;
1350
1351 if (tb_tunnel_direction_downstream(tunnel)) {
1352 *max_up = 0;
1353 *max_down = ret;
1354 } else {
1355 *max_up = ret;
1356 *max_down = 0;
1357 }
1358
1359 return 0;
1360 }
1361
1362 static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
1363 int *consumed_down)
1364 {
1365 const struct tb_switch *sw = tunnel->src_port->sw;
1366 u32 rate = 0, lanes = 0;
1367 int ret;
1368
1369 if (tb_dp_is_usb4(sw)) {
1370 ret = tb_dp_wait_dprx(tunnel, 0);
1371 if (ret) {
1372 if (ret == -ETIMEDOUT) {
1373 /*
1374 * While we wait for DPRX to complete, the
1375 * tunnel consumes as much bandwidth as was
1376 * reserved for it initially.
1377 */
1378 ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP,
1379 &rate, &lanes);
1380 if (ret)
1381 return ret;
1382 } else {
1383 return ret;
1384 }
1385 } else {
1386 /*
1387 * On USB4 routers check if the bandwidth allocation
1388 * mode is enabled first and then read the bandwidth
1389 * through those registers.
1390 */
1391 ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up,
1392 consumed_down);
1393 if (ret < 0) {
1394 if (ret != -EOPNOTSUPP)
1395 return ret;
1396 } else if (!ret) {
1397 return 0;
1398 }
1399 ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes);
1400 if (ret)
1401 return ret;
1402 }
1403 } else if (sw->generation >= 2) {
1404 ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, &rate, &lanes);
1405 if (ret)
1406 return ret;
1407 } else {
1408 /* No bandwidth management for legacy devices */
1409 *consumed_up = 0;
1410 *consumed_down = 0;
1411 return 0;
1412 }
1413
1414 if (tb_tunnel_direction_downstream(tunnel)) {
1415 *consumed_up = 0;
1416 *consumed_down = tb_dp_bandwidth(rate, lanes);
1417 } else {
1418 *consumed_up = tb_dp_bandwidth(rate, lanes);
1419 *consumed_down = 0;
1420 }
1421
1422 return 0;
1423 }
1424
1425 static void tb_dp_init_aux_credits(struct tb_path_hop *hop)
1426 {
1427 struct tb_port *port = hop->in_port;
1428 struct tb_switch *sw = port->sw;
1429
1430 if (tb_port_use_credit_allocation(port))
1431 hop->initial_credits = sw->min_dp_aux_credits;
1432 else
1433 hop->initial_credits = 1;
1434 }
1435
1436 static void tb_dp_init_aux_path(struct tb_path *path, bool pm_support)
1437 {
1438 struct tb_path_hop *hop;
1439
1440 path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
1441 path->egress_shared_buffer = TB_PATH_NONE;
1442 path->ingress_fc_enable = TB_PATH_ALL;
1443 path->ingress_shared_buffer = TB_PATH_NONE;
1444 path->priority = TB_DP_AUX_PRIORITY;
1445 path->weight = TB_DP_AUX_WEIGHT;
1446
1447 tb_path_for_each_hop(path, hop) {
1448 tb_dp_init_aux_credits(hop);
1449 if (pm_support)
1450 tb_init_pm_support(hop);
1451 }
1452 }
1453
1454 static int tb_dp_init_video_credits(struct tb_path_hop *hop)
1455 {
1456 struct tb_port *port = hop->in_port;
1457 struct tb_switch *sw = port->sw;
1458
1459 if (tb_port_use_credit_allocation(port)) {
1460 unsigned int nfc_credits;
1461 size_t max_dp_streams;
1462
1463 tb_available_credits(port, &max_dp_streams);
1464 /*
1465 * Read the number of currently allocated NFC credits
1466 * from the lane adapter. Since we only use them for DP
1467 * tunneling we can use that to figure out how many DP
1468 * tunnels already go through the lane adapter.
1469 */
1470 nfc_credits = port->config.nfc_credits &
1471 ADP_CS_4_NFC_BUFFERS_MASK;
1472 if (nfc_credits / sw->min_dp_main_credits > max_dp_streams)
1473 return -ENOSPC;
1474
1475 hop->nfc_credits = sw->min_dp_main_credits;
1476 } else {
1477 hop->nfc_credits = min(port->total_credits - 2, 12U);
1478 }
1479
1480 return 0;
1481 }
1482
1483 static int tb_dp_init_video_path(struct tb_path *path, bool pm_support)
1484 {
1485 struct tb_path_hop *hop;
1486
1487 path->egress_fc_enable = TB_PATH_NONE;
1488 path->egress_shared_buffer = TB_PATH_NONE;
1489 path->ingress_fc_enable = TB_PATH_NONE;
1490 path->ingress_shared_buffer = TB_PATH_NONE;
1491 path->priority = TB_DP_VIDEO_PRIORITY;
1492 path->weight = TB_DP_VIDEO_WEIGHT;
1493
1494 tb_path_for_each_hop(path, hop) {
1495 int ret;
1496
1497 ret = tb_dp_init_video_credits(hop);
1498 if (ret)
1499 return ret;
1500 if (pm_support)
1501 tb_init_pm_support(hop);
1502 }
1503
1504 return 0;
1505 }
1506
1507 static void tb_dp_dump(struct tb_tunnel *tunnel)
1508 {
1509 struct tb_port *in, *out;
1510 u32 dp_cap, rate, lanes;
1511
1512 in = tunnel->src_port;
1513 out = tunnel->dst_port;
1514
1515 if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
1516 in->cap_adap + DP_LOCAL_CAP, 1))
1517 return;
1518
1519 rate = tb_dp_cap_get_rate(dp_cap);
1520 lanes = tb_dp_cap_get_lanes(dp_cap);
1521
1522 tb_tunnel_dbg(tunnel,
1523 "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
1524 rate, lanes, tb_dp_bandwidth(rate, lanes));
1525
1526 if (tb_port_read(out, &dp_cap, TB_CFG_PORT,
1527 out->cap_adap + DP_LOCAL_CAP, 1))
1528 return;
1529
1530 rate = tb_dp_cap_get_rate(dp_cap);
1531 lanes = tb_dp_cap_get_lanes(dp_cap);
1532
1533 tb_tunnel_dbg(tunnel,
1534 "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
1535 rate, lanes, tb_dp_bandwidth(rate, lanes));
1536
1537 if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
1538 in->cap_adap + DP_REMOTE_CAP, 1))
1539 return;
1540
1541 rate = tb_dp_cap_get_rate(dp_cap);
1542 lanes = tb_dp_cap_get_lanes(dp_cap);
1543
1544 tb_tunnel_dbg(tunnel, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
1545 rate, lanes, tb_dp_bandwidth(rate, lanes));
1546 }
1547
1548 /**
1549 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
1550 * @tb: Pointer to the domain structure
1551 * @in: DP in adapter
1552 * @alloc_hopid: Allocate HopIDs from visited ports
1553 *
1554 * If @in adapter is active, follows the tunnel to the DP out adapter
1555 * and back.
1557 *
1558 * Return: Pointer to &struct tb_tunnel or %NULL if no tunnel found.
1559 */
1560 struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
1561 bool alloc_hopid)
1562 {
1563 struct tb_tunnel *tunnel;
1564 struct tb_port *port;
1565 struct tb_path *path;
1566
1567 if (!tb_dp_port_is_enabled(in))
1568 return NULL;
1569
1570 tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
1571 if (!tunnel)
1572 return NULL;
1573
1574 tunnel->pre_activate = tb_dp_pre_activate;
1575 tunnel->activate = tb_dp_activate;
1576 tunnel->post_deactivate = tb_dp_post_deactivate;
1577 tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
1578 tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
1579 tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
1580 tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
1581 tunnel->src_port = in;
1582
1583 path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
1584 &tunnel->dst_port, "Video", alloc_hopid);
1585 if (!path) {
1586 /* Just disable the DP IN port */
1587 tb_dp_port_enable(in, false);
1588 goto err_free;
1589 }
1590 tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
1591 if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], false))
1592 goto err_free;
1593
1594 path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX",
1595 alloc_hopid);
1596 if (!path)
1597 goto err_deactivate;
1598 tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
1599 tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT], false);
1600
1601 path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
1602 &port, "AUX RX", alloc_hopid);
1603 if (!path)
1604 goto err_deactivate;
1605 tunnel->paths[TB_DP_AUX_PATH_IN] = path;
1606 tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN], false);
1607
1608 /* Validate that the tunnel is complete */
1609 if (!tb_port_is_dpout(tunnel->dst_port)) {
1610 tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
1611 goto err_deactivate;
1612 }
1613
1614 if (!tb_dp_port_is_enabled(tunnel->dst_port))
1615 goto err_deactivate;
1616
1617 if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
1618 goto err_deactivate;
1619
1620 if (port != tunnel->src_port) {
1621 tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
1622 goto err_deactivate;
1623 }
1624
1625 tb_dp_dump(tunnel);
1626
1627 tb_tunnel_dbg(tunnel, "discovered\n");
1628 return tunnel;
1629
1630 err_deactivate:
1631 tb_tunnel_deactivate(tunnel);
1632 err_free:
1633 tb_tunnel_put(tunnel);
1634
1635 return NULL;
1636 }
1637
1638 /**
1639 * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
1640 * @tb: Pointer to the domain structure
1641 * @in: DP in adapter port
1642 * @out: DP out adapter port
1643 * @link_nr: Preferred lane adapter when the link is not bonded
1644 * @max_up: Maximum available upstream bandwidth for the DP tunnel.
1645 * %0 if no available bandwidth.
1646 * @max_down: Maximum available downstream bandwidth for the DP tunnel.
1647 * %0 if no available bandwidth.
1648 * @callback: Optional callback that is called when the DP tunnel is
1649 * fully activated (or there is an error)
1650 * @callback_data: Optional data for @callback
1651 *
1652 * Allocates a tunnel between @in and @out that is capable of tunneling
1653 * Display Port traffic. If @callback is not %NULL it will be called
1654 * after tb_tunnel_activate() once the tunnel has been fully activated.
1655 * It can call tb_tunnel_is_active() to check whether activation was
1656 * successful (%false means there was some sort of issue).
1657 * The @callback is called without @tb->lock held.
1658 *
1659 * Return: Pointer to &struct tb_tunnel or %NULL in case of failure.
1660 */
1661 struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
1662 struct tb_port *out, int link_nr,
1663 int max_up, int max_down,
1664 void (*callback)(struct tb_tunnel *, void *),
1665 void *callback_data)
1666 {
1667 struct tb_tunnel *tunnel;
1668 struct tb_path **paths;
1669 struct tb_path *path;
1670 bool pm_support;
1671
1672 if (WARN_ON(!in->cap_adap || !out->cap_adap))
1673 return NULL;
1674
1675 tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
1676 if (!tunnel)
1677 return NULL;
1678
1679 tunnel->pre_activate = tb_dp_pre_activate;
1680 tunnel->activate = tb_dp_activate;
1681 tunnel->post_deactivate = tb_dp_post_deactivate;
1682 tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
1683 tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
1684 tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
1685 tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
1686 tunnel->src_port = in;
1687 tunnel->dst_port = out;
1688 tunnel->max_up = max_up;
1689 tunnel->max_down = max_down;
1690 tunnel->callback = callback;
1691 tunnel->callback_data = callback_data;
1692 INIT_DELAYED_WORK(&tunnel->dprx_work, tb_dp_dprx_work);
1693
1694 paths = tunnel->paths;
1695 pm_support = usb4_switch_version(in->sw) >= 2;
1696
1697 path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
1698 link_nr, "Video");
1699 if (!path)
1700 goto err_free;
1701 tb_dp_init_video_path(path, pm_support);
1702 paths[TB_DP_VIDEO_PATH_OUT] = path;
1703
1704 path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
1705 TB_DP_AUX_TX_HOPID, link_nr, "AUX TX");
1706 if (!path)
1707 goto err_free;
1708 tb_dp_init_aux_path(path, pm_support);
1709 paths[TB_DP_AUX_PATH_OUT] = path;
1710
1711 path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
1712 TB_DP_AUX_RX_HOPID, link_nr, "AUX RX");
1713 if (!path)
1714 goto err_free;
1715 tb_dp_init_aux_path(path, pm_support);
1716 paths[TB_DP_AUX_PATH_IN] = path;
1717
1718 return tunnel;
1719
1720 err_free:
1721 tb_tunnel_put(tunnel);
1722 return NULL;
1723 }
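/*
 * Minimal usage sketch (the callback and the error handling shown here
 * are one possible flow, not the only one):
 *
 *   static void dp_done(struct tb_tunnel *tunnel, void *data)
 *   {
 *           if (!tb_tunnel_is_active(tunnel))
 *                   tb_tunnel_warn(tunnel, "DP activation failed\n");
 *   }
 *
 *   tunnel = tb_tunnel_alloc_dp(tb, in, out, 0, available_up,
 *                               available_down, dp_done, NULL);
 *   if (tunnel) {
 *           ret = tb_tunnel_activate(tunnel);
 *           if (ret && ret != -EINPROGRESS)
 *                   tb_tunnel_put(tunnel);
 *   }
 */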
1724
1725 static unsigned int tb_dma_available_credits(const struct tb_port *port)
1726 {
1727 const struct tb_switch *sw = port->sw;
1728 int credits;
1729
1730 credits = tb_available_credits(port, NULL);
1731 if (tb_acpi_may_tunnel_pcie())
1732 credits -= sw->max_pcie_credits;
1733 credits -= port->dma_credits;
1734
1735 return credits > 0 ? credits : 0;
1736 }
1737
1738 static int tb_dma_reserve_credits(struct tb_path_hop *hop, unsigned int credits)
1739 {
1740 struct tb_port *port = hop->in_port;
1741
1742 if (tb_port_use_credit_allocation(port)) {
1743 unsigned int available = tb_dma_available_credits(port);
1744
1745 /*
1746 * Need to have at least TB_MIN_DMA_CREDITS, otherwise
1747 * DMA path cannot be established.
1748 */
1749 if (available < TB_MIN_DMA_CREDITS)
1750 return -ENOSPC;
1751
1752 while (credits > available)
1753 credits--;
1754
1755 tb_port_dbg(port, "reserving %u credits for DMA path\n",
1756 credits);
1757
1758 port->dma_credits += credits;
1759 } else {
1760 if (tb_port_is_null(port))
1761 credits = port->bonded ? 14 : 6;
1762 else
1763 credits = min(port->total_credits, credits);
1764 }
1765
1766 hop->initial_credits = credits;
1767 return 0;
1768 }
1769
1770 /* Path from lane adapter to NHI */
1771 static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits)
1772 {
1773 struct tb_path_hop *hop;
1774 unsigned int i, tmp;
1775
1776 path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
1777 path->ingress_fc_enable = TB_PATH_ALL;
1778 path->egress_shared_buffer = TB_PATH_NONE;
1779 path->ingress_shared_buffer = TB_PATH_NONE;
1780 path->priority = TB_DMA_PRIORITY;
1781 path->weight = TB_DMA_WEIGHT;
1782 path->clear_fc = true;
1783
1784 /*
1785 * First lane adapter is the one connected to the remote host.
1786 * We don't tunnel other traffic over this link so we can use
1787 * all the credits (except the ones reserved for control traffic).
1788 */
1789 hop = &path->hops[0];
1790 tmp = min(tb_usable_credits(hop->in_port), credits);
1791 hop->initial_credits = tmp;
1792 hop->in_port->dma_credits += tmp;
1793
1794 for (i = 1; i < path->path_length; i++) {
1795 int ret;
1796
1797 ret = tb_dma_reserve_credits(&path->hops[i], credits);
1798 if (ret)
1799 return ret;
1800 }
1801
1802 return 0;
1803 }
1804
1805 /* Path from NHI to lane adapter */
1806 static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits)
1807 {
1808 struct tb_path_hop *hop;
1809
1810 path->egress_fc_enable = TB_PATH_ALL;
1811 path->ingress_fc_enable = TB_PATH_ALL;
1812 path->egress_shared_buffer = TB_PATH_NONE;
1813 path->ingress_shared_buffer = TB_PATH_NONE;
1814 path->priority = TB_DMA_PRIORITY;
1815 path->weight = TB_DMA_WEIGHT;
1816 path->clear_fc = true;
1817
1818 tb_path_for_each_hop(path, hop) {
1819 int ret;
1820
1821 ret = tb_dma_reserve_credits(hop, credits);
1822 if (ret)
1823 return ret;
1824 }
1825
1826 return 0;
1827 }
1828
1829 static void tb_dma_release_credits(struct tb_path_hop *hop)
1830 {
1831 struct tb_port *port = hop->in_port;
1832
1833 if (tb_port_use_credit_allocation(port)) {
1834 port->dma_credits -= hop->initial_credits;
1835
1836 tb_port_dbg(port, "released %u DMA path credits\n",
1837 hop->initial_credits);
1838 }
1839 }
1840
1841 static void tb_dma_destroy_path(struct tb_path *path)
1842 {
1843 struct tb_path_hop *hop;
1844
1845 tb_path_for_each_hop(path, hop)
1846 tb_dma_release_credits(hop);
1847 }
1848
1849 static void tb_dma_destroy(struct tb_tunnel *tunnel)
1850 {
1851 int i;
1852
1853 for (i = 0; i < tunnel->npaths; i++) {
1854 if (!tunnel->paths[i])
1855 continue;
1856 tb_dma_destroy_path(tunnel->paths[i]);
1857 }
1858 }
1859
1860 /**
1861 * tb_tunnel_alloc_dma() - allocate a DMA tunnel
1862 * @tb: Pointer to the domain structure
1863 * @nhi: Host controller port
1864 * @dst: Destination null port which the other domain is connected to
1865 * @transmit_path: HopID used for transmitting packets
1866 * @transmit_ring: NHI ring number used to send packets towards the
1867 * other domain. Set to %-1 if TX path is not needed.
1868 * @receive_path: HopID used for receiving packets
1869 * @receive_ring: NHI ring number used to receive packets from the
1870 * other domain. Set to %-1 if RX path is not needed.
1871 *
1872 * Return: Pointer to &struct tb_tunnel or %NULL in case of failure.
1873 */
1874 struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
1875 struct tb_port *dst, int transmit_path,
1876 int transmit_ring, int receive_path,
1877 int receive_ring)
1878 {
1879 struct tb_tunnel *tunnel;
1880 size_t npaths = 0, i = 0;
1881 struct tb_path *path;
1882 int credits;
1883
1884 /* Ring 0 is reserved for control channel */
1885 if (WARN_ON(!receive_ring || !transmit_ring))
1886 return NULL;
1887
1888 if (receive_ring > 0)
1889 npaths++;
1890 if (transmit_ring > 0)
1891 npaths++;
1892
1893 if (WARN_ON(!npaths))
1894 return NULL;
1895
1896 tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA);
1897 if (!tunnel)
1898 return NULL;
1899
1900 tunnel->src_port = nhi;
1901 tunnel->dst_port = dst;
1902 tunnel->destroy = tb_dma_destroy;
1903
1904 credits = min_not_zero(dma_credits, nhi->sw->max_dma_credits);
1905
1906 if (receive_ring > 0) {
1907 path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
1908 "DMA RX");
1909 if (!path)
1910 goto err_free;
1911 tunnel->paths[i++] = path;
1912 if (tb_dma_init_rx_path(path, credits)) {
1913 tb_tunnel_dbg(tunnel, "not enough buffers for RX path\n");
1914 goto err_free;
1915 }
1916 }
1917
1918 if (transmit_ring > 0) {
1919 path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
1920 "DMA TX");
1921 if (!path)
1922 goto err_free;
1923 tunnel->paths[i++] = path;
1924 if (tb_dma_init_tx_path(path, credits)) {
1925 tb_tunnel_dbg(tunnel, "not enough buffers for TX path\n");
1926 goto err_free;
1927 }
1928 }
1929
1930 return tunnel;
1931
1932 err_free:
1933 tb_tunnel_put(tunnel);
1934 return NULL;
1935 }
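/*
 * Example (illustrative sketch, not taken from the driver): a connection
 * manager could establish a DMA tunnel like this once the remote host has
 * approved the XDomain paths. Only the tb_tunnel_*() helpers are real APIs
 * from this file; the HopID/ring numbers (8/1), nhi_port, dst_port and the
 * external tunnel_list are made-up placeholders.
 *
 *	struct tb_tunnel *tunnel;
 *	int ret;
 *
 *	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, 8, 1, 8, 1);
 *	if (!tunnel)
 *		return -ENOMEM;
 *
 *	ret = tb_tunnel_activate(tunnel);
 *	if (ret) {
 *		tb_tunnel_put(tunnel);
 *		return ret;
 *	}
 *	list_add_tail(&tunnel->list, &tunnel_list);
 */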
1936
1937 /**
1938 * tb_tunnel_match_dma() - Match DMA tunnel
1939 * @tunnel: Tunnel to match
1940 * @transmit_path: HopID used for transmitting packets. Pass %-1 to ignore.
1941 * @transmit_ring: NHI ring number used to send packets towards the
1942 * other domain. Pass %-1 to ignore.
1943 * @receive_path: HopID used for receiving packets. Pass %-1 to ignore.
1944 * @receive_ring: NHI ring number used to receive packets from the
1945 * other domain. Pass %-1 to ignore.
1946 *
1947 * This function can be used to match a specific DMA tunnel when there are
1948 * multiple DMA tunnels going through the same XDomain connection.
1949 *
1950 * Return: %true if there is a match, %false otherwise.
1951 */
1952 bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
1953 int transmit_ring, int receive_path, int receive_ring)
1954 {
1955 const struct tb_path *tx_path = NULL, *rx_path = NULL;
1956 int i;
1957
1958 if (!receive_ring || !transmit_ring)
1959 return false;
1960
1961 for (i = 0; i < tunnel->npaths; i++) {
1962 const struct tb_path *path = tunnel->paths[i];
1963
1964 if (!path)
1965 continue;
1966
1967 if (tb_port_is_nhi(path->hops[0].in_port))
1968 tx_path = path;
1969 else if (tb_port_is_nhi(path->hops[path->path_length - 1].out_port))
1970 rx_path = path;
1971 }
1972
1973 if (transmit_ring > 0 || transmit_path > 0) {
1974 if (!tx_path)
1975 return false;
1976 if (transmit_ring > 0 &&
1977 (tx_path->hops[0].in_hop_index != transmit_ring))
1978 return false;
1979 if (transmit_path > 0 &&
1980 (tx_path->hops[tx_path->path_length - 1].next_hop_index != transmit_path))
1981 return false;
1982 }
1983
1984 if (receive_ring > 0 || receive_path > 0) {
1985 if (!rx_path)
1986 return false;
1987 if (receive_path > 0 &&
1988 (rx_path->hops[0].in_hop_index != receive_path))
1989 return false;
1990 if (receive_ring > 0 &&
1991 (rx_path->hops[rx_path->path_length - 1].next_hop_index != receive_ring))
1992 return false;
1993 }
1994
1995 return true;
1996 }
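/*
 * Example (illustrative sketch): tearing down one specific DMA tunnel when
 * several share the same XDomain link. tb_tunnel_is_dma() comes from
 * tunnel.h; the tunnel_list iteration only mirrors what a connection
 * manager might do and the variable names are placeholders.
 *
 *	struct tb_tunnel *tunnel, *n;
 *
 *	list_for_each_entry_safe(tunnel, n, &tunnel_list, list) {
 *		if (!tb_tunnel_is_dma(tunnel))
 *			continue;
 *		if (!tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
 *					 receive_path, receive_ring))
 *			continue;
 *		tb_tunnel_deactivate(tunnel);
 *		list_del(&tunnel->list);
 *		tb_tunnel_put(tunnel);
 *	}
 */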
1997
1998 static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
1999 {
2000 int ret, up_max_rate, down_max_rate;
2001
2002 ret = usb4_usb3_port_max_link_rate(up);
2003 if (ret < 0)
2004 return ret;
2005 up_max_rate = ret;
2006
2007 ret = usb4_usb3_port_max_link_rate(down);
2008 if (ret < 0)
2009 return ret;
2010 down_max_rate = ret;
2011
2012 return min(up_max_rate, down_max_rate);
2013 }
2014
2015 static int tb_usb3_pre_activate(struct tb_tunnel *tunnel)
2016 {
2017 tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
2018 tunnel->allocated_up, tunnel->allocated_down);
2019
2020 return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
2021 &tunnel->allocated_up,
2022 &tunnel->allocated_down);
2023 }
2024
2025 static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
2026 {
2027 int res;
2028
2029 res = tb_usb3_port_enable(tunnel->src_port, activate);
2030 if (res)
2031 return res;
2032
2033 if (tb_port_is_usb3_up(tunnel->dst_port))
2034 return tb_usb3_port_enable(tunnel->dst_port, activate);
2035
2036 return 0;
2037 }
2038
2039 static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
2040 int *consumed_up, int *consumed_down)
2041 {
2042 struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
2043 int pcie_weight = tb_acpi_may_tunnel_pcie() ? TB_PCI_WEIGHT : 0;
2044
2045 /*
2046 * PCIe tunneling, if enabled, affects the USB3 bandwidth so
2047 * take that into account here.
2048 */
2049 *consumed_up = tunnel->allocated_up *
2050 (TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
2051 *consumed_down = tunnel->allocated_down *
2052 (TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
2053
2054 if (tb_port_get_link_generation(port) >= 4) {
2055 *consumed_up = max(*consumed_up, USB4_V2_USB3_MIN_BANDWIDTH);
2056 *consumed_down = max(*consumed_down, USB4_V2_USB3_MIN_BANDWIDTH);
2057 }
2058
2059 return 0;
2060 }
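/*
 * Worked example for the scaling above (numbers are illustrative): with
 * TB_USB3_WEIGHT = 2 and TB_PCI_WEIGHT = 1, an allocation of 900 Mb/s is
 * reported as 900 * (2 + 1) / 2 = 1350 Mb/s while PCIe tunneling is
 * allowed, and as 900 Mb/s otherwise. On USB4 v2 (Gen 4) links the result
 * is additionally raised to at least USB4_V2_USB3_MIN_BANDWIDTH.
 */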
2061
2062 static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
2063 {
2064 int ret;
2065
2066 ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
2067 &tunnel->allocated_up,
2068 &tunnel->allocated_down);
2069 if (ret)
2070 return ret;
2071
2072 tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
2073 tunnel->allocated_up, tunnel->allocated_down);
2074 return 0;
2075 }
2076
2077 static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
2078 int *available_up,
2079 int *available_down)
2080 {
2081 int ret, max_rate, allocate_up, allocate_down;
2082
2083 ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port);
2084 if (ret < 0) {
2085 tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
2086 return;
2087 }
2088
2089 /*
2090 * 90% of the max rate can be allocated for isochronous
2091 * transfers.
2092 */
2093 max_rate = ret * 90 / 100;
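	/*
	 * For example, a 10 Gb/s (Gen 2) USB3 link yields
	 * max_rate = 10000 * 90 / 100 = 9000 Mb/s.
	 */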
2094
2095 /* No need to reclaim if already at maximum */
2096 if (tunnel->allocated_up >= max_rate &&
2097 tunnel->allocated_down >= max_rate)
2098 return;
2099
2100 /* Don't go lower than what is already allocated */
2101 allocate_up = min(max_rate, *available_up);
2102 if (allocate_up < tunnel->allocated_up)
2103 allocate_up = tunnel->allocated_up;
2104
2105 allocate_down = min(max_rate, *available_down);
2106 if (allocate_down < tunnel->allocated_down)
2107 allocate_down = tunnel->allocated_down;
2108
2109 /* If nothing changed there is no need to do more */
2110 if (allocate_up == tunnel->allocated_up &&
2111 allocate_down == tunnel->allocated_down)
2112 return;
2113
2114 ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
2115 &allocate_down);
2116 if (ret) {
2117 tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
2118 return;
2119 }
2120
2121 tunnel->allocated_up = allocate_up;
2122 *available_up -= tunnel->allocated_up;
2123
2124 tunnel->allocated_down = allocate_down;
2125 *available_down -= tunnel->allocated_down;
2126
2127 tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
2128 tunnel->allocated_up, tunnel->allocated_down);
2129 }
2130
2131 static void tb_usb3_init_credits(struct tb_path_hop *hop)
2132 {
2133 struct tb_port *port = hop->in_port;
2134 struct tb_switch *sw = port->sw;
2135 unsigned int credits;
2136
2137 if (tb_port_use_credit_allocation(port)) {
2138 credits = sw->max_usb3_credits;
2139 } else {
2140 if (tb_port_is_null(port))
2141 credits = port->bonded ? 32 : 16;
2142 else
2143 credits = 7;
2144 }
2145
2146 hop->initial_credits = credits;
2147 }
2148
2149 static void tb_usb3_init_path(struct tb_path *path)
2150 {
2151 struct tb_path_hop *hop;
2152
2153 path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
2154 path->egress_shared_buffer = TB_PATH_NONE;
2155 path->ingress_fc_enable = TB_PATH_ALL;
2156 path->ingress_shared_buffer = TB_PATH_NONE;
2157 path->priority = TB_USB3_PRIORITY;
2158 path->weight = TB_USB3_WEIGHT;
2159 path->drop_packages = 0;
2160
2161 tb_path_for_each_hop(path, hop)
2162 tb_usb3_init_credits(hop);
2163 }
2164
2165 /**
2166 * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
2167 * @tb: Pointer to the domain structure
2168 * @down: USB3 downstream adapter
2169 * @alloc_hopid: Allocate HopIDs from visited ports
2170 *
2171 * If @down adapter is active, follows the tunnel to the USB3 upstream
2172 * adapter and back.
2173 *
2174 * Return: Pointer to &struct tb_tunnel or %NULL if there was no tunnel.
2175 */
2176 struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
2177 bool alloc_hopid)
2178 {
2179 struct tb_tunnel *tunnel;
2180 struct tb_path *path;
2181
2182 if (!tb_usb3_port_is_enabled(down))
2183 return NULL;
2184
2185 tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
2186 if (!tunnel)
2187 return NULL;
2188
2189 tunnel->activate = tb_usb3_activate;
2190 tunnel->src_port = down;
2191
2192 /*
2193 * Discover both paths even if they are not complete. We will
2194 * clean them up by calling tb_tunnel_deactivate() below in that
2195 * case.
2196 */
2197 path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
2198 &tunnel->dst_port, "USB3 Down", alloc_hopid);
2199 if (!path) {
2200 /* Just disable the downstream port */
2201 tb_usb3_port_enable(down, false);
2202 goto err_free;
2203 }
2204 tunnel->paths[TB_USB3_PATH_DOWN] = path;
2205 tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);
2206
2207 path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
2208 "USB3 Up", alloc_hopid);
2209 if (!path)
2210 goto err_deactivate;
2211 tunnel->paths[TB_USB3_PATH_UP] = path;
2212 tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);
2213
2214 /* Validate that the tunnel is complete */
2215 if (!tb_port_is_usb3_up(tunnel->dst_port)) {
2216 tb_port_warn(tunnel->dst_port,
2217 "path does not end on an USB3 adapter, cleaning up\n");
2218 goto err_deactivate;
2219 }
2220
2221 if (down != tunnel->src_port) {
2222 tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
2223 goto err_deactivate;
2224 }
2225
2226 if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
2227 tb_tunnel_warn(tunnel,
2228 "tunnel is not fully activated, cleaning up\n");
2229 goto err_deactivate;
2230 }
2231
2232 if (!tb_route(down->sw)) {
2233 int ret;
2234
2235 /*
2236 * Read the initial bandwidth allocation for the first
2237 * hop tunnel.
2238 */
2239 ret = usb4_usb3_port_allocated_bandwidth(down,
2240 &tunnel->allocated_up, &tunnel->allocated_down);
2241 if (ret)
2242 goto err_deactivate;
2243
2244 tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
2245 tunnel->allocated_up, tunnel->allocated_down);
2246
2247 tunnel->pre_activate = tb_usb3_pre_activate;
2248 tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
2249 tunnel->release_unused_bandwidth =
2250 tb_usb3_release_unused_bandwidth;
2251 tunnel->reclaim_available_bandwidth =
2252 tb_usb3_reclaim_available_bandwidth;
2253 }
2254
2255 tb_tunnel_dbg(tunnel, "discovered\n");
2256 return tunnel;
2257
2258 err_deactivate:
2259 tb_tunnel_deactivate(tunnel);
2260 err_free:
2261 tb_tunnel_put(tunnel);
2262
2263 return NULL;
2264 }
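/*
 * Example (illustrative sketch): at connection manager start up, USB3
 * tunnels already set up by the boot firmware can be adopted like this.
 * The tunnel_list is a placeholder; the other calls are existing helpers.
 *
 *	struct tb_tunnel *tunnel;
 *	struct tb_port *port;
 *
 *	tb_switch_for_each_port(tb->root_switch, port) {
 *		if (!tb_port_is_usb3_down(port))
 *			continue;
 *		tunnel = tb_tunnel_discover_usb3(tb, port, true);
 *		if (tunnel)
 *			list_add_tail(&tunnel->list, &tunnel_list);
 *	}
 */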
2265
2266 /**
2267 * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
2268 * @tb: Pointer to the domain structure
2269 * @up: USB3 upstream adapter port
2270 * @down: USB3 downstream adapter port
2271 * @max_up: Maximum available upstream bandwidth for the USB3 tunnel.
2272 * %0 if no available bandwidth.
2273 * @max_down: Maximum available downstream bandwidth for the USB3 tunnel.
2274 * %0 if no available bandwidth.
2275 *
2276 * Allocate a USB3 tunnel. The ports must be of type %TB_TYPE_USB3_UP and
2277 * %TB_TYPE_USB3_DOWN.
2278 *
2279 * Return: Pointer to &struct tb_tunnel or %NULL in case of failure.
2280 */
2281 struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
2282 struct tb_port *down, int max_up,
2283 int max_down)
2284 {
2285 struct tb_tunnel *tunnel;
2286 struct tb_path *path;
2287 int max_rate = 0;
2288
2289 if (!tb_route(down->sw) && (max_up > 0 || max_down > 0)) {
2290 /*
2291 * For USB3 isochronous transfers, we allow bandwidth of up to
2292 * 90% of the maximum bandwidth supported by the USB3
2293 * adapters.
2294 */
2295 max_rate = tb_usb3_max_link_rate(down, up);
2296 if (max_rate < 0)
2297 return NULL;
2298
2299 max_rate = max_rate * 90 / 100;
2300 tb_port_dbg(up, "maximum required bandwidth for USB3 tunnel %d Mb/s\n",
2301 max_rate);
2302 }
2303
2304 tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
2305 if (!tunnel)
2306 return NULL;
2307
2308 tunnel->activate = tb_usb3_activate;
2309 tunnel->src_port = down;
2310 tunnel->dst_port = up;
2311 tunnel->max_up = max_up;
2312 tunnel->max_down = max_down;
2313
2314 path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
2315 "USB3 Down");
2316 if (!path)
2317 goto err_free;
2318 tb_usb3_init_path(path);
2319 tunnel->paths[TB_USB3_PATH_DOWN] = path;
2320
2321 path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
2322 "USB3 Up");
2323 if (!path)
2324 goto err_free;
2325 tb_usb3_init_path(path);
2326 tunnel->paths[TB_USB3_PATH_UP] = path;
2327
2328 if (!tb_route(down->sw)) {
2329 tunnel->allocated_up = min(max_rate, max_up);
2330 tunnel->allocated_down = min(max_rate, max_down);
2331
2332 tunnel->pre_activate = tb_usb3_pre_activate;
2333 tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
2334 tunnel->release_unused_bandwidth =
2335 tb_usb3_release_unused_bandwidth;
2336 tunnel->reclaim_available_bandwidth =
2337 tb_usb3_reclaim_available_bandwidth;
2338 }
2339
2340 return tunnel;
2341
2342 err_free:
2343 tb_tunnel_put(tunnel);
2344 return NULL;
2345 }
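/*
 * Example (illustrative sketch): setting up a new USB3 tunnel between a
 * downstream adapter of the host router and the upstream adapter of a
 * newly plugged device. The available_up/available_down values are assumed
 * to come from the connection manager's own bandwidth bookkeeping.
 *
 *	struct tb_tunnel *tunnel;
 *	int ret;
 *
 *	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
 *				      available_down);
 *	if (!tunnel)
 *		return -ENOMEM;
 *
 *	ret = tb_tunnel_activate(tunnel);
 *	if (ret) {
 *		tb_tunnel_put(tunnel);
 *		return ret;
 *	}
 */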
2346
2347 /**
2348 * tb_tunnel_is_invalid() - check whether an activated tunnel is still valid
2349 * @tunnel: Tunnel to check
2350 *
2351 * Return: %true if any path of the tunnel is no longer valid, %false otherwise.
2352 */
2353 bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
2354 {
2355 int i;
2356
2357 for (i = 0; i < tunnel->npaths; i++) {
2358 WARN_ON(!tunnel->paths[i]->activated);
2359 if (tb_path_is_invalid(tunnel->paths[i]))
2360 return true;
2361 }
2362
2363 return false;
2364 }
2365
2366 /**
2367 * tb_tunnel_activate() - activate a tunnel
2368 * @tunnel: Tunnel to activate
2369 *
2370 * Return:
2371 * * %0 - On success.
2372 * * %-EINPROGRESS - If the tunnel activation is still in progress (DP
2373 * tunnels return this until the DPRX capabilities read completes).
2374 * * Negative errno - Another error occurred.
2375 */
2376 int tb_tunnel_activate(struct tb_tunnel *tunnel)
2377 {
2378 int res, i;
2379
2380 tb_tunnel_dbg(tunnel, "activating\n");
2381
2382 /*
2383 * Make sure all paths are properly disabled before enabling
2384 * them again.
2385 */
2386 for (i = 0; i < tunnel->npaths; i++) {
2387 if (tunnel->paths[i]->activated) {
2388 tb_path_deactivate(tunnel->paths[i]);
2389 tunnel->paths[i]->activated = false;
2390 }
2391 }
2392
2393 tunnel->state = TB_TUNNEL_ACTIVATING;
2394
2395 if (tunnel->pre_activate) {
2396 res = tunnel->pre_activate(tunnel);
2397 if (res)
2398 return res;
2399 }
2400
2401 for (i = 0; i < tunnel->npaths; i++) {
2402 res = tb_path_activate(tunnel->paths[i]);
2403 if (res)
2404 goto err;
2405 }
2406
2407 if (tunnel->activate) {
2408 res = tunnel->activate(tunnel, true);
2409 if (res) {
2410 if (res == -EINPROGRESS)
2411 return res;
2412 goto err;
2413 }
2414 }
2415
2416 tb_tunnel_set_active(tunnel, true);
2417 return 0;
2418
2419 err:
2420 tb_tunnel_warn(tunnel, "activation failed\n");
2421 tb_tunnel_deactivate(tunnel);
2422 return res;
2423 }
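/*
 * Example (illustrative sketch): a caller that treats %-EINPROGRESS as
 * success, since for DP tunnels the DPRX capabilities read completes
 * asynchronously and the final tunnel state is reported later.
 *
 *	int ret;
 *
 *	ret = tb_tunnel_activate(tunnel);
 *	if (ret && ret != -EINPROGRESS) {
 *		tb_tunnel_put(tunnel);
 *		return ret;
 *	}
 */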
2424
2425 /**
2426 * tb_tunnel_deactivate() - deactivate a tunnel
2427 * @tunnel: Tunnel to deactivate
2428 */
2429 void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
2430 {
2431 int i;
2432
2433 tb_tunnel_dbg(tunnel, "deactivating\n");
2434
2435 if (tunnel->activate)
2436 tunnel->activate(tunnel, false);
2437
2438 for (i = 0; i < tunnel->npaths; i++) {
2439 if (tunnel->paths[i] && tunnel->paths[i]->activated)
2440 tb_path_deactivate(tunnel->paths[i]);
2441 }
2442
2443 if (tunnel->post_deactivate)
2444 tunnel->post_deactivate(tunnel);
2445
2446 tb_tunnel_set_active(tunnel, false);
2447 }
2448
2449 /**
2450 * tb_tunnel_port_on_path() - Does the tunnel go through port
2451 * @tunnel: Tunnel to check
2452 * @port: Port to check
2453 *
2454 * Return: %true if @tunnel goes through @port (direction does not matter),
2455 * %false otherwise.
2456 */
2457 bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
2458 const struct tb_port *port)
2459 {
2460 int i;
2461
2462 for (i = 0; i < tunnel->npaths; i++) {
2463 if (!tunnel->paths[i])
2464 continue;
2465
2466 if (tb_path_port_on_path(tunnel->paths[i], port))
2467 return true;
2468 }
2469
2470 return false;
2471 }
2472
2473 /* Has tb_tunnel_activate() been called for the tunnel? */
2474 static bool tb_tunnel_is_activated(const struct tb_tunnel *tunnel)
2475 {
2476 return tunnel->state == TB_TUNNEL_ACTIVATING || tb_tunnel_is_active(tunnel);
2477 }
2478
2479 /**
2480 * tb_tunnel_maximum_bandwidth() - Return maximum possible bandwidth
2481 * @tunnel: Tunnel to check
2482 * @max_up: Maximum upstream bandwidth in Mb/s
2483 * @max_down: Maximum downstream bandwidth in Mb/s
2484 *
2485 * Return:
2486 * * %0 - On success. @max_up and @max_down hold the maximum bandwidth
2487 * this tunnel can support if not limited by other bandwidth clients.
2488 * * %-EOPNOTSUPP - If the tunnel does not support this function.
2489 * * %-ENOTCONN - If the tunnel is not active.
2490 */
2491 int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
2492 int *max_down)
2493 {
2494 if (!tb_tunnel_is_active(tunnel))
2495 return -ENOTCONN;
2496
2497 if (tunnel->maximum_bandwidth)
2498 return tunnel->maximum_bandwidth(tunnel, max_up, max_down);
2499 return -EOPNOTSUPP;
2500 }
2501
2502 /**
2503 * tb_tunnel_allocated_bandwidth() - Return bandwidth allocated for the tunnel
2504 * @tunnel: Tunnel to check
2505 * @allocated_up: Currently allocated upstream bandwidth in Mb/s is stored here
2506 * @allocated_down: Currently allocated downstream bandwidth in Mb/s is
2507 * stored here
2508 *
2509 * Return:
2510 * * %0 - On success. @allocated_up and @allocated_down hold the bandwidth
2511 * allocated for the tunnel (may be higher than what it actually consumes).
2512 * * %-EOPNOTSUPP - If the tunnel does not support this function.
2513 * * %-ENOTCONN - If the tunnel is not active.
2514 * * Negative errno - Another error occurred.
2515 */
2516 int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
2517 int *allocated_down)
2518 {
2519 if (!tb_tunnel_is_active(tunnel))
2520 return -ENOTCONN;
2521
2522 if (tunnel->allocated_bandwidth)
2523 return tunnel->allocated_bandwidth(tunnel, allocated_up,
2524 allocated_down);
2525 return -EOPNOTSUPP;
2526 }
2527
2528 /**
2529 * tb_tunnel_alloc_bandwidth() - Change tunnel bandwidth allocation
2530 * @tunnel: Tunnel whose bandwidth allocation to change
2531 * @alloc_up: New upstream bandwidth in Mb/s
2532 * @alloc_down: New downstream bandwidth in Mb/s
2533 *
2534 * Tries to change tunnel bandwidth allocation.
2535 *
2536 * Return:
2537 * * %0 - On success. Updates @alloc_up and @alloc_down to the values that
2538 * were actually allocated (these may differ from the values passed in).
2539 * * Negative errno - In case of failure.
2540 */
2541 int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
2542 int *alloc_down)
2543 {
2544 if (!tb_tunnel_is_active(tunnel))
2545 return -ENOTCONN;
2546
2547 if (tunnel->alloc_bandwidth) {
2548 int ret;
2549
2550 ret = tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down);
2551 if (ret)
2552 return ret;
2553
2554 tb_tunnel_changed(tunnel);
2555 return 0;
2556 }
2557
2558 return -EOPNOTSUPP;
2559 }
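/*
 * Example (illustrative sketch): requesting a new bandwidth split for a
 * tunnel and picking up what was actually granted. The requested
 * 2000/2000 Mb/s values are arbitrary.
 *
 *	int up = 2000, down = 2000;
 *
 *	if (!tb_tunnel_alloc_bandwidth(tunnel, &up, &down))
 *		tb_tunnel_dbg(tunnel, "granted %d/%d Mb/s\n", up, down);
 */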
2560
2561 /**
2562 * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
2563 * @tunnel: Tunnel to check
2564 * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
2565 * Can be %NULL.
2566 * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
2567 * Can be %NULL.
2568 *
2569 * Stores the amount of isochronous bandwidth @tunnel consumes in
2570 * @consumed_up and @consumed_down.
2571 *
2572 * Return: %0 on success, negative errno otherwise.
2573 */
2574 int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
2575 int *consumed_down)
2576 {
2577 int up_bw = 0, down_bw = 0;
2578
2579 /*
2580 * Here we need to distinguish an inactive tunnel from tunnels
2581 * that are either fully active or whose activation has started.
2582 * The latter matters for DP tunnels, where we must report the
2583 * consumed bandwidth as the maximum we gave the tunnel until the
2584 * DPRX capabilities read is done by the graphics driver.
2585 */
2586 if (tb_tunnel_is_activated(tunnel) && tunnel->consumed_bandwidth) {
2587 int ret;
2588
2589 ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
2590 if (ret)
2591 return ret;
2592 }
2593
2594 if (consumed_up)
2595 *consumed_up = up_bw;
2596 if (consumed_down)
2597 *consumed_down = down_bw;
2598
2599 tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw, down_bw);
2600 return 0;
2601 }
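/*
 * Example (illustrative sketch): summing up what all tunnels consume, for
 * instance to estimate the bandwidth remaining on a shared link. The
 * tunnel_list stands in for the connection manager's tunnel list.
 *
 *	struct tb_tunnel *tunnel;
 *	int up, down, total_up = 0, total_down = 0;
 *
 *	list_for_each_entry(tunnel, &tunnel_list, list) {
 *		if (tb_tunnel_consumed_bandwidth(tunnel, &up, &down))
 *			continue;
 *		total_up += up;
 *		total_down += down;
 *	}
 */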
2602
2603 /**
2604 * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
2605 * @tunnel: Tunnel whose unused bandwidth to release
2606 *
2607 * If the tunnel supports dynamic bandwidth management (only USB3 tunnels
2608 * at the moment), this function makes it release all unused bandwidth.
2609 *
2610 * Return: %0 on success, negative errno otherwise.
2611 */
2612 int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
2613 {
2614 if (!tb_tunnel_is_active(tunnel))
2615 return -ENOTCONN;
2616
2617 if (tunnel->release_unused_bandwidth) {
2618 int ret;
2619
2620 ret = tunnel->release_unused_bandwidth(tunnel);
2621 if (ret)
2622 return ret;
2623 }
2624
2625 return 0;
2626 }
2627
2628 /**
2629 * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
2630 * @tunnel: Tunnel reclaiming available bandwidth
2631 * @available_up: Available upstream bandwidth (in Mb/s)
2632 * @available_down: Available downstream bandwidth (in Mb/s)
2633 *
2634 * Reclaims bandwidth from @available_up and @available_down and
2635 * updates the variables accordingly (e.g. decreases both by what was
2636 * reclaimed by the tunnel). If nothing was reclaimed the values are
2637 * kept as-is.
2638 */
2639 void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
2640 int *available_up,
2641 int *available_down)
2642 {
2643 if (!tb_tunnel_is_active(tunnel))
2644 return;
2645
2646 if (tunnel->reclaim_available_bandwidth)
2647 tunnel->reclaim_available_bandwidth(tunnel, available_up,
2648 available_down);
2649 }
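/*
 * Example (illustrative sketch): the usual rebalancing sequence is to make
 * every tunnel first drop the bandwidth it does not use, recompute what is
 * available, and then let each tunnel grow back towards its maximum. The
 * two helpers are real APIs; the loop and the available_up/available_down
 * bookkeeping are placeholders.
 *
 *	list_for_each_entry(tunnel, &tunnel_list, list)
 *		tb_tunnel_release_unused_bandwidth(tunnel);
 *
 *	// available_up/available_down recomputed by the caller here
 *
 *	list_for_each_entry(tunnel, &tunnel_list, list)
 *		tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up,
 *						      &available_down);
 */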
2650
2651 const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel)
2652 {
2653 return tb_tunnel_names[tunnel->type];
2654 }
2655