xref: /linux/drivers/thunderbolt/tunnel.c (revision 5ea5880764cbb164afb17a62e76ca75dc371409d)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Thunderbolt driver - Tunneling support
4  *
5  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6  * Copyright (C) 2019, Intel Corporation
7  */
8 
9 #include <linux/delay.h>
10 #include <linux/slab.h>
11 #include <linux/list.h>
12 #include <linux/ktime.h>
13 #include <linux/string_helpers.h>
14 
15 #include "tunnel.h"
16 #include "tb.h"
17 
18 /* PCIe adapters always use HopID 8 for both directions */
19 #define TB_PCI_HOPID			8
20 
21 #define TB_PCI_PATH_DOWN		0
22 #define TB_PCI_PATH_UP			1
23 
24 #define TB_PCI_PRIORITY			3
25 #define TB_PCI_WEIGHT			1
26 
27 /* USB3 adapters always use HopID 8 for both directions */
28 #define TB_USB3_HOPID			8
29 
30 #define TB_USB3_PATH_DOWN		0
31 #define TB_USB3_PATH_UP			1
32 
33 #define TB_USB3_PRIORITY		3
34 #define TB_USB3_WEIGHT			2
35 
36 /* DP adapters use HopID 8 for AUX and 9 for Video */
37 #define TB_DP_AUX_TX_HOPID		8
38 #define TB_DP_AUX_RX_HOPID		8
39 #define TB_DP_VIDEO_HOPID		9
40 
41 #define TB_DP_VIDEO_PATH_OUT		0
42 #define TB_DP_AUX_PATH_OUT		1
43 #define TB_DP_AUX_PATH_IN		2
44 
45 #define TB_DP_VIDEO_PRIORITY		1
46 #define TB_DP_VIDEO_WEIGHT		1
47 
48 #define TB_DP_AUX_PRIORITY		2
49 #define TB_DP_AUX_WEIGHT		1
50 
51 /* Minimum number of credits needed for PCIe path */
52 #define TB_MIN_PCIE_CREDITS		6U
53 /*
54  * Number of credits we try to allocate for each DMA path if not limited
55  * by the host router baMaxHI.
56  */
57 #define TB_DMA_CREDITS			14
58 /* Minimum number of credits for DMA path */
59 #define TB_MIN_DMA_CREDITS		1
60 
61 #define TB_DMA_PRIORITY			5
62 #define TB_DMA_WEIGHT			1
63 
64 /*
65  * Reserve additional bandwidth for USB 3.x and PCIe bulk traffic
66  * according to USB4 v2 Connection Manager guide. This ends up reserving
67  * 1500 Mb/s for PCIe and 3000 Mb/s for USB 3.x taking weights into
68  * account.
69  */
70 #define USB4_V2_PCI_MIN_BANDWIDTH	(1500 * TB_PCI_WEIGHT)
71 #define USB4_V2_USB3_MIN_BANDWIDTH	(1500 * TB_USB3_WEIGHT)
72 
73 /*
74  * According to the VESA spec, the DPRX negotiation shall complete in 5
75  * seconds after the tunnel is established. Since at least i915 can
76  * runtime suspend if there is nothing connected, and it polls for new
77  * connections every 10 seconds, we use 12 seconds here.
78  *
79  * These are in ms.
80  */
81 #define TB_DPRX_TIMEOUT			12000
82 #define TB_DPRX_WAIT_TIMEOUT		25
83 #define TB_DPRX_POLL_DELAY		50
84 
85 static int dprx_timeout = TB_DPRX_TIMEOUT;
86 module_param(dprx_timeout, int, 0444);
87 MODULE_PARM_DESC(dprx_timeout,
88 		 "DPRX capability read timeout in ms, -1 waits forever (default: "
89 		 __MODULE_STRING(TB_DPRX_TIMEOUT) ")");
90 
91 static unsigned int dma_credits = TB_DMA_CREDITS;
92 module_param(dma_credits, uint, 0444);
93 MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: "
94                 __MODULE_STRING(TB_DMA_CREDITS) ")");
95 
96 static bool bw_alloc_mode = true;
97 module_param(bw_alloc_mode, bool, 0444);
98 MODULE_PARM_DESC(bw_alloc_mode,
99 		 "enable bandwidth allocation mode if supported (default: true)");
100 
101 static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
102 
103 static const char * const tb_event_names[] = {
104 	[TB_TUNNEL_ACTIVATED] = "activated",
105 	[TB_TUNNEL_CHANGED] = "changed",
106 	[TB_TUNNEL_DEACTIVATED] = "deactivated",
107 	[TB_TUNNEL_LOW_BANDWIDTH] = "low bandwidth",
108 	[TB_TUNNEL_NO_BANDWIDTH] = "insufficient bandwidth",
109 };
110 
111 /* Synchronizes kref_get()/put() of struct tb_tunnel */
112 static DEFINE_MUTEX(tb_tunnel_lock);
113 
114 static inline unsigned int tb_usable_credits(const struct tb_port *port)
115 {
116 	return port->total_credits - port->ctl_credits;
117 }
118 
119 /**
120  * tb_available_credits() - Available credits for PCIe and DMA
121  * @port: Lane adapter to check
122  * @max_dp_streams: If non-%NULL stores maximum number of simultaneous DP
123  *		    streams possible through this lane adapter
124  *
125  * Return: Number of available credits.
126  */
127 static unsigned int tb_available_credits(const struct tb_port *port,
128 					 size_t *max_dp_streams)
129 {
130 	const struct tb_switch *sw = port->sw;
131 	int credits, usb3, pcie, spare;
132 	size_t ndp;
133 
134 	usb3 = tb_acpi_may_tunnel_usb3() ? sw->max_usb3_credits : 0;
135 	pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0;
136 
137 	if (tb_acpi_is_xdomain_allowed()) {
138 		spare = min_not_zero(sw->max_dma_credits, dma_credits);
139 		/* Add some credits for potential second DMA tunnel */
140 		spare += TB_MIN_DMA_CREDITS;
141 	} else {
142 		spare = 0;
143 	}
144 
145 	credits = tb_usable_credits(port);
146 	if (tb_acpi_may_tunnel_dp()) {
147 		/*
148 		 * Maximum number of DP streams possible through the
149 		 * lane adapter.
150 		 */
151 		if (sw->min_dp_aux_credits + sw->min_dp_main_credits)
152 			ndp = (credits - (usb3 + pcie + spare)) /
153 			      (sw->min_dp_aux_credits + sw->min_dp_main_credits);
154 		else
155 			ndp = 0;
156 	} else {
157 		ndp = 0;
158 	}
159 	credits -= ndp * (sw->min_dp_aux_credits + sw->min_dp_main_credits);
160 	credits -= usb3;
161 
162 	if (max_dp_streams)
163 		*max_dp_streams = ndp;
164 
165 	return credits > 0 ? credits : 0;
166 }
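
/*
 * A worked example with illustrative numbers (not taken from any real
 * router): a lane adapter with 60 usable credits, max_usb3_credits = 14,
 * max_pcie_credits = 6 and XDomain tunneling allowed (spare = 14 + 1)
 * leaves 60 - (14 + 6 + 15) = 25 credits for DP. With
 * min_dp_aux_credits + min_dp_main_credits = 19 that allows
 * ndp = 25 / 19 = 1 DP stream, and the function returns
 * 60 - 1 * 19 - 14 = 27 credits for PCIe and DMA.
 */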
167 
168 static void tb_init_pm_support(struct tb_path_hop *hop)
169 {
170 	struct tb_port *out_port = hop->out_port;
171 	struct tb_port *in_port = hop->in_port;
172 
173 	if (tb_port_is_null(in_port) && tb_port_is_null(out_port) &&
174 	    usb4_switch_version(in_port->sw) >= 2)
175 		hop->pm_support = true;
176 }
177 
178 static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
179 					 enum tb_tunnel_type type)
180 {
181 	struct tb_tunnel *tunnel;
182 
183 	tunnel = kzalloc(struct_size(tunnel, paths, npaths), GFP_KERNEL);
184 	if (!tunnel)
185 		return NULL;
186 
187 	tunnel->npaths = npaths;
188 
189 	INIT_LIST_HEAD(&tunnel->list);
190 	tunnel->tb = tb;
191 	tunnel->type = type;
192 	kref_init(&tunnel->kref);
193 
194 	return tunnel;
195 }
196 
197 static void tb_tunnel_get(struct tb_tunnel *tunnel)
198 {
199 	mutex_lock(&tb_tunnel_lock);
200 	kref_get(&tunnel->kref);
201 	mutex_unlock(&tb_tunnel_lock);
202 }
203 
204 static void tb_tunnel_destroy(struct kref *kref)
205 {
206 	struct tb_tunnel *tunnel = container_of(kref, typeof(*tunnel), kref);
207 	int i;
208 
209 	if (tunnel->destroy)
210 		tunnel->destroy(tunnel);
211 
212 	for (i = 0; i < tunnel->npaths; i++) {
213 		if (tunnel->paths[i])
214 			tb_path_free(tunnel->paths[i]);
215 	}
216 
217 	kfree(tunnel);
218 }
219 
220 void tb_tunnel_put(struct tb_tunnel *tunnel)
221 {
222 	mutex_lock(&tb_tunnel_lock);
223 	kref_put(&tunnel->kref, tb_tunnel_destroy);
224 	mutex_unlock(&tb_tunnel_lock);
225 }
226 
227 /**
228  * tb_tunnel_event() - Notify userspace about tunneling event
229  * @tb: Domain where the event occurred
230  * @event: Event that happened
231  * @type: Type of the tunnel in question
232  * @src_port: Tunnel source port (can be %NULL)
233  * @dst_port: Tunnel destination port (can be %NULL)
234  *
235  * Notifies userspace about tunneling @event in the domain. The tunnel
236  * does not need to exist (e.g. the tunnel was not activated because
237  * there is not enough bandwidth). If @src_port and @dst_port are
238  * given, the full %TUNNEL_DETAILS environment variable is filled in;
239  * otherwise the shorter form (just the tunnel type) is used.
240  */
241 void tb_tunnel_event(struct tb *tb, enum tb_tunnel_event event,
242 		     enum tb_tunnel_type type,
243 		     const struct tb_port *src_port,
244 		     const struct tb_port *dst_port)
245 {
246 	char *envp[3] = { NULL };
247 
248 	if (WARN_ON_ONCE(event >= ARRAY_SIZE(tb_event_names)))
249 		return;
250 	if (WARN_ON_ONCE(type >= ARRAY_SIZE(tb_tunnel_names)))
251 		return;
252 
253 	envp[0] = kasprintf(GFP_KERNEL, "TUNNEL_EVENT=%s", tb_event_names[event]);
254 	if (!envp[0])
255 		return;
256 
257 	if (src_port != NULL && dst_port != NULL) {
258 		envp[1] = kasprintf(GFP_KERNEL, "TUNNEL_DETAILS=%llx:%u <-> %llx:%u (%s)",
259 				    tb_route(src_port->sw), src_port->port,
260 				    tb_route(dst_port->sw), dst_port->port,
261 				    tb_tunnel_names[type]);
262 	} else {
263 		envp[1] = kasprintf(GFP_KERNEL, "TUNNEL_DETAILS=(%s)",
264 				    tb_tunnel_names[type]);
265 	}
266 
267 	if (envp[1])
268 		tb_domain_event(tb, envp);
269 
270 	kfree(envp[1]);
271 	kfree(envp[0]);
272 }
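
/*
 * A sketch of what userspace sees for an activated DP tunnel when both
 * ports are known (route strings and adapter numbers below are made up
 * for illustration):
 *
 *	TUNNEL_EVENT=activated
 *	TUNNEL_DETAILS=0:12 <-> 301:13 (DP)
 */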
273 
274 static inline void tb_tunnel_set_active(struct tb_tunnel *tunnel, bool active)
275 {
276 	if (active) {
277 		tunnel->state = TB_TUNNEL_ACTIVE;
278 		tb_tunnel_event(tunnel->tb, TB_TUNNEL_ACTIVATED, tunnel->type,
279 				tunnel->src_port, tunnel->dst_port);
280 	} else {
281 		tunnel->state = TB_TUNNEL_INACTIVE;
282 		tb_tunnel_event(tunnel->tb, TB_TUNNEL_DEACTIVATED, tunnel->type,
283 				tunnel->src_port, tunnel->dst_port);
284 	}
285 }
286 
287 static inline void tb_tunnel_changed(struct tb_tunnel *tunnel)
288 {
289 	tb_tunnel_event(tunnel->tb, TB_TUNNEL_CHANGED, tunnel->type,
290 			tunnel->src_port, tunnel->dst_port);
291 }
292 
293 static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable)
294 {
295 	struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
296 	int ret;
297 
298 	/* Only supported if both routers are at least USB4 v2 */
299 	if (usb4_switch_version(tunnel->src_port->sw) < 2 ||
300 	    usb4_switch_version(tunnel->dst_port->sw) < 2)
301 		return 0;
302 
303 	if (enable && tb_port_get_link_generation(port) < 4)
304 		return 0;
305 
306 	ret = usb4_pci_port_set_ext_encapsulation(tunnel->src_port, enable);
307 	if (ret)
308 		return ret;
309 
310 	/*
311 	 * The downstream router could be unplugged, so disabling
312 	 * encapsulation in the upstream router must still be possible.
313 	 */
314 	ret = usb4_pci_port_set_ext_encapsulation(tunnel->dst_port, enable);
315 	if (ret) {
316 		if (enable)
317 			return ret;
318 		if (ret != -ENODEV)
319 			return ret;
320 	}
321 
322 	tb_tunnel_dbg(tunnel, "extended encapsulation %s\n",
323 		      str_enabled_disabled(enable));
324 	return 0;
325 }
326 
327 static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
328 {
329 	int res;
330 
331 	if (activate) {
332 		res = tb_pci_set_ext_encapsulation(tunnel, activate);
333 		if (res)
334 			return res;
335 	}
336 
337 	if (activate)
338 		res = tb_pci_port_enable(tunnel->dst_port, activate);
339 	else
340 		res = tb_pci_port_enable(tunnel->src_port, activate);
341 	if (res)
342 		return res;
343 
345 	if (activate) {
346 		res = tb_pci_port_enable(tunnel->src_port, activate);
347 		if (res)
348 			return res;
349 	} else {
350 		/* Downstream router could be unplugged */
351 		tb_pci_port_enable(tunnel->dst_port, activate);
352 	}
353 
354 	return activate ? 0 : tb_pci_set_ext_encapsulation(tunnel, activate);
355 }
356 
357 static int tb_pci_init_credits(struct tb_path_hop *hop)
358 {
359 	struct tb_port *port = hop->in_port;
360 	struct tb_switch *sw = port->sw;
361 	unsigned int credits;
362 
363 	if (tb_port_use_credit_allocation(port)) {
364 		unsigned int available;
365 
366 		available = tb_available_credits(port, NULL);
367 		credits = min(sw->max_pcie_credits, available);
368 
369 		if (credits < TB_MIN_PCIE_CREDITS)
370 			return -ENOSPC;
371 
372 		credits = max(TB_MIN_PCIE_CREDITS, credits);
373 	} else {
374 		if (tb_port_is_null(port))
375 			credits = port->bonded ? 32 : 16;
376 		else
377 			credits = 7;
378 	}
379 
380 	hop->initial_credits = credits;
381 	return 0;
382 }
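
/*
 * For example, a legacy (non-USB4) bonded lane adapter gets the fixed
 * 32 initial credits above, whereas with USB4 credit allocation the
 * path gets the available credits capped at the router's
 * max_pcie_credits, and fails with -ENOSPC if fewer than
 * TB_MIN_PCIE_CREDITS are left.
 */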
383 
384 static int tb_pci_init_path(struct tb_path *path)
385 {
386 	struct tb_path_hop *hop;
387 
388 	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
389 	path->egress_shared_buffer = TB_PATH_NONE;
390 	path->ingress_fc_enable = TB_PATH_ALL;
391 	path->ingress_shared_buffer = TB_PATH_NONE;
392 	path->priority = TB_PCI_PRIORITY;
393 	path->weight = TB_PCI_WEIGHT;
394 	path->drop_packages = 0;
395 
396 	tb_path_for_each_hop(path, hop) {
397 		int ret;
398 
399 		ret = tb_pci_init_credits(hop);
400 		if (ret)
401 			return ret;
402 	}
403 
404 	return 0;
405 }
406 
407 /**
408  * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
409  * @tb: Pointer to the domain structure
410  * @down: PCIe downstream adapter
411  * @alloc_hopid: Allocate HopIDs from visited ports
412  *
413  * If @down adapter is active, follows the tunnel to the PCIe upstream
414  * adapter and back.
415  *
416  * Return: Pointer to &struct tb_tunnel or %NULL if there was no tunnel.
417  */
418 struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
419 					 bool alloc_hopid)
420 {
421 	struct tb_tunnel *tunnel;
422 	struct tb_path *path;
423 
424 	if (!tb_pci_port_is_enabled(down))
425 		return NULL;
426 
427 	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
428 	if (!tunnel)
429 		return NULL;
430 
431 	tunnel->activate = tb_pci_activate;
432 	tunnel->src_port = down;
433 
434 	/*
435 	 * Discover both paths even if they are not complete. We will
436 	 * clean them up by calling tb_tunnel_deactivate() below in that
437 	 * case.
438 	 */
439 	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
440 				&tunnel->dst_port, "PCIe Up", alloc_hopid);
441 	if (!path) {
442 		/* Just disable the downstream port */
443 		tb_pci_port_enable(down, false);
444 		goto err_free;
445 	}
446 	tunnel->paths[TB_PCI_PATH_UP] = path;
447 	if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]))
448 		goto err_free;
449 
450 	path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
451 				"PCIe Down", alloc_hopid);
452 	if (!path)
453 		goto err_deactivate;
454 	tunnel->paths[TB_PCI_PATH_DOWN] = path;
455 	if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]))
456 		goto err_deactivate;
457 
458 	/* Validate that the tunnel is complete */
459 	if (!tb_port_is_pcie_up(tunnel->dst_port)) {
460 		tb_port_warn(tunnel->dst_port,
461 			     "path does not end on a PCIe adapter, cleaning up\n");
462 		goto err_deactivate;
463 	}
464 
465 	if (down != tunnel->src_port) {
466 		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
467 		goto err_deactivate;
468 	}
469 
470 	if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
471 		tb_tunnel_warn(tunnel,
472 			       "tunnel is not fully activated, cleaning up\n");
473 		goto err_deactivate;
474 	}
475 
476 	tb_tunnel_dbg(tunnel, "discovered\n");
477 	return tunnel;
478 
479 err_deactivate:
480 	tb_tunnel_deactivate(tunnel);
481 err_free:
482 	tb_tunnel_put(tunnel);
483 
484 	return NULL;
485 }
486 
487 /**
488  * tb_tunnel_alloc_pci() - Allocate a PCIe tunnel
489  * @tb: Pointer to the domain structure
490  * @up: PCIe upstream adapter port
491  * @down: PCIe downstream adapter port
492  *
493  * Allocates a PCIe tunnel. The ports must be of type %TB_TYPE_PCIE_UP
494  * and %TB_TYPE_PCIE_DOWN.
495  *
496  * Return: Pointer to &struct tb_tunnel or %NULL on failure.
497  */
498 struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
499 				      struct tb_port *down)
500 {
501 	struct tb_tunnel *tunnel;
502 	struct tb_path *path;
503 
504 	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
505 	if (!tunnel)
506 		return NULL;
507 
508 	tunnel->activate = tb_pci_activate;
509 	tunnel->src_port = down;
510 	tunnel->dst_port = up;
511 
512 	path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
513 			     "PCIe Down");
514 	if (!path)
515 		goto err_free;
516 	tunnel->paths[TB_PCI_PATH_DOWN] = path;
517 	if (tb_pci_init_path(path))
518 		goto err_free;
519 
520 	path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
521 			     "PCIe Up");
522 	if (!path)
523 		goto err_free;
524 	tunnel->paths[TB_PCI_PATH_UP] = path;
525 	if (tb_pci_init_path(path))
526 		goto err_free;
527 
528 	return tunnel;
529 
530 err_free:
531 	tb_tunnel_put(tunnel);
532 	return NULL;
533 }
534 
535 /**
536  * tb_tunnel_reserved_pci() - Amount of bandwidth to reserve for PCIe
537  * @port: Lane 0 adapter
538  * @reserved_up: Upstream bandwidth in Mb/s to reserve
539  * @reserved_down: Downstream bandwidth in Mb/s to reserve
540  *
541  * Can be called for any connected lane 0 adapter to find out how much
542  * bandwidth needs to be left in reserve for possible PCIe bulk traffic.
543  *
544  * Return:
545  * * %true - If there is something to be reserved. Writes the amount to
546  *   @reserved_down/@reserved_up.
547  * * %false - Nothing to be reserved. Leaves @reserved_down/@reserved_up
548  *   unmodified.
549  */
550 bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up,
551 			    int *reserved_down)
552 {
553 	if (WARN_ON_ONCE(!port->remote))
554 		return false;
555 
556 	if (!tb_acpi_may_tunnel_pcie())
557 		return false;
558 
559 	if (tb_port_get_link_generation(port) < 4)
560 		return false;
561 
562 	/* Must have PCIe adapters */
563 	if (tb_is_upstream_port(port)) {
564 		if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_UP))
565 			return false;
566 		if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_DOWN))
567 			return false;
568 	} else {
569 		if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_DOWN))
570 			return false;
571 		if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_UP))
572 			return false;
573 	}
574 
575 	*reserved_up = USB4_V2_PCI_MIN_BANDWIDTH;
576 	*reserved_down = USB4_V2_PCI_MIN_BANDWIDTH;
577 
578 	tb_port_dbg(port, "reserving %u/%u Mb/s for PCIe\n", *reserved_up,
579 		    *reserved_down);
580 	return true;
581 }
582 
583 static bool tb_dp_is_usb4(const struct tb_switch *sw)
584 {
585 	/* Titan Ridge DP adapters need the same treatment as USB4 */
586 	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
587 }
588 
589 static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out,
590 			      int timeout_msec)
591 {
592 	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
593 	u32 val;
594 	int ret;
595 
596 	/* Both ends need to support this */
597 	if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
598 		return 0;
599 
600 	ret = tb_port_read(out, &val, TB_CFG_PORT,
601 			   out->cap_adap + DP_STATUS_CTRL, 1);
602 	if (ret)
603 		return ret;
604 
605 	val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;
606 
607 	ret = tb_port_write(out, &val, TB_CFG_PORT,
608 			    out->cap_adap + DP_STATUS_CTRL, 1);
609 	if (ret)
610 		return ret;
611 
612 	do {
613 		ret = tb_port_read(out, &val, TB_CFG_PORT,
614 				   out->cap_adap + DP_STATUS_CTRL, 1);
615 		if (ret)
616 			return ret;
617 		if (!(val & DP_STATUS_CTRL_CMHS))
618 			return 0;
619 		usleep_range(100, 150);
620 	} while (ktime_before(ktime_get(), timeout));
621 
622 	return -ETIMEDOUT;
623 }
624 
625 /*
626  * Returns maximum possible rate from capability supporting only DP 2.0
627  * and below. Used when DP BW allocation mode is not enabled.
628  */
629 static inline u32 tb_dp_cap_get_rate(u32 val)
630 {
631 	u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;
632 
633 	switch (rate) {
634 	case DP_COMMON_CAP_RATE_RBR:
635 		return 1620;
636 	case DP_COMMON_CAP_RATE_HBR:
637 		return 2700;
638 	case DP_COMMON_CAP_RATE_HBR2:
639 		return 5400;
640 	case DP_COMMON_CAP_RATE_HBR3:
641 		return 8100;
642 	default:
643 		return 0;
644 	}
645 }
646 
647 /*
648  * Returns maximum possible rate from capability supporting DP 2.1
649  * UHBR20, 13.5 and 10 rates as well. Use only when DP BW allocation
650  * mode is enabled.
651  */
652 static inline u32 tb_dp_cap_get_rate_ext(u32 val)
653 {
654 	if (val & DP_COMMON_CAP_UHBR20)
655 		return 20000;
656 	else if (val & DP_COMMON_CAP_UHBR13_5)
657 		return 13500;
658 	else if (val & DP_COMMON_CAP_UHBR10)
659 		return 10000;
660 
661 	return tb_dp_cap_get_rate(val);
662 }
663 
664 static inline bool tb_dp_is_uhbr_rate(unsigned int rate)
665 {
666 	return rate >= 10000;
667 }
668 
669 static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
670 {
671 	val &= ~DP_COMMON_CAP_RATE_MASK;
672 	switch (rate) {
673 	default:
674 		WARN(1, "invalid rate %u passed, defaulting to 1620 Mb/s\n", rate);
675 		fallthrough;
676 	case 1620:
677 		val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
678 		break;
679 	case 2700:
680 		val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
681 		break;
682 	case 5400:
683 		val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
684 		break;
685 	case 8100:
686 		val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
687 		break;
688 	}
689 	return val;
690 }
691 
692 static inline u32 tb_dp_cap_get_lanes(u32 val)
693 {
694 	u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;
695 
696 	switch (lanes) {
697 	case DP_COMMON_CAP_1_LANE:
698 		return 1;
699 	case DP_COMMON_CAP_2_LANES:
700 		return 2;
701 	case DP_COMMON_CAP_4_LANES:
702 		return 4;
703 	default:
704 		return 0;
705 	}
706 }
707 
708 static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
709 {
710 	val &= ~DP_COMMON_CAP_LANES_MASK;
711 	switch (lanes) {
712 	default:
713 		WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
714 		     lanes);
715 		fallthrough;
716 	case 1:
717 		val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
718 		break;
719 	case 2:
720 		val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
721 		break;
722 	case 4:
723 		val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
724 		break;
725 	}
726 	return val;
727 }
728 
729 static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
730 {
731 	/* Tunneling removes the DP 8b/10b and 128b/132b encoding overhead */
732 	if (tb_dp_is_uhbr_rate(rate))
733 		return rate * lanes * 128 / 132;
734 	return rate * lanes * 8 / 10;
735 }
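
/*
 * For example, tunneled HBR2 x4 carries 5400 * 4 * 8 / 10 = 17280 Mb/s
 * and tunneled UHBR10 x4 carries 10000 * 4 * 128 / 132 = 38787 Mb/s
 * (integer division).
 */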
736 
737 static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
738 				  u32 out_rate, u32 out_lanes, u32 *new_rate,
739 				  u32 *new_lanes)
740 {
741 	static const u32 dp_bw[][2] = {
742 		/* Mb/s, lanes */
743 		{ 8100, 4 }, /* 25920 Mb/s */
744 		{ 5400, 4 }, /* 17280 Mb/s */
745 		{ 8100, 2 }, /* 12960 Mb/s */
746 		{ 2700, 4 }, /* 8640 Mb/s */
747 		{ 5400, 2 }, /* 8640 Mb/s */
748 		{ 8100, 1 }, /* 6480 Mb/s */
749 		{ 1620, 4 }, /* 5184 Mb/s */
750 		{ 5400, 1 }, /* 4320 Mb/s */
751 		{ 2700, 2 }, /* 4320 Mb/s */
752 		{ 1620, 2 }, /* 2592 Mb/s */
753 		{ 2700, 1 }, /* 2160 Mb/s */
754 		{ 1620, 1 }, /* 1296 Mb/s */
755 	};
756 	unsigned int i;
757 
758 	/*
759 	 * Find a combination that can fit into max_bw and does not
760 	 * exceed the maximum rate and lanes supported by the DP OUT and
761 	 * DP IN adapters.
762 	 */
763 	for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
764 		if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
765 			continue;
766 
767 		if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
768 			continue;
769 
770 		if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
771 			*new_rate = dp_bw[i][0];
772 			*new_lanes = dp_bw[i][1];
773 			return 0;
774 		}
775 	}
776 
777 	return -ENOSR;
778 }
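
/*
 * For example, with max_bw = 12000 Mb/s, a DP IN capable of HBR3 x4 and
 * a DP OUT capable of HBR2 x2, the first table entry supported by both
 * adapters is { 5400, 2 }, consuming 8640 Mb/s <= 12000 Mb/s, so the
 * tunnel is reduced to HBR2 x2.
 */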
779 
780 static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
781 {
782 	u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
783 	struct tb_port *out = tunnel->dst_port;
784 	struct tb_port *in = tunnel->src_port;
785 	int ret, max_bw;
786 
787 	/*
788 	 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
789 	 * newer generation hardware.
790 	 */
791 	if (in->sw->generation < 2 || out->sw->generation < 2)
792 		return 0;
793 
794 	/*
795 	 * Perform connection manager handshake between IN and OUT ports
796 	 * before capabilities exchange can take place.
797 	 */
798 	ret = tb_dp_cm_handshake(in, out, 3000);
799 	if (ret)
800 		return ret;
801 
802 	/* Read both DP_LOCAL_CAP registers */
803 	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
804 			   in->cap_adap + DP_LOCAL_CAP, 1);
805 	if (ret)
806 		return ret;
807 
808 	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
809 			   out->cap_adap + DP_LOCAL_CAP, 1);
810 	if (ret)
811 		return ret;
812 
813 	/* Write IN local caps to OUT remote caps */
814 	ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
815 			    out->cap_adap + DP_REMOTE_CAP, 1);
816 	if (ret)
817 		return ret;
818 
819 	in_rate = tb_dp_cap_get_rate(in_dp_cap);
820 	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
821 	tb_tunnel_dbg(tunnel,
822 		      "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
823 		      in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));
824 
825 	/*
826 	 * If the tunnel bandwidth is limited (max_bw is set) then see
827 	 * if we need to reduce bandwidth to fit there.
828 	 */
829 	out_rate = tb_dp_cap_get_rate(out_dp_cap);
830 	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
831 	bw = tb_dp_bandwidth(out_rate, out_lanes);
832 	tb_tunnel_dbg(tunnel,
833 		      "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
834 		      out_rate, out_lanes, bw);
835 
836 	if (tb_tunnel_direction_downstream(tunnel))
837 		max_bw = tunnel->max_down;
838 	else
839 		max_bw = tunnel->max_up;
840 
841 	if (max_bw && bw > max_bw) {
842 		u32 new_rate, new_lanes, new_bw;
843 
844 		ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
845 					     out_rate, out_lanes, &new_rate,
846 					     &new_lanes);
847 		if (ret) {
848 			tb_tunnel_info(tunnel, "not enough bandwidth\n");
849 			return ret;
850 		}
851 
852 		new_bw = tb_dp_bandwidth(new_rate, new_lanes);
853 		tb_tunnel_dbg(tunnel,
854 			      "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
855 			      new_rate, new_lanes, new_bw);
856 
857 		/*
858 		 * Set new rate and number of lanes before writing it to
859 		 * the IN port remote caps.
860 		 */
861 		out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
862 		out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
863 	}
864 
865 	/*
866 	 * Titan Ridge does not disable AUX timers when it gets
867 	 * SET_CONFIG with SET_LTTPR_MODE set. This causes problems with
868 	 * DP tunneling.
869 	 */
870 	if (tb_route(out->sw) && tb_switch_is_titan_ridge(out->sw)) {
871 		out_dp_cap |= DP_COMMON_CAP_LTTPR_NS;
872 		tb_tunnel_dbg(tunnel, "disabling LTTPR\n");
873 	}
874 
875 	return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
876 			     in->cap_adap + DP_REMOTE_CAP, 1);
877 }
878 
879 static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
880 {
881 	int ret, estimated_bw, granularity, tmp;
882 	struct tb_port *out = tunnel->dst_port;
883 	struct tb_port *in = tunnel->src_port;
884 	u32 out_dp_cap, out_rate, out_lanes;
885 	u32 in_dp_cap, in_rate, in_lanes;
886 	u32 rate, lanes;
887 
888 	if (!bw_alloc_mode)
889 		return 0;
890 
891 	ret = usb4_dp_port_set_cm_bandwidth_mode_supported(in, true);
892 	if (ret)
893 		return ret;
894 
895 	ret = usb4_dp_port_set_group_id(in, in->group->index);
896 	if (ret)
897 		return ret;
898 
899 	/*
900 	 * Get the non-reduced rate and lanes based on the lowest
901 	 * capability of both adapters.
902 	 */
903 	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
904 			   in->cap_adap + DP_LOCAL_CAP, 1);
905 	if (ret)
906 		return ret;
907 
908 	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
909 			   out->cap_adap + DP_LOCAL_CAP, 1);
910 	if (ret)
911 		return ret;
912 
913 	in_rate = tb_dp_cap_get_rate(in_dp_cap);
914 	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
915 	out_rate = tb_dp_cap_get_rate(out_dp_cap);
916 	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
917 
918 	rate = min(in_rate, out_rate);
919 	lanes = min(in_lanes, out_lanes);
920 	tmp = tb_dp_bandwidth(rate, lanes);
921 
922 	tb_tunnel_dbg(tunnel, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
923 		      rate, lanes, tmp);
924 
925 	ret = usb4_dp_port_set_nrd(in, rate, lanes);
926 	if (ret)
927 		return ret;
928 
929 	/*
930 	 * Pick a granularity that supports the maximum possible
931 	 * bandwidth. For that we take the UHBR rates into account too.
932 	 */
933 	in_rate = tb_dp_cap_get_rate_ext(in_dp_cap);
934 	out_rate = tb_dp_cap_get_rate_ext(out_dp_cap);
935 	rate = min(in_rate, out_rate);
936 	tmp = tb_dp_bandwidth(rate, lanes);
937 
938 	tb_tunnel_dbg(tunnel,
939 		      "maximum bandwidth through allocation mode %u Mb/s x%u = %u Mb/s\n",
940 		      rate, lanes, tmp);
941 
942 	for (granularity = 250; tmp / granularity > 255 && granularity <= 1000;
943 	     granularity *= 2)
944 		;
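
	/*
	 * For example, UHBR20 x4 gives 20000 * 4 * 128 / 132 = 77575 Mb/s:
	 * 77575 / 250 = 310 > 255, so the granularity doubles once to 500
	 * Mb/s (77575 / 500 = 155 then fits in the 8-bit field).
	 */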
945 
946 	tb_tunnel_dbg(tunnel, "granularity %d Mb/s\n", granularity);
947 
948 	/*
949 	 * Returns -EINVAL if granularity above is outside of the
950 	 * accepted ranges.
951 	 */
952 	ret = usb4_dp_port_set_granularity(in, granularity);
953 	if (ret)
954 		return ret;
955 
956 	/*
957 	 * Bandwidth estimation is pretty much what we have in
958 	 * max_up/down fields. For discovery we just read what the
959 	 * estimation was set to.
960 	 */
961 	if (tb_tunnel_direction_downstream(tunnel))
962 		estimated_bw = tunnel->max_down;
963 	else
964 		estimated_bw = tunnel->max_up;
965 
966 	tb_tunnel_dbg(tunnel, "estimated bandwidth %d Mb/s\n", estimated_bw);
967 
968 	ret = usb4_dp_port_set_estimated_bandwidth(in, estimated_bw);
969 	if (ret)
970 		return ret;
971 
972 	/* Initial allocation should be 0 according to the spec */
973 	ret = usb4_dp_port_allocate_bandwidth(in, 0);
974 	if (ret)
975 		return ret;
976 
977 	tb_tunnel_dbg(tunnel, "bandwidth allocation mode enabled\n");
978 	return 0;
979 }
980 
981 static int tb_dp_pre_activate(struct tb_tunnel *tunnel)
982 {
983 	struct tb_port *in = tunnel->src_port;
984 	struct tb_switch *sw = in->sw;
985 	struct tb *tb = in->sw->tb;
986 	int ret;
987 
988 	ret = tb_dp_xchg_caps(tunnel);
989 	if (ret)
990 		return ret;
991 
992 	if (!tb_switch_is_usb4(sw))
993 		return 0;
994 
995 	if (!usb4_dp_port_bandwidth_mode_supported(in))
996 		return 0;
997 
998 	tb_tunnel_dbg(tunnel, "bandwidth allocation mode supported\n");
999 
1000 	ret = usb4_dp_port_set_cm_id(in, tb->index);
1001 	if (ret)
1002 		return ret;
1003 
1004 	return tb_dp_bandwidth_alloc_mode_enable(tunnel);
1005 }
1006 
1007 static void tb_dp_post_deactivate(struct tb_tunnel *tunnel)
1008 {
1009 	struct tb_port *in = tunnel->src_port;
1010 
1011 	if (!usb4_dp_port_bandwidth_mode_supported(in))
1012 		return;
1013 	if (usb4_dp_port_bandwidth_mode_enabled(in)) {
1014 		usb4_dp_port_set_cm_bandwidth_mode_supported(in, false);
1015 		tb_tunnel_dbg(tunnel, "bandwidth allocation mode disabled\n");
1016 	}
1017 }
1018 
1019 static ktime_t dprx_timeout_to_ktime(int timeout_msec)
1020 {
1021 	return timeout_msec >= 0 ?
1022 		ktime_add_ms(ktime_get(), timeout_msec) : KTIME_MAX;
1023 }
1024 
1025 static int tb_dp_wait_dprx(struct tb_tunnel *tunnel, int timeout_msec)
1026 {
1027 	ktime_t timeout = dprx_timeout_to_ktime(timeout_msec);
1028 	struct tb_port *in = tunnel->src_port;
1029 
1030 	/*
1031 	 * Wait for DPRX done. Normally it should already be set for an
1032 	 * active tunnel.
1033 	 */
1034 	do {
1035 		u32 val;
1036 		int ret;
1037 
1038 		ret = tb_port_read(in, &val, TB_CFG_PORT,
1039 				   in->cap_adap + DP_COMMON_CAP, 1);
1040 		if (ret)
1041 			return ret;
1042 
1043 		if (val & DP_COMMON_CAP_DPRX_DONE)
1044 			return 0;
1045 
1046 		usleep_range(100, 150);
1047 	} while (ktime_before(ktime_get(), timeout));
1048 
1049 	tb_tunnel_dbg(tunnel, "DPRX read timeout\n");
1050 	return -ETIMEDOUT;
1051 }
1052 
1053 static void tb_dp_dprx_work(struct work_struct *work)
1054 {
1055 	struct tb_tunnel *tunnel = container_of(work, typeof(*tunnel), dprx_work.work);
1056 	struct tb *tb = tunnel->tb;
1057 
1058 	if (!tunnel->dprx_canceled) {
1059 		mutex_lock(&tb->lock);
1060 		if (tb_dp_is_usb4(tunnel->src_port->sw) &&
1061 		    tb_dp_wait_dprx(tunnel, TB_DPRX_WAIT_TIMEOUT)) {
1062 			if (ktime_before(ktime_get(), tunnel->dprx_timeout)) {
1063 				queue_delayed_work(tb->wq, &tunnel->dprx_work,
1064 						   msecs_to_jiffies(TB_DPRX_POLL_DELAY));
1065 				mutex_unlock(&tb->lock);
1066 				return;
1067 			}
1068 		} else {
1069 			tb_tunnel_set_active(tunnel, true);
1070 		}
1071 		mutex_unlock(&tb->lock);
1072 	}
1073 
1074 	if (tunnel->callback)
1075 		tunnel->callback(tunnel, tunnel->callback_data);
1076 	tb_tunnel_put(tunnel);
1077 }
1078 
1079 static int tb_dp_dprx_start(struct tb_tunnel *tunnel)
1080 {
1081 	/*
1082 	 * Bump up the reference to keep the tunnel around. It will be
1083 	 * dropped in tb_dp_dprx_stop() once the tunnel is deactivated.
1084 	 */
1085 	tb_tunnel_get(tunnel);
1086 
1087 	tunnel->dprx_started = true;
1088 
1089 	if (tunnel->callback) {
1090 		tunnel->dprx_timeout = dprx_timeout_to_ktime(dprx_timeout);
1091 		queue_delayed_work(tunnel->tb->wq, &tunnel->dprx_work, 0);
1092 		return -EINPROGRESS;
1093 	}
1094 
1095 	return tb_dp_is_usb4(tunnel->src_port->sw) ?
1096 		tb_dp_wait_dprx(tunnel, dprx_timeout) : 0;
1097 }
1098 
1099 static void tb_dp_dprx_stop(struct tb_tunnel *tunnel)
1100 {
1101 	if (tunnel->dprx_started) {
1102 		tunnel->dprx_started = false;
1103 		tunnel->dprx_canceled = true;
1104 		if (cancel_delayed_work(&tunnel->dprx_work))
1105 			tb_tunnel_put(tunnel);
1106 	}
1107 }
1108 
1109 static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
1110 {
1111 	int ret;
1112 
1113 	if (active) {
1114 		struct tb_path **paths;
1115 		int last;
1116 
1117 		paths = tunnel->paths;
1118 		last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;
1119 
1120 		tb_dp_port_set_hops(tunnel->src_port,
1121 			paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
1122 			paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
1123 			paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);
1124 
1125 		tb_dp_port_set_hops(tunnel->dst_port,
1126 			paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
1127 			paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
1128 			paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
1129 	} else {
1130 		tb_dp_dprx_stop(tunnel);
1131 		tb_dp_port_hpd_clear(tunnel->src_port);
1132 		tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
1133 		if (tb_port_is_dpout(tunnel->dst_port))
1134 			tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
1135 	}
1136 
1137 	ret = tb_dp_port_enable(tunnel->src_port, active);
1138 	if (ret)
1139 		return ret;
1140 
1141 	if (tb_port_is_dpout(tunnel->dst_port)) {
1142 		ret = tb_dp_port_enable(tunnel->dst_port, active);
1143 		if (ret)
1144 			return ret;
1145 	}
1146 
1147 	return active ? tb_dp_dprx_start(tunnel) : 0;
1148 }
1149 
1150 /**
1151  * tb_dp_bandwidth_mode_maximum_bandwidth() - Maximum possible bandwidth
1152  * @tunnel: DP tunnel to check
1153  * @max_bw_rounded: Maximum bandwidth in Mb/s rounded up to the next granularity
1154  *
1155  * Return: Maximum possible bandwidth for this tunnel in Mb/s, negative errno
1156  * in case of failure.
1157  */
1158 static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
1159 						  int *max_bw_rounded)
1160 {
1161 	struct tb_port *in = tunnel->src_port;
1162 	int ret, rate, lanes, max_bw;
1163 	u32 cap;
1164 
1165 	/*
1166 	 * DP IN adapter DP_LOCAL_CAP gets updated to the lowest AUX
1167 	 * read parameter values so we can use this to determine the
1168 	 * maximum possible bandwidth over this link.
1169 	 *
1170 	 * See USB4 v2 spec 1.0 10.4.4.5.
1171 	 */
1172 	ret = tb_port_read(in, &cap, TB_CFG_PORT,
1173 			   in->cap_adap + DP_LOCAL_CAP, 1);
1174 	if (ret)
1175 		return ret;
1176 
1177 	rate = tb_dp_cap_get_rate_ext(cap);
1178 	lanes = tb_dp_cap_get_lanes(cap);
1179 
1180 	max_bw = tb_dp_bandwidth(rate, lanes);
1181 
1182 	if (max_bw_rounded) {
1183 		ret = usb4_dp_port_granularity(in);
1184 		if (ret < 0)
1185 			return ret;
1186 		*max_bw_rounded = roundup(max_bw, ret);
1187 	}
1188 
1189 	return max_bw;
1190 }
1191 
1192 static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
1193 						   int *consumed_up,
1194 						   int *consumed_down)
1195 {
1196 	struct tb_port *in = tunnel->src_port;
1197 	int ret, allocated_bw, max_bw_rounded;
1198 
1199 	if (!usb4_dp_port_bandwidth_mode_enabled(in))
1200 		return -EOPNOTSUPP;
1201 
1202 	if (!tunnel->bw_mode)
1203 		return -EOPNOTSUPP;
1204 
1205 	/* Read what was allocated previously if any */
1206 	ret = usb4_dp_port_allocated_bandwidth(in);
1207 	if (ret < 0)
1208 		return ret;
1209 	allocated_bw = ret;
1210 
1211 	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded);
1212 	if (ret < 0)
1213 		return ret;
1214 	if (allocated_bw == max_bw_rounded)
1215 		allocated_bw = ret;
1216 
1217 	if (tb_tunnel_direction_downstream(tunnel)) {
1218 		*consumed_up = 0;
1219 		*consumed_down = allocated_bw;
1220 	} else {
1221 		*consumed_up = allocated_bw;
1222 		*consumed_down = 0;
1223 	}
1224 
1225 	return 0;
1226 }
1227 
1228 static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
1229 				     int *allocated_down)
1230 {
1231 	struct tb_port *in = tunnel->src_port;
1232 
1233 	/*
1234 	 * If we have already set the allocated bandwidth then use that.
1235 	 * Otherwise we read it from the DPRX.
1236 	 */
1237 	if (usb4_dp_port_bandwidth_mode_enabled(in) && tunnel->bw_mode) {
1238 		int ret, allocated_bw, max_bw_rounded;
1239 
1240 		ret = usb4_dp_port_allocated_bandwidth(in);
1241 		if (ret < 0)
1242 			return ret;
1243 		allocated_bw = ret;
1244 
1245 		ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel,
1246 							     &max_bw_rounded);
1247 		if (ret < 0)
1248 			return ret;
1249 		if (allocated_bw == max_bw_rounded)
1250 			allocated_bw = ret;
1251 
1252 		if (tb_tunnel_direction_downstream(tunnel)) {
1253 			*allocated_up = 0;
1254 			*allocated_down = allocated_bw;
1255 		} else {
1256 			*allocated_up = allocated_bw;
1257 			*allocated_down = 0;
1258 		}
1259 		return 0;
1260 	}
1261 
1262 	return tunnel->consumed_bandwidth(tunnel, allocated_up,
1263 					  allocated_down);
1264 }
1265 
1266 static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
1267 				 int *alloc_down)
1268 {
1269 	struct tb_port *in = tunnel->src_port;
1270 	int max_bw_rounded, ret, tmp;
1271 
1272 	if (!usb4_dp_port_bandwidth_mode_enabled(in))
1273 		return -EOPNOTSUPP;
1274 
1275 	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded);
1276 	if (ret < 0)
1277 		return ret;
1278 
1279 	if (tb_tunnel_direction_downstream(tunnel)) {
1280 		tmp = min(*alloc_down, max_bw_rounded);
1281 		ret = usb4_dp_port_allocate_bandwidth(in, tmp);
1282 		if (ret)
1283 			return ret;
1284 		*alloc_down = tmp;
1285 		*alloc_up = 0;
1286 	} else {
1287 		tmp = min(*alloc_up, max_bw_rounded);
1288 		ret = usb4_dp_port_allocate_bandwidth(in, tmp);
1289 		if (ret)
1290 			return ret;
1291 		*alloc_down = 0;
1292 		*alloc_up = tmp;
1293 	}
1294 
1295 	/* Now we can use BW mode registers to figure out the bandwidth */
1296 	/* TODO: need to handle discovery too */
1297 	tunnel->bw_mode = true;
1298 	return 0;
1299 }
1300 
1301 /* Read cap from tunnel DP IN */
1302 static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
1303 			  u32 *lanes)
1304 {
1305 	struct tb_port *in = tunnel->src_port;
1306 	u32 val;
1307 	int ret;
1308 
1309 	switch (cap) {
1310 	case DP_LOCAL_CAP:
1311 	case DP_REMOTE_CAP:
1312 	case DP_COMMON_CAP:
1313 		break;
1314 
1315 	default:
1316 		tb_tunnel_WARN(tunnel, "invalid capability index %#x\n", cap);
1317 		return -EINVAL;
1318 	}
1319 
1320 	/*
1321 	 * Read from the copied remote cap so that we take into account
1322 	 * if capabilities were reduced during exchange.
1323 	 */
1324 	ret = tb_port_read(in, &val, TB_CFG_PORT, in->cap_adap + cap, 1);
1325 	if (ret)
1326 		return ret;
1327 
1328 	*rate = tb_dp_cap_get_rate(val);
1329 	*lanes = tb_dp_cap_get_lanes(val);
1330 	return 0;
1331 }
1332 
1333 static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
1334 				   int *max_down)
1335 {
1336 	int ret;
1337 
1338 	if (!usb4_dp_port_bandwidth_mode_enabled(tunnel->src_port))
1339 		return -EOPNOTSUPP;
1340 
1341 	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, NULL);
1342 	if (ret < 0)
1343 		return ret;
1344 
1345 	if (tb_tunnel_direction_downstream(tunnel)) {
1346 		*max_up = 0;
1347 		*max_down = ret;
1348 	} else {
1349 		*max_up = ret;
1350 		*max_down = 0;
1351 	}
1352 
1353 	return 0;
1354 }
1355 
1356 static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
1357 				    int *consumed_down)
1358 {
1359 	const struct tb_switch *sw = tunnel->src_port->sw;
1360 	u32 rate = 0, lanes = 0;
1361 	int ret;
1362 
1363 	if (tb_dp_is_usb4(sw)) {
1364 		ret = tb_dp_wait_dprx(tunnel, 0);
1365 		if (ret) {
1366 			if (ret == -ETIMEDOUT) {
1367 				/*
1368 				 * While we wait for DPRX to complete,
1369 				 * the tunnel consumes as much as was
1370 				 * initially reserved for it.
1371 				 */
1372 				ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP,
1373 						     &rate, &lanes);
1374 				if (ret)
1375 					return ret;
1376 			} else {
1377 				return ret;
1378 			}
1379 		} else {
1380 			/*
1381 			 * On USB4 routers check if the bandwidth allocation
1382 			 * mode is enabled first and then read the bandwidth
1383 			 * through those registers.
1384 			 */
1385 			ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up,
1386 								      consumed_down);
1387 			if (ret < 0) {
1388 				if (ret != -EOPNOTSUPP)
1389 					return ret;
1390 			} else if (!ret) {
1391 				return 0;
1392 			}
1393 			ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes);
1394 			if (ret)
1395 				return ret;
1396 		}
1397 	} else if (sw->generation >= 2) {
1398 		ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, &rate, &lanes);
1399 		if (ret)
1400 			return ret;
1401 	} else {
1402 	/* No bandwidth management for legacy devices */
1403 		*consumed_up = 0;
1404 		*consumed_down = 0;
1405 		return 0;
1406 	}
1407 
1408 	if (tb_tunnel_direction_downstream(tunnel)) {
1409 		*consumed_up = 0;
1410 		*consumed_down = tb_dp_bandwidth(rate, lanes);
1411 	} else {
1412 		*consumed_up = tb_dp_bandwidth(rate, lanes);
1413 		*consumed_down = 0;
1414 	}
1415 
1416 	return 0;
1417 }
1418 
1419 static void tb_dp_init_aux_credits(struct tb_path_hop *hop)
1420 {
1421 	struct tb_port *port = hop->in_port;
1422 	struct tb_switch *sw = port->sw;
1423 
1424 	if (tb_port_use_credit_allocation(port))
1425 		hop->initial_credits = sw->min_dp_aux_credits;
1426 	else
1427 		hop->initial_credits = 1;
1428 }
1429 
1430 static void tb_dp_init_aux_path(struct tb_path *path, bool pm_support)
1431 {
1432 	struct tb_path_hop *hop;
1433 
1434 	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
1435 	path->egress_shared_buffer = TB_PATH_NONE;
1436 	path->ingress_fc_enable = TB_PATH_ALL;
1437 	path->ingress_shared_buffer = TB_PATH_NONE;
1438 	path->priority = TB_DP_AUX_PRIORITY;
1439 	path->weight = TB_DP_AUX_WEIGHT;
1440 
1441 	tb_path_for_each_hop(path, hop) {
1442 		tb_dp_init_aux_credits(hop);
1443 		if (pm_support)
1444 			tb_init_pm_support(hop);
1445 	}
1446 }
1447 
1448 static int tb_dp_init_video_credits(struct tb_path_hop *hop)
1449 {
1450 	struct tb_port *port = hop->in_port;
1451 	struct tb_switch *sw = port->sw;
1452 
1453 	if (tb_port_use_credit_allocation(port)) {
1454 		unsigned int nfc_credits;
1455 		size_t max_dp_streams;
1456 
1457 		tb_available_credits(port, &max_dp_streams);
1458 		/*
1459 		 * Read the number of currently allocated NFC credits
1460 		 * from the lane adapter. Since we only use them for DP
1461 		 * tunneling we can use that to figure out how many DP
1462 		 * tunnels already go through the lane adapter.
1463 		 */
1464 		nfc_credits = port->config.nfc_credits &
1465 				ADP_CS_4_NFC_BUFFERS_MASK;
1466 		if (nfc_credits / sw->min_dp_main_credits > max_dp_streams)
1467 			return -ENOSPC;
1468 
1469 		hop->nfc_credits = sw->min_dp_main_credits;
1470 	} else {
1471 		hop->nfc_credits = min(port->total_credits - 2, 12U);
1472 	}
1473 
1474 	return 0;
1475 }
1476 
1477 static int tb_dp_init_video_path(struct tb_path *path, bool pm_support)
1478 {
1479 	struct tb_path_hop *hop;
1480 
1481 	path->egress_fc_enable = TB_PATH_NONE;
1482 	path->egress_shared_buffer = TB_PATH_NONE;
1483 	path->ingress_fc_enable = TB_PATH_NONE;
1484 	path->ingress_shared_buffer = TB_PATH_NONE;
1485 	path->priority = TB_DP_VIDEO_PRIORITY;
1486 	path->weight = TB_DP_VIDEO_WEIGHT;
1487 
1488 	tb_path_for_each_hop(path, hop) {
1489 		int ret;
1490 
1491 		ret = tb_dp_init_video_credits(hop);
1492 		if (ret)
1493 			return ret;
1494 		if (pm_support)
1495 			tb_init_pm_support(hop);
1496 	}
1497 
1498 	return 0;
1499 }
1500 
1501 static void tb_dp_dump(struct tb_tunnel *tunnel)
1502 {
1503 	struct tb_port *in, *out;
1504 	u32 dp_cap, rate, lanes;
1505 
1506 	in = tunnel->src_port;
1507 	out = tunnel->dst_port;
1508 
1509 	if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
1510 			 in->cap_adap + DP_LOCAL_CAP, 1))
1511 		return;
1512 
1513 	rate = tb_dp_cap_get_rate(dp_cap);
1514 	lanes = tb_dp_cap_get_lanes(dp_cap);
1515 
1516 	tb_tunnel_dbg(tunnel,
1517 		      "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
1518 		      rate, lanes, tb_dp_bandwidth(rate, lanes));
1519 
1520 	if (tb_port_read(out, &dp_cap, TB_CFG_PORT,
1521 			 out->cap_adap + DP_LOCAL_CAP, 1))
1522 		return;
1523 
1524 	rate = tb_dp_cap_get_rate(dp_cap);
1525 	lanes = tb_dp_cap_get_lanes(dp_cap);
1526 
1527 	tb_tunnel_dbg(tunnel,
1528 		      "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
1529 		      rate, lanes, tb_dp_bandwidth(rate, lanes));
1530 
1531 	if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
1532 			 in->cap_adap + DP_REMOTE_CAP, 1))
1533 		return;
1534 
1535 	rate = tb_dp_cap_get_rate(dp_cap);
1536 	lanes = tb_dp_cap_get_lanes(dp_cap);
1537 
1538 	tb_tunnel_dbg(tunnel, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
1539 		      rate, lanes, tb_dp_bandwidth(rate, lanes));
1540 }
1541 
1542 /**
1543  * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
1544  * @tb: Pointer to the domain structure
1545  * @in: DP in adapter
1546  * @alloc_hopid: Allocate HopIDs from visited ports
1547  *
1548  * If @in adapter is active, follows the tunnel to the DP out adapter
1549  * and back.
1551  *
1552  * Return: Pointer to &struct tb_tunnel or %NULL if no tunnel found.
1553  */
1554 struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
1555 					bool alloc_hopid)
1556 {
1557 	struct tb_tunnel *tunnel;
1558 	struct tb_port *port;
1559 	struct tb_path *path;
1560 
1561 	if (!tb_dp_port_is_enabled(in))
1562 		return NULL;
1563 
1564 	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
1565 	if (!tunnel)
1566 		return NULL;
1567 
1568 	tunnel->pre_activate = tb_dp_pre_activate;
1569 	tunnel->activate = tb_dp_activate;
1570 	tunnel->post_deactivate = tb_dp_post_deactivate;
1571 	tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
1572 	tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
1573 	tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
1574 	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
1575 	tunnel->src_port = in;
1576 
1577 	path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
1578 				&tunnel->dst_port, "Video", alloc_hopid);
1579 	if (!path) {
1580 		/* Just disable the DP IN port */
1581 		tb_dp_port_enable(in, false);
1582 		goto err_free;
1583 	}
1584 	tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
1585 	if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], false))
1586 		goto err_free;
1587 
1588 	path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX",
1589 				alloc_hopid);
1590 	if (!path)
1591 		goto err_deactivate;
1592 	tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
1593 	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT], false);
1594 
1595 	path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
1596 				&port, "AUX RX", alloc_hopid);
1597 	if (!path)
1598 		goto err_deactivate;
1599 	tunnel->paths[TB_DP_AUX_PATH_IN] = path;
1600 	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN], false);
1601 
1602 	/* Validate that the tunnel is complete */
1603 	if (!tb_port_is_dpout(tunnel->dst_port)) {
1604 		tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
1605 		goto err_deactivate;
1606 	}
1607 
1608 	if (!tb_dp_port_is_enabled(tunnel->dst_port))
1609 		goto err_deactivate;
1610 
1611 	if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
1612 		goto err_deactivate;
1613 
1614 	if (port != tunnel->src_port) {
1615 		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
1616 		goto err_deactivate;
1617 	}
1618 
1619 	tb_dp_dump(tunnel);
1620 
1621 	tb_tunnel_dbg(tunnel, "discovered\n");
1622 	return tunnel;
1623 
1624 err_deactivate:
1625 	tb_tunnel_deactivate(tunnel);
1626 err_free:
1627 	tb_tunnel_put(tunnel);
1628 
1629 	return NULL;
1630 }
1631 
1632 /**
1633  * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
1634  * @tb: Pointer to the domain structure
1635  * @in: DP in adapter port
1636  * @out: DP out adapter port
1637  * @link_nr: Preferred lane adapter when the link is not bonded
1638  * @max_up: Maximum available upstream bandwidth for the DP tunnel.
1639  *	    %0 if no available bandwidth.
1640  * @max_down: Maximum available downstream bandwidth for the DP tunnel.
1641  *	      %0 if no available bandwidth.
1642  * @callback: Optional callback that is called when the DP tunnel is
1643  *	      fully activated (or there is an error)
1644  * @callback_data: Optional data for @callback
1645  *
1646  * Allocates a tunnel between @in and @out that is capable of tunneling
1647  * Display Port traffic. If @callback is not %NULL it will be called
1648  * after tb_tunnel_activate() once the tunnel has been fully activated.
1649  * The callback can use tb_tunnel_is_active() to check whether the
1650  * activation succeeded (%false meaning there was a problem).
1651  * The @callback is called without @tb->lock held.
1652  *
1653  * Return: Pointer to &struct tb_tunnel or %NULL in case of failure.
1654  */
1655 struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
1656 				     struct tb_port *out, int link_nr,
1657 				     int max_up, int max_down,
1658 				     void (*callback)(struct tb_tunnel *, void *),
1659 				     void *callback_data)
1660 {
1661 	struct tb_tunnel *tunnel;
1662 	struct tb_path **paths;
1663 	struct tb_path *path;
1664 	bool pm_support;
1665 
1666 	if (WARN_ON(!in->cap_adap || !out->cap_adap))
1667 		return NULL;
1668 
1669 	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
1670 	if (!tunnel)
1671 		return NULL;
1672 
1673 	tunnel->pre_activate = tb_dp_pre_activate;
1674 	tunnel->activate = tb_dp_activate;
1675 	tunnel->post_deactivate = tb_dp_post_deactivate;
1676 	tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
1677 	tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
1678 	tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
1679 	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
1680 	tunnel->src_port = in;
1681 	tunnel->dst_port = out;
1682 	tunnel->max_up = max_up;
1683 	tunnel->max_down = max_down;
1684 	tunnel->callback = callback;
1685 	tunnel->callback_data = callback_data;
1686 	INIT_DELAYED_WORK(&tunnel->dprx_work, tb_dp_dprx_work);
1687 
1688 	paths = tunnel->paths;
1689 	pm_support = usb4_switch_version(in->sw) >= 2;
1690 
1691 	path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
1692 			     link_nr, "Video");
1693 	if (!path)
1694 		goto err_free;
1695 	tb_dp_init_video_path(path, pm_support);
1696 	paths[TB_DP_VIDEO_PATH_OUT] = path;
1697 
1698 	path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
1699 			     TB_DP_AUX_TX_HOPID, link_nr, "AUX TX");
1700 	if (!path)
1701 		goto err_free;
1702 	tb_dp_init_aux_path(path, pm_support);
1703 	paths[TB_DP_AUX_PATH_OUT] = path;
1704 
1705 	path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
1706 			     TB_DP_AUX_RX_HOPID, link_nr, "AUX RX");
1707 	if (!path)
1708 		goto err_free;
1709 	tb_dp_init_aux_path(path, pm_support);
1710 	paths[TB_DP_AUX_PATH_IN] = path;
1711 
1712 	return tunnel;
1713 
1714 err_free:
1715 	tb_tunnel_put(tunnel);
1716 	return NULL;
1717 }
1718 
1719 static unsigned int tb_dma_available_credits(const struct tb_port *port)
1720 {
1721 	const struct tb_switch *sw = port->sw;
1722 	int credits;
1723 
1724 	credits = tb_available_credits(port, NULL);
1725 	if (tb_acpi_may_tunnel_pcie())
1726 		credits -= sw->max_pcie_credits;
1727 	credits -= port->dma_credits;
1728 
1729 	return credits > 0 ? credits : 0;
1730 }
1731 
1732 static int tb_dma_reserve_credits(struct tb_path_hop *hop, unsigned int credits)
1733 {
1734 	struct tb_port *port = hop->in_port;
1735 
1736 	if (tb_port_use_credit_allocation(port)) {
1737 		unsigned int available = tb_dma_available_credits(port);
1738 
1739 		/*
1740 		 * Need to have at least TB_MIN_DMA_CREDITS, otherwise
1741 		 * the DMA path cannot be established.
1742 		 */
1743 		if (available < TB_MIN_DMA_CREDITS)
1744 			return -ENOSPC;
1745 
1746 		credits = min(credits, available);
1748 
1749 		tb_port_dbg(port, "reserving %u credits for DMA path\n",
1750 			    credits);
1751 
1752 		port->dma_credits += credits;
1753 	} else {
1754 		if (tb_port_is_null(port))
1755 			credits = port->bonded ? 14 : 6;
1756 		else
1757 			credits = min(port->total_credits, credits);
1758 	}
1759 
1760 	hop->initial_credits = credits;
1761 	return 0;
1762 }
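
/*
 * For example, with credits = 14 requested: a USB4 lane adapter with
 * only 10 DMA credits available reserves 10, a legacy bonded lane
 * adapter gets the fixed 14, and an unbonded legacy one gets 6 (all
 * numbers illustrative).
 */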
1763 
1764 /* Path from lane adapter to NHI */
1765 static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits)
1766 {
1767 	struct tb_path_hop *hop;
1768 	unsigned int i, tmp;
1769 
1770 	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
1771 	path->ingress_fc_enable = TB_PATH_ALL;
1772 	path->egress_shared_buffer = TB_PATH_NONE;
1773 	path->ingress_shared_buffer = TB_PATH_NONE;
1774 	path->priority = TB_DMA_PRIORITY;
1775 	path->weight = TB_DMA_WEIGHT;
1776 	path->clear_fc = true;
1777 
1778 	/*
1779 	 * First lane adapter is the one connected to the remote host.
1780 	 * We don't tunnel other traffic over this link so we can use
1781 	 * all the credits (except the ones reserved for control traffic).
1782 	 */
1783 	hop = &path->hops[0];
1784 	tmp = min(tb_usable_credits(hop->in_port), credits);
1785 	hop->initial_credits = tmp;
1786 	hop->in_port->dma_credits += tmp;
1787 
1788 	for (i = 1; i < path->path_length; i++) {
1789 		int ret;
1790 
1791 		ret = tb_dma_reserve_credits(&path->hops[i], credits);
1792 		if (ret)
1793 			return ret;
1794 	}
1795 
1796 	return 0;
1797 }
1798 
1799 /* Path from NHI to lane adapter */
1800 static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits)
1801 {
1802 	struct tb_path_hop *hop;
1803 
1804 	path->egress_fc_enable = TB_PATH_ALL;
1805 	path->ingress_fc_enable = TB_PATH_ALL;
1806 	path->egress_shared_buffer = TB_PATH_NONE;
1807 	path->ingress_shared_buffer = TB_PATH_NONE;
1808 	path->priority = TB_DMA_PRIORITY;
1809 	path->weight = TB_DMA_WEIGHT;
1810 	path->clear_fc = true;
1811 
1812 	tb_path_for_each_hop(path, hop) {
1813 		int ret;
1814 
1815 		ret = tb_dma_reserve_credits(hop, credits);
1816 		if (ret)
1817 			return ret;
1818 	}
1819 
1820 	return 0;
1821 }
1822 
1823 static void tb_dma_release_credits(struct tb_path_hop *hop)
1824 {
1825 	struct tb_port *port = hop->in_port;
1826 
1827 	if (tb_port_use_credit_allocation(port)) {
1828 		port->dma_credits -= hop->initial_credits;
1829 
1830 		tb_port_dbg(port, "released %u DMA path credits\n",
1831 			    hop->initial_credits);
1832 	}
1833 }
1834 
1835 static void tb_dma_destroy_path(struct tb_path *path)
1836 {
1837 	struct tb_path_hop *hop;
1838 
1839 	tb_path_for_each_hop(path, hop)
1840 		tb_dma_release_credits(hop);
1841 }
1842 
1843 static void tb_dma_destroy(struct tb_tunnel *tunnel)
1844 {
1845 	int i;
1846 
1847 	for (i = 0; i < tunnel->npaths; i++) {
1848 		if (!tunnel->paths[i])
1849 			continue;
1850 		tb_dma_destroy_path(tunnel->paths[i]);
1851 	}
1852 }
1853 
1854 /**
1855  * tb_tunnel_alloc_dma() - allocate a DMA tunnel
1856  * @tb: Pointer to the domain structure
1857  * @nhi: Host controller port
1858  * @dst: Destination null port which the other domain is connected to
1859  * @transmit_path: HopID used for transmitting packets
1860  * @transmit_ring: NHI ring number used to send packets towards the
1861  *		   other domain. Set to %-1 if TX path is not needed.
1862  * @receive_path: HopID used for receiving packets
1863  * @receive_ring: NHI ring number used to receive packets from the
1864  *		  other domain. Set to %-1 if RX path is not needed.
1865  *
1866  * Return: Pointer to &struct tb_tunnel or %NULL in case of failure.
1867  */
1868 struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
1869 				      struct tb_port *dst, int transmit_path,
1870 				      int transmit_ring, int receive_path,
1871 				      int receive_ring)
1872 {
1873 	struct tb_tunnel *tunnel;
1874 	size_t npaths = 0, i = 0;
1875 	struct tb_path *path;
1876 	int credits;
1877 
1878 	/* Ring 0 is reserved for control channel */
1879 	if (WARN_ON(!receive_ring || !transmit_ring))
1880 		return NULL;
1881 
1882 	if (receive_ring > 0)
1883 		npaths++;
1884 	if (transmit_ring > 0)
1885 		npaths++;
1886 
1887 	if (WARN_ON(!npaths))
1888 		return NULL;
1889 
1890 	tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA);
1891 	if (!tunnel)
1892 		return NULL;
1893 
1894 	tunnel->src_port = nhi;
1895 	tunnel->dst_port = dst;
1896 	tunnel->destroy = tb_dma_destroy;
1897 
1898 	credits = min_not_zero(dma_credits, nhi->sw->max_dma_credits);
1899 
1900 	if (receive_ring > 0) {
1901 		path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
1902 				     "DMA RX");
1903 		if (!path)
1904 			goto err_free;
1905 		tunnel->paths[i++] = path;
1906 		if (tb_dma_init_rx_path(path, credits)) {
1907 			tb_tunnel_dbg(tunnel, "not enough buffers for RX path\n");
1908 			goto err_free;
1909 		}
1910 	}
1911 
1912 	if (transmit_ring > 0) {
1913 		path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
1914 				     "DMA TX");
1915 		if (!path)
1916 			goto err_free;
1917 		tunnel->paths[i++] = path;
1918 		if (tb_dma_init_tx_path(path, credits)) {
1919 			tb_tunnel_dbg(tunnel, "not enough buffers for TX path\n");
1920 			goto err_free;
1921 		}
1922 	}
1923 
1924 	return tunnel;
1925 
1926 err_free:
1927 	tb_tunnel_put(tunnel);
1928 	return NULL;
1929 }
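
/*
 * A minimal caller-side sketch (not part of this file; the port
 * variables and the HopID/ring values are hypothetical). Ring 0 is
 * reserved for the control channel so data tunnels use ring 1 and
 * above:
 *
 *	struct tb_tunnel *tunnel;
 *
 *	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, xdomain_port,
 *				     8, 1, 8, 1);
 *	if (!tunnel)
 *		return -ENOMEM;
 *	if (tb_tunnel_activate(tunnel)) {
 *		tb_tunnel_put(tunnel);
 *		return -EIO;
 *	}
 */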
1930 
1931 /**
1932  * tb_tunnel_match_dma() - Match DMA tunnel
1933  * @tunnel: Tunnel to match
1934  * @transmit_path: HopID used for transmitting packets. Pass %-1 to ignore.
1935  * @transmit_ring: NHI ring number used to send packets towards the
1936  *		   other domain. Pass %-1 to ignore.
1937  * @receive_path: HopID used for receiving packets. Pass %-1 to ignore.
1938  * @receive_ring: NHI ring number used to receive packets from the
1939  *		  other domain. Pass %-1 to ignore.
1940  *
1941  * This function can be used to match a specific DMA tunnel when there
1942  * are multiple DMA tunnels going through the same XDomain connection.
1943  *
1944  * Return: %true if there is a match, %false otherwise.
1945  */
1946 bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
1947 			 int transmit_ring, int receive_path, int receive_ring)
1948 {
1949 	const struct tb_path *tx_path = NULL, *rx_path = NULL;
1950 	int i;
1951 
1952 	if (!receive_ring || !transmit_ring)
1953 		return false;
1954 
1955 	for (i = 0; i < tunnel->npaths; i++) {
1956 		const struct tb_path *path = tunnel->paths[i];
1957 
1958 		if (!path)
1959 			continue;
1960 
1961 		if (tb_port_is_nhi(path->hops[0].in_port))
1962 			tx_path = path;
1963 		else if (tb_port_is_nhi(path->hops[path->path_length - 1].out_port))
1964 			rx_path = path;
1965 	}
1966 
1967 	if (transmit_ring > 0 || transmit_path > 0) {
1968 		if (!tx_path)
1969 			return false;
1970 		if (transmit_ring > 0 &&
1971 		    (tx_path->hops[0].in_hop_index != transmit_ring))
1972 			return false;
1973 		if (transmit_path > 0 &&
1974 		    (tx_path->hops[tx_path->path_length - 1].next_hop_index != transmit_path))
1975 			return false;
1976 	}
1977 
1978 	if (receive_ring > 0 || receive_path > 0) {
1979 		if (!rx_path)
1980 			return false;
1981 		if (receive_path > 0 &&
1982 		    (rx_path->hops[0].in_hop_index != receive_path))
1983 			return false;
1984 		if (receive_ring > 0 &&
1985 		    (rx_path->hops[rx_path->path_length - 1].next_hop_index != receive_ring))
1986 			return false;
1987 	}
1988 
1989 	return true;
1990 }
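
/*
 * Illustrative lookup (hedged sketch; the tunnel list and the ring
 * variables are assumed to exist in the caller): to find the DMA
 * tunnel that uses the given rings while ignoring the HopIDs, pass
 * %-1 for both path arguments:
 *
 *	list_for_each_entry(tunnel, &tunnels, list) {
 *		if (tb_tunnel_is_dma(tunnel) &&
 *		    tb_tunnel_match_dma(tunnel, -1, transmit_ring,
 *					-1, receive_ring))
 *			return tunnel;
 *	}
 */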
1991 
1992 static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
1993 {
1994 	int ret, up_max_rate, down_max_rate;
1995 
1996 	ret = usb4_usb3_port_max_link_rate(up);
1997 	if (ret < 0)
1998 		return ret;
1999 	up_max_rate = ret;
2000 
2001 	ret = usb4_usb3_port_max_link_rate(down);
2002 	if (ret < 0)
2003 		return ret;
2004 	down_max_rate = ret;
2005 
2006 	return min(up_max_rate, down_max_rate);
2007 }
2008 
2009 static int tb_usb3_pre_activate(struct tb_tunnel *tunnel)
2010 {
2011 	tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
2012 		      tunnel->allocated_up, tunnel->allocated_down);
2013 
2014 	return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
2015 						 &tunnel->allocated_up,
2016 						 &tunnel->allocated_down);
2017 }
2018 
2019 static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
2020 {
2021 	int res;
2022 
2023 	res = tb_usb3_port_enable(tunnel->src_port, activate);
2024 	if (res)
2025 		return res;
2026 
2027 	if (tb_port_is_usb3_up(tunnel->dst_port))
2028 		return tb_usb3_port_enable(tunnel->dst_port, activate);
2029 
2030 	return 0;
2031 }
2032 
2033 static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
2034 		int *consumed_up, int *consumed_down)
2035 {
2036 	struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
2037 	int pcie_weight = tb_acpi_may_tunnel_pcie() ? TB_PCI_WEIGHT : 0;
2038 
2039 	/*
2040 	 * PCIe tunneling, if enabled, affects the USB3 bandwidth so
2041 	 * take that into account here.
2042 	 */
2043 	*consumed_up = tunnel->allocated_up *
2044 		(TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
2045 	*consumed_down = tunnel->allocated_down *
2046 		(TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
2047 
2048 	if (tb_port_get_link_generation(port) >= 4) {
2049 		*consumed_up = max(*consumed_up, USB4_V2_USB3_MIN_BANDWIDTH);
2050 		*consumed_down = max(*consumed_down, USB4_V2_USB3_MIN_BANDWIDTH);
2051 	}
2052 
2053 	return 0;
2054 }
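
/*
 * Worked example of the weighting above: with TB_USB3_WEIGHT=2 and
 * PCIe tunneling enabled (TB_PCI_WEIGHT=1), an allocation of
 * 1000 Mb/s is reported as 1000 * (2 + 1) / 2 = 1500 Mb/s consumed.
 * On Gen 4 (USB4 v2) links the result is then raised to at least
 * USB4_V2_USB3_MIN_BANDWIDTH (3000 Mb/s) in both directions.
 */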
2055 
2056 static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
2057 {
2058 	int ret;
2059 
2060 	ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
2061 					       &tunnel->allocated_up,
2062 					       &tunnel->allocated_down);
2063 	if (ret)
2064 		return ret;
2065 
2066 	tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
2067 		      tunnel->allocated_up, tunnel->allocated_down);
2068 	return 0;
2069 }
2070 
2071 static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
2072 						int *available_up,
2073 						int *available_down)
2074 {
2075 	int ret, max_rate, allocate_up, allocate_down;
2076 
2077 	ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port);
2078 	if (ret < 0) {
2079 		tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
2080 		return;
2081 	}
2082 
2083 	/*
2084 	 * 90% of the max rate can be allocated for isochronous
2085 	 * transfers.
2086 	 */
2087 	max_rate = ret * 90 / 100;
2088 
2089 	/* No need to reclaim if already at maximum */
2090 	if (tunnel->allocated_up >= max_rate &&
2091 	    tunnel->allocated_down >= max_rate)
2092 		return;
2093 
2094 	/* Don't go lower than what is already allocated */
2095 	allocate_up = min(max_rate, *available_up);
2096 	if (allocate_up < tunnel->allocated_up)
2097 		allocate_up = tunnel->allocated_up;
2098 
2099 	allocate_down = min(max_rate, *available_down);
2100 	if (allocate_down < tunnel->allocated_down)
2101 		allocate_down = tunnel->allocated_down;
2102 
2103 	/* If no changes no need to do more */
2104 	if (allocate_up == tunnel->allocated_up &&
2105 	    allocate_down == tunnel->allocated_down)
2106 		return;
2107 
2108 	ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
2109 						&allocate_down);
2110 	if (ret) {
2111 		tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
2112 		return;
2113 	}
2114 
2115 	tunnel->allocated_up = allocate_up;
2116 	*available_up -= tunnel->allocated_up;
2117 
2118 	tunnel->allocated_down = allocate_down;
2119 	*available_down -= tunnel->allocated_down;
2120 
2121 	tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
2122 		      tunnel->allocated_up, tunnel->allocated_down);
2123 }
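
/*
 * Worked example of the reclaim logic (link rate hypothetical): on a
 * 10 Gb/s USB3 link, max_rate = 10000 * 90 / 100 = 9000 Mb/s. If
 * 4000 Mb/s is currently allocated downstream and only 3000 Mb/s is
 * available, min(9000, 3000) = 3000 falls below the current
 * allocation, so the allocation stays at 4000 Mb/s and nothing
 * changes for that direction.
 */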
2124 
2125 static void tb_usb3_init_credits(struct tb_path_hop *hop)
2126 {
2127 	struct tb_port *port = hop->in_port;
2128 	struct tb_switch *sw = port->sw;
2129 	unsigned int credits;
2130 
2131 	if (tb_port_use_credit_allocation(port)) {
2132 		credits = sw->max_usb3_credits;
2133 	} else {
2134 		if (tb_port_is_null(port))
2135 			credits = port->bonded ? 32 : 16;
2136 		else
2137 			credits = 7;
2138 	}
2139 
2140 	hop->initial_credits = credits;
2141 }
2142 
2143 static void tb_usb3_init_path(struct tb_path *path)
2144 {
2145 	struct tb_path_hop *hop;
2146 
2147 	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
2148 	path->egress_shared_buffer = TB_PATH_NONE;
2149 	path->ingress_fc_enable = TB_PATH_ALL;
2150 	path->ingress_shared_buffer = TB_PATH_NONE;
2151 	path->priority = TB_USB3_PRIORITY;
2152 	path->weight = TB_USB3_WEIGHT;
2153 	path->drop_packages = 0;
2154 
2155 	tb_path_for_each_hop(path, hop)
2156 		tb_usb3_init_credits(hop);
2157 }
2158 
2159 /**
2160  * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
2161  * @tb: Pointer to the domain structure
2162  * @down: USB3 downstream adapter
2163  * @alloc_hopid: Allocate HopIDs from visited ports
2164  *
2165  * If the @down adapter is active, follows the tunnel to the USB3
2166  * upstream adapter and back.
2167  *
2168  * Return: Pointer to &struct tb_tunnel or %NULL if there was no tunnel.
2169  */
2170 struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
2171 					  bool alloc_hopid)
2172 {
2173 	struct tb_tunnel *tunnel;
2174 	struct tb_path *path;
2175 
2176 	if (!tb_usb3_port_is_enabled(down))
2177 		return NULL;
2178 
2179 	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
2180 	if (!tunnel)
2181 		return NULL;
2182 
2183 	tunnel->activate = tb_usb3_activate;
2184 	tunnel->src_port = down;
2185 
2186 	/*
2187 	 * Discover both paths even if they are not complete. We will
2188 	 * clean them up by calling tb_tunnel_deactivate() below in that
2189 	 * case.
2190 	 */
2191 	path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
2192 				&tunnel->dst_port, "USB3 Down", alloc_hopid);
2193 	if (!path) {
2194 		/* Just disable the downstream port */
2195 		tb_usb3_port_enable(down, false);
2196 		goto err_free;
2197 	}
2198 	tunnel->paths[TB_USB3_PATH_DOWN] = path;
2199 	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);
2200 
2201 	path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
2202 				"USB3 Up", alloc_hopid);
2203 	if (!path)
2204 		goto err_deactivate;
2205 	tunnel->paths[TB_USB3_PATH_UP] = path;
2206 	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);
2207 
2208 	/* Validate that the tunnel is complete */
2209 	if (!tb_port_is_usb3_up(tunnel->dst_port)) {
2210 		tb_port_warn(tunnel->dst_port,
2211 			     "path does not end on a USB3 adapter, cleaning up\n");
2212 		goto err_deactivate;
2213 	}
2214 
2215 	if (down != tunnel->src_port) {
2216 		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
2217 		goto err_deactivate;
2218 	}
2219 
2220 	if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
2221 		tb_tunnel_warn(tunnel,
2222 			       "tunnel is not fully activated, cleaning up\n");
2223 		goto err_deactivate;
2224 	}
2225 
2226 	if (!tb_route(down->sw)) {
2227 		int ret;
2228 
2229 		/*
2230 		 * Read the initial bandwidth allocation for the first
2231 		 * hop tunnel.
2232 		 */
2233 		ret = usb4_usb3_port_allocated_bandwidth(down,
2234 			&tunnel->allocated_up, &tunnel->allocated_down);
2235 		if (ret)
2236 			goto err_deactivate;
2237 
2238 		tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
2239 			      tunnel->allocated_up, tunnel->allocated_down);
2240 
2241 		tunnel->pre_activate = tb_usb3_pre_activate;
2242 		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
2243 		tunnel->release_unused_bandwidth =
2244 			tb_usb3_release_unused_bandwidth;
2245 		tunnel->reclaim_available_bandwidth =
2246 			tb_usb3_reclaim_available_bandwidth;
2247 	}
2248 
2249 	tb_tunnel_dbg(tunnel, "discovered\n");
2250 	return tunnel;
2251 
2252 err_deactivate:
2253 	tb_tunnel_deactivate(tunnel);
2254 err_free:
2255 	tb_tunnel_put(tunnel);
2256 
2257 	return NULL;
2258 }
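
/*
 * Sketch of how a connection manager could use the discovery helper
 * when taking over tunnels set up by the boot firmware (the list
 * handling is assumed, not part of this file):
 *
 *	struct tb_tunnel *tunnel;
 *
 *	tunnel = tb_tunnel_discover_usb3(tb, down, true);
 *	if (tunnel)
 *		list_add_tail(&tunnel->list, &discovered);
 */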
2259 
2260 /**
2261  * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
2262  * @tb: Pointer to the domain structure
2263  * @up: USB3 upstream adapter port
2264  * @down: USB3 downstream adapter port
2265  * @max_up: Maximum available upstream bandwidth for the USB3 tunnel.
2266  *	    %0 if no available bandwidth.
2267  * @max_down: Maximum available downstream bandwidth for the USB3 tunnel.
2268  *	      %0 if no available bandwidth.
2269  *
2270  * Allocate a USB3 tunnel. The ports must be of type %TB_TYPE_USB3_UP and
2271  * %TB_TYPE_USB3_DOWN.
2272  *
2273  * Return: Pointer to &struct tb_tunnel or %NULL in case of failure.
2274  */
2275 struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
2276 				       struct tb_port *down, int max_up,
2277 				       int max_down)
2278 {
2279 	struct tb_tunnel *tunnel;
2280 	struct tb_path *path;
2281 	int max_rate = 0;
2282 
2283 	if (!tb_route(down->sw) && (max_up > 0 || max_down > 0)) {
2284 		/*
2285 		 * For USB3 isochronous transfers, we allow bandwidth that is
2286 		 * not higher than 90% of the maximum bandwidth supported by
2287 		 * the USB3 adapters.
2288 		 */
2289 		max_rate = tb_usb3_max_link_rate(down, up);
2290 		if (max_rate < 0)
2291 			return NULL;
2292 
2293 		max_rate = max_rate * 90 / 100;
2294 		tb_port_dbg(up, "maximum required bandwidth for USB3 tunnel %d Mb/s\n",
2295 			    max_rate);
2296 	}
2297 
2298 	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
2299 	if (!tunnel)
2300 		return NULL;
2301 
2302 	tunnel->activate = tb_usb3_activate;
2303 	tunnel->src_port = down;
2304 	tunnel->dst_port = up;
2305 	tunnel->max_up = max_up;
2306 	tunnel->max_down = max_down;
2307 
2308 	path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
2309 			     "USB3 Down");
2310 	if (!path)
2311 		goto err_free;
2312 	tb_usb3_init_path(path);
2313 	tunnel->paths[TB_USB3_PATH_DOWN] = path;
2314 
2315 	path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
2316 			     "USB3 Up");
2317 	if (!path)
2318 		goto err_free;
2319 	tb_usb3_init_path(path);
2320 	tunnel->paths[TB_USB3_PATH_UP] = path;
2321 
2322 	if (!tb_route(down->sw)) {
2323 		tunnel->allocated_up = min(max_rate, max_up);
2324 		tunnel->allocated_down = min(max_rate, max_down);
2325 
2326 		tunnel->pre_activate = tb_usb3_pre_activate;
2327 		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
2328 		tunnel->release_unused_bandwidth =
2329 			tb_usb3_release_unused_bandwidth;
2330 		tunnel->reclaim_available_bandwidth =
2331 			tb_usb3_reclaim_available_bandwidth;
2332 	}
2333 
2334 	return tunnel;
2335 
2336 err_free:
2337 	tb_tunnel_put(tunnel);
2338 	return NULL;
2339 }
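
/*
 * Minimal caller-side sketch (hedged; the port variables and the
 * precomputed available bandwidth are assumptions of this example):
 *
 *	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
 *				      available_down);
 *	if (!tunnel)
 *		return -ENOMEM;
 *
 * For the tunnel starting from the host router the bandwidth
 * management callbacks get installed too, with the initial
 * allocation capped to 90% of the slower of the two adapters'
 * maximum link rates.
 */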
2340 
2341 /**
2342  * tb_tunnel_is_invalid() - check whether an activated tunnel is still valid
2343  * @tunnel: Tunnel to check
2344  *
2345  * Return: %true if any path of the tunnel is no longer valid, %false otherwise.
2346  */
2347 bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
2348 {
2349 	int i;
2350 
2351 	for (i = 0; i < tunnel->npaths; i++) {
2352 		WARN_ON(!tunnel->paths[i]->activated);
2353 		if (tb_path_is_invalid(tunnel->paths[i]))
2354 			return true;
2355 	}
2356 
2357 	return false;
2358 }
2359 
2360 /**
2361  * tb_tunnel_activate() - activate a tunnel
2362  * @tunnel: Tunnel to activate
2363  *
2364  * Return:
2365  * * %0 - On success.
2366  * * %-EINPROGRESS - If the tunnel activation is still in progress (used
2367  *   by DP tunnels while the DPRX capabilities read completes).
2368  * * Negative errno - Another error occurred.
2369  */
2370 int tb_tunnel_activate(struct tb_tunnel *tunnel)
2371 {
2372 	int res, i;
2373 
2374 	tb_tunnel_dbg(tunnel, "activating\n");
2375 
2376 	/*
2377 	 * Make sure all paths are properly disabled before enabling
2378 	 * them again.
2379 	 */
2380 	for (i = 0; i < tunnel->npaths; i++) {
2381 		if (tunnel->paths[i]->activated) {
2382 			tb_path_deactivate(tunnel->paths[i]);
2383 			tunnel->paths[i]->activated = false;
2384 		}
2385 	}
2386 
2387 	tunnel->state = TB_TUNNEL_ACTIVATING;
2388 
2389 	if (tunnel->pre_activate) {
2390 		res = tunnel->pre_activate(tunnel);
2391 		if (res)
2392 			return res;
2393 	}
2394 
2395 	for (i = 0; i < tunnel->npaths; i++) {
2396 		res = tb_path_activate(tunnel->paths[i]);
2397 		if (res)
2398 			goto err;
2399 	}
2400 
2401 	if (tunnel->activate) {
2402 		res = tunnel->activate(tunnel, true);
2403 		if (res) {
2404 			if (res == -EINPROGRESS)
2405 				return res;
2406 			goto err;
2407 		}
2408 	}
2409 
2410 	tb_tunnel_set_active(tunnel, true);
2411 	return 0;
2412 
2413 err:
2414 	tb_tunnel_warn(tunnel, "activation failed\n");
2415 	tb_tunnel_deactivate(tunnel);
2416 	return res;
2417 }
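
/*
 * Caller-side sketch (hedged): %-EINPROGRESS is not a failure, it
 * means a DP tunnel is still waiting for the DPRX capabilities read
 * to finish, so keep the tunnel around in that case:
 *
 *	ret = tb_tunnel_activate(tunnel);
 *	if (ret && ret != -EINPROGRESS) {
 *		tb_tunnel_put(tunnel);
 *		return ret;
 *	}
 */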
2418 
2419 /**
2420  * tb_tunnel_deactivate() - deactivate a tunnel
2421  * @tunnel: Tunnel to deactivate
2422  */
2423 void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
2424 {
2425 	int i;
2426 
2427 	tb_tunnel_dbg(tunnel, "deactivating\n");
2428 
2429 	if (tunnel->activate)
2430 		tunnel->activate(tunnel, false);
2431 
2432 	for (i = 0; i < tunnel->npaths; i++) {
2433 		if (tunnel->paths[i] && tunnel->paths[i]->activated)
2434 			tb_path_deactivate(tunnel->paths[i]);
2435 	}
2436 
2437 	if (tunnel->post_deactivate)
2438 		tunnel->post_deactivate(tunnel);
2439 
2440 	tb_tunnel_set_active(tunnel, false);
2441 }
2442 
2443 /**
2444  * tb_tunnel_port_on_path() - Does the tunnel go through port
2445  * @tunnel: Tunnel to check
2446  * @port: Port to check
2447  *
2448  * Return: %true if @tunnel goes through @port (direction does not matter),
2449  * %false otherwise.
2450  */
2451 bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
2452 			    const struct tb_port *port)
2453 {
2454 	int i;
2455 
2456 	for (i = 0; i < tunnel->npaths; i++) {
2457 		if (!tunnel->paths[i])
2458 			continue;
2459 
2460 		if (tb_path_port_on_path(tunnel->paths[i], port))
2461 			return true;
2462 	}
2463 
2464 	return false;
2465 }
2466 
2467 /* Has tb_tunnel_activate() been called for the tunnel? */
2468 static bool tb_tunnel_is_activated(const struct tb_tunnel *tunnel)
2469 {
2470 	return tunnel->state == TB_TUNNEL_ACTIVATING || tb_tunnel_is_active(tunnel);
2471 }
2472 
2473 /**
2474  * tb_tunnel_maximum_bandwidth() - Return maximum possible bandwidth
2475  * @tunnel: Tunnel to check
2476  * @max_up: Maximum upstream bandwidth in Mb/s
2477  * @max_down: Maximum downstream bandwidth in Mb/s
2478  *
2479  * Return:
2480  * * %0 - On success. Stores in @max_up and @max_down the maximum possible
2481  *   bandwidth this tunnel can support if not limited by other clients.
2482  * * %-EOPNOTSUPP - If the tunnel does not support this function.
2483  * * %-ENOTCONN - If the tunnel is not active.
2484  */
2485 int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
2486 				int *max_down)
2487 {
2488 	if (!tb_tunnel_is_active(tunnel))
2489 		return -ENOTCONN;
2490 
2491 	if (tunnel->maximum_bandwidth)
2492 		return tunnel->maximum_bandwidth(tunnel, max_up, max_down);
2493 	return -EOPNOTSUPP;
2494 }
2495 
2496 /**
2497  * tb_tunnel_allocated_bandwidth() - Return bandwidth allocated for the tunnel
2498  * @tunnel: Tunnel to check
2499  * @allocated_up: Currently allocated upstream bandwidth in Mb/s is stored here
2500  * @allocated_down: Currently allocated downstream bandwidth in Mb/s is
2501  *		    stored here
2502  *
2503  * Return:
2504  * * %0 - On success. Stores the allocated bandwidth in @allocated_up and
2505  *   @allocated_down; this may be higher than what the tunnel actually consumes.
2506  * * %-EOPNOTSUPP - If the tunnel does not support this function.
2507  * * %-ENOTCONN - If the tunnel is not active.
2508  * * Negative errno - Another error occurred.
2509  */
2510 int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
2511 				  int *allocated_down)
2512 {
2513 	if (!tb_tunnel_is_active(tunnel))
2514 		return -ENOTCONN;
2515 
2516 	if (tunnel->allocated_bandwidth)
2517 		return tunnel->allocated_bandwidth(tunnel, allocated_up,
2518 						   allocated_down);
2519 	return -EOPNOTSUPP;
2520 }
2521 
2522 /**
2523  * tb_tunnel_alloc_bandwidth() - Change tunnel bandwidth allocation
2524  * @tunnel: Tunnel whose bandwidth allocation to change
2525  * @alloc_up: New upstream bandwidth in Mb/s
2526  * @alloc_down: New downstream bandwidth in Mb/s
2527  *
2528  * Tries to change tunnel bandwidth allocation.
2529  *
2530  * Return:
2531  * * %0 - On success. Updates @alloc_up and @alloc_down to values that were
2532  *   actually allocated (they may differ from the values passed in).
2533  * * Negative errno - In case of failure.
2534  */
2535 int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
2536 			      int *alloc_down)
2537 {
2538 	if (!tb_tunnel_is_active(tunnel))
2539 		return -ENOTCONN;
2540 
2541 	if (tunnel->alloc_bandwidth) {
2542 		int ret;
2543 
2544 		ret = tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down);
2545 		if (ret)
2546 			return ret;
2547 
2548 		tb_tunnel_changed(tunnel);
2549 		return 0;
2550 	}
2551 
2552 	return -EOPNOTSUPP;
2553 }
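
/*
 * Example call (numbers hypothetical): the bandwidths are passed by
 * reference because the tunnel may round them to what the underlying
 * allocation granularity supports:
 *
 *	int up = 1000, down = 8000;
 *
 *	ret = tb_tunnel_alloc_bandwidth(tunnel, &up, &down);
 *	if (!ret)
 *		tb_tunnel_dbg(tunnel, "allocated %d/%d Mb/s\n", up, down);
 */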
2554 
2555 /**
2556  * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
2557  * @tunnel: Tunnel to check
2558  * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
2559  *		 Can be %NULL.
2560  * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
2561  *		   Can be %NULL.
2562  *
2563  * Stores the amount of isochronous bandwidth @tunnel consumes in
2564  * @consumed_up and @consumed_down.
2565  *
2566  * Return: %0 on success, negative errno otherwise.
2567  */
2568 int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
2569 				 int *consumed_down)
2570 {
2571 	int up_bw = 0, down_bw = 0;
2572 
2573 	/*
2574 	 * Here we need to distinguish inactive tunnels from tunnels that
2575 	 * are either fully active or whose activation has been started.
2576 	 * The latter matters for DP tunnels where we must report the
2577 	 * consumed bandwidth to be the maximum we gave them until the
2578 	 * DPRX capabilities read is done by the graphics driver.
2579 	 */
2580 	if (tb_tunnel_is_activated(tunnel) && tunnel->consumed_bandwidth) {
2581 		int ret;
2582 
2583 		ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
2584 		if (ret)
2585 			return ret;
2586 	}
2587 
2588 	if (consumed_up)
2589 		*consumed_up = up_bw;
2590 	if (consumed_down)
2591 		*consumed_down = down_bw;
2592 
2593 	tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw, down_bw);
2594 	return 0;
2595 }
2596 
2597 /**
2598  * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
2599  * @tunnel: Tunnel whose unused bandwidth to release
2600  *
2601  * If the tunnel supports dynamic bandwidth management (USB3 tunnels at
2602  * the moment), this function makes it release all the unused bandwidth.
2603  *
2604  * Return: %0 on success, negative errno otherwise.
2605  */
2606 int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
2607 {
2608 	if (!tb_tunnel_is_active(tunnel))
2609 		return -ENOTCONN;
2610 
2611 	if (tunnel->release_unused_bandwidth) {
2612 		int ret;
2613 
2614 		ret = tunnel->release_unused_bandwidth(tunnel);
2615 		if (ret)
2616 			return ret;
2617 	}
2618 
2619 	return 0;
2620 }
2621 
2622 /**
2623  * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
2624  * @tunnel: Tunnel reclaiming available bandwidth
2625  * @available_up: Available upstream bandwidth (in Mb/s)
2626  * @available_down: Available downstream bandwidth (in Mb/s)
2627  *
2628  * Reclaims bandwidth from @available_up and @available_down and updates
2629  * the variables accordingly (e.g. decreases both according to what was
2630  * reclaimed by the tunnel). If nothing was reclaimed, the values are
2631  * kept as is.
2632  */
2633 void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
2634 					   int *available_up,
2635 					   int *available_down)
2636 {
2637 	if (!tb_tunnel_is_active(tunnel))
2638 		return;
2639 
2640 	if (tunnel->reclaim_available_bandwidth)
2641 		tunnel->reclaim_available_bandwidth(tunnel, available_up,
2642 						    available_down);
2643 }
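
/*
 * The release/reclaim pair above is intended to be used together
 * when bandwidth is redistributed (sketch; the tunnel list and the
 * available_up/available_down bookkeeping are assumed):
 *
 *	list_for_each_entry(tunnel, &tunnels, list)
 *		tb_tunnel_release_unused_bandwidth(tunnel);
 *
 *	(recompute available_up and available_down here)
 *
 *	list_for_each_entry(tunnel, &tunnels, list)
 *		tb_tunnel_reclaim_available_bandwidth(tunnel,
 *				&available_up, &available_down);
 */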
2644 
2645 const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel)
2646 {
2647 	return tb_tunnel_names[tunnel->type];
2648 }
2649