xref: /linux/drivers/thunderbolt/tunnel.c (revision bdd4f86c97e60b748027bdf6f6a3729c8a12da15)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Thunderbolt driver - Tunneling support
4  *
5  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6  * Copyright (C) 2019, Intel Corporation
7  */
8 
9 #include <linux/delay.h>
10 #include <linux/slab.h>
11 #include <linux/list.h>
12 #include <linux/ktime.h>
13 #include <linux/string_helpers.h>
14 
15 #include "tunnel.h"
16 #include "tb.h"
17 
18 /* PCIe adapters always use HopID 8 for both directions */
19 #define TB_PCI_HOPID			8
20 
21 #define TB_PCI_PATH_DOWN		0
22 #define TB_PCI_PATH_UP			1
23 
24 #define TB_PCI_PRIORITY			3
25 #define TB_PCI_WEIGHT			1
26 
27 /* USB3 adapters always use HopID 8 for both directions */
28 #define TB_USB3_HOPID			8
29 
30 #define TB_USB3_PATH_DOWN		0
31 #define TB_USB3_PATH_UP			1
32 
33 #define TB_USB3_PRIORITY		3
34 #define TB_USB3_WEIGHT			2
35 
36 /* DP adapters use HopID 8 for AUX and 9 for Video */
37 #define TB_DP_AUX_TX_HOPID		8
38 #define TB_DP_AUX_RX_HOPID		8
39 #define TB_DP_VIDEO_HOPID		9
40 
41 #define TB_DP_VIDEO_PATH_OUT		0
42 #define TB_DP_AUX_PATH_OUT		1
43 #define TB_DP_AUX_PATH_IN		2
44 
45 #define TB_DP_VIDEO_PRIORITY		1
46 #define TB_DP_VIDEO_WEIGHT		1
47 
48 #define TB_DP_AUX_PRIORITY		2
49 #define TB_DP_AUX_WEIGHT		1
50 
51 /* Minimum number of credits needed for PCIe path */
52 #define TB_MIN_PCIE_CREDITS		6U
53 /*
54  * Number of credits we try to allocate for each DMA path if not limited
55  * by the host router baMaxHI.
56  */
57 #define TB_DMA_CREDITS			14
58 /* Minimum number of credits for DMA path */
59 #define TB_MIN_DMA_CREDITS		1
60 
61 #define TB_DMA_PRIORITY			5
62 #define TB_DMA_WEIGHT			1
63 
64 /*
65  * Reserve additional bandwidth for USB 3.x and PCIe bulk traffic
66  * according to the USB4 v2 Connection Manager guide. This ends up reserving
67  * 1500 Mb/s for PCIe and 3000 Mb/s for USB 3.x taking weights into
68  * account.
69  */
70 #define USB4_V2_PCI_MIN_BANDWIDTH	(1500 * TB_PCI_WEIGHT)
71 #define USB4_V2_USB3_MIN_BANDWIDTH	(1500 * TB_USB3_WEIGHT)
72 
73 /*
74  * According to the VESA spec, the DPRX negotiation shall complete in 5
75  * seconds after the tunnel is established. Since at least i915 can
76  * runtime suspend if there is nothing connected, and it polls for new
77  * connections every 10 seconds, we use 12 seconds here.
78  *
79  * These are in ms.
80  */
81 #define TB_DPRX_TIMEOUT			12000
82 #define TB_DPRX_WAIT_TIMEOUT		25
83 #define TB_DPRX_POLL_DELAY		50
84 
85 static int dprx_timeout = TB_DPRX_TIMEOUT;
86 module_param(dprx_timeout, int, 0444);
87 MODULE_PARM_DESC(dprx_timeout,
88 		 "DPRX capability read timeout in ms, -1 waits forever (default: "
89 		 __MODULE_STRING(TB_DPRX_TIMEOUT) ")");
90 
91 static unsigned int dma_credits = TB_DMA_CREDITS;
92 module_param(dma_credits, uint, 0444);
93 MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: "
94                 __MODULE_STRING(TB_DMA_CREDITS) ")");
95 
96 static bool bw_alloc_mode = true;
97 module_param(bw_alloc_mode, bool, 0444);
98 MODULE_PARM_DESC(bw_alloc_mode,
99 		 "enable bandwidth allocation mode if supported (default: true)");
100 
101 static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
102 
103 /* Synchronizes kref_get()/put() of struct tb_tunnel */
104 static DEFINE_MUTEX(tb_tunnel_lock);
105 
106 static inline unsigned int tb_usable_credits(const struct tb_port *port)
107 {
108 	return port->total_credits - port->ctl_credits;
109 }
110 
111 /**
112  * tb_available_credits() - Available credits for PCIe and DMA
113  * @port: Lane adapter to check
114  * @max_dp_streams: If non-%NULL stores maximum number of simultaneous DP
115  *		    streams possible through this lane adapter
116  */
117 static unsigned int tb_available_credits(const struct tb_port *port,
118 					 size_t *max_dp_streams)
119 {
120 	const struct tb_switch *sw = port->sw;
121 	int credits, usb3, pcie, spare;
122 	size_t ndp;
123 
124 	usb3 = tb_acpi_may_tunnel_usb3() ? sw->max_usb3_credits : 0;
125 	pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0;
126 
127 	if (tb_acpi_is_xdomain_allowed()) {
128 		spare = min_not_zero(sw->max_dma_credits, dma_credits);
129 		/* Add some credits for potential second DMA tunnel */
130 		spare += TB_MIN_DMA_CREDITS;
131 	} else {
132 		spare = 0;
133 	}
134 
135 	credits = tb_usable_credits(port);
136 	if (tb_acpi_may_tunnel_dp()) {
137 		/*
138 		 * Maximum number of DP streams possible through the
139 		 * lane adapter.
140 		 */
141 		if (sw->min_dp_aux_credits + sw->min_dp_main_credits)
142 			ndp = (credits - (usb3 + pcie + spare)) /
143 			      (sw->min_dp_aux_credits + sw->min_dp_main_credits);
144 		else
145 			ndp = 0;
146 	} else {
147 		ndp = 0;
148 	}
149 	credits -= ndp * (sw->min_dp_aux_credits + sw->min_dp_main_credits);
150 	credits -= usb3;
151 
152 	if (max_dp_streams)
153 		*max_dp_streams = ndp;
154 
155 	return credits > 0 ? credits : 0;
156 }
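/*
 * Illustrative example of the calculation above (numbers are made up):
 * with 60 usable credits, usb3 = 20, pcie = 6, spare = 15 and
 * min_dp_aux_credits + min_dp_main_credits = 19, at most one DP stream
 * fits (ndp = (60 - 41) / 19 = 1) and 60 - 19 - 20 = 21 credits are
 * reported back for PCIe/DMA use.
 */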
157 
158 static void tb_init_pm_support(struct tb_path_hop *hop)
159 {
160 	struct tb_port *out_port = hop->out_port;
161 	struct tb_port *in_port = hop->in_port;
162 
163 	if (tb_port_is_null(in_port) && tb_port_is_null(out_port) &&
164 	    usb4_switch_version(in_port->sw) >= 2)
165 		hop->pm_support = true;
166 }
167 
168 static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
169 					 enum tb_tunnel_type type)
170 {
171 	struct tb_tunnel *tunnel;
172 
173 	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
174 	if (!tunnel)
175 		return NULL;
176 
177 	tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
178 	if (!tunnel->paths) {
179 		kfree(tunnel);
180 		return NULL;
181 	}
182 
183 	INIT_LIST_HEAD(&tunnel->list);
184 	tunnel->tb = tb;
185 	tunnel->npaths = npaths;
186 	tunnel->type = type;
187 	kref_init(&tunnel->kref);
188 
189 	return tunnel;
190 }
191 
192 static void tb_tunnel_get(struct tb_tunnel *tunnel)
193 {
194 	mutex_lock(&tb_tunnel_lock);
195 	kref_get(&tunnel->kref);
196 	mutex_unlock(&tb_tunnel_lock);
197 }
198 
199 static void tb_tunnel_destroy(struct kref *kref)
200 {
201 	struct tb_tunnel *tunnel = container_of(kref, typeof(*tunnel), kref);
202 	int i;
203 
204 	if (tunnel->destroy)
205 		tunnel->destroy(tunnel);
206 
207 	for (i = 0; i < tunnel->npaths; i++) {
208 		if (tunnel->paths[i])
209 			tb_path_free(tunnel->paths[i]);
210 	}
211 
212 	kfree(tunnel->paths);
213 	kfree(tunnel);
214 }
215 
216 void tb_tunnel_put(struct tb_tunnel *tunnel)
217 {
218 	mutex_lock(&tb_tunnel_lock);
219 	kref_put(&tunnel->kref, tb_tunnel_destroy);
220 	mutex_unlock(&tb_tunnel_lock);
221 }
222 
223 static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable)
224 {
225 	struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
226 	int ret;
227 
228 	/* Only supported if both routers are at least USB4 v2 */
229 	if ((usb4_switch_version(tunnel->src_port->sw) < 2) ||
230 	   (usb4_switch_version(tunnel->dst_port->sw) < 2))
231 		return 0;
232 
233 	if (enable && tb_port_get_link_generation(port) < 4)
234 		return 0;
235 
236 	ret = usb4_pci_port_set_ext_encapsulation(tunnel->src_port, enable);
237 	if (ret)
238 		return ret;
239 
240 	/*
241 	 * The downstream router could be unplugged, so disabling the
242 	 * encapsulation in the upstream router alone must still be possible.
243 	 */
244 	ret = usb4_pci_port_set_ext_encapsulation(tunnel->dst_port, enable);
245 	if (ret) {
246 		if (enable)
247 			return ret;
248 		if (ret != -ENODEV)
249 			return ret;
250 	}
251 
252 	tb_tunnel_dbg(tunnel, "extended encapsulation %s\n",
253 		      str_enabled_disabled(enable));
254 	return 0;
255 }
256 
257 static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
258 {
259 	int res;
260 
261 	if (activate) {
262 		res = tb_pci_set_ext_encapsulation(tunnel, activate);
263 		if (res)
264 			return res;
265 	}
266 
267 	if (activate)
268 		res = tb_pci_port_enable(tunnel->dst_port, activate);
269 	else
270 		res = tb_pci_port_enable(tunnel->src_port, activate);
271 	if (res)
272 		return res;
273 
275 	if (activate) {
276 		res = tb_pci_port_enable(tunnel->src_port, activate);
277 		if (res)
278 			return res;
279 	} else {
280 		/* Downstream router could be unplugged */
281 		tb_pci_port_enable(tunnel->dst_port, activate);
282 	}
283 
284 	return activate ? 0 : tb_pci_set_ext_encapsulation(tunnel, activate);
285 }
286 
287 static int tb_pci_init_credits(struct tb_path_hop *hop)
288 {
289 	struct tb_port *port = hop->in_port;
290 	struct tb_switch *sw = port->sw;
291 	unsigned int credits;
292 
293 	if (tb_port_use_credit_allocation(port)) {
294 		unsigned int available;
295 
296 		available = tb_available_credits(port, NULL);
297 		credits = min(sw->max_pcie_credits, available);
298 
299 		if (credits < TB_MIN_PCIE_CREDITS)
300 			return -ENOSPC;
301 
302 		credits = max(TB_MIN_PCIE_CREDITS, credits);
303 	} else {
304 		if (tb_port_is_null(port))
305 			credits = port->bonded ? 32 : 16;
306 		else
307 			credits = 7;
308 	}
309 
310 	hop->initial_credits = credits;
311 	return 0;
312 }
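/*
 * Illustrative example (made-up numbers): with credit allocation in use,
 * max_pcie_credits of 32 and 20 credits still available this reserves 20
 * buffers; on a lane adapter without per-path credit allocation the fixed
 * 32 (bonded) or 16 is used instead.
 */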
313 
314 static int tb_pci_init_path(struct tb_path *path)
315 {
316 	struct tb_path_hop *hop;
317 
318 	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
319 	path->egress_shared_buffer = TB_PATH_NONE;
320 	path->ingress_fc_enable = TB_PATH_ALL;
321 	path->ingress_shared_buffer = TB_PATH_NONE;
322 	path->priority = TB_PCI_PRIORITY;
323 	path->weight = TB_PCI_WEIGHT;
324 	path->drop_packages = 0;
325 
326 	tb_path_for_each_hop(path, hop) {
327 		int ret;
328 
329 		ret = tb_pci_init_credits(hop);
330 		if (ret)
331 			return ret;
332 	}
333 
334 	return 0;
335 }
336 
337 /**
338  * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
339  * @tb: Pointer to the domain structure
340  * @down: PCIe downstream adapter
341  * @alloc_hopid: Allocate HopIDs from visited ports
342  *
343  * If @down adapter is active, follows the tunnel to the PCIe upstream
344  * adapter and back. Returns the discovered tunnel or %NULL if there was
345  * no tunnel.
346  */
347 struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
348 					 bool alloc_hopid)
349 {
350 	struct tb_tunnel *tunnel;
351 	struct tb_path *path;
352 
353 	if (!tb_pci_port_is_enabled(down))
354 		return NULL;
355 
356 	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
357 	if (!tunnel)
358 		return NULL;
359 
360 	tunnel->activate = tb_pci_activate;
361 	tunnel->src_port = down;
362 
363 	/*
364 	 * Discover both paths even if they are not complete. We will
365 	 * clean them up by calling tb_tunnel_deactivate() below in that
366 	 * case.
367 	 */
368 	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
369 				&tunnel->dst_port, "PCIe Up", alloc_hopid);
370 	if (!path) {
371 		/* Just disable the downstream port */
372 		tb_pci_port_enable(down, false);
373 		goto err_free;
374 	}
375 	tunnel->paths[TB_PCI_PATH_UP] = path;
376 	if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]))
377 		goto err_free;
378 
379 	path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
380 				"PCIe Down", alloc_hopid);
381 	if (!path)
382 		goto err_deactivate;
383 	tunnel->paths[TB_PCI_PATH_DOWN] = path;
384 	if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]))
385 		goto err_deactivate;
386 
387 	/* Validate that the tunnel is complete */
388 	if (!tb_port_is_pcie_up(tunnel->dst_port)) {
389 		tb_port_warn(tunnel->dst_port,
390 			     "path does not end on a PCIe adapter, cleaning up\n");
391 		goto err_deactivate;
392 	}
393 
394 	if (down != tunnel->src_port) {
395 		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
396 		goto err_deactivate;
397 	}
398 
399 	if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
400 		tb_tunnel_warn(tunnel,
401 			       "tunnel is not fully activated, cleaning up\n");
402 		goto err_deactivate;
403 	}
404 
405 	tb_tunnel_dbg(tunnel, "discovered\n");
406 	return tunnel;
407 
408 err_deactivate:
409 	tb_tunnel_deactivate(tunnel);
410 err_free:
411 	tb_tunnel_put(tunnel);
412 
413 	return NULL;
414 }
415 
416 /**
417  * tb_tunnel_alloc_pci() - allocate a pci tunnel
418  * @tb: Pointer to the domain structure
419  * @up: PCIe upstream adapter port
420  * @down: PCIe downstream adapter port
421  *
422  * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
423  * TB_TYPE_PCIE_DOWN.
424  *
425  * Return: Returns a tb_tunnel on success or %NULL on failure.
426  */
427 struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
428 				      struct tb_port *down)
429 {
430 	struct tb_tunnel *tunnel;
431 	struct tb_path *path;
432 
433 	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
434 	if (!tunnel)
435 		return NULL;
436 
437 	tunnel->activate = tb_pci_activate;
438 	tunnel->src_port = down;
439 	tunnel->dst_port = up;
440 
441 	path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
442 			     "PCIe Down");
443 	if (!path)
444 		goto err_free;
445 	tunnel->paths[TB_PCI_PATH_DOWN] = path;
446 	if (tb_pci_init_path(path))
447 		goto err_free;
448 
449 	path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
450 			     "PCIe Up");
451 	if (!path)
452 		goto err_free;
453 	tunnel->paths[TB_PCI_PATH_UP] = path;
454 	if (tb_pci_init_path(path))
455 		goto err_free;
456 
457 	return tunnel;
458 
459 err_free:
460 	tb_tunnel_put(tunnel);
461 	return NULL;
462 }
463 
464 /**
465  * tb_tunnel_reserved_pci() - Amount of bandwidth to reserve for PCIe
466  * @port: Lane 0 adapter
467  * @reserved_up: Upstream bandwidth in Mb/s to reserve
468  * @reserved_down: Downstream bandwidth in Mb/s to reserve
469  *
470  * Can be called for any connected lane 0 adapter to find out how much
471  * bandwidth needs to be left in reserve for possible PCIe bulk traffic.
472  * Returns true if there is something to be reserved and writes the
473  * amount to @reserved_down/@reserved_up. Otherwise returns false and
474  * does not touch the parameters.
475  */
476 bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up,
477 			    int *reserved_down)
478 {
479 	if (WARN_ON_ONCE(!port->remote))
480 		return false;
481 
482 	if (!tb_acpi_may_tunnel_pcie())
483 		return false;
484 
485 	if (tb_port_get_link_generation(port) < 4)
486 		return false;
487 
488 	/* Must have PCIe adapters */
489 	if (tb_is_upstream_port(port)) {
490 		if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_UP))
491 			return false;
492 		if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_DOWN))
493 			return false;
494 	} else {
495 		if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_DOWN))
496 			return false;
497 		if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_UP))
498 			return false;
499 	}
500 
501 	*reserved_up = USB4_V2_PCI_MIN_BANDWIDTH;
502 	*reserved_down = USB4_V2_PCI_MIN_BANDWIDTH;
503 
504 	tb_port_dbg(port, "reserving %u/%u Mb/s for PCIe\n", *reserved_up,
505 		    *reserved_down);
506 	return true;
507 }
508 
509 static bool tb_dp_is_usb4(const struct tb_switch *sw)
510 {
511 	/* Titan Ridge DP adapters need the same treatment as USB4 */
512 	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
513 }
514 
515 static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out,
516 			      int timeout_msec)
517 {
518 	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
519 	u32 val;
520 	int ret;
521 
522 	/* Both ends need to support this */
523 	if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
524 		return 0;
525 
526 	ret = tb_port_read(out, &val, TB_CFG_PORT,
527 			   out->cap_adap + DP_STATUS_CTRL, 1);
528 	if (ret)
529 		return ret;
530 
531 	val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;
532 
533 	ret = tb_port_write(out, &val, TB_CFG_PORT,
534 			    out->cap_adap + DP_STATUS_CTRL, 1);
535 	if (ret)
536 		return ret;
537 
538 	do {
539 		ret = tb_port_read(out, &val, TB_CFG_PORT,
540 				   out->cap_adap + DP_STATUS_CTRL, 1);
541 		if (ret)
542 			return ret;
543 		if (!(val & DP_STATUS_CTRL_CMHS))
544 			return 0;
545 		usleep_range(100, 150);
546 	} while (ktime_before(ktime_get(), timeout));
547 
548 	return -ETIMEDOUT;
549 }
550 
551 /*
552  * Returns maximum possible rate from capability supporting only DP 2.0
553  * and below. Used when DP BW allocation mode is not enabled.
554  */
555 static inline u32 tb_dp_cap_get_rate(u32 val)
556 {
557 	u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;
558 
559 	switch (rate) {
560 	case DP_COMMON_CAP_RATE_RBR:
561 		return 1620;
562 	case DP_COMMON_CAP_RATE_HBR:
563 		return 2700;
564 	case DP_COMMON_CAP_RATE_HBR2:
565 		return 5400;
566 	case DP_COMMON_CAP_RATE_HBR3:
567 		return 8100;
568 	default:
569 		return 0;
570 	}
571 }
572 
573 /*
574  * Returns maximum possible rate from capability supporting DP 2.1
575  * UHBR20, 13.5 and 10 rates as well. Use only when DP BW allocation
576  * mode is enabled.
577  */
578 static inline u32 tb_dp_cap_get_rate_ext(u32 val)
579 {
580 	if (val & DP_COMMON_CAP_UHBR20)
581 		return 20000;
582 	else if (val & DP_COMMON_CAP_UHBR13_5)
583 		return 13500;
584 	else if (val & DP_COMMON_CAP_UHBR10)
585 		return 10000;
586 
587 	return tb_dp_cap_get_rate(val);
588 }
589 
590 static inline bool tb_dp_is_uhbr_rate(unsigned int rate)
591 {
592 	return rate >= 10000;
593 }
594 
595 static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
596 {
597 	val &= ~DP_COMMON_CAP_RATE_MASK;
598 	switch (rate) {
599 	default:
600 		WARN(1, "invalid rate %u passed, defaulting to 1620 MB/s\n", rate);
601 		fallthrough;
602 	case 1620:
603 		val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
604 		break;
605 	case 2700:
606 		val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
607 		break;
608 	case 5400:
609 		val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
610 		break;
611 	case 8100:
612 		val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
613 		break;
614 	}
615 	return val;
616 }
617 
618 static inline u32 tb_dp_cap_get_lanes(u32 val)
619 {
620 	u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;
621 
622 	switch (lanes) {
623 	case DP_COMMON_CAP_1_LANE:
624 		return 1;
625 	case DP_COMMON_CAP_2_LANES:
626 		return 2;
627 	case DP_COMMON_CAP_4_LANES:
628 		return 4;
629 	default:
630 		return 0;
631 	}
632 }
633 
634 static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
635 {
636 	val &= ~DP_COMMON_CAP_LANES_MASK;
637 	switch (lanes) {
638 	default:
639 		WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
640 		     lanes);
641 		fallthrough;
642 	case 1:
643 		val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
644 		break;
645 	case 2:
646 		val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
647 		break;
648 	case 4:
649 		val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
650 		break;
651 	}
652 	return val;
653 }
654 
655 static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
656 {
657 	/* Tunneling removes the DP 8b/10b or 128b/132b encoding */
658 	if (tb_dp_is_uhbr_rate(rate))
659 		return rate * lanes * 128 / 132;
660 	return rate * lanes * 8 / 10;
661 }
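/*
 * For example, HBR2 x4 works out to 5400 * 4 * 8 / 10 = 17280 Mb/s and
 * UHBR10 x4 to 10000 * 4 * 128 / 132 = 38787 Mb/s (integer division).
 */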
662 
663 static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
664 				  u32 out_rate, u32 out_lanes, u32 *new_rate,
665 				  u32 *new_lanes)
666 {
667 	static const u32 dp_bw[][2] = {
668 		/* Mb/s, lanes */
669 		{ 8100, 4 }, /* 25920 Mb/s */
670 		{ 5400, 4 }, /* 17280 Mb/s */
671 		{ 8100, 2 }, /* 12960 Mb/s */
672 		{ 2700, 4 }, /* 8640 Mb/s */
673 		{ 5400, 2 }, /* 8640 Mb/s */
674 		{ 8100, 1 }, /* 6480 Mb/s */
675 		{ 1620, 4 }, /* 5184 Mb/s */
676 		{ 5400, 1 }, /* 4320 Mb/s */
677 		{ 2700, 2 }, /* 4320 Mb/s */
678 		{ 1620, 2 }, /* 2592 Mb/s */
679 		{ 2700, 1 }, /* 2160 Mb/s */
680 		{ 1620, 1 }, /* 1296 Mb/s */
681 	};
682 	unsigned int i;
683 
684 	/*
685 	 * Find a combination that can fit into max_bw and does not
686 	 * exceed the maximum rate and lanes supported by the DP OUT and
687 	 * DP IN adapters.
688 	 */
689 	for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
690 		if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
691 			continue;
692 
693 		if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
694 			continue;
695 
696 		if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
697 			*new_rate = dp_bw[i][0];
698 			*new_lanes = dp_bw[i][1];
699 			return 0;
700 		}
701 	}
702 
703 	return -ENOSR;
704 }
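/*
 * Illustrative example: if both adapters support HBR3 x4 but only
 * 10000 Mb/s fits in max_bw, the first entry in the table above that
 * qualifies is HBR x4 (2700 * 4 * 8 / 10 = 8640 Mb/s), so that
 * combination is returned.
 */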
705 
706 static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
707 {
708 	u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
709 	struct tb_port *out = tunnel->dst_port;
710 	struct tb_port *in = tunnel->src_port;
711 	int ret, max_bw;
712 
713 	/*
714 	 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
715 	 * newer generation hardware.
716 	 */
717 	if (in->sw->generation < 2 || out->sw->generation < 2)
718 		return 0;
719 
720 	/*
721 	 * Perform connection manager handshake between IN and OUT ports
722 	 * before capabilities exchange can take place.
723 	 */
724 	ret = tb_dp_cm_handshake(in, out, 3000);
725 	if (ret)
726 		return ret;
727 
728 	/* Read both DP_LOCAL_CAP registers */
729 	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
730 			   in->cap_adap + DP_LOCAL_CAP, 1);
731 	if (ret)
732 		return ret;
733 
734 	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
735 			   out->cap_adap + DP_LOCAL_CAP, 1);
736 	if (ret)
737 		return ret;
738 
739 	/* Write IN local caps to OUT remote caps */
740 	ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
741 			    out->cap_adap + DP_REMOTE_CAP, 1);
742 	if (ret)
743 		return ret;
744 
745 	in_rate = tb_dp_cap_get_rate(in_dp_cap);
746 	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
747 	tb_tunnel_dbg(tunnel,
748 		      "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
749 		      in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));
750 
751 	/*
752 	 * If the tunnel bandwidth is limited (max_bw is set) then see
753 	 * if we need to reduce bandwidth to fit there.
754 	 */
755 	out_rate = tb_dp_cap_get_rate(out_dp_cap);
756 	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
757 	bw = tb_dp_bandwidth(out_rate, out_lanes);
758 	tb_tunnel_dbg(tunnel,
759 		      "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
760 		      out_rate, out_lanes, bw);
761 
762 	if (tb_tunnel_direction_downstream(tunnel))
763 		max_bw = tunnel->max_down;
764 	else
765 		max_bw = tunnel->max_up;
766 
767 	if (max_bw && bw > max_bw) {
768 		u32 new_rate, new_lanes, new_bw;
769 
770 		ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
771 					     out_rate, out_lanes, &new_rate,
772 					     &new_lanes);
773 		if (ret) {
774 			tb_tunnel_info(tunnel, "not enough bandwidth\n");
775 			return ret;
776 		}
777 
778 		new_bw = tb_dp_bandwidth(new_rate, new_lanes);
779 		tb_tunnel_dbg(tunnel,
780 			      "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
781 			      new_rate, new_lanes, new_bw);
782 
783 		/*
784 		 * Set new rate and number of lanes before writing it to
785 		 * the IN port remote caps.
786 		 */
787 		out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
788 		out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
789 	}
790 
791 	/*
792 	 * Titan Ridge does not disable AUX timers when it gets
793 	 * SET_CONFIG with SET_LTTPR_MODE set. This causes problems with
794 	 * DP tunneling.
795 	 */
796 	if (tb_route(out->sw) && tb_switch_is_titan_ridge(out->sw)) {
797 		out_dp_cap |= DP_COMMON_CAP_LTTPR_NS;
798 		tb_tunnel_dbg(tunnel, "disabling LTTPR\n");
799 	}
800 
801 	return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
802 			     in->cap_adap + DP_REMOTE_CAP, 1);
803 }
804 
805 static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
806 {
807 	int ret, estimated_bw, granularity, tmp;
808 	struct tb_port *out = tunnel->dst_port;
809 	struct tb_port *in = tunnel->src_port;
810 	u32 out_dp_cap, out_rate, out_lanes;
811 	u32 in_dp_cap, in_rate, in_lanes;
812 	u32 rate, lanes;
813 
814 	if (!bw_alloc_mode)
815 		return 0;
816 
817 	ret = usb4_dp_port_set_cm_bandwidth_mode_supported(in, true);
818 	if (ret)
819 		return ret;
820 
821 	ret = usb4_dp_port_set_group_id(in, in->group->index);
822 	if (ret)
823 		return ret;
824 
825 	/*
826 	 * Get the non-reduced rate and lanes based on the lowest
827 	 * capability of both adapters.
828 	 */
829 	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
830 			   in->cap_adap + DP_LOCAL_CAP, 1);
831 	if (ret)
832 		return ret;
833 
834 	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
835 			   out->cap_adap + DP_LOCAL_CAP, 1);
836 	if (ret)
837 		return ret;
838 
839 	in_rate = tb_dp_cap_get_rate(in_dp_cap);
840 	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
841 	out_rate = tb_dp_cap_get_rate(out_dp_cap);
842 	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
843 
844 	rate = min(in_rate, out_rate);
845 	lanes = min(in_lanes, out_lanes);
846 	tmp = tb_dp_bandwidth(rate, lanes);
847 
848 	tb_tunnel_dbg(tunnel, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
849 		      rate, lanes, tmp);
850 
851 	ret = usb4_dp_port_set_nrd(in, rate, lanes);
852 	if (ret)
853 		return ret;
854 
855 	/*
856 	 * Pick a granularity that supports the maximum possible bandwidth.
857 	 * For that we use the UHBR rates too.
858 	 */
859 	in_rate = tb_dp_cap_get_rate_ext(in_dp_cap);
860 	out_rate = tb_dp_cap_get_rate_ext(out_dp_cap);
861 	rate = min(in_rate, out_rate);
862 	tmp = tb_dp_bandwidth(rate, lanes);
863 
864 	tb_tunnel_dbg(tunnel,
865 		      "maximum bandwidth through allocation mode %u Mb/s x%u = %u Mb/s\n",
866 		      rate, lanes, tmp);
867 
868 	for (granularity = 250; tmp / granularity > 255 && granularity <= 1000;
869 	     granularity *= 2)
870 		;
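	/*
	 * Illustrative example: 38787 Mb/s keeps the default 250 Mb/s
	 * granularity (38787 / 250 = 155 <= 255) whereas 77575 Mb/s
	 * bumps it to 500 Mb/s (77575 / 250 = 310 > 255).
	 */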
871 
872 	tb_tunnel_dbg(tunnel, "granularity %d Mb/s\n", granularity);
873 
874 	/*
875 	 * Returns -EINVAL if granularity above is outside of the
876 	 * accepted ranges.
877 	 */
878 	ret = usb4_dp_port_set_granularity(in, granularity);
879 	if (ret)
880 		return ret;
881 
882 	/*
883 	 * Bandwidth estimation is pretty much what we have in
884 	 * max_up/down fields. For discovery we just read what the
885 	 * estimation was set to.
886 	 */
887 	if (tb_tunnel_direction_downstream(tunnel))
888 		estimated_bw = tunnel->max_down;
889 	else
890 		estimated_bw = tunnel->max_up;
891 
892 	tb_tunnel_dbg(tunnel, "estimated bandwidth %d Mb/s\n", estimated_bw);
893 
894 	ret = usb4_dp_port_set_estimated_bandwidth(in, estimated_bw);
895 	if (ret)
896 		return ret;
897 
898 	/* Initial allocation should be 0 according to the spec */
899 	ret = usb4_dp_port_allocate_bandwidth(in, 0);
900 	if (ret)
901 		return ret;
902 
903 	tb_tunnel_dbg(tunnel, "bandwidth allocation mode enabled\n");
904 	return 0;
905 }
906 
907 static int tb_dp_pre_activate(struct tb_tunnel *tunnel)
908 {
909 	struct tb_port *in = tunnel->src_port;
910 	struct tb_switch *sw = in->sw;
911 	struct tb *tb = in->sw->tb;
912 	int ret;
913 
914 	ret = tb_dp_xchg_caps(tunnel);
915 	if (ret)
916 		return ret;
917 
918 	if (!tb_switch_is_usb4(sw))
919 		return 0;
920 
921 	if (!usb4_dp_port_bandwidth_mode_supported(in))
922 		return 0;
923 
924 	tb_tunnel_dbg(tunnel, "bandwidth allocation mode supported\n");
925 
926 	ret = usb4_dp_port_set_cm_id(in, tb->index);
927 	if (ret)
928 		return ret;
929 
930 	return tb_dp_bandwidth_alloc_mode_enable(tunnel);
931 }
932 
933 static void tb_dp_post_deactivate(struct tb_tunnel *tunnel)
934 {
935 	struct tb_port *in = tunnel->src_port;
936 
937 	if (!usb4_dp_port_bandwidth_mode_supported(in))
938 		return;
939 	if (usb4_dp_port_bandwidth_mode_enabled(in)) {
940 		usb4_dp_port_set_cm_bandwidth_mode_supported(in, false);
941 		tb_tunnel_dbg(tunnel, "bandwidth allocation mode disabled\n");
942 	}
943 }
944 
945 static ktime_t dprx_timeout_to_ktime(int timeout_msec)
946 {
947 	return timeout_msec >= 0 ?
948 		ktime_add_ms(ktime_get(), timeout_msec) : KTIME_MAX;
949 }
950 
951 static int tb_dp_wait_dprx(struct tb_tunnel *tunnel, int timeout_msec)
952 {
953 	ktime_t timeout = dprx_timeout_to_ktime(timeout_msec);
954 	struct tb_port *in = tunnel->src_port;
955 
956 	/*
957 	 * Wait for DPRX done. Normally it should already be set for an
958 	 * active tunnel.
959 	 */
960 	do {
961 		u32 val;
962 		int ret;
963 
964 		ret = tb_port_read(in, &val, TB_CFG_PORT,
965 				   in->cap_adap + DP_COMMON_CAP, 1);
966 		if (ret)
967 			return ret;
968 
969 		if (val & DP_COMMON_CAP_DPRX_DONE)
970 			return 0;
971 
972 		usleep_range(100, 150);
973 	} while (ktime_before(ktime_get(), timeout));
974 
975 	tb_tunnel_dbg(tunnel, "DPRX read timeout\n");
976 	return -ETIMEDOUT;
977 }
978 
979 static void tb_dp_dprx_work(struct work_struct *work)
980 {
981 	struct tb_tunnel *tunnel = container_of(work, typeof(*tunnel), dprx_work.work);
982 	struct tb *tb = tunnel->tb;
983 
984 	if (!tunnel->dprx_canceled) {
985 		mutex_lock(&tb->lock);
986 		if (tb_dp_is_usb4(tunnel->src_port->sw) &&
987 		    tb_dp_wait_dprx(tunnel, TB_DPRX_WAIT_TIMEOUT)) {
988 			if (ktime_before(ktime_get(), tunnel->dprx_timeout)) {
989 				queue_delayed_work(tb->wq, &tunnel->dprx_work,
990 						   msecs_to_jiffies(TB_DPRX_POLL_DELAY));
991 				mutex_unlock(&tb->lock);
992 				return;
993 			}
994 		} else {
995 			tunnel->state = TB_TUNNEL_ACTIVE;
996 		}
997 		mutex_unlock(&tb->lock);
998 	}
999 
1000 	if (tunnel->callback)
1001 		tunnel->callback(tunnel, tunnel->callback_data);
1002 }
1003 
1004 static int tb_dp_dprx_start(struct tb_tunnel *tunnel)
1005 {
1006 	/*
1007 	 * Bump up the reference to keep the tunnel around. It will be
1008 	 * dropped in tb_dp_dprx_stop() once the tunnel is deactivated.
1009 	 */
1010 	tb_tunnel_get(tunnel);
1011 
1012 	if (tunnel->callback) {
1013 		tunnel->dprx_timeout = dprx_timeout_to_ktime(dprx_timeout);
1014 		queue_delayed_work(tunnel->tb->wq, &tunnel->dprx_work, 0);
1015 		return -EINPROGRESS;
1016 	}
1017 
1018 	return tb_dp_is_usb4(tunnel->src_port->sw) ?
1019 		tb_dp_wait_dprx(tunnel, dprx_timeout) : 0;
1020 }
1021 
1022 static void tb_dp_dprx_stop(struct tb_tunnel *tunnel)
1023 {
1024 	tunnel->dprx_canceled = true;
1025 	cancel_delayed_work(&tunnel->dprx_work);
1026 	tb_tunnel_put(tunnel);
1027 }
1028 
1029 static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
1030 {
1031 	int ret;
1032 
1033 	if (active) {
1034 		struct tb_path **paths;
1035 		int last;
1036 
1037 		paths = tunnel->paths;
1038 		last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;
1039 
1040 		tb_dp_port_set_hops(tunnel->src_port,
1041 			paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
1042 			paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
1043 			paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);
1044 
1045 		tb_dp_port_set_hops(tunnel->dst_port,
1046 			paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
1047 			paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
1048 			paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
1049 	} else {
1050 		tb_dp_dprx_stop(tunnel);
1051 		tb_dp_port_hpd_clear(tunnel->src_port);
1052 		tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
1053 		if (tb_port_is_dpout(tunnel->dst_port))
1054 			tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
1055 	}
1056 
1057 	ret = tb_dp_port_enable(tunnel->src_port, active);
1058 	if (ret)
1059 		return ret;
1060 
1061 	if (tb_port_is_dpout(tunnel->dst_port)) {
1062 		ret = tb_dp_port_enable(tunnel->dst_port, active);
1063 		if (ret)
1064 			return ret;
1065 	}
1066 
1067 	return active ? tb_dp_dprx_start(tunnel) : 0;
1068 }
1069 
1070 /**
1071  * tb_dp_bandwidth_mode_maximum_bandwidth() - Maximum possible bandwidth
1072  * @tunnel: DP tunnel to check
1073  * @max_bw_rounded: Maximum bandwidth in Mb/s rounded up to the next granularity
1074  *
1075  * Returns maximum possible bandwidth for this tunnel in Mb/s.
1076  */
1077 static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
1078 						  int *max_bw_rounded)
1079 {
1080 	struct tb_port *in = tunnel->src_port;
1081 	int ret, rate, lanes, max_bw;
1082 	u32 cap;
1083 
1084 	/*
1085 	 * The DP IN adapter DP_LOCAL_CAP gets updated to the lowest AUX
1086 	 * read parameter values, so we can use it to determine the
1087 	 * maximum possible bandwidth over this link.
1088 	 *
1089 	 * See USB4 v2 spec 1.0 10.4.4.5.
1090 	 */
1091 	ret = tb_port_read(in, &cap, TB_CFG_PORT,
1092 			   in->cap_adap + DP_LOCAL_CAP, 1);
1093 	if (ret)
1094 		return ret;
1095 
1096 	rate = tb_dp_cap_get_rate_ext(cap);
1097 	lanes = tb_dp_cap_get_lanes(cap);
1098 
1099 	max_bw = tb_dp_bandwidth(rate, lanes);
1100 
1101 	if (max_bw_rounded) {
1102 		ret = usb4_dp_port_granularity(in);
1103 		if (ret < 0)
1104 			return ret;
1105 		*max_bw_rounded = roundup(max_bw, ret);
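		/*
		 * For example, a max_bw of 38787 Mb/s with 250 Mb/s
		 * granularity rounds up to 39000 Mb/s (illustrative
		 * numbers).
		 */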
1106 	}
1107 
1108 	return max_bw;
1109 }
1110 
1111 static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
1112 						   int *consumed_up,
1113 						   int *consumed_down)
1114 {
1115 	struct tb_port *in = tunnel->src_port;
1116 	int ret, allocated_bw, max_bw_rounded;
1117 
1118 	if (!usb4_dp_port_bandwidth_mode_enabled(in))
1119 		return -EOPNOTSUPP;
1120 
1121 	if (!tunnel->bw_mode)
1122 		return -EOPNOTSUPP;
1123 
1124 	/* Read what was allocated previously if any */
1125 	ret = usb4_dp_port_allocated_bandwidth(in);
1126 	if (ret < 0)
1127 		return ret;
1128 	allocated_bw = ret;
1129 
1130 	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded);
1131 	if (ret < 0)
1132 		return ret;
1133 	if (allocated_bw == max_bw_rounded)
1134 		allocated_bw = ret;
1135 
1136 	if (tb_tunnel_direction_downstream(tunnel)) {
1137 		*consumed_up = 0;
1138 		*consumed_down = allocated_bw;
1139 	} else {
1140 		*consumed_up = allocated_bw;
1141 		*consumed_down = 0;
1142 	}
1143 
1144 	return 0;
1145 }
1146 
1147 static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
1148 				     int *allocated_down)
1149 {
1150 	struct tb_port *in = tunnel->src_port;
1151 
1152 	/*
1153 	 * If we have already set the allocated bandwidth then use that.
1154 	 * Otherwise we read it from the DPRX.
1155 	 */
1156 	if (usb4_dp_port_bandwidth_mode_enabled(in) && tunnel->bw_mode) {
1157 		int ret, allocated_bw, max_bw_rounded;
1158 
1159 		ret = usb4_dp_port_allocated_bandwidth(in);
1160 		if (ret < 0)
1161 			return ret;
1162 		allocated_bw = ret;
1163 
1164 		ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel,
1165 							     &max_bw_rounded);
1166 		if (ret < 0)
1167 			return ret;
1168 		if (allocated_bw == max_bw_rounded)
1169 			allocated_bw = ret;
1170 
1171 		if (tb_tunnel_direction_downstream(tunnel)) {
1172 			*allocated_up = 0;
1173 			*allocated_down = allocated_bw;
1174 		} else {
1175 			*allocated_up = allocated_bw;
1176 			*allocated_down = 0;
1177 		}
1178 		return 0;
1179 	}
1180 
1181 	return tunnel->consumed_bandwidth(tunnel, allocated_up,
1182 					  allocated_down);
1183 }
1184 
1185 static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
1186 				 int *alloc_down)
1187 {
1188 	struct tb_port *in = tunnel->src_port;
1189 	int max_bw_rounded, ret, tmp;
1190 
1191 	if (!usb4_dp_port_bandwidth_mode_enabled(in))
1192 		return -EOPNOTSUPP;
1193 
1194 	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded);
1195 	if (ret < 0)
1196 		return ret;
1197 
1198 	if (tb_tunnel_direction_downstream(tunnel)) {
1199 		tmp = min(*alloc_down, max_bw_rounded);
1200 		ret = usb4_dp_port_allocate_bandwidth(in, tmp);
1201 		if (ret)
1202 			return ret;
1203 		*alloc_down = tmp;
1204 		*alloc_up = 0;
1205 	} else {
1206 		tmp = min(*alloc_up, max_bw_rounded);
1207 		ret = usb4_dp_port_allocate_bandwidth(in, tmp);
1208 		if (ret)
1209 			return ret;
1210 		*alloc_down = 0;
1211 		*alloc_up = tmp;
1212 	}
1213 
1214 	/* Now we can use BW mode registers to figure out the bandwidth */
1215 	/* TODO: need to handle discovery too */
1216 	tunnel->bw_mode = true;
1217 	return 0;
1218 }
1219 
1220 /* Read cap from tunnel DP IN */
1221 static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
1222 			  u32 *lanes)
1223 {
1224 	struct tb_port *in = tunnel->src_port;
1225 	u32 val;
1226 	int ret;
1227 
1228 	switch (cap) {
1229 	case DP_LOCAL_CAP:
1230 	case DP_REMOTE_CAP:
1231 	case DP_COMMON_CAP:
1232 		break;
1233 
1234 	default:
1235 		tb_tunnel_WARN(tunnel, "invalid capability index %#x\n", cap);
1236 		return -EINVAL;
1237 	}
1238 
1239 	/*
1240 	 * Read from the copied remote cap so that we take into account
1241 	 * if capabilities were reduced during exchange.
1242 	 */
1243 	ret = tb_port_read(in, &val, TB_CFG_PORT, in->cap_adap + cap, 1);
1244 	if (ret)
1245 		return ret;
1246 
1247 	*rate = tb_dp_cap_get_rate(val);
1248 	*lanes = tb_dp_cap_get_lanes(val);
1249 	return 0;
1250 }
1251 
1252 static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
1253 				   int *max_down)
1254 {
1255 	int ret;
1256 
1257 	if (!usb4_dp_port_bandwidth_mode_enabled(tunnel->src_port))
1258 		return -EOPNOTSUPP;
1259 
1260 	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, NULL);
1261 	if (ret < 0)
1262 		return ret;
1263 
1264 	if (tb_tunnel_direction_downstream(tunnel)) {
1265 		*max_up = 0;
1266 		*max_down = ret;
1267 	} else {
1268 		*max_up = ret;
1269 		*max_down = 0;
1270 	}
1271 
1272 	return 0;
1273 }
1274 
1275 static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
1276 				    int *consumed_down)
1277 {
1278 	const struct tb_switch *sw = tunnel->src_port->sw;
1279 	u32 rate = 0, lanes = 0;
1280 	int ret;
1281 
1282 	if (tb_dp_is_usb4(sw)) {
1283 		ret = tb_dp_wait_dprx(tunnel, 0);
1284 		if (ret) {
1285 			if (ret == -ETIMEDOUT) {
1286 				/*
1287 				 * While we wait for DPRX to complete,
1288 				 * the tunnel consumes as much as was
1289 				 * initially reserved for it.
1290 				 */
1291 				ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP,
1292 						     &rate, &lanes);
1293 				if (ret)
1294 					return ret;
1295 			} else {
1296 				return ret;
1297 			}
1298 		} else {
1299 			/*
1300 			 * On USB4 routers check if the bandwidth allocation
1301 			 * mode is enabled first and then read the bandwidth
1302 			 * through those registers.
1303 			 */
1304 			ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up,
1305 								      consumed_down);
1306 			if (ret < 0) {
1307 				if (ret != -EOPNOTSUPP)
1308 					return ret;
1309 			} else if (!ret) {
1310 				return 0;
1311 			}
1312 			ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes);
1313 			if (ret)
1314 				return ret;
1315 		}
1316 	} else if (sw->generation >= 2) {
1317 		ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, &rate, &lanes);
1318 		if (ret)
1319 			return ret;
1320 	} else {
1321 		/* No bandwidth management for legacy devices */
1322 		*consumed_up = 0;
1323 		*consumed_down = 0;
1324 		return 0;
1325 	}
1326 
1327 	if (tb_tunnel_direction_downstream(tunnel)) {
1328 		*consumed_up = 0;
1329 		*consumed_down = tb_dp_bandwidth(rate, lanes);
1330 	} else {
1331 		*consumed_up = tb_dp_bandwidth(rate, lanes);
1332 		*consumed_down = 0;
1333 	}
1334 
1335 	return 0;
1336 }
1337 
1338 static void tb_dp_init_aux_credits(struct tb_path_hop *hop)
1339 {
1340 	struct tb_port *port = hop->in_port;
1341 	struct tb_switch *sw = port->sw;
1342 
1343 	if (tb_port_use_credit_allocation(port))
1344 		hop->initial_credits = sw->min_dp_aux_credits;
1345 	else
1346 		hop->initial_credits = 1;
1347 }
1348 
1349 static void tb_dp_init_aux_path(struct tb_path *path, bool pm_support)
1350 {
1351 	struct tb_path_hop *hop;
1352 
1353 	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
1354 	path->egress_shared_buffer = TB_PATH_NONE;
1355 	path->ingress_fc_enable = TB_PATH_ALL;
1356 	path->ingress_shared_buffer = TB_PATH_NONE;
1357 	path->priority = TB_DP_AUX_PRIORITY;
1358 	path->weight = TB_DP_AUX_WEIGHT;
1359 
1360 	tb_path_for_each_hop(path, hop) {
1361 		tb_dp_init_aux_credits(hop);
1362 		if (pm_support)
1363 			tb_init_pm_support(hop);
1364 	}
1365 }
1366 
1367 static int tb_dp_init_video_credits(struct tb_path_hop *hop)
1368 {
1369 	struct tb_port *port = hop->in_port;
1370 	struct tb_switch *sw = port->sw;
1371 
1372 	if (tb_port_use_credit_allocation(port)) {
1373 		unsigned int nfc_credits;
1374 		size_t max_dp_streams;
1375 
1376 		tb_available_credits(port, &max_dp_streams);
1377 		/*
1378 		 * Read the number of currently allocated NFC credits
1379 		 * from the lane adapter. Since we only use them for DP
1380 		 * tunneling we can use that to figure out how many DP
1381 		 * tunnels already go through the lane adapter.
1382 		 */
1383 		nfc_credits = port->config.nfc_credits &
1384 				ADP_CS_4_NFC_BUFFERS_MASK;
1385 		if (nfc_credits / sw->min_dp_main_credits > max_dp_streams)
1386 			return -ENOSPC;
1387 
1388 		hop->nfc_credits = sw->min_dp_main_credits;
1389 	} else {
1390 		hop->nfc_credits = min(port->total_credits - 2, 12U);
1391 	}
1392 
1393 	return 0;
1394 }
1395 
1396 static int tb_dp_init_video_path(struct tb_path *path, bool pm_support)
1397 {
1398 	struct tb_path_hop *hop;
1399 
1400 	path->egress_fc_enable = TB_PATH_NONE;
1401 	path->egress_shared_buffer = TB_PATH_NONE;
1402 	path->ingress_fc_enable = TB_PATH_NONE;
1403 	path->ingress_shared_buffer = TB_PATH_NONE;
1404 	path->priority = TB_DP_VIDEO_PRIORITY;
1405 	path->weight = TB_DP_VIDEO_WEIGHT;
1406 
1407 	tb_path_for_each_hop(path, hop) {
1408 		int ret;
1409 
1410 		ret = tb_dp_init_video_credits(hop);
1411 		if (ret)
1412 			return ret;
1413 		if (pm_support)
1414 			tb_init_pm_support(hop);
1415 	}
1416 
1417 	return 0;
1418 }
1419 
1420 static void tb_dp_dump(struct tb_tunnel *tunnel)
1421 {
1422 	struct tb_port *in, *out;
1423 	u32 dp_cap, rate, lanes;
1424 
1425 	in = tunnel->src_port;
1426 	out = tunnel->dst_port;
1427 
1428 	if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
1429 			 in->cap_adap + DP_LOCAL_CAP, 1))
1430 		return;
1431 
1432 	rate = tb_dp_cap_get_rate(dp_cap);
1433 	lanes = tb_dp_cap_get_lanes(dp_cap);
1434 
1435 	tb_tunnel_dbg(tunnel,
1436 		      "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
1437 		      rate, lanes, tb_dp_bandwidth(rate, lanes));
1438 
1439 	if (tb_port_read(out, &dp_cap, TB_CFG_PORT,
1440 			 out->cap_adap + DP_LOCAL_CAP, 1))
1441 		return;
1442 
1443 	rate = tb_dp_cap_get_rate(dp_cap);
1444 	lanes = tb_dp_cap_get_lanes(dp_cap);
1445 
1446 	tb_tunnel_dbg(tunnel,
1447 		      "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
1448 		      rate, lanes, tb_dp_bandwidth(rate, lanes));
1449 
1450 	if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
1451 			 in->cap_adap + DP_REMOTE_CAP, 1))
1452 		return;
1453 
1454 	rate = tb_dp_cap_get_rate(dp_cap);
1455 	lanes = tb_dp_cap_get_lanes(dp_cap);
1456 
1457 	tb_tunnel_dbg(tunnel, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
1458 		      rate, lanes, tb_dp_bandwidth(rate, lanes));
1459 }
1460 
1461 /**
1462  * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
1463  * @tb: Pointer to the domain structure
1464  * @in: DP in adapter
1465  * @alloc_hopid: Allocate HopIDs from visited ports
1466  *
1467  * If @in adapter is active, follows the tunnel to the DP out adapter
1468  * and back.
1470  *
1471  * Return: DP tunnel or %NULL if no tunnel found.
1472  */
1473 struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
1474 					bool alloc_hopid)
1475 {
1476 	struct tb_tunnel *tunnel;
1477 	struct tb_port *port;
1478 	struct tb_path *path;
1479 
1480 	if (!tb_dp_port_is_enabled(in))
1481 		return NULL;
1482 
1483 	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
1484 	if (!tunnel)
1485 		return NULL;
1486 
1487 	tunnel->pre_activate = tb_dp_pre_activate;
1488 	tunnel->activate = tb_dp_activate;
1489 	tunnel->post_deactivate = tb_dp_post_deactivate;
1490 	tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
1491 	tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
1492 	tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
1493 	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
1494 	tunnel->src_port = in;
1495 
1496 	path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
1497 				&tunnel->dst_port, "Video", alloc_hopid);
1498 	if (!path) {
1499 		/* Just disable the DP IN port */
1500 		tb_dp_port_enable(in, false);
1501 		goto err_free;
1502 	}
1503 	tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
1504 	if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], false))
1505 		goto err_free;
1506 
1507 	path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX",
1508 				alloc_hopid);
1509 	if (!path)
1510 		goto err_deactivate;
1511 	tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
1512 	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT], false);
1513 
1514 	path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
1515 				&port, "AUX RX", alloc_hopid);
1516 	if (!path)
1517 		goto err_deactivate;
1518 	tunnel->paths[TB_DP_AUX_PATH_IN] = path;
1519 	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN], false);
1520 
1521 	/* Validate that the tunnel is complete */
1522 	if (!tb_port_is_dpout(tunnel->dst_port)) {
1523 		tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
1524 		goto err_deactivate;
1525 	}
1526 
1527 	if (!tb_dp_port_is_enabled(tunnel->dst_port))
1528 		goto err_deactivate;
1529 
1530 	if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
1531 		goto err_deactivate;
1532 
1533 	if (port != tunnel->src_port) {
1534 		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
1535 		goto err_deactivate;
1536 	}
1537 
1538 	tb_dp_dump(tunnel);
1539 
1540 	tb_tunnel_dbg(tunnel, "discovered\n");
1541 	return tunnel;
1542 
1543 err_deactivate:
1544 	tb_tunnel_deactivate(tunnel);
1545 err_free:
1546 	tb_tunnel_put(tunnel);
1547 
1548 	return NULL;
1549 }
1550 
1551 /**
1552  * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
1553  * @tb: Pointer to the domain structure
1554  * @in: DP in adapter port
1555  * @out: DP out adapter port
1556  * @link_nr: Preferred lane adapter when the link is not bonded
1557  * @max_up: Maximum available upstream bandwidth for the DP tunnel.
1558  *	    %0 if no available bandwidth.
1559  * @max_down: Maximum available downstream bandwidth for the DP tunnel.
1560  *	      %0 if no available bandwidth.
1561  * @callback: Optional callback that is called when the DP tunnel is
1562  *	      fully activated (or there is an error)
1563  * @callback_data: Optional data for @callback
1564  *
1565  * Allocates a tunnel between @in and @out that is capable of tunneling
1566  * Display Port traffic. If @callback is not %NULL it will be called
1567  * after tb_tunnel_activate() once the tunnel has been fully activated.
1568  * It can call tb_tunnel_is_active() to check whether activation was
1569  * successful (%false means there was some sort of issue).
1570  * The @callback is called without @tb->lock held.
1571  *
1572  * Return: Returns a tb_tunnel on success or %NULL on failure.
1573  */
1574 struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
1575 				     struct tb_port *out, int link_nr,
1576 				     int max_up, int max_down,
1577 				     void (*callback)(struct tb_tunnel *, void *),
1578 				     void *callback_data)
1579 {
1580 	struct tb_tunnel *tunnel;
1581 	struct tb_path **paths;
1582 	struct tb_path *path;
1583 	bool pm_support;
1584 
1585 	if (WARN_ON(!in->cap_adap || !out->cap_adap))
1586 		return NULL;
1587 
1588 	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
1589 	if (!tunnel)
1590 		return NULL;
1591 
1592 	tunnel->pre_activate = tb_dp_pre_activate;
1593 	tunnel->activate = tb_dp_activate;
1594 	tunnel->post_deactivate = tb_dp_post_deactivate;
1595 	tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
1596 	tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
1597 	tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
1598 	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
1599 	tunnel->src_port = in;
1600 	tunnel->dst_port = out;
1601 	tunnel->max_up = max_up;
1602 	tunnel->max_down = max_down;
1603 	tunnel->callback = callback;
1604 	tunnel->callback_data = callback_data;
1605 	INIT_DELAYED_WORK(&tunnel->dprx_work, tb_dp_dprx_work);
1606 
1607 	paths = tunnel->paths;
1608 	pm_support = usb4_switch_version(in->sw) >= 2;
1609 
1610 	path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
1611 			     link_nr, "Video");
1612 	if (!path)
1613 		goto err_free;
1614 	tb_dp_init_video_path(path, pm_support);
1615 	paths[TB_DP_VIDEO_PATH_OUT] = path;
1616 
1617 	path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
1618 			     TB_DP_AUX_TX_HOPID, link_nr, "AUX TX");
1619 	if (!path)
1620 		goto err_free;
1621 	tb_dp_init_aux_path(path, pm_support);
1622 	paths[TB_DP_AUX_PATH_OUT] = path;
1623 
1624 	path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
1625 			     TB_DP_AUX_RX_HOPID, link_nr, "AUX RX");
1626 	if (!path)
1627 		goto err_free;
1628 	tb_dp_init_aux_path(path, pm_support);
1629 	paths[TB_DP_AUX_PATH_IN] = path;
1630 
1631 	return tunnel;
1632 
1633 err_free:
1634 	tb_tunnel_put(tunnel);
1635 	return NULL;
1636 }
1637 
1638 static unsigned int tb_dma_available_credits(const struct tb_port *port)
1639 {
1640 	const struct tb_switch *sw = port->sw;
1641 	int credits;
1642 
1643 	credits = tb_available_credits(port, NULL);
1644 	if (tb_acpi_may_tunnel_pcie())
1645 		credits -= sw->max_pcie_credits;
1646 	credits -= port->dma_credits;
1647 
1648 	return credits > 0 ? credits : 0;
1649 }
1650 
1651 static int tb_dma_reserve_credits(struct tb_path_hop *hop, unsigned int credits)
1652 {
1653 	struct tb_port *port = hop->in_port;
1654 
1655 	if (tb_port_use_credit_allocation(port)) {
1656 		unsigned int available = tb_dma_available_credits(port);
1657 
1658 		/*
1659 		 * Need to have at least TB_MIN_DMA_CREDITS, otherwise
1660 		 * DMA path cannot be established.
1661 		 */
1662 		if (available < TB_MIN_DMA_CREDITS)
1663 			return -ENOSPC;
1664 
1665 		while (credits > available)
1666 			credits--;
1667 
1668 		tb_port_dbg(port, "reserving %u credits for DMA path\n",
1669 			    credits);
1670 
1671 		port->dma_credits += credits;
1672 	} else {
1673 		if (tb_port_is_null(port))
1674 			credits = port->bonded ? 14 : 6;
1675 		else
1676 			credits = min(port->total_credits, credits);
1677 	}
1678 
1679 	hop->initial_credits = credits;
1680 	return 0;
1681 }
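/*
 * Illustrative example: asking for the default 14 credits when only 10
 * are still available ends up reserving 10; if fewer than
 * TB_MIN_DMA_CREDITS are available the path setup fails with -ENOSPC.
 */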
1682 
1683 /* Path from lane adapter to NHI */
1684 static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits)
1685 {
1686 	struct tb_path_hop *hop;
1687 	unsigned int i, tmp;
1688 
1689 	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
1690 	path->ingress_fc_enable = TB_PATH_ALL;
1691 	path->egress_shared_buffer = TB_PATH_NONE;
1692 	path->ingress_shared_buffer = TB_PATH_NONE;
1693 	path->priority = TB_DMA_PRIORITY;
1694 	path->weight = TB_DMA_WEIGHT;
1695 	path->clear_fc = true;
1696 
1697 	/*
1698 	 * The first lane adapter is the one connected to the remote host.
1699 	 * We don't tunnel other traffic over this link so we can use all
1700 	 * the credits (except the ones reserved for control traffic).
1701 	 */
1702 	hop = &path->hops[0];
1703 	tmp = min(tb_usable_credits(hop->in_port), credits);
1704 	hop->initial_credits = tmp;
1705 	hop->in_port->dma_credits += tmp;
1706 
1707 	for (i = 1; i < path->path_length; i++) {
1708 		int ret;
1709 
1710 		ret = tb_dma_reserve_credits(&path->hops[i], credits);
1711 		if (ret)
1712 			return ret;
1713 	}
1714 
1715 	return 0;
1716 }
1717 
1718 /* Path from NHI to lane adapter */
1719 static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits)
1720 {
1721 	struct tb_path_hop *hop;
1722 
1723 	path->egress_fc_enable = TB_PATH_ALL;
1724 	path->ingress_fc_enable = TB_PATH_ALL;
1725 	path->egress_shared_buffer = TB_PATH_NONE;
1726 	path->ingress_shared_buffer = TB_PATH_NONE;
1727 	path->priority = TB_DMA_PRIORITY;
1728 	path->weight = TB_DMA_WEIGHT;
1729 	path->clear_fc = true;
1730 
1731 	tb_path_for_each_hop(path, hop) {
1732 		int ret;
1733 
1734 		ret = tb_dma_reserve_credits(hop, credits);
1735 		if (ret)
1736 			return ret;
1737 	}
1738 
1739 	return 0;
1740 }
1741 
1742 static void tb_dma_release_credits(struct tb_path_hop *hop)
1743 {
1744 	struct tb_port *port = hop->in_port;
1745 
1746 	if (tb_port_use_credit_allocation(port)) {
1747 		port->dma_credits -= hop->initial_credits;
1748 
1749 		tb_port_dbg(port, "released %u DMA path credits\n",
1750 			    hop->initial_credits);
1751 	}
1752 }
1753 
1754 static void tb_dma_destroy_path(struct tb_path *path)
1755 {
1756 	struct tb_path_hop *hop;
1757 
1758 	tb_path_for_each_hop(path, hop)
1759 		tb_dma_release_credits(hop);
1760 }
1761 
1762 static void tb_dma_destroy(struct tb_tunnel *tunnel)
1763 {
1764 	int i;
1765 
1766 	for (i = 0; i < tunnel->npaths; i++) {
1767 		if (!tunnel->paths[i])
1768 			continue;
1769 		tb_dma_destroy_path(tunnel->paths[i]);
1770 	}
1771 }
1772 
1773 /**
1774  * tb_tunnel_alloc_dma() - allocate a DMA tunnel
1775  * @tb: Pointer to the domain structure
1776  * @nhi: Host controller port
1777  * @dst: Destination null port which the other domain is connected to
1778  * @transmit_path: HopID used for transmitting packets
1779  * @transmit_ring: NHI ring number used to send packets towards the
1780  *		   other domain. Set to %-1 if TX path is not needed.
1781  * @receive_path: HopID used for receiving packets
1782  * @receive_ring: NHI ring number used to receive packets from the
1783  *		  other domain. Set to %-1 if RX path is not needed.
1784  *
1785  * Return: Returns a tb_tunnel on success or %NULL on failure.
1786  */
1787 struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
1788 				      struct tb_port *dst, int transmit_path,
1789 				      int transmit_ring, int receive_path,
1790 				      int receive_ring)
1791 {
1792 	struct tb_tunnel *tunnel;
1793 	size_t npaths = 0, i = 0;
1794 	struct tb_path *path;
1795 	int credits;
1796 
1797 	/* Ring 0 is reserved for control channel */
1798 	if (WARN_ON(!receive_ring || !transmit_ring))
1799 		return NULL;
1800 
1801 	if (receive_ring > 0)
1802 		npaths++;
1803 	if (transmit_ring > 0)
1804 		npaths++;
1805 
1806 	if (WARN_ON(!npaths))
1807 		return NULL;
1808 
1809 	tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA);
1810 	if (!tunnel)
1811 		return NULL;
1812 
1813 	tunnel->src_port = nhi;
1814 	tunnel->dst_port = dst;
1815 	tunnel->destroy = tb_dma_destroy;
1816 
1817 	credits = min_not_zero(dma_credits, nhi->sw->max_dma_credits);
1818 
1819 	if (receive_ring > 0) {
1820 		path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
1821 				     "DMA RX");
1822 		if (!path)
1823 			goto err_free;
1824 		tunnel->paths[i++] = path;
1825 		if (tb_dma_init_rx_path(path, credits)) {
1826 			tb_tunnel_dbg(tunnel, "not enough buffers for RX path\n");
1827 			goto err_free;
1828 		}
1829 	}
1830 
1831 	if (transmit_ring > 0) {
1832 		path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
1833 				     "DMA TX");
1834 		if (!path)
1835 			goto err_free;
1836 		tunnel->paths[i++] = path;
1837 		if (tb_dma_init_tx_path(path, credits)) {
1838 			tb_tunnel_dbg(tunnel, "not enough buffers for TX path\n");
1839 			goto err_free;
1840 		}
1841 	}
1842 
1843 	return tunnel;
1844 
1845 err_free:
1846 	tb_tunnel_put(tunnel);
1847 	return NULL;
1848 }
1849 
1850 /**
1851  * tb_tunnel_match_dma() - Match DMA tunnel
1852  * @tunnel: Tunnel to match
1853  * @transmit_path: HopID used for transmitting packets. Pass %-1 to ignore.
1854  * @transmit_ring: NHI ring number used to send packets towards the
1855  *		   other domain. Pass %-1 to ignore.
1856  * @receive_path: HopID used for receiving packets. Pass %-1 to ignore.
1857  * @receive_ring: NHI ring number used to receive packets from the
1858  *		  other domain. Pass %-1 to ignore.
1859  *
1860  * This function can be used to match a specific DMA tunnel, if there are
1861  * multiple DMA tunnels going through the same XDomain connection.
1862  * Returns true if there is a match and false otherwise.
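 *
 * For example, a hypothetical call like
 * tb_tunnel_match_dma(tunnel, -1, transmit_ring, -1, receive_ring)
 * matches on the ring numbers only and ignores the HopIDs.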
1863  */
1864 bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
1865 			 int transmit_ring, int receive_path, int receive_ring)
1866 {
1867 	const struct tb_path *tx_path = NULL, *rx_path = NULL;
1868 	int i;
1869 
1870 	if (!receive_ring || !transmit_ring)
1871 		return false;
1872 
1873 	for (i = 0; i < tunnel->npaths; i++) {
1874 		const struct tb_path *path = tunnel->paths[i];
1875 
1876 		if (!path)
1877 			continue;
1878 
1879 		if (tb_port_is_nhi(path->hops[0].in_port))
1880 			tx_path = path;
1881 		else if (tb_port_is_nhi(path->hops[path->path_length - 1].out_port))
1882 			rx_path = path;
1883 	}
1884 
1885 	if (transmit_ring > 0 || transmit_path > 0) {
1886 		if (!tx_path)
1887 			return false;
1888 		if (transmit_ring > 0 &&
1889 		    (tx_path->hops[0].in_hop_index != transmit_ring))
1890 			return false;
1891 		if (transmit_path > 0 &&
1892 		    (tx_path->hops[tx_path->path_length - 1].next_hop_index != transmit_path))
1893 			return false;
1894 	}
1895 
1896 	if (receive_ring > 0 || receive_path > 0) {
1897 		if (!rx_path)
1898 			return false;
1899 		if (receive_path > 0 &&
1900 		    (rx_path->hops[0].in_hop_index != receive_path))
1901 			return false;
1902 		if (receive_ring > 0 &&
1903 		    (rx_path->hops[rx_path->path_length - 1].next_hop_index != receive_ring))
1904 			return false;
1905 	}
1906 
1907 	return true;
1908 }
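
/*
 * Illustrative sketch (not part of the driver): with several DMA
 * tunnels sharing one XDomain connection, a caller could pick the right
 * one by walking its own tunnel list and matching only the parameters
 * it cares about (%-1 ignores a field). The list name tunnel_list is
 * hypothetical here:
 *
 *	list_for_each_entry(tunnel, &tunnel_list, list) {
 *		if (!tb_tunnel_is_dma(tunnel))
 *			continue;
 *		if (tb_tunnel_match_dma(tunnel, -1, transmit_ring, -1,
 *					receive_ring))
 *			break;
 *	}
 */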
1909 
1910 static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
1911 {
1912 	int ret, up_max_rate, down_max_rate;
1913 
1914 	ret = usb4_usb3_port_max_link_rate(up);
1915 	if (ret < 0)
1916 		return ret;
1917 	up_max_rate = ret;
1918 
1919 	ret = usb4_usb3_port_max_link_rate(down);
1920 	if (ret < 0)
1921 		return ret;
1922 	down_max_rate = ret;
1923 
1924 	return min(up_max_rate, down_max_rate);
1925 }
1926 
1927 static int tb_usb3_pre_activate(struct tb_tunnel *tunnel)
1928 {
1929 	tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
1930 		      tunnel->allocated_up, tunnel->allocated_down);
1931 
1932 	return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
1933 						 &tunnel->allocated_up,
1934 						 &tunnel->allocated_down);
1935 }
1936 
1937 static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
1938 {
1939 	int res;
1940 
1941 	res = tb_usb3_port_enable(tunnel->src_port, activate);
1942 	if (res)
1943 		return res;
1944 
1945 	if (tb_port_is_usb3_up(tunnel->dst_port))
1946 		return tb_usb3_port_enable(tunnel->dst_port, activate);
1947 
1948 	return 0;
1949 }
1950 
1951 static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
1952 		int *consumed_up, int *consumed_down)
1953 {
1954 	struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
1955 	int pcie_weight = tb_acpi_may_tunnel_pcie() ? TB_PCI_WEIGHT : 0;
1956 
1957 	/*
1958 	 * PCIe tunneling, if enabled, affects the USB3 bandwidth so
1959 	 * take it into account here.
1960 	 */
1961 	*consumed_up = tunnel->allocated_up *
1962 		(TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
1963 	*consumed_down = tunnel->allocated_down *
1964 		(TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
1965 
1966 	if (tb_port_get_link_generation(port) >= 4) {
1967 		*consumed_up = max(*consumed_up, USB4_V2_USB3_MIN_BANDWIDTH);
1968 		*consumed_down = max(*consumed_down, USB4_V2_USB3_MIN_BANDWIDTH);
1969 	}
1970 
1971 	return 0;
1972 }
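
/*
 * Worked example (illustrative numbers): with TB_USB3_WEIGHT == 2 and
 * PCIe tunneling enabled (TB_PCI_WEIGHT == 1), an allocation of
 * 900 Mb/s is reported as
 *
 *	900 * (2 + 1) / 2 = 1350 Mb/s
 *
 * of consumed bandwidth, and on USB4 v2 (Gen 4) links the result is
 * additionally clamped to at least USB4_V2_USB3_MIN_BANDWIDTH
 * (3000 Mb/s).
 */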
1973 
1974 static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
1975 {
1976 	int ret;
1977 
1978 	ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
1979 					       &tunnel->allocated_up,
1980 					       &tunnel->allocated_down);
1981 	if (ret)
1982 		return ret;
1983 
1984 	tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
1985 		      tunnel->allocated_up, tunnel->allocated_down);
1986 	return 0;
1987 }
1988 
1989 static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
1990 						int *available_up,
1991 						int *available_down)
1992 {
1993 	int ret, max_rate, allocate_up, allocate_down;
1994 
1995 	ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port);
1996 	if (ret < 0) {
1997 		tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
1998 		return;
1999 	}
2000 
2001 	/*
2002 	 * 90% of the max rate can be allocated for isochronous
2003 	 * transfers.
2004 	 */
2005 	max_rate = ret * 90 / 100;
2006 
2007 	/* No need to reclaim if already at maximum */
2008 	if (tunnel->allocated_up >= max_rate &&
2009 	    tunnel->allocated_down >= max_rate)
2010 		return;
2011 
2012 	/* Don't go lower than what is already allocated */
2013 	allocate_up = min(max_rate, *available_up);
2014 	if (allocate_up < tunnel->allocated_up)
2015 		allocate_up = tunnel->allocated_up;
2016 
2017 	allocate_down = min(max_rate, *available_down);
2018 	if (allocate_down < tunnel->allocated_down)
2019 		allocate_down = tunnel->allocated_down;
2020 
2021 	/* If no changes no need to do more */
2022 	if (allocate_up == tunnel->allocated_up &&
2023 	    allocate_down == tunnel->allocated_down)
2024 		return;
2025 
2026 	ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
2027 						&allocate_down);
2028 	if (ret) {
2029 		tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
2030 		return;
2031 	}
2032 
2033 	tunnel->allocated_up = allocate_up;
2034 	*available_up -= tunnel->allocated_up;
2035 
2036 	tunnel->allocated_down = allocate_down;
2037 	*available_down -= tunnel->allocated_down;
2038 
2039 	tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
2040 		      tunnel->allocated_up, tunnel->allocated_down);
2041 }
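
/*
 * Worked example (illustrative numbers): for a 10000 Mb/s USB3 link the
 * isochronous cap is 10000 * 90 / 100 = 9000 Mb/s. With 7000 Mb/s in
 * *available_up and 4000 Mb/s currently allocated, the new allocation
 * becomes min(9000, 7000) = 7000 Mb/s (it is never lowered below the
 * current allocation) and *available_up is then decreased by the full
 * new allocation, here down to 0.
 */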
2042 
2043 static void tb_usb3_init_credits(struct tb_path_hop *hop)
2044 {
2045 	struct tb_port *port = hop->in_port;
2046 	struct tb_switch *sw = port->sw;
2047 	unsigned int credits;
2048 
2049 	if (tb_port_use_credit_allocation(port)) {
2050 		credits = sw->max_usb3_credits;
2051 	} else {
2052 		if (tb_port_is_null(port))
2053 			credits = port->bonded ? 32 : 16;
2054 		else
2055 			credits = 7;
2056 	}
2057 
2058 	hop->initial_credits = credits;
2059 }
2060 
2061 static void tb_usb3_init_path(struct tb_path *path)
2062 {
2063 	struct tb_path_hop *hop;
2064 
2065 	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
2066 	path->egress_shared_buffer = TB_PATH_NONE;
2067 	path->ingress_fc_enable = TB_PATH_ALL;
2068 	path->ingress_shared_buffer = TB_PATH_NONE;
2069 	path->priority = TB_USB3_PRIORITY;
2070 	path->weight = TB_USB3_WEIGHT;
2071 	path->drop_packages = 0;
2072 
2073 	tb_path_for_each_hop(path, hop)
2074 		tb_usb3_init_credits(hop);
2075 }
2076 
2077 /**
2078  * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
2079  * @tb: Pointer to the domain structure
2080  * @down: USB3 downstream adapter
2081  * @alloc_hopid: Allocate HopIDs from visited ports
2082  *
2083  * If the @down adapter is active, follows the tunnel to the USB3 upstream
2084  * adapter and back. Returns the discovered tunnel or %NULL if there was
2085  * no tunnel.
2086  */
2087 struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
2088 					  bool alloc_hopid)
2089 {
2090 	struct tb_tunnel *tunnel;
2091 	struct tb_path *path;
2092 
2093 	if (!tb_usb3_port_is_enabled(down))
2094 		return NULL;
2095 
2096 	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
2097 	if (!tunnel)
2098 		return NULL;
2099 
2100 	tunnel->activate = tb_usb3_activate;
2101 	tunnel->src_port = down;
2102 
2103 	/*
2104 	 * Discover both paths even if they are not complete. We will
2105 	 * clean them up by calling tb_tunnel_deactivate() below in that
2106 	 * case.
2107 	 */
2108 	path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
2109 				&tunnel->dst_port, "USB3 Down", alloc_hopid);
2110 	if (!path) {
2111 		/* Just disable the downstream port */
2112 		tb_usb3_port_enable(down, false);
2113 		goto err_free;
2114 	}
2115 	tunnel->paths[TB_USB3_PATH_DOWN] = path;
2116 	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);
2117 
2118 	path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
2119 				"USB3 Up", alloc_hopid);
2120 	if (!path)
2121 		goto err_deactivate;
2122 	tunnel->paths[TB_USB3_PATH_UP] = path;
2123 	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);
2124 
2125 	/* Validate that the tunnel is complete */
2126 	if (!tb_port_is_usb3_up(tunnel->dst_port)) {
2127 		tb_port_warn(tunnel->dst_port,
2128 			     "path does not end on a USB3 adapter, cleaning up\n");
2129 		goto err_deactivate;
2130 	}
2131 
2132 	if (down != tunnel->src_port) {
2133 		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
2134 		goto err_deactivate;
2135 	}
2136 
2137 	if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
2138 		tb_tunnel_warn(tunnel,
2139 			       "tunnel is not fully activated, cleaning up\n");
2140 		goto err_deactivate;
2141 	}
2142 
2143 	if (!tb_route(down->sw)) {
2144 		int ret;
2145 
2146 		/*
2147 		 * Read the initial bandwidth allocation for the first
2148 		 * hop tunnel.
2149 		 */
2150 		ret = usb4_usb3_port_allocated_bandwidth(down,
2151 			&tunnel->allocated_up, &tunnel->allocated_down);
2152 		if (ret)
2153 			goto err_deactivate;
2154 
2155 		tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
2156 			      tunnel->allocated_up, tunnel->allocated_down);
2157 
2158 		tunnel->pre_activate = tb_usb3_pre_activate;
2159 		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
2160 		tunnel->release_unused_bandwidth =
2161 			tb_usb3_release_unused_bandwidth;
2162 		tunnel->reclaim_available_bandwidth =
2163 			tb_usb3_reclaim_available_bandwidth;
2164 	}
2165 
2166 	tb_tunnel_dbg(tunnel, "discovered\n");
2167 	return tunnel;
2168 
2169 err_deactivate:
2170 	tb_tunnel_deactivate(tunnel);
2171 err_free:
2172 	tb_tunnel_put(tunnel);
2173 
2174 	return NULL;
2175 }
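
/*
 * Illustrative sketch (not part of the driver): during initial scan a
 * connection manager could discover an existing USB3 tunnel behind a
 * downstream adapter and keep it in its own list. tb_switch_find_port()
 * and tunnel_list are assumed names here:
 *
 *	down = tb_switch_find_port(sw, TB_TYPE_USB3_DOWN);
 *	if (down) {
 *		tunnel = tb_tunnel_discover_usb3(tb, down, true);
 *		if (tunnel)
 *			list_add_tail(&tunnel->list, &tunnel_list);
 *	}
 */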
2176 
2177 /**
2178  * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
2179  * @tb: Pointer to the domain structure
2180  * @up: USB3 upstream adapter port
2181  * @down: USB3 downstream adapter port
2182  * @max_up: Maximum available upstream bandwidth for the USB3 tunnel.
2183  *	    %0 if no available bandwidth.
2184  * @max_down: Maximum available downstream bandwidth for the USB3 tunnel.
2185  *	      %0 if no available bandwidth.
2186  *
2187  * Allocate a USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
2188  * @TB_TYPE_USB3_DOWN.
2189  *
2190  * Return: Returns a tb_tunnel on success or %NULL on failure.
2191  */
2192 struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
2193 				       struct tb_port *down, int max_up,
2194 				       int max_down)
2195 {
2196 	struct tb_tunnel *tunnel;
2197 	struct tb_path *path;
2198 	int max_rate = 0;
2199 
2200 	if (!tb_route(down->sw) && (max_up > 0 || max_down > 0)) {
2201 		/*
2202 		 * For USB3 isochronous transfers, we allow bandwidth which is
2203 		 * not higher than 90% of the maximum bandwidth supported by
2204 		 * the USB3 adapters.
2205 		 */
2206 		max_rate = tb_usb3_max_link_rate(down, up);
2207 		if (max_rate < 0)
2208 			return NULL;
2209 
2210 		max_rate = max_rate * 90 / 100;
2211 		tb_port_dbg(up, "maximum required bandwidth for USB3 tunnel %d Mb/s\n",
2212 			    max_rate);
2213 	}
2214 
2215 	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
2216 	if (!tunnel)
2217 		return NULL;
2218 
2219 	tunnel->activate = tb_usb3_activate;
2220 	tunnel->src_port = down;
2221 	tunnel->dst_port = up;
2222 	tunnel->max_up = max_up;
2223 	tunnel->max_down = max_down;
2224 
2225 	path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
2226 			     "USB3 Down");
2227 	if (!path) {
2228 		tb_tunnel_put(tunnel);
2229 		return NULL;
2230 	}
2231 	tb_usb3_init_path(path);
2232 	tunnel->paths[TB_USB3_PATH_DOWN] = path;
2233 
2234 	path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
2235 			     "USB3 Up");
2236 	if (!path) {
2237 		tb_tunnel_put(tunnel);
2238 		return NULL;
2239 	}
2240 	tb_usb3_init_path(path);
2241 	tunnel->paths[TB_USB3_PATH_UP] = path;
2242 
2243 	if (!tb_route(down->sw)) {
2244 		tunnel->allocated_up = min(max_rate, max_up);
2245 		tunnel->allocated_down = min(max_rate, max_down);
2246 
2247 		tunnel->pre_activate = tb_usb3_pre_activate;
2248 		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
2249 		tunnel->release_unused_bandwidth =
2250 			tb_usb3_release_unused_bandwidth;
2251 		tunnel->reclaim_available_bandwidth =
2252 			tb_usb3_reclaim_available_bandwidth;
2253 	}
2254 
2255 	return tunnel;
2256 }
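
/*
 * Illustrative sketch (not part of the driver): allocating and bringing
 * up a new USB3 tunnel between a pair of adapters. up, down,
 * available_up and available_down are assumed to come from the caller's
 * bandwidth bookkeeping:
 *
 *	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
 *				      available_down);
 *	if (!tunnel)
 *		return -ENOMEM;
 *	if (tb_tunnel_activate(tunnel)) {
 *		tb_tunnel_put(tunnel);
 *		return -EIO;
 *	}
 */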
2257 
2258 /**
2259  * tb_tunnel_is_invalid() - check whether an activated path is still valid
2260  * @tunnel: Tunnel to check
2261  */
2262 bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
2263 {
2264 	int i;
2265 
2266 	for (i = 0; i < tunnel->npaths; i++) {
2267 		WARN_ON(!tunnel->paths[i]->activated);
2268 		if (tb_path_is_invalid(tunnel->paths[i]))
2269 			return true;
2270 	}
2271 
2272 	return false;
2273 }
2274 
2275 /**
2276  * tb_tunnel_activate() - activate a tunnel
2277  * @tunnel: Tunnel to activate
2278  *
2279  * Return: 0 on success and negative errno in case if failure.
2280  * Return: 0 on success and negative errno in case of failure.
2281  * Specifically returns %-EINPROGRESS if the tunnel activation is still
2282  * in progress (this is the case for DP tunnels until the DPRX
2283  * capabilities read completes).
2284 int tb_tunnel_activate(struct tb_tunnel *tunnel)
2285 {
2286 	int res, i;
2287 
2288 	tb_tunnel_dbg(tunnel, "activating\n");
2289 
2290 	/*
2291 	 * Make sure all paths are properly disabled before enabling
2292 	 * them again.
2293 	 */
2294 	for (i = 0; i < tunnel->npaths; i++) {
2295 		if (tunnel->paths[i]->activated) {
2296 			tb_path_deactivate(tunnel->paths[i]);
2297 			tunnel->paths[i]->activated = false;
2298 		}
2299 	}
2300 
2301 	tunnel->state = TB_TUNNEL_ACTIVATING;
2302 
2303 	if (tunnel->pre_activate) {
2304 		res = tunnel->pre_activate(tunnel);
2305 		if (res)
2306 			return res;
2307 	}
2308 
2309 	for (i = 0; i < tunnel->npaths; i++) {
2310 		res = tb_path_activate(tunnel->paths[i]);
2311 		if (res)
2312 			goto err;
2313 	}
2314 
2315 	if (tunnel->activate) {
2316 		res = tunnel->activate(tunnel, true);
2317 		if (res) {
2318 			if (res == -EINPROGRESS)
2319 				return res;
2320 			goto err;
2321 		}
2322 	}
2323 
2324 	tunnel->state = TB_TUNNEL_ACTIVE;
2325 	return 0;
2326 
2327 err:
2328 	tb_tunnel_warn(tunnel, "activation failed\n");
2329 	tb_tunnel_deactivate(tunnel);
2330 	return res;
2331 }
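
/*
 * Illustrative sketch (not part of the driver): because DP tunnels may
 * return %-EINPROGRESS from tb_tunnel_activate(), a caller typically
 * treats that value as success and waits for the activation to complete
 * separately:
 *
 *	ret = tb_tunnel_activate(tunnel);
 *	if (ret && ret != -EINPROGRESS) {
 *		tb_tunnel_put(tunnel);
 *		return ret;
 *	}
 */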
2332 
2333 /**
2334  * tb_tunnel_deactivate() - deactivate a tunnel
2335  * @tunnel: Tunnel to deactivate
2336  */
2337 void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
2338 {
2339 	int i;
2340 
2341 	tb_tunnel_dbg(tunnel, "deactivating\n");
2342 
2343 	if (tunnel->activate)
2344 		tunnel->activate(tunnel, false);
2345 
2346 	for (i = 0; i < tunnel->npaths; i++) {
2347 		if (tunnel->paths[i] && tunnel->paths[i]->activated)
2348 			tb_path_deactivate(tunnel->paths[i]);
2349 	}
2350 
2351 	if (tunnel->post_deactivate)
2352 		tunnel->post_deactivate(tunnel);
2353 
2354 	tunnel->state = TB_TUNNEL_INACTIVE;
2355 }
2356 
2357 /**
2358  * tb_tunnel_port_on_path() - Does the tunnel go through port
2359  * @tunnel: Tunnel to check
2360  * @port: Port to check
2361  *
2362  * Returns true if @tunnel goes through @port (direction does not matter),
2363  * false otherwise.
2364  */
2365 bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
2366 			    const struct tb_port *port)
2367 {
2368 	int i;
2369 
2370 	for (i = 0; i < tunnel->npaths; i++) {
2371 		if (!tunnel->paths[i])
2372 			continue;
2373 
2374 		if (tb_path_port_on_path(tunnel->paths[i], port))
2375 			return true;
2376 	}
2377 
2378 	return false;
2379 }
2380 
2381 /* Returns true if tb_tunnel_activate() has been called for the tunnel */
2382 static bool tb_tunnel_is_activated(const struct tb_tunnel *tunnel)
2383 {
2384 	return tunnel->state == TB_TUNNEL_ACTIVATING || tb_tunnel_is_active(tunnel);
2385 }
2386 
2387 /**
2388  * tb_tunnel_maximum_bandwidth() - Return maximum possible bandwidth
2389  * @tunnel: Tunnel to check
2390  * @max_up: Maximum upstream bandwidth in Mb/s
2391  * @max_down: Maximum downstream bandwidth in Mb/s
2392  *
2393  * Returns the maximum possible bandwidth this tunnel can use if not
2394  * limited by other bandwidth clients. If the tunnel does not support
2395  * this, returns %-EOPNOTSUPP.
2396  */
2397 int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
2398 				int *max_down)
2399 {
2400 	if (!tb_tunnel_is_active(tunnel))
2401 		return -ENOTCONN;
2402 
2403 	if (tunnel->maximum_bandwidth)
2404 		return tunnel->maximum_bandwidth(tunnel, max_up, max_down);
2405 	return -EOPNOTSUPP;
2406 }
2407 
2408 /**
2409  * tb_tunnel_allocated_bandwidth() - Return bandwidth allocated for the tunnel
2410  * @tunnel: Tunnel to check
2411  * @allocated_up: Currently allocated upstream bandwidth in Mb/s is stored here
2412  * @allocated_down: Currently allocated downstream bandwidth in Mb/s is
2413  *		    stored here
2414  *
2415  * Returns the bandwidth allocated for the tunnel. This may be higher
2416  * than what the tunnel actually consumes.
2417  */
2418 int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
2419 				  int *allocated_down)
2420 {
2421 	if (!tb_tunnel_is_active(tunnel))
2422 		return -ENOTCONN;
2423 
2424 	if (tunnel->allocated_bandwidth)
2425 		return tunnel->allocated_bandwidth(tunnel, allocated_up,
2426 						   allocated_down);
2427 	return -EOPNOTSUPP;
2428 }
2429 
2430 /**
2431  * tb_tunnel_alloc_bandwidth() - Change tunnel bandwidth allocation
2432  * @tunnel: Tunnel whose bandwidth allocation to change
2433  * @alloc_up: New upstream bandwidth in Mb/s
2434  * @alloc_down: New downstream bandwidth in Mb/s
2435  *
2436  * Tries to change the tunnel bandwidth allocation. If it succeeds, returns
2437  * %0 and updates @alloc_up and @alloc_down to what was actually allocated
2438  * (it may not be the same as passed originally). Returns negative errno
2439  * in case of failure.
2440  */
2441 int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
2442 			      int *alloc_down)
2443 {
2444 	if (!tb_tunnel_is_active(tunnel))
2445 		return -ENOTCONN;
2446 
2447 	if (tunnel->alloc_bandwidth)
2448 		return tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down);
2449 
2450 	return -EOPNOTSUPP;
2451 }
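
/*
 * Illustrative sketch (not part of the driver): the allocation may be
 * rounded by the tunnel, so a caller should use the values written back
 * rather than the ones it asked for:
 *
 *	int up = requested_up, down = requested_down;
 *
 *	ret = tb_tunnel_alloc_bandwidth(tunnel, &up, &down);
 *	if (!ret)
 *		tb_tunnel_dbg(tunnel, "granted %d/%d Mb/s\n", up, down);
 */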
2452 
2453 /**
2454  * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
2455  * @tunnel: Tunnel to check
2456  * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
2457  *		 Can be %NULL.
2458  * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
2459  *		   Can be %NULL.
2460  *
2461  * Stores the amount of isochronous bandwidth @tunnel consumes in
2462  * @consumed_up and @consumed_down. In case of success returns %0,
2463  * negative errno otherwise.
2464  */
2465 int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
2466 				 int *consumed_down)
2467 {
2468 	int up_bw = 0, down_bw = 0;
2469 
2470 	/*
2471 	 * Here we need to distinguish a tunnel that is not active from
2472 	 * tunnels that are either fully active or whose activation has
2473 	 * been started. The latter matters for DP tunnels where we must
2474 	 * report the consumed bandwidth to be the maximum we gave them
2475 	 * until the DPRX capabilities read is done by the graphics driver.
2476 	 */
2477 	if (tb_tunnel_is_activated(tunnel) && tunnel->consumed_bandwidth) {
2478 		int ret;
2479 
2480 		ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
2481 		if (ret)
2482 			return ret;
2483 	}
2484 
2485 	if (consumed_up)
2486 		*consumed_up = up_bw;
2487 	if (consumed_down)
2488 		*consumed_down = down_bw;
2489 
2490 	tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw, down_bw);
2491 	return 0;
2492 }
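
/*
 * Illustrative sketch (not part of the driver): a caller summing up the
 * isochronous bandwidth consumed by all tunnels crossing a port could
 * do roughly the following (tunnel_list is hypothetical):
 *
 *	list_for_each_entry(tunnel, &tunnel_list, list) {
 *		int up, down;
 *
 *		if (!tb_tunnel_port_on_path(tunnel, port))
 *			continue;
 *		if (!tb_tunnel_consumed_bandwidth(tunnel, &up, &down)) {
 *			total_up += up;
 *			total_down += down;
 *		}
 *	}
 */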
2493 
2494 /**
2495  * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
2496  * @tunnel: Tunnel whose unused bandwidth to release
2497  *
2498  * If the tunnel supports dynamic bandwidth management (USB3 tunnels at
2499  * the moment) this function makes it release all the unused bandwidth.
2500  *
2501  * Returns %0 in case of success and negative errno otherwise.
2502  */
2503 int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
2504 {
2505 	if (!tb_tunnel_is_active(tunnel))
2506 		return -ENOTCONN;
2507 
2508 	if (tunnel->release_unused_bandwidth) {
2509 		int ret;
2510 
2511 		ret = tunnel->release_unused_bandwidth(tunnel);
2512 		if (ret)
2513 			return ret;
2514 	}
2515 
2516 	return 0;
2517 }
2518 
2519 /**
2520  * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
2521  * @tunnel: Tunnel reclaiming available bandwidth
2522  * @available_up: Available upstream bandwidth (in Mb/s)
2523  * @available_down: Available downstream bandwidth (in Mb/s)
2524  *
2525  * Reclaims bandwidth from @available_up and @available_down and updates
2526  * the variables accordingly (e.g. decreases both according to what was
2527  * reclaimed by the tunnel). If nothing was reclaimed, the values are
2528  * kept as is.
2529  */
2530 void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
2531 					   int *available_up,
2532 					   int *available_down)
2533 {
2534 	if (!tb_tunnel_is_active(tunnel))
2535 		return;
2536 
2537 	if (tunnel->reclaim_available_bandwidth)
2538 		tunnel->reclaim_available_bandwidth(tunnel, available_up,
2539 						    available_down);
2540 }
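
/*
 * Illustrative sketch (not part of the driver): the two helpers above
 * are meant to be used in sequence when redistributing bandwidth: first
 * release what the USB3 tunnel does not use, recompute what is
 * available, and then let the tunnel grow back into the leftover
 * (tunnel, available_up and available_down come from the caller):
 *
 *	tb_tunnel_release_unused_bandwidth(tunnel);
 *	... recompute available_up and available_down here ...
 *	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up,
 *					      &available_down);
 */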
2541 
2542 const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel)
2543 {
2544 	return tb_tunnel_names[tunnel->type];
2545 }
2546