xref: /linux/drivers/thunderbolt/tunnel.c (revision cb82ca153949c6204af793de24b18a04236e79fd)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Thunderbolt driver - Tunneling support
4  *
5  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6  * Copyright (C) 2019, Intel Corporation
7  */
8 
9 #include <linux/delay.h>
10 #include <linux/slab.h>
11 #include <linux/list.h>
12 #include <linux/ktime.h>
13 #include <linux/string_helpers.h>
14 
15 #include "tunnel.h"
16 #include "tb.h"
17 
18 /* PCIe adapters always use HopID 8 for both directions */
19 #define TB_PCI_HOPID			8
20 
21 #define TB_PCI_PATH_DOWN		0
22 #define TB_PCI_PATH_UP			1
23 
24 #define TB_PCI_PRIORITY			3
25 #define TB_PCI_WEIGHT			1
26 
27 /* USB3 adapters always use HopID 8 for both directions */
28 #define TB_USB3_HOPID			8
29 
30 #define TB_USB3_PATH_DOWN		0
31 #define TB_USB3_PATH_UP			1
32 
33 #define TB_USB3_PRIORITY		3
34 #define TB_USB3_WEIGHT			2
35 
36 /* DP adapters use HopID 8 for AUX and 9 for Video */
37 #define TB_DP_AUX_TX_HOPID		8
38 #define TB_DP_AUX_RX_HOPID		8
39 #define TB_DP_VIDEO_HOPID		9
40 
41 #define TB_DP_VIDEO_PATH_OUT		0
42 #define TB_DP_AUX_PATH_OUT		1
43 #define TB_DP_AUX_PATH_IN		2
44 
45 #define TB_DP_VIDEO_PRIORITY		1
46 #define TB_DP_VIDEO_WEIGHT		1
47 
48 #define TB_DP_AUX_PRIORITY		2
49 #define TB_DP_AUX_WEIGHT		1
50 
51 /* Minimum number of credits needed for PCIe path */
52 #define TB_MIN_PCIE_CREDITS		6U
53 /*
54  * Number of credits we try to allocate for each DMA path if not limited
55  * by the host router baMaxHI.
56  */
57 #define TB_DMA_CREDITS			14
58 /* Minimum number of credits for DMA path */
59 #define TB_MIN_DMA_CREDITS		1
60 
61 #define TB_DMA_PRIORITY			5
62 #define TB_DMA_WEIGHT			1
63 
64 /*
65  * Reserve additional bandwidth for USB 3.x and PCIe bulk traffic
66  * according to USB4 v2 Connection Manager guide. This ends up reserving
67  * 1500 Mb/s for PCIe and 3000 Mb/s for USB 3.x taking weights into
68  * account.
69  */
70 #define USB4_V2_PCI_MIN_BANDWIDTH	(1500 * TB_PCI_WEIGHT)
71 #define USB4_V2_USB3_MIN_BANDWIDTH	(1500 * TB_USB3_WEIGHT)
72 
73 /*
74  * According to the VESA spec, the DPRX negotiation shall complete
75  * within 5 seconds after the tunnel is established. Since at least
76  * i915 can runtime suspend if there is nothing connected, and it polls
77  * for new connections every 10 seconds, we use 12 seconds here.
78  *
79  * These are in ms.
80  */
81 #define TB_DPRX_TIMEOUT			12000
82 #define TB_DPRX_WAIT_TIMEOUT		25
83 #define TB_DPRX_POLL_DELAY		50
84 
85 static int dprx_timeout = TB_DPRX_TIMEOUT;
86 module_param(dprx_timeout, int, 0444);
87 MODULE_PARM_DESC(dprx_timeout,
88 		 "DPRX capability read timeout in ms, -1 waits forever (default: "
89 		 __MODULE_STRING(TB_DPRX_TIMEOUT) ")");
90 
91 static unsigned int dma_credits = TB_DMA_CREDITS;
92 module_param(dma_credits, uint, 0444);
93 MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: "
94                 __MODULE_STRING(TB_DMA_CREDITS) ")");
95 
96 static bool bw_alloc_mode = true;
97 module_param(bw_alloc_mode, bool, 0444);
98 MODULE_PARM_DESC(bw_alloc_mode,
99 		 "enable bandwidth allocation mode if supported (default: true)");
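/*
 * For example, assuming the driver is built as the "thunderbolt" module,
 * these parameters could be overridden at load time:
 *
 *	modprobe thunderbolt dprx_timeout=-1 dma_credits=32 bw_alloc_mode=0
 *
 * All three are read-only (0444) once the module is loaded.
 */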
100 
101 static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
102 
103 /* Synchronizes kref_get()/put() of struct tb_tunnel */
104 static DEFINE_MUTEX(tb_tunnel_lock);
105 
106 static inline unsigned int tb_usable_credits(const struct tb_port *port)
107 {
108 	return port->total_credits - port->ctl_credits;
109 }
110 
111 /**
112  * tb_available_credits() - Available credits for PCIe and DMA
113  * @port: Lane adapter to check
114  * @max_dp_streams: If non-%NULL stores maximum number of simultaneous DP
115  *		    streams possible through this lane adapter
116  */
117 static unsigned int tb_available_credits(const struct tb_port *port,
118 					 size_t *max_dp_streams)
119 {
120 	const struct tb_switch *sw = port->sw;
121 	int credits, usb3, pcie, spare;
122 	size_t ndp;
123 
124 	usb3 = tb_acpi_may_tunnel_usb3() ? sw->max_usb3_credits : 0;
125 	pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0;
126 
127 	if (tb_acpi_is_xdomain_allowed()) {
128 		spare = min_not_zero(sw->max_dma_credits, dma_credits);
129 		/* Add some credits for potential second DMA tunnel */
130 		spare += TB_MIN_DMA_CREDITS;
131 	} else {
132 		spare = 0;
133 	}
134 
135 	credits = tb_usable_credits(port);
136 	if (tb_acpi_may_tunnel_dp()) {
137 		/*
138 		 * Maximum number of DP streams possible through the
139 		 * lane adapter.
140 		 */
141 		if (sw->min_dp_aux_credits + sw->min_dp_main_credits)
142 			ndp = (credits - (usb3 + pcie + spare)) /
143 			      (sw->min_dp_aux_credits + sw->min_dp_main_credits);
144 		else
145 			ndp = 0;
146 	} else {
147 		ndp = 0;
148 	}
149 	credits -= ndp * (sw->min_dp_aux_credits + sw->min_dp_main_credits);
150 	credits -= usb3;
151 
152 	if (max_dp_streams)
153 		*max_dp_streams = ndp;
154 
155 	return credits > 0 ? credits : 0;
156 }
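/*
 * A worked example with hypothetical numbers: if the lane adapter has 60
 * usable credits, USB3 takes 14, PCIe 6 and the DMA spare is 15, and one
 * DP stream needs 1 AUX + 18 main credits, then
 * ndp = (60 - (14 + 6 + 15)) / 19 = 1 stream, and the credits returned
 * for PCIe/DMA use are 60 - 1 * 19 - 14 = 27.
 */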
157 
158 static void tb_init_pm_support(struct tb_path_hop *hop)
159 {
160 	struct tb_port *out_port = hop->out_port;
161 	struct tb_port *in_port = hop->in_port;
162 
163 	if (tb_port_is_null(in_port) && tb_port_is_null(out_port) &&
164 	    usb4_switch_version(in_port->sw) >= 2)
165 		hop->pm_support = true;
166 }
167 
168 static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
169 					 enum tb_tunnel_type type)
170 {
171 	struct tb_tunnel *tunnel;
172 
173 	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
174 	if (!tunnel)
175 		return NULL;
176 
177 	tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
178 	if (!tunnel->paths) {
179 		kfree(tunnel);
180 		return NULL;
181 	}
182 
183 	INIT_LIST_HEAD(&tunnel->list);
184 	tunnel->tb = tb;
185 	tunnel->npaths = npaths;
186 	tunnel->type = type;
187 	kref_init(&tunnel->kref);
188 
189 	return tunnel;
190 }
191 
192 static void tb_tunnel_get(struct tb_tunnel *tunnel)
193 {
194 	mutex_lock(&tb_tunnel_lock);
195 	kref_get(&tunnel->kref);
196 	mutex_unlock(&tb_tunnel_lock);
197 }
198 
199 static void tb_tunnel_destroy(struct kref *kref)
200 {
201 	struct tb_tunnel *tunnel = container_of(kref, typeof(*tunnel), kref);
202 	int i;
203 
204 	if (tunnel->destroy)
205 		tunnel->destroy(tunnel);
206 
207 	for (i = 0; i < tunnel->npaths; i++) {
208 		if (tunnel->paths[i])
209 			tb_path_free(tunnel->paths[i]);
210 	}
211 
212 	kfree(tunnel->paths);
213 	kfree(tunnel);
214 }
215 
216 void tb_tunnel_put(struct tb_tunnel *tunnel)
217 {
218 	mutex_lock(&tb_tunnel_lock);
219 	kref_put(&tunnel->kref, tb_tunnel_destroy);
220 	mutex_unlock(&tb_tunnel_lock);
221 }
222 
223 static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable)
224 {
225 	struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
226 	int ret;
227 
228 	/* Only supported if both routers are at least USB4 v2 */
229 	if ((usb4_switch_version(tunnel->src_port->sw) < 2) ||
230 	   (usb4_switch_version(tunnel->dst_port->sw) < 2))
231 		return 0;
232 
233 	if (enable && tb_port_get_link_generation(port) < 4)
234 		return 0;
235 
236 	ret = usb4_pci_port_set_ext_encapsulation(tunnel->src_port, enable);
237 	if (ret)
238 		return ret;
239 
240 	/*
241 	 * The downstream router could be unplugged, so disabling
242 	 * encapsulation in the upstream router alone is still possible.
243 	 */
244 	ret = usb4_pci_port_set_ext_encapsulation(tunnel->dst_port, enable);
245 	if (ret) {
246 		if (enable)
247 			return ret;
248 		if (ret != -ENODEV)
249 			return ret;
250 	}
251 
252 	tb_tunnel_dbg(tunnel, "extended encapsulation %s\n",
253 		      str_enabled_disabled(enable));
254 	return 0;
255 }
256 
257 static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
258 {
259 	int res;
260 
261 	if (activate) {
262 		res = tb_pci_set_ext_encapsulation(tunnel, activate);
263 		if (res)
264 			return res;
265 	}
266 
267 	if (activate)
268 		res = tb_pci_port_enable(tunnel->dst_port, activate);
269 	else
270 		res = tb_pci_port_enable(tunnel->src_port, activate);
271 	if (res)
272 		return res;
273 
274 
275 	if (activate) {
276 		res = tb_pci_port_enable(tunnel->src_port, activate);
277 		if (res)
278 			return res;
279 	} else {
280 		/* Downstream router could be unplugged */
281 		tb_pci_port_enable(tunnel->dst_port, activate);
282 	}
283 
284 	return activate ? 0 : tb_pci_set_ext_encapsulation(tunnel, activate);
285 }
286 
287 static int tb_pci_init_credits(struct tb_path_hop *hop)
288 {
289 	struct tb_port *port = hop->in_port;
290 	struct tb_switch *sw = port->sw;
291 	unsigned int credits;
292 
293 	if (tb_port_use_credit_allocation(port)) {
294 		unsigned int available;
295 
296 		available = tb_available_credits(port, NULL);
297 		credits = min(sw->max_pcie_credits, available);
298 
299 		if (credits < TB_MIN_PCIE_CREDITS)
300 			return -ENOSPC;
301 
302 		credits = max(TB_MIN_PCIE_CREDITS, credits);
303 	} else {
304 		if (tb_port_is_null(port))
305 			credits = port->bonded ? 32 : 16;
306 		else
307 			credits = 7;
308 	}
309 
310 	hop->initial_credits = credits;
311 	return 0;
312 }
313 
314 static int tb_pci_init_path(struct tb_path *path)
315 {
316 	struct tb_path_hop *hop;
317 
318 	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
319 	path->egress_shared_buffer = TB_PATH_NONE;
320 	path->ingress_fc_enable = TB_PATH_ALL;
321 	path->ingress_shared_buffer = TB_PATH_NONE;
322 	path->priority = TB_PCI_PRIORITY;
323 	path->weight = TB_PCI_WEIGHT;
324 	path->drop_packages = 0;
325 
326 	tb_path_for_each_hop(path, hop) {
327 		int ret;
328 
329 		ret = tb_pci_init_credits(hop);
330 		if (ret)
331 			return ret;
332 	}
333 
334 	return 0;
335 }
336 
337 /**
338  * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
339  * @tb: Pointer to the domain structure
340  * @down: PCIe downstream adapter
341  * @alloc_hopid: Allocate HopIDs from visited ports
342  *
343  * If @down adapter is active, follows the tunnel to the PCIe upstream
344  * adapter and back. Returns the discovered tunnel or %NULL if there was
345  * no tunnel.
346  */
347 struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
348 					 bool alloc_hopid)
349 {
350 	struct tb_tunnel *tunnel;
351 	struct tb_path *path;
352 
353 	if (!tb_pci_port_is_enabled(down))
354 		return NULL;
355 
356 	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
357 	if (!tunnel)
358 		return NULL;
359 
360 	tunnel->activate = tb_pci_activate;
361 	tunnel->src_port = down;
362 
363 	/*
364 	 * Discover both paths even if they are not complete. We will
365 	 * clean them up by calling tb_tunnel_deactivate() below in that
366 	 * case.
367 	 */
368 	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
369 				&tunnel->dst_port, "PCIe Up", alloc_hopid);
370 	if (!path) {
371 		/* Just disable the downstream port */
372 		tb_pci_port_enable(down, false);
373 		goto err_free;
374 	}
375 	tunnel->paths[TB_PCI_PATH_UP] = path;
376 	if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]))
377 		goto err_free;
378 
379 	path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
380 				"PCIe Down", alloc_hopid);
381 	if (!path)
382 		goto err_deactivate;
383 	tunnel->paths[TB_PCI_PATH_DOWN] = path;
384 	if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]))
385 		goto err_deactivate;
386 
387 	/* Validate that the tunnel is complete */
388 	if (!tb_port_is_pcie_up(tunnel->dst_port)) {
389 		tb_port_warn(tunnel->dst_port,
390 			     "path does not end on a PCIe adapter, cleaning up\n");
391 		goto err_deactivate;
392 	}
393 
394 	if (down != tunnel->src_port) {
395 		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
396 		goto err_deactivate;
397 	}
398 
399 	if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
400 		tb_tunnel_warn(tunnel,
401 			       "tunnel is not fully activated, cleaning up\n");
402 		goto err_deactivate;
403 	}
404 
405 	tb_tunnel_dbg(tunnel, "discovered\n");
406 	return tunnel;
407 
408 err_deactivate:
409 	tb_tunnel_deactivate(tunnel);
410 err_free:
411 	tb_tunnel_put(tunnel);
412 
413 	return NULL;
414 }
415 
416 /**
417  * tb_tunnel_alloc_pci() - allocate a PCIe tunnel
418  * @tb: Pointer to the domain structure
419  * @up: PCIe upstream adapter port
420  * @down: PCIe downstream adapter port
421  *
422  * Allocate a PCIe tunnel. The ports must be of type TB_TYPE_PCIE_UP and
423  * TB_TYPE_PCIE_DOWN.
424  *
425  * Return: Returns a tb_tunnel on success or NULL on failure.
426  */
427 struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
428 				      struct tb_port *down)
429 {
430 	struct tb_tunnel *tunnel;
431 	struct tb_path *path;
432 
433 	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
434 	if (!tunnel)
435 		return NULL;
436 
437 	tunnel->activate = tb_pci_activate;
438 	tunnel->src_port = down;
439 	tunnel->dst_port = up;
440 
441 	path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
442 			     "PCIe Down");
443 	if (!path)
444 		goto err_free;
445 	tunnel->paths[TB_PCI_PATH_DOWN] = path;
446 	if (tb_pci_init_path(path))
447 		goto err_free;
448 
449 	path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
450 			     "PCIe Up");
451 	if (!path)
452 		goto err_free;
453 	tunnel->paths[TB_PCI_PATH_UP] = path;
454 	if (tb_pci_init_path(path))
455 		goto err_free;
456 
457 	return tunnel;
458 
459 err_free:
460 	tb_tunnel_put(tunnel);
461 	return NULL;
462 }
463 
464 /**
465  * tb_tunnel_reserved_pci() - Amount of bandwidth to reserve for PCIe
466  * @port: Lane 0 adapter
467  * @reserved_up: Upstream bandwidth in Mb/s to reserve
468  * @reserved_down: Downstream bandwidth in Mb/s to reserve
469  *
470  * Can be called for any connected lane 0 adapter to find out how much
471  * bandwidth needs to be left in reserve for possible PCIe bulk traffic.
472  * Returns true if there is something to be reserved and writes the
473  * amount to @reserved_down/@reserved_up. Otherwise returns false and
474  * does not touch the parameters.
475  */
476 bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up,
477 			    int *reserved_down)
478 {
479 	if (WARN_ON_ONCE(!port->remote))
480 		return false;
481 
482 	if (!tb_acpi_may_tunnel_pcie())
483 		return false;
484 
485 	if (tb_port_get_link_generation(port) < 4)
486 		return false;
487 
488 	/* Must have PCIe adapters */
489 	if (tb_is_upstream_port(port)) {
490 		if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_UP))
491 			return false;
492 		if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_DOWN))
493 			return false;
494 	} else {
495 		if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_DOWN))
496 			return false;
497 		if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_UP))
498 			return false;
499 	}
500 
501 	*reserved_up = USB4_V2_PCI_MIN_BANDWIDTH;
502 	*reserved_down = USB4_V2_PCI_MIN_BANDWIDTH;
503 
504 	tb_port_dbg(port, "reserving %u/%u Mb/s for PCIe\n", *reserved_up,
505 		    *reserved_down);
506 	return true;
507 }
508 
509 static bool tb_dp_is_usb4(const struct tb_switch *sw)
510 {
511 	/* Titan Ridge DP adapters need the same treatment as USB4 */
512 	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
513 }
514 
515 static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out,
516 			      int timeout_msec)
517 {
518 	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
519 	u32 val;
520 	int ret;
521 
522 	/* Both ends need to support this */
523 	if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
524 		return 0;
525 
526 	ret = tb_port_read(out, &val, TB_CFG_PORT,
527 			   out->cap_adap + DP_STATUS_CTRL, 1);
528 	if (ret)
529 		return ret;
530 
531 	val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;
532 
533 	ret = tb_port_write(out, &val, TB_CFG_PORT,
534 			    out->cap_adap + DP_STATUS_CTRL, 1);
535 	if (ret)
536 		return ret;
537 
538 	do {
539 		ret = tb_port_read(out, &val, TB_CFG_PORT,
540 				   out->cap_adap + DP_STATUS_CTRL, 1);
541 		if (ret)
542 			return ret;
543 		if (!(val & DP_STATUS_CTRL_CMHS))
544 			return 0;
545 		usleep_range(100, 150);
546 	} while (ktime_before(ktime_get(), timeout));
547 
548 	return -ETIMEDOUT;
549 }
550 
551 /*
552  * Returns maximum possible rate from capability supporting only DP 2.0
553  * and below. Used when DP BW allocation mode is not enabled.
554  */
555 static inline u32 tb_dp_cap_get_rate(u32 val)
556 {
557 	u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;
558 
559 	switch (rate) {
560 	case DP_COMMON_CAP_RATE_RBR:
561 		return 1620;
562 	case DP_COMMON_CAP_RATE_HBR:
563 		return 2700;
564 	case DP_COMMON_CAP_RATE_HBR2:
565 		return 5400;
566 	case DP_COMMON_CAP_RATE_HBR3:
567 		return 8100;
568 	default:
569 		return 0;
570 	}
571 }
572 
573 /*
574  * Returns maximum possible rate from capability supporting DP 2.1
575  * UHBR20, 13.5 and 10 rates as well. Use only when DP BW allocation
576  * mode is enabled.
577  */
578 static inline u32 tb_dp_cap_get_rate_ext(u32 val)
579 {
580 	if (val & DP_COMMON_CAP_UHBR20)
581 		return 20000;
582 	else if (val & DP_COMMON_CAP_UHBR13_5)
583 		return 13500;
584 	else if (val & DP_COMMON_CAP_UHBR10)
585 		return 10000;
586 
587 	return tb_dp_cap_get_rate(val);
588 }
589 
590 static inline bool tb_dp_is_uhbr_rate(unsigned int rate)
591 {
592 	return rate >= 10000;
593 }
594 
595 static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
596 {
597 	val &= ~DP_COMMON_CAP_RATE_MASK;
598 	switch (rate) {
599 	default:
600 		WARN(1, "invalid rate %u passed, defaulting to 1620 MB/s\n", rate);
601 		fallthrough;
602 	case 1620:
603 		val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
604 		break;
605 	case 2700:
606 		val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
607 		break;
608 	case 5400:
609 		val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
610 		break;
611 	case 8100:
612 		val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
613 		break;
614 	}
615 	return val;
616 }
617 
618 static inline u32 tb_dp_cap_get_lanes(u32 val)
619 {
620 	u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;
621 
622 	switch (lanes) {
623 	case DP_COMMON_CAP_1_LANE:
624 		return 1;
625 	case DP_COMMON_CAP_2_LANES:
626 		return 2;
627 	case DP_COMMON_CAP_4_LANES:
628 		return 4;
629 	default:
630 		return 0;
631 	}
632 }
633 
634 static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
635 {
636 	val &= ~DP_COMMON_CAP_LANES_MASK;
637 	switch (lanes) {
638 	default:
639 		WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
640 		     lanes);
641 		fallthrough;
642 	case 1:
643 		val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
644 		break;
645 	case 2:
646 		val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
647 		break;
648 	case 4:
649 		val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
650 		break;
651 	}
652 	return val;
653 }
654 
655 static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
656 {
657 	/* Tunneling removes the DP 8b/10b or 128b/132b encoding overhead */
658 	if (tb_dp_is_uhbr_rate(rate))
659 		return rate * lanes * 128 / 132;
660 	return rate * lanes * 8 / 10;
661 }
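/*
 * For example, HBR3 x4 yields 8100 * 4 * 8 / 10 = 25920 Mb/s, while
 * UHBR10 x4 yields 10000 * 4 * 128 / 132 = 38787 Mb/s (integer math).
 */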
662 
663 static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
664 				  u32 out_rate, u32 out_lanes, u32 *new_rate,
665 				  u32 *new_lanes)
666 {
667 	static const u32 dp_bw[][2] = {
668 		/* Mb/s, lanes */
669 		{ 8100, 4 }, /* 25920 Mb/s */
670 		{ 5400, 4 }, /* 17280 Mb/s */
671 		{ 8100, 2 }, /* 12960 Mb/s */
672 		{ 2700, 4 }, /* 8640 Mb/s */
673 		{ 5400, 2 }, /* 8640 Mb/s */
674 		{ 8100, 1 }, /* 6480 Mb/s */
675 		{ 1620, 4 }, /* 5184 Mb/s */
676 		{ 5400, 1 }, /* 4320 Mb/s */
677 		{ 2700, 2 }, /* 4320 Mb/s */
678 		{ 1620, 2 }, /* 2592 Mb/s */
679 		{ 2700, 1 }, /* 2160 Mb/s */
680 		{ 1620, 1 }, /* 1296 Mb/s */
681 	};
682 	unsigned int i;
683 
684 	/*
685 	 * Find a combination that can fit into max_bw and does not
686 	 * exceed the maximum rate and lanes supported by the DP OUT and
687 	 * DP IN adapters.
688 	 */
689 	for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
690 		if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
691 			continue;
692 
693 		if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
694 			continue;
695 
696 		if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
697 			*new_rate = dp_bw[i][0];
698 			*new_lanes = dp_bw[i][1];
699 			return 0;
700 		}
701 	}
702 
703 	return -ENOSR;
704 }
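/*
 * For example, with max_bw = 10000 Mb/s, a DP IN capable of HBR3 x4 and a
 * DP OUT capable of HBR2 x4, the first matching entry that fits is
 * 2700 Mb/s x4 = 8640 Mb/s (5400 x4 would fit both adapters but exceeds
 * max_bw).
 */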
705 
706 static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
707 {
708 	u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
709 	struct tb_port *out = tunnel->dst_port;
710 	struct tb_port *in = tunnel->src_port;
711 	int ret, max_bw;
712 
713 	/*
714 	 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
715 	 * newer generation hardware.
716 	 */
717 	if (in->sw->generation < 2 || out->sw->generation < 2)
718 		return 0;
719 
720 	/*
721 	 * Perform connection manager handshake between IN and OUT ports
722 	 * before capabilities exchange can take place.
723 	 */
724 	ret = tb_dp_cm_handshake(in, out, 3000);
725 	if (ret)
726 		return ret;
727 
728 	/* Read both DP_LOCAL_CAP registers */
729 	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
730 			   in->cap_adap + DP_LOCAL_CAP, 1);
731 	if (ret)
732 		return ret;
733 
734 	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
735 			   out->cap_adap + DP_LOCAL_CAP, 1);
736 	if (ret)
737 		return ret;
738 
739 	/* Write IN local caps to OUT remote caps */
740 	ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
741 			    out->cap_adap + DP_REMOTE_CAP, 1);
742 	if (ret)
743 		return ret;
744 
745 	in_rate = tb_dp_cap_get_rate(in_dp_cap);
746 	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
747 	tb_tunnel_dbg(tunnel,
748 		      "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
749 		      in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));
750 
751 	/*
752 	 * If the tunnel bandwidth is limited (max_bw is set) then see
753 	 * if we need to reduce bandwidth to fit there.
754 	 */
755 	out_rate = tb_dp_cap_get_rate(out_dp_cap);
756 	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
757 	bw = tb_dp_bandwidth(out_rate, out_lanes);
758 	tb_tunnel_dbg(tunnel,
759 		      "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
760 		      out_rate, out_lanes, bw);
761 
762 	if (tb_tunnel_direction_downstream(tunnel))
763 		max_bw = tunnel->max_down;
764 	else
765 		max_bw = tunnel->max_up;
766 
767 	if (max_bw && bw > max_bw) {
768 		u32 new_rate, new_lanes, new_bw;
769 
770 		ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
771 					     out_rate, out_lanes, &new_rate,
772 					     &new_lanes);
773 		if (ret) {
774 			tb_tunnel_info(tunnel, "not enough bandwidth\n");
775 			return ret;
776 		}
777 
778 		new_bw = tb_dp_bandwidth(new_rate, new_lanes);
779 		tb_tunnel_dbg(tunnel,
780 			      "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
781 			      new_rate, new_lanes, new_bw);
782 
783 		/*
784 		 * Set new rate and number of lanes before writing it to
785 		 * the IN port remote caps.
786 		 */
787 		out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
788 		out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
789 	}
790 
791 	/*
792 	 * Titan Ridge does not disable AUX timers when it gets
793 	 * SET_CONFIG with SET_LTTPR_MODE set. This causes problems with
794 	 * DP tunneling.
795 	 */
796 	if (tb_route(out->sw) && tb_switch_is_titan_ridge(out->sw)) {
797 		out_dp_cap |= DP_COMMON_CAP_LTTPR_NS;
798 		tb_tunnel_dbg(tunnel, "disabling LTTPR\n");
799 	}
800 
801 	return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
802 			     in->cap_adap + DP_REMOTE_CAP, 1);
803 }
804 
805 static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
806 {
807 	int ret, estimated_bw, granularity, tmp;
808 	struct tb_port *out = tunnel->dst_port;
809 	struct tb_port *in = tunnel->src_port;
810 	u32 out_dp_cap, out_rate, out_lanes;
811 	u32 in_dp_cap, in_rate, in_lanes;
812 	u32 rate, lanes;
813 
814 	if (!bw_alloc_mode)
815 		return 0;
816 
817 	ret = usb4_dp_port_set_cm_bandwidth_mode_supported(in, true);
818 	if (ret)
819 		return ret;
820 
821 	ret = usb4_dp_port_set_group_id(in, in->group->index);
822 	if (ret)
823 		return ret;
824 
825 	/*
826 	 * Get the non-reduced rate and lanes based on the lowest
827 	 * capability of both adapters.
828 	 */
829 	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
830 			   in->cap_adap + DP_LOCAL_CAP, 1);
831 	if (ret)
832 		return ret;
833 
834 	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
835 			   out->cap_adap + DP_LOCAL_CAP, 1);
836 	if (ret)
837 		return ret;
838 
839 	in_rate = tb_dp_cap_get_rate(in_dp_cap);
840 	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
841 	out_rate = tb_dp_cap_get_rate(out_dp_cap);
842 	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
843 
844 	rate = min(in_rate, out_rate);
845 	lanes = min(in_lanes, out_lanes);
846 	tmp = tb_dp_bandwidth(rate, lanes);
847 
848 	tb_tunnel_dbg(tunnel, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
849 		      rate, lanes, tmp);
850 
851 	ret = usb4_dp_port_set_nrd(in, rate, lanes);
852 	if (ret)
853 		return ret;
854 
855 	/*
856 	 * Pick a granularity that supports the maximum possible bandwidth.
857 	 * For that we use the UHBR rates too.
858 	 */
859 	in_rate = tb_dp_cap_get_rate_ext(in_dp_cap);
860 	out_rate = tb_dp_cap_get_rate_ext(out_dp_cap);
861 	rate = min(in_rate, out_rate);
862 	tmp = tb_dp_bandwidth(rate, lanes);
863 
864 	tb_tunnel_dbg(tunnel,
865 		      "maximum bandwidth through allocation mode %u Mb/s x%u = %u Mb/s\n",
866 		      rate, lanes, tmp);
867 
868 	for (granularity = 250; tmp / granularity > 255 && granularity <= 1000;
869 	     granularity *= 2)
870 		;
871 
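	/*
	 * For example, UHBR20 x4 gives tmp = 20000 * 4 * 128 / 132 = 77575
	 * Mb/s; 77575 / 250 = 310 > 255, so the loop settles on a
	 * granularity of 500 (77575 / 500 = 155).
	 */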
872 	tb_tunnel_dbg(tunnel, "granularity %d Mb/s\n", granularity);
873 
874 	/*
875 	 * Returns -EINVAL if granularity above is outside of the
876 	 * accepted ranges.
877 	 */
878 	ret = usb4_dp_port_set_granularity(in, granularity);
879 	if (ret)
880 		return ret;
881 
882 	/*
883 	 * Bandwidth estimation is pretty much what we have in
884 	 * max_up/down fields. For discovery we just read what the
885 	 * estimation was set to.
886 	 */
887 	if (tb_tunnel_direction_downstream(tunnel))
888 		estimated_bw = tunnel->max_down;
889 	else
890 		estimated_bw = tunnel->max_up;
891 
892 	tb_tunnel_dbg(tunnel, "estimated bandwidth %d Mb/s\n", estimated_bw);
893 
894 	ret = usb4_dp_port_set_estimated_bandwidth(in, estimated_bw);
895 	if (ret)
896 		return ret;
897 
898 	/* Initial allocation should be 0 according to the spec */
899 	ret = usb4_dp_port_allocate_bandwidth(in, 0);
900 	if (ret)
901 		return ret;
902 
903 	tb_tunnel_dbg(tunnel, "bandwidth allocation mode enabled\n");
904 	return 0;
905 }
906 
907 static int tb_dp_pre_activate(struct tb_tunnel *tunnel)
908 {
909 	struct tb_port *in = tunnel->src_port;
910 	struct tb_switch *sw = in->sw;
911 	struct tb *tb = in->sw->tb;
912 	int ret;
913 
914 	ret = tb_dp_xchg_caps(tunnel);
915 	if (ret)
916 		return ret;
917 
918 	if (!tb_switch_is_usb4(sw))
919 		return 0;
920 
921 	if (!usb4_dp_port_bandwidth_mode_supported(in))
922 		return 0;
923 
924 	tb_tunnel_dbg(tunnel, "bandwidth allocation mode supported\n");
925 
926 	ret = usb4_dp_port_set_cm_id(in, tb->index);
927 	if (ret)
928 		return ret;
929 
930 	return tb_dp_bandwidth_alloc_mode_enable(tunnel);
931 }
932 
933 static void tb_dp_post_deactivate(struct tb_tunnel *tunnel)
934 {
935 	struct tb_port *in = tunnel->src_port;
936 
937 	if (!usb4_dp_port_bandwidth_mode_supported(in))
938 		return;
939 	if (usb4_dp_port_bandwidth_mode_enabled(in)) {
940 		usb4_dp_port_set_cm_bandwidth_mode_supported(in, false);
941 		tb_tunnel_dbg(tunnel, "bandwidth allocation mode disabled\n");
942 	}
943 }
944 
945 static ktime_t dprx_timeout_to_ktime(int timeout_msec)
946 {
947 	return timeout_msec >= 0 ?
948 		ktime_add_ms(ktime_get(), timeout_msec) : KTIME_MAX;
949 }
950 
951 static int tb_dp_wait_dprx(struct tb_tunnel *tunnel, int timeout_msec)
952 {
953 	ktime_t timeout = dprx_timeout_to_ktime(timeout_msec);
954 	struct tb_port *in = tunnel->src_port;
955 
956 	/*
957 	 * Wait for DPRX done. Normally it should already be set for an
958 	 * active tunnel.
959 	 */
960 	do {
961 		u32 val;
962 		int ret;
963 
964 		ret = tb_port_read(in, &val, TB_CFG_PORT,
965 				   in->cap_adap + DP_COMMON_CAP, 1);
966 		if (ret)
967 			return ret;
968 
969 		if (val & DP_COMMON_CAP_DPRX_DONE)
970 			return 0;
971 
972 		usleep_range(100, 150);
973 	} while (ktime_before(ktime_get(), timeout));
974 
975 	tb_tunnel_dbg(tunnel, "DPRX read timeout\n");
976 	return -ETIMEDOUT;
977 }
978 
979 static void tb_dp_dprx_work(struct work_struct *work)
980 {
981 	struct tb_tunnel *tunnel = container_of(work, typeof(*tunnel), dprx_work.work);
982 	struct tb *tb = tunnel->tb;
983 
984 	if (!tunnel->dprx_canceled) {
985 		mutex_lock(&tb->lock);
986 		if (tb_dp_is_usb4(tunnel->src_port->sw) &&
987 		    tb_dp_wait_dprx(tunnel, TB_DPRX_WAIT_TIMEOUT)) {
988 			if (ktime_before(ktime_get(), tunnel->dprx_timeout)) {
989 				queue_delayed_work(tb->wq, &tunnel->dprx_work,
990 						   msecs_to_jiffies(TB_DPRX_POLL_DELAY));
991 				mutex_unlock(&tb->lock);
992 				return;
993 			}
994 		} else {
995 			tunnel->state = TB_TUNNEL_ACTIVE;
996 		}
997 		mutex_unlock(&tb->lock);
998 	}
999 
1000 	if (tunnel->callback)
1001 		tunnel->callback(tunnel, tunnel->callback_data);
1002 }
1003 
1004 static int tb_dp_dprx_start(struct tb_tunnel *tunnel)
1005 {
1006 	/*
1007 	 * Bump up the reference to keep the tunnel around. It will be
1008 	 * dropped in tb_dp_dprx_stop() once the tunnel is deactivated.
1009 	 */
1010 	tb_tunnel_get(tunnel);
1011 
1012 	tunnel->dprx_started = true;
1013 
1014 	if (tunnel->callback) {
1015 		tunnel->dprx_timeout = dprx_timeout_to_ktime(dprx_timeout);
1016 		queue_delayed_work(tunnel->tb->wq, &tunnel->dprx_work, 0);
1017 		return -EINPROGRESS;
1018 	}
1019 
1020 	return tb_dp_is_usb4(tunnel->src_port->sw) ?
1021 		tb_dp_wait_dprx(tunnel, dprx_timeout) : 0;
1022 }
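/*
 * A hypothetical example of a callback a connection manager could pass to
 * tb_tunnel_alloc_dp(). When a callback is set, tb_dp_dprx_start() returns
 * -EINPROGRESS and tb_dp_dprx_work() invokes the callback, without
 * tb->lock held, once DPRX negotiation completes or times out:
 *
 *	static void my_dprx_done(struct tb_tunnel *tunnel, void *data)
 *	{
 *		if (!tb_tunnel_is_active(tunnel))
 *			tb_tunnel_warn(tunnel, "DPRX failed, tearing down\n");
 *	}
 */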
1023 
1024 static void tb_dp_dprx_stop(struct tb_tunnel *tunnel)
1025 {
1026 	if (tunnel->dprx_started) {
1027 		tunnel->dprx_started = false;
1028 		tunnel->dprx_canceled = true;
1029 		cancel_delayed_work(&tunnel->dprx_work);
1030 		tb_tunnel_put(tunnel);
1031 	}
1032 }
1033 
1034 static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
1035 {
1036 	int ret;
1037 
1038 	if (active) {
1039 		struct tb_path **paths;
1040 		int last;
1041 
1042 		paths = tunnel->paths;
1043 		last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;
1044 
1045 		tb_dp_port_set_hops(tunnel->src_port,
1046 			paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
1047 			paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
1048 			paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);
1049 
1050 		tb_dp_port_set_hops(tunnel->dst_port,
1051 			paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
1052 			paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
1053 			paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
1054 	} else {
1055 		tb_dp_dprx_stop(tunnel);
1056 		tb_dp_port_hpd_clear(tunnel->src_port);
1057 		tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
1058 		if (tb_port_is_dpout(tunnel->dst_port))
1059 			tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
1060 	}
1061 
1062 	ret = tb_dp_port_enable(tunnel->src_port, active);
1063 	if (ret)
1064 		return ret;
1065 
1066 	if (tb_port_is_dpout(tunnel->dst_port)) {
1067 		ret = tb_dp_port_enable(tunnel->dst_port, active);
1068 		if (ret)
1069 			return ret;
1070 	}
1071 
1072 	return active ? tb_dp_dprx_start(tunnel) : 0;
1073 }
1074 
1075 /**
1076  * tb_dp_bandwidth_mode_maximum_bandwidth() - Maximum possible bandwidth
1077  * @tunnel: DP tunnel to check
1078  * @max_bw_rounded: Maximum bandwidth in Mb/s rounded up to the next granularity
1079  *
1080  * Returns maximum possible bandwidth for this tunnel in Mb/s.
1081  */
1082 static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
1083 						  int *max_bw_rounded)
1084 {
1085 	struct tb_port *in = tunnel->src_port;
1086 	int ret, rate, lanes, max_bw;
1087 	u32 cap;
1088 
1089 	/*
1090 	 * The DP IN adapter DP_LOCAL_CAP gets updated to the lowest AUX
1091 	 * read parameter values, so we can use it to determine the
1092 	 * maximum possible bandwidth over this link.
1093 	 *
1094 	 * See USB4 v2 spec 1.0 10.4.4.5.
1095 	 */
1096 	ret = tb_port_read(in, &cap, TB_CFG_PORT,
1097 			   in->cap_adap + DP_LOCAL_CAP, 1);
1098 	if (ret)
1099 		return ret;
1100 
1101 	rate = tb_dp_cap_get_rate_ext(cap);
1102 	lanes = tb_dp_cap_get_lanes(cap);
1103 
1104 	max_bw = tb_dp_bandwidth(rate, lanes);
1105 
1106 	if (max_bw_rounded) {
1107 		ret = usb4_dp_port_granularity(in);
1108 		if (ret < 0)
1109 			return ret;
1110 		*max_bw_rounded = roundup(max_bw, ret);
1111 	}
1112 
1113 	return max_bw;
1114 }
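/*
 * For example, if DP_LOCAL_CAP reports UHBR10 x4, max_bw is
 * 10000 * 4 * 128 / 132 = 38787 Mb/s; with a granularity of 500 Mb/s the
 * rounded value becomes roundup(38787, 500) = 39000 Mb/s.
 */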
1115 
1116 static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
1117 						   int *consumed_up,
1118 						   int *consumed_down)
1119 {
1120 	struct tb_port *in = tunnel->src_port;
1121 	int ret, allocated_bw, max_bw_rounded;
1122 
1123 	if (!usb4_dp_port_bandwidth_mode_enabled(in))
1124 		return -EOPNOTSUPP;
1125 
1126 	if (!tunnel->bw_mode)
1127 		return -EOPNOTSUPP;
1128 
1129 	/* Read what was allocated previously if any */
1130 	ret = usb4_dp_port_allocated_bandwidth(in);
1131 	if (ret < 0)
1132 		return ret;
1133 	allocated_bw = ret;
1134 
1135 	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded);
1136 	if (ret < 0)
1137 		return ret;
1138 	if (allocated_bw == max_bw_rounded)
1139 		allocated_bw = ret;
1140 
1141 	if (tb_tunnel_direction_downstream(tunnel)) {
1142 		*consumed_up = 0;
1143 		*consumed_down = allocated_bw;
1144 	} else {
1145 		*consumed_up = allocated_bw;
1146 		*consumed_down = 0;
1147 	}
1148 
1149 	return 0;
1150 }
1151 
1152 static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
1153 				     int *allocated_down)
1154 {
1155 	struct tb_port *in = tunnel->src_port;
1156 
1157 	/*
1158 	 * If we have already set the allocated bandwidth then use that.
1159 	 * Otherwise we read it from the DPRX.
1160 	 */
1161 	if (usb4_dp_port_bandwidth_mode_enabled(in) && tunnel->bw_mode) {
1162 		int ret, allocated_bw, max_bw_rounded;
1163 
1164 		ret = usb4_dp_port_allocated_bandwidth(in);
1165 		if (ret < 0)
1166 			return ret;
1167 		allocated_bw = ret;
1168 
1169 		ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel,
1170 							     &max_bw_rounded);
1171 		if (ret < 0)
1172 			return ret;
1173 		if (allocated_bw == max_bw_rounded)
1174 			allocated_bw = ret;
1175 
1176 		if (tb_tunnel_direction_downstream(tunnel)) {
1177 			*allocated_up = 0;
1178 			*allocated_down = allocated_bw;
1179 		} else {
1180 			*allocated_up = allocated_bw;
1181 			*allocated_down = 0;
1182 		}
1183 		return 0;
1184 	}
1185 
1186 	return tunnel->consumed_bandwidth(tunnel, allocated_up,
1187 					  allocated_down);
1188 }
1189 
1190 static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
1191 				 int *alloc_down)
1192 {
1193 	struct tb_port *in = tunnel->src_port;
1194 	int max_bw_rounded, ret, tmp;
1195 
1196 	if (!usb4_dp_port_bandwidth_mode_enabled(in))
1197 		return -EOPNOTSUPP;
1198 
1199 	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded);
1200 	if (ret < 0)
1201 		return ret;
1202 
1203 	if (tb_tunnel_direction_downstream(tunnel)) {
1204 		tmp = min(*alloc_down, max_bw_rounded);
1205 		ret = usb4_dp_port_allocate_bandwidth(in, tmp);
1206 		if (ret)
1207 			return ret;
1208 		*alloc_down = tmp;
1209 		*alloc_up = 0;
1210 	} else {
1211 		tmp = min(*alloc_up, max_bw_rounded);
1212 		ret = usb4_dp_port_allocate_bandwidth(in, tmp);
1213 		if (ret)
1214 			return ret;
1215 		*alloc_down = 0;
1216 		*alloc_up = tmp;
1217 	}
1218 
1219 	/* Now we can use BW mode registers to figure out the bandwidth */
1220 	/* TODO: need to handle discovery too */
1221 	tunnel->bw_mode = true;
1222 	return 0;
1223 }
1224 
1225 /* Read cap from tunnel DP IN */
1226 static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
1227 			  u32 *lanes)
1228 {
1229 	struct tb_port *in = tunnel->src_port;
1230 	u32 val;
1231 	int ret;
1232 
1233 	switch (cap) {
1234 	case DP_LOCAL_CAP:
1235 	case DP_REMOTE_CAP:
1236 	case DP_COMMON_CAP:
1237 		break;
1238 
1239 	default:
1240 		tb_tunnel_WARN(tunnel, "invalid capability index %#x\n", cap);
1241 		return -EINVAL;
1242 	}
1243 
1244 	/*
1245 	 * Read from the copied remote cap so that we take into account
1246 	 * if capabilities were reduced during exchange.
1247 	 */
1248 	ret = tb_port_read(in, &val, TB_CFG_PORT, in->cap_adap + cap, 1);
1249 	if (ret)
1250 		return ret;
1251 
1252 	*rate = tb_dp_cap_get_rate(val);
1253 	*lanes = tb_dp_cap_get_lanes(val);
1254 	return 0;
1255 }
1256 
1257 static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
1258 				   int *max_down)
1259 {
1260 	int ret;
1261 
1262 	if (!usb4_dp_port_bandwidth_mode_enabled(tunnel->src_port))
1263 		return -EOPNOTSUPP;
1264 
1265 	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, NULL);
1266 	if (ret < 0)
1267 		return ret;
1268 
1269 	if (tb_tunnel_direction_downstream(tunnel)) {
1270 		*max_up = 0;
1271 		*max_down = ret;
1272 	} else {
1273 		*max_up = ret;
1274 		*max_down = 0;
1275 	}
1276 
1277 	return 0;
1278 }
1279 
1280 static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
1281 				    int *consumed_down)
1282 {
1283 	const struct tb_switch *sw = tunnel->src_port->sw;
1284 	u32 rate = 0, lanes = 0;
1285 	int ret;
1286 
1287 	if (tb_dp_is_usb4(sw)) {
1288 		ret = tb_dp_wait_dprx(tunnel, 0);
1289 		if (ret) {
1290 			if (ret == -ETIMEDOUT) {
1291 				/*
1292 				 * While we wait for DPRX to complete,
1293 				 * the tunnel consumes as much as was
1294 				 * reserved for it initially.
1295 				 */
1296 				ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP,
1297 						     &rate, &lanes);
1298 				if (ret)
1299 					return ret;
1300 			} else {
1301 				return ret;
1302 			}
1303 		} else {
1304 			/*
1305 			 * On USB4 routers check if the bandwidth allocation
1306 			 * mode is enabled first and then read the bandwidth
1307 			 * through those registers.
1308 			 */
1309 			ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up,
1310 								      consumed_down);
1311 			if (ret < 0) {
1312 				if (ret != -EOPNOTSUPP)
1313 					return ret;
1314 			} else if (!ret) {
1315 				return 0;
1316 			}
1317 			ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes);
1318 			if (ret)
1319 				return ret;
1320 		}
1321 	} else if (sw->generation >= 2) {
1322 		ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, &rate, &lanes);
1323 		if (ret)
1324 			return ret;
1325 	} else {
1326 		/* No bandwidth management for legacy devices */
1327 		*consumed_up = 0;
1328 		*consumed_down = 0;
1329 		return 0;
1330 	}
1331 
1332 	if (tb_tunnel_direction_downstream(tunnel)) {
1333 		*consumed_up = 0;
1334 		*consumed_down = tb_dp_bandwidth(rate, lanes);
1335 	} else {
1336 		*consumed_up = tb_dp_bandwidth(rate, lanes);
1337 		*consumed_down = 0;
1338 	}
1339 
1340 	return 0;
1341 }
1342 
1343 static void tb_dp_init_aux_credits(struct tb_path_hop *hop)
1344 {
1345 	struct tb_port *port = hop->in_port;
1346 	struct tb_switch *sw = port->sw;
1347 
1348 	if (tb_port_use_credit_allocation(port))
1349 		hop->initial_credits = sw->min_dp_aux_credits;
1350 	else
1351 		hop->initial_credits = 1;
1352 }
1353 
1354 static void tb_dp_init_aux_path(struct tb_path *path, bool pm_support)
1355 {
1356 	struct tb_path_hop *hop;
1357 
1358 	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
1359 	path->egress_shared_buffer = TB_PATH_NONE;
1360 	path->ingress_fc_enable = TB_PATH_ALL;
1361 	path->ingress_shared_buffer = TB_PATH_NONE;
1362 	path->priority = TB_DP_AUX_PRIORITY;
1363 	path->weight = TB_DP_AUX_WEIGHT;
1364 
1365 	tb_path_for_each_hop(path, hop) {
1366 		tb_dp_init_aux_credits(hop);
1367 		if (pm_support)
1368 			tb_init_pm_support(hop);
1369 	}
1370 }
1371 
1372 static int tb_dp_init_video_credits(struct tb_path_hop *hop)
1373 {
1374 	struct tb_port *port = hop->in_port;
1375 	struct tb_switch *sw = port->sw;
1376 
1377 	if (tb_port_use_credit_allocation(port)) {
1378 		unsigned int nfc_credits;
1379 		size_t max_dp_streams;
1380 
1381 		tb_available_credits(port, &max_dp_streams);
1382 		/*
1383 		 * Read the number of currently allocated NFC credits
1384 		 * from the lane adapter. Since we only use them for DP
1385 		 * tunneling we can use that to figure out how many DP
1386 		 * tunnels already go through the lane adapter.
1387 		 */
1388 		nfc_credits = port->config.nfc_credits &
1389 				ADP_CS_4_NFC_BUFFERS_MASK;
1390 		if (nfc_credits / sw->min_dp_main_credits > max_dp_streams)
1391 			return -ENOSPC;
1392 
1393 		hop->nfc_credits = sw->min_dp_main_credits;
1394 	} else {
1395 		hop->nfc_credits = min(port->total_credits - 2, 12U);
1396 	}
1397 
1398 	return 0;
1399 }
1400 
1401 static int tb_dp_init_video_path(struct tb_path *path, bool pm_support)
1402 {
1403 	struct tb_path_hop *hop;
1404 
1405 	path->egress_fc_enable = TB_PATH_NONE;
1406 	path->egress_shared_buffer = TB_PATH_NONE;
1407 	path->ingress_fc_enable = TB_PATH_NONE;
1408 	path->ingress_shared_buffer = TB_PATH_NONE;
1409 	path->priority = TB_DP_VIDEO_PRIORITY;
1410 	path->weight = TB_DP_VIDEO_WEIGHT;
1411 
1412 	tb_path_for_each_hop(path, hop) {
1413 		int ret;
1414 
1415 		ret = tb_dp_init_video_credits(hop);
1416 		if (ret)
1417 			return ret;
1418 		if (pm_support)
1419 			tb_init_pm_support(hop);
1420 	}
1421 
1422 	return 0;
1423 }
1424 
1425 static void tb_dp_dump(struct tb_tunnel *tunnel)
1426 {
1427 	struct tb_port *in, *out;
1428 	u32 dp_cap, rate, lanes;
1429 
1430 	in = tunnel->src_port;
1431 	out = tunnel->dst_port;
1432 
1433 	if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
1434 			 in->cap_adap + DP_LOCAL_CAP, 1))
1435 		return;
1436 
1437 	rate = tb_dp_cap_get_rate(dp_cap);
1438 	lanes = tb_dp_cap_get_lanes(dp_cap);
1439 
1440 	tb_tunnel_dbg(tunnel,
1441 		      "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
1442 		      rate, lanes, tb_dp_bandwidth(rate, lanes));
1443 
1444 	if (tb_port_read(out, &dp_cap, TB_CFG_PORT,
1445 			 out->cap_adap + DP_LOCAL_CAP, 1))
1446 		return;
1447 
1448 	rate = tb_dp_cap_get_rate(dp_cap);
1449 	lanes = tb_dp_cap_get_lanes(dp_cap);
1450 
1451 	tb_tunnel_dbg(tunnel,
1452 		      "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
1453 		      rate, lanes, tb_dp_bandwidth(rate, lanes));
1454 
1455 	if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
1456 			 in->cap_adap + DP_REMOTE_CAP, 1))
1457 		return;
1458 
1459 	rate = tb_dp_cap_get_rate(dp_cap);
1460 	lanes = tb_dp_cap_get_lanes(dp_cap);
1461 
1462 	tb_tunnel_dbg(tunnel, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
1463 		      rate, lanes, tb_dp_bandwidth(rate, lanes));
1464 }
1465 
1466 /**
1467  * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
1468  * @tb: Pointer to the domain structure
1469  * @in: DP in adapter
1470  * @alloc_hopid: Allocate HopIDs from visited ports
1471  *
1472  * If @in adapter is active, follows the tunnel to the DP out adapter
1473  * and back.
1475  *
1476  * Return: DP tunnel or %NULL if no tunnel found.
1477  */
1478 struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
1479 					bool alloc_hopid)
1480 {
1481 	struct tb_tunnel *tunnel;
1482 	struct tb_port *port;
1483 	struct tb_path *path;
1484 
1485 	if (!tb_dp_port_is_enabled(in))
1486 		return NULL;
1487 
1488 	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
1489 	if (!tunnel)
1490 		return NULL;
1491 
1492 	tunnel->pre_activate = tb_dp_pre_activate;
1493 	tunnel->activate = tb_dp_activate;
1494 	tunnel->post_deactivate = tb_dp_post_deactivate;
1495 	tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
1496 	tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
1497 	tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
1498 	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
1499 	tunnel->src_port = in;
1500 
1501 	path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
1502 				&tunnel->dst_port, "Video", alloc_hopid);
1503 	if (!path) {
1504 		/* Just disable the DP IN port */
1505 		tb_dp_port_enable(in, false);
1506 		goto err_free;
1507 	}
1508 	tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
1509 	if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], false))
1510 		goto err_free;
1511 
1512 	path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX",
1513 				alloc_hopid);
1514 	if (!path)
1515 		goto err_deactivate;
1516 	tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
1517 	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT], false);
1518 
1519 	path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
1520 				&port, "AUX RX", alloc_hopid);
1521 	if (!path)
1522 		goto err_deactivate;
1523 	tunnel->paths[TB_DP_AUX_PATH_IN] = path;
1524 	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN], false);
1525 
1526 	/* Validate that the tunnel is complete */
1527 	if (!tb_port_is_dpout(tunnel->dst_port)) {
1528 		tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
1529 		goto err_deactivate;
1530 	}
1531 
1532 	if (!tb_dp_port_is_enabled(tunnel->dst_port))
1533 		goto err_deactivate;
1534 
1535 	if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
1536 		goto err_deactivate;
1537 
1538 	if (port != tunnel->src_port) {
1539 		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
1540 		goto err_deactivate;
1541 	}
1542 
1543 	tb_dp_dump(tunnel);
1544 
1545 	tb_tunnel_dbg(tunnel, "discovered\n");
1546 	return tunnel;
1547 
1548 err_deactivate:
1549 	tb_tunnel_deactivate(tunnel);
1550 err_free:
1551 	tb_tunnel_put(tunnel);
1552 
1553 	return NULL;
1554 }
1555 
1556 /**
1557  * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
1558  * @tb: Pointer to the domain structure
1559  * @in: DP in adapter port
1560  * @out: DP out adapter port
1561  * @link_nr: Preferred lane adapter when the link is not bonded
1562  * @max_up: Maximum available upstream bandwidth for the DP tunnel.
1563  *	    %0 if no available bandwidth.
1564  * @max_down: Maximum available downstream bandwidth for the DP tunnel.
1565  *	      %0 if no available bandwidth.
1566  * @callback: Optional callback that is called when the DP tunnel is
1567  *	      fully activated (or there is an error)
1568  * @callback_data: Optional data for @callback
1569  *
1570  * Allocates a tunnel between @in and @out that is capable of tunneling
1571  * Display Port traffic. If @callback is not %NULL it will be called
1572  * after tb_tunnel_activate() once the tunnel has been fully activated.
1573  * It can call tb_tunnel_is_active() to check if activation was
1574  * successful (or if it returns %false there was some sort of issue).
1575  * The @callback is called without @tb->lock held.
1576  *
1577  * Return: Returns a tb_tunnel on success or %NULL on failure.
1578  */
1579 struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
1580 				     struct tb_port *out, int link_nr,
1581 				     int max_up, int max_down,
1582 				     void (*callback)(struct tb_tunnel *, void *),
1583 				     void *callback_data)
1584 {
1585 	struct tb_tunnel *tunnel;
1586 	struct tb_path **paths;
1587 	struct tb_path *path;
1588 	bool pm_support;
1589 
1590 	if (WARN_ON(!in->cap_adap || !out->cap_adap))
1591 		return NULL;
1592 
1593 	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
1594 	if (!tunnel)
1595 		return NULL;
1596 
1597 	tunnel->pre_activate = tb_dp_pre_activate;
1598 	tunnel->activate = tb_dp_activate;
1599 	tunnel->post_deactivate = tb_dp_post_deactivate;
1600 	tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
1601 	tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
1602 	tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
1603 	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
1604 	tunnel->src_port = in;
1605 	tunnel->dst_port = out;
1606 	tunnel->max_up = max_up;
1607 	tunnel->max_down = max_down;
1608 	tunnel->callback = callback;
1609 	tunnel->callback_data = callback_data;
1610 	INIT_DELAYED_WORK(&tunnel->dprx_work, tb_dp_dprx_work);
1611 
1612 	paths = tunnel->paths;
1613 	pm_support = usb4_switch_version(in->sw) >= 2;
1614 
1615 	path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
1616 			     link_nr, "Video");
1617 	if (!path)
1618 		goto err_free;
1619 	tb_dp_init_video_path(path, pm_support);
1620 	paths[TB_DP_VIDEO_PATH_OUT] = path;
1621 
1622 	path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
1623 			     TB_DP_AUX_TX_HOPID, link_nr, "AUX TX");
1624 	if (!path)
1625 		goto err_free;
1626 	tb_dp_init_aux_path(path, pm_support);
1627 	paths[TB_DP_AUX_PATH_OUT] = path;
1628 
1629 	path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
1630 			     TB_DP_AUX_RX_HOPID, link_nr, "AUX RX");
1631 	if (!path)
1632 		goto err_free;
1633 	tb_dp_init_aux_path(path, pm_support);
1634 	paths[TB_DP_AUX_PATH_IN] = path;
1635 
1636 	return tunnel;
1637 
1638 err_free:
1639 	tb_tunnel_put(tunnel);
1640 	return NULL;
1641 }
1642 
1643 static unsigned int tb_dma_available_credits(const struct tb_port *port)
1644 {
1645 	const struct tb_switch *sw = port->sw;
1646 	int credits;
1647 
1648 	credits = tb_available_credits(port, NULL);
1649 	if (tb_acpi_may_tunnel_pcie())
1650 		credits -= sw->max_pcie_credits;
1651 	credits -= port->dma_credits;
1652 
1653 	return credits > 0 ? credits : 0;
1654 }
1655 
1656 static int tb_dma_reserve_credits(struct tb_path_hop *hop, unsigned int credits)
1657 {
1658 	struct tb_port *port = hop->in_port;
1659 
1660 	if (tb_port_use_credit_allocation(port)) {
1661 		unsigned int available = tb_dma_available_credits(port);
1662 
1663 		/*
1664 		 * Need to have at least TB_MIN_DMA_CREDITS, otherwise
1665 		 * DMA path cannot be established.
1666 		 */
1667 		if (available < TB_MIN_DMA_CREDITS)
1668 			return -ENOSPC;
1669 
1670 		while (credits > available)
1671 			credits--;
1672 
1673 		tb_port_dbg(port, "reserving %u credits for DMA path\n",
1674 			    credits);
1675 
1676 		port->dma_credits += credits;
1677 	} else {
1678 		if (tb_port_is_null(port))
1679 			credits = port->bonded ? 14 : 6;
1680 		else
1681 			credits = min(port->total_credits, credits);
1682 	}
1683 
1684 	hop->initial_credits = credits;
1685 	return 0;
1686 }
1687 
1688 /* Path from lane adapter to NHI */
1689 static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits)
1690 {
1691 	struct tb_path_hop *hop;
1692 	unsigned int i, tmp;
1693 
1694 	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
1695 	path->ingress_fc_enable = TB_PATH_ALL;
1696 	path->egress_shared_buffer = TB_PATH_NONE;
1697 	path->ingress_shared_buffer = TB_PATH_NONE;
1698 	path->priority = TB_DMA_PRIORITY;
1699 	path->weight = TB_DMA_WEIGHT;
1700 	path->clear_fc = true;
1701 
1702 	/*
1703 	 * First lane adapter is the one connected to the remote host.
1704 	 * We don't tunnel other traffic over this link so we can use all
1705 	 * the credits (except the ones reserved for control traffic).
1706 	 */
1707 	hop = &path->hops[0];
1708 	tmp = min(tb_usable_credits(hop->in_port), credits);
1709 	hop->initial_credits = tmp;
1710 	hop->in_port->dma_credits += tmp;
1711 
1712 	for (i = 1; i < path->path_length; i++) {
1713 		int ret;
1714 
1715 		ret = tb_dma_reserve_credits(&path->hops[i], credits);
1716 		if (ret)
1717 			return ret;
1718 	}
1719 
1720 	return 0;
1721 }
1722 
1723 /* Path from NHI to lane adapter */
1724 static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits)
1725 {
1726 	struct tb_path_hop *hop;
1727 
1728 	path->egress_fc_enable = TB_PATH_ALL;
1729 	path->ingress_fc_enable = TB_PATH_ALL;
1730 	path->egress_shared_buffer = TB_PATH_NONE;
1731 	path->ingress_shared_buffer = TB_PATH_NONE;
1732 	path->priority = TB_DMA_PRIORITY;
1733 	path->weight = TB_DMA_WEIGHT;
1734 	path->clear_fc = true;
1735 
1736 	tb_path_for_each_hop(path, hop) {
1737 		int ret;
1738 
1739 		ret = tb_dma_reserve_credits(hop, credits);
1740 		if (ret)
1741 			return ret;
1742 	}
1743 
1744 	return 0;
1745 }
1746 
1747 static void tb_dma_release_credits(struct tb_path_hop *hop)
1748 {
1749 	struct tb_port *port = hop->in_port;
1750 
1751 	if (tb_port_use_credit_allocation(port)) {
1752 		port->dma_credits -= hop->initial_credits;
1753 
1754 		tb_port_dbg(port, "released %u DMA path credits\n",
1755 			    hop->initial_credits);
1756 	}
1757 }
1758 
1759 static void tb_dma_destroy_path(struct tb_path *path)
1760 {
1761 	struct tb_path_hop *hop;
1762 
1763 	tb_path_for_each_hop(path, hop)
1764 		tb_dma_release_credits(hop);
1765 }
1766 
1767 static void tb_dma_destroy(struct tb_tunnel *tunnel)
1768 {
1769 	int i;
1770 
1771 	for (i = 0; i < tunnel->npaths; i++) {
1772 		if (!tunnel->paths[i])
1773 			continue;
1774 		tb_dma_destroy_path(tunnel->paths[i]);
1775 	}
1776 }
1777 
1778 /**
1779  * tb_tunnel_alloc_dma() - allocate a DMA tunnel
1780  * @tb: Pointer to the domain structure
1781  * @nhi: Host controller port
1782  * @dst: Destination null port which the other domain is connected to
1783  * @transmit_path: HopID used for transmitting packets
1784  * @transmit_ring: NHI ring number used to send packets towards the
1785  *		   other domain. Set to %-1 if TX path is not needed.
1786  * @receive_path: HopID used for receiving packets
1787  * @receive_ring: NHI ring number used to receive packets from the
1788  *		  other domain. Set to %-1 if RX path is not needed.
1789  *
1790  * Return: Returns a tb_tunnel on success or %NULL on failure.
1791  */
1792 struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
1793 				      struct tb_port *dst, int transmit_path,
1794 				      int transmit_ring, int receive_path,
1795 				      int receive_ring)
1796 {
1797 	struct tb_tunnel *tunnel;
1798 	size_t npaths = 0, i = 0;
1799 	struct tb_path *path;
1800 	int credits;
1801 
1802 	/* Ring 0 is reserved for control channel */
1803 	if (WARN_ON(!receive_ring || !transmit_ring))
1804 		return NULL;
1805 
1806 	if (receive_ring > 0)
1807 		npaths++;
1808 	if (transmit_ring > 0)
1809 		npaths++;
1810 
1811 	if (WARN_ON(!npaths))
1812 		return NULL;
1813 
1814 	tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA);
1815 	if (!tunnel)
1816 		return NULL;
1817 
1818 	tunnel->src_port = nhi;
1819 	tunnel->dst_port = dst;
1820 	tunnel->destroy = tb_dma_destroy;
1821 
1822 	credits = min_not_zero(dma_credits, nhi->sw->max_dma_credits);
1823 
1824 	if (receive_ring > 0) {
1825 		path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
1826 				     "DMA RX");
1827 		if (!path)
1828 			goto err_free;
1829 		tunnel->paths[i++] = path;
1830 		if (tb_dma_init_rx_path(path, credits)) {
1831 			tb_tunnel_dbg(tunnel, "not enough buffers for RX path\n");
1832 			goto err_free;
1833 		}
1834 	}
1835 
1836 	if (transmit_ring > 0) {
1837 		path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
1838 				     "DMA TX");
1839 		if (!path)
1840 			goto err_free;
1841 		tunnel->paths[i++] = path;
1842 		if (tb_dma_init_tx_path(path, credits)) {
1843 			tb_tunnel_dbg(tunnel, "not enough buffers for TX path\n");
1844 			goto err_free;
1845 		}
1846 	}
1847 
1848 	return tunnel;
1849 
1850 err_free:
1851 	tb_tunnel_put(tunnel);
1852 	return NULL;
1853 }
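
/*
 * Illustrative sketch only, not used by the driver: a connection
 * manager could set up an XDomain DMA tunnel roughly like this. The
 * HopIDs (8) and ring numbers (1) are made-up example values; real
 * callers get them from the XDomain service negotiation. The function
 * name is a made-up example too.
 */
static inline struct tb_tunnel *
example_alloc_dma_tunnel(struct tb *tb, struct tb_port *nhi,
			 struct tb_port *dst)
{
	struct tb_tunnel *tunnel;

	/* Both TX and RX paths, example HopID 8 and ring 1 each way */
	tunnel = tb_tunnel_alloc_dma(tb, nhi, dst, 8, 1, 8, 1);
	if (!tunnel)
		return NULL;

	if (tb_tunnel_activate(tunnel)) {
		tb_tunnel_put(tunnel);
		return NULL;
	}

	return tunnel;
}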
1854 
1855 /**
1856  * tb_tunnel_match_dma() - Match DMA tunnel
1857  * @tunnel: Tunnel to match
1858  * @transmit_path: HopID used for transmitting packets. Pass %-1 to ignore.
1859  * @transmit_ring: NHI ring number used to send packets towards the
1860  *		   other domain. Pass %-1 to ignore.
1861  * @receive_path: HopID used for receiving packets. Pass %-1 to ignore.
1862  * @receive_ring: NHI ring number used to receive packets from the
1863  *		  other domain. Pass %-1 to ignore.
1864  *
1865  * This function can be used to match a specific DMA tunnel, if there
1866  * are multiple DMA tunnels going through the same XDomain connection.
1867  * Returns true if there is a match and false otherwise.
1868  */
1869 bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
1870 			 int transmit_ring, int receive_path, int receive_ring)
1871 {
1872 	const struct tb_path *tx_path = NULL, *rx_path = NULL;
1873 	int i;
1874 
1875 	if (!receive_ring || !transmit_ring)
1876 		return false;
1877 
1878 	for (i = 0; i < tunnel->npaths; i++) {
1879 		const struct tb_path *path = tunnel->paths[i];
1880 
1881 		if (!path)
1882 			continue;
1883 
1884 		if (tb_port_is_nhi(path->hops[0].in_port))
1885 			tx_path = path;
1886 		else if (tb_port_is_nhi(path->hops[path->path_length - 1].out_port))
1887 			rx_path = path;
1888 	}
1889 
1890 	if (transmit_ring > 0 || transmit_path > 0) {
1891 		if (!tx_path)
1892 			return false;
1893 		if (transmit_ring > 0 &&
1894 		    (tx_path->hops[0].in_hop_index != transmit_ring))
1895 			return false;
1896 		if (transmit_path > 0 &&
1897 		    (tx_path->hops[tx_path->path_length - 1].next_hop_index != transmit_path))
1898 			return false;
1899 	}
1900 
1901 	if (receive_ring > 0 || receive_path > 0) {
1902 		if (!rx_path)
1903 			return false;
1904 		if (receive_path > 0 &&
1905 		    (rx_path->hops[0].in_hop_index != receive_path))
1906 			return false;
1907 		if (receive_ring > 0 &&
1908 		    (rx_path->hops[rx_path->path_length - 1].next_hop_index != receive_ring))
1909 			return false;
1910 	}
1911 
1912 	return true;
1913 }
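
/*
 * Illustrative sketch only: match a tunnel by its RX ring alone,
 * passing %-1 for the parameters we do not care about. The function
 * name is a made-up example.
 */
static inline bool example_tunnel_uses_rx_ring(const struct tb_tunnel *tunnel,
					       int receive_ring)
{
	return tb_tunnel_match_dma(tunnel, -1, -1, -1, receive_ring);
}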
1914 
1915 static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
1916 {
1917 	int ret, up_max_rate, down_max_rate;
1918 
1919 	ret = usb4_usb3_port_max_link_rate(up);
1920 	if (ret < 0)
1921 		return ret;
1922 	up_max_rate = ret;
1923 
1924 	ret = usb4_usb3_port_max_link_rate(down);
1925 	if (ret < 0)
1926 		return ret;
1927 	down_max_rate = ret;
1928 
1929 	return min(up_max_rate, down_max_rate);
1930 }
1931 
1932 static int tb_usb3_pre_activate(struct tb_tunnel *tunnel)
1933 {
1934 	tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
1935 		      tunnel->allocated_up, tunnel->allocated_down);
1936 
1937 	return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
1938 						 &tunnel->allocated_up,
1939 						 &tunnel->allocated_down);
1940 }
1941 
1942 static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
1943 {
1944 	int res;
1945 
1946 	res = tb_usb3_port_enable(tunnel->src_port, activate);
1947 	if (res)
1948 		return res;
1949 
1950 	if (tb_port_is_usb3_up(tunnel->dst_port))
1951 		return tb_usb3_port_enable(tunnel->dst_port, activate);
1952 
1953 	return 0;
1954 }
1955 
1956 static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
1957 		int *consumed_up, int *consumed_down)
1958 {
1959 	struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
1960 	int pcie_weight = tb_acpi_may_tunnel_pcie() ? TB_PCI_WEIGHT : 0;
1961 
1962 	/*
1963 	 * PCIe tunneling, if enabled, affects the USB3 bandwidth so
1964 	 * take it into account here.
1965 	 */
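	/*
	 * For example (illustrative numbers only): with allocated_up of
	 * 900 Mb/s, TB_USB3_WEIGHT of 2 and PCIe tunneling enabled
	 * (TB_PCI_WEIGHT of 1), the reported consumption is
	 * 900 * (2 + 1) / 2 = 1350 Mb/s.
	 */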
1966 	*consumed_up = tunnel->allocated_up *
1967 		(TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
1968 	*consumed_down = tunnel->allocated_down *
1969 		(TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
1970 
1971 	if (tb_port_get_link_generation(port) >= 4) {
1972 		*consumed_up = max(*consumed_up, USB4_V2_USB3_MIN_BANDWIDTH);
1973 		*consumed_down = max(*consumed_down, USB4_V2_USB3_MIN_BANDWIDTH);
1974 	}
1975 
1976 	return 0;
1977 }
1978 
1979 static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
1980 {
1981 	int ret;
1982 
1983 	ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
1984 					       &tunnel->allocated_up,
1985 					       &tunnel->allocated_down);
1986 	if (ret)
1987 		return ret;
1988 
1989 	tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
1990 		      tunnel->allocated_up, tunnel->allocated_down);
1991 	return 0;
1992 }
1993 
1994 static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
1995 						int *available_up,
1996 						int *available_down)
1997 {
1998 	int ret, max_rate, allocate_up, allocate_down;
1999 
2000 	ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port);
2001 	if (ret < 0) {
2002 		tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
2003 		return;
2004 	}
2005 
2006 	/*
2007 	 * 90% of the max rate can be allocated for isochronous
2008 	 * transfers.
2009 	 */
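	/*
	 * E.g. (illustrative): a 10000 Mb/s USB3 link leaves at most
	 * 10000 * 90 / 100 = 9000 Mb/s for the tunnel.
	 */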
2010 	max_rate = ret * 90 / 100;
2011 
2012 	/* No need to reclaim if already at maximum */
2013 	if (tunnel->allocated_up >= max_rate &&
2014 	    tunnel->allocated_down >= max_rate)
2015 		return;
2016 
2017 	/* Don't go lower than what is already allocated */
2018 	allocate_up = min(max_rate, *available_up);
2019 	if (allocate_up < tunnel->allocated_up)
2020 		allocate_up = tunnel->allocated_up;
2021 
2022 	allocate_down = min(max_rate, *available_down);
2023 	if (allocate_down < tunnel->allocated_down)
2024 		allocate_down = tunnel->allocated_down;
2025 
2026 	/* If there are no changes there is no need to do more */
2027 	if (allocate_up == tunnel->allocated_up &&
2028 	    allocate_down == tunnel->allocated_down)
2029 		return;
2030 
2031 	ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
2032 						&allocate_down);
2033 	if (ret) {
2034 		tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
2035 		return;
2036 	}
2037 
2038 	tunnel->allocated_up = allocate_up;
2039 	*available_up -= tunnel->allocated_up;
2040 
2041 	tunnel->allocated_down = allocate_down;
2042 	*available_down -= tunnel->allocated_down;
2043 
2044 	tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
2045 		      tunnel->allocated_up, tunnel->allocated_down);
2046 }
2047 
2048 static void tb_usb3_init_credits(struct tb_path_hop *hop)
2049 {
2050 	struct tb_port *port = hop->in_port;
2051 	struct tb_switch *sw = port->sw;
2052 	unsigned int credits;
2053 
2054 	if (tb_port_use_credit_allocation(port)) {
2055 		credits = sw->max_usb3_credits;
2056 	} else {
2057 		if (tb_port_is_null(port))
2058 			credits = port->bonded ? 32 : 16;
2059 		else
2060 			credits = 7;
2061 	}
2062 
2063 	hop->initial_credits = credits;
2064 }
2065 
2066 static void tb_usb3_init_path(struct tb_path *path)
2067 {
2068 	struct tb_path_hop *hop;
2069 
2070 	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
2071 	path->egress_shared_buffer = TB_PATH_NONE;
2072 	path->ingress_fc_enable = TB_PATH_ALL;
2073 	path->ingress_shared_buffer = TB_PATH_NONE;
2074 	path->priority = TB_USB3_PRIORITY;
2075 	path->weight = TB_USB3_WEIGHT;
2076 	path->drop_packages = 0;
2077 
2078 	tb_path_for_each_hop(path, hop)
2079 		tb_usb3_init_credits(hop);
2080 }
2081 
2082 /**
2083  * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
2084  * @tb: Pointer to the domain structure
2085  * @down: USB3 downstream adapter
2086  * @alloc_hopid: Allocate HopIDs from visited ports
2087  *
2088  * If @down adapter is active, follows the tunnel to the USB3 upstream
2089  * adapter and back. Returns the discovered tunnel or %NULL if there was
2090  * no tunnel.
2091  */
2092 struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
2093 					  bool alloc_hopid)
2094 {
2095 	struct tb_tunnel *tunnel;
2096 	struct tb_path *path;
2097 
2098 	if (!tb_usb3_port_is_enabled(down))
2099 		return NULL;
2100 
2101 	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
2102 	if (!tunnel)
2103 		return NULL;
2104 
2105 	tunnel->activate = tb_usb3_activate;
2106 	tunnel->src_port = down;
2107 
2108 	/*
2109 	 * Discover both paths even if they are not complete. We will
2110 	 * clean them up by calling tb_tunnel_deactivate() below in that
2111 	 * case.
2112 	 */
2113 	path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
2114 				&tunnel->dst_port, "USB3 Down", alloc_hopid);
2115 	if (!path) {
2116 		/* Just disable the downstream port */
2117 		tb_usb3_port_enable(down, false);
2118 		goto err_free;
2119 	}
2120 	tunnel->paths[TB_USB3_PATH_DOWN] = path;
2121 	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);
2122 
2123 	path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
2124 				"USB3 Up", alloc_hopid);
2125 	if (!path)
2126 		goto err_deactivate;
2127 	tunnel->paths[TB_USB3_PATH_UP] = path;
2128 	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);
2129 
2130 	/* Validate that the tunnel is complete */
2131 	if (!tb_port_is_usb3_up(tunnel->dst_port)) {
2132 		tb_port_warn(tunnel->dst_port,
2133 			     "path does not end on a USB3 adapter, cleaning up\n");
2134 		goto err_deactivate;
2135 	}
2136 
2137 	if (down != tunnel->src_port) {
2138 		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
2139 		goto err_deactivate;
2140 	}
2141 
2142 	if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
2143 		tb_tunnel_warn(tunnel,
2144 			       "tunnel is not fully activated, cleaning up\n");
2145 		goto err_deactivate;
2146 	}
2147 
2148 	if (!tb_route(down->sw)) {
2149 		int ret;
2150 
2151 		/*
2152 		 * Read the initial bandwidth allocation for the first
2153 		 * hop tunnel.
2154 		 */
2155 		ret = usb4_usb3_port_allocated_bandwidth(down,
2156 			&tunnel->allocated_up, &tunnel->allocated_down);
2157 		if (ret)
2158 			goto err_deactivate;
2159 
2160 		tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
2161 			      tunnel->allocated_up, tunnel->allocated_down);
2162 
2163 		tunnel->pre_activate = tb_usb3_pre_activate;
2164 		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
2165 		tunnel->release_unused_bandwidth =
2166 			tb_usb3_release_unused_bandwidth;
2167 		tunnel->reclaim_available_bandwidth =
2168 			tb_usb3_reclaim_available_bandwidth;
2169 	}
2170 
2171 	tb_tunnel_dbg(tunnel, "discovered\n");
2172 	return tunnel;
2173 
2174 err_deactivate:
2175 	tb_tunnel_deactivate(tunnel);
2176 err_free:
2177 	tb_tunnel_put(tunnel);
2178 
2179 	return NULL;
2180 }
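
/*
 * Illustrative sketch only: picking up a USB3 tunnel that the boot
 * firmware may have set up. @down is assumed to be a USB3 downstream
 * adapter found by the caller; the function name is a made-up example.
 */
static inline struct tb_tunnel *example_discover_usb3(struct tb *tb,
						      struct tb_port *down)
{
	/* Returns NULL if nothing is tunneled through @down */
	return tb_tunnel_discover_usb3(tb, down, true);
}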
2181 
2182 /**
2183  * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
2184  * @tb: Pointer to the domain structure
2185  * @up: USB3 upstream adapter port
2186  * @down: USB3 downstream adapter port
2187  * @max_up: Maximum available upstream bandwidth for the USB3 tunnel.
2188  *	    %0 if no available bandwidth.
2189  * @max_down: Maximum available downstream bandwidth for the USB3 tunnel.
2190  *	      %0 if no available bandwidth.
2191  *
2192  * Allocate a USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
2193  * @TB_TYPE_USB3_DOWN.
2194  *
2195  * Return: Returns a tb_tunnel on success or %NULL on failure.
2196  */
2197 struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
2198 				       struct tb_port *down, int max_up,
2199 				       int max_down)
2200 {
2201 	struct tb_tunnel *tunnel;
2202 	struct tb_path *path;
2203 	int max_rate = 0;
2204 
2205 	if (!tb_route(down->sw) && (max_up > 0 || max_down > 0)) {
2206 		/*
2207 		 * For USB3 isochronous transfers, we allow bandwidth that is
2208 		 * no higher than 90% of the maximum bandwidth supported by
2209 		 * the USB3 adapters.
2210 		 */
2211 		max_rate = tb_usb3_max_link_rate(down, up);
2212 		if (max_rate < 0)
2213 			return NULL;
2214 
2215 		max_rate = max_rate * 90 / 100;
2216 		tb_port_dbg(up, "maximum required bandwidth for USB3 tunnel %d Mb/s\n",
2217 			    max_rate);
2218 	}
2219 
2220 	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
2221 	if (!tunnel)
2222 		return NULL;
2223 
2224 	tunnel->activate = tb_usb3_activate;
2225 	tunnel->src_port = down;
2226 	tunnel->dst_port = up;
2227 	tunnel->max_up = max_up;
2228 	tunnel->max_down = max_down;
2229 
2230 	path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
2231 			     "USB3 Down");
2232 	if (!path) {
2233 		tb_tunnel_put(tunnel);
2234 		return NULL;
2235 	}
2236 	tb_usb3_init_path(path);
2237 	tunnel->paths[TB_USB3_PATH_DOWN] = path;
2238 
2239 	path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
2240 			     "USB3 Up");
2241 	if (!path) {
2242 		tb_tunnel_put(tunnel);
2243 		return NULL;
2244 	}
2245 	tb_usb3_init_path(path);
2246 	tunnel->paths[TB_USB3_PATH_UP] = path;
2247 
2248 	if (!tb_route(down->sw)) {
2249 		tunnel->allocated_up = min(max_rate, max_up);
2250 		tunnel->allocated_down = min(max_rate, max_down);
2251 
2252 		tunnel->pre_activate = tb_usb3_pre_activate;
2253 		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
2254 		tunnel->release_unused_bandwidth =
2255 			tb_usb3_release_unused_bandwidth;
2256 		tunnel->reclaim_available_bandwidth =
2257 			tb_usb3_reclaim_available_bandwidth;
2258 	}
2259 
2260 	return tunnel;
2261 }
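
/*
 * Illustrative sketch only: allocating a USB3 tunnel between adapters
 * already looked up by the caller. The 2000/4000 Mb/s limits are
 * example values; for a first hop tunnel they cap the initial
 * bandwidth allocation as described above. The function name is a
 * made-up example.
 */
static inline struct tb_tunnel *example_alloc_usb3(struct tb *tb,
						   struct tb_port *up,
						   struct tb_port *down)
{
	return tb_tunnel_alloc_usb3(tb, up, down, 2000, 4000);
}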
2262 
2263 /**
2264  * tb_tunnel_is_invalid() - Check whether an activated tunnel is still valid
2265  * @tunnel: Tunnel to check
2266  */
2267 bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
2268 {
2269 	int i;
2270 
2271 	for (i = 0; i < tunnel->npaths; i++) {
2272 		WARN_ON(!tunnel->paths[i]->activated);
2273 		if (tb_path_is_invalid(tunnel->paths[i]))
2274 			return true;
2275 	}
2276 
2277 	return false;
2278 }
2279 
2280 /**
2281  * tb_tunnel_activate() - activate a tunnel
2282  * @tunnel: Tunnel to activate
2283  *
2284  * Return: 0 on success and negative errno in case of failure.
2285  * Specifically returns %-EINPROGRESS if the tunnel activation is still
2286  * in progress (that's for DP tunnels to complete DPRX capabilities
2287  * read).
2288  */
2289 int tb_tunnel_activate(struct tb_tunnel *tunnel)
2290 {
2291 	int res, i;
2292 
2293 	tb_tunnel_dbg(tunnel, "activating\n");
2294 
2295 	/*
2296 	 * Make sure all paths are properly disabled before enabling
2297 	 * them again.
2298 	 */
2299 	for (i = 0; i < tunnel->npaths; i++) {
2300 		if (tunnel->paths[i]->activated) {
2301 			tb_path_deactivate(tunnel->paths[i]);
2302 			tunnel->paths[i]->activated = false;
2303 		}
2304 	}
2305 
2306 	tunnel->state = TB_TUNNEL_ACTIVATING;
2307 
2308 	if (tunnel->pre_activate) {
2309 		res = tunnel->pre_activate(tunnel);
2310 		if (res)
2311 			return res;
2312 	}
2313 
2314 	for (i = 0; i < tunnel->npaths; i++) {
2315 		res = tb_path_activate(tunnel->paths[i]);
2316 		if (res)
2317 			goto err;
2318 	}
2319 
2320 	if (tunnel->activate) {
2321 		res = tunnel->activate(tunnel, true);
2322 		if (res) {
2323 			if (res == -EINPROGRESS)
2324 				return res;
2325 			goto err;
2326 		}
2327 	}
2328 
2329 	tunnel->state = TB_TUNNEL_ACTIVE;
2330 	return 0;
2331 
2332 err:
2333 	tb_tunnel_warn(tunnel, "activation failed\n");
2334 	tb_tunnel_deactivate(tunnel);
2335 	return res;
2336 }
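
/*
 * Illustrative sketch only: activation with the DP specific
 * %-EINPROGRESS result treated as "still in progress" rather than as a
 * failure. The function name is a made-up example.
 */
static inline int example_activate(struct tb_tunnel *tunnel)
{
	int ret;

	ret = tb_tunnel_activate(tunnel);
	if (ret == -EINPROGRESS) {
		/* DPRX capabilities read still ongoing, not a failure */
		return 0;
	}

	return ret;
}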
2337 
2338 /**
2339  * tb_tunnel_deactivate() - deactivate a tunnel
2340  * @tunnel: Tunnel to deactivate
2341  */
2342 void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
2343 {
2344 	int i;
2345 
2346 	tb_tunnel_dbg(tunnel, "deactivating\n");
2347 
2348 	if (tunnel->activate)
2349 		tunnel->activate(tunnel, false);
2350 
2351 	for (i = 0; i < tunnel->npaths; i++) {
2352 		if (tunnel->paths[i] && tunnel->paths[i]->activated)
2353 			tb_path_deactivate(tunnel->paths[i]);
2354 	}
2355 
2356 	if (tunnel->post_deactivate)
2357 		tunnel->post_deactivate(tunnel);
2358 
2359 	tunnel->state = TB_TUNNEL_INACTIVE;
2360 }
2361 
2362 /**
2363  * tb_tunnel_port_on_path() - Does the tunnel go through port
2364  * @tunnel: Tunnel to check
2365  * @port: Port to check
2366  *
2367  * Returns true if @tunnel goes through @port (direction does not matter),
2368  * false otherwise.
2369  */
2370 bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
2371 			    const struct tb_port *port)
2372 {
2373 	int i;
2374 
2375 	for (i = 0; i < tunnel->npaths; i++) {
2376 		if (!tunnel->paths[i])
2377 			continue;
2378 
2379 		if (tb_path_port_on_path(tunnel->paths[i], port))
2380 			return true;
2381 	}
2382 
2383 	return false;
2384 }
2385 
2386 /* Returns true if tb_tunnel_activate() has been called for the tunnel */
2387 static bool tb_tunnel_is_activated(const struct tb_tunnel *tunnel)
2388 {
2389 	return tunnel->state == TB_TUNNEL_ACTIVATING || tb_tunnel_is_active(tunnel);
2390 }
2391 
2392 /**
2393  * tb_tunnel_maximum_bandwidth() - Return maximum possible bandwidth
2394  * @tunnel: Tunnel to check
2395  * @max_up: Maximum upstream bandwidth in Mb/s
2396  * @max_down: Maximum downstream bandwidth in Mb/s
2397  *
2398  * Returns the maximum possible bandwidth this tunnel can consume if it
2399  * is not limited by other bandwidth clients. If the tunnel does not
2400  * support this, returns %-EOPNOTSUPP.
2401  */
2402 int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
2403 				int *max_down)
2404 {
2405 	if (!tb_tunnel_is_active(tunnel))
2406 		return -ENOTCONN;
2407 
2408 	if (tunnel->maximum_bandwidth)
2409 		return tunnel->maximum_bandwidth(tunnel, max_up, max_down);
2410 	return -EOPNOTSUPP;
2411 }
2412 
2413 /**
2414  * tb_tunnel_allocated_bandwidth() - Return bandwidth allocated for the tunnel
2415  * @tunnel: Tunnel to check
2416  * @allocated_up: Currently allocated upstream bandwidth in Mb/s is stored here
2417  * @allocated_down: Currently allocated downstream bandwidth in Mb/s is
2418  *		    stored here
2419  *
2420  * Returns the bandwidth allocated for the tunnel. This may be higher
2421  * than what the tunnel actually consumes.
2422  */
2423 int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
2424 				  int *allocated_down)
2425 {
2426 	if (!tb_tunnel_is_active(tunnel))
2427 		return -ENOTCONN;
2428 
2429 	if (tunnel->allocated_bandwidth)
2430 		return tunnel->allocated_bandwidth(tunnel, allocated_up,
2431 						   allocated_down);
2432 	return -EOPNOTSUPP;
2433 }
2434 
2435 /**
2436  * tb_tunnel_alloc_bandwidth() - Change tunnel bandwidth allocation
2437  * @tunnel: Tunnel whose bandwidth allocation to change
2438  * @alloc_up: New upstream bandwidth in Mb/s
2439  * @alloc_down: New downstream bandwidth in Mb/s
2440  *
2441  * Tries to change the tunnel bandwidth allocation. If it succeeds,
2442  * returns %0 and updates @alloc_up and @alloc_down to what was actually
2443  * allocated (which may not be the same as passed originally). Returns
2444  * negative errno in case of failure.
2445  */
2446 int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
2447 			      int *alloc_down)
2448 {
2449 	if (!tb_tunnel_is_active(tunnel))
2450 		return -ENOTCONN;
2451 
2452 	if (tunnel->alloc_bandwidth)
2453 		return tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down);
2454 
2455 	return -EOPNOTSUPP;
2456 }
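
/*
 * Illustrative sketch only: @alloc_up/@alloc_down are in/out. The
 * requested 1000/8000 Mb/s are example values; on success the variables
 * hold what was actually granted, which may differ from the request.
 * The function name is a made-up example.
 */
static inline void example_alloc_bandwidth(struct tb_tunnel *tunnel)
{
	int up = 1000, down = 8000;

	if (!tb_tunnel_alloc_bandwidth(tunnel, &up, &down))
		tb_tunnel_dbg(tunnel, "granted %d/%d Mb/s\n", up, down);
}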
2457 
2458 /**
2459  * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
2460  * @tunnel: Tunnel to check
2461  * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
2462  *		 Can be %NULL.
2463  * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
2464  *		   Can be %NULL.
2465  *
2466  * Stores the amount of isochronous bandwidth @tunnel consumes in
2467  * @consumed_up and @consumed_down. In case of success returns %0,
2468  * negative errno otherwise.
2469  */
2470 int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
2471 				 int *consumed_down)
2472 {
2473 	int up_bw = 0, down_bw = 0;
2474 
2475 	/*
2476 	 * Here we need to distinguish inactive tunnels from tunnels that
2477 	 * are either fully active or whose activation has been started.
2478 	 * The latter matters for DP tunnels where we must report the
2479 	 * consumed bandwidth as the maximum we gave them until the DPRX
2480 	 * capabilities read is done by the graphics driver.
2481 	 */
2482 	if (tb_tunnel_is_activated(tunnel) && tunnel->consumed_bandwidth) {
2483 		int ret;
2484 
2485 		ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
2486 		if (ret)
2487 			return ret;
2488 	}
2489 
2490 	if (consumed_up)
2491 		*consumed_up = up_bw;
2492 	if (consumed_down)
2493 		*consumed_down = down_bw;
2494 
2495 	tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw, down_bw);
2496 	return 0;
2497 }
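
/*
 * Illustrative sketch only: reading just the downstream consumption by
 * passing %NULL for the upstream pointer. The function name is a
 * made-up example.
 */
static inline int example_consumed_down(struct tb_tunnel *tunnel,
					int *consumed_down)
{
	return tb_tunnel_consumed_bandwidth(tunnel, NULL, consumed_down);
}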
2498 
2499 /**
2500  * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
2501  * @tunnel: Tunnel whose unused bandwidth to release
2502  *
2503  * If the tunnel supports dynamic bandwidth management (USB3 tunnels at
2504  * the moment), this function makes it release all the unused bandwidth.
2505  *
2506  * Returns %0 in case of success and negative errno otherwise.
2507  */
2508 int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
2509 {
2510 	if (!tb_tunnel_is_active(tunnel))
2511 		return -ENOTCONN;
2512 
2513 	if (tunnel->release_unused_bandwidth) {
2514 		int ret;
2515 
2516 		ret = tunnel->release_unused_bandwidth(tunnel);
2517 		if (ret)
2518 			return ret;
2519 	}
2520 
2521 	return 0;
2522 }
2523 
2524 /**
2525  * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
2526  * @tunnel: Tunnel reclaiming available bandwidth
2527  * @available_up: Available upstream bandwidth (in Mb/s)
2528  * @available_down: Available downstream bandwidth (in Mb/s)
2529  *
2530  * Reclaims bandwidth from @available_up and @available_down and updates
2531  * the variables accordingly (e.g. decreases both according to what was
2532  * reclaimed by the tunnel). If nothing was reclaimed, the values are
2533  * kept as is.
2534  */
2535 void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
2536 					   int *available_up,
2537 					   int *available_down)
2538 {
2539 	if (!tb_tunnel_is_active(tunnel))
2540 		return;
2541 
2542 	if (tunnel->reclaim_available_bandwidth)
2543 		tunnel->reclaim_available_bandwidth(tunnel, available_up,
2544 						    available_down);
2545 }
2546 
2547 const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel)
2548 {
2549 	return tb_tunnel_names[tunnel->type];
2550 }
2551