// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - Tunneling support
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/ktime.h>
#include <linux/string_helpers.h>

#include "tunnel.h"
#include "tb.h"

/* PCIe adapters always use HopID 8 for both directions */
#define TB_PCI_HOPID			8

#define TB_PCI_PATH_DOWN		0
#define TB_PCI_PATH_UP			1

#define TB_PCI_PRIORITY			3
#define TB_PCI_WEIGHT			1

/* USB3 adapters always use HopID 8 for both directions */
#define TB_USB3_HOPID			8

#define TB_USB3_PATH_DOWN		0
#define TB_USB3_PATH_UP			1

#define TB_USB3_PRIORITY		3
#define TB_USB3_WEIGHT			2

/* DP adapters use HopID 8 for AUX and 9 for Video */
#define TB_DP_AUX_TX_HOPID		8
#define TB_DP_AUX_RX_HOPID		8
#define TB_DP_VIDEO_HOPID		9

#define TB_DP_VIDEO_PATH_OUT		0
#define TB_DP_AUX_PATH_OUT		1
#define TB_DP_AUX_PATH_IN		2

#define TB_DP_VIDEO_PRIORITY		1
#define TB_DP_VIDEO_WEIGHT		1

#define TB_DP_AUX_PRIORITY		2
#define TB_DP_AUX_WEIGHT		1

/* Minimum number of credits needed for PCIe path */
#define TB_MIN_PCIE_CREDITS		6U
/*
 * Number of credits we try to allocate for each DMA path if not limited
 * by the host router baMaxHI.
 */
#define TB_DMA_CREDITS			14
/* Minimum number of credits for DMA path */
#define TB_MIN_DMA_CREDITS		1

#define TB_DMA_PRIORITY			5
#define TB_DMA_WEIGHT			1

/*
 * Reserve additional bandwidth for USB 3.x and PCIe bulk traffic
 * according to the USB4 v2 Connection Manager guide. Taking the path
 * weights into account, this ends up reserving 1500 Mb/s for PCIe
 * (weight 1) and 3000 Mb/s for USB 3.x (weight 2).
 */
#define USB4_V2_PCI_MIN_BANDWIDTH	(1500 * TB_PCI_WEIGHT)
#define USB4_V2_USB3_MIN_BANDWIDTH	(1500 * TB_USB3_WEIGHT)

static unsigned int dma_credits = TB_DMA_CREDITS;
module_param(dma_credits, uint, 0444);
MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: "
                __MODULE_STRING(TB_DMA_CREDITS) ")");

static bool bw_alloc_mode = true;
module_param(bw_alloc_mode, bool, 0444);
MODULE_PARM_DESC(bw_alloc_mode,
		 "enable bandwidth allocation mode if supported (default: true)");

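/*
 * For example, a smaller DMA credit budget and no bandwidth allocation
 * mode could be requested at module load time (hypothetical values):
 *
 *	modprobe thunderbolt dma_credits=8 bw_alloc_mode=0
 */
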
static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };

static inline unsigned int tb_usable_credits(const struct tb_port *port)
{
	return port->total_credits - port->ctl_credits;
}

/**
 * tb_available_credits() - Available credits for PCIe and DMA
 * @port: Lane adapter to check
 * @max_dp_streams: If non-%NULL, stores the maximum number of
 *		    simultaneous DP streams possible through this lane
 *		    adapter
 */
static unsigned int tb_available_credits(const struct tb_port *port,
					 size_t *max_dp_streams)
{
	const struct tb_switch *sw = port->sw;
	int credits, usb3, pcie, spare;
	size_t ndp;

	usb3 = tb_acpi_may_tunnel_usb3() ? sw->max_usb3_credits : 0;
	pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0;

	if (tb_acpi_is_xdomain_allowed()) {
		spare = min_not_zero(sw->max_dma_credits, dma_credits);
		/* Add some credits for a potential second DMA tunnel */
		spare += TB_MIN_DMA_CREDITS;
	} else {
		spare = 0;
	}

	credits = tb_usable_credits(port);
	if (tb_acpi_may_tunnel_dp()) {
		/*
		 * Maximum number of DP streams possible through the
		 * lane adapter.
		 */
		if (sw->min_dp_aux_credits + sw->min_dp_main_credits)
			ndp = (credits - (usb3 + pcie + spare)) /
			      (sw->min_dp_aux_credits + sw->min_dp_main_credits);
		else
			ndp = 0;
	} else {
		ndp = 0;
	}
	credits -= ndp * (sw->min_dp_aux_credits + sw->min_dp_main_credits);
	credits -= usb3;

	if (max_dp_streams)
		*max_dp_streams = ndp;

	return credits > 0 ? credits : 0;
}

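/*
 * Worked example with hypothetical numbers: a lane adapter with 60
 * usable credits, usb3 = 32, pcie = 6 and spare = 15 leaves
 * 60 - (32 + 6 + 15) = 7 credits for DP; with min_dp_aux_credits +
 * min_dp_main_credits = 2 that allows ndp = 3 streams, and the
 * function returns 60 - 3 * 2 - 32 = 22 credits for PCIe and DMA.
 */
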
static void tb_init_pm_support(struct tb_path_hop *hop)
{
	struct tb_port *out_port = hop->out_port;
	struct tb_port *in_port = hop->in_port;

	if (tb_port_is_null(in_port) && tb_port_is_null(out_port) &&
	    usb4_switch_version(in_port->sw) >= 2)
		hop->pm_support = true;
}

static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
					 enum tb_tunnel_type type)
{
	struct tb_tunnel *tunnel;

	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
	if (!tunnel)
		return NULL;

	tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
	if (!tunnel->paths) {
		tb_tunnel_free(tunnel);
		return NULL;
	}

	INIT_LIST_HEAD(&tunnel->list);
	tunnel->tb = tb;
	tunnel->npaths = npaths;
	tunnel->type = type;

	return tunnel;
}

static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable)
{
	struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
	int ret;

	/* Only supported if both routers are at least USB4 v2 */
	if ((usb4_switch_version(tunnel->src_port->sw) < 2) ||
	   (usb4_switch_version(tunnel->dst_port->sw) < 2))
		return 0;

	if (enable && tb_port_get_link_generation(port) < 4)
		return 0;

	ret = usb4_pci_port_set_ext_encapsulation(tunnel->src_port, enable);
	if (ret)
		return ret;

	/*
	 * The downstream router could be unplugged, so disabling
	 * encapsulation in the upstream router must still be possible
	 * (tolerate -ENODEV from the downstream side below).
	 */
	ret = usb4_pci_port_set_ext_encapsulation(tunnel->dst_port, enable);
	if (ret) {
		if (enable)
			return ret;
		if (ret != -ENODEV)
			return ret;
	}

	tb_tunnel_dbg(tunnel, "extended encapsulation %s\n",
		      str_enabled_disabled(enable));
	return 0;
}

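/*
 * On activation the remote (upstream) PCIe adapter is enabled before
 * the local (downstream) one; on deactivation the order is reversed,
 * and failure to disable the remote end is tolerated because the
 * downstream router may already have been unplugged.
 */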
static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	if (activate) {
		res = tb_pci_set_ext_encapsulation(tunnel, activate);
		if (res)
			return res;
	}

	if (activate)
		res = tb_pci_port_enable(tunnel->dst_port, activate);
	else
		res = tb_pci_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (activate) {
		res = tb_pci_port_enable(tunnel->src_port, activate);
		if (res)
			return res;
	} else {
		/* Downstream router could be unplugged */
		tb_pci_port_enable(tunnel->dst_port, activate);
	}

	return activate ? 0 : tb_pci_set_ext_encapsulation(tunnel, activate);
}

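/*
 * With USB4-style credit allocation the PCIe path credits are clamped
 * to what is actually available on the lane, e.g. (hypothetical)
 * min(sw->max_pcie_credits = 32, available = 22) = 22; legacy devices
 * use fixed values instead.
 */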
static int tb_pci_init_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;
	struct tb_switch *sw = port->sw;
	unsigned int credits;

	if (tb_port_use_credit_allocation(port)) {
		unsigned int available;

		available = tb_available_credits(port, NULL);
		credits = min(sw->max_pcie_credits, available);

		if (credits < TB_MIN_PCIE_CREDITS)
			return -ENOSPC;

		credits = max(TB_MIN_PCIE_CREDITS, credits);
	} else {
		if (tb_port_is_null(port))
			credits = port->bonded ? 32 : 16;
		else
			credits = 7;
	}

	hop->initial_credits = credits;
	return 0;
}

static int tb_pci_init_path(struct tb_path *path)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = TB_PCI_PRIORITY;
	path->weight = TB_PCI_WEIGHT;
	path->drop_packages = 0;

	tb_path_for_each_hop(path, hop) {
		int ret;

		ret = tb_pci_init_credits(hop);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
 * @tb: Pointer to the domain structure
 * @down: PCIe downstream adapter
 * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @down adapter is active, follows the tunnel to the PCIe upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
					 bool alloc_hopid)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	if (!tb_pci_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
				&tunnel->dst_port, "PCIe Up", alloc_hopid);
	if (!path) {
		/* Just disable the downstream port */
		tb_pci_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_PCI_PATH_UP] = path;
	if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]))
		goto err_free;

	path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
				"PCIe Down", alloc_hopid);
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_PCI_PATH_DOWN] = path;
	if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]))
		goto err_deactivate;

	/* Validate that the tunnel is complete */
	if (!tb_port_is_pcie_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on a PCIe adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

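/*
 * A sketch of how the connection manager side might consume this during
 * the initial scan (hypothetical, error handling elided; tunnel_list is
 * assumed to be the connection manager's list of active tunnels):
 *
 *	tunnel = tb_tunnel_discover_pci(tb, down, true);
 *	if (tunnel)
 *		list_add_tail(&tunnel->list, &tunnel_list);
 */
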
/**
 * tb_tunnel_alloc_pci() - allocate a PCIe tunnel
 * @tb: Pointer to the domain structure
 * @up: PCIe upstream adapter port
 * @down: PCIe downstream adapter port
 *
 * Allocate a PCIe tunnel. The ports must be of type TB_TYPE_PCIE_UP and
 * TB_TYPE_PCIE_DOWN.
 *
 * Return: A tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
				      struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;
	tunnel->dst_port = up;

	path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
			     "PCIe Down");
	if (!path)
		goto err_free;
	tunnel->paths[TB_PCI_PATH_DOWN] = path;
	if (tb_pci_init_path(path))
		goto err_free;

	path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
			     "PCIe Up");
	if (!path)
		goto err_free;
	tunnel->paths[TB_PCI_PATH_UP] = path;
	if (tb_pci_init_path(path))
		goto err_free;

	return tunnel;

err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}

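/*
 * Typical usage sketch (error handling elided; tb_tunnel_activate() is
 * defined elsewhere in this file):
 *
 *	tunnel = tb_tunnel_alloc_pci(tb, up, down);
 *	if (tunnel && tb_tunnel_activate(tunnel)) {
 *		tb_tunnel_free(tunnel);
 *		tunnel = NULL;
 *	}
 */
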
/**
 * tb_tunnel_reserved_pci() - Amount of bandwidth to reserve for PCIe
 * @port: Lane 0 adapter
 * @reserved_up: Upstream bandwidth in Mb/s to reserve
 * @reserved_down: Downstream bandwidth in Mb/s to reserve
 *
 * Can be called for any connected lane 0 adapter to find out how much
 * bandwidth needs to be left in reserve for possible PCIe bulk traffic.
 * Returns true if there is something to be reserved and writes the
 * amount to @reserved_down/@reserved_up. Otherwise returns false and
 * does not touch the parameters.
 */
bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up,
			    int *reserved_down)
{
	if (WARN_ON_ONCE(!port->remote))
		return false;

	if (!tb_acpi_may_tunnel_pcie())
		return false;

	if (tb_port_get_link_generation(port) < 4)
		return false;

	/* Must have PCIe adapters */
	if (tb_is_upstream_port(port)) {
		if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_UP))
			return false;
		if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_DOWN))
			return false;
	} else {
		if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_DOWN))
			return false;
		if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_UP))
			return false;
	}

	*reserved_up = USB4_V2_PCI_MIN_BANDWIDTH;
	*reserved_down = USB4_V2_PCI_MIN_BANDWIDTH;

	tb_port_dbg(port, "reserving %u/%u Mb/s for PCIe\n", *reserved_up,
		    *reserved_down);
	return true;
}

static bool tb_dp_is_usb4(const struct tb_switch *sw)
{
	/* Titan Ridge DP adapters need the same treatment as USB4 */
	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}

static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out,
			      int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
	u32 val;
	int ret;

	/* Both ends need to support this */
	if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
		return 0;

	ret = tb_port_read(out, &val, TB_CFG_PORT,
			   out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;

	ret = tb_port_write(out, &val, TB_CFG_PORT,
			    out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	do {
		ret = tb_port_read(out, &val, TB_CFG_PORT,
				   out->cap_adap + DP_STATUS_CTRL, 1);
		if (ret)
			return ret;
		if (!(val & DP_STATUS_CTRL_CMHS))
			return 0;
		usleep_range(100, 150);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

/*
 * Returns maximum possible rate from capability supporting only DP 2.0
 * and below. Used when DP BW allocation mode is not enabled.
 */
static inline u32 tb_dp_cap_get_rate(u32 val)
{
	u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;

	switch (rate) {
	case DP_COMMON_CAP_RATE_RBR:
		return 1620;
	case DP_COMMON_CAP_RATE_HBR:
		return 2700;
	case DP_COMMON_CAP_RATE_HBR2:
		return 5400;
	case DP_COMMON_CAP_RATE_HBR3:
		return 8100;
	default:
		return 0;
	}
}

/*
 * Returns maximum possible rate from capability supporting the DP 2.1
 * UHBR20, UHBR13.5 and UHBR10 rates as well. Use only when DP BW
 * allocation mode is enabled.
 */
static inline u32 tb_dp_cap_get_rate_ext(u32 val)
{
	if (val & DP_COMMON_CAP_UHBR20)
		return 20000;
	else if (val & DP_COMMON_CAP_UHBR13_5)
		return 13500;
	else if (val & DP_COMMON_CAP_UHBR10)
		return 10000;

	return tb_dp_cap_get_rate(val);
}

static inline bool tb_dp_is_uhbr_rate(unsigned int rate)
{
	return rate >= 10000;
}

static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
{
	val &= ~DP_COMMON_CAP_RATE_MASK;
	switch (rate) {
	default:
		WARN(1, "invalid rate %u passed, defaulting to 1620 Mb/s\n", rate);
		fallthrough;
	case 1620:
		val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 2700:
		val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 5400:
		val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 8100:
		val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	}
	return val;
}

static inline u32 tb_dp_cap_get_lanes(u32 val)
{
	u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;

	switch (lanes) {
	case DP_COMMON_CAP_1_LANE:
		return 1;
	case DP_COMMON_CAP_2_LANES:
		return 2;
	case DP_COMMON_CAP_4_LANES:
		return 4;
	default:
		return 0;
	}
}

static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
{
	val &= ~DP_COMMON_CAP_LANES_MASK;
	switch (lanes) {
	default:
		WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
		     lanes);
		fallthrough;
	case 1:
		val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 2:
		val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 4:
		val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	}
	return val;
}

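/*
 * For example, HBR3 x4 yields 8100 * 4 * 8 / 10 = 25920 Mb/s while
 * UHBR20 x4 yields 20000 * 4 * 128 / 132 = 77575 Mb/s (integer math).
 */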
static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
{
	/* Tunneling removes the DP 8b/10b or 128b/132b channel encoding */
	if (tb_dp_is_uhbr_rate(rate))
		return rate * lanes * 128 / 132;
	return rate * lanes * 8 / 10;
}

static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
				  u32 out_rate, u32 out_lanes, u32 *new_rate,
				  u32 *new_lanes)
{
	static const u32 dp_bw[][2] = {
		/* Mb/s, lanes */
		{ 8100, 4 }, /* 25920 Mb/s */
		{ 5400, 4 }, /* 17280 Mb/s */
		{ 8100, 2 }, /* 12960 Mb/s */
		{ 2700, 4 }, /* 8640 Mb/s */
		{ 5400, 2 }, /* 8640 Mb/s */
		{ 8100, 1 }, /* 6480 Mb/s */
		{ 1620, 4 }, /* 5184 Mb/s */
		{ 5400, 1 }, /* 4320 Mb/s */
		{ 2700, 2 }, /* 4320 Mb/s */
		{ 1620, 2 }, /* 2592 Mb/s */
		{ 2700, 1 }, /* 2160 Mb/s */
		{ 1620, 1 }, /* 1296 Mb/s */
	};
	unsigned int i;

	/*
	 * Find a combination that can fit into max_bw and does not
	 * exceed the maximum rate and lanes supported by the DP OUT and
	 * DP IN adapters.
	 */
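	/*
	 * For example (hypothetical limit): with max_bw = 8000 Mb/s and
	 * both adapters capable of HBR3 x4, the first entry that fits is
	 * 8100 Mb/s x1 = 6480 Mb/s.
	 */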
	for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
		if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
			continue;

		if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
			continue;

		if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
			*new_rate = dp_bw[i][0];
			*new_lanes = dp_bw[i][1];
			return 0;
		}
	}

	return -ENOSR;
}

static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
{
	u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	int ret, max_bw;

	/*
	 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
	 * newer generation hardware.
	 */
	if (in->sw->generation < 2 || out->sw->generation < 2)
		return 0;

	/*
	 * Perform connection manager handshake between IN and OUT ports
	 * before capabilities exchange can take place.
	 */
	ret = tb_dp_cm_handshake(in, out, 3000);
	if (ret)
		return ret;

	/* Read both DP_LOCAL_CAP registers */
	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
			   in->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
			   out->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	/* Write IN local caps to OUT remote caps */
	ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
			    out->cap_adap + DP_REMOTE_CAP, 1);
	if (ret)
		return ret;

	in_rate = tb_dp_cap_get_rate(in_dp_cap);
	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
	tb_tunnel_dbg(tunnel,
		      "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		      in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));

	/*
	 * If the tunnel bandwidth is limited (max_bw is set) then see
	 * if we need to reduce bandwidth to fit there.
	 */
	out_rate = tb_dp_cap_get_rate(out_dp_cap);
	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
	bw = tb_dp_bandwidth(out_rate, out_lanes);
	tb_tunnel_dbg(tunnel,
		      "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		      out_rate, out_lanes, bw);

	if (tb_tunnel_direction_downstream(tunnel))
		max_bw = tunnel->max_down;
	else
		max_bw = tunnel->max_up;

	if (max_bw && bw > max_bw) {
		u32 new_rate, new_lanes, new_bw;

		ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
					     out_rate, out_lanes, &new_rate,
					     &new_lanes);
		if (ret) {
			tb_tunnel_info(tunnel, "not enough bandwidth\n");
			return ret;
		}

		new_bw = tb_dp_bandwidth(new_rate, new_lanes);
		tb_tunnel_dbg(tunnel,
			      "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
			      new_rate, new_lanes, new_bw);

		/*
		 * Set new rate and number of lanes before writing it to
		 * the IN port remote caps.
		 */
		out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
		out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
	}

	/*
	 * Titan Ridge does not disable AUX timers when it gets
	 * SET_CONFIG with SET_LTTPR_MODE set. This causes problems with
	 * DP tunneling.
	 */
	if (tb_route(out->sw) && tb_switch_is_titan_ridge(out->sw)) {
		out_dp_cap |= DP_COMMON_CAP_LTTPR_NS;
		tb_tunnel_dbg(tunnel, "disabling LTTPR\n");
	}

	return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
			     in->cap_adap + DP_REMOTE_CAP, 1);
}

static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
{
	int ret, estimated_bw, granularity, tmp;
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	u32 out_dp_cap, out_rate, out_lanes;
	u32 in_dp_cap, in_rate, in_lanes;
	u32 rate, lanes;

	if (!bw_alloc_mode)
		return 0;

	ret = usb4_dp_port_set_cm_bandwidth_mode_supported(in, true);
	if (ret)
		return ret;

	ret = usb4_dp_port_set_group_id(in, in->group->index);
	if (ret)
		return ret;

	/*
	 * Get the non-reduced rate and lanes based on the lowest
	 * capability of both adapters.
	 */
	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
			   in->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
			   out->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	in_rate = tb_dp_cap_get_rate(in_dp_cap);
	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
	out_rate = tb_dp_cap_get_rate(out_dp_cap);
	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);

	rate = min(in_rate, out_rate);
	lanes = min(in_lanes, out_lanes);
	tmp = tb_dp_bandwidth(rate, lanes);

	tb_tunnel_dbg(tunnel, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
		      rate, lanes, tmp);

	ret = usb4_dp_port_set_nrd(in, rate, lanes);
	if (ret)
		return ret;

	/*
	 * Pick a granularity that supports the maximum possible
	 * bandwidth. For that we use the UHBR rates too.
	 */
	in_rate = tb_dp_cap_get_rate_ext(in_dp_cap);
	out_rate = tb_dp_cap_get_rate_ext(out_dp_cap);
	rate = min(in_rate, out_rate);
	tmp = tb_dp_bandwidth(rate, lanes);

	tb_tunnel_dbg(tunnel,
		      "maximum bandwidth through allocation mode %u Mb/s x%u = %u Mb/s\n",
		      rate, lanes, tmp);

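	/*
	 * Hypothetical example: UHBR20 x4 gives tmp = 77575 Mb/s, and
	 * 77575 / 250 = 310 > 255, so the loop below doubles the
	 * granularity once to 500 Mb/s (77575 / 500 = 155 fits).
	 */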
	for (granularity = 250; tmp / granularity > 255 && granularity <= 1000;
	     granularity *= 2)
		;

	tb_tunnel_dbg(tunnel, "granularity %d Mb/s\n", granularity);

	/*
	 * Returns -EINVAL if granularity above is outside of the
	 * accepted ranges.
	 */
	ret = usb4_dp_port_set_granularity(in, granularity);
	if (ret)
		return ret;

	/*
	 * Bandwidth estimation is pretty much what we have in
	 * max_up/down fields. For discovery we just read what the
	 * estimation was set to.
	 */
	if (tb_tunnel_direction_downstream(tunnel))
		estimated_bw = tunnel->max_down;
	else
		estimated_bw = tunnel->max_up;

	tb_tunnel_dbg(tunnel, "estimated bandwidth %d Mb/s\n", estimated_bw);

	ret = usb4_dp_port_set_estimated_bandwidth(in, estimated_bw);
	if (ret)
		return ret;

	/* Initial allocation should be 0 according to the spec */
	ret = usb4_dp_port_allocate_bandwidth(in, 0);
	if (ret)
		return ret;

	tb_tunnel_dbg(tunnel, "bandwidth allocation mode enabled\n");
	return 0;
}

static int tb_dp_init(struct tb_tunnel *tunnel)
{
	struct tb_port *in = tunnel->src_port;
	struct tb_switch *sw = in->sw;
	struct tb *tb = in->sw->tb;
	int ret;

	ret = tb_dp_xchg_caps(tunnel);
	if (ret)
		return ret;

	if (!tb_switch_is_usb4(sw))
		return 0;

	if (!usb4_dp_port_bandwidth_mode_supported(in))
		return 0;

	tb_tunnel_dbg(tunnel, "bandwidth allocation mode supported\n");

	ret = usb4_dp_port_set_cm_id(in, tb->index);
	if (ret)
		return ret;

	return tb_dp_bandwidth_alloc_mode_enable(tunnel);
}

static void tb_dp_deinit(struct tb_tunnel *tunnel)
{
	struct tb_port *in = tunnel->src_port;

	if (!usb4_dp_port_bandwidth_mode_supported(in))
		return;
	if (usb4_dp_port_bandwidth_mode_enabled(in)) {
		usb4_dp_port_set_cm_bandwidth_mode_supported(in, false);
		tb_tunnel_dbg(tunnel, "bandwidth allocation mode disabled\n");
	}
}

static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
	int ret;

	if (active) {
		struct tb_path **paths;
		int last;

		paths = tunnel->paths;
		last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;

		tb_dp_port_set_hops(tunnel->src_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);

		tb_dp_port_set_hops(tunnel->dst_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
	} else {
		tb_dp_port_hpd_clear(tunnel->src_port);
		tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
		if (tb_port_is_dpout(tunnel->dst_port))
			tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
	}

	ret = tb_dp_port_enable(tunnel->src_port, active);
	if (ret)
		return ret;

	if (tb_port_is_dpout(tunnel->dst_port))
		return tb_dp_port_enable(tunnel->dst_port, active);

	return 0;
}

/**
 * tb_dp_bandwidth_mode_maximum_bandwidth() - Maximum possible bandwidth
 * @tunnel: DP tunnel to check
 * @max_bw_rounded: Maximum bandwidth in Mb/s rounded up to the next granularity
 *
 * Returns maximum possible bandwidth for this tunnel in Mb/s.
 */
static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
						  int *max_bw_rounded)
{
	struct tb_port *in = tunnel->src_port;
	int ret, rate, lanes, max_bw;
	u32 cap;

	/*
	 * DP IN adapter DP_LOCAL_CAP gets updated to the lowest AUX-read
	 * parameter values, so we can use it to determine the maximum
	 * possible bandwidth over this link.
	 *
	 * See USB4 v2 spec 1.0 10.4.4.5.
	 */
	ret = tb_port_read(in, &cap, TB_CFG_PORT,
			   in->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	rate = tb_dp_cap_get_rate_ext(cap);
	lanes = tb_dp_cap_get_lanes(cap);

	max_bw = tb_dp_bandwidth(rate, lanes);

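	/*
	 * Round up to the configured granularity; e.g. (hypothetical)
	 * 17280 Mb/s with a 250 Mb/s granularity rounds up to 17500 Mb/s.
	 */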
	if (max_bw_rounded) {
		ret = usb4_dp_port_granularity(in);
		if (ret < 0)
			return ret;
		*max_bw_rounded = roundup(max_bw, ret);
	}

	return max_bw;
}

static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
						   int *consumed_up,
						   int *consumed_down)
{
	struct tb_port *in = tunnel->src_port;
	int ret, allocated_bw, max_bw_rounded;

	if (!usb4_dp_port_bandwidth_mode_enabled(in))
		return -EOPNOTSUPP;

	if (!tunnel->bw_mode)
		return -EOPNOTSUPP;

	/* Read what was allocated previously if any */
	ret = usb4_dp_port_allocated_bandwidth(in);
	if (ret < 0)
		return ret;
	allocated_bw = ret;

	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded);
	if (ret < 0)
		return ret;
	if (allocated_bw == max_bw_rounded)
		allocated_bw = ret;

	if (tb_tunnel_direction_downstream(tunnel)) {
		*consumed_up = 0;
		*consumed_down = allocated_bw;
	} else {
		*consumed_up = allocated_bw;
		*consumed_down = 0;
	}

	return 0;
}

static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
				     int *allocated_down)
{
	struct tb_port *in = tunnel->src_port;

	/*
	 * If we have already set the allocated bandwidth then use that.
	 * Otherwise we read it from the DPRX.
	 */
	if (usb4_dp_port_bandwidth_mode_enabled(in) && tunnel->bw_mode) {
		int ret, allocated_bw, max_bw_rounded;

		ret = usb4_dp_port_allocated_bandwidth(in);
		if (ret < 0)
			return ret;
		allocated_bw = ret;

		ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel,
							     &max_bw_rounded);
		if (ret < 0)
			return ret;
		if (allocated_bw == max_bw_rounded)
			allocated_bw = ret;

		if (tb_tunnel_direction_downstream(tunnel)) {
			*allocated_up = 0;
			*allocated_down = allocated_bw;
		} else {
			*allocated_up = allocated_bw;
			*allocated_down = 0;
		}
		return 0;
	}

	return tunnel->consumed_bandwidth(tunnel, allocated_up,
					  allocated_down);
}

static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
				 int *alloc_down)
{
	struct tb_port *in = tunnel->src_port;
	int max_bw_rounded, ret, tmp;

	if (!usb4_dp_port_bandwidth_mode_enabled(in))
		return -EOPNOTSUPP;

	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded);
	if (ret < 0)
		return ret;

	if (tb_tunnel_direction_downstream(tunnel)) {
		tmp = min(*alloc_down, max_bw_rounded);
		ret = usb4_dp_port_allocate_bandwidth(in, tmp);
		if (ret)
			return ret;
		*alloc_down = tmp;
		*alloc_up = 0;
	} else {
		tmp = min(*alloc_up, max_bw_rounded);
		ret = usb4_dp_port_allocate_bandwidth(in, tmp);
		if (ret)
			return ret;
		*alloc_down = 0;
		*alloc_up = tmp;
	}

	/* Now we can use BW mode registers to figure out the bandwidth */
	/* TODO: need to handle discovery too */
	tunnel->bw_mode = true;
	return 0;
}

static int tb_dp_wait_dprx(struct tb_tunnel *tunnel, int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
	struct tb_port *in = tunnel->src_port;

10846ce35635SMika Westerberg 	/*
10856ce35635SMika Westerberg 	 * Wait for DPRX done. Normally it should already be set for
10866ce35635SMika Westerberg 	 * an active tunnel.
1087a11b88adSMika Westerberg 	 */
1088a11b88adSMika Westerberg 	do {
10896ce35635SMika Westerberg 		u32 val;
10906ce35635SMika Westerberg 		int ret;
10916ce35635SMika Westerberg 
1092a11b88adSMika Westerberg 		ret = tb_port_read(in, &val, TB_CFG_PORT,
1093a11b88adSMika Westerberg 				   in->cap_adap + DP_COMMON_CAP, 1);
1094a11b88adSMika Westerberg 		if (ret)
1095a11b88adSMika Westerberg 			return ret;
1096a11b88adSMika Westerberg 
1097a11b88adSMika Westerberg 		if (val & DP_COMMON_CAP_DPRX_DONE) {
1098fe8a0293SMika Westerberg 			tb_tunnel_dbg(tunnel, "DPRX read done\n");
10996ce35635SMika Westerberg 			return 0;
11006ce35635SMika Westerberg 		}
11016ce35635SMika Westerberg 		usleep_range(100, 150);
11026ce35635SMika Westerberg 	} while (ktime_before(ktime_get(), timeout));
11036ce35635SMika Westerberg 
110497e0a21cSMika Westerberg 	tb_tunnel_dbg(tunnel, "DPRX read timeout\n");
1105a11b88adSMika Westerberg 	return -ETIMEDOUT;
11066ce35635SMika Westerberg }
11076ce35635SMika Westerberg 
11086ce35635SMika Westerberg /* Read cap from tunnel DP IN */
11096ce35635SMika Westerberg static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
11106ce35635SMika Westerberg 			  u32 *lanes)
11116ce35635SMika Westerberg {
11126ce35635SMika Westerberg 	struct tb_port *in = tunnel->src_port;
11136ce35635SMika Westerberg 	u32 val;
11146ce35635SMika Westerberg 	int ret;
11156ce35635SMika Westerberg 
11166ce35635SMika Westerberg 	switch (cap) {
11176ce35635SMika Westerberg 	case DP_LOCAL_CAP:
11186ce35635SMika Westerberg 	case DP_REMOTE_CAP:
111997e0a21cSMika Westerberg 	case DP_COMMON_CAP:
11206ce35635SMika Westerberg 		break;
11216ce35635SMika Westerberg 
11226ce35635SMika Westerberg 	default:
11236ce35635SMika Westerberg 		tb_tunnel_WARN(tunnel, "invalid capability index %#x\n", cap);
11246ce35635SMika Westerberg 		return -EINVAL;
11256ce35635SMika Westerberg 	}
11266ce35635SMika Westerberg 
1127a11b88adSMika Westerberg 	/*
11286ce35635SMika Westerberg 	 * Read from the copied remote cap so that we take into account
11296ce35635SMika Westerberg 	 * any capability reduction that happened during the exchange.
1130a11b88adSMika Westerberg 	 */
11316ce35635SMika Westerberg 	ret = tb_port_read(in, &val, TB_CFG_PORT, in->cap_adap + cap, 1);
1132a11b88adSMika Westerberg 	if (ret)
1133a11b88adSMika Westerberg 		return ret;
1134a11b88adSMika Westerberg 
11356ce35635SMika Westerberg 	*rate = tb_dp_cap_get_rate(val);
11366ce35635SMika Westerberg 	*lanes = tb_dp_cap_get_lanes(val);
11376ce35635SMika Westerberg 	return 0;
11386ce35635SMika Westerberg }
11396ce35635SMika Westerberg 
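/*
 * Maximum bandwidth the tunnel can ever consume. Only implemented
 * for the bandwidth allocation mode, otherwise returns %-EOPNOTSUPP.
 */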
11406ce35635SMika Westerberg static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
11416ce35635SMika Westerberg 				   int *max_down)
11426ce35635SMika Westerberg {
11436ce35635SMika Westerberg 	int ret;
11446ce35635SMika Westerberg 
1145769da970SMika Westerberg 	if (!usb4_dp_port_bandwidth_mode_enabled(tunnel->src_port))
11462d7e0472SMika Westerberg 		return -EOPNOTSUPP;
11472d7e0472SMika Westerberg 
11482d7e0472SMika Westerberg 	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, NULL);
11492d7e0472SMika Westerberg 	if (ret < 0)
11506ce35635SMika Westerberg 		return ret;
11516ce35635SMika Westerberg 
1152769da970SMika Westerberg 	if (tb_tunnel_direction_downstream(tunnel)) {
11536ce35635SMika Westerberg 		*max_up = 0;
11542d7e0472SMika Westerberg 		*max_down = ret;
11556ce35635SMika Westerberg 	} else {
11562d7e0472SMika Westerberg 		*max_up = ret;
11576ce35635SMika Westerberg 		*max_down = 0;
11586ce35635SMika Westerberg 	}
11596ce35635SMika Westerberg 
11606ce35635SMika Westerberg 	return 0;
11616ce35635SMika Westerberg }
11626ce35635SMika Westerberg 
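/*
 * Bandwidth the tunnel currently consumes: taken from the bandwidth
 * allocation mode registers on USB4 routers (falling back to the
 * DPRX negotiated capabilities), from DP_REMOTE_CAP on second
 * generation routers, and %0 for legacy devices without bandwidth
 * management.
 */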
11636ce35635SMika Westerberg static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
11646ce35635SMika Westerberg 				    int *consumed_down)
11656ce35635SMika Westerberg {
1166769da970SMika Westerberg 	const struct tb_switch *sw = tunnel->src_port->sw;
11676ce35635SMika Westerberg 	u32 rate = 0, lanes = 0;
11686ce35635SMika Westerberg 	int ret;
11696ce35635SMika Westerberg 
11706ce35635SMika Westerberg 	if (tb_dp_is_usb4(sw)) {
11716ce35635SMika Westerberg 		/*
11726ce35635SMika Westerberg 		 * On USB4 routers, first check whether the bandwidth
11736ce35635SMika Westerberg 		 * allocation mode is enabled, and if so read the
11746ce35635SMika Westerberg 		 * bandwidth through its registers.
11756ce35635SMika Westerberg 		 */
11768d73f6b8SMika Westerberg 		ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up,
11776ce35635SMika Westerberg 							      consumed_down);
11786ce35635SMika Westerberg 		if (ret < 0) {
11796ce35635SMika Westerberg 			if (ret != -EOPNOTSUPP)
11806ce35635SMika Westerberg 				return ret;
11816ce35635SMika Westerberg 		} else if (!ret) {
11826ce35635SMika Westerberg 			return 0;
11836ce35635SMika Westerberg 		}
11846ce35635SMika Westerberg 		/*
11856ce35635SMika Westerberg 		 * Then see if the DPRX negotiation is ready and if so
11866ce35635SMika Westerberg 		 * return that bandwidth (it may be smaller than the
1187ccd84502SGil Fine 		 * reduced one). According to the VESA spec, the DPRX
1188ccd84502SGil Fine 		 * negotiation shall complete within 5 seconds after the
1189ccd84502SGil Fine 		 * tunnel is established. We give it 100ms extra just in case.
11906ce35635SMika Westerberg 		 */
1191ccd84502SGil Fine 		ret = tb_dp_wait_dprx(tunnel, 5100);
11926ce35635SMika Westerberg 		if (ret)
11936ce35635SMika Westerberg 			return ret;
119497e0a21cSMika Westerberg 		ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes);
119597e0a21cSMika Westerberg 		if (ret)
119697e0a21cSMika Westerberg 			return ret;
11976ce35635SMika Westerberg 	} else if (sw->generation >= 2) {
11986ce35635SMika Westerberg 		ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, &rate, &lanes);
11996ce35635SMika Westerberg 		if (ret)
12006ce35635SMika Westerberg 			return ret;
1201a11b88adSMika Westerberg 	} else {
1202a11b88adSMika Westerberg 		/* No bandwidth management for legacy devices */
12037c0ee8fdSMika Westerberg 		*consumed_up = 0;
12047c0ee8fdSMika Westerberg 		*consumed_down = 0;
1205a11b88adSMika Westerberg 		return 0;
1206a11b88adSMika Westerberg 	}
1207a11b88adSMika Westerberg 
1208769da970SMika Westerberg 	if (tb_tunnel_direction_downstream(tunnel)) {
12097c0ee8fdSMika Westerberg 		*consumed_up = 0;
12107c0ee8fdSMika Westerberg 		*consumed_down = tb_dp_bandwidth(rate, lanes);
12117c0ee8fdSMika Westerberg 	} else {
12127c0ee8fdSMika Westerberg 		*consumed_up = tb_dp_bandwidth(rate, lanes);
12137c0ee8fdSMika Westerberg 		*consumed_down = 0;
12147c0ee8fdSMika Westerberg 	}
12157c0ee8fdSMika Westerberg 
12167c0ee8fdSMika Westerberg 	return 0;
1217a11b88adSMika Westerberg }
1218a11b88adSMika Westerberg 
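/*
 * AUX paths need only a single buffer unless the router supports
 * dynamic credit allocation, in which case its minimum DP AUX
 * credits are used.
 */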
12196ed541c5SMika Westerberg static void tb_dp_init_aux_credits(struct tb_path_hop *hop)
12206ed541c5SMika Westerberg {
12216ed541c5SMika Westerberg 	struct tb_port *port = hop->in_port;
12226ed541c5SMika Westerberg 	struct tb_switch *sw = port->sw;
12236ed541c5SMika Westerberg 
12246ed541c5SMika Westerberg 	if (tb_port_use_credit_allocation(port))
12256ed541c5SMika Westerberg 		hop->initial_credits = sw->min_dp_aux_credits;
12266ed541c5SMika Westerberg 	else
12276ed541c5SMika Westerberg 		hop->initial_credits = 1;
12286ed541c5SMika Westerberg }
12296ed541c5SMika Westerberg 
1230ce91d793SMika Westerberg static void tb_dp_init_aux_path(struct tb_path *path, bool pm_support)
12314f807e47SMika Westerberg {
12326ed541c5SMika Westerberg 	struct tb_path_hop *hop;
12334f807e47SMika Westerberg 
12344f807e47SMika Westerberg 	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
12354f807e47SMika Westerberg 	path->egress_shared_buffer = TB_PATH_NONE;
12364f807e47SMika Westerberg 	path->ingress_fc_enable = TB_PATH_ALL;
12374f807e47SMika Westerberg 	path->ingress_shared_buffer = TB_PATH_NONE;
1238f73edddfSMika Westerberg 	path->priority = TB_DP_AUX_PRIORITY;
1239f73edddfSMika Westerberg 	path->weight = TB_DP_AUX_WEIGHT;
12404f807e47SMika Westerberg 
1241ce91d793SMika Westerberg 	tb_path_for_each_hop(path, hop) {
12426ed541c5SMika Westerberg 		tb_dp_init_aux_credits(hop);
1243ce91d793SMika Westerberg 		if (pm_support)
1244ce91d793SMika Westerberg 			tb_init_pm_support(hop);
1245ce91d793SMika Westerberg 	}
12464f807e47SMika Westerberg }
12474f807e47SMika Westerberg 
12486ed541c5SMika Westerberg static int tb_dp_init_video_credits(struct tb_path_hop *hop)
12494f807e47SMika Westerberg {
12506ed541c5SMika Westerberg 	struct tb_port *port = hop->in_port;
12516ed541c5SMika Westerberg 	struct tb_switch *sw = port->sw;
12526ed541c5SMika Westerberg 
12536ed541c5SMika Westerberg 	if (tb_port_use_credit_allocation(port)) {
12546ed541c5SMika Westerberg 		unsigned int nfc_credits;
12556ed541c5SMika Westerberg 		size_t max_dp_streams;
12566ed541c5SMika Westerberg 
12576ed541c5SMika Westerberg 		tb_available_credits(port, &max_dp_streams);
12586ed541c5SMika Westerberg 		/*
12596ed541c5SMika Westerberg 		 * Read the number of currently allocated NFC credits
12606ed541c5SMika Westerberg 		 * from the lane adapter. Since we only use them for DP
12616ed541c5SMika Westerberg 		 * tunneling we can use that to figure out how many DP
12626ed541c5SMika Westerberg 		 * tunnels already go through the lane adapter.
12636ed541c5SMika Westerberg 		 */
12646ed541c5SMika Westerberg 		nfc_credits = port->config.nfc_credits &
12656ed541c5SMika Westerberg 				ADP_CS_4_NFC_BUFFERS_MASK;
12666ed541c5SMika Westerberg 		if (nfc_credits / sw->min_dp_main_credits > max_dp_streams)
12676ed541c5SMika Westerberg 			return -ENOSPC;
12686ed541c5SMika Westerberg 
12696ed541c5SMika Westerberg 		hop->nfc_credits = sw->min_dp_main_credits;
12706ed541c5SMika Westerberg 	} else {
12716ed541c5SMika Westerberg 		hop->nfc_credits = min(port->total_credits - 2, 12U);
12726ed541c5SMika Westerberg 	}
12736ed541c5SMika Westerberg 
12746ed541c5SMika Westerberg 	return 0;
12756ed541c5SMika Westerberg }
12766ed541c5SMika Westerberg 
1277ce91d793SMika Westerberg static int tb_dp_init_video_path(struct tb_path *path, bool pm_support)
12786ed541c5SMika Westerberg {
12796ed541c5SMika Westerberg 	struct tb_path_hop *hop;
12804f807e47SMika Westerberg 
12814f807e47SMika Westerberg 	path->egress_fc_enable = TB_PATH_NONE;
12824f807e47SMika Westerberg 	path->egress_shared_buffer = TB_PATH_NONE;
12834f807e47SMika Westerberg 	path->ingress_fc_enable = TB_PATH_NONE;
12844f807e47SMika Westerberg 	path->ingress_shared_buffer = TB_PATH_NONE;
1285f73edddfSMika Westerberg 	path->priority = TB_DP_VIDEO_PRIORITY;
1286f73edddfSMika Westerberg 	path->weight = TB_DP_VIDEO_WEIGHT;
12874f807e47SMika Westerberg 
12886ed541c5SMika Westerberg 	tb_path_for_each_hop(path, hop) {
12896ed541c5SMika Westerberg 		int ret;
129002c5e7c2SMika Westerberg 
12916ed541c5SMika Westerberg 		ret = tb_dp_init_video_credits(hop);
12926ed541c5SMika Westerberg 		if (ret)
12936ed541c5SMika Westerberg 			return ret;
1294ce91d793SMika Westerberg 		if (pm_support)
1295ce91d793SMika Westerberg 			tb_init_pm_support(hop);
12966ed541c5SMika Westerberg 	}
12974f807e47SMika Westerberg 
12986ed541c5SMika Westerberg 	return 0;
12994f807e47SMika Westerberg }
13004f807e47SMika Westerberg 
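/*
 * Log the maximum bandwidth supported by the DP IN and DP OUT
 * adapters, and the reduced bandwidth the tunnel ended up with.
 */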
1301033c2d8aSMika Westerberg static void tb_dp_dump(struct tb_tunnel *tunnel)
1302033c2d8aSMika Westerberg {
1303033c2d8aSMika Westerberg 	struct tb_port *in, *out;
1304033c2d8aSMika Westerberg 	u32 dp_cap, rate, lanes;
1305033c2d8aSMika Westerberg 
1306033c2d8aSMika Westerberg 	in = tunnel->src_port;
1307033c2d8aSMika Westerberg 	out = tunnel->dst_port;
1308033c2d8aSMika Westerberg 
1309033c2d8aSMika Westerberg 	if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
1310033c2d8aSMika Westerberg 			 in->cap_adap + DP_LOCAL_CAP, 1))
1311033c2d8aSMika Westerberg 		return;
1312033c2d8aSMika Westerberg 
1313033c2d8aSMika Westerberg 	rate = tb_dp_cap_get_rate(dp_cap);
1314033c2d8aSMika Westerberg 	lanes = tb_dp_cap_get_lanes(dp_cap);
1315033c2d8aSMika Westerberg 
1316fe8a0293SMika Westerberg 	tb_tunnel_dbg(tunnel,
1317fe8a0293SMika Westerberg 		      "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
1318033c2d8aSMika Westerberg 		      rate, lanes, tb_dp_bandwidth(rate, lanes));
1319033c2d8aSMika Westerberg 
1320033c2d8aSMika Westerberg 	if (tb_port_read(out, &dp_cap, TB_CFG_PORT,
1321033c2d8aSMika Westerberg 			 out->cap_adap + DP_LOCAL_CAP, 1))
1322033c2d8aSMika Westerberg 		return;
1323033c2d8aSMika Westerberg 
1324033c2d8aSMika Westerberg 	rate = tb_dp_cap_get_rate(dp_cap);
1325033c2d8aSMika Westerberg 	lanes = tb_dp_cap_get_lanes(dp_cap);
1326033c2d8aSMika Westerberg 
1327fe8a0293SMika Westerberg 	tb_tunnel_dbg(tunnel,
1328fe8a0293SMika Westerberg 		      "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
1329033c2d8aSMika Westerberg 		      rate, lanes, tb_dp_bandwidth(rate, lanes));
1330033c2d8aSMika Westerberg 
1331033c2d8aSMika Westerberg 	if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
1332033c2d8aSMika Westerberg 			 in->cap_adap + DP_REMOTE_CAP, 1))
1333033c2d8aSMika Westerberg 		return;
1334033c2d8aSMika Westerberg 
1335033c2d8aSMika Westerberg 	rate = tb_dp_cap_get_rate(dp_cap);
1336033c2d8aSMika Westerberg 	lanes = tb_dp_cap_get_lanes(dp_cap);
1337033c2d8aSMika Westerberg 
1338fe8a0293SMika Westerberg 	tb_tunnel_dbg(tunnel, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
1339033c2d8aSMika Westerberg 		      rate, lanes, tb_dp_bandwidth(rate, lanes));
1340033c2d8aSMika Westerberg }
1341033c2d8aSMika Westerberg 
13424f807e47SMika Westerberg /**
13434f807e47SMika Westerberg  * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
13444f807e47SMika Westerberg  * @tb: Pointer to the domain structure
13454f807e47SMika Westerberg  * @in: DP in adapter
134643bddb26SMika Westerberg  * @alloc_hopid: Allocate HopIDs from visited ports
13474f807e47SMika Westerberg  *
13484f807e47SMika Westerberg  * If @in adapter is active, follows the tunnel to the DP out adapter
13494f807e47SMika Westerberg  * and back.
13514f807e47SMika Westerberg  *
13524f807e47SMika Westerberg  * Return: DP tunnel or %NULL if no tunnel found.
13534f807e47SMika Westerberg  */
135443bddb26SMika Westerberg struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
135543bddb26SMika Westerberg 					bool alloc_hopid)
13564f807e47SMika Westerberg {
13574f807e47SMika Westerberg 	struct tb_tunnel *tunnel;
13584f807e47SMika Westerberg 	struct tb_port *port;
13594f807e47SMika Westerberg 	struct tb_path *path;
13604f807e47SMika Westerberg 
13614f807e47SMika Westerberg 	if (!tb_dp_port_is_enabled(in))
13624f807e47SMika Westerberg 		return NULL;
13634f807e47SMika Westerberg 
13644f807e47SMika Westerberg 	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
13654f807e47SMika Westerberg 	if (!tunnel)
13664f807e47SMika Westerberg 		return NULL;
13674f807e47SMika Westerberg 
13686ce35635SMika Westerberg 	tunnel->init = tb_dp_init;
13696ce35635SMika Westerberg 	tunnel->deinit = tb_dp_deinit;
13704f807e47SMika Westerberg 	tunnel->activate = tb_dp_activate;
13716ce35635SMika Westerberg 	tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
13726ce35635SMika Westerberg 	tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
13736ce35635SMika Westerberg 	tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
1374a11b88adSMika Westerberg 	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
13754f807e47SMika Westerberg 	tunnel->src_port = in;
13764f807e47SMika Westerberg 
13774f807e47SMika Westerberg 	path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
137843bddb26SMika Westerberg 				&tunnel->dst_port, "Video", alloc_hopid);
13794f807e47SMika Westerberg 	if (!path) {
13804f807e47SMika Westerberg 		/* Just disable the DP IN port */
13814f807e47SMika Westerberg 		tb_dp_port_enable(in, false);
13824f807e47SMika Westerberg 		goto err_free;
13834f807e47SMika Westerberg 	}
13844f807e47SMika Westerberg 	tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
1385ce91d793SMika Westerberg 	if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], false))
13866ed541c5SMika Westerberg 		goto err_free;
13874f807e47SMika Westerberg 
138843bddb26SMika Westerberg 	path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX",
138943bddb26SMika Westerberg 				alloc_hopid);
13904f807e47SMika Westerberg 	if (!path)
13914f807e47SMika Westerberg 		goto err_deactivate;
13924f807e47SMika Westerberg 	tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
1393ce91d793SMika Westerberg 	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT], false);
13944f807e47SMika Westerberg 
13954f807e47SMika Westerberg 	path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
139643bddb26SMika Westerberg 				&port, "AUX RX", alloc_hopid);
13974f807e47SMika Westerberg 	if (!path)
13984f807e47SMika Westerberg 		goto err_deactivate;
13994f807e47SMika Westerberg 	tunnel->paths[TB_DP_AUX_PATH_IN] = path;
1400ce91d793SMika Westerberg 	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN], false);
14014f807e47SMika Westerberg 
14024f807e47SMika Westerberg 	/* Validate that the tunnel is complete */
14034f807e47SMika Westerberg 	if (!tb_port_is_dpout(tunnel->dst_port)) {
14044f807e47SMika Westerberg 		tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
14054f807e47SMika Westerberg 		goto err_deactivate;
14064f807e47SMika Westerberg 	}
14074f807e47SMika Westerberg 
14084f807e47SMika Westerberg 	if (!tb_dp_port_is_enabled(tunnel->dst_port))
14094f807e47SMika Westerberg 		goto err_deactivate;
14104f807e47SMika Westerberg 
14114f807e47SMika Westerberg 	if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
14124f807e47SMika Westerberg 		goto err_deactivate;
14134f807e47SMika Westerberg 
14144f807e47SMika Westerberg 	if (port != tunnel->src_port) {
14154f807e47SMika Westerberg 		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
14164f807e47SMika Westerberg 		goto err_deactivate;
14174f807e47SMika Westerberg 	}
14184f807e47SMika Westerberg 
1419033c2d8aSMika Westerberg 	tb_dp_dump(tunnel);
1420033c2d8aSMika Westerberg 
14214f807e47SMika Westerberg 	tb_tunnel_dbg(tunnel, "discovered\n");
14224f807e47SMika Westerberg 	return tunnel;
14234f807e47SMika Westerberg 
14244f807e47SMika Westerberg err_deactivate:
14254f807e47SMika Westerberg 	tb_tunnel_deactivate(tunnel);
14264f807e47SMika Westerberg err_free:
14274f807e47SMika Westerberg 	tb_tunnel_free(tunnel);
14284f807e47SMika Westerberg 
14294f807e47SMika Westerberg 	return NULL;
14304f807e47SMika Westerberg }
14314f807e47SMika Westerberg 
14324f807e47SMika Westerberg /**
14334f807e47SMika Westerberg  * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
14344f807e47SMika Westerberg  * @tb: Pointer to the domain structure
14354f807e47SMika Westerberg  * @in: DP in adapter port
14364f807e47SMika Westerberg  * @out: DP out adapter port
14379d2d0a5cSMika Westerberg  * @link_nr: Preferred lane adapter when the link is not bonded
1438*2a0ed2daSGil Fine  * @max_up: Maximum available upstream bandwidth for the DP tunnel.
1439*2a0ed2daSGil Fine  *	    %0 if no available bandwidth.
1440*2a0ed2daSGil Fine  * @max_down: Maximum available downstream bandwidth for the DP tunnel.
1441*2a0ed2daSGil Fine  *	      %0 if no available bandwidth.
14424f807e47SMika Westerberg  *
14434f807e47SMika Westerberg  * Allocates a tunnel between @in and @out that is capable of tunneling
14444f807e47SMika Westerberg  * Display Port traffic.
14454f807e47SMika Westerberg  *
14464f807e47SMika Westerberg  * Return: Returns a tb_tunnel on success or %NULL on failure.
14474f807e47SMika Westerberg  */
14484f807e47SMika Westerberg struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
14499d2d0a5cSMika Westerberg 				     struct tb_port *out, int link_nr,
14509d2d0a5cSMika Westerberg 				     int max_up, int max_down)
14514f807e47SMika Westerberg {
14524f807e47SMika Westerberg 	struct tb_tunnel *tunnel;
14534f807e47SMika Westerberg 	struct tb_path **paths;
14544f807e47SMika Westerberg 	struct tb_path *path;
1455ce91d793SMika Westerberg 	bool pm_support;
14564f807e47SMika Westerberg 
14574f807e47SMika Westerberg 	if (WARN_ON(!in->cap_adap || !out->cap_adap))
14584f807e47SMika Westerberg 		return NULL;
14594f807e47SMika Westerberg 
14604f807e47SMika Westerberg 	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
14614f807e47SMika Westerberg 	if (!tunnel)
14624f807e47SMika Westerberg 		return NULL;
14634f807e47SMika Westerberg 
14646ce35635SMika Westerberg 	tunnel->init = tb_dp_init;
14656ce35635SMika Westerberg 	tunnel->deinit = tb_dp_deinit;
14664f807e47SMika Westerberg 	tunnel->activate = tb_dp_activate;
14676ce35635SMika Westerberg 	tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
14686ce35635SMika Westerberg 	tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
14696ce35635SMika Westerberg 	tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
1470a11b88adSMika Westerberg 	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
14714f807e47SMika Westerberg 	tunnel->src_port = in;
14724f807e47SMika Westerberg 	tunnel->dst_port = out;
14730bd680cdSMika Westerberg 	tunnel->max_up = max_up;
14740bd680cdSMika Westerberg 	tunnel->max_down = max_down;
14754f807e47SMika Westerberg 
14764f807e47SMika Westerberg 	paths = tunnel->paths;
1477ce91d793SMika Westerberg 	pm_support = usb4_switch_version(in->sw) >= 2;
14784f807e47SMika Westerberg 
14794f807e47SMika Westerberg 	path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
14809d2d0a5cSMika Westerberg 			     link_nr, "Video");
14814f807e47SMika Westerberg 	if (!path)
14824f807e47SMika Westerberg 		goto err_free;
1483ce91d793SMika Westerberg 	tb_dp_init_video_path(path, pm_support);
14844f807e47SMika Westerberg 	paths[TB_DP_VIDEO_PATH_OUT] = path;
14854f807e47SMika Westerberg 
14864f807e47SMika Westerberg 	path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
14879d2d0a5cSMika Westerberg 			     TB_DP_AUX_TX_HOPID, link_nr, "AUX TX");
14884f807e47SMika Westerberg 	if (!path)
14894f807e47SMika Westerberg 		goto err_free;
1490ce91d793SMika Westerberg 	tb_dp_init_aux_path(path, pm_support);
14914f807e47SMika Westerberg 	paths[TB_DP_AUX_PATH_OUT] = path;
14924f807e47SMika Westerberg 
14934f807e47SMika Westerberg 	path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
14949d2d0a5cSMika Westerberg 			     TB_DP_AUX_RX_HOPID, link_nr, "AUX RX");
14954f807e47SMika Westerberg 	if (!path)
14964f807e47SMika Westerberg 		goto err_free;
1497ce91d793SMika Westerberg 	tb_dp_init_aux_path(path, pm_support);
14984f807e47SMika Westerberg 	paths[TB_DP_AUX_PATH_IN] = path;
14994f807e47SMika Westerberg 
15004f807e47SMika Westerberg 	return tunnel;
15014f807e47SMika Westerberg 
15024f807e47SMika Westerberg err_free:
15034f807e47SMika Westerberg 	tb_tunnel_free(tunnel);
15044f807e47SMika Westerberg 	return NULL;
15054f807e47SMika Westerberg }
15064f807e47SMika Westerberg 
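/*
 * Credits a DMA path can still claim on @port: what remains after
 * the possible PCIe reservation and already established DMA paths.
 */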
15076ed541c5SMika Westerberg static unsigned int tb_dma_available_credits(const struct tb_port *port)
150844242d6cSMika Westerberg {
15096ed541c5SMika Westerberg 	const struct tb_switch *sw = port->sw;
15106ed541c5SMika Westerberg 	int credits;
151144242d6cSMika Westerberg 
15126ed541c5SMika Westerberg 	credits = tb_available_credits(port, NULL);
15136ed541c5SMika Westerberg 	if (tb_acpi_may_tunnel_pcie())
15146ed541c5SMika Westerberg 		credits -= sw->max_pcie_credits;
15156ed541c5SMika Westerberg 	credits -= port->dma_credits;
15166ed541c5SMika Westerberg 
15176ed541c5SMika Westerberg 	return credits > 0 ? credits : 0;
151844242d6cSMika Westerberg }
151944242d6cSMika Westerberg 
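/*
 * Reserve initial credits for the hop. With dynamic credit
 * allocation the request is capped to what is available (at least
 * TB_MIN_DMA_CREDITS must be free), otherwise fixed defaults based
 * on the adapter type are used.
 */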
15206ed541c5SMika Westerberg static int tb_dma_reserve_credits(struct tb_path_hop *hop, unsigned int credits)
152144242d6cSMika Westerberg {
15226ed541c5SMika Westerberg 	struct tb_port *port = hop->in_port;
152344242d6cSMika Westerberg 
15246ed541c5SMika Westerberg 	if (tb_port_use_credit_allocation(port)) {
15256ed541c5SMika Westerberg 		unsigned int available = tb_dma_available_credits(port);
15266ed541c5SMika Westerberg 
15276ed541c5SMika Westerberg 		/*
15286ed541c5SMika Westerberg 		 * Need to have at least TB_MIN_DMA_CREDITS, otherwise
15296ed541c5SMika Westerberg 		 * DMA path cannot be established.
15306ed541c5SMika Westerberg 		 */
15316ed541c5SMika Westerberg 		if (available < TB_MIN_DMA_CREDITS)
15326ed541c5SMika Westerberg 			return -ENOSPC;
15336ed541c5SMika Westerberg 
15346ed541c5SMika Westerberg 		credits = min(credits, available);
15366ed541c5SMika Westerberg 
15376ed541c5SMika Westerberg 		tb_port_dbg(port, "reserving %u credits for DMA path\n",
15386ed541c5SMika Westerberg 			    credits);
15396ed541c5SMika Westerberg 
15406ed541c5SMika Westerberg 		port->dma_credits += credits;
15416ed541c5SMika Westerberg 	} else {
15426ed541c5SMika Westerberg 		if (tb_port_is_null(port))
15436ed541c5SMika Westerberg 			credits = port->bonded ? 14 : 6;
15446ed541c5SMika Westerberg 		else
15456ed541c5SMika Westerberg 			credits = min(port->total_credits, credits);
15466ed541c5SMika Westerberg 	}
15476ed541c5SMika Westerberg 
15486ed541c5SMika Westerberg 	hop->initial_credits = credits;
15496ed541c5SMika Westerberg 	return 0;
15506ed541c5SMika Westerberg }
15516ed541c5SMika Westerberg 
15526ed541c5SMika Westerberg /* Path from lane adapter to NHI */
15536ed541c5SMika Westerberg static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits)
15546ed541c5SMika Westerberg {
15556ed541c5SMika Westerberg 	struct tb_path_hop *hop;
15566ed541c5SMika Westerberg 	unsigned int i, tmp;
15576ed541c5SMika Westerberg 
15586ed541c5SMika Westerberg 	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
155944242d6cSMika Westerberg 	path->ingress_fc_enable = TB_PATH_ALL;
156044242d6cSMika Westerberg 	path->egress_shared_buffer = TB_PATH_NONE;
1561e5876559SMika Westerberg 	path->ingress_shared_buffer = TB_PATH_NONE;
1562f73edddfSMika Westerberg 	path->priority = TB_DMA_PRIORITY;
1563f73edddfSMika Westerberg 	path->weight = TB_DMA_WEIGHT;
156444242d6cSMika Westerberg 	path->clear_fc = true;
156544242d6cSMika Westerberg 
15666ed541c5SMika Westerberg 	/*
15676ed541c5SMika Westerberg 	 * The first lane adapter is the one connected to the remote host.
15686ed541c5SMika Westerberg 	 * We don't tunnel other traffic over this link so we can use all
15696ed541c5SMika Westerberg 	 * the credits (except the ones reserved for control traffic).
15706ed541c5SMika Westerberg 	 */
15716ed541c5SMika Westerberg 	hop = &path->hops[0];
15726ed541c5SMika Westerberg 	tmp = min(tb_usable_credits(hop->in_port), credits);
15736ed541c5SMika Westerberg 	hop->initial_credits = tmp;
15746ed541c5SMika Westerberg 	hop->in_port->dma_credits += tmp;
15756ed541c5SMika Westerberg 
15766ed541c5SMika Westerberg 	for (i = 1; i < path->path_length; i++) {
15776ed541c5SMika Westerberg 		int ret;
15786ed541c5SMika Westerberg 
15796ed541c5SMika Westerberg 		ret = tb_dma_reserve_credits(&path->hops[i], credits);
15806ed541c5SMika Westerberg 		if (ret)
15816ed541c5SMika Westerberg 			return ret;
15826ed541c5SMika Westerberg 	}
15836ed541c5SMika Westerberg 
15846ed541c5SMika Westerberg 	return 0;
15856ed541c5SMika Westerberg }
15866ed541c5SMika Westerberg 
15876ed541c5SMika Westerberg /* Path from NHI to lane adapter */
15886ed541c5SMika Westerberg static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits)
15896ed541c5SMika Westerberg {
15906ed541c5SMika Westerberg 	struct tb_path_hop *hop;
15916ed541c5SMika Westerberg 
15926ed541c5SMika Westerberg 	path->egress_fc_enable = TB_PATH_ALL;
15936ed541c5SMika Westerberg 	path->ingress_fc_enable = TB_PATH_ALL;
15946ed541c5SMika Westerberg 	path->egress_shared_buffer = TB_PATH_NONE;
15956ed541c5SMika Westerberg 	path->ingress_shared_buffer = TB_PATH_NONE;
1596f73edddfSMika Westerberg 	path->priority = TB_DMA_PRIORITY;
1597f73edddfSMika Westerberg 	path->weight = TB_DMA_WEIGHT;
15986ed541c5SMika Westerberg 	path->clear_fc = true;
15996ed541c5SMika Westerberg 
16006ed541c5SMika Westerberg 	tb_path_for_each_hop(path, hop) {
16016ed541c5SMika Westerberg 		int ret;
16026ed541c5SMika Westerberg 
16036ed541c5SMika Westerberg 		ret = tb_dma_reserve_credits(hop, credits);
16046ed541c5SMika Westerberg 		if (ret)
16056ed541c5SMika Westerberg 			return ret;
16066ed541c5SMika Westerberg 	}
16076ed541c5SMika Westerberg 
16086ed541c5SMika Westerberg 	return 0;
16096ed541c5SMika Westerberg }
16106ed541c5SMika Westerberg 
16116ed541c5SMika Westerberg static void tb_dma_release_credits(struct tb_path_hop *hop)
16126ed541c5SMika Westerberg {
16136ed541c5SMika Westerberg 	struct tb_port *port = hop->in_port;
16146ed541c5SMika Westerberg 
16156ed541c5SMika Westerberg 	if (tb_port_use_credit_allocation(port)) {
16166ed541c5SMika Westerberg 		port->dma_credits -= hop->initial_credits;
16176ed541c5SMika Westerberg 
16186ed541c5SMika Westerberg 		tb_port_dbg(port, "released %u DMA path credits\n",
16196ed541c5SMika Westerberg 			    hop->initial_credits);
16206ed541c5SMika Westerberg 	}
16216ed541c5SMika Westerberg }
16226ed541c5SMika Westerberg 
16236ed541c5SMika Westerberg static void tb_dma_deinit_path(struct tb_path *path)
16246ed541c5SMika Westerberg {
16256ed541c5SMika Westerberg 	struct tb_path_hop *hop;
16266ed541c5SMika Westerberg 
16276ed541c5SMika Westerberg 	tb_path_for_each_hop(path, hop)
16286ed541c5SMika Westerberg 		tb_dma_release_credits(hop);
16296ed541c5SMika Westerberg }
16306ed541c5SMika Westerberg 
16316ed541c5SMika Westerberg static void tb_dma_deinit(struct tb_tunnel *tunnel)
16326ed541c5SMika Westerberg {
16336ed541c5SMika Westerberg 	int i;
16346ed541c5SMika Westerberg 
16356ed541c5SMika Westerberg 	for (i = 0; i < tunnel->npaths; i++) {
16366ed541c5SMika Westerberg 		if (!tunnel->paths[i])
16376ed541c5SMika Westerberg 			continue;
16386ed541c5SMika Westerberg 		tb_dma_deinit_path(tunnel->paths[i]);
16396ed541c5SMika Westerberg 	}
164044242d6cSMika Westerberg }
164144242d6cSMika Westerberg 
164244242d6cSMika Westerberg /**
164344242d6cSMika Westerberg  * tb_tunnel_alloc_dma() - allocate a DMA tunnel
164444242d6cSMika Westerberg  * @tb: Pointer to the domain structure
164544242d6cSMika Westerberg  * @nhi: Host controller port
164644242d6cSMika Westerberg  * @dst: Destination null port which the other domain is connected to
164744242d6cSMika Westerberg  * @transmit_path: HopID used for transmitting packets
1648180b0689SMika Westerberg  * @transmit_ring: NHI ring number used to send packets towards the
1649180b0689SMika Westerberg  *		   other domain. Set to %-1 if TX path is not needed.
1650a27ea0dfSLee Jones  * @receive_path: HopID used for receiving packets
1651180b0689SMika Westerberg  * @receive_ring: NHI ring number used to receive packets from the
1652180b0689SMika Westerberg  *		  other domain. Set to %-1 if RX path is not needed.
165344242d6cSMika Westerberg  *
165444242d6cSMika Westerberg  * Return: Returns a tb_tunnel on success or %NULL on failure.
165544242d6cSMika Westerberg  */
165644242d6cSMika Westerberg struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
1657180b0689SMika Westerberg 				      struct tb_port *dst, int transmit_path,
1658180b0689SMika Westerberg 				      int transmit_ring, int receive_path,
1659180b0689SMika Westerberg 				      int receive_ring)
166044242d6cSMika Westerberg {
166144242d6cSMika Westerberg 	struct tb_tunnel *tunnel;
16625bf722dfSMika Westerberg 	size_t npaths = 0, i = 0;
166344242d6cSMika Westerberg 	struct tb_path *path;
16646ed541c5SMika Westerberg 	int credits;
166544242d6cSMika Westerberg 
1666814c96c9SMika Westerberg 	/* Ring 0 is reserved for control channel */
1667814c96c9SMika Westerberg 	if (WARN_ON(!receive_ring || !transmit_ring))
1668814c96c9SMika Westerberg 		return NULL;
1669814c96c9SMika Westerberg 
1670180b0689SMika Westerberg 	if (receive_ring > 0)
16715bf722dfSMika Westerberg 		npaths++;
1672180b0689SMika Westerberg 	if (transmit_ring > 0)
16735bf722dfSMika Westerberg 		npaths++;
16745bf722dfSMika Westerberg 
16755bf722dfSMika Westerberg 	if (WARN_ON(!npaths))
16765bf722dfSMika Westerberg 		return NULL;
16775bf722dfSMika Westerberg 
16785bf722dfSMika Westerberg 	tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA);
167944242d6cSMika Westerberg 	if (!tunnel)
168044242d6cSMika Westerberg 		return NULL;
168144242d6cSMika Westerberg 
168244242d6cSMika Westerberg 	tunnel->src_port = nhi;
168344242d6cSMika Westerberg 	tunnel->dst_port = dst;
16846ed541c5SMika Westerberg 	tunnel->deinit = tb_dma_deinit;
168544242d6cSMika Westerberg 
16867ee20d0aSMika Westerberg 	credits = min_not_zero(dma_credits, nhi->sw->max_dma_credits);
168744242d6cSMika Westerberg 
1688180b0689SMika Westerberg 	if (receive_ring > 0) {
16895bf722dfSMika Westerberg 		path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
16905bf722dfSMika Westerberg 				     "DMA RX");
16916ed541c5SMika Westerberg 		if (!path)
16926ed541c5SMika Westerberg 			goto err_free;
16935bf722dfSMika Westerberg 		tunnel->paths[i++] = path;
16946ed541c5SMika Westerberg 		if (tb_dma_init_rx_path(path, credits)) {
16956ed541c5SMika Westerberg 			tb_tunnel_dbg(tunnel, "not enough buffers for RX path\n");
16966ed541c5SMika Westerberg 			goto err_free;
16976ed541c5SMika Westerberg 		}
16985bf722dfSMika Westerberg 	}
169944242d6cSMika Westerberg 
1700180b0689SMika Westerberg 	if (transmit_ring > 0) {
17015bf722dfSMika Westerberg 		path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
17025bf722dfSMika Westerberg 				     "DMA TX");
17036ed541c5SMika Westerberg 		if (!path)
17046ed541c5SMika Westerberg 			goto err_free;
17055bf722dfSMika Westerberg 		tunnel->paths[i++] = path;
17066ed541c5SMika Westerberg 		if (tb_dma_init_tx_path(path, credits)) {
17076ed541c5SMika Westerberg 			tb_tunnel_dbg(tunnel, "not enough buffers for TX path\n");
17086ed541c5SMika Westerberg 			goto err_free;
17096ed541c5SMika Westerberg 		}
17105bf722dfSMika Westerberg 	}
171144242d6cSMika Westerberg 
171244242d6cSMika Westerberg 	return tunnel;
17136ed541c5SMika Westerberg 
17146ed541c5SMika Westerberg err_free:
17156ed541c5SMika Westerberg 	tb_tunnel_free(tunnel);
17166ed541c5SMika Westerberg 	return NULL;
171744242d6cSMika Westerberg }
171844242d6cSMika Westerberg 
1719180b0689SMika Westerberg /**
1720180b0689SMika Westerberg  * tb_tunnel_match_dma() - Match DMA tunnel
1721180b0689SMika Westerberg  * @tunnel: Tunnel to match
1722180b0689SMika Westerberg  * @transmit_path: HopID used for transmitting packets. Pass %-1 to ignore.
1723180b0689SMika Westerberg  * @transmit_ring: NHI ring number used to send packets towards the
1724180b0689SMika Westerberg  *		   other domain. Pass %-1 to ignore.
1725180b0689SMika Westerberg  * @receive_path: HopID used for receiving packets. Pass %-1 to ignore.
1726180b0689SMika Westerberg  * @receive_ring: NHI ring number used to receive packets from the
1727180b0689SMika Westerberg  *		  other domain. Pass %-1 to ignore.
1728180b0689SMika Westerberg  *
1729180b0689SMika Westerberg  * This function can be used to match a specific DMA tunnel, if there
1730180b0689SMika Westerberg  * are multiple DMA tunnels going through the same XDomain connection.
1731180b0689SMika Westerberg  * Returns true if there is a match and false otherwise.
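 *
 * For example, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1) matches any
 * tunnel whose TX path uses NHI ring 1, regardless of its RX side.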
1732180b0689SMika Westerberg  */
1733180b0689SMika Westerberg bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
1734180b0689SMika Westerberg 			 int transmit_ring, int receive_path, int receive_ring)
1735180b0689SMika Westerberg {
1736180b0689SMika Westerberg 	const struct tb_path *tx_path = NULL, *rx_path = NULL;
1737180b0689SMika Westerberg 	int i;
1738180b0689SMika Westerberg 
1739180b0689SMika Westerberg 	if (!receive_ring || !transmit_ring)
1740180b0689SMika Westerberg 		return false;
1741180b0689SMika Westerberg 
1742180b0689SMika Westerberg 	for (i = 0; i < tunnel->npaths; i++) {
1743180b0689SMika Westerberg 		const struct tb_path *path = tunnel->paths[i];
1744180b0689SMika Westerberg 
1745180b0689SMika Westerberg 		if (!path)
1746180b0689SMika Westerberg 			continue;
1747180b0689SMika Westerberg 
1748180b0689SMika Westerberg 		if (tb_port_is_nhi(path->hops[0].in_port))
1749180b0689SMika Westerberg 			tx_path = path;
1750180b0689SMika Westerberg 		else if (tb_port_is_nhi(path->hops[path->path_length - 1].out_port))
1751180b0689SMika Westerberg 			rx_path = path;
1752180b0689SMika Westerberg 	}
1753180b0689SMika Westerberg 
1754180b0689SMika Westerberg 	if (transmit_ring > 0 || transmit_path > 0) {
1755180b0689SMika Westerberg 		if (!tx_path)
1756180b0689SMika Westerberg 			return false;
1757180b0689SMika Westerberg 		if (transmit_ring > 0 &&
1758180b0689SMika Westerberg 		    (tx_path->hops[0].in_hop_index != transmit_ring))
1759180b0689SMika Westerberg 			return false;
1760180b0689SMika Westerberg 		if (transmit_path > 0 &&
1761180b0689SMika Westerberg 		    (tx_path->hops[tx_path->path_length - 1].next_hop_index != transmit_path))
1762180b0689SMika Westerberg 			return false;
1763180b0689SMika Westerberg 	}
1764180b0689SMika Westerberg 
1765180b0689SMika Westerberg 	if (receive_ring > 0 || receive_path > 0) {
1766180b0689SMika Westerberg 		if (!rx_path)
1767180b0689SMika Westerberg 			return false;
1768180b0689SMika Westerberg 		if (receive_path > 0 &&
1769180b0689SMika Westerberg 		    (rx_path->hops[0].in_hop_index != receive_path))
1770180b0689SMika Westerberg 			return false;
1771180b0689SMika Westerberg 		if (receive_ring > 0 &&
1772180b0689SMika Westerberg 		    (rx_path->hops[rx_path->path_length - 1].next_hop_index != receive_ring))
1773180b0689SMika Westerberg 			return false;
1774180b0689SMika Westerberg 	}
1775180b0689SMika Westerberg 
1776180b0689SMika Westerberg 	return true;
1777180b0689SMika Westerberg }
1778180b0689SMika Westerberg 
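/* Maximum link rate supported by both ends of the USB3 tunnel, in Mb/s */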
17790bd680cdSMika Westerberg static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
17800bd680cdSMika Westerberg {
17810bd680cdSMika Westerberg 	int ret, up_max_rate, down_max_rate;
17820bd680cdSMika Westerberg 
17830bd680cdSMika Westerberg 	ret = usb4_usb3_port_max_link_rate(up);
17840bd680cdSMika Westerberg 	if (ret < 0)
17850bd680cdSMika Westerberg 		return ret;
17860bd680cdSMika Westerberg 	up_max_rate = ret;
17870bd680cdSMika Westerberg 
17880bd680cdSMika Westerberg 	ret = usb4_usb3_port_max_link_rate(down);
17890bd680cdSMika Westerberg 	if (ret < 0)
17900bd680cdSMika Westerberg 		return ret;
17910bd680cdSMika Westerberg 	down_max_rate = ret;
17920bd680cdSMika Westerberg 
17930bd680cdSMika Westerberg 	return min(up_max_rate, down_max_rate);
17940bd680cdSMika Westerberg }
17950bd680cdSMika Westerberg 
17960bd680cdSMika Westerberg static int tb_usb3_init(struct tb_tunnel *tunnel)
17970bd680cdSMika Westerberg {
17980bd680cdSMika Westerberg 	tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
17990bd680cdSMika Westerberg 		      tunnel->allocated_up, tunnel->allocated_down);
18000bd680cdSMika Westerberg 
18010bd680cdSMika Westerberg 	return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
18020bd680cdSMika Westerberg 						 &tunnel->allocated_up,
18030bd680cdSMika Westerberg 						 &tunnel->allocated_down);
18040bd680cdSMika Westerberg }
18050bd680cdSMika Westerberg 
1806e6f81858SRajmohan Mani static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
1807e6f81858SRajmohan Mani {
1808e6f81858SRajmohan Mani 	int res;
1809e6f81858SRajmohan Mani 
1810e6f81858SRajmohan Mani 	res = tb_usb3_port_enable(tunnel->src_port, activate);
1811e6f81858SRajmohan Mani 	if (res)
1812e6f81858SRajmohan Mani 		return res;
1813e6f81858SRajmohan Mani 
1814e6f81858SRajmohan Mani 	if (tb_port_is_usb3_up(tunnel->dst_port))
1815e6f81858SRajmohan Mani 		return tb_usb3_port_enable(tunnel->dst_port, activate);
1816e6f81858SRajmohan Mani 
1817e6f81858SRajmohan Mani 	return 0;
1818e6f81858SRajmohan Mani }
1819e6f81858SRajmohan Mani 
18200bd680cdSMika Westerberg static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
18210bd680cdSMika Westerberg 		int *consumed_up, int *consumed_down)
18220bd680cdSMika Westerberg {
1823582e70b0SGil Fine 	struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
18244d24db0cSMika Westerberg 	int pcie_weight = tb_acpi_may_tunnel_pcie() ? TB_PCI_WEIGHT : 0;
1825c6da62a2SMika Westerberg 
18260bd680cdSMika Westerberg 	/*
1827c6da62a2SMika Westerberg 	 * PCIe tunneling, if enabled, affects the USB3 bandwidth so
1828c6da62a2SMika Westerberg 	 * take it into account here.
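	 * For example, with a USB3 weight of 2 and a PCIe weight of 1,
	 * a 1000 Mb/s allocation is reported as 1500 Mb/s consumed.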
18290bd680cdSMika Westerberg 	 */
18304d24db0cSMika Westerberg 	*consumed_up = tunnel->allocated_up *
18314d24db0cSMika Westerberg 		(TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
18324d24db0cSMika Westerberg 	*consumed_down = tunnel->allocated_down *
18334d24db0cSMika Westerberg 		(TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
18344d24db0cSMika Westerberg 
1835582e70b0SGil Fine 	if (tb_port_get_link_generation(port) >= 4) {
1836582e70b0SGil Fine 		*consumed_up = max(*consumed_up, USB4_V2_USB3_MIN_BANDWIDTH);
1837582e70b0SGil Fine 		*consumed_down = max(*consumed_down, USB4_V2_USB3_MIN_BANDWIDTH);
1838582e70b0SGil Fine 	}
1839582e70b0SGil Fine 
18400bd680cdSMika Westerberg 	return 0;
18410bd680cdSMika Westerberg }
18420bd680cdSMika Westerberg 
18430bd680cdSMika Westerberg static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
18440bd680cdSMika Westerberg {
18450bd680cdSMika Westerberg 	int ret;
18460bd680cdSMika Westerberg 
18470bd680cdSMika Westerberg 	ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
18480bd680cdSMika Westerberg 					       &tunnel->allocated_up,
18490bd680cdSMika Westerberg 					       &tunnel->allocated_down);
18500bd680cdSMika Westerberg 	if (ret)
18510bd680cdSMika Westerberg 		return ret;
18520bd680cdSMika Westerberg 
18530bd680cdSMika Westerberg 	tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
18540bd680cdSMika Westerberg 		      tunnel->allocated_up, tunnel->allocated_down);
18550bd680cdSMika Westerberg 	return 0;
18560bd680cdSMika Westerberg }
18570bd680cdSMika Westerberg 
18580bd680cdSMika Westerberg static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
18590bd680cdSMika Westerberg 						int *available_up,
18600bd680cdSMika Westerberg 						int *available_down)
18610bd680cdSMika Westerberg {
18620bd680cdSMika Westerberg 	int ret, max_rate, allocate_up, allocate_down;
18630bd680cdSMika Westerberg 
1864e8ff07fbSMika Westerberg 	ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port);
1865813050e0SMika Westerberg 	if (ret < 0) {
1866813050e0SMika Westerberg 		tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
18670bd680cdSMika Westerberg 		return;
18680bd680cdSMika Westerberg 	}
1869813050e0SMika Westerberg 
18700bd680cdSMika Westerberg 	/*
18710bd680cdSMika Westerberg 	 * 90% of the max rate can be allocated for isochronous
18720bd680cdSMika Westerberg 	 * transfers.
18730bd680cdSMika Westerberg 	 */
18740bd680cdSMika Westerberg 	max_rate = ret * 90 / 100;
18750bd680cdSMika Westerberg 
18760bd680cdSMika Westerberg 	/* No need to reclaim if already at maximum */
18770bd680cdSMika Westerberg 	if (tunnel->allocated_up >= max_rate &&
18780bd680cdSMika Westerberg 	    tunnel->allocated_down >= max_rate)
18790bd680cdSMika Westerberg 		return;
18800bd680cdSMika Westerberg 
18810bd680cdSMika Westerberg 	/* Don't go lower than what is already allocated */
18820bd680cdSMika Westerberg 	allocate_up = min(max_rate, *available_up);
18830bd680cdSMika Westerberg 	if (allocate_up < tunnel->allocated_up)
18840bd680cdSMika Westerberg 		allocate_up = tunnel->allocated_up;
18850bd680cdSMika Westerberg 
18860bd680cdSMika Westerberg 	allocate_down = min(max_rate, *available_down);
18870bd680cdSMika Westerberg 	if (allocate_down < tunnel->allocated_down)
18880bd680cdSMika Westerberg 		allocate_down = tunnel->allocated_down;
18890bd680cdSMika Westerberg 
18900bd680cdSMika Westerberg 	/* If no changes no need to do more */
18910bd680cdSMika Westerberg 	if (allocate_up == tunnel->allocated_up &&
18920bd680cdSMika Westerberg 	    allocate_down == tunnel->allocated_down)
18930bd680cdSMika Westerberg 		return;
18940bd680cdSMika Westerberg 
18950bd680cdSMika Westerberg 	ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
18960bd680cdSMika Westerberg 						&allocate_down);
18970bd680cdSMika Westerberg 	if (ret) {
18980bd680cdSMika Westerberg 		tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
18990bd680cdSMika Westerberg 		return;
19000bd680cdSMika Westerberg 	}
19010bd680cdSMika Westerberg 
19020bd680cdSMika Westerberg 	tunnel->allocated_up = allocate_up;
19030bd680cdSMika Westerberg 	*available_up -= tunnel->allocated_up;
19040bd680cdSMika Westerberg 
19050bd680cdSMika Westerberg 	tunnel->allocated_down = allocate_down;
19060bd680cdSMika Westerberg 	*available_down -= tunnel->allocated_down;
19070bd680cdSMika Westerberg 
19080bd680cdSMika Westerberg 	tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
19090bd680cdSMika Westerberg 		      tunnel->allocated_up, tunnel->allocated_down);
19100bd680cdSMika Westerberg }
19110bd680cdSMika Westerberg 
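/*
 * Use the router's maximum USB3 credits with dynamic credit
 * allocation, otherwise fall back to fixed defaults based on the
 * adapter type and whether the link is bonded.
 */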
19126ed541c5SMika Westerberg static void tb_usb3_init_credits(struct tb_path_hop *hop)
19136ed541c5SMika Westerberg {
19146ed541c5SMika Westerberg 	struct tb_port *port = hop->in_port;
19156ed541c5SMika Westerberg 	struct tb_switch *sw = port->sw;
19166ed541c5SMika Westerberg 	unsigned int credits;
19176ed541c5SMika Westerberg 
19186ed541c5SMika Westerberg 	if (tb_port_use_credit_allocation(port)) {
19196ed541c5SMika Westerberg 		credits = sw->max_usb3_credits;
19206ed541c5SMika Westerberg 	} else {
19216ed541c5SMika Westerberg 		if (tb_port_is_null(port))
19226ed541c5SMika Westerberg 			credits = port->bonded ? 32 : 16;
19236ed541c5SMika Westerberg 		else
19246ed541c5SMika Westerberg 			credits = 7;
19256ed541c5SMika Westerberg 	}
19266ed541c5SMika Westerberg 
19276ed541c5SMika Westerberg 	hop->initial_credits = credits;
19286ed541c5SMika Westerberg }
19296ed541c5SMika Westerberg 
1930e6f81858SRajmohan Mani static void tb_usb3_init_path(struct tb_path *path)
1931e6f81858SRajmohan Mani {
19326ed541c5SMika Westerberg 	struct tb_path_hop *hop;
19336ed541c5SMika Westerberg 
1934e6f81858SRajmohan Mani 	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
1935e6f81858SRajmohan Mani 	path->egress_shared_buffer = TB_PATH_NONE;
1936e6f81858SRajmohan Mani 	path->ingress_fc_enable = TB_PATH_ALL;
1937e6f81858SRajmohan Mani 	path->ingress_shared_buffer = TB_PATH_NONE;
1938f73edddfSMika Westerberg 	path->priority = TB_USB3_PRIORITY;
1939f73edddfSMika Westerberg 	path->weight = TB_USB3_WEIGHT;
1940e6f81858SRajmohan Mani 	path->drop_packages = 0;
19416ed541c5SMika Westerberg 
19426ed541c5SMika Westerberg 	tb_path_for_each_hop(path, hop)
19436ed541c5SMika Westerberg 		tb_usb3_init_credits(hop);
1944e6f81858SRajmohan Mani }
1945e6f81858SRajmohan Mani 
1946e6f81858SRajmohan Mani /**
1947e6f81858SRajmohan Mani  * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
1948e6f81858SRajmohan Mani  * @tb: Pointer to the domain structure
1949e6f81858SRajmohan Mani  * @down: USB3 downstream adapter
195043bddb26SMika Westerberg  * @alloc_hopid: Allocate HopIDs from visited ports
1951e6f81858SRajmohan Mani  *
1952e6f81858SRajmohan Mani  * If @down adapter is active, follows the tunnel to the USB3 upstream
1953e6f81858SRajmohan Mani  * adapter and back. Returns the discovered tunnel or %NULL if there was
1954e6f81858SRajmohan Mani  * no tunnel.
1955e6f81858SRajmohan Mani  */
195643bddb26SMika Westerberg struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
195743bddb26SMika Westerberg 					  bool alloc_hopid)
1958e6f81858SRajmohan Mani {
1959e6f81858SRajmohan Mani 	struct tb_tunnel *tunnel;
1960e6f81858SRajmohan Mani 	struct tb_path *path;
1961e6f81858SRajmohan Mani 
1962e6f81858SRajmohan Mani 	if (!tb_usb3_port_is_enabled(down))
1963e6f81858SRajmohan Mani 		return NULL;
1964e6f81858SRajmohan Mani 
1965e6f81858SRajmohan Mani 	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
1966e6f81858SRajmohan Mani 	if (!tunnel)
1967e6f81858SRajmohan Mani 		return NULL;
1968e6f81858SRajmohan Mani 
1969e6f81858SRajmohan Mani 	tunnel->activate = tb_usb3_activate;
1970e6f81858SRajmohan Mani 	tunnel->src_port = down;
1971e6f81858SRajmohan Mani 
1972e6f81858SRajmohan Mani 	/*
1973e6f81858SRajmohan Mani 	 * Discover both paths even if they are not complete. We will
1974e6f81858SRajmohan Mani 	 * clean them up by calling tb_tunnel_deactivate() below in that
1975e6f81858SRajmohan Mani 	 * case.
1976e6f81858SRajmohan Mani 	 */
1977e6f81858SRajmohan Mani 	path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
197843bddb26SMika Westerberg 				&tunnel->dst_port, "USB3 Down", alloc_hopid);
1979e6f81858SRajmohan Mani 	if (!path) {
1980e6f81858SRajmohan Mani 		/* Just disable the downstream port */
1981e6f81858SRajmohan Mani 		tb_usb3_port_enable(down, false);
1982e6f81858SRajmohan Mani 		goto err_free;
1983e6f81858SRajmohan Mani 	}
1984e6f81858SRajmohan Mani 	tunnel->paths[TB_USB3_PATH_DOWN] = path;
1985e6f81858SRajmohan Mani 	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);
1986e6f81858SRajmohan Mani 
1987783735f8SMika Westerberg 	path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
198843bddb26SMika Westerberg 				"USB3 Up", alloc_hopid);
1989783735f8SMika Westerberg 	if (!path)
1990783735f8SMika Westerberg 		goto err_deactivate;
1991783735f8SMika Westerberg 	tunnel->paths[TB_USB3_PATH_UP] = path;
1992783735f8SMika Westerberg 	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);
1993783735f8SMika Westerberg 
1994e6f81858SRajmohan Mani 	/* Validate that the tunnel is complete */
1995e6f81858SRajmohan Mani 	if (!tb_port_is_usb3_up(tunnel->dst_port)) {
1996e6f81858SRajmohan Mani 		tb_port_warn(tunnel->dst_port,
1997e6f81858SRajmohan Mani 			     "path does not end on a USB3 adapter, cleaning up\n");
1998e6f81858SRajmohan Mani 		goto err_deactivate;
1999e6f81858SRajmohan Mani 	}
2000e6f81858SRajmohan Mani 
2001e6f81858SRajmohan Mani 	if (down != tunnel->src_port) {
2002e6f81858SRajmohan Mani 		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
2003e6f81858SRajmohan Mani 		goto err_deactivate;
2004e6f81858SRajmohan Mani 	}
2005e6f81858SRajmohan Mani 
2006e6f81858SRajmohan Mani 	if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
2007e6f81858SRajmohan Mani 		tb_tunnel_warn(tunnel,
2008e6f81858SRajmohan Mani 			       "tunnel is not fully activated, cleaning up\n");
2009e6f81858SRajmohan Mani 		goto err_deactivate;
2010e6f81858SRajmohan Mani 	}
2011e6f81858SRajmohan Mani 
20120bd680cdSMika Westerberg 	if (!tb_route(down->sw)) {
20130bd680cdSMika Westerberg 		int ret;
20140bd680cdSMika Westerberg 
20150bd680cdSMika Westerberg 		/*
20160bd680cdSMika Westerberg 		 * Read the initial bandwidth allocation for the first
20170bd680cdSMika Westerberg 		 * hop tunnel.
20180bd680cdSMika Westerberg 		 */
20190bd680cdSMika Westerberg 		ret = usb4_usb3_port_allocated_bandwidth(down,
20200bd680cdSMika Westerberg 			&tunnel->allocated_up, &tunnel->allocated_down);
20210bd680cdSMika Westerberg 		if (ret)
20220bd680cdSMika Westerberg 			goto err_deactivate;
20230bd680cdSMika Westerberg 
20240bd680cdSMika Westerberg 		tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
20250bd680cdSMika Westerberg 			      tunnel->allocated_up, tunnel->allocated_down);
20260bd680cdSMika Westerberg 
20270bd680cdSMika Westerberg 		tunnel->init = tb_usb3_init;
20280bd680cdSMika Westerberg 		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
20290bd680cdSMika Westerberg 		tunnel->release_unused_bandwidth =
20300bd680cdSMika Westerberg 			tb_usb3_release_unused_bandwidth;
20310bd680cdSMika Westerberg 		tunnel->reclaim_available_bandwidth =
20320bd680cdSMika Westerberg 			tb_usb3_reclaim_available_bandwidth;
20330bd680cdSMika Westerberg 	}
20340bd680cdSMika Westerberg 
2035e6f81858SRajmohan Mani 	tb_tunnel_dbg(tunnel, "discovered\n");
2036e6f81858SRajmohan Mani 	return tunnel;
2037e6f81858SRajmohan Mani 
2038e6f81858SRajmohan Mani err_deactivate:
2039e6f81858SRajmohan Mani 	tb_tunnel_deactivate(tunnel);
2040e6f81858SRajmohan Mani err_free:
2041e6f81858SRajmohan Mani 	tb_tunnel_free(tunnel);
2042e6f81858SRajmohan Mani 
2043e6f81858SRajmohan Mani 	return NULL;
2044e6f81858SRajmohan Mani }
2045e6f81858SRajmohan Mani 
2046e6f81858SRajmohan Mani /**
2047e6f81858SRajmohan Mani  * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
2048e6f81858SRajmohan Mani  * @tb: Pointer to the domain structure
2049e6f81858SRajmohan Mani  * @up: USB3 upstream adapter port
2050e6f81858SRajmohan Mani  * @down: USB3 downstream adapter port
205161684c0fSGil Fine  * @max_up: Maximum available upstream bandwidth for the USB3 tunnel.
205261684c0fSGil Fine  *	    %0 if no available bandwidth.
205361684c0fSGil Fine  * @max_down: Maximum available downstream bandwidth for the USB3 tunnel.
205461684c0fSGil Fine  *	      %0 if no available bandwidth.
2055e6f81858SRajmohan Mani  *
2056e6f81858SRajmohan Mani  * Allocate a USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
2057e6f81858SRajmohan Mani  * @TB_TYPE_USB3_DOWN.
2058e6f81858SRajmohan Mani  *
2059e6f81858SRajmohan Mani  * Return: Returns a tb_tunnel on success or %NULL on failure.
2060e6f81858SRajmohan Mani  */
2061e6f81858SRajmohan Mani struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
20620bd680cdSMika Westerberg 				       struct tb_port *down, int max_up,
20630bd680cdSMika Westerberg 				       int max_down)
2064e6f81858SRajmohan Mani {
2065e6f81858SRajmohan Mani 	struct tb_tunnel *tunnel;
2066e6f81858SRajmohan Mani 	struct tb_path *path;
206761684c0fSGil Fine 	int max_rate = 0;
20680bd680cdSMika Westerberg 
206925d905d2SGil Fine 	if (!tb_route(down->sw) && (max_up > 0 || max_down > 0)) {
20700bd680cdSMika Westerberg 		/*
207125d905d2SGil Fine 		 * For USB3 isochronous transfers, we allow at most 90% of
207225d905d2SGil Fine 		 * the maximum bandwidth supported by the USB3 adapters.
20740bd680cdSMika Westerberg 		 */
20750bd680cdSMika Westerberg 		max_rate = tb_usb3_max_link_rate(down, up);
20760bd680cdSMika Westerberg 		if (max_rate < 0)
20770bd680cdSMika Westerberg 			return NULL;
20780bd680cdSMika Westerberg 
20790bd680cdSMika Westerberg 		max_rate = max_rate * 90 / 100;
208025d905d2SGil Fine 		tb_port_dbg(up, "maximum required bandwidth for USB3 tunnel %d Mb/s\n",
20810bd680cdSMika Westerberg 			    max_rate);
20820bd680cdSMika Westerberg 	}
2083e6f81858SRajmohan Mani 
2084e6f81858SRajmohan Mani 	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
2085e6f81858SRajmohan Mani 	if (!tunnel)
2086e6f81858SRajmohan Mani 		return NULL;
2087e6f81858SRajmohan Mani 
2088e6f81858SRajmohan Mani 	tunnel->activate = tb_usb3_activate;
2089e6f81858SRajmohan Mani 	tunnel->src_port = down;
2090e6f81858SRajmohan Mani 	tunnel->dst_port = up;
20910bd680cdSMika Westerberg 	tunnel->max_up = max_up;
20920bd680cdSMika Westerberg 	tunnel->max_down = max_down;
2093e6f81858SRajmohan Mani 
2094e6f81858SRajmohan Mani 	path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
2095e6f81858SRajmohan Mani 			     "USB3 Down");
2096e6f81858SRajmohan Mani 	if (!path) {
2097e6f81858SRajmohan Mani 		tb_tunnel_free(tunnel);
2098e6f81858SRajmohan Mani 		return NULL;
2099e6f81858SRajmohan Mani 	}
2100e6f81858SRajmohan Mani 	tb_usb3_init_path(path);
2101e6f81858SRajmohan Mani 	tunnel->paths[TB_USB3_PATH_DOWN] = path;
2102e6f81858SRajmohan Mani 
2103e6f81858SRajmohan Mani 	path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
2104e6f81858SRajmohan Mani 			     "USB3 Up");
2105e6f81858SRajmohan Mani 	if (!path) {
2106e6f81858SRajmohan Mani 		tb_tunnel_free(tunnel);
2107e6f81858SRajmohan Mani 		return NULL;
2108e6f81858SRajmohan Mani 	}
2109e6f81858SRajmohan Mani 	tb_usb3_init_path(path);
2110e6f81858SRajmohan Mani 	tunnel->paths[TB_USB3_PATH_UP] = path;
2111e6f81858SRajmohan Mani 
21120bd680cdSMika Westerberg 	if (!tb_route(down->sw)) {
211325d905d2SGil Fine 		tunnel->allocated_up = min(max_rate, max_up);
211425d905d2SGil Fine 		tunnel->allocated_down = min(max_rate, max_down);
21150bd680cdSMika Westerberg 
21160bd680cdSMika Westerberg 		tunnel->init = tb_usb3_init;
21170bd680cdSMika Westerberg 		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
21180bd680cdSMika Westerberg 		tunnel->release_unused_bandwidth =
21190bd680cdSMika Westerberg 			tb_usb3_release_unused_bandwidth;
21200bd680cdSMika Westerberg 		tunnel->reclaim_available_bandwidth =
21210bd680cdSMika Westerberg 			tb_usb3_reclaim_available_bandwidth;
21220bd680cdSMika Westerberg 	}
21230bd680cdSMika Westerberg 
2124e6f81858SRajmohan Mani 	return tunnel;
2125e6f81858SRajmohan Mani }
2126e6f81858SRajmohan Mani 
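/*
 * Example (illustrative sketch only, not part of the driver): bringing
 * up a USB3 tunnel between a pair of adapters could look roughly like
 * this. How @up and @down are discovered is left out; passing 0 for
 * max_up/max_down skips the bandwidth capping done above.
 */
static int __maybe_unused example_usb3_tunnel_up(struct tb *tb,
						 struct tb_port *up,
						 struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	int ret;

	tunnel = tb_tunnel_alloc_usb3(tb, up, down, 0, 0);
	if (!tunnel)
		return -ENOMEM;

	ret = tb_tunnel_activate(tunnel);
	if (ret) {
		/* No need to deactivate before freeing */
		tb_tunnel_free(tunnel);
		return ret;
	}

	return 0;
}
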
21271752b9f7SMika Westerberg /**
212893f36adeSMika Westerberg  * tb_tunnel_free() - free a tunnel
212993f36adeSMika Westerberg  * @tunnel: Tunnel to be freed
21301752b9f7SMika Westerberg  *
2131ab9f31cfSMika Westerberg  * Frees a tunnel. The tunnel does not need to be deactivated.
21321752b9f7SMika Westerberg  */
213393f36adeSMika Westerberg void tb_tunnel_free(struct tb_tunnel *tunnel)
21341752b9f7SMika Westerberg {
213593f36adeSMika Westerberg 	int i;
213693f36adeSMika Westerberg 
213793f36adeSMika Westerberg 	if (!tunnel)
213893f36adeSMika Westerberg 		return;
213993f36adeSMika Westerberg 
21406ed541c5SMika Westerberg 	if (tunnel->deinit)
21416ed541c5SMika Westerberg 		tunnel->deinit(tunnel);
21426ed541c5SMika Westerberg 
214393f36adeSMika Westerberg 	for (i = 0; i < tunnel->npaths; i++) {
214493f36adeSMika Westerberg 		if (tunnel->paths[i])
214593f36adeSMika Westerberg 			tb_path_free(tunnel->paths[i]);
214693f36adeSMika Westerberg 	}
214793f36adeSMika Westerberg 
214893f36adeSMika Westerberg 	kfree(tunnel->paths);
21491752b9f7SMika Westerberg 	kfree(tunnel);
21501752b9f7SMika Westerberg }
21511752b9f7SMika Westerberg 
21521752b9f7SMika Westerberg /**
215393f36adeSMika Westerberg  * tb_tunnel_is_invalid() - check whether an activated tunnel is still valid
215493f36adeSMika Westerberg  * @tunnel: Tunnel to check
21551752b9f7SMika Westerberg  */
215693f36adeSMika Westerberg bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
21571752b9f7SMika Westerberg {
215893f36adeSMika Westerberg 	int i;
21591752b9f7SMika Westerberg 
216093f36adeSMika Westerberg 	for (i = 0; i < tunnel->npaths; i++) {
216193f36adeSMika Westerberg 		WARN_ON(!tunnel->paths[i]->activated);
216293f36adeSMika Westerberg 		if (tb_path_is_invalid(tunnel->paths[i]))
216393f36adeSMika Westerberg 			return true;
216493f36adeSMika Westerberg 	}
216593f36adeSMika Westerberg 
216693f36adeSMika Westerberg 	return false;
21671752b9f7SMika Westerberg }
21681752b9f7SMika Westerberg 
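/*
 * Example (sketch): after a hotplug event the connection manager can
 * prune tunnels whose paths are no longer valid. This assumes the
 * tunnels are linked into @tunnel_list through their list member, as
 * the driver does.
 */
static void __maybe_unused example_prune_invalid(struct list_head *tunnel_list)
{
	struct tb_tunnel *tunnel, *n;

	list_for_each_entry_safe(tunnel, n, tunnel_list, list) {
		if (tb_tunnel_is_invalid(tunnel)) {
			tb_tunnel_deactivate(tunnel);
			list_del(&tunnel->list);
			tb_tunnel_free(tunnel);
		}
	}
}
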
21691752b9f7SMika Westerberg /**
217093f36adeSMika Westerberg  * tb_tunnel_restart() - activate a tunnel after a hardware reset
217193f36adeSMika Westerberg  * @tunnel: Tunnel to restart
21721752b9f7SMika Westerberg  *
217393f36adeSMika Westerberg  * Return: 0 on success and negative errno in case of failure
21741752b9f7SMika Westerberg  */
217593f36adeSMika Westerberg int tb_tunnel_restart(struct tb_tunnel *tunnel)
21761752b9f7SMika Westerberg {
217793f36adeSMika Westerberg 	int res, i;
21781752b9f7SMika Westerberg 
217962efe699SMika Westerberg 	tb_tunnel_dbg(tunnel, "activating\n");
21801752b9f7SMika Westerberg 
2181aae9e27fSMika Westerberg 	/*
2182aae9e27fSMika Westerberg 	 * Make sure all paths are properly disabled before enabling
2183aae9e27fSMika Westerberg 	 * them again.
2184aae9e27fSMika Westerberg 	 */
218593f36adeSMika Westerberg 	for (i = 0; i < tunnel->npaths; i++) {
2186aae9e27fSMika Westerberg 		if (tunnel->paths[i]->activated) {
2187aae9e27fSMika Westerberg 			tb_path_deactivate(tunnel->paths[i]);
218893f36adeSMika Westerberg 			tunnel->paths[i]->activated = false;
2189aae9e27fSMika Westerberg 		}
2190aae9e27fSMika Westerberg 	}
2191aae9e27fSMika Westerberg 
21924f807e47SMika Westerberg 	if (tunnel->init) {
21934f807e47SMika Westerberg 		res = tunnel->init(tunnel);
21944f807e47SMika Westerberg 		if (res)
21954f807e47SMika Westerberg 			return res;
21964f807e47SMika Westerberg 	}
21974f807e47SMika Westerberg 
2198aae9e27fSMika Westerberg 	for (i = 0; i < tunnel->npaths; i++) {
219993f36adeSMika Westerberg 		res = tb_path_activate(tunnel->paths[i]);
22001752b9f7SMika Westerberg 		if (res)
22011752b9f7SMika Westerberg 			goto err;
220293f36adeSMika Westerberg 	}
22031752b9f7SMika Westerberg 
220493f36adeSMika Westerberg 	if (tunnel->activate) {
220593f36adeSMika Westerberg 		res = tunnel->activate(tunnel, true);
22061752b9f7SMika Westerberg 		if (res)
22071752b9f7SMika Westerberg 			goto err;
220893f36adeSMika Westerberg 	}
22091752b9f7SMika Westerberg 
22101752b9f7SMika Westerberg 	return 0;
221193f36adeSMika Westerberg 
22121752b9f7SMika Westerberg err:
22131752b9f7SMika Westerberg 	tb_tunnel_warn(tunnel, "activation failed\n");
221493f36adeSMika Westerberg 	tb_tunnel_deactivate(tunnel);
22151752b9f7SMika Westerberg 	return res;
22161752b9f7SMika Westerberg }
22171752b9f7SMika Westerberg 
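/*
 * Example (sketch): on resume the connection manager re-establishes
 * each tunnel that existed before the reset; one that cannot be
 * restarted is dropped. Error handling is simplified here.
 */
static void __maybe_unused example_resume_tunnel(struct tb_tunnel *tunnel)
{
	if (tb_tunnel_restart(tunnel)) {
		tb_tunnel_warn(tunnel, "failed to restart after resume\n");
		tb_tunnel_free(tunnel);
	}
}
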
22181752b9f7SMika Westerberg /**
221993f36adeSMika Westerberg  * tb_tunnel_activate() - activate a tunnel
222093f36adeSMika Westerberg  * @tunnel: Tunnel to activate
22211752b9f7SMika Westerberg  *
22221752b9f7SMika Westerberg  * Return: 0 on success or an error code on failure.
22231752b9f7SMika Westerberg  */
222493f36adeSMika Westerberg int tb_tunnel_activate(struct tb_tunnel *tunnel)
22251752b9f7SMika Westerberg {
222693f36adeSMika Westerberg 	int i;
222793f36adeSMika Westerberg 
222893f36adeSMika Westerberg 	for (i = 0; i < tunnel->npaths; i++) {
222993f36adeSMika Westerberg 		if (tunnel->paths[i]->activated) {
22301752b9f7SMika Westerberg 			tb_tunnel_WARN(tunnel,
22311752b9f7SMika Westerberg 				       "trying to activate an already activated tunnel\n");
22321752b9f7SMika Westerberg 			return -EINVAL;
22331752b9f7SMika Westerberg 		}
22341752b9f7SMika Westerberg 	}
22351752b9f7SMika Westerberg 
223693f36adeSMika Westerberg 	return tb_tunnel_restart(tunnel);
223793f36adeSMika Westerberg }
22381752b9f7SMika Westerberg 
22391752b9f7SMika Westerberg /**
224093f36adeSMika Westerberg  * tb_tunnel_deactivate() - deactivate a tunnel
224193f36adeSMika Westerberg  * @tunnel: Tunnel to deactivate
22421752b9f7SMika Westerberg  */
224393f36adeSMika Westerberg void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
22441752b9f7SMika Westerberg {
224593f36adeSMika Westerberg 	int i;
22461752b9f7SMika Westerberg 
224762efe699SMika Westerberg 	tb_tunnel_dbg(tunnel, "deactivating\n");
224893f36adeSMika Westerberg 
224993f36adeSMika Westerberg 	if (tunnel->activate)
225093f36adeSMika Westerberg 		tunnel->activate(tunnel, false);
225193f36adeSMika Westerberg 
225293f36adeSMika Westerberg 	for (i = 0; i < tunnel->npaths; i++) {
22530414bec5SMika Westerberg 		if (tunnel->paths[i] && tunnel->paths[i]->activated)
225493f36adeSMika Westerberg 			tb_path_deactivate(tunnel->paths[i]);
225593f36adeSMika Westerberg 	}
225693f36adeSMika Westerberg }
2257a11b88adSMika Westerberg 
2258a11b88adSMika Westerberg /**
22590bd680cdSMika Westerberg  * tb_tunnel_port_on_path() - Does the tunnel go through port
2260a11b88adSMika Westerberg  * @tunnel: Tunnel to check
22610bd680cdSMika Westerberg  * @port: Port to check
2262a11b88adSMika Westerberg  *
22630bd680cdSMika Westerberg  * Returns true if @tunnel goes through @port (direction does not matter),
2264a11b88adSMika Westerberg  * false otherwise.
2265a11b88adSMika Westerberg  */
22660bd680cdSMika Westerberg bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
22670bd680cdSMika Westerberg 			    const struct tb_port *port)
2268a11b88adSMika Westerberg {
2269a11b88adSMika Westerberg 	int i;
2270a11b88adSMika Westerberg 
2271a11b88adSMika Westerberg 	for (i = 0; i < tunnel->npaths; i++) {
2272a11b88adSMika Westerberg 		if (!tunnel->paths[i])
2273a11b88adSMika Westerberg 			continue;
22740bd680cdSMika Westerberg 
22750bd680cdSMika Westerberg 		if (tb_path_port_on_path(tunnel->paths[i], port))
2276a11b88adSMika Westerberg 			return true;
2277a11b88adSMika Westerberg 	}
2278a11b88adSMika Westerberg 
2279a11b88adSMika Westerberg 	return false;
2280a11b88adSMika Westerberg }
2281a11b88adSMika Westerberg 
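/*
 * Example (sketch): find the first tunnel that runs through @port, for
 * instance when the port was just unplugged. List layout as in the
 * sketches above.
 */
static struct tb_tunnel * __maybe_unused
example_find_tunnel_on_port(struct list_head *tunnel_list,
			    const struct tb_port *port)
{
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, tunnel_list, list) {
		if (tb_tunnel_port_on_path(tunnel, port))
			return tunnel;
	}

	return NULL;
}
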
2282a11b88adSMika Westerberg static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
2283a11b88adSMika Westerberg {
2284a11b88adSMika Westerberg 	int i;
2285a11b88adSMika Westerberg 
2286a11b88adSMika Westerberg 	for (i = 0; i < tunnel->npaths; i++) {
2287a11b88adSMika Westerberg 		if (!tunnel->paths[i])
2288a11b88adSMika Westerberg 			return false;
2289a11b88adSMika Westerberg 		if (!tunnel->paths[i]->activated)
2290a11b88adSMika Westerberg 			return false;
2291a11b88adSMika Westerberg 	}
2292a11b88adSMika Westerberg 
2293a11b88adSMika Westerberg 	return true;
2294a11b88adSMika Westerberg }
2295a11b88adSMika Westerberg 
2296a11b88adSMika Westerberg /**
229706cbcbfaSMika Westerberg  * tb_tunnel_maximum_bandwidth() - Return maximum possible bandwidth
229806cbcbfaSMika Westerberg  * @tunnel: Tunnel to check
229906cbcbfaSMika Westerberg  * @max_up: Maximum upstream bandwidth in Mb/s
230006cbcbfaSMika Westerberg  * @max_down: Maximum downstream bandwidth in Mb/s
230106cbcbfaSMika Westerberg  *
230206cbcbfaSMika Westerberg  * Returns the maximum possible bandwidth this tunnel can consume if not
230306cbcbfaSMika Westerberg  * limited by other bandwidth clients. If the tunnel does not support
230406cbcbfaSMika Westerberg  * this, returns %-EOPNOTSUPP.
230506cbcbfaSMika Westerberg  */
23066ce35635SMika Westerberg int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
23076ce35635SMika Westerberg 				int *max_down)
23086ce35635SMika Westerberg {
23096ce35635SMika Westerberg 	if (!tb_tunnel_is_active(tunnel))
23106ce35635SMika Westerberg 		return -EINVAL;
23116ce35635SMika Westerberg 
23126ce35635SMika Westerberg 	if (tunnel->maximum_bandwidth)
23136ce35635SMika Westerberg 		return tunnel->maximum_bandwidth(tunnel, max_up, max_down);
23146ce35635SMika Westerberg 	return -EOPNOTSUPP;
23156ce35635SMika Westerberg }
23166ce35635SMika Westerberg 
23176ce35635SMika Westerberg /**
23186ce35635SMika Westerberg  * tb_tunnel_allocated_bandwidth() - Return bandwidth allocated for the tunnel
23196ce35635SMika Westerberg  * @tunnel: Tunnel to check
23206ce35635SMika Westerberg  * @allocated_up: Currently allocated upstream bandwidth in Mb/s is stored here
23216ce35635SMika Westerberg  * @allocated_down: Currently allocated downstream bandwidth in Mb/s is
23226ce35635SMika Westerberg  *		    stored here
23236ce35635SMika Westerberg  *
23246ce35635SMika Westerberg  * Returns the bandwidth allocated for the tunnel. This may be higher
23256ce35635SMika Westerberg  * than what the tunnel actually consumes.
23266ce35635SMika Westerberg  */
23276ce35635SMika Westerberg int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
23286ce35635SMika Westerberg 				  int *allocated_down)
23296ce35635SMika Westerberg {
23306ce35635SMika Westerberg 	if (!tb_tunnel_is_active(tunnel))
23316ce35635SMika Westerberg 		return -EINVAL;
23326ce35635SMika Westerberg 
23336ce35635SMika Westerberg 	if (tunnel->allocated_bandwidth)
23346ce35635SMika Westerberg 		return tunnel->allocated_bandwidth(tunnel, allocated_up,
23356ce35635SMika Westerberg 						   allocated_down);
23366ce35635SMika Westerberg 	return -EOPNOTSUPP;
23376ce35635SMika Westerberg }
23386ce35635SMika Westerberg 
23396ce35635SMika Westerberg /**
23406ce35635SMika Westerberg  * tb_tunnel_alloc_bandwidth() - Change tunnel bandwidth allocation
23416ce35635SMika Westerberg  * @tunnel: Tunnel whose bandwidth allocation to change
23426ce35635SMika Westerberg  * @alloc_up: New upstream bandwidth in Mb/s
23436ce35635SMika Westerberg  * @alloc_down: New downstream bandwidth in Mb/s
23446ce35635SMika Westerberg  *
23456ce35635SMika Westerberg  * Tries to change the tunnel bandwidth allocation. If it succeeds, returns
23466ce35635SMika Westerberg  * %0 and updates @alloc_up and @alloc_down to what was actually allocated
23476ce35635SMika Westerberg  * (which may not be the same as passed in originally). Returns negative
23486ce35635SMika Westerberg  * errno in case of failure.
23496ce35635SMika Westerberg  */
23506ce35635SMika Westerberg int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
23516ce35635SMika Westerberg 			      int *alloc_down)
23526ce35635SMika Westerberg {
23536ce35635SMika Westerberg 	if (!tb_tunnel_is_active(tunnel))
23546ce35635SMika Westerberg 		return -EINVAL;
23556ce35635SMika Westerberg 
23566ce35635SMika Westerberg 	if (tunnel->alloc_bandwidth)
23576ce35635SMika Westerberg 		return tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down);
23586ce35635SMika Westerberg 
23596ce35635SMika Westerberg 	return -EOPNOTSUPP;
23606ce35635SMika Westerberg }
23616ce35635SMika Westerberg 
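/*
 * Example (sketch): growing a tunnel's allocation, clamped to what the
 * tunnel could ever use. The requested 1000/8000 Mb/s figures are
 * purely illustrative.
 */
static int __maybe_unused example_grow_allocation(struct tb_tunnel *tunnel)
{
	int max_up, max_down, alloc_up, alloc_down, ret;

	ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
	if (ret)
		return ret;

	alloc_up = min(1000, max_up);
	alloc_down = min(8000, max_down);

	ret = tb_tunnel_alloc_bandwidth(tunnel, &alloc_up, &alloc_down);
	if (ret)
		return ret;

	/* On success these hold what was actually granted */
	tb_tunnel_dbg(tunnel, "granted %d/%d Mb/s\n", alloc_up, alloc_down);
	return 0;
}
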
2362a11b88adSMika Westerberg /**
2363a11b88adSMika Westerberg  * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
2364a11b88adSMika Westerberg  * @tunnel: Tunnel to check
23657c0ee8fdSMika Westerberg  * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
23667c0ee8fdSMika Westerberg  *		 Can be %NULL.
23677c0ee8fdSMika Westerberg  * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
23687c0ee8fdSMika Westerberg  *		   Can be %NULL.
2369a11b88adSMika Westerberg  *
23707c0ee8fdSMika Westerberg  * Stores the amount of isochronous bandwidth @tunnel consumes in
23717c0ee8fdSMika Westerberg  * @consumed_up and @consumed_down. In case of success returns %0,
23727c0ee8fdSMika Westerberg  * negative errno otherwise.
2373a11b88adSMika Westerberg  */
23747c0ee8fdSMika Westerberg int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
23757c0ee8fdSMika Westerberg 				 int *consumed_down)
2376a11b88adSMika Westerberg {
23777c0ee8fdSMika Westerberg 	int up_bw = 0, down_bw = 0;
23787c0ee8fdSMika Westerberg 
2379a11b88adSMika Westerberg 	if (!tb_tunnel_is_active(tunnel))
23807c0ee8fdSMika Westerberg 		goto out;
2381a11b88adSMika Westerberg 
2382a11b88adSMika Westerberg 	if (tunnel->consumed_bandwidth) {
23837c0ee8fdSMika Westerberg 		int ret;
2384a11b88adSMika Westerberg 
23857c0ee8fdSMika Westerberg 		ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
23867c0ee8fdSMika Westerberg 		if (ret)
2387a11b88adSMika Westerberg 			return ret;
23887c0ee8fdSMika Westerberg 
23897c0ee8fdSMika Westerberg 		tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
23907c0ee8fdSMika Westerberg 			      down_bw);
2391a11b88adSMika Westerberg 	}
2392a11b88adSMika Westerberg 
23937c0ee8fdSMika Westerberg out:
23947c0ee8fdSMika Westerberg 	if (consumed_up)
23957c0ee8fdSMika Westerberg 		*consumed_up = up_bw;
23967c0ee8fdSMika Westerberg 	if (consumed_down)
23977c0ee8fdSMika Westerberg 		*consumed_down = down_bw;
23987c0ee8fdSMika Westerberg 
2399a11b88adSMika Westerberg 	return 0;
2400a11b88adSMika Westerberg }
24010bd680cdSMika Westerberg 
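/*
 * Example (sketch): summing up the isochronous bandwidth consumed by
 * all tunnels going through @port, e.g. when estimating how much is
 * left for a new tunnel. List layout as in the sketches above.
 */
static int __maybe_unused
example_consumed_on_port(struct list_head *tunnel_list,
			 const struct tb_port *port, int *total_up,
			 int *total_down)
{
	struct tb_tunnel *tunnel;
	int up, down, ret;

	*total_up = *total_down = 0;

	list_for_each_entry(tunnel, tunnel_list, list) {
		if (!tb_tunnel_port_on_path(tunnel, port))
			continue;

		ret = tb_tunnel_consumed_bandwidth(tunnel, &up, &down);
		if (ret)
			return ret;

		*total_up += up;
		*total_down += down;
	}

	return 0;
}
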
24020bd680cdSMika Westerberg /**
24030bd680cdSMika Westerberg  * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
24040bd680cdSMika Westerberg  * @tunnel: Tunnel whose unused bandwidth to release
24050bd680cdSMika Westerberg  *
24060bd680cdSMika Westerberg  * If the tunnel supports dynamic bandwidth management (only USB3 tunnels
24070bd680cdSMika Westerberg  * at the moment), this function makes it release all the unused bandwidth.
24080bd680cdSMika Westerberg  *
24090bd680cdSMika Westerberg  * Returns %0 in case of success and negative errno otherwise.
24100bd680cdSMika Westerberg  */
24110bd680cdSMika Westerberg int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
24120bd680cdSMika Westerberg {
24130bd680cdSMika Westerberg 	if (!tb_tunnel_is_active(tunnel))
24140bd680cdSMika Westerberg 		return 0;
24150bd680cdSMika Westerberg 
24160bd680cdSMika Westerberg 	if (tunnel->release_unused_bandwidth) {
24170bd680cdSMika Westerberg 		int ret;
24180bd680cdSMika Westerberg 
24190bd680cdSMika Westerberg 		ret = tunnel->release_unused_bandwidth(tunnel);
24200bd680cdSMika Westerberg 		if (ret)
24210bd680cdSMika Westerberg 			return ret;
24220bd680cdSMika Westerberg 	}
24230bd680cdSMika Westerberg 
24240bd680cdSMika Westerberg 	return 0;
24250bd680cdSMika Westerberg }
24260bd680cdSMika Westerberg 
24270bd680cdSMika Westerberg /**
24280bd680cdSMika Westerberg  * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
24290bd680cdSMika Westerberg  * @tunnel: Tunnel reclaiming available bandwidth
24300bd680cdSMika Westerberg  * @available_up: Available upstream bandwidth (in Mb/s)
24310bd680cdSMika Westerberg  * @available_down: Available downstream bandwidth (in Mb/s)
24320bd680cdSMika Westerberg  *
24330bd680cdSMika Westerberg  * Reclaims bandwidth from @available_up and @available_down and updates
24340bd680cdSMika Westerberg  * the variables accordingly (e.g. decreases both according to what was
24350bd680cdSMika Westerberg  * reclaimed by the tunnel). If nothing was reclaimed, the values are
24360bd680cdSMika Westerberg  * kept as is.
24370bd680cdSMika Westerberg  */
24380bd680cdSMika Westerberg void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
24390bd680cdSMika Westerberg 					   int *available_up,
24400bd680cdSMika Westerberg 					   int *available_down)
24410bd680cdSMika Westerberg {
24420bd680cdSMika Westerberg 	if (!tb_tunnel_is_active(tunnel))
24430bd680cdSMika Westerberg 		return;
24440bd680cdSMika Westerberg 
24450bd680cdSMika Westerberg 	if (tunnel->reclaim_available_bandwidth)
24460bd680cdSMika Westerberg 		tunnel->reclaim_available_bandwidth(tunnel, available_up,
24470bd680cdSMika Westerberg 						    available_down);
24480bd680cdSMika Westerberg }
2449d27bd2c3SMika Westerberg 
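/*
 * Example (sketch): rough shape of the two-step rebalancing this pair
 * of functions enables. First every tunnel gives back bandwidth it does
 * not use, then the resulting budget (@available_up/@available_down,
 * computed by the caller) is offered back for the tunnels to reclaim.
 */
static void __maybe_unused example_rebalance(struct list_head *tunnel_list,
					     int available_up,
					     int available_down)
{
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, tunnel_list, list)
		tb_tunnel_release_unused_bandwidth(tunnel);

	list_for_each_entry(tunnel, tunnel_list, list)
		tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up,
						      &available_down);
}
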
2450d27bd2c3SMika Westerberg const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel)
2451d27bd2c3SMika Westerberg {
2452d27bd2c3SMika Westerberg 	return tb_tunnel_names[tunnel->type];
2453d27bd2c3SMika Westerberg }
2454