112a14f2fSMika Westerberg // SPDX-License-Identifier: GPL-2.0
212a14f2fSMika Westerberg /*
312a14f2fSMika Westerberg * CLx support
412a14f2fSMika Westerberg *
512a14f2fSMika Westerberg * Copyright (C) 2020 - 2023, Intel Corporation
612a14f2fSMika Westerberg * Authors: Gil Fine <gil.fine@intel.com>
712a14f2fSMika Westerberg * Mika Westerberg <mika.westerberg@linux.intel.com>
812a14f2fSMika Westerberg */
912a14f2fSMika Westerberg
1012a14f2fSMika Westerberg #include <linux/module.h>
1112a14f2fSMika Westerberg
1212a14f2fSMika Westerberg #include "tb.h"
1312a14f2fSMika Westerberg
/*
 * Module parameter "clx": allows disabling CLx (low power states on the
 * high-speed lanes) globally at module load time. Read-only in sysfs (0444).
 */
static bool clx_enabled = true;
module_param_named(clx, clx_enabled, bool, 0444);
MODULE_PARM_DESC(clx, "allow low power states on the high-speed lanes (default: true)");
1712a14f2fSMika Westerberg
clx_name(unsigned int clx)18768e6fe6SMika Westerberg static const char *clx_name(unsigned int clx)
19768e6fe6SMika Westerberg {
20fd4d58d1SMika Westerberg switch (clx) {
21fd4d58d1SMika Westerberg case TB_CL0S | TB_CL1 | TB_CL2:
22768e6fe6SMika Westerberg return "CL0s/CL1/CL2";
23fd4d58d1SMika Westerberg case TB_CL1 | TB_CL2:
24fd4d58d1SMika Westerberg return "CL1/CL2";
25fd4d58d1SMika Westerberg case TB_CL0S | TB_CL2:
26fd4d58d1SMika Westerberg return "CL0s/CL2";
27fd4d58d1SMika Westerberg case TB_CL0S | TB_CL1:
28768e6fe6SMika Westerberg return "CL0s/CL1";
29fd4d58d1SMika Westerberg case TB_CL0S:
30768e6fe6SMika Westerberg return "CL0s";
31fd4d58d1SMika Westerberg case 0:
32fd4d58d1SMika Westerberg return "disabled";
33fd4d58d1SMika Westerberg default:
34768e6fe6SMika Westerberg return "unknown";
35768e6fe6SMika Westerberg }
36fd4d58d1SMika Westerberg }
37768e6fe6SMika Westerberg
/*
 * Set or clear the PM secondary (PMS) bit in the port's lane adapter
 * LANE_ADP_CS_1 register. Returns %0 on success, negative errno otherwise.
 */
static int tb_port_pm_secondary_set(struct tb_port *port, bool secondary)
{
	u32 val;
	int ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	val = secondary ? (val | LANE_ADP_CS_1_PMS) : (val & ~LANE_ADP_CS_1_PMS);

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}
5612a14f2fSMika Westerberg
/* Convenience wrapper: set the PM secondary bit for @port */
static int tb_port_pm_secondary_enable(struct tb_port *port)
{
	return tb_port_pm_secondary_set(port, true);
}
6112a14f2fSMika Westerberg
/* Convenience wrapper: clear the PM secondary bit for @port */
static int tb_port_pm_secondary_disable(struct tb_port *port)
{
	return tb_port_pm_secondary_set(port, false);
}
6612a14f2fSMika Westerberg
6712a14f2fSMika Westerberg /* Called for USB4 or Titan Ridge routers only */
tb_port_clx_supported(struct tb_port * port,unsigned int clx)6835627353SMika Westerberg static bool tb_port_clx_supported(struct tb_port *port, unsigned int clx)
6912a14f2fSMika Westerberg {
7012a14f2fSMika Westerberg u32 val, mask = 0;
7112a14f2fSMika Westerberg bool ret;
7212a14f2fSMika Westerberg
7312a14f2fSMika Westerberg /* Don't enable CLx in case of two single-lane links */
7412a14f2fSMika Westerberg if (!port->bonded && port->dual_link_port)
7512a14f2fSMika Westerberg return false;
7612a14f2fSMika Westerberg
7712a14f2fSMika Westerberg /* Don't enable CLx in case of inter-domain link */
7812a14f2fSMika Westerberg if (port->xdomain)
7912a14f2fSMika Westerberg return false;
8012a14f2fSMika Westerberg
8112a14f2fSMika Westerberg if (tb_switch_is_usb4(port->sw)) {
8212a14f2fSMika Westerberg if (!usb4_port_clx_supported(port))
8312a14f2fSMika Westerberg return false;
8412a14f2fSMika Westerberg } else if (!tb_lc_is_clx_supported(port)) {
8512a14f2fSMika Westerberg return false;
8612a14f2fSMika Westerberg }
8712a14f2fSMika Westerberg
8835627353SMika Westerberg if (clx & TB_CL0S)
8935627353SMika Westerberg mask |= LANE_ADP_CS_0_CL0S_SUPPORT;
9035627353SMika Westerberg if (clx & TB_CL1)
9135627353SMika Westerberg mask |= LANE_ADP_CS_0_CL1_SUPPORT;
9235627353SMika Westerberg if (clx & TB_CL2)
9312a14f2fSMika Westerberg mask |= LANE_ADP_CS_0_CL2_SUPPORT;
9412a14f2fSMika Westerberg
9512a14f2fSMika Westerberg ret = tb_port_read(port, &val, TB_CFG_PORT,
9612a14f2fSMika Westerberg port->cap_phy + LANE_ADP_CS_0, 1);
9712a14f2fSMika Westerberg if (ret)
9812a14f2fSMika Westerberg return false;
9912a14f2fSMika Westerberg
10012a14f2fSMika Westerberg return !!(val & mask);
10112a14f2fSMika Westerberg }
10212a14f2fSMika Westerberg
/*
 * Enable or disable the given CL states on the port's lane adapter.
 * Returns %-EOPNOTSUPP if @clx maps to no known enable bits, otherwise
 * %0 on success or negative errno from the config space access.
 */
static int tb_port_clx_set(struct tb_port *port, unsigned int clx, bool enable)
{
	u32 val, bits = 0;
	int ret;

	if (clx & TB_CL0S)
		bits |= LANE_ADP_CS_1_CL0S_ENABLE;
	if (clx & TB_CL1)
		bits |= LANE_ADP_CS_1_CL1_ENABLE;
	if (clx & TB_CL2)
		bits |= LANE_ADP_CS_1_CL2_ENABLE;

	if (!bits)
		return -EOPNOTSUPP;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (enable)
		val |= bits;
	else
		val &= ~bits;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}
13112a14f2fSMika Westerberg
/* Convenience wrapper: clear the enable bits of the given CL states */
static int tb_port_clx_disable(struct tb_port *port, unsigned int clx)
{
	return tb_port_clx_set(port, clx, false);
}
13612a14f2fSMika Westerberg
/* Convenience wrapper: set the enable bits of the given CL states */
static int tb_port_clx_enable(struct tb_port *port, unsigned int clx)
{
	return tb_port_clx_set(port, clx, true);
}
14112a14f2fSMika Westerberg
tb_port_clx(struct tb_port * port)142768e6fe6SMika Westerberg static int tb_port_clx(struct tb_port *port)
143768e6fe6SMika Westerberg {
144768e6fe6SMika Westerberg u32 val;
145768e6fe6SMika Westerberg int ret;
146768e6fe6SMika Westerberg
147768e6fe6SMika Westerberg if (!tb_port_clx_supported(port, TB_CL0S | TB_CL1 | TB_CL2))
148768e6fe6SMika Westerberg return 0;
149768e6fe6SMika Westerberg
150768e6fe6SMika Westerberg ret = tb_port_read(port, &val, TB_CFG_PORT,
151768e6fe6SMika Westerberg port->cap_phy + LANE_ADP_CS_1, 1);
152768e6fe6SMika Westerberg if (ret)
153768e6fe6SMika Westerberg return ret;
154768e6fe6SMika Westerberg
155768e6fe6SMika Westerberg if (val & LANE_ADP_CS_1_CL0S_ENABLE)
156768e6fe6SMika Westerberg ret |= TB_CL0S;
157768e6fe6SMika Westerberg if (val & LANE_ADP_CS_1_CL1_ENABLE)
158768e6fe6SMika Westerberg ret |= TB_CL1;
159768e6fe6SMika Westerberg if (val & LANE_ADP_CS_1_CL2_ENABLE)
160768e6fe6SMika Westerberg ret |= TB_CL2;
161768e6fe6SMika Westerberg
162768e6fe6SMika Westerberg return ret;
163768e6fe6SMika Westerberg }
164768e6fe6SMika Westerberg
16512a14f2fSMika Westerberg /**
16612a14f2fSMika Westerberg * tb_port_clx_is_enabled() - Is given CL state enabled
16712a14f2fSMika Westerberg * @port: USB4 port to check
16835627353SMika Westerberg * @clx: Mask of CL states to check
16912a14f2fSMika Westerberg *
17012a14f2fSMika Westerberg * Returns true if any of the given CL states is enabled for @port.
17112a14f2fSMika Westerberg */
tb_port_clx_is_enabled(struct tb_port * port,unsigned int clx)17235627353SMika Westerberg bool tb_port_clx_is_enabled(struct tb_port *port, unsigned int clx)
17312a14f2fSMika Westerberg {
174768e6fe6SMika Westerberg return !!(tb_port_clx(port) & clx);
175768e6fe6SMika Westerberg }
17612a14f2fSMika Westerberg
177768e6fe6SMika Westerberg /**
17835c9ab4fSMika Westerberg * tb_switch_clx_is_supported() - Is CLx supported on this type of router
17935c9ab4fSMika Westerberg * @sw: The router to check CLx support for
18035c9ab4fSMika Westerberg */
tb_switch_clx_is_supported(const struct tb_switch * sw)18135c9ab4fSMika Westerberg static bool tb_switch_clx_is_supported(const struct tb_switch *sw)
18235c9ab4fSMika Westerberg {
18335c9ab4fSMika Westerberg if (!clx_enabled)
18435c9ab4fSMika Westerberg return false;
18535c9ab4fSMika Westerberg
18635c9ab4fSMika Westerberg if (sw->quirks & QUIRK_NO_CLX)
18735c9ab4fSMika Westerberg return false;
18835c9ab4fSMika Westerberg
18935c9ab4fSMika Westerberg /*
19035c9ab4fSMika Westerberg * CLx is not enabled and validated on Intel USB4 platforms
19135c9ab4fSMika Westerberg * before Alder Lake.
19235c9ab4fSMika Westerberg */
19335c9ab4fSMika Westerberg if (tb_switch_is_tiger_lake(sw))
19435c9ab4fSMika Westerberg return false;
19535c9ab4fSMika Westerberg
19635c9ab4fSMika Westerberg return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
19735c9ab4fSMika Westerberg }
19835c9ab4fSMika Westerberg
19935c9ab4fSMika Westerberg /**
200768e6fe6SMika Westerberg * tb_switch_clx_init() - Initialize router CL states
201768e6fe6SMika Westerberg * @sw: Router
202768e6fe6SMika Westerberg *
203768e6fe6SMika Westerberg * Can be called for any router. Initializes the current CL state by
204768e6fe6SMika Westerberg * reading it from the hardware.
205768e6fe6SMika Westerberg *
206768e6fe6SMika Westerberg * Returns %0 in case of success and negative errno in case of failure.
207768e6fe6SMika Westerberg */
tb_switch_clx_init(struct tb_switch * sw)208768e6fe6SMika Westerberg int tb_switch_clx_init(struct tb_switch *sw)
209768e6fe6SMika Westerberg {
210768e6fe6SMika Westerberg struct tb_port *up, *down;
211768e6fe6SMika Westerberg unsigned int clx, tmp;
21212a14f2fSMika Westerberg
213768e6fe6SMika Westerberg if (tb_switch_is_icm(sw))
214768e6fe6SMika Westerberg return 0;
21512a14f2fSMika Westerberg
216768e6fe6SMika Westerberg if (!tb_route(sw))
217768e6fe6SMika Westerberg return 0;
21812a14f2fSMika Westerberg
219768e6fe6SMika Westerberg if (!tb_switch_clx_is_supported(sw))
220768e6fe6SMika Westerberg return 0;
221768e6fe6SMika Westerberg
222768e6fe6SMika Westerberg up = tb_upstream_port(sw);
223768e6fe6SMika Westerberg down = tb_switch_downstream_port(sw);
224768e6fe6SMika Westerberg
225768e6fe6SMika Westerberg clx = tb_port_clx(up);
226768e6fe6SMika Westerberg tmp = tb_port_clx(down);
227768e6fe6SMika Westerberg if (clx != tmp)
228768e6fe6SMika Westerberg tb_sw_warn(sw, "CLx: inconsistent configuration %#x != %#x\n",
229768e6fe6SMika Westerberg clx, tmp);
230768e6fe6SMika Westerberg
231768e6fe6SMika Westerberg tb_sw_dbg(sw, "CLx: current mode: %s\n", clx_name(clx));
232768e6fe6SMika Westerberg
233768e6fe6SMika Westerberg sw->clx = clx;
234768e6fe6SMika Westerberg return 0;
23512a14f2fSMika Westerberg }
23612a14f2fSMika Westerberg
/*
 * Resolve PM secondary roles on the link: the upstream (device side)
 * lane adapter becomes PM secondary, the downstream (host side) one
 * does not. No-op for the host router itself.
 */
static int tb_switch_pm_secondary_resolve(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	if (!tb_route(sw))
		return 0;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	ret = tb_port_pm_secondary_enable(up);
	if (ret)
		return ret;

	return tb_port_pm_secondary_disable(down);
}
25312a14f2fSMika Westerberg
tb_switch_mask_clx_objections(struct tb_switch * sw)25412a14f2fSMika Westerberg static int tb_switch_mask_clx_objections(struct tb_switch *sw)
25512a14f2fSMika Westerberg {
25612a14f2fSMika Westerberg int up_port = sw->config.upstream_port_number;
25712a14f2fSMika Westerberg u32 offset, val[2], mask_obj, unmask_obj;
25812a14f2fSMika Westerberg int ret, i;
25912a14f2fSMika Westerberg
26012a14f2fSMika Westerberg /* Only Titan Ridge of pre-USB4 devices support CLx states */
26112a14f2fSMika Westerberg if (!tb_switch_is_titan_ridge(sw))
26212a14f2fSMika Westerberg return 0;
26312a14f2fSMika Westerberg
26412a14f2fSMika Westerberg if (!tb_route(sw))
26512a14f2fSMika Westerberg return 0;
26612a14f2fSMika Westerberg
26712a14f2fSMika Westerberg /*
26812a14f2fSMika Westerberg * In Titan Ridge there are only 2 dual-lane Thunderbolt ports:
26912a14f2fSMika Westerberg * Port A consists of lane adapters 1,2 and
27012a14f2fSMika Westerberg * Port B consists of lane adapters 3,4
27112a14f2fSMika Westerberg * If upstream port is A, (lanes are 1,2), we mask objections from
27212a14f2fSMika Westerberg * port B (lanes 3,4) and unmask objections from Port A and vice-versa.
27312a14f2fSMika Westerberg */
27412a14f2fSMika Westerberg if (up_port == 1) {
27512a14f2fSMika Westerberg mask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
27612a14f2fSMika Westerberg unmask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
27712a14f2fSMika Westerberg offset = TB_LOW_PWR_C1_CL1;
27812a14f2fSMika Westerberg } else {
27912a14f2fSMika Westerberg mask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
28012a14f2fSMika Westerberg unmask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
28112a14f2fSMika Westerberg offset = TB_LOW_PWR_C3_CL1;
28212a14f2fSMika Westerberg }
28312a14f2fSMika Westerberg
28412a14f2fSMika Westerberg ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
28512a14f2fSMika Westerberg sw->cap_lp + offset, ARRAY_SIZE(val));
28612a14f2fSMika Westerberg if (ret)
28712a14f2fSMika Westerberg return ret;
28812a14f2fSMika Westerberg
28912a14f2fSMika Westerberg for (i = 0; i < ARRAY_SIZE(val); i++) {
29012a14f2fSMika Westerberg val[i] |= mask_obj;
29112a14f2fSMika Westerberg val[i] &= ~unmask_obj;
29212a14f2fSMika Westerberg }
29312a14f2fSMika Westerberg
29412a14f2fSMika Westerberg return tb_sw_write(sw, &val, TB_CFG_SWITCH,
29512a14f2fSMika Westerberg sw->cap_lp + offset, ARRAY_SIZE(val));
29612a14f2fSMika Westerberg }
29712a14f2fSMika Westerberg
validate_mask(unsigned int clx)29835627353SMika Westerberg static bool validate_mask(unsigned int clx)
29935627353SMika Westerberg {
30035627353SMika Westerberg /* Previous states need to be enabled */
30135627353SMika Westerberg if (clx & TB_CL1)
30235627353SMika Westerberg return (clx & TB_CL0S) == TB_CL0S;
30335627353SMika Westerberg return true;
30435627353SMika Westerberg }
30535627353SMika Westerberg
/**
 * tb_switch_clx_enable() - Enable CLx on upstream port of specified router
 * @sw: Router to enable CLx for
 * @clx: The CLx state to enable
 *
 * CLx is enabled only if both sides of the link support CLx, and if both sides
 * of the link are not configured as two single lane links and only if the link
 * is not inter-domain link. The complete set of conditions is described in CM
 * Guide 1.0 section 8.1.
 *
 * Returns %0 on success or an error code on failure.
 */
int tb_switch_clx_enable(struct tb_switch *sw, unsigned int clx)
{
	bool up_clx_support, down_clx_support;
	struct tb_switch *parent_sw;
	struct tb_port *up, *down;
	int ret;

	/* Nothing requested or already in the requested state */
	if (!clx || sw->clx == clx)
		return 0;

	/* Lower CL states must be included in the mask (CL1 needs CL0s) */
	if (!validate_mask(clx))
		return -EINVAL;

	/* Host router has no parent; nothing to enable */
	parent_sw = tb_switch_parent(sw);
	if (!parent_sw)
		return 0;

	/* Both ends of the link must be CLx-capable router types */
	if (!tb_switch_clx_is_supported(parent_sw) ||
	    !tb_switch_clx_is_supported(sw))
		return 0;

	/* Only support CL2 for v2 routers */
	if ((clx & TB_CL2) &&
	    (usb4_switch_version(parent_sw) < 2 ||
	     usb4_switch_version(sw) < 2))
		return -EOPNOTSUPP;

	/* PM secondary roles must be resolved before enabling CL states */
	ret = tb_switch_pm_secondary_resolve(sw);
	if (ret)
		return ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	up_clx_support = tb_port_clx_supported(up, clx);
	down_clx_support = tb_port_clx_supported(down, clx);

	tb_port_dbg(up, "CLx: %s %ssupported\n", clx_name(clx),
		    up_clx_support ? "" : "not ");
	tb_port_dbg(down, "CLx: %s %ssupported\n", clx_name(clx),
		    down_clx_support ? "" : "not ");

	if (!up_clx_support || !down_clx_support)
		return -EOPNOTSUPP;

	/*
	 * Enable upstream first, then downstream; roll back the already
	 * enabled side(s) if a later step fails so the link never ends
	 * up half-configured.
	 */
	ret = tb_port_clx_enable(up, clx);
	if (ret)
		return ret;

	ret = tb_port_clx_enable(down, clx);
	if (ret) {
		tb_port_clx_disable(up, clx);
		return ret;
	}

	ret = tb_switch_mask_clx_objections(sw);
	if (ret) {
		tb_port_clx_disable(up, clx);
		tb_port_clx_disable(down, clx);
		return ret;
	}

	/* Accumulate into the current CL state mask of the router */
	sw->clx |= clx;

	tb_sw_dbg(sw, "CLx: %s enabled\n", clx_name(clx));
	return 0;
}
38512a14f2fSMika Westerberg
38612a14f2fSMika Westerberg /**
3874f9a4f25SMika Westerberg * tb_switch_clx_disable() - Disable CLx on upstream port of specified router
3884f9a4f25SMika Westerberg * @sw: Router to disable CLx for
38935627353SMika Westerberg *
39035627353SMika Westerberg * Disables all CL states of the given router. Can be called on any
39135627353SMika Westerberg * router and if the states were not enabled already does nothing.
39212a14f2fSMika Westerberg *
3934a420eb1SMika Westerberg * Returns the CL states that were disabled or negative errno in case of
3944a420eb1SMika Westerberg * failure.
39512a14f2fSMika Westerberg */
tb_switch_clx_disable(struct tb_switch * sw)39635627353SMika Westerberg int tb_switch_clx_disable(struct tb_switch *sw)
39712a14f2fSMika Westerberg {
39835627353SMika Westerberg unsigned int clx = sw->clx;
3994f9a4f25SMika Westerberg struct tb_port *up, *down;
4004f9a4f25SMika Westerberg int ret;
40112a14f2fSMika Westerberg
40212a14f2fSMika Westerberg if (!tb_switch_clx_is_supported(sw))
40312a14f2fSMika Westerberg return 0;
40412a14f2fSMika Westerberg
40535627353SMika Westerberg if (!clx)
40635627353SMika Westerberg return 0;
40735627353SMika Westerberg
408*9e4f5b2aSMika Westerberg if (sw->is_unplugged)
409*9e4f5b2aSMika Westerberg return clx;
410*9e4f5b2aSMika Westerberg
41112a14f2fSMika Westerberg up = tb_upstream_port(sw);
41212a14f2fSMika Westerberg down = tb_switch_downstream_port(sw);
41335627353SMika Westerberg
41412a14f2fSMika Westerberg ret = tb_port_clx_disable(up, clx);
41512a14f2fSMika Westerberg if (ret)
41612a14f2fSMika Westerberg return ret;
41712a14f2fSMika Westerberg
41812a14f2fSMika Westerberg ret = tb_port_clx_disable(down, clx);
41912a14f2fSMika Westerberg if (ret)
42012a14f2fSMika Westerberg return ret;
42112a14f2fSMika Westerberg
42235627353SMika Westerberg sw->clx = 0;
42312a14f2fSMika Westerberg
424b5d15961SMika Westerberg tb_sw_dbg(sw, "CLx: %s disabled\n", clx_name(clx));
4254a420eb1SMika Westerberg return clx;
42612a14f2fSMika Westerberg }
427