// SPDX-License-Identifier: GPL-2.0
/*
 * CLx support
 *
 * Copyright (C) 2020 - 2023, Intel Corporation
 * Authors: Gil Fine <gil.fine@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/module.h>

#include "tb.h"

static bool clx_enabled = true;
module_param_named(clx, clx_enabled, bool, 0444);
MODULE_PARM_DESC(clx, "allow low power states on the high-speed lanes (default: true)");
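
/*
 * The parameter can also be set at boot time, for example by passing
 * "thunderbolt.clx=0" on the kernel command line to keep the high-speed
 * lanes out of the low power states. The current value is readable from
 * /sys/module/thunderbolt/parameters/clx.
 */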

static int tb_port_pm_secondary_set(struct tb_port *port, bool secondary)
{
	u32 phy;
	int ret;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (secondary)
		phy |= LANE_ADP_CS_1_PMS;
	else
		phy &= ~LANE_ADP_CS_1_PMS;

	return tb_port_write(port, &phy, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

static int tb_port_pm_secondary_enable(struct tb_port *port)
{
	return tb_port_pm_secondary_set(port, true);
}

static int tb_port_pm_secondary_disable(struct tb_port *port)
{
	return tb_port_pm_secondary_set(port, false);
}

/* Called for USB4 or Titan Ridge routers only */
static bool tb_port_clx_supported(struct tb_port *port, unsigned int clx)
{
	u32 val, mask = 0;
	bool ret;

	/* Don't enable CLx in case of two single-lane links */
	if (!port->bonded && port->dual_link_port)
		return false;

	/* Don't enable CLx in case of inter-domain link */
	if (port->xdomain)
		return false;

	if (tb_switch_is_usb4(port->sw)) {
		if (!usb4_port_clx_supported(port))
			return false;
	} else if (!tb_lc_is_clx_supported(port)) {
		return false;
	}

	if (clx & TB_CL0S)
		mask |= LANE_ADP_CS_0_CL0S_SUPPORT;
	if (clx & TB_CL1)
		mask |= LANE_ADP_CS_0_CL1_SUPPORT;
	if (clx & TB_CL2)
		mask |= LANE_ADP_CS_0_CL2_SUPPORT;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_0, 1);
	if (ret)
		return false;

	return !!(val & mask);
}
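
/*
 * Only CL0s and CL1 have their enable bits programmed here: the driver
 * does not enable CL2 yet, so a mask consisting of TB_CL2 alone results
 * in -EOPNOTSUPP.
 */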
static int tb_port_clx_set(struct tb_port *port, unsigned int clx, bool enable)
{
	u32 phy, mask = 0;
	int ret;

	if (clx & TB_CL0S)
		mask |= LANE_ADP_CS_1_CL0S_ENABLE;
	if (clx & TB_CL1)
		mask |= LANE_ADP_CS_1_CL1_ENABLE;

	if (!mask)
		return -EOPNOTSUPP;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (enable)
		phy |= mask;
	else
		phy &= ~mask;

	return tb_port_write(port, &phy, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

static int tb_port_clx_disable(struct tb_port *port, unsigned int clx)
{
	return tb_port_clx_set(port, clx, false);
}

static int tb_port_clx_enable(struct tb_port *port, unsigned int clx)
{
	return tb_port_clx_set(port, clx, true);
}

/**
 * tb_port_clx_is_enabled() - Is given CL state enabled
 * @port: USB4 port to check
 * @clx: Mask of CL states to check
 *
 * Returns true if any of the given CL states is enabled for @port.
 */
bool tb_port_clx_is_enabled(struct tb_port *port, unsigned int clx)
{
	u32 val, mask = 0;
	int ret;

	if (!tb_port_clx_supported(port, clx))
		return false;

	if (clx & TB_CL0S)
		mask |= LANE_ADP_CS_1_CL0S_ENABLE;
	if (clx & TB_CL1)
		mask |= LANE_ADP_CS_1_CL1_ENABLE;
	if (clx & TB_CL2)
		mask |= LANE_ADP_CS_1_CL2_ENABLE;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return false;

	return !!(val & mask);
}
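
/*
 * Make the upstream lane adapter of the router the PM secondary of the
 * link and clear the bit from the parent's downstream adapter so that
 * exactly one end of the link acts as PM secondary.
 */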
static int tb_switch_pm_secondary_resolve(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	if (!tb_route(sw))
		return 0;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);
	ret = tb_port_pm_secondary_enable(up);
	if (ret)
		return ret;

	return tb_port_pm_secondary_disable(down);
}

static int tb_switch_mask_clx_objections(struct tb_switch *sw)
{
	int up_port = sw->config.upstream_port_number;
	u32 offset, val[2], mask_obj, unmask_obj;
	int ret, i;

	/* Only Titan Ridge of the pre-USB4 routers supports CLx states */
	if (!tb_switch_is_titan_ridge(sw))
		return 0;

	if (!tb_route(sw))
		return 0;

	/*
	 * In Titan Ridge there are only two dual-lane Thunderbolt ports:
	 * port A consists of lane adapters 1,2 and port B of lane
	 * adapters 3,4. If the upstream port is A (lanes 1,2), we mask
	 * objections from port B (lanes 3,4) and unmask objections from
	 * port A, and vice versa.
	 */
	if (up_port == 1) {
		mask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
		unmask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
		offset = TB_LOW_PWR_C1_CL1;
	} else {
		mask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
		unmask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
		offset = TB_LOW_PWR_C3_CL1;
	}

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_lp + offset, ARRAY_SIZE(val));
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(val); i++) {
		val[i] |= mask_obj;
		val[i] &= ~unmask_obj;
	}

	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
			   sw->cap_lp + offset, ARRAY_SIZE(val));
}

/**
 * tb_switch_clx_is_supported() - Is CLx supported on this type of router
 * @sw: The router to check CLx support for
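 *
 * Returns true if the router is a USB4 or Titan Ridge router and CL
 * states have not been disabled by the "clx" module parameter, a quirk
 * or a platform limitation.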
 */
bool tb_switch_clx_is_supported(const struct tb_switch *sw)
{
	if (!clx_enabled)
		return false;

	if (sw->quirks & QUIRK_NO_CLX)
		return false;

	/*
	 * CLx is not enabled and validated on Intel USB4 platforms
	 * before Alder Lake.
	 */
	if (tb_switch_is_tiger_lake(sw))
		return false;

	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}

static bool validate_mask(unsigned int clx)
{
	/* Previous states need to be enabled */
	if (clx & TB_CL2)
		return (clx & (TB_CL0S | TB_CL1)) == (TB_CL0S | TB_CL1);
	if (clx & TB_CL1)
		return (clx & TB_CL0S) == TB_CL0S;
	return true;
}

static const char *clx_name(unsigned int clx)
{
	if (clx & TB_CL2)
		return "CL0s/CL1/CL2";
	if (clx & TB_CL1)
		return "CL0s/CL1";
	if (clx & TB_CL0S)
		return "CL0s";

	return "unknown";
}

/**
 * tb_switch_clx_enable() - Enable CLx on upstream port of specified router
 * @sw: Router to enable CLx for
 * @clx: The CLx state to enable
 *
 * CLx is enabled only if both sides of the link support CLx, the link is not
 * configured as two single-lane links, and the link is not an inter-domain
 * link. The complete set of conditions is described in CM Guide 1.0
 * section 8.1.
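 *
 * An illustrative call from a connection manager that wants CL0s and CL1
 * but treats missing support as non-fatal could look like this:
 *
 *	ret = tb_switch_clx_enable(sw, TB_CL0S | TB_CL1);
 *	if (ret && ret != -EOPNOTSUPP)
 *		return ret;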
 *
 * Returns %0 on success or an error code on failure.
 */
int tb_switch_clx_enable(struct tb_switch *sw, unsigned int clx)
{
	bool up_clx_support, down_clx_support;
	struct tb_switch *parent_sw;
	struct tb_port *up, *down;
	int ret;

	if (!validate_mask(clx))
		return -EINVAL;

	parent_sw = tb_switch_parent(sw);
	if (!parent_sw)
		return 0;

	if (!tb_switch_clx_is_supported(parent_sw) ||
	    !tb_switch_clx_is_supported(sw))
		return 0;

	/* CL2 is not yet supported */
	if (clx & TB_CL2)
		return -EOPNOTSUPP;

	ret = tb_switch_pm_secondary_resolve(sw);
	if (ret)
		return ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	up_clx_support = tb_port_clx_supported(up, clx);
	down_clx_support = tb_port_clx_supported(down, clx);

	tb_port_dbg(up, "CLx: %s %ssupported\n", clx_name(clx),
		    up_clx_support ? "" : "not ");
	tb_port_dbg(down, "CLx: %s %ssupported\n", clx_name(clx),
		    down_clx_support ? "" : "not ");

	if (!up_clx_support || !down_clx_support)
		return -EOPNOTSUPP;

	ret = tb_port_clx_enable(up, clx);
	if (ret)
		return ret;

	ret = tb_port_clx_enable(down, clx);
	if (ret) {
		tb_port_clx_disable(up, clx);
		return ret;
	}

	ret = tb_switch_mask_clx_objections(sw);
	if (ret) {
		tb_port_clx_disable(up, clx);
		tb_port_clx_disable(down, clx);
		return ret;
	}

	sw->clx |= clx;

	tb_sw_dbg(sw, "CLx: %s enabled\n", clx_name(clx));
	return 0;
}

/**
 * tb_switch_clx_disable() - Disable CLx on upstream port of specified router
 * @sw: Router to disable CLx for
 *
 * Disables all CL states of the given router. Can be called on any
 * router; if the states were not enabled, this does nothing.
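 *
 * One illustrative use is to take the link out of the low power states
 * before an operation that cannot run while CL states are enabled:
 *
 *	ret = tb_switch_clx_disable(sw);
 *	if (ret)
 *		return ret;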
 *
 * Returns %0 on success or an error code on failure.
 */
int tb_switch_clx_disable(struct tb_switch *sw)
{
	unsigned int clx = sw->clx;
	struct tb_port *up, *down;
	int ret;

	if (!tb_switch_clx_is_supported(sw))
		return 0;

	if (!clx)
		return 0;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	ret = tb_port_clx_disable(up, clx);
	if (ret)
		return ret;

	ret = tb_port_clx_disable(down, clx);
	if (ret)
		return ret;

	sw->clx = 0;

	tb_sw_dbg(sw, "CLx: %s disabled\n", clx_name(clx));
	return 0;
}