xref: /linux/drivers/thunderbolt/tmu.c (revision 6dacc6db4628af3fdc0627dca6dd6104ec9138ed)
1cf29b9afSRajmohan Mani // SPDX-License-Identifier: GPL-2.0
2cf29b9afSRajmohan Mani /*
3cf29b9afSRajmohan Mani  * Thunderbolt Time Management Unit (TMU) support
4cf29b9afSRajmohan Mani  *
5cf29b9afSRajmohan Mani  * Copyright (C) 2019, Intel Corporation
6cf29b9afSRajmohan Mani  * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
7cf29b9afSRajmohan Mani  *	    Rajmohan Mani <rajmohan.mani@intel.com>
8cf29b9afSRajmohan Mani  */
9cf29b9afSRajmohan Mani 
10cf29b9afSRajmohan Mani #include <linux/delay.h>
11cf29b9afSRajmohan Mani 
12cf29b9afSRajmohan Mani #include "tb.h"
13cf29b9afSRajmohan Mani 
/*
 * TS packet interval written to the TMU rate registers for each mode.
 * 0 turns time sync off; both HiFi modes and the enhanced mode use the
 * same interval (16), LowRes uses 1000. Units per USB4 TMU spec —
 * presumably microseconds; confirm against the specification.
 */
static const unsigned int tmu_rates[] = {
	[TB_SWITCH_TMU_MODE_OFF] = 0,
	[TB_SWITCH_TMU_MODE_LOWRES] = 1000,
	[TB_SWITCH_TMU_MODE_HIFI_UNI] = 16,
	[TB_SWITCH_TMU_MODE_HIFI_BI] = 16,
	[TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI] = 16,
};
21d49b4f04SMika Westerberg 
/*
 * Per-mode TMU tuning values. The first two fields are programmed into
 * the router TMU registers (TMU_RTR_CS_0/15) by
 * tb_switch_set_tmu_mode_params(); the remaining fields go into the
 * lane adapter registers (TMU_ADP_CS_8/9) by
 * tb_port_set_tmu_mode_params() and are only non-zero for the enhanced
 * uni-directional mode.
 */
static const struct {
	unsigned int freq_meas_window;	/* TMU_RTR_CS_0 FreqWind field */
	unsigned int avg_const;		/* TMU_RTR_CS_15 averaging constants */
	unsigned int delta_avg_const;	/* TMU_RTR_CS_18 (USB4 v2 only) */
	unsigned int repl_timeout;	/* TMU_ADP_CS_8 ReplTimeout */
	unsigned int repl_threshold;	/* TMU_ADP_CS_8 ReplThreshold */
	unsigned int repl_n;		/* TMU_ADP_CS_9 ReplN */
	unsigned int dirswitch_n;	/* TMU_ADP_CS_9 DirSwitchN */
} tmu_params[] = {
	[TB_SWITCH_TMU_MODE_OFF] = { },
	[TB_SWITCH_TMU_MODE_LOWRES] = { 30, 4, },
	[TB_SWITCH_TMU_MODE_HIFI_UNI] = { 800, 8, },
	[TB_SWITCH_TMU_MODE_HIFI_BI] = { 800, 8, },
	[TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI] = {
		800, 4, 0, 3125, 25, 128, 255,
	},
};
39d49b4f04SMika Westerberg 
40d49b4f04SMika Westerberg static const char *tmu_mode_name(enum tb_switch_tmu_mode mode)
41b017a46dSGil Fine {
42d49b4f04SMika Westerberg 	switch (mode) {
43d49b4f04SMika Westerberg 	case TB_SWITCH_TMU_MODE_OFF:
44d49b4f04SMika Westerberg 		return "off";
45d49b4f04SMika Westerberg 	case TB_SWITCH_TMU_MODE_LOWRES:
46d49b4f04SMika Westerberg 		return "uni-directional, LowRes";
47d49b4f04SMika Westerberg 	case TB_SWITCH_TMU_MODE_HIFI_UNI:
48d49b4f04SMika Westerberg 		return "uni-directional, HiFi";
49d49b4f04SMika Westerberg 	case TB_SWITCH_TMU_MODE_HIFI_BI:
50d49b4f04SMika Westerberg 		return "bi-directional, HiFi";
51d49b4f04SMika Westerberg 	case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
52d49b4f04SMika Westerberg 		return "enhanced uni-directional, MedRes";
53d49b4f04SMika Westerberg 	default:
54d49b4f04SMika Westerberg 		return "unknown";
55d49b4f04SMika Westerberg 	}
56d49b4f04SMika Westerberg }
57d49b4f04SMika Westerberg 
58d49b4f04SMika Westerberg static bool tb_switch_tmu_enhanced_is_supported(const struct tb_switch *sw)
59d49b4f04SMika Westerberg {
60d49b4f04SMika Westerberg 	return usb4_switch_version(sw) > 1;
61d49b4f04SMika Westerberg }
62d49b4f04SMika Westerberg 
/*
 * Program the router level TMU parameters for @mode: the frequency
 * measurement window (TMU_RTR_CS_0), the four averaging constants
 * (TMU_RTR_CS_15) and, on USB4 v2 routers, the delta averaging
 * constant (TMU_RTR_CS_18). Values come from tmu_params[].
 *
 * Returns %0 on success, negative errno on register access failure.
 */
static int tb_switch_set_tmu_mode_params(struct tb_switch *sw,
					 enum tb_switch_tmu_mode mode)
{
	u32 freq, avg, val;
	int ret;

	freq = tmu_params[mode].freq_meas_window;
	avg = tmu_params[mode].avg_const;

	/* Frequency measurement window lives in TMU_RTR_CS_0 */
	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_0, 1);
	if (ret)
		return ret;

	val &= ~TMU_RTR_CS_0_FREQ_WIND_MASK;
	val |= FIELD_PREP(TMU_RTR_CS_0_FREQ_WIND_MASK, freq);

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->tmu.cap + TMU_RTR_CS_0, 1);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_15, 1);
	if (ret)
		return ret;

	/* The same averaging constant is used for all four fields */
	val &= ~TMU_RTR_CS_15_FREQ_AVG_MASK &
		~TMU_RTR_CS_15_DELAY_AVG_MASK &
		~TMU_RTR_CS_15_OFFSET_AVG_MASK &
		~TMU_RTR_CS_15_ERROR_AVG_MASK;
	val |=  FIELD_PREP(TMU_RTR_CS_15_FREQ_AVG_MASK, avg) |
		FIELD_PREP(TMU_RTR_CS_15_DELAY_AVG_MASK, avg) |
		FIELD_PREP(TMU_RTR_CS_15_OFFSET_AVG_MASK, avg) |
		FIELD_PREP(TMU_RTR_CS_15_ERROR_AVG_MASK, avg);

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_15, 1);
	if (ret)
		return ret;

	/* TMU_RTR_CS_18 exists only on USB4 v2 and later routers */
	if (tb_switch_tmu_enhanced_is_supported(sw)) {
		u32 delta_avg = tmu_params[mode].delta_avg_const;

		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
				 sw->tmu.cap + TMU_RTR_CS_18, 1);
		if (ret)
			return ret;

		val &= ~TMU_RTR_CS_18_DELTA_AVG_CONST_MASK;
		val |= FIELD_PREP(TMU_RTR_CS_18_DELTA_AVG_CONST_MASK, delta_avg);

		ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
				  sw->tmu.cap + TMU_RTR_CS_18, 1);
	}

	return ret;
}
121cf29b9afSRajmohan Mani 
122d49b4f04SMika Westerberg static bool tb_switch_tmu_ucap_is_supported(struct tb_switch *sw)
123cf29b9afSRajmohan Mani {
124cf29b9afSRajmohan Mani 	int ret;
125cf29b9afSRajmohan Mani 	u32 val;
126cf29b9afSRajmohan Mani 
127cf29b9afSRajmohan Mani 	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
128cf29b9afSRajmohan Mani 			 sw->tmu.cap + TMU_RTR_CS_0, 1);
129cf29b9afSRajmohan Mani 	if (ret)
130cf29b9afSRajmohan Mani 		return false;
131cf29b9afSRajmohan Mani 
132cf29b9afSRajmohan Mani 	return !!(val & TMU_RTR_CS_0_UCAP);
133cf29b9afSRajmohan Mani }
134cf29b9afSRajmohan Mani 
135cf29b9afSRajmohan Mani static int tb_switch_tmu_rate_read(struct tb_switch *sw)
136cf29b9afSRajmohan Mani {
137cf29b9afSRajmohan Mani 	int ret;
138cf29b9afSRajmohan Mani 	u32 val;
139cf29b9afSRajmohan Mani 
140cf29b9afSRajmohan Mani 	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
141cf29b9afSRajmohan Mani 			 sw->tmu.cap + TMU_RTR_CS_3, 1);
142cf29b9afSRajmohan Mani 	if (ret)
143cf29b9afSRajmohan Mani 		return ret;
144cf29b9afSRajmohan Mani 
145cf29b9afSRajmohan Mani 	val >>= TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
146cf29b9afSRajmohan Mani 	return val;
147cf29b9afSRajmohan Mani }
148cf29b9afSRajmohan Mani 
149cf29b9afSRajmohan Mani static int tb_switch_tmu_rate_write(struct tb_switch *sw, int rate)
150cf29b9afSRajmohan Mani {
151cf29b9afSRajmohan Mani 	int ret;
152cf29b9afSRajmohan Mani 	u32 val;
153cf29b9afSRajmohan Mani 
154cf29b9afSRajmohan Mani 	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
155cf29b9afSRajmohan Mani 			 sw->tmu.cap + TMU_RTR_CS_3, 1);
156cf29b9afSRajmohan Mani 	if (ret)
157cf29b9afSRajmohan Mani 		return ret;
158cf29b9afSRajmohan Mani 
159cf29b9afSRajmohan Mani 	val &= ~TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK;
160cf29b9afSRajmohan Mani 	val |= rate << TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
161cf29b9afSRajmohan Mani 
162cf29b9afSRajmohan Mani 	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
163cf29b9afSRajmohan Mani 			   sw->tmu.cap + TMU_RTR_CS_3, 1);
164cf29b9afSRajmohan Mani }
165cf29b9afSRajmohan Mani 
166cf29b9afSRajmohan Mani static int tb_port_tmu_write(struct tb_port *port, u8 offset, u32 mask,
167cf29b9afSRajmohan Mani 			     u32 value)
168cf29b9afSRajmohan Mani {
169cf29b9afSRajmohan Mani 	u32 data;
170cf29b9afSRajmohan Mani 	int ret;
171cf29b9afSRajmohan Mani 
172cf29b9afSRajmohan Mani 	ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_tmu + offset, 1);
173cf29b9afSRajmohan Mani 	if (ret)
174cf29b9afSRajmohan Mani 		return ret;
175cf29b9afSRajmohan Mani 
176cf29b9afSRajmohan Mani 	data &= ~mask;
177cf29b9afSRajmohan Mani 	data |= value;
178cf29b9afSRajmohan Mani 
179cf29b9afSRajmohan Mani 	return tb_port_write(port, &data, TB_CFG_PORT,
180cf29b9afSRajmohan Mani 			     port->cap_tmu + offset, 1);
181cf29b9afSRajmohan Mani }
182cf29b9afSRajmohan Mani 
183cf29b9afSRajmohan Mani static int tb_port_tmu_set_unidirectional(struct tb_port *port,
184cf29b9afSRajmohan Mani 					  bool unidirectional)
185cf29b9afSRajmohan Mani {
186cf29b9afSRajmohan Mani 	u32 val;
187cf29b9afSRajmohan Mani 
188cf29b9afSRajmohan Mani 	if (!port->sw->tmu.has_ucap)
189cf29b9afSRajmohan Mani 		return 0;
190cf29b9afSRajmohan Mani 
191cf29b9afSRajmohan Mani 	val = unidirectional ? TMU_ADP_CS_3_UDM : 0;
192cf29b9afSRajmohan Mani 	return tb_port_tmu_write(port, TMU_ADP_CS_3, TMU_ADP_CS_3_UDM, val);
193cf29b9afSRajmohan Mani }
194cf29b9afSRajmohan Mani 
/* Clear the uni-directional mode bit of @port (no-op without ucap) */
static inline int tb_port_tmu_unidirectional_disable(struct tb_port *port)
{
	return tb_port_tmu_set_unidirectional(port, false);
}
199cf29b9afSRajmohan Mani 
/* Set the uni-directional mode bit of @port (no-op without ucap) */
static inline int tb_port_tmu_unidirectional_enable(struct tb_port *port)
{
	return tb_port_tmu_set_unidirectional(port, true);
}
204a28ec0e1SGil Fine 
205cf29b9afSRajmohan Mani static bool tb_port_tmu_is_unidirectional(struct tb_port *port)
206cf29b9afSRajmohan Mani {
207cf29b9afSRajmohan Mani 	int ret;
208cf29b9afSRajmohan Mani 	u32 val;
209cf29b9afSRajmohan Mani 
210cf29b9afSRajmohan Mani 	ret = tb_port_read(port, &val, TB_CFG_PORT,
211cf29b9afSRajmohan Mani 			   port->cap_tmu + TMU_ADP_CS_3, 1);
212cf29b9afSRajmohan Mani 	if (ret)
213cf29b9afSRajmohan Mani 		return false;
214cf29b9afSRajmohan Mani 
215cf29b9afSRajmohan Mani 	return val & TMU_ADP_CS_3_UDM;
216cf29b9afSRajmohan Mani }
217cf29b9afSRajmohan Mani 
218d49b4f04SMika Westerberg static bool tb_port_tmu_is_enhanced(struct tb_port *port)
219d49b4f04SMika Westerberg {
220d49b4f04SMika Westerberg 	int ret;
221d49b4f04SMika Westerberg 	u32 val;
222d49b4f04SMika Westerberg 
223d49b4f04SMika Westerberg 	ret = tb_port_read(port, &val, TB_CFG_PORT,
224d49b4f04SMika Westerberg 			   port->cap_tmu + TMU_ADP_CS_8, 1);
225d49b4f04SMika Westerberg 	if (ret)
226d49b4f04SMika Westerberg 		return false;
227d49b4f04SMika Westerberg 
228d49b4f04SMika Westerberg 	return val & TMU_ADP_CS_8_EUDM;
229d49b4f04SMika Westerberg }
230d49b4f04SMika Westerberg 
231d49b4f04SMika Westerberg /* Can be called to non-v2 lane adapters too */
232d49b4f04SMika Westerberg static int tb_port_tmu_enhanced_enable(struct tb_port *port, bool enable)
233d49b4f04SMika Westerberg {
234d49b4f04SMika Westerberg 	int ret;
235d49b4f04SMika Westerberg 	u32 val;
236d49b4f04SMika Westerberg 
237d49b4f04SMika Westerberg 	if (!tb_switch_tmu_enhanced_is_supported(port->sw))
238d49b4f04SMika Westerberg 		return 0;
239d49b4f04SMika Westerberg 
240d49b4f04SMika Westerberg 	ret = tb_port_read(port, &val, TB_CFG_PORT,
241d49b4f04SMika Westerberg 			   port->cap_tmu + TMU_ADP_CS_8, 1);
242d49b4f04SMika Westerberg 	if (ret)
243d49b4f04SMika Westerberg 		return ret;
244d49b4f04SMika Westerberg 
245d49b4f04SMika Westerberg 	if (enable)
246d49b4f04SMika Westerberg 		val |= TMU_ADP_CS_8_EUDM;
247d49b4f04SMika Westerberg 	else
248d49b4f04SMika Westerberg 		val &= ~TMU_ADP_CS_8_EUDM;
249d49b4f04SMika Westerberg 
250d49b4f04SMika Westerberg 	return tb_port_write(port, &val, TB_CFG_PORT,
251d49b4f04SMika Westerberg 			     port->cap_tmu + TMU_ADP_CS_8, 1);
252d49b4f04SMika Westerberg }
253d49b4f04SMika Westerberg 
/*
 * Program the per-adapter TMU parameters for @mode: ReplTimeout and
 * ReplThreshold (TMU_ADP_CS_8), ReplN and DirSwitchN (TMU_ADP_CS_9).
 * Values come from tmu_params[]; only the enhanced uni-directional
 * mode has non-zero values for these fields.
 */
static int tb_port_set_tmu_mode_params(struct tb_port *port,
				       enum tb_switch_tmu_mode mode)
{
	u32 repl_timeout, repl_threshold, repl_n, dirswitch_n, val;
	int ret;

	repl_timeout = tmu_params[mode].repl_timeout;
	repl_threshold = tmu_params[mode].repl_threshold;
	repl_n = tmu_params[mode].repl_n;
	dirswitch_n = tmu_params[mode].dirswitch_n;

	/* Timeout and threshold live in TMU_ADP_CS_8 */
	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_tmu + TMU_ADP_CS_8, 1);
	if (ret)
		return ret;

	val &= ~TMU_ADP_CS_8_REPL_TIMEOUT_MASK;
	val &= ~TMU_ADP_CS_8_REPL_THRESHOLD_MASK;
	val |= FIELD_PREP(TMU_ADP_CS_8_REPL_TIMEOUT_MASK, repl_timeout);
	val |= FIELD_PREP(TMU_ADP_CS_8_REPL_THRESHOLD_MASK, repl_threshold);

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_tmu + TMU_ADP_CS_8, 1);
	if (ret)
		return ret;

	/* ReplN and DirSwitchN live in TMU_ADP_CS_9 */
	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_tmu + TMU_ADP_CS_9, 1);
	if (ret)
		return ret;

	val &= ~TMU_ADP_CS_9_REPL_N_MASK;
	val &= ~TMU_ADP_CS_9_DIRSWITCH_N_MASK;
	val |= FIELD_PREP(TMU_ADP_CS_9_REPL_N_MASK, repl_n);
	val |= FIELD_PREP(TMU_ADP_CS_9_DIRSWITCH_N_MASK, dirswitch_n);

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_tmu + TMU_ADP_CS_9, 1);
}
293d49b4f04SMika Westerberg 
294d49b4f04SMika Westerberg /* Can be called to non-v2 lane adapters too */
295d49b4f04SMika Westerberg static int tb_port_tmu_rate_write(struct tb_port *port, int rate)
296d49b4f04SMika Westerberg {
297d49b4f04SMika Westerberg 	int ret;
298d49b4f04SMika Westerberg 	u32 val;
299d49b4f04SMika Westerberg 
300d49b4f04SMika Westerberg 	if (!tb_switch_tmu_enhanced_is_supported(port->sw))
301d49b4f04SMika Westerberg 		return 0;
302d49b4f04SMika Westerberg 
303d49b4f04SMika Westerberg 	ret = tb_port_read(port, &val, TB_CFG_PORT,
304d49b4f04SMika Westerberg 			   port->cap_tmu + TMU_ADP_CS_9, 1);
305d49b4f04SMika Westerberg 	if (ret)
306d49b4f04SMika Westerberg 		return ret;
307d49b4f04SMika Westerberg 
308d49b4f04SMika Westerberg 	val &= ~TMU_ADP_CS_9_ADP_TS_INTERVAL_MASK;
309d49b4f04SMika Westerberg 	val |= FIELD_PREP(TMU_ADP_CS_9_ADP_TS_INTERVAL_MASK, rate);
310d49b4f04SMika Westerberg 
311d49b4f04SMika Westerberg 	return tb_port_write(port, &val, TB_CFG_PORT,
312d49b4f04SMika Westerberg 			     port->cap_tmu + TMU_ADP_CS_9, 1);
313d49b4f04SMika Westerberg }
314d49b4f04SMika Westerberg 
315a28ec0e1SGil Fine static int tb_port_tmu_time_sync(struct tb_port *port, bool time_sync)
316a28ec0e1SGil Fine {
317a28ec0e1SGil Fine 	u32 val = time_sync ? TMU_ADP_CS_6_DTS : 0;
318a28ec0e1SGil Fine 
319a28ec0e1SGil Fine 	return tb_port_tmu_write(port, TMU_ADP_CS_6, TMU_ADP_CS_6_DTS, val);
320a28ec0e1SGil Fine }
321a28ec0e1SGil Fine 
/* Disable time sync on @port by setting the DTS bit (hence "true") */
static int tb_port_tmu_time_sync_disable(struct tb_port *port)
{
	return tb_port_tmu_time_sync(port, true);
}
326a28ec0e1SGil Fine 
/* Enable time sync on @port by clearing the DTS bit (hence "false") */
static int tb_port_tmu_time_sync_enable(struct tb_port *port)
{
	return tb_port_tmu_time_sync(port, false);
}
331a28ec0e1SGil Fine 
/*
 * Set or clear the time disruption (TD) bit of @sw. Set while software
 * is about to disturb the router's time (see tb_switch_tmu_post_time()).
 */
static int tb_switch_tmu_set_time_disruption(struct tb_switch *sw, bool set)
{
	u32 val, offset, bit;
	int ret;

	/* USB4 and legacy Thunderbolt keep the TD bit in different registers */
	if (tb_switch_is_usb4(sw)) {
		offset = sw->tmu.cap + TMU_RTR_CS_0;
		bit = TMU_RTR_CS_0_TD;
	} else {
		offset = sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_26;
		bit = TB_TIME_VSEC_3_CS_26_TD;
	}

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
	if (ret)
		return ret;

	if (set)
		val |= bit;
	else
		val &= ~bit;

	return tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1);
}
356cf29b9afSRajmohan Mani 
357d49b4f04SMika Westerberg static int tmu_mode_init(struct tb_switch *sw)
358d49b4f04SMika Westerberg {
359d49b4f04SMika Westerberg 	bool enhanced, ucap;
360d49b4f04SMika Westerberg 	int ret, rate;
361d49b4f04SMika Westerberg 
362d49b4f04SMika Westerberg 	ucap = tb_switch_tmu_ucap_is_supported(sw);
363d49b4f04SMika Westerberg 	if (ucap)
364d49b4f04SMika Westerberg 		tb_sw_dbg(sw, "TMU: supports uni-directional mode\n");
365d49b4f04SMika Westerberg 	enhanced = tb_switch_tmu_enhanced_is_supported(sw);
366d49b4f04SMika Westerberg 	if (enhanced)
367d49b4f04SMika Westerberg 		tb_sw_dbg(sw, "TMU: supports enhanced uni-directional mode\n");
368d49b4f04SMika Westerberg 
369d49b4f04SMika Westerberg 	ret = tb_switch_tmu_rate_read(sw);
370d49b4f04SMika Westerberg 	if (ret < 0)
371d49b4f04SMika Westerberg 		return ret;
372d49b4f04SMika Westerberg 	rate = ret;
373d49b4f04SMika Westerberg 
374d49b4f04SMika Westerberg 	/* Off by default */
375d49b4f04SMika Westerberg 	sw->tmu.mode = TB_SWITCH_TMU_MODE_OFF;
376d49b4f04SMika Westerberg 
377d49b4f04SMika Westerberg 	if (tb_route(sw)) {
378d49b4f04SMika Westerberg 		struct tb_port *up = tb_upstream_port(sw);
379d49b4f04SMika Westerberg 
380d49b4f04SMika Westerberg 		if (enhanced && tb_port_tmu_is_enhanced(up)) {
381d49b4f04SMika Westerberg 			sw->tmu.mode = TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI;
382d49b4f04SMika Westerberg 		} else if (ucap && tb_port_tmu_is_unidirectional(up)) {
383d49b4f04SMika Westerberg 			if (tmu_rates[TB_SWITCH_TMU_MODE_LOWRES] == rate)
384d49b4f04SMika Westerberg 				sw->tmu.mode = TB_SWITCH_TMU_MODE_LOWRES;
385d49b4f04SMika Westerberg 			else if (tmu_rates[TB_SWITCH_TMU_MODE_LOWRES] == rate)
386d49b4f04SMika Westerberg 				sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_UNI;
387d49b4f04SMika Westerberg 		} else if (rate) {
388d49b4f04SMika Westerberg 			sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_BI;
389d49b4f04SMika Westerberg 		}
390d49b4f04SMika Westerberg 	} else if (rate) {
391d49b4f04SMika Westerberg 		sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_BI;
392d49b4f04SMika Westerberg 	}
393d49b4f04SMika Westerberg 
394d49b4f04SMika Westerberg 	/* Update the initial request to match the current mode */
395d49b4f04SMika Westerberg 	sw->tmu.mode_request = sw->tmu.mode;
396d49b4f04SMika Westerberg 	sw->tmu.has_ucap = ucap;
397d49b4f04SMika Westerberg 
398d49b4f04SMika Westerberg 	return 0;
399d49b4f04SMika Westerberg }
400d49b4f04SMika Westerberg 
/**
 * tb_switch_tmu_init() - Initialize switch TMU structures
 * @sw: Switch to initialize
 *
 * This function must be called before other TMU related functions to
 * make sure the internal structures are filled in correctly. Does not
 * change any hardware configuration.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_switch_tmu_init(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	/* Nothing to do when the firmware connection manager (ICM) is used */
	if (tb_switch_is_icm(sw))
		return 0;

	ret = tb_switch_find_cap(sw, TB_SWITCH_CAP_TMU);
	if (ret > 0)
		sw->tmu.cap = ret;

	/* Cache the adapter level TMU capability offsets too */
	tb_switch_for_each_port(sw, port) {
		int cap;

		cap = tb_port_find_cap(port, TB_PORT_CAP_TIME1);
		if (cap > 0)
			port->cap_tmu = cap;
	}

	/* Read back the mode the hardware is currently in */
	ret = tmu_mode_init(sw);
	if (ret)
		return ret;

	tb_sw_dbg(sw, "TMU: current mode: %s\n", tmu_mode_name(sw->tmu.mode));
	return 0;
}
436cf29b9afSRajmohan Mani 
/**
 * tb_switch_tmu_post_time() - Update switch local time
 * @sw: Switch whose time to update
 *
 * Updates switch local time using time posting procedure: reads the
 * grandmaster (root switch) time and posts it to @sw while time sync
 * is marked as disrupted.
 *
 * Return: %0 on success (or when posting is not applicable), negative
 * errno otherwise.
 */
int tb_switch_tmu_post_time(struct tb_switch *sw)
{
	unsigned int post_time_high_offset, post_time_high = 0;
	unsigned int post_local_time_offset, post_time_offset;
	struct tb_switch *root_switch = sw->tb->root_switch;
	u64 hi, mid, lo, local_time, post_time;
	int i, ret, retries = 100;
	u32 gm_local_time[3];

	/* Host router is the grandmaster itself */
	if (!tb_route(sw))
		return 0;

	if (!tb_switch_is_usb4(sw))
		return 0;

	/* Need to be able to read the grand master time */
	if (!root_switch->tmu.cap)
		return 0;

	ret = tb_sw_read(root_switch, gm_local_time, TB_CFG_SWITCH,
			 root_switch->tmu.cap + TMU_RTR_CS_1,
			 ARRAY_SIZE(gm_local_time));
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(gm_local_time); i++)
		tb_sw_dbg(root_switch, "TMU: local_time[%d]=0x%08x\n", i,
			  gm_local_time[i]);

	/* Convert to nanoseconds (drop fractional part) */
	hi = gm_local_time[2] & TMU_RTR_CS_3_LOCAL_TIME_NS_MASK;
	mid = gm_local_time[1];
	lo = (gm_local_time[0] & TMU_RTR_CS_1_LOCAL_TIME_NS_MASK) >>
		TMU_RTR_CS_1_LOCAL_TIME_NS_SHIFT;
	local_time = hi << 48 | mid << 16 | lo;

	/* Tell the switch that time sync is disrupted for a while */
	ret = tb_switch_tmu_set_time_disruption(sw, true);
	if (ret)
		return ret;

	post_local_time_offset = sw->tmu.cap + TMU_RTR_CS_22;
	post_time_offset = sw->tmu.cap + TMU_RTR_CS_24;
	post_time_high_offset = sw->tmu.cap + TMU_RTR_CS_25;

	/*
	 * Write the Grandmaster time to the Post Local Time registers
	 * of the new switch.
	 */
	ret = tb_sw_write(sw, &local_time, TB_CFG_SWITCH,
			  post_local_time_offset, 2);
	if (ret)
		goto out;

	/*
	 * Have the new switch update its local time by:
	 * 1) writing 0x1 to the Post Time Low register and 0xffffffff to
	 * Post Time High register.
	 * 2) write 0 to Post Time High register and then wait for
	 * the completion of the post_time register becomes 0.
	 * This means the time has been converged properly.
	 */
	post_time = 0xffffffff00000001ULL;

	ret = tb_sw_write(sw, &post_time, TB_CFG_SWITCH, post_time_offset, 2);
	if (ret)
		goto out;

	ret = tb_sw_write(sw, &post_time_high, TB_CFG_SWITCH,
			  post_time_high_offset, 1);
	if (ret)
		goto out;

	/* Poll until the router clears post_time (i.e. time converged) */
	do {
		usleep_range(5, 10);
		ret = tb_sw_read(sw, &post_time, TB_CFG_SWITCH,
				 post_time_offset, 2);
		if (ret)
			goto out;
	} while (--retries && post_time);

	if (!retries) {
		ret = -ETIMEDOUT;
		goto out;
	}

	tb_sw_dbg(sw, "TMU: updated local time to %#llx\n", local_time);

out:
	/* Clear the disruption bit even on failure */
	tb_switch_tmu_set_time_disruption(sw, false);
	return ret;
}
535cf29b9afSRajmohan Mani 
536d49b4f04SMika Westerberg static int disable_enhanced(struct tb_port *up, struct tb_port *down)
537d49b4f04SMika Westerberg {
538d49b4f04SMika Westerberg 	int ret;
539d49b4f04SMika Westerberg 
540d49b4f04SMika Westerberg 	/*
541d49b4f04SMika Westerberg 	 * Router may already been disconnected so ignore errors on the
542d49b4f04SMika Westerberg 	 * upstream port.
543d49b4f04SMika Westerberg 	 */
544d49b4f04SMika Westerberg 	tb_port_tmu_rate_write(up, 0);
545d49b4f04SMika Westerberg 	tb_port_tmu_enhanced_enable(up, false);
546d49b4f04SMika Westerberg 
547d49b4f04SMika Westerberg 	ret = tb_port_tmu_rate_write(down, 0);
548d49b4f04SMika Westerberg 	if (ret)
549d49b4f04SMika Westerberg 		return ret;
550d49b4f04SMika Westerberg 	return tb_port_tmu_enhanced_enable(down, false);
551d49b4f04SMika Westerberg }
552d49b4f04SMika Westerberg 
/**
 * tb_switch_tmu_disable() - Disable TMU of a switch
 * @sw: Switch whose TMU to disable
 *
 * Turns off TMU of @sw if it is enabled. If not enabled does nothing.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_switch_tmu_disable(struct tb_switch *sw)
{
	/* Already disabled? */
	if (sw->tmu.mode == TB_SWITCH_TMU_MODE_OFF)
		return 0;

	if (tb_route(sw)) {
		struct tb_port *down, *up;
		int ret;

		down = tb_switch_downstream_port(sw);
		up = tb_upstream_port(sw);
		/*
		 * In case of uni-directional time sync, TMU handshake is
		 * initiated by upstream router. In case of bi-directional
		 * time sync, TMU handshake is initiated by downstream router.
		 * We change downstream router's rate to off for both uni/bidir
		 * cases although it is needed only for the bi-directional mode.
		 * We avoid changing upstream router's mode since it might
		 * have another downstream router plugged, that is set to
		 * uni-directional mode and we don't want to change its TMU
		 * mode.
		 */
		tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_OFF]);

		tb_port_tmu_time_sync_disable(up);
		ret = tb_port_tmu_time_sync_disable(down);
		if (ret)
			return ret;

		/* Undo the mode specific adapter configuration too */
		switch (sw->tmu.mode) {
		case TB_SWITCH_TMU_MODE_LOWRES:
		case TB_SWITCH_TMU_MODE_HIFI_UNI:
			/* The switch may be unplugged so ignore any errors */
			tb_port_tmu_unidirectional_disable(up);
			ret = tb_port_tmu_unidirectional_disable(down);
			if (ret)
				return ret;
			break;

		case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
			ret = disable_enhanced(up, down);
			if (ret)
				return ret;
			break;

		default:
			break;
		}
	} else {
		/* Host router: just turn the rate off */
		tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_OFF]);
	}

	sw->tmu.mode = TB_SWITCH_TMU_MODE_OFF;

	tb_sw_dbg(sw, "TMU: disabled\n");
	return 0;
}
617cf29b9afSRajmohan Mani 
/* Called only when there is failure enabling requested mode */
static void tb_switch_tmu_off(struct tb_switch *sw)
{
	unsigned int rate = tmu_rates[TB_SWITCH_TMU_MODE_OFF];
	struct tb_port *down, *up;

	down = tb_switch_downstream_port(sw);
	up = tb_upstream_port(sw);
	/*
	 * In case of any failure in one of the steps when setting
	 * bi-directional or uni-directional TMU mode, get back to the TMU
	 * configurations in off mode. In case of additional failures in
	 * the functions below, ignore them since the caller shall already
	 * report a failure.
	 */
	tb_port_tmu_time_sync_disable(down);
	tb_port_tmu_time_sync_disable(up);

	/* Undo whatever the partially applied requested mode set up */
	switch (sw->tmu.mode_request) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		/* Uni-directional modes drive the rate from the parent */
		tb_switch_tmu_rate_write(tb_switch_parent(sw), rate);
		break;
	case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
		disable_enhanced(up, down);
		break;
	default:
		break;
	}

	/* Always set the rate to 0 */
	tb_switch_tmu_rate_write(sw, rate);

	/* Restore the parameters of the mode we were in before */
	tb_switch_set_tmu_mode_params(sw, sw->tmu.mode);
	tb_port_tmu_unidirectional_disable(down);
	tb_port_tmu_unidirectional_disable(up);
}
655a28ec0e1SGil Fine 
656a28ec0e1SGil Fine /*
657a28ec0e1SGil Fine  * This function is called when the previous TMU mode was
658d49b4f04SMika Westerberg  * TB_SWITCH_TMU_MODE_OFF.
659a28ec0e1SGil Fine  */
660c437dcb1SMika Westerberg static int tb_switch_tmu_enable_bidirectional(struct tb_switch *sw)
661a28ec0e1SGil Fine {
662a28ec0e1SGil Fine 	struct tb_port *up, *down;
663cf29b9afSRajmohan Mani 	int ret;
664cf29b9afSRajmohan Mani 
665a28ec0e1SGil Fine 	up = tb_upstream_port(sw);
6667ce54221SGil Fine 	down = tb_switch_downstream_port(sw);
667a28ec0e1SGil Fine 
668a28ec0e1SGil Fine 	ret = tb_port_tmu_unidirectional_disable(up);
669a28ec0e1SGil Fine 	if (ret)
670a28ec0e1SGil Fine 		return ret;
671a28ec0e1SGil Fine 
672a28ec0e1SGil Fine 	ret = tb_port_tmu_unidirectional_disable(down);
673a28ec0e1SGil Fine 	if (ret)
674a28ec0e1SGil Fine 		goto out;
675a28ec0e1SGil Fine 
676d49b4f04SMika Westerberg 	ret = tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_HIFI_BI]);
677a28ec0e1SGil Fine 	if (ret)
678a28ec0e1SGil Fine 		goto out;
679a28ec0e1SGil Fine 
680a28ec0e1SGil Fine 	ret = tb_port_tmu_time_sync_enable(up);
681a28ec0e1SGil Fine 	if (ret)
682a28ec0e1SGil Fine 		goto out;
683a28ec0e1SGil Fine 
684a28ec0e1SGil Fine 	ret = tb_port_tmu_time_sync_enable(down);
685a28ec0e1SGil Fine 	if (ret)
686a28ec0e1SGil Fine 		goto out;
687a28ec0e1SGil Fine 
688a28ec0e1SGil Fine 	return 0;
689a28ec0e1SGil Fine 
690a28ec0e1SGil Fine out:
691d49b4f04SMika Westerberg 	tb_switch_tmu_off(sw);
692a28ec0e1SGil Fine 	return ret;
693a28ec0e1SGil Fine }
694a28ec0e1SGil Fine 
695701e73a8SMika Westerberg /* Only needed for Titan Ridge */
696701e73a8SMika Westerberg static int tb_switch_tmu_disable_objections(struct tb_switch *sw)
69743f977bcSGil Fine {
698701e73a8SMika Westerberg 	struct tb_port *up = tb_upstream_port(sw);
69943f977bcSGil Fine 	u32 val;
70043f977bcSGil Fine 	int ret;
70143f977bcSGil Fine 
70243f977bcSGil Fine 	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
70343f977bcSGil Fine 			 sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
70443f977bcSGil Fine 	if (ret)
70543f977bcSGil Fine 		return ret;
70643f977bcSGil Fine 
70743f977bcSGil Fine 	val &= ~TB_TIME_VSEC_3_CS_9_TMU_OBJ_MASK;
70843f977bcSGil Fine 
709701e73a8SMika Westerberg 	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
71043f977bcSGil Fine 			  sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
711701e73a8SMika Westerberg 	if (ret)
712701e73a8SMika Westerberg 		return ret;
71343f977bcSGil Fine 
71443f977bcSGil Fine 	return tb_port_tmu_write(up, TMU_ADP_CS_6,
71543f977bcSGil Fine 				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK,
716701e73a8SMika Westerberg 				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL1 |
717701e73a8SMika Westerberg 				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL2);
71843f977bcSGil Fine }
71943f977bcSGil Fine 
720a28ec0e1SGil Fine /*
721a28ec0e1SGil Fine  * This function is called when the previous TMU mode was
722d49b4f04SMika Westerberg  * TB_SWITCH_TMU_MODE_OFF.
723a28ec0e1SGil Fine  */
724c437dcb1SMika Westerberg static int tb_switch_tmu_enable_unidirectional(struct tb_switch *sw)
725a28ec0e1SGil Fine {
726a28ec0e1SGil Fine 	struct tb_port *up, *down;
727a28ec0e1SGil Fine 	int ret;
728a28ec0e1SGil Fine 
729a28ec0e1SGil Fine 	up = tb_upstream_port(sw);
7307ce54221SGil Fine 	down = tb_switch_downstream_port(sw);
7317ce54221SGil Fine 	ret = tb_switch_tmu_rate_write(tb_switch_parent(sw),
732d49b4f04SMika Westerberg 				       tmu_rates[sw->tmu.mode_request]);
733b017a46dSGil Fine 	if (ret)
734b017a46dSGil Fine 		return ret;
735b017a46dSGil Fine 
736d49b4f04SMika Westerberg 	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
737a28ec0e1SGil Fine 	if (ret)
738a28ec0e1SGil Fine 		return ret;
739a28ec0e1SGil Fine 
740a28ec0e1SGil Fine 	ret = tb_port_tmu_unidirectional_enable(up);
741a28ec0e1SGil Fine 	if (ret)
742a28ec0e1SGil Fine 		goto out;
743a28ec0e1SGil Fine 
744a28ec0e1SGil Fine 	ret = tb_port_tmu_time_sync_enable(up);
745a28ec0e1SGil Fine 	if (ret)
746a28ec0e1SGil Fine 		goto out;
747a28ec0e1SGil Fine 
748a28ec0e1SGil Fine 	ret = tb_port_tmu_unidirectional_enable(down);
749a28ec0e1SGil Fine 	if (ret)
750a28ec0e1SGil Fine 		goto out;
751a28ec0e1SGil Fine 
752a28ec0e1SGil Fine 	ret = tb_port_tmu_time_sync_enable(down);
753a28ec0e1SGil Fine 	if (ret)
754a28ec0e1SGil Fine 		goto out;
755a28ec0e1SGil Fine 
756a28ec0e1SGil Fine 	return 0;
757a28ec0e1SGil Fine 
758a28ec0e1SGil Fine out:
759d49b4f04SMika Westerberg 	tb_switch_tmu_off(sw);
760d49b4f04SMika Westerberg 	return ret;
761d49b4f04SMika Westerberg }
762d49b4f04SMika Westerberg 
/*
 * This function is called when the previous TMU mode was
 * TB_SWITCH_TMU_MODE_OFF.
 */
static int tb_switch_tmu_enable_enhanced(struct tb_switch *sw)
{
	unsigned int rate = tmu_rates[sw->tmu.mode_request];
	struct tb_port *up, *down;
	int ret;

	/* Router specific parameters first */
	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
	if (ret)
		return ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	/* Then the upstream adapter: parameters, rate and enhanced mode */
	ret = tb_port_set_tmu_mode_params(up, sw->tmu.mode_request);
	if (ret)
		goto out;

	ret = tb_port_tmu_rate_write(up, rate);
	if (ret)
		goto out;

	ret = tb_port_tmu_enhanced_enable(up, true);
	if (ret)
		goto out;

	/* And the same sequence for the downstream adapter */
	ret = tb_port_set_tmu_mode_params(down, sw->tmu.mode_request);
	if (ret)
		goto out;

	ret = tb_port_tmu_rate_write(down, rate);
	if (ret)
		goto out;

	ret = tb_port_tmu_enhanced_enable(down, true);
	if (ret)
		goto out;

	return 0;

out:
	/* Roll everything back to the off configuration on failure */
	tb_switch_tmu_off(sw);
	return ret;
}
811a28ec0e1SGil Fine 
/*
 * Called when tb_switch_tmu_change_mode() fails mid-way: restores the
 * TMU configuration of the previous (still current) mode, sw->tmu.mode.
 */
static void tb_switch_tmu_change_mode_prev(struct tb_switch *sw)
{
	unsigned int rate = tmu_rates[sw->tmu.mode];
	struct tb_port *down, *up;

	down = tb_switch_downstream_port(sw);
	up = tb_upstream_port(sw);
	/*
	 * In case of any failure in one of the steps when change mode,
	 * get back to the TMU configurations in previous mode.
	 * In case of additional failures in the functions below,
	 * ignore them since the caller shall already report a failure.
	 */
	switch (sw->tmu.mode) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		/* Uni-directional: downstream adapter back to uni, rate on parent */
		tb_port_tmu_set_unidirectional(down, true);
		tb_switch_tmu_rate_write(tb_switch_parent(sw), rate);
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		/* Bi-directional: downstream adapter back to bi, rate on this router */
		tb_port_tmu_set_unidirectional(down, false);
		tb_switch_tmu_rate_write(sw, rate);
		break;

	default:
		break;
	}

	tb_switch_set_tmu_mode_params(sw, sw->tmu.mode);

	/* Finally restore the direction of the upstream adapter */
	switch (sw->tmu.mode) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		tb_port_tmu_set_unidirectional(up, true);
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		tb_port_tmu_set_unidirectional(up, false);
		break;

	default:
		break;
	}
}
857b017a46dSGil Fine 
/*
 * Changes between two already enabled modes (LOWRES/HIFI_UNI <-> HIFI_BI).
 * On failure after the lane adapters have been touched the previous mode
 * is restored with tb_switch_tmu_change_mode_prev().
 */
static int tb_switch_tmu_change_mode(struct tb_switch *sw)
{
	unsigned int rate = tmu_rates[sw->tmu.mode_request];
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	/* Program the upstream router downstream facing lane adapter */
	switch (sw->tmu.mode_request) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		ret = tb_port_tmu_set_unidirectional(down, true);
		if (ret)
			goto out;
		/* Uni-directional modes take the rate from the parent router */
		ret = tb_switch_tmu_rate_write(tb_switch_parent(sw), rate);
		if (ret)
			goto out;
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		ret = tb_port_tmu_set_unidirectional(down, false);
		if (ret)
			goto out;
		ret = tb_switch_tmu_rate_write(sw, rate);
		if (ret)
			goto out;
		break;

	default:
		/* Not allowed to change modes from other than above */
		return -EINVAL;
	}

	/*
	 * NOTE(review): a failure here returns directly without restoring
	 * the downstream adapter programmed above (unlike the goto out
	 * paths) -- confirm this asymmetry is intentional.
	 */
	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
	if (ret)
		return ret;

	/* Program the new mode and the downstream router lane adapter */
	switch (sw->tmu.mode_request) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		ret = tb_port_tmu_set_unidirectional(up, true);
		if (ret)
			goto out;
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		ret = tb_port_tmu_set_unidirectional(up, false);
		if (ret)
			goto out;
		break;

	default:
		/* Not allowed to change modes from other than above */
		return -EINVAL;
	}

	/* Both adapters are set up, (re)start time synchronization */
	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	return 0;

out:
	tb_switch_tmu_change_mode_prev(sw);
	return ret;
}
931b017a46dSGil Fine 
/**
 * tb_switch_tmu_enable() - Enable TMU on a router
 * @sw: Router whose TMU to enable
 *
 * Enables TMU of a router to be in uni-directional Normal/HiFi or
 * bi-directional HiFi mode. Calling tb_switch_tmu_configure() is
 * required before calling this function.
 */
int tb_switch_tmu_enable(struct tb_switch *sw)
{
	int ret;

	/* Nothing to do if the requested mode is already active */
	if (tb_switch_tmu_is_enabled(sw))
		return 0;

	/* Titan Ridge needs TMU objections disabled for the uni modes */
	if (tb_switch_is_titan_ridge(sw) &&
	    (sw->tmu.mode_request == TB_SWITCH_TMU_MODE_LOWRES ||
	     sw->tmu.mode_request == TB_SWITCH_TMU_MODE_HIFI_UNI)) {
		ret = tb_switch_tmu_disable_objections(sw);
		if (ret)
			return ret;
	}

	ret = tb_switch_tmu_set_time_disruption(sw, true);
	if (ret)
		return ret;

	if (tb_route(sw)) {
		/*
		 * The used mode changes are from OFF to
		 * HiFi-Uni/HiFi-BiDir/Normal-Uni or from Normal-Uni to
		 * HiFi-Uni.
		 */
		if (sw->tmu.mode == TB_SWITCH_TMU_MODE_OFF) {
			/* Enabling from scratch: dispatch on the requested mode */
			switch (sw->tmu.mode_request) {
			case TB_SWITCH_TMU_MODE_LOWRES:
			case TB_SWITCH_TMU_MODE_HIFI_UNI:
				ret = tb_switch_tmu_enable_unidirectional(sw);
				break;

			case TB_SWITCH_TMU_MODE_HIFI_BI:
				ret = tb_switch_tmu_enable_bidirectional(sw);
				break;
			case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
				ret = tb_switch_tmu_enable_enhanced(sw);
				break;
			default:
				ret = -EINVAL;
				break;
			}
		} else if (sw->tmu.mode == TB_SWITCH_TMU_MODE_LOWRES ||
			   sw->tmu.mode == TB_SWITCH_TMU_MODE_HIFI_UNI ||
			   sw->tmu.mode == TB_SWITCH_TMU_MODE_HIFI_BI) {
			/* Already enabled: switch between the non-enhanced modes */
			ret = tb_switch_tmu_change_mode(sw);
		} else {
			ret = -EINVAL;
		}
	} else {
		/*
		 * Host router port configurations are written as
		 * part of configurations for downstream port of the parent
		 * of the child node - see above.
		 * Here only the host router' rate configuration is written.
		 */
		ret = tb_switch_tmu_rate_write(sw, tmu_rates[sw->tmu.mode_request]);
	}

	if (ret) {
		tb_sw_warn(sw, "TMU: failed to enable mode %s: %d\n",
			   tmu_mode_name(sw->tmu.mode_request), ret);
	} else {
		/* The request is now the active mode */
		sw->tmu.mode = sw->tmu.mode_request;
		tb_sw_dbg(sw, "TMU: mode set to: %s\n", tmu_mode_name(sw->tmu.mode));
	}

	return tb_switch_tmu_set_time_disruption(sw, false);
}
1009a28ec0e1SGil Fine 
1010a28ec0e1SGil Fine /**
1011d49b4f04SMika Westerberg  * tb_switch_tmu_configure() - Configure the TMU mode
1012a28ec0e1SGil Fine  * @sw: Router whose mode to change
1013d49b4f04SMika Westerberg  * @mode: Mode to configure
1014a28ec0e1SGil Fine  *
1015d49b4f04SMika Westerberg  * Selects the TMU mode that is enabled when tb_switch_tmu_enable() is
1016d49b4f04SMika Westerberg  * next called.
1017ef34add8SMika Westerberg  *
1018d49b4f04SMika Westerberg  * Returns %0 in success and negative errno otherwise. Specifically
1019d49b4f04SMika Westerberg  * returns %-EOPNOTSUPP if the requested mode is not possible (not
1020d49b4f04SMika Westerberg  * supported by the router and/or topology).
1021a28ec0e1SGil Fine  */
1022d49b4f04SMika Westerberg int tb_switch_tmu_configure(struct tb_switch *sw, enum tb_switch_tmu_mode mode)
1023a28ec0e1SGil Fine {
1024d49b4f04SMika Westerberg 	switch (mode) {
1025d49b4f04SMika Westerberg 	case TB_SWITCH_TMU_MODE_OFF:
1026d49b4f04SMika Westerberg 		break;
1027ef34add8SMika Westerberg 
1028d49b4f04SMika Westerberg 	case TB_SWITCH_TMU_MODE_LOWRES:
1029d49b4f04SMika Westerberg 	case TB_SWITCH_TMU_MODE_HIFI_UNI:
1030d49b4f04SMika Westerberg 		if (!sw->tmu.has_ucap)
1031d49b4f04SMika Westerberg 			return -EOPNOTSUPP;
1032d49b4f04SMika Westerberg 		break;
1033d49b4f04SMika Westerberg 
1034d49b4f04SMika Westerberg 	case TB_SWITCH_TMU_MODE_HIFI_BI:
1035d49b4f04SMika Westerberg 		break;
1036d49b4f04SMika Westerberg 
1037d49b4f04SMika Westerberg 	case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI: {
1038d49b4f04SMika Westerberg 		const struct tb_switch *parent_sw = tb_switch_parent(sw);
1039d49b4f04SMika Westerberg 
1040d49b4f04SMika Westerberg 		if (!parent_sw || !tb_switch_tmu_enhanced_is_supported(parent_sw))
1041d49b4f04SMika Westerberg 			return -EOPNOTSUPP;
1042d49b4f04SMika Westerberg 		if (!tb_switch_tmu_enhanced_is_supported(sw))
1043d49b4f04SMika Westerberg 			return -EOPNOTSUPP;
1044d49b4f04SMika Westerberg 
1045d49b4f04SMika Westerberg 		break;
1046d49b4f04SMika Westerberg 	}
1047d49b4f04SMika Westerberg 
1048d49b4f04SMika Westerberg 	default:
1049d49b4f04SMika Westerberg 		tb_sw_warn(sw, "TMU: unsupported mode %u\n", mode);
1050d49b4f04SMika Westerberg 		return -EINVAL;
1051d49b4f04SMika Westerberg 	}
1052d49b4f04SMika Westerberg 
1053d49b4f04SMika Westerberg 	if (sw->tmu.mode_request != mode) {
1054d49b4f04SMika Westerberg 		tb_sw_dbg(sw, "TMU: mode change %s -> %s requested\n",
1055d49b4f04SMika Westerberg 			  tmu_mode_name(sw->tmu.mode), tmu_mode_name(mode));
1056d49b4f04SMika Westerberg 		sw->tmu.mode_request = mode;
1057d49b4f04SMika Westerberg 	}
1058d49b4f04SMika Westerberg 
1059ef34add8SMika Westerberg 	return 0;
1060a28ec0e1SGil Fine }
1061