// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt Time Management Unit (TMU) support
 *
 * Copyright (C) 2019, Intel Corporation
 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
 *          Rajmohan Mani <rajmohan.mani@intel.com>
 */

#include <linux/delay.h>

#include "tb.h"

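/*
 * Time sync packet interval used for each TMU mode, in microseconds
 * (0 keeps the TMU off): the HiFi modes post packets every 16 us,
 * LowRes every 1 ms.
 */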
static const unsigned int tmu_rates[] = {
	[TB_SWITCH_TMU_MODE_OFF] = 0,
	[TB_SWITCH_TMU_MODE_LOWRES] = 1000,
	[TB_SWITCH_TMU_MODE_HIFI_UNI] = 16,
	[TB_SWITCH_TMU_MODE_HIFI_BI] = 16,
	[TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI] = 16,
};

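/*
 * Per-mode TMU configuration. The frequency measurement window and
 * averaging constants are programmed to the router registers in
 * tb_switch_set_tmu_mode_params(). The replication and direction
 * switch parameters are used only by the enhanced uni-directional
 * mode and are programmed to the lane adapters in
 * tb_port_set_tmu_mode_params().
 */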
static const struct {
	unsigned int freq_meas_window;
	unsigned int avg_const;
	unsigned int delta_avg_const;
	unsigned int repl_timeout;
	unsigned int repl_threshold;
	unsigned int repl_n;
	unsigned int dirswitch_n;
} tmu_params[] = {
	[TB_SWITCH_TMU_MODE_OFF] = { },
	[TB_SWITCH_TMU_MODE_LOWRES] = { 30, 4, },
	[TB_SWITCH_TMU_MODE_HIFI_UNI] = { 800, 8, },
	[TB_SWITCH_TMU_MODE_HIFI_BI] = { 800, 8, },
	[TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI] = {
		800, 4, 0, 3125, 25, 128, 255,
	},
};

static const char *tmu_mode_name(enum tb_switch_tmu_mode mode)
{
	switch (mode) {
	case TB_SWITCH_TMU_MODE_OFF:
		return "off";
	case TB_SWITCH_TMU_MODE_LOWRES:
		return "uni-directional, LowRes";
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		return "uni-directional, HiFi";
	case TB_SWITCH_TMU_MODE_HIFI_BI:
		return "bi-directional, HiFi";
	case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
		return "enhanced uni-directional, MedRes";
	default:
		return "unknown";
	}
}

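/* Enhanced uni-directional TMU mode is available starting from USB4 v2 routers */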
static bool tb_switch_tmu_enhanced_is_supported(const struct tb_switch *sw)
{
	return usb4_switch_version(sw) > 1;
}

static int tb_switch_set_tmu_mode_params(struct tb_switch *sw,
					 enum tb_switch_tmu_mode mode)
{
	u32 freq, avg, val;
	int ret;

	freq = tmu_params[mode].freq_meas_window;
	avg = tmu_params[mode].avg_const;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_0, 1);
	if (ret)
		return ret;

	val &= ~TMU_RTR_CS_0_FREQ_WIND_MASK;
	val |= FIELD_PREP(TMU_RTR_CS_0_FREQ_WIND_MASK, freq);

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->tmu.cap + TMU_RTR_CS_0, 1);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_15, 1);
	if (ret)
		return ret;

	val &= ~TMU_RTR_CS_15_FREQ_AVG_MASK &
		~TMU_RTR_CS_15_DELAY_AVG_MASK &
		~TMU_RTR_CS_15_OFFSET_AVG_MASK &
		~TMU_RTR_CS_15_ERROR_AVG_MASK;
	val |= FIELD_PREP(TMU_RTR_CS_15_FREQ_AVG_MASK, avg) |
		FIELD_PREP(TMU_RTR_CS_15_DELAY_AVG_MASK, avg) |
		FIELD_PREP(TMU_RTR_CS_15_OFFSET_AVG_MASK, avg) |
		FIELD_PREP(TMU_RTR_CS_15_ERROR_AVG_MASK, avg);

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->tmu.cap + TMU_RTR_CS_15, 1);
	if (ret)
		return ret;

	if (tb_switch_tmu_enhanced_is_supported(sw)) {
		u32 delta_avg = tmu_params[mode].delta_avg_const;

		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
				 sw->tmu.cap + TMU_RTR_CS_18, 1);
		if (ret)
			return ret;

		val &= ~TMU_RTR_CS_18_DELTA_AVG_CONST_MASK;
		val |= FIELD_PREP(TMU_RTR_CS_18_DELTA_AVG_CONST_MASK, delta_avg);

		ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
				  sw->tmu.cap + TMU_RTR_CS_18, 1);
	}

	return ret;
}

static bool tb_switch_tmu_ucap_is_supported(struct tb_switch *sw)
{
	int ret;
	u32 val;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_0, 1);
	if (ret)
		return false;

	return !!(val & TMU_RTR_CS_0_UCAP);
}

static int tb_switch_tmu_rate_read(struct tb_switch *sw)
{
	int ret;
	u32 val;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_3, 1);
	if (ret)
		return ret;

	val >>= TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
	return val;
}

static int tb_switch_tmu_rate_write(struct tb_switch *sw, int rate)
{
	int ret;
	u32 val;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_3, 1);
	if (ret)
		return ret;

	val &= ~TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK;
	val |= rate << TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;

	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
			   sw->tmu.cap + TMU_RTR_CS_3, 1);
}

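/*
 * Read-modify-write helper for the TMU adapter config space: only the
 * bits in @mask are updated. For example, clearing the uni-directional
 * mode bit of a lane adapter (what tb_port_tmu_unidirectional_disable()
 * below ends up doing) amounts to:
 *
 *	tb_port_tmu_write(port, TMU_ADP_CS_3, TMU_ADP_CS_3_UDM, 0);
 */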
static int tb_port_tmu_write(struct tb_port *port, u8 offset, u32 mask,
			     u32 value)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_tmu + offset, 1);
	if (ret)
		return ret;

	data &= ~mask;
	data |= value;

	return tb_port_write(port, &data, TB_CFG_PORT,
			     port->cap_tmu + offset, 1);
}

static int tb_port_tmu_set_unidirectional(struct tb_port *port,
					  bool unidirectional)
{
	u32 val;

	if (!port->sw->tmu.has_ucap)
		return 0;

	val = unidirectional ? TMU_ADP_CS_3_UDM : 0;
	return tb_port_tmu_write(port, TMU_ADP_CS_3, TMU_ADP_CS_3_UDM, val);
}

static inline int tb_port_tmu_unidirectional_disable(struct tb_port *port)
{
	return tb_port_tmu_set_unidirectional(port, false);
}

static inline int tb_port_tmu_unidirectional_enable(struct tb_port *port)
{
	return tb_port_tmu_set_unidirectional(port, true);
}

static bool tb_port_tmu_is_unidirectional(struct tb_port *port)
{
	int ret;
	u32 val;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_tmu + TMU_ADP_CS_3, 1);
	if (ret)
		return false;

	return val & TMU_ADP_CS_3_UDM;
}

static bool tb_port_tmu_is_enhanced(struct tb_port *port)
{
	int ret;
	u32 val;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_tmu + TMU_ADP_CS_8, 1);
	if (ret)
		return false;

	return val & TMU_ADP_CS_8_EUDM;
}

/* Can be called for non-v2 lane adapters too */
static int tb_port_tmu_enhanced_enable(struct tb_port *port, bool enable)
{
	int ret;
	u32 val;

	if (!tb_switch_tmu_enhanced_is_supported(port->sw))
		return 0;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_tmu + TMU_ADP_CS_8, 1);
	if (ret)
		return ret;

	if (enable)
		val |= TMU_ADP_CS_8_EUDM;
	else
		val &= ~TMU_ADP_CS_8_EUDM;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_tmu + TMU_ADP_CS_8, 1);
}

static int tb_port_set_tmu_mode_params(struct tb_port *port,
				       enum tb_switch_tmu_mode mode)
{
	u32 repl_timeout, repl_threshold, repl_n, dirswitch_n, val;
	int ret;

	repl_timeout = tmu_params[mode].repl_timeout;
	repl_threshold = tmu_params[mode].repl_threshold;
	repl_n = tmu_params[mode].repl_n;
	dirswitch_n = tmu_params[mode].dirswitch_n;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_tmu + TMU_ADP_CS_8, 1);
	if (ret)
		return ret;

	val &= ~TMU_ADP_CS_8_REPL_TIMEOUT_MASK;
	val &= ~TMU_ADP_CS_8_REPL_THRESHOLD_MASK;
	val |= FIELD_PREP(TMU_ADP_CS_8_REPL_TIMEOUT_MASK, repl_timeout);
	val |= FIELD_PREP(TMU_ADP_CS_8_REPL_THRESHOLD_MASK, repl_threshold);

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_tmu + TMU_ADP_CS_8, 1);
	if (ret)
		return ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_tmu + TMU_ADP_CS_9, 1);
	if (ret)
		return ret;

	val &= ~TMU_ADP_CS_9_REPL_N_MASK;
	val &= ~TMU_ADP_CS_9_DIRSWITCH_N_MASK;
	val |= FIELD_PREP(TMU_ADP_CS_9_REPL_N_MASK, repl_n);
	val |= FIELD_PREP(TMU_ADP_CS_9_DIRSWITCH_N_MASK, dirswitch_n);

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_tmu + TMU_ADP_CS_9, 1);
}

/* Can be called for non-v2 lane adapters too */
static int tb_port_tmu_rate_write(struct tb_port *port, int rate)
{
	int ret;
	u32 val;

	if (!tb_switch_tmu_enhanced_is_supported(port->sw))
		return 0;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_tmu + TMU_ADP_CS_9, 1);
	if (ret)
		return ret;

	val &= ~TMU_ADP_CS_9_ADP_TS_INTERVAL_MASK;
	val |= FIELD_PREP(TMU_ADP_CS_9_ADP_TS_INTERVAL_MASK, rate);

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_tmu + TMU_ADP_CS_9, 1);
}

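/*
 * TMU_ADP_CS_6_DTS is a "disable time sync" bit, which is why the
 * boolean below is inverted with respect to the enable/disable
 * wrappers that follow: disabling time sync sets the bit and enabling
 * clears it.
 */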
static int tb_port_tmu_time_sync(struct tb_port *port, bool time_sync)
{
	u32 val = time_sync ? TMU_ADP_CS_6_DTS : 0;

	return tb_port_tmu_write(port, TMU_ADP_CS_6, TMU_ADP_CS_6_DTS, val);
}

static int tb_port_tmu_time_sync_disable(struct tb_port *port)
{
	return tb_port_tmu_time_sync(port, true);
}

static int tb_port_tmu_time_sync_enable(struct tb_port *port)
{
	return tb_port_tmu_time_sync(port, false);
}

static int tb_switch_tmu_set_time_disruption(struct tb_switch *sw, bool set)
{
	u32 val, offset, bit;
	int ret;

	if (tb_switch_is_usb4(sw)) {
		offset = sw->tmu.cap + TMU_RTR_CS_0;
		bit = TMU_RTR_CS_0_TD;
	} else {
		offset = sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_26;
		bit = TB_TIME_VSEC_3_CS_26_TD;
	}

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
	if (ret)
		return ret;

	if (set)
		val |= bit;
	else
		val &= ~bit;

	return tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1);
}

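/*
 * Determines the current TMU mode from the hardware state (the
 * TSPacketInterval rate and, for device routers, the mode bits of the
 * upstream adapter) so that the software state starts out in sync with
 * what the router is actually doing.
 */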
static int tmu_mode_init(struct tb_switch *sw)
{
	bool enhanced, ucap;
	int ret, rate;

	ucap = tb_switch_tmu_ucap_is_supported(sw);
	if (ucap)
		tb_sw_dbg(sw, "TMU: supports uni-directional mode\n");
	enhanced = tb_switch_tmu_enhanced_is_supported(sw);
	if (enhanced)
		tb_sw_dbg(sw, "TMU: supports enhanced uni-directional mode\n");

	ret = tb_switch_tmu_rate_read(sw);
	if (ret < 0)
		return ret;
	rate = ret;

	/* Off by default */
	sw->tmu.mode = TB_SWITCH_TMU_MODE_OFF;

	if (tb_route(sw)) {
		struct tb_port *up = tb_upstream_port(sw);

		if (enhanced && tb_port_tmu_is_enhanced(up)) {
			sw->tmu.mode = TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI;
		} else if (ucap && tb_port_tmu_is_unidirectional(up)) {
			if (tmu_rates[TB_SWITCH_TMU_MODE_LOWRES] == rate)
				sw->tmu.mode = TB_SWITCH_TMU_MODE_LOWRES;
			else if (tmu_rates[TB_SWITCH_TMU_MODE_HIFI_UNI] == rate)
				sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_UNI;
		} else if (rate) {
			sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_BI;
		}
	} else if (rate) {
		sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_BI;
	}

	/* Update the initial request to match the current mode */
	sw->tmu.mode_request = sw->tmu.mode;
	sw->tmu.has_ucap = ucap;

	return 0;
}

/**
 * tb_switch_tmu_init() - Initialize switch TMU structures
 * @sw: Switch to initialize
 *
 * This function must be called before other TMU related functions to
 * make sure the internal structures are filled in correctly. Does not
 * change any hardware configuration.
 */
int tb_switch_tmu_init(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	if (tb_switch_is_icm(sw))
		return 0;

	ret = tb_switch_find_cap(sw, TB_SWITCH_CAP_TMU);
	if (ret > 0)
		sw->tmu.cap = ret;

	tb_switch_for_each_port(sw, port) {
		int cap;

		cap = tb_port_find_cap(port, TB_PORT_CAP_TIME1);
		if (cap > 0)
			port->cap_tmu = cap;
	}

	ret = tmu_mode_init(sw);
	if (ret)
		return ret;

	tb_sw_dbg(sw, "TMU: current mode: %s\n", tmu_mode_name(sw->tmu.mode));
	return 0;
}

/**
 * tb_switch_tmu_post_time() - Update switch local time
 * @sw: Switch whose time to update
 *
 * Updates switch local time using the time posting procedure.
 */
int tb_switch_tmu_post_time(struct tb_switch *sw)
{
	unsigned int post_time_high_offset, post_time_high = 0;
	unsigned int post_local_time_offset, post_time_offset;
	struct tb_switch *root_switch = sw->tb->root_switch;
	u64 hi, mid, lo, local_time, post_time;
	int i, ret, retries = 100;
	u32 gm_local_time[3];

	if (!tb_route(sw))
		return 0;

	if (!tb_switch_is_usb4(sw))
		return 0;

	/* Need to be able to read the grand master time */
	if (!root_switch->tmu.cap)
		return 0;

	ret = tb_sw_read(root_switch, gm_local_time, TB_CFG_SWITCH,
			 root_switch->tmu.cap + TMU_RTR_CS_1,
			 ARRAY_SIZE(gm_local_time));
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(gm_local_time); i++)
		tb_sw_dbg(root_switch, "TMU: local_time[%d]=0x%08x\n", i,
			  gm_local_time[i]);

	/* Convert to nanoseconds (drop fractional part) */
	hi = gm_local_time[2] & TMU_RTR_CS_3_LOCAL_TIME_NS_MASK;
	mid = gm_local_time[1];
	lo = (gm_local_time[0] & TMU_RTR_CS_1_LOCAL_TIME_NS_MASK) >>
		TMU_RTR_CS_1_LOCAL_TIME_NS_SHIFT;
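	/*
	 * The 64-bit local time in nanoseconds is spread over three
	 * registers: lo carries bits 15:0 (from TMU_RTR_CS_1, whose low
	 * bits hold the fractional part dropped above), mid carries
	 * bits 47:16 and hi carries bits 63:48.
	 */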
	local_time = hi << 48 | mid << 16 | lo;

	/* Tell the switch that time sync is disrupted for a while */
	ret = tb_switch_tmu_set_time_disruption(sw, true);
	if (ret)
		return ret;

	post_local_time_offset = sw->tmu.cap + TMU_RTR_CS_22;
	post_time_offset = sw->tmu.cap + TMU_RTR_CS_24;
	post_time_high_offset = sw->tmu.cap + TMU_RTR_CS_25;

	/*
	 * Write the Grandmaster time to the Post Local Time registers
	 * of the new switch.
	 */
	ret = tb_sw_write(sw, &local_time, TB_CFG_SWITCH,
			  post_local_time_offset, 2);
	if (ret)
		goto out;

	/*
	 * Have the new switch update its local time by:
	 * 1) writing 0x1 to the Post Time Low register and 0xffffffff
	 *    to the Post Time High register.
	 * 2) writing 0 to the Post Time High register and then waiting
	 *    until the Post Time register reads back as 0, which means
	 *    the time has converged properly.
	 */
	post_time = 0xffffffff00000001ULL;

	ret = tb_sw_write(sw, &post_time, TB_CFG_SWITCH, post_time_offset, 2);
	if (ret)
		goto out;

	ret = tb_sw_write(sw, &post_time_high, TB_CFG_SWITCH,
			  post_time_high_offset, 1);
	if (ret)
		goto out;

	do {
		usleep_range(5, 10);
		ret = tb_sw_read(sw, &post_time, TB_CFG_SWITCH,
				 post_time_offset, 2);
		if (ret)
			goto out;
	} while (--retries && post_time);

	if (!retries) {
		ret = -ETIMEDOUT;
		goto out;
	}

	tb_sw_dbg(sw, "TMU: updated local time to %#llx\n", local_time);

out:
	tb_switch_tmu_set_time_disruption(sw, false);
	return ret;
}

static int disable_enhanced(struct tb_port *up, struct tb_port *down)
{
	int ret;

	/*
	 * The router may already have been disconnected so ignore
	 * errors on the upstream port.
	 */
	tb_port_tmu_rate_write(up, 0);
	tb_port_tmu_enhanced_enable(up, false);

	ret = tb_port_tmu_rate_write(down, 0);
	if (ret)
		return ret;
	return tb_port_tmu_enhanced_enable(down, false);
}

/**
 * tb_switch_tmu_disable() - Disable TMU of a switch
 * @sw: Switch whose TMU to disable
 *
 * Turns off TMU of @sw if it is enabled. If not enabled, does nothing.
 */
int tb_switch_tmu_disable(struct tb_switch *sw)
{
	/* Already disabled? */
	if (sw->tmu.mode == TB_SWITCH_TMU_MODE_OFF)
		return 0;

	if (tb_route(sw)) {
		struct tb_port *down, *up;
		int ret;

		down = tb_switch_downstream_port(sw);
		up = tb_upstream_port(sw);
		/*
		 * In case of uni-directional time sync, the TMU handshake is
		 * initiated by the upstream router. In case of bi-directional
		 * time sync, the TMU handshake is initiated by the downstream
		 * router. We change the downstream router's rate to off for
		 * both uni/bidir cases although it is needed only for the
		 * bi-directional mode. We avoid changing the upstream
		 * router's mode since it might have another downstream
		 * router plugged in that is set to uni-directional mode and
		 * we don't want to change its TMU mode.
		 */
		ret = tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_OFF]);
		if (ret)
			return ret;

		tb_port_tmu_time_sync_disable(up);
		ret = tb_port_tmu_time_sync_disable(down);
		if (ret)
			return ret;

		switch (sw->tmu.mode) {
		case TB_SWITCH_TMU_MODE_LOWRES:
		case TB_SWITCH_TMU_MODE_HIFI_UNI:
			/* The switch may be unplugged so ignore any errors */
			tb_port_tmu_unidirectional_disable(up);
			ret = tb_port_tmu_unidirectional_disable(down);
			if (ret)
				return ret;
			break;

		case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
			ret = disable_enhanced(up, down);
			if (ret)
				return ret;
			break;

		default:
			break;
		}
	} else {
		tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_OFF]);
	}

	sw->tmu.mode = TB_SWITCH_TMU_MODE_OFF;

	tb_sw_dbg(sw, "TMU: disabled\n");
	return 0;
}

/* Called only when there is a failure enabling the requested mode */
static void tb_switch_tmu_off(struct tb_switch *sw)
{
	unsigned int rate = tmu_rates[TB_SWITCH_TMU_MODE_OFF];
	struct tb_port *down, *up;

	down = tb_switch_downstream_port(sw);
	up = tb_upstream_port(sw);
	/*
	 * In case of any failure in one of the steps when setting
	 * bi-directional or uni-directional TMU mode, get back to the TMU
	 * configuration in off mode. In case of additional failures in
	 * the functions below, ignore them since the caller shall already
	 * report a failure.
	 */
	tb_port_tmu_time_sync_disable(down);
	tb_port_tmu_time_sync_disable(up);

	switch (sw->tmu.mode_request) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		tb_switch_tmu_rate_write(tb_switch_parent(sw), rate);
		break;
	case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
		disable_enhanced(up, down);
		break;
	default:
		break;
	}

	/* Always set the rate to 0 */
	tb_switch_tmu_rate_write(sw, rate);

	tb_switch_set_tmu_mode_params(sw, sw->tmu.mode);
	tb_port_tmu_unidirectional_disable(down);
	tb_port_tmu_unidirectional_disable(up);
}

/*
 * This function is called when the previous TMU mode was
 * TB_SWITCH_TMU_MODE_OFF.
 */
static int tb_switch_tmu_enable_bidirectional(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	ret = tb_port_tmu_unidirectional_disable(up);
	if (ret)
		return ret;

	ret = tb_port_tmu_unidirectional_disable(down);
	if (ret)
		goto out;

	ret = tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_HIFI_BI]);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	return 0;

out:
	tb_switch_tmu_off(sw);
	return ret;
}

/* Only needed for Titan Ridge */
static int tb_switch_tmu_disable_objections(struct tb_switch *sw)
{
	struct tb_port *up = tb_upstream_port(sw);
	u32 val;
	int ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
	if (ret)
		return ret;

	val &= ~TB_TIME_VSEC_3_CS_9_TMU_OBJ_MASK;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
	if (ret)
		return ret;

	return tb_port_tmu_write(up, TMU_ADP_CS_6,
				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK,
				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL1 |
				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL2);
}

/*
 * This function is called when the previous TMU mode was
 * TB_SWITCH_TMU_MODE_OFF.
 */
static int tb_switch_tmu_enable_unidirectional(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);
	ret = tb_switch_tmu_rate_write(tb_switch_parent(sw),
				       tmu_rates[sw->tmu.mode_request]);
	if (ret)
		return ret;

	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
	if (ret)
		return ret;

	ret = tb_port_tmu_unidirectional_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_unidirectional_enable(down);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	return 0;

out:
	tb_switch_tmu_off(sw);
	return ret;
}

/*
 * This function is called when the previous TMU mode was
 * TB_SWITCH_TMU_MODE_OFF.
 */
static int tb_switch_tmu_enable_enhanced(struct tb_switch *sw)
{
	unsigned int rate = tmu_rates[sw->tmu.mode_request];
	struct tb_port *up, *down;
	int ret;

	/* Router specific parameters first */
	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
	if (ret)
		return ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	ret = tb_port_set_tmu_mode_params(up, sw->tmu.mode_request);
	if (ret)
		goto out;

	ret = tb_port_tmu_rate_write(up, rate);
	if (ret)
		goto out;

	ret = tb_port_tmu_enhanced_enable(up, true);
	if (ret)
		goto out;

	ret = tb_port_set_tmu_mode_params(down, sw->tmu.mode_request);
	if (ret)
		goto out;

	ret = tb_port_tmu_rate_write(down, rate);
	if (ret)
		goto out;

	ret = tb_port_tmu_enhanced_enable(down, true);
	if (ret)
		goto out;

	return 0;

out:
	tb_switch_tmu_off(sw);
	return ret;
}

static void tb_switch_tmu_change_mode_prev(struct tb_switch *sw)
{
	unsigned int rate = tmu_rates[sw->tmu.mode];
	struct tb_port *down, *up;

	down = tb_switch_downstream_port(sw);
	up = tb_upstream_port(sw);
	/*
	 * In case of any failure in one of the steps when changing the
	 * mode, get back to the TMU configuration of the previous mode.
	 * In case of additional failures in the functions below, ignore
	 * them since the caller shall already report a failure.
	 */
	switch (sw->tmu.mode) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		tb_port_tmu_set_unidirectional(down, true);
		tb_switch_tmu_rate_write(tb_switch_parent(sw), rate);
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		tb_port_tmu_set_unidirectional(down, false);
		tb_switch_tmu_rate_write(sw, rate);
		break;

	default:
		break;
	}

	tb_switch_set_tmu_mode_params(sw, sw->tmu.mode);

	switch (sw->tmu.mode) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		tb_port_tmu_set_unidirectional(up, true);
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		tb_port_tmu_set_unidirectional(up, false);
		break;

	default:
		break;
	}
}

static int tb_switch_tmu_change_mode(struct tb_switch *sw)
{
	unsigned int rate = tmu_rates[sw->tmu.mode_request];
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	/* Program the upstream router downstream facing lane adapter */
	switch (sw->tmu.mode_request) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		ret = tb_port_tmu_set_unidirectional(down, true);
		if (ret)
			goto out;
		ret = tb_switch_tmu_rate_write(tb_switch_parent(sw), rate);
		if (ret)
			goto out;
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		ret = tb_port_tmu_set_unidirectional(down, false);
		if (ret)
			goto out;
		ret = tb_switch_tmu_rate_write(sw, rate);
		if (ret)
			goto out;
		break;

	default:
		/* Not allowed to change modes from other than above */
		return -EINVAL;
	}

	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
	if (ret)
		goto out;

	/* Program the new mode and the downstream router lane adapter */
	switch (sw->tmu.mode_request) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		ret = tb_port_tmu_set_unidirectional(up, true);
		if (ret)
			goto out;
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		ret = tb_port_tmu_set_unidirectional(up, false);
		if (ret)
			goto out;
		break;

	default:
		/* Not allowed to change modes from other than above */
		return -EINVAL;
	}

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	return 0;

out:
	tb_switch_tmu_change_mode_prev(sw);
	return ret;
}

/**
 * tb_switch_tmu_enable() - Enable TMU on a router
 * @sw: Router whose TMU to enable
 *
 * Enables TMU of a router to be in the mode previously requested with
 * tb_switch_tmu_configure(). Calling tb_switch_tmu_configure() is
 * required before calling this function.
 */
int tb_switch_tmu_enable(struct tb_switch *sw)
{
	int ret;

	if (tb_switch_tmu_is_enabled(sw))
		return 0;

	if (tb_switch_is_titan_ridge(sw) &&
	    (sw->tmu.mode_request == TB_SWITCH_TMU_MODE_LOWRES ||
	     sw->tmu.mode_request == TB_SWITCH_TMU_MODE_HIFI_UNI)) {
		ret = tb_switch_tmu_disable_objections(sw);
		if (ret)
			return ret;
	}

	ret = tb_switch_tmu_set_time_disruption(sw, true);
	if (ret)
		return ret;

	if (tb_route(sw)) {
		/*
		 * The supported mode changes are from OFF to
		 * LowRes/HiFi-Uni/HiFi-BiDir/MedRes-Enhanced-Uni, or
		 * between the LowRes, HiFi-Uni and HiFi-BiDir modes.
		 */
		if (sw->tmu.mode == TB_SWITCH_TMU_MODE_OFF) {
			switch (sw->tmu.mode_request) {
			case TB_SWITCH_TMU_MODE_LOWRES:
			case TB_SWITCH_TMU_MODE_HIFI_UNI:
				ret = tb_switch_tmu_enable_unidirectional(sw);
				break;

			case TB_SWITCH_TMU_MODE_HIFI_BI:
				ret = tb_switch_tmu_enable_bidirectional(sw);
				break;
			case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
				ret = tb_switch_tmu_enable_enhanced(sw);
				break;
			default:
				ret = -EINVAL;
				break;
			}
		} else if (sw->tmu.mode == TB_SWITCH_TMU_MODE_LOWRES ||
			   sw->tmu.mode == TB_SWITCH_TMU_MODE_HIFI_UNI ||
			   sw->tmu.mode == TB_SWITCH_TMU_MODE_HIFI_BI) {
			ret = tb_switch_tmu_change_mode(sw);
		} else {
			ret = -EINVAL;
		}
	} else {
		/*
		 * Host router port configurations are written as part of
		 * the configuration of the downstream port of the parent
		 * of the child node - see above.
		 * Here only the host router's rate configuration is written.
		 */
		ret = tb_switch_tmu_rate_write(sw, tmu_rates[sw->tmu.mode_request]);
	}

	if (ret) {
		tb_sw_warn(sw, "TMU: failed to enable mode %s: %d\n",
			   tmu_mode_name(sw->tmu.mode_request), ret);
	} else {
		sw->tmu.mode = sw->tmu.mode_request;
		tb_sw_dbg(sw, "TMU: mode set to: %s\n", tmu_mode_name(sw->tmu.mode));
	}

	return tb_switch_tmu_set_time_disruption(sw, false);
}

/**
 * tb_switch_tmu_configure() - Configure the TMU mode
 * @sw: Router whose mode to change
 * @mode: Mode to configure
 *
 * Selects the TMU mode that is enabled when tb_switch_tmu_enable() is
 * next called.
 *
 * Returns %0 on success and negative errno otherwise. Specifically
 * returns %-EOPNOTSUPP if the requested mode is not possible (not
 * supported by the router and/or topology).
 */
int tb_switch_tmu_configure(struct tb_switch *sw, enum tb_switch_tmu_mode mode)
{
	switch (mode) {
	case TB_SWITCH_TMU_MODE_OFF:
		break;

	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		if (!sw->tmu.has_ucap)
			return -EOPNOTSUPP;
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		break;

	case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI: {
		const struct tb_switch *parent_sw = tb_switch_parent(sw);

		if (!parent_sw || !tb_switch_tmu_enhanced_is_supported(parent_sw))
			return -EOPNOTSUPP;
		if (!tb_switch_tmu_enhanced_is_supported(sw))
			return -EOPNOTSUPP;

		break;
	}

	default:
		tb_sw_warn(sw, "TMU: unsupported mode %u\n", mode);
		return -EINVAL;
	}

	if (sw->tmu.mode_request != mode) {
		tb_sw_dbg(sw, "TMU: mode change %s -> %s requested\n",
			  tmu_mode_name(sw->tmu.mode), tmu_mode_name(mode));
		sw->tmu.mode_request = mode;
	}

	return 0;
}
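
/*
 * Typical usage of the two exported mode functions (an illustrative
 * sketch, not code in this file): a connection manager first requests
 * a mode and then enables it, for example:
 *
 *	ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_MODE_HIFI_BI);
 *	if (ret)
 *		return ret;
 *	return tb_switch_tmu_enable(sw);
 */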