1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Thunderbolt Time Management Unit (TMU) support
4 *
5 * Copyright (C) 2019, Intel Corporation
6 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
7 * Rajmohan Mani <rajmohan.mani@intel.com>
8 */
9
10 #include <linux/delay.h>
11
12 #include "tb.h"
13
/*
 * Time stamp packet interval programmed to TMU_RTR_CS_3 for each TMU
 * mode (0 means the TMU is off). Indexed by enum tb_switch_tmu_mode.
 */
static const unsigned int tmu_rates[] = {
	[TB_SWITCH_TMU_MODE_OFF] = 0,
	[TB_SWITCH_TMU_MODE_LOWRES] = 1000,
	[TB_SWITCH_TMU_MODE_HIFI_UNI] = 16,
	[TB_SWITCH_TMU_MODE_HIFI_BI] = 16,
	[TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI] = 16,
};
21
/*
 * Per-mode TMU tuning parameters, indexed by enum tb_switch_tmu_mode.
 * The first two fields are programmed for every mode (see
 * tb_switch_set_tmu_mode_params()); the remaining fields are only
 * non-zero for the enhanced uni-directional mode and are written to
 * the lane adapter registers by tb_port_set_tmu_mode_params().
 */
static const struct {
	unsigned int freq_meas_window;	/* TMU_RTR_CS_0 frequency measurement window */
	unsigned int avg_const;		/* TMU_RTR_CS_15 freq/delay/offset/error averaging */
	unsigned int delta_avg_const;	/* TMU_RTR_CS_18 delta averaging constant */
	unsigned int repl_timeout;	/* TMU_ADP_CS_8 reply timeout */
	unsigned int repl_threshold;	/* TMU_ADP_CS_8 reply threshold */
	unsigned int repl_n;		/* TMU_ADP_CS_9 reply N */
	unsigned int dirswitch_n;	/* TMU_ADP_CS_9 direction switch N */
} tmu_params[] = {
	[TB_SWITCH_TMU_MODE_OFF] = { },
	[TB_SWITCH_TMU_MODE_LOWRES] = { 30, 4, },
	[TB_SWITCH_TMU_MODE_HIFI_UNI] = { 800, 8, },
	[TB_SWITCH_TMU_MODE_HIFI_BI] = { 800, 8, },
	[TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI] = {
		800, 4, 0, 3125, 25, 128, 255,
	},
};
39
tmu_mode_name(enum tb_switch_tmu_mode mode)40 static const char *tmu_mode_name(enum tb_switch_tmu_mode mode)
41 {
42 switch (mode) {
43 case TB_SWITCH_TMU_MODE_OFF:
44 return "off";
45 case TB_SWITCH_TMU_MODE_LOWRES:
46 return "uni-directional, LowRes";
47 case TB_SWITCH_TMU_MODE_HIFI_UNI:
48 return "uni-directional, HiFi";
49 case TB_SWITCH_TMU_MODE_HIFI_BI:
50 return "bi-directional, HiFi";
51 case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
52 return "enhanced uni-directional, MedRes";
53 default:
54 return "unknown";
55 }
56 }
57
tb_switch_tmu_enhanced_is_supported(const struct tb_switch * sw)58 static bool tb_switch_tmu_enhanced_is_supported(const struct tb_switch *sw)
59 {
60 return usb4_switch_version(sw) > 1;
61 }
62
/*
 * Program the router TMU measurement parameters for @mode: the
 * frequency measurement window (TMU_RTR_CS_0), the four averaging
 * constants (TMU_RTR_CS_15) and, on routers supporting the enhanced
 * mode, the delta averaging constant (TMU_RTR_CS_18).
 */
static int tb_switch_set_tmu_mode_params(struct tb_switch *sw,
					 enum tb_switch_tmu_mode mode)
{
	u32 freq, avg, val;
	int ret;

	freq = tmu_params[mode].freq_meas_window;
	avg = tmu_params[mode].avg_const;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_0, 1);
	if (ret)
		return ret;

	val &= ~TMU_RTR_CS_0_FREQ_WIND_MASK;
	val |= FIELD_PREP(TMU_RTR_CS_0_FREQ_WIND_MASK, freq);

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->tmu.cap + TMU_RTR_CS_0, 1);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_15, 1);
	if (ret)
		return ret;

	/* The same averaging constant is used for all four fields */
	val &= ~TMU_RTR_CS_15_FREQ_AVG_MASK &
		~TMU_RTR_CS_15_DELAY_AVG_MASK &
		~TMU_RTR_CS_15_OFFSET_AVG_MASK &
		~TMU_RTR_CS_15_ERROR_AVG_MASK;
	val |= FIELD_PREP(TMU_RTR_CS_15_FREQ_AVG_MASK, avg) |
		FIELD_PREP(TMU_RTR_CS_15_DELAY_AVG_MASK, avg) |
		FIELD_PREP(TMU_RTR_CS_15_OFFSET_AVG_MASK, avg) |
		FIELD_PREP(TMU_RTR_CS_15_ERROR_AVG_MASK, avg);

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->tmu.cap + TMU_RTR_CS_15, 1);
	if (ret)
		return ret;

	if (tb_switch_tmu_enhanced_is_supported(sw)) {
		u32 delta_avg = tmu_params[mode].delta_avg_const;

		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
				 sw->tmu.cap + TMU_RTR_CS_18, 1);
		if (ret)
			return ret;

		val &= ~TMU_RTR_CS_18_DELTA_AVG_CONST_MASK;
		val |= FIELD_PREP(TMU_RTR_CS_18_DELTA_AVG_CONST_MASK, delta_avg);

		ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
				  sw->tmu.cap + TMU_RTR_CS_18, 1);
	}

	return ret;
}
121
tb_switch_tmu_ucap_is_supported(struct tb_switch * sw)122 static bool tb_switch_tmu_ucap_is_supported(struct tb_switch *sw)
123 {
124 int ret;
125 u32 val;
126
127 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
128 sw->tmu.cap + TMU_RTR_CS_0, 1);
129 if (ret)
130 return false;
131
132 return !!(val & TMU_RTR_CS_0_UCAP);
133 }
134
tb_switch_tmu_rate_read(struct tb_switch * sw)135 static int tb_switch_tmu_rate_read(struct tb_switch *sw)
136 {
137 int ret;
138 u32 val;
139
140 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
141 sw->tmu.cap + TMU_RTR_CS_3, 1);
142 if (ret)
143 return ret;
144
145 val >>= TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
146 return val;
147 }
148
tb_switch_tmu_rate_write(struct tb_switch * sw,int rate)149 static int tb_switch_tmu_rate_write(struct tb_switch *sw, int rate)
150 {
151 int ret;
152 u32 val;
153
154 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
155 sw->tmu.cap + TMU_RTR_CS_3, 1);
156 if (ret)
157 return ret;
158
159 val &= ~TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK;
160 val |= rate << TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
161
162 return tb_sw_write(sw, &val, TB_CFG_SWITCH,
163 sw->tmu.cap + TMU_RTR_CS_3, 1);
164 }
165
/*
 * Read-modify-write helper for lane adapter TMU registers: clears the
 * bits in @mask and sets the bits in @value at @offset.
 */
static int tb_port_tmu_write(struct tb_port *port, u8 offset, u32 mask,
			     u32 value)
{
	int ret;
	u32 data;

	ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_tmu + offset, 1);
	if (ret)
		return ret;

	data = (data & ~mask) | value;

	return tb_port_write(port, &data, TB_CFG_PORT,
			     port->cap_tmu + offset, 1);
}
182
/* Sets or clears the uni-directional mode (UDM) bit of the adapter */
static int tb_port_tmu_set_unidirectional(struct tb_port *port,
					  bool unidirectional)
{
	/* Nothing to do if the router has no uni-directional capability */
	if (!port->sw->tmu.has_ucap)
		return 0;

	return tb_port_tmu_write(port, TMU_ADP_CS_3, TMU_ADP_CS_3_UDM,
				 unidirectional ? TMU_ADP_CS_3_UDM : 0);
}
194
/* Clears the uni-directional mode bit (no-op if router lacks ucap) */
static inline int tb_port_tmu_unidirectional_disable(struct tb_port *port)
{
	return tb_port_tmu_set_unidirectional(port, false);
}
199
/* Sets the uni-directional mode bit (no-op if router lacks ucap) */
static inline int tb_port_tmu_unidirectional_enable(struct tb_port *port)
{
	return tb_port_tmu_set_unidirectional(port, true);
}
204
tb_port_tmu_is_unidirectional(struct tb_port * port)205 static bool tb_port_tmu_is_unidirectional(struct tb_port *port)
206 {
207 int ret;
208 u32 val;
209
210 ret = tb_port_read(port, &val, TB_CFG_PORT,
211 port->cap_tmu + TMU_ADP_CS_3, 1);
212 if (ret)
213 return false;
214
215 return val & TMU_ADP_CS_3_UDM;
216 }
217
tb_port_tmu_is_enhanced(struct tb_port * port)218 static bool tb_port_tmu_is_enhanced(struct tb_port *port)
219 {
220 int ret;
221 u32 val;
222
223 ret = tb_port_read(port, &val, TB_CFG_PORT,
224 port->cap_tmu + TMU_ADP_CS_8, 1);
225 if (ret)
226 return false;
227
228 return val & TMU_ADP_CS_8_EUDM;
229 }
230
231 /* Can be called to non-v2 lane adapters too */
tb_port_tmu_enhanced_enable(struct tb_port * port,bool enable)232 static int tb_port_tmu_enhanced_enable(struct tb_port *port, bool enable)
233 {
234 int ret;
235 u32 val;
236
237 if (!tb_switch_tmu_enhanced_is_supported(port->sw))
238 return 0;
239
240 ret = tb_port_read(port, &val, TB_CFG_PORT,
241 port->cap_tmu + TMU_ADP_CS_8, 1);
242 if (ret)
243 return ret;
244
245 if (enable)
246 val |= TMU_ADP_CS_8_EUDM;
247 else
248 val &= ~TMU_ADP_CS_8_EUDM;
249
250 return tb_port_write(port, &val, TB_CFG_PORT,
251 port->cap_tmu + TMU_ADP_CS_8, 1);
252 }
253
/*
 * Program the lane adapter TMU parameters for @mode: reply timeout and
 * threshold (TMU_ADP_CS_8), reply N and direction switch N
 * (TMU_ADP_CS_9). All values come from the tmu_params table.
 */
static int tb_port_set_tmu_mode_params(struct tb_port *port,
				       enum tb_switch_tmu_mode mode)
{
	u32 repl_timeout, repl_threshold, repl_n, dirswitch_n, val;
	int ret;

	repl_timeout = tmu_params[mode].repl_timeout;
	repl_threshold = tmu_params[mode].repl_threshold;
	repl_n = tmu_params[mode].repl_n;
	dirswitch_n = tmu_params[mode].dirswitch_n;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_tmu + TMU_ADP_CS_8, 1);
	if (ret)
		return ret;

	val &= ~TMU_ADP_CS_8_REPL_TIMEOUT_MASK;
	val &= ~TMU_ADP_CS_8_REPL_THRESHOLD_MASK;
	val |= FIELD_PREP(TMU_ADP_CS_8_REPL_TIMEOUT_MASK, repl_timeout);
	val |= FIELD_PREP(TMU_ADP_CS_8_REPL_THRESHOLD_MASK, repl_threshold);

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_tmu + TMU_ADP_CS_8, 1);
	if (ret)
		return ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_tmu + TMU_ADP_CS_9, 1);
	if (ret)
		return ret;

	val &= ~TMU_ADP_CS_9_REPL_N_MASK;
	val &= ~TMU_ADP_CS_9_DIRSWITCH_N_MASK;
	val |= FIELD_PREP(TMU_ADP_CS_9_REPL_N_MASK, repl_n);
	val |= FIELD_PREP(TMU_ADP_CS_9_DIRSWITCH_N_MASK, dirswitch_n);

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_tmu + TMU_ADP_CS_9, 1);
}
293
294 /* Can be called to non-v2 lane adapters too */
tb_port_tmu_rate_write(struct tb_port * port,int rate)295 static int tb_port_tmu_rate_write(struct tb_port *port, int rate)
296 {
297 int ret;
298 u32 val;
299
300 if (!tb_switch_tmu_enhanced_is_supported(port->sw))
301 return 0;
302
303 ret = tb_port_read(port, &val, TB_CFG_PORT,
304 port->cap_tmu + TMU_ADP_CS_9, 1);
305 if (ret)
306 return ret;
307
308 val &= ~TMU_ADP_CS_9_ADP_TS_INTERVAL_MASK;
309 val |= FIELD_PREP(TMU_ADP_CS_9_ADP_TS_INTERVAL_MASK, rate);
310
311 return tb_port_write(port, &val, TB_CFG_PORT,
312 port->cap_tmu + TMU_ADP_CS_9, 1);
313 }
314
/*
 * Set or clear the DTS bit of the adapter. Note the inverted logic:
 * @time_sync == true sets DTS, which per the _disable()/_enable()
 * wrappers below means time sync is being *disabled*.
 */
static int tb_port_tmu_time_sync(struct tb_port *port, bool time_sync)
{
	u32 val = time_sync ? TMU_ADP_CS_6_DTS : 0;

	return tb_port_tmu_write(port, TMU_ADP_CS_6, TMU_ADP_CS_6_DTS, val);
}
321
/* Disables time sync by setting the DTS bit — true is intentional */
static int tb_port_tmu_time_sync_disable(struct tb_port *port)
{
	return tb_port_tmu_time_sync(port, true);
}
326
/* Enables time sync by clearing the DTS bit — false is intentional */
static int tb_port_tmu_time_sync_enable(struct tb_port *port)
{
	return tb_port_tmu_time_sync(port, false);
}
331
/*
 * Sets or clears the time disruption (TD) bit of @sw. USB4 routers
 * keep it in the TMU router capability, pre-USB4 (legacy) routers in
 * a vendor specific capability.
 */
static int tb_switch_tmu_set_time_disruption(struct tb_switch *sw, bool set)
{
	u32 reg, offset, bit;
	int ret;

	if (tb_switch_is_usb4(sw)) {
		offset = sw->tmu.cap + TMU_RTR_CS_0;
		bit = TMU_RTR_CS_0_TD;
	} else {
		offset = sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_26;
		bit = TB_TIME_VSEC_3_CS_26_TD;
	}

	ret = tb_sw_read(sw, &reg, TB_CFG_SWITCH, offset, 1);
	if (ret)
		return ret;

	reg = set ? (reg | bit) : (reg & ~bit);

	return tb_sw_write(sw, &reg, TB_CFG_SWITCH, offset, 1);
}
356
/*
 * Probe the current TMU configuration of @sw and fill in sw->tmu.
 * The active mode is deduced from the upstream lane adapter bits (for
 * non-root routers) and the programmed time stamp packet interval.
 */
static int tmu_mode_init(struct tb_switch *sw)
{
	bool enhanced, ucap;
	int ret, rate;

	ucap = tb_switch_tmu_ucap_is_supported(sw);
	if (ucap)
		tb_sw_dbg(sw, "TMU: supports uni-directional mode\n");
	enhanced = tb_switch_tmu_enhanced_is_supported(sw);
	if (enhanced)
		tb_sw_dbg(sw, "TMU: supports enhanced uni-directional mode\n");

	ret = tb_switch_tmu_rate_read(sw);
	if (ret < 0)
		return ret;
	rate = ret;

	/* Off by default */
	sw->tmu.mode = TB_SWITCH_TMU_MODE_OFF;

	if (tb_route(sw)) {
		struct tb_port *up = tb_upstream_port(sw);

		if (enhanced && tb_port_tmu_is_enhanced(up)) {
			sw->tmu.mode = TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI;
		} else if (ucap && tb_port_tmu_is_unidirectional(up)) {
			/* Uni-directional: the rate distinguishes LowRes/HiFi */
			if (tmu_rates[TB_SWITCH_TMU_MODE_LOWRES] == rate)
				sw->tmu.mode = TB_SWITCH_TMU_MODE_LOWRES;
			else if (tmu_rates[TB_SWITCH_TMU_MODE_HIFI_UNI] == rate)
				sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_UNI;
		} else if (rate) {
			sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_BI;
		}
	} else if (rate) {
		/* Root router: any non-zero rate means bi-directional HiFi */
		sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_BI;
	}

	/* Update the initial request to match the current mode */
	sw->tmu.mode_request = sw->tmu.mode;
	sw->tmu.has_ucap = ucap;

	return 0;
}
400
401 /**
402 * tb_switch_tmu_init() - Initialize switch TMU structures
403 * @sw: Switch to be initialized
404 *
405 * This function must be called before other TMU related functions to
406 * make sure the internal structures are filled in correctly. Does not
407 * change any hardware configuration.
408 *
409 * Return: %0 on success, negative errno otherwise.
410 */
tb_switch_tmu_init(struct tb_switch * sw)411 int tb_switch_tmu_init(struct tb_switch *sw)
412 {
413 struct tb_port *port;
414 int ret;
415
416 if (tb_switch_is_icm(sw))
417 return 0;
418
419 ret = tb_switch_find_cap(sw, TB_SWITCH_CAP_TMU);
420 if (ret > 0)
421 sw->tmu.cap = ret;
422
423 tb_switch_for_each_port(sw, port) {
424 int cap;
425
426 cap = tb_port_find_cap(port, TB_PORT_CAP_TIME1);
427 if (cap > 0)
428 port->cap_tmu = cap;
429 }
430
431 ret = tmu_mode_init(sw);
432 if (ret)
433 return ret;
434
435 tb_sw_dbg(sw, "TMU: current mode: %s\n", tmu_mode_name(sw->tmu.mode));
436 return 0;
437 }
438
439 /**
440 * tb_switch_tmu_post_time() - Update switch local time
441 * @sw: Switch whose time to update
442 *
443 * Updates switch local time using time posting procedure.
444 *
445 * Return: %0 on success, negative errno otherwise.
446 */
int tb_switch_tmu_post_time(struct tb_switch *sw)
{
	unsigned int post_time_high_offset, post_time_high = 0;
	unsigned int post_local_time_offset, post_time_offset;
	struct tb_switch *root_switch = sw->tb->root_switch;
	u64 hi, mid, lo, local_time, post_time;
	int i, ret, retries = 100;
	u32 gm_local_time[3];

	/* Only makes sense for non-root USB4 routers */
	if (!tb_route(sw))
		return 0;

	if (!tb_switch_is_usb4(sw))
		return 0;

	/* Need to be able to read the grand master time */
	if (!root_switch->tmu.cap)
		return 0;

	/* Read the 96-bit grandmaster local time (TMU_RTR_CS_1..3) */
	ret = tb_sw_read(root_switch, gm_local_time, TB_CFG_SWITCH,
			 root_switch->tmu.cap + TMU_RTR_CS_1,
			 ARRAY_SIZE(gm_local_time));
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(gm_local_time); i++)
		tb_sw_dbg(root_switch, "TMU: local_time[%d]=0x%08x\n", i,
			  gm_local_time[i]);

	/* Convert to nanoseconds (drop fractional part) */
	hi = gm_local_time[2] & TMU_RTR_CS_3_LOCAL_TIME_NS_MASK;
	mid = gm_local_time[1];
	lo = (gm_local_time[0] & TMU_RTR_CS_1_LOCAL_TIME_NS_MASK) >>
		TMU_RTR_CS_1_LOCAL_TIME_NS_SHIFT;
	local_time = hi << 48 | mid << 16 | lo;

	/* Tell the switch that time sync is disrupted for a while */
	ret = tb_switch_tmu_set_time_disruption(sw, true);
	if (ret)
		return ret;

	post_local_time_offset = sw->tmu.cap + TMU_RTR_CS_22;
	post_time_offset = sw->tmu.cap + TMU_RTR_CS_24;
	post_time_high_offset = sw->tmu.cap + TMU_RTR_CS_25;

	/*
	 * Write the Grandmaster time to the Post Local Time registers
	 * of the new switch.
	 */
	ret = tb_sw_write(sw, &local_time, TB_CFG_SWITCH,
			  post_local_time_offset, 2);
	if (ret)
		goto out;

	/*
	 * Have the new switch update its local time by:
	 * 1) writing 0x1 to the Post Time Low register and 0xffffffff to
	 * Post Time High register.
	 * 2) write 0 to Post Time High register and then wait for
	 * the completion of the post_time register becomes 0.
	 * This means the time has been converged properly.
	 */
	post_time = 0xffffffff00000001ULL;

	ret = tb_sw_write(sw, &post_time, TB_CFG_SWITCH, post_time_offset, 2);
	if (ret)
		goto out;

	ret = tb_sw_write(sw, &post_time_high, TB_CFG_SWITCH,
			  post_time_high_offset, 1);
	if (ret)
		goto out;

	/* Poll (up to 100 times) until the router clears post_time */
	do {
		usleep_range(5, 10);
		ret = tb_sw_read(sw, &post_time, TB_CFG_SWITCH,
				 post_time_offset, 2);
		if (ret)
			goto out;
	} while (--retries && post_time);

	if (!retries) {
		ret = -ETIMEDOUT;
		goto out;
	}

	tb_sw_dbg(sw, "TMU: updated local time to %#llx\n", local_time);

out:
	/* Time disruption is lifted even on the error paths */
	tb_switch_tmu_set_time_disruption(sw, false);
	return ret;
}
539
disable_enhanced(struct tb_port * up,struct tb_port * down)540 static int disable_enhanced(struct tb_port *up, struct tb_port *down)
541 {
542 int ret;
543
544 /*
545 * Router may already been disconnected so ignore errors on the
546 * upstream port.
547 */
548 tb_port_tmu_rate_write(up, 0);
549 tb_port_tmu_enhanced_enable(up, false);
550
551 ret = tb_port_tmu_rate_write(down, 0);
552 if (ret)
553 return ret;
554 return tb_port_tmu_enhanced_enable(down, false);
555 }
556
557 /**
558 * tb_switch_tmu_disable() - Disable TMU of a switch
559 * @sw: Switch whose TMU to disable
560 *
561 * Turns off TMU of @sw if it is enabled. If not enabled does nothing.
562 *
563 * Return: %0 on success, negative errno otherwise.
564 */
int tb_switch_tmu_disable(struct tb_switch *sw)
{
	/* Already disabled? */
	if (sw->tmu.mode == TB_SWITCH_TMU_MODE_OFF)
		return 0;

	if (tb_route(sw)) {
		struct tb_port *down, *up;
		int ret;

		down = tb_switch_downstream_port(sw);
		up = tb_upstream_port(sw);
		/*
		 * In case of uni-directional time sync, TMU handshake is
		 * initiated by upstream router. In case of bi-directional
		 * time sync, TMU handshake is initiated by downstream router.
		 * We change downstream router's rate to off for both uni/bidir
		 * cases although it is needed only for the bi-directional mode.
		 * We avoid changing upstream router's mode since it might
		 * have another downstream router plugged, that is set to
		 * uni-directional mode and we don't want to change it's TMU
		 * mode.
		 */
		ret = tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_OFF]);
		if (ret)
			return ret;

		/* Upstream side is best effort (router may be unplugged) */
		tb_port_tmu_time_sync_disable(up);
		ret = tb_port_tmu_time_sync_disable(down);
		if (ret)
			return ret;

		/* Mode specific teardown of the lane adapters */
		switch (sw->tmu.mode) {
		case TB_SWITCH_TMU_MODE_LOWRES:
		case TB_SWITCH_TMU_MODE_HIFI_UNI:
			/* The switch may be unplugged so ignore any errors */
			tb_port_tmu_unidirectional_disable(up);
			ret = tb_port_tmu_unidirectional_disable(down);
			if (ret)
				return ret;
			break;

		case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
			ret = disable_enhanced(up, down);
			if (ret)
				return ret;
			break;

		default:
			break;
		}
	} else {
		/* Root router: only the rate needs to be turned off */
		tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_OFF]);
	}

	sw->tmu.mode = TB_SWITCH_TMU_MODE_OFF;

	tb_sw_dbg(sw, "TMU: disabled\n");
	return 0;
}
625
/* Called only when there is failure enabling requested mode */
static void tb_switch_tmu_off(struct tb_switch *sw)
{
	unsigned int rate = tmu_rates[TB_SWITCH_TMU_MODE_OFF];
	struct tb_port *down, *up;

	down = tb_switch_downstream_port(sw);
	up = tb_upstream_port(sw);
	/*
	 * In case of any failure in one of the steps when setting
	 * bi-directional or uni-directional TMU mode, get back to the TMU
	 * configurations in off mode. In case of additional failures in
	 * the functions below, ignore them since the caller shall already
	 * report a failure.
	 */
	tb_port_tmu_time_sync_disable(down);
	tb_port_tmu_time_sync_disable(up);

	/* Undo the mode specific steps of the requested (failed) mode */
	switch (sw->tmu.mode_request) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		/* Uni-directional rate lives in the parent router */
		tb_switch_tmu_rate_write(tb_switch_parent(sw), rate);
		break;
	case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
		disable_enhanced(up, down);
		break;
	default:
		break;
	}

	/* Always set the rate to 0 */
	tb_switch_tmu_rate_write(sw, rate);

	/* Restore the parameters and adapter bits of the previous mode */
	tb_switch_set_tmu_mode_params(sw, sw->tmu.mode);
	tb_port_tmu_unidirectional_disable(down);
	tb_port_tmu_unidirectional_disable(up);
}
663
/*
 * This function is called when the previous TMU mode was
 * TB_SWITCH_TMU_MODE_OFF.
 */
static int tb_switch_tmu_enable_bidirectional(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	/* Both sides of the link must have UDM cleared */
	ret = tb_port_tmu_unidirectional_disable(up);
	if (ret)
		return ret;

	ret = tb_port_tmu_unidirectional_disable(down);
	if (ret)
		goto out;

	/* Bi-directional handshake is driven by this router's rate */
	ret = tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_HIFI_BI]);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	return 0;

out:
	/* Roll everything back to the off configuration */
	tb_switch_tmu_off(sw);
	return ret;
}
702
/* Only needed for Titan Ridge */
static int tb_switch_tmu_disable_objections(struct tb_switch *sw)
{
	struct tb_port *up = tb_upstream_port(sw);
	u32 val;
	int ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
	if (ret)
		return ret;

	/* Clear the TMU objection bits in the vendor specific capability */
	val &= ~TB_TIME_VSEC_3_CS_9_TMU_OBJ_MASK;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
	if (ret)
		return ret;

	/* ... and disable the CL1/CL2 objections on the upstream adapter */
	return tb_port_tmu_write(up, TMU_ADP_CS_6,
				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK,
				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL1 |
				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL2);
}
727
/*
 * This function is called when the previous TMU mode was
 * TB_SWITCH_TMU_MODE_OFF.
 */
static int tb_switch_tmu_enable_unidirectional(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);
	/* Uni-directional handshake is driven by the parent router's rate */
	ret = tb_switch_tmu_rate_write(tb_switch_parent(sw),
				       tmu_rates[sw->tmu.mode_request]);
	if (ret)
		return ret;

	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
	if (ret)
		return ret;

	/* Upstream adapter first: UDM then time sync */
	ret = tb_port_tmu_unidirectional_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	/* Then the downstream facing adapter of the parent */
	ret = tb_port_tmu_unidirectional_enable(down);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	return 0;

out:
	/* Roll everything back to the off configuration */
	tb_switch_tmu_off(sw);
	return ret;
}
770
/*
 * This function is called when the previous TMU mode was
 * TB_SWITCH_TMU_MODE_OFF.
 */
static int tb_switch_tmu_enable_enhanced(struct tb_switch *sw)
{
	unsigned int rate = tmu_rates[sw->tmu.mode_request];
	struct tb_port *up, *down;
	int ret;

	/* Router specific parameters first */
	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
	if (ret)
		return ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	/* Then the upstream adapter: params, rate and finally EUDM */
	ret = tb_port_set_tmu_mode_params(up, sw->tmu.mode_request);
	if (ret)
		goto out;

	ret = tb_port_tmu_rate_write(up, rate);
	if (ret)
		goto out;

	ret = tb_port_tmu_enhanced_enable(up, true);
	if (ret)
		goto out;

	/* Same sequence for the downstream facing adapter of the parent */
	ret = tb_port_set_tmu_mode_params(down, sw->tmu.mode_request);
	if (ret)
		goto out;

	ret = tb_port_tmu_rate_write(down, rate);
	if (ret)
		goto out;

	ret = tb_port_tmu_enhanced_enable(down, true);
	if (ret)
		goto out;

	return 0;

out:
	/* Roll everything back to the off configuration */
	tb_switch_tmu_off(sw);
	return ret;
}
819
/* Reverts a failed tb_switch_tmu_change_mode() back to sw->tmu.mode */
static void tb_switch_tmu_change_mode_prev(struct tb_switch *sw)
{
	unsigned int rate = tmu_rates[sw->tmu.mode];
	struct tb_port *down, *up;

	down = tb_switch_downstream_port(sw);
	up = tb_upstream_port(sw);
	/*
	 * In case of any failure in one of the steps when change mode,
	 * get back to the TMU configurations in previous mode.
	 * In case of additional failures in the functions below,
	 * ignore them since the caller shall already report a failure.
	 */
	switch (sw->tmu.mode) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		/* Uni-directional: UDM on downstream, rate in the parent */
		tb_port_tmu_set_unidirectional(down, true);
		tb_switch_tmu_rate_write(tb_switch_parent(sw), rate);
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		/* Bi-directional: UDM off, rate in this router */
		tb_port_tmu_set_unidirectional(down, false);
		tb_switch_tmu_rate_write(sw, rate);
		break;

	default:
		break;
	}

	tb_switch_set_tmu_mode_params(sw, sw->tmu.mode);

	/* Restore the upstream adapter UDM bit for the previous mode */
	switch (sw->tmu.mode) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		tb_port_tmu_set_unidirectional(up, true);
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		tb_port_tmu_set_unidirectional(up, false);
		break;

	default:
		break;
	}
}
865
/*
 * Switch from the current (uni/bi-directional, non-enhanced) mode to
 * sw->tmu.mode_request without going through off first. On failure the
 * previous mode is restored.
 */
static int tb_switch_tmu_change_mode(struct tb_switch *sw)
{
	unsigned int rate = tmu_rates[sw->tmu.mode_request];
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	/* Program the upstream router downstream facing lane adapter */
	switch (sw->tmu.mode_request) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		ret = tb_port_tmu_set_unidirectional(down, true);
		if (ret)
			goto out;
		ret = tb_switch_tmu_rate_write(tb_switch_parent(sw), rate);
		if (ret)
			goto out;
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		ret = tb_port_tmu_set_unidirectional(down, false);
		if (ret)
			goto out;
		ret = tb_switch_tmu_rate_write(sw, rate);
		if (ret)
			goto out;
		break;

	default:
		/* Not allowed to change modes from other than above */
		return -EINVAL;
	}

	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
	if (ret)
		goto out;

	/* Program the new mode and the downstream router lane adapter */
	switch (sw->tmu.mode_request) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		ret = tb_port_tmu_set_unidirectional(up, true);
		if (ret)
			goto out;
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		ret = tb_port_tmu_set_unidirectional(up, false);
		if (ret)
			goto out;
		break;

	default:
		/* Not allowed to change modes from other than above */
		return -EINVAL;
	}

	/* Finally (re)enable time sync on both adapters */
	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	return 0;

out:
	/* Restore the previous mode configuration */
	tb_switch_tmu_change_mode_prev(sw);
	return ret;
}
939
940 /**
941 * tb_switch_tmu_enable() - Enable TMU on a router
942 * @sw: Router whose TMU to enable
943 *
944 * Enables TMU of a router to be in uni-directional Normal/HiFi or
945 * bi-directional HiFi mode. Calling tb_switch_tmu_configure() is
946 * required before calling this function.
947 *
948 * Return: %0 on success, negative errno otherwise.
949 */
int tb_switch_tmu_enable(struct tb_switch *sw)
{
	int ret;

	/* Nothing to do if the requested mode is already active */
	if (tb_switch_tmu_is_enabled(sw))
		return 0;

	/* Titan Ridge needs TMU objections cleared for uni-directional */
	if (tb_switch_is_titan_ridge(sw) &&
	    (sw->tmu.mode_request == TB_SWITCH_TMU_MODE_LOWRES ||
	     sw->tmu.mode_request == TB_SWITCH_TMU_MODE_HIFI_UNI)) {
		ret = tb_switch_tmu_disable_objections(sw);
		if (ret)
			return ret;
	}

	/* Announce time disruption while re-programming the TMU */
	ret = tb_switch_tmu_set_time_disruption(sw, true);
	if (ret)
		return ret;

	if (tb_route(sw)) {
		/*
		 * The used mode changes are from OFF to
		 * HiFi-Uni/HiFi-BiDir/Normal-Uni or from Normal-Uni to
		 * HiFi-Uni.
		 */
		if (sw->tmu.mode == TB_SWITCH_TMU_MODE_OFF) {
			switch (sw->tmu.mode_request) {
			case TB_SWITCH_TMU_MODE_LOWRES:
			case TB_SWITCH_TMU_MODE_HIFI_UNI:
				ret = tb_switch_tmu_enable_unidirectional(sw);
				break;

			case TB_SWITCH_TMU_MODE_HIFI_BI:
				ret = tb_switch_tmu_enable_bidirectional(sw);
				break;
			case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
				ret = tb_switch_tmu_enable_enhanced(sw);
				break;
			default:
				ret = -EINVAL;
				break;
			}
		} else if (sw->tmu.mode == TB_SWITCH_TMU_MODE_LOWRES ||
			   sw->tmu.mode == TB_SWITCH_TMU_MODE_HIFI_UNI ||
			   sw->tmu.mode == TB_SWITCH_TMU_MODE_HIFI_BI) {
			ret = tb_switch_tmu_change_mode(sw);
		} else {
			ret = -EINVAL;
		}
	} else {
		/*
		 * Host router port configurations are written as
		 * part of configurations for downstream port of the parent
		 * of the child node - see above.
		 * Here only the host router's rate configuration is written.
		 */
		ret = tb_switch_tmu_rate_write(sw, tmu_rates[sw->tmu.mode_request]);
	}

	if (ret) {
		tb_sw_warn(sw, "TMU: failed to enable mode %s: %d\n",
			   tmu_mode_name(sw->tmu.mode_request), ret);
	} else {
		sw->tmu.mode = sw->tmu.mode_request;
		tb_sw_dbg(sw, "TMU: mode set to: %s\n", tmu_mode_name(sw->tmu.mode));
	}

	/* Lift the time disruption in both success and error cases */
	return tb_switch_tmu_set_time_disruption(sw, false);
}
1019
1020 /**
1021 * tb_switch_tmu_configure() - Configure the TMU mode
1022 * @sw: Router whose mode to change
1023 * @mode: Mode to configure
1024 *
1025 * Selects the TMU mode that is enabled when tb_switch_tmu_enable() is
1026 * next called.
1027 *
1028 * Return:
1029 * * %0 - On success.
1030 * * %-EOPNOTSUPP - If the requested mode is not possible (not supported by
1031 * the router and/or topology).
1032 * * Negative errno - Another error occurred.
1033 */
int tb_switch_tmu_configure(struct tb_switch *sw, enum tb_switch_tmu_mode mode)
{
	/* Validate that the topology actually supports @mode */
	switch (mode) {
	case TB_SWITCH_TMU_MODE_OFF:
		break;

	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		/* Uni-directional modes need the ucap capability */
		if (!sw->tmu.has_ucap)
			return -EOPNOTSUPP;
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		break;

	case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI: {
		const struct tb_switch *parent_sw = tb_switch_parent(sw);

		/* Both this router and its parent must support enhanced mode */
		if (!parent_sw || !tb_switch_tmu_enhanced_is_supported(parent_sw))
			return -EOPNOTSUPP;
		if (!tb_switch_tmu_enhanced_is_supported(sw))
			return -EOPNOTSUPP;

		break;
	}

	default:
		tb_sw_warn(sw, "TMU: unsupported mode %u\n", mode);
		return -EINVAL;
	}

	/* Only record the request; tb_switch_tmu_enable() applies it */
	if (sw->tmu.mode_request != mode) {
		tb_sw_dbg(sw, "TMU: mode change %s -> %s requested\n",
			  tmu_mode_name(sw->tmu.mode), tmu_mode_name(mode));
		sw->tmu.mode_request = mode;
	}

	return 0;
}
1073