xref: /linux/drivers/gpu/drm/i915/display/intel_tc.c (revision bf4afc53b77aeaa48b5409da5c8da6bb4eff7f43)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5 
6 #include <linux/iopoll.h>
7 
8 #include <drm/drm_print.h>
9 
10 #include "i915_reg.h"
11 #include "intel_atomic.h"
12 #include "intel_cx0_phy_regs.h"
13 #include "intel_ddi.h"
14 #include "intel_de.h"
15 #include "intel_display.h"
16 #include "intel_display_driver.h"
17 #include "intel_display_power_map.h"
18 #include "intel_display_regs.h"
19 #include "intel_display_types.h"
20 #include "intel_display_utils.h"
21 #include "intel_dkl_phy_regs.h"
22 #include "intel_dp.h"
23 #include "intel_dp_mst.h"
24 #include "intel_mg_phy_regs.h"
25 #include "intel_modeset_lock.h"
26 #include "intel_tc.h"
27 
/* The connection mode a TC port PHY is currently in. */
enum tc_port_mode {
	TC_PORT_DISCONNECTED,
	TC_PORT_TBT_ALT,
	TC_PORT_DP_ALT,
	TC_PORT_LEGACY,
};

struct intel_tc_port;

/*
 * Per-platform hooks implementing the TC PHY connect/disconnect flow
 * (ICL, TGL, ADLP and XELPDP variants are defined below).
 */
struct intel_tc_phy_ops {
	/* Power domain that blocks the TC-cold power state for this PHY. */
	enum intel_display_power_domain (*cold_off_domain)(struct intel_tc_port *tc);
	/* Bitmask of BIT(TC_PORT_*) modes with a live HPD status. */
	u32 (*hpd_live_status)(struct intel_tc_port *tc);
	bool (*is_ready)(struct intel_tc_port *tc);
	bool (*is_owned)(struct intel_tc_port *tc);
	void (*get_hw_state)(struct intel_tc_port *tc);
	bool (*connect)(struct intel_tc_port *tc, int required_lanes);
	void (*disconnect)(struct intel_tc_port *tc);
	void (*init)(struct intel_tc_port *tc);
};

struct intel_tc_port {
	struct intel_digital_port *dig_port;

	const struct intel_tc_phy_ops *phy_ops;

	struct mutex lock;	/* protects the TypeC port mode */
	/* Wakeref blocking TC-cold while the port is connected/locked. */
	struct ref_tracker *lock_wakeref;
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	/* Domain lock_wakeref was taken for, checked on release. */
	enum intel_display_power_domain lock_power_domain;
#endif
	struct delayed_work disconnect_phy_work;
	struct delayed_work link_reset_work;
	int link_refcount;
	/* Set from the VBT; may be fixed up from the live HPD status. */
	bool legacy_port:1;
	const char *port_name;
	enum tc_port_mode mode;
	/* Mode read out during driver load / system resume. */
	enum tc_port_mode init_mode;
	enum phy_fia phy_fia;
	enum intel_tc_pin_assignment pin_assignment;
	u8 phy_fia_idx;
	u8 max_lane_count;
};
70 
71 static enum intel_display_power_domain
72 tc_phy_cold_off_domain(struct intel_tc_port *);
73 static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc);
74 static bool tc_phy_is_ready(struct intel_tc_port *tc);
75 static bool tc_phy_wait_for_ready(struct intel_tc_port *tc);
76 static enum tc_port_mode tc_phy_get_current_mode(struct intel_tc_port *tc);
77 
tc_port_mode_name(enum tc_port_mode mode)78 static const char *tc_port_mode_name(enum tc_port_mode mode)
79 {
80 	static const char * const names[] = {
81 		[TC_PORT_DISCONNECTED] = "disconnected",
82 		[TC_PORT_TBT_ALT] = "tbt-alt",
83 		[TC_PORT_DP_ALT] = "dp-alt",
84 		[TC_PORT_LEGACY] = "legacy",
85 	};
86 
87 	if (WARN_ON(mode >= ARRAY_SIZE(names)))
88 		mode = TC_PORT_DISCONNECTED;
89 
90 	return names[mode];
91 }
92 
/* Return the TC port state of @dig_port. */
static struct intel_tc_port *to_tc_port(struct intel_digital_port *dig_port)
{
	return dig_port->tc;
}
97 
intel_tc_port_in_mode(struct intel_digital_port * dig_port,enum tc_port_mode mode)98 static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port,
99 				  enum tc_port_mode mode)
100 {
101 	struct intel_tc_port *tc = to_tc_port(dig_port);
102 
103 	return intel_encoder_is_tc(&dig_port->base) && tc->mode == mode;
104 }
105 
/* Return whether @dig_port is a TC port in TBT-alt mode. */
bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_TBT_ALT);
}
110 
/* Return whether @dig_port is a TC port in DP-alt mode. */
bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_DP_ALT);
}
115 
/* Return whether @dig_port is a TC port in legacy mode. */
bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_LEGACY);
}
120 
intel_tc_port_handles_hpd_glitches(struct intel_digital_port * dig_port)121 bool intel_tc_port_handles_hpd_glitches(struct intel_digital_port *dig_port)
122 {
123 	struct intel_tc_port *tc = to_tc_port(dig_port);
124 
125 	return intel_encoder_is_tc(&dig_port->base) && !tc->legacy_port;
126 }
127 
128 /*
129  * The display power domains used for TC ports depending on the
130  * platform and TC mode (legacy, DP-alt, TBT):
131  *
132  * POWER_DOMAIN_DISPLAY_CORE:
133  * --------------------------
134  * ADLP/all modes:
135  *   - TCSS/IOM access for PHY ready state.
136  * ADLP+/all modes:
137  *   - DE/north-,south-HPD ISR access for HPD live state.
138  *
139  * POWER_DOMAIN_PORT_DDI_LANES_<port>:
140  * -----------------------------------
141  * ICL+/all modes:
142  *   - DE/DDI_BUF access for port enabled state.
143  * ADLP/all modes:
144  *   - DE/DDI_BUF access for PHY owned state.
145  *
146  * POWER_DOMAIN_AUX_USBC<TC port index>:
147  * -------------------------------------
148  * ICL/legacy mode:
149  *   - TCSS/IOM,FIA access for PHY ready, owned and HPD live state
150  *   - TCSS/PHY: block TC-cold power state for using the PHY AUX and
151  *     main lanes.
152  * ADLP/legacy, DP-alt modes:
153  *   - TCSS/PHY: block TC-cold power state for using the PHY AUX and
154  *     main lanes.
155  *
156  * POWER_DOMAIN_TC_COLD_OFF:
157  * -------------------------
158  * ICL/DP-alt, TBT mode:
159  *   - TCSS/TBT: block TC-cold power state for using the (direct or
160  *     TBT DP-IN) AUX and main lanes.
161  *
162  * TGL/all modes:
163  *   - TCSS/IOM,FIA access for PHY ready, owned and HPD live state
164  *   - TCSS/PHY: block TC-cold power state for using the (direct or
165  *     TBT DP-IN) AUX and main lanes.
166  *
167  * ADLP/TBT mode:
168  *   - TCSS/TBT: block TC-cold power state for using the (TBT DP-IN)
169  *     AUX and main lanes.
170  *
171  * XELPDP+/all modes:
172  *   - TCSS/IOM,FIA access for PHY ready, owned state
173  *   - TCSS/PHY: block TC-cold power state for using the (direct or
174  *     TBT DP-IN) AUX and main lanes.
175  */
intel_tc_cold_requires_aux_pw(struct intel_digital_port * dig_port)176 bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port)
177 {
178 	struct intel_display *display = to_intel_display(dig_port);
179 	struct intel_tc_port *tc = to_tc_port(dig_port);
180 
181 	return tc_phy_cold_off_domain(tc) ==
182 	       intel_display_power_legacy_aux_domain(display, dig_port->aux_ch);
183 }
184 
185 static struct ref_tracker *
__tc_cold_block(struct intel_tc_port * tc,enum intel_display_power_domain * domain)186 __tc_cold_block(struct intel_tc_port *tc, enum intel_display_power_domain *domain)
187 {
188 	struct intel_display *display = to_intel_display(tc->dig_port);
189 
190 	*domain = tc_phy_cold_off_domain(tc);
191 
192 	return intel_display_power_get(display, *domain);
193 }
194 
195 static struct ref_tracker *
tc_cold_block(struct intel_tc_port * tc)196 tc_cold_block(struct intel_tc_port *tc)
197 {
198 	enum intel_display_power_domain domain;
199 	struct ref_tracker *wakeref;
200 
201 	wakeref = __tc_cold_block(tc, &domain);
202 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
203 	tc->lock_power_domain = domain;
204 #endif
205 	return wakeref;
206 }
207 
/* Unblock the TC-cold power state blocked via __tc_cold_block(). */
static void
__tc_cold_unblock(struct intel_tc_port *tc, enum intel_display_power_domain domain,
		  struct ref_tracker *wakeref)
{
	struct intel_display *display = to_intel_display(tc->dig_port);

	intel_display_power_put(display, domain, wakeref);
}
216 
/*
 * Unblock the TC-cold power state blocked via tc_cold_block(). With
 * runtime PM debugging enabled, check that the power domain released
 * here matches the one the wakeref was taken for.
 */
static void
tc_cold_unblock(struct intel_tc_port *tc, struct ref_tracker *wakeref)
{
	struct intel_display __maybe_unused *display = to_intel_display(tc->dig_port);
	enum intel_display_power_domain domain = tc_phy_cold_off_domain(tc);

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	drm_WARN_ON(display->drm, tc->lock_power_domain != domain);
#endif
	__tc_cold_unblock(tc, domain, wakeref);
}
228 
229 static void
assert_display_core_power_enabled(struct intel_tc_port * tc)230 assert_display_core_power_enabled(struct intel_tc_port *tc)
231 {
232 	struct intel_display *display = to_intel_display(tc->dig_port);
233 
234 	drm_WARN_ON(display->drm,
235 		    !intel_display_power_is_enabled(display, POWER_DOMAIN_DISPLAY_CORE));
236 }
237 
238 static void
assert_tc_cold_blocked(struct intel_tc_port * tc)239 assert_tc_cold_blocked(struct intel_tc_port *tc)
240 {
241 	struct intel_display *display = to_intel_display(tc->dig_port);
242 	bool enabled;
243 
244 	enabled = intel_display_power_is_enabled(display,
245 						 tc_phy_cold_off_domain(tc));
246 	drm_WARN_ON(display->drm, !enabled);
247 }
248 
249 static enum intel_display_power_domain
tc_port_power_domain(struct intel_tc_port * tc)250 tc_port_power_domain(struct intel_tc_port *tc)
251 {
252 	enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
253 
254 	if (tc_port == TC_PORT_NONE)
255 		return POWER_DOMAIN_INVALID;
256 
257 	return POWER_DOMAIN_PORT_DDI_LANES_TC1 + tc_port - TC_PORT_1;
258 }
259 
260 static void
assert_tc_port_power_enabled(struct intel_tc_port * tc)261 assert_tc_port_power_enabled(struct intel_tc_port *tc)
262 {
263 	struct intel_display *display = to_intel_display(tc->dig_port);
264 
265 	drm_WARN_ON(display->drm,
266 		    !intel_display_power_is_enabled(display, tc_port_power_domain(tc)));
267 }
268 
/*
 * Return the DP lane assignment mask of this port read from the FIA.
 * All-ones from the register read indicates the PHY is in TC-cold
 * (register access returned garbage), which the caller is expected to
 * have blocked - hence the WARN and the assert.
 */
static u32 get_lane_mask(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	u32 lane_mask;

	with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE)
		lane_mask = intel_de_read(display, PORT_TX_DFLEXDPSP(tc->phy_fia));

	drm_WARN_ON(display->drm, lane_mask == 0xffffffff);
	assert_tc_cold_blocked(tc);

	/* Extract this port's lane assignment field from the shared FIA register. */
	lane_mask &= DP_LANE_ASSIGNMENT_MASK(tc->phy_fia_idx);
	return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(tc->phy_fia_idx);
}
283 
pin_assignment_name(enum intel_tc_pin_assignment pin_assignment)284 static char pin_assignment_name(enum intel_tc_pin_assignment pin_assignment)
285 {
286 	if (pin_assignment == INTEL_TC_PIN_ASSIGNMENT_NONE)
287 		return '-';
288 
289 	return 'A' + pin_assignment - INTEL_TC_PIN_ASSIGNMENT_A;
290 }
291 
/*
 * Read out the TypeC pin assignment negotiated for this port. In TBT-alt
 * mode there is no pin assignment. The source register differs by
 * platform: TCSS_DDI_STATUS on LNL+ (display ver >= 20), the FIA's
 * PORT_TX_DFLEXPA1 before that. All-ones from the register read
 * indicates TC-cold, which the caller must have blocked.
 */
static enum intel_tc_pin_assignment
get_pin_assignment(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
	enum intel_tc_pin_assignment pin_assignment;
	i915_reg_t reg;
	u32 mask;
	u32 val;

	if (tc->mode == TC_PORT_TBT_ALT)
		return INTEL_TC_PIN_ASSIGNMENT_NONE;

	if (DISPLAY_VER(display) >= 20) {
		reg = TCSS_DDI_STATUS(tc_port);
		mask = TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK;
	} else {
		reg = PORT_TX_DFLEXPA1(tc->phy_fia);
		mask = DP_PIN_ASSIGNMENT_MASK(tc->phy_fia_idx);
	}

	with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE)
		val = intel_de_read(display, reg);

	drm_WARN_ON(display->drm, val == 0xffffffff);
	assert_tc_cold_blocked(tc);

	pin_assignment = (val & mask) >> (ffs(mask) - 1);

	switch (pin_assignment) {
	case INTEL_TC_PIN_ASSIGNMENT_A:
	case INTEL_TC_PIN_ASSIGNMENT_B:
	case INTEL_TC_PIN_ASSIGNMENT_F:
		/* Pin assignments A/B/F are only expected on ICL (ver 11). */
		drm_WARN_ON(display->drm, DISPLAY_VER(display) > 11);
		break;
	case INTEL_TC_PIN_ASSIGNMENT_NONE:
	case INTEL_TC_PIN_ASSIGNMENT_C:
	case INTEL_TC_PIN_ASSIGNMENT_D:
	case INTEL_TC_PIN_ASSIGNMENT_E:
		break;
	default:
		MISSING_CASE(pin_assignment);
	}

	return pin_assignment;
}
338 
/*
 * Return the maximum lane count for the current DP-alt mode pin
 * assignment on MTL+, where the IOM/TCSS provides the pin assignment
 * directly. Unexpected values fall back to the conservative 2 lanes.
 */
static int mtl_get_max_lane_count(struct intel_tc_port *tc)
{
	enum intel_tc_pin_assignment pin_assignment;

	pin_assignment = get_pin_assignment(tc);

	switch (pin_assignment) {
	case INTEL_TC_PIN_ASSIGNMENT_NONE:
		return 0;
	default:
		MISSING_CASE(pin_assignment);
		fallthrough;
	case INTEL_TC_PIN_ASSIGNMENT_D:
		return 2;
	case INTEL_TC_PIN_ASSIGNMENT_C:
	case INTEL_TC_PIN_ASSIGNMENT_E:
		return 4;
	}
}
358 
/*
 * Return the maximum lane count based on the FIA lane assignment mask on
 * ICL-ADLP. Unexpected masks fall back to the conservative 1 lane.
 */
static int icl_get_max_lane_count(struct intel_tc_port *tc)
{
	u32 lane_mask = 0;

	lane_mask = get_lane_mask(tc);

	switch (lane_mask) {
	default:
		MISSING_CASE(lane_mask);
		fallthrough;
	case 0x1:
	case 0x2:
	case 0x4:
	case 0x8:
		return 1;
	case 0x3:
	case 0xc:
		return 2;
	case 0xf:
		return 4;
	}
}
381 
get_max_lane_count(struct intel_tc_port * tc)382 static int get_max_lane_count(struct intel_tc_port *tc)
383 {
384 	struct intel_display *display = to_intel_display(tc->dig_port);
385 
386 	if (tc->mode != TC_PORT_DP_ALT)
387 		return 4;
388 
389 	if (DISPLAY_VER(display) >= 14)
390 		return mtl_get_max_lane_count(tc);
391 
392 	return icl_get_max_lane_count(tc);
393 }
394 
/*
 * Cache the pin assignment and max lane count read from the HW. Called
 * whenever the port gets connected, with TC-cold blocked.
 */
static void read_pin_configuration(struct intel_tc_port *tc)
{
	tc->pin_assignment = get_pin_assignment(tc);
	tc->max_lane_count = get_max_lane_count(tc);
}
400 
intel_tc_port_max_lane_count(struct intel_digital_port * dig_port)401 int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
402 {
403 	struct intel_tc_port *tc = to_tc_port(dig_port);
404 
405 	if (!intel_encoder_is_tc(&dig_port->base))
406 		return 4;
407 
408 	return tc->max_lane_count;
409 }
410 
411 enum intel_tc_pin_assignment
intel_tc_port_get_pin_assignment(struct intel_digital_port * dig_port)412 intel_tc_port_get_pin_assignment(struct intel_digital_port *dig_port)
413 {
414 	struct intel_tc_port *tc = to_tc_port(dig_port);
415 
416 	if (!intel_encoder_is_tc(&dig_port->base))
417 		return INTEL_TC_PIN_ASSIGNMENT_NONE;
418 
419 	return tc->pin_assignment;
420 }
421 
/*
 * Program the number of main lanes the FIA muxes to this port. A no-op
 * on MTL+ (display ver >= 14), where there is no FIA. Lane reversal is
 * only expected in legacy mode; in that case lanes are enabled starting
 * from ML3 instead of ML0. Requires TC-cold to be blocked.
 */
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
				      int required_lanes)
{
	struct intel_display *display = to_intel_display(dig_port);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	bool lane_reversal = dig_port->lane_reversal;
	u32 val;

	if (DISPLAY_VER(display) >= 14)
		return;

	drm_WARN_ON(display->drm,
		    lane_reversal && tc->mode != TC_PORT_LEGACY);

	assert_tc_cold_blocked(tc);

	val = intel_de_read(display, PORT_TX_DFLEXDPMLE1(tc->phy_fia));
	val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc->phy_fia_idx);

	switch (required_lanes) {
	case 1:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3(tc->phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML0(tc->phy_fia_idx);
		break;
	case 2:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3_2(tc->phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML1_0(tc->phy_fia_idx);
		break;
	case 4:
		val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc->phy_fia_idx);
		break;
	default:
		MISSING_CASE(required_lanes);
	}

	intel_de_write(display, PORT_TX_DFLEXDPMLE1(tc->phy_fia), val);
}
461 
/*
 * Fix up the port's VBT-provided legacy flag if it contradicts an
 * unambiguous (single-bit) live HPD status: a legacy port should only
 * see a legacy HPD, a DP-alt/TBT-alt port only a DP-alt or TBT-alt HPD.
 * Called only while the port is disconnected.
 */
static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc,
				      u32 live_status_mask)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	u32 valid_hpd_mask;

	drm_WARN_ON(display->drm, tc->mode != TC_PORT_DISCONNECTED);

	/* Only a single live HPD bit identifies the mode unambiguously. */
	if (hweight32(live_status_mask) != 1)
		return;

	if (tc->legacy_port)
		valid_hpd_mask = BIT(TC_PORT_LEGACY);
	else
		valid_hpd_mask = BIT(TC_PORT_DP_ALT) |
				 BIT(TC_PORT_TBT_ALT);

	if (!(live_status_mask & ~valid_hpd_mask))
		return;

	/* If live status mismatches the VBT flag, trust the live status. */
	drm_dbg_kms(display->drm,
		    "Port %s: live status %08x mismatches the legacy port flag %08x, fixing flag\n",
		    tc->port_name, live_status_mask, valid_hpd_mask);

	tc->legacy_port = !tc->legacy_port;
}
489 
/*
 * Set up which FIA instance and which index within it serves this port.
 * Each modular FIA instance houses 2 TC ports; SOCs with more than two
 * TC ports have multiple modular FIA instances. Non-modular designs
 * route all ports through the single FIA1.
 */
static void tc_phy_load_fia_params(struct intel_tc_port *tc, bool modular_fia)
{
	enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);

	if (!modular_fia) {
		tc->phy_fia = FIA1;
		tc->phy_fia_idx = tc_port;
		return;
	}

	tc->phy_fia = tc_port / 2;
	tc->phy_fia_idx = tc_port % 2;
}
506 
507 /*
508  * ICL TC PHY handlers
509  * -------------------
510  */
511 static enum intel_display_power_domain
icl_tc_phy_cold_off_domain(struct intel_tc_port * tc)512 icl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
513 {
514 	struct intel_display *display = to_intel_display(tc->dig_port);
515 	struct intel_digital_port *dig_port = tc->dig_port;
516 
517 	if (tc->legacy_port)
518 		return intel_display_power_legacy_aux_domain(display, dig_port->aux_ch);
519 
520 	return POWER_DOMAIN_TC_COLD_OFF;
521 }
522 
/*
 * Return the live HPD status mask (BIT(TC_PORT_*)) on ICL/TGL, combining
 * the FIA live state (TBT-alt/DP-alt) and the PCH south HPD ISR
 * (legacy). An all-ones FIA read means the PHY is in TC-cold and nothing
 * is connected.
 */
static u32 icl_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;
	u32 isr_bit = display->hotplug.pch_hpd[dig_port->base.hpd_pin];
	u32 fia_isr;
	u32 pch_isr;
	u32 mask = 0;

	with_intel_display_power(display, tc_phy_cold_off_domain(tc)) {
		fia_isr = intel_de_read(display, PORT_TX_DFLEXDPSP(tc->phy_fia));
		pch_isr = intel_de_read(display, SDEISR);
	}

	if (fia_isr == 0xffffffff) {
		drm_dbg_kms(display->drm,
			    "Port %s: PHY in TCCOLD, nothing connected\n",
			    tc->port_name);
		return mask;
	}

	if (fia_isr & TC_LIVE_STATE_TBT(tc->phy_fia_idx))
		mask |= BIT(TC_PORT_TBT_ALT);
	if (fia_isr & TC_LIVE_STATE_TC(tc->phy_fia_idx))
		mask |= BIT(TC_PORT_DP_ALT);

	if (pch_isr & isr_bit)
		mask |= BIT(TC_PORT_LEGACY);

	return mask;
}
554 
555 /*
556  * Return the PHY status complete flag indicating that display can acquire the
557  * PHY ownership. The IOM firmware sets this flag when a DP-alt or legacy sink
558  * is connected and it's ready to switch the ownership to display. The flag
559  * will be left cleared when a TBT-alt sink is connected, where the PHY is
560  * owned by the TBT subsystem and so switching the ownership to display is not
561  * required.
562  */
icl_tc_phy_is_ready(struct intel_tc_port * tc)563 static bool icl_tc_phy_is_ready(struct intel_tc_port *tc)
564 {
565 	struct intel_display *display = to_intel_display(tc->dig_port);
566 	u32 val;
567 
568 	assert_tc_cold_blocked(tc);
569 
570 	val = intel_de_read(display, PORT_TX_DFLEXDPPMS(tc->phy_fia));
571 	if (val == 0xffffffff) {
572 		drm_dbg_kms(display->drm,
573 			    "Port %s: PHY in TCCOLD, assuming not ready\n",
574 			    tc->port_name);
575 		return false;
576 	}
577 
578 	return val & DP_PHY_MODE_STATUS_COMPLETED(tc->phy_fia_idx);
579 }
580 
/*
 * Take/release the PHY ownership via the FIA safe-mode register.
 * Returns false if the register can't be accessed because the PHY is in
 * TC-cold (all-ones read), true otherwise.
 */
static bool icl_tc_phy_take_ownership(struct intel_tc_port *tc,
				      bool take)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(display, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
	if (val == 0xffffffff) {
		drm_dbg_kms(display->drm,
			    "Port %s: PHY in TCCOLD, can't %s ownership\n",
			    tc->port_name, take ? "take" : "release");

		return false;
	}

	/* Clearing the NOT_SAFE flag releases, setting it takes ownership. */
	val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
	if (take)
		val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);

	intel_de_write(display, PORT_TX_DFLEXDPCSSS(tc->phy_fia), val);

	return true;
}
606 
/*
 * Return whether display owns the PHY, based on the FIA safe-mode
 * register. An all-ones read means TC-cold, so assume not owned.
 */
static bool icl_tc_phy_is_owned(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(display, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
	if (val == 0xffffffff) {
		drm_dbg_kms(display->drm,
			    "Port %s: PHY in TCCOLD, assume not owned\n",
			    tc->port_name);
		return false;
	}

	return val & DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
}
624 
/*
 * Read out the current HW state of the PHY (mode and pin configuration),
 * holding a TC-cold blocking reference for the duration. If the port is
 * connected in any mode, a further blocking reference is kept in
 * lock_wakeref, to be released when the port gets disconnected.
 */
static void icl_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	enum intel_display_power_domain domain;
	struct ref_tracker *tc_cold_wref;

	tc_cold_wref = __tc_cold_block(tc, &domain);

	tc->mode = tc_phy_get_current_mode(tc);
	if (tc->mode != TC_PORT_DISCONNECTED) {
		tc->lock_wakeref = tc_cold_block(tc);

		read_pin_configuration(tc);
	}

	__tc_cold_unblock(tc, domain, tc_cold_wref);
}
641 
642 /*
643  * This function implements the first part of the Connect Flow described by our
644  * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
645  * lanes, EDID, etc) is done as needed in the typical places.
646  *
647  * Unlike the other ports, type-C ports are not available to use as soon as we
648  * get a hotplug. The type-C PHYs can be shared between multiple controllers:
649  * display, USB, etc. As a result, handshaking through FIA is required around
650  * connect and disconnect to cleanly transfer ownership with the controller and
651  * set the type-C power state.
652  */
tc_phy_verify_legacy_or_dp_alt_mode(struct intel_tc_port * tc,int required_lanes)653 static bool tc_phy_verify_legacy_or_dp_alt_mode(struct intel_tc_port *tc,
654 						int required_lanes)
655 {
656 	struct intel_display *display = to_intel_display(tc->dig_port);
657 	struct intel_digital_port *dig_port = tc->dig_port;
658 	int max_lanes;
659 
660 	max_lanes = intel_tc_port_max_lane_count(dig_port);
661 	if (tc->mode == TC_PORT_LEGACY) {
662 		drm_WARN_ON(display->drm, max_lanes != 4);
663 		return true;
664 	}
665 
666 	drm_WARN_ON(display->drm, tc->mode != TC_PORT_DP_ALT);
667 
668 	/*
669 	 * Now we have to re-check the live state, in case the port recently
670 	 * became disconnected. Not necessary for legacy mode.
671 	 */
672 	if (!(tc_phy_hpd_live_status(tc) & BIT(TC_PORT_DP_ALT))) {
673 		drm_dbg_kms(display->drm, "Port %s: PHY sudden disconnect\n",
674 			    tc->port_name);
675 		return false;
676 	}
677 
678 	if (max_lanes < required_lanes) {
679 		drm_dbg_kms(display->drm,
680 			    "Port %s: PHY max lanes %d < required lanes %d\n",
681 			    tc->port_name,
682 			    max_lanes, required_lanes);
683 		return false;
684 	}
685 
686 	return true;
687 }
688 
/*
 * Connect the PHY on ICL/TGL: block TC-cold, take the PHY ownership
 * (except in TBT-alt mode, where TBT owns the PHY) and cache the pin
 * configuration. On failure the ownership and the TC-cold blocking
 * reference are released again. Returns %true on success.
 */
static bool icl_tc_phy_connect(struct intel_tc_port *tc,
			       int required_lanes)
{
	struct intel_display *display = to_intel_display(tc->dig_port);

	tc->lock_wakeref = tc_cold_block(tc);

	if (tc->mode == TC_PORT_TBT_ALT) {
		read_pin_configuration(tc);

		return true;
	}

	/*
	 * Failing to take ownership is only fatal for non-legacy ports; for
	 * legacy ports it is unexpected (hence the WARN), but continue anyway.
	 */
	if ((!tc_phy_is_ready(tc) ||
	     !icl_tc_phy_take_ownership(tc, true)) &&
	    !drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY)) {
		drm_dbg_kms(display->drm, "Port %s: can't take PHY ownership (ready %s)\n",
			    tc->port_name,
			    str_yes_no(tc_phy_is_ready(tc)));
		goto out_unblock_tc_cold;
	}

	read_pin_configuration(tc);

	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
		goto out_release_phy;

	return true;

out_release_phy:
	icl_tc_phy_take_ownership(tc, false);
out_unblock_tc_cold:
	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));

	return false;
}
725 
726 /*
727  * See the comment at the connect function. This implements the Disconnect
728  * Flow.
729  */
icl_tc_phy_disconnect(struct intel_tc_port * tc)730 static void icl_tc_phy_disconnect(struct intel_tc_port *tc)
731 {
732 	switch (tc->mode) {
733 	case TC_PORT_LEGACY:
734 	case TC_PORT_DP_ALT:
735 		icl_tc_phy_take_ownership(tc, false);
736 		fallthrough;
737 	case TC_PORT_TBT_ALT:
738 		tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
739 		break;
740 	default:
741 		MISSING_CASE(tc->mode);
742 	}
743 }
744 
/* ICL has a single, non-modular FIA. */
static void icl_tc_phy_init(struct intel_tc_port *tc)
{
	tc_phy_load_fia_params(tc, false);
}
749 
/* TC PHY vfuncs for ICL (display ver 11). */
static const struct intel_tc_phy_ops icl_tc_phy_ops = {
	.cold_off_domain = icl_tc_phy_cold_off_domain,
	.hpd_live_status = icl_tc_phy_hpd_live_status,
	.is_ready = icl_tc_phy_is_ready,
	.is_owned = icl_tc_phy_is_owned,
	.get_hw_state = icl_tc_phy_get_hw_state,
	.connect = icl_tc_phy_connect,
	.disconnect = icl_tc_phy_disconnect,
	.init = icl_tc_phy_init,
};
760 
761 /*
762  * TGL TC PHY handlers
763  * -------------------
764  */
/* On TGL all modes block TC-cold via POWER_DOMAIN_TC_COLD_OFF. */
static enum intel_display_power_domain
tgl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
{
	return POWER_DOMAIN_TC_COLD_OFF;
}
770 
/*
 * Detect from FIA1 whether the SOC uses modular FIAs and set up this
 * port's FIA parameters accordingly. An all-ones read indicates
 * TC-cold, which should be blocked here - hence the WARN.
 */
static void tgl_tc_phy_init(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	u32 val;

	with_intel_display_power(display, tc_phy_cold_off_domain(tc))
		val = intel_de_read(display, PORT_TX_DFLEXDPSP(FIA1));

	drm_WARN_ON(display->drm, val == 0xffffffff);

	tc_phy_load_fia_params(tc, val & MODULAR_FIA_MASK);
}
783 
/* TC PHY vfuncs for TGL (display ver 12), mostly shared with ICL. */
static const struct intel_tc_phy_ops tgl_tc_phy_ops = {
	.cold_off_domain = tgl_tc_phy_cold_off_domain,
	.hpd_live_status = icl_tc_phy_hpd_live_status,
	.is_ready = icl_tc_phy_is_ready,
	.is_owned = icl_tc_phy_is_owned,
	.get_hw_state = icl_tc_phy_get_hw_state,
	.connect = icl_tc_phy_connect,
	.disconnect = icl_tc_phy_disconnect,
	.init = tgl_tc_phy_init,
};
794 
795 /*
796  * ADLP TC PHY handlers
797  * --------------------
798  */
799 static enum intel_display_power_domain
adlp_tc_phy_cold_off_domain(struct intel_tc_port * tc)800 adlp_tc_phy_cold_off_domain(struct intel_tc_port *tc)
801 {
802 	struct intel_display *display = to_intel_display(tc->dig_port);
803 	struct intel_digital_port *dig_port = tc->dig_port;
804 
805 	if (tc->mode != TC_PORT_TBT_ALT)
806 		return intel_display_power_legacy_aux_domain(display, dig_port->aux_ch);
807 
808 	return POWER_DOMAIN_TC_COLD_OFF;
809 }
810 
/*
 * Return the live HPD status mask (BIT(TC_PORT_*)) on ADLP, combining
 * the CPU north HPD ISR (DP-alt/TBT-alt) and the PCH south HPD ISR
 * (legacy). Only display core power is needed for the ISR access.
 */
static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
	u32 cpu_isr_bits = display->hotplug.hpd[hpd_pin];
	u32 pch_isr_bit = display->hotplug.pch_hpd[hpd_pin];
	u32 cpu_isr;
	u32 pch_isr;
	u32 mask = 0;

	with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE) {
		cpu_isr = intel_de_read(display, GEN11_DE_HPD_ISR);
		pch_isr = intel_de_read(display, SDEISR);
	}

	if (cpu_isr & (cpu_isr_bits & GEN11_DE_TC_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_DP_ALT);
	if (cpu_isr & (cpu_isr_bits & GEN11_DE_TBT_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_TBT_ALT);

	if (pch_isr & pch_isr_bit)
		mask |= BIT(TC_PORT_LEGACY);

	return mask;
}
837 
838 /*
839  * Return the PHY status complete flag indicating that display can acquire the
840  * PHY ownership. The IOM firmware sets this flag when it's ready to switch
841  * the ownership to display, regardless of what sink is connected (TBT-alt,
842  * DP-alt, legacy or nothing). For TBT-alt sinks the PHY is owned by the TBT
843  * subsystem and so switching the ownership to display is not required.
844  */
adlp_tc_phy_is_ready(struct intel_tc_port * tc)845 static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc)
846 {
847 	struct intel_display *display = to_intel_display(tc->dig_port);
848 	enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
849 	u32 val;
850 
851 	assert_display_core_power_enabled(tc);
852 
853 	val = intel_de_read(display, TCSS_DDI_STATUS(tc_port));
854 	if (val == 0xffffffff) {
855 		drm_dbg_kms(display->drm,
856 			    "Port %s: PHY in TCCOLD, assuming not ready\n",
857 			    tc->port_name);
858 		return false;
859 	}
860 
861 	return val & TCSS_DDI_STATUS_READY;
862 }
863 
/*
 * Take/release the PHY ownership via the ownership flag in DDI_BUF_CTL.
 * Always succeeds on ADLP; the bool return matches the ICL variant.
 */
static bool adlp_tc_phy_take_ownership(struct intel_tc_port *tc,
				       bool take)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum port port = tc->dig_port->base.port;
	u32 val = take ? DDI_BUF_CTL_TC_PHY_OWNERSHIP : 0;

	assert_tc_port_power_enabled(tc);

	intel_de_rmw(display, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP, val);

	return true;
}
877 
adlp_tc_phy_is_owned(struct intel_tc_port * tc)878 static bool adlp_tc_phy_is_owned(struct intel_tc_port *tc)
879 {
880 	struct intel_display *display = to_intel_display(tc->dig_port);
881 	enum port port = tc->dig_port->base.port;
882 	u32 val;
883 
884 	assert_tc_port_power_enabled(tc);
885 
886 	val = intel_de_read(display, DDI_BUF_CTL(port));
887 	return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP;
888 }
889 
/*
 * Read out the current HW state of the PHY (mode and pin configuration),
 * holding the port's DDI lanes power domain for the register access. If
 * the port is connected in any mode, a TC-cold blocking reference is
 * kept in lock_wakeref, to be released when the port gets disconnected.
 */
static void adlp_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum intel_display_power_domain port_power_domain =
		tc_port_power_domain(tc);
	struct ref_tracker *port_wakeref;

	port_wakeref = intel_display_power_get(display, port_power_domain);

	tc->mode = tc_phy_get_current_mode(tc);
	if (tc->mode != TC_PORT_DISCONNECTED) {
		tc->lock_wakeref = tc_cold_block(tc);

		read_pin_configuration(tc);
	}

	intel_display_power_put(display, port_power_domain, port_wakeref);
}
908 
/*
 * Connect the PHY on ADLP: take the PHY ownership via DDI_BUF_CTL
 * (requiring the port's DDI lanes power domain), verify PHY readiness,
 * block TC-cold and cache the pin configuration. TBT-alt mode only
 * needs TC-cold blocked, as the TBT subsystem owns the PHY. On failure
 * everything acquired is released in reverse order. Returns %true on
 * success.
 */
static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum intel_display_power_domain port_power_domain =
		tc_port_power_domain(tc);
	struct ref_tracker *port_wakeref;

	if (tc->mode == TC_PORT_TBT_ALT) {
		tc->lock_wakeref = tc_cold_block(tc);

		read_pin_configuration(tc);

		return true;
	}

	port_wakeref = intel_display_power_get(display, port_power_domain);

	/*
	 * Failures below are only fatal for non-legacy ports; for legacy
	 * ports they are unexpected (hence the WARNs), but continue anyway.
	 */
	if (!adlp_tc_phy_take_ownership(tc, true) &&
	    !drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY)) {
		drm_dbg_kms(display->drm, "Port %s: can't take PHY ownership\n",
			    tc->port_name);
		goto out_put_port_power;
	}

	if (!tc_phy_is_ready(tc) &&
	    !drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY)) {
		drm_dbg_kms(display->drm, "Port %s: PHY not ready\n",
			    tc->port_name);
		goto out_release_phy;
	}

	tc->lock_wakeref = tc_cold_block(tc);

	read_pin_configuration(tc);

	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
		goto out_unblock_tc_cold;

	intel_display_power_put(display, port_power_domain, port_wakeref);

	return true;

out_unblock_tc_cold:
	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
out_release_phy:
	adlp_tc_phy_take_ownership(tc, false);
out_put_port_power:
	intel_display_power_put(display, port_power_domain, port_wakeref);

	return false;
}
960 
/*
 * Disconnect an ADL-P TC PHY: unblock TC-cold and, for legacy/DP-alt mode,
 * release PHY ownership back to the TypeC subsystem.
 */
static void adlp_tc_phy_disconnect(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum intel_display_power_domain port_power_domain =
		tc_port_power_domain(tc);
	struct ref_tracker *port_wakeref;

	/* Keep the port's power domain on while touching the PHY. */
	port_wakeref = intel_display_power_get(display, port_power_domain);

	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));

	switch (tc->mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		adlp_tc_phy_take_ownership(tc, false);
		fallthrough;
	case TC_PORT_TBT_ALT:
		/* TBT-alt mode holds no PHY ownership, nothing to release. */
		break;
	default:
		MISSING_CASE(tc->mode);
	}

	intel_display_power_put(display, port_power_domain, port_wakeref);
}
985 
/* One-time ADL-P TC PHY init: load the FIA parameters for this port. */
static void adlp_tc_phy_init(struct intel_tc_port *tc)
{
	tc_phy_load_fia_params(tc, true);
}
990 
/* TC PHY vtable for ADL-P. */
static const struct intel_tc_phy_ops adlp_tc_phy_ops = {
	.cold_off_domain = adlp_tc_phy_cold_off_domain,
	.hpd_live_status = adlp_tc_phy_hpd_live_status,
	.is_ready = adlp_tc_phy_is_ready,
	.is_owned = adlp_tc_phy_is_owned,
	.get_hw_state = adlp_tc_phy_get_hw_state,
	.connect = adlp_tc_phy_connect,
	.disconnect = adlp_tc_phy_disconnect,
	.init = adlp_tc_phy_init,
};
1001 
1002 /*
1003  * XELPDP TC PHY handlers
1004  * ----------------------
1005  */
/*
 * Return the live HPD status of an XELPDP TC port as a BIT(TC_PORT_*) mask,
 * derived from the PICA and south display (PCH) interrupt status registers.
 */
static u32 xelpdp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
	u32 pica_isr_bits = display->hotplug.hpd[hpd_pin];
	u32 pch_isr_bit = display->hotplug.pch_hpd[hpd_pin];
	u32 pica_isr;
	u32 pch_isr;
	u32 mask = 0;

	/* Reading the ISRs requires the display core power domain. */
	with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE) {
		pica_isr = intel_de_read(display, PICAINTERRUPT_ISR);
		pch_isr = intel_de_read(display, SDEISR);
	}

	if (pica_isr & (pica_isr_bits & XELPDP_DP_ALT_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_DP_ALT);
	if (pica_isr & (pica_isr_bits & XELPDP_TBT_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_TBT_ALT);

	/* Legacy hotplug status is reported via the PCH ISR only. */
	if (tc->legacy_port && (pch_isr & pch_isr_bit))
		mask |= BIT(TC_PORT_LEGACY);

	return mask;
}
1032 
/* Report whether the TCSS power state bit is set in PORT_BUF_CTL1. */
static bool
xelpdp_tc_phy_tcss_power_is_enabled(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum port port = tc->dig_port->base.port;
	i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);

	/* The register is only accessible with TC-cold blocked. */
	assert_tc_cold_blocked(tc);

	return intel_de_read(display, reg) & XELPDP_TCSS_POWER_STATE;
}
1044 
1045 static bool
xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port * tc,bool enabled)1046 xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port *tc, bool enabled)
1047 {
1048 	struct intel_display *display = to_intel_display(tc->dig_port);
1049 	bool is_enabled;
1050 	int ret;
1051 
1052 	ret = poll_timeout_us(is_enabled = xelpdp_tc_phy_tcss_power_is_enabled(tc),
1053 			      is_enabled == enabled,
1054 			      200, 5000, false);
1055 	if (ret) {
1056 		drm_dbg_kms(display->drm,
1057 			    "Port %s: timeout waiting for TCSS power to get %s\n",
1058 			    str_enabled_disabled(enabled),
1059 			    tc->port_name);
1060 		return false;
1061 	}
1062 
1063 	return true;
1064 }
1065 
/*
 * Gfx driver WA 14020908590 for PTL tcss_rxdetect_clkswb_req/ack
 * handshake violation when pwwreq= 0->1 during TC7/10 entry
 */
static void xelpdp_tc_power_request_wa(struct intel_display *display, bool enable)
{
	/* check if mailbox is running busy */
	if (intel_de_wait_for_clear_ms(display, TCSS_DISP_MAILBOX_IN_CMD,
				       TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY, 10)) {
		drm_dbg_kms(display->drm,
			    "Timeout waiting for TCSS mailbox run/busy bit to clear\n");
		return;
	}

	/* Submit the power request (1 = enable, 0 = disable) via the mailbox. */
	intel_de_write(display, TCSS_DISP_MAILBOX_IN_DATA, enable ? 1 : 0);
	intel_de_write(display, TCSS_DISP_MAILBOX_IN_CMD,
		       TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY |
		       TCSS_DISP_MAILBOX_IN_CMD_DATA(0x1));

	/* wait to clear mailbox running busy bit before continuing */
	if (intel_de_wait_for_clear_ms(display, TCSS_DISP_MAILBOX_IN_CMD,
				       TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY, 10)) {
		drm_dbg_kms(display->drm,
			    "Timeout after writing data to mailbox. Mailbox run/busy bit did not clear\n");
		return;
	}
}
1093 
/*
 * Request the TCSS power state via PORT_BUF_CTL1, applying the display
 * version 30 mailbox workaround first. Does not wait for completion; see
 * xelpdp_tc_phy_enable_tcss_power() for the waiting wrapper.
 */
static void __xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum port port = tc->dig_port->base.port;
	i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);
	u32 val;

	assert_tc_cold_blocked(tc);

	/* WA 14020908590 applies to display version 30 only. */
	if (DISPLAY_VER(display) == 30)
		xelpdp_tc_power_request_wa(display, enable);

	val = intel_de_read(display, reg);
	if (enable)
		val |= XELPDP_TCSS_POWER_REQUEST;
	else
		val &= ~XELPDP_TCSS_POWER_REQUEST;
	intel_de_write(display, reg, val);
}
1113 
/*
 * Request the TCSS power state and wait for the request to complete.
 * Returns true on success. On a failed enable (except on legacy ports,
 * which are flagged with a WARN and left as-is) the request is rolled back.
 */
static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
{
	struct intel_display *display = to_intel_display(tc->dig_port);

	__xelpdp_tc_phy_enable_tcss_power(tc, enable);

	/* When enabling, the PHY must first report ready. */
	if (enable && !tc_phy_wait_for_ready(tc))
		goto out_disable;

	if (!xelpdp_tc_phy_wait_for_tcss_power(tc, enable))
		goto out_disable;

	return true;

out_disable:
	/* A legacy port failing here is unexpected; don't power it back off. */
	if (drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY))
		return false;

	/* A failed disable leaves nothing to roll back. */
	if (!enable)
		return false;

	__xelpdp_tc_phy_enable_tcss_power(tc, false);
	xelpdp_tc_phy_wait_for_tcss_power(tc, false);

	return false;
}
1140 
/* Set or clear display ownership of the TC PHY in PORT_BUF_CTL1. */
static void xelpdp_tc_phy_take_ownership(struct intel_tc_port *tc, bool take)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, tc->dig_port->base.port);
	u32 buf_ctl;

	/* The register is only accessible with TC-cold blocked. */
	assert_tc_cold_blocked(tc);

	buf_ctl = intel_de_read(display, reg);
	if (take)
		buf_ctl |= XELPDP_TC_PHY_OWNERSHIP;
	else
		buf_ctl &= ~XELPDP_TC_PHY_OWNERSHIP;
	intel_de_write(display, reg, buf_ctl);
}
1157 
xelpdp_tc_phy_is_owned(struct intel_tc_port * tc)1158 static bool xelpdp_tc_phy_is_owned(struct intel_tc_port *tc)
1159 {
1160 	struct intel_display *display = to_intel_display(tc->dig_port);
1161 	enum port port = tc->dig_port->base.port;
1162 	i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);
1163 
1164 	assert_tc_cold_blocked(tc);
1165 
1166 	return intel_de_read(display, reg) & XELPDP_TC_PHY_OWNERSHIP;
1167 }
1168 
/*
 * Read out the HW state of an XELPDP TC PHY: determine the current mode,
 * block TC-cold and read the pin configuration for a connected PHY, and
 * sanity-check that a legacy/DP-alt PHY has TCSS power enabled.
 */
static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct ref_tracker *tc_cold_wref;
	enum intel_display_power_domain domain;

	/* Temporary TC-cold blocking reference just for the readout. */
	tc_cold_wref = __tc_cold_block(tc, &domain);

	tc->mode = tc_phy_get_current_mode(tc);
	if (tc->mode != TC_PORT_DISCONNECTED) {
		tc->lock_wakeref = tc_cold_block(tc);

		read_pin_configuration(tc);
		/*
		 * Set a valid lane count value for a DP-alt sink which got
		 * disconnected. The driver can only disable the output on this PHY.
		 */
		if (tc->max_lane_count == 0)
			tc->max_lane_count = 4;
	}

	drm_WARN_ON(display->drm,
		    (tc->mode == TC_PORT_DP_ALT || tc->mode == TC_PORT_LEGACY) &&
		    !xelpdp_tc_phy_tcss_power_is_enabled(tc));

	__tc_cold_unblock(tc, domain, tc_cold_wref);
}
1196 
/*
 * Connect an XELPDP TC PHY in the current tc->mode: block TC-cold, enable
 * TCSS power and take PHY ownership for legacy/DP-alt mode, then verify the
 * lane configuration. Returns true on success; the error paths undo the
 * steps in reverse order (note out_release_phy falls through to
 * out_unblock_tccold).
 */
static bool xelpdp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
	tc->lock_wakeref = tc_cold_block(tc);

	/* TBT-alt mode needs no TCSS power or PHY ownership. */
	if (tc->mode == TC_PORT_TBT_ALT) {
		read_pin_configuration(tc);

		return true;
	}

	if (!xelpdp_tc_phy_enable_tcss_power(tc, true))
		goto out_unblock_tccold;

	xelpdp_tc_phy_take_ownership(tc, true);

	read_pin_configuration(tc);

	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
		goto out_release_phy;

	return true;

out_release_phy:
	xelpdp_tc_phy_take_ownership(tc, false);
	xelpdp_tc_phy_wait_for_tcss_power(tc, false);

out_unblock_tccold:
	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));

	return false;
}
1228 
/*
 * Disconnect an XELPDP TC PHY: release ownership and TCSS power for
 * legacy/DP-alt mode, then unblock TC-cold for all connected modes.
 */
static void xelpdp_tc_phy_disconnect(struct intel_tc_port *tc)
{
	switch (tc->mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		xelpdp_tc_phy_take_ownership(tc, false);
		xelpdp_tc_phy_enable_tcss_power(tc, false);
		fallthrough;
	case TC_PORT_TBT_ALT:
		tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
		break;
	default:
		MISSING_CASE(tc->mode);
	}
}
1244 
/* TC PHY vtable for XELPDP; reuses the TGL cold-off and ADL-P ready/init hooks. */
static const struct intel_tc_phy_ops xelpdp_tc_phy_ops = {
	.cold_off_domain = tgl_tc_phy_cold_off_domain,
	.hpd_live_status = xelpdp_tc_phy_hpd_live_status,
	.is_ready = adlp_tc_phy_is_ready,
	.is_owned = xelpdp_tc_phy_is_owned,
	.get_hw_state = xelpdp_tc_phy_get_hw_state,
	.connect = xelpdp_tc_phy_connect,
	.disconnect = xelpdp_tc_phy_disconnect,
	.init = adlp_tc_phy_init,
};
1255 
1256 /*
1257  * Generic TC PHY handlers
1258  * -----------------------
1259  */
/* Platform dispatch: power domain blocking TC-cold for this PHY. */
static enum intel_display_power_domain
tc_phy_cold_off_domain(struct intel_tc_port *tc)
{
	return tc->phy_ops->cold_off_domain(tc);
}
1265 
/*
 * Platform dispatch: live HPD status as a BIT(TC_PORT_*) mask. At most one
 * bit may be set, since a sink can be connected in only one mode at a time.
 */
static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	u32 mask;

	mask = tc->phy_ops->hpd_live_status(tc);

	/* The sink can be connected only in a single mode. */
	drm_WARN_ON_ONCE(display->drm, hweight32(mask) > 1);

	return mask;
}
1278 
/* Platform dispatch: is the TC PHY ready? */
static bool tc_phy_is_ready(struct intel_tc_port *tc)
{
	return tc->phy_ops->is_ready(tc);
}
1283 
/* Platform dispatch: does display own the TC PHY? */
static bool tc_phy_is_owned(struct intel_tc_port *tc)
{
	return tc->phy_ops->is_owned(tc);
}
1288 
/* Platform dispatch: read out the TC PHY's HW state. */
static void tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	tc->phy_ops->get_hw_state(tc);
}
1293 
1294 /* Is the PHY owned by display i.e. is it in legacy or DP-alt mode? */
tc_phy_owned_by_display(struct intel_tc_port * tc,bool phy_is_ready,bool phy_is_owned)1295 static bool tc_phy_owned_by_display(struct intel_tc_port *tc,
1296 				    bool phy_is_ready, bool phy_is_owned)
1297 {
1298 	struct intel_display *display = to_intel_display(tc->dig_port);
1299 
1300 	if (DISPLAY_VER(display) < 20) {
1301 		drm_WARN_ON(display->drm, phy_is_owned && !phy_is_ready);
1302 
1303 		return phy_is_ready && phy_is_owned;
1304 	} else {
1305 		return phy_is_owned;
1306 	}
1307 }
1308 
/*
 * Check whether the PHY's HW state (ownership/readiness) is consistent with
 * the PLL type driving the port: a display-owned PHY implies the MG PHY PLL,
 * otherwise the default (TBT) PLL is expected.
 */
static bool tc_phy_is_connected(struct intel_tc_port *tc,
				enum icl_port_dpll_id port_pll_type)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	bool phy_is_ready = tc_phy_is_ready(tc);
	bool phy_is_owned = tc_phy_is_owned(tc);
	bool is_connected;

	if (tc_phy_owned_by_display(tc, phy_is_ready, phy_is_owned))
		is_connected = port_pll_type == ICL_PORT_DPLL_MG_PHY;
	else
		is_connected = port_pll_type == ICL_PORT_DPLL_DEFAULT;

	drm_dbg_kms(display->drm,
		    "Port %s: PHY connected: %s (ready: %s, owned: %s, pll_type: %s)\n",
		    tc->port_name,
		    str_yes_no(is_connected),
		    str_yes_no(phy_is_ready),
		    str_yes_no(phy_is_owned),
		    port_pll_type == ICL_PORT_DPLL_DEFAULT ? "tbt" : "non-tbt");

	return is_connected;
}
1332 
/*
 * Wait for the PHY to report ready, polling every 1ms for up to 500ms.
 * Returns false (with an error logged) on timeout.
 */
static bool tc_phy_wait_for_ready(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	bool is_ready;
	int ret;

	ret = poll_timeout_us(is_ready = tc_phy_is_ready(tc),
			      is_ready,
			      1000, 500 * 1000, false);
	if (ret) {
		drm_err(display->drm, "Port %s: timeout waiting for PHY ready\n",
			tc->port_name);

		return false;
	}

	return true;
}
1351 
1352 static enum tc_port_mode
hpd_mask_to_tc_mode(u32 live_status_mask)1353 hpd_mask_to_tc_mode(u32 live_status_mask)
1354 {
1355 	if (live_status_mask)
1356 		return fls(live_status_mask) - 1;
1357 
1358 	return TC_PORT_DISCONNECTED;
1359 }
1360 
1361 static enum tc_port_mode
tc_phy_hpd_live_mode(struct intel_tc_port * tc)1362 tc_phy_hpd_live_mode(struct intel_tc_port *tc)
1363 {
1364 	u32 live_status_mask = tc_phy_hpd_live_status(tc);
1365 
1366 	return hpd_mask_to_tc_mode(live_status_mask);
1367 }
1368 
1369 static enum tc_port_mode
get_tc_mode_in_phy_owned_state(struct intel_tc_port * tc,enum tc_port_mode live_mode)1370 get_tc_mode_in_phy_owned_state(struct intel_tc_port *tc,
1371 			       enum tc_port_mode live_mode)
1372 {
1373 	switch (live_mode) {
1374 	case TC_PORT_LEGACY:
1375 	case TC_PORT_DP_ALT:
1376 		return live_mode;
1377 	default:
1378 		MISSING_CASE(live_mode);
1379 		fallthrough;
1380 	case TC_PORT_TBT_ALT:
1381 	case TC_PORT_DISCONNECTED:
1382 		if (tc->legacy_port)
1383 			return TC_PORT_LEGACY;
1384 		else
1385 			return TC_PORT_DP_ALT;
1386 	}
1387 }
1388 
1389 static enum tc_port_mode
get_tc_mode_in_phy_not_owned_state(struct intel_tc_port * tc,enum tc_port_mode live_mode)1390 get_tc_mode_in_phy_not_owned_state(struct intel_tc_port *tc,
1391 				   enum tc_port_mode live_mode)
1392 {
1393 	switch (live_mode) {
1394 	case TC_PORT_LEGACY:
1395 		return TC_PORT_DISCONNECTED;
1396 	case TC_PORT_DP_ALT:
1397 	case TC_PORT_TBT_ALT:
1398 		return TC_PORT_TBT_ALT;
1399 	default:
1400 		MISSING_CASE(live_mode);
1401 		fallthrough;
1402 	case TC_PORT_DISCONNECTED:
1403 		if (tc->legacy_port)
1404 			return TC_PORT_DISCONNECTED;
1405 		else
1406 			return TC_PORT_TBT_ALT;
1407 	}
1408 }
1409 
/*
 * Determine the PHY's current mode from its ready/owned state and the live
 * HPD status, logging the result.
 */
static enum tc_port_mode
tc_phy_get_current_mode(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum tc_port_mode live_mode = tc_phy_hpd_live_mode(tc);
	bool phy_is_ready;
	bool phy_is_owned;
	enum tc_port_mode mode;

	/*
	 * For legacy ports the IOM firmware initializes the PHY during boot-up
	 * and system resume whether or not a sink is connected. Wait here for
	 * the initialization to get ready.
	 */
	if (tc->legacy_port)
		tc_phy_wait_for_ready(tc);

	phy_is_ready = tc_phy_is_ready(tc);
	phy_is_owned = tc_phy_is_owned(tc);

	if (!tc_phy_owned_by_display(tc, phy_is_ready, phy_is_owned)) {
		mode = get_tc_mode_in_phy_not_owned_state(tc, live_mode);
	} else {
		/* A live TBT-alt status contradicts display PHY ownership. */
		drm_WARN_ON(display->drm, live_mode == TC_PORT_TBT_ALT);
		mode = get_tc_mode_in_phy_owned_state(tc, live_mode);
	}

	drm_dbg_kms(display->drm,
		    "Port %s: PHY mode: %s (ready: %s, owned: %s, HPD: %s)\n",
		    tc->port_name,
		    tc_port_mode_name(mode),
		    str_yes_no(phy_is_ready),
		    str_yes_no(phy_is_owned),
		    tc_port_mode_name(live_mode));

	return mode;
}
1447 
default_tc_mode(struct intel_tc_port * tc)1448 static enum tc_port_mode default_tc_mode(struct intel_tc_port *tc)
1449 {
1450 	if (tc->legacy_port)
1451 		return TC_PORT_LEGACY;
1452 
1453 	return TC_PORT_TBT_ALT;
1454 }
1455 
1456 static enum tc_port_mode
hpd_mask_to_target_mode(struct intel_tc_port * tc,u32 live_status_mask)1457 hpd_mask_to_target_mode(struct intel_tc_port *tc, u32 live_status_mask)
1458 {
1459 	enum tc_port_mode mode = hpd_mask_to_tc_mode(live_status_mask);
1460 
1461 	if (mode != TC_PORT_DISCONNECTED)
1462 		return mode;
1463 
1464 	return default_tc_mode(tc);
1465 }
1466 
1467 static enum tc_port_mode
tc_phy_get_target_mode(struct intel_tc_port * tc)1468 tc_phy_get_target_mode(struct intel_tc_port *tc)
1469 {
1470 	u32 live_status_mask = tc_phy_hpd_live_status(tc);
1471 
1472 	return hpd_mask_to_target_mode(tc, live_status_mask);
1473 }
1474 
/*
 * Connect the PHY in its target mode; if that fails, retry once in the
 * port's default mode. A failure of both attempts triggers a WARN.
 */
static void tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	u32 live_status_mask = tc_phy_hpd_live_status(tc);
	bool connected;

	tc_port_fixup_legacy_flag(tc, live_status_mask);

	tc->mode = hpd_mask_to_target_mode(tc, live_status_mask);

	connected = tc->phy_ops->connect(tc, required_lanes);
	if (!connected && tc->mode != default_tc_mode(tc)) {
		/* Fall back to the default mode and retry once. */
		tc->mode = default_tc_mode(tc);
		connected = tc->phy_ops->connect(tc, required_lanes);
	}

	drm_WARN_ON(display->drm, !connected);
}
1493 
tc_phy_disconnect(struct intel_tc_port * tc)1494 static void tc_phy_disconnect(struct intel_tc_port *tc)
1495 {
1496 	if (tc->mode != TC_PORT_DISCONNECTED) {
1497 		tc->phy_ops->disconnect(tc);
1498 		tc->mode = TC_PORT_DISCONNECTED;
1499 	}
1500 }
1501 
/* Platform dispatch: one-time PHY init, under the TC port lock. */
static void tc_phy_init(struct intel_tc_port *tc)
{
	mutex_lock(&tc->lock);
	tc->phy_ops->init(tc);
	mutex_unlock(&tc->lock);
}
1508 
/*
 * Disconnect the PHY and - unless @force_disconnect - reconnect it in the
 * current target mode. Caller holds tc->lock.
 */
static void intel_tc_port_reset_mode(struct intel_tc_port *tc,
				     int required_lanes, bool force_disconnect)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum tc_port_mode old_tc_mode = tc->mode;

	/* Flush pending power-domain work before checking the AUX state. */
	intel_display_power_flush_work(display);
	if (!intel_tc_cold_requires_aux_pw(dig_port)) {
		enum intel_display_power_domain aux_domain;

		/* AUX should be off at this point; log if it is not. */
		aux_domain = intel_aux_power_domain(dig_port);
		if (intel_display_power_is_enabled(display, aux_domain))
			drm_dbg_kms(display->drm, "Port %s: AUX unexpectedly powered\n",
				    tc->port_name);
	}

	tc_phy_disconnect(tc);
	if (!force_disconnect)
		tc_phy_connect(tc, required_lanes);

	drm_dbg_kms(display->drm,
		    "Port %s: TC port mode reset (%s -> %s) pin assignment: %c max lanes: %d\n",
		    tc->port_name,
		    tc_port_mode_name(old_tc_mode),
		    tc_port_mode_name(tc->mode),
		    pin_assignment_name(tc->pin_assignment),
		    tc->max_lane_count);
}
1538 
/* Does the current mode differ from the live-HPD-derived target mode? */
static bool intel_tc_port_needs_reset(struct intel_tc_port *tc)
{
	return tc_phy_get_target_mode(tc) != tc->mode;
}
1543 
/* Reset the port mode if a reset is needed or a disconnect is forced. */
static void intel_tc_port_update_mode(struct intel_tc_port *tc,
				      int required_lanes, bool force_disconnect)
{
	if (force_disconnect ||
	    intel_tc_port_needs_reset(tc))
		intel_tc_port_reset_mode(tc, required_lanes, force_disconnect);
}
1551 
/* Take a link reference, pinning the current port mode. Caller holds tc->lock. */
static void __intel_tc_port_get_link(struct intel_tc_port *tc)
{
	tc->link_refcount++;
}
1556 
/* Drop a link reference taken via __intel_tc_port_get_link(). */
static void __intel_tc_port_put_link(struct intel_tc_port *tc)
{
	tc->link_refcount--;
}
1561 
/* Is the port's DDI buffer enabled? Requires the port's power to be on. */
static bool tc_port_is_enabled(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;

	assert_tc_port_power_enabled(tc);

	return intel_de_read(display, DDI_BUF_CTL(dig_port->base.port)) &
	       DDI_BUF_CTL_ENABLE;
}
1572 
1573 /**
1574  * intel_tc_port_init_mode: Read out HW state and init the given port's TypeC mode
1575  * @dig_port: digital port
1576  *
1577  * Read out the HW state and initialize the TypeC mode of @dig_port. The mode
1578  * will be locked until intel_tc_port_sanitize_mode() is called.
1579  */
void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
{
	struct intel_display *display = to_intel_display(dig_port);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	bool update_mode = false;

	mutex_lock(&tc->lock);

	/* Readout must start from a clean, disconnected SW state. */
	drm_WARN_ON(display->drm, tc->mode != TC_PORT_DISCONNECTED);
	drm_WARN_ON(display->drm, tc->lock_wakeref);
	drm_WARN_ON(display->drm, tc->link_refcount);

	tc_phy_get_hw_state(tc);
	/*
	 * Save the initial mode for the state check in
	 * intel_tc_port_sanitize_mode().
	 */
	tc->init_mode = tc->mode;

	/*
	 * The PHY needs to be connected for AUX to work during HW readout and
	 * MST topology resume, but the PHY mode can only be changed if the
	 * port is disabled.
	 *
	 * An exception is the case where BIOS leaves the PHY incorrectly
	 * disconnected on an enabled legacy port. Work around that by
	 * connecting the PHY even though the port is enabled. This doesn't
	 * cause a problem as the PHY ownership state is ignored by the
	 * IOM/TCSS firmware (only display can own the PHY in that case).
	 */
	if (!tc_port_is_enabled(tc)) {
		update_mode = true;
	} else if (tc->mode == TC_PORT_DISCONNECTED) {
		drm_WARN_ON(display->drm, !tc->legacy_port);
		drm_err(display->drm,
			"Port %s: PHY disconnected on enabled port, connecting it\n",
			tc->port_name);
		update_mode = true;
	}

	if (update_mode)
		intel_tc_port_update_mode(tc, 1, false);

	/* Prevent changing tc->mode until intel_tc_port_sanitize_mode() is called. */
	__intel_tc_port_get_link(tc);

	mutex_unlock(&tc->lock);
}
1628 
/*
 * Report whether the port drives any active streams, logging an error if
 * streams are active while the PHY is not in the state the PLL type implies.
 */
static bool tc_port_has_active_streams(struct intel_tc_port *tc,
				       const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum icl_port_dpll_id pll_type = ICL_PORT_DPLL_DEFAULT;
	int active_streams = 0;

	if (dig_port->dp.is_mst) {
		/* TODO: get the PLL type for MST, once HW readout is done for it. */
		active_streams = intel_dp_mst_active_streams(&dig_port->dp);
	} else if (crtc_state && crtc_state->hw.active) {
		pll_type = intel_ddi_port_pll_type(&dig_port->base, crtc_state);
		active_streams = 1;
	}

	if (active_streams && !tc_phy_is_connected(tc, pll_type))
		drm_err(display->drm,
			"Port %s: PHY disconnected with %d active stream(s)\n",
			tc->port_name, active_streams);

	return active_streams;
}
1652 
1653 /**
1654  * intel_tc_port_sanitize_mode: Sanitize the given port's TypeC mode
1655  * @dig_port: digital port
1656  * @crtc_state: atomic state of CRTC connected to @dig_port
1657  *
1658  * Sanitize @dig_port's TypeC mode wrt. the encoder's state right after driver
1659  * loading and system resume:
1660  * If the encoder is enabled keep the TypeC mode/PHY connected state locked until
1661  * the encoder is disabled.
1662  * If the encoder is disabled make sure the PHY is disconnected.
1663  * @crtc_state is valid if @dig_port is enabled, NULL otherwise.
1664  */
void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
				 const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(dig_port);
	struct intel_tc_port *tc = to_tc_port(dig_port);

	mutex_lock(&tc->lock);

	/* Exactly the reference taken in intel_tc_port_init_mode() is expected. */
	drm_WARN_ON(display->drm, tc->link_refcount != 1);
	if (!tc_port_has_active_streams(tc, crtc_state)) {
		/*
		 * TBT-alt is the default mode in any case the PHY ownership is not
		 * held (regardless of the sink's connected live state), so
		 * we'll just switch to disconnected mode from it here without
		 * a note.
		 */
		if (tc->init_mode != TC_PORT_TBT_ALT &&
		    tc->init_mode != TC_PORT_DISCONNECTED)
			drm_dbg_kms(display->drm,
				    "Port %s: PHY left in %s mode on disabled port, disconnecting it\n",
				    tc->port_name,
				    tc_port_mode_name(tc->init_mode));
		tc_phy_disconnect(tc);
		/* Drop the reference pinning the init-time mode. */
		__intel_tc_port_put_link(tc);
	}

	drm_dbg_kms(display->drm, "Port %s: sanitize mode (%s) pin assignment: %c max lanes: %d\n",
		    tc->port_name,
		    tc_port_mode_name(tc->mode),
		    pin_assignment_name(tc->pin_assignment),
		    tc->max_lane_count);

	mutex_unlock(&tc->lock);
}
1699 
/* Print the port's TypeC state (mode, pin assignment, lanes) for debugfs. */
void intel_tc_info(struct drm_printer *p,  struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	intel_tc_port_lock(dig_port);
	drm_printf(p, "\tTC Port %s: mode: %s, pin assignment: %c, max lanes: %d\n",
		   tc->port_name,
		   tc_port_mode_name(tc->mode),
		   pin_assignment_name(tc->pin_assignment),
		   tc->max_lane_count);
	intel_tc_port_unlock(dig_port);
}
1712 
1713 /*
1714  * The type-C ports are different because even when they are connected, they may
1715  * not be available/usable by the graphics driver: see the comment on
1716  * icl_tc_phy_connect(). So in our driver instead of adding the additional
1717  * concept of "usable" and make everything check for "connected and usable" we
1718  * define a port as "connected" when it is not only connected, but also when it
1719  * is usable by the rest of the driver. That maintains the old assumption that
1720  * connected ports are usable, and avoids exposing to the users objects they
1721  * can't really use.
1722  */
bool intel_tc_port_connected(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	u32 mask = ~0;

	/* The caller must hold a TC port link/lock reference. */
	drm_WARN_ON(display->drm, !intel_tc_port_ref_held(dig_port));

	/* Only a live status matching the connected mode counts. */
	if (tc->mode != TC_PORT_DISCONNECTED)
		mask = BIT(tc->mode);

	return tc_phy_hpd_live_status(tc) & mask;
}
1737 
/*
 * Does an active DP-alt link need a reset because its target mode changed
 * (e.g. the sink got disconnected)? Takes tc->lock internally.
 */
static bool __intel_tc_port_link_needs_reset(struct intel_tc_port *tc)
{
	bool ret;

	mutex_lock(&tc->lock);

	ret = tc->link_refcount &&
	      tc->mode == TC_PORT_DP_ALT &&
	      intel_tc_port_needs_reset(tc);

	mutex_unlock(&tc->lock);

	return ret;
}
1752 
/* Public wrapper: link-reset check, false for non-TypeC ports. */
bool intel_tc_port_link_needs_reset(struct intel_digital_port *dig_port)
{
	if (!intel_encoder_is_tc(&dig_port->base))
		return false;

	return __intel_tc_port_link_needs_reset(to_tc_port(dig_port));
}
1760 
/*
 * Build and commit an atomic state that marks all pipes on the port's DP
 * link as having changed connectors, forcing a modeset to reset the link.
 * Returns 0 on success or if no reset is needed, a negative error code
 * otherwise (including -EDEADLK for the lock-retry loop in reset_link()).
 */
static int reset_link_commit(struct intel_tc_port *tc,
			     struct intel_atomic_state *state,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;
	struct intel_dp *intel_dp = enc_to_intel_dp(&dig_port->base);
	struct intel_crtc *crtc;
	u8 pipe_mask;
	int ret;

	ret = drm_modeset_lock(&display->drm->mode_config.connection_mutex, ctx);
	if (ret)
		return ret;

	ret = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask);
	if (ret)
		return ret;

	/* Nothing to reset if the link drives no pipes. */
	if (!pipe_mask)
		return 0;

	for_each_intel_crtc_in_pipe_mask(display->drm, crtc, pipe_mask) {
		struct intel_crtc_state *crtc_state;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		/* Force a connector recheck/modeset on this CRTC. */
		crtc_state->uapi.connectors_changed = true;
	}

	/* Re-check under the acquired locks; the need may have gone away. */
	if (!__intel_tc_port_link_needs_reset(tc))
		return 0;

	return drm_atomic_commit(&state->base);
}
1798 
/*
 * Reset the port's link via an internal atomic commit, using the modeset
 * lock retry loop to handle -EDEADLK backoffs.
 */
static int reset_link(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *_state;
	struct intel_atomic_state *state;
	int ret;

	_state = drm_atomic_state_alloc(display->drm);
	if (!_state)
		return -ENOMEM;

	state = to_intel_atomic_state(_state);
	/* Driver-internal commit, not triggered by userspace. */
	state->internal = true;

	intel_modeset_lock_ctx_retry(&ctx, state, 0, ret)
		ret = reset_link_commit(tc, state, &ctx);

	drm_atomic_state_put(&state->base);

	return ret;
}
1821 
/*
 * Delayed work resetting a DP-alt link whose sink got disconnected; see
 * intel_tc_port_link_reset().
 */
static void intel_tc_port_link_reset_work(struct work_struct *work)
{
	struct intel_tc_port *tc =
		container_of(work, struct intel_tc_port, link_reset_work.work);
	struct intel_display *display = to_intel_display(tc->dig_port);
	int ret;

	/* The reset may have become unnecessary since the work was queued. */
	if (!__intel_tc_port_link_needs_reset(tc))
		return;

	mutex_lock(&display->drm->mode_config.mutex);

	drm_dbg_kms(display->drm,
		    "Port %s: TypeC DP-alt sink disconnected, resetting link\n",
		    tc->port_name);
	ret = reset_link(tc);
	drm_WARN_ON(display->drm, ret);

	mutex_unlock(&display->drm->mode_config.mutex);
}
1842 
/*
 * Schedule a delayed link reset if one is needed; returns true if the work
 * was queued. The 2s delay debounces sink disconnect/reconnect events.
 */
bool intel_tc_port_link_reset(struct intel_digital_port *dig_port)
{
	if (!intel_tc_port_link_needs_reset(dig_port))
		return false;

	queue_delayed_work(system_unbound_wq,
			   &to_tc_port(dig_port)->link_reset_work,
			   msecs_to_jiffies(2000));

	return true;
}
1854 
intel_tc_port_link_cancel_reset_work(struct intel_digital_port * dig_port)1855 void intel_tc_port_link_cancel_reset_work(struct intel_digital_port *dig_port)
1856 {
1857 	struct intel_tc_port *tc = to_tc_port(dig_port);
1858 
1859 	if (!intel_encoder_is_tc(&dig_port->base))
1860 		return;
1861 
1862 	cancel_delayed_work(&tc->link_reset_work);
1863 }
1864 
/*
 * Lock the TypeC port, updating its mode and connecting its PHY with
 * @required_lanes if there is no active link on it yet. Any pending
 * delayed PHY disconnect is canceled, as the port is about to be used.
 */
static void __intel_tc_port_lock(struct intel_tc_port *tc,
				 int required_lanes)
{
	struct intel_display *display = to_intel_display(tc->dig_port);

	mutex_lock(&tc->lock);

	/* The port will stay in use; no need for the delayed disconnect. */
	cancel_delayed_work(&tc->disconnect_phy_work);

	/*
	 * Only update the port mode while no link is active on it, as a
	 * mode change would disrupt an already configured link.
	 */
	if (!tc->link_refcount)
		intel_tc_port_update_mode(tc, required_lanes,
					  false);

	/* By now the port must be in a connected mode and (except TBT-alt) own its PHY. */
	drm_WARN_ON(display->drm, tc->mode == TC_PORT_DISCONNECTED);
	drm_WARN_ON(display->drm, tc->mode != TC_PORT_TBT_ALT && !tc_phy_is_owned(tc));
}
1881 
/* Lock the TypeC port of @dig_port, requiring at least one connected lane. */
void intel_tc_port_lock(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	__intel_tc_port_lock(tc, 1);
}
1886 
/*
 * Disconnect the given digital port from its TypeC PHY (handing back the
 * control of the PHY to the TypeC subsystem). This will happen in a delayed
 * manner after each AUX transaction and modeset disable.
 */
intel_tc_port_disconnect_phy_work(struct work_struct * work)1892 static void intel_tc_port_disconnect_phy_work(struct work_struct *work)
1893 {
1894 	struct intel_tc_port *tc =
1895 		container_of(work, struct intel_tc_port, disconnect_phy_work.work);
1896 
1897 	mutex_lock(&tc->lock);
1898 
1899 	if (!tc->link_refcount)
1900 		intel_tc_port_update_mode(tc, 1, true);
1901 
1902 	mutex_unlock(&tc->lock);
1903 }
1904 
/**
 * intel_tc_port_flush_work - flush the work disconnecting the PHY
 * @dig_port: digital port
 *
 * Flush the delayed work disconnecting an idle PHY.
 */
static void intel_tc_port_flush_work(struct intel_digital_port *dig_port)
{
	flush_delayed_work(&to_tc_port(dig_port)->disconnect_phy_work);
}
1915 
intel_tc_port_suspend(struct intel_digital_port * dig_port)1916 void intel_tc_port_suspend(struct intel_digital_port *dig_port)
1917 {
1918 	struct intel_tc_port *tc = to_tc_port(dig_port);
1919 
1920 	cancel_delayed_work_sync(&tc->link_reset_work);
1921 	intel_tc_port_flush_work(dig_port);
1922 }
1923 
intel_tc_port_unlock(struct intel_digital_port * dig_port)1924 void intel_tc_port_unlock(struct intel_digital_port *dig_port)
1925 {
1926 	struct intel_tc_port *tc = to_tc_port(dig_port);
1927 
1928 	if (!tc->link_refcount && tc->mode != TC_PORT_DISCONNECTED)
1929 		queue_delayed_work(system_unbound_wq, &tc->disconnect_phy_work,
1930 				   msecs_to_jiffies(1000));
1931 
1932 	mutex_unlock(&tc->lock);
1933 }
1934 
intel_tc_port_ref_held(struct intel_digital_port * dig_port)1935 bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
1936 {
1937 	struct intel_tc_port *tc = to_tc_port(dig_port);
1938 
1939 	return mutex_is_locked(&tc->lock) ||
1940 	       tc->link_refcount;
1941 }
1942 
/*
 * Acquire a link reference on the TypeC port, connecting its PHY with
 * @required_lanes if needed.
 */
void intel_tc_port_get_link(struct intel_digital_port *dig_port,
			    int required_lanes)
{
	struct intel_tc_port *tc_port = to_tc_port(dig_port);

	__intel_tc_port_lock(tc_port, required_lanes);
	__intel_tc_port_get_link(tc_port);
	intel_tc_port_unlock(dig_port);
}
1952 
/* Drop a link reference on the TypeC port taken by intel_tc_port_get_link(). */
void intel_tc_port_put_link(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc_port = to_tc_port(dig_port);

	intel_tc_port_lock(dig_port);
	__intel_tc_port_put_link(tc_port);
	intel_tc_port_unlock(dig_port);

	/*
	 * Until this port gets disabled and its PHY disconnected, the
	 * firmware won't update the HPD status of other TypeC ports that
	 * are active in DP-alt mode with their sink disconnected. Flush
	 * the PHY disconnect work here so that happens in a timely manner.
	 */
	intel_tc_port_flush_work(dig_port);
}
1969 
/**
 * intel_tc_port_init - initialize the TypeC state of a digital port
 * @dig_port: digital port on a TypeC PHY
 * @is_legacy: whether the port is wired as a legacy (static) TypeC port
 *
 * Allocate and initialize the TypeC port state for @dig_port, selecting
 * the PHY access functions matching the display version and reading out
 * the initial HW state of the port.
 *
 * Returns 0 on success, -EINVAL if @dig_port is not on a TypeC PHY or
 * -ENOMEM on allocation failure.
 */
int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
{
	struct intel_display *display = to_intel_display(dig_port);
	struct intel_tc_port *tc;
	enum port port = dig_port->base.port;
	enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);

	if (drm_WARN_ON(display->drm, tc_port == TC_PORT_NONE))
		return -EINVAL;

	tc = kzalloc_obj(*tc);
	if (!tc)
		return -ENOMEM;

	tc->dig_port = dig_port;

	/* Select the PHY access functions based on the display generation. */
	if (DISPLAY_VER(display) >= 14)
		tc->phy_ops = &xelpdp_tc_phy_ops;
	else if (DISPLAY_VER(display) >= 13)
		tc->phy_ops = &adlp_tc_phy_ops;
	else if (DISPLAY_VER(display) >= 12)
		tc->phy_ops = &tgl_tc_phy_ops;
	else
		tc->phy_ops = &icl_tc_phy_ops;

	tc->port_name = kasprintf(GFP_KERNEL, "%c/TC#%d", port_name(port),
				  tc_port + 1);
	if (!tc->port_name) {
		kfree(tc);
		return -ENOMEM;
	}

	/*
	 * Publish the state only after all allocations have succeeded, so
	 * an error return can't leave dig_port->tc pointing at freed memory.
	 */
	dig_port->tc = tc;

	mutex_init(&tc->lock);
	/* TODO: Combine the two works */
	INIT_DELAYED_WORK(&tc->disconnect_phy_work, intel_tc_port_disconnect_phy_work);
	INIT_DELAYED_WORK(&tc->link_reset_work, intel_tc_port_link_reset_work);
	tc->legacy_port = is_legacy;
	tc->mode = TC_PORT_DISCONNECTED;
	tc->link_refcount = 0;

	tc_phy_init(tc);

	intel_tc_port_init_mode(dig_port);

	return 0;
}
2017 
intel_tc_port_cleanup(struct intel_digital_port * dig_port)2018 void intel_tc_port_cleanup(struct intel_digital_port *dig_port)
2019 {
2020 	intel_tc_port_suspend(dig_port);
2021 
2022 	kfree(dig_port->tc->port_name);
2023 	kfree(dig_port->tc);
2024 	dig_port->tc = NULL;
2025 }
2026