xref: /linux/drivers/gpu/drm/i915/display/intel_tc.c (revision c5288cda69ee2d8607f5026bd599a5cebf0ee783)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5 
6 #include "i915_drv.h"
7 #include "i915_reg.h"
8 #include "intel_atomic.h"
9 #include "intel_cx0_phy_regs.h"
10 #include "intel_ddi.h"
11 #include "intel_de.h"
12 #include "intel_display.h"
13 #include "intel_display_driver.h"
14 #include "intel_display_power_map.h"
15 #include "intel_display_types.h"
16 #include "intel_dkl_phy_regs.h"
17 #include "intel_dp.h"
18 #include "intel_dp_mst.h"
19 #include "intel_mg_phy_regs.h"
20 #include "intel_modeset_lock.h"
21 #include "intel_tc.h"
22 
23 #define DP_PIN_ASSIGNMENT_C	0x3
24 #define DP_PIN_ASSIGNMENT_D	0x4
25 #define DP_PIN_ASSIGNMENT_E	0x5
26 
/* Connection mode of a TypeC port, mirroring what the PHY/FIA reports. */
enum tc_port_mode {
	TC_PORT_DISCONNECTED,	/* no sink connected, PHY not owned */
	TC_PORT_TBT_ALT,	/* Thunderbolt alt mode, PHY owned by TBT subsystem */
	TC_PORT_DP_ALT,		/* DisplayPort alt mode, PHY owned by display */
	TC_PORT_LEGACY,		/* fixed/legacy connection, PHY owned by display */
};
33 
34 struct intel_tc_port;
35 
/* Per-platform TypeC PHY hooks (ICL, TGL, ADLP, XELPDP variants below). */
struct intel_tc_phy_ops {
	/* Power domain blocking TC-cold for this port in its current mode. */
	enum intel_display_power_domain (*cold_off_domain)(struct intel_tc_port *tc);
	/* Bitmask of BIT(TC_PORT_*) modes with an asserted HPD live state. */
	u32 (*hpd_live_status)(struct intel_tc_port *tc);
	/* PHY status-complete flag: display may acquire PHY ownership. */
	bool (*is_ready)(struct intel_tc_port *tc);
	/* Whether display currently owns the PHY. */
	bool (*is_owned)(struct intel_tc_port *tc);
	/* Read out tc->mode and take the references matching the HW state. */
	void (*get_hw_state)(struct intel_tc_port *tc);
	/* Connect flow; returns false if the PHY can't be connected. */
	bool (*connect)(struct intel_tc_port *tc, int required_lanes);
	/* Disconnect flow, undoing connect(). */
	void (*disconnect)(struct intel_tc_port *tc);
	/* One-time init, e.g. FIA parameter setup. */
	void (*init)(struct intel_tc_port *tc);
};
46 
/* State tracking for one TypeC port, owned by its digital port. */
struct intel_tc_port {
	struct intel_digital_port *dig_port;

	const struct intel_tc_phy_ops *phy_ops;

	struct mutex lock;	/* protects the TypeC port mode */
	/* wakeref held while connected, blocking TC-cold */
	intel_wakeref_t lock_wakeref;
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	/* domain lock_wakeref was taken on, to catch mismatched puts */
	enum intel_display_power_domain lock_power_domain;
#endif
	struct delayed_work disconnect_phy_work;
	struct delayed_work link_reset_work;
	/* number of active users keeping the link configuration alive */
	int link_refcount;
	/* VBT legacy-port flag, may be fixed up from live status */
	bool legacy_port:1;
	const char *port_name;
	enum tc_port_mode mode;
	/* mode read out at driver init, for sanity checking */
	enum tc_port_mode init_mode;
	/* FIA instance and index of this port within it */
	enum phy_fia phy_fia;
	u8 phy_fia_idx;
};
67 
68 static enum intel_display_power_domain
69 tc_phy_cold_off_domain(struct intel_tc_port *);
70 static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc);
71 static bool tc_phy_is_ready(struct intel_tc_port *tc);
72 static bool tc_phy_wait_for_ready(struct intel_tc_port *tc);
73 static enum tc_port_mode tc_phy_get_current_mode(struct intel_tc_port *tc);
74 
75 static const char *tc_port_mode_name(enum tc_port_mode mode)
76 {
77 	static const char * const names[] = {
78 		[TC_PORT_DISCONNECTED] = "disconnected",
79 		[TC_PORT_TBT_ALT] = "tbt-alt",
80 		[TC_PORT_DP_ALT] = "dp-alt",
81 		[TC_PORT_LEGACY] = "legacy",
82 	};
83 
84 	if (WARN_ON(mode >= ARRAY_SIZE(names)))
85 		mode = TC_PORT_DISCONNECTED;
86 
87 	return names[mode];
88 }
89 
/* TypeC state of @dig_port; NULL for non-TypeC ports. */
static struct intel_tc_port *to_tc_port(struct intel_digital_port *dig_port)
{
	return dig_port->tc;
}
94 
/* i915 device owning @tc's digital port. */
static struct drm_i915_private *tc_to_i915(struct intel_tc_port *tc)
{
	return to_i915(tc->dig_port->base.base.dev);
}
99 
/*
 * Whether @dig_port is a TypeC port currently in @mode. The is_tc check
 * must come first: tc is NULL for non-TypeC ports.
 */
static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port,
				  enum tc_port_mode mode)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	return intel_encoder_is_tc(&dig_port->base) && tc->mode == mode;
}
107 
/* Whether @dig_port is a TypeC port in Thunderbolt alt mode. */
bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_TBT_ALT);
}
112 
/* Whether @dig_port is a TypeC port in DisplayPort alt mode. */
bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_DP_ALT);
}
117 
/* Whether @dig_port is a TypeC port in legacy (fixed connection) mode. */
bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_LEGACY);
}
122 
/*
 * Whether spurious HPD deassertions on @dig_port can be ignored: true for
 * non-legacy TypeC ports, where the PHY/firmware filters the real state.
 */
bool intel_tc_port_handles_hpd_glitches(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	return intel_encoder_is_tc(&dig_port->base) && !tc->legacy_port;
}
129 
130 /*
131  * The display power domains used for TC ports depending on the
132  * platform and TC mode (legacy, DP-alt, TBT):
133  *
134  * POWER_DOMAIN_DISPLAY_CORE:
135  * --------------------------
136  * ADLP/all modes:
137  *   - TCSS/IOM access for PHY ready state.
138  * ADLP+/all modes:
139  *   - DE/north-,south-HPD ISR access for HPD live state.
140  *
141  * POWER_DOMAIN_PORT_DDI_LANES_<port>:
142  * -----------------------------------
143  * ICL+/all modes:
144  *   - DE/DDI_BUF access for port enabled state.
145  * ADLP/all modes:
146  *   - DE/DDI_BUF access for PHY owned state.
147  *
148  * POWER_DOMAIN_AUX_USBC<TC port index>:
149  * -------------------------------------
150  * ICL/legacy mode:
151  *   - TCSS/IOM,FIA access for PHY ready, owned and HPD live state
152  *   - TCSS/PHY: block TC-cold power state for using the PHY AUX and
153  *     main lanes.
154  * ADLP/legacy, DP-alt modes:
155  *   - TCSS/PHY: block TC-cold power state for using the PHY AUX and
156  *     main lanes.
157  *
158  * POWER_DOMAIN_TC_COLD_OFF:
159  * -------------------------
160  * ICL/DP-alt, TBT mode:
161  *   - TCSS/TBT: block TC-cold power state for using the (direct or
162  *     TBT DP-IN) AUX and main lanes.
163  *
164  * TGL/all modes:
165  *   - TCSS/IOM,FIA access for PHY ready, owned and HPD live state
166  *   - TCSS/PHY: block TC-cold power state for using the (direct or
167  *     TBT DP-IN) AUX and main lanes.
168  *
169  * ADLP/TBT mode:
170  *   - TCSS/TBT: block TC-cold power state for using the (TBT DP-IN)
171  *     AUX and main lanes.
172  *
173  * XELPDP+/all modes:
174  *   - TCSS/IOM,FIA access for PHY ready, owned state
175  *   - TCSS/PHY: block TC-cold power state for using the (direct or
176  *     TBT DP-IN) AUX and main lanes.
177  */
/*
 * Whether blocking TC-cold for @dig_port requires the port's AUX power
 * well (as opposed to the dedicated TC_COLD_OFF domain).
 */
bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc = to_tc_port(dig_port);

	return tc_phy_cold_off_domain(tc) ==
	       intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
}
186 
/*
 * Block the PHY's TC-cold power state, returning the wakeref and the
 * domain it was taken on in @domain (needed for the matching unblock).
 */
static intel_wakeref_t
__tc_cold_block(struct intel_tc_port *tc, enum intel_display_power_domain *domain)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	*domain = tc_phy_cold_off_domain(tc);

	return intel_display_power_get(i915, *domain);
}
196 
/*
 * Block TC-cold, recording the domain used when runtime-PM debugging is
 * enabled so tc_cold_unblock() can verify the mode didn't change meanwhile.
 */
static intel_wakeref_t
tc_cold_block(struct intel_tc_port *tc)
{
	enum intel_display_power_domain domain;
	intel_wakeref_t wakeref;

	wakeref = __tc_cold_block(tc, &domain);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	tc->lock_power_domain = domain;
#endif
	return wakeref;
}
209 
/* Release a TC-cold blocking reference taken by __tc_cold_block(). */
static void
__tc_cold_unblock(struct intel_tc_port *tc, enum intel_display_power_domain domain,
		  intel_wakeref_t wakeref)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	intel_display_power_put(i915, domain, wakeref);
}
218 
/*
 * Release a tc_cold_block() reference. The current cold-off domain must
 * match the one the wakeref was taken on (checked in PM-debug builds).
 */
static void
tc_cold_unblock(struct intel_tc_port *tc, intel_wakeref_t wakeref)
{
	enum intel_display_power_domain domain = tc_phy_cold_off_domain(tc);

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	drm_WARN_ON(&tc_to_i915(tc)->drm, tc->lock_power_domain != domain);
#endif
	__tc_cold_unblock(tc, domain, wakeref);
}
229 
/* Warn if the display core power domain isn't enabled. */
static void
assert_display_core_power_enabled(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	drm_WARN_ON(&i915->drm,
		    !intel_display_power_is_enabled(i915, POWER_DOMAIN_DISPLAY_CORE));
}
238 
239 static void
240 assert_tc_cold_blocked(struct intel_tc_port *tc)
241 {
242 	struct drm_i915_private *i915 = tc_to_i915(tc);
243 	bool enabled;
244 
245 	enabled = intel_display_power_is_enabled(i915,
246 						 tc_phy_cold_off_domain(tc));
247 	drm_WARN_ON(&i915->drm, !enabled);
248 }
249 
/* DDI-lanes power domain for @tc's port (TC1 domain + port offset). */
static enum intel_display_power_domain
tc_port_power_domain(struct intel_tc_port *tc)
{
	enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);

	return POWER_DOMAIN_PORT_DDI_LANES_TC1 + tc_port - TC_PORT_1;
}
257 
/* Warn if the port's DDI-lanes power domain isn't enabled. */
static void
assert_tc_port_power_enabled(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	drm_WARN_ON(&i915->drm,
		    !intel_display_power_is_enabled(i915, tc_port_power_domain(tc)));
}
266 
/*
 * Read the FIA lane assignment mask for @dig_port's port. An all-ones
 * readback indicates the PHY is in TC-cold, which must not happen here.
 */
static u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	u32 lane_mask;

	lane_mask = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia));

	drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
	assert_tc_cold_blocked(tc);

	/* Extract this port's field from the shared FIA register. */
	lane_mask &= DP_LANE_ASSIGNMENT_MASK(tc->phy_fia_idx);
	return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(tc->phy_fia_idx);
}
281 
/*
 * Read the FIA pin assignment for @dig_port's port. An all-ones readback
 * indicates the PHY is in TC-cold, which must not happen here.
 */
u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	u32 pin_mask;

	pin_mask = intel_de_read(i915, PORT_TX_DFLEXPA1(tc->phy_fia));

	drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
	assert_tc_cold_blocked(tc);

	return (pin_mask & DP_PIN_ASSIGNMENT_MASK(tc->phy_fia_idx)) >>
	       DP_PIN_ASSIGNMENT_SHIFT(tc->phy_fia_idx);
}
296 
/*
 * LNL+: max DP lane count from the TCSS pin assignment (USB-C pin
 * assignments C/E provide 4 lanes, D provides 2).
 */
static int lnl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);
	intel_wakeref_t wakeref;
	u32 val, pin_assignment;

	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		val = intel_de_read(i915, TCSS_DDI_STATUS(tc_port));

	pin_assignment =
		REG_FIELD_GET(TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK, val);

	switch (pin_assignment) {
	default:
		MISSING_CASE(pin_assignment);
		fallthrough;
	case DP_PIN_ASSIGNMENT_D:
		return 2;
	case DP_PIN_ASSIGNMENT_C:
	case DP_PIN_ASSIGNMENT_E:
		return 4;
	}
}
321 
/*
 * MTL: max DP lane count from the FIA pin assignment (pin assignments
 * C/E provide 4 lanes, D provides 2).
 */
static int mtl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	intel_wakeref_t wakeref;
	u32 pin_mask;

	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		pin_mask = intel_tc_port_get_pin_assignment_mask(dig_port);

	switch (pin_mask) {
	default:
		MISSING_CASE(pin_mask);
		fallthrough;
	case DP_PIN_ASSIGNMENT_D:
		return 2;
	case DP_PIN_ASSIGNMENT_C:
	case DP_PIN_ASSIGNMENT_E:
		return 4;
	}
}
342 
/*
 * ICL-ADLP: max DP lane count from the FIA lane assignment bitmask
 * (popcount of assigned lanes: 1, 2 or 4 contiguous lanes).
 */
static int intel_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	intel_wakeref_t wakeref;
	u32 lane_mask = 0;

	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		lane_mask = intel_tc_port_get_lane_mask(dig_port);

	switch (lane_mask) {
	default:
		MISSING_CASE(lane_mask);
		fallthrough;
	case 0x1:
	case 0x2:
	case 0x4:
	case 0x8:
		return 1;
	case 0x3:
	case 0xc:
		return 2;
	case 0xf:
		return 4;
	}
}
368 
/*
 * Max number of main link lanes usable on @dig_port. Only DP-alt mode is
 * limited by the sink's pin/lane assignment; all other modes get 4 lanes.
 */
int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc = to_tc_port(dig_port);

	if (!intel_encoder_is_tc(&dig_port->base) || tc->mode != TC_PORT_DP_ALT)
		return 4;

	assert_tc_cold_blocked(tc);

	/* Platform-specific readout: LNL+ uses TCSS, MTL the FIA pins. */
	if (DISPLAY_VER(i915) >= 20)
		return lnl_tc_port_get_max_lane_count(dig_port);

	if (DISPLAY_VER(i915) >= 14)
		return mtl_tc_port_get_max_lane_count(dig_port);

	return intel_tc_port_get_max_lane_count(dig_port);
}
387 
/*
 * Program the FIA with the number of main link lanes to use on
 * @dig_port. With lane reversal (only valid in legacy mode) the lanes
 * are enabled starting from the top (ML3) instead of the bottom (ML0).
 */
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
				      int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
	u32 val;

	drm_WARN_ON(&i915->drm,
		    lane_reversal && tc->mode != TC_PORT_LEGACY);

	assert_tc_cold_blocked(tc);

	val = intel_de_read(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia));
	val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc->phy_fia_idx);

	switch (required_lanes) {
	case 1:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3(tc->phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML0(tc->phy_fia_idx);
		break;
	case 2:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3_2(tc->phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML1_0(tc->phy_fia_idx);
		break;
	case 4:
		val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc->phy_fia_idx);
		break;
	default:
		MISSING_CASE(required_lanes);
	}

	intel_de_write(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia), val);
}
424 
/*
 * Fix up the VBT legacy-port flag if it contradicts the HPD live status:
 * a legacy port can only assert the legacy HPD bit, a non-legacy port
 * only the DP-alt/TBT-alt bits. Only an unambiguous (single-bit) live
 * status is trusted for the fixup.
 */
static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc,
				      u32 live_status_mask)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 valid_hpd_mask;

	drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED);

	if (hweight32(live_status_mask) != 1)
		return;

	if (tc->legacy_port)
		valid_hpd_mask = BIT(TC_PORT_LEGACY);
	else
		valid_hpd_mask = BIT(TC_PORT_DP_ALT) |
				 BIT(TC_PORT_TBT_ALT);

	if (!(live_status_mask & ~valid_hpd_mask))
		return;

	/* If live status mismatches the VBT flag, trust the live status. */
	drm_dbg_kms(&i915->drm,
		    "Port %s: live status %08x mismatches the legacy port flag %08x, fixing flag\n",
		    tc->port_name, live_status_mask, valid_hpd_mask);

	tc->legacy_port = !tc->legacy_port;
}
452 
453 static void tc_phy_load_fia_params(struct intel_tc_port *tc, bool modular_fia)
454 {
455 	enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
456 
457 	/*
458 	 * Each Modular FIA instance houses 2 TC ports. In SOC that has more
459 	 * than two TC ports, there are multiple instances of Modular FIA.
460 	 */
461 	if (modular_fia) {
462 		tc->phy_fia = tc_port / 2;
463 		tc->phy_fia_idx = tc_port % 2;
464 	} else {
465 		tc->phy_fia = FIA1;
466 		tc->phy_fia_idx = tc_port;
467 	}
468 }
469 
470 /*
471  * ICL TC PHY handlers
472  * -------------------
473  */
/*
 * ICL: legacy mode blocks TC-cold via the port's AUX power well, other
 * modes via the dedicated TC_COLD_OFF domain.
 */
static enum intel_display_power_domain
icl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;

	if (tc->legacy_port)
		return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);

	return POWER_DOMAIN_TC_COLD_OFF;
}
485 
/*
 * ICL: HPD live status, combining the FIA's TBT/TC live-state bits and
 * the PCH's legacy HPD bit. Returns a mask of BIT(TC_PORT_*) values.
 */
static u32 icl_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin];
	intel_wakeref_t wakeref;
	u32 fia_isr;
	u32 pch_isr;
	u32 mask = 0;

	with_intel_display_power(i915, tc_phy_cold_off_domain(tc), wakeref) {
		fia_isr = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia));
		pch_isr = intel_de_read(i915, SDEISR);
	}

	/* All-ones readback means the PHY is in TC-cold: nothing connected. */
	if (fia_isr == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, nothing connected\n",
			    tc->port_name);
		return mask;
	}

	if (fia_isr & TC_LIVE_STATE_TBT(tc->phy_fia_idx))
		mask |= BIT(TC_PORT_TBT_ALT);
	if (fia_isr & TC_LIVE_STATE_TC(tc->phy_fia_idx))
		mask |= BIT(TC_PORT_DP_ALT);

	if (pch_isr & isr_bit)
		mask |= BIT(TC_PORT_LEGACY);

	return mask;
}
518 
519 /*
520  * Return the PHY status complete flag indicating that display can acquire the
521  * PHY ownership. The IOM firmware sets this flag when a DP-alt or legacy sink
522  * is connected and it's ready to switch the ownership to display. The flag
523  * will be left cleared when a TBT-alt sink is connected, where the PHY is
524  * owned by the TBT subsystem and so switching the ownership to display is not
525  * required.
526  */
static bool icl_tc_phy_is_ready(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(i915, PORT_TX_DFLEXDPPMS(tc->phy_fia));
	/* All-ones readback means the PHY is in TC-cold. */
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assuming not ready\n",
			    tc->port_name);
		return false;
	}

	return val & DP_PHY_MODE_STATUS_COMPLETED(tc->phy_fia_idx);
}
544 
/*
 * ICL: take (@take=true) or release display ownership of the PHY via the
 * FIA safe-mode bits. Returns false if the PHY is in TC-cold and the
 * register can't be accessed.
 */
static bool icl_tc_phy_take_ownership(struct intel_tc_port *tc,
				      bool take)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, can't %s ownership\n",
			    tc->port_name, take ? "take" : "release");

		return false;
	}

	/* "not safe" set = display owns the PHY */
	val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
	if (take)
		val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);

	intel_de_write(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia), val);

	return true;
}
570 
/* ICL: whether display owns the PHY (FIA "not safe" bit set). */
static bool icl_tc_phy_is_owned(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
	/* All-ones readback means the PHY is in TC-cold. */
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assume not owned\n",
			    tc->port_name);
		return false;
	}

	return val & DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
}
588 
/*
 * ICL: read out the current PHY mode. TC-cold is blocked temporarily for
 * the readout; if the port turns out to be connected a long-term blocking
 * reference is kept in tc->lock_wakeref.
 */
static void icl_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	enum intel_display_power_domain domain;
	intel_wakeref_t tc_cold_wref;

	tc_cold_wref = __tc_cold_block(tc, &domain);

	tc->mode = tc_phy_get_current_mode(tc);
	if (tc->mode != TC_PORT_DISCONNECTED)
		tc->lock_wakeref = tc_cold_block(tc);

	__tc_cold_unblock(tc, domain, tc_cold_wref);
}
602 
603 /*
604  * This function implements the first part of the Connect Flow described by our
605  * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
606  * lanes, EDID, etc) is done as needed in the typical places.
607  *
608  * Unlike the other ports, type-C ports are not available to use as soon as we
609  * get a hotplug. The type-C PHYs can be shared between multiple controllers:
610  * display, USB, etc. As a result, handshaking through FIA is required around
611  * connect and disconnect to cleanly transfer ownership with the controller and
612  * set the type-C power state.
613  */
/*
 * Verify that a legacy/DP-alt mode connection is (still) usable with
 * @required_lanes. Legacy mode always provides 4 lanes; DP-alt mode is
 * re-checked for a sudden disconnect and for its lane budget.
 */
static bool tc_phy_verify_legacy_or_dp_alt_mode(struct intel_tc_port *tc,
						int required_lanes)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	int max_lanes;

	max_lanes = intel_tc_port_max_lane_count(dig_port);
	if (tc->mode == TC_PORT_LEGACY) {
		drm_WARN_ON(&i915->drm, max_lanes != 4);
		return true;
	}

	drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DP_ALT);

	/*
	 * Now we have to re-check the live state, in case the port recently
	 * became disconnected. Not necessary for legacy mode.
	 */
	if (!(tc_phy_hpd_live_status(tc) & BIT(TC_PORT_DP_ALT))) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n",
			    tc->port_name);
		return false;
	}

	if (max_lanes < required_lanes) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY max lanes %d < required lanes %d\n",
			    tc->port_name,
			    max_lanes, required_lanes);
		return false;
	}

	return true;
}
649 
650 static bool icl_tc_phy_connect(struct intel_tc_port *tc,
651 			       int required_lanes)
652 {
653 	struct drm_i915_private *i915 = tc_to_i915(tc);
654 
655 	tc->lock_wakeref = tc_cold_block(tc);
656 
657 	if (tc->mode == TC_PORT_TBT_ALT)
658 		return true;
659 
660 	if ((!tc_phy_is_ready(tc) ||
661 	     !icl_tc_phy_take_ownership(tc, true)) &&
662 	    !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
663 		drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership (ready %s)\n",
664 			    tc->port_name,
665 			    str_yes_no(tc_phy_is_ready(tc)));
666 		goto out_unblock_tc_cold;
667 	}
668 
669 
670 	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
671 		goto out_release_phy;
672 
673 	return true;
674 
675 out_release_phy:
676 	icl_tc_phy_take_ownership(tc, false);
677 out_unblock_tc_cold:
678 	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
679 
680 	return false;
681 }
682 
683 /*
684  * See the comment at the connect function. This implements the Disconnect
685  * Flow.
686  */
static void icl_tc_phy_disconnect(struct intel_tc_port *tc)
{
	switch (tc->mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		/* Display owned the PHY in these modes; hand it back first. */
		icl_tc_phy_take_ownership(tc, false);
		fallthrough;
	case TC_PORT_TBT_ALT:
		tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
		break;
	default:
		MISSING_CASE(tc->mode);
	}
}
701 
/* ICL has a single FIA instance (no Modular FIA). */
static void icl_tc_phy_init(struct intel_tc_port *tc)
{
	tc_phy_load_fia_params(tc, false);
}
706 
/* ICL TypeC PHY hook table. */
static const struct intel_tc_phy_ops icl_tc_phy_ops = {
	.cold_off_domain = icl_tc_phy_cold_off_domain,
	.hpd_live_status = icl_tc_phy_hpd_live_status,
	.is_ready = icl_tc_phy_is_ready,
	.is_owned = icl_tc_phy_is_owned,
	.get_hw_state = icl_tc_phy_get_hw_state,
	.connect = icl_tc_phy_connect,
	.disconnect = icl_tc_phy_disconnect,
	.init = icl_tc_phy_init,
};
717 
718 /*
719  * TGL TC PHY handlers
720  * -------------------
721  */
/* TGL: all modes block TC-cold via the dedicated TC_COLD_OFF domain. */
static enum intel_display_power_domain
tgl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
{
	return POWER_DOMAIN_TC_COLD_OFF;
}
727 
/*
 * TGL: detect Modular FIA from the FIA1 DFLEXDPSP register and load the
 * FIA parameters accordingly. An all-ones readback (PHY in TC-cold)
 * should not happen with the cold-off domain held.
 */
static void tgl_tc_phy_init(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	intel_wakeref_t wakeref;
	u32 val;

	with_intel_display_power(i915, tc_phy_cold_off_domain(tc), wakeref)
		val = intel_de_read(i915, PORT_TX_DFLEXDPSP(FIA1));

	drm_WARN_ON(&i915->drm, val == 0xffffffff);

	tc_phy_load_fia_params(tc, val & MODULAR_FIA_MASK);
}
741 
/* TGL TypeC PHY hook table; reuses the ICL hooks except cold-off/init. */
static const struct intel_tc_phy_ops tgl_tc_phy_ops = {
	.cold_off_domain = tgl_tc_phy_cold_off_domain,
	.hpd_live_status = icl_tc_phy_hpd_live_status,
	.is_ready = icl_tc_phy_is_ready,
	.is_owned = icl_tc_phy_is_owned,
	.get_hw_state = icl_tc_phy_get_hw_state,
	.connect = icl_tc_phy_connect,
	.disconnect = icl_tc_phy_disconnect,
	.init = tgl_tc_phy_init,
};
752 
753 /*
754  * ADLP TC PHY handlers
755  * --------------------
756  */
/*
 * ADLP: legacy and DP-alt modes block TC-cold via the port's AUX power
 * well, TBT-alt via the dedicated TC_COLD_OFF domain.
 */
static enum intel_display_power_domain
adlp_tc_phy_cold_off_domain(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;

	if (tc->mode != TC_PORT_TBT_ALT)
		return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);

	return POWER_DOMAIN_TC_COLD_OFF;
}
768 
/*
 * ADLP: HPD live status from the north (CPU) TC/TBT hotplug ISR bits and
 * the south (PCH) legacy HPD bit. Returns a mask of BIT(TC_PORT_*).
 */
static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
	u32 cpu_isr_bits = i915->display.hotplug.hpd[hpd_pin];
	u32 pch_isr_bit = i915->display.hotplug.pch_hpd[hpd_pin];
	intel_wakeref_t wakeref;
	u32 cpu_isr;
	u32 pch_isr;
	u32 mask = 0;

	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
		cpu_isr = intel_de_read(i915, GEN11_DE_HPD_ISR);
		pch_isr = intel_de_read(i915, SDEISR);
	}

	if (cpu_isr & (cpu_isr_bits & GEN11_DE_TC_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_DP_ALT);
	if (cpu_isr & (cpu_isr_bits & GEN11_DE_TBT_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_TBT_ALT);

	if (pch_isr & pch_isr_bit)
		mask |= BIT(TC_PORT_LEGACY);

	return mask;
}
796 
797 /*
798  * Return the PHY status complete flag indicating that display can acquire the
799  * PHY ownership. The IOM firmware sets this flag when it's ready to switch
800  * the ownership to display, regardless of what sink is connected (TBT-alt,
801  * DP-alt, legacy or nothing). For TBT-alt sinks the PHY is owned by the TBT
802  * subsystem and so switching the ownership to display is not required.
803  */
static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
	u32 val;

	/* TCSS_DDI_STATUS only needs the display core domain on ADLP. */
	assert_display_core_power_enabled(tc);

	val = intel_de_read(i915, TCSS_DDI_STATUS(tc_port));
	/* All-ones readback means the PHY is in TC-cold. */
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assuming not ready\n",
			    tc->port_name);
		return false;
	}

	return val & TCSS_DDI_STATUS_READY;
}
822 
/*
 * ADLP: take/release PHY ownership via the DDI_BUF_CTL ownership bit.
 * Always succeeds (returns true) since the register is always accessible
 * with the port power enabled.
 */
static bool adlp_tc_phy_take_ownership(struct intel_tc_port *tc,
				       bool take)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum port port = tc->dig_port->base.port;

	assert_tc_port_power_enabled(tc);

	intel_de_rmw(i915, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP,
		     take ? DDI_BUF_CTL_TC_PHY_OWNERSHIP : 0);

	return true;
}
836 
/* ADLP: whether display owns the PHY (DDI_BUF_CTL ownership bit set). */
static bool adlp_tc_phy_is_owned(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum port port = tc->dig_port->base.port;
	u32 val;

	assert_tc_port_power_enabled(tc);

	val = intel_de_read(i915, DDI_BUF_CTL(port));
	return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP;
}
848 
/*
 * ADLP: read out the current PHY mode. The port's DDI-lanes power domain
 * is held for the readout (needed for the DDI_BUF_CTL based ownership
 * check); a long-term TC-cold blocking reference is kept if connected.
 */
static void adlp_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum intel_display_power_domain port_power_domain =
		tc_port_power_domain(tc);
	intel_wakeref_t port_wakeref;

	port_wakeref = intel_display_power_get(i915, port_power_domain);

	tc->mode = tc_phy_get_current_mode(tc);
	if (tc->mode != TC_PORT_DISCONNECTED)
		tc->lock_wakeref = tc_cold_block(tc);

	intel_display_power_put(i915, port_power_domain, port_wakeref);
}
864 
/*
 * ADLP: Connect flow. TBT-alt only needs TC-cold blocked. For legacy/
 * DP-alt the port power is held while taking ownership and checking
 * readiness; a failure in legacy mode is unexpected (warned) but still
 * proceeds. Returns false on failure with all references released.
 */
static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum intel_display_power_domain port_power_domain =
		tc_port_power_domain(tc);
	intel_wakeref_t port_wakeref;

	if (tc->mode == TC_PORT_TBT_ALT) {
		tc->lock_wakeref = tc_cold_block(tc);
		return true;
	}

	port_wakeref = intel_display_power_get(i915, port_power_domain);

	if (!adlp_tc_phy_take_ownership(tc, true) &&
	    !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
		drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership\n",
			    tc->port_name);
		goto out_put_port_power;
	}

	if (!tc_phy_is_ready(tc) &&
	    !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n",
			    tc->port_name);
		goto out_release_phy;
	}

	tc->lock_wakeref = tc_cold_block(tc);

	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
		goto out_unblock_tc_cold;

	intel_display_power_put(i915, port_power_domain, port_wakeref);

	return true;

out_unblock_tc_cold:
	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
out_release_phy:
	adlp_tc_phy_take_ownership(tc, false);
out_put_port_power:
	intel_display_power_put(i915, port_power_domain, port_wakeref);

	return false;
}
911 
/* ADLP: Disconnect flow, undoing adlp_tc_phy_connect(). */
static void adlp_tc_phy_disconnect(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum intel_display_power_domain port_power_domain =
		tc_port_power_domain(tc);
	intel_wakeref_t port_wakeref;

	/* Port power is needed for the DDI_BUF_CTL ownership release. */
	port_wakeref = intel_display_power_get(i915, port_power_domain);

	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));

	switch (tc->mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		adlp_tc_phy_take_ownership(tc, false);
		fallthrough;
	case TC_PORT_TBT_ALT:
		break;
	default:
		MISSING_CASE(tc->mode);
	}

	intel_display_power_put(i915, port_power_domain, port_wakeref);
}
936 
/* ADLP always uses Modular FIA. */
static void adlp_tc_phy_init(struct intel_tc_port *tc)
{
	tc_phy_load_fia_params(tc, true);
}
941 
/* ADLP TypeC PHY hook table. */
static const struct intel_tc_phy_ops adlp_tc_phy_ops = {
	.cold_off_domain = adlp_tc_phy_cold_off_domain,
	.hpd_live_status = adlp_tc_phy_hpd_live_status,
	.is_ready = adlp_tc_phy_is_ready,
	.is_owned = adlp_tc_phy_is_owned,
	.get_hw_state = adlp_tc_phy_get_hw_state,
	.connect = adlp_tc_phy_connect,
	.disconnect = adlp_tc_phy_disconnect,
	.init = adlp_tc_phy_init,
};
952 
953 /*
954  * XELPDP TC PHY handlers
955  * ----------------------
956  */
/*
 * XELPDP: HPD live status from the PICA DP-alt/TBT ISR bits and the PCH
 * legacy HPD bit (the latter only considered on legacy ports). Returns a
 * mask of BIT(TC_PORT_*).
 */
static u32 xelpdp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
	u32 pica_isr_bits = i915->display.hotplug.hpd[hpd_pin];
	u32 pch_isr_bit = i915->display.hotplug.pch_hpd[hpd_pin];
	intel_wakeref_t wakeref;
	u32 pica_isr;
	u32 pch_isr;
	u32 mask = 0;

	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
		pica_isr = intel_de_read(i915, PICAINTERRUPT_ISR);
		pch_isr = intel_de_read(i915, SDEISR);
	}

	if (pica_isr & (pica_isr_bits & XELPDP_DP_ALT_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_DP_ALT);
	if (pica_isr & (pica_isr_bits & XELPDP_TBT_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_TBT_ALT);

	if (tc->legacy_port && (pch_isr & pch_isr_bit))
		mask |= BIT(TC_PORT_LEGACY);

	return mask;
}
984 
/* XELPDP: whether TCSS power is enabled, per PORT_BUF_CTL1 status. */
static bool
xelpdp_tc_phy_tcss_power_is_enabled(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum port port = tc->dig_port->base.port;
	i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);

	assert_tc_cold_blocked(tc);

	return intel_de_read(i915, reg) & XELPDP_TCSS_POWER_STATE;
}
996 
997 static bool
998 xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port *tc, bool enabled)
999 {
1000 	struct drm_i915_private *i915 = tc_to_i915(tc);
1001 
1002 	if (wait_for(xelpdp_tc_phy_tcss_power_is_enabled(tc) == enabled, 5)) {
1003 		drm_dbg_kms(&i915->drm,
1004 			    "Port %s: timeout waiting for TCSS power to get %s\n",
1005 			    enabled ? "enabled" : "disabled",
1006 			    tc->port_name);
1007 		return false;
1008 	}
1009 
1010 	return true;
1011 }
1012 
/* XELPDP: request TCSS power on/off via PORT_BUF_CTL1 (no waiting). */
static void __xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum port port = tc->dig_port->base.port;
	i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(i915, reg);
	if (enable)
		val |= XELPDP_TCSS_POWER_REQUEST;
	else
		val &= ~XELPDP_TCSS_POWER_REQUEST;
	intel_de_write(i915, reg, val);
}
1029 
/*
 * Request the TCSS power state change and wait for it to take effect,
 * rolling the request back on failure. Returns true on success.
 */
static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	__xelpdp_tc_phy_enable_tcss_power(tc, enable);

	/* Powering up also requires the PHY to become ready. */
	if (enable && !tc_phy_wait_for_ready(tc))
		goto out_disable;

	if (!xelpdp_tc_phy_wait_for_tcss_power(tc, enable))
		goto out_disable;

	return true;

out_disable:
	/* Powering must not fail on legacy ports. */
	if (drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY))
		return false;

	/* Nothing to roll back if the disable request timed out. */
	if (!enable)
		return false;

	__xelpdp_tc_phy_enable_tcss_power(tc, false);
	xelpdp_tc_phy_wait_for_tcss_power(tc, false);

	return false;
}
1056 
1057 static void xelpdp_tc_phy_take_ownership(struct intel_tc_port *tc, bool take)
1058 {
1059 	struct drm_i915_private *i915 = tc_to_i915(tc);
1060 	enum port port = tc->dig_port->base.port;
1061 	i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
1062 	u32 val;
1063 
1064 	assert_tc_cold_blocked(tc);
1065 
1066 	val = intel_de_read(i915, reg);
1067 	if (take)
1068 		val |= XELPDP_TC_PHY_OWNERSHIP;
1069 	else
1070 		val &= ~XELPDP_TC_PHY_OWNERSHIP;
1071 	intel_de_write(i915, reg, val);
1072 }
1073 
1074 static bool xelpdp_tc_phy_is_owned(struct intel_tc_port *tc)
1075 {
1076 	struct drm_i915_private *i915 = tc_to_i915(tc);
1077 	enum port port = tc->dig_port->base.port;
1078 	i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
1079 
1080 	assert_tc_cold_blocked(tc);
1081 
1082 	return intel_de_read(i915, reg) & XELPDP_TC_PHY_OWNERSHIP;
1083 }
1084 
/* Read out the current PHY mode, keeping TC-cold blocked if it's connected. */
static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	intel_wakeref_t tc_cold_wref;
	enum intel_display_power_domain domain;

	/* Block TC-cold for the duration of the readout. */
	tc_cold_wref = __tc_cold_block(tc, &domain);

	tc->mode = tc_phy_get_current_mode(tc);
	/* Keep TC-cold blocked for as long as the PHY stays connected. */
	if (tc->mode != TC_PORT_DISCONNECTED)
		tc->lock_wakeref = tc_cold_block(tc);

	/* TCSS power must be enabled whenever display owns the PHY. */
	drm_WARN_ON(&i915->drm,
		    (tc->mode == TC_PORT_DP_ALT || tc->mode == TC_PORT_LEGACY) &&
		    !xelpdp_tc_phy_tcss_power_is_enabled(tc));

	__tc_cold_unblock(tc, domain, tc_cold_wref);
}
1103 
/* Connect the PHY, powering it up and taking ownership as required. */
static bool xelpdp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
	/* Keep TC-cold blocked while the PHY is connected. */
	tc->lock_wakeref = tc_cold_block(tc);

	/* In TBT-alt mode the PHY power/ownership is not managed by display. */
	if (tc->mode == TC_PORT_TBT_ALT)
		return true;

	if (!xelpdp_tc_phy_enable_tcss_power(tc, true))
		goto out_unblock_tccold;

	xelpdp_tc_phy_take_ownership(tc, true);

	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
		goto out_release_phy;

	return true;

out_release_phy:
	/* Unwind in reverse order of the connect steps above. */
	xelpdp_tc_phy_take_ownership(tc, false);
	xelpdp_tc_phy_wait_for_tcss_power(tc, false);

out_unblock_tccold:
	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));

	return false;
}
1130 
/* Disconnect the PHY, undoing what xelpdp_tc_phy_connect() did for the mode. */
static void xelpdp_tc_phy_disconnect(struct intel_tc_port *tc)
{
	switch (tc->mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		/* Release ownership and power before unblocking TC-cold. */
		xelpdp_tc_phy_take_ownership(tc, false);
		xelpdp_tc_phy_enable_tcss_power(tc, false);
		fallthrough;
	case TC_PORT_TBT_ALT:
		tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
		break;
	default:
		MISSING_CASE(tc->mode);
	}
}
1146 
/*
 * PHY handlers for display version >= 14 platforms; the cold-off domain,
 * ready check and init handlers are reused from the TGL/ADL-P variants.
 */
static const struct intel_tc_phy_ops xelpdp_tc_phy_ops = {
	.cold_off_domain = tgl_tc_phy_cold_off_domain,
	.hpd_live_status = xelpdp_tc_phy_hpd_live_status,
	.is_ready = adlp_tc_phy_is_ready,
	.is_owned = xelpdp_tc_phy_is_owned,
	.get_hw_state = xelpdp_tc_phy_get_hw_state,
	.connect = xelpdp_tc_phy_connect,
	.disconnect = xelpdp_tc_phy_disconnect,
	.init = adlp_tc_phy_init,
};
1157 
1158 /*
1159  * Generic TC PHY handlers
1160  * -----------------------
1161  */
1162 static enum intel_display_power_domain
1163 tc_phy_cold_off_domain(struct intel_tc_port *tc)
1164 {
1165 	return tc->phy_ops->cold_off_domain(tc);
1166 }
1167 
1168 static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc)
1169 {
1170 	struct drm_i915_private *i915 = tc_to_i915(tc);
1171 	u32 mask;
1172 
1173 	mask = tc->phy_ops->hpd_live_status(tc);
1174 
1175 	/* The sink can be connected only in a single mode. */
1176 	drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1);
1177 
1178 	return mask;
1179 }
1180 
1181 static bool tc_phy_is_ready(struct intel_tc_port *tc)
1182 {
1183 	return tc->phy_ops->is_ready(tc);
1184 }
1185 
1186 static bool tc_phy_is_owned(struct intel_tc_port *tc)
1187 {
1188 	return tc->phy_ops->is_owned(tc);
1189 }
1190 
1191 static void tc_phy_get_hw_state(struct intel_tc_port *tc)
1192 {
1193 	tc->phy_ops->get_hw_state(tc);
1194 }
1195 
1196 static bool tc_phy_is_ready_and_owned(struct intel_tc_port *tc,
1197 				      bool phy_is_ready, bool phy_is_owned)
1198 {
1199 	struct drm_i915_private *i915 = tc_to_i915(tc);
1200 
1201 	drm_WARN_ON(&i915->drm, phy_is_owned && !phy_is_ready);
1202 
1203 	return phy_is_ready && phy_is_owned;
1204 }
1205 
1206 static bool tc_phy_is_connected(struct intel_tc_port *tc,
1207 				enum icl_port_dpll_id port_pll_type)
1208 {
1209 	struct intel_encoder *encoder = &tc->dig_port->base;
1210 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
1211 	bool phy_is_ready = tc_phy_is_ready(tc);
1212 	bool phy_is_owned = tc_phy_is_owned(tc);
1213 	bool is_connected;
1214 
1215 	if (tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned))
1216 		is_connected = port_pll_type == ICL_PORT_DPLL_MG_PHY;
1217 	else
1218 		is_connected = port_pll_type == ICL_PORT_DPLL_DEFAULT;
1219 
1220 	drm_dbg_kms(&i915->drm,
1221 		    "Port %s: PHY connected: %s (ready: %s, owned: %s, pll_type: %s)\n",
1222 		    tc->port_name,
1223 		    str_yes_no(is_connected),
1224 		    str_yes_no(phy_is_ready),
1225 		    str_yes_no(phy_is_owned),
1226 		    port_pll_type == ICL_PORT_DPLL_DEFAULT ? "tbt" : "non-tbt");
1227 
1228 	return is_connected;
1229 }
1230 
1231 static bool tc_phy_wait_for_ready(struct intel_tc_port *tc)
1232 {
1233 	struct drm_i915_private *i915 = tc_to_i915(tc);
1234 
1235 	if (wait_for(tc_phy_is_ready(tc), 500)) {
1236 		drm_err(&i915->drm, "Port %s: timeout waiting for PHY ready\n",
1237 			tc->port_name);
1238 
1239 		return false;
1240 	}
1241 
1242 	return true;
1243 }
1244 
1245 static enum tc_port_mode
1246 hpd_mask_to_tc_mode(u32 live_status_mask)
1247 {
1248 	if (live_status_mask)
1249 		return fls(live_status_mask) - 1;
1250 
1251 	return TC_PORT_DISCONNECTED;
1252 }
1253 
1254 static enum tc_port_mode
1255 tc_phy_hpd_live_mode(struct intel_tc_port *tc)
1256 {
1257 	u32 live_status_mask = tc_phy_hpd_live_status(tc);
1258 
1259 	return hpd_mask_to_tc_mode(live_status_mask);
1260 }
1261 
/* Resolve the current mode when display owns a ready PHY. */
static enum tc_port_mode
get_tc_mode_in_phy_owned_state(struct intel_tc_port *tc,
			       enum tc_port_mode live_mode)
{
	switch (live_mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		return live_mode;
	default:
		MISSING_CASE(live_mode);
		fallthrough;
	case TC_PORT_TBT_ALT:
	case TC_PORT_DISCONNECTED:
		/*
		 * PHY owned without a matching live HPD: fall back to the
		 * mode implied by the port type.
		 */
		if (tc->legacy_port)
			return TC_PORT_LEGACY;
		else
			return TC_PORT_DP_ALT;
	}
}
1281 
/* Resolve the current mode when display doesn't own the PHY. */
static enum tc_port_mode
get_tc_mode_in_phy_not_owned_state(struct intel_tc_port *tc,
				   enum tc_port_mode live_mode)
{
	switch (live_mode) {
	case TC_PORT_LEGACY:
		/* A legacy HPD without PHY ownership: not connected yet. */
		return TC_PORT_DISCONNECTED;
	case TC_PORT_DP_ALT:
	case TC_PORT_TBT_ALT:
		return TC_PORT_TBT_ALT;
	default:
		MISSING_CASE(live_mode);
		fallthrough;
	case TC_PORT_DISCONNECTED:
		if (tc->legacy_port)
			return TC_PORT_DISCONNECTED;
		else
			return TC_PORT_TBT_ALT;
	}
}
1302 
/*
 * Determine the mode the PHY is currently in, based on its ready/owned
 * state and the live HPD status.
 */
static enum tc_port_mode
tc_phy_get_current_mode(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum tc_port_mode live_mode = tc_phy_hpd_live_mode(tc);
	bool phy_is_ready;
	bool phy_is_owned;
	enum tc_port_mode mode;

	/*
	 * For legacy ports the IOM firmware initializes the PHY during boot-up
	 * and system resume whether or not a sink is connected. Wait here for
	 * the initialization to get ready.
	 */
	if (tc->legacy_port)
		tc_phy_wait_for_ready(tc);

	phy_is_ready = tc_phy_is_ready(tc);
	phy_is_owned = tc_phy_is_owned(tc);

	if (!tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) {
		mode = get_tc_mode_in_phy_not_owned_state(tc, live_mode);
	} else {
		/* An owned PHY can't have a TBT-alt live HPD. */
		drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT);
		mode = get_tc_mode_in_phy_owned_state(tc, live_mode);
	}

	drm_dbg_kms(&i915->drm,
		    "Port %s: PHY mode: %s (ready: %s, owned: %s, HPD: %s)\n",
		    tc->port_name,
		    tc_port_mode_name(mode),
		    str_yes_no(phy_is_ready),
		    str_yes_no(phy_is_owned),
		    tc_port_mode_name(live_mode));

	return mode;
}
1340 
1341 static enum tc_port_mode default_tc_mode(struct intel_tc_port *tc)
1342 {
1343 	if (tc->legacy_port)
1344 		return TC_PORT_LEGACY;
1345 
1346 	return TC_PORT_TBT_ALT;
1347 }
1348 
1349 static enum tc_port_mode
1350 hpd_mask_to_target_mode(struct intel_tc_port *tc, u32 live_status_mask)
1351 {
1352 	enum tc_port_mode mode = hpd_mask_to_tc_mode(live_status_mask);
1353 
1354 	if (mode != TC_PORT_DISCONNECTED)
1355 		return mode;
1356 
1357 	return default_tc_mode(tc);
1358 }
1359 
1360 static enum tc_port_mode
1361 tc_phy_get_target_mode(struct intel_tc_port *tc)
1362 {
1363 	u32 live_status_mask = tc_phy_hpd_live_status(tc);
1364 
1365 	return hpd_mask_to_target_mode(tc, live_status_mask);
1366 }
1367 
/* Connect the PHY in the mode matching the live HPD status. */
static void tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 live_status_mask = tc_phy_hpd_live_status(tc);
	bool connected;

	tc_port_fixup_legacy_flag(tc, live_status_mask);

	tc->mode = hpd_mask_to_target_mode(tc, live_status_mask);

	/* Retry in the port's default mode if the target mode didn't work. */
	connected = tc->phy_ops->connect(tc, required_lanes);
	if (!connected && tc->mode != default_tc_mode(tc)) {
		tc->mode = default_tc_mode(tc);
		connected = tc->phy_ops->connect(tc, required_lanes);
	}

	drm_WARN_ON(&i915->drm, !connected);
}
1386 
1387 static void tc_phy_disconnect(struct intel_tc_port *tc)
1388 {
1389 	if (tc->mode != TC_PORT_DISCONNECTED) {
1390 		tc->phy_ops->disconnect(tc);
1391 		tc->mode = TC_PORT_DISCONNECTED;
1392 	}
1393 }
1394 
/* One-time platform-specific PHY initialization, under the port lock. */
static void tc_phy_init(struct intel_tc_port *tc)
{
	mutex_lock(&tc->lock);
	tc->phy_ops->init(tc);
	mutex_unlock(&tc->lock);
}
1401 
/* Disconnect the PHY and - unless @force_disconnect - reconnect it. */
static void intel_tc_port_reset_mode(struct intel_tc_port *tc,
				     int required_lanes, bool force_disconnect)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum tc_port_mode old_tc_mode = tc->mode;

	intel_display_power_flush_work(i915);
	/* AUX power must already be off, unless it's needed to block TC-cold. */
	if (!intel_tc_cold_requires_aux_pw(dig_port)) {
		enum intel_display_power_domain aux_domain;
		bool aux_powered;

		aux_domain = intel_aux_power_domain(dig_port);
		aux_powered = intel_display_power_is_enabled(i915, aux_domain);
		drm_WARN_ON(&i915->drm, aux_powered);
	}

	tc_phy_disconnect(tc);
	if (!force_disconnect)
		tc_phy_connect(tc, required_lanes);

	drm_dbg_kms(&i915->drm, "Port %s: TC port mode reset (%s -> %s)\n",
		    tc->port_name,
		    tc_port_mode_name(old_tc_mode),
		    tc_port_mode_name(tc->mode));
}
1428 
1429 static bool intel_tc_port_needs_reset(struct intel_tc_port *tc)
1430 {
1431 	return tc_phy_get_target_mode(tc) != tc->mode;
1432 }
1433 
1434 static void intel_tc_port_update_mode(struct intel_tc_port *tc,
1435 				      int required_lanes, bool force_disconnect)
1436 {
1437 	if (force_disconnect ||
1438 	    intel_tc_port_needs_reset(tc))
1439 		intel_tc_port_reset_mode(tc, required_lanes, force_disconnect);
1440 }
1441 
/* Take a reference pinning the current TC mode; caller holds tc->lock. */
static void __intel_tc_port_get_link(struct intel_tc_port *tc)
{
	tc->link_refcount++;
}
1446 
/* Drop a TC mode reference; caller holds tc->lock. */
static void __intel_tc_port_put_link(struct intel_tc_port *tc)
{
	tc->link_refcount--;
}
1451 
1452 static bool tc_port_is_enabled(struct intel_tc_port *tc)
1453 {
1454 	struct drm_i915_private *i915 = tc_to_i915(tc);
1455 	struct intel_digital_port *dig_port = tc->dig_port;
1456 
1457 	assert_tc_port_power_enabled(tc);
1458 
1459 	return intel_de_read(i915, DDI_BUF_CTL(dig_port->base.port)) &
1460 	       DDI_BUF_CTL_ENABLE;
1461 }
1462 
1463 /**
1464  * intel_tc_port_init_mode: Read out HW state and init the given port's TypeC mode
1465  * @dig_port: digital port
1466  *
1467  * Read out the HW state and initialize the TypeC mode of @dig_port. The mode
1468  * will be locked until intel_tc_port_sanitize_mode() is called.
1469  */
1470 void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
1471 {
1472 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
1473 	struct intel_tc_port *tc = to_tc_port(dig_port);
1474 	bool update_mode = false;
1475 
1476 	mutex_lock(&tc->lock);
1477 
1478 	drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED);
1479 	drm_WARN_ON(&i915->drm, tc->lock_wakeref);
1480 	drm_WARN_ON(&i915->drm, tc->link_refcount);
1481 
1482 	tc_phy_get_hw_state(tc);
1483 	/*
1484 	 * Save the initial mode for the state check in
1485 	 * intel_tc_port_sanitize_mode().
1486 	 */
1487 	tc->init_mode = tc->mode;
1488 
1489 	/*
1490 	 * The PHY needs to be connected for AUX to work during HW readout and
1491 	 * MST topology resume, but the PHY mode can only be changed if the
1492 	 * port is disabled.
1493 	 *
1494 	 * An exception is the case where BIOS leaves the PHY incorrectly
1495 	 * disconnected on an enabled legacy port. Work around that by
1496 	 * connecting the PHY even though the port is enabled. This doesn't
1497 	 * cause a problem as the PHY ownership state is ignored by the
1498 	 * IOM/TCSS firmware (only display can own the PHY in that case).
1499 	 */
1500 	if (!tc_port_is_enabled(tc)) {
1501 		update_mode = true;
1502 	} else if (tc->mode == TC_PORT_DISCONNECTED) {
1503 		drm_WARN_ON(&i915->drm, !tc->legacy_port);
1504 		drm_err(&i915->drm,
1505 			"Port %s: PHY disconnected on enabled port, connecting it\n",
1506 			tc->port_name);
1507 		update_mode = true;
1508 	}
1509 
1510 	if (update_mode)
1511 		intel_tc_port_update_mode(tc, 1, false);
1512 
1513 	/* Prevent changing tc->mode until intel_tc_port_sanitize_mode() is called. */
1514 	__intel_tc_port_get_link(tc);
1515 
1516 	mutex_unlock(&tc->lock);
1517 }
1518 
/* Number of active links on the port, sanity checking the PHY/PLL state. */
static bool tc_port_has_active_links(struct intel_tc_port *tc,
				     const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum icl_port_dpll_id pll_type = ICL_PORT_DPLL_DEFAULT;
	int active_links = 0;

	if (dig_port->dp.is_mst) {
		/* TODO: get the PLL type for MST, once HW readout is done for it. */
		active_links = intel_dp_mst_encoder_active_links(dig_port);
	} else if (crtc_state && crtc_state->hw.active) {
		pll_type = intel_ddi_port_pll_type(&dig_port->base, crtc_state);
		active_links = 1;
	}

	/* An enabled port must have its PHY connected with a matching PLL. */
	if (active_links && !tc_phy_is_connected(tc, pll_type))
		drm_err(&i915->drm,
			"Port %s: PHY disconnected with %d active link(s)\n",
			tc->port_name, active_links);

	return active_links;
}
1542 
1543 /**
1544  * intel_tc_port_sanitize_mode: Sanitize the given port's TypeC mode
1545  * @dig_port: digital port
1546  * @crtc_state: atomic state of CRTC connected to @dig_port
1547  *
1548  * Sanitize @dig_port's TypeC mode wrt. the encoder's state right after driver
1549  * loading and system resume:
1550  * If the encoder is enabled keep the TypeC mode/PHY connected state locked until
1551  * the encoder is disabled.
1552  * If the encoder is disabled make sure the PHY is disconnected.
1553  * @crtc_state is valid if @dig_port is enabled, NULL otherwise.
1554  */
1555 void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
1556 				 const struct intel_crtc_state *crtc_state)
1557 {
1558 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
1559 	struct intel_tc_port *tc = to_tc_port(dig_port);
1560 
1561 	mutex_lock(&tc->lock);
1562 
1563 	drm_WARN_ON(&i915->drm, tc->link_refcount != 1);
1564 	if (!tc_port_has_active_links(tc, crtc_state)) {
1565 		/*
1566 		 * TBT-alt is the default mode in any case the PHY ownership is not
1567 		 * held (regardless of the sink's connected live state), so
1568 		 * we'll just switch to disconnected mode from it here without
1569 		 * a note.
1570 		 */
1571 		if (tc->init_mode != TC_PORT_TBT_ALT &&
1572 		    tc->init_mode != TC_PORT_DISCONNECTED)
1573 			drm_dbg_kms(&i915->drm,
1574 				    "Port %s: PHY left in %s mode on disabled port, disconnecting it\n",
1575 				    tc->port_name,
1576 				    tc_port_mode_name(tc->init_mode));
1577 		tc_phy_disconnect(tc);
1578 		__intel_tc_port_put_link(tc);
1579 	}
1580 
1581 	drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
1582 		    tc->port_name,
1583 		    tc_port_mode_name(tc->mode));
1584 
1585 	mutex_unlock(&tc->lock);
1586 }
1587 
1588 /*
1589  * The type-C ports are different because even when they are connected, they may
1590  * not be available/usable by the graphics driver: see the comment on
1591  * icl_tc_phy_connect(). So in our driver instead of adding the additional
1592  * concept of "usable" and make everything check for "connected and usable" we
1593  * define a port as "connected" when it is not only connected, but also when it
1594  * is usable by the rest of the driver. That maintains the old assumption that
1595  * connected ports are usable, and avoids exposing to the users objects they
1596  * can't really use.
1597  */
1598 bool intel_tc_port_connected(struct intel_encoder *encoder)
1599 {
1600 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1601 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
1602 	struct intel_tc_port *tc = to_tc_port(dig_port);
1603 	u32 mask = ~0;
1604 
1605 	drm_WARN_ON(&i915->drm, !intel_tc_port_ref_held(dig_port));
1606 
1607 	if (tc->mode != TC_PORT_DISCONNECTED)
1608 		mask = BIT(tc->mode);
1609 
1610 	return tc_phy_hpd_live_status(tc) & mask;
1611 }
1612 
1613 static bool __intel_tc_port_link_needs_reset(struct intel_tc_port *tc)
1614 {
1615 	bool ret;
1616 
1617 	mutex_lock(&tc->lock);
1618 
1619 	ret = tc->link_refcount &&
1620 	      tc->mode == TC_PORT_DP_ALT &&
1621 	      intel_tc_port_needs_reset(tc);
1622 
1623 	mutex_unlock(&tc->lock);
1624 
1625 	return ret;
1626 }
1627 
1628 bool intel_tc_port_link_needs_reset(struct intel_digital_port *dig_port)
1629 {
1630 	if (!intel_encoder_is_tc(&dig_port->base))
1631 		return false;
1632 
1633 	return __intel_tc_port_link_needs_reset(to_tc_port(dig_port));
1634 }
1635 
/* Commit an atomic state forcing a modeset on all pipes driven by the port. */
static int reset_link_commit(struct intel_tc_port *tc,
			     struct intel_atomic_state *state,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	struct intel_dp *intel_dp = enc_to_intel_dp(&dig_port->base);
	struct intel_crtc *crtc;
	u8 pipe_mask;
	int ret;

	ret = drm_modeset_lock(&i915->drm.mode_config.connection_mutex, ctx);
	if (ret)
		return ret;

	ret = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask);
	if (ret)
		return ret;

	/* Nothing to reset on a port without active pipes. */
	if (!pipe_mask)
		return 0;

	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) {
		struct intel_crtc_state *crtc_state;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		/* Force a modeset on this pipe. */
		crtc_state->uapi.connectors_changed = true;
	}

	/* Recheck, things may have changed while acquiring the locks above. */
	if (!__intel_tc_port_link_needs_reset(tc))
		return 0;

	return drm_atomic_commit(&state->base);
}
1673 
/* Reset the port's link via an internal atomic commit. */
static int reset_link(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *_state;
	struct intel_atomic_state *state;
	int ret;

	_state = drm_atomic_state_alloc(&i915->drm);
	if (!_state)
		return -ENOMEM;

	state = to_intel_atomic_state(_state);
	/* Internal commit, not initiated by userspace. */
	state->internal = true;

	/* Retry the commit on modeset lock contention. */
	intel_modeset_lock_ctx_retry(&ctx, state, 0, ret)
		ret = reset_link_commit(tc, state, &ctx);

	drm_atomic_state_put(&state->base);

	return ret;
}
1696 
/* Delayed work resetting the link of a DP-alt port whose sink disconnected. */
static void intel_tc_port_link_reset_work(struct work_struct *work)
{
	struct intel_tc_port *tc =
		container_of(work, struct intel_tc_port, link_reset_work.work);
	struct drm_i915_private *i915 = tc_to_i915(tc);
	int ret;

	/* The state may have changed since the work was queued. */
	if (!__intel_tc_port_link_needs_reset(tc))
		return;

	mutex_lock(&i915->drm.mode_config.mutex);

	drm_dbg_kms(&i915->drm,
		    "Port %s: TypeC DP-alt sink disconnected, resetting link\n",
		    tc->port_name);
	ret = reset_link(tc);
	drm_WARN_ON(&i915->drm, ret);

	mutex_unlock(&i915->drm.mode_config.mutex);
}
1717 
1718 bool intel_tc_port_link_reset(struct intel_digital_port *dig_port)
1719 {
1720 	if (!intel_tc_port_link_needs_reset(dig_port))
1721 		return false;
1722 
1723 	queue_delayed_work(system_unbound_wq,
1724 			   &to_tc_port(dig_port)->link_reset_work,
1725 			   msecs_to_jiffies(2000));
1726 
1727 	return true;
1728 }
1729 
1730 void intel_tc_port_link_cancel_reset_work(struct intel_digital_port *dig_port)
1731 {
1732 	struct intel_tc_port *tc = to_tc_port(dig_port);
1733 
1734 	if (!intel_encoder_is_tc(&dig_port->base))
1735 		return;
1736 
1737 	cancel_delayed_work(&tc->link_reset_work);
1738 }
1739 
/* Lock the port, updating its mode to the current target mode if unpinned. */
static void __intel_tc_port_lock(struct intel_tc_port *tc,
				 int required_lanes)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	mutex_lock(&tc->lock);

	/* The PHY will stay connected, no need for the delayed disconnect. */
	cancel_delayed_work(&tc->disconnect_phy_work);

	/* The mode can only change while no active link pins it. */
	if (!tc->link_refcount)
		intel_tc_port_update_mode(tc, required_lanes,
					  false);

	drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_DISCONNECTED);
	drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_TBT_ALT &&
				!tc_phy_is_owned(tc));
}
1757 
void intel_tc_port_lock(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	/* Lock with the minimum (1 lane) requirement. */
	__intel_tc_port_lock(tc, 1);
}
1762 
1763 /*
1764  * Disconnect the given digital port from its TypeC PHY (handing back the
1765  * control of the PHY to the TypeC subsystem). This will happen in a delayed
1766  * manner after each aux transactions and modeset disables.
1767  */
1768 static void intel_tc_port_disconnect_phy_work(struct work_struct *work)
1769 {
1770 	struct intel_tc_port *tc =
1771 		container_of(work, struct intel_tc_port, disconnect_phy_work.work);
1772 
1773 	mutex_lock(&tc->lock);
1774 
1775 	if (!tc->link_refcount)
1776 		intel_tc_port_update_mode(tc, 1, true);
1777 
1778 	mutex_unlock(&tc->lock);
1779 }
1780 
1781 /**
1782  * intel_tc_port_flush_work: flush the work disconnecting the PHY
1783  * @dig_port: digital port
1784  *
1785  * Flush the delayed work disconnecting an idle PHY.
1786  */
1787 static void intel_tc_port_flush_work(struct intel_digital_port *dig_port)
1788 {
1789 	flush_delayed_work(&to_tc_port(dig_port)->disconnect_phy_work);
1790 }
1791 
/* Stop any pending link reset and disconnect an idle PHY before suspend. */
void intel_tc_port_suspend(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	cancel_delayed_work_sync(&tc->link_reset_work);
	intel_tc_port_flush_work(dig_port);
}
1799 
1800 void intel_tc_port_unlock(struct intel_digital_port *dig_port)
1801 {
1802 	struct intel_tc_port *tc = to_tc_port(dig_port);
1803 
1804 	if (!tc->link_refcount && tc->mode != TC_PORT_DISCONNECTED)
1805 		queue_delayed_work(system_unbound_wq, &tc->disconnect_phy_work,
1806 				   msecs_to_jiffies(1000));
1807 
1808 	mutex_unlock(&tc->lock);
1809 }
1810 
1811 bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
1812 {
1813 	struct intel_tc_port *tc = to_tc_port(dig_port);
1814 
1815 	return mutex_is_locked(&tc->lock) ||
1816 	       tc->link_refcount;
1817 }
1818 
/* Connect the PHY as needed for @required_lanes and pin the current mode. */
void intel_tc_port_get_link(struct intel_digital_port *dig_port,
			    int required_lanes)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	__intel_tc_port_lock(tc, required_lanes);
	__intel_tc_port_get_link(tc);
	intel_tc_port_unlock(dig_port);
}
1828 
/* Drop a link reference and synchronously disconnect an idle PHY. */
void intel_tc_port_put_link(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	intel_tc_port_lock(dig_port);
	__intel_tc_port_put_link(tc);
	intel_tc_port_unlock(dig_port);

	/*
	 * The firmware will not update the HPD status of other TypeC ports
	 * that are active in DP-alt mode with their sink disconnected, until
	 * this port is disabled and its PHY gets disconnected. Make sure this
	 * happens in a timely manner by disconnecting the PHY synchronously.
	 */
	intel_tc_port_flush_work(dig_port);
}
1845 
/*
 * Allocate and initialize the TypeC port state for @dig_port and read out
 * its initial mode. Returns 0 on success or a negative error code.
 */
int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc;
	enum port port = dig_port->base.port;
	enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);

	if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE))
		return -EINVAL;

	tc = kzalloc(sizeof(*tc), GFP_KERNEL);
	if (!tc)
		return -ENOMEM;

	dig_port->tc = tc;
	tc->dig_port = dig_port;

	/* Select the PHY handlers matching the display version. */
	if (DISPLAY_VER(i915) >= 14)
		tc->phy_ops = &xelpdp_tc_phy_ops;
	else if (DISPLAY_VER(i915) >= 13)
		tc->phy_ops = &adlp_tc_phy_ops;
	else if (DISPLAY_VER(i915) >= 12)
		tc->phy_ops = &tgl_tc_phy_ops;
	else
		tc->phy_ops = &icl_tc_phy_ops;

	tc->port_name = kasprintf(GFP_KERNEL, "%c/TC#%d", port_name(port),
				  tc_port + 1);
	if (!tc->port_name) {
		kfree(tc);
		return -ENOMEM;
	}

	mutex_init(&tc->lock);
	/* TODO: Combine the two works */
	INIT_DELAYED_WORK(&tc->disconnect_phy_work, intel_tc_port_disconnect_phy_work);
	INIT_DELAYED_WORK(&tc->link_reset_work, intel_tc_port_link_reset_work);
	tc->legacy_port = is_legacy;
	tc->mode = TC_PORT_DISCONNECTED;
	tc->link_refcount = 0;

	tc_phy_init(tc);

	intel_tc_port_init_mode(dig_port);

	return 0;
}
1893 
1894 void intel_tc_port_cleanup(struct intel_digital_port *dig_port)
1895 {
1896 	intel_tc_port_suspend(dig_port);
1897 
1898 	kfree(dig_port->tc->port_name);
1899 	kfree(dig_port->tc);
1900 	dig_port->tc = NULL;
1901 }
1902