xref: /linux/drivers/gpu/drm/i915/display/intel_tc.c (revision eb01fe7abbe2d0b38824d2a93fdb4cc3eaf2ccc1)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5 
6 #include "i915_drv.h"
7 #include "i915_reg.h"
8 #include "intel_atomic.h"
9 #include "intel_cx0_phy_regs.h"
10 #include "intel_ddi.h"
11 #include "intel_de.h"
12 #include "intel_display.h"
13 #include "intel_display_driver.h"
14 #include "intel_display_power_map.h"
15 #include "intel_display_types.h"
16 #include "intel_dkl_phy_regs.h"
17 #include "intel_dp.h"
18 #include "intel_dp_mst.h"
19 #include "intel_mg_phy_regs.h"
20 #include "intel_modeset_lock.h"
21 #include "intel_tc.h"
22 
23 #define DP_PIN_ASSIGNMENT_C	0x3
24 #define DP_PIN_ASSIGNMENT_D	0x4
25 #define DP_PIN_ASSIGNMENT_E	0x5
26 
/* The mode a TypeC PHY can be in wrt. the display engine. */
enum tc_port_mode {
	TC_PORT_DISCONNECTED,	/* no sink detected / PHY not connected */
	TC_PORT_TBT_ALT,	/* Thunderbolt alternate mode: PHY owned by the TBT subsystem */
	TC_PORT_DP_ALT,		/* DisplayPort alternate mode: display owns the PHY */
	TC_PORT_LEGACY,		/* static/legacy connector wired to the port */
};
33 
34 struct intel_tc_port;
35 
/* Per-platform hooks abstracting the TypeC PHY access/ownership protocol. */
struct intel_tc_phy_ops {
	/* Power domain that blocks the PHY's TC-cold power state. */
	enum intel_display_power_domain (*cold_off_domain)(struct intel_tc_port *tc);
	/* Bitmask of BIT(TC_PORT_*) modes with an asserted HPD live state. */
	u32 (*hpd_live_status)(struct intel_tc_port *tc);
	/* PHY status-complete/ready flag, set by the IOM/TCSS firmware. */
	bool (*is_ready)(struct intel_tc_port *tc);
	/* Whether display currently owns the PHY. */
	bool (*is_owned)(struct intel_tc_port *tc);
	/* Read out tc->mode and acquire the matching power references. */
	void (*get_hw_state)(struct intel_tc_port *tc);
	/* Take PHY ownership for tc->mode; false on failure. */
	bool (*connect)(struct intel_tc_port *tc, int required_lanes);
	/* Release PHY ownership and the power references connect() took. */
	void (*disconnect)(struct intel_tc_port *tc);
	/* One-time init: FIA parameters etc. */
	void (*init)(struct intel_tc_port *tc);
};
46 
/* State tracking for one TypeC port. */
struct intel_tc_port {
	struct intel_digital_port *dig_port;

	const struct intel_tc_phy_ops *phy_ops;

	struct mutex lock;	/* protects the TypeC port mode */
	/* Wakeref for the TC-cold blocking power domain held while connected. */
	intel_wakeref_t lock_wakeref;
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	/* Domain lock_wakeref was taken on, to catch mismatched put. */
	enum intel_display_power_domain lock_power_domain;
#endif
	struct delayed_work disconnect_phy_work;
	struct delayed_work link_reset_work;
	int link_refcount;
	/* Set from VBT; may be fixed up from the HPD live state. */
	bool legacy_port:1;
	const char *port_name;
	enum tc_port_mode mode;
	/* Mode read out at driver load/resume time. */
	enum tc_port_mode init_mode;
	enum phy_fia phy_fia;
	u8 phy_fia_idx;
};
67 
68 static enum intel_display_power_domain
69 tc_phy_cold_off_domain(struct intel_tc_port *);
70 static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc);
71 static bool tc_phy_is_ready(struct intel_tc_port *tc);
72 static bool tc_phy_wait_for_ready(struct intel_tc_port *tc);
73 static enum tc_port_mode tc_phy_get_current_mode(struct intel_tc_port *tc);
74 
75 static const char *tc_port_mode_name(enum tc_port_mode mode)
76 {
77 	static const char * const names[] = {
78 		[TC_PORT_DISCONNECTED] = "disconnected",
79 		[TC_PORT_TBT_ALT] = "tbt-alt",
80 		[TC_PORT_DP_ALT] = "dp-alt",
81 		[TC_PORT_LEGACY] = "legacy",
82 	};
83 
84 	if (WARN_ON(mode >= ARRAY_SIZE(names)))
85 		mode = TC_PORT_DISCONNECTED;
86 
87 	return names[mode];
88 }
89 
/* Return the TypeC port state attached to @dig_port. */
static struct intel_tc_port *to_tc_port(struct intel_digital_port *dig_port)
{
	return dig_port->tc;
}
94 
/* Return the i915 device a TypeC port belongs to. */
static struct drm_i915_private *tc_to_i915(struct intel_tc_port *tc)
{
	return to_i915(tc->dig_port->base.base.dev);
}
99 
100 static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port,
101 				  enum tc_port_mode mode)
102 {
103 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
104 	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
105 	struct intel_tc_port *tc = to_tc_port(dig_port);
106 
107 	return intel_phy_is_tc(i915, phy) && tc->mode == mode;
108 }
109 
/* Return whether @dig_port is a TypeC port in TBT-alt mode. */
bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_TBT_ALT);
}
114 
/* Return whether @dig_port is a TypeC port in DP-alt mode. */
bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_DP_ALT);
}
119 
/* Return whether @dig_port is a TypeC port in legacy mode. */
bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_LEGACY);
}
124 
/*
 * Return whether HPD deassertion glitches are filtered for @dig_port:
 * true for TypeC ports that are not legacy ports.
 */
bool intel_tc_port_handles_hpd_glitches(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
	struct intel_tc_port *tc = to_tc_port(dig_port);

	return intel_phy_is_tc(i915, phy) && !tc->legacy_port;
}
133 
134 /*
135  * The display power domains used for TC ports depending on the
136  * platform and TC mode (legacy, DP-alt, TBT):
137  *
138  * POWER_DOMAIN_DISPLAY_CORE:
139  * --------------------------
140  * ADLP/all modes:
141  *   - TCSS/IOM access for PHY ready state.
142  * ADLP+/all modes:
143  *   - DE/north-,south-HPD ISR access for HPD live state.
144  *
145  * POWER_DOMAIN_PORT_DDI_LANES_<port>:
146  * -----------------------------------
147  * ICL+/all modes:
148  *   - DE/DDI_BUF access for port enabled state.
149  * ADLP/all modes:
150  *   - DE/DDI_BUF access for PHY owned state.
151  *
152  * POWER_DOMAIN_AUX_USBC<TC port index>:
153  * -------------------------------------
154  * ICL/legacy mode:
155  *   - TCSS/IOM,FIA access for PHY ready, owned and HPD live state
156  *   - TCSS/PHY: block TC-cold power state for using the PHY AUX and
157  *     main lanes.
158  * ADLP/legacy, DP-alt modes:
159  *   - TCSS/PHY: block TC-cold power state for using the PHY AUX and
160  *     main lanes.
161  *
162  * POWER_DOMAIN_TC_COLD_OFF:
163  * -------------------------
164  * ICL/DP-alt, TBT mode:
165  *   - TCSS/TBT: block TC-cold power state for using the (direct or
166  *     TBT DP-IN) AUX and main lanes.
167  *
168  * TGL/all modes:
169  *   - TCSS/IOM,FIA access for PHY ready, owned and HPD live state
170  *   - TCSS/PHY: block TC-cold power state for using the (direct or
171  *     TBT DP-IN) AUX and main lanes.
172  *
173  * ADLP/TBT mode:
174  *   - TCSS/TBT: block TC-cold power state for using the (TBT DP-IN)
175  *     AUX and main lanes.
176  *
177  * XELPDP+/all modes:
178  *   - TCSS/IOM,FIA access for PHY ready, owned state
179  *   - TCSS/PHY: block TC-cold power state for using the (direct or
180  *     TBT DP-IN) AUX and main lanes.
181  */
/*
 * Return whether blocking the port's TC-cold state requires the AUX power
 * domain (vs. POWER_DOMAIN_TC_COLD_OFF). See the power domain comment above.
 */
bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc = to_tc_port(dig_port);

	return tc_phy_cold_off_domain(tc) ==
	       intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
}
190 
/*
 * Block the PHY's TC-cold power state, returning the wakeref and storing
 * the domain it was taken on in @domain for the matching __tc_cold_unblock().
 */
static intel_wakeref_t
__tc_cold_block(struct intel_tc_port *tc, enum intel_display_power_domain *domain)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	*domain = tc_phy_cold_off_domain(tc);

	return intel_display_power_get(i915, *domain);
}
200 
/*
 * Block TC-cold, remembering (in debug builds) the domain used so that
 * tc_cold_unblock() can verify the mode didn't change in between.
 */
static intel_wakeref_t
tc_cold_block(struct intel_tc_port *tc)
{
	enum intel_display_power_domain domain;
	intel_wakeref_t wakeref;

	wakeref = __tc_cold_block(tc, &domain);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	tc->lock_power_domain = domain;
#endif
	return wakeref;
}
213 
/* Release a TC-cold blocking reference taken by __tc_cold_block(). */
static void
__tc_cold_unblock(struct intel_tc_port *tc, enum intel_display_power_domain domain,
		  intel_wakeref_t wakeref)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	intel_display_power_put(i915, domain, wakeref);
}
222 
/*
 * Release a TC-cold blocking reference taken by tc_cold_block(), warning
 * (in debug builds) if the cold-off domain changed since it was taken.
 */
static void
tc_cold_unblock(struct intel_tc_port *tc, intel_wakeref_t wakeref)
{
	enum intel_display_power_domain domain = tc_phy_cold_off_domain(tc);

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	drm_WARN_ON(&tc_to_i915(tc)->drm, tc->lock_power_domain != domain);
#endif
	__tc_cold_unblock(tc, domain, wakeref);
}
233 
/* WARN if the display core power domain is not enabled. */
static void
assert_display_core_power_enabled(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	drm_WARN_ON(&i915->drm,
		    !intel_display_power_is_enabled(i915, POWER_DOMAIN_DISPLAY_CORE));
}
242 
243 static void
244 assert_tc_cold_blocked(struct intel_tc_port *tc)
245 {
246 	struct drm_i915_private *i915 = tc_to_i915(tc);
247 	bool enabled;
248 
249 	enabled = intel_display_power_is_enabled(i915,
250 						 tc_phy_cold_off_domain(tc));
251 	drm_WARN_ON(&i915->drm, !enabled);
252 }
253 
/* Return the DDI-lanes power domain for the port's TC index. */
static enum intel_display_power_domain
tc_port_power_domain(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum tc_port tc_port = intel_port_to_tc(i915, tc->dig_port->base.port);

	/* The TC1..TCn domains are contiguous, so index from TC1. */
	return POWER_DOMAIN_PORT_DDI_LANES_TC1 + tc_port - TC_PORT_1;
}
262 
/* WARN if the port's DDI-lanes power domain is not enabled. */
static void
assert_tc_port_power_enabled(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	drm_WARN_ON(&i915->drm,
		    !intel_display_power_is_enabled(i915, tc_port_power_domain(tc)));
}
271 
/*
 * Return the DP lane assignment mask for the port, read from the FIA.
 * An all-ones readback indicates the PHY is in TC-cold (register inaccessible).
 */
static u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	u32 lane_mask;

	lane_mask = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia));

	drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
	assert_tc_cold_blocked(tc);

	/* Extract this port's field from the shared FIA register. */
	lane_mask &= DP_LANE_ASSIGNMENT_MASK(tc->phy_fia_idx);
	return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(tc->phy_fia_idx);
}
286 
/*
 * Return the USB-C pin assignment for the port, read from the FIA.
 * An all-ones readback indicates the PHY is in TC-cold (register inaccessible).
 */
u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	u32 pin_mask;

	pin_mask = intel_de_read(i915, PORT_TX_DFLEXPA1(tc->phy_fia));

	drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
	assert_tc_cold_blocked(tc);

	return (pin_mask & DP_PIN_ASSIGNMENT_MASK(tc->phy_fia_idx)) >>
	       DP_PIN_ASSIGNMENT_SHIFT(tc->phy_fia_idx);
}
301 
/*
 * LNL+: derive the max DP lane count from the pin assignment reported
 * in TCSS_DDI_STATUS.
 */
static int lnl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
	intel_wakeref_t wakeref;
	u32 val, pin_assignment;

	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		val = intel_de_read(i915, TCSS_DDI_STATUS(tc_port));

	pin_assignment =
		REG_FIELD_GET(TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK, val);

	switch (pin_assignment) {
	default:
		MISSING_CASE(pin_assignment);
		fallthrough;
	case DP_PIN_ASSIGNMENT_D:
		/* Pin assignment D carries 2 DP lanes (2 lanes used for USB). */
		return 2;
	case DP_PIN_ASSIGNMENT_C:
	case DP_PIN_ASSIGNMENT_E:
		/* Pin assignments C/E carry all 4 lanes for DP. */
		return 4;
	}
}
326 
/*
 * MTL: derive the max DP lane count from the pin assignment reported
 * by the FIA.
 */
static int mtl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	intel_wakeref_t wakeref;
	u32 pin_mask;

	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		pin_mask = intel_tc_port_get_pin_assignment_mask(dig_port);

	switch (pin_mask) {
	default:
		MISSING_CASE(pin_mask);
		fallthrough;
	case DP_PIN_ASSIGNMENT_D:
		/* Pin assignment D carries 2 DP lanes. */
		return 2;
	case DP_PIN_ASSIGNMENT_C:
	case DP_PIN_ASSIGNMENT_E:
		/* Pin assignments C/E carry all 4 lanes for DP. */
		return 4;
	}
}
347 
/*
 * ICL/TGL/ADLP: derive the max DP lane count from the per-lane assignment
 * bitmask reported by the FIA.
 */
static int intel_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	intel_wakeref_t wakeref;
	u32 lane_mask = 0;

	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		lane_mask = intel_tc_port_get_lane_mask(dig_port);

	switch (lane_mask) {
	default:
		MISSING_CASE(lane_mask);
		fallthrough;
	case 0x1:
	case 0x2:
	case 0x4:
	case 0x8:
		/* Single lane assigned. */
		return 1;
	case 0x3:
	case 0xc:
		/* Lower or upper lane pair assigned. */
		return 2;
	case 0xf:
		/* All 4 lanes assigned. */
		return 4;
	}
}
373 
/*
 * Return the max DP lane count usable on @dig_port. Only DP-alt mode is
 * lane-limited by the sink's pin assignment; all other cases get 4 lanes.
 */
int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);

	if (!intel_phy_is_tc(i915, phy) || tc->mode != TC_PORT_DP_ALT)
		return 4;

	assert_tc_cold_blocked(tc);

	/* Dispatch on display version: LNL+, MTL, then ICL..ADLP. */
	if (DISPLAY_VER(i915) >= 20)
		return lnl_tc_port_get_max_lane_count(dig_port);

	if (DISPLAY_VER(i915) >= 14)
		return mtl_tc_port_get_max_lane_count(dig_port);

	return intel_tc_port_get_max_lane_count(dig_port);
}
393 
/*
 * Program the FIA with the number of main lanes the display will use on
 * @dig_port, picking the lane group based on the lane reversal strapping.
 */
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
				      int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
	u32 val;

	/* Lane reversal is only expected on legacy (static) ports. */
	drm_WARN_ON(&i915->drm,
		    lane_reversal && tc->mode != TC_PORT_LEGACY);

	assert_tc_cold_blocked(tc);

	val = intel_de_read(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia));
	val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc->phy_fia_idx);

	switch (required_lanes) {
	case 1:
		/* Reversed ports use the top lane instead of lane 0. */
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3(tc->phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML0(tc->phy_fia_idx);
		break;
	case 2:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3_2(tc->phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML1_0(tc->phy_fia_idx);
		break;
	case 4:
		val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc->phy_fia_idx);
		break;
	default:
		MISSING_CASE(required_lanes);
	}

	intel_de_write(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia), val);
}
430 
/*
 * Fix up the VBT-provided legacy_port flag if it contradicts the HPD live
 * status: a legacy port should only report a legacy HPD and a non-legacy
 * port only a DP-alt/TBT-alt HPD.
 */
static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc,
				      u32 live_status_mask)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 valid_hpd_mask;

	drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED);

	/* Only an unambiguous, single asserted HPD bit can be trusted. */
	if (hweight32(live_status_mask) != 1)
		return;

	if (tc->legacy_port)
		valid_hpd_mask = BIT(TC_PORT_LEGACY);
	else
		valid_hpd_mask = BIT(TC_PORT_DP_ALT) |
				 BIT(TC_PORT_TBT_ALT);

	if (!(live_status_mask & ~valid_hpd_mask))
		return;

	/* If live status mismatches the VBT flag, trust the live status. */
	drm_dbg_kms(&i915->drm,
		    "Port %s: live status %08x mismatches the legacy port flag %08x, fixing flag\n",
		    tc->port_name, live_status_mask, valid_hpd_mask);

	tc->legacy_port = !tc->legacy_port;
}
458 
/*
 * Compute which FIA instance and which of its two port slots this TC port
 * uses, depending on whether the platform has modular FIAs.
 */
static void tc_phy_load_fia_params(struct intel_tc_port *tc, bool modular_fia)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum port port = tc->dig_port->base.port;
	enum tc_port tc_port = intel_port_to_tc(i915, port);

	/*
	 * Each Modular FIA instance houses 2 TC ports. In SOC that has more
	 * than two TC ports, there are multiple instances of Modular FIA.
	 */
	if (modular_fia) {
		tc->phy_fia = tc_port / 2;
		tc->phy_fia_idx = tc_port % 2;
	} else {
		/* Single FIA serving all TC ports. */
		tc->phy_fia = FIA1;
		tc->phy_fia_idx = tc_port;
	}
}
477 
478 /*
479  * ICL TC PHY handlers
480  * -------------------
481  */
/*
 * ICL: legacy ports block TC-cold via the AUX power domain, other modes
 * via POWER_DOMAIN_TC_COLD_OFF. See the power domain comment at the top.
 */
static enum intel_display_power_domain
icl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;

	if (tc->legacy_port)
		return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);

	return POWER_DOMAIN_TC_COLD_OFF;
}
493 
/*
 * ICL: return the BIT(tc_port_mode) mask of asserted HPD live states,
 * combining the FIA's TBT/TC live state bits with the PCH's legacy HPD ISR.
 */
static u32 icl_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin];
	intel_wakeref_t wakeref;
	u32 fia_isr;
	u32 pch_isr;
	u32 mask = 0;

	with_intel_display_power(i915, tc_phy_cold_off_domain(tc), wakeref) {
		fia_isr = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia));
		pch_isr = intel_de_read(i915, SDEISR);
	}

	/* All-ones readback means the FIA is inaccessible (TC-cold). */
	if (fia_isr == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, nothing connected\n",
			    tc->port_name);
		return mask;
	}

	if (fia_isr & TC_LIVE_STATE_TBT(tc->phy_fia_idx))
		mask |= BIT(TC_PORT_TBT_ALT);
	if (fia_isr & TC_LIVE_STATE_TC(tc->phy_fia_idx))
		mask |= BIT(TC_PORT_DP_ALT);

	if (pch_isr & isr_bit)
		mask |= BIT(TC_PORT_LEGACY);

	return mask;
}
526 
527 /*
528  * Return the PHY status complete flag indicating that display can acquire the
529  * PHY ownership. The IOM firmware sets this flag when a DP-alt or legacy sink
530  * is connected and it's ready to switch the ownership to display. The flag
531  * will be left cleared when a TBT-alt sink is connected, where the PHY is
532  * owned by the TBT subsystem and so switching the ownership to display is not
533  * required.
534  */
/* ICL: read the PHY status-complete flag from the FIA (see comment above). */
static bool icl_tc_phy_is_ready(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(i915, PORT_TX_DFLEXDPPMS(tc->phy_fia));
	/* All-ones readback means the FIA is inaccessible (TC-cold). */
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assuming not ready\n",
			    tc->port_name);
		return false;
	}

	return val & DP_PHY_MODE_STATUS_COMPLETED(tc->phy_fia_idx);
}
552 
/*
 * ICL: take or release display ownership of the PHY via the FIA's
 * safe-mode bit. Returns false if the FIA is inaccessible (TC-cold).
 */
static bool icl_tc_phy_take_ownership(struct intel_tc_port *tc,
				      bool take)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, can't %s ownership\n",
			    tc->port_name, take ? "take" : "release");

		return false;
	}

	/* "not safe" set == display owns the PHY. */
	val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
	if (take)
		val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);

	intel_de_write(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia), val);

	return true;
}
578 
/* ICL: return whether display owns the PHY (FIA safe-mode bit set). */
static bool icl_tc_phy_is_owned(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
	/* All-ones readback means the FIA is inaccessible (TC-cold). */
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assume not owned\n",
			    tc->port_name);
		return false;
	}

	return val & DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
}
596 
/*
 * ICL: read out the current PHY mode, holding a temporary TC-cold blocking
 * reference during the readout and keeping a long-term one (lock_wakeref)
 * if the port turns out to be connected.
 */
static void icl_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	enum intel_display_power_domain domain;
	intel_wakeref_t tc_cold_wref;

	tc_cold_wref = __tc_cold_block(tc, &domain);

	tc->mode = tc_phy_get_current_mode(tc);
	if (tc->mode != TC_PORT_DISCONNECTED)
		tc->lock_wakeref = tc_cold_block(tc);

	__tc_cold_unblock(tc, domain, tc_cold_wref);
}
610 
611 /*
612  * This function implements the first part of the Connect Flow described by our
613  * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
614  * lanes, EDID, etc) is done as needed in the typical places.
615  *
616  * Unlike the other ports, type-C ports are not available to use as soon as we
617  * get a hotplug. The type-C PHYs can be shared between multiple controllers:
618  * display, USB, etc. As a result, handshaking through FIA is required around
619  * connect and disconnect to cleanly transfer ownership with the controller and
620  * set the type-C power state.
621  */
/*
 * Verify after taking PHY ownership that a legacy/DP-alt connection is
 * still valid: the sink is still there (DP-alt only) and it provides
 * at least @required_lanes.
 */
static bool tc_phy_verify_legacy_or_dp_alt_mode(struct intel_tc_port *tc,
						int required_lanes)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	int max_lanes;

	max_lanes = intel_tc_port_max_lane_count(dig_port);
	if (tc->mode == TC_PORT_LEGACY) {
		/* Legacy ports always have all 4 lanes wired. */
		drm_WARN_ON(&i915->drm, max_lanes != 4);
		return true;
	}

	drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DP_ALT);

	/*
	 * Now we have to re-check the live state, in case the port recently
	 * became disconnected. Not necessary for legacy mode.
	 */
	if (!(tc_phy_hpd_live_status(tc) & BIT(TC_PORT_DP_ALT))) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n",
			    tc->port_name);
		return false;
	}

	if (max_lanes < required_lanes) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY max lanes %d < required lanes %d\n",
			    tc->port_name,
			    max_lanes, required_lanes);
		return false;
	}

	return true;
}
657 
/*
 * ICL: connect the PHY in tc->mode: block TC-cold, and for legacy/DP-alt
 * modes take PHY ownership and verify the connection. On failure everything
 * is rolled back; for legacy mode failures are only WARNed about and the
 * connect is forced through (the sink can't disappear on a legacy port).
 */
static bool icl_tc_phy_connect(struct intel_tc_port *tc,
			       int required_lanes)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	tc->lock_wakeref = tc_cold_block(tc);

	/* In TBT-alt mode the TBT subsystem owns the PHY; nothing more to do. */
	if (tc->mode == TC_PORT_TBT_ALT)
		return true;

	if ((!tc_phy_is_ready(tc) ||
	     !icl_tc_phy_take_ownership(tc, true)) &&
	    !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
		drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership (ready %s)\n",
			    tc->port_name,
			    str_yes_no(tc_phy_is_ready(tc)));
		goto out_unblock_tc_cold;
	}

	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
		goto out_release_phy;

	return true;

out_release_phy:
	icl_tc_phy_take_ownership(tc, false);
out_unblock_tc_cold:
	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));

	return false;
}
690 
691 /*
692  * See the comment at the connect function. This implements the Disconnect
693  * Flow.
694  */
/*
 * ICL: disconnect the PHY: release ownership for legacy/DP-alt modes and
 * drop the TC-cold blocking reference taken at connect time.
 */
static void icl_tc_phy_disconnect(struct intel_tc_port *tc)
{
	switch (tc->mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		icl_tc_phy_take_ownership(tc, false);
		fallthrough;
	case TC_PORT_TBT_ALT:
		tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
		break;
	default:
		MISSING_CASE(tc->mode);
	}
}
709 
/* ICL: single (non-modular) FIA. */
static void icl_tc_phy_init(struct intel_tc_port *tc)
{
	tc_phy_load_fia_params(tc, false);
}
714 
/* ICL TypeC PHY vtable. */
static const struct intel_tc_phy_ops icl_tc_phy_ops = {
	.cold_off_domain = icl_tc_phy_cold_off_domain,
	.hpd_live_status = icl_tc_phy_hpd_live_status,
	.is_ready = icl_tc_phy_is_ready,
	.is_owned = icl_tc_phy_is_owned,
	.get_hw_state = icl_tc_phy_get_hw_state,
	.connect = icl_tc_phy_connect,
	.disconnect = icl_tc_phy_disconnect,
	.init = icl_tc_phy_init,
};
725 
726 /*
727  * TGL TC PHY handlers
728  * -------------------
729  */
/* TGL: TC-cold is blocked via POWER_DOMAIN_TC_COLD_OFF in all modes. */
static enum intel_display_power_domain
tgl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
{
	return POWER_DOMAIN_TC_COLD_OFF;
}
735 
/* TGL: detect modular vs. single FIA from FIA1 and load the FIA params. */
static void tgl_tc_phy_init(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	intel_wakeref_t wakeref;
	u32 val;

	with_intel_display_power(i915, tc_phy_cold_off_domain(tc), wakeref)
		val = intel_de_read(i915, PORT_TX_DFLEXDPSP(FIA1));

	/* All-ones readback means the FIA is inaccessible (TC-cold). */
	drm_WARN_ON(&i915->drm, val == 0xffffffff);

	tc_phy_load_fia_params(tc, val & MODULAR_FIA_MASK);
}
749 
/* TGL TypeC PHY vtable: ICL flow with TGL cold-off domain and FIA init. */
static const struct intel_tc_phy_ops tgl_tc_phy_ops = {
	.cold_off_domain = tgl_tc_phy_cold_off_domain,
	.hpd_live_status = icl_tc_phy_hpd_live_status,
	.is_ready = icl_tc_phy_is_ready,
	.is_owned = icl_tc_phy_is_owned,
	.get_hw_state = icl_tc_phy_get_hw_state,
	.connect = icl_tc_phy_connect,
	.disconnect = icl_tc_phy_disconnect,
	.init = tgl_tc_phy_init,
};
760 
761 /*
762  * ADLP TC PHY handlers
763  * --------------------
764  */
/*
 * ADLP: legacy and DP-alt modes block TC-cold via the AUX power domain,
 * TBT-alt via POWER_DOMAIN_TC_COLD_OFF.
 */
static enum intel_display_power_domain
adlp_tc_phy_cold_off_domain(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;

	if (tc->mode != TC_PORT_TBT_ALT)
		return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);

	return POWER_DOMAIN_TC_COLD_OFF;
}
776 
/*
 * ADLP: return the BIT(tc_port_mode) HPD live status mask, combining the
 * north display (TC/TBT) HPD ISR bits with the PCH's legacy HPD ISR bit.
 */
static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
	u32 cpu_isr_bits = i915->display.hotplug.hpd[hpd_pin];
	u32 pch_isr_bit = i915->display.hotplug.pch_hpd[hpd_pin];
	intel_wakeref_t wakeref;
	u32 cpu_isr;
	u32 pch_isr;
	u32 mask = 0;

	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
		cpu_isr = intel_de_read(i915, GEN11_DE_HPD_ISR);
		pch_isr = intel_de_read(i915, SDEISR);
	}

	if (cpu_isr & (cpu_isr_bits & GEN11_DE_TC_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_DP_ALT);
	if (cpu_isr & (cpu_isr_bits & GEN11_DE_TBT_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_TBT_ALT);

	if (pch_isr & pch_isr_bit)
		mask |= BIT(TC_PORT_LEGACY);

	return mask;
}
804 
805 /*
806  * Return the PHY status complete flag indicating that display can acquire the
807  * PHY ownership. The IOM firmware sets this flag when it's ready to switch
808  * the ownership to display, regardless of what sink is connected (TBT-alt,
809  * DP-alt, legacy or nothing). For TBT-alt sinks the PHY is owned by the TBT
810  * subsystem and so switching the ownership to display is not required.
811  */
/* ADLP: read the PHY ready flag from TCSS_DDI_STATUS (see comment above). */
static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum tc_port tc_port = intel_port_to_tc(i915, tc->dig_port->base.port);
	u32 val;

	assert_display_core_power_enabled(tc);

	val = intel_de_read(i915, TCSS_DDI_STATUS(tc_port));
	/* All-ones readback means the register is inaccessible (TC-cold). */
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assuming not ready\n",
			    tc->port_name);
		return false;
	}

	return val & TCSS_DDI_STATUS_READY;
}
830 
/*
 * ADLP: take or release display ownership of the PHY via DDI_BUF_CTL.
 * Always succeeds (the register is not gated by TC-cold here).
 */
static bool adlp_tc_phy_take_ownership(struct intel_tc_port *tc,
				       bool take)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum port port = tc->dig_port->base.port;

	assert_tc_port_power_enabled(tc);

	intel_de_rmw(i915, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP,
		     take ? DDI_BUF_CTL_TC_PHY_OWNERSHIP : 0);

	return true;
}
844 
/* ADLP: return whether display owns the PHY (DDI_BUF_CTL ownership bit). */
static bool adlp_tc_phy_is_owned(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum port port = tc->dig_port->base.port;
	u32 val;

	assert_tc_port_power_enabled(tc);

	val = intel_de_read(i915, DDI_BUF_CTL(port));
	return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP;
}
856 
/*
 * ADLP: read out the current PHY mode while holding the port's DDI-lanes
 * power domain, keeping a TC-cold blocking reference if connected.
 */
static void adlp_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum intel_display_power_domain port_power_domain =
		tc_port_power_domain(tc);
	intel_wakeref_t port_wakeref;

	port_wakeref = intel_display_power_get(i915, port_power_domain);

	tc->mode = tc_phy_get_current_mode(tc);
	if (tc->mode != TC_PORT_DISCONNECTED)
		tc->lock_wakeref = tc_cold_block(tc);

	intel_display_power_put(i915, port_power_domain, port_wakeref);
}
872 
/*
 * ADLP: connect the PHY: for legacy/DP-alt modes take PHY ownership under
 * the port's DDI-lanes power domain, check readiness, block TC-cold and
 * verify the connection. Legacy mode failures are only WARNed about.
 * On failure everything is rolled back via the goto ladder (note that
 * out_unblock_tc_cold deliberately falls through to also release the PHY
 * and the port power reference).
 */
static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum intel_display_power_domain port_power_domain =
		tc_port_power_domain(tc);
	intel_wakeref_t port_wakeref;

	/* In TBT-alt mode only the TC-cold blocking reference is needed. */
	if (tc->mode == TC_PORT_TBT_ALT) {
		tc->lock_wakeref = tc_cold_block(tc);
		return true;
	}

	port_wakeref = intel_display_power_get(i915, port_power_domain);

	if (!adlp_tc_phy_take_ownership(tc, true) &&
	    !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
		drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership\n",
			    tc->port_name);
		goto out_put_port_power;
	}

	if (!tc_phy_is_ready(tc) &&
	    !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n",
			    tc->port_name);
		goto out_release_phy;
	}

	tc->lock_wakeref = tc_cold_block(tc);

	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
		goto out_unblock_tc_cold;

	intel_display_power_put(i915, port_power_domain, port_wakeref);

	return true;

out_unblock_tc_cold:
	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
out_release_phy:
	adlp_tc_phy_take_ownership(tc, false);
out_put_port_power:
	intel_display_power_put(i915, port_power_domain, port_wakeref);

	return false;
}
919 
/*
 * ADLP: disconnect the PHY: drop the TC-cold blocking reference and, for
 * legacy/DP-alt modes, release PHY ownership under the port power domain.
 */
static void adlp_tc_phy_disconnect(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum intel_display_power_domain port_power_domain =
		tc_port_power_domain(tc);
	intel_wakeref_t port_wakeref;

	port_wakeref = intel_display_power_get(i915, port_power_domain);

	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));

	switch (tc->mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		adlp_tc_phy_take_ownership(tc, false);
		fallthrough;
	case TC_PORT_TBT_ALT:
		break;
	default:
		MISSING_CASE(tc->mode);
	}

	intel_display_power_put(i915, port_power_domain, port_wakeref);
}
944 
/* ADLP: always modular FIA. */
static void adlp_tc_phy_init(struct intel_tc_port *tc)
{
	tc_phy_load_fia_params(tc, true);
}
949 
/* ADLP TypeC PHY vtable. */
static const struct intel_tc_phy_ops adlp_tc_phy_ops = {
	.cold_off_domain = adlp_tc_phy_cold_off_domain,
	.hpd_live_status = adlp_tc_phy_hpd_live_status,
	.is_ready = adlp_tc_phy_is_ready,
	.is_owned = adlp_tc_phy_is_owned,
	.get_hw_state = adlp_tc_phy_get_hw_state,
	.connect = adlp_tc_phy_connect,
	.disconnect = adlp_tc_phy_disconnect,
	.init = adlp_tc_phy_init,
};
960 
961 /*
962  * XELPDP TC PHY handlers
963  * ----------------------
964  */
/*
 * XELPDP: return the BIT(tc_port_mode) HPD live status mask, combining the
 * PICA DP-alt/TBT ISR bits with the PCH's legacy HPD ISR bit. The legacy
 * bit is only considered on ports flagged as legacy.
 */
static u32 xelpdp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
	u32 pica_isr_bits = i915->display.hotplug.hpd[hpd_pin];
	u32 pch_isr_bit = i915->display.hotplug.pch_hpd[hpd_pin];
	intel_wakeref_t wakeref;
	u32 pica_isr;
	u32 pch_isr;
	u32 mask = 0;

	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
		pica_isr = intel_de_read(i915, PICAINTERRUPT_ISR);
		pch_isr = intel_de_read(i915, SDEISR);
	}

	if (pica_isr & (pica_isr_bits & XELPDP_DP_ALT_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_DP_ALT);
	if (pica_isr & (pica_isr_bits & XELPDP_TBT_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_TBT_ALT);

	if (tc->legacy_port && (pch_isr & pch_isr_bit))
		mask |= BIT(TC_PORT_LEGACY);

	return mask;
}
992 
/* XELPDP: read the TCSS power state from PORT_BUF_CTL1. */
static bool
xelpdp_tc_phy_tcss_power_is_enabled(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum port port = tc->dig_port->base.port;
	i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);

	assert_tc_cold_blocked(tc);

	return intel_de_read(i915, reg) & XELPDP_TCSS_POWER_STATE;
}
1004 
1005 static bool
1006 xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port *tc, bool enabled)
1007 {
1008 	struct drm_i915_private *i915 = tc_to_i915(tc);
1009 
1010 	if (wait_for(xelpdp_tc_phy_tcss_power_is_enabled(tc) == enabled, 5)) {
1011 		drm_dbg_kms(&i915->drm,
1012 			    "Port %s: timeout waiting for TCSS power to get %s\n",
1013 			    enabled ? "enabled" : "disabled",
1014 			    tc->port_name);
1015 		return false;
1016 	}
1017 
1018 	return true;
1019 }
1020 
1021 static void __xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
1022 {
1023 	struct drm_i915_private *i915 = tc_to_i915(tc);
1024 	enum port port = tc->dig_port->base.port;
1025 	i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
1026 	u32 val;
1027 
1028 	assert_tc_cold_blocked(tc);
1029 
1030 	val = intel_de_read(i915, reg);
1031 	if (enable)
1032 		val |= XELPDP_TCSS_POWER_REQUEST;
1033 	else
1034 		val &= ~XELPDP_TCSS_POWER_REQUEST;
1035 	intel_de_write(i915, reg, val);
1036 }
1037 
/*
 * Request enabling/disabling the TCSS power state of the PHY and wait for
 * the request to complete.
 *
 * A failed enable request is rolled back by disabling the power again -
 * except on legacy ports, where such a failure is unexpected (hence the
 * WARN).
 *
 * Returns %true if the requested power state was reached, %false otherwise.
 */
static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	__xelpdp_tc_phy_enable_tcss_power(tc, enable);

	/* When enabling, wait for the PHY to get ready first. */
	if (enable && !tc_phy_wait_for_ready(tc))
		goto out_disable;

	if (!xelpdp_tc_phy_wait_for_tcss_power(tc, enable))
		goto out_disable;

	return true;

out_disable:
	if (drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY))
		return false;

	/* A failed disable request can't be rolled back. */
	if (!enable)
		return false;

	/* Roll back the failed enable request. */
	__xelpdp_tc_phy_enable_tcss_power(tc, false);
	xelpdp_tc_phy_wait_for_tcss_power(tc, false);

	return false;
}
1064 
1065 static void xelpdp_tc_phy_take_ownership(struct intel_tc_port *tc, bool take)
1066 {
1067 	struct drm_i915_private *i915 = tc_to_i915(tc);
1068 	enum port port = tc->dig_port->base.port;
1069 	i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
1070 	u32 val;
1071 
1072 	assert_tc_cold_blocked(tc);
1073 
1074 	val = intel_de_read(i915, reg);
1075 	if (take)
1076 		val |= XELPDP_TC_PHY_OWNERSHIP;
1077 	else
1078 		val &= ~XELPDP_TC_PHY_OWNERSHIP;
1079 	intel_de_write(i915, reg, val);
1080 }
1081 
1082 static bool xelpdp_tc_phy_is_owned(struct intel_tc_port *tc)
1083 {
1084 	struct drm_i915_private *i915 = tc_to_i915(tc);
1085 	enum port port = tc->dig_port->base.port;
1086 	i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
1087 
1088 	assert_tc_cold_blocked(tc);
1089 
1090 	return intel_de_read(i915, reg) & XELPDP_TC_PHY_OWNERSHIP;
1091 }
1092 
/*
 * Read out the current mode of the PHY, keeping a TC-cold blocking power
 * reference for any mode in which the port stays connected.
 */
static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	intel_wakeref_t tc_cold_wref;
	enum intel_display_power_domain domain;

	/* Block TC-cold for the duration of the readout. */
	tc_cold_wref = __tc_cold_block(tc, &domain);

	tc->mode = tc_phy_get_current_mode(tc);
	/* Keep the PHY out of TC-cold while it's in a connected mode. */
	if (tc->mode != TC_PORT_DISCONNECTED)
		tc->lock_wakeref = tc_cold_block(tc);

	/* In DP-alt/legacy mode the TCSS power must already be enabled. */
	drm_WARN_ON(&i915->drm,
		    (tc->mode == TC_PORT_DP_ALT || tc->mode == TC_PORT_LEGACY) &&
		    !xelpdp_tc_phy_tcss_power_is_enabled(tc));

	__tc_cold_unblock(tc, domain, tc_cold_wref);
}
1111 
/*
 * Connect the PHY in the port's current mode, enabling the TCSS power and
 * taking PHY ownership for the DP-alt/legacy modes. Each failure step is
 * unwound in reverse order.
 *
 * Returns %true if the PHY got connected, %false otherwise.
 */
static bool xelpdp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
	tc->lock_wakeref = tc_cold_block(tc);

	/* In TBT-alt mode only the TC-cold blocking reference is needed. */
	if (tc->mode == TC_PORT_TBT_ALT)
		return true;

	if (!xelpdp_tc_phy_enable_tcss_power(tc, true))
		goto out_unblock_tccold;

	xelpdp_tc_phy_take_ownership(tc, true);

	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
		goto out_release_phy;

	return true;

out_release_phy:
	xelpdp_tc_phy_take_ownership(tc, false);
	xelpdp_tc_phy_wait_for_tcss_power(tc, false);

out_unblock_tccold:
	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));

	return false;
}
1138 
1139 static void xelpdp_tc_phy_disconnect(struct intel_tc_port *tc)
1140 {
1141 	switch (tc->mode) {
1142 	case TC_PORT_LEGACY:
1143 	case TC_PORT_DP_ALT:
1144 		xelpdp_tc_phy_take_ownership(tc, false);
1145 		xelpdp_tc_phy_enable_tcss_power(tc, false);
1146 		fallthrough;
1147 	case TC_PORT_TBT_ALT:
1148 		tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
1149 		break;
1150 	default:
1151 		MISSING_CASE(tc->mode);
1152 	}
1153 }
1154 
/*
 * PHY handlers for XELPDP (display version >= 14) platforms. The TC-cold
 * off domain, ready check and init handlers are shared with earlier
 * platforms.
 */
static const struct intel_tc_phy_ops xelpdp_tc_phy_ops = {
	.cold_off_domain = tgl_tc_phy_cold_off_domain,
	.hpd_live_status = xelpdp_tc_phy_hpd_live_status,
	.is_ready = adlp_tc_phy_is_ready,
	.is_owned = xelpdp_tc_phy_is_owned,
	.get_hw_state = xelpdp_tc_phy_get_hw_state,
	.connect = xelpdp_tc_phy_connect,
	.disconnect = xelpdp_tc_phy_disconnect,
	.init = adlp_tc_phy_init,
};
1165 
1166 /*
1167  * Generic TC PHY handlers
1168  * -----------------------
1169  */
1170 static enum intel_display_power_domain
1171 tc_phy_cold_off_domain(struct intel_tc_port *tc)
1172 {
1173 	return tc->phy_ops->cold_off_domain(tc);
1174 }
1175 
1176 static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc)
1177 {
1178 	struct drm_i915_private *i915 = tc_to_i915(tc);
1179 	u32 mask;
1180 
1181 	mask = tc->phy_ops->hpd_live_status(tc);
1182 
1183 	/* The sink can be connected only in a single mode. */
1184 	drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1);
1185 
1186 	return mask;
1187 }
1188 
1189 static bool tc_phy_is_ready(struct intel_tc_port *tc)
1190 {
1191 	return tc->phy_ops->is_ready(tc);
1192 }
1193 
1194 static bool tc_phy_is_owned(struct intel_tc_port *tc)
1195 {
1196 	return tc->phy_ops->is_owned(tc);
1197 }
1198 
1199 static void tc_phy_get_hw_state(struct intel_tc_port *tc)
1200 {
1201 	tc->phy_ops->get_hw_state(tc);
1202 }
1203 
1204 static bool tc_phy_is_ready_and_owned(struct intel_tc_port *tc,
1205 				      bool phy_is_ready, bool phy_is_owned)
1206 {
1207 	struct drm_i915_private *i915 = tc_to_i915(tc);
1208 
1209 	drm_WARN_ON(&i915->drm, phy_is_owned && !phy_is_ready);
1210 
1211 	return phy_is_ready && phy_is_owned;
1212 }
1213 
1214 static bool tc_phy_is_connected(struct intel_tc_port *tc,
1215 				enum icl_port_dpll_id port_pll_type)
1216 {
1217 	struct intel_encoder *encoder = &tc->dig_port->base;
1218 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
1219 	bool phy_is_ready = tc_phy_is_ready(tc);
1220 	bool phy_is_owned = tc_phy_is_owned(tc);
1221 	bool is_connected;
1222 
1223 	if (tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned))
1224 		is_connected = port_pll_type == ICL_PORT_DPLL_MG_PHY;
1225 	else
1226 		is_connected = port_pll_type == ICL_PORT_DPLL_DEFAULT;
1227 
1228 	drm_dbg_kms(&i915->drm,
1229 		    "Port %s: PHY connected: %s (ready: %s, owned: %s, pll_type: %s)\n",
1230 		    tc->port_name,
1231 		    str_yes_no(is_connected),
1232 		    str_yes_no(phy_is_ready),
1233 		    str_yes_no(phy_is_owned),
1234 		    port_pll_type == ICL_PORT_DPLL_DEFAULT ? "tbt" : "non-tbt");
1235 
1236 	return is_connected;
1237 }
1238 
1239 static bool tc_phy_wait_for_ready(struct intel_tc_port *tc)
1240 {
1241 	struct drm_i915_private *i915 = tc_to_i915(tc);
1242 
1243 	if (wait_for(tc_phy_is_ready(tc), 500)) {
1244 		drm_err(&i915->drm, "Port %s: timeout waiting for PHY ready\n",
1245 			tc->port_name);
1246 
1247 		return false;
1248 	}
1249 
1250 	return true;
1251 }
1252 
1253 static enum tc_port_mode
1254 hpd_mask_to_tc_mode(u32 live_status_mask)
1255 {
1256 	if (live_status_mask)
1257 		return fls(live_status_mask) - 1;
1258 
1259 	return TC_PORT_DISCONNECTED;
1260 }
1261 
1262 static enum tc_port_mode
1263 tc_phy_hpd_live_mode(struct intel_tc_port *tc)
1264 {
1265 	u32 live_status_mask = tc_phy_hpd_live_status(tc);
1266 
1267 	return hpd_mask_to_tc_mode(live_status_mask);
1268 }
1269 
1270 static enum tc_port_mode
1271 get_tc_mode_in_phy_owned_state(struct intel_tc_port *tc,
1272 			       enum tc_port_mode live_mode)
1273 {
1274 	switch (live_mode) {
1275 	case TC_PORT_LEGACY:
1276 	case TC_PORT_DP_ALT:
1277 		return live_mode;
1278 	default:
1279 		MISSING_CASE(live_mode);
1280 		fallthrough;
1281 	case TC_PORT_TBT_ALT:
1282 	case TC_PORT_DISCONNECTED:
1283 		if (tc->legacy_port)
1284 			return TC_PORT_LEGACY;
1285 		else
1286 			return TC_PORT_DP_ALT;
1287 	}
1288 }
1289 
1290 static enum tc_port_mode
1291 get_tc_mode_in_phy_not_owned_state(struct intel_tc_port *tc,
1292 				   enum tc_port_mode live_mode)
1293 {
1294 	switch (live_mode) {
1295 	case TC_PORT_LEGACY:
1296 		return TC_PORT_DISCONNECTED;
1297 	case TC_PORT_DP_ALT:
1298 	case TC_PORT_TBT_ALT:
1299 		return TC_PORT_TBT_ALT;
1300 	default:
1301 		MISSING_CASE(live_mode);
1302 		fallthrough;
1303 	case TC_PORT_DISCONNECTED:
1304 		if (tc->legacy_port)
1305 			return TC_PORT_DISCONNECTED;
1306 		else
1307 			return TC_PORT_TBT_ALT;
1308 	}
1309 }
1310 
1311 static enum tc_port_mode
1312 tc_phy_get_current_mode(struct intel_tc_port *tc)
1313 {
1314 	struct drm_i915_private *i915 = tc_to_i915(tc);
1315 	enum tc_port_mode live_mode = tc_phy_hpd_live_mode(tc);
1316 	bool phy_is_ready;
1317 	bool phy_is_owned;
1318 	enum tc_port_mode mode;
1319 
1320 	/*
1321 	 * For legacy ports the IOM firmware initializes the PHY during boot-up
1322 	 * and system resume whether or not a sink is connected. Wait here for
1323 	 * the initialization to get ready.
1324 	 */
1325 	if (tc->legacy_port)
1326 		tc_phy_wait_for_ready(tc);
1327 
1328 	phy_is_ready = tc_phy_is_ready(tc);
1329 	phy_is_owned = tc_phy_is_owned(tc);
1330 
1331 	if (!tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) {
1332 		mode = get_tc_mode_in_phy_not_owned_state(tc, live_mode);
1333 	} else {
1334 		drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT);
1335 		mode = get_tc_mode_in_phy_owned_state(tc, live_mode);
1336 	}
1337 
1338 	drm_dbg_kms(&i915->drm,
1339 		    "Port %s: PHY mode: %s (ready: %s, owned: %s, HPD: %s)\n",
1340 		    tc->port_name,
1341 		    tc_port_mode_name(mode),
1342 		    str_yes_no(phy_is_ready),
1343 		    str_yes_no(phy_is_owned),
1344 		    tc_port_mode_name(live_mode));
1345 
1346 	return mode;
1347 }
1348 
1349 static enum tc_port_mode default_tc_mode(struct intel_tc_port *tc)
1350 {
1351 	if (tc->legacy_port)
1352 		return TC_PORT_LEGACY;
1353 
1354 	return TC_PORT_TBT_ALT;
1355 }
1356 
1357 static enum tc_port_mode
1358 hpd_mask_to_target_mode(struct intel_tc_port *tc, u32 live_status_mask)
1359 {
1360 	enum tc_port_mode mode = hpd_mask_to_tc_mode(live_status_mask);
1361 
1362 	if (mode != TC_PORT_DISCONNECTED)
1363 		return mode;
1364 
1365 	return default_tc_mode(tc);
1366 }
1367 
1368 static enum tc_port_mode
1369 tc_phy_get_target_mode(struct intel_tc_port *tc)
1370 {
1371 	u32 live_status_mask = tc_phy_hpd_live_status(tc);
1372 
1373 	return hpd_mask_to_target_mode(tc, live_status_mask);
1374 }
1375 
1376 static void tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
1377 {
1378 	struct drm_i915_private *i915 = tc_to_i915(tc);
1379 	u32 live_status_mask = tc_phy_hpd_live_status(tc);
1380 	bool connected;
1381 
1382 	tc_port_fixup_legacy_flag(tc, live_status_mask);
1383 
1384 	tc->mode = hpd_mask_to_target_mode(tc, live_status_mask);
1385 
1386 	connected = tc->phy_ops->connect(tc, required_lanes);
1387 	if (!connected && tc->mode != default_tc_mode(tc)) {
1388 		tc->mode = default_tc_mode(tc);
1389 		connected = tc->phy_ops->connect(tc, required_lanes);
1390 	}
1391 
1392 	drm_WARN_ON(&i915->drm, !connected);
1393 }
1394 
1395 static void tc_phy_disconnect(struct intel_tc_port *tc)
1396 {
1397 	if (tc->mode != TC_PORT_DISCONNECTED) {
1398 		tc->phy_ops->disconnect(tc);
1399 		tc->mode = TC_PORT_DISCONNECTED;
1400 	}
1401 }
1402 
1403 static void tc_phy_init(struct intel_tc_port *tc)
1404 {
1405 	mutex_lock(&tc->lock);
1406 	tc->phy_ops->init(tc);
1407 	mutex_unlock(&tc->lock);
1408 }
1409 
/*
 * Disconnect the port's PHY and - unless @force_disconnect is set -
 * reconnect it in the mode matching the current HPD live status.
 */
static void intel_tc_port_reset_mode(struct intel_tc_port *tc,
				     int required_lanes, bool force_disconnect)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum tc_port_mode old_tc_mode = tc->mode;

	/* Make sure any pending delayed power-off work has completed. */
	intel_display_power_flush_work(i915);
	if (!intel_tc_cold_requires_aux_pw(dig_port)) {
		enum intel_display_power_domain aux_domain;
		bool aux_powered;

		/*
		 * With TC-cold not blocked via the AUX power well, AUX power
		 * is expected to be off by now.
		 */
		aux_domain = intel_aux_power_domain(dig_port);
		aux_powered = intel_display_power_is_enabled(i915, aux_domain);
		drm_WARN_ON(&i915->drm, aux_powered);
	}

	tc_phy_disconnect(tc);
	if (!force_disconnect)
		tc_phy_connect(tc, required_lanes);

	drm_dbg_kms(&i915->drm, "Port %s: TC port mode reset (%s -> %s)\n",
		    tc->port_name,
		    tc_port_mode_name(old_tc_mode),
		    tc_port_mode_name(tc->mode));
}
1436 
1437 static bool intel_tc_port_needs_reset(struct intel_tc_port *tc)
1438 {
1439 	return tc_phy_get_target_mode(tc) != tc->mode;
1440 }
1441 
1442 static void intel_tc_port_update_mode(struct intel_tc_port *tc,
1443 				      int required_lanes, bool force_disconnect)
1444 {
1445 	if (force_disconnect ||
1446 	    intel_tc_port_needs_reset(tc))
1447 		intel_tc_port_reset_mode(tc, required_lanes, force_disconnect);
1448 }
1449 
1450 static void __intel_tc_port_get_link(struct intel_tc_port *tc)
1451 {
1452 	tc->link_refcount++;
1453 }
1454 
1455 static void __intel_tc_port_put_link(struct intel_tc_port *tc)
1456 {
1457 	tc->link_refcount--;
1458 }
1459 
1460 static bool tc_port_is_enabled(struct intel_tc_port *tc)
1461 {
1462 	struct drm_i915_private *i915 = tc_to_i915(tc);
1463 	struct intel_digital_port *dig_port = tc->dig_port;
1464 
1465 	assert_tc_port_power_enabled(tc);
1466 
1467 	return intel_de_read(i915, DDI_BUF_CTL(dig_port->base.port)) &
1468 	       DDI_BUF_CTL_ENABLE;
1469 }
1470 
1471 /**
1472  * intel_tc_port_init_mode: Read out HW state and init the given port's TypeC mode
1473  * @dig_port: digital port
1474  *
1475  * Read out the HW state and initialize the TypeC mode of @dig_port. The mode
1476  * will be locked until intel_tc_port_sanitize_mode() is called.
1477  */
void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	bool update_mode = false;

	mutex_lock(&tc->lock);

	/* The port state must still be uninitialized at this point. */
	drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED);
	drm_WARN_ON(&i915->drm, tc->lock_wakeref);
	drm_WARN_ON(&i915->drm, tc->link_refcount);

	tc_phy_get_hw_state(tc);
	/*
	 * Save the initial mode for the state check in
	 * intel_tc_port_sanitize_mode().
	 */
	tc->init_mode = tc->mode;

	/*
	 * The PHY needs to be connected for AUX to work during HW readout and
	 * MST topology resume, but the PHY mode can only be changed if the
	 * port is disabled.
	 *
	 * An exception is the case where BIOS leaves the PHY incorrectly
	 * disconnected on an enabled legacy port. Work around that by
	 * connecting the PHY even though the port is enabled. This doesn't
	 * cause a problem as the PHY ownership state is ignored by the
	 * IOM/TCSS firmware (only display can own the PHY in that case).
	 */
	if (!tc_port_is_enabled(tc)) {
		update_mode = true;
	} else if (tc->mode == TC_PORT_DISCONNECTED) {
		/* Only legacy ports are expected to hit the workaround. */
		drm_WARN_ON(&i915->drm, !tc->legacy_port);
		drm_err(&i915->drm,
			"Port %s: PHY disconnected on enabled port, connecting it\n",
			tc->port_name);
		update_mode = true;
	}

	if (update_mode)
		intel_tc_port_update_mode(tc, 1, false);

	/* Prevent changing tc->mode until intel_tc_port_sanitize_mode() is called. */
	__intel_tc_port_get_link(tc);

	mutex_unlock(&tc->lock);
}
1526 
1527 static bool tc_port_has_active_links(struct intel_tc_port *tc,
1528 				     const struct intel_crtc_state *crtc_state)
1529 {
1530 	struct drm_i915_private *i915 = tc_to_i915(tc);
1531 	struct intel_digital_port *dig_port = tc->dig_port;
1532 	enum icl_port_dpll_id pll_type = ICL_PORT_DPLL_DEFAULT;
1533 	int active_links = 0;
1534 
1535 	if (dig_port->dp.is_mst) {
1536 		/* TODO: get the PLL type for MST, once HW readout is done for it. */
1537 		active_links = intel_dp_mst_encoder_active_links(dig_port);
1538 	} else if (crtc_state && crtc_state->hw.active) {
1539 		pll_type = intel_ddi_port_pll_type(&dig_port->base, crtc_state);
1540 		active_links = 1;
1541 	}
1542 
1543 	if (active_links && !tc_phy_is_connected(tc, pll_type))
1544 		drm_err(&i915->drm,
1545 			"Port %s: PHY disconnected with %d active link(s)\n",
1546 			tc->port_name, active_links);
1547 
1548 	return active_links;
1549 }
1550 
1551 /**
1552  * intel_tc_port_sanitize_mode: Sanitize the given port's TypeC mode
1553  * @dig_port: digital port
1554  * @crtc_state: atomic state of CRTC connected to @dig_port
1555  *
1556  * Sanitize @dig_port's TypeC mode wrt. the encoder's state right after driver
1557  * loading and system resume:
1558  * If the encoder is enabled keep the TypeC mode/PHY connected state locked until
1559  * the encoder is disabled.
1560  * If the encoder is disabled make sure the PHY is disconnected.
1561  * @crtc_state is valid if @dig_port is enabled, NULL otherwise.
1562  */
void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
				 const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc = to_tc_port(dig_port);

	mutex_lock(&tc->lock);

	/* Only the reference taken in intel_tc_port_init_mode() is expected. */
	drm_WARN_ON(&i915->drm, tc->link_refcount != 1);
	if (!tc_port_has_active_links(tc, crtc_state)) {
		/*
		 * TBT-alt is the default mode in any case the PHY ownership is not
		 * held (regardless of the sink's connected live state), so
		 * we'll just switch to disconnected mode from it here without
		 * a note.
		 */
		if (tc->init_mode != TC_PORT_TBT_ALT &&
		    tc->init_mode != TC_PORT_DISCONNECTED)
			drm_dbg_kms(&i915->drm,
				    "Port %s: PHY left in %s mode on disabled port, disconnecting it\n",
				    tc->port_name,
				    tc_port_mode_name(tc->init_mode));
		/* Disconnect the PHY and drop the init-time link reference. */
		tc_phy_disconnect(tc);
		__intel_tc_port_put_link(tc);
	}

	drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
		    tc->port_name,
		    tc_port_mode_name(tc->mode));

	mutex_unlock(&tc->lock);
}
1595 
1596 /*
1597  * The type-C ports are different because even when they are connected, they may
1598  * not be available/usable by the graphics driver: see the comment on
1599  * icl_tc_phy_connect(). So in our driver instead of adding the additional
1600  * concept of "usable" and make everything check for "connected and usable" we
1601  * define a port as "connected" when it is not only connected, but also when it
1602  * is usable by the rest of the driver. That maintains the old assumption that
1603  * connected ports are usable, and avoids exposing to the users objects they
1604  * can't really use.
1605  */
1606 bool intel_tc_port_connected(struct intel_encoder *encoder)
1607 {
1608 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1609 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
1610 	struct intel_tc_port *tc = to_tc_port(dig_port);
1611 	u32 mask = ~0;
1612 
1613 	drm_WARN_ON(&i915->drm, !intel_tc_port_ref_held(dig_port));
1614 
1615 	if (tc->mode != TC_PORT_DISCONNECTED)
1616 		mask = BIT(tc->mode);
1617 
1618 	return tc_phy_hpd_live_status(tc) & mask;
1619 }
1620 
1621 static bool __intel_tc_port_link_needs_reset(struct intel_tc_port *tc)
1622 {
1623 	bool ret;
1624 
1625 	mutex_lock(&tc->lock);
1626 
1627 	ret = tc->link_refcount &&
1628 	      tc->mode == TC_PORT_DP_ALT &&
1629 	      intel_tc_port_needs_reset(tc);
1630 
1631 	mutex_unlock(&tc->lock);
1632 
1633 	return ret;
1634 }
1635 
1636 bool intel_tc_port_link_needs_reset(struct intel_digital_port *dig_port)
1637 {
1638 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
1639 	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
1640 
1641 	if (!intel_phy_is_tc(i915, phy))
1642 		return false;
1643 
1644 	return __intel_tc_port_link_needs_reset(to_tc_port(dig_port));
1645 }
1646 
/*
 * Add all the pipes with an active link on @tc's port to @state, flagging
 * their connectors as changed, and commit the state to retrain the link.
 *
 * Returns 0 on success or if no reset turned out to be needed, a negative
 * error code otherwise (-EDEADLK triggering a locking backoff/retry in the
 * caller).
 */
static int reset_link_commit(struct intel_tc_port *tc,
			     struct intel_atomic_state *state,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	struct intel_dp *intel_dp = enc_to_intel_dp(&dig_port->base);
	struct intel_crtc *crtc;
	u8 pipe_mask;
	int ret;

	ret = drm_modeset_lock(&i915->drm.mode_config.connection_mutex, ctx);
	if (ret)
		return ret;

	ret = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask);
	if (ret)
		return ret;

	if (!pipe_mask)
		return 0;

	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) {
		struct intel_crtc_state *crtc_state;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		/* Force a modeset on the pipe, retraining the link. */
		crtc_state->uapi.connectors_changed = true;
	}

	/* The reset may have become unnecessary meanwhile; recheck. */
	if (!__intel_tc_port_link_needs_reset(tc))
		return 0;

	return drm_atomic_commit(&state->base);
}
1684 
/*
 * Reset the port's link via a modeset commit, with the locking
 * backoff/retry handled by intel_modeset_lock_ctx_retry().
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int reset_link(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *_state;
	struct intel_atomic_state *state;
	int ret;

	_state = drm_atomic_state_alloc(&i915->drm);
	if (!_state)
		return -ENOMEM;

	state = to_intel_atomic_state(_state);
	/* Mark the state as a driver-internal, not a uAPI commit. */
	state->internal = true;

	intel_modeset_lock_ctx_retry(&ctx, state, 0, ret)
		ret = reset_link_commit(tc, state, &ctx);

	drm_atomic_state_put(&state->base);

	return ret;
}
1707 
/*
 * Delayed work resetting the link of a DP-alt mode port whose sink got
 * disconnected, scheduled from intel_tc_port_link_reset().
 */
static void intel_tc_port_link_reset_work(struct work_struct *work)
{
	struct intel_tc_port *tc =
		container_of(work, struct intel_tc_port, link_reset_work.work);
	struct drm_i915_private *i915 = tc_to_i915(tc);
	int ret;

	/* The reset may have become unnecessary since it was scheduled. */
	if (!__intel_tc_port_link_needs_reset(tc))
		return;

	mutex_lock(&i915->drm.mode_config.mutex);

	drm_dbg_kms(&i915->drm,
		    "Port %s: TypeC DP-alt sink disconnected, resetting link\n",
		    tc->port_name);
	ret = reset_link(tc);
	drm_WARN_ON(&i915->drm, ret);

	mutex_unlock(&i915->drm.mode_config.mutex);
}
1728 
1729 bool intel_tc_port_link_reset(struct intel_digital_port *dig_port)
1730 {
1731 	if (!intel_tc_port_link_needs_reset(dig_port))
1732 		return false;
1733 
1734 	queue_delayed_work(system_unbound_wq,
1735 			   &to_tc_port(dig_port)->link_reset_work,
1736 			   msecs_to_jiffies(2000));
1737 
1738 	return true;
1739 }
1740 
1741 void intel_tc_port_link_cancel_reset_work(struct intel_digital_port *dig_port)
1742 {
1743 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
1744 	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
1745 	struct intel_tc_port *tc = to_tc_port(dig_port);
1746 
1747 	if (!intel_phy_is_tc(i915, phy))
1748 		return;
1749 
1750 	cancel_delayed_work(&tc->link_reset_work);
1751 }
1752 
/*
 * Lock the TC port, updating its mode - connecting the PHY with
 * @required_lanes - unless a link reference pins the current mode.
 */
static void __intel_tc_port_lock(struct intel_tc_port *tc,
				 int required_lanes)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	mutex_lock(&tc->lock);

	/* A pending PHY disconnect must not race with this (re)connect. */
	cancel_delayed_work(&tc->disconnect_phy_work);

	/* The mode can only change while no link reference is held. */
	if (!tc->link_refcount)
		intel_tc_port_update_mode(tc, required_lanes,
					  false);

	/* The PHY must be connected - and owned, except in TBT mode - now. */
	drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_DISCONNECTED);
	drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_TBT_ALT &&
				!tc_phy_is_owned(tc));
}
1770 
void intel_tc_port_lock(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	/* A single lane is sufficient for e.g. AUX-only access. */
	__intel_tc_port_lock(tc, 1);
}
1775 
1776 /*
1777  * Disconnect the given digital port from its TypeC PHY (handing back the
1778  * control of the PHY to the TypeC subsystem). This will happen in a delayed
1779  * manner after each aux transactions and modeset disables.
1780  */
static void intel_tc_port_disconnect_phy_work(struct work_struct *work)
{
	struct intel_tc_port *tc =
		container_of(work, struct intel_tc_port, disconnect_phy_work.work);

	mutex_lock(&tc->lock);

	/* Only disconnect if no link reference was taken meanwhile. */
	if (!tc->link_refcount)
		intel_tc_port_update_mode(tc, 1, true);

	mutex_unlock(&tc->lock);
}
1793 
1794 /**
1795  * intel_tc_port_flush_work: flush the work disconnecting the PHY
1796  * @dig_port: digital port
1797  *
1798  * Flush the delayed work disconnecting an idle PHY.
1799  */
1800 static void intel_tc_port_flush_work(struct intel_digital_port *dig_port)
1801 {
1802 	flush_delayed_work(&to_tc_port(dig_port)->disconnect_phy_work);
1803 }
1804 
/*
 * Cancel a pending link reset and flush the pending PHY disconnect work
 * before suspending the port.
 */
void intel_tc_port_suspend(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	cancel_delayed_work_sync(&tc->link_reset_work);
	intel_tc_port_flush_work(dig_port);
}
1812 
void intel_tc_port_unlock(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	/* Schedule disconnecting the now idle, still connected PHY. */
	if (!tc->link_refcount && tc->mode != TC_PORT_DISCONNECTED)
		queue_delayed_work(system_unbound_wq, &tc->disconnect_phy_work,
				   msecs_to_jiffies(1000));

	mutex_unlock(&tc->lock);
}
1823 
1824 bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
1825 {
1826 	struct intel_tc_port *tc = to_tc_port(dig_port);
1827 
1828 	return mutex_is_locked(&tc->lock) ||
1829 	       tc->link_refcount;
1830 }
1831 
/*
 * Take a link reference on @dig_port, connecting its PHY with
 * @required_lanes and pinning its mode while the reference is held.
 */
void intel_tc_port_get_link(struct intel_digital_port *dig_port,
			    int required_lanes)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	__intel_tc_port_lock(tc, required_lanes);
	__intel_tc_port_get_link(tc);
	intel_tc_port_unlock(dig_port);
}
1841 
/*
 * Drop the link reference taken via intel_tc_port_get_link(),
 * disconnecting an idle PHY synchronously.
 */
void intel_tc_port_put_link(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	intel_tc_port_lock(dig_port);
	__intel_tc_port_put_link(tc);
	intel_tc_port_unlock(dig_port);

	/*
	 * The firmware will not update the HPD status of other TypeC ports
	 * that are active in DP-alt mode with their sink disconnected, until
	 * this port is disabled and its PHY gets disconnected. Make sure this
	 * happens in a timely manner by disconnecting the PHY synchronously.
	 */
	intel_tc_port_flush_work(dig_port);
}
1858 
1859 int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
1860 {
1861 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
1862 	struct intel_tc_port *tc;
1863 	enum port port = dig_port->base.port;
1864 	enum tc_port tc_port = intel_port_to_tc(i915, port);
1865 
1866 	if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE))
1867 		return -EINVAL;
1868 
1869 	tc = kzalloc(sizeof(*tc), GFP_KERNEL);
1870 	if (!tc)
1871 		return -ENOMEM;
1872 
1873 	dig_port->tc = tc;
1874 	tc->dig_port = dig_port;
1875 
1876 	if (DISPLAY_VER(i915) >= 14)
1877 		tc->phy_ops = &xelpdp_tc_phy_ops;
1878 	else if (DISPLAY_VER(i915) >= 13)
1879 		tc->phy_ops = &adlp_tc_phy_ops;
1880 	else if (DISPLAY_VER(i915) >= 12)
1881 		tc->phy_ops = &tgl_tc_phy_ops;
1882 	else
1883 		tc->phy_ops = &icl_tc_phy_ops;
1884 
1885 	tc->port_name = kasprintf(GFP_KERNEL, "%c/TC#%d", port_name(port),
1886 				  tc_port + 1);
1887 	if (!tc->port_name) {
1888 		kfree(tc);
1889 		return -ENOMEM;
1890 	}
1891 
1892 	mutex_init(&tc->lock);
1893 	/* TODO: Combine the two works */
1894 	INIT_DELAYED_WORK(&tc->disconnect_phy_work, intel_tc_port_disconnect_phy_work);
1895 	INIT_DELAYED_WORK(&tc->link_reset_work, intel_tc_port_link_reset_work);
1896 	tc->legacy_port = is_legacy;
1897 	tc->mode = TC_PORT_DISCONNECTED;
1898 	tc->link_refcount = 0;
1899 
1900 	tc_phy_init(tc);
1901 
1902 	intel_tc_port_init_mode(dig_port);
1903 
1904 	return 0;
1905 }
1906 
1907 void intel_tc_port_cleanup(struct intel_digital_port *dig_port)
1908 {
1909 	intel_tc_port_suspend(dig_port);
1910 
1911 	kfree(dig_port->tc->port_name);
1912 	kfree(dig_port->tc);
1913 	dig_port->tc = NULL;
1914 }
1915