xref: /linux/drivers/gpu/drm/i915/display/intel_tc.c (revision bdfa82f5b8998a6311a8ef0cf89ad413f5cd9ea4)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5 
6 #include <drm/drm_print.h>
7 
8 #include "i915_reg.h"
9 #include "i915_utils.h"
10 #include "intel_atomic.h"
11 #include "intel_cx0_phy_regs.h"
12 #include "intel_ddi.h"
13 #include "intel_de.h"
14 #include "intel_display.h"
15 #include "intel_display_driver.h"
16 #include "intel_display_power_map.h"
17 #include "intel_display_types.h"
18 #include "intel_dkl_phy_regs.h"
19 #include "intel_dp.h"
20 #include "intel_dp_mst.h"
21 #include "intel_mg_phy_regs.h"
22 #include "intel_modeset_lock.h"
23 #include "intel_tc.h"
24 
25 #define DP_PIN_ASSIGNMENT_C	0x3
26 #define DP_PIN_ASSIGNMENT_D	0x4
27 #define DP_PIN_ASSIGNMENT_E	0x5
28 
/* TypeC port mode, i.e. which subsystem (if any) currently drives the PHY. */
enum tc_port_mode {
	TC_PORT_DISCONNECTED,	/* no sink connected */
	TC_PORT_TBT_ALT,	/* Thunderbolt alt mode, PHY owned by the TBT subsystem */
	TC_PORT_DP_ALT,		/* DP alternate mode, PHY owned by display */
	TC_PORT_LEGACY,		/* static/legacy (non-TypeC) sink, PHY owned by display */
};
35 
struct intel_tc_port;

/*
 * Platform specific TC PHY hooks, called via tc->phy_ops. Each platform
 * family (ICL, TGL, ADLP, XELPDP) provides its own table below.
 */
struct intel_tc_phy_ops {
	/* Power domain that must be held to keep the PHY out of TC-cold. */
	enum intel_display_power_domain (*cold_off_domain)(struct intel_tc_port *tc);
	/* Bitmask of TC_PORT_* modes for which a live sink is detected. */
	u32 (*hpd_live_status)(struct intel_tc_port *tc);
	/* PHY status-complete flag: display may take PHY ownership. */
	bool (*is_ready)(struct intel_tc_port *tc);
	/* Whether display currently owns the PHY. */
	bool (*is_owned)(struct intel_tc_port *tc);
	/* Read out the current PHY mode/wakeref state at load/resume. */
	void (*get_hw_state)(struct intel_tc_port *tc);
	/* Connect/disconnect the PHY in the port's current mode. */
	bool (*connect)(struct intel_tc_port *tc, int required_lanes);
	void (*disconnect)(struct intel_tc_port *tc);
	/* One-time init, e.g. FIA parameter setup. */
	void (*init)(struct intel_tc_port *tc);
};
48 
/* Per-port TypeC state. */
struct intel_tc_port {
	struct intel_digital_port *dig_port;

	/* Platform specific hooks, see struct intel_tc_phy_ops. */
	const struct intel_tc_phy_ops *phy_ops;

	struct mutex lock;	/* protects the TypeC port mode */
	/* Wakeref blocking TC-cold while the port is connected. */
	intel_wakeref_t lock_wakeref;
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	/* Domain @lock_wakeref was taken on, cross-checked when released. */
	enum intel_display_power_domain lock_power_domain;
#endif
	struct delayed_work disconnect_phy_work;
	struct delayed_work link_reset_work;
	/* Number of users keeping the link active, see tc->mode handling. */
	int link_refcount;
	/* Static/legacy (non-TypeC) port flag; may be fixed up from live status. */
	bool legacy_port:1;
	const char *port_name;
	/* Current mode, protected by @lock. */
	enum tc_port_mode mode;
	/* Mode read out from HW at driver load/resume. */
	enum tc_port_mode init_mode;
	/* FIA instance and this port's index within it, see tc_phy_load_fia_params(). */
	enum phy_fia phy_fia;
	u8 phy_fia_idx;
};
69 
70 static enum intel_display_power_domain
71 tc_phy_cold_off_domain(struct intel_tc_port *);
72 static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc);
73 static bool tc_phy_is_ready(struct intel_tc_port *tc);
74 static bool tc_phy_wait_for_ready(struct intel_tc_port *tc);
75 static enum tc_port_mode tc_phy_get_current_mode(struct intel_tc_port *tc);
76 
77 static const char *tc_port_mode_name(enum tc_port_mode mode)
78 {
79 	static const char * const names[] = {
80 		[TC_PORT_DISCONNECTED] = "disconnected",
81 		[TC_PORT_TBT_ALT] = "tbt-alt",
82 		[TC_PORT_DP_ALT] = "dp-alt",
83 		[TC_PORT_LEGACY] = "legacy",
84 	};
85 
86 	if (WARN_ON(mode >= ARRAY_SIZE(names)))
87 		mode = TC_PORT_DISCONNECTED;
88 
89 	return names[mode];
90 }
91 
92 static struct intel_tc_port *to_tc_port(struct intel_digital_port *dig_port)
93 {
94 	return dig_port->tc;
95 }
96 
97 static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port,
98 				  enum tc_port_mode mode)
99 {
100 	struct intel_tc_port *tc = to_tc_port(dig_port);
101 
102 	return intel_encoder_is_tc(&dig_port->base) && tc->mode == mode;
103 }
104 
/* Whether the port is a TypeC port currently in TBT-alt mode. */
bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_TBT_ALT);
}

/* Whether the port is a TypeC port currently in DP-alt mode. */
bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_DP_ALT);
}

/* Whether the port is a TypeC port currently in legacy (non-TypeC sink) mode. */
bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_LEGACY);
}
119 
120 bool intel_tc_port_handles_hpd_glitches(struct intel_digital_port *dig_port)
121 {
122 	struct intel_tc_port *tc = to_tc_port(dig_port);
123 
124 	return intel_encoder_is_tc(&dig_port->base) && !tc->legacy_port;
125 }
126 
127 /*
128  * The display power domains used for TC ports depending on the
129  * platform and TC mode (legacy, DP-alt, TBT):
130  *
131  * POWER_DOMAIN_DISPLAY_CORE:
132  * --------------------------
133  * ADLP/all modes:
134  *   - TCSS/IOM access for PHY ready state.
135  * ADLP+/all modes:
136  *   - DE/north-,south-HPD ISR access for HPD live state.
137  *
138  * POWER_DOMAIN_PORT_DDI_LANES_<port>:
139  * -----------------------------------
140  * ICL+/all modes:
141  *   - DE/DDI_BUF access for port enabled state.
142  * ADLP/all modes:
143  *   - DE/DDI_BUF access for PHY owned state.
144  *
145  * POWER_DOMAIN_AUX_USBC<TC port index>:
146  * -------------------------------------
147  * ICL/legacy mode:
148  *   - TCSS/IOM,FIA access for PHY ready, owned and HPD live state
149  *   - TCSS/PHY: block TC-cold power state for using the PHY AUX and
150  *     main lanes.
151  * ADLP/legacy, DP-alt modes:
152  *   - TCSS/PHY: block TC-cold power state for using the PHY AUX and
153  *     main lanes.
154  *
155  * POWER_DOMAIN_TC_COLD_OFF:
156  * -------------------------
157  * ICL/DP-alt, TBT mode:
158  *   - TCSS/TBT: block TC-cold power state for using the (direct or
159  *     TBT DP-IN) AUX and main lanes.
160  *
161  * TGL/all modes:
162  *   - TCSS/IOM,FIA access for PHY ready, owned and HPD live state
163  *   - TCSS/PHY: block TC-cold power state for using the (direct or
164  *     TBT DP-IN) AUX and main lanes.
165  *
166  * ADLP/TBT mode:
167  *   - TCSS/TBT: block TC-cold power state for using the (TBT DP-IN)
168  *     AUX and main lanes.
169  *
170  * XELPDP+/all modes:
171  *   - TCSS/IOM,FIA access for PHY ready, owned state
172  *   - TCSS/PHY: block TC-cold power state for using the (direct or
173  *     TBT DP-IN) AUX and main lanes.
174  */
175 bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port)
176 {
177 	struct intel_display *display = to_intel_display(dig_port);
178 	struct intel_tc_port *tc = to_tc_port(dig_port);
179 
180 	return tc_phy_cold_off_domain(tc) ==
181 	       intel_display_power_legacy_aux_domain(display, dig_port->aux_ch);
182 }
183 
184 static intel_wakeref_t
185 __tc_cold_block(struct intel_tc_port *tc, enum intel_display_power_domain *domain)
186 {
187 	struct intel_display *display = to_intel_display(tc->dig_port);
188 
189 	*domain = tc_phy_cold_off_domain(tc);
190 
191 	return intel_display_power_get(display, *domain);
192 }
193 
194 static intel_wakeref_t
195 tc_cold_block(struct intel_tc_port *tc)
196 {
197 	enum intel_display_power_domain domain;
198 	intel_wakeref_t wakeref;
199 
200 	wakeref = __tc_cold_block(tc, &domain);
201 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
202 	tc->lock_power_domain = domain;
203 #endif
204 	return wakeref;
205 }
206 
207 static void
208 __tc_cold_unblock(struct intel_tc_port *tc, enum intel_display_power_domain domain,
209 		  intel_wakeref_t wakeref)
210 {
211 	struct intel_display *display = to_intel_display(tc->dig_port);
212 
213 	intel_display_power_put(display, domain, wakeref);
214 }
215 
216 static void
217 tc_cold_unblock(struct intel_tc_port *tc, intel_wakeref_t wakeref)
218 {
219 	struct intel_display __maybe_unused *display = to_intel_display(tc->dig_port);
220 	enum intel_display_power_domain domain = tc_phy_cold_off_domain(tc);
221 
222 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
223 	drm_WARN_ON(display->drm, tc->lock_power_domain != domain);
224 #endif
225 	__tc_cold_unblock(tc, domain, wakeref);
226 }
227 
228 static void
229 assert_display_core_power_enabled(struct intel_tc_port *tc)
230 {
231 	struct intel_display *display = to_intel_display(tc->dig_port);
232 
233 	drm_WARN_ON(display->drm,
234 		    !intel_display_power_is_enabled(display, POWER_DOMAIN_DISPLAY_CORE));
235 }
236 
237 static void
238 assert_tc_cold_blocked(struct intel_tc_port *tc)
239 {
240 	struct intel_display *display = to_intel_display(tc->dig_port);
241 	bool enabled;
242 
243 	enabled = intel_display_power_is_enabled(display,
244 						 tc_phy_cold_off_domain(tc));
245 	drm_WARN_ON(display->drm, !enabled);
246 }
247 
248 static enum intel_display_power_domain
249 tc_port_power_domain(struct intel_tc_port *tc)
250 {
251 	enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
252 
253 	return POWER_DOMAIN_PORT_DDI_LANES_TC1 + tc_port - TC_PORT_1;
254 }
255 
256 static void
257 assert_tc_port_power_enabled(struct intel_tc_port *tc)
258 {
259 	struct intel_display *display = to_intel_display(tc->dig_port);
260 
261 	drm_WARN_ON(display->drm,
262 		    !intel_display_power_is_enabled(display, tc_port_power_domain(tc)));
263 }
264 
265 static u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
266 {
267 	struct intel_display *display = to_intel_display(dig_port);
268 	struct intel_tc_port *tc = to_tc_port(dig_port);
269 	u32 lane_mask;
270 
271 	lane_mask = intel_de_read(display, PORT_TX_DFLEXDPSP(tc->phy_fia));
272 
273 	drm_WARN_ON(display->drm, lane_mask == 0xffffffff);
274 	assert_tc_cold_blocked(tc);
275 
276 	lane_mask &= DP_LANE_ASSIGNMENT_MASK(tc->phy_fia_idx);
277 	return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(tc->phy_fia_idx);
278 }
279 
280 u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
281 {
282 	struct intel_display *display = to_intel_display(dig_port);
283 	struct intel_tc_port *tc = to_tc_port(dig_port);
284 	u32 pin_mask;
285 
286 	pin_mask = intel_de_read(display, PORT_TX_DFLEXPA1(tc->phy_fia));
287 
288 	drm_WARN_ON(display->drm, pin_mask == 0xffffffff);
289 	assert_tc_cold_blocked(tc);
290 
291 	return (pin_mask & DP_PIN_ASSIGNMENT_MASK(tc->phy_fia_idx)) >>
292 	       DP_PIN_ASSIGNMENT_SHIFT(tc->phy_fia_idx);
293 }
294 
295 static int lnl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
296 {
297 	struct intel_display *display = to_intel_display(dig_port);
298 	enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);
299 	intel_wakeref_t wakeref;
300 	u32 val, pin_assignment;
301 
302 	with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref)
303 		val = intel_de_read(display, TCSS_DDI_STATUS(tc_port));
304 
305 	pin_assignment =
306 		REG_FIELD_GET(TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK, val);
307 
308 	switch (pin_assignment) {
309 	default:
310 		MISSING_CASE(pin_assignment);
311 		fallthrough;
312 	case DP_PIN_ASSIGNMENT_D:
313 		return 2;
314 	case DP_PIN_ASSIGNMENT_C:
315 	case DP_PIN_ASSIGNMENT_E:
316 		return 4;
317 	}
318 }
319 
320 static int mtl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
321 {
322 	struct intel_display *display = to_intel_display(dig_port);
323 	intel_wakeref_t wakeref;
324 	u32 pin_mask;
325 
326 	with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref)
327 		pin_mask = intel_tc_port_get_pin_assignment_mask(dig_port);
328 
329 	switch (pin_mask) {
330 	default:
331 		MISSING_CASE(pin_mask);
332 		fallthrough;
333 	case DP_PIN_ASSIGNMENT_D:
334 		return 2;
335 	case DP_PIN_ASSIGNMENT_C:
336 	case DP_PIN_ASSIGNMENT_E:
337 		return 4;
338 	}
339 }
340 
341 static int intel_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
342 {
343 	struct intel_display *display = to_intel_display(dig_port);
344 	intel_wakeref_t wakeref;
345 	u32 lane_mask = 0;
346 
347 	with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref)
348 		lane_mask = intel_tc_port_get_lane_mask(dig_port);
349 
350 	switch (lane_mask) {
351 	default:
352 		MISSING_CASE(lane_mask);
353 		fallthrough;
354 	case 0x1:
355 	case 0x2:
356 	case 0x4:
357 	case 0x8:
358 		return 1;
359 	case 0x3:
360 	case 0xc:
361 		return 2;
362 	case 0xf:
363 		return 4;
364 	}
365 }
366 
367 int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
368 {
369 	struct intel_display *display = to_intel_display(dig_port);
370 	struct intel_tc_port *tc = to_tc_port(dig_port);
371 
372 	if (!intel_encoder_is_tc(&dig_port->base) || tc->mode != TC_PORT_DP_ALT)
373 		return 4;
374 
375 	assert_tc_cold_blocked(tc);
376 
377 	if (DISPLAY_VER(display) >= 20)
378 		return lnl_tc_port_get_max_lane_count(dig_port);
379 
380 	if (DISPLAY_VER(display) >= 14)
381 		return mtl_tc_port_get_max_lane_count(dig_port);
382 
383 	return intel_tc_port_get_max_lane_count(dig_port);
384 }
385 
/*
 * Program in the FIA the number of main lanes - and which end of the
 * connector they are at, in case of lane reversal - that the display
 * should use on this port. No-op on DISPLAY_VER >= 14, which has no FIA.
 */
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
				      int required_lanes)
{
	struct intel_display *display = to_intel_display(dig_port);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	bool lane_reversal = dig_port->lane_reversal;
	u32 val;

	if (DISPLAY_VER(display) >= 14)
		return;

	/* Lane reversal is only expected on legacy ports. */
	drm_WARN_ON(display->drm,
		    lane_reversal && tc->mode != TC_PORT_LEGACY);

	assert_tc_cold_blocked(tc);

	/* Read-modify-write only this port's lane-enable field. */
	val = intel_de_read(display, PORT_TX_DFLEXDPMLE1(tc->phy_fia));
	val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc->phy_fia_idx);

	switch (required_lanes) {
	case 1:
		/* With lane reversal the used lanes start from ML3 instead of ML0. */
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3(tc->phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML0(tc->phy_fia_idx);
		break;
	case 2:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3_2(tc->phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML1_0(tc->phy_fia_idx);
		break;
	case 4:
		val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc->phy_fia_idx);
		break;
	default:
		MISSING_CASE(required_lanes);
	}

	intel_de_write(display, PORT_TX_DFLEXDPMLE1(tc->phy_fia), val);
}
425 
/*
 * Sanitize the legacy-port flag based on the HPD live status: if exactly
 * one mode reports a live sink and that mode contradicts the flag, trust
 * the hardware and flip the flag.
 */
static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc,
				      u32 live_status_mask)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	u32 valid_hpd_mask;

	/* Must only be called before the port's mode has been determined. */
	drm_WARN_ON(display->drm, tc->mode != TC_PORT_DISCONNECTED);

	/* Only an unambiguous (single-bit) live status can fix the flag. */
	if (hweight32(live_status_mask) != 1)
		return;

	if (tc->legacy_port)
		valid_hpd_mask = BIT(TC_PORT_LEGACY);
	else
		valid_hpd_mask = BIT(TC_PORT_DP_ALT) |
				 BIT(TC_PORT_TBT_ALT);

	/* Nothing to do if the live status agrees with the flag. */
	if (!(live_status_mask & ~valid_hpd_mask))
		return;

	/* If live status mismatches the VBT flag, trust the live status. */
	drm_dbg_kms(display->drm,
		    "Port %s: live status %08x mismatches the legacy port flag %08x, fixing flag\n",
		    tc->port_name, live_status_mask, valid_hpd_mask);

	tc->legacy_port = !tc->legacy_port;
}
453 
454 static void tc_phy_load_fia_params(struct intel_tc_port *tc, bool modular_fia)
455 {
456 	enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
457 
458 	/*
459 	 * Each Modular FIA instance houses 2 TC ports. In SOC that has more
460 	 * than two TC ports, there are multiple instances of Modular FIA.
461 	 */
462 	if (modular_fia) {
463 		tc->phy_fia = tc_port / 2;
464 		tc->phy_fia_idx = tc_port % 2;
465 	} else {
466 		tc->phy_fia = FIA1;
467 		tc->phy_fia_idx = tc_port;
468 	}
469 }
470 
471 /*
472  * ICL TC PHY handlers
473  * -------------------
474  */
475 static enum intel_display_power_domain
476 icl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
477 {
478 	struct intel_display *display = to_intel_display(tc->dig_port);
479 	struct intel_digital_port *dig_port = tc->dig_port;
480 
481 	if (tc->legacy_port)
482 		return intel_display_power_legacy_aux_domain(display, dig_port->aux_ch);
483 
484 	return POWER_DOMAIN_TC_COLD_OFF;
485 }
486 
/*
 * Return the bitmask of TC_PORT_* modes with a live sink, based on the
 * FIA live-state bits (TBT/DP-alt) and the PCH hotplug ISR (legacy).
 */
static u32 icl_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;
	u32 isr_bit = display->hotplug.pch_hpd[dig_port->base.hpd_pin];
	intel_wakeref_t wakeref;
	u32 fia_isr;
	u32 pch_isr;
	u32 mask = 0;

	/* Both registers are read with TC-cold blocked. */
	with_intel_display_power(display, tc_phy_cold_off_domain(tc), wakeref) {
		fia_isr = intel_de_read(display, PORT_TX_DFLEXDPSP(tc->phy_fia));
		pch_isr = intel_de_read(display, SDEISR);
	}

	/* An all-ones readback means the PHY is in TC-cold and inaccessible. */
	if (fia_isr == 0xffffffff) {
		drm_dbg_kms(display->drm,
			    "Port %s: PHY in TCCOLD, nothing connected\n",
			    tc->port_name);
		return mask;
	}

	if (fia_isr & TC_LIVE_STATE_TBT(tc->phy_fia_idx))
		mask |= BIT(TC_PORT_TBT_ALT);
	if (fia_isr & TC_LIVE_STATE_TC(tc->phy_fia_idx))
		mask |= BIT(TC_PORT_DP_ALT);

	/* Legacy sinks report presence via the PCH HPD ISR. */
	if (pch_isr & isr_bit)
		mask |= BIT(TC_PORT_LEGACY);

	return mask;
}
519 
520 /*
521  * Return the PHY status complete flag indicating that display can acquire the
522  * PHY ownership. The IOM firmware sets this flag when a DP-alt or legacy sink
523  * is connected and it's ready to switch the ownership to display. The flag
524  * will be left cleared when a TBT-alt sink is connected, where the PHY is
525  * owned by the TBT subsystem and so switching the ownership to display is not
526  * required.
527  */
528 static bool icl_tc_phy_is_ready(struct intel_tc_port *tc)
529 {
530 	struct intel_display *display = to_intel_display(tc->dig_port);
531 	u32 val;
532 
533 	assert_tc_cold_blocked(tc);
534 
535 	val = intel_de_read(display, PORT_TX_DFLEXDPPMS(tc->phy_fia));
536 	if (val == 0xffffffff) {
537 		drm_dbg_kms(display->drm,
538 			    "Port %s: PHY in TCCOLD, assuming not ready\n",
539 			    tc->port_name);
540 		return false;
541 	}
542 
543 	return val & DP_PHY_MODE_STATUS_COMPLETED(tc->phy_fia_idx);
544 }
545 
/*
 * Set or clear display's PHY ownership (the FIA "not safe" flag) for
 * this port. Returns false if the PHY is in TC-cold, in which case the
 * ownership can't be changed.
 */
static bool icl_tc_phy_take_ownership(struct intel_tc_port *tc,
				      bool take)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(display, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
	/* An all-ones readback means the PHY is in TC-cold and inaccessible. */
	if (val == 0xffffffff) {
		drm_dbg_kms(display->drm,
			    "Port %s: PHY in TCCOLD, can't %s ownership\n",
			    tc->port_name, take ? "take" : "release");

		return false;
	}

	/* Read-modify-write only this port's NOT_SAFE bit. */
	val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
	if (take)
		val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);

	intel_de_write(display, PORT_TX_DFLEXDPCSSS(tc->phy_fia), val);

	return true;
}
571 
572 static bool icl_tc_phy_is_owned(struct intel_tc_port *tc)
573 {
574 	struct intel_display *display = to_intel_display(tc->dig_port);
575 	u32 val;
576 
577 	assert_tc_cold_blocked(tc);
578 
579 	val = intel_de_read(display, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
580 	if (val == 0xffffffff) {
581 		drm_dbg_kms(display->drm,
582 			    "Port %s: PHY in TCCOLD, assume not owned\n",
583 			    tc->port_name);
584 		return false;
585 	}
586 
587 	return val & DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
588 }
589 
/*
 * Read out the current PHY mode at driver load/resume time. TC-cold is
 * blocked only while sampling the mode; if a sink is connected, a
 * long-term TC-cold blocking wakeref is taken and kept in lock_wakeref.
 */
static void icl_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	enum intel_display_power_domain domain;
	intel_wakeref_t tc_cold_wref;

	tc_cold_wref = __tc_cold_block(tc, &domain);

	tc->mode = tc_phy_get_current_mode(tc);
	/* Keep TC-cold blocked for as long as the port stays connected. */
	if (tc->mode != TC_PORT_DISCONNECTED)
		tc->lock_wakeref = tc_cold_block(tc);

	__tc_cold_unblock(tc, domain, tc_cold_wref);
}
603 
604 /*
605  * This function implements the first part of the Connect Flow described by our
606  * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
607  * lanes, EDID, etc) is done as needed in the typical places.
608  *
609  * Unlike the other ports, type-C ports are not available to use as soon as we
610  * get a hotplug. The type-C PHYs can be shared between multiple controllers:
611  * display, USB, etc. As a result, handshaking through FIA is required around
612  * connect and disconnect to cleanly transfer ownership with the controller and
613  * set the type-C power state.
614  */
/*
 * Verify that a PHY in legacy or DP-alt mode provides @required_lanes
 * and - for DP-alt mode - that the sink is still connected.
 */
static bool tc_phy_verify_legacy_or_dp_alt_mode(struct intel_tc_port *tc,
						int required_lanes)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;
	int max_lanes;

	max_lanes = intel_tc_port_max_lane_count(dig_port);
	if (tc->mode == TC_PORT_LEGACY) {
		/* Legacy ports are expected to have all 4 lanes available. */
		drm_WARN_ON(display->drm, max_lanes != 4);
		return true;
	}

	drm_WARN_ON(display->drm, tc->mode != TC_PORT_DP_ALT);

	/*
	 * Now we have to re-check the live state, in case the port recently
	 * became disconnected. Not necessary for legacy mode.
	 */
	if (!(tc_phy_hpd_live_status(tc) & BIT(TC_PORT_DP_ALT))) {
		drm_dbg_kms(display->drm, "Port %s: PHY sudden disconnect\n",
			    tc->port_name);
		return false;
	}

	if (max_lanes < required_lanes) {
		drm_dbg_kms(display->drm,
			    "Port %s: PHY max lanes %d < required lanes %d\n",
			    tc->port_name,
			    max_lanes, required_lanes);
		return false;
	}

	return true;
}
650 
/*
 * Connect the PHY in the port's current mode: block TC-cold and - except
 * in TBT-alt mode, where the TBT subsystem owns the PHY - take PHY
 * ownership for display. Returns false on failure, releasing everything
 * acquired so far.
 */
static bool icl_tc_phy_connect(struct intel_tc_port *tc,
			       int required_lanes)
{
	struct intel_display *display = to_intel_display(tc->dig_port);

	tc->lock_wakeref = tc_cold_block(tc);

	if (tc->mode == TC_PORT_TBT_ALT)
		return true;

	/*
	 * An ownership failure in legacy mode is unexpected: warn about it,
	 * but complete the connect anyway (drm_WARN_ON() returns true for
	 * legacy mode, skipping the error path).
	 */
	if ((!tc_phy_is_ready(tc) ||
	     !icl_tc_phy_take_ownership(tc, true)) &&
	    !drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY)) {
		drm_dbg_kms(display->drm, "Port %s: can't take PHY ownership (ready %s)\n",
			    tc->port_name,
			    str_yes_no(tc_phy_is_ready(tc)));
		goto out_unblock_tc_cold;
	}


	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
		goto out_release_phy;

	return true;

out_release_phy:
	icl_tc_phy_take_ownership(tc, false);
out_unblock_tc_cold:
	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));

	return false;
}
683 
684 /*
685  * See the comment at the connect function. This implements the Disconnect
686  * Flow.
687  */
static void icl_tc_phy_disconnect(struct intel_tc_port *tc)
{
	switch (tc->mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		/* Hand the PHY back before dropping the TC-cold block. */
		icl_tc_phy_take_ownership(tc, false);
		fallthrough;
	case TC_PORT_TBT_ALT:
		/* Release the TC-cold block taken at connect time. */
		tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
		break;
	default:
		MISSING_CASE(tc->mode);
	}
}
702 
static void icl_tc_phy_init(struct intel_tc_port *tc)
{
	/* ICL has a single, non-modular FIA. */
	tc_phy_load_fia_params(tc, false);
}
707 
/* ICL TC PHY hooks: FIA based ready/ownership/live-status handling. */
static const struct intel_tc_phy_ops icl_tc_phy_ops = {
	.cold_off_domain = icl_tc_phy_cold_off_domain,
	.hpd_live_status = icl_tc_phy_hpd_live_status,
	.is_ready = icl_tc_phy_is_ready,
	.is_owned = icl_tc_phy_is_owned,
	.get_hw_state = icl_tc_phy_get_hw_state,
	.connect = icl_tc_phy_connect,
	.disconnect = icl_tc_phy_disconnect,
	.init = icl_tc_phy_init,
};
718 
719 /*
720  * TGL TC PHY handlers
721  * -------------------
722  */
/*
 * On TGL TC-cold is blocked via the dedicated TC_COLD_OFF power domain
 * in all modes, legacy ports included.
 */
static enum intel_display_power_domain
tgl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
{
	return POWER_DOMAIN_TC_COLD_OFF;
}
728 
static void tgl_tc_phy_init(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	intel_wakeref_t wakeref;
	u32 val;

	/* Block TC-cold while reading the FIA register. */
	with_intel_display_power(display, tc_phy_cold_off_domain(tc), wakeref)
		val = intel_de_read(display, PORT_TX_DFLEXDPSP(FIA1));

	/* An all-ones readback means the PHY is in TC-cold and inaccessible. */
	drm_WARN_ON(display->drm, val == 0xffffffff);

	/* The HW reports whether the SoC uses modular FIA instances. */
	tc_phy_load_fia_params(tc, val & MODULAR_FIA_MASK);
}
742 
/*
 * TGL TC PHY hooks: reuses the ICL FIA handlers, with the TC_COLD_OFF
 * domain used in all modes and modular-FIA detection at init.
 */
static const struct intel_tc_phy_ops tgl_tc_phy_ops = {
	.cold_off_domain = tgl_tc_phy_cold_off_domain,
	.hpd_live_status = icl_tc_phy_hpd_live_status,
	.is_ready = icl_tc_phy_is_ready,
	.is_owned = icl_tc_phy_is_owned,
	.get_hw_state = icl_tc_phy_get_hw_state,
	.connect = icl_tc_phy_connect,
	.disconnect = icl_tc_phy_disconnect,
	.init = tgl_tc_phy_init,
};
753 
754 /*
755  * ADLP TC PHY handlers
756  * --------------------
757  */
758 static enum intel_display_power_domain
759 adlp_tc_phy_cold_off_domain(struct intel_tc_port *tc)
760 {
761 	struct intel_display *display = to_intel_display(tc->dig_port);
762 	struct intel_digital_port *dig_port = tc->dig_port;
763 
764 	if (tc->mode != TC_PORT_TBT_ALT)
765 		return intel_display_power_legacy_aux_domain(display, dig_port->aux_ch);
766 
767 	return POWER_DOMAIN_TC_COLD_OFF;
768 }
769 
/*
 * Return the bitmask of TC_PORT_* modes with a live sink, based on the
 * north display (DP-alt/TBT) and PCH (legacy) hotplug ISRs.
 */
static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
	u32 cpu_isr_bits = display->hotplug.hpd[hpd_pin];
	u32 pch_isr_bit = display->hotplug.pch_hpd[hpd_pin];
	intel_wakeref_t wakeref;
	u32 cpu_isr;
	u32 pch_isr;
	u32 mask = 0;

	/* Both ISRs are read with the display core power domain held. */
	with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
		cpu_isr = intel_de_read(display, GEN11_DE_HPD_ISR);
		pch_isr = intel_de_read(display, SDEISR);
	}

	if (cpu_isr & (cpu_isr_bits & GEN11_DE_TC_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_DP_ALT);
	if (cpu_isr & (cpu_isr_bits & GEN11_DE_TBT_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_TBT_ALT);

	/* Legacy sinks report presence via the PCH HPD ISR. */
	if (pch_isr & pch_isr_bit)
		mask |= BIT(TC_PORT_LEGACY);

	return mask;
}
797 
798 /*
799  * Return the PHY status complete flag indicating that display can acquire the
800  * PHY ownership. The IOM firmware sets this flag when it's ready to switch
801  * the ownership to display, regardless of what sink is connected (TBT-alt,
802  * DP-alt, legacy or nothing). For TBT-alt sinks the PHY is owned by the TBT
803  * subsystem and so switching the ownership to display is not required.
804  */
static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
	u32 val;

	/* TCSS_DDI_STATUS is accessed via the display core power domain. */
	assert_display_core_power_enabled(tc);

	val = intel_de_read(display, TCSS_DDI_STATUS(tc_port));
	/* An all-ones readback means the PHY is in TC-cold and inaccessible. */
	if (val == 0xffffffff) {
		drm_dbg_kms(display->drm,
			    "Port %s: PHY in TCCOLD, assuming not ready\n",
			    tc->port_name);
		return false;
	}

	return val & TCSS_DDI_STATUS_READY;
}
823 
824 static bool adlp_tc_phy_take_ownership(struct intel_tc_port *tc,
825 				       bool take)
826 {
827 	struct intel_display *display = to_intel_display(tc->dig_port);
828 	enum port port = tc->dig_port->base.port;
829 
830 	assert_tc_port_power_enabled(tc);
831 
832 	intel_de_rmw(display, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP,
833 		     take ? DDI_BUF_CTL_TC_PHY_OWNERSHIP : 0);
834 
835 	return true;
836 }
837 
838 static bool adlp_tc_phy_is_owned(struct intel_tc_port *tc)
839 {
840 	struct intel_display *display = to_intel_display(tc->dig_port);
841 	enum port port = tc->dig_port->base.port;
842 	u32 val;
843 
844 	assert_tc_port_power_enabled(tc);
845 
846 	val = intel_de_read(display, DDI_BUF_CTL(port));
847 	return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP;
848 }
849 
/*
 * Read out the current PHY mode at driver load/resume time. The DDI
 * lane power domain is held while sampling the mode (needed for the
 * ownership flag in DDI_BUF_CTL); if a sink is connected a long-term
 * TC-cold blocking wakeref is taken and kept in lock_wakeref.
 */
static void adlp_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum intel_display_power_domain port_power_domain =
		tc_port_power_domain(tc);
	intel_wakeref_t port_wakeref;

	port_wakeref = intel_display_power_get(display, port_power_domain);

	tc->mode = tc_phy_get_current_mode(tc);
	/* Keep TC-cold blocked for as long as the port stays connected. */
	if (tc->mode != TC_PORT_DISCONNECTED)
		tc->lock_wakeref = tc_cold_block(tc);

	intel_display_power_put(display, port_power_domain, port_wakeref);
}
865 
/*
 * Connect the PHY in the port's current mode: block TC-cold and - except
 * in TBT-alt mode, where the TBT subsystem owns the PHY - take PHY
 * ownership for display. Returns false on failure, releasing everything
 * acquired so far.
 */
static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum intel_display_power_domain port_power_domain =
		tc_port_power_domain(tc);
	intel_wakeref_t port_wakeref;

	if (tc->mode == TC_PORT_TBT_ALT) {
		tc->lock_wakeref = tc_cold_block(tc);
		return true;
	}

	/* The DDI lane power domain is needed for the ownership flag access. */
	port_wakeref = intel_display_power_get(display, port_power_domain);

	/*
	 * A failure in legacy mode is unexpected: warn about it, but complete
	 * the connect anyway (drm_WARN_ON() returns true for legacy mode,
	 * skipping the error path).
	 */
	if (!adlp_tc_phy_take_ownership(tc, true) &&
	    !drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY)) {
		drm_dbg_kms(display->drm, "Port %s: can't take PHY ownership\n",
			    tc->port_name);
		goto out_put_port_power;
	}

	if (!tc_phy_is_ready(tc) &&
	    !drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY)) {
		drm_dbg_kms(display->drm, "Port %s: PHY not ready\n",
			    tc->port_name);
		goto out_release_phy;
	}

	tc->lock_wakeref = tc_cold_block(tc);

	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
		goto out_unblock_tc_cold;

	intel_display_power_put(display, port_power_domain, port_wakeref);

	return true;

out_unblock_tc_cold:
	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
out_release_phy:
	adlp_tc_phy_take_ownership(tc, false);
out_put_port_power:
	intel_display_power_put(display, port_power_domain, port_wakeref);

	return false;
}
912 
static void adlp_tc_phy_disconnect(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum intel_display_power_domain port_power_domain =
		tc_port_power_domain(tc);
	intel_wakeref_t port_wakeref;

	/* The DDI lane power domain is needed for the ownership flag access. */
	port_wakeref = intel_display_power_get(display, port_power_domain);

	/* Release the TC-cold block taken at connect time. */
	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));

	switch (tc->mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		/* Hand the PHY back to the IOM firmware. */
		adlp_tc_phy_take_ownership(tc, false);
		fallthrough;
	case TC_PORT_TBT_ALT:
		break;
	default:
		MISSING_CASE(tc->mode);
	}

	intel_display_power_put(display, port_power_domain, port_wakeref);
}
937 
static void adlp_tc_phy_init(struct intel_tc_port *tc)
{
	/* ADLP always uses modular FIA instances. */
	tc_phy_load_fia_params(tc, true);
}
942 
/* ADLP TC PHY hooks: TCSS/DDI_BUF_CTL based ready/ownership handling. */
static const struct intel_tc_phy_ops adlp_tc_phy_ops = {
	.cold_off_domain = adlp_tc_phy_cold_off_domain,
	.hpd_live_status = adlp_tc_phy_hpd_live_status,
	.is_ready = adlp_tc_phy_is_ready,
	.is_owned = adlp_tc_phy_is_owned,
	.get_hw_state = adlp_tc_phy_get_hw_state,
	.connect = adlp_tc_phy_connect,
	.disconnect = adlp_tc_phy_disconnect,
	.init = adlp_tc_phy_init,
};
953 
954 /*
955  * XELPDP TC PHY handlers
956  * ----------------------
957  */
/*
 * Return the bitmask of TC_PORT_* modes with a live sink, based on the
 * PICA (DP-alt/TBT) and PCH (legacy) hotplug ISRs.
 */
static u32 xelpdp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
	u32 pica_isr_bits = display->hotplug.hpd[hpd_pin];
	u32 pch_isr_bit = display->hotplug.pch_hpd[hpd_pin];
	intel_wakeref_t wakeref;
	u32 pica_isr;
	u32 pch_isr;
	u32 mask = 0;

	/* Both ISRs are read with the display core power domain held. */
	with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
		pica_isr = intel_de_read(display, PICAINTERRUPT_ISR);
		pch_isr = intel_de_read(display, SDEISR);
	}

	if (pica_isr & (pica_isr_bits & XELPDP_DP_ALT_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_DP_ALT);
	if (pica_isr & (pica_isr_bits & XELPDP_TBT_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_TBT_ALT);

	/* The PCH HPD ISR only indicates legacy sinks on legacy ports. */
	if (tc->legacy_port && (pch_isr & pch_isr_bit))
		mask |= BIT(TC_PORT_LEGACY);

	return mask;
}
985 
986 static bool
987 xelpdp_tc_phy_tcss_power_is_enabled(struct intel_tc_port *tc)
988 {
989 	struct intel_display *display = to_intel_display(tc->dig_port);
990 	enum port port = tc->dig_port->base.port;
991 	i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);
992 
993 	assert_tc_cold_blocked(tc);
994 
995 	return intel_de_read(display, reg) & XELPDP_TCSS_POWER_STATE;
996 }
997 
998 static bool
999 xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port *tc, bool enabled)
1000 {
1001 	struct intel_display *display = to_intel_display(tc->dig_port);
1002 
1003 	if (wait_for(xelpdp_tc_phy_tcss_power_is_enabled(tc) == enabled, 5)) {
1004 		drm_dbg_kms(display->drm,
1005 			    "Port %s: timeout waiting for TCSS power to get %s\n",
1006 			    str_enabled_disabled(enabled),
1007 			    tc->port_name);
1008 		return false;
1009 	}
1010 
1011 	return true;
1012 }
1013 
1014 /*
1015  * Gfx driver WA 14020908590 for PTL tcss_rxdetect_clkswb_req/ack
1016  * handshake violation when pwwreq= 0->1 during TC7/10 entry
1017  */
1018 static void xelpdp_tc_power_request_wa(struct intel_display *display, bool enable)
1019 {
1020 	/* check if mailbox is running busy */
1021 	if (intel_de_wait_for_clear(display, TCSS_DISP_MAILBOX_IN_CMD,
1022 				    TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY, 10)) {
1023 		drm_dbg_kms(display->drm,
1024 			    "Timeout waiting for TCSS mailbox run/busy bit to clear\n");
1025 		return;
1026 	}
1027 
1028 	intel_de_write(display, TCSS_DISP_MAILBOX_IN_DATA, enable ? 1 : 0);
1029 	intel_de_write(display, TCSS_DISP_MAILBOX_IN_CMD,
1030 		       TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY |
1031 		       TCSS_DISP_MAILBOX_IN_CMD_DATA(0x1));
1032 
1033 	/* wait to clear mailbox running busy bit before continuing */
1034 	if (intel_de_wait_for_clear(display, TCSS_DISP_MAILBOX_IN_CMD,
1035 				    TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY, 10)) {
1036 		drm_dbg_kms(display->drm,
1037 			    "Timeout after writing data to mailbox. Mailbox run/busy bit did not clear\n");
1038 		return;
1039 	}
1040 }
1041 
1042 static void __xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
1043 {
1044 	struct intel_display *display = to_intel_display(tc->dig_port);
1045 	enum port port = tc->dig_port->base.port;
1046 	i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);
1047 	u32 val;
1048 
1049 	assert_tc_cold_blocked(tc);
1050 
1051 	if (DISPLAY_VER(display) == 30)
1052 		xelpdp_tc_power_request_wa(display, enable);
1053 
1054 	val = intel_de_read(display, reg);
1055 	if (enable)
1056 		val |= XELPDP_TCSS_POWER_REQUEST;
1057 	else
1058 		val &= ~XELPDP_TCSS_POWER_REQUEST;
1059 	intel_de_write(display, reg, val);
1060 }
1061 
/*
 * Enable or disable TCSS power and wait for the request to take effect.
 *
 * Returns true on success. On a failure to enable, the request is
 * reverted, except on a legacy-mode port where a WARN is emitted
 * instead and the request is left asserted.
 */
static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
{
	struct intel_display *display = to_intel_display(tc->dig_port);

	__xelpdp_tc_phy_enable_tcss_power(tc, enable);

	/* When enabling, the PHY must first report ready. */
	if (enable && !tc_phy_wait_for_ready(tc))
		goto out_disable;

	if (!xelpdp_tc_phy_wait_for_tcss_power(tc, enable))
		goto out_disable;

	return true;

out_disable:
	/* A legacy-mode port is not expected to fail; don't revert it. */
	if (drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY))
		return false;

	/* Nothing to revert if the request was to disable. */
	if (!enable)
		return false;

	__xelpdp_tc_phy_enable_tcss_power(tc, false);
	xelpdp_tc_phy_wait_for_tcss_power(tc, false);

	return false;
}
1088 
1089 static void xelpdp_tc_phy_take_ownership(struct intel_tc_port *tc, bool take)
1090 {
1091 	struct intel_display *display = to_intel_display(tc->dig_port);
1092 	enum port port = tc->dig_port->base.port;
1093 	i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);
1094 	u32 val;
1095 
1096 	assert_tc_cold_blocked(tc);
1097 
1098 	val = intel_de_read(display, reg);
1099 	if (take)
1100 		val |= XELPDP_TC_PHY_OWNERSHIP;
1101 	else
1102 		val &= ~XELPDP_TC_PHY_OWNERSHIP;
1103 	intel_de_write(display, reg, val);
1104 }
1105 
1106 static bool xelpdp_tc_phy_is_owned(struct intel_tc_port *tc)
1107 {
1108 	struct intel_display *display = to_intel_display(tc->dig_port);
1109 	enum port port = tc->dig_port->base.port;
1110 	i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);
1111 
1112 	assert_tc_cold_blocked(tc);
1113 
1114 	return intel_de_read(display, reg) & XELPDP_TC_PHY_OWNERSHIP;
1115 }
1116 
/*
 * Read out the current PHY mode into tc->mode and, if the PHY is
 * connected, take the power reference that blocks TC-cold while the
 * mode stays locked.
 */
static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	intel_wakeref_t tc_cold_wref;
	enum intel_display_power_domain domain;

	tc_cold_wref = __tc_cold_block(tc, &domain);

	tc->mode = tc_phy_get_current_mode(tc);
	if (tc->mode != TC_PORT_DISCONNECTED)
		tc->lock_wakeref = tc_cold_block(tc);

	/* In DP-alt/legacy mode TCSS power is expected to be enabled already. */
	drm_WARN_ON(display->drm,
		    (tc->mode == TC_PORT_DP_ALT || tc->mode == TC_PORT_LEGACY) &&
		    !xelpdp_tc_phy_tcss_power_is_enabled(tc));

	__tc_cold_unblock(tc, domain, tc_cold_wref);
}
1135 
/*
 * Connect the PHY in the mode set in tc->mode: block TC-cold, enable
 * TCSS power, take PHY ownership and check that @required_lanes are
 * available. Each step is unwound in reverse order on failure.
 *
 * Returns true on success.
 */
static bool xelpdp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
	tc->lock_wakeref = tc_cold_block(tc);

	/* TBT-alt mode only needs the TC-cold blocking reference. */
	if (tc->mode == TC_PORT_TBT_ALT)
		return true;

	if (!xelpdp_tc_phy_enable_tcss_power(tc, true))
		goto out_unblock_tccold;

	xelpdp_tc_phy_take_ownership(tc, true);

	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
		goto out_release_phy;

	return true;

out_release_phy:
	xelpdp_tc_phy_take_ownership(tc, false);
	xelpdp_tc_phy_wait_for_tcss_power(tc, false);

out_unblock_tccold:
	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));

	return false;
}
1162 
1163 static void xelpdp_tc_phy_disconnect(struct intel_tc_port *tc)
1164 {
1165 	switch (tc->mode) {
1166 	case TC_PORT_LEGACY:
1167 	case TC_PORT_DP_ALT:
1168 		xelpdp_tc_phy_take_ownership(tc, false);
1169 		xelpdp_tc_phy_enable_tcss_power(tc, false);
1170 		fallthrough;
1171 	case TC_PORT_TBT_ALT:
1172 		tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
1173 		break;
1174 	default:
1175 		MISSING_CASE(tc->mode);
1176 	}
1177 }
1178 
/*
 * PHY ops for XELPDP TC ports (display version >= 14); the TC-cold
 * domain, ready check and init are shared with earlier platforms.
 */
static const struct intel_tc_phy_ops xelpdp_tc_phy_ops = {
	.cold_off_domain = tgl_tc_phy_cold_off_domain,
	.hpd_live_status = xelpdp_tc_phy_hpd_live_status,
	.is_ready = adlp_tc_phy_is_ready,
	.is_owned = xelpdp_tc_phy_is_owned,
	.get_hw_state = xelpdp_tc_phy_get_hw_state,
	.connect = xelpdp_tc_phy_connect,
	.disconnect = xelpdp_tc_phy_disconnect,
	.init = adlp_tc_phy_init,
};
1189 
1190 /*
1191  * Generic TC PHY handlers
1192  * -----------------------
1193  */
/* Dispatch to the platform specific TC-cold blocking power domain. */
static enum intel_display_power_domain
tc_phy_cold_off_domain(struct intel_tc_port *tc)
{
	return tc->phy_ops->cold_off_domain(tc);
}
1199 
/*
 * Return the live HPD status as a bitmask of BIT(TC_PORT_*) values,
 * via the platform specific hook.
 */
static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	u32 mask;

	mask = tc->phy_ops->hpd_live_status(tc);

	/* The sink can be connected only in a single mode. */
	drm_WARN_ON_ONCE(display->drm, hweight32(mask) > 1);

	return mask;
}
1212 
/* Dispatch to the platform specific PHY ready check. */
static bool tc_phy_is_ready(struct intel_tc_port *tc)
{
	return tc->phy_ops->is_ready(tc);
}
1217 
/* Dispatch to the platform specific PHY ownership check. */
static bool tc_phy_is_owned(struct intel_tc_port *tc)
{
	return tc->phy_ops->is_owned(tc);
}
1222 
/* Read out the PHY HW state via the platform specific hook. */
static void tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	tc->phy_ops->get_hw_state(tc);
}
1227 
1228 static bool tc_phy_is_ready_and_owned(struct intel_tc_port *tc,
1229 				      bool phy_is_ready, bool phy_is_owned)
1230 {
1231 	struct intel_display *display = to_intel_display(tc->dig_port);
1232 
1233 	drm_WARN_ON(display->drm, phy_is_owned && !phy_is_ready);
1234 
1235 	return phy_is_ready && phy_is_owned;
1236 }
1237 
1238 static bool tc_phy_is_connected(struct intel_tc_port *tc,
1239 				enum icl_port_dpll_id port_pll_type)
1240 {
1241 	struct intel_display *display = to_intel_display(tc->dig_port);
1242 	bool phy_is_ready = tc_phy_is_ready(tc);
1243 	bool phy_is_owned = tc_phy_is_owned(tc);
1244 	bool is_connected;
1245 
1246 	if (tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned))
1247 		is_connected = port_pll_type == ICL_PORT_DPLL_MG_PHY;
1248 	else
1249 		is_connected = port_pll_type == ICL_PORT_DPLL_DEFAULT;
1250 
1251 	drm_dbg_kms(display->drm,
1252 		    "Port %s: PHY connected: %s (ready: %s, owned: %s, pll_type: %s)\n",
1253 		    tc->port_name,
1254 		    str_yes_no(is_connected),
1255 		    str_yes_no(phy_is_ready),
1256 		    str_yes_no(phy_is_owned),
1257 		    port_pll_type == ICL_PORT_DPLL_DEFAULT ? "tbt" : "non-tbt");
1258 
1259 	return is_connected;
1260 }
1261 
/*
 * Wait up to 500 ms for the PHY to report ready, logging an error on
 * timeout. Returns true if the PHY got ready.
 */
static bool tc_phy_wait_for_ready(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);

	if (wait_for(tc_phy_is_ready(tc), 500)) {
		drm_err(display->drm, "Port %s: timeout waiting for PHY ready\n",
			tc->port_name);

		return false;
	}

	return true;
}
1275 
1276 static enum tc_port_mode
1277 hpd_mask_to_tc_mode(u32 live_status_mask)
1278 {
1279 	if (live_status_mask)
1280 		return fls(live_status_mask) - 1;
1281 
1282 	return TC_PORT_DISCONNECTED;
1283 }
1284 
1285 static enum tc_port_mode
1286 tc_phy_hpd_live_mode(struct intel_tc_port *tc)
1287 {
1288 	u32 live_status_mask = tc_phy_hpd_live_status(tc);
1289 
1290 	return hpd_mask_to_tc_mode(live_status_mask);
1291 }
1292 
1293 static enum tc_port_mode
1294 get_tc_mode_in_phy_owned_state(struct intel_tc_port *tc,
1295 			       enum tc_port_mode live_mode)
1296 {
1297 	switch (live_mode) {
1298 	case TC_PORT_LEGACY:
1299 	case TC_PORT_DP_ALT:
1300 		return live_mode;
1301 	default:
1302 		MISSING_CASE(live_mode);
1303 		fallthrough;
1304 	case TC_PORT_TBT_ALT:
1305 	case TC_PORT_DISCONNECTED:
1306 		if (tc->legacy_port)
1307 			return TC_PORT_LEGACY;
1308 		else
1309 			return TC_PORT_DP_ALT;
1310 	}
1311 }
1312 
1313 static enum tc_port_mode
1314 get_tc_mode_in_phy_not_owned_state(struct intel_tc_port *tc,
1315 				   enum tc_port_mode live_mode)
1316 {
1317 	switch (live_mode) {
1318 	case TC_PORT_LEGACY:
1319 		return TC_PORT_DISCONNECTED;
1320 	case TC_PORT_DP_ALT:
1321 	case TC_PORT_TBT_ALT:
1322 		return TC_PORT_TBT_ALT;
1323 	default:
1324 		MISSING_CASE(live_mode);
1325 		fallthrough;
1326 	case TC_PORT_DISCONNECTED:
1327 		if (tc->legacy_port)
1328 			return TC_PORT_DISCONNECTED;
1329 		else
1330 			return TC_PORT_TBT_ALT;
1331 	}
1332 }
1333 
/*
 * Determine the current PHY mode from the ready/owned state and the
 * live HPD status, without changing any HW state.
 */
static enum tc_port_mode
tc_phy_get_current_mode(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum tc_port_mode live_mode = tc_phy_hpd_live_mode(tc);
	bool phy_is_ready;
	bool phy_is_owned;
	enum tc_port_mode mode;

	/*
	 * For legacy ports the IOM firmware initializes the PHY during boot-up
	 * and system resume whether or not a sink is connected. Wait here for
	 * the initialization to get ready.
	 */
	if (tc->legacy_port)
		tc_phy_wait_for_ready(tc);

	phy_is_ready = tc_phy_is_ready(tc);
	phy_is_owned = tc_phy_is_owned(tc);

	if (!tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) {
		mode = get_tc_mode_in_phy_not_owned_state(tc, live_mode);
	} else {
		/* An owned PHY with a TBT-alt live status is inconsistent. */
		drm_WARN_ON(display->drm, live_mode == TC_PORT_TBT_ALT);
		mode = get_tc_mode_in_phy_owned_state(tc, live_mode);
	}

	drm_dbg_kms(display->drm,
		    "Port %s: PHY mode: %s (ready: %s, owned: %s, HPD: %s)\n",
		    tc->port_name,
		    tc_port_mode_name(mode),
		    str_yes_no(phy_is_ready),
		    str_yes_no(phy_is_owned),
		    tc_port_mode_name(live_mode));

	return mode;
}
1371 
1372 static enum tc_port_mode default_tc_mode(struct intel_tc_port *tc)
1373 {
1374 	if (tc->legacy_port)
1375 		return TC_PORT_LEGACY;
1376 
1377 	return TC_PORT_TBT_ALT;
1378 }
1379 
1380 static enum tc_port_mode
1381 hpd_mask_to_target_mode(struct intel_tc_port *tc, u32 live_status_mask)
1382 {
1383 	enum tc_port_mode mode = hpd_mask_to_tc_mode(live_status_mask);
1384 
1385 	if (mode != TC_PORT_DISCONNECTED)
1386 		return mode;
1387 
1388 	return default_tc_mode(tc);
1389 }
1390 
1391 static enum tc_port_mode
1392 tc_phy_get_target_mode(struct intel_tc_port *tc)
1393 {
1394 	u32 live_status_mask = tc_phy_hpd_live_status(tc);
1395 
1396 	return hpd_mask_to_target_mode(tc, live_status_mask);
1397 }
1398 
/*
 * Connect the PHY in the mode matching the live HPD status. If that
 * attempt fails, retry once in the port's default mode; a failure of
 * both attempts triggers a WARN.
 */
static void tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	u32 live_status_mask = tc_phy_hpd_live_status(tc);
	bool connected;

	tc_port_fixup_legacy_flag(tc, live_status_mask);

	tc->mode = hpd_mask_to_target_mode(tc, live_status_mask);

	connected = tc->phy_ops->connect(tc, required_lanes);
	if (!connected && tc->mode != default_tc_mode(tc)) {
		tc->mode = default_tc_mode(tc);
		connected = tc->phy_ops->connect(tc, required_lanes);
	}

	drm_WARN_ON(display->drm, !connected);
}
1417 
1418 static void tc_phy_disconnect(struct intel_tc_port *tc)
1419 {
1420 	if (tc->mode != TC_PORT_DISCONNECTED) {
1421 		tc->phy_ops->disconnect(tc);
1422 		tc->mode = TC_PORT_DISCONNECTED;
1423 	}
1424 }
1425 
/* Run the platform specific PHY initialization under the port mode lock. */
static void tc_phy_init(struct intel_tc_port *tc)
{
	mutex_lock(&tc->lock);
	tc->phy_ops->init(tc);
	mutex_unlock(&tc->lock);
}
1432 
/*
 * Disconnect the PHY and - unless @force_disconnect - reconnect it,
 * re-evaluating the mode from the live HPD status. Pending power domain
 * work is flushed first; on ports where TC-cold does not depend on the
 * AUX power well, that power well is asserted to be disabled already.
 */
static void intel_tc_port_reset_mode(struct intel_tc_port *tc,
				     int required_lanes, bool force_disconnect)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum tc_port_mode old_tc_mode = tc->mode;

	intel_display_power_flush_work(display);
	if (!intel_tc_cold_requires_aux_pw(dig_port)) {
		enum intel_display_power_domain aux_domain;
		bool aux_powered;

		aux_domain = intel_aux_power_domain(dig_port);
		aux_powered = intel_display_power_is_enabled(display, aux_domain);
		drm_WARN_ON(display->drm, aux_powered);
	}

	tc_phy_disconnect(tc);
	if (!force_disconnect)
		tc_phy_connect(tc, required_lanes);

	drm_dbg_kms(display->drm, "Port %s: TC port mode reset (%s -> %s)\n",
		    tc->port_name,
		    tc_port_mode_name(old_tc_mode),
		    tc_port_mode_name(tc->mode));
}
1459 
/* The port needs a reset if its target mode differs from the current one. */
static bool intel_tc_port_needs_reset(struct intel_tc_port *tc)
{
	return tc_phy_get_target_mode(tc) != tc->mode;
}
1464 
1465 static void intel_tc_port_update_mode(struct intel_tc_port *tc,
1466 				      int required_lanes, bool force_disconnect)
1467 {
1468 	if (force_disconnect ||
1469 	    intel_tc_port_needs_reset(tc))
1470 		intel_tc_port_reset_mode(tc, required_lanes, force_disconnect);
1471 }
1472 
/* Take a link reference, keeping the current port mode locked. */
static void __intel_tc_port_get_link(struct intel_tc_port *tc)
{
	tc->link_refcount++;
}
1477 
/* Drop a link reference taken by __intel_tc_port_get_link(). */
static void __intel_tc_port_put_link(struct intel_tc_port *tc)
{
	tc->link_refcount--;
}
1482 
1483 static bool tc_port_is_enabled(struct intel_tc_port *tc)
1484 {
1485 	struct intel_display *display = to_intel_display(tc->dig_port);
1486 	struct intel_digital_port *dig_port = tc->dig_port;
1487 
1488 	assert_tc_port_power_enabled(tc);
1489 
1490 	return intel_de_read(display, DDI_BUF_CTL(dig_port->base.port)) &
1491 	       DDI_BUF_CTL_ENABLE;
1492 }
1493 
1494 /**
1495  * intel_tc_port_init_mode: Read out HW state and init the given port's TypeC mode
1496  * @dig_port: digital port
1497  *
1498  * Read out the HW state and initialize the TypeC mode of @dig_port. The mode
1499  * will be locked until intel_tc_port_sanitize_mode() is called.
1500  */
1501 void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
1502 {
1503 	struct intel_display *display = to_intel_display(dig_port);
1504 	struct intel_tc_port *tc = to_tc_port(dig_port);
1505 	bool update_mode = false;
1506 
1507 	mutex_lock(&tc->lock);
1508 
1509 	drm_WARN_ON(display->drm, tc->mode != TC_PORT_DISCONNECTED);
1510 	drm_WARN_ON(display->drm, tc->lock_wakeref);
1511 	drm_WARN_ON(display->drm, tc->link_refcount);
1512 
1513 	tc_phy_get_hw_state(tc);
1514 	/*
1515 	 * Save the initial mode for the state check in
1516 	 * intel_tc_port_sanitize_mode().
1517 	 */
1518 	tc->init_mode = tc->mode;
1519 
1520 	/*
1521 	 * The PHY needs to be connected for AUX to work during HW readout and
1522 	 * MST topology resume, but the PHY mode can only be changed if the
1523 	 * port is disabled.
1524 	 *
1525 	 * An exception is the case where BIOS leaves the PHY incorrectly
1526 	 * disconnected on an enabled legacy port. Work around that by
1527 	 * connecting the PHY even though the port is enabled. This doesn't
1528 	 * cause a problem as the PHY ownership state is ignored by the
1529 	 * IOM/TCSS firmware (only display can own the PHY in that case).
1530 	 */
1531 	if (!tc_port_is_enabled(tc)) {
1532 		update_mode = true;
1533 	} else if (tc->mode == TC_PORT_DISCONNECTED) {
1534 		drm_WARN_ON(display->drm, !tc->legacy_port);
1535 		drm_err(display->drm,
1536 			"Port %s: PHY disconnected on enabled port, connecting it\n",
1537 			tc->port_name);
1538 		update_mode = true;
1539 	}
1540 
1541 	if (update_mode)
1542 		intel_tc_port_update_mode(tc, 1, false);
1543 
1544 	/* Prevent changing tc->mode until intel_tc_port_sanitize_mode() is called. */
1545 	__intel_tc_port_get_link(tc);
1546 
1547 	mutex_unlock(&tc->lock);
1548 }
1549 
/*
 * Whether the port drives active streams: the MST stream count for MST
 * ports, otherwise 1 if @crtc_state is active. Logs an error if there
 * are active streams but the PHY is not in the connected state implied
 * by the port's PLL type.
 */
static bool tc_port_has_active_streams(struct intel_tc_port *tc,
				       const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum icl_port_dpll_id pll_type = ICL_PORT_DPLL_DEFAULT;
	int active_streams = 0;

	if (dig_port->dp.is_mst) {
		/* TODO: get the PLL type for MST, once HW readout is done for it. */
		active_streams = intel_dp_mst_active_streams(&dig_port->dp);
	} else if (crtc_state && crtc_state->hw.active) {
		pll_type = intel_ddi_port_pll_type(&dig_port->base, crtc_state);
		active_streams = 1;
	}

	if (active_streams && !tc_phy_is_connected(tc, pll_type))
		drm_err(display->drm,
			"Port %s: PHY disconnected with %d active stream(s)\n",
			tc->port_name, active_streams);

	return active_streams;
}
1573 
1574 /**
1575  * intel_tc_port_sanitize_mode: Sanitize the given port's TypeC mode
1576  * @dig_port: digital port
1577  * @crtc_state: atomic state of CRTC connected to @dig_port
1578  *
1579  * Sanitize @dig_port's TypeC mode wrt. the encoder's state right after driver
1580  * loading and system resume:
1581  * If the encoder is enabled keep the TypeC mode/PHY connected state locked until
1582  * the encoder is disabled.
1583  * If the encoder is disabled make sure the PHY is disconnected.
1584  * @crtc_state is valid if @dig_port is enabled, NULL otherwise.
1585  */
1586 void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
1587 				 const struct intel_crtc_state *crtc_state)
1588 {
1589 	struct intel_display *display = to_intel_display(dig_port);
1590 	struct intel_tc_port *tc = to_tc_port(dig_port);
1591 
1592 	mutex_lock(&tc->lock);
1593 
1594 	drm_WARN_ON(display->drm, tc->link_refcount != 1);
1595 	if (!tc_port_has_active_streams(tc, crtc_state)) {
1596 		/*
1597 		 * TBT-alt is the default mode in any case the PHY ownership is not
1598 		 * held (regardless of the sink's connected live state), so
1599 		 * we'll just switch to disconnected mode from it here without
1600 		 * a note.
1601 		 */
1602 		if (tc->init_mode != TC_PORT_TBT_ALT &&
1603 		    tc->init_mode != TC_PORT_DISCONNECTED)
1604 			drm_dbg_kms(display->drm,
1605 				    "Port %s: PHY left in %s mode on disabled port, disconnecting it\n",
1606 				    tc->port_name,
1607 				    tc_port_mode_name(tc->init_mode));
1608 		tc_phy_disconnect(tc);
1609 		__intel_tc_port_put_link(tc);
1610 	}
1611 
1612 	drm_dbg_kms(display->drm, "Port %s: sanitize mode (%s)\n",
1613 		    tc->port_name,
1614 		    tc_port_mode_name(tc->mode));
1615 
1616 	mutex_unlock(&tc->lock);
1617 }
1618 
1619 /*
1620  * The type-C ports are different because even when they are connected, they may
1621  * not be available/usable by the graphics driver: see the comment on
1622  * icl_tc_phy_connect(). So in our driver instead of adding the additional
1623  * concept of "usable" and make everything check for "connected and usable" we
1624  * define a port as "connected" when it is not only connected, but also when it
1625  * is usable by the rest of the driver. That maintains the old assumption that
1626  * connected ports are usable, and avoids exposing to the users objects they
1627  * can't really use.
1628  */
1629 bool intel_tc_port_connected(struct intel_encoder *encoder)
1630 {
1631 	struct intel_display *display = to_intel_display(encoder);
1632 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1633 	struct intel_tc_port *tc = to_tc_port(dig_port);
1634 	u32 mask = ~0;
1635 
1636 	drm_WARN_ON(display->drm, !intel_tc_port_ref_held(dig_port));
1637 
1638 	if (tc->mode != TC_PORT_DISCONNECTED)
1639 		mask = BIT(tc->mode);
1640 
1641 	return tc_phy_hpd_live_status(tc) & mask;
1642 }
1643 
1644 static bool __intel_tc_port_link_needs_reset(struct intel_tc_port *tc)
1645 {
1646 	bool ret;
1647 
1648 	mutex_lock(&tc->lock);
1649 
1650 	ret = tc->link_refcount &&
1651 	      tc->mode == TC_PORT_DP_ALT &&
1652 	      intel_tc_port_needs_reset(tc);
1653 
1654 	mutex_unlock(&tc->lock);
1655 
1656 	return ret;
1657 }
1658 
/* Whether @dig_port's link needs a reset; always false for non-TC ports. */
bool intel_tc_port_link_needs_reset(struct intel_digital_port *dig_port)
{
	if (!intel_encoder_is_tc(&dig_port->base))
		return false;

	return __intel_tc_port_link_needs_reset(to_tc_port(dig_port));
}
1666 
/*
 * Add every active pipe driven by @tc's port to @state with
 * connectors_changed set and commit it, resetting the link.
 *
 * Returns 0 also when there is nothing to do (no active pipes, or the
 * reset condition cleared meanwhile); locking contention errors are
 * retried by the caller's intel_modeset_lock_ctx_retry() loop.
 */
static int reset_link_commit(struct intel_tc_port *tc,
			     struct intel_atomic_state *state,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;
	struct intel_dp *intel_dp = enc_to_intel_dp(&dig_port->base);
	struct intel_crtc *crtc;
	u8 pipe_mask;
	int ret;

	ret = drm_modeset_lock(&display->drm->mode_config.connection_mutex, ctx);
	if (ret)
		return ret;

	ret = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask);
	if (ret)
		return ret;

	if (!pipe_mask)
		return 0;

	for_each_intel_crtc_in_pipe_mask(display->drm, crtc, pipe_mask) {
		struct intel_crtc_state *crtc_state;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		/* Force a modeset on the CRTC to retrain the link. */
		crtc_state->uapi.connectors_changed = true;
	}

	/* The reset condition may have cleared while acquiring the locks. */
	if (!__intel_tc_port_link_needs_reset(tc))
		return 0;

	return drm_atomic_commit(&state->base);
}
1704 
/*
 * Reset the port's link via an internal atomic commit, retrying the
 * commit on modeset lock contention.
 */
static int reset_link(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *_state;
	struct intel_atomic_state *state;
	int ret;

	_state = drm_atomic_state_alloc(display->drm);
	if (!_state)
		return -ENOMEM;

	state = to_intel_atomic_state(_state);
	/* Mark the commit as driver-internal, not a uapi one. */
	state->internal = true;

	intel_modeset_lock_ctx_retry(&ctx, state, 0, ret)
		ret = reset_link_commit(tc, state, &ctx);

	drm_atomic_state_put(&state->base);

	return ret;
}
1727 
/*
 * Delayed work resetting the link of a DP-alt mode port whose sink got
 * disconnected while the link was active.
 */
static void intel_tc_port_link_reset_work(struct work_struct *work)
{
	struct intel_tc_port *tc =
		container_of(work, struct intel_tc_port, link_reset_work.work);
	struct intel_display *display = to_intel_display(tc->dig_port);
	int ret;

	/* The reset condition may have cleared since the work was queued. */
	if (!__intel_tc_port_link_needs_reset(tc))
		return;

	mutex_lock(&display->drm->mode_config.mutex);

	drm_dbg_kms(display->drm,
		    "Port %s: TypeC DP-alt sink disconnected, resetting link\n",
		    tc->port_name);
	ret = reset_link(tc);
	drm_WARN_ON(display->drm, ret);

	mutex_unlock(&display->drm->mode_config.mutex);
}
1748 
/*
 * Schedule a delayed (2 sec) link reset if @dig_port's link needs one.
 * Returns true if the reset work was queued.
 */
bool intel_tc_port_link_reset(struct intel_digital_port *dig_port)
{
	if (!intel_tc_port_link_needs_reset(dig_port))
		return false;

	queue_delayed_work(system_unbound_wq,
			   &to_tc_port(dig_port)->link_reset_work,
			   msecs_to_jiffies(2000));

	return true;
}
1760 
1761 void intel_tc_port_link_cancel_reset_work(struct intel_digital_port *dig_port)
1762 {
1763 	struct intel_tc_port *tc = to_tc_port(dig_port);
1764 
1765 	if (!intel_encoder_is_tc(&dig_port->base))
1766 		return;
1767 
1768 	cancel_delayed_work(&tc->link_reset_work);
1769 }
1770 
/*
 * Lock the port mode, first updating it to match the live status unless
 * a link reference holds the current mode. Pending PHY disconnect work
 * is cancelled, as the PHY must stay connected while locked.
 */
static void __intel_tc_port_lock(struct intel_tc_port *tc,
				 int required_lanes)
{
	struct intel_display *display = to_intel_display(tc->dig_port);

	mutex_lock(&tc->lock);

	cancel_delayed_work(&tc->disconnect_phy_work);

	if (!tc->link_refcount)
		intel_tc_port_update_mode(tc, required_lanes,
					  false);

	/* The PHY must be connected (and owned, except in TBT-alt mode) now. */
	drm_WARN_ON(display->drm, tc->mode == TC_PORT_DISCONNECTED);
	drm_WARN_ON(display->drm, tc->mode != TC_PORT_TBT_ALT && !tc_phy_is_owned(tc));
}
1787 
/* Lock the port mode, requiring only 1 lane (enough for AUX). */
void intel_tc_port_lock(struct intel_digital_port *dig_port)
{
	__intel_tc_port_lock(to_tc_port(dig_port), 1);
}
1792 
1793 /*
1794  * Disconnect the given digital port from its TypeC PHY (handing back the
1795  * control of the PHY to the TypeC subsystem). This will happen in a delayed
1796  * manner after each aux transactions and modeset disables.
1797  */
static void intel_tc_port_disconnect_phy_work(struct work_struct *work)
{
	struct intel_tc_port *tc =
		container_of(work, struct intel_tc_port, disconnect_phy_work.work);

	mutex_lock(&tc->lock);

	/* A held link reference keeps the PHY connected. */
	if (!tc->link_refcount)
		intel_tc_port_update_mode(tc, 1, true);

	mutex_unlock(&tc->lock);
}
1810 
1811 /**
1812  * intel_tc_port_flush_work: flush the work disconnecting the PHY
1813  * @dig_port: digital port
1814  *
1815  * Flush the delayed work disconnecting an idle PHY.
1816  */
1817 static void intel_tc_port_flush_work(struct intel_digital_port *dig_port)
1818 {
1819 	flush_delayed_work(&to_tc_port(dig_port)->disconnect_phy_work);
1820 }
1821 
/*
 * Prepare the port for suspend: cancel the pending link reset work and
 * flush the delayed PHY disconnect work.
 */
void intel_tc_port_suspend(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	cancel_delayed_work_sync(&tc->link_reset_work);
	intel_tc_port_flush_work(dig_port);
}
1829 
/*
 * Unlock the port mode, scheduling the delayed (1 sec) disconnect of a
 * connected PHY that has no link reference keeping it in use.
 */
void intel_tc_port_unlock(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	if (!tc->link_refcount && tc->mode != TC_PORT_DISCONNECTED)
		queue_delayed_work(system_unbound_wq, &tc->disconnect_phy_work,
				   msecs_to_jiffies(1000));

	mutex_unlock(&tc->lock);
}
1840 
1841 bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
1842 {
1843 	struct intel_tc_port *tc = to_tc_port(dig_port);
1844 
1845 	return mutex_is_locked(&tc->lock) ||
1846 	       tc->link_refcount;
1847 }
1848 
/*
 * Take a link reference with @required_lanes, keeping the mode/PHY
 * connected state locked until intel_tc_port_put_link() is called.
 */
void intel_tc_port_get_link(struct intel_digital_port *dig_port,
			    int required_lanes)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	__intel_tc_port_lock(tc, required_lanes);
	__intel_tc_port_get_link(tc);
	intel_tc_port_unlock(dig_port);
}
1858 
/* Drop a link reference taken by intel_tc_port_get_link(). */
void intel_tc_port_put_link(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	intel_tc_port_lock(dig_port);
	__intel_tc_port_put_link(tc);
	intel_tc_port_unlock(dig_port);

	/*
	 * The firmware will not update the HPD status of other TypeC ports
	 * that are active in DP-alt mode with their sink disconnected, until
	 * this port is disabled and its PHY gets disconnected. Make sure this
	 * happens in a timely manner by disconnecting the PHY synchronously.
	 */
	intel_tc_port_flush_work(dig_port);
}
1875 
/*
 * Allocate and initialize the TypeC port state for @dig_port, selecting
 * the platform specific PHY ops, initializing the PHY and reading out
 * the initial port mode.
 *
 * Returns 0 on success, -EINVAL for a non-TC port, -ENOMEM on
 * allocation failure.
 */
int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
{
	struct intel_display *display = to_intel_display(dig_port);
	struct intel_tc_port *tc;
	enum port port = dig_port->base.port;
	enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);

	if (drm_WARN_ON(display->drm, tc_port == TC_PORT_NONE))
		return -EINVAL;

	tc = kzalloc(sizeof(*tc), GFP_KERNEL);
	if (!tc)
		return -ENOMEM;

	dig_port->tc = tc;
	tc->dig_port = dig_port;

	/* Select the PHY ops based on the display version. */
	if (DISPLAY_VER(display) >= 14)
		tc->phy_ops = &xelpdp_tc_phy_ops;
	else if (DISPLAY_VER(display) >= 13)
		tc->phy_ops = &adlp_tc_phy_ops;
	else if (DISPLAY_VER(display) >= 12)
		tc->phy_ops = &tgl_tc_phy_ops;
	else
		tc->phy_ops = &icl_tc_phy_ops;

	tc->port_name = kasprintf(GFP_KERNEL, "%c/TC#%d", port_name(port),
				  tc_port + 1);
	if (!tc->port_name) {
		kfree(tc);
		return -ENOMEM;
	}

	mutex_init(&tc->lock);
	/* TODO: Combine the two works */
	INIT_DELAYED_WORK(&tc->disconnect_phy_work, intel_tc_port_disconnect_phy_work);
	INIT_DELAYED_WORK(&tc->link_reset_work, intel_tc_port_link_reset_work);
	tc->legacy_port = is_legacy;
	tc->mode = TC_PORT_DISCONNECTED;
	tc->link_refcount = 0;

	tc_phy_init(tc);

	intel_tc_port_init_mode(dig_port);

	return 0;
}
1923 
/* Free the TypeC port state, flushing/cancelling pending works first. */
void intel_tc_port_cleanup(struct intel_digital_port *dig_port)
{
	intel_tc_port_suspend(dig_port);

	kfree(dig_port->tc->port_name);
	kfree(dig_port->tc);
	dig_port->tc = NULL;
}
1932