xref: /linux/drivers/gpu/drm/i915/display/intel_tc.c (revision 53597deca0e38c30e6cd4ba2114fa42d2bcd85bb)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5 
6 #include <linux/iopoll.h>
7 
8 #include <drm/drm_print.h>
9 
10 #include "intel_atomic.h"
11 #include "intel_cx0_phy_regs.h"
12 #include "intel_ddi.h"
13 #include "intel_de.h"
14 #include "intel_display.h"
15 #include "intel_display_driver.h"
16 #include "intel_display_power_map.h"
17 #include "intel_display_regs.h"
18 #include "intel_display_types.h"
19 #include "intel_display_utils.h"
20 #include "intel_dkl_phy_regs.h"
21 #include "intel_dp.h"
22 #include "intel_dp_mst.h"
23 #include "intel_mg_phy_regs.h"
24 #include "intel_modeset_lock.h"
25 #include "intel_tc.h"
26 
/* TypeC port (PHY) operating modes. */
enum tc_port_mode {
	TC_PORT_DISCONNECTED,	/* no sink connected */
	TC_PORT_TBT_ALT,	/* TBT-alt sink: PHY owned by the TBT subsystem */
	TC_PORT_DP_ALT,		/* DP-alternate mode sink */
	TC_PORT_LEGACY,		/* static/legacy (non-USB-C) connector */
};
33 
34 struct intel_tc_port;
35 
/*
 * Platform specific TC PHY hooks. One implementation exists for each of
 * ICL, TGL, ADLP and (partially visible here) XELPDP.
 */
struct intel_tc_phy_ops {
	/* Power domain blocking the PHY's TC-cold power state. */
	enum intel_display_power_domain (*cold_off_domain)(struct intel_tc_port *tc);
	/* Bitmask of BIT(TC_PORT_*) modes with an asserted HPD live state. */
	u32 (*hpd_live_status)(struct intel_tc_port *tc);
	/* PHY ready to have its ownership taken by display. */
	bool (*is_ready)(struct intel_tc_port *tc);
	/* PHY ownership currently held by display. */
	bool (*is_owned)(struct intel_tc_port *tc);
	/* Read out the current mode/pin configuration from HW. */
	void (*get_hw_state)(struct intel_tc_port *tc);
	/* Connect/disconnect flow for the PHY; see the ICL handlers. */
	bool (*connect)(struct intel_tc_port *tc, int required_lanes);
	void (*disconnect)(struct intel_tc_port *tc);
	void (*init)(struct intel_tc_port *tc);
};
46 
/* Per-port TypeC state. */
struct intel_tc_port {
	struct intel_digital_port *dig_port;

	/* Platform specific PHY hooks, selected at init time. */
	const struct intel_tc_phy_ops *phy_ops;

	struct mutex lock;	/* protects the TypeC port mode */
	/* Wakeref blocking TC-cold while the port mode is locked in. */
	struct ref_tracker *lock_wakeref;
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	/* Power domain lock_wakeref was acquired for, for debug checks. */
	enum intel_display_power_domain lock_power_domain;
#endif
	struct delayed_work disconnect_phy_work;
	struct delayed_work link_reset_work;
	int link_refcount;
	/* VBT-derived legacy connector flag, fixed up from live status. */
	bool legacy_port:1;
	const char *port_name;
	/* Current TypeC port mode. */
	enum tc_port_mode mode;
	/* Mode at init time - NOTE(review): set outside this chunk, confirm. */
	enum tc_port_mode init_mode;
	/* FIA instance and index of this port within it (pre-MTL). */
	enum phy_fia phy_fia;
	enum intel_tc_pin_assignment pin_assignment;
	u8 phy_fia_idx;
	u8 max_lane_count;
};
69 
70 static enum intel_display_power_domain
71 tc_phy_cold_off_domain(struct intel_tc_port *);
72 static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc);
73 static bool tc_phy_is_ready(struct intel_tc_port *tc);
74 static bool tc_phy_wait_for_ready(struct intel_tc_port *tc);
75 static enum tc_port_mode tc_phy_get_current_mode(struct intel_tc_port *tc);
76 
77 static const char *tc_port_mode_name(enum tc_port_mode mode)
78 {
79 	static const char * const names[] = {
80 		[TC_PORT_DISCONNECTED] = "disconnected",
81 		[TC_PORT_TBT_ALT] = "tbt-alt",
82 		[TC_PORT_DP_ALT] = "dp-alt",
83 		[TC_PORT_LEGACY] = "legacy",
84 	};
85 
86 	if (WARN_ON(mode >= ARRAY_SIZE(names)))
87 		mode = TC_PORT_DISCONNECTED;
88 
89 	return names[mode];
90 }
91 
/* Return the TypeC state associated with @dig_port. */
static struct intel_tc_port *to_tc_port(struct intel_digital_port *dig_port)
{
	return dig_port->tc;
}
96 
97 static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port,
98 				  enum tc_port_mode mode)
99 {
100 	struct intel_tc_port *tc = to_tc_port(dig_port);
101 
102 	return intel_encoder_is_tc(&dig_port->base) && tc->mode == mode;
103 }
104 
/* Return true if @dig_port is a TypeC port in TBT-alt mode. */
bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_TBT_ALT);
}

/* Return true if @dig_port is a TypeC port in DP-alt mode. */
bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_DP_ALT);
}

/* Return true if @dig_port is a TypeC port in legacy mode. */
bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_LEGACY);
}

/*
 * Return true if @dig_port is a non-legacy TypeC port, i.e. a port for
 * which HPD glitches are handled by the TC logic - NOTE(review): exact
 * glitch handling semantics are defined at the callers, confirm there.
 */
bool intel_tc_port_handles_hpd_glitches(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	return intel_encoder_is_tc(&dig_port->base) && !tc->legacy_port;
}
126 
127 /*
128  * The display power domains used for TC ports depending on the
129  * platform and TC mode (legacy, DP-alt, TBT):
130  *
131  * POWER_DOMAIN_DISPLAY_CORE:
132  * --------------------------
133  * ADLP/all modes:
134  *   - TCSS/IOM access for PHY ready state.
135  * ADLP+/all modes:
136  *   - DE/north-,south-HPD ISR access for HPD live state.
137  *
138  * POWER_DOMAIN_PORT_DDI_LANES_<port>:
139  * -----------------------------------
140  * ICL+/all modes:
141  *   - DE/DDI_BUF access for port enabled state.
142  * ADLP/all modes:
143  *   - DE/DDI_BUF access for PHY owned state.
144  *
145  * POWER_DOMAIN_AUX_USBC<TC port index>:
146  * -------------------------------------
147  * ICL/legacy mode:
148  *   - TCSS/IOM,FIA access for PHY ready, owned and HPD live state
149  *   - TCSS/PHY: block TC-cold power state for using the PHY AUX and
150  *     main lanes.
151  * ADLP/legacy, DP-alt modes:
152  *   - TCSS/PHY: block TC-cold power state for using the PHY AUX and
153  *     main lanes.
154  *
155  * POWER_DOMAIN_TC_COLD_OFF:
156  * -------------------------
157  * ICL/DP-alt, TBT mode:
158  *   - TCSS/TBT: block TC-cold power state for using the (direct or
159  *     TBT DP-IN) AUX and main lanes.
160  *
161  * TGL/all modes:
162  *   - TCSS/IOM,FIA access for PHY ready, owned and HPD live state
163  *   - TCSS/PHY: block TC-cold power state for using the (direct or
164  *     TBT DP-IN) AUX and main lanes.
165  *
166  * ADLP/TBT mode:
167  *   - TCSS/TBT: block TC-cold power state for using the (TBT DP-IN)
168  *     AUX and main lanes.
169  *
170  * XELPDP+/all modes:
171  *   - TCSS/IOM,FIA access for PHY ready, owned state
172  *   - TCSS/PHY: block TC-cold power state for using the (direct or
173  *     TBT DP-IN) AUX and main lanes.
174  */
/*
 * Return true if blocking the TC-cold state of @dig_port's PHY requires
 * the port's legacy AUX power domain (as opposed to POWER_DOMAIN_TC_COLD_OFF).
 */
bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port)
{
	struct intel_display *display = to_intel_display(dig_port);
	struct intel_tc_port *tc = to_tc_port(dig_port);

	return tc_phy_cold_off_domain(tc) ==
	       intel_display_power_legacy_aux_domain(display, dig_port->aux_ch);
}
183 
/*
 * Block the PHY's TC-cold power state, returning the acquired wakeref and
 * storing the power domain it was acquired for in @domain.
 */
static struct ref_tracker *
__tc_cold_block(struct intel_tc_port *tc, enum intel_display_power_domain *domain)
{
	struct intel_display *display = to_intel_display(tc->dig_port);

	*domain = tc_phy_cold_off_domain(tc);

	return intel_display_power_get(display, *domain);
}

/*
 * Block TC-cold, tracking the acquired power domain for the debug check in
 * tc_cold_unblock() when CONFIG_DRM_I915_DEBUG_RUNTIME_PM is enabled.
 */
static struct ref_tracker *
tc_cold_block(struct intel_tc_port *tc)
{
	enum intel_display_power_domain domain;
	struct ref_tracker *wakeref;

	wakeref = __tc_cold_block(tc, &domain);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	tc->lock_power_domain = domain;
#endif
	return wakeref;
}

/* Unblock TC-cold, releasing @wakeref acquired for @domain. */
static void
__tc_cold_unblock(struct intel_tc_port *tc, enum intel_display_power_domain domain,
		  struct ref_tracker *wakeref)
{
	struct intel_display *display = to_intel_display(tc->dig_port);

	intel_display_power_put(display, domain, wakeref);
}

/*
 * Unblock TC-cold using the port's current TC-cold-off domain. This must
 * match the domain @wakeref was acquired for (debug-checked when
 * CONFIG_DRM_I915_DEBUG_RUNTIME_PM is enabled).
 */
static void
tc_cold_unblock(struct intel_tc_port *tc, struct ref_tracker *wakeref)
{
	struct intel_display __maybe_unused *display = to_intel_display(tc->dig_port);
	enum intel_display_power_domain domain = tc_phy_cold_off_domain(tc);

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	drm_WARN_ON(display->drm, tc->lock_power_domain != domain);
#endif
	__tc_cold_unblock(tc, domain, wakeref);
}
227 
/* Warn if the display core power domain is not enabled. */
static void
assert_display_core_power_enabled(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);

	drm_WARN_ON(display->drm,
		    !intel_display_power_is_enabled(display, POWER_DOMAIN_DISPLAY_CORE));
}
236 
237 static void
238 assert_tc_cold_blocked(struct intel_tc_port *tc)
239 {
240 	struct intel_display *display = to_intel_display(tc->dig_port);
241 	bool enabled;
242 
243 	enabled = intel_display_power_is_enabled(display,
244 						 tc_phy_cold_off_domain(tc));
245 	drm_WARN_ON(display->drm, !enabled);
246 }
247 
/*
 * Return the DDI-lanes power domain of the port, or POWER_DOMAIN_INVALID
 * for non-TC ports.
 */
static enum intel_display_power_domain
tc_port_power_domain(struct intel_tc_port *tc)
{
	enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);

	if (tc_port == TC_PORT_NONE)
		return POWER_DOMAIN_INVALID;

	/* The TC1..TCn domains are consecutive, indexed by TC port. */
	return POWER_DOMAIN_PORT_DDI_LANES_TC1 + tc_port - TC_PORT_1;
}

/* Warn if the port's DDI-lanes power domain is not enabled. */
static void
assert_tc_port_power_enabled(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);

	drm_WARN_ON(display->drm,
		    !intel_display_power_is_enabled(display, tc_port_power_domain(tc)));
}
267 
/*
 * Read the DP lane assignment mask of the port from the FIA's DFLEXDPSP
 * register. An all-ones register value indicates the PHY is in TC-cold
 * (register reads return 0xffffffff), which is warned about.
 */
static u32 get_lane_mask(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	u32 lane_mask;

	with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE)
		lane_mask = intel_de_read(display, PORT_TX_DFLEXDPSP(tc->phy_fia));

	drm_WARN_ON(display->drm, lane_mask == 0xffffffff);
	assert_tc_cold_blocked(tc);

	/* Extract this port's field from the shared FIA register. */
	lane_mask &= DP_LANE_ASSIGNMENT_MASK(tc->phy_fia_idx);
	return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(tc->phy_fia_idx);
}
282 
283 static char pin_assignment_name(enum intel_tc_pin_assignment pin_assignment)
284 {
285 	if (pin_assignment == INTEL_TC_PIN_ASSIGNMENT_NONE)
286 		return '-';
287 
288 	return 'A' + pin_assignment - INTEL_TC_PIN_ASSIGNMENT_A;
289 }
290 
/*
 * Read the USB-C pin assignment of the port from HW: from TCSS_DDI_STATUS
 * on LNL+ and from the FIA's DFLEXPA1 register before that. Returns
 * INTEL_TC_PIN_ASSIGNMENT_NONE in TBT-alt mode, where the pin assignment
 * is not relevant.
 */
static enum intel_tc_pin_assignment
get_pin_assignment(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
	enum intel_tc_pin_assignment pin_assignment;
	i915_reg_t reg;
	u32 mask;
	u32 val;

	if (tc->mode == TC_PORT_TBT_ALT)
		return INTEL_TC_PIN_ASSIGNMENT_NONE;

	if (DISPLAY_VER(display) >= 20) {
		reg = TCSS_DDI_STATUS(tc_port);
		mask = TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK;
	} else {
		reg = PORT_TX_DFLEXPA1(tc->phy_fia);
		mask = DP_PIN_ASSIGNMENT_MASK(tc->phy_fia_idx);
	}

	with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE)
		val = intel_de_read(display, reg);

	/* All-ones indicates the PHY is in TC-cold. */
	drm_WARN_ON(display->drm, val == 0xffffffff);
	assert_tc_cold_blocked(tc);

	pin_assignment = (val & mask) >> (ffs(mask) - 1);

	switch (pin_assignment) {
	case INTEL_TC_PIN_ASSIGNMENT_A:
	case INTEL_TC_PIN_ASSIGNMENT_B:
	case INTEL_TC_PIN_ASSIGNMENT_F:
		/* A/B/F are only expected on ICL (display ver 11). */
		drm_WARN_ON(display->drm, DISPLAY_VER(display) > 11);
		break;
	case INTEL_TC_PIN_ASSIGNMENT_NONE:
	case INTEL_TC_PIN_ASSIGNMENT_C:
	case INTEL_TC_PIN_ASSIGNMENT_D:
	case INTEL_TC_PIN_ASSIGNMENT_E:
		break;
	default:
		MISSING_CASE(pin_assignment);
	}

	return pin_assignment;
}
337 
/*
 * Return the maximum lane count for MTL+ platforms, derived from the
 * USB-C pin assignment: D provides 2 lanes, C/E provide 4, NONE provides 0.
 * Unexpected assignments are reported and treated as 2 lanes.
 */
static int mtl_get_max_lane_count(struct intel_tc_port *tc)
{
	enum intel_tc_pin_assignment pin_assignment;

	pin_assignment = get_pin_assignment(tc);

	switch (pin_assignment) {
	case INTEL_TC_PIN_ASSIGNMENT_NONE:
		return 0;
	default:
		MISSING_CASE(pin_assignment);
		fallthrough;
	case INTEL_TC_PIN_ASSIGNMENT_D:
		return 2;
	case INTEL_TC_PIN_ASSIGNMENT_C:
	case INTEL_TC_PIN_ASSIGNMENT_E:
		return 4;
	}
}
357 
358 static int icl_get_max_lane_count(struct intel_tc_port *tc)
359 {
360 	u32 lane_mask = 0;
361 
362 	lane_mask = get_lane_mask(tc);
363 
364 	switch (lane_mask) {
365 	default:
366 		MISSING_CASE(lane_mask);
367 		fallthrough;
368 	case 0x1:
369 	case 0x2:
370 	case 0x4:
371 	case 0x8:
372 		return 1;
373 	case 0x3:
374 	case 0xc:
375 		return 2;
376 	case 0xf:
377 		return 4;
378 	}
379 }
380 
/*
 * Return the maximum lane count usable by the port in its current mode.
 * Only DP-alt mode is limited by the pin assignment / FIA lane mask; all
 * other modes can use 4 lanes.
 */
static int get_max_lane_count(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);

	if (tc->mode != TC_PORT_DP_ALT)
		return 4;

	if (DISPLAY_VER(display) >= 14)
		return mtl_get_max_lane_count(tc);

	return icl_get_max_lane_count(tc);
}
393 
/* Cache the port's current pin assignment and max lane count from HW. */
static void read_pin_configuration(struct intel_tc_port *tc)
{
	tc->pin_assignment = get_pin_assignment(tc);
	tc->max_lane_count = get_max_lane_count(tc);
}

/*
 * Return the cached maximum lane count of @dig_port, or 4 for
 * non-TypeC ports.
 */
int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	if (!intel_encoder_is_tc(&dig_port->base))
		return 4;

	return tc->max_lane_count;
}

/*
 * Return the cached USB-C pin assignment of @dig_port, or
 * INTEL_TC_PIN_ASSIGNMENT_NONE for non-TypeC ports.
 */
enum intel_tc_pin_assignment
intel_tc_port_get_pin_assignment(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	if (!intel_encoder_is_tc(&dig_port->base))
		return INTEL_TC_PIN_ASSIGNMENT_NONE;

	return tc->pin_assignment;
}
420 
/*
 * Program the FIA with the number of main lanes (and their position, for
 * lane reversal) the port will use. No-op on display ver 14+, which has
 * no FIA. Lane reversal is only expected in legacy mode.
 */
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
				      int required_lanes)
{
	struct intel_display *display = to_intel_display(dig_port);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	bool lane_reversal = dig_port->lane_reversal;
	u32 val;

	if (DISPLAY_VER(display) >= 14)
		return;

	drm_WARN_ON(display->drm,
		    lane_reversal && tc->mode != TC_PORT_LEGACY);

	assert_tc_cold_blocked(tc);

	val = intel_de_read(display, PORT_TX_DFLEXDPMLE1(tc->phy_fia));
	val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc->phy_fia_idx);

	switch (required_lanes) {
	case 1:
		/* Reversed ports start from the highest lane. */
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3(tc->phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML0(tc->phy_fia_idx);
		break;
	case 2:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3_2(tc->phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML1_0(tc->phy_fia_idx);
		break;
	case 4:
		val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc->phy_fia_idx);
		break;
	default:
		MISSING_CASE(required_lanes);
	}

	intel_de_write(display, PORT_TX_DFLEXDPMLE1(tc->phy_fia), val);
}
460 
/*
 * Sanitize the VBT-derived legacy_port flag against the live HPD status:
 * a legacy port should only see a legacy HPD, a non-legacy port only
 * DP-alt/TBT-alt HPDs. If exactly one HPD is asserted and it contradicts
 * the flag, trust the live status and flip the flag.
 */
static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc,
				      u32 live_status_mask)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	u32 valid_hpd_mask;

	drm_WARN_ON(display->drm, tc->mode != TC_PORT_DISCONNECTED);

	/* Only an unambiguous, single asserted HPD can fix up the flag. */
	if (hweight32(live_status_mask) != 1)
		return;

	if (tc->legacy_port)
		valid_hpd_mask = BIT(TC_PORT_LEGACY);
	else
		valid_hpd_mask = BIT(TC_PORT_DP_ALT) |
				 BIT(TC_PORT_TBT_ALT);

	if (!(live_status_mask & ~valid_hpd_mask))
		return;

	/* If live status mismatches the VBT flag, trust the live status. */
	drm_dbg_kms(display->drm,
		    "Port %s: live status %08x mismatches the legacy port flag %08x, fixing flag\n",
		    tc->port_name, live_status_mask, valid_hpd_mask);

	tc->legacy_port = !tc->legacy_port;
}
488 
/*
 * Compute the port's FIA instance and its index within that instance.
 * With a modular FIA each instance serves 2 TC ports, otherwise all ports
 * are on FIA1.
 */
static void tc_phy_load_fia_params(struct intel_tc_port *tc, bool modular_fia)
{
	enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);

	/*
	 * Each Modular FIA instance houses 2 TC ports. In SOC that has more
	 * than two TC ports, there are multiple instances of Modular FIA.
	 */
	if (modular_fia) {
		tc->phy_fia = tc_port / 2;
		tc->phy_fia_idx = tc_port % 2;
	} else {
		tc->phy_fia = FIA1;
		tc->phy_fia_idx = tc_port;
	}
}
505 
/*
 * ICL TC PHY handlers
 * -------------------
 */

/*
 * Return the power domain blocking TC-cold on ICL: the port's AUX domain
 * for legacy ports, POWER_DOMAIN_TC_COLD_OFF otherwise.
 */
static enum intel_display_power_domain
icl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;

	if (tc->legacy_port)
		return intel_display_power_legacy_aux_domain(display, dig_port->aux_ch);

	return POWER_DOMAIN_TC_COLD_OFF;
}
521 
/*
 * Return the live HPD status mask (BIT(TC_PORT_*)) on ICL, combining the
 * FIA's TBT/TC live state bits with the PCH's ISR bit for legacy HPD.
 */
static u32 icl_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;
	u32 isr_bit = display->hotplug.pch_hpd[dig_port->base.hpd_pin];
	u32 fia_isr;
	u32 pch_isr;
	u32 mask = 0;

	with_intel_display_power(display, tc_phy_cold_off_domain(tc)) {
		fia_isr = intel_de_read(display, PORT_TX_DFLEXDPSP(tc->phy_fia));
		pch_isr = intel_de_read(display, SDEISR);
	}

	/* All-ones means the PHY is in TC-cold, so nothing is connected. */
	if (fia_isr == 0xffffffff) {
		drm_dbg_kms(display->drm,
			    "Port %s: PHY in TCCOLD, nothing connected\n",
			    tc->port_name);
		return mask;
	}

	if (fia_isr & TC_LIVE_STATE_TBT(tc->phy_fia_idx))
		mask |= BIT(TC_PORT_TBT_ALT);
	if (fia_isr & TC_LIVE_STATE_TC(tc->phy_fia_idx))
		mask |= BIT(TC_PORT_DP_ALT);

	if (pch_isr & isr_bit)
		mask |= BIT(TC_PORT_LEGACY);

	return mask;
}
553 
/*
 * Return the PHY status complete flag indicating that display can acquire the
 * PHY ownership. The IOM firmware sets this flag when a DP-alt or legacy sink
 * is connected and it's ready to switch the ownership to display. The flag
 * will be left cleared when a TBT-alt sink is connected, where the PHY is
 * owned by the TBT subsystem and so switching the ownership to display is not
 * required.
 */
static bool icl_tc_phy_is_ready(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(display, PORT_TX_DFLEXDPPMS(tc->phy_fia));
	/* All-ones means the PHY is in TC-cold. */
	if (val == 0xffffffff) {
		drm_dbg_kms(display->drm,
			    "Port %s: PHY in TCCOLD, assuming not ready\n",
			    tc->port_name);
		return false;
	}

	return val & DP_PHY_MODE_STATUS_COMPLETED(tc->phy_fia_idx);
}
579 
/*
 * Take (or release, if @take is false) the PHY ownership via the FIA's
 * DFLEXDPCSSS safe-mode bit. Returns false if the PHY is in TC-cold
 * (register reads all-ones) and so the ownership can't be changed.
 */
static bool icl_tc_phy_take_ownership(struct intel_tc_port *tc,
				      bool take)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(display, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
	if (val == 0xffffffff) {
		drm_dbg_kms(display->drm,
			    "Port %s: PHY in TCCOLD, can't %s ownership\n",
			    tc->port_name, take ? "take" : "release");

		return false;
	}

	val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
	if (take)
		val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);

	intel_de_write(display, PORT_TX_DFLEXDPCSSS(tc->phy_fia), val);

	return true;
}
605 
/*
 * Return true if display currently owns the PHY, based on the FIA's
 * DFLEXDPCSSS safe-mode bit. A PHY in TC-cold is assumed not owned.
 */
static bool icl_tc_phy_is_owned(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(display, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
	if (val == 0xffffffff) {
		drm_dbg_kms(display->drm,
			    "Port %s: PHY in TCCOLD, assume not owned\n",
			    tc->port_name);
		return false;
	}

	return val & DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
}
623 
/*
 * Read out the PHY's current mode and pin configuration from HW, blocking
 * TC-cold for the duration of the readout. If a sink is connected, a
 * TC-cold blocking wakeref is kept in tc->lock_wakeref.
 */
static void icl_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	enum intel_display_power_domain domain;
	struct ref_tracker *tc_cold_wref;

	tc_cold_wref = __tc_cold_block(tc, &domain);

	tc->mode = tc_phy_get_current_mode(tc);
	if (tc->mode != TC_PORT_DISCONNECTED) {
		tc->lock_wakeref = tc_cold_block(tc);

		read_pin_configuration(tc);
	}

	__tc_cold_unblock(tc, domain, tc_cold_wref);
}
640 
/*
 * This function implements the first part of the Connect Flow described by our
 * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
 * lanes, EDID, etc) is done as needed in the typical places.
 *
 * Unlike the other ports, type-C ports are not available to use as soon as we
 * get a hotplug. The type-C PHYs can be shared between multiple controllers:
 * display, USB, etc. As a result, handshaking through FIA is required around
 * connect and disconnect to cleanly transfer ownership with the controller and
 * set the type-C power state.
 */

/*
 * Verify that a legacy or DP-alt mode connection is still valid: the sink
 * is still connected (DP-alt only) and the PHY provides at least
 * @required_lanes lanes.
 */
static bool tc_phy_verify_legacy_or_dp_alt_mode(struct intel_tc_port *tc,
						int required_lanes)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;
	int max_lanes;

	max_lanes = intel_tc_port_max_lane_count(dig_port);
	if (tc->mode == TC_PORT_LEGACY) {
		/* Legacy mode always provides all 4 lanes. */
		drm_WARN_ON(display->drm, max_lanes != 4);
		return true;
	}

	drm_WARN_ON(display->drm, tc->mode != TC_PORT_DP_ALT);

	/*
	 * Now we have to re-check the live state, in case the port recently
	 * became disconnected. Not necessary for legacy mode.
	 */
	if (!(tc_phy_hpd_live_status(tc) & BIT(TC_PORT_DP_ALT))) {
		drm_dbg_kms(display->drm, "Port %s: PHY sudden disconnect\n",
			    tc->port_name);
		return false;
	}

	if (max_lanes < required_lanes) {
		drm_dbg_kms(display->drm,
			    "Port %s: PHY max lanes %d < required lanes %d\n",
			    tc->port_name,
			    max_lanes, required_lanes);
		return false;
	}

	return true;
}
687 
/*
 * ICL Connect Flow: block TC-cold, take PHY ownership (unless in TBT-alt
 * mode, which doesn't require it) and verify the connection. On failure
 * the ownership and wakeref are released again. Returns true on success.
 */
static bool icl_tc_phy_connect(struct intel_tc_port *tc,
			       int required_lanes)
{
	struct intel_display *display = to_intel_display(tc->dig_port);

	tc->lock_wakeref = tc_cold_block(tc);

	if (tc->mode == TC_PORT_TBT_ALT) {
		read_pin_configuration(tc);

		return true;
	}

	/*
	 * A legacy port is expected to be always ready/ownable; warn but
	 * continue in that case.
	 */
	if ((!tc_phy_is_ready(tc) ||
	     !icl_tc_phy_take_ownership(tc, true)) &&
	    !drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY)) {
		drm_dbg_kms(display->drm, "Port %s: can't take PHY ownership (ready %s)\n",
			    tc->port_name,
			    str_yes_no(tc_phy_is_ready(tc)));
		goto out_unblock_tc_cold;
	}

	read_pin_configuration(tc);

	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
		goto out_release_phy;

	return true;

out_release_phy:
	icl_tc_phy_take_ownership(tc, false);
out_unblock_tc_cold:
	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));

	return false;
}
724 
/*
 * See the comment at the connect function. This implements the Disconnect
 * Flow.
 */
static void icl_tc_phy_disconnect(struct intel_tc_port *tc)
{
	switch (tc->mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		/* Release the PHY ownership taken during connect. */
		icl_tc_phy_take_ownership(tc, false);
		fallthrough;
	case TC_PORT_TBT_ALT:
		/* Drop the TC-cold blocking wakeref held since connect. */
		tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
		break;
	default:
		MISSING_CASE(tc->mode);
	}
}
743 
/* ICL has a single (non-modular) FIA serving all TC ports. */
static void icl_tc_phy_init(struct intel_tc_port *tc)
{
	tc_phy_load_fia_params(tc, false);
}
748 
/* ICL TC PHY hooks. */
static const struct intel_tc_phy_ops icl_tc_phy_ops = {
	.cold_off_domain = icl_tc_phy_cold_off_domain,
	.hpd_live_status = icl_tc_phy_hpd_live_status,
	.is_ready = icl_tc_phy_is_ready,
	.is_owned = icl_tc_phy_is_owned,
	.get_hw_state = icl_tc_phy_get_hw_state,
	.connect = icl_tc_phy_connect,
	.disconnect = icl_tc_phy_disconnect,
	.init = icl_tc_phy_init,
};
759 
/*
 * TGL TC PHY handlers
 * -------------------
 */

/* On TGL TC-cold is blocked by POWER_DOMAIN_TC_COLD_OFF in all modes. */
static enum intel_display_power_domain
tgl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
{
	return POWER_DOMAIN_TC_COLD_OFF;
}
769 
/*
 * Detect from HW whether the SoC uses a modular FIA and load the port's
 * FIA parameters accordingly.
 */
static void tgl_tc_phy_init(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	u32 val;

	with_intel_display_power(display, tc_phy_cold_off_domain(tc))
		val = intel_de_read(display, PORT_TX_DFLEXDPSP(FIA1));

	/* All-ones would mean the PHY is in TC-cold. */
	drm_WARN_ON(display->drm, val == 0xffffffff);

	tc_phy_load_fia_params(tc, val & MODULAR_FIA_MASK);
}
782 
/* TGL TC PHY hooks; only the TC-cold domain and init differ from ICL. */
static const struct intel_tc_phy_ops tgl_tc_phy_ops = {
	.cold_off_domain = tgl_tc_phy_cold_off_domain,
	.hpd_live_status = icl_tc_phy_hpd_live_status,
	.is_ready = icl_tc_phy_is_ready,
	.is_owned = icl_tc_phy_is_owned,
	.get_hw_state = icl_tc_phy_get_hw_state,
	.connect = icl_tc_phy_connect,
	.disconnect = icl_tc_phy_disconnect,
	.init = tgl_tc_phy_init,
};
793 
/*
 * ADLP TC PHY handlers
 * --------------------
 */

/*
 * Return the power domain blocking TC-cold on ADLP: the port's AUX domain
 * for legacy and DP-alt modes, POWER_DOMAIN_TC_COLD_OFF for TBT-alt.
 */
static enum intel_display_power_domain
adlp_tc_phy_cold_off_domain(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;

	if (tc->mode != TC_PORT_TBT_ALT)
		return intel_display_power_legacy_aux_domain(display, dig_port->aux_ch);

	return POWER_DOMAIN_TC_COLD_OFF;
}
809 
/*
 * Return the live HPD status mask (BIT(TC_PORT_*)) on ADLP, combining the
 * north display engine's TC/TBT hotplug ISR bits with the PCH's ISR bit
 * for legacy HPD.
 */
static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
	u32 cpu_isr_bits = display->hotplug.hpd[hpd_pin];
	u32 pch_isr_bit = display->hotplug.pch_hpd[hpd_pin];
	u32 cpu_isr;
	u32 pch_isr;
	u32 mask = 0;

	with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE) {
		cpu_isr = intel_de_read(display, GEN11_DE_HPD_ISR);
		pch_isr = intel_de_read(display, SDEISR);
	}

	if (cpu_isr & (cpu_isr_bits & GEN11_DE_TC_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_DP_ALT);
	if (cpu_isr & (cpu_isr_bits & GEN11_DE_TBT_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_TBT_ALT);

	if (pch_isr & pch_isr_bit)
		mask |= BIT(TC_PORT_LEGACY);

	return mask;
}
836 
/*
 * Return the PHY status complete flag indicating that display can acquire the
 * PHY ownership. The IOM firmware sets this flag when it's ready to switch
 * the ownership to display, regardless of what sink is connected (TBT-alt,
 * DP-alt, legacy or nothing). For TBT-alt sinks the PHY is owned by the TBT
 * subsystem and so switching the ownership to display is not required.
 */
static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
	u32 val;

	assert_display_core_power_enabled(tc);

	val = intel_de_read(display, TCSS_DDI_STATUS(tc_port));
	/* All-ones means the PHY is in TC-cold. */
	if (val == 0xffffffff) {
		drm_dbg_kms(display->drm,
			    "Port %s: PHY in TCCOLD, assuming not ready\n",
			    tc->port_name);
		return false;
	}

	return val & TCSS_DDI_STATUS_READY;
}
862 
/*
 * Take (or release, if @take is false) the PHY ownership via the DDI_BUF_CTL
 * ownership bit. Always succeeds on ADLP.
 */
static bool adlp_tc_phy_take_ownership(struct intel_tc_port *tc,
				       bool take)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum port port = tc->dig_port->base.port;

	assert_tc_port_power_enabled(tc);

	intel_de_rmw(display, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP,
		     take ? DDI_BUF_CTL_TC_PHY_OWNERSHIP : 0);

	return true;
}
876 
/* Return true if display owns the PHY, from the DDI_BUF_CTL ownership bit. */
static bool adlp_tc_phy_is_owned(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum port port = tc->dig_port->base.port;
	u32 val;

	assert_tc_port_power_enabled(tc);

	val = intel_de_read(display, DDI_BUF_CTL(port));
	return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP;
}
888 
/*
 * Read out the PHY's current mode and pin configuration from HW, holding
 * the port's DDI-lanes power domain for the duration of the readout. If a
 * sink is connected, a TC-cold blocking wakeref is kept in tc->lock_wakeref.
 */
static void adlp_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum intel_display_power_domain port_power_domain =
		tc_port_power_domain(tc);
	struct ref_tracker *port_wakeref;

	port_wakeref = intel_display_power_get(display, port_power_domain);

	tc->mode = tc_phy_get_current_mode(tc);
	if (tc->mode != TC_PORT_DISCONNECTED) {
		tc->lock_wakeref = tc_cold_block(tc);

		read_pin_configuration(tc);
	}

	intel_display_power_put(display, port_power_domain, port_wakeref);
}
907 
/*
 * ADLP Connect Flow: take the port's DDI-lanes power domain, take PHY
 * ownership, wait for the PHY to be ready, then block TC-cold and verify
 * the connection. TBT-alt mode only needs the TC-cold blocking wakeref.
 * All intermediate acquisitions are released on failure (and the port
 * power is released on success too). Returns true on success.
 */
static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum intel_display_power_domain port_power_domain =
		tc_port_power_domain(tc);
	struct ref_tracker *port_wakeref;

	if (tc->mode == TC_PORT_TBT_ALT) {
		tc->lock_wakeref = tc_cold_block(tc);

		read_pin_configuration(tc);

		return true;
	}

	port_wakeref = intel_display_power_get(display, port_power_domain);

	/* A legacy port is expected to be always ownable; warn but continue. */
	if (!adlp_tc_phy_take_ownership(tc, true) &&
	    !drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY)) {
		drm_dbg_kms(display->drm, "Port %s: can't take PHY ownership\n",
			    tc->port_name);
		goto out_put_port_power;
	}

	if (!tc_phy_is_ready(tc) &&
	    !drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY)) {
		drm_dbg_kms(display->drm, "Port %s: PHY not ready\n",
			    tc->port_name);
		goto out_release_phy;
	}

	tc->lock_wakeref = tc_cold_block(tc);

	read_pin_configuration(tc);

	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
		goto out_unblock_tc_cold;

	intel_display_power_put(display, port_power_domain, port_wakeref);

	return true;

out_unblock_tc_cold:
	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
out_release_phy:
	adlp_tc_phy_take_ownership(tc, false);
out_put_port_power:
	intel_display_power_put(display, port_power_domain, port_wakeref);

	return false;
}
959 
/*
 * ADLP Disconnect Flow: drop the TC-cold blocking wakeref held since
 * connect and release the PHY ownership, holding the port's DDI-lanes
 * power domain for the duration.
 */
static void adlp_tc_phy_disconnect(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum intel_display_power_domain port_power_domain =
		tc_port_power_domain(tc);
	struct ref_tracker *port_wakeref;

	port_wakeref = intel_display_power_get(display, port_power_domain);

	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));

	switch (tc->mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		adlp_tc_phy_take_ownership(tc, false);
		fallthrough;
	case TC_PORT_TBT_ALT:
		break;
	default:
		MISSING_CASE(tc->mode);
	}

	intel_display_power_put(display, port_power_domain, port_wakeref);
}
984 
/* ADLP always uses a modular FIA. */
static void adlp_tc_phy_init(struct intel_tc_port *tc)
{
	tc_phy_load_fia_params(tc, true);
}
989 
/* ADLP TC PHY hooks. */
static const struct intel_tc_phy_ops adlp_tc_phy_ops = {
	.cold_off_domain = adlp_tc_phy_cold_off_domain,
	.hpd_live_status = adlp_tc_phy_hpd_live_status,
	.is_ready = adlp_tc_phy_is_ready,
	.is_owned = adlp_tc_phy_is_owned,
	.get_hw_state = adlp_tc_phy_get_hw_state,
	.connect = adlp_tc_phy_connect,
	.disconnect = adlp_tc_phy_disconnect,
	.init = adlp_tc_phy_init,
};
1000 
1001 /*
1002  * XELPDP TC PHY handlers
1003  * ----------------------
1004  */
/*
 * Return the live HPD status mask (BIT(TC_PORT_*)) on XELPDP, combining
 * the PICA's DP-alt/TBT hotplug ISR bits with the PCH's ISR bit for
 * legacy HPD (only considered on legacy ports).
 */
static u32 xelpdp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
	u32 pica_isr_bits = display->hotplug.hpd[hpd_pin];
	u32 pch_isr_bit = display->hotplug.pch_hpd[hpd_pin];
	u32 pica_isr;
	u32 pch_isr;
	u32 mask = 0;

	with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE) {
		pica_isr = intel_de_read(display, PICAINTERRUPT_ISR);
		pch_isr = intel_de_read(display, SDEISR);
	}

	if (pica_isr & (pica_isr_bits & XELPDP_DP_ALT_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_DP_ALT);
	if (pica_isr & (pica_isr_bits & XELPDP_TBT_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_TBT_ALT);

	if (tc->legacy_port && (pch_isr & pch_isr_bit))
		mask |= BIT(TC_PORT_LEGACY);

	return mask;
}
1031 
/* Return the TCSS power state of the PHY from the PORT_BUF_CTL1 register. */
static bool
xelpdp_tc_phy_tcss_power_is_enabled(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum port port = tc->dig_port->base.port;
	i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);

	assert_tc_cold_blocked(tc);

	return intel_de_read(display, reg) & XELPDP_TCSS_POWER_STATE;
}
1043 
1044 static bool
1045 xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port *tc, bool enabled)
1046 {
1047 	struct intel_display *display = to_intel_display(tc->dig_port);
1048 	bool is_enabled;
1049 	int ret;
1050 
1051 	ret = poll_timeout_us(is_enabled = xelpdp_tc_phy_tcss_power_is_enabled(tc),
1052 			      is_enabled == enabled,
1053 			      200, 5000, false);
1054 	if (ret) {
1055 		drm_dbg_kms(display->drm,
1056 			    "Port %s: timeout waiting for TCSS power to get %s\n",
1057 			    str_enabled_disabled(enabled),
1058 			    tc->port_name);
1059 		return false;
1060 	}
1061 
1062 	return true;
1063 }
1064 
1065 /*
1066  * Gfx driver WA 14020908590 for PTL tcss_rxdetect_clkswb_req/ack
1067  * handshake violation when pwwreq= 0->1 during TC7/10 entry
1068  */
1069 static void xelpdp_tc_power_request_wa(struct intel_display *display, bool enable)
1070 {
1071 	/* check if mailbox is running busy */
1072 	if (intel_de_wait_for_clear_ms(display, TCSS_DISP_MAILBOX_IN_CMD,
1073 				       TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY, 10)) {
1074 		drm_dbg_kms(display->drm,
1075 			    "Timeout waiting for TCSS mailbox run/busy bit to clear\n");
1076 		return;
1077 	}
1078 
1079 	intel_de_write(display, TCSS_DISP_MAILBOX_IN_DATA, enable ? 1 : 0);
1080 	intel_de_write(display, TCSS_DISP_MAILBOX_IN_CMD,
1081 		       TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY |
1082 		       TCSS_DISP_MAILBOX_IN_CMD_DATA(0x1));
1083 
1084 	/* wait to clear mailbox running busy bit before continuing */
1085 	if (intel_de_wait_for_clear_ms(display, TCSS_DISP_MAILBOX_IN_CMD,
1086 				       TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY, 10)) {
1087 		drm_dbg_kms(display->drm,
1088 			    "Timeout after writing data to mailbox. Mailbox run/busy bit did not clear\n");
1089 		return;
1090 	}
1091 }
1092 
1093 static void __xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
1094 {
1095 	struct intel_display *display = to_intel_display(tc->dig_port);
1096 	enum port port = tc->dig_port->base.port;
1097 	i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);
1098 	u32 val;
1099 
1100 	assert_tc_cold_blocked(tc);
1101 
1102 	if (DISPLAY_VER(display) == 30)
1103 		xelpdp_tc_power_request_wa(display, enable);
1104 
1105 	val = intel_de_read(display, reg);
1106 	if (enable)
1107 		val |= XELPDP_TCSS_POWER_REQUEST;
1108 	else
1109 		val &= ~XELPDP_TCSS_POWER_REQUEST;
1110 	intel_de_write(display, reg, val);
1111 }
1112 
/*
 * Request the TCSS power state given by @enable and wait for it to take
 * effect. On a failed enable the request is rolled back, except on legacy
 * ports where a failure is unexpected (and only warned about).
 *
 * Returns true on success, false on timeout/rollback.
 */
static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
{
	struct intel_display *display = to_intel_display(tc->dig_port);

	__xelpdp_tc_phy_enable_tcss_power(tc, enable);

	/* When enabling, the PHY must first report ready. */
	if (enable && !tc_phy_wait_for_ready(tc))
		goto out_disable;

	if (!xelpdp_tc_phy_wait_for_tcss_power(tc, enable))
		goto out_disable;

	return true;

out_disable:
	/* Legacy ports are expected to always power up; don't roll back. */
	if (drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY))
		return false;

	/* Nothing to roll back if the failed request was a disable. */
	if (!enable)
		return false;

	__xelpdp_tc_phy_enable_tcss_power(tc, false);
	xelpdp_tc_phy_wait_for_tcss_power(tc, false);

	return false;
}
1139 
1140 static void xelpdp_tc_phy_take_ownership(struct intel_tc_port *tc, bool take)
1141 {
1142 	struct intel_display *display = to_intel_display(tc->dig_port);
1143 	enum port port = tc->dig_port->base.port;
1144 	i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);
1145 	u32 val;
1146 
1147 	assert_tc_cold_blocked(tc);
1148 
1149 	val = intel_de_read(display, reg);
1150 	if (take)
1151 		val |= XELPDP_TC_PHY_OWNERSHIP;
1152 	else
1153 		val &= ~XELPDP_TC_PHY_OWNERSHIP;
1154 	intel_de_write(display, reg, val);
1155 }
1156 
1157 static bool xelpdp_tc_phy_is_owned(struct intel_tc_port *tc)
1158 {
1159 	struct intel_display *display = to_intel_display(tc->dig_port);
1160 	enum port port = tc->dig_port->base.port;
1161 	i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);
1162 
1163 	assert_tc_cold_blocked(tc);
1164 
1165 	return intel_de_read(display, reg) & XELPDP_TC_PHY_OWNERSHIP;
1166 }
1167 
/*
 * Read out the current HW state of the PHY (mode, pin configuration) during
 * driver load/resume, taking the required TC-cold blocking power reference
 * for the duration of the readout and keeping one in tc->lock_wakeref if
 * the PHY turns out to be connected.
 */
static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct ref_tracker *tc_cold_wref;
	enum intel_display_power_domain domain;

	tc_cold_wref = __tc_cold_block(tc, &domain);

	tc->mode = tc_phy_get_current_mode(tc);
	if (tc->mode != TC_PORT_DISCONNECTED) {
		/* Keep TC-cold blocked while the PHY stays connected. */
		tc->lock_wakeref = tc_cold_block(tc);

		read_pin_configuration(tc);
		/*
		 * Set a valid lane count value for a DP-alt sink which got
		 * disconnected. The driver can only disable the output on this PHY.
		 */
		if (tc->max_lane_count == 0)
			tc->max_lane_count = 4;
	}

	/* A connected (non-TBT) PHY implies TCSS power is enabled. */
	drm_WARN_ON(display->drm,
		    (tc->mode == TC_PORT_DP_ALT || tc->mode == TC_PORT_LEGACY) &&
		    !xelpdp_tc_phy_tcss_power_is_enabled(tc));

	__tc_cold_unblock(tc, domain, tc_cold_wref);
}
1195 
/*
 * Connect the PHY in tc->mode, providing @required_lanes. For DP-alt/legacy
 * modes this powers up TCSS, takes PHY ownership and verifies the mode;
 * everything is unwound on failure.
 *
 * Returns true on success.
 */
static bool xelpdp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
	tc->lock_wakeref = tc_cold_block(tc);

	/* TBT-alt needs no TCSS power/ownership handshake. */
	if (tc->mode == TC_PORT_TBT_ALT) {
		read_pin_configuration(tc);

		return true;
	}

	if (!xelpdp_tc_phy_enable_tcss_power(tc, true))
		goto out_unblock_tccold;

	xelpdp_tc_phy_take_ownership(tc, true);

	read_pin_configuration(tc);

	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
		goto out_release_phy;

	return true;

out_release_phy:
	/* Unwind in reverse order: ownership first, then TCSS power. */
	xelpdp_tc_phy_take_ownership(tc, false);
	xelpdp_tc_phy_wait_for_tcss_power(tc, false);

out_unblock_tccold:
	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));

	return false;
}
1227 
/*
 * Disconnect the PHY: release ownership and TCSS power for DP-alt/legacy
 * modes, then drop the TC-cold blocking reference for all modes.
 */
static void xelpdp_tc_phy_disconnect(struct intel_tc_port *tc)
{
	switch (tc->mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		xelpdp_tc_phy_take_ownership(tc, false);
		xelpdp_tc_phy_enable_tcss_power(tc, false);
		fallthrough;
	case TC_PORT_TBT_ALT:
		tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
		break;
	default:
		MISSING_CASE(tc->mode);
	}
}
1243 
/* PHY ops for XELPDP (display version 14+); reuses TGL/ADL-P helpers where unchanged. */
static const struct intel_tc_phy_ops xelpdp_tc_phy_ops = {
	.cold_off_domain = tgl_tc_phy_cold_off_domain,
	.hpd_live_status = xelpdp_tc_phy_hpd_live_status,
	.is_ready = adlp_tc_phy_is_ready,
	.is_owned = xelpdp_tc_phy_is_owned,
	.get_hw_state = xelpdp_tc_phy_get_hw_state,
	.connect = xelpdp_tc_phy_connect,
	.disconnect = xelpdp_tc_phy_disconnect,
	.init = adlp_tc_phy_init,
};
1254 
1255 /*
1256  * Generic TC PHY handlers
1257  * -----------------------
1258  */
/* Power domain whose enabling blocks TC-cold on this platform/port. */
static enum intel_display_power_domain
tc_phy_cold_off_domain(struct intel_tc_port *tc)
{
	return tc->phy_ops->cold_off_domain(tc);
}
1264 
1265 static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc)
1266 {
1267 	struct intel_display *display = to_intel_display(tc->dig_port);
1268 	u32 mask;
1269 
1270 	mask = tc->phy_ops->hpd_live_status(tc);
1271 
1272 	/* The sink can be connected only in a single mode. */
1273 	drm_WARN_ON_ONCE(display->drm, hweight32(mask) > 1);
1274 
1275 	return mask;
1276 }
1277 
/* Has the IOM/TCSS firmware finished initializing the PHY? */
static bool tc_phy_is_ready(struct intel_tc_port *tc)
{
	return tc->phy_ops->is_ready(tc);
}
1282 
/* Does display currently hold ownership of the PHY? */
static bool tc_phy_is_owned(struct intel_tc_port *tc)
{
	return tc->phy_ops->is_owned(tc);
}
1287 
/* Read out the PHY HW state via the platform specific hook. */
static void tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	tc->phy_ops->get_hw_state(tc);
}
1292 
1293 /* Is the PHY owned by display i.e. is it in legacy or DP-alt mode? */
1294 static bool tc_phy_owned_by_display(struct intel_tc_port *tc,
1295 				    bool phy_is_ready, bool phy_is_owned)
1296 {
1297 	struct intel_display *display = to_intel_display(tc->dig_port);
1298 
1299 	if (DISPLAY_VER(display) < 20) {
1300 		drm_WARN_ON(display->drm, phy_is_owned && !phy_is_ready);
1301 
1302 		return phy_is_ready && phy_is_owned;
1303 	} else {
1304 		return phy_is_owned;
1305 	}
1306 }
1307 
1308 static bool tc_phy_is_connected(struct intel_tc_port *tc,
1309 				enum icl_port_dpll_id port_pll_type)
1310 {
1311 	struct intel_display *display = to_intel_display(tc->dig_port);
1312 	bool phy_is_ready = tc_phy_is_ready(tc);
1313 	bool phy_is_owned = tc_phy_is_owned(tc);
1314 	bool is_connected;
1315 
1316 	if (tc_phy_owned_by_display(tc, phy_is_ready, phy_is_owned))
1317 		is_connected = port_pll_type == ICL_PORT_DPLL_MG_PHY;
1318 	else
1319 		is_connected = port_pll_type == ICL_PORT_DPLL_DEFAULT;
1320 
1321 	drm_dbg_kms(display->drm,
1322 		    "Port %s: PHY connected: %s (ready: %s, owned: %s, pll_type: %s)\n",
1323 		    tc->port_name,
1324 		    str_yes_no(is_connected),
1325 		    str_yes_no(phy_is_ready),
1326 		    str_yes_no(phy_is_owned),
1327 		    port_pll_type == ICL_PORT_DPLL_DEFAULT ? "tbt" : "non-tbt");
1328 
1329 	return is_connected;
1330 }
1331 
1332 static bool tc_phy_wait_for_ready(struct intel_tc_port *tc)
1333 {
1334 	struct intel_display *display = to_intel_display(tc->dig_port);
1335 	bool is_ready;
1336 	int ret;
1337 
1338 	ret = poll_timeout_us(is_ready = tc_phy_is_ready(tc),
1339 			      is_ready,
1340 			      1000, 500 * 1000, false);
1341 	if (ret) {
1342 		drm_err(display->drm, "Port %s: timeout waiting for PHY ready\n",
1343 			tc->port_name);
1344 
1345 		return false;
1346 	}
1347 
1348 	return true;
1349 }
1350 
1351 static enum tc_port_mode
1352 hpd_mask_to_tc_mode(u32 live_status_mask)
1353 {
1354 	if (live_status_mask)
1355 		return fls(live_status_mask) - 1;
1356 
1357 	return TC_PORT_DISCONNECTED;
1358 }
1359 
/* TC mode corresponding to the current live HPD status. */
static enum tc_port_mode
tc_phy_hpd_live_mode(struct intel_tc_port *tc)
{
	u32 live_status_mask = tc_phy_hpd_live_status(tc);

	return hpd_mask_to_tc_mode(live_status_mask);
}
1367 
1368 static enum tc_port_mode
1369 get_tc_mode_in_phy_owned_state(struct intel_tc_port *tc,
1370 			       enum tc_port_mode live_mode)
1371 {
1372 	switch (live_mode) {
1373 	case TC_PORT_LEGACY:
1374 	case TC_PORT_DP_ALT:
1375 		return live_mode;
1376 	default:
1377 		MISSING_CASE(live_mode);
1378 		fallthrough;
1379 	case TC_PORT_TBT_ALT:
1380 	case TC_PORT_DISCONNECTED:
1381 		if (tc->legacy_port)
1382 			return TC_PORT_LEGACY;
1383 		else
1384 			return TC_PORT_DP_ALT;
1385 	}
1386 }
1387 
1388 static enum tc_port_mode
1389 get_tc_mode_in_phy_not_owned_state(struct intel_tc_port *tc,
1390 				   enum tc_port_mode live_mode)
1391 {
1392 	switch (live_mode) {
1393 	case TC_PORT_LEGACY:
1394 		return TC_PORT_DISCONNECTED;
1395 	case TC_PORT_DP_ALT:
1396 	case TC_PORT_TBT_ALT:
1397 		return TC_PORT_TBT_ALT;
1398 	default:
1399 		MISSING_CASE(live_mode);
1400 		fallthrough;
1401 	case TC_PORT_DISCONNECTED:
1402 		if (tc->legacy_port)
1403 			return TC_PORT_DISCONNECTED;
1404 		else
1405 			return TC_PORT_TBT_ALT;
1406 	}
1407 }
1408 
/*
 * Determine the PHY's current mode from its ready/owned state and the live
 * HPD status. Used during HW state readout.
 */
static enum tc_port_mode
tc_phy_get_current_mode(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum tc_port_mode live_mode = tc_phy_hpd_live_mode(tc);
	bool phy_is_ready;
	bool phy_is_owned;
	enum tc_port_mode mode;

	/*
	 * For legacy ports the IOM firmware initializes the PHY during boot-up
	 * and system resume whether or not a sink is connected. Wait here for
	 * the initialization to get ready.
	 */
	if (tc->legacy_port)
		tc_phy_wait_for_ready(tc);

	phy_is_ready = tc_phy_is_ready(tc);
	phy_is_owned = tc_phy_is_owned(tc);

	if (!tc_phy_owned_by_display(tc, phy_is_ready, phy_is_owned)) {
		mode = get_tc_mode_in_phy_not_owned_state(tc, live_mode);
	} else {
		/* A TBT HPD with display ownership is inconsistent. */
		drm_WARN_ON(display->drm, live_mode == TC_PORT_TBT_ALT);
		mode = get_tc_mode_in_phy_owned_state(tc, live_mode);
	}

	drm_dbg_kms(display->drm,
		    "Port %s: PHY mode: %s (ready: %s, owned: %s, HPD: %s)\n",
		    tc->port_name,
		    tc_port_mode_name(mode),
		    str_yes_no(phy_is_ready),
		    str_yes_no(phy_is_owned),
		    tc_port_mode_name(live_mode));

	return mode;
}
1446 
1447 static enum tc_port_mode default_tc_mode(struct intel_tc_port *tc)
1448 {
1449 	if (tc->legacy_port)
1450 		return TC_PORT_LEGACY;
1451 
1452 	return TC_PORT_TBT_ALT;
1453 }
1454 
1455 static enum tc_port_mode
1456 hpd_mask_to_target_mode(struct intel_tc_port *tc, u32 live_status_mask)
1457 {
1458 	enum tc_port_mode mode = hpd_mask_to_tc_mode(live_status_mask);
1459 
1460 	if (mode != TC_PORT_DISCONNECTED)
1461 		return mode;
1462 
1463 	return default_tc_mode(tc);
1464 }
1465 
/* Mode the port should be switched to, based on the current live HPD status. */
static enum tc_port_mode
tc_phy_get_target_mode(struct intel_tc_port *tc)
{
	u32 live_status_mask = tc_phy_hpd_live_status(tc);

	return hpd_mask_to_target_mode(tc, live_status_mask);
}
1473 
/*
 * Connect the PHY in the target mode derived from the live HPD status,
 * falling back once to the port's default mode if the first attempt fails.
 */
static void tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	u32 live_status_mask = tc_phy_hpd_live_status(tc);
	bool connected;

	tc_port_fixup_legacy_flag(tc, live_status_mask);

	tc->mode = hpd_mask_to_target_mode(tc, live_status_mask);

	connected = tc->phy_ops->connect(tc, required_lanes);
	/* Retry in the default mode; connecting there is expected to succeed. */
	if (!connected && tc->mode != default_tc_mode(tc)) {
		tc->mode = default_tc_mode(tc);
		connected = tc->phy_ops->connect(tc, required_lanes);
	}

	drm_WARN_ON(display->drm, !connected);
}
1492 
1493 static void tc_phy_disconnect(struct intel_tc_port *tc)
1494 {
1495 	if (tc->mode != TC_PORT_DISCONNECTED) {
1496 		tc->phy_ops->disconnect(tc);
1497 		tc->mode = TC_PORT_DISCONNECTED;
1498 	}
1499 }
1500 
/* Run the platform specific PHY init under the TC port lock. */
static void tc_phy_init(struct intel_tc_port *tc)
{
	mutex_lock(&tc->lock);
	tc->phy_ops->init(tc);
	mutex_unlock(&tc->lock);
}
1507 
/*
 * Disconnect the PHY and - unless @force_disconnect - reconnect it in the
 * current target mode with @required_lanes. Caller holds tc->lock.
 */
static void intel_tc_port_reset_mode(struct intel_tc_port *tc,
				     int required_lanes, bool force_disconnect)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum tc_port_mode old_tc_mode = tc->mode;

	/* Make sure any pending AUX power-off has completed first. */
	intel_display_power_flush_work(display);
	if (!intel_tc_cold_requires_aux_pw(dig_port)) {
		enum intel_display_power_domain aux_domain;

		aux_domain = intel_aux_power_domain(dig_port);
		if (intel_display_power_is_enabled(display, aux_domain))
			drm_dbg_kms(display->drm, "Port %s: AUX unexpectedly powered\n",
				    tc->port_name);
	}

	tc_phy_disconnect(tc);
	if (!force_disconnect)
		tc_phy_connect(tc, required_lanes);

	drm_dbg_kms(display->drm,
		    "Port %s: TC port mode reset (%s -> %s) pin assignment: %c max lanes: %d\n",
		    tc->port_name,
		    tc_port_mode_name(old_tc_mode),
		    tc_port_mode_name(tc->mode),
		    pin_assignment_name(tc->pin_assignment),
		    tc->max_lane_count);
}
1537 
/* Does the current mode differ from the target mode derived from live HPD? */
static bool intel_tc_port_needs_reset(struct intel_tc_port *tc)
{
	return tc_phy_get_target_mode(tc) != tc->mode;
}
1542 
1543 static void intel_tc_port_update_mode(struct intel_tc_port *tc,
1544 				      int required_lanes, bool force_disconnect)
1545 {
1546 	if (force_disconnect ||
1547 	    intel_tc_port_needs_reset(tc))
1548 		intel_tc_port_reset_mode(tc, required_lanes, force_disconnect);
1549 }
1550 
/* Take a link reference, pinning the current mode. Caller holds tc->lock. */
static void __intel_tc_port_get_link(struct intel_tc_port *tc)
{
	tc->link_refcount++;
}
1555 
/* Drop a link reference. Caller holds tc->lock. */
static void __intel_tc_port_put_link(struct intel_tc_port *tc)
{
	tc->link_refcount--;
}
1560 
1561 static bool tc_port_is_enabled(struct intel_tc_port *tc)
1562 {
1563 	struct intel_display *display = to_intel_display(tc->dig_port);
1564 	struct intel_digital_port *dig_port = tc->dig_port;
1565 
1566 	assert_tc_port_power_enabled(tc);
1567 
1568 	return intel_de_read(display, DDI_BUF_CTL(dig_port->base.port)) &
1569 	       DDI_BUF_CTL_ENABLE;
1570 }
1571 
1572 /**
1573  * intel_tc_port_init_mode: Read out HW state and init the given port's TypeC mode
1574  * @dig_port: digital port
1575  *
1576  * Read out the HW state and initialize the TypeC mode of @dig_port. The mode
1577  * will be locked until intel_tc_port_sanitize_mode() is called.
1578  */
1579 void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
1580 {
1581 	struct intel_display *display = to_intel_display(dig_port);
1582 	struct intel_tc_port *tc = to_tc_port(dig_port);
1583 	bool update_mode = false;
1584 
1585 	mutex_lock(&tc->lock);
1586 
1587 	drm_WARN_ON(display->drm, tc->mode != TC_PORT_DISCONNECTED);
1588 	drm_WARN_ON(display->drm, tc->lock_wakeref);
1589 	drm_WARN_ON(display->drm, tc->link_refcount);
1590 
1591 	tc_phy_get_hw_state(tc);
1592 	/*
1593 	 * Save the initial mode for the state check in
1594 	 * intel_tc_port_sanitize_mode().
1595 	 */
1596 	tc->init_mode = tc->mode;
1597 
1598 	/*
1599 	 * The PHY needs to be connected for AUX to work during HW readout and
1600 	 * MST topology resume, but the PHY mode can only be changed if the
1601 	 * port is disabled.
1602 	 *
1603 	 * An exception is the case where BIOS leaves the PHY incorrectly
1604 	 * disconnected on an enabled legacy port. Work around that by
1605 	 * connecting the PHY even though the port is enabled. This doesn't
1606 	 * cause a problem as the PHY ownership state is ignored by the
1607 	 * IOM/TCSS firmware (only display can own the PHY in that case).
1608 	 */
1609 	if (!tc_port_is_enabled(tc)) {
1610 		update_mode = true;
1611 	} else if (tc->mode == TC_PORT_DISCONNECTED) {
1612 		drm_WARN_ON(display->drm, !tc->legacy_port);
1613 		drm_err(display->drm,
1614 			"Port %s: PHY disconnected on enabled port, connecting it\n",
1615 			tc->port_name);
1616 		update_mode = true;
1617 	}
1618 
1619 	if (update_mode)
1620 		intel_tc_port_update_mode(tc, 1, false);
1621 
1622 	/* Prevent changing tc->mode until intel_tc_port_sanitize_mode() is called. */
1623 	__intel_tc_port_get_link(tc);
1624 
1625 	mutex_unlock(&tc->lock);
1626 }
1627 
/*
 * Return whether any stream (SST or MST) is active on the port, warning if
 * a stream is active while the PHY is not in a consistent connected state.
 */
static bool tc_port_has_active_streams(struct intel_tc_port *tc,
				       const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum icl_port_dpll_id pll_type = ICL_PORT_DPLL_DEFAULT;
	int active_streams = 0;

	if (dig_port->dp.is_mst) {
		/* TODO: get the PLL type for MST, once HW readout is done for it. */
		active_streams = intel_dp_mst_active_streams(&dig_port->dp);
	} else if (crtc_state && crtc_state->hw.active) {
		pll_type = intel_ddi_port_pll_type(&dig_port->base, crtc_state);
		active_streams = 1;
	}

	if (active_streams && !tc_phy_is_connected(tc, pll_type))
		drm_err(display->drm,
			"Port %s: PHY disconnected with %d active stream(s)\n",
			tc->port_name, active_streams);

	return active_streams;
}
1651 
1652 /**
1653  * intel_tc_port_sanitize_mode: Sanitize the given port's TypeC mode
1654  * @dig_port: digital port
1655  * @crtc_state: atomic state of CRTC connected to @dig_port
1656  *
1657  * Sanitize @dig_port's TypeC mode wrt. the encoder's state right after driver
1658  * loading and system resume:
1659  * If the encoder is enabled keep the TypeC mode/PHY connected state locked until
1660  * the encoder is disabled.
1661  * If the encoder is disabled make sure the PHY is disconnected.
1662  * @crtc_state is valid if @dig_port is enabled, NULL otherwise.
1663  */
1664 void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
1665 				 const struct intel_crtc_state *crtc_state)
1666 {
1667 	struct intel_display *display = to_intel_display(dig_port);
1668 	struct intel_tc_port *tc = to_tc_port(dig_port);
1669 
1670 	mutex_lock(&tc->lock);
1671 
1672 	drm_WARN_ON(display->drm, tc->link_refcount != 1);
1673 	if (!tc_port_has_active_streams(tc, crtc_state)) {
1674 		/*
1675 		 * TBT-alt is the default mode in any case the PHY ownership is not
1676 		 * held (regardless of the sink's connected live state), so
1677 		 * we'll just switch to disconnected mode from it here without
1678 		 * a note.
1679 		 */
1680 		if (tc->init_mode != TC_PORT_TBT_ALT &&
1681 		    tc->init_mode != TC_PORT_DISCONNECTED)
1682 			drm_dbg_kms(display->drm,
1683 				    "Port %s: PHY left in %s mode on disabled port, disconnecting it\n",
1684 				    tc->port_name,
1685 				    tc_port_mode_name(tc->init_mode));
1686 		tc_phy_disconnect(tc);
1687 		__intel_tc_port_put_link(tc);
1688 	}
1689 
1690 	drm_dbg_kms(display->drm, "Port %s: sanitize mode (%s) pin assignment: %c max lanes: %d\n",
1691 		    tc->port_name,
1692 		    tc_port_mode_name(tc->mode),
1693 		    pin_assignment_name(tc->pin_assignment),
1694 		    tc->max_lane_count);
1695 
1696 	mutex_unlock(&tc->lock);
1697 }
1698 
/* Print the port's TC state (mode, pin assignment, lane count) to @p. */
void intel_tc_info(struct drm_printer *p,  struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	intel_tc_port_lock(dig_port);
	drm_printf(p, "\tTC Port %s: mode: %s, pin assignment: %c, max lanes: %d\n",
		   tc->port_name,
		   tc_port_mode_name(tc->mode),
		   pin_assignment_name(tc->pin_assignment),
		   tc->max_lane_count);
	intel_tc_port_unlock(dig_port);
}
1711 
1712 /*
1713  * The type-C ports are different because even when they are connected, they may
1714  * not be available/usable by the graphics driver: see the comment on
1715  * icl_tc_phy_connect(). So in our driver instead of adding the additional
1716  * concept of "usable" and make everything check for "connected and usable" we
1717  * define a port as "connected" when it is not only connected, but also when it
1718  * is usable by the rest of the driver. That maintains the old assumption that
1719  * connected ports are usable, and avoids exposing to the users objects they
1720  * can't really use.
1721  */
1722 bool intel_tc_port_connected(struct intel_encoder *encoder)
1723 {
1724 	struct intel_display *display = to_intel_display(encoder);
1725 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1726 	struct intel_tc_port *tc = to_tc_port(dig_port);
1727 	u32 mask = ~0;
1728 
1729 	drm_WARN_ON(display->drm, !intel_tc_port_ref_held(dig_port));
1730 
1731 	if (tc->mode != TC_PORT_DISCONNECTED)
1732 		mask = BIT(tc->mode);
1733 
1734 	return tc_phy_hpd_live_status(tc) & mask;
1735 }
1736 
1737 static bool __intel_tc_port_link_needs_reset(struct intel_tc_port *tc)
1738 {
1739 	bool ret;
1740 
1741 	mutex_lock(&tc->lock);
1742 
1743 	ret = tc->link_refcount &&
1744 	      tc->mode == TC_PORT_DP_ALT &&
1745 	      intel_tc_port_needs_reset(tc);
1746 
1747 	mutex_unlock(&tc->lock);
1748 
1749 	return ret;
1750 }
1751 
/* Public wrapper: non-TC ports never need a TC link reset. */
bool intel_tc_port_link_needs_reset(struct intel_digital_port *dig_port)
{
	if (!intel_encoder_is_tc(&dig_port->base))
		return false;

	return __intel_tc_port_link_needs_reset(to_tc_port(dig_port));
}
1759 
/*
 * Build and commit an atomic state that re-triggers a modeset on all pipes
 * driven by the port, in order to retrain/reset its link. Called within a
 * modeset lock retry loop; may return -EDEADLK for the caller to retry.
 */
static int reset_link_commit(struct intel_tc_port *tc,
			     struct intel_atomic_state *state,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;
	struct intel_dp *intel_dp = enc_to_intel_dp(&dig_port->base);
	struct intel_crtc *crtc;
	u8 pipe_mask;
	int ret;

	ret = drm_modeset_lock(&display->drm->mode_config.connection_mutex, ctx);
	if (ret)
		return ret;

	ret = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask);
	if (ret)
		return ret;

	/* Nothing to reset if no pipe is active on the port. */
	if (!pipe_mask)
		return 0;

	for_each_intel_crtc_in_pipe_mask(display->drm, crtc, pipe_mask) {
		struct intel_crtc_state *crtc_state;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		/* Force the connectors to be rechecked during the commit. */
		crtc_state->uapi.connectors_changed = true;
	}

	/* The reset may have become unnecessary while acquiring the locks. */
	if (!__intel_tc_port_link_needs_reset(tc))
		return 0;

	return drm_atomic_commit(&state->base);
}
1797 
/*
 * Reset the port's link via an internal atomic commit, retrying the commit
 * with the standard modeset deadlock back-off.
 */
static int reset_link(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *_state;
	struct intel_atomic_state *state;
	int ret;

	_state = drm_atomic_state_alloc(display->drm);
	if (!_state)
		return -ENOMEM;

	state = to_intel_atomic_state(_state);
	/* Mark the commit as driver-internal, not triggered by userspace. */
	state->internal = true;

	intel_modeset_lock_ctx_retry(&ctx, state, 0, ret)
		ret = reset_link_commit(tc, state, &ctx);

	drm_atomic_state_put(&state->base);

	return ret;
}
1820 
/* Delayed work resetting a DP-alt link whose sink got disconnected. */
static void intel_tc_port_link_reset_work(struct work_struct *work)
{
	struct intel_tc_port *tc =
		container_of(work, struct intel_tc_port, link_reset_work.work);
	struct intel_display *display = to_intel_display(tc->dig_port);
	int ret;

	/* The reset may have become unnecessary since the work was queued. */
	if (!__intel_tc_port_link_needs_reset(tc))
		return;

	mutex_lock(&display->drm->mode_config.mutex);

	drm_dbg_kms(display->drm,
		    "Port %s: TypeC DP-alt sink disconnected, resetting link\n",
		    tc->port_name);
	ret = reset_link(tc);
	drm_WARN_ON(display->drm, ret);

	mutex_unlock(&display->drm->mode_config.mutex);
}
1841 
1842 bool intel_tc_port_link_reset(struct intel_digital_port *dig_port)
1843 {
1844 	if (!intel_tc_port_link_needs_reset(dig_port))
1845 		return false;
1846 
1847 	queue_delayed_work(system_dfl_wq,
1848 			   &to_tc_port(dig_port)->link_reset_work,
1849 			   msecs_to_jiffies(2000));
1850 
1851 	return true;
1852 }
1853 
1854 void intel_tc_port_link_cancel_reset_work(struct intel_digital_port *dig_port)
1855 {
1856 	struct intel_tc_port *tc = to_tc_port(dig_port);
1857 
1858 	if (!intel_encoder_is_tc(&dig_port->base))
1859 		return;
1860 
1861 	cancel_delayed_work(&tc->link_reset_work);
1862 }
1863 
/*
 * Lock the port and - if no link reference pins the mode - update the mode
 * to match the live HPD status, ensuring @required_lanes are available.
 */
static void __intel_tc_port_lock(struct intel_tc_port *tc,
				 int required_lanes)
{
	struct intel_display *display = to_intel_display(tc->dig_port);

	mutex_lock(&tc->lock);

	/* Keep the PHY connected while the lock is held. */
	cancel_delayed_work(&tc->disconnect_phy_work);

	if (!tc->link_refcount)
		intel_tc_port_update_mode(tc, required_lanes,
					  false);

	drm_WARN_ON(display->drm, tc->mode == TC_PORT_DISCONNECTED);
	drm_WARN_ON(display->drm, tc->mode != TC_PORT_TBT_ALT && !tc_phy_is_owned(tc));
}
1880 
/* Lock the port, requiring only a single lane (e.g. for AUX access). */
void intel_tc_port_lock(struct intel_digital_port *dig_port)
{
	__intel_tc_port_lock(to_tc_port(dig_port), 1);
}
1885 
1886 /*
1887  * Disconnect the given digital port from its TypeC PHY (handing back the
1888  * control of the PHY to the TypeC subsystem). This will happen in a delayed
1889  * manner after each aux transactions and modeset disables.
1890  */
1891 static void intel_tc_port_disconnect_phy_work(struct work_struct *work)
1892 {
1893 	struct intel_tc_port *tc =
1894 		container_of(work, struct intel_tc_port, disconnect_phy_work.work);
1895 
1896 	mutex_lock(&tc->lock);
1897 
1898 	if (!tc->link_refcount)
1899 		intel_tc_port_update_mode(tc, 1, true);
1900 
1901 	mutex_unlock(&tc->lock);
1902 }
1903 
1904 /**
1905  * intel_tc_port_flush_work: flush the work disconnecting the PHY
1906  * @dig_port: digital port
1907  *
1908  * Flush the delayed work disconnecting an idle PHY.
1909  */
1910 static void intel_tc_port_flush_work(struct intel_digital_port *dig_port)
1911 {
1912 	flush_delayed_work(&to_tc_port(dig_port)->disconnect_phy_work);
1913 }
1914 
1915 void intel_tc_port_suspend(struct intel_digital_port *dig_port)
1916 {
1917 	struct intel_tc_port *tc = to_tc_port(dig_port);
1918 
1919 	cancel_delayed_work_sync(&tc->link_reset_work);
1920 	intel_tc_port_flush_work(dig_port);
1921 }
1922 
/*
 * Unlock the port, scheduling a delayed PHY disconnect if nothing keeps
 * the link referenced.
 */
void intel_tc_port_unlock(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	if (!tc->link_refcount && tc->mode != TC_PORT_DISCONNECTED)
		queue_delayed_work(system_dfl_wq, &tc->disconnect_phy_work,
				   msecs_to_jiffies(1000));

	mutex_unlock(&tc->lock);
}
1933 
1934 bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
1935 {
1936 	struct intel_tc_port *tc = to_tc_port(dig_port);
1937 
1938 	return mutex_is_locked(&tc->lock) ||
1939 	       tc->link_refcount;
1940 }
1941 
/*
 * Take a link reference, connecting the PHY with @required_lanes if needed
 * and pinning the current mode until the reference is dropped.
 */
void intel_tc_port_get_link(struct intel_digital_port *dig_port,
			    int required_lanes)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	__intel_tc_port_lock(tc, required_lanes);
	__intel_tc_port_get_link(tc);
	intel_tc_port_unlock(dig_port);
}
1951 
/* Drop a link reference and synchronously disconnect an idle PHY. */
void intel_tc_port_put_link(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	intel_tc_port_lock(dig_port);
	__intel_tc_port_put_link(tc);
	intel_tc_port_unlock(dig_port);

	/*
	 * The firmware will not update the HPD status of other TypeC ports
	 * that are active in DP-alt mode with their sink disconnected, until
	 * this port is disabled and its PHY gets disconnected. Make sure this
	 * happens in a timely manner by disconnecting the PHY synchronously.
	 */
	intel_tc_port_flush_work(dig_port);
}
1968 
/**
 * intel_tc_port_init: Allocate and initialize the TC port state for @dig_port
 * @dig_port: digital port
 * @is_legacy: whether the port is a legacy (static/non-TypeC-connector) port
 *
 * Selects the platform specific PHY ops, runs the PHY init and reads out the
 * initial port mode.
 *
 * Returns 0 on success, -EINVAL for a non-TC port, -ENOMEM on allocation
 * failure.
 */
int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
{
	struct intel_display *display = to_intel_display(dig_port);
	struct intel_tc_port *tc;
	enum port port = dig_port->base.port;
	enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);

	if (drm_WARN_ON(display->drm, tc_port == TC_PORT_NONE))
		return -EINVAL;

	tc = kzalloc_obj(*tc);
	if (!tc)
		return -ENOMEM;

	dig_port->tc = tc;
	tc->dig_port = dig_port;

	/* Pick the PHY ops matching the display version. */
	if (DISPLAY_VER(display) >= 14)
		tc->phy_ops = &xelpdp_tc_phy_ops;
	else if (DISPLAY_VER(display) >= 13)
		tc->phy_ops = &adlp_tc_phy_ops;
	else if (DISPLAY_VER(display) >= 12)
		tc->phy_ops = &tgl_tc_phy_ops;
	else
		tc->phy_ops = &icl_tc_phy_ops;

	tc->port_name = kasprintf(GFP_KERNEL, "%c/TC#%d", port_name(port),
				  tc_port + 1);
	if (!tc->port_name) {
		kfree(tc);
		return -ENOMEM;
	}

	mutex_init(&tc->lock);
	/* TODO: Combine the two works */
	INIT_DELAYED_WORK(&tc->disconnect_phy_work, intel_tc_port_disconnect_phy_work);
	INIT_DELAYED_WORK(&tc->link_reset_work, intel_tc_port_link_reset_work);
	tc->legacy_port = is_legacy;
	tc->mode = TC_PORT_DISCONNECTED;
	tc->link_refcount = 0;

	tc_phy_init(tc);

	intel_tc_port_init_mode(dig_port);

	return 0;
}
2016 
/* Tear down the TC port state: quiesce pending works, then free everything. */
void intel_tc_port_cleanup(struct intel_digital_port *dig_port)
{
	intel_tc_port_suspend(dig_port);

	kfree(dig_port->tc->port_name);
	kfree(dig_port->tc);
	dig_port->tc = NULL;
}
2025