xref: /linux/drivers/gpu/drm/i915/display/intel_tc.c (revision 07fdad3a93756b872da7b53647715c48d0f4a2d0)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5 
6 #include <linux/iopoll.h>
7 
8 #include <drm/drm_print.h>
9 
10 #include "i915_reg.h"
11 #include "i915_utils.h"
12 #include "intel_atomic.h"
13 #include "intel_cx0_phy_regs.h"
14 #include "intel_ddi.h"
15 #include "intel_de.h"
16 #include "intel_display.h"
17 #include "intel_display_driver.h"
18 #include "intel_display_power_map.h"
19 #include "intel_display_regs.h"
20 #include "intel_display_types.h"
21 #include "intel_dkl_phy_regs.h"
22 #include "intel_dp.h"
23 #include "intel_dp_mst.h"
24 #include "intel_mg_phy_regs.h"
25 #include "intel_modeset_lock.h"
26 #include "intel_tc.h"
27 
/*
 * Connection mode of a TypeC port's PHY: disconnected, or the kind of
 * sink (TBT-alt, DP-alt or legacy/fixed) that currently drives it.
 */
enum tc_port_mode {
	TC_PORT_DISCONNECTED,
	TC_PORT_TBT_ALT,
	TC_PORT_DP_ALT,
	TC_PORT_LEGACY,
};
34 
struct intel_tc_port;

/*
 * Per-platform hooks abstracting access to the TC PHY (FIA/TCSS).
 * Separate implementations exist for the ICL, TGL, ADLP and later PHY
 * generations; see the *_tc_phy_ops tables below.
 */
struct intel_tc_phy_ops {
	/* Power domain blocking the PHY's TC-cold power state. */
	enum intel_display_power_domain (*cold_off_domain)(struct intel_tc_port *tc);
	/* Bitmask of TC_PORT_* modes whose HPD is currently asserted. */
	u32 (*hpd_live_status)(struct intel_tc_port *tc);
	/* PHY is ready for display to take its ownership. */
	bool (*is_ready)(struct intel_tc_port *tc);
	/* Display currently owns the PHY. */
	bool (*is_owned)(struct intel_tc_port *tc);
	/* Read out the PHY's current mode and pin configuration. */
	void (*get_hw_state)(struct intel_tc_port *tc);
	/* Connect the PHY, acquiring ownership/power refs as required. */
	bool (*connect)(struct intel_tc_port *tc, int required_lanes);
	/* Disconnect the PHY, releasing ownership/power refs. */
	void (*disconnect)(struct intel_tc_port *tc);
	/* One-time per-port PHY (FIA parameter) initialization. */
	void (*init)(struct intel_tc_port *tc);
};
47 
/* State of one TypeC port, reached via intel_digital_port::tc. */
struct intel_tc_port {
	struct intel_digital_port *dig_port;

	const struct intel_tc_phy_ops *phy_ops;

	struct mutex lock;	/* protects the TypeC port mode */
	/* Power ref blocking TC-cold, held while the port is connected. */
	intel_wakeref_t lock_wakeref;
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	/* Domain lock_wakeref was taken from, for debug cross-checking. */
	enum intel_display_power_domain lock_power_domain;
#endif
	struct delayed_work disconnect_phy_work;
	struct delayed_work link_reset_work;
	int link_refcount;
	/* From VBT, fixed up if it mismatches the live HPD status. */
	bool legacy_port:1;
	const char *port_name;
	/* Current mode; TC_PORT_DISCONNECTED when nothing is connected. */
	enum tc_port_mode mode;
	enum tc_port_mode init_mode;
	/* FIA instance and this port's index within it. */
	enum phy_fia phy_fia;
	enum intel_tc_pin_assignment pin_assignment;
	u8 phy_fia_idx;
	u8 max_lane_count;
};
70 
/* Forward declarations of the generic tc_phy_* dispatch helpers. */
static enum intel_display_power_domain
tc_phy_cold_off_domain(struct intel_tc_port *);
static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc);
static bool tc_phy_is_ready(struct intel_tc_port *tc);
static bool tc_phy_wait_for_ready(struct intel_tc_port *tc);
static enum tc_port_mode tc_phy_get_current_mode(struct intel_tc_port *tc);
77 
78 static const char *tc_port_mode_name(enum tc_port_mode mode)
79 {
80 	static const char * const names[] = {
81 		[TC_PORT_DISCONNECTED] = "disconnected",
82 		[TC_PORT_TBT_ALT] = "tbt-alt",
83 		[TC_PORT_DP_ALT] = "dp-alt",
84 		[TC_PORT_LEGACY] = "legacy",
85 	};
86 
87 	if (WARN_ON(mode >= ARRAY_SIZE(names)))
88 		mode = TC_PORT_DISCONNECTED;
89 
90 	return names[mode];
91 }
92 
/* Return the TC port state associated with a digital port. */
static struct intel_tc_port *to_tc_port(struct intel_digital_port *dig_port)
{
	return dig_port->tc;
}
97 
98 static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port,
99 				  enum tc_port_mode mode)
100 {
101 	struct intel_tc_port *tc = to_tc_port(dig_port);
102 
103 	return intel_encoder_is_tc(&dig_port->base) && tc->mode == mode;
104 }
105 
/* Return whether @dig_port is a TC port currently in TBT-alt mode. */
bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_TBT_ALT);
}

/* Return whether @dig_port is a TC port currently in DP-alt mode. */
bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_DP_ALT);
}

/* Return whether @dig_port is a TC port currently in legacy mode. */
bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_LEGACY);
}

/*
 * Return whether HPD glitches on @dig_port are handled by the TC code,
 * which is the case for all non-legacy TC ports.
 */
bool intel_tc_port_handles_hpd_glitches(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	return intel_encoder_is_tc(&dig_port->base) && !tc->legacy_port;
}
127 
128 /*
129  * The display power domains used for TC ports depending on the
130  * platform and TC mode (legacy, DP-alt, TBT):
131  *
132  * POWER_DOMAIN_DISPLAY_CORE:
133  * --------------------------
134  * ADLP/all modes:
135  *   - TCSS/IOM access for PHY ready state.
136  * ADLP+/all modes:
137  *   - DE/north-,south-HPD ISR access for HPD live state.
138  *
139  * POWER_DOMAIN_PORT_DDI_LANES_<port>:
140  * -----------------------------------
141  * ICL+/all modes:
142  *   - DE/DDI_BUF access for port enabled state.
143  * ADLP/all modes:
144  *   - DE/DDI_BUF access for PHY owned state.
145  *
146  * POWER_DOMAIN_AUX_USBC<TC port index>:
147  * -------------------------------------
148  * ICL/legacy mode:
149  *   - TCSS/IOM,FIA access for PHY ready, owned and HPD live state
150  *   - TCSS/PHY: block TC-cold power state for using the PHY AUX and
151  *     main lanes.
152  * ADLP/legacy, DP-alt modes:
153  *   - TCSS/PHY: block TC-cold power state for using the PHY AUX and
154  *     main lanes.
155  *
156  * POWER_DOMAIN_TC_COLD_OFF:
157  * -------------------------
158  * ICL/DP-alt, TBT mode:
159  *   - TCSS/TBT: block TC-cold power state for using the (direct or
160  *     TBT DP-IN) AUX and main lanes.
161  *
162  * TGL/all modes:
163  *   - TCSS/IOM,FIA access for PHY ready, owned and HPD live state
164  *   - TCSS/PHY: block TC-cold power state for using the (direct or
165  *     TBT DP-IN) AUX and main lanes.
166  *
167  * ADLP/TBT mode:
168  *   - TCSS/TBT: block TC-cold power state for using the (TBT DP-IN)
169  *     AUX and main lanes.
170  *
171  * XELPDP+/all modes:
172  *   - TCSS/IOM,FIA access for PHY ready, owned state
173  *   - TCSS/PHY: block TC-cold power state for using the (direct or
174  *     TBT DP-IN) AUX and main lanes.
175  */
/*
 * Return whether blocking the TC-cold state of @dig_port's PHY requires
 * the port's legacy AUX power domain, as opposed to
 * POWER_DOMAIN_TC_COLD_OFF. See the power domain table above.
 */
bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port)
{
	struct intel_display *display = to_intel_display(dig_port);
	struct intel_tc_port *tc = to_tc_port(dig_port);

	return tc_phy_cold_off_domain(tc) ==
	       intel_display_power_legacy_aux_domain(display, dig_port->aux_ch);
}
184 
/*
 * Block the PHY's TC-cold power state, returning the acquired wakeref
 * and - via @domain - the power domain it was acquired from.
 */
static intel_wakeref_t
__tc_cold_block(struct intel_tc_port *tc, enum intel_display_power_domain *domain)
{
	struct intel_display *display = to_intel_display(tc->dig_port);

	*domain = tc_phy_cold_off_domain(tc);

	return intel_display_power_get(display, *domain);
}

/*
 * Block the PHY's TC-cold power state, recording (in debug builds) the
 * power domain used, so that tc_cold_unblock() can verify it hasn't
 * changed in the meantime.
 */
static intel_wakeref_t
tc_cold_block(struct intel_tc_port *tc)
{
	enum intel_display_power_domain domain;
	intel_wakeref_t wakeref;

	wakeref = __tc_cold_block(tc, &domain);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	tc->lock_power_domain = domain;
#endif
	return wakeref;
}

/* Unblock the PHY's TC-cold power state, releasing @wakeref on @domain. */
static void
__tc_cold_unblock(struct intel_tc_port *tc, enum intel_display_power_domain domain,
		  intel_wakeref_t wakeref)
{
	struct intel_display *display = to_intel_display(tc->dig_port);

	intel_display_power_put(display, domain, wakeref);
}

/*
 * Unblock the PHY's TC-cold power state, warning (in debug builds) if
 * the TC-cold blocking power domain changed since tc_cold_block().
 */
static void
tc_cold_unblock(struct intel_tc_port *tc, intel_wakeref_t wakeref)
{
	struct intel_display __maybe_unused *display = to_intel_display(tc->dig_port);
	enum intel_display_power_domain domain = tc_phy_cold_off_domain(tc);

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	drm_WARN_ON(display->drm, tc->lock_power_domain != domain);
#endif
	__tc_cold_unblock(tc, domain, wakeref);
}

/* Assert that the display core power domain is enabled. */
static void
assert_display_core_power_enabled(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);

	drm_WARN_ON(display->drm,
		    !intel_display_power_is_enabled(display, POWER_DOMAIN_DISPLAY_CORE));
}
237 
238 static void
239 assert_tc_cold_blocked(struct intel_tc_port *tc)
240 {
241 	struct intel_display *display = to_intel_display(tc->dig_port);
242 	bool enabled;
243 
244 	enabled = intel_display_power_is_enabled(display,
245 						 tc_phy_cold_off_domain(tc));
246 	drm_WARN_ON(display->drm, !enabled);
247 }
248 
/*
 * Return the DDI lane power domain of the port, or POWER_DOMAIN_INVALID
 * if the encoder is not a TC port.
 */
static enum intel_display_power_domain
tc_port_power_domain(struct intel_tc_port *tc)
{
	enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);

	if (tc_port == TC_PORT_NONE)
		return POWER_DOMAIN_INVALID;

	return POWER_DOMAIN_PORT_DDI_LANES_TC1 + tc_port - TC_PORT_1;
}

/* Assert that the port's DDI lane power domain is enabled. */
static void
assert_tc_port_power_enabled(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);

	drm_WARN_ON(display->drm,
		    !intel_display_power_is_enabled(display, tc_port_power_domain(tc)));
}
268 
/*
 * Read the port's DP lane assignment mask from the FIA. The caller must
 * block the PHY's TC-cold power state.
 */
static u32 get_lane_mask(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	intel_wakeref_t wakeref;
	u32 lane_mask;

	with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		lane_mask = intel_de_read(display, PORT_TX_DFLEXDPSP(tc->phy_fia));

	/* An all-ones value indicates a failed read, i.e. PHY in TC-cold. */
	drm_WARN_ON(display->drm, lane_mask == 0xffffffff);
	assert_tc_cold_blocked(tc);

	lane_mask &= DP_LANE_ASSIGNMENT_MASK(tc->phy_fia_idx);
	return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(tc->phy_fia_idx);
}
284 
285 static char pin_assignment_name(enum intel_tc_pin_assignment pin_assignment)
286 {
287 	if (pin_assignment == INTEL_TC_PIN_ASSIGNMENT_NONE)
288 		return '-';
289 
290 	return 'A' + pin_assignment - INTEL_TC_PIN_ASSIGNMENT_A;
291 }
292 
/*
 * Read out the port's current TypeC pin assignment, either from the
 * TCSS (LNL+) or from the FIA (earlier platforms). The caller must
 * block the PHY's TC-cold power state.
 */
static enum intel_tc_pin_assignment
get_pin_assignment(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
	enum intel_tc_pin_assignment pin_assignment;
	intel_wakeref_t wakeref;
	i915_reg_t reg;
	u32 mask;
	u32 val;

	/* In TBT-alt mode the pin assignment is not relevant/visible. */
	if (tc->mode == TC_PORT_TBT_ALT)
		return INTEL_TC_PIN_ASSIGNMENT_NONE;

	if (DISPLAY_VER(display) >= 20) {
		reg = TCSS_DDI_STATUS(tc_port);
		mask = TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK;
	} else {
		reg = PORT_TX_DFLEXPA1(tc->phy_fia);
		mask = DP_PIN_ASSIGNMENT_MASK(tc->phy_fia_idx);
	}

	with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		val = intel_de_read(display, reg);

	/* An all-ones value indicates a failed read, i.e. PHY in TC-cold. */
	drm_WARN_ON(display->drm, val == 0xffffffff);
	assert_tc_cold_blocked(tc);

	pin_assignment = (val & mask) >> (ffs(mask) - 1);

	/* Sanity check the decoded value against the platform. */
	switch (pin_assignment) {
	case INTEL_TC_PIN_ASSIGNMENT_A:
	case INTEL_TC_PIN_ASSIGNMENT_B:
	case INTEL_TC_PIN_ASSIGNMENT_F:
		/* A/B/F are only expected on ICL (display ver 11). */
		drm_WARN_ON(display->drm, DISPLAY_VER(display) > 11);
		break;
	case INTEL_TC_PIN_ASSIGNMENT_NONE:
	case INTEL_TC_PIN_ASSIGNMENT_C:
	case INTEL_TC_PIN_ASSIGNMENT_D:
	case INTEL_TC_PIN_ASSIGNMENT_E:
		break;
	default:
		MISSING_CASE(pin_assignment);
	}

	return pin_assignment;
}
340 
/*
 * Return the maximum number of main lanes usable in DP-alt mode on
 * MTL+, based on the port's pin assignment.
 */
static int mtl_get_max_lane_count(struct intel_tc_port *tc)
{
	enum intel_tc_pin_assignment pin_assignment;

	pin_assignment = get_pin_assignment(tc);

	switch (pin_assignment) {
	case INTEL_TC_PIN_ASSIGNMENT_NONE:
		return 0;
	default:
		/* Unexpected assignments are flagged and treated as 2-lane. */
		MISSING_CASE(pin_assignment);
		fallthrough;
	case INTEL_TC_PIN_ASSIGNMENT_D:
		return 2;
	case INTEL_TC_PIN_ASSIGNMENT_C:
	case INTEL_TC_PIN_ASSIGNMENT_E:
		return 4;
	}
}
360 
361 static int icl_get_max_lane_count(struct intel_tc_port *tc)
362 {
363 	u32 lane_mask = 0;
364 
365 	lane_mask = get_lane_mask(tc);
366 
367 	switch (lane_mask) {
368 	default:
369 		MISSING_CASE(lane_mask);
370 		fallthrough;
371 	case 0x1:
372 	case 0x2:
373 	case 0x4:
374 	case 0x8:
375 		return 1;
376 	case 0x3:
377 	case 0xc:
378 		return 2;
379 	case 0xf:
380 		return 4;
381 	}
382 }
383 
/*
 * Return the maximum number of main lanes for the port in its current
 * mode. Limited by the pin/lane assignment only in DP-alt mode.
 */
static int get_max_lane_count(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);

	if (tc->mode != TC_PORT_DP_ALT)
		return 4;

	if (DISPLAY_VER(display) >= 14)
		return mtl_get_max_lane_count(tc);

	return icl_get_max_lane_count(tc);
}

/* Cache the PHY's pin assignment and max lane count while connected. */
static void read_pin_configuration(struct intel_tc_port *tc)
{
	tc->pin_assignment = get_pin_assignment(tc);
	tc->max_lane_count = get_max_lane_count(tc);
}

/*
 * Return the cached maximum lane count of @dig_port. Non-TC ports
 * always have all 4 lanes available.
 */
int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	if (!intel_encoder_is_tc(&dig_port->base))
		return 4;

	return tc->max_lane_count;
}

/*
 * Return the cached pin assignment of @dig_port, or
 * INTEL_TC_PIN_ASSIGNMENT_NONE for non-TC ports.
 */
enum intel_tc_pin_assignment
intel_tc_port_get_pin_assignment(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	if (!intel_encoder_is_tc(&dig_port->base))
		return INTEL_TC_PIN_ASSIGNMENT_NONE;

	return tc->pin_assignment;
}
423 
/*
 * Program the FIA with the main lanes @dig_port will use, taking lane
 * reversal into account. No-op on MTL+ (DISPLAY_VER >= 14), which has
 * no display-programmed FIA. The caller must block the PHY's TC-cold
 * power state.
 */
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
				      int required_lanes)
{
	struct intel_display *display = to_intel_display(dig_port);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	bool lane_reversal = dig_port->lane_reversal;
	u32 val;

	if (DISPLAY_VER(display) >= 14)
		return;

	/* Lane reversal is only expected on legacy ports. */
	drm_WARN_ON(display->drm,
		    lane_reversal && tc->mode != TC_PORT_LEGACY);

	assert_tc_cold_blocked(tc);

	val = intel_de_read(display, PORT_TX_DFLEXDPMLE1(tc->phy_fia));
	val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc->phy_fia_idx);

	/* Reversed ports use the lanes starting from ML3 downwards. */
	switch (required_lanes) {
	case 1:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3(tc->phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML0(tc->phy_fia_idx);
		break;
	case 2:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3_2(tc->phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML1_0(tc->phy_fia_idx);
		break;
	case 4:
		val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc->phy_fia_idx);
		break;
	default:
		MISSING_CASE(required_lanes);
	}

	intel_de_write(display, PORT_TX_DFLEXDPMLE1(tc->phy_fia), val);
}
463 
/*
 * Fix up the VBT-provided legacy port flag if it contradicts the live
 * HPD status of the port, trusting the latter. Only done while the port
 * is disconnected and exactly one mode's HPD is asserted.
 */
static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc,
				      u32 live_status_mask)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	u32 valid_hpd_mask;

	drm_WARN_ON(display->drm, tc->mode != TC_PORT_DISCONNECTED);

	/* Only a single asserted HPD bit identifies the mode unambiguously. */
	if (hweight32(live_status_mask) != 1)
		return;

	if (tc->legacy_port)
		valid_hpd_mask = BIT(TC_PORT_LEGACY);
	else
		valid_hpd_mask = BIT(TC_PORT_DP_ALT) |
				 BIT(TC_PORT_TBT_ALT);

	if (!(live_status_mask & ~valid_hpd_mask))
		return;

	/* If live status mismatches the VBT flag, trust the live status. */
	drm_dbg_kms(display->drm,
		    "Port %s: live status %08x mismatches the legacy port flag %08x, fixing flag\n",
		    tc->port_name, live_status_mask, valid_hpd_mask);

	tc->legacy_port = !tc->legacy_port;
}
491 
492 static void tc_phy_load_fia_params(struct intel_tc_port *tc, bool modular_fia)
493 {
494 	enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
495 
496 	/*
497 	 * Each Modular FIA instance houses 2 TC ports. In SOC that has more
498 	 * than two TC ports, there are multiple instances of Modular FIA.
499 	 */
500 	if (modular_fia) {
501 		tc->phy_fia = tc_port / 2;
502 		tc->phy_fia_idx = tc_port % 2;
503 	} else {
504 		tc->phy_fia = FIA1;
505 		tc->phy_fia_idx = tc_port;
506 	}
507 }
508 
509 /*
510  * ICL TC PHY handlers
511  * -------------------
512  */
/*
 * On ICL legacy ports block TC-cold via the port's AUX power domain,
 * all other modes via POWER_DOMAIN_TC_COLD_OFF (see the domain table
 * at the top of the file).
 */
static enum intel_display_power_domain
icl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;

	if (tc->legacy_port)
		return intel_display_power_legacy_aux_domain(display, dig_port->aux_ch);

	return POWER_DOMAIN_TC_COLD_OFF;
}
524 
/*
 * Return the bitmask of TC_PORT_* modes with an asserted HPD, read from
 * the FIA live-state bits (TBT/DP-alt) and the south display HPD ISR
 * (legacy).
 */
static u32 icl_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;
	u32 isr_bit = display->hotplug.pch_hpd[dig_port->base.hpd_pin];
	intel_wakeref_t wakeref;
	u32 fia_isr;
	u32 pch_isr;
	u32 mask = 0;

	with_intel_display_power(display, tc_phy_cold_off_domain(tc), wakeref) {
		fia_isr = intel_de_read(display, PORT_TX_DFLEXDPSP(tc->phy_fia));
		pch_isr = intel_de_read(display, SDEISR);
	}

	/* An all-ones FIA read means the PHY is in TC-cold. */
	if (fia_isr == 0xffffffff) {
		drm_dbg_kms(display->drm,
			    "Port %s: PHY in TCCOLD, nothing connected\n",
			    tc->port_name);
		return mask;
	}

	if (fia_isr & TC_LIVE_STATE_TBT(tc->phy_fia_idx))
		mask |= BIT(TC_PORT_TBT_ALT);
	if (fia_isr & TC_LIVE_STATE_TC(tc->phy_fia_idx))
		mask |= BIT(TC_PORT_DP_ALT);

	if (pch_isr & isr_bit)
		mask |= BIT(TC_PORT_LEGACY);

	return mask;
}
557 
/*
 * Return the PHY status complete flag indicating that display can acquire the
 * PHY ownership. The IOM firmware sets this flag when a DP-alt or legacy sink
 * is connected and it's ready to switch the ownership to display. The flag
 * will be left cleared when a TBT-alt sink is connected, where the PHY is
 * owned by the TBT subsystem and so switching the ownership to display is not
 * required.
 */
static bool icl_tc_phy_is_ready(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(display, PORT_TX_DFLEXDPPMS(tc->phy_fia));
	/* An all-ones read means the PHY is in TC-cold. */
	if (val == 0xffffffff) {
		drm_dbg_kms(display->drm,
			    "Port %s: PHY in TCCOLD, assuming not ready\n",
			    tc->port_name);
		return false;
	}

	return val & DP_PHY_MODE_STATUS_COMPLETED(tc->phy_fia_idx);
}
583 
/*
 * Set or clear display's ownership flag of the PHY in the FIA.
 * Returns false if the FIA register read fails (PHY in TC-cold).
 */
static bool icl_tc_phy_take_ownership(struct intel_tc_port *tc,
				      bool take)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(display, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
	if (val == 0xffffffff) {
		drm_dbg_kms(display->drm,
			    "Port %s: PHY in TCCOLD, can't %s ownership\n",
			    tc->port_name, take ? "take" : "release");

		return false;
	}

	val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
	if (take)
		val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);

	intel_de_write(display, PORT_TX_DFLEXDPCSSS(tc->phy_fia), val);

	return true;
}

/*
 * Return whether display owns the PHY, based on the "not safe" flag in
 * the FIA. A failed read (PHY in TC-cold) counts as not owned.
 */
static bool icl_tc_phy_is_owned(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(display, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
	if (val == 0xffffffff) {
		drm_dbg_kms(display->drm,
			    "Port %s: PHY in TCCOLD, assume not owned\n",
			    tc->port_name);
		return false;
	}

	return val & DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
}

/*
 * Read out the PHY's HW state: the current mode and - if connected -
 * the pin configuration, keeping a TC-cold blocking reference for as
 * long as the port stays connected.
 */
static void icl_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	enum intel_display_power_domain domain;
	intel_wakeref_t tc_cold_wref;

	tc_cold_wref = __tc_cold_block(tc, &domain);

	tc->mode = tc_phy_get_current_mode(tc);
	if (tc->mode != TC_PORT_DISCONNECTED) {
		tc->lock_wakeref = tc_cold_block(tc);

		read_pin_configuration(tc);
	}

	__tc_cold_unblock(tc, domain, tc_cold_wref);
}
644 
/*
 * This function implements the first part of the Connect Flow described by our
 * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
 * lanes, EDID, etc) is done as needed in the typical places.
 *
 * Unlike the other ports, type-C ports are not available to use as soon as we
 * get a hotplug. The type-C PHYs can be shared between multiple controllers:
 * display, USB, etc. As a result, handshaking through FIA is required around
 * connect and disconnect to cleanly transfer ownership with the controller and
 * set the type-C power state.
 */

/*
 * Verify that a PHY in legacy or DP-alt mode is (still) connected and
 * can provide at least @required_lanes main lanes.
 */
static bool tc_phy_verify_legacy_or_dp_alt_mode(struct intel_tc_port *tc,
						int required_lanes)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;
	int max_lanes;

	max_lanes = intel_tc_port_max_lane_count(dig_port);
	if (tc->mode == TC_PORT_LEGACY) {
		/* Legacy (fixed) connections always have all 4 lanes. */
		drm_WARN_ON(display->drm, max_lanes != 4);
		return true;
	}

	drm_WARN_ON(display->drm, tc->mode != TC_PORT_DP_ALT);

	/*
	 * Now we have to re-check the live state, in case the port recently
	 * became disconnected. Not necessary for legacy mode.
	 */
	if (!(tc_phy_hpd_live_status(tc) & BIT(TC_PORT_DP_ALT))) {
		drm_dbg_kms(display->drm, "Port %s: PHY sudden disconnect\n",
			    tc->port_name);
		return false;
	}

	if (max_lanes < required_lanes) {
		drm_dbg_kms(display->drm,
			    "Port %s: PHY max lanes %d < required lanes %d\n",
			    tc->port_name,
			    max_lanes, required_lanes);
		return false;
	}

	return true;
}
691 
/*
 * Connect the PHY on ICL-ADLN: block TC-cold and - except in TBT-alt
 * mode, where the TBT subsystem owns the PHY - take the PHY ownership
 * and verify that the connection can provide @required_lanes. On
 * success the TC-cold blocking reference is kept in tc->lock_wakeref.
 */
static bool icl_tc_phy_connect(struct intel_tc_port *tc,
			       int required_lanes)
{
	struct intel_display *display = to_intel_display(tc->dig_port);

	tc->lock_wakeref = tc_cold_block(tc);

	if (tc->mode == TC_PORT_TBT_ALT) {
		read_pin_configuration(tc);

		return true;
	}

	/*
	 * Failing to take the ownership is fatal only for legacy mode,
	 * where a sink is always expected to be connected.
	 */
	if ((!tc_phy_is_ready(tc) ||
	     !icl_tc_phy_take_ownership(tc, true)) &&
	    !drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY)) {
		drm_dbg_kms(display->drm, "Port %s: can't take PHY ownership (ready %s)\n",
			    tc->port_name,
			    str_yes_no(tc_phy_is_ready(tc)));
		goto out_unblock_tc_cold;
	}

	read_pin_configuration(tc);

	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
		goto out_release_phy;

	return true;

out_release_phy:
	icl_tc_phy_take_ownership(tc, false);
out_unblock_tc_cold:
	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));

	return false;
}
728 
/*
 * See the comment at the connect function. This implements the Disconnect
 * Flow.
 */
static void icl_tc_phy_disconnect(struct intel_tc_port *tc)
{
	switch (tc->mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		/* Ownership was taken during connect only in these modes. */
		icl_tc_phy_take_ownership(tc, false);
		fallthrough;
	case TC_PORT_TBT_ALT:
		/* Drop the TC-cold blocking reference taken at connect. */
		tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
		break;
	default:
		MISSING_CASE(tc->mode);
	}
}

/* ICL has a single, non-modular FIA. */
static void icl_tc_phy_init(struct intel_tc_port *tc)
{
	tc_phy_load_fia_params(tc, false);
}

/* TC PHY hooks for ICL-class (gen11) platforms. */
static const struct intel_tc_phy_ops icl_tc_phy_ops = {
	.cold_off_domain = icl_tc_phy_cold_off_domain,
	.hpd_live_status = icl_tc_phy_hpd_live_status,
	.is_ready = icl_tc_phy_is_ready,
	.is_owned = icl_tc_phy_is_owned,
	.get_hw_state = icl_tc_phy_get_hw_state,
	.connect = icl_tc_phy_connect,
	.disconnect = icl_tc_phy_disconnect,
	.init = icl_tc_phy_init,
};
763 
764 /*
765  * TGL TC PHY handlers
766  * -------------------
767  */
/*
 * On TGL TC-cold is blocked via POWER_DOMAIN_TC_COLD_OFF in all modes
 * (see the domain table at the top of the file).
 */
static enum intel_display_power_domain
tgl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
{
	return POWER_DOMAIN_TC_COLD_OFF;
}

/*
 * Detect from the HW whether the platform has a modular FIA and load
 * the port's FIA parameters accordingly.
 */
static void tgl_tc_phy_init(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	intel_wakeref_t wakeref;
	u32 val;

	with_intel_display_power(display, tc_phy_cold_off_domain(tc), wakeref)
		val = intel_de_read(display, PORT_TX_DFLEXDPSP(FIA1));

	/* An all-ones value indicates a failed read, i.e. PHY in TC-cold. */
	drm_WARN_ON(display->drm, val == 0xffffffff);

	tc_phy_load_fia_params(tc, val & MODULAR_FIA_MASK);
}

/* TC PHY hooks for TGL-class (gen12) platforms; mostly shared with ICL. */
static const struct intel_tc_phy_ops tgl_tc_phy_ops = {
	.cold_off_domain = tgl_tc_phy_cold_off_domain,
	.hpd_live_status = icl_tc_phy_hpd_live_status,
	.is_ready = icl_tc_phy_is_ready,
	.is_owned = icl_tc_phy_is_owned,
	.get_hw_state = icl_tc_phy_get_hw_state,
	.connect = icl_tc_phy_connect,
	.disconnect = icl_tc_phy_disconnect,
	.init = tgl_tc_phy_init,
};
798 
799 /*
800  * ADLP TC PHY handlers
801  * --------------------
802  */
/*
 * On ADLP legacy and DP-alt modes block TC-cold via the port's AUX
 * power domain, TBT-alt mode via POWER_DOMAIN_TC_COLD_OFF (see the
 * domain table at the top of the file).
 */
static enum intel_display_power_domain
adlp_tc_phy_cold_off_domain(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;

	if (tc->mode != TC_PORT_TBT_ALT)
		return intel_display_power_legacy_aux_domain(display, dig_port->aux_ch);

	return POWER_DOMAIN_TC_COLD_OFF;
}
814 
/*
 * Return the bitmask of TC_PORT_* modes with an asserted HPD, read from
 * the north display HPD ISR (DP-alt/TBT-alt) and the south display HPD
 * ISR (legacy).
 */
static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
	u32 cpu_isr_bits = display->hotplug.hpd[hpd_pin];
	u32 pch_isr_bit = display->hotplug.pch_hpd[hpd_pin];
	intel_wakeref_t wakeref;
	u32 cpu_isr;
	u32 pch_isr;
	u32 mask = 0;

	with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
		cpu_isr = intel_de_read(display, GEN11_DE_HPD_ISR);
		pch_isr = intel_de_read(display, SDEISR);
	}

	if (cpu_isr & (cpu_isr_bits & GEN11_DE_TC_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_DP_ALT);
	if (cpu_isr & (cpu_isr_bits & GEN11_DE_TBT_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_TBT_ALT);

	if (pch_isr & pch_isr_bit)
		mask |= BIT(TC_PORT_LEGACY);

	return mask;
}
842 
/*
 * Return the PHY status complete flag indicating that display can acquire the
 * PHY ownership. The IOM firmware sets this flag when it's ready to switch
 * the ownership to display, regardless of what sink is connected (TBT-alt,
 * DP-alt, legacy or nothing). For TBT-alt sinks the PHY is owned by the TBT
 * subsystem and so switching the ownership to display is not required.
 */
static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
	u32 val;

	assert_display_core_power_enabled(tc);

	val = intel_de_read(display, TCSS_DDI_STATUS(tc_port));
	/* An all-ones read means the PHY is in TC-cold. */
	if (val == 0xffffffff) {
		drm_dbg_kms(display->drm,
			    "Port %s: PHY in TCCOLD, assuming not ready\n",
			    tc->port_name);
		return false;
	}

	return val & TCSS_DDI_STATUS_READY;
}

/*
 * Set or clear display's PHY ownership flag, which on ADLP lives in
 * DDI_BUF_CTL. Requires the port's DDI lane power domain to be enabled.
 */
static bool adlp_tc_phy_take_ownership(struct intel_tc_port *tc,
				       bool take)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum port port = tc->dig_port->base.port;

	assert_tc_port_power_enabled(tc);

	intel_de_rmw(display, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP,
		     take ? DDI_BUF_CTL_TC_PHY_OWNERSHIP : 0);

	return true;
}

/* Return whether display owns the PHY, from DDI_BUF_CTL. */
static bool adlp_tc_phy_is_owned(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum port port = tc->dig_port->base.port;
	u32 val;

	assert_tc_port_power_enabled(tc);

	val = intel_de_read(display, DDI_BUF_CTL(port));
	return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP;
}

/*
 * Read out the PHY's HW state: the current mode and - if connected -
 * the pin configuration, keeping a TC-cold blocking reference for as
 * long as the port stays connected. The port's DDI lane power domain
 * is held around the readout.
 */
static void adlp_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum intel_display_power_domain port_power_domain =
		tc_port_power_domain(tc);
	intel_wakeref_t port_wakeref;

	port_wakeref = intel_display_power_get(display, port_power_domain);

	tc->mode = tc_phy_get_current_mode(tc);
	if (tc->mode != TC_PORT_DISCONNECTED) {
		tc->lock_wakeref = tc_cold_block(tc);

		read_pin_configuration(tc);
	}

	intel_display_power_put(display, port_power_domain, port_wakeref);
}
913 
/*
 * Connect the PHY on ADLP: in TBT-alt mode just block TC-cold; in
 * legacy/DP-alt mode take the PHY ownership (holding the port's DDI
 * lane power domain while doing so), check the PHY's ready state and
 * verify that the connection can provide @required_lanes. On success
 * the TC-cold blocking reference is kept in tc->lock_wakeref.
 */
static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum intel_display_power_domain port_power_domain =
		tc_port_power_domain(tc);
	intel_wakeref_t port_wakeref;

	if (tc->mode == TC_PORT_TBT_ALT) {
		tc->lock_wakeref = tc_cold_block(tc);

		read_pin_configuration(tc);

		return true;
	}

	port_wakeref = intel_display_power_get(display, port_power_domain);

	/*
	 * Ownership/ready failures are fatal only for legacy mode, where
	 * a sink is always expected to be connected.
	 */
	if (!adlp_tc_phy_take_ownership(tc, true) &&
	    !drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY)) {
		drm_dbg_kms(display->drm, "Port %s: can't take PHY ownership\n",
			    tc->port_name);
		goto out_put_port_power;
	}

	if (!tc_phy_is_ready(tc) &&
	    !drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY)) {
		drm_dbg_kms(display->drm, "Port %s: PHY not ready\n",
			    tc->port_name);
		goto out_release_phy;
	}

	tc->lock_wakeref = tc_cold_block(tc);

	read_pin_configuration(tc);

	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
		goto out_unblock_tc_cold;

	intel_display_power_put(display, port_power_domain, port_wakeref);

	return true;

out_unblock_tc_cold:
	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
out_release_phy:
	adlp_tc_phy_take_ownership(tc, false);
out_put_port_power:
	intel_display_power_put(display, port_power_domain, port_wakeref);

	return false;
}
965 
/*
 * Disconnect the PHY on ADLP: drop the TC-cold blocking reference taken
 * at connect and release the PHY ownership if it was taken, holding the
 * port's DDI lane power domain around the sequence.
 */
static void adlp_tc_phy_disconnect(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum intel_display_power_domain port_power_domain =
		tc_port_power_domain(tc);
	intel_wakeref_t port_wakeref;

	port_wakeref = intel_display_power_get(display, port_power_domain);

	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));

	switch (tc->mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		/* Ownership was taken during connect only in these modes. */
		adlp_tc_phy_take_ownership(tc, false);
		fallthrough;
	case TC_PORT_TBT_ALT:
		break;
	default:
		MISSING_CASE(tc->mode);
	}

	intel_display_power_put(display, port_power_domain, port_wakeref);
}

/* ADLP always has a modular FIA. */
static void adlp_tc_phy_init(struct intel_tc_port *tc)
{
	tc_phy_load_fia_params(tc, true);
}

/* TC PHY hooks for ADLP-class platforms. */
static const struct intel_tc_phy_ops adlp_tc_phy_ops = {
	.cold_off_domain = adlp_tc_phy_cold_off_domain,
	.hpd_live_status = adlp_tc_phy_hpd_live_status,
	.is_ready = adlp_tc_phy_is_ready,
	.is_owned = adlp_tc_phy_is_owned,
	.get_hw_state = adlp_tc_phy_get_hw_state,
	.connect = adlp_tc_phy_connect,
	.disconnect = adlp_tc_phy_disconnect,
	.init = adlp_tc_phy_init,
};
1006 
1007 /*
1008  * XELPDP TC PHY handlers
1009  * ----------------------
1010  */
/*
 * Return the bitmask of TC_PORT_* modes with an asserted HPD, read from
 * the PICA ISR (DP-alt/TBT-alt) and - for legacy ports only - the south
 * display HPD ISR.
 */
static u32 xelpdp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
	u32 pica_isr_bits = display->hotplug.hpd[hpd_pin];
	u32 pch_isr_bit = display->hotplug.pch_hpd[hpd_pin];
	intel_wakeref_t wakeref;
	u32 pica_isr;
	u32 pch_isr;
	u32 mask = 0;

	with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
		pica_isr = intel_de_read(display, PICAINTERRUPT_ISR);
		pch_isr = intel_de_read(display, SDEISR);
	}

	if (pica_isr & (pica_isr_bits & XELPDP_DP_ALT_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_DP_ALT);
	if (pica_isr & (pica_isr_bits & XELPDP_TBT_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_TBT_ALT);

	if (tc->legacy_port && (pch_isr & pch_isr_bit))
		mask |= BIT(TC_PORT_LEGACY);

	return mask;
}

/*
 * Return the TCSS power state of the PHY from PORT_BUF_CTL1. The caller
 * must block the PHY's TC-cold power state.
 */
static bool
xelpdp_tc_phy_tcss_power_is_enabled(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum port port = tc->dig_port->base.port;
	i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);

	assert_tc_cold_blocked(tc);

	return intel_de_read(display, reg) & XELPDP_TCSS_POWER_STATE;
}
1050 
1051 static bool
1052 xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port *tc, bool enabled)
1053 {
1054 	struct intel_display *display = to_intel_display(tc->dig_port);
1055 	bool is_enabled;
1056 	int ret;
1057 
1058 	ret = poll_timeout_us(is_enabled = xelpdp_tc_phy_tcss_power_is_enabled(tc),
1059 			      is_enabled == enabled,
1060 			      200, 5000, false);
1061 	if (ret) {
1062 		drm_dbg_kms(display->drm,
1063 			    "Port %s: timeout waiting for TCSS power to get %s\n",
1064 			    str_enabled_disabled(enabled),
1065 			    tc->port_name);
1066 		return false;
1067 	}
1068 
1069 	return true;
1070 }
1071 
/*
 * Gfx driver WA 14020908590 for PTL tcss_rxdetect_clkswb_req/ack
 * handshake violation when pwrreq= 0->1 during TC7/10 entry
 */
static void xelpdp_tc_power_request_wa(struct intel_display *display, bool enable)
{
	/* check if mailbox is running busy */
	if (intel_de_wait_for_clear(display, TCSS_DISP_MAILBOX_IN_CMD,
				    TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY, 10)) {
		drm_dbg_kms(display->drm,
			    "Timeout waiting for TCSS mailbox run/busy bit to clear\n");
		return;
	}

	/* Data must be written before the command that latches it. */
	intel_de_write(display, TCSS_DISP_MAILBOX_IN_DATA, enable ? 1 : 0);
	/* Issue mailbox command 0x1 and set the run/busy bit to kick it off. */
	intel_de_write(display, TCSS_DISP_MAILBOX_IN_CMD,
		       TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY |
		       TCSS_DISP_MAILBOX_IN_CMD_DATA(0x1));

	/* wait to clear mailbox running busy bit before continuing */
	if (intel_de_wait_for_clear(display, TCSS_DISP_MAILBOX_IN_CMD,
				    TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY, 10)) {
		drm_dbg_kms(display->drm,
			    "Timeout after writing data to mailbox. Mailbox run/busy bit did not clear\n");
		return;
	}
}
1099 
1100 static void __xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
1101 {
1102 	struct intel_display *display = to_intel_display(tc->dig_port);
1103 	enum port port = tc->dig_port->base.port;
1104 	i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);
1105 	u32 val;
1106 
1107 	assert_tc_cold_blocked(tc);
1108 
1109 	if (DISPLAY_VER(display) == 30)
1110 		xelpdp_tc_power_request_wa(display, enable);
1111 
1112 	val = intel_de_read(display, reg);
1113 	if (enable)
1114 		val |= XELPDP_TCSS_POWER_REQUEST;
1115 	else
1116 		val &= ~XELPDP_TCSS_POWER_REQUEST;
1117 	intel_de_write(display, reg, val);
1118 }
1119 
/*
 * Request the TCSS power state to change to @enable and wait for the
 * transition to complete. On an enable failure the request is rolled back,
 * except on legacy ports where a failure is unexpected (WARN) and the
 * request is left asserted. Returns %true on success.
 */
static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
{
	struct intel_display *display = to_intel_display(tc->dig_port);

	__xelpdp_tc_phy_enable_tcss_power(tc, enable);

	/* When enabling, the PHY must report ready before the power state. */
	if (enable && !tc_phy_wait_for_ready(tc))
		goto out_disable;

	if (!xelpdp_tc_phy_wait_for_tcss_power(tc, enable))
		goto out_disable;

	return true;

out_disable:
	/* Legacy ports are expected to always power up; don't roll back. */
	if (drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY))
		return false;

	/* Nothing to roll back if the failed request was a disable. */
	if (!enable)
		return false;

	__xelpdp_tc_phy_enable_tcss_power(tc, false);
	xelpdp_tc_phy_wait_for_tcss_power(tc, false);

	return false;
}
1146 
1147 static void xelpdp_tc_phy_take_ownership(struct intel_tc_port *tc, bool take)
1148 {
1149 	struct intel_display *display = to_intel_display(tc->dig_port);
1150 	enum port port = tc->dig_port->base.port;
1151 	i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);
1152 	u32 val;
1153 
1154 	assert_tc_cold_blocked(tc);
1155 
1156 	val = intel_de_read(display, reg);
1157 	if (take)
1158 		val |= XELPDP_TC_PHY_OWNERSHIP;
1159 	else
1160 		val &= ~XELPDP_TC_PHY_OWNERSHIP;
1161 	intel_de_write(display, reg, val);
1162 }
1163 
1164 static bool xelpdp_tc_phy_is_owned(struct intel_tc_port *tc)
1165 {
1166 	struct intel_display *display = to_intel_display(tc->dig_port);
1167 	enum port port = tc->dig_port->base.port;
1168 	i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);
1169 
1170 	assert_tc_cold_blocked(tc);
1171 
1172 	return intel_de_read(display, reg) & XELPDP_TC_PHY_OWNERSHIP;
1173 }
1174 
/*
 * Read out the current PHY mode and pin configuration during driver load /
 * resume, taking a TC-cold blocking reference that is kept while the PHY
 * stays connected (tc->lock_wakeref).
 */
static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	intel_wakeref_t tc_cold_wref;
	enum intel_display_power_domain domain;

	/* Temporarily block TC-cold for the duration of the readout itself. */
	tc_cold_wref = __tc_cold_block(tc, &domain);

	tc->mode = tc_phy_get_current_mode(tc);
	if (tc->mode != TC_PORT_DISCONNECTED) {
		tc->lock_wakeref = tc_cold_block(tc);

		read_pin_configuration(tc);
		/*
		 * Set a valid lane count value for a DP-alt sink which got
		 * disconnected. The driver can only disable the output on this PHY.
		 */
		if (tc->max_lane_count == 0)
			tc->max_lane_count = 4;
	}

	/* In legacy/DP-alt mode the TCSS power is expected to be enabled. */
	drm_WARN_ON(display->drm,
		    (tc->mode == TC_PORT_DP_ALT || tc->mode == TC_PORT_LEGACY) &&
		    !xelpdp_tc_phy_tcss_power_is_enabled(tc));

	__tc_cold_unblock(tc, domain, tc_cold_wref);
}
1202 
/*
 * Connect the PHY in tc->mode: for TBT-alt only the TC-cold blocking
 * reference and pin readout are needed; for legacy/DP-alt the TCSS power
 * and PHY ownership must be acquired and the mode verified with the
 * required lane count. Unwinds everything on failure and returns %false.
 */
static bool xelpdp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
	tc->lock_wakeref = tc_cold_block(tc);

	if (tc->mode == TC_PORT_TBT_ALT) {
		read_pin_configuration(tc);

		return true;
	}

	if (!xelpdp_tc_phy_enable_tcss_power(tc, true))
		goto out_unblock_tccold;

	xelpdp_tc_phy_take_ownership(tc, true);

	read_pin_configuration(tc);

	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
		goto out_release_phy;

	return true;

out_release_phy:
	/* Release in reverse order: ownership first, then TCSS power. */
	xelpdp_tc_phy_take_ownership(tc, false);
	xelpdp_tc_phy_wait_for_tcss_power(tc, false);

out_unblock_tccold:
	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));

	return false;
}
1234 
/*
 * Disconnect the PHY, undoing what xelpdp_tc_phy_connect() did for the
 * current mode. Legacy/DP-alt fall through to the TBT-alt case on purpose:
 * all modes must drop the TC-cold blocking reference.
 */
static void xelpdp_tc_phy_disconnect(struct intel_tc_port *tc)
{
	switch (tc->mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		xelpdp_tc_phy_take_ownership(tc, false);
		xelpdp_tc_phy_enable_tcss_power(tc, false);
		fallthrough;
	case TC_PORT_TBT_ALT:
		tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
		break;
	default:
		MISSING_CASE(tc->mode);
	}
}
1250 
/*
 * TC PHY vtable for XE_LPD+ (display version >= 14); reuses the TGL cold-off
 * domain and the ADL-P ready-check/init hooks.
 */
static const struct intel_tc_phy_ops xelpdp_tc_phy_ops = {
	.cold_off_domain = tgl_tc_phy_cold_off_domain,
	.hpd_live_status = xelpdp_tc_phy_hpd_live_status,
	.is_ready = adlp_tc_phy_is_ready,
	.is_owned = xelpdp_tc_phy_is_owned,
	.get_hw_state = xelpdp_tc_phy_get_hw_state,
	.connect = xelpdp_tc_phy_connect,
	.disconnect = xelpdp_tc_phy_disconnect,
	.init = adlp_tc_phy_init,
};
1261 
/*
 * Generic TC PHY handlers
 * -----------------------
 */
/* Return the power domain blocking TC-cold for this PHY (platform hook). */
static enum intel_display_power_domain
tc_phy_cold_off_domain(struct intel_tc_port *tc)
{
	return tc->phy_ops->cold_off_domain(tc);
}
1271 
1272 static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc)
1273 {
1274 	struct intel_display *display = to_intel_display(tc->dig_port);
1275 	u32 mask;
1276 
1277 	mask = tc->phy_ops->hpd_live_status(tc);
1278 
1279 	/* The sink can be connected only in a single mode. */
1280 	drm_WARN_ON_ONCE(display->drm, hweight32(mask) > 1);
1281 
1282 	return mask;
1283 }
1284 
/* Platform hook: is the PHY ready (IOM/TCSS firmware init complete)? */
static bool tc_phy_is_ready(struct intel_tc_port *tc)
{
	return tc->phy_ops->is_ready(tc);
}

/* Platform hook: does display hold PHY ownership? */
static bool tc_phy_is_owned(struct intel_tc_port *tc)
{
	return tc->phy_ops->is_owned(tc);
}

/* Platform hook: read out the PHY's mode/pin state during load/resume. */
static void tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	tc->phy_ops->get_hw_state(tc);
}
1299 
1300 /* Is the PHY owned by display i.e. is it in legacy or DP-alt mode? */
1301 static bool tc_phy_owned_by_display(struct intel_tc_port *tc,
1302 				    bool phy_is_ready, bool phy_is_owned)
1303 {
1304 	struct intel_display *display = to_intel_display(tc->dig_port);
1305 
1306 	if (DISPLAY_VER(display) < 20) {
1307 		drm_WARN_ON(display->drm, phy_is_owned && !phy_is_ready);
1308 
1309 		return phy_is_ready && phy_is_owned;
1310 	} else {
1311 		return phy_is_owned;
1312 	}
1313 }
1314 
1315 static bool tc_phy_is_connected(struct intel_tc_port *tc,
1316 				enum icl_port_dpll_id port_pll_type)
1317 {
1318 	struct intel_display *display = to_intel_display(tc->dig_port);
1319 	bool phy_is_ready = tc_phy_is_ready(tc);
1320 	bool phy_is_owned = tc_phy_is_owned(tc);
1321 	bool is_connected;
1322 
1323 	if (tc_phy_owned_by_display(tc, phy_is_ready, phy_is_owned))
1324 		is_connected = port_pll_type == ICL_PORT_DPLL_MG_PHY;
1325 	else
1326 		is_connected = port_pll_type == ICL_PORT_DPLL_DEFAULT;
1327 
1328 	drm_dbg_kms(display->drm,
1329 		    "Port %s: PHY connected: %s (ready: %s, owned: %s, pll_type: %s)\n",
1330 		    tc->port_name,
1331 		    str_yes_no(is_connected),
1332 		    str_yes_no(phy_is_ready),
1333 		    str_yes_no(phy_is_owned),
1334 		    port_pll_type == ICL_PORT_DPLL_DEFAULT ? "tbt" : "non-tbt");
1335 
1336 	return is_connected;
1337 }
1338 
/*
 * Wait for the PHY to become ready, polling every 1 ms for up to 500 ms.
 * Returns %true on success, logging an error on timeout.
 */
static bool tc_phy_wait_for_ready(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	bool is_ready;
	int ret;

	ret = poll_timeout_us(is_ready = tc_phy_is_ready(tc),
			      is_ready,
			      1000, 500 * 1000, false);
	if (ret) {
		drm_err(display->drm, "Port %s: timeout waiting for PHY ready\n",
			tc->port_name);

		return false;
	}

	return true;
}
1357 
1358 static enum tc_port_mode
1359 hpd_mask_to_tc_mode(u32 live_status_mask)
1360 {
1361 	if (live_status_mask)
1362 		return fls(live_status_mask) - 1;
1363 
1364 	return TC_PORT_DISCONNECTED;
1365 }
1366 
1367 static enum tc_port_mode
1368 tc_phy_hpd_live_mode(struct intel_tc_port *tc)
1369 {
1370 	u32 live_status_mask = tc_phy_hpd_live_status(tc);
1371 
1372 	return hpd_mask_to_tc_mode(live_status_mask);
1373 }
1374 
1375 static enum tc_port_mode
1376 get_tc_mode_in_phy_owned_state(struct intel_tc_port *tc,
1377 			       enum tc_port_mode live_mode)
1378 {
1379 	switch (live_mode) {
1380 	case TC_PORT_LEGACY:
1381 	case TC_PORT_DP_ALT:
1382 		return live_mode;
1383 	default:
1384 		MISSING_CASE(live_mode);
1385 		fallthrough;
1386 	case TC_PORT_TBT_ALT:
1387 	case TC_PORT_DISCONNECTED:
1388 		if (tc->legacy_port)
1389 			return TC_PORT_LEGACY;
1390 		else
1391 			return TC_PORT_DP_ALT;
1392 	}
1393 }
1394 
1395 static enum tc_port_mode
1396 get_tc_mode_in_phy_not_owned_state(struct intel_tc_port *tc,
1397 				   enum tc_port_mode live_mode)
1398 {
1399 	switch (live_mode) {
1400 	case TC_PORT_LEGACY:
1401 		return TC_PORT_DISCONNECTED;
1402 	case TC_PORT_DP_ALT:
1403 	case TC_PORT_TBT_ALT:
1404 		return TC_PORT_TBT_ALT;
1405 	default:
1406 		MISSING_CASE(live_mode);
1407 		fallthrough;
1408 	case TC_PORT_DISCONNECTED:
1409 		if (tc->legacy_port)
1410 			return TC_PORT_DISCONNECTED;
1411 		else
1412 			return TC_PORT_TBT_ALT;
1413 	}
1414 }
1415 
/*
 * Derive the PHY's current mode from its live HPD status and its
 * ready/ownership state, used during HW state readout.
 */
static enum tc_port_mode
tc_phy_get_current_mode(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum tc_port_mode live_mode = tc_phy_hpd_live_mode(tc);
	bool phy_is_ready;
	bool phy_is_owned;
	enum tc_port_mode mode;

	/*
	 * For legacy ports the IOM firmware initializes the PHY during boot-up
	 * and system resume whether or not a sink is connected. Wait here for
	 * the initialization to get ready.
	 */
	if (tc->legacy_port)
		tc_phy_wait_for_ready(tc);

	phy_is_ready = tc_phy_is_ready(tc);
	phy_is_owned = tc_phy_is_owned(tc);

	if (!tc_phy_owned_by_display(tc, phy_is_ready, phy_is_owned)) {
		mode = get_tc_mode_in_phy_not_owned_state(tc, live_mode);
	} else {
		/* A display-owned PHY can't have a TBT-alt live HPD. */
		drm_WARN_ON(display->drm, live_mode == TC_PORT_TBT_ALT);
		mode = get_tc_mode_in_phy_owned_state(tc, live_mode);
	}

	drm_dbg_kms(display->drm,
		    "Port %s: PHY mode: %s (ready: %s, owned: %s, HPD: %s)\n",
		    tc->port_name,
		    tc_port_mode_name(mode),
		    str_yes_no(phy_is_ready),
		    str_yes_no(phy_is_owned),
		    tc_port_mode_name(live_mode));

	return mode;
}
1453 
1454 static enum tc_port_mode default_tc_mode(struct intel_tc_port *tc)
1455 {
1456 	if (tc->legacy_port)
1457 		return TC_PORT_LEGACY;
1458 
1459 	return TC_PORT_TBT_ALT;
1460 }
1461 
1462 static enum tc_port_mode
1463 hpd_mask_to_target_mode(struct intel_tc_port *tc, u32 live_status_mask)
1464 {
1465 	enum tc_port_mode mode = hpd_mask_to_tc_mode(live_status_mask);
1466 
1467 	if (mode != TC_PORT_DISCONNECTED)
1468 		return mode;
1469 
1470 	return default_tc_mode(tc);
1471 }
1472 
1473 static enum tc_port_mode
1474 tc_phy_get_target_mode(struct intel_tc_port *tc)
1475 {
1476 	u32 live_status_mask = tc_phy_hpd_live_status(tc);
1477 
1478 	return hpd_mask_to_target_mode(tc, live_status_mask);
1479 }
1480 
/*
 * Connect the PHY in the mode implied by the live HPD status with
 * @required_lanes. If connecting in that mode fails, fall back to the
 * port's default mode; a failure of that too is unexpected (WARN).
 */
static void tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	u32 live_status_mask = tc_phy_hpd_live_status(tc);
	bool connected;

	tc_port_fixup_legacy_flag(tc, live_status_mask);

	tc->mode = hpd_mask_to_target_mode(tc, live_status_mask);

	connected = tc->phy_ops->connect(tc, required_lanes);
	if (!connected && tc->mode != default_tc_mode(tc)) {
		tc->mode = default_tc_mode(tc);
		connected = tc->phy_ops->connect(tc, required_lanes);
	}

	drm_WARN_ON(display->drm, !connected);
}
1499 
1500 static void tc_phy_disconnect(struct intel_tc_port *tc)
1501 {
1502 	if (tc->mode != TC_PORT_DISCONNECTED) {
1503 		tc->phy_ops->disconnect(tc);
1504 		tc->mode = TC_PORT_DISCONNECTED;
1505 	}
1506 }
1507 
/* Run the platform PHY init hook under the TC port mode lock. */
static void tc_phy_init(struct intel_tc_port *tc)
{
	mutex_lock(&tc->lock);
	tc->phy_ops->init(tc);
	mutex_unlock(&tc->lock);
}
1514 
/*
 * Disconnect the PHY and - unless @force_disconnect - reconnect it in the
 * mode matching the current live HPD status, with @required_lanes.
 */
static void intel_tc_port_reset_mode(struct intel_tc_port *tc,
				     int required_lanes, bool force_disconnect)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum tc_port_mode old_tc_mode = tc->mode;

	/*
	 * Flush pending power domain releases, then sanity check that AUX
	 * power is off before changing the PHY mode on ports where AUX
	 * doesn't keep TC-cold blocked.
	 */
	intel_display_power_flush_work(display);
	if (!intel_tc_cold_requires_aux_pw(dig_port)) {
		enum intel_display_power_domain aux_domain;

		aux_domain = intel_aux_power_domain(dig_port);
		if (intel_display_power_is_enabled(display, aux_domain))
			drm_dbg_kms(display->drm, "Port %s: AUX unexpectedly powered\n",
				    tc->port_name);
	}

	tc_phy_disconnect(tc);
	if (!force_disconnect)
		tc_phy_connect(tc, required_lanes);

	drm_dbg_kms(display->drm,
		    "Port %s: TC port mode reset (%s -> %s) pin assignment: %c max lanes: %d\n",
		    tc->port_name,
		    tc_port_mode_name(old_tc_mode),
		    tc_port_mode_name(tc->mode),
		    pin_assignment_name(tc->pin_assignment),
		    tc->max_lane_count);
}
1544 
/* Does the current mode differ from the one implied by the live HPD status? */
static bool intel_tc_port_needs_reset(struct intel_tc_port *tc)
{
	return tc_phy_get_target_mode(tc) != tc->mode;
}
1549 
/* Reset the port mode if forced to disconnect or if it's out of date. */
static void intel_tc_port_update_mode(struct intel_tc_port *tc,
				      int required_lanes, bool force_disconnect)
{
	if (force_disconnect ||
	    intel_tc_port_needs_reset(tc))
		intel_tc_port_reset_mode(tc, required_lanes, force_disconnect);
}
1557 
/* Take a link reference, keeping the PHY mode locked while it's held. */
static void __intel_tc_port_get_link(struct intel_tc_port *tc)
{
	tc->link_refcount++;
}

/* Drop a link reference taken via __intel_tc_port_get_link(). */
static void __intel_tc_port_put_link(struct intel_tc_port *tc)
{
	tc->link_refcount--;
}
1567 
1568 static bool tc_port_is_enabled(struct intel_tc_port *tc)
1569 {
1570 	struct intel_display *display = to_intel_display(tc->dig_port);
1571 	struct intel_digital_port *dig_port = tc->dig_port;
1572 
1573 	assert_tc_port_power_enabled(tc);
1574 
1575 	return intel_de_read(display, DDI_BUF_CTL(dig_port->base.port)) &
1576 	       DDI_BUF_CTL_ENABLE;
1577 }
1578 
/**
 * intel_tc_port_init_mode: Read out HW state and init the given port's TypeC mode
 * @dig_port: digital port
 *
 * Read out the HW state and initialize the TypeC mode of @dig_port. The mode
 * will be locked until intel_tc_port_sanitize_mode() is called.
 */
void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
{
	struct intel_display *display = to_intel_display(dig_port);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	bool update_mode = false;

	mutex_lock(&tc->lock);

	/* The port must start out fully disconnected and unreferenced. */
	drm_WARN_ON(display->drm, tc->mode != TC_PORT_DISCONNECTED);
	drm_WARN_ON(display->drm, tc->lock_wakeref);
	drm_WARN_ON(display->drm, tc->link_refcount);

	tc_phy_get_hw_state(tc);
	/*
	 * Save the initial mode for the state check in
	 * intel_tc_port_sanitize_mode().
	 */
	tc->init_mode = tc->mode;

	/*
	 * The PHY needs to be connected for AUX to work during HW readout and
	 * MST topology resume, but the PHY mode can only be changed if the
	 * port is disabled.
	 *
	 * An exception is the case where BIOS leaves the PHY incorrectly
	 * disconnected on an enabled legacy port. Work around that by
	 * connecting the PHY even though the port is enabled. This doesn't
	 * cause a problem as the PHY ownership state is ignored by the
	 * IOM/TCSS firmware (only display can own the PHY in that case).
	 */
	if (!tc_port_is_enabled(tc)) {
		update_mode = true;
	} else if (tc->mode == TC_PORT_DISCONNECTED) {
		drm_WARN_ON(display->drm, !tc->legacy_port);
		drm_err(display->drm,
			"Port %s: PHY disconnected on enabled port, connecting it\n",
			tc->port_name);
		update_mode = true;
	}

	if (update_mode)
		intel_tc_port_update_mode(tc, 1, false);

	/* Prevent changing tc->mode until intel_tc_port_sanitize_mode() is called. */
	__intel_tc_port_get_link(tc);

	mutex_unlock(&tc->lock);
}
1634 
/*
 * Report whether the port drives any active stream (SST via @crtc_state or
 * MST streams), and sanity check that an active port has its PHY connected
 * consistently with the PLL type in use.
 */
static bool tc_port_has_active_streams(struct intel_tc_port *tc,
				       const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum icl_port_dpll_id pll_type = ICL_PORT_DPLL_DEFAULT;
	int active_streams = 0;

	if (dig_port->dp.is_mst) {
		/* TODO: get the PLL type for MST, once HW readout is done for it. */
		active_streams = intel_dp_mst_active_streams(&dig_port->dp);
	} else if (crtc_state && crtc_state->hw.active) {
		pll_type = intel_ddi_port_pll_type(&dig_port->base, crtc_state);
		active_streams = 1;
	}

	if (active_streams && !tc_phy_is_connected(tc, pll_type))
		drm_err(display->drm,
			"Port %s: PHY disconnected with %d active stream(s)\n",
			tc->port_name, active_streams);

	return active_streams;
}
1658 
/**
 * intel_tc_port_sanitize_mode: Sanitize the given port's TypeC mode
 * @dig_port: digital port
 * @crtc_state: atomic state of CRTC connected to @dig_port
 *
 * Sanitize @dig_port's TypeC mode wrt. the encoder's state right after driver
 * loading and system resume:
 * If the encoder is enabled keep the TypeC mode/PHY connected state locked until
 * the encoder is disabled.
 * If the encoder is disabled make sure the PHY is disconnected.
 * @crtc_state is valid if @dig_port is enabled, NULL otherwise.
 */
void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
				 const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(dig_port);
	struct intel_tc_port *tc = to_tc_port(dig_port);

	mutex_lock(&tc->lock);

	/* Only the reference taken by intel_tc_port_init_mode() may be held. */
	drm_WARN_ON(display->drm, tc->link_refcount != 1);
	if (!tc_port_has_active_streams(tc, crtc_state)) {
		/*
		 * TBT-alt is the default mode in any case the PHY ownership is not
		 * held (regardless of the sink's connected live state), so
		 * we'll just switch to disconnected mode from it here without
		 * a note.
		 */
		if (tc->init_mode != TC_PORT_TBT_ALT &&
		    tc->init_mode != TC_PORT_DISCONNECTED)
			drm_dbg_kms(display->drm,
				    "Port %s: PHY left in %s mode on disabled port, disconnecting it\n",
				    tc->port_name,
				    tc_port_mode_name(tc->init_mode));
		tc_phy_disconnect(tc);
		/* Drop the init-time reference, unlocking the mode. */
		__intel_tc_port_put_link(tc);
	}

	drm_dbg_kms(display->drm, "Port %s: sanitize mode (%s) pin assignment: %c max lanes: %d\n",
		    tc->port_name,
		    tc_port_mode_name(tc->mode),
		    pin_assignment_name(tc->pin_assignment),
		    tc->max_lane_count);

	mutex_unlock(&tc->lock);
}
1705 
/*
 * The type-C ports are different because even when they are connected, they may
 * not be available/usable by the graphics driver: see the comment on
 * icl_tc_phy_connect(). So in our driver instead of adding the additional
 * concept of "usable" and make everything check for "connected and usable" we
 * define a port as "connected" when it is not only connected, but also when it
 * is usable by the rest of the driver. That maintains the old assumption that
 * connected ports are usable, and avoids exposing to the users objects they
 * can't really use.
 */
bool intel_tc_port_connected(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	u32 mask = ~0;

	drm_WARN_ON(display->drm, !intel_tc_port_ref_held(dig_port));

	/* On a connected port, only a live HPD in the current mode counts. */
	if (tc->mode != TC_PORT_DISCONNECTED)
		mask = BIT(tc->mode);

	return tc_phy_hpd_live_status(tc) & mask;
}
1730 
1731 static bool __intel_tc_port_link_needs_reset(struct intel_tc_port *tc)
1732 {
1733 	bool ret;
1734 
1735 	mutex_lock(&tc->lock);
1736 
1737 	ret = tc->link_refcount &&
1738 	      tc->mode == TC_PORT_DP_ALT &&
1739 	      intel_tc_port_needs_reset(tc);
1740 
1741 	mutex_unlock(&tc->lock);
1742 
1743 	return ret;
1744 }
1745 
1746 bool intel_tc_port_link_needs_reset(struct intel_digital_port *dig_port)
1747 {
1748 	if (!intel_encoder_is_tc(&dig_port->base))
1749 		return false;
1750 
1751 	return __intel_tc_port_link_needs_reset(to_tc_port(dig_port));
1752 }
1753 
/*
 * Build and commit an atomic state that forces a modeset on all pipes
 * driven by the port, thereby resetting its link. Returns 0 on success or
 * if nothing needs to be done, a negative error code otherwise (-EDEADLK
 * triggers the caller's lock retry).
 */
static int reset_link_commit(struct intel_tc_port *tc,
			     struct intel_atomic_state *state,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;
	struct intel_dp *intel_dp = enc_to_intel_dp(&dig_port->base);
	struct intel_crtc *crtc;
	u8 pipe_mask;
	int ret;

	ret = drm_modeset_lock(&display->drm->mode_config.connection_mutex, ctx);
	if (ret)
		return ret;

	ret = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask);
	if (ret)
		return ret;

	if (!pipe_mask)
		return 0;

	for_each_intel_crtc_in_pipe_mask(display->drm, crtc, pipe_mask) {
		struct intel_crtc_state *crtc_state;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		/* Force a modeset on this pipe to retrain the link. */
		crtc_state->uapi.connectors_changed = true;
	}

	/* Re-check with all the locks held in case the condition cleared. */
	if (!__intel_tc_port_link_needs_reset(tc))
		return 0;

	return drm_atomic_commit(&state->base);
}
1791 
/*
 * Reset the port's link via an atomic commit, using the modeset lock retry
 * helper to handle -EDEADLK backoffs.
 */
static int reset_link(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *_state;
	struct intel_atomic_state *state;
	int ret;

	_state = drm_atomic_state_alloc(display->drm);
	if (!_state)
		return -ENOMEM;

	state = to_intel_atomic_state(_state);
	/* Mark as a driver-internal commit, not triggered by userspace. */
	state->internal = true;

	intel_modeset_lock_ctx_retry(&ctx, state, 0, ret)
		ret = reset_link_commit(tc, state, &ctx);

	drm_atomic_state_put(&state->base);

	return ret;
}
1814 
/*
 * Delayed work resetting the link of a DP-alt port whose sink got
 * disconnected while the link was still in use.
 */
static void intel_tc_port_link_reset_work(struct work_struct *work)
{
	struct intel_tc_port *tc =
		container_of(work, struct intel_tc_port, link_reset_work.work);
	struct intel_display *display = to_intel_display(tc->dig_port);
	int ret;

	/* The condition may have cleared since the work was queued. */
	if (!__intel_tc_port_link_needs_reset(tc))
		return;

	mutex_lock(&display->drm->mode_config.mutex);

	drm_dbg_kms(display->drm,
		    "Port %s: TypeC DP-alt sink disconnected, resetting link\n",
		    tc->port_name);
	ret = reset_link(tc);
	drm_WARN_ON(display->drm, ret);

	mutex_unlock(&display->drm->mode_config.mutex);
}
1835 
1836 bool intel_tc_port_link_reset(struct intel_digital_port *dig_port)
1837 {
1838 	if (!intel_tc_port_link_needs_reset(dig_port))
1839 		return false;
1840 
1841 	queue_delayed_work(system_unbound_wq,
1842 			   &to_tc_port(dig_port)->link_reset_work,
1843 			   msecs_to_jiffies(2000));
1844 
1845 	return true;
1846 }
1847 
1848 void intel_tc_port_link_cancel_reset_work(struct intel_digital_port *dig_port)
1849 {
1850 	struct intel_tc_port *tc = to_tc_port(dig_port);
1851 
1852 	if (!intel_encoder_is_tc(&dig_port->base))
1853 		return;
1854 
1855 	cancel_delayed_work(&tc->link_reset_work);
1856 }
1857 
/*
 * Lock the port's mode and - if no link reference holds it fixed - update
 * the mode to match the live HPD status, connecting the PHY with
 * @required_lanes.
 */
static void __intel_tc_port_lock(struct intel_tc_port *tc,
				 int required_lanes)
{
	struct intel_display *display = to_intel_display(tc->dig_port);

	mutex_lock(&tc->lock);

	/* The PHY must stay connected while the lock is held. */
	cancel_delayed_work(&tc->disconnect_phy_work);

	if (!tc->link_refcount)
		intel_tc_port_update_mode(tc, required_lanes,
					  false);

	drm_WARN_ON(display->drm, tc->mode == TC_PORT_DISCONNECTED);
	drm_WARN_ON(display->drm, tc->mode != TC_PORT_TBT_ALT && !tc_phy_is_owned(tc));
}
1874 
/* Lock the port's TypeC mode, connecting the PHY with at least 1 lane. */
void intel_tc_port_lock(struct intel_digital_port *dig_port)
{
	__intel_tc_port_lock(to_tc_port(dig_port), 1);
}
1879 
/*
 * Disconnect the given digital port from its TypeC PHY (handing back the
 * control of the PHY to the TypeC subsystem). This will happen in a delayed
 * manner after each aux transactions and modeset disables.
 */
static void intel_tc_port_disconnect_phy_work(struct work_struct *work)
{
	struct intel_tc_port *tc =
		container_of(work, struct intel_tc_port, disconnect_phy_work.work);

	mutex_lock(&tc->lock);

	/* A held link reference keeps the PHY connected; skip in that case. */
	if (!tc->link_refcount)
		intel_tc_port_update_mode(tc, 1, true);

	mutex_unlock(&tc->lock);
}
1897 
/**
 * intel_tc_port_flush_work: flush the work disconnecting the PHY
 * @dig_port: digital port
 *
 * Flush the delayed work disconnecting an idle PHY.
 */
static void intel_tc_port_flush_work(struct intel_digital_port *dig_port)
{
	flush_delayed_work(&to_tc_port(dig_port)->disconnect_phy_work);
}
1908 
/* Quiesce the port's pending TypeC work before system suspend. */
void intel_tc_port_suspend(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	cancel_delayed_work_sync(&tc->link_reset_work);
	intel_tc_port_flush_work(dig_port);
}
1916 
/*
 * Unlock the port's TypeC mode; if the PHY is connected but unreferenced,
 * schedule its delayed disconnect.
 */
void intel_tc_port_unlock(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	if (!tc->link_refcount && tc->mode != TC_PORT_DISCONNECTED)
		queue_delayed_work(system_unbound_wq, &tc->disconnect_phy_work,
				   msecs_to_jiffies(1000));

	mutex_unlock(&tc->lock);
}
1927 
1928 bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
1929 {
1930 	struct intel_tc_port *tc = to_tc_port(dig_port);
1931 
1932 	return mutex_is_locked(&tc->lock) ||
1933 	       tc->link_refcount;
1934 }
1935 
/*
 * Take a link reference on the port, connecting the PHY with
 * @required_lanes and keeping its mode locked while the reference is held.
 */
void intel_tc_port_get_link(struct intel_digital_port *dig_port,
			    int required_lanes)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	__intel_tc_port_lock(tc, required_lanes);
	__intel_tc_port_get_link(tc);
	intel_tc_port_unlock(dig_port);
}
1945 
/*
 * Drop a link reference taken via intel_tc_port_get_link() and disconnect
 * the PHY synchronously if it became idle.
 */
void intel_tc_port_put_link(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	intel_tc_port_lock(dig_port);
	__intel_tc_port_put_link(tc);
	intel_tc_port_unlock(dig_port);

	/*
	 * The firmware will not update the HPD status of other TypeC ports
	 * that are active in DP-alt mode with their sink disconnected, until
	 * this port is disabled and its PHY gets disconnected. Make sure this
	 * happens in a timely manner by disconnecting the PHY synchronously.
	 */
	intel_tc_port_flush_work(dig_port);
}
1962 
/*
 * Allocate and initialize the TypeC state for @dig_port, selecting the
 * platform PHY vtable and reading out the initial mode. @is_legacy marks
 * legacy (non-USB-C) ports. Returns 0 on success or a negative error code.
 */
int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
{
	struct intel_display *display = to_intel_display(dig_port);
	struct intel_tc_port *tc;
	enum port port = dig_port->base.port;
	enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);

	if (drm_WARN_ON(display->drm, tc_port == TC_PORT_NONE))
		return -EINVAL;

	tc = kzalloc(sizeof(*tc), GFP_KERNEL);
	if (!tc)
		return -ENOMEM;

	dig_port->tc = tc;
	tc->dig_port = dig_port;

	/* Pick the PHY vtable matching the platform's display version. */
	if (DISPLAY_VER(display) >= 14)
		tc->phy_ops = &xelpdp_tc_phy_ops;
	else if (DISPLAY_VER(display) >= 13)
		tc->phy_ops = &adlp_tc_phy_ops;
	else if (DISPLAY_VER(display) >= 12)
		tc->phy_ops = &tgl_tc_phy_ops;
	else
		tc->phy_ops = &icl_tc_phy_ops;

	/* E.g. "A/TC#1"; freed in intel_tc_port_cleanup(). */
	tc->port_name = kasprintf(GFP_KERNEL, "%c/TC#%d", port_name(port),
				  tc_port + 1);
	if (!tc->port_name) {
		kfree(tc);
		return -ENOMEM;
	}

	mutex_init(&tc->lock);
	/* TODO: Combine the two works */
	INIT_DELAYED_WORK(&tc->disconnect_phy_work, intel_tc_port_disconnect_phy_work);
	INIT_DELAYED_WORK(&tc->link_reset_work, intel_tc_port_link_reset_work);
	tc->legacy_port = is_legacy;
	tc->mode = TC_PORT_DISCONNECTED;
	tc->link_refcount = 0;

	tc_phy_init(tc);

	intel_tc_port_init_mode(dig_port);

	return 0;
}
2010 
2011 void intel_tc_port_cleanup(struct intel_digital_port *dig_port)
2012 {
2013 	intel_tc_port_suspend(dig_port);
2014 
2015 	kfree(dig_port->tc->port_name);
2016 	kfree(dig_port->tc);
2017 	dig_port->tc = NULL;
2018 }
2019