xref: /linux/drivers/gpu/drm/i915/display/intel_dp.c (revision 62597edf6340191511bdf9a7f64fa315ddc58805)
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27 
28 #include <linux/export.h>
29 #include <linux/i2c.h>
30 #include <linux/notifier.h>
31 #include <linux/slab.h>
32 #include <linux/string_helpers.h>
33 #include <linux/timekeeping.h>
34 #include <linux/types.h>
35 
36 #include <asm/byteorder.h>
37 
38 #include <drm/display/drm_dp_helper.h>
39 #include <drm/display/drm_dp_tunnel.h>
40 #include <drm/display/drm_dsc_helper.h>
41 #include <drm/display/drm_hdmi_helper.h>
42 #include <drm/drm_atomic_helper.h>
43 #include <drm/drm_crtc.h>
44 #include <drm/drm_edid.h>
45 #include <drm/drm_probe_helper.h>
46 
47 #include "g4x_dp.h"
48 #include "i915_drv.h"
49 #include "i915_irq.h"
50 #include "i915_reg.h"
51 #include "intel_alpm.h"
52 #include "intel_atomic.h"
53 #include "intel_audio.h"
54 #include "intel_backlight.h"
55 #include "intel_combo_phy_regs.h"
56 #include "intel_connector.h"
57 #include "intel_crtc.h"
58 #include "intel_cx0_phy.h"
59 #include "intel_ddi.h"
60 #include "intel_de.h"
61 #include "intel_display_driver.h"
62 #include "intel_display_types.h"
63 #include "intel_dp.h"
64 #include "intel_dp_aux.h"
65 #include "intel_dp_hdcp.h"
66 #include "intel_dp_link_training.h"
67 #include "intel_dp_mst.h"
68 #include "intel_dp_tunnel.h"
69 #include "intel_dpio_phy.h"
70 #include "intel_dpll.h"
71 #include "intel_drrs.h"
72 #include "intel_encoder.h"
73 #include "intel_fifo_underrun.h"
74 #include "intel_hdcp.h"
75 #include "intel_hdmi.h"
76 #include "intel_hotplug.h"
77 #include "intel_hotplug_irq.h"
78 #include "intel_lspcon.h"
79 #include "intel_lvds.h"
80 #include "intel_modeset_lock.h"
81 #include "intel_panel.h"
82 #include "intel_pch_display.h"
83 #include "intel_pps.h"
84 #include "intel_psr.h"
85 #include "intel_quirks.h"
86 #include "intel_tc.h"
87 #include "intel_vdsc.h"
88 #include "intel_vrr.h"
89 #include "intel_crtc_state_dump.h"
90 
/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE			2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0		340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1		400000

/* Max DSC line buffer depth supported by HW. */
#define INTEL_DP_DSC_MAX_LINE_BUF_DEPTH		13

/* DP DSC FEC Overhead factor in ppm = 1/(0.972261) = 1.028530 */
#define DP_DSC_FEC_OVERHEAD_FACTOR		1028530

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)


/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/* With Single pipe configuration, HW is capable of supporting maximum
 * of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};
116 
117 /**
118  * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
119  * @intel_dp: DP struct
120  *
121  * If a CPU or PCH DP output is attached to an eDP panel, this function
122  * will return true, and false otherwise.
123  *
124  * This function is not safe to use prior to encoder type being set.
125  */
126 bool intel_dp_is_edp(struct intel_dp *intel_dp)
127 {
128 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
129 
130 	return dig_port->base.type == INTEL_OUTPUT_EDP;
131 }
132 
133 bool intel_dp_as_sdp_supported(struct intel_dp *intel_dp)
134 {
135 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
136 
137 	return HAS_AS_SDP(i915) &&
138 		drm_dp_as_sdp_supported(&intel_dp->aux, intel_dp->dpcd);
139 }
140 
141 static void intel_dp_unset_edid(struct intel_dp *intel_dp);
142 
/* Is link rate UHBR and thus 128b/132b? Delegates the check to the DRM helper. */
bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state)
{
	return drm_dp_is_uhbr_rate(crtc_state->port_clock);
}
148 
/**
 * intel_dp_link_symbol_size - get the link symbol size for a given link rate
 * @rate: link rate in 10kbit/s units
 *
 * Returns the link symbol size in bits/symbol units depending on the link
 * rate -> channel coding.
 */
int intel_dp_link_symbol_size(int rate)
{
	/* 128b/132b links carry 32 bit symbols, 8b/10b links 10 bit symbols. */
	if (drm_dp_is_uhbr_rate(rate))
		return 32;

	return 10;
}
160 
/**
 * intel_dp_link_symbol_clock - convert link rate to link symbol clock
 * @rate: link rate in 10kbit/s units
 *
 * Returns the link symbol clock frequency in kHz units depending on the
 * link rate and channel coding.
 */
int intel_dp_link_symbol_clock(int rate)
{
	/* rate * 10 converts 10kbit/s to kbit/s; dividing by bits/symbol gives kHz. */
	return DIV_ROUND_CLOSEST(rate * 10, intel_dp_link_symbol_size(rate));
}
172 
173 static int max_dprx_rate(struct intel_dp *intel_dp)
174 {
175 	if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
176 		return drm_dp_tunnel_max_dprx_rate(intel_dp->tunnel);
177 
178 	return drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
179 }
180 
181 static int max_dprx_lane_count(struct intel_dp *intel_dp)
182 {
183 	if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
184 		return drm_dp_tunnel_max_dprx_lane_count(intel_dp->tunnel);
185 
186 	return drm_dp_max_lane_count(intel_dp->dpcd);
187 }
188 
/* Fall back to the single mandatory RBR (1.62 Gbps) sink rate. */
static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp)
{
	intel_dp->sink_rates[0] = 162000;
	intel_dp->num_sink_rates = 1;
}
194 
/* update sink rates from dpcd */
static void intel_dp_set_dpcd_sink_rates(struct intel_dp *intel_dp)
{
	static const int dp_rates[] = {
		162000, 270000, 540000, 810000
	};
	int i, max_rate;
	int max_lttpr_rate;

	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
		/* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
		static const int quirk_rates[] = { 162000, 270000, 324000 };

		memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates));
		intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates);

		return;
	}

	/*
	 * Sink rates for 8b/10b.
	 */
	/* An LTTPR in the link may cap the rate below the DPRX's own max. */
	max_rate = max_dprx_rate(intel_dp);
	max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps);
	if (max_lttpr_rate)
		max_rate = min(max_rate, max_lttpr_rate);

	/* After this loop i is the number of supported 8b/10b rates. */
	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
		if (dp_rates[i] > max_rate)
			break;
		intel_dp->sink_rates[i] = dp_rates[i];
	}

	/*
	 * Sink rates for 128b/132b. If set, sink should support all 8b/10b
	 * rates and 10 Gbps.
	 */
	if (drm_dp_128b132b_supported(intel_dp->dpcd)) {
		u8 uhbr_rates = 0;

		/* UHBR rates are appended after the 8b/10b rates; room for 3 more. */
		BUILD_BUG_ON(ARRAY_SIZE(intel_dp->sink_rates) < ARRAY_SIZE(dp_rates) + 3);

		drm_dp_dpcd_readb(&intel_dp->aux,
				  DP_128B132B_SUPPORTED_LINK_RATES, &uhbr_rates);

		if (drm_dp_lttpr_count(intel_dp->lttpr_common_caps)) {
			/* We have a repeater */
			/* lttpr_common_caps[0] is the repeater field data structure rev. */
			if (intel_dp->lttpr_common_caps[0] >= 0x20 &&
			    intel_dp->lttpr_common_caps[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
							DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] &
			    DP_PHY_REPEATER_128B132B_SUPPORTED) {
				/* Repeater supports 128b/132b, valid UHBR rates */
				uhbr_rates &= intel_dp->lttpr_common_caps[DP_PHY_REPEATER_128B132B_RATES -
									  DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
			} else {
				/* Does not support 128b/132b */
				uhbr_rates = 0;
			}
		}

		if (uhbr_rates & DP_UHBR10)
			intel_dp->sink_rates[i++] = 1000000;
		if (uhbr_rates & DP_UHBR13_5)
			intel_dp->sink_rates[i++] = 1350000;
		if (uhbr_rates & DP_UHBR20)
			intel_dp->sink_rates[i++] = 2000000;
	}

	intel_dp->num_sink_rates = i;
}
265 
266 static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
267 {
268 	struct intel_connector *connector = intel_dp->attached_connector;
269 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
270 	struct intel_encoder *encoder = &intel_dig_port->base;
271 
272 	intel_dp_set_dpcd_sink_rates(intel_dp);
273 
274 	if (intel_dp->num_sink_rates)
275 		return;
276 
277 	drm_err(&dp_to_i915(intel_dp)->drm,
278 		"[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD with no link rates, using defaults\n",
279 		connector->base.base.id, connector->base.name,
280 		encoder->base.base.id, encoder->base.name);
281 
282 	intel_dp_set_default_sink_rates(intel_dp);
283 }
284 
/* Fall back to the single-lane minimum every DP sink must support. */
static void intel_dp_set_default_max_sink_lane_count(struct intel_dp *intel_dp)
{
	intel_dp->max_sink_lane_count = 1;
}
289 
290 static void intel_dp_set_max_sink_lane_count(struct intel_dp *intel_dp)
291 {
292 	struct intel_connector *connector = intel_dp->attached_connector;
293 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
294 	struct intel_encoder *encoder = &intel_dig_port->base;
295 
296 	intel_dp->max_sink_lane_count = max_dprx_lane_count(intel_dp);
297 
298 	switch (intel_dp->max_sink_lane_count) {
299 	case 1:
300 	case 2:
301 	case 4:
302 		return;
303 	}
304 
305 	drm_err(&dp_to_i915(intel_dp)->drm,
306 		"[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD max lane count (%d), using default\n",
307 		connector->base.base.id, connector->base.name,
308 		encoder->base.base.id, encoder->base.name,
309 		intel_dp->max_sink_lane_count);
310 
311 	intel_dp_set_default_max_sink_lane_count(intel_dp);
312 }
313 
/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int i;

	/*
	 * rates[] is sorted ascending; scan from the top, the first entry
	 * not exceeding max_rate bounds the usable length.
	 */
	for (i = len - 1; i >= 0; i--) {
		if (rates[i] <= max_rate)
			return i + 1;
	}

	return 0;
}
327 
/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
					  int max_rate)
{
	/* common_rates[] is sorted ascending; limit by the reduced max rate. */
	return intel_dp_rate_limit_len(intel_dp->common_rates,
				       intel_dp->num_common_rates, max_rate);
}
335 
/* Return the common (source ∩ sink) rate at @index, or RBR on a bad index. */
int intel_dp_common_rate(struct intel_dp *intel_dp, int index)
{
	/* Guard against out-of-range indexing; fall back to RBR (162000). */
	if (drm_WARN_ON(&dp_to_i915(intel_dp)->drm,
			index < 0 || index >= intel_dp->num_common_rates))
		return 162000;

	return intel_dp->common_rates[index];
}
344 
/* Theoretical max between source and sink */
int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
	/* common_rates[] is sorted ascending, so the last entry is the max. */
	return intel_dp_common_rate(intel_dp, intel_dp->num_common_rates - 1);
}
350 
351 int intel_dp_max_source_lane_count(struct intel_digital_port *dig_port)
352 {
353 	int vbt_max_lanes = intel_bios_dp_max_lane_count(dig_port->base.devdata);
354 	int max_lanes = dig_port->max_lanes;
355 
356 	if (vbt_max_lanes)
357 		max_lanes = min(max_lanes, vbt_max_lanes);
358 
359 	return max_lanes;
360 }
361 
362 /* Theoretical max between source and sink */
363 int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
364 {
365 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
366 	int source_max = intel_dp_max_source_lane_count(dig_port);
367 	int sink_max = intel_dp->max_sink_lane_count;
368 	int lane_max = intel_tc_port_max_lane_count(dig_port);
369 	int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps);
370 
371 	if (lttpr_max)
372 		sink_max = min(sink_max, lttpr_max);
373 
374 	return min3(source_max, sink_max, lane_max);
375 }
376 
/* Clamp the forced lane count to the valid [1, max common lane count] range. */
static int forced_lane_count(struct intel_dp *intel_dp)
{
	return clamp(intel_dp->link.force_lane_count, 1, intel_dp_max_common_lane_count(intel_dp));
}
381 
382 int intel_dp_max_lane_count(struct intel_dp *intel_dp)
383 {
384 	int lane_count;
385 
386 	if (intel_dp->link.force_lane_count)
387 		lane_count = forced_lane_count(intel_dp);
388 	else
389 		lane_count = intel_dp->link.max_lane_count;
390 
391 	switch (lane_count) {
392 	case 1:
393 	case 2:
394 	case 4:
395 		return lane_count;
396 	default:
397 		MISSING_CASE(lane_count);
398 		return 1;
399 	}
400 }
401 
/* Min lane count to try: the (clamped) forced value, or 1 without an override. */
static int intel_dp_min_lane_count(struct intel_dp *intel_dp)
{
	if (intel_dp->link.force_lane_count)
		return forced_lane_count(intel_dp);

	return 1;
}
409 
/*
 * The required data bandwidth for a mode with given pixel clock and bpp. This
 * is the required net bandwidth independent of the data bandwidth efficiency.
 *
 * Returns the rate in kBps units (pixel_clock is in kHz).
 *
 * TODO: check if callers of this functions should use
 * intel_dp_effective_data_rate() instead.
 */
int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
	return DIV_ROUND_UP(pixel_clock * bpp, 8);
}
423 
/**
 * intel_dp_effective_data_rate - Return the pixel data rate accounting for BW allocation overhead
 * @pixel_clock: pixel clock in kHz
 * @bpp_x16: bits per pixel .4 fixed point format
 * @bw_overhead: BW allocation overhead in 1ppm units
 *
 * Return the effective pixel data rate in kB/sec units taking into account
 * the provided SSC, FEC, DSC BW allocation overhead.
 */
int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16,
				 int bw_overhead)
{
	/* Divisor: 1000000 for ppm, 16 for the .4 fixed point bpp, 8 bits->Bytes. */
	return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_clock * bpp_x16, bw_overhead),
				1000000 * 16 * 8);
}
439 
440 /**
441  * intel_dp_max_link_data_rate: Calculate the maximum rate for the given link params
442  * @intel_dp: Intel DP object
443  * @max_dprx_rate: Maximum data rate of the DPRX
444  * @max_dprx_lanes: Maximum lane count of the DPRX
445  *
446  * Calculate the maximum data rate for the provided link parameters taking into
447  * account any BW limitations by a DP tunnel attached to @intel_dp.
448  *
449  * Returns the maximum data rate in kBps units.
450  */
451 int intel_dp_max_link_data_rate(struct intel_dp *intel_dp,
452 				int max_dprx_rate, int max_dprx_lanes)
453 {
454 	int max_rate = drm_dp_max_dprx_data_rate(max_dprx_rate, max_dprx_lanes);
455 
456 	if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
457 		max_rate = min(max_rate,
458 			       drm_dp_tunnel_available_bw(intel_dp->tunnel));
459 
460 	return max_rate;
461 }
462 
463 bool intel_dp_has_joiner(struct intel_dp *intel_dp)
464 {
465 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
466 	struct intel_encoder *encoder = &intel_dig_port->base;
467 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
468 
469 	/* eDP MSO is not compatible with joiner */
470 	if (intel_dp->mso_link_count)
471 		return false;
472 
473 	return DISPLAY_VER(dev_priv) >= 12 ||
474 		(DISPLAY_VER(dev_priv) == 11 &&
475 		 encoder->port != PORT_A);
476 }
477 
/* DG2: eDP is limited to 810000 (HBR3), DP to 1350000 (UHBR13.5). */
static int dg2_max_source_rate(struct intel_dp *intel_dp)
{
	return intel_dp_is_edp(intel_dp) ? 810000 : 1350000;
}
482 
483 static int icl_max_source_rate(struct intel_dp *intel_dp)
484 {
485 	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
486 
487 	if (intel_encoder_is_combo(encoder) && !intel_dp_is_edp(intel_dp))
488 		return 540000;
489 
490 	return 810000;
491 }
492 
/* EHL/JSL: eDP is limited to 540000; DP can do 810000. */
static int ehl_max_source_rate(struct intel_dp *intel_dp)
{
	return intel_dp_is_edp(intel_dp) ? 540000 : 810000;
}
500 
static int mtl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;

	/* C10 PHY is limited to 810000 (HBR3). */
	if (intel_encoder_is_c10phy(encoder))
		return 810000;

	/* Display IP version 14.1 is limited to 1350000 (UHBR13.5). */
	if (DISPLAY_VER_FULL(to_i915(encoder->base.dev)) == IP_VER(14, 1))
		return 1350000;

	/* Otherwise UHBR20. */
	return 2000000;
}
513 
514 static int vbt_max_link_rate(struct intel_dp *intel_dp)
515 {
516 	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
517 	int max_rate;
518 
519 	max_rate = intel_bios_dp_max_link_rate(encoder->devdata);
520 
521 	if (intel_dp_is_edp(intel_dp)) {
522 		struct intel_connector *connector = intel_dp->attached_connector;
523 		int edp_max_rate = connector->panel.vbt.edp.max_link_rate;
524 
525 		if (max_rate && edp_max_rate)
526 			max_rate = min(max_rate, edp_max_rate);
527 		else if (edp_max_rate)
528 			max_rate = edp_max_rate;
529 	}
530 
531 	return max_rate;
532 }
533 
/*
 * Initialize intel_dp->source_rates/num_source_rates: pick the rate table for
 * this platform and trim it by the platform- and VBT-imposed max rate.
 */
static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The values must be in increasing order */
	static const int mtl_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000, 675000,
		810000,	1000000, 2000000,
	};
	static const int icl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000,
		1000000, 1350000,
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate;

	/* This should only be done once */
	drm_WARN_ON(&dev_priv->drm,
		    intel_dp->source_rates || intel_dp->num_source_rates);

	/* Select the rate table and the platform/SKU specific rate cap. */
	if (DISPLAY_VER(dev_priv) >= 14) {
		source_rates = mtl_rates;
		size = ARRAY_SIZE(mtl_rates);
		max_rate = mtl_max_source_rate(intel_dp);
	} else if (DISPLAY_VER(dev_priv) >= 11) {
		source_rates = icl_rates;
		size = ARRAY_SIZE(icl_rates);
		if (IS_DG2(dev_priv))
			max_rate = dg2_max_source_rate(intel_dp);
		else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
			 IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
			max_rate = 810000;
		else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
			max_rate = ehl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
	} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (DISPLAY_VER(dev_priv) == 9) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HASWELL_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	/* Combine the platform cap with the VBT cap; 0 means no limit. */
	vbt_max_rate = vbt_max_link_rate(intel_dp);
	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}
610 
611 static int intersect_rates(const int *source_rates, int source_len,
612 			   const int *sink_rates, int sink_len,
613 			   int *common_rates)
614 {
615 	int i = 0, j = 0, k = 0;
616 
617 	while (i < source_len && j < sink_len) {
618 		if (source_rates[i] == sink_rates[j]) {
619 			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
620 				return k;
621 			common_rates[k] = source_rates[i];
622 			++k;
623 			++i;
624 			++j;
625 		} else if (source_rates[i] < sink_rates[j]) {
626 			++i;
627 		} else {
628 			++j;
629 		}
630 	}
631 	return k;
632 }
633 
/* return index of rate in rates array, or -1 if not found */
int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int i = 0;

	while (i < len) {
		if (rates[i] == rate)
			return i;
		i++;
	}

	return -1;
}
645 
646 static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
647 {
648 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
649 
650 	drm_WARN_ON(&i915->drm,
651 		    !intel_dp->num_source_rates || !intel_dp->num_sink_rates);
652 
653 	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
654 						     intel_dp->num_source_rates,
655 						     intel_dp->sink_rates,
656 						     intel_dp->num_sink_rates,
657 						     intel_dp->common_rates);
658 
659 	/* Paranoia, there should always be something in common. */
660 	if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) {
661 		intel_dp->common_rates[0] = 162000;
662 		intel_dp->num_common_rates = 1;
663 	}
664 }
665 
666 static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
667 				       u8 lane_count)
668 {
669 	/*
670 	 * FIXME: we need to synchronize the current link parameters with
671 	 * hardware readout. Currently fast link training doesn't work on
672 	 * boot-up.
673 	 */
674 	if (link_rate == 0 ||
675 	    link_rate > intel_dp->link.max_rate)
676 		return false;
677 
678 	if (lane_count == 0 ||
679 	    lane_count > intel_dp_max_lane_count(intel_dp))
680 		return false;
681 
682 	return true;
683 }
684 
/* Scale @mode_clock up by the FEC overhead factor (1ppm units, hence /1000000). */
u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
{
	return div_u64(mul_u32_u32(mode_clock, DP_DSC_FEC_OVERHEAD_FACTOR),
		       1000000U);
}
690 
/* BW overhead factor in 1ppm units for the given FEC state (1000000 == none). */
int intel_dp_bw_fec_overhead(bool fec_enabled)
{
	/*
	 * TODO: Calculate the actual overhead for a given mode.
	 * The hard-coded 1/0.972261=2.853% overhead factor
	 * corresponds (for instance) to the 8b/10b DP FEC 2.4% +
	 * 0.453% DSC overhead. This is enough for a 3840 width mode,
	 * which has a DSC overhead of up to ~0.2%, but may not be
	 * enough for a 1024 width mode where this is ~0.8% (on a 4
	 * lane DP link, with 2 DSC slices and 8 bpp color depth).
	 */
	return fec_enabled ? DP_DSC_FEC_OVERHEAD_FACTOR : 1000000;
}
704 
/* Small joiner RAM size in bits for this platform generation. */
static int
small_joiner_ram_size_bits(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 13)
		return 17280 * 8;

	if (DISPLAY_VER(i915) >= 11)
		return 7680 * 8;

	return 6144 * 8;
}
715 
/*
 * Snap/clamp @bpp to a DSC compressed bpp the platform supports; returns 0 if
 * no valid value is possible. @pipe_bpp is the uncompressed pipe bpp.
 */
u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 pipe_bpp)
{
	u32 bits_per_pixel = bpp;
	int i;

	/* Error out if the max bpp is less than smallest allowed valid bpp */
	if (bits_per_pixel < valid_dsc_bpp[0]) {
		drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
			    bits_per_pixel, valid_dsc_bpp[0]);
		return 0;
	}

	/* From XE_LPD onwards we support from bpc up to uncompressed bpp-1 BPPs */
	if (DISPLAY_VER(i915) >= 13) {
		bits_per_pixel = min(bits_per_pixel, pipe_bpp - 1);

		/*
		 * According to BSpec, 27 is the max DSC output bpp,
		 * 8 is the min DSC output bpp.
		 * While we can still clamp higher bpp values to 27, saving bandwidth,
		 * if it is required to compress up to bpp < 8, means we can't do
		 * that and probably means we can't fit the required mode, even with
		 * DSC enabled.
		 */
		if (bits_per_pixel < 8) {
			drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min 8\n",
				    bits_per_pixel);
			return 0;
		}
		bits_per_pixel = min_t(u32, bits_per_pixel, 27);
	} else {
		/* Find the nearest match in the array of known BPPs from VESA */
		/* i ends at the largest entry that is <= bits_per_pixel (round down). */
		for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
			if (bits_per_pixel < valid_dsc_bpp[i + 1])
				break;
		}
		drm_dbg_kms(&i915->drm, "Set dsc bpp from %d to VESA %d\n",
			    bits_per_pixel, valid_dsc_bpp[i]);

		bits_per_pixel = valid_dsc_bpp[i];
	}

	return bits_per_pixel;
}
760 
/*
 * Max compressed bpp allowed by the joiner configuration: the small joiner
 * line-RAM limit, and additionally the bigjoiner interface/CDCLK limit when
 * @bigjoiner is set.
 */
static
u32 get_max_compressed_bpp_with_joiner(struct drm_i915_private *i915,
				       u32 mode_clock, u32 mode_hdisplay,
				       bool bigjoiner)
{
	u32 max_bpp_small_joiner_ram;

	/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
	max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) / mode_hdisplay;

	if (bigjoiner) {
		int bigjoiner_interface_bits = DISPLAY_VER(i915) >= 14 ? 36 : 24;
		/* With bigjoiner multiple dsc engines are used in parallel so PPC is 2 */
		int ppc = 2;
		u32 max_bpp_bigjoiner =
			i915->display.cdclk.max_cdclk_freq * ppc * bigjoiner_interface_bits /
			intel_dp_mode_to_fec_clock(mode_clock);

		/* Each pipe's small joiner RAM covers half the width when joined. */
		max_bpp_small_joiner_ram *= 2;

		return min(max_bpp_small_joiner_ram, max_bpp_bigjoiner);
	}

	return max_bpp_small_joiner_ram;
}
786 
/*
 * Max DSC compressed bpp that fits the given link (rate, lanes, timeslots),
 * mode and joiner configuration, snapped to a platform-valid value.
 * Returns 0 if no valid compressed bpp can fit.
 */
u16 intel_dp_dsc_get_max_compressed_bpp(struct drm_i915_private *i915,
					u32 link_clock, u32 lane_count,
					u32 mode_clock, u32 mode_hdisplay,
					bool bigjoiner,
					enum intel_output_format output_format,
					u32 pipe_bpp,
					u32 timeslots)
{
	u32 bits_per_pixel, joiner_max_bpp;

	/*
	 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
	 * (LinkSymbolClock)* 8 * (TimeSlots / 64)
	 * for SST -> TimeSlots is 64(i.e all TimeSlots that are available)
	 * for MST -> TimeSlots has to be calculated, based on mode requirements
	 *
	 * Due to FEC overhead, the available bw is reduced to 97.2261%.
	 * To support the given mode:
	 * Bandwidth required should be <= Available link Bandwidth * FEC Overhead
	 * =>ModeClock * bits_per_pixel <= Available Link Bandwidth * FEC Overhead
	 * =>bits_per_pixel <= Available link Bandwidth * FEC Overhead / ModeClock
	 * =>bits_per_pixel <= (NumberOfLanes * LinkSymbolClock) * 8 (TimeSlots / 64) /
	 *		       (ModeClock / FEC Overhead)
	 * =>bits_per_pixel <= (NumberOfLanes * LinkSymbolClock * TimeSlots) /
	 *		       (ModeClock / FEC Overhead * 8)
	 */
	bits_per_pixel = ((link_clock * lane_count) * timeslots) /
			 (intel_dp_mode_to_fec_clock(mode_clock) * 8);

	/* Bandwidth required for 420 is half, that of 444 format */
	if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		bits_per_pixel *= 2;

	/*
	 * According to DSC 1.2a Section 4.1.1 Table 4.1 the maximum
	 * supported PPS value can be 63.9375 and with the further
	 * mention that for 420, 422 formats, bpp should be programmed double
	 * the target bpp restricting our target bpp to be 31.9375 at max.
	 */
	if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		bits_per_pixel = min_t(u32, bits_per_pixel, 31);

	drm_dbg_kms(&i915->drm, "Max link bpp is %u for %u timeslots "
				"total bw %u pixel clock %u\n",
				bits_per_pixel, timeslots,
				(link_clock * lane_count * 8),
				intel_dp_mode_to_fec_clock(mode_clock));

	/* Further cap by what the joiner RAM/interface can carry. */
	joiner_max_bpp = get_max_compressed_bpp_with_joiner(i915, mode_clock,
							    mode_hdisplay, bigjoiner);
	bits_per_pixel = min(bits_per_pixel, joiner_max_bpp);

	bits_per_pixel = intel_dp_dsc_nearest_valid_bpp(i915, bits_per_pixel, pipe_bpp);

	return bits_per_pixel;
}
843 
/*
 * Pick the smallest valid DSC slice count satisfying the encoder throughput,
 * CDCLK headroom and sink max-slice-width limits. Returns 0 on failure.
 */
u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
				int mode_clock, int mode_hdisplay,
				bool bigjoiner)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	u8 min_slice_count, i;
	int max_slice_width;

	/* Per-slice encoder throughput differs below/above the peak pixel rate. */
	if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_0);
	else
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_1);

	/*
	 * Due to some DSC engine BW limitations, we need to enable second
	 * slice and VDSC engine, whenever we approach close enough to max CDCLK
	 */
	if (mode_clock >= ((i915->display.cdclk.max_cdclk_freq * 85) / 100))
		min_slice_count = max_t(u8, min_slice_count, 2);

	max_slice_width = drm_dp_dsc_sink_max_slice_width(connector->dp.dsc_dpcd);
	if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
		drm_dbg_kms(&i915->drm,
			    "Unsupported slice width %d by DP DSC Sink device\n",
			    max_slice_width);
		return 0;
	}
	/* Also take into account max slice width */
	min_slice_count = max_t(u8, min_slice_count,
				DIV_ROUND_UP(mode_hdisplay,
					     max_slice_width));

	/* Find the closest match to the valid slice count values */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
		/* "<< bigjoiner" doubles the candidate count when joining pipes. */
		u8 test_slice_count = valid_dsc_slicecount[i] << bigjoiner;

		if (test_slice_count >
		    drm_dp_dsc_sink_max_slice_count(connector->dp.dsc_dpcd, false))
			break;

		/* big joiner needs small joiner to be enabled */
		if (bigjoiner && test_slice_count < 4)
			continue;

		if (min_slice_count <= test_slice_count)
			return test_slice_count;
	}

	drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
		    min_slice_count);
	return 0;
}
898 
/* Can the source (platform) scan out @format on this DP encoder? */
static bool source_can_output(struct intel_dp *intel_dp,
			      enum intel_output_format format)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	switch (format) {
	case INTEL_OUTPUT_FORMAT_RGB:
		return true;

	case INTEL_OUTPUT_FORMAT_YCBCR444:
		/*
		 * No YCbCr output support on gmch platforms.
		 * Also, ILK doesn't seem capable of DP YCbCr output.
		 * The displayed image is severely corrupted. SNB+ is fine.
		 */
		return !HAS_GMCH(i915) && !IS_IRONLAKE(i915);

	case INTEL_OUTPUT_FORMAT_YCBCR420:
		/* Platform < Gen 11 cannot output YCbCr420 format */
		return DISPLAY_VER(i915) >= 11;

	default:
		MISSING_CASE(format);
		return false;
	}
}
925 
926 static bool
927 dfp_can_convert_from_rgb(struct intel_dp *intel_dp,
928 			 enum intel_output_format sink_format)
929 {
930 	if (!drm_dp_is_branch(intel_dp->dpcd))
931 		return false;
932 
933 	if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR444)
934 		return intel_dp->dfp.rgb_to_ycbcr;
935 
936 	if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420)
937 		return intel_dp->dfp.rgb_to_ycbcr &&
938 			intel_dp->dfp.ycbcr_444_to_420;
939 
940 	return false;
941 }
942 
943 static bool
944 dfp_can_convert_from_ycbcr444(struct intel_dp *intel_dp,
945 			      enum intel_output_format sink_format)
946 {
947 	if (!drm_dp_is_branch(intel_dp->dpcd))
948 		return false;
949 
950 	if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420)
951 		return intel_dp->dfp.ycbcr_444_to_420;
952 
953 	return false;
954 }
955 
956 static bool
957 dfp_can_convert(struct intel_dp *intel_dp,
958 		enum intel_output_format output_format,
959 		enum intel_output_format sink_format)
960 {
961 	switch (output_format) {
962 	case INTEL_OUTPUT_FORMAT_RGB:
963 		return dfp_can_convert_from_rgb(intel_dp, sink_format);
964 	case INTEL_OUTPUT_FORMAT_YCBCR444:
965 		return dfp_can_convert_from_ycbcr444(intel_dp, sink_format);
966 	default:
967 		MISSING_CASE(output_format);
968 		return false;
969 	}
970 
971 	return false;
972 }
973 
974 static enum intel_output_format
975 intel_dp_output_format(struct intel_connector *connector,
976 		       enum intel_output_format sink_format)
977 {
978 	struct intel_dp *intel_dp = intel_attached_dp(connector);
979 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
980 	enum intel_output_format force_dsc_output_format =
981 		intel_dp->force_dsc_output_format;
982 	enum intel_output_format output_format;
983 	if (force_dsc_output_format) {
984 		if (source_can_output(intel_dp, force_dsc_output_format) &&
985 		    (!drm_dp_is_branch(intel_dp->dpcd) ||
986 		     sink_format != force_dsc_output_format ||
987 		     dfp_can_convert(intel_dp, force_dsc_output_format, sink_format)))
988 			return force_dsc_output_format;
989 
990 		drm_dbg_kms(&i915->drm, "Cannot force DSC output format\n");
991 	}
992 
993 	if (sink_format == INTEL_OUTPUT_FORMAT_RGB ||
994 	    dfp_can_convert_from_rgb(intel_dp, sink_format))
995 		output_format = INTEL_OUTPUT_FORMAT_RGB;
996 
997 	else if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR444 ||
998 		 dfp_can_convert_from_ycbcr444(intel_dp, sink_format))
999 		output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
1000 
1001 	else
1002 		output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
1003 
1004 	drm_WARN_ON(&i915->drm, !source_can_output(intel_dp, output_format));
1005 
1006 	return output_format;
1007 }
1008 
1009 int intel_dp_min_bpp(enum intel_output_format output_format)
1010 {
1011 	if (output_format == INTEL_OUTPUT_FORMAT_RGB)
1012 		return 6 * 3;
1013 	else
1014 		return 8 * 3;
1015 }
1016 
1017 int intel_dp_output_bpp(enum intel_output_format output_format, int bpp)
1018 {
1019 	/*
1020 	 * bpp value was assumed to RGB format. And YCbCr 4:2:0 output
1021 	 * format of the number of bytes per pixel will be half the number
1022 	 * of bytes of RGB pixel.
1023 	 */
1024 	if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
1025 		bpp /= 2;
1026 
1027 	return bpp;
1028 }
1029 
1030 static enum intel_output_format
1031 intel_dp_sink_format(struct intel_connector *connector,
1032 		     const struct drm_display_mode *mode)
1033 {
1034 	const struct drm_display_info *info = &connector->base.display_info;
1035 
1036 	if (drm_mode_is_420_only(info, mode))
1037 		return INTEL_OUTPUT_FORMAT_YCBCR420;
1038 
1039 	return INTEL_OUTPUT_FORMAT_RGB;
1040 }
1041 
1042 static int
1043 intel_dp_mode_min_output_bpp(struct intel_connector *connector,
1044 			     const struct drm_display_mode *mode)
1045 {
1046 	enum intel_output_format output_format, sink_format;
1047 
1048 	sink_format = intel_dp_sink_format(connector, mode);
1049 
1050 	output_format = intel_dp_output_format(connector, sink_format);
1051 
1052 	return intel_dp_output_bpp(output_format, intel_dp_min_bpp(output_format));
1053 }
1054 
/* Is @hdisplay known to be broken with DP on this platform? */
static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
				  int hdisplay)
{
	/*
	 * Older platforms don't like hdisplay==4096 with DP.
	 *
	 * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
	 * and frame counter increment), but we don't get vblank interrupts,
	 * and the pipe underruns immediately. The link also doesn't seem
	 * to get trained properly.
	 *
	 * On CHV the vblank interrupts don't seem to disappear but
	 * otherwise the symptoms are similar.
	 *
	 * TODO: confirm the behaviour on HSW+
	 */
	return hdisplay == 4096 && !HAS_DDI(dev_priv);
}
1073 
1074 static int intel_dp_max_tmds_clock(struct intel_dp *intel_dp)
1075 {
1076 	struct intel_connector *connector = intel_dp->attached_connector;
1077 	const struct drm_display_info *info = &connector->base.display_info;
1078 	int max_tmds_clock = intel_dp->dfp.max_tmds_clock;
1079 
1080 	/* Only consider the sink's max TMDS clock if we know this is a HDMI DFP */
1081 	if (max_tmds_clock && info->max_tmds_clock)
1082 		max_tmds_clock = min(max_tmds_clock, info->max_tmds_clock);
1083 
1084 	return max_tmds_clock;
1085 }
1086 
/*
 * Check the TMDS clock a DP->HDMI/DVI converter (DFP) would have to
 * drive for @clock/@bpc/@sink_format against the DFP's (and sink's)
 * TMDS clock range.
 */
static enum drm_mode_status
intel_dp_tmds_clock_valid(struct intel_dp *intel_dp,
			  int clock, int bpc,
			  enum intel_output_format sink_format,
			  bool respect_downstream_limits)
{
	int tmds_clock, min_tmds_clock, max_tmds_clock;

	/* Nothing to check when the caller wants downstream limits ignored. */
	if (!respect_downstream_limits)
		return MODE_OK;

	tmds_clock = intel_hdmi_tmds_clock(clock, bpc, sink_format);

	min_tmds_clock = intel_dp->dfp.min_tmds_clock;
	max_tmds_clock = intel_dp_max_tmds_clock(intel_dp);

	/* A zero limit means "no limit known" and is not enforced. */
	if (min_tmds_clock && tmds_clock < min_tmds_clock)
		return MODE_CLOCK_LOW;

	if (max_tmds_clock && tmds_clock > max_tmds_clock)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}
1111 
/*
 * Validate @mode against the limits of any downstream facing port
 * (branch device): PCON FRL bandwidth, DFP max dotclock, and DFP
 * TMDS clock range (retrying in YCbCr420 where the mode allows it).
 */
static enum drm_mode_status
intel_dp_mode_valid_downstream(struct intel_connector *connector,
			       const struct drm_display_mode *mode,
			       int target_clock)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	const struct drm_display_info *info = &connector->base.display_info;
	enum drm_mode_status status;
	enum intel_output_format sink_format;

	/* If PCON supports FRL MODE, check FRL bandwidth constraints */
	if (intel_dp->dfp.pcon_max_frl_bw) {
		int target_bw;
		int max_frl_bw;
		int bpp = intel_dp_mode_min_output_bpp(connector, mode);

		target_bw = bpp * target_clock;

		max_frl_bw = intel_dp->dfp.pcon_max_frl_bw;

		/* converting bw from Gbps to Kbps*/
		max_frl_bw = max_frl_bw * 1000000;

		if (target_bw > max_frl_bw)
			return MODE_CLOCK_HIGH;

		return MODE_OK;
	}

	if (intel_dp->dfp.max_dotclock &&
	    target_clock > intel_dp->dfp.max_dotclock)
		return MODE_CLOCK_HIGH;

	sink_format = intel_dp_sink_format(connector, mode);

	/* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */
	status = intel_dp_tmds_clock_valid(intel_dp, target_clock,
					   8, sink_format, true);

	if (status != MODE_OK) {
		/* Retry in YCbCr420 if the sink also supports the mode in 4:2:0. */
		if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
		    !connector->base.ycbcr_420_allowed ||
		    !drm_mode_is_420_also(info, mode))
			return status;
		sink_format = INTEL_OUTPUT_FORMAT_YCBCR420;
		status = intel_dp_tmds_clock_valid(intel_dp, target_clock,
						   8, sink_format, true);
		if (status != MODE_OK)
			return status;
	}

	return MODE_OK;
}
1165 
/*
 * Does driving @hdisplay/@clock require two joined pipes? True when the
 * dotclock exceeds a single pipe's max dotclock, the mode is wider than
 * 5120 pixels, or joining is explicitly forced on the connector.
 */
bool intel_dp_need_joiner(struct intel_dp *intel_dp,
			  struct intel_connector *connector,
			  int hdisplay, int clock)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	if (!intel_dp_has_joiner(intel_dp))
		return false;

	return clock > i915->display.cdclk.max_dotclk_freq || hdisplay > 5120 ||
	       connector->force_bigjoiner_enable;
}
1178 
/*
 * Is DSC usable on @connector at all: source hardware support, MST
 * capability, VBT opt-out and sink DSC capability.
 */
bool intel_dp_has_dsc(const struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	if (!HAS_DSC(i915))
		return false;

	/* MST streams need the source's separate MST DSC capability. */
	if (connector->mst_port && !HAS_DSC_MST(i915))
		return false;

	/* The VBT can disable DSC for eDP panels. */
	if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
	    connector->panel.vbt.edp.dsc_disable)
		return false;

	if (!drm_dp_sink_supports_dsc(connector->dp.dsc_dpcd))
		return false;

	return true;
}
1198 
/*
 * .mode_valid() hook for DP connectors: validate @mode against source
 * (transcoder, dotclock, hdisplay quirks), link bandwidth with and
 * without DSC, and downstream device limits.
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *_connector,
		    struct drm_display_mode *mode)
{
	struct intel_connector *connector = to_intel_connector(_connector);
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	const struct drm_display_mode *fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk = dev_priv->display.cdclk.max_dotclk_freq;
	u16 dsc_max_compressed_bpp = 0;
	u8 dsc_slice_count = 0;
	enum drm_mode_status status;
	bool dsc = false, joiner = false;

	status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
	if (status != MODE_OK)
		return status;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	/* eDP panels are validated against their fixed mode's clock. */
	fixed_mode = intel_panel_fixed_mode(connector, mode);
	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		status = intel_panel_mode_valid(connector, mode);
		if (status != MODE_OK)
			return status;

		target_clock = fixed_mode->clock;
	}

	/* Joining two pipes doubles the usable dotclock. */
	if (intel_dp_need_joiner(intel_dp, connector,
				 mode->hdisplay, target_clock)) {
		joiner = true;
		max_dotclk *= 2;
	}
	if (target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
		return MODE_H_ILLEGAL;

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_link_data_rate(intel_dp, max_link_clock, max_lanes);

	mode_rate = intel_dp_link_required(target_clock,
					   intel_dp_mode_min_output_bpp(connector, mode));

	if (intel_dp_has_dsc(connector)) {
		enum intel_output_format sink_format, output_format;
		int pipe_bpp;

		sink_format = intel_dp_sink_format(connector, mode);
		output_format = intel_dp_output_format(connector, sink_format);
		/*
		 * TBD pass the connector BPC,
		 * for now U8_MAX so that max BPC on that platform would be picked
		 */
		pipe_bpp = intel_dp_dsc_compute_max_bpp(connector, U8_MAX);

		/*
		 * Output bpp is stored in 6.4 format so right shift by 4 to get the
		 * integer value since we support only integer values of bpp.
		 */
		if (intel_dp_is_edp(intel_dp)) {
			dsc_max_compressed_bpp =
				drm_edp_dsc_sink_output_bpp(connector->dp.dsc_dpcd) >> 4;
			dsc_slice_count =
				drm_dp_dsc_sink_max_slice_count(connector->dp.dsc_dpcd,
								true);
		} else if (drm_dp_sink_supports_fec(connector->dp.fec_capability)) {
			dsc_max_compressed_bpp =
				intel_dp_dsc_get_max_compressed_bpp(dev_priv,
								    max_link_clock,
								    max_lanes,
								    target_clock,
								    mode->hdisplay,
								    joiner,
								    output_format,
								    pipe_bpp, 64);
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(connector,
							     target_clock,
							     mode->hdisplay,
							     joiner);
		}

		/* DSC is only usable if both a compressed bpp and a slice count were found. */
		dsc = dsc_max_compressed_bpp && dsc_slice_count;
	}

	if (intel_dp_joiner_needs_dsc(dev_priv, joiner) && !dsc)
		return MODE_CLOCK_HIGH;

	/* An over-budget mode may still be usable with DSC. */
	if (mode_rate > max_rate && !dsc)
		return MODE_CLOCK_HIGH;

	status = intel_dp_mode_valid_downstream(connector, mode, target_clock);
	if (status != MODE_OK)
		return status;

	return intel_mode_valid_max_plane_size(dev_priv, mode, joiner);
}
1307 
1308 bool intel_dp_source_supports_tps3(struct drm_i915_private *i915)
1309 {
1310 	return DISPLAY_VER(i915) >= 9 || IS_BROADWELL(i915) || IS_HASWELL(i915);
1311 }
1312 
/* Source support for training pattern 4 starts with display version 10. */
bool intel_dp_source_supports_tps4(struct drm_i915_private *i915)
{
	return DISPLAY_VER(i915) >= 10;
}
1317 
/*
 * Format @nelem integers from @array into @str as a comma separated
 * list, silently truncating once the @len byte buffer is exhausted.
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	const char *sep = "";
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int written = snprintf(str, len, "%s%d", sep, array[i]);

		/* snprintf() returns the would-be length; stop on truncation. */
		if (written >= len)
			return;

		str += written;
		len -= written;
		sep = ", ";
	}
}
1333 
/*
 * Log the source, sink and common link rate tables to the KMS debug
 * log; skips all formatting work when KMS debugging is disabled.
 */
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	char str[128]; /* FIXME: too big for stack? */

	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	snprintf_int_array(str, sizeof(str),
			   intel_dp->source_rates, intel_dp->num_source_rates);
	drm_dbg_kms(&i915->drm, "source rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->sink_rates, intel_dp->num_sink_rates);
	drm_dbg_kms(&i915->drm, "sink rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->common_rates, intel_dp->num_common_rates);
	drm_dbg_kms(&i915->drm, "common rates: %s\n", str);
}
1354 
1355 static int forced_link_rate(struct intel_dp *intel_dp)
1356 {
1357 	int len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->link.force_rate);
1358 
1359 	if (len == 0)
1360 		return intel_dp_common_rate(intel_dp, 0);
1361 
1362 	return intel_dp_common_rate(intel_dp, len - 1);
1363 }
1364 
/*
 * Highest usable link rate: a forced rate takes precedence, otherwise
 * the highest common rate within the link's max rate limit.
 */
int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int len;

	if (intel_dp->link.force_rate)
		return forced_link_rate(intel_dp);

	len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->link.max_rate);

	return intel_dp_common_rate(intel_dp, len - 1);
}
1377 
/*
 * Lowest usable link rate: a forced rate pins both min and max,
 * otherwise the lowest common rate.
 */
static int
intel_dp_min_link_rate(struct intel_dp *intel_dp)
{
	if (intel_dp->link.force_rate)
		return forced_link_rate(intel_dp);

	return intel_dp_common_rate(intel_dp, 0);
}
1386 
/*
 * Return the index of @rate in the sink's rate table (for the eDP 1.4
 * rate select method); WARNs and falls back to index 0 if @rate is not
 * one of the sink rates.
 */
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int i = intel_dp_rate_index(intel_dp->sink_rates,
				    intel_dp->num_sink_rates, rate);

	if (drm_WARN_ON(&i915->drm, i < 0))
		i = 0;

	return i;
}
1398 
/*
 * Translate @port_clock into the @link_bw / @rate_select pair to program
 * into the sink: rate-select capable (eDP 1.4) sinks get link_bw == 0
 * and an index into their rate table, others get the link BW code.
 */
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
			   u8 *link_bw, u8 *rate_select)
{
	/* eDP 1.4 rate select method. */
	if (intel_dp->use_rate_select) {
		*link_bw = 0;
		*rate_select =
			intel_dp_rate_select(intel_dp, port_clock);
	} else {
		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
		*rate_select = 0;
	}
}
1412 
1413 bool intel_dp_has_hdmi_sink(struct intel_dp *intel_dp)
1414 {
1415 	struct intel_connector *connector = intel_dp->attached_connector;
1416 
1417 	return connector->base.display_info.is_hdmi;
1418 }
1419 
/* Can the source hardware drive FEC for this encoder/pipe configuration? */
static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *pipe_config)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/* Display version 12+ supports FEC everywhere. */
	if (DISPLAY_VER(dev_priv) >= 12)
		return true;

	/* Display version 11: no FEC on port A, and not with MST. */
	if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A &&
	    !intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
		return true;

	return false;
}
1435 
1436 bool intel_dp_supports_fec(struct intel_dp *intel_dp,
1437 			   const struct intel_connector *connector,
1438 			   const struct intel_crtc_state *pipe_config)
1439 {
1440 	return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
1441 		drm_dp_sink_supports_fec(connector->dp.fec_capability);
1442 }
1443 
/*
 * Can DSC actually be used for @crtc_state? Besides the connector-level
 * capability, SST DP requires FEC to be enabled for DSC.
 */
bool intel_dp_supports_dsc(const struct intel_connector *connector,
			   const struct intel_crtc_state *crtc_state)
{
	if (!intel_dp_has_dsc(connector))
		return false;

	/* On SST DP, DSC needs FEC enabled on the link. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP) && !crtc_state->fec_enable)
		return false;

	return intel_dsc_source_support(crtc_state);
}
1455 
/*
 * Find the highest bpc (stepping down from @bpc to 8 in steps of 2)
 * that the HDMI sink/DFP can accept and whose TMDS clock is within
 * range for this mode; -EINVAL if even 8 bpc does not fit.
 */
static int intel_dp_hdmi_compute_bpc(struct intel_dp *intel_dp,
				     const struct intel_crtc_state *crtc_state,
				     int bpc, bool respect_downstream_limits)
{
	int clock = crtc_state->hw.adjusted_mode.crtc_clock;

	/*
	 * Current bpc could already be below 8bpc due to
	 * FDI bandwidth constraints or other limits.
	 * HDMI minimum is 8bpc however.
	 */
	bpc = max(bpc, 8);

	/*
	 * We will never exceed downstream TMDS clock limits while
	 * attempting deep color. If the user insists on forcing an
	 * out of spec mode they will have to be satisfied with 8bpc.
	 */
	if (!respect_downstream_limits)
		bpc = 8;

	for (; bpc >= 8; bpc -= 2) {
		if (intel_hdmi_bpc_possible(crtc_state, bpc,
					    intel_dp_has_hdmi_sink(intel_dp)) &&
		    intel_dp_tmds_clock_valid(intel_dp, clock, bpc, crtc_state->sink_format,
					      respect_downstream_limits) == MODE_OK)
			return bpc;
	}

	return -EINVAL;
}
1487 
/*
 * Max pipe bpp usable on this connector: the pre-computed pipe_bpp,
 * clamped by the DFP's max bpc, by the HDMI DFP TMDS clock range, and
 * by a VBT-provided eDP bpp override (if any). Returns 0 on failure.
 */
static int intel_dp_max_bpp(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state,
			    bool respect_downstream_limits)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int bpp, bpc;

	bpc = crtc_state->pipe_bpp / 3;

	if (intel_dp->dfp.max_bpc)
		bpc = min_t(int, bpc, intel_dp->dfp.max_bpc);

	/* A nonzero min TMDS clock indicates an HDMI DFP; apply its limits. */
	if (intel_dp->dfp.min_tmds_clock) {
		int max_hdmi_bpc;

		max_hdmi_bpc = intel_dp_hdmi_compute_bpc(intel_dp, crtc_state, bpc,
							 respect_downstream_limits);
		if (max_hdmi_bpc < 0)
			return 0;

		bpc = min(bpc, max_hdmi_bpc);
	}

	bpp = bpc * 3;
	if (intel_dp_is_edp(intel_dp)) {
		/* Get bpp from vbt only for panels that dont have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
		    intel_connector->panel.vbt.edp.bpp &&
		    intel_connector->panel.vbt.edp.bpp < bpp) {
			drm_dbg_kms(&dev_priv->drm,
				    "clamping bpp for eDP panel to BIOS-provided %i\n",
				    intel_connector->panel.vbt.edp.bpp);
			bpp = intel_connector->panel.vbt.edp.bpp;
		}
	}

	return bpp;
}
1527 
/* Adjust link config limits based on compliance test requests. */
void
intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  struct link_config_limits *limits)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/* For DP Compliance we override the computed bpp for the pipe */
	if (intel_dp->compliance.test_data.bpc != 0) {
		int bpp = 3 * intel_dp->compliance.test_data.bpc;

		/* Pin the bpp limits to the requested value exactly. */
		limits->pipe.min_bpp = limits->pipe.max_bpp = bpp;
		pipe_config->dither_force_disable = bpp == 6 * 3;

		drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp);
	}

	/* Use values requested by Compliance Test Request */
	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		int index;

		/* Validate the compliance test data since max values
		 * might have changed due to link train fallback.
		 */
		if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
					       intel_dp->compliance.test_lane_count)) {
			index = intel_dp_rate_index(intel_dp->common_rates,
						    intel_dp->num_common_rates,
						    intel_dp->compliance.test_link_rate);
			if (index >= 0)
				limits->min_rate = limits->max_rate =
					intel_dp->compliance.test_link_rate;
			limits->min_lane_count = limits->max_lane_count =
				intel_dp->compliance.test_lane_count;
		}
	}
}
1566 
1567 static bool has_seamless_m_n(struct intel_connector *connector)
1568 {
1569 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1570 
1571 	/*
1572 	 * Seamless M/N reprogramming only implemented
1573 	 * for BDW+ double buffered M/N registers so far.
1574 	 */
1575 	return HAS_DOUBLE_BUFFERED_M_N(i915) &&
1576 		intel_panel_drrs_type(connector) == DRRS_TYPE_SEAMLESS;
1577 }
1578 
/*
 * Mode clock to size the link for: with seamless M/N (DRRS) the link
 * must accommodate the panel's highest-clock mode, not just the
 * currently requested one.
 */
static int intel_dp_mode_clock(const struct intel_crtc_state *crtc_state,
			       const struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

	/* FIXME a bit of a mess wrt clock vs. crtc_clock */
	if (has_seamless_m_n(connector))
		return intel_panel_highest_mode(connector, adjusted_mode)->clock;
	else
		return adjusted_mode->crtc_clock;
}
1591 
/* Optimize link config in order: max bpp, min clock, min lanes */
static int
intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state,
				  const struct link_config_limits *limits)
{
	int bpp, i, lane_count, clock = intel_dp_mode_clock(pipe_config, conn_state);
	int mode_rate, link_rate, link_avail;

	/* Try the highest bpp first, stepping down 2 per component (6 bpp). */
	for (bpp = to_bpp_int(limits->link.max_bpp_x16);
	     bpp >= to_bpp_int(limits->link.min_bpp_x16);
	     bpp -= 2 * 3) {
		int link_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);

		mode_rate = intel_dp_link_required(clock, link_bpp);

		/* Then the lowest link rate, then the fewest lanes. */
		for (i = 0; i < intel_dp->num_common_rates; i++) {
			link_rate = intel_dp_common_rate(intel_dp, i);
			if (link_rate < limits->min_rate ||
			    link_rate > limits->max_rate)
				continue;

			for (lane_count = limits->min_lane_count;
			     lane_count <= limits->max_lane_count;
			     lane_count <<= 1) {
				link_avail = intel_dp_max_link_data_rate(intel_dp,
									 link_rate,
									 lane_count);


				if (mode_rate <= link_avail) {
					pipe_config->lane_count = lane_count;
					pipe_config->pipe_bpp = bpp;
					pipe_config->port_clock = link_rate;

					return 0;
				}
			}
		}
	}

	return -EINVAL;
}
1636 
/*
 * Max DSC input bpc the source hardware supports; returns 0 when the
 * platform has no DSC-capable display (pre-display version 11).
 */
static
u8 intel_dp_dsc_max_src_input_bpc(struct drm_i915_private *i915)
{
	/* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
	if (DISPLAY_VER(i915) >= 12)
		return 12;
	if (DISPLAY_VER(i915) == 11)
		return 10;

	return 0;
}
1648 
/*
 * Pick the highest sink-supported DSC input bpc that does not exceed
 * the source's max input bpc or @max_req_bpc, and return it as a pipe
 * bpp (3 * bpc); 0 if no suitable bpc exists.
 */
int intel_dp_dsc_compute_max_bpp(const struct intel_connector *connector,
				 u8 max_req_bpc)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	int i, num_bpc;
	u8 dsc_bpc[3] = {};
	u8 dsc_max_bpc;

	dsc_max_bpc = intel_dp_dsc_max_src_input_bpc(i915);

	/* 0 here means the source has no DSC support at all. */
	if (!dsc_max_bpc)
		return dsc_max_bpc;

	dsc_max_bpc = min_t(u8, dsc_max_bpc, max_req_bpc);

	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(connector->dp.dsc_dpcd,
						       dsc_bpc);
	for (i = 0; i < num_bpc; i++) {
		if (dsc_max_bpc >= dsc_bpc[i])
			return dsc_bpc[i] * 3;
	}

	return 0;
}
1673 
/* DSC minor version of the source: 1.2 on display 14+, 1.1 before. */
static int intel_dp_source_dsc_version_minor(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 14)
		return 2;

	return 1;
}
1678 
1679 static int intel_dp_sink_dsc_version_minor(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
1680 {
1681 	return (dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & DP_DSC_MINOR_MASK) >>
1682 		DP_DSC_MINOR_SHIFT;
1683 }
1684 
static int intel_dp_get_slice_height(int vactive)
{
	int height = 108;

	/*
	 * VDSC 1.2a spec in Section 3.8 Options for Slices implies that 108
	 * lines is an optimal slice height, but any size can be used as long as
	 * vertical active integer multiple and maximum vertical slice count
	 * requirements are met. Probe upwards from 108 in steps of two.
	 */
	while (height <= vactive) {
		if (!(vactive % height))
			return height;
		height += 2;
	}

	/*
	 * Highly unlikely we reach here as most of the resolutions will end up
	 * finding appropriate slice_height in above loop but returning
	 * slice_height as 2 here as it should work with all resolutions.
	 */
	return 2;
}
1706 
/*
 * Fill out the DSC configuration (crtc_state->dsc.config) by combining
 * driver defaults with the sink's DSC DPCD capabilities.
 */
static int intel_dp_dsc_compute_params(const struct intel_connector *connector,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
	int ret;

	/*
	 * RC_MODEL_SIZE is currently a constant across all configurations.
	 *
	 * FIXME: Look into using sink defined DPCD DP_DSC_RC_BUF_BLK_SIZE and
	 * DP_DSC_RC_BUF_SIZE for this.
	 */
	vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
	vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay;

	vdsc_cfg->slice_height = intel_dp_get_slice_height(vdsc_cfg->pic_height);

	ret = intel_dsc_compute_params(crtc_state);
	if (ret)
		return ret;

	/* Negotiate the DSC version: min of source and sink minor versions. */
	vdsc_cfg->dsc_version_major =
		(connector->dp.dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
		 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
	vdsc_cfg->dsc_version_minor =
		min(intel_dp_source_dsc_version_minor(i915),
		    intel_dp_sink_dsc_version_minor(connector->dp.dsc_dpcd));
	if (vdsc_cfg->convert_rgb)
		vdsc_cfg->convert_rgb =
			connector->dp.dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
			DP_DSC_RGB;

	vdsc_cfg->line_buf_depth = min(INTEL_DP_DSC_MAX_LINE_BUF_DEPTH,
				       drm_dp_dsc_sink_line_buf_depth(connector->dp.dsc_dpcd));
	if (!vdsc_cfg->line_buf_depth) {
		drm_dbg_kms(&i915->drm,
			    "DSC Sink Line Buffer Depth invalid\n");
		return -EINVAL;
	}

	vdsc_cfg->block_pred_enable =
		connector->dp.dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
		DP_DSC_BLK_PREDICTION_IS_SUPPORTED;

	return drm_dsc_compute_rc_parameters(vdsc_cfg);
}
1754 
/*
 * Does the sink's DSC decoder accept @output_format? Native YCbCr420
 * DSC additionally requires DSC minor version 2 on both the source and
 * the sink side.
 */
static bool intel_dp_dsc_supports_format(const struct intel_connector *connector,
					 enum intel_output_format output_format)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	u8 sink_dsc_format;

	switch (output_format) {
	case INTEL_OUTPUT_FORMAT_RGB:
		sink_dsc_format = DP_DSC_RGB;
		break;
	case INTEL_OUTPUT_FORMAT_YCBCR444:
		sink_dsc_format = DP_DSC_YCbCr444;
		break;
	case INTEL_OUTPUT_FORMAT_YCBCR420:
		if (min(intel_dp_source_dsc_version_minor(i915),
			intel_dp_sink_dsc_version_minor(connector->dp.dsc_dpcd)) < 2)
			return false;
		sink_dsc_format = DP_DSC_YCbCr420_Native;
		break;
	default:
		return false;
	}

	return drm_dp_dsc_sink_supports_format(connector->dp.dsc_dpcd, sink_dsc_format);
}
1780 
/*
 * Does a @link_clock x @lane_count link, granted @timeslots timeslots,
 * have more bandwidth than @compressed_bppx16 (6.4 fixed point) needs
 * at @mode_clock, with FEC overhead folded in via
 * intel_dp_mode_to_fec_clock()?
 *
 * NOTE(review): @output_format is currently unused here.
 */
static bool is_bw_sufficient_for_dsc_config(u16 compressed_bppx16, u32 link_clock,
					    u32 lane_count, u32 mode_clock,
					    enum intel_output_format output_format,
					    int timeslots)
{
	u32 available_bw, required_bw;

	available_bw = (link_clock * lane_count * timeslots * 16)  / 8;
	required_bw = compressed_bppx16 * (intel_dp_mode_to_fec_clock(mode_clock));

	return available_bw > required_bw;
}
1793 
/*
 * Find the lowest link rate / lane count combination within @limits
 * that has enough bandwidth for @compressed_bppx16 (6.4 fixed point),
 * and program it into @pipe_config; -EINVAL if nothing fits.
 */
static int dsc_compute_link_config(struct intel_dp *intel_dp,
				   struct intel_crtc_state *pipe_config,
				   struct link_config_limits *limits,
				   u16 compressed_bppx16,
				   int timeslots)
{
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int link_rate, lane_count;
	int i;

	/* Lower rates are tried first, then fewer lanes at each rate. */
	for (i = 0; i < intel_dp->num_common_rates; i++) {
		link_rate = intel_dp_common_rate(intel_dp, i);
		if (link_rate < limits->min_rate || link_rate > limits->max_rate)
			continue;

		for (lane_count = limits->min_lane_count;
		     lane_count <= limits->max_lane_count;
		     lane_count <<= 1) {
			if (!is_bw_sufficient_for_dsc_config(compressed_bppx16, link_rate,
							     lane_count, adjusted_mode->clock,
							     pipe_config->output_format,
							     timeslots))
				continue;

			pipe_config->lane_count = lane_count;
			pipe_config->port_clock = link_rate;

			return 0;
		}
	}

	return -EINVAL;
}
1827 
/*
 * Max compressed bpp the sink supports, in 6.4 fixed point: the
 * explicit DPCD value when advertised, otherwise the spec defaults.
 */
static
u16 intel_dp_dsc_max_sink_compressed_bppx16(const struct intel_connector *connector,
					    struct intel_crtc_state *pipe_config,
					    int bpc)
{
	u16 max_bppx16 = drm_edp_dsc_sink_output_bpp(connector->dp.dsc_dpcd);

	if (max_bppx16)
		return max_bppx16;
	/*
	 * If support not given in DPCD 67h, 68h use the Maximum Allowed bit rate
	 * values as given in spec Table 2-157 DP v2.0
	 */
	switch (pipe_config->output_format) {
	case INTEL_OUTPUT_FORMAT_RGB:
	case INTEL_OUTPUT_FORMAT_YCBCR444:
		return (3 * bpc) << 4;
	case INTEL_OUTPUT_FORMAT_YCBCR420:
		return (3 * (bpc / 2)) << 4;
	default:
		MISSING_CASE(pipe_config->output_format);
		break;
	}

	return 0;
}
1854 
1855 int intel_dp_dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config)
1856 {
1857 	/* From Mandatory bit rate range Support Table 2-157 (DP v2.0) */
1858 	switch (pipe_config->output_format) {
1859 	case INTEL_OUTPUT_FORMAT_RGB:
1860 	case INTEL_OUTPUT_FORMAT_YCBCR444:
1861 		return 8;
1862 	case INTEL_OUTPUT_FORMAT_YCBCR420:
1863 		return 6;
1864 	default:
1865 		MISSING_CASE(pipe_config->output_format);
1866 		break;
1867 	}
1868 
1869 	return 0;
1870 }
1871 
1872 int intel_dp_dsc_sink_max_compressed_bpp(const struct intel_connector *connector,
1873 					 struct intel_crtc_state *pipe_config,
1874 					 int bpc)
1875 {
1876 	return intel_dp_dsc_max_sink_compressed_bppx16(connector,
1877 						       pipe_config, bpc) >> 4;
1878 }
1879 
/* Min Compressed bpp supported by source is 8 */
static int dsc_src_min_compressed_bpp(void)
{
	return 8;
}
1885 
static int dsc_src_max_compressed_bpp(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/*
	 * Max Compressed bpp for Gen 13+ is 27bpp.
	 * For earlier platform is 23bpp. (Bspec:49259).
	 */
	return DISPLAY_VER(i915) >= 13 ? 27 : 23;
}
1899 
1900 /*
1901  * From a list of valid compressed bpps try different compressed bpp and find a
1902  * suitable link configuration that can support it.
1903  */
1904 static int
1905 icl_dsc_compute_link_config(struct intel_dp *intel_dp,
1906 			    struct intel_crtc_state *pipe_config,
1907 			    struct link_config_limits *limits,
1908 			    int dsc_max_bpp,
1909 			    int dsc_min_bpp,
1910 			    int pipe_bpp,
1911 			    int timeslots)
1912 {
1913 	int i, ret;
1914 
1915 	/* Compressed BPP should be less than the Input DSC bpp */
1916 	dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);
1917 
1918 	for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp); i++) {
1919 		if (valid_dsc_bpp[i] < dsc_min_bpp)
1920 			continue;
1921 		if (valid_dsc_bpp[i] > dsc_max_bpp)
1922 			break;
1923 
1924 		ret = dsc_compute_link_config(intel_dp,
1925 					      pipe_config,
1926 					      limits,
1927 					      valid_dsc_bpp[i] << 4,
1928 					      timeslots);
1929 		if (ret == 0) {
1930 			pipe_config->dsc.compressed_bpp_x16 =
1931 				to_bpp_x16(valid_dsc_bpp[i]);
1932 			return 0;
1933 		}
1934 	}
1935 
1936 	return -EINVAL;
1937 }
1938 
1939 /*
1940  * From XE_LPD onwards we supports compression bpps in steps of 1 up to
1941  * uncompressed bpp-1. So we start from max compressed bpp and see if any
1942  * link configuration is able to support that compressed bpp, if not we
1943  * step down and check for lower compressed bpp.
1944  */
1945 static int
1946 xelpd_dsc_compute_link_config(struct intel_dp *intel_dp,
1947 			      const struct intel_connector *connector,
1948 			      struct intel_crtc_state *pipe_config,
1949 			      struct link_config_limits *limits,
1950 			      int dsc_max_bpp,
1951 			      int dsc_min_bpp,
1952 			      int pipe_bpp,
1953 			      int timeslots)
1954 {
1955 	u8 bppx16_incr = drm_dp_dsc_sink_bpp_incr(connector->dp.dsc_dpcd);
1956 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1957 	u16 compressed_bppx16;
1958 	u8 bppx16_step;
1959 	int ret;
1960 
1961 	if (DISPLAY_VER(i915) < 14 || bppx16_incr <= 1)
1962 		bppx16_step = 16;
1963 	else
1964 		bppx16_step = 16 / bppx16_incr;
1965 
1966 	/* Compressed BPP should be less than the Input DSC bpp */
1967 	dsc_max_bpp = min(dsc_max_bpp << 4, (pipe_bpp << 4) - bppx16_step);
1968 	dsc_min_bpp = dsc_min_bpp << 4;
1969 
1970 	for (compressed_bppx16 = dsc_max_bpp;
1971 	     compressed_bppx16 >= dsc_min_bpp;
1972 	     compressed_bppx16 -= bppx16_step) {
1973 		if (intel_dp->force_dsc_fractional_bpp_en &&
1974 		    !to_bpp_frac(compressed_bppx16))
1975 			continue;
1976 		ret = dsc_compute_link_config(intel_dp,
1977 					      pipe_config,
1978 					      limits,
1979 					      compressed_bppx16,
1980 					      timeslots);
1981 		if (ret == 0) {
1982 			pipe_config->dsc.compressed_bpp_x16 = compressed_bppx16;
1983 			if (intel_dp->force_dsc_fractional_bpp_en &&
1984 			    to_bpp_frac(compressed_bppx16))
1985 				drm_dbg_kms(&i915->drm, "Forcing DSC fractional bpp\n");
1986 
1987 			return 0;
1988 		}
1989 	}
1990 	return -EINVAL;
1991 }
1992 
1993 static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp,
1994 				      const struct intel_connector *connector,
1995 				      struct intel_crtc_state *pipe_config,
1996 				      struct link_config_limits *limits,
1997 				      int pipe_bpp,
1998 				      int timeslots)
1999 {
2000 	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
2001 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2002 	int dsc_src_min_bpp, dsc_sink_min_bpp, dsc_min_bpp;
2003 	int dsc_src_max_bpp, dsc_sink_max_bpp, dsc_max_bpp;
2004 	int dsc_joiner_max_bpp;
2005 
2006 	dsc_src_min_bpp = dsc_src_min_compressed_bpp();
2007 	dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config);
2008 	dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp);
2009 	dsc_min_bpp = max(dsc_min_bpp, to_bpp_int_roundup(limits->link.min_bpp_x16));
2010 
2011 	dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp);
2012 	dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
2013 								pipe_config,
2014 								pipe_bpp / 3);
2015 	dsc_max_bpp = dsc_sink_max_bpp ? min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp;
2016 
2017 	dsc_joiner_max_bpp = get_max_compressed_bpp_with_joiner(i915, adjusted_mode->clock,
2018 								adjusted_mode->hdisplay,
2019 								pipe_config->joiner_pipes);
2020 	dsc_max_bpp = min(dsc_max_bpp, dsc_joiner_max_bpp);
2021 	dsc_max_bpp = min(dsc_max_bpp, to_bpp_int(limits->link.max_bpp_x16));
2022 
2023 	if (DISPLAY_VER(i915) >= 13)
2024 		return xelpd_dsc_compute_link_config(intel_dp, connector, pipe_config, limits,
2025 						     dsc_max_bpp, dsc_min_bpp, pipe_bpp, timeslots);
2026 	return icl_dsc_compute_link_config(intel_dp, pipe_config, limits,
2027 					   dsc_max_bpp, dsc_min_bpp, pipe_bpp, timeslots);
2028 }
2029 
2030 static
2031 u8 intel_dp_dsc_min_src_input_bpc(struct drm_i915_private *i915)
2032 {
2033 	/* Min DSC Input BPC for ICL+ is 8 */
2034 	return HAS_DSC(i915) ? 8 : 0;
2035 }
2036 
2037 static
2038 bool is_dsc_pipe_bpp_sufficient(struct drm_i915_private *i915,
2039 				struct drm_connector_state *conn_state,
2040 				struct link_config_limits *limits,
2041 				int pipe_bpp)
2042 {
2043 	u8 dsc_max_bpc, dsc_min_bpc, dsc_max_pipe_bpp, dsc_min_pipe_bpp;
2044 
2045 	dsc_max_bpc = min(intel_dp_dsc_max_src_input_bpc(i915), conn_state->max_requested_bpc);
2046 	dsc_min_bpc = intel_dp_dsc_min_src_input_bpc(i915);
2047 
2048 	dsc_max_pipe_bpp = min(dsc_max_bpc * 3, limits->pipe.max_bpp);
2049 	dsc_min_pipe_bpp = max(dsc_min_bpc * 3, limits->pipe.min_bpp);
2050 
2051 	return pipe_bpp >= dsc_min_pipe_bpp &&
2052 	       pipe_bpp <= dsc_max_pipe_bpp;
2053 }
2054 
2055 static
2056 int intel_dp_force_dsc_pipe_bpp(struct intel_dp *intel_dp,
2057 				struct drm_connector_state *conn_state,
2058 				struct link_config_limits *limits)
2059 {
2060 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2061 	int forced_bpp;
2062 
2063 	if (!intel_dp->force_dsc_bpc)
2064 		return 0;
2065 
2066 	forced_bpp = intel_dp->force_dsc_bpc * 3;
2067 
2068 	if (is_dsc_pipe_bpp_sufficient(i915, conn_state, limits, forced_bpp)) {
2069 		drm_dbg_kms(&i915->drm, "Input DSC BPC forced to %d\n", intel_dp->force_dsc_bpc);
2070 		return forced_bpp;
2071 	}
2072 
2073 	drm_dbg_kms(&i915->drm, "Cannot force DSC BPC:%d, due to DSC BPC limits\n",
2074 		    intel_dp->force_dsc_bpc);
2075 
2076 	return 0;
2077 }
2078 
/*
 * Select a DSC input pipe bpp (and, via dsc_compute_compressed_bpp(), a
 * compressed bpp plus link configuration) for an external DP sink. A
 * debugfs-forced bpc is tried first, then the sink-supported input bpcs.
 * Returns 0 on success, -EINVAL when no working combination exists.
 */
static int intel_dp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
					 struct intel_crtc_state *pipe_config,
					 struct drm_connector_state *conn_state,
					 struct link_config_limits *limits,
					 int timeslots)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	const struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	u8 max_req_bpc = conn_state->max_requested_bpc;
	u8 dsc_max_bpc, dsc_max_bpp;
	u8 dsc_min_bpc, dsc_min_bpp;
	u8 dsc_bpc[3] = {};
	int forced_bpp, pipe_bpp;
	int num_bpc, i, ret;

	/* A bpc forced via debugfs wins, provided it yields a link config. */
	forced_bpp = intel_dp_force_dsc_pipe_bpp(intel_dp, conn_state, limits);

	if (forced_bpp) {
		ret = dsc_compute_compressed_bpp(intel_dp, connector, pipe_config,
						 limits, forced_bpp, timeslots);
		if (ret == 0) {
			pipe_config->pipe_bpp = forced_bpp;
			return 0;
		}
	}

	dsc_max_bpc = intel_dp_dsc_max_src_input_bpc(i915);
	if (!dsc_max_bpc)
		return -EINVAL;

	/* Clamp the candidate pipe bpp range by source, property and pipe limits. */
	dsc_max_bpc = min_t(u8, dsc_max_bpc, max_req_bpc);
	dsc_max_bpp = min(dsc_max_bpc * 3, limits->pipe.max_bpp);

	dsc_min_bpc = intel_dp_dsc_min_src_input_bpc(i915);
	dsc_min_bpp = max(dsc_min_bpc * 3, limits->pipe.min_bpp);

	/*
	 * Get the maximum DSC bpc that will be supported by any valid
	 * link configuration and compressed bpp.
	 */
	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(connector->dp.dsc_dpcd, dsc_bpc);
	/*
	 * The break-on-below-minimum assumes dsc_bpc[] is ordered high to
	 * low - TODO confirm against the DRM helper's contract.
	 */
	for (i = 0; i < num_bpc; i++) {
		pipe_bpp = dsc_bpc[i] * 3;
		if (pipe_bpp < dsc_min_bpp)
			break;
		if (pipe_bpp > dsc_max_bpp)
			continue;
		ret = dsc_compute_compressed_bpp(intel_dp, connector, pipe_config,
						 limits, pipe_bpp, timeslots);
		if (ret == 0) {
			pipe_config->pipe_bpp = pipe_bpp;
			return 0;
		}
	}

	return -EINVAL;
}
2137 
/*
 * eDP variant of the DSC pipe bpp computation: rather than scanning link
 * configurations, it uses the maximum link rate/lane count from @limits,
 * picks the highest pipe bpp the panel's DSC supports, and derives the
 * compressed bpp directly. Returns 0 on success, -EINVAL otherwise.
 */
static int intel_edp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
					  struct intel_crtc_state *pipe_config,
					  struct drm_connector_state *conn_state,
					  struct link_config_limits *limits)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	int pipe_bpp, forced_bpp;
	int dsc_src_min_bpp, dsc_sink_min_bpp, dsc_min_bpp;
	int dsc_src_max_bpp, dsc_sink_max_bpp, dsc_max_bpp;

	/* A bpc forced via debugfs takes precedence over the computed value. */
	forced_bpp = intel_dp_force_dsc_pipe_bpp(intel_dp, conn_state, limits);

	if (forced_bpp) {
		pipe_bpp = forced_bpp;
	} else {
		int max_bpc = min(limits->pipe.max_bpp / 3, (int)conn_state->max_requested_bpc);

		/* For eDP use max bpp that can be supported with DSC. */
		pipe_bpp = intel_dp_dsc_compute_max_bpp(connector, max_bpc);
		if (!is_dsc_pipe_bpp_sufficient(i915, conn_state, limits, pipe_bpp)) {
			drm_dbg_kms(&i915->drm,
				    "Computed BPC is not in DSC BPC limits\n");
			return -EINVAL;
		}
	}
	/* eDP runs DSC at the maximum link parameters from @limits. */
	pipe_config->port_clock = limits->max_rate;
	pipe_config->lane_count = limits->max_lane_count;

	/* Lower bound: the strictest of source, sink and link minimums. */
	dsc_src_min_bpp = dsc_src_min_compressed_bpp();
	dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config);
	dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp);
	dsc_min_bpp = max(dsc_min_bpp, to_bpp_int_roundup(limits->link.min_bpp_x16));

	/* Upper bound: sink limit (if reported), source and link limits. */
	dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp);
	dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
								pipe_config,
								pipe_bpp / 3);
	dsc_max_bpp = dsc_sink_max_bpp ? min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp;
	dsc_max_bpp = min(dsc_max_bpp, to_bpp_int(limits->link.max_bpp_x16));

	/* Compressed BPP should be less than the Input DSC bpp */
	dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);

	/*
	 * Use the maximum compressed bpp, clamped up to the minimum.
	 * NOTE(review): if dsc_max_bpp ends up below dsc_min_bpp this picks
	 * the minimum and thereby exceeds the computed maximum - verify the
	 * limits guarantee min <= max here.
	 */
	pipe_config->dsc.compressed_bpp_x16 =
		to_bpp_x16(max(dsc_min_bpp, dsc_max_bpp));

	pipe_config->pipe_bpp = pipe_bpp;

	return 0;
}
2190 
2191 int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
2192 				struct intel_crtc_state *pipe_config,
2193 				struct drm_connector_state *conn_state,
2194 				struct link_config_limits *limits,
2195 				int timeslots,
2196 				bool compute_pipe_bpp)
2197 {
2198 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2199 	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2200 	const struct intel_connector *connector =
2201 		to_intel_connector(conn_state->connector);
2202 	const struct drm_display_mode *adjusted_mode =
2203 		&pipe_config->hw.adjusted_mode;
2204 	int ret;
2205 
2206 	pipe_config->fec_enable = pipe_config->fec_enable ||
2207 		(!intel_dp_is_edp(intel_dp) &&
2208 		 intel_dp_supports_fec(intel_dp, connector, pipe_config));
2209 
2210 	if (!intel_dp_supports_dsc(connector, pipe_config))
2211 		return -EINVAL;
2212 
2213 	if (!intel_dp_dsc_supports_format(connector, pipe_config->output_format))
2214 		return -EINVAL;
2215 
2216 	/*
2217 	 * compute pipe bpp is set to false for DP MST DSC case
2218 	 * and compressed_bpp is calculated same time once
2219 	 * vpci timeslots are allocated, because overall bpp
2220 	 * calculation procedure is bit different for MST case.
2221 	 */
2222 	if (compute_pipe_bpp) {
2223 		if (intel_dp_is_edp(intel_dp))
2224 			ret = intel_edp_dsc_compute_pipe_bpp(intel_dp, pipe_config,
2225 							     conn_state, limits);
2226 		else
2227 			ret = intel_dp_dsc_compute_pipe_bpp(intel_dp, pipe_config,
2228 							    conn_state, limits, timeslots);
2229 		if (ret) {
2230 			drm_dbg_kms(&dev_priv->drm,
2231 				    "No Valid pipe bpp for given mode ret = %d\n", ret);
2232 			return ret;
2233 		}
2234 	}
2235 
2236 	/* Calculate Slice count */
2237 	if (intel_dp_is_edp(intel_dp)) {
2238 		pipe_config->dsc.slice_count =
2239 			drm_dp_dsc_sink_max_slice_count(connector->dp.dsc_dpcd,
2240 							true);
2241 		if (!pipe_config->dsc.slice_count) {
2242 			drm_dbg_kms(&dev_priv->drm, "Unsupported Slice Count %d\n",
2243 				    pipe_config->dsc.slice_count);
2244 			return -EINVAL;
2245 		}
2246 	} else {
2247 		u8 dsc_dp_slice_count;
2248 
2249 		dsc_dp_slice_count =
2250 			intel_dp_dsc_get_slice_count(connector,
2251 						     adjusted_mode->crtc_clock,
2252 						     adjusted_mode->crtc_hdisplay,
2253 						     pipe_config->joiner_pipes);
2254 		if (!dsc_dp_slice_count) {
2255 			drm_dbg_kms(&dev_priv->drm,
2256 				    "Compressed Slice Count not supported\n");
2257 			return -EINVAL;
2258 		}
2259 
2260 		pipe_config->dsc.slice_count = dsc_dp_slice_count;
2261 	}
2262 	/*
2263 	 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
2264 	 * is greater than the maximum Cdclock and if slice count is even
2265 	 * then we need to use 2 VDSC instances.
2266 	 */
2267 	if (pipe_config->joiner_pipes || pipe_config->dsc.slice_count > 1)
2268 		pipe_config->dsc.dsc_split = true;
2269 
2270 	ret = intel_dp_dsc_compute_params(connector, pipe_config);
2271 	if (ret < 0) {
2272 		drm_dbg_kms(&dev_priv->drm,
2273 			    "Cannot compute valid DSC parameters for Input Bpp = %d"
2274 			    "Compressed BPP = " BPP_X16_FMT "\n",
2275 			    pipe_config->pipe_bpp,
2276 			    BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16));
2277 		return ret;
2278 	}
2279 
2280 	pipe_config->dsc.compression_enable = true;
2281 	drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
2282 		    "Compressed Bpp = " BPP_X16_FMT " Slice Count = %d\n",
2283 		    pipe_config->pipe_bpp,
2284 		    BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16),
2285 		    pipe_config->dsc.slice_count);
2286 
2287 	return 0;
2288 }
2289 
2290 /**
2291  * intel_dp_compute_config_link_bpp_limits - compute output link bpp limits
2292  * @intel_dp: intel DP
2293  * @crtc_state: crtc state
2294  * @dsc: DSC compression mode
2295  * @limits: link configuration limits
2296  *
2297  * Calculates the output link min, max bpp values in @limits based on the
2298  * pipe bpp range, @crtc_state and @dsc mode.
2299  *
2300  * Returns %true in case of success.
2301  */
2302 bool
2303 intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
2304 					const struct intel_crtc_state *crtc_state,
2305 					bool dsc,
2306 					struct link_config_limits *limits)
2307 {
2308 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2309 	const struct drm_display_mode *adjusted_mode =
2310 		&crtc_state->hw.adjusted_mode;
2311 	const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2312 	const struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
2313 	int max_link_bpp_x16;
2314 
2315 	max_link_bpp_x16 = min(crtc_state->max_link_bpp_x16,
2316 			       to_bpp_x16(limits->pipe.max_bpp));
2317 
2318 	if (!dsc) {
2319 		max_link_bpp_x16 = rounddown(max_link_bpp_x16, to_bpp_x16(2 * 3));
2320 
2321 		if (max_link_bpp_x16 < to_bpp_x16(limits->pipe.min_bpp))
2322 			return false;
2323 
2324 		limits->link.min_bpp_x16 = to_bpp_x16(limits->pipe.min_bpp);
2325 	} else {
2326 		/*
2327 		 * TODO: set the DSC link limits already here, atm these are
2328 		 * initialized only later in intel_edp_dsc_compute_pipe_bpp() /
2329 		 * intel_dp_dsc_compute_pipe_bpp()
2330 		 */
2331 		limits->link.min_bpp_x16 = 0;
2332 	}
2333 
2334 	limits->link.max_bpp_x16 = max_link_bpp_x16;
2335 
2336 	drm_dbg_kms(&i915->drm,
2337 		    "[ENCODER:%d:%s][CRTC:%d:%s] DP link limits: pixel clock %d kHz DSC %s max lanes %d max rate %d max pipe_bpp %d max link_bpp " BPP_X16_FMT "\n",
2338 		    encoder->base.base.id, encoder->base.name,
2339 		    crtc->base.base.id, crtc->base.name,
2340 		    adjusted_mode->crtc_clock,
2341 		    dsc ? "on" : "off",
2342 		    limits->max_lane_count,
2343 		    limits->max_rate,
2344 		    limits->pipe.max_bpp,
2345 		    BPP_X16_ARGS(limits->link.max_bpp_x16));
2346 
2347 	return true;
2348 }
2349 
/*
 * Initialize @limits: the link rate and lane count ranges, the pipe bpp
 * range, and (via intel_dp_compute_config_link_bpp_limits()) the link bpp
 * range. Returns false when no valid link bpp range exists.
 */
static bool
intel_dp_compute_config_limits(struct intel_dp *intel_dp,
			       struct intel_crtc_state *crtc_state,
			       bool respect_downstream_limits,
			       bool dsc,
			       struct link_config_limits *limits)
{
	limits->min_rate = intel_dp_min_link_rate(intel_dp);
	limits->max_rate = intel_dp_max_link_rate(intel_dp);

	/* FIXME 128b/132b SST support missing */
	limits->max_rate = min(limits->max_rate, 810000);
	/* Keep the range consistent after capping the maximum above. */
	limits->min_rate = min(limits->min_rate, limits->max_rate);

	limits->min_lane_count = intel_dp_min_lane_count(intel_dp);
	limits->max_lane_count = intel_dp_max_lane_count(intel_dp);

	limits->pipe.min_bpp = intel_dp_min_bpp(crtc_state->output_format);
	limits->pipe.max_bpp = intel_dp_max_bpp(intel_dp, crtc_state,
						     respect_downstream_limits);

	if (intel_dp->use_max_params) {
		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertizes being capable of in case the initial fast
		 * optimal params failed us. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically on older panels these
		 * values correspond to the native resolution of the panel.
		 */
		limits->min_lane_count = limits->max_lane_count;
		limits->min_rate = limits->max_rate;
	}

	intel_dp_adjust_compliance_config(intel_dp, crtc_state, limits);

	return intel_dp_compute_config_link_bpp_limits(intel_dp,
						       crtc_state,
						       dsc,
						       limits);
}
2391 
2392 int intel_dp_config_required_rate(const struct intel_crtc_state *crtc_state)
2393 {
2394 	const struct drm_display_mode *adjusted_mode =
2395 		&crtc_state->hw.adjusted_mode;
2396 	int bpp = crtc_state->dsc.compression_enable ?
2397 		to_bpp_int_roundup(crtc_state->dsc.compressed_bpp_x16) :
2398 		crtc_state->pipe_bpp;
2399 
2400 	return intel_dp_link_required(adjusted_mode->crtc_clock, bpp);
2401 }
2402 
2403 bool intel_dp_joiner_needs_dsc(struct drm_i915_private *i915, bool use_joiner)
2404 {
2405 	/*
2406 	 * Pipe joiner needs compression up to display 12 due to bandwidth
2407 	 * limitation. DG2 onwards pipe joiner can be enabled without
2408 	 * compression.
2409 	 */
2410 	return DISPLAY_VER(i915) < 13 && use_joiner;
2411 }
2412 
/*
 * Compute the DP link configuration (lane count, link rate, pipe bpp and,
 * if needed, DSC state) for the given mode. An uncompressed config is
 * tried first; DSC is used when the joiner requires it, when forced via
 * debugfs, or as a bandwidth fallback. Returns 0 or a negative error.
 */
static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
			     struct intel_crtc_state *pipe_config,
			     struct drm_connector_state *conn_state,
			     bool respect_downstream_limits)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct link_config_limits limits;
	bool dsc_needed, joiner_needs_dsc;
	int ret = 0;

	/* If FEC was requested it must actually be supported on this link. */
	if (pipe_config->fec_enable &&
	    !intel_dp_supports_fec(intel_dp, connector, pipe_config))
		return -EINVAL;

	/* Claim this pipe and the next one when the mode needs the joiner. */
	if (intel_dp_need_joiner(intel_dp, connector,
				 adjusted_mode->crtc_hdisplay,
				 adjusted_mode->crtc_clock))
		pipe_config->joiner_pipes = GENMASK(crtc->pipe + 1, crtc->pipe);

	joiner_needs_dsc = intel_dp_joiner_needs_dsc(i915, pipe_config->joiner_pipes);

	/* DSC is also needed when no uncompressed link limits exist at all. */
	dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en ||
		     !intel_dp_compute_config_limits(intel_dp, pipe_config,
						     respect_downstream_limits,
						     false,
						     &limits);

	if (!dsc_needed) {
		/*
		 * Optimize for slow and wide for everything, because there are some
		 * eDP 1.3 and 1.4 panels don't work well with fast and narrow.
		 */
		ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config,
							conn_state, &limits);
		if (ret)
			dsc_needed = true;
	}

	if (dsc_needed) {
		drm_dbg_kms(&i915->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
			    str_yes_no(ret), str_yes_no(joiner_needs_dsc),
			    str_yes_no(intel_dp->force_dsc_en));

		/* Recompute the limits for the DSC (compressed) case. */
		if (!intel_dp_compute_config_limits(intel_dp, pipe_config,
						    respect_downstream_limits,
						    true,
						    &limits))
			return -EINVAL;

		ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
						  conn_state, &limits, 64, true);
		if (ret < 0)
			return ret;
	}

	drm_dbg_kms(&i915->drm,
		    "DP lane count %d clock %d bpp input %d compressed " BPP_X16_FMT " link rate required %d available %d\n",
		    pipe_config->lane_count, pipe_config->port_clock,
		    pipe_config->pipe_bpp,
		    BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16),
		    intel_dp_config_required_rate(pipe_config),
		    intel_dp_max_link_data_rate(intel_dp,
						pipe_config->port_clock,
						pipe_config->lane_count));

	return 0;
}
2487 
2488 bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
2489 				  const struct drm_connector_state *conn_state)
2490 {
2491 	const struct intel_digital_connector_state *intel_conn_state =
2492 		to_intel_digital_connector_state(conn_state);
2493 	const struct drm_display_mode *adjusted_mode =
2494 		&crtc_state->hw.adjusted_mode;
2495 
2496 	/*
2497 	 * Our YCbCr output is always limited range.
2498 	 * crtc_state->limited_color_range only applies to RGB,
2499 	 * and it must never be set for YCbCr or we risk setting
2500 	 * some conflicting bits in TRANSCONF which will mess up
2501 	 * the colors on the monitor.
2502 	 */
2503 	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
2504 		return false;
2505 
2506 	if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
2507 		/*
2508 		 * See:
2509 		 * CEA-861-E - 5.1 Default Encoding Parameters
2510 		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
2511 		 */
2512 		return crtc_state->pipe_bpp != 18 &&
2513 			drm_default_rgb_quant_range(adjusted_mode) ==
2514 			HDMI_QUANTIZATION_RANGE_LIMITED;
2515 	} else {
2516 		return intel_conn_state->broadcast_rgb ==
2517 			INTEL_BROADCAST_RGB_LIMITED;
2518 	}
2519 }
2520 
2521 static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
2522 				    enum port port)
2523 {
2524 	if (IS_G4X(dev_priv))
2525 		return false;
2526 	if (DISPLAY_VER(dev_priv) < 12 && port == PORT_A)
2527 		return false;
2528 
2529 	return true;
2530 }
2531 
/*
 * Fill in the VSC SDP revision/length and the pixel encoding and
 * colorimetry fields from the crtc and connector state, per the DP 1.4a
 * and DP 2.0 spec tables referenced below.
 */
static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state,
					     const struct drm_connector_state *conn_state,
					     struct drm_dp_vsc_sdp *vsc)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (crtc_state->has_panel_replay) {
		/*
		 * Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223
		 * VSC SDP supporting 3D stereo, Panel Replay, and Pixel
		 * Encoding/Colorimetry Format indication.
		 */
		vsc->revision = 0x7;
	} else {
		/*
		 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
		 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
		 * Colorimetry Format indication.
		 */
		vsc->revision = 0x5;
	}

	vsc->length = 0x13;

	/* DP 1.4a spec, Table 2-120 */
	switch (crtc_state->output_format) {
	case INTEL_OUTPUT_FORMAT_YCBCR444:
		vsc->pixelformat = DP_PIXELFORMAT_YUV444;
		break;
	case INTEL_OUTPUT_FORMAT_YCBCR420:
		vsc->pixelformat = DP_PIXELFORMAT_YUV420;
		break;
	case INTEL_OUTPUT_FORMAT_RGB:
	default:
		vsc->pixelformat = DP_PIXELFORMAT_RGB;
	}

	/* Map the connector "Colorspace" property to the DP colorimetry code. */
	switch (conn_state->colorspace) {
	case DRM_MODE_COLORIMETRY_BT709_YCC:
		vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_XVYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_709:
		vsc->colorimetry = DP_COLORIMETRY_XVYCC_709;
		break;
	case DRM_MODE_COLORIMETRY_SYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_SYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_OPYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_OPYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_CYCC:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_RGB:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_YCC:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC;
		break;
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
		vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB;
		break;
	default:
		/*
		 * RGB->YCBCR color conversion uses the BT.709
		 * color space.
		 */
		if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
			vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
		else
			vsc->colorimetry = DP_COLORIMETRY_DEFAULT;
		break;
	}

	/* Pipe bpp is 3 components x bpc. */
	vsc->bpc = crtc_state->pipe_bpp / 3;

	/* only RGB pixelformat supports 6 bpc */
	drm_WARN_ON(&dev_priv->drm,
		    vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB);

	/* all YCbCr are always limited range */
	vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA;
	vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED;
}
2621 
2622 static void intel_dp_compute_as_sdp(struct intel_dp *intel_dp,
2623 				    struct intel_crtc_state *crtc_state)
2624 {
2625 	struct drm_dp_as_sdp *as_sdp = &crtc_state->infoframes.as_sdp;
2626 	const struct drm_display_mode *adjusted_mode =
2627 		&crtc_state->hw.adjusted_mode;
2628 
2629 	if (!crtc_state->vrr.enable ||
2630 	    !intel_dp_as_sdp_supported(intel_dp))
2631 		return;
2632 
2633 	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC);
2634 
2635 	/* Currently only DP_AS_SDP_AVT_FIXED_VTOTAL mode supported */
2636 	as_sdp->sdp_type = DP_SDP_ADAPTIVE_SYNC;
2637 	as_sdp->length = 0x9;
2638 	as_sdp->duration_incr_ms = 0;
2639 	as_sdp->duration_incr_ms = 0;
2640 
2641 	if (crtc_state->cmrr.enable) {
2642 		as_sdp->mode = DP_AS_SDP_FAVT_TRR_REACHED;
2643 		as_sdp->vtotal = adjusted_mode->vtotal;
2644 		as_sdp->target_rr = drm_mode_vrefresh(adjusted_mode);
2645 		as_sdp->target_rr_divider = true;
2646 	} else {
2647 		as_sdp->mode = DP_AS_SDP_AVT_FIXED_VTOTAL;
2648 		as_sdp->vtotal = adjusted_mode->vtotal;
2649 		as_sdp->target_rr = 0;
2650 	}
2651 }
2652 
/*
 * Compute the VSC SDP for this crtc state. The SDP is needed either for
 * colorimetry signalling or for PSR/Panel Replay; the revision/length
 * pairs come from the DP/eDP spec tables cited in each branch.
 */
static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
				     struct intel_crtc_state *crtc_state,
				     const struct drm_connector_state *conn_state)
{
	struct drm_dp_vsc_sdp *vsc;

	/* Skip when neither colorimetry signalling nor PSR needs the SDP. */
	if ((!intel_dp->colorimetry_support ||
	     !intel_dp_needs_vsc_sdp(crtc_state, conn_state)) &&
	    !crtc_state->has_psr)
		return;

	vsc = &crtc_state->infoframes.vsc;

	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
	vsc->sdp_type = DP_SDP_VSC;

	/* Needs colorimetry */
	if (intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
		intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
						 vsc);
	} else if (crtc_state->has_panel_replay) {
		/*
		 * [Panel Replay without colorimetry info]
		 * Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223
		 * VSC SDP supporting 3D stereo + Panel Replay.
		 */
		vsc->revision = 0x6;
		vsc->length = 0x10;
	} else if (crtc_state->has_sel_update) {
		/*
		 * [PSR2 without colorimetry]
		 * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11
		 * 3D stereo + PSR/PSR2 + Y-coordinate.
		 */
		vsc->revision = 0x4;
		vsc->length = 0xe;
	} else {
		/*
		 * [PSR1]
		 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
		 * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or
		 * higher).
		 */
		vsc->revision = 0x2;
		vsc->length = 0x8;
	}
}
2700 
2701 static void
2702 intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
2703 					    struct intel_crtc_state *crtc_state,
2704 					    const struct drm_connector_state *conn_state)
2705 {
2706 	int ret;
2707 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2708 	struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm;
2709 
2710 	if (!conn_state->hdr_output_metadata)
2711 		return;
2712 
2713 	ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state);
2714 
2715 	if (ret) {
2716 		drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n");
2717 		return;
2718 	}
2719 
2720 	crtc_state->infoframes.enable |=
2721 		intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
2722 }
2723 
2724 static bool can_enable_drrs(struct intel_connector *connector,
2725 			    const struct intel_crtc_state *pipe_config,
2726 			    const struct drm_display_mode *downclock_mode)
2727 {
2728 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
2729 
2730 	if (pipe_config->vrr.enable)
2731 		return false;
2732 
2733 	/*
2734 	 * DRRS and PSR can't be enable together, so giving preference to PSR
2735 	 * as it allows more power-savings by complete shutting down display,
2736 	 * so to guarantee this, intel_drrs_compute_config() must be called
2737 	 * after intel_psr_compute_config().
2738 	 */
2739 	if (pipe_config->has_psr)
2740 		return false;
2741 
2742 	/* FIXME missing FDI M2/N2 etc. */
2743 	if (pipe_config->has_pch_encoder)
2744 		return false;
2745 
2746 	if (!intel_cpu_transcoder_has_drrs(i915, pipe_config->cpu_transcoder))
2747 		return false;
2748 
2749 	return downclock_mode &&
2750 		intel_panel_drrs_type(connector) == DRRS_TYPE_SEAMLESS;
2751 }
2752 
/*
 * Compute the DRRS (seamless refresh rate switching) state: decide
 * whether M/N can be updated live, and when DRRS is possible compute the
 * M2/N2 link values for the panel's downclock mode.
 */
static void
intel_dp_drrs_compute_config(struct intel_connector *connector,
			     struct intel_crtc_state *pipe_config,
			     int link_bpp_x16)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	const struct drm_display_mode *downclock_mode =
		intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode);
	int pixel_clock;

	/*
	 * FIXME all joined pipes share the same transcoder.
	 * Need to account for that when updating M/N live.
	 */
	if (has_seamless_m_n(connector) && !pipe_config->joiner_pipes)
		pipe_config->update_m_n = true;

	if (!can_enable_drrs(connector, pipe_config, downclock_mode)) {
		/* Clear any stale M2/N2 so state readout stays consistent. */
		if (intel_cpu_transcoder_has_m2_n2(i915, pipe_config->cpu_transcoder))
			intel_zero_m_n(&pipe_config->dp_m2_n2);
		return;
	}

	/* ILK/SNB/IVB need a VBT-specified MSA timing delay for DRRS. */
	if (IS_IRONLAKE(i915) || IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915))
		pipe_config->msa_timing_delay = connector->panel.vbt.edp.drrs_msa_timing_delay;

	pipe_config->has_drrs = true;

	/* With the MSO splitter each link carries a fraction of the pixels. */
	pixel_clock = downclock_mode->clock;
	if (pipe_config->splitter.enable)
		pixel_clock /= pipe_config->splitter.link_count;

	intel_link_compute_m_n(link_bpp_x16, pipe_config->lane_count, pixel_clock,
			       pipe_config->port_clock,
			       intel_dp_bw_fec_overhead(pipe_config->fec_enable),
			       &pipe_config->dp_m2_n2);

	/* FIXME: abstract this better */
	if (pipe_config->splitter.enable)
		pipe_config->dp_m2_n2.data_m *= pipe_config->splitter.link_count;
}
2794 
2795 static bool intel_dp_has_audio(struct intel_encoder *encoder,
2796 			       const struct drm_connector_state *conn_state)
2797 {
2798 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2799 	const struct intel_digital_connector_state *intel_conn_state =
2800 		to_intel_digital_connector_state(conn_state);
2801 	struct intel_connector *connector =
2802 		to_intel_connector(conn_state->connector);
2803 
2804 	if (!intel_dp_port_has_audio(i915, encoder->port))
2805 		return false;
2806 
2807 	if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
2808 		return connector->base.display_info.has_audio;
2809 	else
2810 		return intel_conn_state->force_audio == HDMI_AUDIO_ON;
2811 }
2812 
2813 static int
2814 intel_dp_compute_output_format(struct intel_encoder *encoder,
2815 			       struct intel_crtc_state *crtc_state,
2816 			       struct drm_connector_state *conn_state,
2817 			       bool respect_downstream_limits)
2818 {
2819 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2820 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2821 	struct intel_connector *connector = intel_dp->attached_connector;
2822 	const struct drm_display_info *info = &connector->base.display_info;
2823 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
2824 	bool ycbcr_420_only;
2825 	int ret;
2826 
2827 	ycbcr_420_only = drm_mode_is_420_only(info, adjusted_mode);
2828 
2829 	if (ycbcr_420_only && !connector->base.ycbcr_420_allowed) {
2830 		drm_dbg_kms(&i915->drm,
2831 			    "YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. Falling back to RGB.\n");
2832 		crtc_state->sink_format = INTEL_OUTPUT_FORMAT_RGB;
2833 	} else {
2834 		crtc_state->sink_format = intel_dp_sink_format(connector, adjusted_mode);
2835 	}
2836 
2837 	crtc_state->output_format = intel_dp_output_format(connector, crtc_state->sink_format);
2838 
2839 	ret = intel_dp_compute_link_config(encoder, crtc_state, conn_state,
2840 					   respect_downstream_limits);
2841 	if (ret) {
2842 		if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
2843 		    !connector->base.ycbcr_420_allowed ||
2844 		    !drm_mode_is_420_also(info, adjusted_mode))
2845 			return ret;
2846 
2847 		crtc_state->sink_format = INTEL_OUTPUT_FORMAT_YCBCR420;
2848 		crtc_state->output_format = intel_dp_output_format(connector,
2849 								   crtc_state->sink_format);
2850 		ret = intel_dp_compute_link_config(encoder, crtc_state, conn_state,
2851 						   respect_downstream_limits);
2852 	}
2853 
2854 	return ret;
2855 }
2856 
2857 void
2858 intel_dp_audio_compute_config(struct intel_encoder *encoder,
2859 			      struct intel_crtc_state *pipe_config,
2860 			      struct drm_connector_state *conn_state)
2861 {
2862 	pipe_config->has_audio =
2863 		intel_dp_has_audio(encoder, conn_state) &&
2864 		intel_audio_compute_config(encoder, pipe_config, conn_state);
2865 
2866 	pipe_config->sdp_split_enable = pipe_config->has_audio &&
2867 					intel_dp_is_uhbr(pipe_config);
2868 }
2869 
2870 static void intel_dp_queue_modeset_retry_work(struct intel_connector *connector)
2871 {
2872 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
2873 
2874 	drm_connector_get(&connector->base);
2875 	if (!queue_work(i915->unordered_wq, &connector->modeset_retry_work))
2876 		drm_connector_put(&connector->base);
2877 }
2878 
2879 /* NOTE: @state is only valid for MST links and can be %NULL for SST. */
2880 void
2881 intel_dp_queue_modeset_retry_for_link(struct intel_atomic_state *state,
2882 				      struct intel_encoder *encoder,
2883 				      const struct intel_crtc_state *crtc_state)
2884 {
2885 	struct intel_connector *connector;
2886 	struct intel_digital_connector_state *conn_state;
2887 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2888 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2889 	int i;
2890 
2891 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
2892 		intel_dp_queue_modeset_retry_work(intel_dp->attached_connector);
2893 
2894 		return;
2895 	}
2896 
2897 	if (drm_WARN_ON(&i915->drm, !state))
2898 		return;
2899 
2900 	for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
2901 		if (!conn_state->base.crtc)
2902 			continue;
2903 
2904 		if (connector->mst_port == intel_dp)
2905 			intel_dp_queue_modeset_retry_work(connector);
2906 	}
2907 }
2908 
/*
 * Main .compute_config() hook for DP/eDP encoders: validates the adjusted
 * mode, picks output format and link parameters, applies eDP panel and MSO
 * splitter adjustments, and computes audio, M/N and the derived
 * VRR/PSR/DRRS/SDP state. Returns 0 on success or a negative errno.
 */
int
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config,
			struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	const struct drm_display_mode *fixed_mode;
	struct intel_connector *connector = intel_dp->attached_connector;
	int ret = 0, link_bpp_x16;

	/* Non-DDI PCH platforms route DP through a PCH encoder, except port A. */
	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && encoder->port != PORT_A)
		pipe_config->has_pch_encoder = true;

	fixed_mode = intel_panel_fixed_mode(connector, adjusted_mode);
	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		ret = intel_panel_compute_config(connector, adjusted_mode);
		if (ret)
			return ret;
	}

	/* Reject mode flags/geometry that the DP output cannot handle. */
	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return -EINVAL;

	if (!connector->base.interlace_allowed &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
		return -EINVAL;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return -EINVAL;

	if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay))
		return -EINVAL;

	/*
	 * Try to respect downstream TMDS clock limits first, if
	 * that fails assume the user might know something we don't.
	 */
	ret = intel_dp_compute_output_format(encoder, pipe_config, conn_state, true);
	if (ret)
		ret = intel_dp_compute_output_format(encoder, pipe_config, conn_state, false);
	if (ret)
		return ret;

	if ((intel_dp_is_edp(intel_dp) && fixed_mode) ||
	    pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
		ret = intel_panel_fitting(pipe_config, conn_state);
		if (ret)
			return ret;
	}

	pipe_config->limited_color_range =
		intel_dp_limited_color_range(pipe_config, conn_state);

	pipe_config->enhanced_framing =
		drm_dp_enhanced_frame_cap(intel_dp->dpcd);

	/* With DSC active the link carries the compressed bpp. */
	if (pipe_config->dsc.compression_enable)
		link_bpp_x16 = pipe_config->dsc.compressed_bpp_x16;
	else
		link_bpp_x16 = to_bpp_x16(intel_dp_output_bpp(pipe_config->output_format,
							      pipe_config->pipe_bpp));

	if (intel_dp->mso_link_count) {
		int n = intel_dp->mso_link_count;
		int overlap = intel_dp->mso_pixel_overlap;

		pipe_config->splitter.enable = true;
		pipe_config->splitter.link_count = n;
		pipe_config->splitter.pixel_overlap = overlap;

		drm_dbg_kms(&dev_priv->drm, "MSO link count %d, pixel overlap %d\n",
			    n, overlap);

		/* Each MSO segment carries 1/n of the mode plus the overlap pixels. */
		adjusted_mode->crtc_hdisplay = adjusted_mode->crtc_hdisplay / n + overlap;
		adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hblank_start / n + overlap;
		adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_end / n + overlap;
		adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hsync_start / n + overlap;
		adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_end / n + overlap;
		adjusted_mode->crtc_htotal = adjusted_mode->crtc_htotal / n + overlap;
		adjusted_mode->crtc_clock /= n;
	}

	intel_dp_audio_compute_config(encoder, pipe_config, conn_state);

	intel_link_compute_m_n(link_bpp_x16,
			       pipe_config->lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       intel_dp_bw_fec_overhead(pipe_config->fec_enable),
			       &pipe_config->dp_m_n);

	/* FIXME: abstract this better */
	if (pipe_config->splitter.enable)
		pipe_config->dp_m_n.data_m *= pipe_config->splitter.link_count;

	if (!HAS_DDI(dev_priv))
		g4x_dp_set_clock(encoder, pipe_config);

	intel_vrr_compute_config(pipe_config, conn_state);
	intel_dp_compute_as_sdp(intel_dp, pipe_config);
	intel_psr_compute_config(intel_dp, pipe_config, conn_state);
	intel_alpm_lobf_compute_config(intel_dp, pipe_config, conn_state);
	intel_dp_drrs_compute_config(connector, pipe_config, link_bpp_x16);
	intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
	intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);

	return intel_dp_tunnel_atomic_compute_stream_bw(state, intel_dp, connector,
							pipe_config);
}
3021 
3022 void intel_dp_set_link_params(struct intel_dp *intel_dp,
3023 			      int link_rate, int lane_count)
3024 {
3025 	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
3026 	intel_dp->link_trained = false;
3027 	intel_dp->link_rate = link_rate;
3028 	intel_dp->lane_count = lane_count;
3029 }
3030 
3031 void intel_dp_reset_link_params(struct intel_dp *intel_dp)
3032 {
3033 	intel_dp->link.max_lane_count = intel_dp_max_common_lane_count(intel_dp);
3034 	intel_dp->link.max_rate = intel_dp_max_common_rate(intel_dp);
3035 	intel_dp->link.retrain_disabled = false;
3036 	intel_dp->link.seq_train_failures = 0;
3037 }
3038 
3039 /* Enable backlight PWM and backlight PP control. */
3040 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
3041 			    const struct drm_connector_state *conn_state)
3042 {
3043 	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
3044 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3045 
3046 	if (!intel_dp_is_edp(intel_dp))
3047 		return;
3048 
3049 	drm_dbg_kms(&i915->drm, "\n");
3050 
3051 	intel_backlight_enable(crtc_state, conn_state);
3052 	intel_pps_backlight_on(intel_dp);
3053 }
3054 
3055 /* Disable backlight PP control and backlight PWM. */
3056 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
3057 {
3058 	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
3059 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3060 
3061 	if (!intel_dp_is_edp(intel_dp))
3062 		return;
3063 
3064 	drm_dbg_kms(&i915->drm, "\n");
3065 
3066 	intel_pps_backlight_off(intel_dp);
3067 	intel_backlight_disable(old_conn_state);
3068 }
3069 
3070 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
3071 {
3072 	/*
3073 	 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
3074 	 * be capable of signalling downstream hpd with a long pulse.
3075 	 * Whether or not that means D3 is safe to use is not clear,
3076 	 * but let's assume so until proven otherwise.
3077 	 *
3078 	 * FIXME should really check all downstream ports...
3079 	 */
3080 	return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
3081 		drm_dp_is_branch(intel_dp->dpcd) &&
3082 		intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
3083 }
3084 
3085 static int
3086 write_dsc_decompression_flag(struct drm_dp_aux *aux, u8 flag, bool set)
3087 {
3088 	int err;
3089 	u8 val;
3090 
3091 	err = drm_dp_dpcd_readb(aux, DP_DSC_ENABLE, &val);
3092 	if (err < 0)
3093 		return err;
3094 
3095 	if (set)
3096 		val |= flag;
3097 	else
3098 		val &= ~flag;
3099 
3100 	return drm_dp_dpcd_writeb(aux, DP_DSC_ENABLE, val);
3101 }
3102 
3103 static void
3104 intel_dp_sink_set_dsc_decompression(struct intel_connector *connector,
3105 				    bool enable)
3106 {
3107 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
3108 
3109 	if (write_dsc_decompression_flag(connector->dp.dsc_decompression_aux,
3110 					 DP_DECOMPRESSION_EN, enable) < 0)
3111 		drm_dbg_kms(&i915->drm,
3112 			    "Failed to %s sink decompression state\n",
3113 			    str_enable_disable(enable));
3114 }
3115 
3116 static void
3117 intel_dp_sink_set_dsc_passthrough(const struct intel_connector *connector,
3118 				  bool enable)
3119 {
3120 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
3121 	struct drm_dp_aux *aux = connector->port ?
3122 				 connector->port->passthrough_aux : NULL;
3123 
3124 	if (!aux)
3125 		return;
3126 
3127 	if (write_dsc_decompression_flag(aux,
3128 					 DP_DSC_PASSTHROUGH_EN, enable) < 0)
3129 		drm_dbg_kms(&i915->drm,
3130 			    "Failed to %s sink compression passthrough state\n",
3131 			    str_enable_disable(enable));
3132 }
3133 
/*
 * Count the connectors with decompression enabled that share @connector's
 * decompression AUX device. For @for_get_ref the caller samples the count
 * before enabling itself (0 means "first user"), for put the caller has
 * already disabled itself (0 means "last user").
 */
static int intel_dp_dsc_aux_ref_count(struct intel_atomic_state *state,
				      const struct intel_connector *connector,
				      bool for_get_ref)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct drm_connector *_connector_iter;
	struct drm_connector_state *old_conn_state;
	struct drm_connector_state *new_conn_state;
	int ref_count = 0;
	int i;

	/*
	 * On SST the decompression AUX device won't be shared, each connector
	 * uses for this its own AUX targeting the sink device.
	 */
	if (!connector->mst_port)
		return connector->dp.dsc_decompression_enabled ? 1 : 0;

	for_each_oldnew_connector_in_state(&state->base, _connector_iter,
					   old_conn_state, new_conn_state, i) {
		const struct intel_connector *
			connector_iter = to_intel_connector(_connector_iter);

		/* Only streams on the same MST link can share the AUX device. */
		if (connector_iter->mst_port != connector->mst_port)
			continue;

		if (!connector_iter->dp.dsc_decompression_enabled)
			continue;

		/* An enabled user must have a CRTC in the corresponding state. */
		drm_WARN_ON(&i915->drm,
			    (for_get_ref && !new_conn_state->crtc) ||
			    (!for_get_ref && !old_conn_state->crtc));

		if (connector_iter->dp.dsc_decompression_aux ==
		    connector->dp.dsc_decompression_aux)
			ref_count++;
	}

	return ref_count;
}
3174 
3175 static bool intel_dp_dsc_aux_get_ref(struct intel_atomic_state *state,
3176 				     struct intel_connector *connector)
3177 {
3178 	bool ret = intel_dp_dsc_aux_ref_count(state, connector, true) == 0;
3179 
3180 	connector->dp.dsc_decompression_enabled = true;
3181 
3182 	return ret;
3183 }
3184 
3185 static bool intel_dp_dsc_aux_put_ref(struct intel_atomic_state *state,
3186 				     struct intel_connector *connector)
3187 {
3188 	connector->dp.dsc_decompression_enabled = false;
3189 
3190 	return intel_dp_dsc_aux_ref_count(state, connector, false) == 0;
3191 }
3192 
3193 /**
3194  * intel_dp_sink_enable_decompression - Enable DSC decompression in sink/last branch device
3195  * @state: atomic state
3196  * @connector: connector to enable the decompression for
3197  * @new_crtc_state: new state for the CRTC driving @connector
3198  *
3199  * Enable the DSC decompression if required in the %DP_DSC_ENABLE DPCD
3200  * register of the appropriate sink/branch device. On SST this is always the
3201  * sink device, whereas on MST based on each device's DSC capabilities it's
3202  * either the last branch device (enabling decompression in it) or both the
3203  * last branch device (enabling passthrough in it) and the sink device
3204  * (enabling decompression in it).
3205  */
3206 void intel_dp_sink_enable_decompression(struct intel_atomic_state *state,
3207 					struct intel_connector *connector,
3208 					const struct intel_crtc_state *new_crtc_state)
3209 {
3210 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3211 
3212 	if (!new_crtc_state->dsc.compression_enable)
3213 		return;
3214 
3215 	if (drm_WARN_ON(&i915->drm,
3216 			!connector->dp.dsc_decompression_aux ||
3217 			connector->dp.dsc_decompression_enabled))
3218 		return;
3219 
3220 	if (!intel_dp_dsc_aux_get_ref(state, connector))
3221 		return;
3222 
3223 	intel_dp_sink_set_dsc_passthrough(connector, true);
3224 	intel_dp_sink_set_dsc_decompression(connector, true);
3225 }
3226 
3227 /**
3228  * intel_dp_sink_disable_decompression - Disable DSC decompression in sink/last branch device
3229  * @state: atomic state
3230  * @connector: connector to disable the decompression for
3231  * @old_crtc_state: old state for the CRTC driving @connector
3232  *
3233  * Disable the DSC decompression if required in the %DP_DSC_ENABLE DPCD
3234  * register of the appropriate sink/branch device, corresponding to the
3235  * sequence in intel_dp_sink_enable_decompression().
3236  */
3237 void intel_dp_sink_disable_decompression(struct intel_atomic_state *state,
3238 					 struct intel_connector *connector,
3239 					 const struct intel_crtc_state *old_crtc_state)
3240 {
3241 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3242 
3243 	if (!old_crtc_state->dsc.compression_enable)
3244 		return;
3245 
3246 	if (drm_WARN_ON(&i915->drm,
3247 			!connector->dp.dsc_decompression_aux ||
3248 			!connector->dp.dsc_decompression_enabled))
3249 		return;
3250 
3251 	if (!intel_dp_dsc_aux_put_ref(state, connector))
3252 		return;
3253 
3254 	intel_dp_sink_set_dsc_decompression(connector, false);
3255 	intel_dp_sink_set_dsc_passthrough(connector, false);
3256 }
3257 
3258 static void
3259 intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
3260 {
3261 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3262 	u8 oui[] = { 0x00, 0xaa, 0x01 };
3263 	u8 buf[3] = {};
3264 
3265 	/*
3266 	 * During driver init, we want to be careful and avoid changing the source OUI if it's
3267 	 * already set to what we want, so as to avoid clearing any state by accident
3268 	 */
3269 	if (careful) {
3270 		if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0)
3271 			drm_err(&i915->drm, "Failed to read source OUI\n");
3272 
3273 		if (memcmp(oui, buf, sizeof(oui)) == 0)
3274 			return;
3275 	}
3276 
3277 	if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0)
3278 		drm_err(&i915->drm, "Failed to write source OUI\n");
3279 
3280 	intel_dp->last_oui_write = jiffies;
3281 }
3282 
3283 void intel_dp_wait_source_oui(struct intel_dp *intel_dp)
3284 {
3285 	struct intel_connector *connector = intel_dp->attached_connector;
3286 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3287 
3288 	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Performing OUI wait (%u ms)\n",
3289 		    connector->base.base.id, connector->base.name,
3290 		    connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout);
3291 
3292 	wait_remaining_ms_from_jiffies(intel_dp->last_oui_write,
3293 				       connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout);
3294 }
3295 
/* If the device supports it, try to set the power state appropriately */
void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DP_SET_POWER_D0) {
		/* Keep the sink in D0 if we rely on it for downstream HPD. */
		if (downstream_hpd_needs_d0(intel_dp))
			return;

		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
	} else {
		struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

		lspcon_resume(dp_to_dig_port(intel_dp));

		/* Write the source OUI as early as possible */
		if (intel_dp_is_edp(intel_dp))
			intel_edp_init_source_oui(intel_dp, false);

		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
			if (ret == 1)
				break;
			msleep(1);
		}

		/* LSPCON needs extra time to settle back into PCON mode. */
		if (ret == 1 && lspcon->active)
			lspcon_wait_pcon_mode(lspcon);
	}

	/* drm_dp_dpcd_writeb() returns the number of bytes written (1) on success. */
	if (ret != 1)
		drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Set power to %s failed\n",
			    encoder->base.base.id, encoder->base.name,
			    mode == DP_SET_POWER_D0 ? "D0" : "D3");
}
3341 
3342 static bool
3343 intel_dp_get_dpcd(struct intel_dp *intel_dp);
3344 
3345 /**
3346  * intel_dp_sync_state - sync the encoder state during init/resume
3347  * @encoder: intel encoder to sync
3348  * @crtc_state: state for the CRTC connected to the encoder
3349  *
3350  * Sync any state stored in the encoder wrt. HW state during driver init
3351  * and system resume.
3352  */
3353 void intel_dp_sync_state(struct intel_encoder *encoder,
3354 			 const struct intel_crtc_state *crtc_state)
3355 {
3356 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3357 	bool dpcd_updated = false;
3358 
3359 	/*
3360 	 * Don't clobber DPCD if it's been already read out during output
3361 	 * setup (eDP) or detect.
3362 	 */
3363 	if (crtc_state && intel_dp->dpcd[DP_DPCD_REV] == 0) {
3364 		intel_dp_get_dpcd(intel_dp);
3365 		dpcd_updated = true;
3366 	}
3367 
3368 	intel_dp_tunnel_resume(intel_dp, crtc_state, dpcd_updated);
3369 
3370 	if (crtc_state)
3371 		intel_dp_reset_link_params(intel_dp);
3372 }
3373 
3374 bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
3375 				    struct intel_crtc_state *crtc_state)
3376 {
3377 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
3378 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3379 	bool fastset = true;
3380 
3381 	/*
3382 	 * If BIOS has set an unsupported or non-standard link rate for some
3383 	 * reason force an encoder recompute and full modeset.
3384 	 */
3385 	if (intel_dp_rate_index(intel_dp->source_rates, intel_dp->num_source_rates,
3386 				crtc_state->port_clock) < 0) {
3387 		drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset due to unsupported link rate\n",
3388 			    encoder->base.base.id, encoder->base.name);
3389 		crtc_state->uapi.connectors_changed = true;
3390 		fastset = false;
3391 	}
3392 
3393 	/*
3394 	 * FIXME hack to force full modeset when DSC is being used.
3395 	 *
3396 	 * As long as we do not have full state readout and config comparison
3397 	 * of crtc_state->dsc, we have no way to ensure reliable fastset.
3398 	 * Remove once we have readout for DSC.
3399 	 */
3400 	if (crtc_state->dsc.compression_enable) {
3401 		drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset due to DSC being enabled\n",
3402 			    encoder->base.base.id, encoder->base.name);
3403 		crtc_state->uapi.mode_changed = true;
3404 		fastset = false;
3405 	}
3406 
3407 	if (CAN_PANEL_REPLAY(intel_dp)) {
3408 		drm_dbg_kms(&i915->drm,
3409 			    "[ENCODER:%d:%s] Forcing full modeset to compute panel replay state\n",
3410 			    encoder->base.base.id, encoder->base.name);
3411 		crtc_state->uapi.mode_changed = true;
3412 		fastset = false;
3413 	}
3414 
3415 	return fastset;
3416 }
3417 
3418 static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
3419 {
3420 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3421 
3422 	/* Clear the cached register set to avoid using stale values */
3423 
3424 	memset(intel_dp->pcon_dsc_dpcd, 0, sizeof(intel_dp->pcon_dsc_dpcd));
3425 
3426 	if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER,
3427 			     intel_dp->pcon_dsc_dpcd,
3428 			     sizeof(intel_dp->pcon_dsc_dpcd)) < 0)
3429 		drm_err(&i915->drm, "Failed to read DPCD register 0x%x\n",
3430 			DP_PCON_DSC_ENCODER);
3431 
3432 	drm_dbg_kms(&i915->drm, "PCON ENCODER DSC DPCD: %*ph\n",
3433 		    (int)sizeof(intel_dp->pcon_dsc_dpcd), intel_dp->pcon_dsc_dpcd);
3434 }
3435 
3436 static int intel_dp_pcon_get_frl_mask(u8 frl_bw_mask)
3437 {
3438 	int bw_gbps[] = {9, 18, 24, 32, 40, 48};
3439 	int i;
3440 
3441 	for (i = ARRAY_SIZE(bw_gbps) - 1; i >= 0; i--) {
3442 		if (frl_bw_mask & (1 << i))
3443 			return bw_gbps[i];
3444 	}
3445 	return 0;
3446 }
3447 
3448 static int intel_dp_pcon_set_frl_mask(int max_frl)
3449 {
3450 	switch (max_frl) {
3451 	case 48:
3452 		return DP_PCON_FRL_BW_MASK_48GBPS;
3453 	case 40:
3454 		return DP_PCON_FRL_BW_MASK_40GBPS;
3455 	case 32:
3456 		return DP_PCON_FRL_BW_MASK_32GBPS;
3457 	case 24:
3458 		return DP_PCON_FRL_BW_MASK_24GBPS;
3459 	case 18:
3460 		return DP_PCON_FRL_BW_MASK_18GBPS;
3461 	case 9:
3462 		return DP_PCON_FRL_BW_MASK_9GBPS;
3463 	}
3464 
3465 	return 0;
3466 }
3467 
3468 static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp)
3469 {
3470 	struct intel_connector *intel_connector = intel_dp->attached_connector;
3471 	struct drm_connector *connector = &intel_connector->base;
3472 	int max_frl_rate;
3473 	int max_lanes, rate_per_lane;
3474 	int max_dsc_lanes, dsc_rate_per_lane;
3475 
3476 	max_lanes = connector->display_info.hdmi.max_lanes;
3477 	rate_per_lane = connector->display_info.hdmi.max_frl_rate_per_lane;
3478 	max_frl_rate = max_lanes * rate_per_lane;
3479 
3480 	if (connector->display_info.hdmi.dsc_cap.v_1p2) {
3481 		max_dsc_lanes = connector->display_info.hdmi.dsc_cap.max_lanes;
3482 		dsc_rate_per_lane = connector->display_info.hdmi.dsc_cap.max_frl_rate_per_lane;
3483 		if (max_dsc_lanes && dsc_rate_per_lane)
3484 			max_frl_rate = min(max_frl_rate, max_dsc_lanes * dsc_rate_per_lane);
3485 	}
3486 
3487 	return max_frl_rate;
3488 }
3489 
3490 static bool
3491 intel_dp_pcon_is_frl_trained(struct intel_dp *intel_dp,
3492 			     u8 max_frl_bw_mask, u8 *frl_trained_mask)
3493 {
3494 	if (drm_dp_pcon_hdmi_link_active(&intel_dp->aux) &&
3495 	    drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, frl_trained_mask) == DP_PCON_HDMI_MODE_FRL &&
3496 	    *frl_trained_mask >= max_frl_bw_mask)
3497 		return true;
3498 
3499 	return false;
3500 }
3501 
/*
 * Train the PCON's HDMI FRL link at the highest bandwidth supported by
 * both the PCON and the HDMI sink. Returns 0 on success (or when already
 * trained), a negative errno on failure/timeout.
 */
static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
{
#define TIMEOUT_FRL_READY_MS 500
#define TIMEOUT_HDMI_LINK_ACTIVE_MS 1000

	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int max_frl_bw, max_pcon_frl_bw, max_edid_frl_bw, ret;
	u8 max_frl_bw_mask = 0, frl_trained_mask;
	bool is_active;

	/* Usable FRL bandwidth is limited by both PCON and HDMI sink. */
	max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
	drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw);

	max_edid_frl_bw = intel_dp_hdmi_sink_max_frl(intel_dp);
	drm_dbg(&i915->drm, "Sink max rate from EDID = %d Gbps\n", max_edid_frl_bw);

	max_frl_bw = min(max_edid_frl_bw, max_pcon_frl_bw);

	if (max_frl_bw <= 0)
		return -EINVAL;

	max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw);
	drm_dbg(&i915->drm, "MAX_FRL_BW_MASK = %u\n", max_frl_bw_mask);

	/* Skip (re)training if the link is already trained at this rate. */
	if (intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask))
		goto frl_trained;

	ret = drm_dp_pcon_frl_prepare(&intel_dp->aux, false);
	if (ret < 0)
		return ret;
	/* Wait for PCON to be FRL Ready */
	wait_for(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux) == true, TIMEOUT_FRL_READY_MS);

	if (!is_active)
		return -ETIMEDOUT;

	ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw,
					  DP_PCON_ENABLE_SEQUENTIAL_LINK);
	if (ret < 0)
		return ret;
	ret = drm_dp_pcon_frl_configure_2(&intel_dp->aux, max_frl_bw_mask,
					  DP_PCON_FRL_LINK_TRAIN_NORMAL);
	if (ret < 0)
		return ret;
	ret = drm_dp_pcon_frl_enable(&intel_dp->aux);
	if (ret < 0)
		return ret;
	/*
	 * Wait for FRL to be completed
	 * Check if the HDMI Link is up and active.
	 */
	wait_for(is_active =
		 intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask),
		 TIMEOUT_HDMI_LINK_ACTIVE_MS);

	if (!is_active)
		return -ETIMEDOUT;

frl_trained:
	/* Record the actually trained rate for later bandwidth checks. */
	drm_dbg(&i915->drm, "FRL_TRAINED_MASK = %u\n", frl_trained_mask);
	intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask);
	intel_dp->frl.is_trained = true;
	drm_dbg(&i915->drm, "FRL trained with : %d Gbps\n", intel_dp->frl.trained_rate_gbps);

	return 0;
}
3568 
3569 static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp)
3570 {
3571 	if (drm_dp_is_branch(intel_dp->dpcd) &&
3572 	    intel_dp_has_hdmi_sink(intel_dp) &&
3573 	    intel_dp_hdmi_sink_max_frl(intel_dp) > 0)
3574 		return true;
3575 
3576 	return false;
3577 }
3578 
3579 static
3580 int intel_dp_pcon_set_tmds_mode(struct intel_dp *intel_dp)
3581 {
3582 	int ret;
3583 	u8 buf = 0;
3584 
3585 	/* Set PCON source control mode */
3586 	buf |= DP_PCON_ENABLE_SOURCE_CTL_MODE;
3587 
3588 	ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
3589 	if (ret < 0)
3590 		return ret;
3591 
3592 	/* Set HDMI LINK ENABLE */
3593 	buf |= DP_PCON_ENABLE_HDMI_LINK;
3594 	ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
3595 	if (ret < 0)
3596 		return ret;
3597 
3598 	return 0;
3599 }
3600 
3601 void intel_dp_check_frl_training(struct intel_dp *intel_dp)
3602 {
3603 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3604 
3605 	/*
3606 	 * Always go for FRL training if:
3607 	 * -PCON supports SRC_CTL_MODE (VESA DP2.0-HDMI2.1 PCON Spec Draft-1 Sec-7)
3608 	 * -sink is HDMI2.1
3609 	 */
3610 	if (!(intel_dp->downstream_ports[2] & DP_PCON_SOURCE_CTL_MODE) ||
3611 	    !intel_dp_is_hdmi_2_1_sink(intel_dp) ||
3612 	    intel_dp->frl.is_trained)
3613 		return;
3614 
3615 	if (intel_dp_pcon_start_frl_training(intel_dp) < 0) {
3616 		int ret, mode;
3617 
3618 		drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n");
3619 		ret = intel_dp_pcon_set_tmds_mode(intel_dp);
3620 		mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL);
3621 
3622 		if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS)
3623 			drm_dbg(&dev_priv->drm, "Issue with PCON, cannot set TMDS mode\n");
3624 	} else {
3625 		drm_dbg(&dev_priv->drm, "FRL training Completed\n");
3626 	}
3627 }
3628 
3629 static int
3630 intel_dp_pcon_dsc_enc_slice_height(const struct intel_crtc_state *crtc_state)
3631 {
3632 	int vactive = crtc_state->hw.adjusted_mode.vdisplay;
3633 
3634 	return intel_hdmi_dsc_get_slice_height(vactive);
3635 }
3636 
3637 static int
3638 intel_dp_pcon_dsc_enc_slices(struct intel_dp *intel_dp,
3639 			     const struct intel_crtc_state *crtc_state)
3640 {
3641 	struct intel_connector *intel_connector = intel_dp->attached_connector;
3642 	struct drm_connector *connector = &intel_connector->base;
3643 	int hdmi_throughput = connector->display_info.hdmi.dsc_cap.clk_per_slice;
3644 	int hdmi_max_slices = connector->display_info.hdmi.dsc_cap.max_slices;
3645 	int pcon_max_slices = drm_dp_pcon_dsc_max_slices(intel_dp->pcon_dsc_dpcd);
3646 	int pcon_max_slice_width = drm_dp_pcon_dsc_max_slice_width(intel_dp->pcon_dsc_dpcd);
3647 
3648 	return intel_hdmi_dsc_get_num_slices(crtc_state, pcon_max_slices,
3649 					     pcon_max_slice_width,
3650 					     hdmi_max_slices, hdmi_throughput);
3651 }
3652 
3653 static int
3654 intel_dp_pcon_dsc_enc_bpp(struct intel_dp *intel_dp,
3655 			  const struct intel_crtc_state *crtc_state,
3656 			  int num_slices, int slice_width)
3657 {
3658 	struct intel_connector *intel_connector = intel_dp->attached_connector;
3659 	struct drm_connector *connector = &intel_connector->base;
3660 	int output_format = crtc_state->output_format;
3661 	bool hdmi_all_bpp = connector->display_info.hdmi.dsc_cap.all_bpp;
3662 	int pcon_fractional_bpp = drm_dp_pcon_dsc_bpp_incr(intel_dp->pcon_dsc_dpcd);
3663 	int hdmi_max_chunk_bytes =
3664 		connector->display_info.hdmi.dsc_cap.total_chunk_kbytes * 1024;
3665 
3666 	return intel_hdmi_dsc_get_bpp(pcon_fractional_bpp, slice_width,
3667 				      num_slices, output_format, hdmi_all_bpp,
3668 				      hdmi_max_chunk_bytes);
3669 }
3670 
/*
 * Program the PCON's DSC encoder PPS override parameters (slice height,
 * slice width and bpp), needed when the PCON re-compresses the stream for
 * an HDMI2.1 DSC-capable sink. No-op if any prerequisite is missing.
 */
void
intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	u8 pps_param[6];
	int slice_height;
	int slice_width;
	int num_slices;
	int bits_per_pixel;
	int ret;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector *connector;
	bool hdmi_is_dsc_1_2;

	if (!intel_dp_is_hdmi_2_1_sink(intel_dp))
		return;

	if (!intel_connector)
		return;
	connector = &intel_connector->base;
	hdmi_is_dsc_1_2 = connector->display_info.hdmi.dsc_cap.v_1p2;

	/* Both the PCON encoder and the HDMI sink must support DSC 1.2. */
	if (!drm_dp_pcon_enc_is_dsc_1_2(intel_dp->pcon_dsc_dpcd) ||
	    !hdmi_is_dsc_1_2)
		return;

	slice_height = intel_dp_pcon_dsc_enc_slice_height(crtc_state);
	if (!slice_height)
		return;

	num_slices = intel_dp_pcon_dsc_enc_slices(intel_dp, crtc_state);
	if (!num_slices)
		return;

	slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay,
				   num_slices);

	bits_per_pixel = intel_dp_pcon_dsc_enc_bpp(intel_dp, crtc_state,
						   num_slices, slice_width);
	if (!bits_per_pixel)
		return;

	/* Pack little-endian 16-bit slice height/width and 10-bit bpp. */
	pps_param[0] = slice_height & 0xFF;
	pps_param[1] = slice_height >> 8;
	pps_param[2] = slice_width & 0xFF;
	pps_param[3] = slice_width >> 8;
	pps_param[4] = bits_per_pixel & 0xFF;
	pps_param[5] = (bits_per_pixel >> 8) & 0x3;

	ret = drm_dp_pcon_pps_override_param(&intel_dp->aux, pps_param);
	if (ret < 0)
		drm_dbg_kms(&i915->drm, "Failed to set pcon DSC\n");
}
3725 
3726 void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
3727 					   const struct intel_crtc_state *crtc_state)
3728 {
3729 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3730 	bool ycbcr444_to_420 = false;
3731 	bool rgb_to_ycbcr = false;
3732 	u8 tmp;
3733 
3734 	if (intel_dp->dpcd[DP_DPCD_REV] < 0x13)
3735 		return;
3736 
3737 	if (!drm_dp_is_branch(intel_dp->dpcd))
3738 		return;
3739 
3740 	tmp = intel_dp_has_hdmi_sink(intel_dp) ? DP_HDMI_DVI_OUTPUT_CONFIG : 0;
3741 
3742 	if (drm_dp_dpcd_writeb(&intel_dp->aux,
3743 			       DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1)
3744 		drm_dbg_kms(&i915->drm, "Failed to %s protocol converter HDMI mode\n",
3745 			    str_enable_disable(intel_dp_has_hdmi_sink(intel_dp)));
3746 
3747 	if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
3748 		switch (crtc_state->output_format) {
3749 		case INTEL_OUTPUT_FORMAT_YCBCR420:
3750 			break;
3751 		case INTEL_OUTPUT_FORMAT_YCBCR444:
3752 			ycbcr444_to_420 = true;
3753 			break;
3754 		case INTEL_OUTPUT_FORMAT_RGB:
3755 			rgb_to_ycbcr = true;
3756 			ycbcr444_to_420 = true;
3757 			break;
3758 		default:
3759 			MISSING_CASE(crtc_state->output_format);
3760 			break;
3761 		}
3762 	} else if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
3763 		switch (crtc_state->output_format) {
3764 		case INTEL_OUTPUT_FORMAT_YCBCR444:
3765 			break;
3766 		case INTEL_OUTPUT_FORMAT_RGB:
3767 			rgb_to_ycbcr = true;
3768 			break;
3769 		default:
3770 			MISSING_CASE(crtc_state->output_format);
3771 			break;
3772 		}
3773 	}
3774 
3775 	tmp = ycbcr444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
3776 
3777 	if (drm_dp_dpcd_writeb(&intel_dp->aux,
3778 			       DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
3779 		drm_dbg_kms(&i915->drm,
3780 			    "Failed to %s protocol converter YCbCr 4:2:0 conversion mode\n",
3781 			    str_enable_disable(intel_dp->dfp.ycbcr_444_to_420));
3782 
3783 	tmp = rgb_to_ycbcr ? DP_CONVERSION_BT709_RGB_YCBCR_ENABLE : 0;
3784 
3785 	if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0)
3786 		drm_dbg_kms(&i915->drm,
3787 			    "Failed to %s protocol converter RGB->YCbCr conversion mode\n",
3788 			    str_enable_disable(tmp));
3789 }
3790 
3791 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
3792 {
3793 	u8 dprx = 0;
3794 
3795 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
3796 			      &dprx) != 1)
3797 		return false;
3798 	return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
3799 }
3800 
3801 static void intel_dp_read_dsc_dpcd(struct drm_dp_aux *aux,
3802 				   u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
3803 {
3804 	if (drm_dp_dpcd_read(aux, DP_DSC_SUPPORT, dsc_dpcd,
3805 			     DP_DSC_RECEIVER_CAP_SIZE) < 0) {
3806 		drm_err(aux->drm_dev,
3807 			"Failed to read DPCD register 0x%x\n",
3808 			DP_DSC_SUPPORT);
3809 		return;
3810 	}
3811 
3812 	drm_dbg_kms(aux->drm_dev, "DSC DPCD: %*ph\n",
3813 		    DP_DSC_RECEIVER_CAP_SIZE,
3814 		    dsc_dpcd);
3815 }
3816 
/*
 * Read and cache the sink's DSC and FEC capabilities on the connector.
 * Both caches are cleared up front so a sink without DSC/FEC support
 * never sees stale values from a previously connected sink.
 */
void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	/*
	 * Clear the cached register set to avoid using stale values
	 * for the sinks that do not support DSC.
	 */
	memset(connector->dp.dsc_dpcd, 0, sizeof(connector->dp.dsc_dpcd));

	/* Clear fec_capable to avoid using stale values */
	connector->dp.fec_capability = 0;

	/* DSC/FEC caps are only defined for DPCD 1.4+ sinks. */
	if (dpcd_rev < DP_DPCD_REV_14)
		return;

	intel_dp_read_dsc_dpcd(connector->dp.dsc_decompression_aux,
			       connector->dp.dsc_dpcd);

	if (drm_dp_dpcd_readb(connector->dp.dsc_decompression_aux, DP_FEC_CAPABILITY,
			      &connector->dp.fec_capability) < 0) {
		drm_err(&i915->drm, "Failed to read FEC DPCD register\n");
		return;
	}

	drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n",
		    connector->dp.fec_capability);
}
3845 
3846 static void intel_edp_get_dsc_sink_cap(u8 edp_dpcd_rev, struct intel_connector *connector)
3847 {
3848 	if (edp_dpcd_rev < DP_EDP_14)
3849 		return;
3850 
3851 	intel_dp_read_dsc_dpcd(connector->dp.dsc_decompression_aux, connector->dp.dsc_dpcd);
3852 }
3853 
/*
 * An MSO panel's EDID describes a single link segment; scale the
 * horizontal timings (minus the per-segment pixel overlap) and the pixel
 * clock by the link count to reconstruct the full-panel mode.
 */
static void intel_edp_mso_mode_fixup(struct intel_connector *connector,
				     struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	int n = intel_dp->mso_link_count;
	int overlap = intel_dp->mso_pixel_overlap;

	/* Nothing to do without a mode or when MSO is not in use. */
	if (!mode || !n)
		return;

	mode->hdisplay = (mode->hdisplay - overlap) * n;
	mode->hsync_start = (mode->hsync_start - overlap) * n;
	mode->hsync_end = (mode->hsync_end - overlap) * n;
	mode->htotal = (mode->htotal - overlap) * n;
	mode->clock *= n;

	/* Regenerate the mode name from the adjusted timings. */
	drm_mode_set_name(mode);

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] using generated MSO mode: " DRM_MODE_FMT "\n",
		    connector->base.base.id, connector->base.name,
		    DRM_MODE_ARG(mode));
}
3878 
/*
 * Override the VBT-provided max eDP bpp with the bpp the BIOS actually
 * programmed on the pipe, when the latter is higher. See the comment
 * below for why the VBT value cannot be trusted.
 */
void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_connector *connector = intel_dp->attached_connector;

	if (connector->panel.vbt.edp.bpp && pipe_bpp > connector->panel.vbt.edp.bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		drm_dbg_kms(&dev_priv->drm,
			    "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			    pipe_bpp, connector->panel.vbt.edp.bpp);
		connector->panel.vbt.edp.bpp = pipe_bpp;
	}
}
3905 
/*
 * Read and validate the sink's eDP MSO (Multi-SST Operation) link
 * capabilities, caching the link count and pixel overlap on intel_dp.
 * A link count of 0 means MSO is disabled/not used.
 */
static void intel_edp_mso_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_display_info *info = &connector->base.display_info;
	u8 mso;

	/* The MSO link capabilities register is only defined for eDP 1.4+. */
	if (intel_dp->edp_dpcd[0] < DP_EDP_14)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_MSO_LINK_CAPABILITIES, &mso) != 1) {
		drm_err(&i915->drm, "Failed to read MSO cap\n");
		return;
	}

	/* Valid configurations are SST or MSO 2x1, 2x2, 4x1 */
	mso &= DP_EDP_MSO_NUMBER_OF_LINKS_MASK;
	if (mso % 2 || mso > drm_dp_max_lane_count(intel_dp->dpcd)) {
		drm_err(&i915->drm, "Invalid MSO link count cap %u\n", mso);
		mso = 0;
	}

	if (mso) {
		drm_dbg_kms(&i915->drm, "Sink MSO %ux%u configuration, pixel overlap %u\n",
			    mso, drm_dp_max_lane_count(intel_dp->dpcd) / mso,
			    info->mso_pixel_overlap);
		/* Sink supports MSO but this platform cannot drive it. */
		if (!HAS_MSO(i915)) {
			drm_err(&i915->drm, "No source MSO support, disabling\n");
			mso = 0;
		}
	}

	intel_dp->mso_link_count = mso;
	intel_dp->mso_pixel_overlap = mso ? info->mso_pixel_overlap : 0;
}
3941 
/*
 * One-time initialization of all eDP sink capabilities: base DPCD, sink
 * description/quirks, eDP display control registers, PSR caps, supported
 * link rates, DSC caps and the Intel source OUI. Returns false only when
 * the base DPCD read fails. The ordering of the steps below matters; see
 * the inline comments.
 */
static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	/* this function is meant to be called only once */
	drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);

	if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0)
		return false;

	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
			 drm_dp_is_branch(intel_dp->dpcd));
	intel_init_dpcd_quirks(intel_dp, &intel_dp->desc.ident);

	/*
	 * Read the eDP display control registers.
	 *
	 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
	 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
	 * set, but require eDP 1.4+ detection (e.g. for supported link rates
	 * method). The display control registers should read zero if they're
	 * not supported anyway.
	 */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
			     sizeof(intel_dp->edp_dpcd)) {
		drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
			    (int)sizeof(intel_dp->edp_dpcd),
			    intel_dp->edp_dpcd);

		intel_dp->use_max_params = intel_dp->edp_dpcd[0] < DP_EDP_14;
	}

	/*
	 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
	 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
	 */
	intel_psr_init_dpcd(intel_dp);

	/* Clear the default sink rates */
	intel_dp->num_sink_rates = 0;

	/* Read the eDP 1.4+ supported link rates. */
	if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				sink_rates, sizeof(sink_rates));

		/* The rate table is zero-terminated. */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read multiplied by 200kHz gives the per-lane
			 * link rate in kHz. The source rates are, however,
			 * stored in terms of LS_Clk kHz. The full conversion
			 * back to symbols is
			 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
			 */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	/*
	 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
	 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
	 */
	if (intel_dp->num_sink_rates)
		intel_dp->use_rate_select = true;
	else
		intel_dp_set_sink_rates(intel_dp);
	intel_dp_set_max_sink_lane_count(intel_dp);

	/* Read the eDP DSC DPCD registers */
	if (HAS_DSC(dev_priv))
		intel_edp_get_dsc_sink_cap(intel_dp->edp_dpcd[0],
					   connector);

	/*
	 * If needed, program our source OUI so we can make various Intel-specific AUX services
	 * available (such as HDR backlight controls)
	 */
	intel_edp_init_source_oui(intel_dp, true);

	return true;
}
4034 
4035 static bool
4036 intel_dp_has_sink_count(struct intel_dp *intel_dp)
4037 {
4038 	if (!intel_dp->attached_connector)
4039 		return false;
4040 
4041 	return drm_dp_read_sink_count_cap(&intel_dp->attached_connector->base,
4042 					  intel_dp->dpcd,
4043 					  &intel_dp->desc);
4044 }
4045 
/*
 * Refresh the cached sink link rates and max lane count from the DPCD,
 * then recompute the rates common to source and sink.
 */
void intel_dp_update_sink_caps(struct intel_dp *intel_dp)
{
	intel_dp_set_sink_rates(intel_dp);
	intel_dp_set_max_sink_lane_count(intel_dp);
	intel_dp_set_common_rates(intel_dp);
}
4052 
/*
 * Re-read the sink's DPCD state on (re)detection: LTTPR/DPRX caps, sink
 * description and rates (non-eDP only), sink count and downstream port
 * info. Returns false when the sink looks absent or unusable.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	int ret;

	if (intel_dp_init_lttpr_and_dprx_caps(intel_dp) < 0)
		return false;

	/*
	 * Don't clobber cached eDP rates. Also skip re-reading
	 * the OUI/ID since we know it won't change.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
				 drm_dp_is_branch(intel_dp->dpcd));

		intel_init_dpcd_quirks(intel_dp, &intel_dp->desc.ident);

		intel_dp_update_sink_caps(intel_dp);
	}

	if (intel_dp_has_sink_count(intel_dp)) {
		ret = drm_dp_read_sink_count(&intel_dp->aux);
		if (ret < 0)
			return false;

		/*
		 * Sink count can change between short pulse hpd hence
		 * a member variable in intel_dp will track any changes
		 * between short pulse interrupts.
		 */
		intel_dp->sink_count = ret;

		/*
		 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
		 * a dongle is present but no display. Unless we require to know
		 * if a dongle is present or not, we don't need to update
		 * downstream port information. So, an early return here saves
		 * time from performing other operations which are not required.
		 */
		if (!intel_dp->sink_count)
			return false;
	}

	return drm_dp_read_downstream_info(&intel_dp->aux, intel_dp->dpcd,
					   intel_dp->downstream_ports) == 0;
}
4100 
4101 static const char *intel_dp_mst_mode_str(enum drm_dp_mst_mode mst_mode)
4102 {
4103 	if (mst_mode == DRM_DP_MST)
4104 		return "MST";
4105 	else if (mst_mode == DRM_DP_SST_SIDEBAND_MSG)
4106 		return "SST w/ sideband messaging";
4107 	else
4108 		return "SST";
4109 }
4110 
/*
 * Pick the MST mode to use given the sink's advertised mode, falling
 * back to plain SST when the module parameter, the source, or the
 * channel coding requirements rule MST out.
 */
static enum drm_dp_mst_mode
intel_dp_mst_mode_choose(struct intel_dp *intel_dp,
			 enum drm_dp_mst_mode sink_mst_mode)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	if (!i915->display.params.enable_dp_mst)
		return DRM_DP_SST;

	if (!intel_dp_mst_source_support(intel_dp))
		return DRM_DP_SST;

	/* SST sideband messaging requires 128b/132b channel coding support. */
	if (sink_mst_mode == DRM_DP_SST_SIDEBAND_MSG &&
	    !(intel_dp->dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_128B132B))
		return DRM_DP_SST;

	return sink_mst_mode;
}
4129 
/*
 * Determine the MST mode to use for this sink, combining the sink's
 * advertised capability with source support and the module parameter,
 * and log the decision.
 */
static enum drm_dp_mst_mode
intel_dp_mst_detect(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	enum drm_dp_mst_mode sink_mst_mode;
	enum drm_dp_mst_mode mst_detect;

	sink_mst_mode = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);

	mst_detect = intel_dp_mst_mode_choose(intel_dp, sink_mst_mode);

	drm_dbg_kms(&i915->drm,
		    "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s -> enable: %s\n",
		    encoder->base.base.id, encoder->base.name,
		    str_yes_no(intel_dp_mst_source_support(intel_dp)),
		    intel_dp_mst_mode_str(sink_mst_mode),
		    str_yes_no(i915->display.params.enable_dp_mst),
		    intel_dp_mst_mode_str(mst_detect));

	return mst_detect;
}
4152 
/*
 * Apply the previously detected MST mode to the topology manager.
 * Anything other than plain SST (i.e. MST or SST with sideband
 * messaging) enables the MST topology manager.
 */
static void
intel_dp_mst_configure(struct intel_dp *intel_dp)
{
	if (!intel_dp_mst_source_support(intel_dp))
		return;

	intel_dp->is_mst = intel_dp->mst_detect != DRM_DP_SST;

	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);

	/* Avoid stale info on the next detect cycle. */
	intel_dp->mst_detect = DRM_DP_SST;
}
4166 
4167 static void
4168 intel_dp_mst_disconnect(struct intel_dp *intel_dp)
4169 {
4170 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4171 
4172 	if (!intel_dp->is_mst)
4173 		return;
4174 
4175 	drm_dbg_kms(&i915->drm, "MST device may have disappeared %d vs %d\n",
4176 		    intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4177 	intel_dp->is_mst = false;
4178 	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4179 }
4180 
/*
 * Read the 4 ESI (Event Status Indicator) bytes starting at
 * DP_SINK_COUNT_ESI. Returns true only if all 4 bytes were read.
 */
static bool
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *esi)
{
	return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, esi, 4) == 4;
}
4186 
4187 static bool intel_dp_ack_sink_irq_esi(struct intel_dp *intel_dp, u8 esi[4])
4188 {
4189 	int retry;
4190 
4191 	for (retry = 0; retry < 3; retry++) {
4192 		if (drm_dp_dpcd_write(&intel_dp->aux, DP_SINK_COUNT_ESI + 1,
4193 				      &esi[1], 3) == 3)
4194 			return true;
4195 	}
4196 
4197 	return false;
4198 }
4199 
4200 bool
4201 intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
4202 		       const struct drm_connector_state *conn_state)
4203 {
4204 	/*
4205 	 * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
4206 	 * of Color Encoding Format and Content Color Gamut], in order to
4207 	 * sending YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP.
4208 	 */
4209 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
4210 		return true;
4211 
4212 	switch (conn_state->colorspace) {
4213 	case DRM_MODE_COLORIMETRY_SYCC_601:
4214 	case DRM_MODE_COLORIMETRY_OPYCC_601:
4215 	case DRM_MODE_COLORIMETRY_BT2020_YCC:
4216 	case DRM_MODE_COLORIMETRY_BT2020_RGB:
4217 	case DRM_MODE_COLORIMETRY_BT2020_CYCC:
4218 		return true;
4219 	default:
4220 		break;
4221 	}
4222 
4223 	return false;
4224 }
4225 
/*
 * Pack an Adaptive-Sync SDP into the raw dp_sdp wire format. Returns the
 * packed length (a full struct dp_sdp) or -ENOSPC if @size is too small.
 */
static ssize_t intel_dp_as_sdp_pack(const struct drm_dp_as_sdp *as_sdp,
				    struct dp_sdp *sdp, size_t size)
{
	size_t length = sizeof(struct dp_sdp);

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	/* Prepare AS (Adaptive Sync) SDP Header */
	sdp->sdp_header.HB0 = 0;
	sdp->sdp_header.HB1 = as_sdp->sdp_type;
	sdp->sdp_header.HB2 = 0x02;
	sdp->sdp_header.HB3 = as_sdp->length;

	/* Fill AS (Adaptive Sync) SDP Payload */
	sdp->db[0] = as_sdp->mode;
	/* vtotal: 16 bits little-endian across DB1/DB2 */
	sdp->db[1] = as_sdp->vtotal & 0xFF;
	sdp->db[2] = (as_sdp->vtotal >> 8) & 0xFF;
	/* target refresh rate: low 8 bits in DB3, high 2 bits in DB4[1:0] */
	sdp->db[3] = as_sdp->target_rr & 0xFF;
	sdp->db[4] = (as_sdp->target_rr >> 8) & 0x3;

	/* DB4 bit 5 flags the target refresh rate divider. */
	if (as_sdp->target_rr_divider)
		sdp->db[4] |= 0x20;

	return length;
}
4254 
/*
 * Pack an HDMI DRM (HDR static metadata) infoframe into a DP SDP.
 * Returns the number of meaningful bytes packed, or -ENOSPC on a
 * size mismatch.
 */
static ssize_t
intel_dp_hdr_metadata_infoframe_sdp_pack(struct drm_i915_private *i915,
					 const struct hdmi_drm_infoframe *drm_infoframe,
					 struct dp_sdp *sdp,
					 size_t size)
{
	size_t length = sizeof(struct dp_sdp);
	const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
	unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
	ssize_t len;

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	/* First pack to the standard HDMI wire format, then repack as SDP. */
	len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf));
	if (len < 0) {
		drm_dbg_kms(&i915->drm, "buffer size is smaller than hdr metadata infoframe\n");
		return -ENOSPC;
	}

	if (len != infoframe_size) {
		drm_dbg_kms(&i915->drm, "wrong static hdr metadata size\n");
		return -ENOSPC;
	}

	/*
	 * Set up the infoframe sdp packet for HDR static metadata.
	 * Prepare VSC Header for SU as per DP 1.4a spec,
	 * Table 2-100 and Table 2-101
	 */

	/* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */
	sdp->sdp_header.HB0 = 0;
	/*
	 * Packet Type 80h + Non-audio INFOFRAME Type value
	 * HDMI_INFOFRAME_TYPE_DRM: 0x87
	 * - 80h + Non-audio INFOFRAME Type value
	 * - InfoFrame Type: 0x07
	 *    [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame]
	 */
	sdp->sdp_header.HB1 = drm_infoframe->type;
	/*
	 * Least Significant Eight Bits of (Data Byte Count – 1)
	 * infoframe_size - 1
	 */
	sdp->sdp_header.HB2 = 0x1D;
	/* INFOFRAME SDP Version Number */
	sdp->sdp_header.HB3 = (0x13 << 2);
	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	sdp->db[0] = drm_infoframe->version;
	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	sdp->db[1] = drm_infoframe->length;
	/*
	 * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after
	 * HDMI_INFOFRAME_HEADER_SIZE
	 */
	BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2);
	memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
	       HDMI_DRM_INFOFRAME_SIZE);

	/*
	 * Size of DP infoframe sdp packet for HDR static metadata consists of
	 * - DP SDP Header(struct dp_sdp_header): 4 bytes
	 * - Two Data Blocks: 2 bytes
	 *    CTA Header Byte2 (INFOFRAME Version Number)
	 *    CTA Header Byte3 (Length of INFOFRAME)
	 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes
	 *
	 * Prior to GEN11's GMP register size is identical to DP HDR static metadata
	 * infoframe size. But GEN11+ has larger than that size, write_infoframe
	 * will pad rest of the size.
	 */
	return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE;
}
4331 
/*
 * Pack and write one SDP (VSC, Adaptive-Sync, or HDR gamut metadata)
 * to the hardware DIP registers, but only if that infoframe type is
 * enabled in the crtc state.
 */
static void intel_write_dp_sdp(struct intel_encoder *encoder,
			       const struct intel_crtc_state *crtc_state,
			       unsigned int type)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct dp_sdp sdp = {};
	ssize_t len;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	switch (type) {
	case DP_SDP_VSC:
		len = drm_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp);
		break;
	case HDMI_PACKET_TYPE_GAMUT_METADATA:
		len = intel_dp_hdr_metadata_infoframe_sdp_pack(dev_priv,
							       &crtc_state->infoframes.drm.drm,
							       &sdp, sizeof(sdp));
		break;
	case DP_SDP_ADAPTIVE_SYNC:
		len = intel_dp_as_sdp_pack(&crtc_state->infoframes.as_sdp, &sdp,
					   sizeof(sdp));
		break;
	default:
		MISSING_CASE(type);
		return;
	}

	/* A negative length means the pack helper rejected the payload. */
	if (drm_WARN_ON(&dev_priv->drm, len < 0))
		return;

	dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
}
4368 
/*
 * Enable or disable the DP SDPs for a transcoder: first mask off the
 * generic DIP enable bits in the DIP control register, then (when
 * enabling) write the VSC, Adaptive-Sync and HDR metadata SDPs.
 */
void intel_dp_set_infoframes(struct intel_encoder *encoder,
			     bool enable,
			     const struct intel_crtc_state *crtc_state,
			     const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	i915_reg_t reg = HSW_TVIDEO_DIP_CTL(dev_priv,
					    crtc_state->cpu_transcoder);
	u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
			 VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
			 VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;

	if (HAS_AS_SDP(dev_priv))
		dip_enable |= VIDEO_DIP_ENABLE_AS_ADL;

	u32 val = intel_de_read(dev_priv, reg) & ~dip_enable;

	/* TODO: Sanitize DSC enabling wrt. intel_dsc_dp_pps_write(). */
	if (!enable && HAS_DSC(dev_priv))
		val &= ~VDIP_ENABLE_PPS;

	/* When PSR is enabled, this routine doesn't disable VSC DIP */
	if (!crtc_state->has_psr)
		val &= ~VIDEO_DIP_ENABLE_VSC_HSW;

	intel_de_write(dev_priv, reg, val);
	intel_de_posting_read(dev_priv, reg);

	if (!enable)
		return;

	intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);
	intel_write_dp_sdp(encoder, crtc_state, DP_SDP_ADAPTIVE_SYNC);

	intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA);
}
4405 
4406 static
4407 int intel_dp_as_sdp_unpack(struct drm_dp_as_sdp *as_sdp,
4408 			   const void *buffer, size_t size)
4409 {
4410 	const struct dp_sdp *sdp = buffer;
4411 
4412 	if (size < sizeof(struct dp_sdp))
4413 		return -EINVAL;
4414 
4415 	memset(as_sdp, 0, sizeof(*as_sdp));
4416 
4417 	if (sdp->sdp_header.HB0 != 0)
4418 		return -EINVAL;
4419 
4420 	if (sdp->sdp_header.HB1 != DP_SDP_ADAPTIVE_SYNC)
4421 		return -EINVAL;
4422 
4423 	if (sdp->sdp_header.HB2 != 0x02)
4424 		return -EINVAL;
4425 
4426 	if ((sdp->sdp_header.HB3 & 0x3F) != 9)
4427 		return -EINVAL;
4428 
4429 	as_sdp->length = sdp->sdp_header.HB3 & DP_ADAPTIVE_SYNC_SDP_LENGTH;
4430 	as_sdp->mode = sdp->db[0] & DP_ADAPTIVE_SYNC_SDP_OPERATION_MODE;
4431 	as_sdp->vtotal = (sdp->db[2] << 8) | sdp->db[1];
4432 	as_sdp->target_rr = (u64)sdp->db[3] | ((u64)sdp->db[4] & 0x3);
4433 	as_sdp->target_rr_divider = sdp->db[4] & 0x20 ? true : false;
4434 
4435 	return 0;
4436 }
4437 
/*
 * Unpack a raw VSC SDP into struct drm_dp_vsc_sdp, validating the header
 * and decoding pixel format/colorimetry/bpc for revision 0x5 packets.
 * Returns 0 on success, -EINVAL on an unknown or malformed packet.
 */
static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
				   const void *buffer, size_t size)
{
	const struct dp_sdp *sdp = buffer;

	if (size < sizeof(struct dp_sdp))
		return -EINVAL;

	memset(vsc, 0, sizeof(*vsc));

	if (sdp->sdp_header.HB0 != 0)
		return -EINVAL;

	if (sdp->sdp_header.HB1 != DP_SDP_VSC)
		return -EINVAL;

	vsc->sdp_type = sdp->sdp_header.HB1;
	vsc->revision = sdp->sdp_header.HB2;
	vsc->length = sdp->sdp_header.HB3;

	if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) ||
	    (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe) ||
	    (sdp->sdp_header.HB2 == 0x6 && sdp->sdp_header.HB3 == 0x10)) {
		/*
		 * - HB2 = 0x2, HB3 = 0x8
		 *   VSC SDP supporting 3D stereo + PSR
		 * - HB2 = 0x4, HB3 = 0xe
		 *   VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of
		 *   first scan line of the SU region (applies to eDP v1.4b
		 *   and higher).
		 * - HB2 = 0x6, HB3 = 0x10
		 *   VSC SDP supporting 3D stereo + Panel Replay.
		 */
		return 0;
	} else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) {
		/*
		 * - HB2 = 0x5, HB3 = 0x13
		 *   VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry
		 *   Format.
		 */
		vsc->pixelformat = (sdp->db[16] >> 4) & 0xf;
		vsc->colorimetry = sdp->db[16] & 0xf;
		vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1;

		/* DB17[2:0] encodes the bits-per-component. */
		switch (sdp->db[17] & 0x7) {
		case 0x0:
			vsc->bpc = 6;
			break;
		case 0x1:
			vsc->bpc = 8;
			break;
		case 0x2:
			vsc->bpc = 10;
			break;
		case 0x3:
			vsc->bpc = 12;
			break;
		case 0x4:
			vsc->bpc = 16;
			break;
		default:
			MISSING_CASE(sdp->db[17] & 0x7);
			return -EINVAL;
		}

		vsc->content_type = sdp->db[18] & 0x7;
	} else {
		return -EINVAL;
	}

	return 0;
}
4510 
/*
 * Read back and decode the Adaptive-Sync SDP from the hardware DIP
 * registers into @as_sdp, but only if the AS infoframe is enabled in
 * the crtc state.
 */
static void
intel_read_dp_as_sdp(struct intel_encoder *encoder,
		     struct intel_crtc_state *crtc_state,
		     struct drm_dp_as_sdp *as_sdp)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	unsigned int type = DP_SDP_ADAPTIVE_SYNC;
	struct dp_sdp sdp = {};
	int ret;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
				 sizeof(sdp));

	ret = intel_dp_as_sdp_unpack(as_sdp, &sdp, sizeof(sdp));
	if (ret)
		drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP AS SDP\n");
}
4533 
/*
 * Unpack an HDR static metadata infoframe from a raw DP SDP, validating
 * every header byte against the values written by
 * intel_dp_hdr_metadata_infoframe_sdp_pack(). Returns 0 on success or a
 * negative error code.
 */
static int
intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe,
					   const void *buffer, size_t size)
{
	int ret;

	const struct dp_sdp *sdp = buffer;

	if (size < sizeof(struct dp_sdp))
		return -EINVAL;

	if (sdp->sdp_header.HB0 != 0)
		return -EINVAL;

	if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM)
		return -EINVAL;

	/*
	 * Least Significant Eight Bits of (Data Byte Count – 1)
	 * 1Dh (i.e., Data Byte Count = 30 bytes).
	 */
	if (sdp->sdp_header.HB2 != 0x1D)
		return -EINVAL;

	/* Most Significant Two Bits of (Data Byte Count – 1), Clear to 00b. */
	if ((sdp->sdp_header.HB3 & 0x3) != 0)
		return -EINVAL;

	/* INFOFRAME SDP Version Number */
	if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13)
		return -EINVAL;

	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	if (sdp->db[0] != 1)
		return -EINVAL;

	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE)
		return -EINVAL;

	ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2],
					     HDMI_DRM_INFOFRAME_SIZE);

	return ret;
}
4579 
4580 static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
4581 				  struct intel_crtc_state *crtc_state,
4582 				  struct drm_dp_vsc_sdp *vsc)
4583 {
4584 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
4585 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4586 	unsigned int type = DP_SDP_VSC;
4587 	struct dp_sdp sdp = {};
4588 	int ret;
4589 
4590 	if ((crtc_state->infoframes.enable &
4591 	     intel_hdmi_infoframe_enable(type)) == 0)
4592 		return;
4593 
4594 	dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp));
4595 
4596 	ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp));
4597 
4598 	if (ret)
4599 		drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n");
4600 }
4601 
/*
 * Read back and decode the HDR static metadata (gamut metadata) SDP from
 * the hardware DIP registers into @drm_infoframe, but only if that
 * infoframe is enabled in the crtc state.
 */
static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder,
						     struct intel_crtc_state *crtc_state,
						     struct hdmi_drm_infoframe *drm_infoframe)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA;
	struct dp_sdp sdp = {};
	int ret;

	if ((crtc_state->infoframes.enable &
	    intel_hdmi_infoframe_enable(type)) == 0)
		return;

	dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
				 sizeof(sdp));

	ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp,
							 sizeof(sdp));

	if (ret)
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to unpack DP HDR Metadata Infoframe SDP\n");
}
4626 
/*
 * Dispatch: read back one SDP of the given @type from the hardware into
 * the matching field of @crtc_state->infoframes (used for state readout).
 */
void intel_read_dp_sdp(struct intel_encoder *encoder,
		       struct intel_crtc_state *crtc_state,
		       unsigned int type)
{
	switch (type) {
	case DP_SDP_VSC:
		intel_read_dp_vsc_sdp(encoder, crtc_state,
				      &crtc_state->infoframes.vsc);
		break;
	case HDMI_PACKET_TYPE_GAMUT_METADATA:
		intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state,
							 &crtc_state->infoframes.drm.drm);
		break;
	case DP_SDP_ADAPTIVE_SYNC:
		intel_read_dp_as_sdp(encoder, crtc_state,
				     &crtc_state->infoframes.as_sdp);
		break;
	default:
		MISSING_CASE(type);
		break;
	}
}
4649 
/*
 * Handle the DP compliance link-training autotest request: read the
 * requested lane count and link rate from the sink, validate them
 * against our capabilities, and stash them for the test. Returns
 * DP_TEST_ACK or DP_TEST_NAK.
 */
static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int status = 0;
	int test_link_rate;
	u8 test_lane_count, test_link_bw;
	/* (DP CTS 1.2)
	 * 4.3.1.11
	 */
	/* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
				   &test_lane_count);

	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Lane count read failed\n");
		return DP_TEST_NAK;
	}
	test_lane_count &= DP_MAX_LANE_COUNT_MASK;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
				   &test_link_bw);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Link Rate read failed\n");
		return DP_TEST_NAK;
	}
	test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);

	/* Validate the requested link rate and lane count */
	if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
					test_lane_count))
		return DP_TEST_NAK;

	intel_dp->compliance.test_lane_count = test_lane_count;
	intel_dp->compliance.test_link_rate = test_link_rate;

	return DP_TEST_ACK;
}
4687 
/*
 * Handle a DP_TEST_LINK_VIDEO_PATTERN automated test request: read the
 * requested pattern, resolution and pixel format from the sink's test
 * DPCD registers, and cache the parameters in intel_dp->compliance.
 *
 * Only the color-ramp pattern with RGB/VESA range and 6 or 8 bpc is
 * supported; anything else is NAKed.
 */
static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 test_pattern;
	u8 test_misc;
	__be16 h_width, v_height;
	int status = 0;

	/* Read the TEST_PATTERN (DP CTS 3.1.5) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
				   &test_pattern);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Test pattern read failed\n");
		return DP_TEST_NAK;
	}
	if (test_pattern != DP_COLOR_RAMP)
		return DP_TEST_NAK;

	/* H/V dimensions are two-byte big-endian values in the DPCD. */
	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
				  &h_width, 2);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "H Width read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
				  &v_height, 2);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "V Height read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
				   &test_misc);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "TEST MISC read failed\n");
		return DP_TEST_NAK;
	}
	/* Only RGB with full (non-CEA) range is supported. */
	if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
		return DP_TEST_NAK;
	if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
		return DP_TEST_NAK;
	switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
	case DP_TEST_BIT_DEPTH_6:
		intel_dp->compliance.test_data.bpc = 6;
		break;
	case DP_TEST_BIT_DEPTH_8:
		intel_dp->compliance.test_data.bpc = 8;
		break;
	default:
		return DP_TEST_NAK;
	}

	intel_dp->compliance.test_data.video_pattern = test_pattern;
	intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
	intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return DP_TEST_ACK;
}
4749 
/*
 * Handle a DP_TEST_LINK_EDID_READ automated test request: if the EDID was
 * read successfully, write the checksum of its last block back to the sink
 * and request the preferred-resolution test mode; on a failed/corrupt EDID
 * read fall back to the failsafe test mode (DP CTS 1.2 Core r1.1 4.2.2.*).
 */
static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 test_result = DP_TEST_ACK;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;

	if (intel_connector->detect_edid == NULL ||
	    connector->edid_corrupt ||
	    intel_dp->aux.i2c_defer_count > 6) {
		/* Check EDID read for NACKs, DEFERs and corruption
		 * (DP CTS 1.2 Core r1.1)
		 *    4.2.2.4 : Failed EDID read, I2C_NAK
		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
		 *    4.2.2.6 : EDID corruption detected
		 * Use failsafe mode for all cases
		 */
		if (intel_dp->aux.i2c_nack_count > 0 ||
			intel_dp->aux.i2c_defer_count > 0)
			drm_dbg_kms(&i915->drm,
				    "EDID read had %d NACKs, %d DEFERs\n",
				    intel_dp->aux.i2c_nack_count,
				    intel_dp->aux.i2c_defer_count);
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
	} else {
		/* FIXME: Get rid of drm_edid_raw() */
		const struct edid *block = drm_edid_raw(intel_connector->detect_edid);

		/* We have to write the checksum of the last block read */
		block += block->extensions;

		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
				       block->checksum) <= 0)
			drm_dbg_kms(&i915->drm,
				    "Failed to write EDID checksum\n");

		/* Tell the sink that the checksum was also written. */
		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return test_result;
}
4795 
/*
 * Program the source-side PHY compliance test pattern selected in
 * intel_dp->compliance.test_data.phytest into the DDI_DP_COMP_CTL/PAT
 * registers (and, where applicable, DP_TP_CTL) of the pipe driving this
 * port.
 */
static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp,
					const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
			to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
	struct drm_dp_phy_test_params *data =
			&intel_dp->compliance.test_data.phytest;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	enum pipe pipe = crtc->pipe;
	u32 pattern_val;

	switch (data->phy_pattern) {
	case DP_LINK_QUAL_PATTERN_DISABLE:
		drm_dbg_kms(&dev_priv->drm, "Disable Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
		/* Also restore normal link training mode on gen10+. */
		if (DISPLAY_VER(dev_priv) >= 10)
			intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
				     DP_TP_CTL_TRAIN_PAT4_SEL_MASK | DP_TP_CTL_LINK_TRAIN_MASK,
				     DP_TP_CTL_LINK_TRAIN_NORMAL);
		break;
	case DP_LINK_QUAL_PATTERN_D10_2:
		drm_dbg_kms(&dev_priv->drm, "Set D10.2 Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
		break;
	case DP_LINK_QUAL_PATTERN_ERROR_RATE:
		drm_dbg_kms(&dev_priv->drm, "Set Error Count Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE |
			       DDI_DP_COMP_CTL_SCRAMBLED_0);
		break;
	case DP_LINK_QUAL_PATTERN_PRBS7:
		drm_dbg_kms(&dev_priv->drm, "Set PRBS7 Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
		break;
	case DP_LINK_QUAL_PATTERN_80BIT_CUSTOM:
		/*
		 * FIXME: Ideally pattern should come from DPCD 0x250. As
		 * current firmware of DPR-100 could not set it, so hardcoding
		 * now for compliance test.
		 */
		drm_dbg_kms(&dev_priv->drm,
			    "Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n");
		pattern_val = 0x3e0f83e0;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val);
		pattern_val = 0x0f83e0f8;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val);
		pattern_val = 0x0000f83e;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val);
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE |
			       DDI_DP_COMP_CTL_CUSTOM80);
		break;
	case DP_LINK_QUAL_PATTERN_CP2520_PAT_1:
		/*
		 * FIXME: Ideally pattern should come from DPCD 0x24A. As
		 * current firmware of DPR-100 could not set it, so hardcoding
		 * now for compliance test.
		 */
		drm_dbg_kms(&dev_priv->drm, "Set HBR2 compliance Phy Test Pattern\n");
		pattern_val = 0xFB;
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 |
			       pattern_val);
		break;
	case DP_LINK_QUAL_PATTERN_CP2520_PAT_3:
		/* CP2520 pattern 3 is TPS4, which needs gen10+ hardware. */
		if (DISPLAY_VER(dev_priv) < 10)  {
			drm_warn(&dev_priv->drm, "Platform does not support TPS4\n");
			break;
		}
		drm_dbg_kms(&dev_priv->drm, "Set TPS4 compliance Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
		intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
			     DP_TP_CTL_TRAIN_PAT4_SEL_MASK | DP_TP_CTL_LINK_TRAIN_MASK,
			     DP_TP_CTL_TRAIN_PAT4_SEL_TP4A | DP_TP_CTL_LINK_TRAIN_PAT4);
		break;
	default:
		drm_warn(&dev_priv->drm, "Invalid Phy Test Pattern\n");
	}
}
4878 
/*
 * Execute a pending PHY compliance test request: read the sink's requested
 * vswing/pre-emphasis from the link status, program the source signal
 * levels and the test pattern, then mirror the settings back to the sink's
 * DP_TRAINING_LANEx_SET and PHY test pattern DPCD registers.
 */
static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_dp_phy_test_params *data =
		&intel_dp->compliance.test_data.phytest;
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
					     link_status) < 0) {
		drm_dbg_kms(&i915->drm, "failed to get link status\n");
		return;
	}

	/* retrieve vswing & pre-emphasis setting */
	intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX,
				  link_status);

	intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX);

	intel_dp_phy_pattern_update(intel_dp, crtc_state);

	/* Let the sink know which drive settings the source is using. */
	drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
			  intel_dp->train_set, crtc_state->lane_count);

	drm_dp_set_phy_test_pattern(&intel_dp->aux, data,
				    intel_dp->dpcd[DP_DPCD_REV]);
}
4907 
4908 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4909 {
4910 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4911 	struct drm_dp_phy_test_params *data =
4912 		&intel_dp->compliance.test_data.phytest;
4913 
4914 	if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) {
4915 		drm_dbg_kms(&i915->drm, "DP Phy Test pattern AUX read failure\n");
4916 		return DP_TEST_NAK;
4917 	}
4918 
4919 	/* Set test active flag here so userspace doesn't interrupt things */
4920 	intel_dp->compliance.test_active = true;
4921 
4922 	return DP_TEST_ACK;
4923 }
4924 
/*
 * Service a DP_AUTOMATED_TEST_REQUEST interrupt: read the requested test
 * from DP_TEST_REQUEST, dispatch to the matching autotest handler, cache
 * the test type on ACK and write the ACK/NAK result to DP_TEST_RESPONSE.
 */
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 response = DP_TEST_NAK;
	u8 request = 0;
	int status;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm,
			    "Could not read test request from sink\n");
		/* Still report the NAK to the sink. */
		goto update_status;
	}

	switch (request) {
	case DP_TEST_LINK_TRAINING:
		drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n");
		response = intel_dp_autotest_link_training(intel_dp);
		break;
	case DP_TEST_LINK_VIDEO_PATTERN:
		drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n");
		response = intel_dp_autotest_video_pattern(intel_dp);
		break;
	case DP_TEST_LINK_EDID_READ:
		drm_dbg_kms(&i915->drm, "EDID test requested\n");
		response = intel_dp_autotest_edid(intel_dp);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n");
		response = intel_dp_autotest_phy_pattern(intel_dp);
		break;
	default:
		drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n",
			    request);
		break;
	}

	/* Remember the test type only if the handler ACKed it. */
	if (response & DP_TEST_ACK)
		intel_dp->compliance.test_type = request;

update_status:
	status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
	if (status <= 0)
		drm_dbg_kms(&i915->drm,
			    "Could not write test response to sink\n");
}
4971 
4972 static bool intel_dp_link_ok(struct intel_dp *intel_dp,
4973 			     u8 link_status[DP_LINK_STATUS_SIZE])
4974 {
4975 	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
4976 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
4977 	bool uhbr = intel_dp->link_rate >= 1000000;
4978 	bool ok;
4979 
4980 	if (uhbr)
4981 		ok = drm_dp_128b132b_lane_channel_eq_done(link_status,
4982 							  intel_dp->lane_count);
4983 	else
4984 		ok = drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
4985 
4986 	if (ok)
4987 		return true;
4988 
4989 	intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
4990 	drm_dbg_kms(&i915->drm,
4991 		    "[ENCODER:%d:%s] %s link not ok, retraining\n",
4992 		    encoder->base.base.id, encoder->base.name,
4993 		    uhbr ? "128b/132b" : "8b/10b");
4994 
4995 	return false;
4996 }
4997 
4998 static void
4999 intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, u8 *ack)
5000 {
5001 	bool handled = false;
5002 
5003 	drm_dp_mst_hpd_irq_handle_event(&intel_dp->mst_mgr, esi, ack, &handled);
5004 
5005 	if (esi[1] & DP_CP_IRQ) {
5006 		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
5007 		ack[1] |= DP_CP_IRQ;
5008 	}
5009 }
5010 
5011 static bool intel_dp_mst_link_status(struct intel_dp *intel_dp)
5012 {
5013 	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
5014 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
5015 	u8 link_status[DP_LINK_STATUS_SIZE] = {};
5016 	const size_t esi_link_status_size = DP_LINK_STATUS_SIZE - 2;
5017 
5018 	if (drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS_ESI, link_status,
5019 			     esi_link_status_size) != esi_link_status_size) {
5020 		drm_err(&i915->drm,
5021 			"[ENCODER:%d:%s] Failed to read link status\n",
5022 			encoder->base.base.id, encoder->base.name);
5023 		return false;
5024 	}
5025 
5026 	return intel_dp_link_ok(intel_dp, link_status);
5027 }
5028 
/**
 * intel_dp_check_mst_status - service any pending MST interrupts, check link status
 * @intel_dp: Intel DP struct
 *
 * Read any pending MST interrupts, call MST core to handle these and ack the
 * interrupts. Check if the main and AUX link state is ok.
 *
 * Returns:
 * - %true if pending interrupts were serviced (or no interrupts were
 *   pending) w/o detecting an error condition.
 * - %false if an error condition - like AUX failure or a loss of link - is
 *   detected, or another condition - like a DP tunnel BW state change - needs
 *   servicing from the hotplug work.
 */
static bool
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	bool link_ok = true;
	bool reprobe_needed = false;

	drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);

	/* Keep servicing ESI events until there is nothing left to ack. */
	for (;;) {
		u8 esi[4] = {};
		u8 ack[4] = {};

		if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) {
			drm_dbg_kms(&i915->drm,
				    "failed to get ESI - device may have failed\n");
			link_ok = false;

			break;
		}

		drm_dbg_kms(&i915->drm, "DPRX ESI: %4ph\n", esi);

		/* Only re-check the link once per call; skip if already bad. */
		if (intel_dp->active_mst_links > 0 && link_ok &&
		    esi[3] & LINK_STATUS_CHANGED) {
			if (!intel_dp_mst_link_status(intel_dp))
				link_ok = false;
			ack[3] |= LINK_STATUS_CHANGED;
		}

		intel_dp_mst_hpd_irq(intel_dp, esi, ack);

		if (esi[3] & DP_TUNNELING_IRQ) {
			if (drm_dp_tunnel_handle_irq(i915->display.dp_tunnel_mgr,
						     &intel_dp->aux))
				reprobe_needed = true;
			ack[3] |= DP_TUNNELING_IRQ;
		}

		/* Nothing to ack means all pending events were serviced. */
		if (!memchr_inv(ack, 0, sizeof(ack)))
			break;

		if (!intel_dp_ack_sink_irq_esi(intel_dp, ack))
			drm_dbg_kms(&i915->drm, "Failed to ack ESI\n");

		if (ack[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY))
			drm_dp_mst_hpd_irq_send_new_request(&intel_dp->mst_mgr);
	}

	/* A bad or force-retrain link is handled from the link check work. */
	if (!link_ok || intel_dp->link.force_retrain)
		intel_encoder_link_check_queue_work(encoder, 0);

	return !reprobe_needed;
}
5099 
5100 static void
5101 intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp)
5102 {
5103 	bool is_active;
5104 	u8 buf = 0;
5105 
5106 	is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux);
5107 	if (intel_dp->frl.is_trained && !is_active) {
5108 		if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf) < 0)
5109 			return;
5110 
5111 		buf &=  ~DP_PCON_ENABLE_HDMI_LINK;
5112 		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf) < 0)
5113 			return;
5114 
5115 		drm_dp_pcon_hdmi_frl_link_error_count(&intel_dp->aux, &intel_dp->attached_connector->base);
5116 
5117 		intel_dp->frl.is_trained = false;
5118 
5119 		/* Restart FRL training or fall back to TMDS mode */
5120 		intel_dp_check_frl_training(intel_dp);
5121 	}
5122 }
5123 
/*
 * Decide whether the link should be retrained, based on cached training
 * state, PSR state, forced-retrain requests, previous training failures
 * and the current DPRX link status. Safe to call from the short HPD path.
 */
static bool
intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];

	/* Never trained: nothing to retrain. */
	if (!intel_dp->link_trained)
		return false;

	/*
	 * While PSR source HW is enabled, it will control main-link sending
	 * frames, enabling and disabling it so trying to do a retrain will fail
	 * as the link would or not be on or it could mix training patterns
	 * and frame data at the same time causing retrain to fail.
	 * Also when exiting PSR, HW will retrain the link anyways fixing
	 * any link status error.
	 */
	if (intel_psr_enabled(intel_dp))
		return false;

	if (intel_dp->link.force_retrain)
		return true;

	if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
					     link_status) < 0)
		return false;

	/*
	 * Validate the cached values of intel_dp->link_rate and
	 * intel_dp->lane_count before attempting to retrain.
	 *
	 * FIXME would be nice to user the crtc state here, but since
	 * we need to call this from the short HPD handler that seems
	 * a bit hard.
	 */
	if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
					intel_dp->lane_count))
		return false;

	if (intel_dp->link.retrain_disabled)
		return false;

	/* A previously failed sequential training attempt warrants a retry. */
	if (intel_dp->link.seq_train_failures)
		return true;

	/* Retrain if link not ok */
	return !intel_dp_link_ok(intel_dp, link_status);
}
5171 
5172 static bool intel_dp_has_connector(struct intel_dp *intel_dp,
5173 				   const struct drm_connector_state *conn_state)
5174 {
5175 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5176 	struct intel_encoder *encoder;
5177 	enum pipe pipe;
5178 
5179 	if (!conn_state->best_encoder)
5180 		return false;
5181 
5182 	/* SST */
5183 	encoder = &dp_to_dig_port(intel_dp)->base;
5184 	if (conn_state->best_encoder == &encoder->base)
5185 		return true;
5186 
5187 	/* MST */
5188 	for_each_pipe(i915, pipe) {
5189 		encoder = &intel_dp->mst_encoders[pipe]->base;
5190 		if (conn_state->best_encoder == &encoder->base)
5191 			return true;
5192 	}
5193 
5194 	return false;
5195 }
5196 
/*
 * Collect the mask of pipes currently actively driven by this DP port
 * (SST or MST), locking each CRTC via @ctx and waiting for any pending
 * commit on the connector to finish hardware programming.
 *
 * Returns 0 on success or a negative error code (e.g. -EDEADLK from
 * drm_modeset_lock(), in which case the caller must back off and retry).
 */
int intel_dp_get_active_pipes(struct intel_dp *intel_dp,
			      struct drm_modeset_acquire_ctx *ctx,
			      u8 *pipe_mask)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	int ret = 0;

	*pipe_mask = 0;

	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state =
			connector->base.state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!intel_dp_has_connector(intel_dp, conn_state))
			continue;

		crtc = to_intel_crtc(conn_state->crtc);
		if (!crtc)
			continue;

		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			break;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));

		if (!crtc_state->hw.active)
			continue;

		/* Let any in-flight commit finish touching the hardware. */
		if (conn_state->commit)
			drm_WARN_ON(&i915->drm,
				    !wait_for_completion_timeout(&conn_state->commit->hw_done,
								 msecs_to_jiffies(5000)));

		*pipe_mask |= BIT(crtc->pipe);
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}
5244 
5245 static bool intel_dp_is_connected(struct intel_dp *intel_dp)
5246 {
5247 	struct intel_connector *connector = intel_dp->attached_connector;
5248 
5249 	return connector->base.status == connector_status_connected ||
5250 		intel_dp->is_mst;
5251 }
5252 
/*
 * Retrain the link of a connected DP port: for MST (and as a TODO,
 * eventually SST too) via a full modeset of the affected pipes, for SST
 * by re-running link training directly with FIFO underrun reporting
 * suppressed across the retrain.
 *
 * Returns 0 on success or if nothing needed doing; -EDEADLK requires the
 * caller to back off and retry with @ctx.
 */
static int intel_dp_retrain_link(struct intel_encoder *encoder,
				 struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc;
	bool mst_output = false;
	u8 pipe_mask;
	int ret;

	if (!intel_dp_is_connected(intel_dp))
		return 0;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	if (!intel_dp_needs_link_retrain(intel_dp))
		return 0;

	ret = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask);
	if (ret)
		return ret;

	if (pipe_mask == 0)
		return 0;

	/* Re-check: the state may have changed while acquiring the locks. */
	if (!intel_dp_needs_link_retrain(intel_dp))
		return 0;

	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link (forced %s)\n",
		    encoder->base.base.id, encoder->base.name,
		    str_yes_no(intel_dp->link.force_retrain));

	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* MST is retrained via a modeset, no underrun games needed. */
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
			mst_output = true;
			break;
		}

		/* Suppress underruns caused by re-training */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      intel_crtc_pch_transcoder(crtc), false);
	}

	/* TODO: use a modeset for SST as well. */
	if (mst_output) {
		ret = intel_modeset_commit_pipes(dev_priv, pipe_mask, ctx);

		if (ret && ret != -EDEADLK)
			drm_dbg_kms(&dev_priv->drm,
				    "[ENCODER:%d:%s] link retraining failed: %pe\n",
				    encoder->base.base.id, encoder->base.name,
				    ERR_PTR(ret));

		goto out;
	}

	/* SST: one training run covers the port; break after the first pipe. */
	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		intel_dp->link_trained = false;

		intel_dp_check_frl_training(intel_dp);
		intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
		intel_dp_start_link_train(NULL, intel_dp, crtc_state);
		intel_dp_stop_link_train(intel_dp, crtc_state);
		break;
	}

	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Keep underrun reporting disabled until things are stable */
		intel_crtc_wait_for_next_vblank(crtc);

		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      intel_crtc_pch_transcoder(crtc), true);
	}

out:
	/* On -EDEADLK the whole operation is retried, keep the flag set. */
	if (ret != -EDEADLK)
		intel_dp->link.force_retrain = false;

	return ret;
}
5349 
/*
 * Entry point for the encoder link-check work: retrain the link if
 * needed, retrying the modeset-lock acquisition on -EDEADLK via
 * intel_modeset_lock_ctx_retry().
 */
void intel_dp_link_check(struct intel_encoder *encoder)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	intel_modeset_lock_ctx_retry(&ctx, NULL, 0, ret)
		ret = intel_dp_retrain_link(encoder, &ctx);

	/* Any residual error here (other than handled -EDEADLK) is a bug. */
	drm_WARN_ON(&i915->drm, ret);
}
5361 
5362 void intel_dp_check_link_state(struct intel_dp *intel_dp)
5363 {
5364 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5365 	struct intel_encoder *encoder = &dig_port->base;
5366 
5367 	if (!intel_dp_is_connected(intel_dp))
5368 		return;
5369 
5370 	if (!intel_dp_needs_link_retrain(intel_dp))
5371 		return;
5372 
5373 	intel_encoder_link_check_queue_work(encoder, 0);
5374 }
5375 
/*
 * Build the mask of active pipes driven by this DP port for the PHY
 * compliance test, locking each CRTC via @ctx. Unlike
 * intel_dp_get_active_pipes(), pipes with a still-pending commit are
 * skipped rather than waited for.
 *
 * Returns 0 on success or a negative error code (possibly -EDEADLK).
 */
static int intel_dp_prep_phy_test(struct intel_dp *intel_dp,
				  struct drm_modeset_acquire_ctx *ctx,
				  u8 *pipe_mask)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	int ret = 0;

	*pipe_mask = 0;

	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state =
			connector->base.state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!intel_dp_has_connector(intel_dp, conn_state))
			continue;

		crtc = to_intel_crtc(conn_state->crtc);
		if (!crtc)
			continue;

		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			break;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));

		if (!crtc_state->hw.active)
			continue;

		/* Skip pipes whose commit hasn't reached the hardware yet. */
		if (conn_state->commit &&
		    !try_wait_for_completion(&conn_state->commit->hw_done))
			continue;

		*pipe_mask |= BIT(crtc->pipe);
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}
5422 
/*
 * Run a pending PHY compliance test on the first suitable active pipe of
 * this encoder (for gen12+ MST, only on the master transcoder).
 *
 * Returns 0 on success or a negative error code (possibly -EDEADLK, in
 * which case the caller backs off and retries).
 */
static int intel_dp_do_phy_test(struct intel_encoder *encoder,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc;
	u8 pipe_mask;
	int ret;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	ret = intel_dp_prep_phy_test(intel_dp, ctx, &pipe_mask);
	if (ret)
		return ret;

	if (pipe_mask == 0)
		return 0;

	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] PHY test\n",
		    encoder->base.base.id, encoder->base.name);

	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* test on the MST master transcoder */
		if (DISPLAY_VER(dev_priv) >= 12 &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
		    !intel_dp_mst_is_master_trans(crtc_state))
			continue;

		intel_dp_process_phy_request(intel_dp, crtc_state);
		break;
	}

	return 0;
}
5463 
5464 void intel_dp_phy_test(struct intel_encoder *encoder)
5465 {
5466 	struct drm_modeset_acquire_ctx ctx;
5467 	int ret;
5468 
5469 	drm_modeset_acquire_init(&ctx, 0);
5470 
5471 	for (;;) {
5472 		ret = intel_dp_do_phy_test(encoder, &ctx);
5473 
5474 		if (ret == -EDEADLK) {
5475 			drm_modeset_backoff(&ctx);
5476 			continue;
5477 		}
5478 
5479 		break;
5480 	}
5481 
5482 	drm_modeset_drop_locks(&ctx);
5483 	drm_modeset_acquire_fini(&ctx);
5484 	drm_WARN(encoder->base.dev, ret,
5485 		 "Acquiring modeset locks failed with %i\n", ret);
5486 }
5487 
/*
 * Service the DP_DEVICE_SERVICE_IRQ_VECTOR (DPCD 1.1+ only): ack the
 * pending bits and dispatch automated-test, content-protection and
 * sink-specific IRQs.
 */
static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 val;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
		return;

	/* Ack everything we read before handling it. */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);

	if (val & DP_AUTOMATED_TEST_REQUEST)
		intel_dp_handle_test_request(intel_dp);

	if (val & DP_CP_IRQ)
		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);

	if (val & DP_SINK_SPECIFIC_IRQ)
		drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
}
5511 
/*
 * Service the DP_LINK_SERVICE_IRQ_VECTOR_ESI0 (DPCD 1.1+ only): handle DP
 * tunneling and PCON HDMI link-status-change IRQs and ack the vector.
 *
 * Returns true if a full connector reprobe is needed (tunnel BW change).
 */
static bool intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	bool reprobe_needed = false;
	u8 val;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return false;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val)
		return false;

	if ((val & DP_TUNNELING_IRQ) &&
	    drm_dp_tunnel_handle_irq(i915->display.dp_tunnel_mgr,
				     &intel_dp->aux))
		reprobe_needed = true;

	/* If the ack write fails, skip further handling of this vector. */
	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1)
		return reprobe_needed;

	if (val & HDMI_LINK_STATUS_CHANGED)
		intel_dp_handle_hdmi_link_status_change(intel_dp);

	return reprobe_needed;
}
5539 
/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 *
 * intel_dp_short_pulse -  handles short pulse interrupts
 * when full detection is not required.
 * Returns %true if short pulse is handled and full detection
 * is NOT required and %false otherwise.
 */
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 old_sink_count = intel_dp->sink_count;
	bool reprobe_needed = false;
	bool ret;

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

	/*
	 * Now read the DPCD to see if it's actually running
	 * If the current value of sink count doesn't match with
	 * the value that was stored earlier or dpcd read failed
	 * we need to do full detection
	 */
	ret = intel_dp_get_dpcd(intel_dp);

	if ((old_sink_count != intel_dp->sink_count) || !ret) {
		/* No need to proceed if we are going to do full detect */
		return false;
	}

	/* Service pending device and link service IRQ vectors. */
	intel_dp_check_device_service_irq(intel_dp);
	reprobe_needed = intel_dp_check_link_service_irq(intel_dp);

	/* Handle CEC interrupts, if any */
	drm_dp_cec_irq(&intel_dp->aux);

	intel_dp_check_link_state(intel_dp);

	intel_psr_short_pulse(intel_dp);

	/* Compliance tests other than these two need no extra action here. */
	switch (intel_dp->compliance.test_type) {
	case DP_TEST_LINK_TRAINING:
		drm_dbg_kms(&dev_priv->drm,
			    "Link Training Compliance Test requested\n");
		/* Send a Hotplug Uevent to userspace to start modeset */
		drm_kms_helper_hotplug_event(&dev_priv->drm);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		drm_dbg_kms(&dev_priv->drm,
			    "PHY test pattern Compliance Test requested\n");
		/*
		 * Schedule long hpd to do the test
		 *
		 * FIXME get rid of the ad-hoc phy test modeset code
		 * and properly incorporate it into the normal modeset.
		 */
		reprobe_needed = true;
	}

	return !reprobe_needed;
}
5611 
/* XXX this is probably wrong for multiple downstream ports */
/*
 * Determine the connector status from the DPCD: read the receiver caps,
 * then for branch devices consult SINK_COUNT (if HPD-aware), MST state,
 * a DDC probe and finally the downstream port type to classify the
 * connection as connected/disconnected/unknown.
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u8 *dpcd = intel_dp->dpcd;
	u8 type;

	/* eDP is always connected; it must not take this path. */
	if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp)))
		return connector_status_connected;

	lspcon_resume(dig_port);

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	intel_dp->mst_detect = intel_dp_mst_detect(intel_dp);

	/* if there's no downstream port, we're done */
	if (!drm_dp_is_branch(dpcd))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp_has_sink_count(intel_dp) &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		return intel_dp->sink_count ?
		connector_status_connected : connector_status_disconnected;
	}

	if (intel_dp->mst_detect == DRM_DP_MST)
		return connector_status_connected;

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* DPCD < 1.1 uses the legacy downstream-port-present field. */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
5667 
/*
 * eDP panels are integrated and cannot be physically unplugged, so
 * detection trivially reports connected.
 */
static enum drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
	return connector_status_connected;
}
5673 
/*
 * Take the digital port's lock, if the port provides one, before doing a
 * live state check via intel_digital_port_connected_locked().
 */
void intel_digital_port_lock(struct intel_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);

	/* Not every port installs a lock hook. */
	if (dig_port->lock)
		dig_port->lock(dig_port);
}
5681 
/*
 * Release the lock taken by intel_digital_port_lock(), if the port
 * provides one.
 */
void intel_digital_port_unlock(struct intel_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);

	if (dig_port->unlock)
		dig_port->unlock(dig_port);
}
5689 
5690 /*
5691  * intel_digital_port_connected_locked - is the specified port connected?
5692  * @encoder: intel_encoder
5693  *
5694  * In cases where there's a connector physically connected but it can't be used
5695  * by our hardware we also return false, since the rest of the driver should
5696  * pretty much treat the port as disconnected. This is relevant for type-C
5697  * (starting on ICL) where there's ownership involved.
5698  *
5699  * The caller must hold the lock acquired by calling intel_digital_port_lock()
5700  * when calling this function.
5701  *
5702  * Return %true if port is connected, %false otherwise.
5703  */
5704 bool intel_digital_port_connected_locked(struct intel_encoder *encoder)
5705 {
5706 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5707 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
5708 	bool is_glitch_free = intel_tc_port_handles_hpd_glitches(dig_port);
5709 	bool is_connected = false;
5710 	intel_wakeref_t wakeref;
5711 
5712 	with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
5713 		unsigned long wait_expires = jiffies + msecs_to_jiffies_timeout(4);
5714 
5715 		do {
5716 			is_connected = dig_port->connected(encoder);
5717 			if (is_connected || is_glitch_free)
5718 				break;
5719 			usleep_range(10, 30);
5720 		} while (time_before(jiffies, wait_expires));
5721 	}
5722 
5723 	return is_connected;
5724 }
5725 
5726 bool intel_digital_port_connected(struct intel_encoder *encoder)
5727 {
5728 	bool ret;
5729 
5730 	intel_digital_port_lock(encoder);
5731 	ret = intel_digital_port_connected_locked(encoder);
5732 	intel_digital_port_unlock(encoder);
5733 
5734 	return ret;
5735 }
5736 
5737 static const struct drm_edid *
5738 intel_dp_get_edid(struct intel_dp *intel_dp)
5739 {
5740 	struct intel_connector *connector = intel_dp->attached_connector;
5741 	const struct drm_edid *fixed_edid = connector->panel.fixed_edid;
5742 
5743 	/* Use panel fixed edid if we have one */
5744 	if (fixed_edid) {
5745 		/* invalid edid */
5746 		if (IS_ERR(fixed_edid))
5747 			return NULL;
5748 
5749 		return drm_edid_dup(fixed_edid);
5750 	}
5751 
5752 	return drm_edid_read_ddc(&connector->base, &intel_dp->aux.ddc);
5753 }
5754 
/*
 * Refresh the cached downstream facing port (DFP) capabilities from the
 * branch device's DPCD and the just-read EDID: max bpc, max dotclock,
 * TMDS clock limits and PCON FRL bandwidth, followed by the PCON DSC caps.
 */
static void
intel_dp_update_dfp(struct intel_dp *intel_dp,
		    const struct drm_edid *drm_edid)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;

	intel_dp->dfp.max_bpc =
		drm_dp_downstream_max_bpc(intel_dp->dpcd,
					  intel_dp->downstream_ports, drm_edid);

	intel_dp->dfp.max_dotclock =
		drm_dp_downstream_max_dotclock(intel_dp->dpcd,
					       intel_dp->downstream_ports);

	/* TMDS clock limits are relevant for DP->HDMI/DVI protocol converters. */
	intel_dp->dfp.min_tmds_clock =
		drm_dp_downstream_min_tmds_clock(intel_dp->dpcd,
						 intel_dp->downstream_ports,
						 drm_edid);
	intel_dp->dfp.max_tmds_clock =
		drm_dp_downstream_max_tmds_clock(intel_dp->dpcd,
						 intel_dp->downstream_ports,
						 drm_edid);

	intel_dp->dfp.pcon_max_frl_bw =
		drm_dp_get_pcon_max_frl_bw(intel_dp->dpcd,
					   intel_dp->downstream_ports);

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d, PCON Max FRL BW %dGbps\n",
		    connector->base.base.id, connector->base.name,
		    intel_dp->dfp.max_bpc,
		    intel_dp->dfp.max_dotclock,
		    intel_dp->dfp.min_tmds_clock,
		    intel_dp->dfp.max_tmds_clock,
		    intel_dp->dfp.pcon_max_frl_bw);

	intel_dp_get_pcon_dsc_cap(intel_dp);
}
5794 
5795 static bool
5796 intel_dp_can_ycbcr420(struct intel_dp *intel_dp)
5797 {
5798 	if (source_can_output(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR420) &&
5799 	    (!drm_dp_is_branch(intel_dp->dpcd) || intel_dp->dfp.ycbcr420_passthrough))
5800 		return true;
5801 
5802 	if (source_can_output(intel_dp, INTEL_OUTPUT_FORMAT_RGB) &&
5803 	    dfp_can_convert_from_rgb(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR420))
5804 		return true;
5805 
5806 	if (source_can_output(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR444) &&
5807 	    dfp_can_convert_from_ycbcr444(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR420))
5808 		return true;
5809 
5810 	return false;
5811 }
5812 
/*
 * Refresh the cached DFP YCbCr 4:2:0 related capabilities (passthrough,
 * 4:4:4->4:2:0 and RGB->YCbCr conversion) and recompute whether the
 * connector may use 4:2:0 output.
 */
static void
intel_dp_update_420(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;

	intel_dp->dfp.ycbcr420_passthrough =
		drm_dp_downstream_420_passthrough(intel_dp->dpcd,
						  intel_dp->downstream_ports);
	/* on-board LSPCON always assumed to support 4:4:4->4:2:0 conversion */
	intel_dp->dfp.ycbcr_444_to_420 =
		dp_to_dig_port(intel_dp)->lspcon.active ||
		drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd,
							intel_dp->downstream_ports);
	intel_dp->dfp.rgb_to_ycbcr =
		drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
							  intel_dp->downstream_ports,
							  DP_DS_HDMI_BT709_RGB_YCBCR_CONV);

	/* Depends on the dfp caps cached just above. */
	connector->base.ycbcr_420_allowed = intel_dp_can_ycbcr420(intel_dp);

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] RGB->YcbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
		    connector->base.base.id, connector->base.name,
		    str_yes_no(intel_dp->dfp.rgb_to_ycbcr),
		    str_yes_no(connector->base.ycbcr_420_allowed),
		    str_yes_no(intel_dp->dfp.ycbcr_444_to_420));
}
5841 
/*
 * Read the sink's EDID and propagate it: update the connector's display
 * info, the VRR capable property, the DFP caps and 4:2:0 state, and
 * (re)attach CEC. The ordering matters: the display info update must
 * happen before anything below that consumes it.
 */
static void
intel_dp_set_edid(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	const struct drm_edid *drm_edid;
	bool vrr_capable;

	/* Drop any stale EDID state before reading a fresh one. */
	intel_dp_unset_edid(intel_dp);
	drm_edid = intel_dp_get_edid(intel_dp);
	connector->detect_edid = drm_edid;

	/* Below we depend on display info having been updated */
	drm_edid_connector_update(&connector->base, drm_edid);

	vrr_capable = intel_vrr_is_capable(connector);
	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] VRR capable: %s\n",
		    connector->base.base.id, connector->base.name, str_yes_no(vrr_capable));
	drm_connector_set_vrr_capable_property(&connector->base, vrr_capable);

	intel_dp_update_dfp(intel_dp, drm_edid);
	intel_dp_update_420(intel_dp);

	drm_dp_cec_attach(&intel_dp->aux,
			  connector->base.display_info.source_physical_address);
}
5868 
/*
 * Drop the cached EDID and reset all state derived from it: CEC, the DFP
 * capability cache, 4:2:0 permission and the VRR capable property. This
 * is the inverse of intel_dp_set_edid().
 */
static void
intel_dp_unset_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;

	drm_dp_cec_unset_edid(&intel_dp->aux);
	drm_edid_free(connector->detect_edid);
	connector->detect_edid = NULL;

	/* Forget the DFP caps harvested by intel_dp_update_dfp(). */
	intel_dp->dfp.max_bpc = 0;
	intel_dp->dfp.max_dotclock = 0;
	intel_dp->dfp.min_tmds_clock = 0;
	intel_dp->dfp.max_tmds_clock = 0;

	intel_dp->dfp.pcon_max_frl_bw = 0;

	intel_dp->dfp.ycbcr_444_to_420 = false;
	connector->base.ycbcr_420_allowed = false;

	drm_connector_set_vrr_capable_property(&connector->base,
					       false);
}
5891 
5892 static void
5893 intel_dp_detect_dsc_caps(struct intel_dp *intel_dp, struct intel_connector *connector)
5894 {
5895 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5896 
5897 	/* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
5898 	if (!HAS_DSC(i915))
5899 		return;
5900 
5901 	if (intel_dp_is_edp(intel_dp))
5902 		intel_edp_get_dsc_sink_cap(intel_dp->edp_dpcd[0],
5903 					   connector);
5904 	else
5905 		intel_dp_get_dsc_sink_cap(intel_dp->dpcd[DP_DPCD_REV],
5906 					  connector);
5907 }
5908 
/*
 * Connector ->detect_ctx hook: determine the connector's status and, for a
 * connected sink, refresh all sink-derived state (tunnel, PSR, DSC, MST,
 * link params, EDID). May return -EDEADLK when the atomic context needs to
 * back off; otherwise returns an enum drm_connector_status value.
 */
static int
intel_dp_detect(struct drm_connector *connector,
		struct drm_modeset_acquire_ctx *ctx,
		bool force)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_connector *intel_connector =
		to_intel_connector(connector);
	struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum drm_connector_status status;
	int ret;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.id, connector->name);
	drm_WARN_ON(&dev_priv->drm,
		    !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));

	if (!intel_display_device_enabled(dev_priv))
		return connector_status_disconnected;

	/* Without HW access, report the last known status unchanged. */
	if (!intel_display_driver_check_access(dev_priv))
		return connector->status;

	/* Can't disconnect eDP */
	if (intel_dp_is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(encoder))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status != connector_status_disconnected &&
	    !intel_dp_mst_verify_dpcd_state(intel_dp))
		/*
		 * This requires retrying detection for instance to re-enable
		 * the MST mode that got reset via a long HPD pulse. The retry
		 * will happen either via the hotplug handler's retry logic,
		 * ensured by setting the connector here to SST/disconnected,
		 * or via a userspace connector probing in response to the
		 * hotplug uevent sent when removing the MST connectors.
		 */
		status = connector_status_disconnected;

	if (status == connector_status_disconnected) {
		/* Reset all sink-derived state on disconnect. */
		memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
		memset(intel_connector->dp.dsc_dpcd, 0, sizeof(intel_connector->dp.dsc_dpcd));
		intel_dp->psr.sink_panel_replay_support = false;
		intel_dp->psr.sink_panel_replay_su_support = false;

		intel_dp_mst_disconnect(intel_dp);

		intel_dp_tunnel_disconnect(intel_dp);

		goto out;
	}

	/* -EDEADLK must be propagated so the caller can back off and retry. */
	ret = intel_dp_tunnel_detect(intel_dp, ctx);
	if (ret == -EDEADLK)
		return ret;

	/* ret == 1: a new tunnel was detected; force userspace to reprobe. */
	if (ret == 1)
		intel_connector->base.epoch_counter++;

	if (!intel_dp_is_edp(intel_dp))
		intel_psr_init_dpcd(intel_dp);

	intel_dp_detect_dsc_caps(intel_dp, intel_connector);

	intel_dp_mst_configure(intel_dp);

	/* Deferred from the HPD handler, see intel_dp_hpd_pulse(). */
	if (intel_dp->reset_link_params) {
		intel_dp_reset_link_params(intel_dp);
		intel_dp->reset_link_params = false;
	}

	intel_dp_print_rates(intel_dp);

	if (intel_dp->is_mst) {
		/*
		 * If we are in MST mode then this connector
		 * won't appear connected or have anything
		 * with EDID on it
		 */
		status = connector_status_disconnected;
		goto out;
	}

	/*
	 * Some external monitors do not signal loss of link synchronization
	 * with an IRQ_HPD, so force a link status check.
	 *
	 * TODO: this probably became redundant, so remove it: the link state
	 * is rechecked/recovered now after modesets, where the loss of
	 * synchronization tends to occur.
	 */
	if (!intel_dp_is_edp(intel_dp))
		intel_dp_check_link_state(intel_dp);

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);
	if (intel_dp_is_edp(intel_dp) ||
	    to_intel_connector(connector)->detect_edid)
		status = connector_status_connected;

	intel_dp_check_device_service_irq(intel_dp);

out:
	if (status != connector_status_connected && !intel_dp->is_mst)
		intel_dp_unset_edid(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		drm_dp_set_subconnector_property(connector,
						 status,
						 intel_dp->dpcd,
						 intel_dp->downstream_ports);
	return status;
}
6035 
/*
 * Connector ->force hook: re-read the EDID for a connector whose status
 * was forced by the user, without doing a full detect cycle. The stale
 * EDID is dropped unconditionally; a fresh one is read only if the
 * connector is (forced) connected.
 */
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.id, connector->name);

	if (!intel_display_driver_check_access(dev_priv))
		return;

	intel_dp_unset_edid(intel_dp);

	if (connector->status != connector_status_connected)
		return;

	intel_dp_set_edid(intel_dp);
}
6057 
6058 static int intel_dp_get_modes(struct drm_connector *connector)
6059 {
6060 	struct intel_connector *intel_connector = to_intel_connector(connector);
6061 	int num_modes;
6062 
6063 	/* drm_edid_connector_update() done in ->detect() or ->force() */
6064 	num_modes = drm_edid_connector_add_modes(connector);
6065 
6066 	/* Also add fixed mode, which may or may not be present in EDID */
6067 	if (intel_dp_is_edp(intel_attached_dp(intel_connector)))
6068 		num_modes += intel_panel_get_modes(intel_connector);
6069 
6070 	if (num_modes)
6071 		return num_modes;
6072 
6073 	if (!intel_connector->detect_edid) {
6074 		struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
6075 		struct drm_display_mode *mode;
6076 
6077 		mode = drm_dp_downstream_mode(connector->dev,
6078 					      intel_dp->dpcd,
6079 					      intel_dp->downstream_ports);
6080 		if (mode) {
6081 			drm_mode_probed_add(connector, mode);
6082 			num_modes++;
6083 		}
6084 	}
6085 
6086 	return num_modes;
6087 }
6088 
/*
 * Connector ->late_register hook: register the connector core state, the
 * DP AUX channel and CEC, and for LSPCON devices probe HDR capability and
 * attach the HDR metadata property. Returns 0 on success or a negative
 * error code (the AUX registration result is propagated).
 */
static int
intel_dp_connector_register(struct drm_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->dev);
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_lspcon *lspcon = &dig_port->lspcon;
	int ret;

	ret = intel_connector_register(connector);
	if (ret)
		return ret;

	drm_dbg_kms(&i915->drm, "registering %s bus for %s\n",
		    intel_dp->aux.name, connector->kdev->kobj.name);

	/* The AUX device parent must be set before registering. */
	intel_dp->aux.dev = connector->kdev;
	ret = drm_dp_aux_register(&intel_dp->aux);
	/* CEC rides on the AUX channel, so only attach it on success. */
	if (!ret)
		drm_dp_cec_register_connector(&intel_dp->aux, connector);

	if (!intel_bios_encoder_is_lspcon(dig_port->base.devdata))
		return ret;

	/*
	 * ToDo: Clean this up to handle lspcon init and resume more
	 * efficiently and streamlined.
	 */
	if (lspcon_init(dig_port)) {
		lspcon_detect_hdr_capability(lspcon);
		if (lspcon->hdr_supported)
			drm_connector_attach_hdr_output_metadata_property(connector);
	}

	return ret;
}
6125 
/*
 * Connector ->early_unregister hook: tear down CEC, the AUX channel and
 * the connector core state, in the reverse order of registration.
 */
static void
intel_dp_connector_unregister(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));

	drm_dp_cec_unregister_connector(&intel_dp->aux);
	drm_dp_aux_unregister(&intel_dp->aux);
	intel_connector_unregister(connector);
}
6135 
6136 void intel_dp_connector_sync_state(struct intel_connector *connector,
6137 				   const struct intel_crtc_state *crtc_state)
6138 {
6139 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
6140 
6141 	if (crtc_state && crtc_state->dsc.compression_enable) {
6142 		drm_WARN_ON(&i915->drm, !connector->dp.dsc_decompression_aux);
6143 		connector->dp.dsc_decompression_enabled = true;
6144 	} else {
6145 		connector->dp.dsc_decompression_enabled = false;
6146 	}
6147 }
6148 
/*
 * Flush and tear down the encoder's pending work on driver removal:
 * link-check work, MST state, tunnel, and the panel power sequencer.
 */
void intel_dp_encoder_flush_work(struct drm_encoder *_encoder)
{
	struct intel_encoder *encoder = to_intel_encoder(_encoder);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &dig_port->dp;

	intel_encoder_link_check_flush_work(encoder);

	intel_dp_mst_encoder_cleanup(dig_port);

	intel_dp_tunnel_destroy(intel_dp);

	intel_pps_vdd_off_sync(intel_dp);

	/*
	 * Ensure power off delay is respected on module remove, so that we can
	 * reduce delays at driver probe. See pps_init_timestamps().
	 */
	intel_pps_wait_power_cycle(intel_dp);

	intel_dp_aux_fini(intel_dp);
}
6171 
/*
 * Encoder suspend hook: turn off any lingering panel VDD and suspend the
 * DP tunnel state.
 */
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);

	intel_pps_vdd_off_sync(intel_dp);

	intel_dp_tunnel_suspend(intel_dp);
}
6180 
/*
 * Encoder shutdown hook: let the panel power sequencer finish its power
 * cycle so the next boot doesn't have to wait for it.
 */
void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);

	intel_pps_wait_power_cycle(intel_dp);
}
6187 
/*
 * Add every connector of the given tile group (and its CRTC's planes) to
 * the atomic state, flagging their CRTCs for a modeset so all tiles are
 * modeset together. Returns 0 or a negative error code.
 */
static int intel_modeset_tile_group(struct intel_atomic_state *state,
				    int tile_group_id)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;
	int ret = 0;

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!connector->has_tile ||
		    connector->tile_group->id != tile_group_id)
			continue;

		conn_state = drm_atomic_get_connector_state(&state->base,
							    connector);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			break;
		}

		crtc = to_intel_crtc(conn_state->crtc);

		/* Connector not bound to a CRTC: nothing to modeset. */
		if (!crtc)
			continue;

		/*
		 * NOTE(review): this relies on drm_atomic_get_connector_state()
		 * above having already acquired the bound CRTC's state, so the
		 * lookup here cannot return NULL — confirm against drm core.
		 */
		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			break;
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}
6229 
/*
 * Flag the enabled CRTC driving each transcoder in the @transcoders
 * bitmask for a modeset, pulling its connectors and planes into the
 * atomic state. Returns 0 or a negative error code.
 */
static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	if (transcoders == 0)
		return 0;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state;
		int ret;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->hw.enable)
			continue;

		if (!(transcoders & BIT(crtc_state->cpu_transcoder)))
			continue;

		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			return ret;

		/* Each transcoder is expected to be found exactly once. */
		transcoders &= ~BIT(crtc_state->cpu_transcoder);
	}

	drm_WARN_ON(&dev_priv->drm, transcoders != 0);

	return 0;
}
6269 
6270 static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
6271 				      struct drm_connector *connector)
6272 {
6273 	const struct drm_connector_state *old_conn_state =
6274 		drm_atomic_get_old_connector_state(&state->base, connector);
6275 	const struct intel_crtc_state *old_crtc_state;
6276 	struct intel_crtc *crtc;
6277 	u8 transcoders;
6278 
6279 	crtc = to_intel_crtc(old_conn_state->crtc);
6280 	if (!crtc)
6281 		return 0;
6282 
6283 	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
6284 
6285 	if (!old_crtc_state->hw.active)
6286 		return 0;
6287 
6288 	transcoders = old_crtc_state->sync_mode_slaves_mask;
6289 	if (old_crtc_state->master_transcoder != INVALID_TRANSCODER)
6290 		transcoders |= BIT(old_crtc_state->master_transcoder);
6291 
6292 	return intel_modeset_affected_transcoders(state,
6293 						  transcoders);
6294 }
6295 
/*
 * Connector ->atomic_check hook: run the generic digital connector check,
 * the MST root connector check and tunnel BW check, then make sure tiled
 * and port-synced CRTCs are modeset together. Returns 0 or a negative
 * error code.
 */
static int intel_dp_connector_atomic_check(struct drm_connector *conn,
					   struct drm_atomic_state *_state)
{
	struct drm_i915_private *dev_priv = to_i915(conn->dev);
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(_state, conn);
	struct intel_connector *intel_conn = to_intel_connector(conn);
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_conn->encoder);
	int ret;

	ret = intel_digital_connector_atomic_check(conn, &state->base);
	if (ret)
		return ret;

	if (intel_dp_mst_source_support(intel_dp)) {
		ret = drm_dp_mst_root_conn_atomic_check(conn_state, &intel_dp->mst_mgr);
		if (ret)
			return ret;
	}

	/* The checks below only matter when this connector changes state. */
	if (!intel_connector_needs_modeset(state, conn))
		return 0;

	ret = intel_dp_tunnel_atomic_check_state(state,
						 intel_dp,
						 intel_conn);
	if (ret)
		return ret;

	/*
	 * We don't enable port sync on BDW due to missing w/as and
	 * due to not having adjusted the modeset sequence appropriately.
	 */
	if (DISPLAY_VER(dev_priv) < 9)
		return 0;

	if (conn->has_tile) {
		ret = intel_modeset_tile_group(state, conn->tile_group->id);
		if (ret)
			return ret;
	}

	return intel_modeset_synced_crtcs(state, conn);
}
6340 
/*
 * Connector ->oob_hotplug_event hook: translate an out-of-band hotplug
 * notification into the HPD machinery. Only schedules detection when the
 * reported state actually changed; oob_hotplug_last_state and event_bits
 * are protected by irq_lock.
 */
static void intel_dp_oob_hotplug_event(struct drm_connector *connector,
				       enum drm_connector_status hpd_state)
{
	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
	struct drm_i915_private *i915 = to_i915(connector->dev);
	bool hpd_high = hpd_state == connector_status_connected;
	unsigned int hpd_pin = encoder->hpd_pin;
	bool need_work = false;

	spin_lock_irq(&i915->irq_lock);
	if (hpd_high != test_bit(hpd_pin, &i915->display.hotplug.oob_hotplug_last_state)) {
		i915->display.hotplug.event_bits |= BIT(hpd_pin);

		__assign_bit(hpd_pin, &i915->display.hotplug.oob_hotplug_last_state, hpd_high);
		need_work = true;
	}
	spin_unlock_irq(&i915->irq_lock);

	/* Kick off detection outside the spinlock. */
	if (need_work)
		intel_hpd_schedule_detection(i915);
}
6362 
/*
 * Connector funcs for DP/eDP connectors. There is deliberately no .detect
 * hook here: detection is done via .detect_ctx in
 * intel_dp_connector_helper_funcs, which gets a modeset acquire context.
 */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_digital_connector_atomic_get_property,
	.atomic_set_property = intel_digital_connector_atomic_set_property,
	.late_register = intel_dp_connector_register,
	.early_unregister = intel_dp_connector_unregister,
	.destroy = intel_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
	.oob_hotplug_event = intel_dp_oob_hotplug_event,
};
6375 
/* Probe helpers; .detect_ctx is used in place of connector_funcs .detect. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.detect_ctx = intel_dp_detect,
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.atomic_check = intel_dp_connector_atomic_check,
};
6382 
6383 enum irqreturn
6384 intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
6385 {
6386 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
6387 	struct intel_dp *intel_dp = &dig_port->dp;
6388 	u8 dpcd[DP_RECEIVER_CAP_SIZE];
6389 
6390 	if (dig_port->base.type == INTEL_OUTPUT_EDP &&
6391 	    (long_hpd || !intel_pps_have_panel_power_or_vdd(intel_dp))) {
6392 		/*
6393 		 * vdd off can generate a long/short pulse on eDP which
6394 		 * would require vdd on to handle it, and thus we
6395 		 * would end up in an endless cycle of
6396 		 * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..."
6397 		 */
6398 		drm_dbg_kms(&i915->drm,
6399 			    "ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
6400 			    long_hpd ? "long" : "short",
6401 			    dig_port->base.base.base.id,
6402 			    dig_port->base.base.name);
6403 		return IRQ_HANDLED;
6404 	}
6405 
6406 	drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
6407 		    dig_port->base.base.base.id,
6408 		    dig_port->base.base.name,
6409 		    long_hpd ? "long" : "short");
6410 
6411 	/*
6412 	 * TBT DP tunnels require the GFX driver to read out the DPRX caps in
6413 	 * response to long HPD pulses. The DP hotplug handler does that,
6414 	 * however the hotplug handler may be blocked by another
6415 	 * connector's/encoder's hotplug handler. Since the TBT CM may not
6416 	 * complete the DP tunnel BW request for the latter connector/encoder
6417 	 * waiting for this encoder's DPRX read, perform a dummy read here.
6418 	 */
6419 	if (long_hpd)
6420 		intel_dp_read_dprx_caps(intel_dp, dpcd);
6421 
6422 	if (long_hpd) {
6423 		intel_dp->reset_link_params = true;
6424 		return IRQ_NONE;
6425 	}
6426 
6427 	if (intel_dp->is_mst) {
6428 		if (!intel_dp_check_mst_status(intel_dp))
6429 			return IRQ_NONE;
6430 	} else if (!intel_dp_short_pulse(intel_dp)) {
6431 		return IRQ_NONE;
6432 	}
6433 
6434 	return IRQ_HANDLED;
6435 }
6436 
6437 static bool _intel_dp_is_port_edp(struct drm_i915_private *dev_priv,
6438 				  const struct intel_bios_encoder_data *devdata,
6439 				  enum port port)
6440 {
6441 	/*
6442 	 * eDP not supported on g4x. so bail out early just
6443 	 * for a bit extra safety in case the VBT is bonkers.
6444 	 */
6445 	if (DISPLAY_VER(dev_priv) < 5)
6446 		return false;
6447 
6448 	if (DISPLAY_VER(dev_priv) < 9 && port == PORT_A)
6449 		return true;
6450 
6451 	return devdata && intel_bios_encoder_supports_edp(devdata);
6452 }
6453 
6454 bool intel_dp_is_port_edp(struct drm_i915_private *i915, enum port port)
6455 {
6456 	const struct intel_bios_encoder_data *devdata =
6457 		intel_bios_encoder_data_lookup(i915, port);
6458 
6459 	return _intel_dp_is_port_edp(i915, devdata, port);
6460 }
6461 
6462 bool
6463 intel_dp_has_gamut_metadata_dip(struct intel_encoder *encoder)
6464 {
6465 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
6466 	enum port port = encoder->port;
6467 
6468 	if (intel_bios_encoder_is_lspcon(encoder->devdata))
6469 		return false;
6470 
6471 	if (DISPLAY_VER(i915) >= 11)
6472 		return true;
6473 
6474 	if (port == PORT_A)
6475 		return false;
6476 
6477 	if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
6478 	    DISPLAY_VER(i915) >= 9)
6479 		return true;
6480 
6481 	return false;
6482 }
6483 
/*
 * Attach the drm properties applicable to a DP/eDP connector: subconnector
 * (external DP only), force audio, broadcast RGB, max bpc, colorspace,
 * HDR metadata and VRR capable, depending on platform and port.
 */
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	enum port port = dp_to_dig_port(intel_dp)->base.port;

	if (!intel_dp_is_edp(intel_dp))
		drm_connector_attach_dp_subconnector_property(connector);

	if (!IS_G4X(dev_priv) && port != PORT_A)
		intel_attach_force_audio_property(connector);

	intel_attach_broadcast_rgb_property(connector);
	if (HAS_GMCH(dev_priv))
		drm_connector_attach_max_bpc_property(connector, 6, 10);
	else if (DISPLAY_VER(dev_priv) >= 5)
		drm_connector_attach_max_bpc_property(connector, 6, 12);

	/* Register HDMI colorspace for case of lspcon */
	if (intel_bios_encoder_is_lspcon(dp_to_dig_port(intel_dp)->base.devdata)) {
		drm_connector_attach_content_type_property(connector);
		intel_attach_hdmi_colorspace_property(connector);
	} else {
		intel_attach_dp_colorspace_property(connector);
	}

	if (intel_dp_has_gamut_metadata_dip(&dp_to_dig_port(intel_dp)->base))
		drm_connector_attach_hdr_output_metadata_property(connector);

	if (HAS_VRR(dev_priv))
		drm_connector_attach_vrr_capable_property(connector);
}
6516 
/*
 * Attach the eDP-only connector properties: scaling mode and panel
 * orientation (from the VBT, sized by the preferred fixed mode).
 */
static void
intel_edp_add_properties(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	const struct drm_display_mode *fixed_mode =
		intel_panel_preferred_fixed_mode(connector);

	intel_attach_scaling_mode_property(&connector->base);

	drm_connector_set_panel_orientation_with_quirk(&connector->base,
						       i915->display.vbt.orientation,
						       fixed_mode->hdisplay,
						       fixed_mode->vdisplay);
}
6532 
/*
 * Initialize backlight control for the eDP panel. On VLV/CHV the
 * backlight is tied to a pipe, so figure out which one to use first;
 * everywhere else INVALID_PIPE is passed through.
 */
static void intel_edp_backlight_setup(struct intel_dp *intel_dp,
				      struct intel_connector *connector)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	enum pipe pipe = INVALID_PIPE;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		pipe = vlv_active_pipe(intel_dp);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps.pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;
	}

	intel_backlight_setup(connector, pipe);
}
6556 
/*
 * Initialize the eDP-specific parts of a connector: panel power sequencer,
 * cached DPCD/EDID, fixed panel mode(s), backlight and eDP properties.
 *
 * Returns true on success, and also (trivially) when the port is not eDP
 * at all. Returns false when the panel turns out to be unusable; in that
 * case any VDD the BIOS may have left enabled is synced back off first.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_connector *connector = &intel_connector->base;
	struct drm_display_mode *fixed_mode;
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	bool has_dpcd;
	const struct drm_edid *drm_edid;

	/* Nothing to do for external DP ports. */
	if (!intel_dp_is_edp(intel_dp))
		return true;

	/*
	 * On IBX/CPT we may get here with LVDS already registered. Since the
	 * driver uses the only internal power sequencer available for both
	 * eDP and LVDS bail out early in this case to prevent interfering
	 * with an already powered-on LVDS power sequencer.
	 */
	if (intel_get_lvds_encoder(dev_priv)) {
		drm_WARN_ON(&dev_priv->drm,
			    !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
		drm_info(&dev_priv->drm,
			 "LVDS was detected, not registering eDP\n");

		return false;
	}

	/* Parse the panel-related VBT data needed before PPS init. */
	intel_bios_init_panel_early(dev_priv, &intel_connector->panel,
				    encoder->devdata);

	if (!intel_pps_init(intel_dp)) {
		drm_info(&dev_priv->drm,
			 "[ENCODER:%d:%s] unusable PPS, disabling eDP\n",
			 encoder->base.base.id, encoder->base.name);
		/*
		 * The BIOS may have still enabled VDD on the PPS even
		 * though it's unusable. Make sure we turn it back off
		 * and to release the power domain references/etc.
		 */
		goto out_vdd_off;
	}

	/*
	 * Enable HPD sense for live status check.
	 * intel_hpd_irq_setup() will turn it off again
	 * if it's no longer needed later.
	 *
	 * The DPCD probe below will make sure VDD is on.
	 */
	intel_hpd_enable_detection(encoder);

	intel_alpm_init_dpcd(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_edp_init_dpcd(intel_dp, intel_connector);

	if (!has_dpcd) {
		/* if this fails, presume the device is a ghost */
		drm_info(&dev_priv->drm,
			 "[ENCODER:%d:%s] failed to retrieve link info, disabling eDP\n",
			 encoder->base.base.id, encoder->base.name);
		goto out_vdd_off;
	}

	/*
	 * VBT and straps are liars. Also check HPD as that seems
	 * to be the most reliable piece of information available.
	 *
	 * ... expect on devices that forgot to hook HPD up for eDP
	 * (eg. Acer Chromebook C710), so we'll check it only if multiple
	 * ports are attempting to use the same AUX CH, according to VBT.
	 */
	if (intel_bios_dp_has_shared_aux_ch(encoder->devdata)) {
		/*
		 * If this fails, presume the DPCD answer came
		 * from some other port using the same AUX CH.
		 *
		 * FIXME maybe cleaner to check this before the
		 * DPCD read? Would need sort out the VDD handling...
		 */
		if (!intel_digital_port_connected(encoder)) {
			drm_info(&dev_priv->drm,
				 "[ENCODER:%d:%s] HPD is down, disabling eDP\n",
				 encoder->base.base.id, encoder->base.name);
			goto out_vdd_off;
		}

		/*
		 * Unfortunately even the HPD based detection fails on
		 * eg. Asus B360M-A (CFL+CNP), so as a last resort fall
		 * back to checking for a VGA branch device. Only do this
		 * on known affected platforms to minimize false positives.
		 */
		if (DISPLAY_VER(dev_priv) == 9 && drm_dp_is_branch(intel_dp->dpcd) &&
		    (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) ==
		    DP_DWN_STRM_PORT_TYPE_ANALOG) {
			drm_info(&dev_priv->drm,
				 "[ENCODER:%d:%s] VGA converter detected, disabling eDP\n",
				 encoder->base.base.id, encoder->base.name);
			goto out_vdd_off;
		}
	}

	/* mode_config.mutex protects the connector mode list updated below. */
	mutex_lock(&dev_priv->drm.mode_config.mutex);
	drm_edid = drm_edid_read_ddc(connector, connector->ddc);
	if (!drm_edid) {
		/* Fallback to EDID from ACPI OpRegion, if any */
		drm_edid = intel_opregion_get_edid(intel_connector);
		if (drm_edid)
			drm_dbg_kms(&dev_priv->drm,
				    "[CONNECTOR:%d:%s] Using OpRegion EDID\n",
				    connector->base.id, connector->name);
	}
	if (drm_edid) {
		/* An EDID that yields no modes is treated as no EDID at all. */
		if (drm_edid_connector_update(connector, drm_edid) ||
		    !drm_edid_connector_add_modes(connector)) {
			drm_edid_connector_update(connector, NULL);
			drm_edid_free(drm_edid);
			drm_edid = ERR_PTR(-EINVAL);
		}
	} else {
		drm_edid = ERR_PTR(-ENOENT);
	}

	intel_bios_init_panel_late(dev_priv, &intel_connector->panel, encoder->devdata,
				   IS_ERR(drm_edid) ? NULL : drm_edid);

	intel_panel_add_edid_fixed_modes(intel_connector, true);

	/* MSO requires information from the EDID */
	intel_edp_mso_init(intel_dp);

	/* multiply the mode clock and horizontal timings for MSO */
	list_for_each_entry(fixed_mode, &intel_connector->panel.fixed_modes, head)
		intel_edp_mso_mode_fixup(intel_connector, fixed_mode);

	/* fallback to VBT if available for eDP */
	if (!intel_panel_preferred_fixed_mode(intel_connector))
		intel_panel_add_vbt_lfp_fixed_mode(intel_connector);

	mutex_unlock(&dev_priv->drm.mode_config.mutex);

	/* Without any fixed mode the panel is unusable as eDP. */
	if (!intel_panel_preferred_fixed_mode(intel_connector)) {
		drm_info(&dev_priv->drm,
			 "[ENCODER:%d:%s] failed to find fixed mode for the panel, disabling eDP\n",
			 encoder->base.base.id, encoder->base.name);
		goto out_vdd_off;
	}

	intel_panel_init(intel_connector, drm_edid);

	intel_edp_backlight_setup(intel_dp, intel_connector);

	intel_edp_add_properties(intel_dp);

	intel_pps_init_late(intel_dp);

	return true;

out_vdd_off:
	/* Turn VDD back off and drop the associated power references. */
	intel_pps_vdd_off_sync(intel_dp);

	return false;
}
6722 
6723 static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
6724 {
6725 	struct intel_connector *intel_connector;
6726 	struct drm_connector *connector;
6727 
6728 	intel_connector = container_of(work, typeof(*intel_connector),
6729 				       modeset_retry_work);
6730 	connector = &intel_connector->base;
6731 	drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s]\n", connector->base.id,
6732 		    connector->name);
6733 
6734 	/* Grab the locks before changing connector property*/
6735 	mutex_lock(&connector->dev->mode_config.mutex);
6736 	/* Set connector link status to BAD and send a Uevent to notify
6737 	 * userspace to do a modeset.
6738 	 */
6739 	drm_connector_set_link_status_property(connector,
6740 					       DRM_MODE_LINK_STATUS_BAD);
6741 	mutex_unlock(&connector->dev->mode_config.mutex);
6742 	/* Send Hotplug uevent so userspace can reprobe */
6743 	drm_kms_helper_connector_hotplug_event(connector);
6744 
6745 	drm_connector_put(connector);
6746 }
6747 
/* Initialize the deferred work used to recover from link training failures. */
void intel_dp_init_modeset_retry_work(struct intel_connector *connector)
{
	INIT_WORK(&connector->modeset_retry_work,
		  intel_dp_modeset_retry_work_fn);
}
6753 
/*
 * Create and initialize the DRM connector for a (e)DP digital port.
 *
 * Determines whether the port is eDP or external DP, initializes the AUX
 * channel, registers the connector with its properties, performs the eDP
 * panel setup and initializes MST, HDCP and PSR where applicable.
 *
 * Returns true on success, false on failure (after cleaning up the
 * partially initialized connector).
 */
bool
intel_dp_init_connector(struct intel_digital_port *dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &dig_port->dp;
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_encoder->port;
	int type;

	/* Initialize the work for modeset in case of link train failure */
	intel_dp_init_modeset_retry_work(intel_connector);

	if (drm_WARN(dev, dig_port->max_lanes < 1,
		     "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
		     dig_port->max_lanes, intel_encoder->base.base.id,
		     intel_encoder->base.name))
		return false;

	intel_dp->reset_link_params = true;
	intel_dp->pps.pps_pipe = INVALID_PIPE;
	intel_dp->pps.active_pipe = INVALID_PIPE;

	/* Preserve the current hw state. */
	intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (_intel_dp_is_port_edp(dev_priv, intel_encoder->devdata, port)) {
		/*
		 * Currently we don't support eDP on TypeC ports, although in
		 * theory it could work on TypeC legacy ports.
		 */
		drm_WARN_ON(dev, intel_encoder_is_tc(intel_encoder));
		type = DRM_MODE_CONNECTOR_eDP;
		intel_encoder->type = INTEL_OUTPUT_EDP;

		/* eDP only on port B and/or C on vlv/chv */
		if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
				      IS_CHERRYVIEW(dev_priv)) &&
				port != PORT_B && port != PORT_C))
			return false;
	} else {
		type = DRM_MODE_CONNECTOR_DisplayPort;
	}

	intel_dp_set_default_sink_rates(intel_dp);
	intel_dp_set_default_max_sink_lane_count(intel_dp);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);

	/* The AUX channel is needed below for the eDP DPCD/EDID probe. */
	intel_dp_aux_init(intel_dp);
	intel_connector->dp.dsc_decompression_aux = &intel_dp->aux;

	drm_dbg_kms(&dev_priv->drm,
		    "Adding %s connector on [ENCODER:%d:%s]\n",
		    type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
		    intel_encoder->base.base.id, intel_encoder->base.name);

	drm_connector_init_with_ddc(dev, connector, &intel_dp_connector_funcs,
				    type, &intel_dp->aux.ddc);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	if (!HAS_GMCH(dev_priv) && DISPLAY_VER(dev_priv) < 12)
		connector->interlace_allowed = true;

	intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
	intel_connector->base.polled = intel_connector->polled;

	intel_connector_attach_encoder(intel_connector, intel_encoder);

	if (HAS_DDI(dev_priv))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->sync_state = intel_dp_connector_sync_state;

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		intel_dp_aux_fini(intel_dp);
		goto fail;
	}

	intel_dp_set_source_rates(intel_dp);
	intel_dp_set_common_rates(intel_dp);
	intel_dp_reset_link_params(intel_dp);

	/* init MST on ports that can support it */
	intel_dp_mst_encoder_init(dig_port,
				  intel_connector->base.base.id);

	intel_dp_add_properties(intel_dp, connector);

	/* HDCP is handled by the MST code for MST-capable external DP. */
	if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
		int ret = intel_dp_hdcp_init(dig_port, intel_connector);
		if (ret)
			drm_dbg_kms(&dev_priv->drm,
				    "HDCP init failed, skipping.\n");
	}

	intel_dp->colorimetry_support =
		intel_dp_get_colorimetry_status(intel_dp);

	intel_dp->frl.is_trained = false;
	intel_dp->frl.trained_rate_gbps = 0;

	intel_psr_init(intel_dp);

	return true;

fail:
	intel_display_power_flush_work(dev_priv);
	drm_connector_cleanup(connector);

	return false;
}
6871 
6872 void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
6873 {
6874 	struct intel_encoder *encoder;
6875 
6876 	if (!HAS_DISPLAY(dev_priv))
6877 		return;
6878 
6879 	for_each_intel_encoder(&dev_priv->drm, encoder) {
6880 		struct intel_dp *intel_dp;
6881 
6882 		if (encoder->type != INTEL_OUTPUT_DDI)
6883 			continue;
6884 
6885 		intel_dp = enc_to_intel_dp(encoder);
6886 
6887 		if (!intel_dp_mst_source_support(intel_dp))
6888 			continue;
6889 
6890 		if (intel_dp->is_mst)
6891 			drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
6892 	}
6893 }
6894 
6895 void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
6896 {
6897 	struct intel_encoder *encoder;
6898 
6899 	if (!HAS_DISPLAY(dev_priv))
6900 		return;
6901 
6902 	for_each_intel_encoder(&dev_priv->drm, encoder) {
6903 		struct intel_dp *intel_dp;
6904 		int ret;
6905 
6906 		if (encoder->type != INTEL_OUTPUT_DDI)
6907 			continue;
6908 
6909 		intel_dp = enc_to_intel_dp(encoder);
6910 
6911 		if (!intel_dp_mst_source_support(intel_dp))
6912 			continue;
6913 
6914 		ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
6915 						     true);
6916 		if (ret) {
6917 			intel_dp->is_mst = false;
6918 			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
6919 							false);
6920 		}
6921 	}
6922 }
6923