xref: /linux/drivers/gpu/drm/i915/display/intel_dp.c (revision ab0f4cedc3554f921691ce5b63d59e258154e799)
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27 
28 #include <linux/export.h>
29 #include <linux/i2c.h>
30 #include <linux/notifier.h>
31 #include <linux/slab.h>
32 #include <linux/string_helpers.h>
33 #include <linux/timekeeping.h>
34 #include <linux/types.h>
35 
36 #include <asm/byteorder.h>
37 
38 #include <drm/display/drm_dp_helper.h>
39 #include <drm/display/drm_dp_tunnel.h>
40 #include <drm/display/drm_dsc_helper.h>
41 #include <drm/display/drm_hdmi_helper.h>
42 #include <drm/drm_atomic_helper.h>
43 #include <drm/drm_crtc.h>
44 #include <drm/drm_edid.h>
45 #include <drm/drm_probe_helper.h>
46 
47 #include "g4x_dp.h"
48 #include "i915_drv.h"
49 #include "i915_irq.h"
50 #include "i915_reg.h"
51 #include "intel_atomic.h"
52 #include "intel_audio.h"
53 #include "intel_backlight.h"
54 #include "intel_combo_phy_regs.h"
55 #include "intel_connector.h"
56 #include "intel_crtc.h"
57 #include "intel_cx0_phy.h"
58 #include "intel_ddi.h"
59 #include "intel_de.h"
60 #include "intel_display_driver.h"
61 #include "intel_display_types.h"
62 #include "intel_dp.h"
63 #include "intel_dp_aux.h"
64 #include "intel_dp_hdcp.h"
65 #include "intel_dp_link_training.h"
66 #include "intel_dp_mst.h"
67 #include "intel_dp_tunnel.h"
68 #include "intel_dpio_phy.h"
69 #include "intel_dpll.h"
70 #include "intel_drrs.h"
71 #include "intel_fifo_underrun.h"
72 #include "intel_hdcp.h"
73 #include "intel_hdmi.h"
74 #include "intel_hotplug.h"
75 #include "intel_hotplug_irq.h"
76 #include "intel_lspcon.h"
77 #include "intel_lvds.h"
78 #include "intel_panel.h"
79 #include "intel_pch_display.h"
80 #include "intel_pps.h"
81 #include "intel_psr.h"
82 #include "intel_tc.h"
83 #include "intel_vdsc.h"
84 #include "intel_vrr.h"
85 #include "intel_crtc_state_dump.h"
86 
87 /* DP DSC throughput values used for slice count calculations KPixels/s */
88 #define DP_DSC_PEAK_PIXEL_RATE			2720000
89 #define DP_DSC_MAX_ENC_THROUGHPUT_0		340000
90 #define DP_DSC_MAX_ENC_THROUGHPUT_1		400000
91 
92 /* Max DSC line buffer depth supported by HW. */
93 #define INTEL_DP_DSC_MAX_LINE_BUF_DEPTH		13
94 
95 /* DP DSC FEC Overhead factor in ppm = 1/(0.972261) = 1.028530 */
96 #define DP_DSC_FEC_OVERHEAD_FACTOR		1028530
97 
98 /* Compliance test status bits  */
99 #define INTEL_DP_RESOLUTION_SHIFT_MASK	0
100 #define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
101 #define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
102 #define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
103 
104 
105 /* Constants for DP DSC configurations */
106 static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};
107 
108 /* With Single pipe configuration, HW is capable of supporting maximum
109  * of 4 slices per line.
110  */
111 static const u8 valid_dsc_slicecount[] = {1, 2, 4};
112 
113 /**
114  * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
115  * @intel_dp: DP struct
116  *
117  * If a CPU or PCH DP output is attached to an eDP panel, this function
118  * will return true, and false otherwise.
119  *
120  * This function is not safe to use prior to encoder type being set.
121  */
122 bool intel_dp_is_edp(struct intel_dp *intel_dp)
123 {
124 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
125 
126 	return dig_port->base.type == INTEL_OUTPUT_EDP;
127 }
128 
129 bool intel_dp_as_sdp_supported(struct intel_dp *intel_dp)
130 {
131 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
132 
133 	return HAS_AS_SDP(i915) &&
134 		drm_dp_as_sdp_supported(&intel_dp->aux, intel_dp->dpcd);
135 }
136 
137 static void intel_dp_unset_edid(struct intel_dp *intel_dp);
138 
139 /* Is link rate UHBR and thus 128b/132b? */
140 bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state)
141 {
142 	return drm_dp_is_uhbr_rate(crtc_state->port_clock);
143 }
144 
/**
 * intel_dp_link_symbol_size - get the link symbol size for a given link rate
 * @rate: link rate in 10kbit/s units
 *
 * Returns the link symbol size in bits/symbol units depending on the link
 * rate -> channel coding.
 */
int intel_dp_link_symbol_size(int rate)
{
	/* 128b/132b links use 32 bit symbols, 8b/10b links 10 bit symbols. */
	if (drm_dp_is_uhbr_rate(rate))
		return 32;

	return 10;
}
156 
/**
 * intel_dp_link_symbol_clock - convert link rate to link symbol clock
 * @rate: link rate in 10kbit/s units
 *
 * Returns the link symbol clock frequency in kHz units depending on the
 * link rate and channel coding.
 */
int intel_dp_link_symbol_clock(int rate)
{
	int symbol_size = intel_dp_link_symbol_size(rate);

	return DIV_ROUND_CLOSEST(rate * 10, symbol_size);
}
168 
169 static int max_dprx_rate(struct intel_dp *intel_dp)
170 {
171 	if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
172 		return drm_dp_tunnel_max_dprx_rate(intel_dp->tunnel);
173 
174 	return drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
175 }
176 
177 static int max_dprx_lane_count(struct intel_dp *intel_dp)
178 {
179 	if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
180 		return drm_dp_tunnel_max_dprx_lane_count(intel_dp->tunnel);
181 
182 	return drm_dp_max_lane_count(intel_dp->dpcd);
183 }
184 
185 static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp)
186 {
187 	intel_dp->sink_rates[0] = 162000;
188 	intel_dp->num_sink_rates = 1;
189 }
190 
/* update sink rates from dpcd */
static void intel_dp_set_dpcd_sink_rates(struct intel_dp *intel_dp)
{
	/* The standard 8b/10b link rates, in ascending order. */
	static const int dp_rates[] = {
		162000, 270000, 540000, 810000
	};
	int i, max_rate;
	int max_lttpr_rate;

	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
		/* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
		static const int quirk_rates[] = { 162000, 270000, 324000 };

		memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates));
		intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates);

		return;
	}

	/*
	 * Sink rates for 8b/10b.
	 */
	max_rate = max_dprx_rate(intel_dp);
	max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps);
	/* An LTTPR in the link may limit the rate below what the DPRX supports. */
	if (max_lttpr_rate)
		max_rate = min(max_rate, max_lttpr_rate);

	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
		if (dp_rates[i] > max_rate)
			break;
		intel_dp->sink_rates[i] = dp_rates[i];
	}

	/*
	 * Sink rates for 128b/132b. If set, sink should support all 8b/10b
	 * rates and 10 Gbps.
	 */
	if (drm_dp_128b132b_supported(intel_dp->dpcd)) {
		u8 uhbr_rates = 0;

		/* 'i' continues past the 8b/10b rates; ensure room for 3 UHBR rates. */
		BUILD_BUG_ON(ARRAY_SIZE(intel_dp->sink_rates) < ARRAY_SIZE(dp_rates) + 3);

		drm_dp_dpcd_readb(&intel_dp->aux,
				  DP_128B132B_SUPPORTED_LINK_RATES, &uhbr_rates);

		if (drm_dp_lttpr_count(intel_dp->lttpr_common_caps)) {
			/* We have a repeater */
			/* Rev 2.0+ repeaters advertise 128b/132b support and rates. */
			if (intel_dp->lttpr_common_caps[0] >= 0x20 &&
			    intel_dp->lttpr_common_caps[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
							DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] &
			    DP_PHY_REPEATER_128B132B_SUPPORTED) {
				/* Repeater supports 128b/132b, valid UHBR rates */
				uhbr_rates &= intel_dp->lttpr_common_caps[DP_PHY_REPEATER_128B132B_RATES -
									  DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
			} else {
				/* Does not support 128b/132b */
				uhbr_rates = 0;
			}
		}

		if (uhbr_rates & DP_UHBR10)
			intel_dp->sink_rates[i++] = 1000000;
		if (uhbr_rates & DP_UHBR13_5)
			intel_dp->sink_rates[i++] = 1350000;
		if (uhbr_rates & DP_UHBR20)
			intel_dp->sink_rates[i++] = 2000000;
	}

	intel_dp->num_sink_rates = i;
}
261 
262 static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
263 {
264 	struct intel_connector *connector = intel_dp->attached_connector;
265 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
266 	struct intel_encoder *encoder = &intel_dig_port->base;
267 
268 	intel_dp_set_dpcd_sink_rates(intel_dp);
269 
270 	if (intel_dp->num_sink_rates)
271 		return;
272 
273 	drm_err(&dp_to_i915(intel_dp)->drm,
274 		"[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD with no link rates, using defaults\n",
275 		connector->base.base.id, connector->base.name,
276 		encoder->base.base.id, encoder->base.name);
277 
278 	intel_dp_set_default_sink_rates(intel_dp);
279 }
280 
/* Fall back to a single lane when the DPCD-reported lane count is invalid. */
static void intel_dp_set_default_max_sink_lane_count(struct intel_dp *intel_dp)
{
	intel_dp->max_sink_lane_count = 1;
}
285 
286 static void intel_dp_set_max_sink_lane_count(struct intel_dp *intel_dp)
287 {
288 	struct intel_connector *connector = intel_dp->attached_connector;
289 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
290 	struct intel_encoder *encoder = &intel_dig_port->base;
291 
292 	intel_dp->max_sink_lane_count = max_dprx_lane_count(intel_dp);
293 
294 	switch (intel_dp->max_sink_lane_count) {
295 	case 1:
296 	case 2:
297 	case 4:
298 		return;
299 	}
300 
301 	drm_err(&dp_to_i915(intel_dp)->drm,
302 		"[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD max lane count (%d), using default\n",
303 		connector->base.base.id, connector->base.name,
304 		encoder->base.base.id, encoder->base.name,
305 		intel_dp->max_sink_lane_count);
306 
307 	intel_dp_set_default_max_sink_lane_count(intel_dp);
308 }
309 
/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int i;

	/*
	 * rates[] is sorted ascending: scan down from the top and return the
	 * length up to and including the highest rate not above max_rate.
	 */
	for (i = len - 1; i >= 0; i--) {
		if (rates[i] <= max_rate)
			return i + 1;
	}

	return 0;
}
323 
324 /* Get length of common rates array potentially limited by max_rate. */
325 static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
326 					  int max_rate)
327 {
328 	return intel_dp_rate_limit_len(intel_dp->common_rates,
329 				       intel_dp->num_common_rates, max_rate);
330 }
331 
332 static int intel_dp_common_rate(struct intel_dp *intel_dp, int index)
333 {
334 	if (drm_WARN_ON(&dp_to_i915(intel_dp)->drm,
335 			index < 0 || index >= intel_dp->num_common_rates))
336 		return 162000;
337 
338 	return intel_dp->common_rates[index];
339 }
340 
341 /* Theoretical max between source and sink */
342 int intel_dp_max_common_rate(struct intel_dp *intel_dp)
343 {
344 	return intel_dp_common_rate(intel_dp, intel_dp->num_common_rates - 1);
345 }
346 
347 static int intel_dp_max_source_lane_count(struct intel_digital_port *dig_port)
348 {
349 	int vbt_max_lanes = intel_bios_dp_max_lane_count(dig_port->base.devdata);
350 	int max_lanes = dig_port->max_lanes;
351 
352 	if (vbt_max_lanes)
353 		max_lanes = min(max_lanes, vbt_max_lanes);
354 
355 	return max_lanes;
356 }
357 
358 /* Theoretical max between source and sink */
359 int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
360 {
361 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
362 	int source_max = intel_dp_max_source_lane_count(dig_port);
363 	int sink_max = intel_dp->max_sink_lane_count;
364 	int lane_max = intel_tc_port_max_lane_count(dig_port);
365 	int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps);
366 
367 	if (lttpr_max)
368 		sink_max = min(sink_max, lttpr_max);
369 
370 	return min3(source_max, sink_max, lane_max);
371 }
372 
373 int intel_dp_max_lane_count(struct intel_dp *intel_dp)
374 {
375 	switch (intel_dp->max_link_lane_count) {
376 	case 1:
377 	case 2:
378 	case 4:
379 		return intel_dp->max_link_lane_count;
380 	default:
381 		MISSING_CASE(intel_dp->max_link_lane_count);
382 		return 1;
383 	}
384 }
385 
/*
 * The required data bandwidth for a mode with given pixel clock and bpp. This
 * is the required net bandwidth independent of the data bandwidth efficiency.
 *
 * Returns the required rate in kBps units, rounded up.
 *
 * TODO: check if callers of this functions should use
 * intel_dp_effective_data_rate() instead.
 */
int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
	return DIV_ROUND_UP(pixel_clock * bpp, 8);
}
399 
/**
 * intel_dp_effective_data_rate - Return the pixel data rate accounting for BW allocation overhead
 * @pixel_clock: pixel clock in kHz
 * @bpp_x16: bits per pixel .4 fixed point format
 * @bw_overhead: BW allocation overhead in 1ppm units
 *
 * Return the effective pixel data rate in kB/sec units taking into account
 * the provided SSC, FEC, DSC BW allocation overhead.
 */
int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16,
				 int bw_overhead)
{
	/*
	 * bw_overhead is in 1 ppm units and bpp_x16 is .4 fixed point, hence
	 * the 1000000 * 16 divisor; the extra 8 converts bits to Bytes.
	 */
	return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_clock * bpp_x16, bw_overhead),
				1000000 * 16 * 8);
}
415 
416 /**
417  * intel_dp_max_link_data_rate: Calculate the maximum rate for the given link params
418  * @intel_dp: Intel DP object
419  * @max_dprx_rate: Maximum data rate of the DPRX
420  * @max_dprx_lanes: Maximum lane count of the DPRX
421  *
422  * Calculate the maximum data rate for the provided link parameters taking into
423  * account any BW limitations by a DP tunnel attached to @intel_dp.
424  *
425  * Returns the maximum data rate in kBps units.
426  */
427 int intel_dp_max_link_data_rate(struct intel_dp *intel_dp,
428 				int max_dprx_rate, int max_dprx_lanes)
429 {
430 	int max_rate = drm_dp_max_dprx_data_rate(max_dprx_rate, max_dprx_lanes);
431 
432 	if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
433 		max_rate = min(max_rate,
434 			       drm_dp_tunnel_available_bw(intel_dp->tunnel));
435 
436 	return max_rate;
437 }
438 
439 bool intel_dp_has_bigjoiner(struct intel_dp *intel_dp)
440 {
441 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
442 	struct intel_encoder *encoder = &intel_dig_port->base;
443 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
444 
445 	/* eDP MSO is not compatible with joiner */
446 	if (intel_dp->mso_link_count)
447 		return false;
448 
449 	return DISPLAY_VER(dev_priv) >= 12 ||
450 		(DISPLAY_VER(dev_priv) == 11 &&
451 		 encoder->port != PORT_A);
452 }
453 
static int dg2_max_source_rate(struct intel_dp *intel_dp)
{
	/* On DG2 eDP is limited to HBR3; DP proper can go up to UHBR13.5. */
	if (intel_dp_is_edp(intel_dp))
		return 810000;

	return 1350000;
}
458 
459 static int icl_max_source_rate(struct intel_dp *intel_dp)
460 {
461 	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
462 
463 	if (intel_encoder_is_combo(encoder) && !intel_dp_is_edp(intel_dp))
464 		return 540000;
465 
466 	return 810000;
467 }
468 
static int ehl_max_source_rate(struct intel_dp *intel_dp)
{
	/* On EHL/JSL eDP is limited to HBR2; DP proper can do HBR3. */
	return intel_dp_is_edp(intel_dp) ? 540000 : 810000;
}
476 
477 static int mtl_max_source_rate(struct intel_dp *intel_dp)
478 {
479 	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
480 
481 	if (intel_encoder_is_c10phy(encoder))
482 		return 810000;
483 
484 	return 2000000;
485 }
486 
487 static int vbt_max_link_rate(struct intel_dp *intel_dp)
488 {
489 	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
490 	int max_rate;
491 
492 	max_rate = intel_bios_dp_max_link_rate(encoder->devdata);
493 
494 	if (intel_dp_is_edp(intel_dp)) {
495 		struct intel_connector *connector = intel_dp->attached_connector;
496 		int edp_max_rate = connector->panel.vbt.edp.max_link_rate;
497 
498 		if (max_rate && edp_max_rate)
499 			max_rate = min(max_rate, edp_max_rate);
500 		else if (edp_max_rate)
501 			max_rate = edp_max_rate;
502 	}
503 
504 	return max_rate;
505 }
506 
/* Select the platform's supported link-rate table, capped by PHY/VBT limits. */
static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The values must be in increasing order */
	static const int mtl_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000, 675000,
		810000,	1000000, 2000000,
	};
	static const int icl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000,
		1000000, 1350000,
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate;

	/* This should only be done once */
	drm_WARN_ON(&dev_priv->drm,
		    intel_dp->source_rates || intel_dp->num_source_rates);

	/* Pick the rate table and the platform/PHY specific max rate. */
	if (DISPLAY_VER(dev_priv) >= 14) {
		source_rates = mtl_rates;
		size = ARRAY_SIZE(mtl_rates);
		max_rate = mtl_max_source_rate(intel_dp);
	} else if (DISPLAY_VER(dev_priv) >= 11) {
		source_rates = icl_rates;
		size = ARRAY_SIZE(icl_rates);
		if (IS_DG2(dev_priv))
			max_rate = dg2_max_source_rate(intel_dp);
		else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
			 IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
			max_rate = 810000;
		else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
			max_rate = ehl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
	} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (DISPLAY_VER(dev_priv) == 9) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HASWELL_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	/* Combine the non-zero platform and VBT limits (0 means no limit). */
	vbt_max_rate = vbt_max_link_rate(intel_dp);
	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	/* Trim the table to rates not above the combined limit. */
	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}
583 
584 static int intersect_rates(const int *source_rates, int source_len,
585 			   const int *sink_rates, int sink_len,
586 			   int *common_rates)
587 {
588 	int i = 0, j = 0, k = 0;
589 
590 	while (i < source_len && j < sink_len) {
591 		if (source_rates[i] == sink_rates[j]) {
592 			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
593 				return k;
594 			common_rates[k] = source_rates[i];
595 			++k;
596 			++i;
597 			++j;
598 		} else if (source_rates[i] < sink_rates[j]) {
599 			++i;
600 		} else {
601 			++j;
602 		}
603 	}
604 	return k;
605 }
606 
/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int i = 0;

	while (i < len) {
		if (rates[i] == rate)
			return i;
		i++;
	}

	return -1;
}
618 
619 static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
620 {
621 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
622 
623 	drm_WARN_ON(&i915->drm,
624 		    !intel_dp->num_source_rates || !intel_dp->num_sink_rates);
625 
626 	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
627 						     intel_dp->num_source_rates,
628 						     intel_dp->sink_rates,
629 						     intel_dp->num_sink_rates,
630 						     intel_dp->common_rates);
631 
632 	/* Paranoia, there should always be something in common. */
633 	if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) {
634 		intel_dp->common_rates[0] = 162000;
635 		intel_dp->num_common_rates = 1;
636 	}
637 }
638 
639 static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
640 				       u8 lane_count)
641 {
642 	/*
643 	 * FIXME: we need to synchronize the current link parameters with
644 	 * hardware readout. Currently fast link training doesn't work on
645 	 * boot-up.
646 	 */
647 	if (link_rate == 0 ||
648 	    link_rate > intel_dp->max_link_rate)
649 		return false;
650 
651 	if (lane_count == 0 ||
652 	    lane_count > intel_dp_max_lane_count(intel_dp))
653 		return false;
654 
655 	return true;
656 }
657 
658 static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
659 						     int link_rate,
660 						     u8 lane_count)
661 {
662 	/* FIXME figure out what we actually want here */
663 	const struct drm_display_mode *fixed_mode =
664 		intel_panel_preferred_fixed_mode(intel_dp->attached_connector);
665 	int mode_rate, max_rate;
666 
667 	mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
668 	max_rate = intel_dp_max_link_data_rate(intel_dp, link_rate, lane_count);
669 	if (mode_rate > max_rate)
670 		return false;
671 
672 	return true;
673 }
674 
/*
 * Reduce the link parameters (first the rate, then the lane count) after a
 * failed link training, so the next attempt can use less demanding settings.
 *
 * Returns 0 when a retry is warranted (with fallback, max or the same
 * parameters), -1 when no further fallback is possible.
 */
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, u8 lane_count)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int index;

	/*
	 * TODO: Enable fallback on MST links once MST link compute can handle
	 * the fallback params.
	 */
	if (intel_dp->is_mst) {
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	/* For eDP first retry with the max parameters before reducing them. */
	if (intel_dp_is_edp(intel_dp) && !intel_dp->use_max_params) {
		drm_dbg_kms(&i915->drm,
			    "Retrying Link training for eDP with max parameters\n");
		intel_dp->use_max_params = true;
		return 0;
	}

	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
				    link_rate);
	if (index > 0) {
		/* Try the next lower common link rate, keeping the lane count. */
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp_common_rate(intel_dp, index - 1),
							      lane_count)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp_common_rate(intel_dp, index - 1);
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		/* Already at the lowest rate: halve the lane count at max rate. */
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp_max_common_rate(intel_dp),
							      lane_count >> 1)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		/* Lowest rate and a single lane already: give up. */
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}
729 
/* Scale mode_clock (kHz) by the ppm-unit DP FEC overhead factor (~2.853 %). */
u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
{
	return div_u64(mul_u32_u32(mode_clock, DP_DSC_FEC_OVERHEAD_FACTOR),
		       1000000U);
}
735 
736 int intel_dp_bw_fec_overhead(bool fec_enabled)
737 {
738 	/*
739 	 * TODO: Calculate the actual overhead for a given mode.
740 	 * The hard-coded 1/0.972261=2.853% overhead factor
741 	 * corresponds (for instance) to the 8b/10b DP FEC 2.4% +
742 	 * 0.453% DSC overhead. This is enough for a 3840 width mode,
743 	 * which has a DSC overhead of up to ~0.2%, but may not be
744 	 * enough for a 1024 width mode where this is ~0.8% (on a 4
745 	 * lane DP link, with 2 DSC slices and 8 bpp color depth).
746 	 */
747 	return fec_enabled ? DP_DSC_FEC_OVERHEAD_FACTOR : 1000000;
748 }
749 
/* Small joiner RAM size in bits; it grew over display generations. */
static int
small_joiner_ram_size_bits(struct drm_i915_private *i915)
{
	int ram_size_bytes;

	if (DISPLAY_VER(i915) >= 13)
		ram_size_bytes = 17280;
	else if (DISPLAY_VER(i915) >= 11)
		ram_size_bytes = 7680;
	else
		ram_size_bytes = 6144;

	return ram_size_bytes * 8;
}
760 
/*
 * Clamp/snap @bpp to the nearest DSC output bpp the HW supports, given the
 * uncompressed @pipe_bpp. Returns 0 when @bpp is below the minimum valid bpp.
 */
u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 pipe_bpp)
{
	u32 bits_per_pixel = bpp;
	int i;

	/* Error out if the max bpp is less than smallest allowed valid bpp */
	if (bits_per_pixel < valid_dsc_bpp[0]) {
		drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
			    bits_per_pixel, valid_dsc_bpp[0]);
		return 0;
	}

	/* From XE_LPD onwards we support from bpc up to uncompressed bpp-1 BPPs */
	if (DISPLAY_VER(i915) >= 13) {
		bits_per_pixel = min(bits_per_pixel, pipe_bpp - 1);

		/*
		 * According to BSpec, 27 is the max DSC output bpp,
		 * 8 is the min DSC output bpp.
		 * While we can still clamp higher bpp values to 27, saving bandwidth,
		 * if it is required to compress up to bpp < 8, means we can't do
		 * that and probably means we can't fit the required mode, even with
		 * DSC enabled.
		 */
		if (bits_per_pixel < 8) {
			drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min 8\n",
				    bits_per_pixel);
			return 0;
		}
		bits_per_pixel = min_t(u32, bits_per_pixel, 27);
	} else {
		/* Find the nearest match in the array of known BPPs from VESA */
		for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
			if (bits_per_pixel < valid_dsc_bpp[i + 1])
				break;
		}
		drm_dbg_kms(&i915->drm, "Set dsc bpp from %d to VESA %d\n",
			    bits_per_pixel, valid_dsc_bpp[i]);

		bits_per_pixel = valid_dsc_bpp[i];
	}

	return bits_per_pixel;
}
805 
/*
 * Max compressed bpp the joiner hardware can pass through for the given mode,
 * limited by the small joiner RAM and, for bigjoiner, by the CDCLK bandwidth.
 */
static
u32 get_max_compressed_bpp_with_joiner(struct drm_i915_private *i915,
				       u32 mode_clock, u32 mode_hdisplay,
				       bool bigjoiner)
{
	u32 max_bpp_small_joiner_ram;

	/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
	max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) / mode_hdisplay;

	if (bigjoiner) {
		int bigjoiner_interface_bits = DISPLAY_VER(i915) >= 14 ? 36 : 24;
		/* With bigjoiner multiple dsc engines are used in parallel so PPC is 2 */
		int ppc = 2;
		u32 max_bpp_bigjoiner =
			i915->display.cdclk.max_cdclk_freq * ppc * bigjoiner_interface_bits /
			intel_dp_mode_to_fec_clock(mode_clock);

		/* Each pipe of the bigjoiner pair contributes its own joiner RAM. */
		max_bpp_small_joiner_ram *= 2;

		return min(max_bpp_small_joiner_ram, max_bpp_bigjoiner);
	}

	return max_bpp_small_joiner_ram;
}
831 
/*
 * Compute the max DSC compressed bpp supportable for the given link and mode,
 * clamped by the joiner limits and snapped to a valid DSC bpp value.
 * Returns 0 when no valid compressed bpp can be supported.
 */
u16 intel_dp_dsc_get_max_compressed_bpp(struct drm_i915_private *i915,
					u32 link_clock, u32 lane_count,
					u32 mode_clock, u32 mode_hdisplay,
					bool bigjoiner,
					enum intel_output_format output_format,
					u32 pipe_bpp,
					u32 timeslots)
{
	u32 bits_per_pixel, joiner_max_bpp;

	/*
	 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
	 * (LinkSymbolClock)* 8 * (TimeSlots / 64)
	 * for SST -> TimeSlots is 64(i.e all TimeSlots that are available)
	 * for MST -> TimeSlots has to be calculated, based on mode requirements
	 *
	 * Due to FEC overhead, the available bw is reduced to 97.2261%.
	 * To support the given mode:
	 * Bandwidth required should be <= Available link Bandwidth * FEC Overhead
	 * =>ModeClock * bits_per_pixel <= Available Link Bandwidth * FEC Overhead
	 * =>bits_per_pixel <= Available link Bandwidth * FEC Overhead / ModeClock
	 * =>bits_per_pixel <= (NumberOfLanes * LinkSymbolClock) * 8 (TimeSlots / 64) /
	 *		       (ModeClock / FEC Overhead)
	 * =>bits_per_pixel <= (NumberOfLanes * LinkSymbolClock * TimeSlots) /
	 *		       (ModeClock / FEC Overhead * 8)
	 */
	bits_per_pixel = ((link_clock * lane_count) * timeslots) /
			 (intel_dp_mode_to_fec_clock(mode_clock) * 8);

	/* Bandwidth required for 420 is half, that of 444 format */
	if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		bits_per_pixel *= 2;

	/*
	 * According to DSC 1.2a Section 4.1.1 Table 4.1 the maximum
	 * supported PPS value can be 63.9375 and with the further
	 * mention that for 420, 422 formats, bpp should be programmed double
	 * the target bpp restricting our target bpp to be 31.9375 at max.
	 */
	if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		bits_per_pixel = min_t(u32, bits_per_pixel, 31);

	drm_dbg_kms(&i915->drm, "Max link bpp is %u for %u timeslots "
				"total bw %u pixel clock %u\n",
				bits_per_pixel, timeslots,
				(link_clock * lane_count * 8),
				intel_dp_mode_to_fec_clock(mode_clock));

	/* Clamp to what the (small/big) joiner hardware can pass through. */
	joiner_max_bpp = get_max_compressed_bpp_with_joiner(i915, mode_clock,
							    mode_hdisplay, bigjoiner);
	bits_per_pixel = min(bits_per_pixel, joiner_max_bpp);

	/* Snap to a bpp value the DSC HW/spec actually supports. */
	bits_per_pixel = intel_dp_dsc_nearest_valid_bpp(i915, bits_per_pixel, pipe_bpp);

	return bits_per_pixel;
}
888 
/*
 * Compute a viable DSC slice count for the given mode, based on the DSC
 * engine throughput limits, the sink's max slice width/count and the
 * bigjoiner configuration. Returns 0 when no valid slice count exists.
 */
u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
				int mode_clock, int mode_hdisplay,
				bool bigjoiner)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	u8 min_slice_count, i;
	int max_slice_width;

	/* Pick the encoder throughput limit matching the pixel rate range. */
	if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_0);
	else
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_1);

	/*
	 * Due to some DSC engine BW limitations, we need to enable second
	 * slice and VDSC engine, whenever we approach close enough to max CDCLK
	 */
	if (mode_clock >= ((i915->display.cdclk.max_cdclk_freq * 85) / 100))
		min_slice_count = max_t(u8, min_slice_count, 2);

	max_slice_width = drm_dp_dsc_sink_max_slice_width(connector->dp.dsc_dpcd);
	if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
		drm_dbg_kms(&i915->drm,
			    "Unsupported slice width %d by DP DSC Sink device\n",
			    max_slice_width);
		return 0;
	}
	/* Also take into account max slice width */
	min_slice_count = max_t(u8, min_slice_count,
				DIV_ROUND_UP(mode_hdisplay,
					     max_slice_width));

	/* Find the closest match to the valid slice count values */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
		/* With bigjoiner both pipes' DSC engines run, doubling the slices. */
		u8 test_slice_count = valid_dsc_slicecount[i] << bigjoiner;

		if (test_slice_count >
		    drm_dp_dsc_sink_max_slice_count(connector->dp.dsc_dpcd, false))
			break;

		/* big joiner needs small joiner to be enabled */
		if (bigjoiner && test_slice_count < 4)
			continue;

		if (min_slice_count <= test_slice_count)
			return test_slice_count;
	}

	drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
		    min_slice_count);
	return 0;
}
943 
944 static bool source_can_output(struct intel_dp *intel_dp,
945 			      enum intel_output_format format)
946 {
947 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
948 
949 	switch (format) {
950 	case INTEL_OUTPUT_FORMAT_RGB:
951 		return true;
952 
953 	case INTEL_OUTPUT_FORMAT_YCBCR444:
954 		/*
955 		 * No YCbCr output support on gmch platforms.
956 		 * Also, ILK doesn't seem capable of DP YCbCr output.
957 		 * The displayed image is severly corrupted. SNB+ is fine.
958 		 */
959 		return !HAS_GMCH(i915) && !IS_IRONLAKE(i915);
960 
961 	case INTEL_OUTPUT_FORMAT_YCBCR420:
962 		/* Platform < Gen 11 cannot output YCbCr420 format */
963 		return DISPLAY_VER(i915) >= 11;
964 
965 	default:
966 		MISSING_CASE(format);
967 		return false;
968 	}
969 }
970 
971 static bool
972 dfp_can_convert_from_rgb(struct intel_dp *intel_dp,
973 			 enum intel_output_format sink_format)
974 {
975 	if (!drm_dp_is_branch(intel_dp->dpcd))
976 		return false;
977 
978 	if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR444)
979 		return intel_dp->dfp.rgb_to_ycbcr;
980 
981 	if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420)
982 		return intel_dp->dfp.rgb_to_ycbcr &&
983 			intel_dp->dfp.ycbcr_444_to_420;
984 
985 	return false;
986 }
987 
988 static bool
989 dfp_can_convert_from_ycbcr444(struct intel_dp *intel_dp,
990 			      enum intel_output_format sink_format)
991 {
992 	if (!drm_dp_is_branch(intel_dp->dpcd))
993 		return false;
994 
995 	if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420)
996 		return intel_dp->dfp.ycbcr_444_to_420;
997 
998 	return false;
999 }
1000 
1001 static bool
1002 dfp_can_convert(struct intel_dp *intel_dp,
1003 		enum intel_output_format output_format,
1004 		enum intel_output_format sink_format)
1005 {
1006 	switch (output_format) {
1007 	case INTEL_OUTPUT_FORMAT_RGB:
1008 		return dfp_can_convert_from_rgb(intel_dp, sink_format);
1009 	case INTEL_OUTPUT_FORMAT_YCBCR444:
1010 		return dfp_can_convert_from_ycbcr444(intel_dp, sink_format);
1011 	default:
1012 		MISSING_CASE(output_format);
1013 		return false;
1014 	}
1015 
1016 	return false;
1017 }
1018 
/*
 * Pick the source output format for a given sink format: RGB when possible,
 * otherwise YCbCr 4:4:4, otherwise YCbCr 4:2:0, preferring formats the DFP
 * can convert from.
 */
static enum intel_output_format
intel_dp_output_format(struct intel_connector *connector,
		       enum intel_output_format sink_format)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	enum intel_output_format force_dsc_output_format =
		intel_dp->force_dsc_output_format;
	enum intel_output_format output_format;
	/* A forced DSC output format takes precedence when it is usable. */
	if (force_dsc_output_format) {
		/*
		 * Usable means: the source can emit it, and either there is
		 * no branch device in between, or the sink takes it natively,
		 * or the DFP can convert it to the sink's format.
		 */
		if (source_can_output(intel_dp, force_dsc_output_format) &&
		    (!drm_dp_is_branch(intel_dp->dpcd) ||
		     sink_format != force_dsc_output_format ||
		     dfp_can_convert(intel_dp, force_dsc_output_format, sink_format)))
			return force_dsc_output_format;

		drm_dbg_kms(&i915->drm, "Cannot force DSC output format\n");
	}

	if (sink_format == INTEL_OUTPUT_FORMAT_RGB ||
	    dfp_can_convert_from_rgb(intel_dp, sink_format))
		output_format = INTEL_OUTPUT_FORMAT_RGB;

	else if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR444 ||
		 dfp_can_convert_from_ycbcr444(intel_dp, sink_format))
		output_format = INTEL_OUTPUT_FORMAT_YCBCR444;

	else
		output_format = INTEL_OUTPUT_FORMAT_YCBCR420;

	drm_WARN_ON(&i915->drm, !source_can_output(intel_dp, output_format));

	return output_format;
}
1053 
1054 int intel_dp_min_bpp(enum intel_output_format output_format)
1055 {
1056 	if (output_format == INTEL_OUTPUT_FORMAT_RGB)
1057 		return 6 * 3;
1058 	else
1059 		return 8 * 3;
1060 }
1061 
1062 int intel_dp_output_bpp(enum intel_output_format output_format, int bpp)
1063 {
1064 	/*
1065 	 * bpp value was assumed to RGB format. And YCbCr 4:2:0 output
1066 	 * format of the number of bytes per pixel will be half the number
1067 	 * of bytes of RGB pixel.
1068 	 */
1069 	if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
1070 		bpp /= 2;
1071 
1072 	return bpp;
1073 }
1074 
1075 static enum intel_output_format
1076 intel_dp_sink_format(struct intel_connector *connector,
1077 		     const struct drm_display_mode *mode)
1078 {
1079 	const struct drm_display_info *info = &connector->base.display_info;
1080 
1081 	if (drm_mode_is_420_only(info, mode))
1082 		return INTEL_OUTPUT_FORMAT_YCBCR420;
1083 
1084 	return INTEL_OUTPUT_FORMAT_RGB;
1085 }
1086 
1087 static int
1088 intel_dp_mode_min_output_bpp(struct intel_connector *connector,
1089 			     const struct drm_display_mode *mode)
1090 {
1091 	enum intel_output_format output_format, sink_format;
1092 
1093 	sink_format = intel_dp_sink_format(connector, mode);
1094 
1095 	output_format = intel_dp_output_format(connector, sink_format);
1096 
1097 	return intel_dp_output_bpp(output_format, intel_dp_min_bpp(output_format));
1098 }
1099 
1100 static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
1101 				  int hdisplay)
1102 {
1103 	/*
1104 	 * Older platforms don't like hdisplay==4096 with DP.
1105 	 *
1106 	 * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
1107 	 * and frame counter increment), but we don't get vblank interrupts,
1108 	 * and the pipe underruns immediately. The link also doesn't seem
1109 	 * to get trained properly.
1110 	 *
1111 	 * On CHV the vblank interrupts don't seem to disappear but
1112 	 * otherwise the symptoms are similar.
1113 	 *
1114 	 * TODO: confirm the behaviour on HSW+
1115 	 */
1116 	return hdisplay == 4096 && !HAS_DDI(dev_priv);
1117 }
1118 
1119 static int intel_dp_max_tmds_clock(struct intel_dp *intel_dp)
1120 {
1121 	struct intel_connector *connector = intel_dp->attached_connector;
1122 	const struct drm_display_info *info = &connector->base.display_info;
1123 	int max_tmds_clock = intel_dp->dfp.max_tmds_clock;
1124 
1125 	/* Only consider the sink's max TMDS clock if we know this is a HDMI DFP */
1126 	if (max_tmds_clock && info->max_tmds_clock)
1127 		max_tmds_clock = min(max_tmds_clock, info->max_tmds_clock);
1128 
1129 	return max_tmds_clock;
1130 }
1131 
1132 static enum drm_mode_status
1133 intel_dp_tmds_clock_valid(struct intel_dp *intel_dp,
1134 			  int clock, int bpc,
1135 			  enum intel_output_format sink_format,
1136 			  bool respect_downstream_limits)
1137 {
1138 	int tmds_clock, min_tmds_clock, max_tmds_clock;
1139 
1140 	if (!respect_downstream_limits)
1141 		return MODE_OK;
1142 
1143 	tmds_clock = intel_hdmi_tmds_clock(clock, bpc, sink_format);
1144 
1145 	min_tmds_clock = intel_dp->dfp.min_tmds_clock;
1146 	max_tmds_clock = intel_dp_max_tmds_clock(intel_dp);
1147 
1148 	if (min_tmds_clock && tmds_clock < min_tmds_clock)
1149 		return MODE_CLOCK_LOW;
1150 
1151 	if (max_tmds_clock && tmds_clock > max_tmds_clock)
1152 		return MODE_CLOCK_HIGH;
1153 
1154 	return MODE_OK;
1155 }
1156 
/*
 * Validate @mode against the limits of the downstream facing port
 * (PCON FRL bandwidth, DFP dotclock, TMDS clock range).
 */
static enum drm_mode_status
intel_dp_mode_valid_downstream(struct intel_connector *connector,
			       const struct drm_display_mode *mode,
			       int target_clock)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	const struct drm_display_info *info = &connector->base.display_info;
	enum drm_mode_status status;
	enum intel_output_format sink_format;

	/* If PCON supports FRL MODE, check FRL bandwidth constraints */
	if (intel_dp->dfp.pcon_max_frl_bw) {
		int target_bw;
		int max_frl_bw;
		int bpp = intel_dp_mode_min_output_bpp(connector, mode);

		target_bw = bpp * target_clock;

		max_frl_bw = intel_dp->dfp.pcon_max_frl_bw;

		/* converting bw from Gbps to Kbps */
		max_frl_bw = max_frl_bw * 1000000;

		if (target_bw > max_frl_bw)
			return MODE_CLOCK_HIGH;

		return MODE_OK;
	}

	if (intel_dp->dfp.max_dotclock &&
	    target_clock > intel_dp->dfp.max_dotclock)
		return MODE_CLOCK_HIGH;

	sink_format = intel_dp_sink_format(connector, mode);

	/* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */
	status = intel_dp_tmds_clock_valid(intel_dp, target_clock,
					   8, sink_format, true);

	if (status != MODE_OK) {
		/* Retry as YCbCr 4:2:0 if the mode and connector allow it. */
		if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
		    !connector->base.ycbcr_420_allowed ||
		    !drm_mode_is_420_also(info, mode))
			return status;
		sink_format = INTEL_OUTPUT_FORMAT_YCBCR420;
		status = intel_dp_tmds_clock_valid(intel_dp, target_clock,
						   8, sink_format, true);
		if (status != MODE_OK)
			return status;
	}

	return MODE_OK;
}
1210 
1211 bool intel_dp_need_bigjoiner(struct intel_dp *intel_dp,
1212 			     struct intel_connector *connector,
1213 			     int hdisplay, int clock)
1214 {
1215 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1216 
1217 	if (!intel_dp_has_bigjoiner(intel_dp))
1218 		return false;
1219 
1220 	return clock > i915->display.cdclk.max_dotclk_freq || hdisplay > 5120 ||
1221 	       connector->force_bigjoiner_enable;
1222 }
1223 
/*
 * drm_connector_helper_funcs .mode_valid() hook: filter out modes the
 * transcoder, link bandwidth (with or without DSC), bigjoiner and
 * downstream device cannot handle.
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *_connector,
		    struct drm_display_mode *mode)
{
	struct intel_connector *connector = to_intel_connector(_connector);
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	const struct drm_display_mode *fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk = dev_priv->display.cdclk.max_dotclk_freq;
	u16 dsc_max_compressed_bpp = 0;
	u8 dsc_slice_count = 0;
	enum drm_mode_status status;
	bool dsc = false, bigjoiner = false;

	status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
	if (status != MODE_OK)
		return status;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	/* On eDP, validate against the panel's fixed mode and use its clock. */
	fixed_mode = intel_panel_fixed_mode(connector, mode);
	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		status = intel_panel_mode_valid(connector, mode);
		if (status != MODE_OK)
			return status;

		target_clock = fixed_mode->clock;
	}

	/* Bigjoiner drives the output with two pipes, doubling the dotclock limit. */
	if (intel_dp_need_bigjoiner(intel_dp, connector,
				    mode->hdisplay, target_clock)) {
		bigjoiner = true;
		max_dotclk *= 2;
	}
	if (target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
		return MODE_H_ILLEGAL;

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_link_data_rate(intel_dp, max_link_clock, max_lanes);

	/* Link bandwidth needed at the minimum output bpp for this mode. */
	mode_rate = intel_dp_link_required(target_clock,
					   intel_dp_mode_min_output_bpp(connector, mode));

	/* Determine whether DSC could make an otherwise too-big mode fit. */
	if (HAS_DSC(dev_priv) &&
	    drm_dp_sink_supports_dsc(connector->dp.dsc_dpcd)) {
		enum intel_output_format sink_format, output_format;
		int pipe_bpp;

		sink_format = intel_dp_sink_format(connector, mode);
		output_format = intel_dp_output_format(connector, sink_format);
		/*
		 * TBD pass the connector BPC,
		 * for now U8_MAX so that max BPC on that platform would be picked
		 */
		pipe_bpp = intel_dp_dsc_compute_max_bpp(connector, U8_MAX);

		/*
		 * Output bpp is stored in 6.4 format so right shift by 4 to get the
		 * integer value since we support only integer values of bpp.
		 */
		if (intel_dp_is_edp(intel_dp)) {
			dsc_max_compressed_bpp =
				drm_edp_dsc_sink_output_bpp(connector->dp.dsc_dpcd) >> 4;
			dsc_slice_count =
				drm_dp_dsc_sink_max_slice_count(connector->dp.dsc_dpcd,
								true);
		} else if (drm_dp_sink_supports_fec(connector->dp.fec_capability)) {
			dsc_max_compressed_bpp =
				intel_dp_dsc_get_max_compressed_bpp(dev_priv,
								    max_link_clock,
								    max_lanes,
								    target_clock,
								    mode->hdisplay,
								    bigjoiner,
								    output_format,
								    pipe_bpp, 64);
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(connector,
							     target_clock,
							     mode->hdisplay,
							     bigjoiner);
		}

		/* DSC is only usable if both a compressed bpp and slice count exist. */
		dsc = dsc_max_compressed_bpp && dsc_slice_count;
	}

	if (intel_dp_joiner_needs_dsc(dev_priv, bigjoiner) && !dsc)
		return MODE_CLOCK_HIGH;

	if (mode_rate > max_rate && !dsc)
		return MODE_CLOCK_HIGH;

	status = intel_dp_mode_valid_downstream(connector, mode, target_clock);
	if (status != MODE_OK)
		return status;

	return intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner);
}
1333 
1334 bool intel_dp_source_supports_tps3(struct drm_i915_private *i915)
1335 {
1336 	return DISPLAY_VER(i915) >= 9 || IS_BROADWELL(i915) || IS_HASWELL(i915);
1337 }
1338 
/* Training pattern 4 is supported on display version 10 and newer. */
bool intel_dp_source_supports_tps4(struct drm_i915_private *i915)
{
	return DISPLAY_VER(i915) >= 10;
}
1343 
/*
 * Format @nelem integers from @array into @str as a comma separated list,
 * truncating silently if the buffer fills up. @str is always NUL-terminated
 * (assuming len > 0).
 *
 * Fix: snprintf() returns a negative value on output error; the old
 * "r >= len" check only caught that case by accident through the implicit
 * int -> size_t conversion. Check for the error explicitly and compare
 * sizes with matching signedness.
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);

		/* Stop on output error or once the buffer is exhausted. */
		if (r < 0 || (size_t)r >= len)
			return;
		str += r;
		len -= r;
	}
}
1359 
/* Dump the source, sink and common link rates to the KMS debug log. */
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	char str[128]; /* FIXME: too big for stack? */

	/* Skip the string formatting work unless KMS debugging is enabled. */
	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	snprintf_int_array(str, sizeof(str),
			   intel_dp->source_rates, intel_dp->num_source_rates);
	drm_dbg_kms(&i915->drm, "source rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->sink_rates, intel_dp->num_sink_rates);
	drm_dbg_kms(&i915->drm, "sink rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->common_rates, intel_dp->num_common_rates);
	drm_dbg_kms(&i915->drm, "common rates: %s\n", str);
}
1380 
1381 int
1382 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1383 {
1384 	int len;
1385 
1386 	len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
1387 
1388 	return intel_dp_common_rate(intel_dp, len - 1);
1389 }
1390 
1391 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1392 {
1393 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1394 	int i = intel_dp_rate_index(intel_dp->sink_rates,
1395 				    intel_dp->num_sink_rates, rate);
1396 
1397 	if (drm_WARN_ON(&i915->drm, i < 0))
1398 		i = 0;
1399 
1400 	return i;
1401 }
1402 
1403 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1404 			   u8 *link_bw, u8 *rate_select)
1405 {
1406 	/* eDP 1.4 rate select method. */
1407 	if (intel_dp->use_rate_select) {
1408 		*link_bw = 0;
1409 		*rate_select =
1410 			intel_dp_rate_select(intel_dp, port_clock);
1411 	} else {
1412 		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1413 		*rate_select = 0;
1414 	}
1415 }
1416 
1417 bool intel_dp_has_hdmi_sink(struct intel_dp *intel_dp)
1418 {
1419 	struct intel_connector *connector = intel_dp->attached_connector;
1420 
1421 	return connector->base.display_info.is_hdmi;
1422 }
1423 
1424 static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
1425 					 const struct intel_crtc_state *pipe_config)
1426 {
1427 	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
1428 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1429 
1430 	if (DISPLAY_VER(dev_priv) >= 12)
1431 		return true;
1432 
1433 	if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A &&
1434 	    !intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
1435 		return true;
1436 
1437 	return false;
1438 }
1439 
1440 bool intel_dp_supports_fec(struct intel_dp *intel_dp,
1441 			   const struct intel_connector *connector,
1442 			   const struct intel_crtc_state *pipe_config)
1443 {
1444 	return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
1445 		drm_dp_sink_supports_fec(connector->dp.fec_capability);
1446 }
1447 
1448 static bool intel_dp_supports_dsc(const struct intel_connector *connector,
1449 				  const struct intel_crtc_state *crtc_state)
1450 {
1451 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP) && !crtc_state->fec_enable)
1452 		return false;
1453 
1454 	return intel_dsc_source_support(crtc_state) &&
1455 		connector->dp.dsc_decompression_aux &&
1456 		drm_dp_sink_supports_dsc(connector->dp.dsc_dpcd);
1457 }
1458 
1459 static int intel_dp_hdmi_compute_bpc(struct intel_dp *intel_dp,
1460 				     const struct intel_crtc_state *crtc_state,
1461 				     int bpc, bool respect_downstream_limits)
1462 {
1463 	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
1464 
1465 	/*
1466 	 * Current bpc could already be below 8bpc due to
1467 	 * FDI bandwidth constraints or other limits.
1468 	 * HDMI minimum is 8bpc however.
1469 	 */
1470 	bpc = max(bpc, 8);
1471 
1472 	/*
1473 	 * We will never exceed downstream TMDS clock limits while
1474 	 * attempting deep color. If the user insists on forcing an
1475 	 * out of spec mode they will have to be satisfied with 8bpc.
1476 	 */
1477 	if (!respect_downstream_limits)
1478 		bpc = 8;
1479 
1480 	for (; bpc >= 8; bpc -= 2) {
1481 		if (intel_hdmi_bpc_possible(crtc_state, bpc,
1482 					    intel_dp_has_hdmi_sink(intel_dp)) &&
1483 		    intel_dp_tmds_clock_valid(intel_dp, clock, bpc, crtc_state->sink_format,
1484 					      respect_downstream_limits) == MODE_OK)
1485 			return bpc;
1486 	}
1487 
1488 	return -EINVAL;
1489 }
1490 
/*
 * Maximum pipe bpp for this link, clamped by DFP capabilities, HDMI DFP
 * TMDS limits and (on eDP) the VBT. Returns 0 if no valid bpp exists.
 */
static int intel_dp_max_bpp(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state,
			    bool respect_downstream_limits)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int bpp, bpc;

	/* Start from the pipe's bpc and clamp by the DFP's max bpc, if any. */
	bpc = crtc_state->pipe_bpp / 3;

	if (intel_dp->dfp.max_bpc)
		bpc = min_t(int, bpc, intel_dp->dfp.max_bpc);

	/*
	 * A non-zero min TMDS clock presumably means an HDMI/DVI DFP —
	 * matches the intel_dp_tmds_clock_valid() usage; confirm if extending.
	 */
	if (intel_dp->dfp.min_tmds_clock) {
		int max_hdmi_bpc;

		max_hdmi_bpc = intel_dp_hdmi_compute_bpc(intel_dp, crtc_state, bpc,
							 respect_downstream_limits);
		if (max_hdmi_bpc < 0)
			return 0;

		bpc = min(bpc, max_hdmi_bpc);
	}

	bpp = bpc * 3;
	if (intel_dp_is_edp(intel_dp)) {
		/* Get bpp from vbt only for panels that dont have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
		    intel_connector->panel.vbt.edp.bpp &&
		    intel_connector->panel.vbt.edp.bpp < bpp) {
			drm_dbg_kms(&dev_priv->drm,
				    "clamping bpp for eDP panel to BIOS-provided %i\n",
				    intel_connector->panel.vbt.edp.bpp);
			bpp = intel_connector->panel.vbt.edp.bpp;
		}
	}

	return bpp;
}
1530 
/* Adjust link config limits based on compliance test requests. */
void
intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  struct link_config_limits *limits)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/* For DP Compliance we override the computed bpp for the pipe */
	if (intel_dp->compliance.test_data.bpc != 0) {
		int bpp = 3 * intel_dp->compliance.test_data.bpc;

		/* Pin the pipe bpp to exactly the requested value. */
		limits->pipe.min_bpp = limits->pipe.max_bpp = bpp;
		pipe_config->dither_force_disable = bpp == 6 * 3;

		drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp);
	}

	/* Use values requested by Compliance Test Request */
	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		int index;

		/* Validate the compliance test data since max values
		 * might have changed due to link train fallback.
		 */
		if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
					       intel_dp->compliance.test_lane_count)) {
			index = intel_dp_rate_index(intel_dp->common_rates,
						    intel_dp->num_common_rates,
						    intel_dp->compliance.test_link_rate);
			/* Pin rate and lane count to the requested values. */
			if (index >= 0)
				limits->min_rate = limits->max_rate =
					intel_dp->compliance.test_link_rate;
			limits->min_lane_count = limits->max_lane_count =
				intel_dp->compliance.test_lane_count;
		}
	}
}
1569 
1570 static bool has_seamless_m_n(struct intel_connector *connector)
1571 {
1572 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1573 
1574 	/*
1575 	 * Seamless M/N reprogramming only implemented
1576 	 * for BDW+ double buffered M/N registers so far.
1577 	 */
1578 	return HAS_DOUBLE_BUFFERED_M_N(i915) &&
1579 		intel_panel_drrs_type(connector) == DRRS_TYPE_SEAMLESS;
1580 }
1581 
1582 static int intel_dp_mode_clock(const struct intel_crtc_state *crtc_state,
1583 			       const struct drm_connector_state *conn_state)
1584 {
1585 	struct intel_connector *connector = to_intel_connector(conn_state->connector);
1586 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1587 
1588 	/* FIXME a bit of a mess wrt clock vs. crtc_clock */
1589 	if (has_seamless_m_n(connector))
1590 		return intel_panel_highest_mode(connector, adjusted_mode)->clock;
1591 	else
1592 		return adjusted_mode->crtc_clock;
1593 }
1594 
/* Optimize link config in order: max bpp, min clock, min lanes */
static int
intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state,
				  const struct link_config_limits *limits)
{
	int bpp, i, lane_count, clock = intel_dp_mode_clock(pipe_config, conn_state);
	int mode_rate, link_rate, link_avail;

	/* Highest bpp first, stepping down 2 bits per component (6 bpp). */
	for (bpp = to_bpp_int(limits->link.max_bpp_x16);
	     bpp >= to_bpp_int(limits->link.min_bpp_x16);
	     bpp -= 2 * 3) {
		int link_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);

		mode_rate = intel_dp_link_required(clock, link_bpp);

		for (i = 0; i < intel_dp->num_common_rates; i++) {
			link_rate = intel_dp_common_rate(intel_dp, i);
			if (link_rate < limits->min_rate ||
			    link_rate > limits->max_rate)
				continue;

			for (lane_count = limits->min_lane_count;
			     lane_count <= limits->max_lane_count;
			     lane_count <<= 1) {
				link_avail = intel_dp_max_link_data_rate(intel_dp,
									 link_rate,
									 lane_count);


				/* First fit wins: best bpp, then rate, then lanes. */
				if (mode_rate <= link_avail) {
					pipe_config->lane_count = lane_count;
					pipe_config->pipe_bpp = bpp;
					pipe_config->port_clock = link_rate;

					return 0;
				}
			}
		}
	}

	/* No bpp/rate/lane combination has enough bandwidth. */
	return -EINVAL;
}
1639 
1640 static
1641 u8 intel_dp_dsc_max_src_input_bpc(struct drm_i915_private *i915)
1642 {
1643 	/* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
1644 	if (DISPLAY_VER(i915) >= 12)
1645 		return 12;
1646 	if (DISPLAY_VER(i915) == 11)
1647 		return 10;
1648 
1649 	return 0;
1650 }
1651 
1652 int intel_dp_dsc_compute_max_bpp(const struct intel_connector *connector,
1653 				 u8 max_req_bpc)
1654 {
1655 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1656 	int i, num_bpc;
1657 	u8 dsc_bpc[3] = {};
1658 	u8 dsc_max_bpc;
1659 
1660 	dsc_max_bpc = intel_dp_dsc_max_src_input_bpc(i915);
1661 
1662 	if (!dsc_max_bpc)
1663 		return dsc_max_bpc;
1664 
1665 	dsc_max_bpc = min_t(u8, dsc_max_bpc, max_req_bpc);
1666 
1667 	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(connector->dp.dsc_dpcd,
1668 						       dsc_bpc);
1669 	for (i = 0; i < num_bpc; i++) {
1670 		if (dsc_max_bpc >= dsc_bpc[i])
1671 			return dsc_bpc[i] * 3;
1672 	}
1673 
1674 	return 0;
1675 }
1676 
/* DSC minor version of the source: 1.2 on display ver 14+, 1.1 before. */
static int intel_dp_source_dsc_version_minor(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 14)
		return 2;

	return 1;
}
1681 
1682 static int intel_dp_sink_dsc_version_minor(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
1683 {
1684 	return (dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & DP_DSC_MINOR_MASK) >>
1685 		DP_DSC_MINOR_SHIFT;
1686 }
1687 
static int intel_dp_get_slice_height(int vactive)
{
	int height = 108;

	/*
	 * VDSC 1.2a spec in Section 3.8 Options for Slices implies that 108
	 * lines is an optimal slice height, but any size can be used as long as
	 * vertical active integer multiple and maximum vertical slice count
	 * requirements are met.
	 */
	while (height <= vactive) {
		if (vactive % height == 0)
			return height;
		height += 2;
	}

	/*
	 * Highly unlikely we reach here as most of the resolutions will end up
	 * finding appropriate slice_height in above loop but returning
	 * slice_height as 2 here as it should work with all resolutions.
	 */
	return 2;
}
1709 
/*
 * Fill crtc_state->dsc.config from the mode and the sink's DSC DPCD caps,
 * then compute the rate control parameters. Returns 0 or a negative errno.
 */
static int intel_dp_dsc_compute_params(const struct intel_connector *connector,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
	int ret;

	/*
	 * RC_MODEL_SIZE is currently a constant across all configurations.
	 *
	 * FIXME: Look into using sink defined DPCD DP_DSC_RC_BUF_BLK_SIZE and
	 * DP_DSC_RC_BUF_SIZE for this.
	 */
	vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
	vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay;

	vdsc_cfg->slice_height = intel_dp_get_slice_height(vdsc_cfg->pic_height);

	ret = intel_dsc_compute_params(crtc_state);
	if (ret)
		return ret;

	/* DSC version: major from the sink; minor limited by source and sink. */
	vdsc_cfg->dsc_version_major =
		(connector->dp.dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
		 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
	vdsc_cfg->dsc_version_minor =
		min(intel_dp_source_dsc_version_minor(i915),
		    intel_dp_sink_dsc_version_minor(connector->dp.dsc_dpcd));
	/* Keep RGB conversion enabled only if the sink can decode RGB. */
	if (vdsc_cfg->convert_rgb)
		vdsc_cfg->convert_rgb =
			connector->dp.dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
			DP_DSC_RGB;

	/* Line buffer depth is limited by both the source and the sink. */
	vdsc_cfg->line_buf_depth = min(INTEL_DP_DSC_MAX_LINE_BUF_DEPTH,
				       drm_dp_dsc_sink_line_buf_depth(connector->dp.dsc_dpcd));
	if (!vdsc_cfg->line_buf_depth) {
		drm_dbg_kms(&i915->drm,
			    "DSC Sink Line Buffer Depth invalid\n");
		return -EINVAL;
	}

	vdsc_cfg->block_pred_enable =
		connector->dp.dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
		DP_DSC_BLK_PREDICTION_IS_SUPPORTED;

	return drm_dsc_compute_rc_parameters(vdsc_cfg);
}
1757 
1758 static bool intel_dp_dsc_supports_format(const struct intel_connector *connector,
1759 					 enum intel_output_format output_format)
1760 {
1761 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1762 	u8 sink_dsc_format;
1763 
1764 	switch (output_format) {
1765 	case INTEL_OUTPUT_FORMAT_RGB:
1766 		sink_dsc_format = DP_DSC_RGB;
1767 		break;
1768 	case INTEL_OUTPUT_FORMAT_YCBCR444:
1769 		sink_dsc_format = DP_DSC_YCbCr444;
1770 		break;
1771 	case INTEL_OUTPUT_FORMAT_YCBCR420:
1772 		if (min(intel_dp_source_dsc_version_minor(i915),
1773 			intel_dp_sink_dsc_version_minor(connector->dp.dsc_dpcd)) < 2)
1774 			return false;
1775 		sink_dsc_format = DP_DSC_YCbCr420_Native;
1776 		break;
1777 	default:
1778 		return false;
1779 	}
1780 
1781 	return drm_dp_dsc_sink_supports_format(connector->dp.dsc_dpcd, sink_dsc_format);
1782 }
1783 
1784 static bool is_bw_sufficient_for_dsc_config(u16 compressed_bppx16, u32 link_clock,
1785 					    u32 lane_count, u32 mode_clock,
1786 					    enum intel_output_format output_format,
1787 					    int timeslots)
1788 {
1789 	u32 available_bw, required_bw;
1790 
1791 	available_bw = (link_clock * lane_count * timeslots * 16)  / 8;
1792 	required_bw = compressed_bppx16 * (intel_dp_mode_to_fec_clock(mode_clock));
1793 
1794 	return available_bw > required_bw;
1795 }
1796 
/*
 * Find a (link rate, lane count) combination within @limits with enough
 * bandwidth for @compressed_bppx16, storing it in @pipe_config.
 * Returns 0 on success, -EINVAL if nothing fits.
 */
static int dsc_compute_link_config(struct intel_dp *intel_dp,
				   struct intel_crtc_state *pipe_config,
				   struct link_config_limits *limits,
				   u16 compressed_bppx16,
				   int timeslots)
{
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int link_rate, lane_count;
	int i;

	/* Take the first rate/lane-count combination with sufficient BW. */
	for (i = 0; i < intel_dp->num_common_rates; i++) {
		link_rate = intel_dp_common_rate(intel_dp, i);
		if (link_rate < limits->min_rate || link_rate > limits->max_rate)
			continue;

		for (lane_count = limits->min_lane_count;
		     lane_count <= limits->max_lane_count;
		     lane_count <<= 1) {
			if (!is_bw_sufficient_for_dsc_config(compressed_bppx16, link_rate,
							     lane_count, adjusted_mode->clock,
							     pipe_config->output_format,
							     timeslots))
				continue;

			pipe_config->lane_count = lane_count;
			pipe_config->port_clock = link_rate;

			return 0;
		}
	}

	return -EINVAL;
}
1830 
1831 static
1832 u16 intel_dp_dsc_max_sink_compressed_bppx16(const struct intel_connector *connector,
1833 					    struct intel_crtc_state *pipe_config,
1834 					    int bpc)
1835 {
1836 	u16 max_bppx16 = drm_edp_dsc_sink_output_bpp(connector->dp.dsc_dpcd);
1837 
1838 	if (max_bppx16)
1839 		return max_bppx16;
1840 	/*
1841 	 * If support not given in DPCD 67h, 68h use the Maximum Allowed bit rate
1842 	 * values as given in spec Table 2-157 DP v2.0
1843 	 */
1844 	switch (pipe_config->output_format) {
1845 	case INTEL_OUTPUT_FORMAT_RGB:
1846 	case INTEL_OUTPUT_FORMAT_YCBCR444:
1847 		return (3 * bpc) << 4;
1848 	case INTEL_OUTPUT_FORMAT_YCBCR420:
1849 		return (3 * (bpc / 2)) << 4;
1850 	default:
1851 		MISSING_CASE(pipe_config->output_format);
1852 		break;
1853 	}
1854 
1855 	return 0;
1856 }
1857 
1858 int intel_dp_dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config)
1859 {
1860 	/* From Mandatory bit rate range Support Table 2-157 (DP v2.0) */
1861 	switch (pipe_config->output_format) {
1862 	case INTEL_OUTPUT_FORMAT_RGB:
1863 	case INTEL_OUTPUT_FORMAT_YCBCR444:
1864 		return 8;
1865 	case INTEL_OUTPUT_FORMAT_YCBCR420:
1866 		return 6;
1867 	default:
1868 		MISSING_CASE(pipe_config->output_format);
1869 		break;
1870 	}
1871 
1872 	return 0;
1873 }
1874 
1875 int intel_dp_dsc_sink_max_compressed_bpp(const struct intel_connector *connector,
1876 					 struct intel_crtc_state *pipe_config,
1877 					 int bpc)
1878 {
1879 	return intel_dp_dsc_max_sink_compressed_bppx16(connector,
1880 						       pipe_config, bpc) >> 4;
1881 }
1882 
/* Min Compressed bpp supported by source is 8 */
static int dsc_src_min_compressed_bpp(void)
{
	return 8;
}
1888 
static int dsc_src_max_compressed_bpp(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/*
	 * Max Compressed bpp for Gen 13+ is 27bpp.
	 * For earlier platform is 23bpp. (Bspec:49259).
	 */
	return DISPLAY_VER(i915) >= 13 ? 27 : 23;
}
1902 
1903 /*
1904  * From a list of valid compressed bpps try different compressed bpp and find a
1905  * suitable link configuration that can support it.
1906  */
1907 static int
1908 icl_dsc_compute_link_config(struct intel_dp *intel_dp,
1909 			    struct intel_crtc_state *pipe_config,
1910 			    struct link_config_limits *limits,
1911 			    int dsc_max_bpp,
1912 			    int dsc_min_bpp,
1913 			    int pipe_bpp,
1914 			    int timeslots)
1915 {
1916 	int i, ret;
1917 
1918 	/* Compressed BPP should be less than the Input DSC bpp */
1919 	dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);
1920 
1921 	for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp); i++) {
1922 		if (valid_dsc_bpp[i] < dsc_min_bpp)
1923 			continue;
1924 		if (valid_dsc_bpp[i] > dsc_max_bpp)
1925 			break;
1926 
1927 		ret = dsc_compute_link_config(intel_dp,
1928 					      pipe_config,
1929 					      limits,
1930 					      valid_dsc_bpp[i] << 4,
1931 					      timeslots);
1932 		if (ret == 0) {
1933 			pipe_config->dsc.compressed_bpp_x16 =
1934 				to_bpp_x16(valid_dsc_bpp[i]);
1935 			return 0;
1936 		}
1937 	}
1938 
1939 	return -EINVAL;
1940 }
1941 
1942 /*
1943  * From XE_LPD onwards we supports compression bpps in steps of 1 up to
1944  * uncompressed bpp-1. So we start from max compressed bpp and see if any
1945  * link configuration is able to support that compressed bpp, if not we
1946  * step down and check for lower compressed bpp.
1947  */
static int
xelpd_dsc_compute_link_config(struct intel_dp *intel_dp,
			      const struct intel_connector *connector,
			      struct intel_crtc_state *pipe_config,
			      struct link_config_limits *limits,
			      int dsc_max_bpp,
			      int dsc_min_bpp,
			      int pipe_bpp,
			      int timeslots)
{
	/* Fractional bpp increment advertised by the sink (steps per 1 bpp). */
	u8 bppx16_incr = drm_dp_dsc_sink_bpp_incr(connector->dp.dsc_dpcd);
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u16 compressed_bppx16;
	u8 bppx16_step;
	int ret;

	/*
	 * Fractional bpp stepping needs both display version 14+ and a
	 * sink-advertised increment > 1; otherwise step in whole bpp
	 * (16 sixteenths).
	 */
	if (DISPLAY_VER(i915) < 14 || bppx16_incr <= 1)
		bppx16_step = 16;
	else
		bppx16_step = 16 / bppx16_incr;

	/* Compressed BPP should be less than the Input DSC bpp */
	dsc_max_bpp = min(dsc_max_bpp << 4, (pipe_bpp << 4) - bppx16_step);
	dsc_min_bpp = dsc_min_bpp << 4;

	/* Try from the max compressed bpp downwards, one step at a time. */
	for (compressed_bppx16 = dsc_max_bpp;
	     compressed_bppx16 >= dsc_min_bpp;
	     compressed_bppx16 -= bppx16_step) {
		/* Debugfs knob: skip integer bpps when fractional is forced. */
		if (intel_dp->force_dsc_fractional_bpp_en &&
		    !to_bpp_frac(compressed_bppx16))
			continue;
		ret = dsc_compute_link_config(intel_dp,
					      pipe_config,
					      limits,
					      compressed_bppx16,
					      timeslots);
		if (ret == 0) {
			pipe_config->dsc.compressed_bpp_x16 = compressed_bppx16;
			if (intel_dp->force_dsc_fractional_bpp_en &&
			    to_bpp_frac(compressed_bppx16))
				drm_dbg_kms(&i915->drm, "Forcing DSC fractional bpp\n");

			return 0;
		}
	}
	return -EINVAL;
}
1995 
static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp,
				      const struct intel_connector *connector,
				      struct intel_crtc_state *pipe_config,
				      struct link_config_limits *limits,
				      int pipe_bpp,
				      int timeslots)
{
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int dsc_src_min_bpp, dsc_sink_min_bpp, dsc_min_bpp;
	int dsc_src_max_bpp, dsc_sink_max_bpp, dsc_max_bpp;
	int dsc_joiner_max_bpp;

	/* Minimum compressed bpp: strictest of source, sink and link limits. */
	dsc_src_min_bpp = dsc_src_min_compressed_bpp();
	dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config);
	dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp);
	dsc_min_bpp = max(dsc_min_bpp, to_bpp_int_roundup(limits->link.min_bpp_x16));

	/* Maximum compressed bpp: source cap, optionally tightened by sink. */
	dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp);
	dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
								pipe_config,
								pipe_bpp / 3);
	dsc_max_bpp = dsc_sink_max_bpp ? min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp;

	/* Also cap by the joiner bandwidth and the overall link bpp limit. */
	dsc_joiner_max_bpp = get_max_compressed_bpp_with_joiner(i915, adjusted_mode->clock,
								adjusted_mode->hdisplay,
								pipe_config->bigjoiner_pipes);
	dsc_max_bpp = min(dsc_max_bpp, dsc_joiner_max_bpp);
	dsc_max_bpp = min(dsc_max_bpp, to_bpp_int(limits->link.max_bpp_x16));

	/*
	 * Display 13+ can step the compressed bpp finely (down to fractional
	 * steps); earlier platforms pick from a fixed table of valid bpps.
	 */
	if (DISPLAY_VER(i915) >= 13)
		return xelpd_dsc_compute_link_config(intel_dp, connector, pipe_config, limits,
						     dsc_max_bpp, dsc_min_bpp, pipe_bpp, timeslots);
	return icl_dsc_compute_link_config(intel_dp, pipe_config, limits,
					   dsc_max_bpp, dsc_min_bpp, pipe_bpp, timeslots);
}
2032 
2033 static
2034 u8 intel_dp_dsc_min_src_input_bpc(struct drm_i915_private *i915)
2035 {
2036 	/* Min DSC Input BPC for ICL+ is 8 */
2037 	return HAS_DSC(i915) ? 8 : 0;
2038 }
2039 
2040 static
2041 bool is_dsc_pipe_bpp_sufficient(struct drm_i915_private *i915,
2042 				struct drm_connector_state *conn_state,
2043 				struct link_config_limits *limits,
2044 				int pipe_bpp)
2045 {
2046 	u8 dsc_max_bpc, dsc_min_bpc, dsc_max_pipe_bpp, dsc_min_pipe_bpp;
2047 
2048 	dsc_max_bpc = min(intel_dp_dsc_max_src_input_bpc(i915), conn_state->max_requested_bpc);
2049 	dsc_min_bpc = intel_dp_dsc_min_src_input_bpc(i915);
2050 
2051 	dsc_max_pipe_bpp = min(dsc_max_bpc * 3, limits->pipe.max_bpp);
2052 	dsc_min_pipe_bpp = max(dsc_min_bpc * 3, limits->pipe.min_bpp);
2053 
2054 	return pipe_bpp >= dsc_min_pipe_bpp &&
2055 	       pipe_bpp <= dsc_max_pipe_bpp;
2056 }
2057 
2058 static
2059 int intel_dp_force_dsc_pipe_bpp(struct intel_dp *intel_dp,
2060 				struct drm_connector_state *conn_state,
2061 				struct link_config_limits *limits)
2062 {
2063 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2064 	int forced_bpp;
2065 
2066 	if (!intel_dp->force_dsc_bpc)
2067 		return 0;
2068 
2069 	forced_bpp = intel_dp->force_dsc_bpc * 3;
2070 
2071 	if (is_dsc_pipe_bpp_sufficient(i915, conn_state, limits, forced_bpp)) {
2072 		drm_dbg_kms(&i915->drm, "Input DSC BPC forced to %d\n", intel_dp->force_dsc_bpc);
2073 		return forced_bpp;
2074 	}
2075 
2076 	drm_dbg_kms(&i915->drm, "Cannot force DSC BPC:%d, due to DSC BPC limits\n",
2077 		    intel_dp->force_dsc_bpc);
2078 
2079 	return 0;
2080 }
2081 
static int intel_dp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
					 struct intel_crtc_state *pipe_config,
					 struct drm_connector_state *conn_state,
					 struct link_config_limits *limits,
					 int timeslots)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	const struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	u8 max_req_bpc = conn_state->max_requested_bpc;
	u8 dsc_max_bpc, dsc_max_bpp;
	u8 dsc_min_bpc, dsc_min_bpp;
	u8 dsc_bpc[3] = {};
	int forced_bpp, pipe_bpp;
	int num_bpc, i, ret;

	/* A debugfs-forced bpc takes precedence over the computed values. */
	forced_bpp = intel_dp_force_dsc_pipe_bpp(intel_dp, conn_state, limits);

	if (forced_bpp) {
		ret = dsc_compute_compressed_bpp(intel_dp, connector, pipe_config,
						 limits, forced_bpp, timeslots);
		if (ret == 0) {
			pipe_config->pipe_bpp = forced_bpp;
			return 0;
		}
		/* Forced bpp didn't yield a link config; fall back below. */
	}

	dsc_max_bpc = intel_dp_dsc_max_src_input_bpc(i915);
	if (!dsc_max_bpc)
		return -EINVAL;

	/* Clamp the source bpc range by user request and pipe bpp limits. */
	dsc_max_bpc = min_t(u8, dsc_max_bpc, max_req_bpc);
	dsc_max_bpp = min(dsc_max_bpc * 3, limits->pipe.max_bpp);

	dsc_min_bpc = intel_dp_dsc_min_src_input_bpc(i915);
	dsc_min_bpp = max(dsc_min_bpc * 3, limits->pipe.min_bpp);

	/*
	 * Get the maximum DSC bpc that will be supported by any valid
	 * link configuration and compressed bpp.
	 */
	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(connector->dp.dsc_dpcd, dsc_bpc);
	/* dsc_bpc[] is sorted descending, so the first success is the max. */
	for (i = 0; i < num_bpc; i++) {
		pipe_bpp = dsc_bpc[i] * 3;
		if (pipe_bpp < dsc_min_bpp)
			break;
		if (pipe_bpp > dsc_max_bpp)
			continue;
		ret = dsc_compute_compressed_bpp(intel_dp, connector, pipe_config,
						 limits, pipe_bpp, timeslots);
		if (ret == 0) {
			pipe_config->pipe_bpp = pipe_bpp;
			return 0;
		}
	}

	return -EINVAL;
}
2140 
static int intel_edp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
					  struct intel_crtc_state *pipe_config,
					  struct drm_connector_state *conn_state,
					  struct link_config_limits *limits)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	int pipe_bpp, forced_bpp;
	int dsc_src_min_bpp, dsc_sink_min_bpp, dsc_min_bpp;
	int dsc_src_max_bpp, dsc_sink_max_bpp, dsc_max_bpp;

	/* A debugfs-forced bpc takes precedence over the computed value. */
	forced_bpp = intel_dp_force_dsc_pipe_bpp(intel_dp, conn_state, limits);

	if (forced_bpp) {
		pipe_bpp = forced_bpp;
	} else {
		int max_bpc = min(limits->pipe.max_bpp / 3, (int)conn_state->max_requested_bpc);

		/* For eDP use max bpp that can be supported with DSC. */
		pipe_bpp = intel_dp_dsc_compute_max_bpp(connector, max_bpc);
		if (!is_dsc_pipe_bpp_sufficient(i915, conn_state, limits, pipe_bpp)) {
			drm_dbg_kms(&i915->drm,
				    "Computed BPC is not in DSC BPC limits\n");
			return -EINVAL;
		}
	}
	/* eDP always runs at the panel's maximum link parameters. */
	pipe_config->port_clock = limits->max_rate;
	pipe_config->lane_count = limits->max_lane_count;

	/* Min compressed bpp: strictest of source, sink and link limits. */
	dsc_src_min_bpp = dsc_src_min_compressed_bpp();
	dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config);
	dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp);
	dsc_min_bpp = max(dsc_min_bpp, to_bpp_int_roundup(limits->link.min_bpp_x16));

	/* Max compressed bpp: source cap, optionally tightened by sink. */
	dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp);
	dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
								pipe_config,
								pipe_bpp / 3);
	dsc_max_bpp = dsc_sink_max_bpp ? min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp;
	dsc_max_bpp = min(dsc_max_bpp, to_bpp_int(limits->link.max_bpp_x16));

	/* Compressed BPP should be less than the Input DSC bpp */
	dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);

	/* Use the highest compressed bpp that still satisfies the minimum. */
	pipe_config->dsc.compressed_bpp_x16 =
		to_bpp_x16(max(dsc_min_bpp, dsc_max_bpp));

	pipe_config->pipe_bpp = pipe_bpp;

	return 0;
}
2193 
int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
				struct intel_crtc_state *pipe_config,
				struct drm_connector_state *conn_state,
				struct link_config_limits *limits,
				int timeslots,
				bool compute_pipe_bpp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	int ret;

	/* FEC is required for external DP DSC links (not needed on eDP). */
	pipe_config->fec_enable = pipe_config->fec_enable ||
		(!intel_dp_is_edp(intel_dp) &&
		 intel_dp_supports_fec(intel_dp, connector, pipe_config));

	if (!intel_dp_supports_dsc(connector, pipe_config))
		return -EINVAL;

	if (!intel_dp_dsc_supports_format(connector, pipe_config->output_format))
		return -EINVAL;

	/*
	 * compute pipe bpp is set to false for DP MST DSC case
	 * and compressed_bpp is calculated same time once
	 * vpci timeslots are allocated, because overall bpp
	 * calculation procedure is bit different for MST case.
	 */
	if (compute_pipe_bpp) {
		if (intel_dp_is_edp(intel_dp))
			ret = intel_edp_dsc_compute_pipe_bpp(intel_dp, pipe_config,
							     conn_state, limits);
		else
			ret = intel_dp_dsc_compute_pipe_bpp(intel_dp, pipe_config,
							    conn_state, limits, timeslots);
		if (ret) {
			drm_dbg_kms(&dev_priv->drm,
				    "No Valid pipe bpp for given mode ret = %d\n", ret);
			return ret;
		}
	}

	/* Calculate Slice count */
	if (intel_dp_is_edp(intel_dp)) {
		/* eDP: use the sink's maximum slice count directly. */
		pipe_config->dsc.slice_count =
			drm_dp_dsc_sink_max_slice_count(connector->dp.dsc_dpcd,
							true);
		if (!pipe_config->dsc.slice_count) {
			drm_dbg_kms(&dev_priv->drm, "Unsupported Slice Count %d\n",
				    pipe_config->dsc.slice_count);
			return -EINVAL;
		}
	} else {
		u8 dsc_dp_slice_count;

		/* DP: pick a slice count valid for the mode and joiner config. */
		dsc_dp_slice_count =
			intel_dp_dsc_get_slice_count(connector,
						     adjusted_mode->crtc_clock,
						     adjusted_mode->crtc_hdisplay,
						     pipe_config->bigjoiner_pipes);
		if (!dsc_dp_slice_count) {
			drm_dbg_kms(&dev_priv->drm,
				    "Compressed Slice Count not supported\n");
			return -EINVAL;
		}

		pipe_config->dsc.slice_count = dsc_dp_slice_count;
	}
	/*
	 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
	 * is greater than the maximum Cdclock and if slice count is even
	 * then we need to use 2 VDSC instances.
	 */
	if (pipe_config->bigjoiner_pipes || pipe_config->dsc.slice_count > 1)
		pipe_config->dsc.dsc_split = true;

	ret = intel_dp_dsc_compute_params(connector, pipe_config);
	if (ret < 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Cannot compute valid DSC parameters for Input Bpp = %d"
			    "Compressed BPP = " BPP_X16_FMT "\n",
			    pipe_config->pipe_bpp,
			    BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16));
		return ret;
	}

	pipe_config->dsc.compression_enable = true;
	drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
		    "Compressed Bpp = " BPP_X16_FMT " Slice Count = %d\n",
		    pipe_config->pipe_bpp,
		    BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16),
		    pipe_config->dsc.slice_count);

	return 0;
}
2292 
2293 /**
2294  * intel_dp_compute_config_link_bpp_limits - compute output link bpp limits
2295  * @intel_dp: intel DP
2296  * @crtc_state: crtc state
2297  * @dsc: DSC compression mode
2298  * @limits: link configuration limits
2299  *
2300  * Calculates the output link min, max bpp values in @limits based on the
2301  * pipe bpp range, @crtc_state and @dsc mode.
2302  *
2303  * Returns %true in case of success.
2304  */
bool
intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
					const struct intel_crtc_state *crtc_state,
					bool dsc,
					struct link_config_limits *limits)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	int max_link_bpp_x16;

	/* Link bpp can't exceed what the pipe produces (.4 fixed point). */
	max_link_bpp_x16 = min(crtc_state->max_link_bpp_x16,
			       to_bpp_x16(limits->pipe.max_bpp));

	if (!dsc) {
		/* Without DSC link bpp must be a multiple of 2*3 (even bpc). */
		max_link_bpp_x16 = rounddown(max_link_bpp_x16, to_bpp_x16(2 * 3));

		if (max_link_bpp_x16 < to_bpp_x16(limits->pipe.min_bpp))
			return false;

		limits->link.min_bpp_x16 = to_bpp_x16(limits->pipe.min_bpp);
	} else {
		/*
		 * TODO: set the DSC link limits already here, atm these are
		 * initialized only later in intel_edp_dsc_compute_pipe_bpp() /
		 * intel_dp_dsc_compute_pipe_bpp()
		 */
		limits->link.min_bpp_x16 = 0;
	}

	limits->link.max_bpp_x16 = max_link_bpp_x16;

	drm_dbg_kms(&i915->drm,
		    "[ENCODER:%d:%s][CRTC:%d:%s] DP link limits: pixel clock %d kHz DSC %s max lanes %d max rate %d max pipe_bpp %d max link_bpp " BPP_X16_FMT "\n",
		    encoder->base.base.id, encoder->base.name,
		    crtc->base.base.id, crtc->base.name,
		    adjusted_mode->crtc_clock,
		    dsc ? "on" : "off",
		    limits->max_lane_count,
		    limits->max_rate,
		    limits->pipe.max_bpp,
		    BPP_X16_ARGS(limits->link.max_bpp_x16));

	return true;
}
2352 
2353 static bool
2354 intel_dp_compute_config_limits(struct intel_dp *intel_dp,
2355 			       struct intel_crtc_state *crtc_state,
2356 			       bool respect_downstream_limits,
2357 			       bool dsc,
2358 			       struct link_config_limits *limits)
2359 {
2360 	limits->min_rate = intel_dp_common_rate(intel_dp, 0);
2361 	limits->max_rate = intel_dp_max_link_rate(intel_dp);
2362 
2363 	/* FIXME 128b/132b SST support missing */
2364 	limits->max_rate = min(limits->max_rate, 810000);
2365 
2366 	limits->min_lane_count = 1;
2367 	limits->max_lane_count = intel_dp_max_lane_count(intel_dp);
2368 
2369 	limits->pipe.min_bpp = intel_dp_min_bpp(crtc_state->output_format);
2370 	limits->pipe.max_bpp = intel_dp_max_bpp(intel_dp, crtc_state,
2371 						     respect_downstream_limits);
2372 
2373 	if (intel_dp->use_max_params) {
2374 		/*
2375 		 * Use the maximum clock and number of lanes the eDP panel
2376 		 * advertizes being capable of in case the initial fast
2377 		 * optimal params failed us. The panels are generally
2378 		 * designed to support only a single clock and lane
2379 		 * configuration, and typically on older panels these
2380 		 * values correspond to the native resolution of the panel.
2381 		 */
2382 		limits->min_lane_count = limits->max_lane_count;
2383 		limits->min_rate = limits->max_rate;
2384 	}
2385 
2386 	intel_dp_adjust_compliance_config(intel_dp, crtc_state, limits);
2387 
2388 	return intel_dp_compute_config_link_bpp_limits(intel_dp,
2389 						       crtc_state,
2390 						       dsc,
2391 						       limits);
2392 }
2393 
2394 int intel_dp_config_required_rate(const struct intel_crtc_state *crtc_state)
2395 {
2396 	const struct drm_display_mode *adjusted_mode =
2397 		&crtc_state->hw.adjusted_mode;
2398 	int bpp = crtc_state->dsc.compression_enable ?
2399 		to_bpp_int_roundup(crtc_state->dsc.compressed_bpp_x16) :
2400 		crtc_state->pipe_bpp;
2401 
2402 	return intel_dp_link_required(adjusted_mode->crtc_clock, bpp);
2403 }
2404 
2405 bool intel_dp_joiner_needs_dsc(struct drm_i915_private *i915, bool use_joiner)
2406 {
2407 	/*
2408 	 * Pipe joiner needs compression up to display 12 due to bandwidth
2409 	 * limitation. DG2 onwards pipe joiner can be enabled without
2410 	 * compression.
2411 	 */
2412 	return DISPLAY_VER(i915) < 13 && use_joiner;
2413 }
2414 
static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
			     struct intel_crtc_state *pipe_config,
			     struct drm_connector_state *conn_state,
			     bool respect_downstream_limits)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct link_config_limits limits;
	bool dsc_needed, joiner_needs_dsc;
	int ret = 0;

	if (pipe_config->fec_enable &&
	    !intel_dp_supports_fec(intel_dp, connector, pipe_config))
		return -EINVAL;

	/* Mode too wide/fast for one pipe: use this pipe and the next one. */
	if (intel_dp_need_bigjoiner(intel_dp, connector,
				    adjusted_mode->crtc_hdisplay,
				    adjusted_mode->crtc_clock))
		pipe_config->bigjoiner_pipes = GENMASK(crtc->pipe + 1, crtc->pipe);

	joiner_needs_dsc = intel_dp_joiner_needs_dsc(i915, pipe_config->bigjoiner_pipes);

	/*
	 * DSC is needed when the joiner requires it, when forced via debugfs,
	 * or when no uncompressed link bpp limits can be computed at all.
	 */
	dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en ||
		     !intel_dp_compute_config_limits(intel_dp, pipe_config,
						     respect_downstream_limits,
						     false,
						     &limits);

	if (!dsc_needed) {
		/*
		 * Optimize for slow and wide for everything, because there are some
		 * eDP 1.3 and 1.4 panels that don't work well with fast and narrow.
		 */
		ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config,
							conn_state, &limits);
		if (ret)
			/* Uncompressed didn't fit: fall back to DSC. */
			dsc_needed = true;
	}

	if (dsc_needed) {
		drm_dbg_kms(&i915->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
			    str_yes_no(ret), str_yes_no(joiner_needs_dsc),
			    str_yes_no(intel_dp->force_dsc_en));

		/* Recompute the limits for the DSC (compressed) case. */
		if (!intel_dp_compute_config_limits(intel_dp, pipe_config,
						    respect_downstream_limits,
						    true,
						    &limits))
			return -EINVAL;

		ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
						  conn_state, &limits, 64, true);
		if (ret < 0)
			return ret;
	}

	drm_dbg_kms(&i915->drm,
		    "DP lane count %d clock %d bpp input %d compressed " BPP_X16_FMT " link rate required %d available %d\n",
		    pipe_config->lane_count, pipe_config->port_clock,
		    pipe_config->pipe_bpp,
		    BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16),
		    intel_dp_config_required_rate(pipe_config),
		    intel_dp_max_link_data_rate(intel_dp,
						pipe_config->port_clock,
						pipe_config->lane_count));

	return 0;
}
2489 
2490 bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
2491 				  const struct drm_connector_state *conn_state)
2492 {
2493 	const struct intel_digital_connector_state *intel_conn_state =
2494 		to_intel_digital_connector_state(conn_state);
2495 	const struct drm_display_mode *adjusted_mode =
2496 		&crtc_state->hw.adjusted_mode;
2497 
2498 	/*
2499 	 * Our YCbCr output is always limited range.
2500 	 * crtc_state->limited_color_range only applies to RGB,
2501 	 * and it must never be set for YCbCr or we risk setting
2502 	 * some conflicting bits in TRANSCONF which will mess up
2503 	 * the colors on the monitor.
2504 	 */
2505 	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
2506 		return false;
2507 
2508 	if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
2509 		/*
2510 		 * See:
2511 		 * CEA-861-E - 5.1 Default Encoding Parameters
2512 		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
2513 		 */
2514 		return crtc_state->pipe_bpp != 18 &&
2515 			drm_default_rgb_quant_range(adjusted_mode) ==
2516 			HDMI_QUANTIZATION_RANGE_LIMITED;
2517 	} else {
2518 		return intel_conn_state->broadcast_rgb ==
2519 			INTEL_BROADCAST_RGB_LIMITED;
2520 	}
2521 }
2522 
2523 static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
2524 				    enum port port)
2525 {
2526 	if (IS_G4X(dev_priv))
2527 		return false;
2528 	if (DISPLAY_VER(dev_priv) < 12 && port == PORT_A)
2529 		return false;
2530 
2531 	return true;
2532 }
2533 
static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state,
					     const struct drm_connector_state *conn_state,
					     struct drm_dp_vsc_sdp *vsc)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (crtc_state->has_panel_replay) {
		/*
		 * Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223
		 * VSC SDP supporting 3D stereo, Panel Replay, and Pixel
		 * Encoding/Colorimetry Format indication.
		 */
		vsc->revision = 0x7;
	} else {
		/*
		 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
		 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
		 * Colorimetry Format indication.
		 */
		vsc->revision = 0x5;
	}

	vsc->length = 0x13;

	/* DP 1.4a spec, Table 2-120 */
	switch (crtc_state->output_format) {
	case INTEL_OUTPUT_FORMAT_YCBCR444:
		vsc->pixelformat = DP_PIXELFORMAT_YUV444;
		break;
	case INTEL_OUTPUT_FORMAT_YCBCR420:
		vsc->pixelformat = DP_PIXELFORMAT_YUV420;
		break;
	case INTEL_OUTPUT_FORMAT_RGB:
	default:
		vsc->pixelformat = DP_PIXELFORMAT_RGB;
	}

	/* Map the DRM connector colorspace to the DP colorimetry encoding. */
	switch (conn_state->colorspace) {
	case DRM_MODE_COLORIMETRY_BT709_YCC:
		vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_XVYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_709:
		vsc->colorimetry = DP_COLORIMETRY_XVYCC_709;
		break;
	case DRM_MODE_COLORIMETRY_SYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_SYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_OPYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_OPYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_CYCC:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_RGB:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_YCC:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC;
		break;
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
		vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB;
		break;
	default:
		/*
		 * RGB->YCBCR color conversion uses the BT.709
		 * color space.
		 */
		if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
			vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
		else
			vsc->colorimetry = DP_COLORIMETRY_DEFAULT;
		break;
	}

	vsc->bpc = crtc_state->pipe_bpp / 3;

	/* only RGB pixelformat supports 6 bpc */
	drm_WARN_ON(&dev_priv->drm,
		    vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB);

	/* all YCbCr are always limited range */
	vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA;
	vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED;
}
2623 
2624 static void intel_dp_compute_as_sdp(struct intel_dp *intel_dp,
2625 				    struct intel_crtc_state *crtc_state)
2626 {
2627 	struct drm_dp_as_sdp *as_sdp = &crtc_state->infoframes.as_sdp;
2628 	const struct drm_display_mode *adjusted_mode =
2629 		&crtc_state->hw.adjusted_mode;
2630 
2631 	if (!crtc_state->vrr.enable ||
2632 	    !intel_dp_as_sdp_supported(intel_dp))
2633 		return;
2634 
2635 	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC);
2636 
2637 	/* Currently only DP_AS_SDP_AVT_FIXED_VTOTAL mode supported */
2638 	as_sdp->sdp_type = DP_SDP_ADAPTIVE_SYNC;
2639 	as_sdp->length = 0x9;
2640 	as_sdp->mode = DP_AS_SDP_AVT_FIXED_VTOTAL;
2641 	as_sdp->vtotal = adjusted_mode->vtotal;
2642 	as_sdp->target_rr = 0;
2643 	as_sdp->duration_incr_ms = 0;
2644 	as_sdp->duration_incr_ms = 0;
2645 }
2646 
static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
				     struct intel_crtc_state *crtc_state,
				     const struct drm_connector_state *conn_state)
{
	struct drm_dp_vsc_sdp *vsc;

	/* VSC SDP is needed for colorimetry signaling and/or PSR. */
	if ((!intel_dp->colorimetry_support ||
	     !intel_dp_needs_vsc_sdp(crtc_state, conn_state)) &&
	    !crtc_state->has_psr)
		return;

	vsc = &crtc_state->infoframes.vsc;

	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
	vsc->sdp_type = DP_SDP_VSC;

	/* Needs colorimetry */
	if (intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
		/* Colorimetry path also picks revision/length (0x5/0x7). */
		intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
						 vsc);
	} else if (crtc_state->has_psr2) {
		/*
		 * [PSR2 without colorimetry]
		 * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11
		 * 3D stereo + PSR/PSR2 + Y-coordinate.
		 */
		vsc->revision = 0x4;
		vsc->length = 0xe;
	} else if (crtc_state->has_panel_replay) {
		/*
		 * [Panel Replay without colorimetry info]
		 * Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223
		 * VSC SDP supporting 3D stereo + Panel Replay.
		 */
		vsc->revision = 0x6;
		vsc->length = 0x10;
	} else {
		/*
		 * [PSR1]
		 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
		 * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or
		 * higher).
		 */
		vsc->revision = 0x2;
		vsc->length = 0x8;
	}
}
2694 
2695 static void
2696 intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
2697 					    struct intel_crtc_state *crtc_state,
2698 					    const struct drm_connector_state *conn_state)
2699 {
2700 	int ret;
2701 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2702 	struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm;
2703 
2704 	if (!conn_state->hdr_output_metadata)
2705 		return;
2706 
2707 	ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state);
2708 
2709 	if (ret) {
2710 		drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n");
2711 		return;
2712 	}
2713 
2714 	crtc_state->infoframes.enable |=
2715 		intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
2716 }
2717 
2718 static bool can_enable_drrs(struct intel_connector *connector,
2719 			    const struct intel_crtc_state *pipe_config,
2720 			    const struct drm_display_mode *downclock_mode)
2721 {
2722 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
2723 
2724 	if (pipe_config->vrr.enable)
2725 		return false;
2726 
2727 	/*
2728 	 * DRRS and PSR can't be enable together, so giving preference to PSR
2729 	 * as it allows more power-savings by complete shutting down display,
2730 	 * so to guarantee this, intel_drrs_compute_config() must be called
2731 	 * after intel_psr_compute_config().
2732 	 */
2733 	if (pipe_config->has_psr)
2734 		return false;
2735 
2736 	/* FIXME missing FDI M2/N2 etc. */
2737 	if (pipe_config->has_pch_encoder)
2738 		return false;
2739 
2740 	if (!intel_cpu_transcoder_has_drrs(i915, pipe_config->cpu_transcoder))
2741 		return false;
2742 
2743 	return downclock_mode &&
2744 		intel_panel_drrs_type(connector) == DRRS_TYPE_SEAMLESS;
2745 }
2746 
static void
intel_dp_drrs_compute_config(struct intel_connector *connector,
			     struct intel_crtc_state *pipe_config,
			     int link_bpp_x16)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	const struct drm_display_mode *downclock_mode =
		intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode);
	int pixel_clock;

	/*
	 * FIXME all joined pipes share the same transcoder.
	 * Need to account for that when updating M/N live.
	 */
	if (has_seamless_m_n(connector) && !pipe_config->bigjoiner_pipes)
		pipe_config->update_m_n = true;

	if (!can_enable_drrs(connector, pipe_config, downclock_mode)) {
		/* Clear any stale M2/N2 values on transcoders that have them. */
		if (intel_cpu_transcoder_has_m2_n2(i915, pipe_config->cpu_transcoder))
			intel_zero_m_n(&pipe_config->dp_m2_n2);
		return;
	}

	if (IS_IRONLAKE(i915) || IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915))
		pipe_config->msa_timing_delay = connector->panel.vbt.edp.drrs_msa_timing_delay;

	pipe_config->has_drrs = true;

	/* MSO splitter divides the pixel rate across its links. */
	pixel_clock = downclock_mode->clock;
	if (pipe_config->splitter.enable)
		pixel_clock /= pipe_config->splitter.link_count;

	/* Compute the M2/N2 values used for the downclocked refresh rate. */
	intel_link_compute_m_n(link_bpp_x16, pipe_config->lane_count, pixel_clock,
			       pipe_config->port_clock,
			       intel_dp_bw_fec_overhead(pipe_config->fec_enable),
			       &pipe_config->dp_m2_n2);

	/* FIXME: abstract this better */
	if (pipe_config->splitter.enable)
		pipe_config->dp_m2_n2.data_m *= pipe_config->splitter.link_count;
}
2788 
2789 static bool intel_dp_has_audio(struct intel_encoder *encoder,
2790 			       struct intel_crtc_state *crtc_state,
2791 			       const struct drm_connector_state *conn_state)
2792 {
2793 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2794 	const struct intel_digital_connector_state *intel_conn_state =
2795 		to_intel_digital_connector_state(conn_state);
2796 	struct intel_connector *connector =
2797 		to_intel_connector(conn_state->connector);
2798 
2799 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
2800 	    !intel_dp_port_has_audio(i915, encoder->port))
2801 		return false;
2802 
2803 	if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
2804 		return connector->base.display_info.has_audio;
2805 	else
2806 		return intel_conn_state->force_audio == HDMI_AUDIO_ON;
2807 }
2808 
/*
 * Pick the sink and output formats for the adjusted mode and compute the
 * link configuration. If the first attempt fails and the sink also accepts
 * the mode in YCbCr 4:2:0, retry with 4:2:0 output.
 */
static int
intel_dp_compute_output_format(struct intel_encoder *encoder,
			       struct intel_crtc_state *crtc_state,
			       struct drm_connector_state *conn_state,
			       bool respect_downstream_limits)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_connector *connector = intel_dp->attached_connector;
	const struct drm_display_info *info = &connector->base.display_info;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	bool ycbcr_420_only;
	int ret;

	ycbcr_420_only = drm_mode_is_420_only(info, adjusted_mode);

	if (ycbcr_420_only && !connector->base.ycbcr_420_allowed) {
		drm_dbg_kms(&i915->drm,
			    "YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. Falling back to RGB.\n");
		crtc_state->sink_format = INTEL_OUTPUT_FORMAT_RGB;
	} else {
		crtc_state->sink_format = intel_dp_sink_format(connector, adjusted_mode);
	}

	crtc_state->output_format = intel_dp_output_format(connector, crtc_state->sink_format);

	ret = intel_dp_compute_link_config(encoder, crtc_state, conn_state,
					   respect_downstream_limits);
	if (ret) {
		/* Retry only if 4:2:0 wasn't already tried and is actually possible. */
		if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
		    !connector->base.ycbcr_420_allowed ||
		    !drm_mode_is_420_also(info, adjusted_mode))
			return ret;

		crtc_state->sink_format = INTEL_OUTPUT_FORMAT_YCBCR420;
		crtc_state->output_format = intel_dp_output_format(connector,
								   crtc_state->sink_format);
		ret = intel_dp_compute_link_config(encoder, crtc_state, conn_state,
						   respect_downstream_limits);
	}

	return ret;
}
2852 
2853 void
2854 intel_dp_audio_compute_config(struct intel_encoder *encoder,
2855 			      struct intel_crtc_state *pipe_config,
2856 			      struct drm_connector_state *conn_state)
2857 {
2858 	pipe_config->has_audio =
2859 		intel_dp_has_audio(encoder, pipe_config, conn_state) &&
2860 		intel_audio_compute_config(encoder, pipe_config, conn_state);
2861 
2862 	pipe_config->sdp_split_enable = pipe_config->has_audio &&
2863 					intel_dp_is_uhbr(pipe_config);
2864 }
2865 
2866 void intel_dp_queue_modeset_retry_work(struct intel_connector *connector)
2867 {
2868 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
2869 
2870 	drm_connector_get(&connector->base);
2871 	if (!queue_work(i915->unordered_wq, &connector->modeset_retry_work))
2872 		drm_connector_put(&connector->base);
2873 }
2874 
2875 void
2876 intel_dp_queue_modeset_retry_for_link(struct intel_atomic_state *state,
2877 				      struct intel_encoder *encoder,
2878 				      const struct intel_crtc_state *crtc_state)
2879 {
2880 	struct intel_connector *connector;
2881 	struct intel_digital_connector_state *conn_state;
2882 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2883 	int i;
2884 
2885 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
2886 		intel_dp_queue_modeset_retry_work(intel_dp->attached_connector);
2887 
2888 		return;
2889 	}
2890 
2891 	for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
2892 		if (!conn_state->base.crtc)
2893 			continue;
2894 
2895 		if (connector->mst_port == intel_dp)
2896 			intel_dp_queue_modeset_retry_work(connector);
2897 	}
2898 }
2899 
/*
 * Compute the full DP encoder state for the crtc: output format, link
 * parameters, M/N values, audio, MSO splitting, VRR/PSR/DRRS and the SDPs.
 * Returns 0 on success or a negative error code if no valid config exists.
 */
int
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config,
			struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	const struct drm_display_mode *fixed_mode;
	struct intel_connector *connector = intel_dp->attached_connector;
	int ret = 0, link_bpp_x16;

	/* On non-DDI PCH-split platforms only port A bypasses the PCH. */
	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && encoder->port != PORT_A)
		pipe_config->has_pch_encoder = true;

	fixed_mode = intel_panel_fixed_mode(connector, adjusted_mode);
	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		ret = intel_panel_compute_config(connector, adjusted_mode);
		if (ret)
			return ret;
	}

	/* Reject mode flags this output cannot drive. */
	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return -EINVAL;

	if (!connector->base.interlace_allowed &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
		return -EINVAL;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return -EINVAL;

	if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay))
		return -EINVAL;

	/*
	 * Try to respect downstream TMDS clock limits first, if
	 * that fails assume the user might know something we don't.
	 */
	ret = intel_dp_compute_output_format(encoder, pipe_config, conn_state, true);
	if (ret)
		ret = intel_dp_compute_output_format(encoder, pipe_config, conn_state, false);
	if (ret)
		return ret;

	/* Panel fitting is needed for eDP fixed modes and for 4:2:0 output. */
	if ((intel_dp_is_edp(intel_dp) && fixed_mode) ||
	    pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
		ret = intel_panel_fitting(pipe_config, conn_state);
		if (ret)
			return ret;
	}

	pipe_config->limited_color_range =
		intel_dp_limited_color_range(pipe_config, conn_state);

	pipe_config->enhanced_framing =
		drm_dp_enhanced_frame_cap(intel_dp->dpcd);

	/* The link bpp is the compressed bpp when DSC is enabled. */
	if (pipe_config->dsc.compression_enable)
		link_bpp_x16 = pipe_config->dsc.compressed_bpp_x16;
	else
		link_bpp_x16 = to_bpp_x16(intel_dp_output_bpp(pipe_config->output_format,
							      pipe_config->pipe_bpp));

	/* MSO: split the horizontal timings across the links, plus pixel overlap. */
	if (intel_dp->mso_link_count) {
		int n = intel_dp->mso_link_count;
		int overlap = intel_dp->mso_pixel_overlap;

		pipe_config->splitter.enable = true;
		pipe_config->splitter.link_count = n;
		pipe_config->splitter.pixel_overlap = overlap;

		drm_dbg_kms(&dev_priv->drm, "MSO link count %d, pixel overlap %d\n",
			    n, overlap);

		adjusted_mode->crtc_hdisplay = adjusted_mode->crtc_hdisplay / n + overlap;
		adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hblank_start / n + overlap;
		adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_end / n + overlap;
		adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hsync_start / n + overlap;
		adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_end / n + overlap;
		adjusted_mode->crtc_htotal = adjusted_mode->crtc_htotal / n + overlap;
		adjusted_mode->crtc_clock /= n;
	}

	intel_dp_audio_compute_config(encoder, pipe_config, conn_state);

	intel_link_compute_m_n(link_bpp_x16,
			       pipe_config->lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       intel_dp_bw_fec_overhead(pipe_config->fec_enable),
			       &pipe_config->dp_m_n);

	/* FIXME: abstract this better */
	if (pipe_config->splitter.enable)
		pipe_config->dp_m_n.data_m *= pipe_config->splitter.link_count;

	if (!HAS_DDI(dev_priv))
		g4x_dp_set_clock(encoder, pipe_config);

	intel_vrr_compute_config(pipe_config, conn_state);
	intel_dp_compute_as_sdp(intel_dp, pipe_config);
	intel_psr_compute_config(intel_dp, pipe_config, conn_state);
	intel_dp_drrs_compute_config(connector, pipe_config, link_bpp_x16);
	intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
	intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);

	return intel_dp_tunnel_atomic_compute_stream_bw(state, intel_dp, connector,
							pipe_config);
}
3011 
3012 void intel_dp_set_link_params(struct intel_dp *intel_dp,
3013 			      int link_rate, int lane_count)
3014 {
3015 	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
3016 	intel_dp->link_trained = false;
3017 	intel_dp->link_rate = link_rate;
3018 	intel_dp->lane_count = lane_count;
3019 }
3020 
3021 static void intel_dp_reset_max_link_params(struct intel_dp *intel_dp)
3022 {
3023 	intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
3024 	intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
3025 }
3026 
3027 /* Enable backlight PWM and backlight PP control. */
3028 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
3029 			    const struct drm_connector_state *conn_state)
3030 {
3031 	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
3032 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3033 
3034 	if (!intel_dp_is_edp(intel_dp))
3035 		return;
3036 
3037 	drm_dbg_kms(&i915->drm, "\n");
3038 
3039 	intel_backlight_enable(crtc_state, conn_state);
3040 	intel_pps_backlight_on(intel_dp);
3041 }
3042 
3043 /* Disable backlight PP control and backlight PWM. */
3044 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
3045 {
3046 	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
3047 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3048 
3049 	if (!intel_dp_is_edp(intel_dp))
3050 		return;
3051 
3052 	drm_dbg_kms(&i915->drm, "\n");
3053 
3054 	intel_pps_backlight_off(intel_dp);
3055 	intel_backlight_disable(old_conn_state);
3056 }
3057 
3058 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
3059 {
3060 	/*
3061 	 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
3062 	 * be capable of signalling downstream hpd with a long pulse.
3063 	 * Whether or not that means D3 is safe to use is not clear,
3064 	 * but let's assume so until proven otherwise.
3065 	 *
3066 	 * FIXME should really check all downstream ports...
3067 	 */
3068 	return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
3069 		drm_dp_is_branch(intel_dp->dpcd) &&
3070 		intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
3071 }
3072 
3073 static int
3074 write_dsc_decompression_flag(struct drm_dp_aux *aux, u8 flag, bool set)
3075 {
3076 	int err;
3077 	u8 val;
3078 
3079 	err = drm_dp_dpcd_readb(aux, DP_DSC_ENABLE, &val);
3080 	if (err < 0)
3081 		return err;
3082 
3083 	if (set)
3084 		val |= flag;
3085 	else
3086 		val &= ~flag;
3087 
3088 	return drm_dp_dpcd_writeb(aux, DP_DSC_ENABLE, val);
3089 }
3090 
3091 static void
3092 intel_dp_sink_set_dsc_decompression(struct intel_connector *connector,
3093 				    bool enable)
3094 {
3095 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
3096 
3097 	if (write_dsc_decompression_flag(connector->dp.dsc_decompression_aux,
3098 					 DP_DECOMPRESSION_EN, enable) < 0)
3099 		drm_dbg_kms(&i915->drm,
3100 			    "Failed to %s sink decompression state\n",
3101 			    str_enable_disable(enable));
3102 }
3103 
3104 static void
3105 intel_dp_sink_set_dsc_passthrough(const struct intel_connector *connector,
3106 				  bool enable)
3107 {
3108 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
3109 	struct drm_dp_aux *aux = connector->port ?
3110 				 connector->port->passthrough_aux : NULL;
3111 
3112 	if (!aux)
3113 		return;
3114 
3115 	if (write_dsc_decompression_flag(aux,
3116 					 DP_DSC_PASSTHROUGH_EN, enable) < 0)
3117 		drm_dbg_kms(&i915->drm,
3118 			    "Failed to %s sink compression passthrough state\n",
3119 			    str_enable_disable(enable));
3120 }
3121 
/*
 * Return the number of connectors in @state that currently have DSC
 * decompression enabled through the same decompression AUX device as
 * @connector. @for_get_ref selects whether the consistency WARN checks the
 * new (enable path) or the old (disable path) connector state.
 */
static int intel_dp_dsc_aux_ref_count(struct intel_atomic_state *state,
				      const struct intel_connector *connector,
				      bool for_get_ref)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct drm_connector *_connector_iter;
	struct drm_connector_state *old_conn_state;
	struct drm_connector_state *new_conn_state;
	int ref_count = 0;
	int i;

	/*
	 * On SST the decompression AUX device won't be shared, each connector
	 * uses for this its own AUX targeting the sink device.
	 */
	if (!connector->mst_port)
		return connector->dp.dsc_decompression_enabled ? 1 : 0;

	for_each_oldnew_connector_in_state(&state->base, _connector_iter,
					   old_conn_state, new_conn_state, i) {
		const struct intel_connector *
			connector_iter = to_intel_connector(_connector_iter);

		/* Only connectors on the same MST port can share the AUX. */
		if (connector_iter->mst_port != connector->mst_port)
			continue;

		if (!connector_iter->dp.dsc_decompression_enabled)
			continue;

		/* An enabled connector must have a crtc in the relevant state. */
		drm_WARN_ON(&i915->drm,
			    (for_get_ref && !new_conn_state->crtc) ||
			    (!for_get_ref && !old_conn_state->crtc));

		if (connector_iter->dp.dsc_decompression_aux ==
		    connector->dp.dsc_decompression_aux)
			ref_count++;
	}

	return ref_count;
}
3162 
3163 static bool intel_dp_dsc_aux_get_ref(struct intel_atomic_state *state,
3164 				     struct intel_connector *connector)
3165 {
3166 	bool ret = intel_dp_dsc_aux_ref_count(state, connector, true) == 0;
3167 
3168 	connector->dp.dsc_decompression_enabled = true;
3169 
3170 	return ret;
3171 }
3172 
3173 static bool intel_dp_dsc_aux_put_ref(struct intel_atomic_state *state,
3174 				     struct intel_connector *connector)
3175 {
3176 	connector->dp.dsc_decompression_enabled = false;
3177 
3178 	return intel_dp_dsc_aux_ref_count(state, connector, false) == 0;
3179 }
3180 
3181 /**
3182  * intel_dp_sink_enable_decompression - Enable DSC decompression in sink/last branch device
3183  * @state: atomic state
3184  * @connector: connector to enable the decompression for
3185  * @new_crtc_state: new state for the CRTC driving @connector
3186  *
3187  * Enable the DSC decompression if required in the %DP_DSC_ENABLE DPCD
3188  * register of the appropriate sink/branch device. On SST this is always the
3189  * sink device, whereas on MST based on each device's DSC capabilities it's
3190  * either the last branch device (enabling decompression in it) or both the
3191  * last branch device (enabling passthrough in it) and the sink device
3192  * (enabling decompression in it).
3193  */
3194 void intel_dp_sink_enable_decompression(struct intel_atomic_state *state,
3195 					struct intel_connector *connector,
3196 					const struct intel_crtc_state *new_crtc_state)
3197 {
3198 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3199 
3200 	if (!new_crtc_state->dsc.compression_enable)
3201 		return;
3202 
3203 	if (drm_WARN_ON(&i915->drm,
3204 			!connector->dp.dsc_decompression_aux ||
3205 			connector->dp.dsc_decompression_enabled))
3206 		return;
3207 
3208 	if (!intel_dp_dsc_aux_get_ref(state, connector))
3209 		return;
3210 
3211 	intel_dp_sink_set_dsc_passthrough(connector, true);
3212 	intel_dp_sink_set_dsc_decompression(connector, true);
3213 }
3214 
3215 /**
3216  * intel_dp_sink_disable_decompression - Disable DSC decompression in sink/last branch device
3217  * @state: atomic state
3218  * @connector: connector to disable the decompression for
3219  * @old_crtc_state: old state for the CRTC driving @connector
3220  *
3221  * Disable the DSC decompression if required in the %DP_DSC_ENABLE DPCD
3222  * register of the appropriate sink/branch device, corresponding to the
3223  * sequence in intel_dp_sink_enable_decompression().
3224  */
3225 void intel_dp_sink_disable_decompression(struct intel_atomic_state *state,
3226 					 struct intel_connector *connector,
3227 					 const struct intel_crtc_state *old_crtc_state)
3228 {
3229 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3230 
3231 	if (!old_crtc_state->dsc.compression_enable)
3232 		return;
3233 
3234 	if (drm_WARN_ON(&i915->drm,
3235 			!connector->dp.dsc_decompression_aux ||
3236 			!connector->dp.dsc_decompression_enabled))
3237 		return;
3238 
3239 	if (!intel_dp_dsc_aux_put_ref(state, connector))
3240 		return;
3241 
3242 	intel_dp_sink_set_dsc_decompression(connector, false);
3243 	intel_dp_sink_set_dsc_passthrough(connector, false);
3244 }
3245 
/*
 * Write the source OUI (00 aa 01) to the eDP sink. With @careful the OUI is
 * first read back and only rewritten when it differs, to avoid clearing any
 * sink state during driver init.
 */
static void
intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 oui[] = { 0x00, 0xaa, 0x01 };
	u8 buf[3] = {};

	/*
	 * During driver init, we want to be careful and avoid changing the source OUI if it's
	 * already set to what we want, so as to avoid clearing any state by accident
	 */
	if (careful) {
		/* On read failure buf stays zeroed, so the write below still happens. */
		if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0)
			drm_err(&i915->drm, "Failed to read source OUI\n");

		if (memcmp(oui, buf, sizeof(oui)) == 0)
			return;
	}

	if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0)
		drm_err(&i915->drm, "Failed to write source OUI\n");

	/* Timestamp the write; see intel_dp_wait_source_oui(). */
	intel_dp->last_oui_write = jiffies;
}
3270 
3271 void intel_dp_wait_source_oui(struct intel_dp *intel_dp)
3272 {
3273 	struct intel_connector *connector = intel_dp->attached_connector;
3274 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3275 
3276 	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Performing OUI wait (%u ms)\n",
3277 		    connector->base.base.id, connector->base.name,
3278 		    connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout);
3279 
3280 	wait_remaining_ms_from_jiffies(intel_dp->last_oui_write,
3281 				       connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout);
3282 }
3283 
/* If the device supports it, try to set the power state appropriately */
void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DP_SET_POWER_D0) {
		/* Keep the sink powered if downstream HPD depends on it. */
		if (downstream_hpd_needs_d0(intel_dp))
			return;

		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
	} else {
		struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

		lspcon_resume(dp_to_dig_port(intel_dp));

		/* Write the source OUI as early as possible */
		if (intel_dp_is_edp(intel_dp))
			intel_edp_init_source_oui(intel_dp, false);

		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			/* drm_dp_dpcd_writeb() returns 1 when one byte was written. */
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
			if (ret == 1)
				break;
			msleep(1);
		}

		if (ret == 1 && lspcon->active)
			lspcon_wait_pcon_mode(lspcon);
	}

	if (ret != 1)
		drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Set power to %s failed\n",
			    encoder->base.base.id, encoder->base.name,
			    mode == DP_SET_POWER_D0 ? "D0" : "D3");
}
3329 
3330 static bool
3331 intel_dp_get_dpcd(struct intel_dp *intel_dp);
3332 
3333 /**
3334  * intel_dp_sync_state - sync the encoder state during init/resume
3335  * @encoder: intel encoder to sync
3336  * @crtc_state: state for the CRTC connected to the encoder
3337  *
3338  * Sync any state stored in the encoder wrt. HW state during driver init
3339  * and system resume.
3340  */
3341 void intel_dp_sync_state(struct intel_encoder *encoder,
3342 			 const struct intel_crtc_state *crtc_state)
3343 {
3344 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3345 	bool dpcd_updated = false;
3346 
3347 	/*
3348 	 * Don't clobber DPCD if it's been already read out during output
3349 	 * setup (eDP) or detect.
3350 	 */
3351 	if (crtc_state && intel_dp->dpcd[DP_DPCD_REV] == 0) {
3352 		intel_dp_get_dpcd(intel_dp);
3353 		dpcd_updated = true;
3354 	}
3355 
3356 	intel_dp_tunnel_resume(intel_dp, crtc_state, dpcd_updated);
3357 
3358 	if (crtc_state)
3359 		intel_dp_reset_max_link_params(intel_dp);
3360 }
3361 
/*
 * Check whether the BIOS-programmed state allows a fastset; when it doesn't,
 * mark the uapi state changed to force a full modeset and return false.
 */
bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	bool fastset = true;

	/*
	 * If BIOS has set an unsupported or non-standard link rate for some
	 * reason force an encoder recompute and full modeset.
	 */
	if (intel_dp_rate_index(intel_dp->source_rates, intel_dp->num_source_rates,
				crtc_state->port_clock) < 0) {
		drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset due to unsupported link rate\n",
			    encoder->base.base.id, encoder->base.name);
		crtc_state->uapi.connectors_changed = true;
		fastset = false;
	}

	/*
	 * FIXME hack to force full modeset when DSC is being used.
	 *
	 * As long as we do not have full state readout and config comparison
	 * of crtc_state->dsc, we have no way to ensure reliable fastset.
	 * Remove once we have readout for DSC.
	 */
	if (crtc_state->dsc.compression_enable) {
		drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset due to DSC being enabled\n",
			    encoder->base.base.id, encoder->base.name);
		crtc_state->uapi.mode_changed = true;
		fastset = false;
	}

	/* Panel replay state is only computed during a full modeset. */
	if (CAN_PANEL_REPLAY(intel_dp)) {
		drm_dbg_kms(&i915->drm,
			    "[ENCODER:%d:%s] Forcing full modeset to compute panel replay state\n",
			    encoder->base.base.id, encoder->base.name);
		crtc_state->uapi.mode_changed = true;
		fastset = false;
	}

	return fastset;
}
3405 
3406 static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
3407 {
3408 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3409 
3410 	/* Clear the cached register set to avoid using stale values */
3411 
3412 	memset(intel_dp->pcon_dsc_dpcd, 0, sizeof(intel_dp->pcon_dsc_dpcd));
3413 
3414 	if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER,
3415 			     intel_dp->pcon_dsc_dpcd,
3416 			     sizeof(intel_dp->pcon_dsc_dpcd)) < 0)
3417 		drm_err(&i915->drm, "Failed to read DPCD register 0x%x\n",
3418 			DP_PCON_DSC_ENCODER);
3419 
3420 	drm_dbg_kms(&i915->drm, "PCON ENCODER DSC DPCD: %*ph\n",
3421 		    (int)sizeof(intel_dp->pcon_dsc_dpcd), intel_dp->pcon_dsc_dpcd);
3422 }
3423 
/* Return the highest FRL bandwidth (in Gbps) present in the bit mask, 0 if none. */
static int intel_dp_pcon_get_frl_mask(u8 frl_bw_mask)
{
	/* FRL bandwidths in Gbps, indexed by their bit position in the mask. */
	static const int bw_gbps[] = {9, 18, 24, 32, 40, 48};
	int i = sizeof(bw_gbps) / sizeof(bw_gbps[0]);

	while (i--) {
		if (frl_bw_mask & (1 << i))
			return bw_gbps[i];
	}

	return 0;
}
3435 
3436 static int intel_dp_pcon_set_frl_mask(int max_frl)
3437 {
3438 	switch (max_frl) {
3439 	case 48:
3440 		return DP_PCON_FRL_BW_MASK_48GBPS;
3441 	case 40:
3442 		return DP_PCON_FRL_BW_MASK_40GBPS;
3443 	case 32:
3444 		return DP_PCON_FRL_BW_MASK_32GBPS;
3445 	case 24:
3446 		return DP_PCON_FRL_BW_MASK_24GBPS;
3447 	case 18:
3448 		return DP_PCON_FRL_BW_MASK_18GBPS;
3449 	case 9:
3450 		return DP_PCON_FRL_BW_MASK_9GBPS;
3451 	}
3452 
3453 	return 0;
3454 }
3455 
3456 static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp)
3457 {
3458 	struct intel_connector *intel_connector = intel_dp->attached_connector;
3459 	struct drm_connector *connector = &intel_connector->base;
3460 	int max_frl_rate;
3461 	int max_lanes, rate_per_lane;
3462 	int max_dsc_lanes, dsc_rate_per_lane;
3463 
3464 	max_lanes = connector->display_info.hdmi.max_lanes;
3465 	rate_per_lane = connector->display_info.hdmi.max_frl_rate_per_lane;
3466 	max_frl_rate = max_lanes * rate_per_lane;
3467 
3468 	if (connector->display_info.hdmi.dsc_cap.v_1p2) {
3469 		max_dsc_lanes = connector->display_info.hdmi.dsc_cap.max_lanes;
3470 		dsc_rate_per_lane = connector->display_info.hdmi.dsc_cap.max_frl_rate_per_lane;
3471 		if (max_dsc_lanes && dsc_rate_per_lane)
3472 			max_frl_rate = min(max_frl_rate, max_dsc_lanes * dsc_rate_per_lane);
3473 	}
3474 
3475 	return max_frl_rate;
3476 }
3477 
3478 static bool
3479 intel_dp_pcon_is_frl_trained(struct intel_dp *intel_dp,
3480 			     u8 max_frl_bw_mask, u8 *frl_trained_mask)
3481 {
3482 	if (drm_dp_pcon_hdmi_link_active(&intel_dp->aux) &&
3483 	    drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, frl_trained_mask) == DP_PCON_HDMI_MODE_FRL &&
3484 	    *frl_trained_mask >= max_frl_bw_mask)
3485 		return true;
3486 
3487 	return false;
3488 }
3489 
/*
 * Train the PCON->HDMI FRL link at the highest bandwidth supported by both
 * the PCON and the HDMI sink. Returns 0 on success (including when already
 * trained), a negative error code otherwise.
 */
static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
{
#define TIMEOUT_FRL_READY_MS 500
#define TIMEOUT_HDMI_LINK_ACTIVE_MS 1000

	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int max_frl_bw, max_pcon_frl_bw, max_edid_frl_bw, ret;
	u8 max_frl_bw_mask = 0, frl_trained_mask;
	bool is_active;

	max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
	drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw);

	max_edid_frl_bw = intel_dp_hdmi_sink_max_frl(intel_dp);
	drm_dbg(&i915->drm, "Sink max rate from EDID = %d Gbps\n", max_edid_frl_bw);

	/* Train at the lower of the PCON and sink capabilities. */
	max_frl_bw = min(max_edid_frl_bw, max_pcon_frl_bw);

	if (max_frl_bw <= 0)
		return -EINVAL;

	max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw);
	drm_dbg(&i915->drm, "MAX_FRL_BW_MASK = %u\n", max_frl_bw_mask);

	/* Nothing to do if the link is already trained at this bandwidth. */
	if (intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask))
		goto frl_trained;

	ret = drm_dp_pcon_frl_prepare(&intel_dp->aux, false);
	if (ret < 0)
		return ret;
	/* Wait for PCON to be FRL Ready */
	wait_for(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux) == true, TIMEOUT_FRL_READY_MS);

	if (!is_active)
		return -ETIMEDOUT;

	ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw,
					  DP_PCON_ENABLE_SEQUENTIAL_LINK);
	if (ret < 0)
		return ret;
	ret = drm_dp_pcon_frl_configure_2(&intel_dp->aux, max_frl_bw_mask,
					  DP_PCON_FRL_LINK_TRAIN_NORMAL);
	if (ret < 0)
		return ret;
	ret = drm_dp_pcon_frl_enable(&intel_dp->aux);
	if (ret < 0)
		return ret;
	/*
	 * Wait for FRL to be completed
	 * Check if the HDMI Link is up and active.
	 */
	wait_for(is_active =
		 intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask),
		 TIMEOUT_HDMI_LINK_ACTIVE_MS);

	if (!is_active)
		return -ETIMEDOUT;

frl_trained:
	/* Record the achieved rate; frl_trained_mask was set by the checks above. */
	drm_dbg(&i915->drm, "FRL_TRAINED_MASK = %u\n", frl_trained_mask);
	intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask);
	intel_dp->frl.is_trained = true;
	drm_dbg(&i915->drm, "FRL trained with : %d Gbps\n", intel_dp->frl.trained_rate_gbps);

	return 0;
}
3556 
3557 static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp)
3558 {
3559 	if (drm_dp_is_branch(intel_dp->dpcd) &&
3560 	    intel_dp_has_hdmi_sink(intel_dp) &&
3561 	    intel_dp_hdmi_sink_max_frl(intel_dp) > 0)
3562 		return true;
3563 
3564 	return false;
3565 }
3566 
3567 static
3568 int intel_dp_pcon_set_tmds_mode(struct intel_dp *intel_dp)
3569 {
3570 	int ret;
3571 	u8 buf = 0;
3572 
3573 	/* Set PCON source control mode */
3574 	buf |= DP_PCON_ENABLE_SOURCE_CTL_MODE;
3575 
3576 	ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
3577 	if (ret < 0)
3578 		return ret;
3579 
3580 	/* Set HDMI LINK ENABLE */
3581 	buf |= DP_PCON_ENABLE_HDMI_LINK;
3582 	ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
3583 	if (ret < 0)
3584 		return ret;
3585 
3586 	return 0;
3587 }
3588 
/*
 * Ensure the PCON's HDMI link is trained: try FRL when the PCON and sink
 * support it, falling back to TMDS mode on failure.
 */
void intel_dp_check_frl_training(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/*
	 * Always go for FRL training if:
	 * -PCON supports SRC_CTL_MODE (VESA DP2.0-HDMI2.1 PCON Spec Draft-1 Sec-7)
	 * -sink is HDMI2.1
	 */
	if (!(intel_dp->downstream_ports[2] & DP_PCON_SOURCE_CTL_MODE) ||
	    !intel_dp_is_hdmi_2_1_sink(intel_dp) ||
	    intel_dp->frl.is_trained)
		return;

	if (intel_dp_pcon_start_frl_training(intel_dp) < 0) {
		int ret, mode;

		drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n");
		ret = intel_dp_pcon_set_tmds_mode(intel_dp);
		/* Read back the mode to verify the PCON actually switched to TMDS. */
		mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL);

		if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS)
			drm_dbg(&dev_priv->drm, "Issue with PCON, cannot set TMDS mode\n");
	} else {
		drm_dbg(&dev_priv->drm, "FRL training Completed\n");
	}
}
3616 
3617 static int
3618 intel_dp_pcon_dsc_enc_slice_height(const struct intel_crtc_state *crtc_state)
3619 {
3620 	int vactive = crtc_state->hw.adjusted_mode.vdisplay;
3621 
3622 	return intel_hdmi_dsc_get_slice_height(vactive);
3623 }
3624 
3625 static int
3626 intel_dp_pcon_dsc_enc_slices(struct intel_dp *intel_dp,
3627 			     const struct intel_crtc_state *crtc_state)
3628 {
3629 	struct intel_connector *intel_connector = intel_dp->attached_connector;
3630 	struct drm_connector *connector = &intel_connector->base;
3631 	int hdmi_throughput = connector->display_info.hdmi.dsc_cap.clk_per_slice;
3632 	int hdmi_max_slices = connector->display_info.hdmi.dsc_cap.max_slices;
3633 	int pcon_max_slices = drm_dp_pcon_dsc_max_slices(intel_dp->pcon_dsc_dpcd);
3634 	int pcon_max_slice_width = drm_dp_pcon_dsc_max_slice_width(intel_dp->pcon_dsc_dpcd);
3635 
3636 	return intel_hdmi_dsc_get_num_slices(crtc_state, pcon_max_slices,
3637 					     pcon_max_slice_width,
3638 					     hdmi_max_slices, hdmi_throughput);
3639 }
3640 
3641 static int
3642 intel_dp_pcon_dsc_enc_bpp(struct intel_dp *intel_dp,
3643 			  const struct intel_crtc_state *crtc_state,
3644 			  int num_slices, int slice_width)
3645 {
3646 	struct intel_connector *intel_connector = intel_dp->attached_connector;
3647 	struct drm_connector *connector = &intel_connector->base;
3648 	int output_format = crtc_state->output_format;
3649 	bool hdmi_all_bpp = connector->display_info.hdmi.dsc_cap.all_bpp;
3650 	int pcon_fractional_bpp = drm_dp_pcon_dsc_bpp_incr(intel_dp->pcon_dsc_dpcd);
3651 	int hdmi_max_chunk_bytes =
3652 		connector->display_info.hdmi.dsc_cap.total_chunk_kbytes * 1024;
3653 
3654 	return intel_hdmi_dsc_get_bpp(pcon_fractional_bpp, slice_width,
3655 				      num_slices, output_format, hdmi_all_bpp,
3656 				      hdmi_max_chunk_bytes);
3657 }
3658 
/*
 * Program DSC PPS overrides (slice height/width, slice count, bpp) into a
 * DP->HDMI2.1 PCON so its DSC encoder matches what the HDMI sink accepts.
 * Bails out silently if the PCON or sink lacks DSC 1.2 support or any of
 * the derived parameters is zero.
 */
void
intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	u8 pps_param[6];
	int slice_height;
	int slice_width;
	int num_slices;
	int bits_per_pixel;
	int ret;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector *connector;
	bool hdmi_is_dsc_1_2;

	if (!intel_dp_is_hdmi_2_1_sink(intel_dp))
		return;

	if (!intel_connector)
		return;
	connector = &intel_connector->base;
	hdmi_is_dsc_1_2 = connector->display_info.hdmi.dsc_cap.v_1p2;

	/* Both the PCON encoder and the HDMI sink must support DSC 1.2. */
	if (!drm_dp_pcon_enc_is_dsc_1_2(intel_dp->pcon_dsc_dpcd) ||
	    !hdmi_is_dsc_1_2)
		return;

	slice_height = intel_dp_pcon_dsc_enc_slice_height(crtc_state);
	if (!slice_height)
		return;

	num_slices = intel_dp_pcon_dsc_enc_slices(intel_dp, crtc_state);
	if (!num_slices)
		return;

	slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay,
				   num_slices);

	bits_per_pixel = intel_dp_pcon_dsc_enc_bpp(intel_dp, crtc_state,
						   num_slices, slice_width);
	if (!bits_per_pixel)
		return;

	/* Pack the parameters little-endian; bpp is limited to 10 bits. */
	pps_param[0] = slice_height & 0xFF;
	pps_param[1] = slice_height >> 8;
	pps_param[2] = slice_width & 0xFF;
	pps_param[3] = slice_width >> 8;
	pps_param[4] = bits_per_pixel & 0xFF;
	pps_param[5] = (bits_per_pixel >> 8) & 0x3;

	ret = drm_dp_pcon_pps_override_param(&intel_dp->aux, pps_param);
	if (ret < 0)
		drm_dbg_kms(&i915->drm, "Failed to set pcon DSC\n");
}
3713 
3714 void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
3715 					   const struct intel_crtc_state *crtc_state)
3716 {
3717 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3718 	bool ycbcr444_to_420 = false;
3719 	bool rgb_to_ycbcr = false;
3720 	u8 tmp;
3721 
3722 	if (intel_dp->dpcd[DP_DPCD_REV] < 0x13)
3723 		return;
3724 
3725 	if (!drm_dp_is_branch(intel_dp->dpcd))
3726 		return;
3727 
3728 	tmp = intel_dp_has_hdmi_sink(intel_dp) ? DP_HDMI_DVI_OUTPUT_CONFIG : 0;
3729 
3730 	if (drm_dp_dpcd_writeb(&intel_dp->aux,
3731 			       DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1)
3732 		drm_dbg_kms(&i915->drm, "Failed to %s protocol converter HDMI mode\n",
3733 			    str_enable_disable(intel_dp_has_hdmi_sink(intel_dp)));
3734 
3735 	if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
3736 		switch (crtc_state->output_format) {
3737 		case INTEL_OUTPUT_FORMAT_YCBCR420:
3738 			break;
3739 		case INTEL_OUTPUT_FORMAT_YCBCR444:
3740 			ycbcr444_to_420 = true;
3741 			break;
3742 		case INTEL_OUTPUT_FORMAT_RGB:
3743 			rgb_to_ycbcr = true;
3744 			ycbcr444_to_420 = true;
3745 			break;
3746 		default:
3747 			MISSING_CASE(crtc_state->output_format);
3748 			break;
3749 		}
3750 	} else if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
3751 		switch (crtc_state->output_format) {
3752 		case INTEL_OUTPUT_FORMAT_YCBCR444:
3753 			break;
3754 		case INTEL_OUTPUT_FORMAT_RGB:
3755 			rgb_to_ycbcr = true;
3756 			break;
3757 		default:
3758 			MISSING_CASE(crtc_state->output_format);
3759 			break;
3760 		}
3761 	}
3762 
3763 	tmp = ycbcr444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
3764 
3765 	if (drm_dp_dpcd_writeb(&intel_dp->aux,
3766 			       DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
3767 		drm_dbg_kms(&i915->drm,
3768 			    "Failed to %s protocol converter YCbCr 4:2:0 conversion mode\n",
3769 			    str_enable_disable(intel_dp->dfp.ycbcr_444_to_420));
3770 
3771 	tmp = rgb_to_ycbcr ? DP_CONVERSION_BT709_RGB_YCBCR_ENABLE : 0;
3772 
3773 	if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0)
3774 		drm_dbg_kms(&i915->drm,
3775 			    "Failed to %s protocol converter RGB->YCbCr conversion mode\n",
3776 			    str_enable_disable(tmp));
3777 }
3778 
3779 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
3780 {
3781 	u8 dprx = 0;
3782 
3783 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
3784 			      &dprx) != 1)
3785 		return false;
3786 	return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
3787 }
3788 
3789 static void intel_dp_read_dsc_dpcd(struct drm_dp_aux *aux,
3790 				   u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
3791 {
3792 	if (drm_dp_dpcd_read(aux, DP_DSC_SUPPORT, dsc_dpcd,
3793 			     DP_DSC_RECEIVER_CAP_SIZE) < 0) {
3794 		drm_err(aux->drm_dev,
3795 			"Failed to read DPCD register 0x%x\n",
3796 			DP_DSC_SUPPORT);
3797 		return;
3798 	}
3799 
3800 	drm_dbg_kms(aux->drm_dev, "DSC DPCD: %*ph\n",
3801 		    DP_DSC_RECEIVER_CAP_SIZE,
3802 		    dsc_dpcd);
3803 }
3804 
/*
 * Read and cache the sink's DSC and FEC capabilities via the DSC
 * decompression AUX channel. Both caches are cleared first so sinks
 * without DSC support never retain stale data.
 */
void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	/*
	 * Clear the cached register set to avoid using stale values
	 * for the sinks that do not support DSC.
	 */
	memset(connector->dp.dsc_dpcd, 0, sizeof(connector->dp.dsc_dpcd));

	/* Clear fec_capable to avoid using stale values */
	connector->dp.fec_capability = 0;

	/* DSC caps only exist on DPCD 1.4+ sinks. */
	if (dpcd_rev < DP_DPCD_REV_14)
		return;

	intel_dp_read_dsc_dpcd(connector->dp.dsc_decompression_aux,
			       connector->dp.dsc_dpcd);

	if (drm_dp_dpcd_readb(connector->dp.dsc_decompression_aux, DP_FEC_CAPABILITY,
			      &connector->dp.fec_capability) < 0) {
		drm_err(&i915->drm, "Failed to read FEC DPCD register\n");
		return;
	}

	drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n",
		    connector->dp.fec_capability);
}
3833 
3834 static void intel_edp_get_dsc_sink_cap(u8 edp_dpcd_rev, struct intel_connector *connector)
3835 {
3836 	if (edp_dpcd_rev < DP_EDP_14)
3837 		return;
3838 
3839 	intel_dp_read_dsc_dpcd(connector->dp.dsc_decompression_aux, connector->dp.dsc_dpcd);
3840 }
3841 
3842 static void intel_edp_mso_mode_fixup(struct intel_connector *connector,
3843 				     struct drm_display_mode *mode)
3844 {
3845 	struct intel_dp *intel_dp = intel_attached_dp(connector);
3846 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
3847 	int n = intel_dp->mso_link_count;
3848 	int overlap = intel_dp->mso_pixel_overlap;
3849 
3850 	if (!mode || !n)
3851 		return;
3852 
3853 	mode->hdisplay = (mode->hdisplay - overlap) * n;
3854 	mode->hsync_start = (mode->hsync_start - overlap) * n;
3855 	mode->hsync_end = (mode->hsync_end - overlap) * n;
3856 	mode->htotal = (mode->htotal - overlap) * n;
3857 	mode->clock *= n;
3858 
3859 	drm_mode_set_name(mode);
3860 
3861 	drm_dbg_kms(&i915->drm,
3862 		    "[CONNECTOR:%d:%s] using generated MSO mode: " DRM_MODE_FMT "\n",
3863 		    connector->base.base.id, connector->base.name,
3864 		    DRM_MODE_ARG(mode));
3865 }
3866 
3867 void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp)
3868 {
3869 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3870 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3871 	struct intel_connector *connector = intel_dp->attached_connector;
3872 
3873 	if (connector->panel.vbt.edp.bpp && pipe_bpp > connector->panel.vbt.edp.bpp) {
3874 		/*
3875 		 * This is a big fat ugly hack.
3876 		 *
3877 		 * Some machines in UEFI boot mode provide us a VBT that has 18
3878 		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
3879 		 * unknown we fail to light up. Yet the same BIOS boots up with
3880 		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
3881 		 * max, not what it tells us to use.
3882 		 *
3883 		 * Note: This will still be broken if the eDP panel is not lit
3884 		 * up by the BIOS, and thus we can't get the mode at module
3885 		 * load.
3886 		 */
3887 		drm_dbg_kms(&dev_priv->drm,
3888 			    "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
3889 			    pipe_bpp, connector->panel.vbt.edp.bpp);
3890 		connector->panel.vbt.edp.bpp = pipe_bpp;
3891 	}
3892 }
3893 
/*
 * Read and validate the eDP panel's MSO (multi-segment operation) link
 * capabilities, caching the link count and pixel overlap on intel_dp.
 * Invalid sink caps or missing source support disable MSO (count = 0).
 */
static void intel_edp_mso_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_display_info *info = &connector->base.display_info;
	u8 mso;

	/* MSO link capabilities only exist on eDP 1.4+. */
	if (intel_dp->edp_dpcd[0] < DP_EDP_14)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_MSO_LINK_CAPABILITIES, &mso) != 1) {
		drm_err(&i915->drm, "Failed to read MSO cap\n");
		return;
	}

	/* Valid configurations are SST or MSO 2x1, 2x2, 4x1 */
	mso &= DP_EDP_MSO_NUMBER_OF_LINKS_MASK;
	if (mso % 2 || mso > drm_dp_max_lane_count(intel_dp->dpcd)) {
		drm_err(&i915->drm, "Invalid MSO link count cap %u\n", mso);
		mso = 0;
	}

	if (mso) {
		drm_dbg_kms(&i915->drm, "Sink MSO %ux%u configuration, pixel overlap %u\n",
			    mso, drm_dp_max_lane_count(intel_dp->dpcd) / mso,
			    info->mso_pixel_overlap);
		/* Sink supports MSO but this platform's source side doesn't. */
		if (!HAS_MSO(i915)) {
			drm_err(&i915->drm, "No source MSO support, disabling\n");
			mso = 0;
		}
	}

	intel_dp->mso_link_count = mso;
	intel_dp->mso_pixel_overlap = mso ? info->mso_pixel_overlap : 0;
}
3929 
3930 static bool
3931 intel_edp_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector)
3932 {
3933 	struct drm_i915_private *dev_priv =
3934 		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
3935 
3936 	/* this function is meant to be called only once */
3937 	drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);
3938 
3939 	if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0)
3940 		return false;
3941 
3942 	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
3943 			 drm_dp_is_branch(intel_dp->dpcd));
3944 
3945 	/*
3946 	 * Read the eDP display control registers.
3947 	 *
3948 	 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
3949 	 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
3950 	 * set, but require eDP 1.4+ detection (e.g. for supported link rates
3951 	 * method). The display control registers should read zero if they're
3952 	 * not supported anyway.
3953 	 */
3954 	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
3955 			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
3956 			     sizeof(intel_dp->edp_dpcd)) {
3957 		drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
3958 			    (int)sizeof(intel_dp->edp_dpcd),
3959 			    intel_dp->edp_dpcd);
3960 
3961 		intel_dp->use_max_params = intel_dp->edp_dpcd[0] < DP_EDP_14;
3962 	}
3963 
3964 	/*
3965 	 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
3966 	 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
3967 	 */
3968 	intel_psr_init_dpcd(intel_dp);
3969 
3970 	/* Clear the default sink rates */
3971 	intel_dp->num_sink_rates = 0;
3972 
3973 	/* Read the eDP 1.4+ supported link rates. */
3974 	if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
3975 		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3976 		int i;
3977 
3978 		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
3979 				sink_rates, sizeof(sink_rates));
3980 
3981 		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3982 			int val = le16_to_cpu(sink_rates[i]);
3983 
3984 			if (val == 0)
3985 				break;
3986 
3987 			/* Value read multiplied by 200kHz gives the per-lane
3988 			 * link rate in kHz. The source rates are, however,
3989 			 * stored in terms of LS_Clk kHz. The full conversion
3990 			 * back to symbols is
3991 			 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
3992 			 */
3993 			intel_dp->sink_rates[i] = (val * 200) / 10;
3994 		}
3995 		intel_dp->num_sink_rates = i;
3996 	}
3997 
3998 	/*
3999 	 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
4000 	 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
4001 	 */
4002 	if (intel_dp->num_sink_rates)
4003 		intel_dp->use_rate_select = true;
4004 	else
4005 		intel_dp_set_sink_rates(intel_dp);
4006 	intel_dp_set_max_sink_lane_count(intel_dp);
4007 
4008 	/* Read the eDP DSC DPCD registers */
4009 	if (HAS_DSC(dev_priv))
4010 		intel_edp_get_dsc_sink_cap(intel_dp->edp_dpcd[0],
4011 					   connector);
4012 
4013 	/*
4014 	 * If needed, program our source OUI so we can make various Intel-specific AUX services
4015 	 * available (such as HDR backlight controls)
4016 	 */
4017 	intel_edp_init_source_oui(intel_dp, true);
4018 
4019 	return true;
4020 }
4021 
4022 static bool
4023 intel_dp_has_sink_count(struct intel_dp *intel_dp)
4024 {
4025 	if (!intel_dp->attached_connector)
4026 		return false;
4027 
4028 	return drm_dp_read_sink_count_cap(&intel_dp->attached_connector->base,
4029 					  intel_dp->dpcd,
4030 					  &intel_dp->desc);
4031 }
4032 
/*
 * Refresh the cached sink link-rate and lane-count caps from DPCD, then
 * recompute the rates common to source and sink from the updated values.
 */
void intel_dp_update_sink_caps(struct intel_dp *intel_dp)
{
	intel_dp_set_sink_rates(intel_dp);
	intel_dp_set_max_sink_lane_count(intel_dp);
	intel_dp_set_common_rates(intel_dp);
}
4039 
/*
 * (Re-)read the sink's DPCD state on detect/hotplug: LTTPR and DPRX caps,
 * branch descriptor, link caps, sink count and downstream port info.
 * Returns false when no usable sink is present (AUX failure, or a dongle
 * with SINK_COUNT == 0, i.e. nothing plugged into it).
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	int ret;

	if (intel_dp_init_lttpr_and_dprx_caps(intel_dp) < 0)
		return false;

	/*
	 * Don't clobber cached eDP rates. Also skip re-reading
	 * the OUI/ID since we know it won't change.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
				 drm_dp_is_branch(intel_dp->dpcd));

		intel_dp_update_sink_caps(intel_dp);
	}

	if (intel_dp_has_sink_count(intel_dp)) {
		ret = drm_dp_read_sink_count(&intel_dp->aux);
		if (ret < 0)
			return false;

		/*
		 * Sink count can change between short pulse hpd hence
		 * a member variable in intel_dp will track any changes
		 * between short pulse interrupts.
		 */
		intel_dp->sink_count = ret;

		/*
		 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
		 * a dongle is present but no display. Unless we require to know
		 * if a dongle is present or not, we don't need to update
		 * downstream port information. So, an early return here saves
		 * time from performing other operations which are not required.
		 */
		if (!intel_dp->sink_count)
			return false;
	}

	return drm_dp_read_downstream_info(&intel_dp->aux, intel_dp->dpcd,
					   intel_dp->downstream_ports) == 0;
}
4085 
4086 static const char *intel_dp_mst_mode_str(enum drm_dp_mst_mode mst_mode)
4087 {
4088 	if (mst_mode == DRM_DP_MST)
4089 		return "MST";
4090 	else if (mst_mode == DRM_DP_SST_SIDEBAND_MSG)
4091 		return "SST w/ sideband messaging";
4092 	else
4093 		return "SST";
4094 }
4095 
4096 static enum drm_dp_mst_mode
4097 intel_dp_mst_mode_choose(struct intel_dp *intel_dp,
4098 			 enum drm_dp_mst_mode sink_mst_mode)
4099 {
4100 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4101 
4102 	if (!i915->display.params.enable_dp_mst)
4103 		return DRM_DP_SST;
4104 
4105 	if (!intel_dp_mst_source_support(intel_dp))
4106 		return DRM_DP_SST;
4107 
4108 	if (sink_mst_mode == DRM_DP_SST_SIDEBAND_MSG &&
4109 	    !(intel_dp->dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_128B132B))
4110 		return DRM_DP_SST;
4111 
4112 	return sink_mst_mode;
4113 }
4114 
/*
 * Detect the MST mode to use for this sink: read the sink's MST cap from
 * DPCD, filter it through source/modparam constraints, and log the result.
 */
static enum drm_dp_mst_mode
intel_dp_mst_detect(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	enum drm_dp_mst_mode sink_mst_mode;
	enum drm_dp_mst_mode mst_detect;

	sink_mst_mode = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);

	mst_detect = intel_dp_mst_mode_choose(intel_dp, sink_mst_mode);

	drm_dbg_kms(&i915->drm,
		    "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s -> enable: %s\n",
		    encoder->base.base.id, encoder->base.name,
		    str_yes_no(intel_dp_mst_source_support(intel_dp)),
		    intel_dp_mst_mode_str(sink_mst_mode),
		    str_yes_no(i915->display.params.enable_dp_mst),
		    intel_dp_mst_mode_str(mst_detect));

	return mst_detect;
}
4137 
/*
 * Enable or disable the MST topology manager according to the mode
 * detected earlier (anything other than plain SST enables it).
 */
static void
intel_dp_mst_configure(struct intel_dp *intel_dp)
{
	if (!intel_dp_mst_source_support(intel_dp))
		return;

	intel_dp->is_mst = intel_dp->mst_detect != DRM_DP_SST;

	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);

	/* Avoid stale info on the next detect cycle. */
	intel_dp->mst_detect = DRM_DP_SST;
}
4151 
4152 static void
4153 intel_dp_mst_disconnect(struct intel_dp *intel_dp)
4154 {
4155 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4156 
4157 	if (!intel_dp->is_mst)
4158 		return;
4159 
4160 	drm_dbg_kms(&i915->drm, "MST device may have disappeared %d vs %d\n",
4161 		    intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4162 	intel_dp->is_mst = false;
4163 	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4164 }
4165 
4166 static bool
4167 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *esi)
4168 {
4169 	return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, esi, 4) == 4;
4170 }
4171 
4172 static bool intel_dp_ack_sink_irq_esi(struct intel_dp *intel_dp, u8 esi[4])
4173 {
4174 	int retry;
4175 
4176 	for (retry = 0; retry < 3; retry++) {
4177 		if (drm_dp_dpcd_write(&intel_dp->aux, DP_SINK_COUNT_ESI + 1,
4178 				      &esi[1], 3) == 3)
4179 			return true;
4180 	}
4181 
4182 	return false;
4183 }
4184 
4185 bool
4186 intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
4187 		       const struct drm_connector_state *conn_state)
4188 {
4189 	/*
4190 	 * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
4191 	 * of Color Encoding Format and Content Color Gamut], in order to
4192 	 * sending YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP.
4193 	 */
4194 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
4195 		return true;
4196 
4197 	switch (conn_state->colorspace) {
4198 	case DRM_MODE_COLORIMETRY_SYCC_601:
4199 	case DRM_MODE_COLORIMETRY_OPYCC_601:
4200 	case DRM_MODE_COLORIMETRY_BT2020_YCC:
4201 	case DRM_MODE_COLORIMETRY_BT2020_RGB:
4202 	case DRM_MODE_COLORIMETRY_BT2020_CYCC:
4203 		return true;
4204 	default:
4205 		break;
4206 	}
4207 
4208 	return false;
4209 }
4210 
/*
 * Pack an Adaptive-Sync SDP into wire format. Returns the packed length
 * (sizeof(struct dp_sdp)) or -ENOSPC if the destination is too small.
 */
static ssize_t intel_dp_as_sdp_pack(const struct drm_dp_as_sdp *as_sdp,
				    struct dp_sdp *sdp, size_t size)
{
	size_t length = sizeof(struct dp_sdp);

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	/* Prepare AS (Adaptive Sync) SDP Header */
	sdp->sdp_header.HB0 = 0;
	sdp->sdp_header.HB1 = as_sdp->sdp_type;
	sdp->sdp_header.HB2 = 0x02;
	sdp->sdp_header.HB3 = as_sdp->length;

	/* Fill AS (Adaptive Sync) SDP Payload */
	/* vtotal and target_rr are split little-endian across two bytes. */
	sdp->db[0] = as_sdp->mode;
	sdp->db[1] = as_sdp->vtotal & 0xFF;
	sdp->db[2] = (as_sdp->vtotal >> 8) & 0xFF;
	sdp->db[3] = as_sdp->target_rr & 0xFF;
	sdp->db[4] = (as_sdp->target_rr >> 8) & 0x3;

	return length;
}
4236 
/*
 * Pack an HDR static metadata (DRM) infoframe into a DP SDP. The CTA
 * infoframe is first packed into a scratch buffer, then re-wrapped in a
 * DP SDP header plus two CTA header bytes. Returns the number of bytes
 * to be written out, or -ENOSPC on size/packing mismatch.
 */
static ssize_t
intel_dp_hdr_metadata_infoframe_sdp_pack(struct drm_i915_private *i915,
					 const struct hdmi_drm_infoframe *drm_infoframe,
					 struct dp_sdp *sdp,
					 size_t size)
{
	size_t length = sizeof(struct dp_sdp);
	const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
	unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
	ssize_t len;

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf));
	if (len < 0) {
		drm_dbg_kms(&i915->drm, "buffer size is smaller than hdr metadata infoframe\n");
		return -ENOSPC;
	}

	if (len != infoframe_size) {
		drm_dbg_kms(&i915->drm, "wrong static hdr metadata size\n");
		return -ENOSPC;
	}

	/*
	 * Set up the infoframe sdp packet for HDR static metadata.
	 * Prepare VSC Header for SU as per DP 1.4a spec,
	 * Table 2-100 and Table 2-101
	 */

	/* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */
	sdp->sdp_header.HB0 = 0;
	/*
	 * Packet Type 80h + Non-audio INFOFRAME Type value
	 * HDMI_INFOFRAME_TYPE_DRM: 0x87
	 * - 80h + Non-audio INFOFRAME Type value
	 * - InfoFrame Type: 0x07
	 *    [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame]
	 */
	sdp->sdp_header.HB1 = drm_infoframe->type;
	/*
	 * Least Significant Eight Bits of (Data Byte Count – 1)
	 * infoframe_size - 1
	 */
	sdp->sdp_header.HB2 = 0x1D;
	/* INFOFRAME SDP Version Number */
	sdp->sdp_header.HB3 = (0x13 << 2);
	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	sdp->db[0] = drm_infoframe->version;
	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	sdp->db[1] = drm_infoframe->length;
	/*
	 * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after
	 * HDMI_INFOFRAME_HEADER_SIZE
	 */
	BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2);
	memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
	       HDMI_DRM_INFOFRAME_SIZE);

	/*
	 * Size of DP infoframe sdp packet for HDR static metadata consists of
	 * - DP SDP Header(struct dp_sdp_header): 4 bytes
	 * - Two Data Blocks: 2 bytes
	 *    CTA Header Byte2 (INFOFRAME Version Number)
	 *    CTA Header Byte3 (Length of INFOFRAME)
	 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes
	 *
	 * Prior to GEN11's GMP register size is identical to DP HDR static metadata
	 * infoframe size. But GEN11+ has larger than that size, write_infoframe
	 * will pad rest of the size.
	 */
	return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE;
}
4313 
/*
 * Pack and write a single DP SDP (VSC, Adaptive-Sync, or HDR gamut
 * metadata) to the hardware DIP registers, but only if that infoframe
 * type was enabled in the crtc state.
 */
static void intel_write_dp_sdp(struct intel_encoder *encoder,
			       const struct intel_crtc_state *crtc_state,
			       unsigned int type)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct dp_sdp sdp = {};
	ssize_t len;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	switch (type) {
	case DP_SDP_VSC:
		len = drm_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp);
		break;
	case HDMI_PACKET_TYPE_GAMUT_METADATA:
		len = intel_dp_hdr_metadata_infoframe_sdp_pack(dev_priv,
							       &crtc_state->infoframes.drm.drm,
							       &sdp, sizeof(sdp));
		break;
	case DP_SDP_ADAPTIVE_SYNC:
		len = intel_dp_as_sdp_pack(&crtc_state->infoframes.as_sdp, &sdp,
					   sizeof(sdp));
		break;
	default:
		MISSING_CASE(type);
		return;
	}

	/* Packing failures here indicate a programming error upstream. */
	if (drm_WARN_ON(&dev_priv->drm, len < 0))
		return;

	dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
}
4350 
4351 void intel_dp_set_infoframes(struct intel_encoder *encoder,
4352 			     bool enable,
4353 			     const struct intel_crtc_state *crtc_state,
4354 			     const struct drm_connector_state *conn_state)
4355 {
4356 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4357 	i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
4358 	u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
4359 			 VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
4360 			 VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
4361 
4362 	if (HAS_AS_SDP(dev_priv))
4363 		dip_enable |= VIDEO_DIP_ENABLE_AS_ADL;
4364 
4365 	u32 val = intel_de_read(dev_priv, reg) & ~dip_enable;
4366 
4367 	/* TODO: Sanitize DSC enabling wrt. intel_dsc_dp_pps_write(). */
4368 	if (!enable && HAS_DSC(dev_priv))
4369 		val &= ~VDIP_ENABLE_PPS;
4370 
4371 	/* When PSR is enabled, this routine doesn't disable VSC DIP */
4372 	if (!crtc_state->has_psr)
4373 		val &= ~VIDEO_DIP_ENABLE_VSC_HSW;
4374 
4375 	intel_de_write(dev_priv, reg, val);
4376 	intel_de_posting_read(dev_priv, reg);
4377 
4378 	if (!enable)
4379 		return;
4380 
4381 	intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);
4382 	intel_write_dp_sdp(encoder, crtc_state, DP_SDP_ADAPTIVE_SYNC);
4383 
4384 	intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA);
4385 }
4386 
4387 static
4388 int intel_dp_as_sdp_unpack(struct drm_dp_as_sdp *as_sdp,
4389 			   const void *buffer, size_t size)
4390 {
4391 	const struct dp_sdp *sdp = buffer;
4392 
4393 	if (size < sizeof(struct dp_sdp))
4394 		return -EINVAL;
4395 
4396 	memset(as_sdp, 0, sizeof(*as_sdp));
4397 
4398 	if (sdp->sdp_header.HB0 != 0)
4399 		return -EINVAL;
4400 
4401 	if (sdp->sdp_header.HB1 != DP_SDP_ADAPTIVE_SYNC)
4402 		return -EINVAL;
4403 
4404 	if (sdp->sdp_header.HB2 != 0x02)
4405 		return -EINVAL;
4406 
4407 	if ((sdp->sdp_header.HB3 & 0x3F) != 9)
4408 		return -EINVAL;
4409 
4410 	as_sdp->length = sdp->sdp_header.HB3 & DP_ADAPTIVE_SYNC_SDP_LENGTH;
4411 	as_sdp->mode = sdp->db[0] & DP_ADAPTIVE_SYNC_SDP_OPERATION_MODE;
4412 	as_sdp->vtotal = (sdp->db[2] << 8) | sdp->db[1];
4413 	as_sdp->target_rr = (u64)sdp->db[3] | ((u64)sdp->db[4] & 0x3);
4414 
4415 	return 0;
4416 }
4417 
/*
 * Unpack a VSC SDP read back from the hardware into vsc, validating the
 * header and decoding the pixel encoding/colorimetry payload for the
 * revisions that carry one. Returns 0 on success, -EINVAL on a malformed
 * header, unknown revision, or undersized buffer.
 */
static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
				   const void *buffer, size_t size)
{
	const struct dp_sdp *sdp = buffer;

	if (size < sizeof(struct dp_sdp))
		return -EINVAL;

	memset(vsc, 0, sizeof(*vsc));

	if (sdp->sdp_header.HB0 != 0)
		return -EINVAL;

	if (sdp->sdp_header.HB1 != DP_SDP_VSC)
		return -EINVAL;

	vsc->sdp_type = sdp->sdp_header.HB1;
	vsc->revision = sdp->sdp_header.HB2;
	vsc->length = sdp->sdp_header.HB3;

	if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) ||
	    (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) {
		/*
		 * - HB2 = 0x2, HB3 = 0x8
		 *   VSC SDP supporting 3D stereo + PSR
		 * - HB2 = 0x4, HB3 = 0xe
		 *   VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of
		 *   first scan line of the SU region (applies to eDP v1.4b
		 *   and higher).
		 */
		return 0;
	} else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) {
		/*
		 * - HB2 = 0x5, HB3 = 0x13
		 *   VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry
		 *   Format.
		 */
		vsc->pixelformat = (sdp->db[16] >> 4) & 0xf;
		vsc->colorimetry = sdp->db[16] & 0xf;
		vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1;

		/* db[17] bits 2:0 encode the bits-per-component. */
		switch (sdp->db[17] & 0x7) {
		case 0x0:
			vsc->bpc = 6;
			break;
		case 0x1:
			vsc->bpc = 8;
			break;
		case 0x2:
			vsc->bpc = 10;
			break;
		case 0x3:
			vsc->bpc = 12;
			break;
		case 0x4:
			vsc->bpc = 16;
			break;
		default:
			MISSING_CASE(sdp->db[17] & 0x7);
			return -EINVAL;
		}

		vsc->content_type = sdp->db[18] & 0x7;
	} else {
		return -EINVAL;
	}

	return 0;
}
4487 
4488 static void
4489 intel_read_dp_as_sdp(struct intel_encoder *encoder,
4490 		     struct intel_crtc_state *crtc_state,
4491 		     struct drm_dp_as_sdp *as_sdp)
4492 {
4493 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
4494 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4495 	unsigned int type = DP_SDP_ADAPTIVE_SYNC;
4496 	struct dp_sdp sdp = {};
4497 	int ret;
4498 
4499 	if ((crtc_state->infoframes.enable &
4500 	     intel_hdmi_infoframe_enable(type)) == 0)
4501 		return;
4502 
4503 	dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
4504 				 sizeof(sdp));
4505 
4506 	ret = intel_dp_as_sdp_unpack(as_sdp, &sdp, sizeof(sdp));
4507 	if (ret)
4508 		drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP AS SDP\n");
4509 }
4510 
/*
 * Unpack an HDR static metadata (DRM) infoframe SDP read back from the
 * hardware, validating each header byte against the values written by
 * intel_dp_hdr_metadata_infoframe_sdp_pack(). Returns 0 on success or a
 * negative error code on any mismatch.
 */
static int
intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe,
					   const void *buffer, size_t size)
{
	int ret;

	const struct dp_sdp *sdp = buffer;

	if (size < sizeof(struct dp_sdp))
		return -EINVAL;

	if (sdp->sdp_header.HB0 != 0)
		return -EINVAL;

	if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM)
		return -EINVAL;

	/*
	 * Least Significant Eight Bits of (Data Byte Count – 1)
	 * 1Dh (i.e., Data Byte Count = 30 bytes).
	 */
	if (sdp->sdp_header.HB2 != 0x1D)
		return -EINVAL;

	/* Most Significant Two Bits of (Data Byte Count – 1), Clear to 00b. */
	if ((sdp->sdp_header.HB3 & 0x3) != 0)
		return -EINVAL;

	/* INFOFRAME SDP Version Number */
	if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13)
		return -EINVAL;

	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	if (sdp->db[0] != 1)
		return -EINVAL;

	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE)
		return -EINVAL;

	/* Payload proper starts after the two CTA header bytes. */
	ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2],
					     HDMI_DRM_INFOFRAME_SIZE);

	return ret;
}
4556 
4557 static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
4558 				  struct intel_crtc_state *crtc_state,
4559 				  struct drm_dp_vsc_sdp *vsc)
4560 {
4561 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
4562 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4563 	unsigned int type = DP_SDP_VSC;
4564 	struct dp_sdp sdp = {};
4565 	int ret;
4566 
4567 	if ((crtc_state->infoframes.enable &
4568 	     intel_hdmi_infoframe_enable(type)) == 0)
4569 		return;
4570 
4571 	dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp));
4572 
4573 	ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp));
4574 
4575 	if (ret)
4576 		drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n");
4577 }
4578 
4579 static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder,
4580 						     struct intel_crtc_state *crtc_state,
4581 						     struct hdmi_drm_infoframe *drm_infoframe)
4582 {
4583 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
4584 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4585 	unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA;
4586 	struct dp_sdp sdp = {};
4587 	int ret;
4588 
4589 	if ((crtc_state->infoframes.enable &
4590 	    intel_hdmi_infoframe_enable(type)) == 0)
4591 		return;
4592 
4593 	dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
4594 				 sizeof(sdp));
4595 
4596 	ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp,
4597 							 sizeof(sdp));
4598 
4599 	if (ret)
4600 		drm_dbg_kms(&dev_priv->drm,
4601 			    "Failed to unpack DP HDR Metadata Infoframe SDP\n");
4602 }
4603 
/*
 * Dispatch reading back one SDP type from hardware into the matching
 * field of the crtc state's infoframes, for state readout/verification.
 */
void intel_read_dp_sdp(struct intel_encoder *encoder,
		       struct intel_crtc_state *crtc_state,
		       unsigned int type)
{
	switch (type) {
	case DP_SDP_VSC:
		intel_read_dp_vsc_sdp(encoder, crtc_state,
				      &crtc_state->infoframes.vsc);
		break;
	case HDMI_PACKET_TYPE_GAMUT_METADATA:
		intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state,
							 &crtc_state->infoframes.drm.drm);
		break;
	case DP_SDP_ADAPTIVE_SYNC:
		intel_read_dp_as_sdp(encoder, crtc_state,
				     &crtc_state->infoframes.as_sdp);
		break;
	default:
		MISSING_CASE(type);
		break;
	}
}
4626 
/*
 * Handle the DP compliance link-training autotest request: read the
 * requested lane count and link rate from the test DPCD registers,
 * validate them against our capabilities, and stash them in the
 * compliance state. Returns DP_TEST_ACK or DP_TEST_NAK.
 */
static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int status = 0;
	int test_link_rate;
	u8 test_lane_count, test_link_bw;
	/* (DP CTS 1.2)
	 * 4.3.1.11
	 */
	/* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
				   &test_lane_count);

	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Lane count read failed\n");
		return DP_TEST_NAK;
	}
	test_lane_count &= DP_MAX_LANE_COUNT_MASK;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
				   &test_link_bw);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Link Rate read failed\n");
		return DP_TEST_NAK;
	}
	test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);

	/* Validate the requested link rate and lane count */
	if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
					test_lane_count))
		return DP_TEST_NAK;

	intel_dp->compliance.test_lane_count = test_lane_count;
	intel_dp->compliance.test_link_rate = test_link_rate;

	return DP_TEST_ACK;
}
4664 
/*
 * Handle the DP_TEST_LINK_VIDEO_PATTERN automated test request
 * (DP CTS 3.1.5): read the requested pattern, resolution and pixel
 * format from the sink and stash them for the compliance modeset.
 * Only an RGB, VESA-range color ramp at 6 or 8 bpc is supported.
 *
 * Returns DP_TEST_ACK on success, DP_TEST_NAK on AUX failure or an
 * unsupported pattern/format request.
 */
static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 test_pattern;
	u8 test_misc;
	__be16 h_width, v_height; /* big-endian, as stored in the DPCD */
	int status = 0;

	/* Read the TEST_PATTERN (DP CTS 3.1.5) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
				   &test_pattern);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Test pattern read failed\n");
		return DP_TEST_NAK;
	}
	/* Only the color ramp pattern is implemented. */
	if (test_pattern != DP_COLOR_RAMP)
		return DP_TEST_NAK;

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
				  &h_width, 2);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "H Width read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
				  &v_height, 2);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "V Height read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
				   &test_misc);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "TEST MISC read failed\n");
		return DP_TEST_NAK;
	}
	/* Only RGB with VESA (non-CEA) dynamic range is supported. */
	if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
		return DP_TEST_NAK;
	if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
		return DP_TEST_NAK;
	switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
	case DP_TEST_BIT_DEPTH_6:
		intel_dp->compliance.test_data.bpc = 6;
		break;
	case DP_TEST_BIT_DEPTH_8:
		intel_dp->compliance.test_data.bpc = 8;
		break;
	default:
		return DP_TEST_NAK;
	}

	intel_dp->compliance.test_data.video_pattern = test_pattern;
	intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
	intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return DP_TEST_ACK;
}
4726 
/*
 * Handle the DP_TEST_LINK_EDID_READ automated test request (DP CTS 1.2
 * Core r1.1, 4.2.2.3-4.2.2.6): write the checksum of the last EDID
 * block back to the sink, and select either the preferred or the
 * failsafe resolution for the compliance modeset depending on whether
 * the EDID read succeeded.
 */
static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 test_result = DP_TEST_ACK;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;

	if (intel_connector->detect_edid == NULL ||
	    connector->edid_corrupt ||
	    intel_dp->aux.i2c_defer_count > 6) {
		/* Check EDID read for NACKs, DEFERs and corruption
		 * (DP CTS 1.2 Core r1.1)
		 *    4.2.2.4 : Failed EDID read, I2C_NAK
		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
		 *    4.2.2.6 : EDID corruption detected
		 * Use failsafe mode for all cases
		 */
		if (intel_dp->aux.i2c_nack_count > 0 ||
			intel_dp->aux.i2c_defer_count > 0)
			drm_dbg_kms(&i915->drm,
				    "EDID read had %d NACKs, %d DEFERs\n",
				    intel_dp->aux.i2c_nack_count,
				    intel_dp->aux.i2c_defer_count);
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
	} else {
		/* FIXME: Get rid of drm_edid_raw() */
		const struct edid *block = drm_edid_raw(intel_connector->detect_edid);

		/* We have to write the checksum of the last block read */
		block += block->extensions;

		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
				       block->checksum) <= 0)
			drm_dbg_kms(&i915->drm,
				    "Failed to write EDID checksum\n");

		/* Tell the sink a checksum was written along with the ACK. */
		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return test_result;
}
4772 
/*
 * Program the source HW to transmit the PHY compliance test pattern
 * previously read from the sink into
 * intel_dp->compliance.test_data.phytest. Pattern selection is done
 * via the per-pipe DDI_DP_COMP_CTL register (and DP_TP_CTL for TPS4 /
 * pattern disable on DISPLAY_VER >= 10).
 */
static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp,
					const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
			to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
	struct drm_dp_phy_test_params *data =
			&intel_dp->compliance.test_data.phytest;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	enum pipe pipe = crtc->pipe;
	u32 pattern_val;

	switch (data->phy_pattern) {
	case DP_LINK_QUAL_PATTERN_DISABLE:
		drm_dbg_kms(&dev_priv->drm, "Disable Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
		/* Also restore normal link training state on gen10+. */
		if (DISPLAY_VER(dev_priv) >= 10)
			intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
				     DP_TP_CTL_TRAIN_PAT4_SEL_MASK | DP_TP_CTL_LINK_TRAIN_MASK,
				     DP_TP_CTL_LINK_TRAIN_NORMAL);
		break;
	case DP_LINK_QUAL_PATTERN_D10_2:
		drm_dbg_kms(&dev_priv->drm, "Set D10.2 Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
		break;
	case DP_LINK_QUAL_PATTERN_ERROR_RATE:
		drm_dbg_kms(&dev_priv->drm, "Set Error Count Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE |
			       DDI_DP_COMP_CTL_SCRAMBLED_0);
		break;
	case DP_LINK_QUAL_PATTERN_PRBS7:
		drm_dbg_kms(&dev_priv->drm, "Set PRBS7 Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
		break;
	case DP_LINK_QUAL_PATTERN_80BIT_CUSTOM:
		/*
		 * FIXME: Ideally pattern should come from DPCD 0x250. As
		 * current firmware of DPR-100 could not set it, so hardcoding
		 * now for compliance test.
		 */
		drm_dbg_kms(&dev_priv->drm,
			    "Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n");
		pattern_val = 0x3e0f83e0;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val);
		pattern_val = 0x0f83e0f8;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val);
		pattern_val = 0x0000f83e;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val);
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE |
			       DDI_DP_COMP_CTL_CUSTOM80);
		break;
	case DP_LINK_QUAL_PATTERN_CP2520_PAT_1:
		/*
		 * FIXME: Ideally pattern should come from DPCD 0x24A. As
		 * current firmware of DPR-100 could not set it, so hardcoding
		 * now for compliance test.
		 */
		drm_dbg_kms(&dev_priv->drm, "Set HBR2 compliance Phy Test Pattern\n");
		pattern_val = 0xFB;
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 |
			       pattern_val);
		break;
	case DP_LINK_QUAL_PATTERN_CP2520_PAT_3:
		/* CP2520 pattern 3 is TPS4, which needs gen10+ HW. */
		if (DISPLAY_VER(dev_priv) < 10)  {
			drm_warn(&dev_priv->drm, "Platform does not support TPS4\n");
			break;
		}
		drm_dbg_kms(&dev_priv->drm, "Set TPS4 compliance Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
		intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
			     DP_TP_CTL_TRAIN_PAT4_SEL_MASK | DP_TP_CTL_LINK_TRAIN_MASK,
			     DP_TP_CTL_TRAIN_PAT4_SEL_TP4A | DP_TP_CTL_LINK_TRAIN_PAT4);
		break;
	default:
		drm_warn(&dev_priv->drm, "Invalid Phy Test Pattern\n");
	}
}
4855 
/*
 * Execute a pending PHY compliance test request: mirror the sink's
 * requested vswing/pre-emphasis, program the source pattern HW, and
 * finally tell the sink which pattern is being transmitted.
 */
static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_dp_phy_test_params *data =
		&intel_dp->compliance.test_data.phytest;
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
					     link_status) < 0) {
		drm_dbg_kms(&i915->drm, "failed to get link status\n");
		return;
	}

	/* retrieve vswing & pre-emphasis setting */
	intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX,
				  link_status);

	intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX);

	intel_dp_phy_pattern_update(intel_dp, crtc_state);

	/* Mirror the programmed drive settings back to the sink. */
	drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
			  intel_dp->train_set, crtc_state->lane_count);

	/* Inform the sink of the pattern now being transmitted. */
	drm_dp_set_phy_test_pattern(&intel_dp->aux, data,
				    intel_dp->dpcd[DP_DPCD_REV]);
}
4884 
4885 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4886 {
4887 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4888 	struct drm_dp_phy_test_params *data =
4889 		&intel_dp->compliance.test_data.phytest;
4890 
4891 	if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) {
4892 		drm_dbg_kms(&i915->drm, "DP Phy Test pattern AUX read failure\n");
4893 		return DP_TEST_NAK;
4894 	}
4895 
4896 	/* Set test active flag here so userspace doesn't interrupt things */
4897 	intel_dp->compliance.test_active = true;
4898 
4899 	return DP_TEST_ACK;
4900 }
4901 
/*
 * Service a DP_AUTOMATED_TEST_REQUEST IRQ: read the requested test
 * from DP_TEST_REQUEST, dispatch to the matching autotest handler and
 * report the ACK/NAK result back to the sink via DP_TEST_RESPONSE.
 */
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 response = DP_TEST_NAK;
	u8 request = 0;
	int status;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm,
			    "Could not read test request from sink\n");
		goto update_status;
	}

	switch (request) {
	case DP_TEST_LINK_TRAINING:
		drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n");
		response = intel_dp_autotest_link_training(intel_dp);
		break;
	case DP_TEST_LINK_VIDEO_PATTERN:
		drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n");
		response = intel_dp_autotest_video_pattern(intel_dp);
		break;
	case DP_TEST_LINK_EDID_READ:
		drm_dbg_kms(&i915->drm, "EDID test requested\n");
		response = intel_dp_autotest_edid(intel_dp);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n");
		response = intel_dp_autotest_phy_pattern(intel_dp);
		break;
	default:
		drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n",
			    request);
		break;
	}

	/* Remember the accepted test so later handlers can act on it. */
	if (response & DP_TEST_ACK)
		intel_dp->compliance.test_type = request;

update_status:
	status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
	if (status <= 0)
		drm_dbg_kms(&i915->drm,
			    "Could not write test response to sink\n");
}
4948 
4949 static bool intel_dp_link_ok(struct intel_dp *intel_dp,
4950 			     u8 link_status[DP_LINK_STATUS_SIZE])
4951 {
4952 	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
4953 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
4954 	bool uhbr = intel_dp->link_rate >= 1000000;
4955 	bool ok;
4956 
4957 	if (uhbr)
4958 		ok = drm_dp_128b132b_lane_channel_eq_done(link_status,
4959 							  intel_dp->lane_count);
4960 	else
4961 		ok = drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
4962 
4963 	if (ok)
4964 		return true;
4965 
4966 	intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
4967 	drm_dbg_kms(&i915->drm,
4968 		    "[ENCODER:%d:%s] %s link not ok, retraining\n",
4969 		    encoder->base.base.id, encoder->base.name,
4970 		    uhbr ? "128b/132b" : "8b/10b");
4971 
4972 	return false;
4973 }
4974 
4975 static void
4976 intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, u8 *ack)
4977 {
4978 	bool handled = false;
4979 
4980 	drm_dp_mst_hpd_irq_handle_event(&intel_dp->mst_mgr, esi, ack, &handled);
4981 
4982 	if (esi[1] & DP_CP_IRQ) {
4983 		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
4984 		ack[1] |= DP_CP_IRQ;
4985 	}
4986 }
4987 
4988 static bool intel_dp_mst_link_status(struct intel_dp *intel_dp)
4989 {
4990 	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
4991 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
4992 	u8 link_status[DP_LINK_STATUS_SIZE] = {};
4993 	const size_t esi_link_status_size = DP_LINK_STATUS_SIZE - 2;
4994 
4995 	if (drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS_ESI, link_status,
4996 			     esi_link_status_size) != esi_link_status_size) {
4997 		drm_err(&i915->drm,
4998 			"[ENCODER:%d:%s] Failed to read link status\n",
4999 			encoder->base.base.id, encoder->base.name);
5000 		return false;
5001 	}
5002 
5003 	return intel_dp_link_ok(intel_dp, link_status);
5004 }
5005 
5006 /**
5007  * intel_dp_check_mst_status - service any pending MST interrupts, check link status
5008  * @intel_dp: Intel DP struct
5009  *
5010  * Read any pending MST interrupts, call MST core to handle these and ack the
5011  * interrupts. Check if the main and AUX link state is ok.
5012  *
5013  * Returns:
5014  * - %true if pending interrupts were serviced (or no interrupts were
5015  *   pending) w/o detecting an error condition.
5016  * - %false if an error condition - like AUX failure or a loss of link - is
5017  *   detected, or another condition - like a DP tunnel BW state change - needs
5018  *   servicing from the hotplug work.
5019  */
5020 static bool
5021 intel_dp_check_mst_status(struct intel_dp *intel_dp)
5022 {
5023 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5024 	bool link_ok = true;
5025 	bool reprobe_needed = false;
5026 
5027 	drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);
5028 
5029 	for (;;) {
5030 		u8 esi[4] = {};
5031 		u8 ack[4] = {};
5032 
5033 		if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) {
5034 			drm_dbg_kms(&i915->drm,
5035 				    "failed to get ESI - device may have failed\n");
5036 			link_ok = false;
5037 
5038 			break;
5039 		}
5040 
5041 		drm_dbg_kms(&i915->drm, "DPRX ESI: %4ph\n", esi);
5042 
5043 		if (intel_dp->active_mst_links > 0 && link_ok &&
5044 		    esi[3] & LINK_STATUS_CHANGED) {
5045 			if (!intel_dp_mst_link_status(intel_dp))
5046 				link_ok = false;
5047 			ack[3] |= LINK_STATUS_CHANGED;
5048 		}
5049 
5050 		intel_dp_mst_hpd_irq(intel_dp, esi, ack);
5051 
5052 		if (esi[3] & DP_TUNNELING_IRQ) {
5053 			if (drm_dp_tunnel_handle_irq(i915->display.dp_tunnel_mgr,
5054 						     &intel_dp->aux))
5055 				reprobe_needed = true;
5056 			ack[3] |= DP_TUNNELING_IRQ;
5057 		}
5058 
5059 		if (!memchr_inv(ack, 0, sizeof(ack)))
5060 			break;
5061 
5062 		if (!intel_dp_ack_sink_irq_esi(intel_dp, ack))
5063 			drm_dbg_kms(&i915->drm, "Failed to ack ESI\n");
5064 
5065 		if (ack[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY))
5066 			drm_dp_mst_hpd_irq_send_new_request(&intel_dp->mst_mgr);
5067 	}
5068 
5069 	return link_ok && !reprobe_needed;
5070 }
5071 
5072 static void
5073 intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp)
5074 {
5075 	bool is_active;
5076 	u8 buf = 0;
5077 
5078 	is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux);
5079 	if (intel_dp->frl.is_trained && !is_active) {
5080 		if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf) < 0)
5081 			return;
5082 
5083 		buf &=  ~DP_PCON_ENABLE_HDMI_LINK;
5084 		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf) < 0)
5085 			return;
5086 
5087 		drm_dp_pcon_hdmi_frl_link_error_count(&intel_dp->aux, &intel_dp->attached_connector->base);
5088 
5089 		intel_dp->frl.is_trained = false;
5090 
5091 		/* Restart FRL training or fall back to TMDS mode */
5092 		intel_dp_check_frl_training(intel_dp);
5093 	}
5094 }
5095 
/*
 * Decide whether the link needs to be retrained: returns %true only
 * for a previously trained link whose cached parameters are still
 * valid but whose current channel-EQ status reads back as bad.
 */
static bool
intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];

	/* Nothing to retrain if the link was never trained. */
	if (!intel_dp->link_trained)
		return false;

	/*
	 * While the PSR source HW is enabled it controls the main link
	 * itself, enabling and disabling it as needed, so a retrain
	 * attempt could fail because the link may be off, or because
	 * training patterns could get mixed with frame data. Also, when
	 * exiting PSR, the HW retrains the link anyway, fixing any link
	 * status error.
	 */
	if (intel_psr_enabled(intel_dp))
		return false;

	if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
					     link_status) < 0)
		return false;

	/*
	 * Validate the cached values of intel_dp->link_rate and
	 * intel_dp->lane_count before attempting to retrain.
	 *
	 * FIXME would be nice to use the crtc state here, but since
	 * we need to call this from the short HPD handler that seems
	 * a bit hard.
	 */
	if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
					intel_dp->lane_count))
		return false;

	/* Retrain if link not ok */
	return !intel_dp_link_ok(intel_dp, link_status);
}
5134 
5135 static bool intel_dp_has_connector(struct intel_dp *intel_dp,
5136 				   const struct drm_connector_state *conn_state)
5137 {
5138 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5139 	struct intel_encoder *encoder;
5140 	enum pipe pipe;
5141 
5142 	if (!conn_state->best_encoder)
5143 		return false;
5144 
5145 	/* SST */
5146 	encoder = &dp_to_dig_port(intel_dp)->base;
5147 	if (conn_state->best_encoder == &encoder->base)
5148 		return true;
5149 
5150 	/* MST */
5151 	for_each_pipe(i915, pipe) {
5152 		encoder = &intel_dp->mst_encoders[pipe]->base;
5153 		if (conn_state->best_encoder == &encoder->base)
5154 			return true;
5155 	}
5156 
5157 	return false;
5158 }
5159 
/*
 * Compute the mask of pipes with an active CRTC driven by @intel_dp,
 * taking each such CRTC's lock via @ctx. Waits (up to 5s) for any
 * pending commit on the connector to finish its HW update first.
 *
 * Returns 0 on success or a drm_modeset_lock() error (-EDEADLK means
 * the caller must back off and retry).
 */
int intel_dp_get_active_pipes(struct intel_dp *intel_dp,
			      struct drm_modeset_acquire_ctx *ctx,
			      u8 *pipe_mask)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	int ret = 0;

	*pipe_mask = 0;

	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state =
			connector->base.state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!intel_dp_has_connector(intel_dp, conn_state))
			continue;

		crtc = to_intel_crtc(conn_state->crtc);
		if (!crtc)
			continue;

		/* Lock stays held via @ctx; -EDEADLK propagates to caller. */
		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			break;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));

		if (!crtc_state->hw.active)
			continue;

		/* Let any in-flight commit finish its HW update first. */
		if (conn_state->commit)
			drm_WARN_ON(&i915->drm,
				    !wait_for_completion_timeout(&conn_state->commit->hw_done,
								 msecs_to_jiffies(5000)));

		*pipe_mask |= BIT(crtc->pipe);
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}
5207 
5208 static bool intel_dp_is_connected(struct intel_dp *intel_dp)
5209 {
5210 	struct intel_connector *connector = intel_dp->attached_connector;
5211 
5212 	return connector->base.status == connector_status_connected ||
5213 		intel_dp->is_mst;
5214 }
5215 
/*
 * Retrain the link on @encoder if it is connected and its channel-EQ
 * status reads back as bad. Runs in three phases over the affected
 * pipes: suppress FIFO underrun reporting, retrain (once, on the MST
 * master transcoder for gen12+ MST), then re-enable underrun reporting
 * after the next vblank.
 *
 * Returns 0 on success or a drm_modeset_lock() error (-EDEADLK means
 * the caller must back off and retry).
 */
int intel_dp_retrain_link(struct intel_encoder *encoder,
			  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc;
	u8 pipe_mask;
	int ret;

	if (!intel_dp_is_connected(intel_dp))
		return 0;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	if (!intel_dp_needs_link_retrain(intel_dp))
		return 0;

	ret = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask);
	if (ret)
		return ret;

	if (pipe_mask == 0)
		return 0;

	/* Re-check: the link may have recovered while taking the locks. */
	if (!intel_dp_needs_link_retrain(intel_dp))
		return 0;

	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n",
		    encoder->base.base.id, encoder->base.name);

	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Suppress underruns caused by re-training */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      intel_crtc_pch_transcoder(crtc), false);
	}

	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* retrain on the MST master transcoder */
		if (DISPLAY_VER(dev_priv) >= 12 &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
		    !intel_dp_mst_is_master_trans(crtc_state))
			continue;

		intel_dp_check_frl_training(intel_dp);
		intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
		intel_dp_start_link_train(intel_dp, crtc_state);
		intel_dp_stop_link_train(intel_dp, crtc_state);
		/* The link is shared, one retrain covers all pipes. */
		break;
	}

	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Keep underrun reporting disabled until things are stable */
		intel_crtc_wait_for_next_vblank(crtc);

		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      intel_crtc_pch_transcoder(crtc), true);
	}

	return 0;
}
5292 
/*
 * Compute the mask of active pipes driven by @intel_dp for a PHY test,
 * taking each CRTC's lock via @ctx. Near-duplicate of
 * intel_dp_get_active_pipes(), except a pipe with an unfinished commit
 * is skipped rather than waited on.
 *
 * Returns 0 on success or a drm_modeset_lock() error (-EDEADLK means
 * the caller must back off and retry).
 */
static int intel_dp_prep_phy_test(struct intel_dp *intel_dp,
				  struct drm_modeset_acquire_ctx *ctx,
				  u8 *pipe_mask)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	int ret = 0;

	*pipe_mask = 0;

	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state =
			connector->base.state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!intel_dp_has_connector(intel_dp, conn_state))
			continue;

		crtc = to_intel_crtc(conn_state->crtc);
		if (!crtc)
			continue;

		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			break;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));

		if (!crtc_state->hw.active)
			continue;

		/* Skip (don't wait on) pipes with an unfinished commit. */
		if (conn_state->commit &&
		    !try_wait_for_completion(&conn_state->commit->hw_done))
			continue;

		*pipe_mask |= BIT(crtc->pipe);
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}
5339 
/*
 * Run a pending PHY compliance test on @encoder's active pipes,
 * executing it once on the (MST master, for gen12+) transcoder.
 *
 * Returns 0 on success or a drm_modeset_lock() error (-EDEADLK means
 * the caller must back off and retry).
 */
static int intel_dp_do_phy_test(struct intel_encoder *encoder,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc;
	u8 pipe_mask;
	int ret;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	ret = intel_dp_prep_phy_test(intel_dp, ctx, &pipe_mask);
	if (ret)
		return ret;

	if (pipe_mask == 0)
		return 0;

	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] PHY test\n",
		    encoder->base.base.id, encoder->base.name);

	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* test on the MST master transcoder */
		if (DISPLAY_VER(dev_priv) >= 12 &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
		    !intel_dp_mst_is_master_trans(crtc_state))
			continue;

		intel_dp_process_phy_request(intel_dp, crtc_state);
		/* The link is shared, one PHY test covers all pipes. */
		break;
	}

	return 0;
}
5380 
5381 void intel_dp_phy_test(struct intel_encoder *encoder)
5382 {
5383 	struct drm_modeset_acquire_ctx ctx;
5384 	int ret;
5385 
5386 	drm_modeset_acquire_init(&ctx, 0);
5387 
5388 	for (;;) {
5389 		ret = intel_dp_do_phy_test(encoder, &ctx);
5390 
5391 		if (ret == -EDEADLK) {
5392 			drm_modeset_backoff(&ctx);
5393 			continue;
5394 		}
5395 
5396 		break;
5397 	}
5398 
5399 	drm_modeset_drop_locks(&ctx);
5400 	drm_modeset_acquire_fini(&ctx);
5401 	drm_WARN(encoder->base.dev, ret,
5402 		 "Acquiring modeset locks failed with %i\n", ret);
5403 }
5404 
5405 static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
5406 {
5407 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5408 	u8 val;
5409 
5410 	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
5411 		return;
5412 
5413 	if (drm_dp_dpcd_readb(&intel_dp->aux,
5414 			      DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
5415 		return;
5416 
5417 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);
5418 
5419 	if (val & DP_AUTOMATED_TEST_REQUEST)
5420 		intel_dp_handle_test_request(intel_dp);
5421 
5422 	if (val & DP_CP_IRQ)
5423 		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
5424 
5425 	if (val & DP_SINK_SPECIFIC_IRQ)
5426 		drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
5427 }
5428 
/*
 * Service the LINK_SERVICE_IRQ_VECTOR_ESI0 interrupts: DP tunneling
 * and PCON HDMI link status changes.
 *
 * Returns %true if a full connector reprobe is needed (a DP tunnel BW
 * change was detected), %false otherwise.
 */
static bool intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	bool reprobe_needed = false;
	u8 val;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return false;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val)
		return false;

	if ((val & DP_TUNNELING_IRQ) &&
	    drm_dp_tunnel_handle_irq(i915->display.dp_tunnel_mgr,
				     &intel_dp->aux))
		reprobe_needed = true;

	/* Ack the serviced IRQs; bail if the ack write itself fails. */
	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1)
		return reprobe_needed;

	if (val & HDMI_LINK_STATUS_CHANGED)
		intel_dp_handle_hdmi_link_status_change(intel_dp);

	return reprobe_needed;
}
5456 
5457 /*
5458  * According to DP spec
5459  * 5.1.2:
5460  *  1. Read DPCD
5461  *  2. Configure link according to Receiver Capabilities
5462  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
5463  *  4. Check link status on receipt of hot-plug interrupt
5464  *
5465  * intel_dp_short_pulse -  handles short pulse interrupts
5466  * when full detection is not required.
5467  * Returns %true if short pulse is handled and full detection
5468  * is NOT required and %false otherwise.
5469  */
5470 static bool
5471 intel_dp_short_pulse(struct intel_dp *intel_dp)
5472 {
5473 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
5474 	u8 old_sink_count = intel_dp->sink_count;
5475 	bool reprobe_needed = false;
5476 	bool ret;
5477 
5478 	/*
5479 	 * Clearing compliance test variables to allow capturing
5480 	 * of values for next automated test request.
5481 	 */
5482 	memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
5483 
5484 	/*
5485 	 * Now read the DPCD to see if it's actually running
5486 	 * If the current value of sink count doesn't match with
5487 	 * the value that was stored earlier or dpcd read failed
5488 	 * we need to do full detection
5489 	 */
5490 	ret = intel_dp_get_dpcd(intel_dp);
5491 
5492 	if ((old_sink_count != intel_dp->sink_count) || !ret) {
5493 		/* No need to proceed if we are going to do full detect */
5494 		return false;
5495 	}
5496 
5497 	intel_dp_check_device_service_irq(intel_dp);
5498 	reprobe_needed = intel_dp_check_link_service_irq(intel_dp);
5499 
5500 	/* Handle CEC interrupts, if any */
5501 	drm_dp_cec_irq(&intel_dp->aux);
5502 
5503 	/* defer to the hotplug work for link retraining if needed */
5504 	if (intel_dp_needs_link_retrain(intel_dp))
5505 		return false;
5506 
5507 	intel_psr_short_pulse(intel_dp);
5508 
5509 	switch (intel_dp->compliance.test_type) {
5510 	case DP_TEST_LINK_TRAINING:
5511 		drm_dbg_kms(&dev_priv->drm,
5512 			    "Link Training Compliance Test requested\n");
5513 		/* Send a Hotplug Uevent to userspace to start modeset */
5514 		drm_kms_helper_hotplug_event(&dev_priv->drm);
5515 		break;
5516 	case DP_TEST_LINK_PHY_TEST_PATTERN:
5517 		drm_dbg_kms(&dev_priv->drm,
5518 			    "PHY test pattern Compliance Test requested\n");
5519 		/*
5520 		 * Schedule long hpd to do the test
5521 		 *
5522 		 * FIXME get rid of the ad-hoc phy test modeset code
5523 		 * and properly incorporate it into the normal modeset.
5524 		 */
5525 		reprobe_needed = true;
5526 	}
5527 
5528 	return !reprobe_needed;
5529 }
5530 
/* XXX this is probably wrong for multiple downstream ports */
/*
 * DPCD-based connection detection for non-eDP ports: reads the DPCD
 * and, for branch devices, uses SINK_COUNT / DDC probing / downstream
 * port type to decide the connector status.
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u8 *dpcd = intel_dp->dpcd;
	u8 type;

	if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp)))
		return connector_status_connected;

	lspcon_resume(dig_port);

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	intel_dp->mst_detect = intel_dp_mst_detect(intel_dp);

	/* if there's no downstream port, we're done */
	if (!drm_dp_is_branch(dpcd))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp_has_sink_count(intel_dp) &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		return intel_dp->sink_count ?
		connector_status_connected : connector_status_disconnected;
	}

	if (intel_dp->mst_detect == DRM_DP_MST)
		return connector_status_connected;

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* DPCD 1.0: only the coarse downstream port type exists. */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
5586 
5587 static enum drm_connector_status
5588 edp_detect(struct intel_dp *intel_dp)
5589 {
5590 	return connector_status_connected;
5591 }
5592 
5593 void intel_digital_port_lock(struct intel_encoder *encoder)
5594 {
5595 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
5596 
5597 	if (dig_port->lock)
5598 		dig_port->lock(dig_port);
5599 }
5600 
5601 void intel_digital_port_unlock(struct intel_encoder *encoder)
5602 {
5603 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
5604 
5605 	if (dig_port->unlock)
5606 		dig_port->unlock(dig_port);
5607 }
5608 
5609 /*
5610  * intel_digital_port_connected_locked - is the specified port connected?
5611  * @encoder: intel_encoder
5612  *
5613  * In cases where there's a connector physically connected but it can't be used
5614  * by our hardware we also return false, since the rest of the driver should
5615  * pretty much treat the port as disconnected. This is relevant for type-C
5616  * (starting on ICL) where there's ownership involved.
5617  *
5618  * The caller must hold the lock acquired by calling intel_digital_port_lock()
5619  * when calling this function.
5620  *
5621  * Return %true if port is connected, %false otherwise.
5622  */
5623 bool intel_digital_port_connected_locked(struct intel_encoder *encoder)
5624 {
5625 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5626 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
5627 	bool is_glitch_free = intel_tc_port_handles_hpd_glitches(dig_port);
5628 	bool is_connected = false;
5629 	intel_wakeref_t wakeref;
5630 
5631 	with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
5632 		unsigned long wait_expires = jiffies + msecs_to_jiffies_timeout(4);
5633 
5634 		do {
5635 			is_connected = dig_port->connected(encoder);
5636 			if (is_connected || is_glitch_free)
5637 				break;
5638 			usleep_range(10, 30);
5639 		} while (time_before(jiffies, wait_expires));
5640 	}
5641 
5642 	return is_connected;
5643 }
5644 
5645 bool intel_digital_port_connected(struct intel_encoder *encoder)
5646 {
5647 	bool ret;
5648 
5649 	intel_digital_port_lock(encoder);
5650 	ret = intel_digital_port_connected_locked(encoder);
5651 	intel_digital_port_unlock(encoder);
5652 
5653 	return ret;
5654 }
5655 
5656 static const struct drm_edid *
5657 intel_dp_get_edid(struct intel_dp *intel_dp)
5658 {
5659 	struct intel_connector *connector = intel_dp->attached_connector;
5660 	const struct drm_edid *fixed_edid = connector->panel.fixed_edid;
5661 
5662 	/* Use panel fixed edid if we have one */
5663 	if (fixed_edid) {
5664 		/* invalid edid */
5665 		if (IS_ERR(fixed_edid))
5666 			return NULL;
5667 
5668 		return drm_edid_dup(fixed_edid);
5669 	}
5670 
5671 	return drm_edid_read_ddc(&connector->base, &intel_dp->aux.ddc);
5672 }
5673 
/*
 * Cache the downstream facing port (DFP) limits - max bpc, dotclock, TMDS
 * clock range and PCON FRL bandwidth - from the branch device's DPCD and
 * the (possibly NULL) EDID, then refresh the PCON DSC caps.
 */
static void
intel_dp_update_dfp(struct intel_dp *intel_dp,
		    const struct drm_edid *drm_edid)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;

	intel_dp->dfp.max_bpc =
		drm_dp_downstream_max_bpc(intel_dp->dpcd,
					  intel_dp->downstream_ports, drm_edid);

	intel_dp->dfp.max_dotclock =
		drm_dp_downstream_max_dotclock(intel_dp->dpcd,
					       intel_dp->downstream_ports);

	intel_dp->dfp.min_tmds_clock =
		drm_dp_downstream_min_tmds_clock(intel_dp->dpcd,
						 intel_dp->downstream_ports,
						 drm_edid);
	intel_dp->dfp.max_tmds_clock =
		drm_dp_downstream_max_tmds_clock(intel_dp->dpcd,
						 intel_dp->downstream_ports,
						 drm_edid);

	intel_dp->dfp.pcon_max_frl_bw =
		drm_dp_get_pcon_max_frl_bw(intel_dp->dpcd,
					   intel_dp->downstream_ports);

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d, PCON Max FRL BW %dGbps\n",
		    connector->base.base.id, connector->base.name,
		    intel_dp->dfp.max_bpc,
		    intel_dp->dfp.max_dotclock,
		    intel_dp->dfp.min_tmds_clock,
		    intel_dp->dfp.max_tmds_clock,
		    intel_dp->dfp.pcon_max_frl_bw);

	intel_dp_get_pcon_dsc_cap(intel_dp);
}
5713 
5714 static bool
5715 intel_dp_can_ycbcr420(struct intel_dp *intel_dp)
5716 {
5717 	if (source_can_output(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR420) &&
5718 	    (!drm_dp_is_branch(intel_dp->dpcd) || intel_dp->dfp.ycbcr420_passthrough))
5719 		return true;
5720 
5721 	if (source_can_output(intel_dp, INTEL_OUTPUT_FORMAT_RGB) &&
5722 	    dfp_can_convert_from_rgb(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR420))
5723 		return true;
5724 
5725 	if (source_can_output(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR444) &&
5726 	    dfp_can_convert_from_ycbcr444(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR420))
5727 		return true;
5728 
5729 	return false;
5730 }
5731 
/*
 * Cache the DFP's YCbCr 4:2:0 passthrough/conversion capabilities and
 * update whether the connector may use 4:2:0 output at all.
 */
static void
intel_dp_update_420(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;

	intel_dp->dfp.ycbcr420_passthrough =
		drm_dp_downstream_420_passthrough(intel_dp->dpcd,
						  intel_dp->downstream_ports);
	/* on-board LSPCON always assumed to support 4:4:4->4:2:0 conversion */
	intel_dp->dfp.ycbcr_444_to_420 =
		dp_to_dig_port(intel_dp)->lspcon.active ||
		drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd,
							intel_dp->downstream_ports);
	intel_dp->dfp.rgb_to_ycbcr =
		drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
							  intel_dp->downstream_ports,
							  DP_DS_HDMI_BT709_RGB_YCBCR_CONV);

	connector->base.ycbcr_420_allowed = intel_dp_can_ycbcr420(intel_dp);

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] RGB->YcbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
		    connector->base.base.id, connector->base.name,
		    str_yes_no(intel_dp->dfp.rgb_to_ycbcr),
		    str_yes_no(connector->base.ycbcr_420_allowed),
		    str_yes_no(intel_dp->dfp.ycbcr_444_to_420));
}
5760 
/*
 * (Re)read the sink's EDID, cache it on the connector, and refresh all
 * state derived from it (VRR capability, DFP limits, 4:2:0 caps, CEC).
 */
static void
intel_dp_set_edid(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	const struct drm_edid *drm_edid;
	bool vrr_capable;

	intel_dp_unset_edid(intel_dp);
	drm_edid = intel_dp_get_edid(intel_dp);
	connector->detect_edid = drm_edid;

	/* Below we depend on display info having been updated */
	drm_edid_connector_update(&connector->base, drm_edid);

	vrr_capable = intel_vrr_is_capable(connector);
	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] VRR capable: %s\n",
		    connector->base.base.id, connector->base.name, str_yes_no(vrr_capable));
	drm_connector_set_vrr_capable_property(&connector->base, vrr_capable);

	intel_dp_update_dfp(intel_dp, drm_edid);
	intel_dp_update_420(intel_dp);

	/* CEC needs the physical address parsed out of the EDID above */
	drm_dp_cec_attach(&intel_dp->aux,
			  connector->base.display_info.source_physical_address);
}
5787 
/*
 * Drop the cached EDID and reset all EDID-derived state (DFP limits,
 * 4:2:0 caps, VRR property) back to defaults. CEC must be detached
 * before the EDID it references is freed.
 */
static void
intel_dp_unset_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;

	drm_dp_cec_unset_edid(&intel_dp->aux);
	drm_edid_free(connector->detect_edid);
	connector->detect_edid = NULL;

	intel_dp->dfp.max_bpc = 0;
	intel_dp->dfp.max_dotclock = 0;
	intel_dp->dfp.min_tmds_clock = 0;
	intel_dp->dfp.max_tmds_clock = 0;

	intel_dp->dfp.pcon_max_frl_bw = 0;

	intel_dp->dfp.ycbcr_444_to_420 = false;
	connector->base.ycbcr_420_allowed = false;

	drm_connector_set_vrr_capable_property(&connector->base,
					       false);
}
5810 
5811 static void
5812 intel_dp_detect_dsc_caps(struct intel_dp *intel_dp, struct intel_connector *connector)
5813 {
5814 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5815 
5816 	/* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
5817 	if (!HAS_DSC(i915))
5818 		return;
5819 
5820 	if (intel_dp_is_edp(intel_dp))
5821 		intel_edp_get_dsc_sink_cap(intel_dp->edp_dpcd[0],
5822 					   connector);
5823 	else
5824 		intel_dp_get_dsc_sink_cap(intel_dp->dpcd[DP_DPCD_REV],
5825 					  connector);
5826 }
5827 
/*
 * Connector ->detect_ctx() hook: determine connection status, refresh
 * sink caps/EDID and related state. May return -EDEADLK, which the
 * probe helper handles by backing off and retrying with the ctx.
 */
static int
intel_dp_detect(struct drm_connector *connector,
		struct drm_modeset_acquire_ctx *ctx,
		bool force)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_connector *intel_connector =
		to_intel_connector(connector);
	struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum drm_connector_status status;
	int ret;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.id, connector->name);
	drm_WARN_ON(&dev_priv->drm,
		    !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));

	if (!intel_display_device_enabled(dev_priv))
		return connector_status_disconnected;

	/* Without HW access, report the last known status unchanged */
	if (!intel_display_driver_check_access(dev_priv))
		return connector->status;

	/* Can't disconnect eDP */
	if (intel_dp_is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(encoder))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status == connector_status_disconnected) {
		/* Drop all cached sink state on disconnect */
		memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
		memset(intel_connector->dp.dsc_dpcd, 0, sizeof(intel_connector->dp.dsc_dpcd));
		intel_dp->psr.sink_panel_replay_support = false;

		intel_dp_mst_disconnect(intel_dp);

		intel_dp_tunnel_disconnect(intel_dp);

		goto out;
	}

	/* -EDEADLK must be propagated for modeset lock backoff */
	ret = intel_dp_tunnel_detect(intel_dp, ctx);
	if (ret == -EDEADLK)
		return ret;

	/* ret == 1: a new tunnel was detected */
	if (ret == 1)
		intel_connector->base.epoch_counter++;

	if (!intel_dp_is_edp(intel_dp))
		intel_psr_init_dpcd(intel_dp);

	intel_dp_detect_dsc_caps(intel_dp, intel_connector);

	intel_dp_mst_configure(intel_dp);

	/*
	 * TODO: Reset link params when switching to MST mode, until MST
	 * supports link training fallback params.
	 */
	if (intel_dp->reset_link_params || intel_dp->is_mst) {
		intel_dp_reset_max_link_params(intel_dp);
		intel_dp->reset_link_params = false;
	}

	intel_dp_print_rates(intel_dp);

	if (intel_dp->is_mst) {
		/*
		 * If we are in MST mode then this connector
		 * won't appear connected or have anything
		 * with EDID on it
		 */
		status = connector_status_disconnected;
		goto out;
	}

	/*
	 * Some external monitors do not signal loss of link synchronization
	 * with an IRQ_HPD, so force a link status check.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		ret = intel_dp_retrain_link(encoder, ctx);
		if (ret)
			return ret;
	}

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);
	if (intel_dp_is_edp(intel_dp) ||
	    to_intel_connector(connector)->detect_edid)
		status = connector_status_connected;

	intel_dp_check_device_service_irq(intel_dp);

out:
	if (status != connector_status_connected && !intel_dp->is_mst)
		intel_dp_unset_edid(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		drm_dp_set_subconnector_property(connector,
						 status,
						 intel_dp->dpcd,
						 intel_dp->downstream_ports);
	return status;
}
5944 
5945 static void
5946 intel_dp_force(struct drm_connector *connector)
5947 {
5948 	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
5949 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5950 	struct intel_encoder *intel_encoder = &dig_port->base;
5951 	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
5952 
5953 	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
5954 		    connector->base.id, connector->name);
5955 
5956 	if (!intel_display_driver_check_access(dev_priv))
5957 		return;
5958 
5959 	intel_dp_unset_edid(intel_dp);
5960 
5961 	if (connector->status != connector_status_connected)
5962 		return;
5963 
5964 	intel_dp_set_edid(intel_dp);
5965 }
5966 
5967 static int intel_dp_get_modes(struct drm_connector *connector)
5968 {
5969 	struct intel_connector *intel_connector = to_intel_connector(connector);
5970 	int num_modes;
5971 
5972 	/* drm_edid_connector_update() done in ->detect() or ->force() */
5973 	num_modes = drm_edid_connector_add_modes(connector);
5974 
5975 	/* Also add fixed mode, which may or may not be present in EDID */
5976 	if (intel_dp_is_edp(intel_attached_dp(intel_connector)))
5977 		num_modes += intel_panel_get_modes(intel_connector);
5978 
5979 	if (num_modes)
5980 		return num_modes;
5981 
5982 	if (!intel_connector->detect_edid) {
5983 		struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
5984 		struct drm_display_mode *mode;
5985 
5986 		mode = drm_dp_downstream_mode(connector->dev,
5987 					      intel_dp->dpcd,
5988 					      intel_dp->downstream_ports);
5989 		if (mode) {
5990 			drm_mode_probed_add(connector, mode);
5991 			num_modes++;
5992 		}
5993 	}
5994 
5995 	return num_modes;
5996 }
5997 
/*
 * Connector ->late_register() hook: register the AUX channel and CEC,
 * and set up LSPCON (including its HDR property) where present.
 */
static int
intel_dp_connector_register(struct drm_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->dev);
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_lspcon *lspcon = &dig_port->lspcon;
	int ret;

	ret = intel_connector_register(connector);
	if (ret)
		return ret;

	drm_dbg_kms(&i915->drm, "registering %s bus for %s\n",
		    intel_dp->aux.name, connector->kdev->kobj.name);

	/* AUX device node is parented to the connector's kdev */
	intel_dp->aux.dev = connector->kdev;
	ret = drm_dp_aux_register(&intel_dp->aux);
	if (!ret)
		drm_dp_cec_register_connector(&intel_dp->aux, connector);

	if (!intel_bios_encoder_is_lspcon(dig_port->base.devdata))
		return ret;

	/*
	 * ToDo: Clean this up to handle lspcon init and resume more
	 * efficiently and streamlined.
	 */
	if (lspcon_init(dig_port)) {
		lspcon_detect_hdr_capability(lspcon);
		if (lspcon->hdr_supported)
			drm_connector_attach_hdr_output_metadata_property(connector);
	}

	return ret;
}
6034 
6035 static void
6036 intel_dp_connector_unregister(struct drm_connector *connector)
6037 {
6038 	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
6039 
6040 	drm_dp_cec_unregister_connector(&intel_dp->aux);
6041 	drm_dp_aux_unregister(&intel_dp->aux);
6042 	intel_connector_unregister(connector);
6043 }
6044 
6045 void intel_dp_connector_sync_state(struct intel_connector *connector,
6046 				   const struct intel_crtc_state *crtc_state)
6047 {
6048 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
6049 
6050 	if (crtc_state && crtc_state->dsc.compression_enable) {
6051 		drm_WARN_ON(&i915->drm, !connector->dp.dsc_decompression_aux);
6052 		connector->dp.dsc_decompression_enabled = true;
6053 	} else {
6054 		connector->dp.dsc_decompression_enabled = false;
6055 	}
6056 }
6057 
/*
 * Encoder teardown on driver remove: clean up MST/tunnel state, force
 * VDD off and wait out the panel power cycle before releasing AUX.
 */
void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
	struct intel_dp *intel_dp = &dig_port->dp;

	intel_dp_mst_encoder_cleanup(dig_port);

	intel_dp_tunnel_destroy(intel_dp);

	intel_pps_vdd_off_sync(intel_dp);

	/*
	 * Ensure power off delay is respected on module remove, so that we can
	 * reduce delays at driver probe. See pps_init_timestamps().
	 */
	intel_pps_wait_power_cycle(intel_dp);

	intel_dp_aux_fini(intel_dp);
}
6077 
/* Suspend hook: make sure VDD is off and park any DP tunnel state. */
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *dp = enc_to_intel_dp(intel_encoder);

	intel_pps_vdd_off_sync(dp);

	intel_dp_tunnel_suspend(dp);
}
6086 
/* Shutdown hook: respect the panel power cycle delay before power-off. */
void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder)
{
	struct intel_dp *dp = enc_to_intel_dp(intel_encoder);

	intel_pps_wait_power_cycle(dp);
}
6093 
/*
 * Add a full modeset for every enabled connector belonging to the given
 * tile group, so all tiles of a tiled display are modeset together.
 * Returns 0 or a negative error (e.g. -EDEADLK from atomic state lookup).
 */
static int intel_modeset_tile_group(struct intel_atomic_state *state,
				    int tile_group_id)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;
	int ret = 0;

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!connector->has_tile ||
		    connector->tile_group->id != tile_group_id)
			continue;

		/* Pulls the connector (and its crtc state) into the atomic state */
		conn_state = drm_atomic_get_connector_state(&state->base,
							    connector);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			break;
		}

		crtc = to_intel_crtc(conn_state->crtc);

		/* Connector not bound to any crtc: nothing to modeset */
		if (!crtc)
			continue;

		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			break;
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}
6135 
/*
 * Add a full modeset for every enabled crtc whose cpu transcoder is in
 * the @transcoders bitmask. Warns if some requested transcoder was not
 * found on any enabled crtc. Returns 0 or a negative error.
 */
static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	if (transcoders == 0)
		return 0;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state;
		int ret;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->hw.enable)
			continue;

		if (!(transcoders & BIT(crtc_state->cpu_transcoder)))
			continue;

		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			return ret;

		/* Each transcoder is handled at most once */
		transcoders &= ~BIT(crtc_state->cpu_transcoder);
	}

	drm_WARN_ON(&dev_priv->drm, transcoders != 0);

	return 0;
}
6175 
6176 static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
6177 				      struct drm_connector *connector)
6178 {
6179 	const struct drm_connector_state *old_conn_state =
6180 		drm_atomic_get_old_connector_state(&state->base, connector);
6181 	const struct intel_crtc_state *old_crtc_state;
6182 	struct intel_crtc *crtc;
6183 	u8 transcoders;
6184 
6185 	crtc = to_intel_crtc(old_conn_state->crtc);
6186 	if (!crtc)
6187 		return 0;
6188 
6189 	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
6190 
6191 	if (!old_crtc_state->hw.active)
6192 		return 0;
6193 
6194 	transcoders = old_crtc_state->sync_mode_slaves_mask;
6195 	if (old_crtc_state->master_transcoder != INVALID_TRANSCODER)
6196 		transcoders |= BIT(old_crtc_state->master_transcoder);
6197 
6198 	return intel_modeset_affected_transcoders(state,
6199 						  transcoders);
6200 }
6201 
/*
 * Connector ->atomic_check() hook: digital connector checks, MST root
 * bandwidth check, and - on a modeset - DP tunnel, tile group and port
 * sync handling. Returns 0 or a negative error (possibly -EDEADLK).
 */
static int intel_dp_connector_atomic_check(struct drm_connector *conn,
					   struct drm_atomic_state *_state)
{
	struct drm_i915_private *dev_priv = to_i915(conn->dev);
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(_state, conn);
	struct intel_connector *intel_conn = to_intel_connector(conn);
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_conn->encoder);
	int ret;

	ret = intel_digital_connector_atomic_check(conn, &state->base);
	if (ret)
		return ret;

	if (intel_dp_mst_source_support(intel_dp)) {
		ret = drm_dp_mst_root_conn_atomic_check(conn_state, &intel_dp->mst_mgr);
		if (ret)
			return ret;
	}

	/* The remaining checks only matter when this connector is modeset */
	if (!intel_connector_needs_modeset(state, conn))
		return 0;

	ret = intel_dp_tunnel_atomic_check_state(state,
						 intel_dp,
						 intel_conn);
	if (ret)
		return ret;

	/*
	 * We don't enable port sync on BDW due to missing w/as and
	 * due to not having adjusted the modeset sequence appropriately.
	 */
	if (DISPLAY_VER(dev_priv) < 9)
		return 0;

	if (conn->has_tile) {
		ret = intel_modeset_tile_group(state, conn->tile_group->id);
		if (ret)
			return ret;
	}

	return intel_modeset_synced_crtcs(state, conn);
}
6246 
/*
 * Connector ->oob_hotplug_event() hook: fold an out-of-band (e.g. USB
 * type-C) hotplug notification into the normal HPD machinery. Only
 * schedules detection when the pin's state actually changed.
 */
static void intel_dp_oob_hotplug_event(struct drm_connector *connector,
				       enum drm_connector_status hpd_state)
{
	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
	struct drm_i915_private *i915 = to_i915(connector->dev);
	bool hpd_high = hpd_state == connector_status_connected;
	unsigned int hpd_pin = encoder->hpd_pin;
	bool need_work = false;

	/* irq_lock protects event_bits and oob_hotplug_last_state */
	spin_lock_irq(&i915->irq_lock);
	if (hpd_high != test_bit(hpd_pin, &i915->display.hotplug.oob_hotplug_last_state)) {
		i915->display.hotplug.event_bits |= BIT(hpd_pin);

		__assign_bit(hpd_pin, &i915->display.hotplug.oob_hotplug_last_state, hpd_high);
		need_work = true;
	}
	spin_unlock_irq(&i915->irq_lock);

	/* Schedule outside the spinlock */
	if (need_work)
		intel_hpd_schedule_detection(i915);
}
6268 
/*
 * Note: no ->detect() here; probing goes through the helper's
 * ->detect_ctx() (intel_dp_detect) instead.
 */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_digital_connector_atomic_get_property,
	.atomic_set_property = intel_digital_connector_atomic_set_property,
	.late_register = intel_dp_connector_register,
	.early_unregister = intel_dp_connector_unregister,
	.destroy = intel_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
	.oob_hotplug_event = intel_dp_oob_hotplug_event,
};
6281 
/* Probe helper vtable; detect_ctx allows -EDEADLK backoff during detect. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.detect_ctx = intel_dp_detect,
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.atomic_check = intel_dp_connector_atomic_check,
};
6288 
6289 enum irqreturn
6290 intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
6291 {
6292 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
6293 	struct intel_dp *intel_dp = &dig_port->dp;
6294 	u8 dpcd[DP_RECEIVER_CAP_SIZE];
6295 
6296 	if (dig_port->base.type == INTEL_OUTPUT_EDP &&
6297 	    (long_hpd || !intel_pps_have_panel_power_or_vdd(intel_dp))) {
6298 		/*
6299 		 * vdd off can generate a long/short pulse on eDP which
6300 		 * would require vdd on to handle it, and thus we
6301 		 * would end up in an endless cycle of
6302 		 * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..."
6303 		 */
6304 		drm_dbg_kms(&i915->drm,
6305 			    "ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
6306 			    long_hpd ? "long" : "short",
6307 			    dig_port->base.base.base.id,
6308 			    dig_port->base.base.name);
6309 		return IRQ_HANDLED;
6310 	}
6311 
6312 	drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
6313 		    dig_port->base.base.base.id,
6314 		    dig_port->base.base.name,
6315 		    long_hpd ? "long" : "short");
6316 
6317 	/*
6318 	 * TBT DP tunnels require the GFX driver to read out the DPRX caps in
6319 	 * response to long HPD pulses. The DP hotplug handler does that,
6320 	 * however the hotplug handler may be blocked by another
6321 	 * connector's/encoder's hotplug handler. Since the TBT CM may not
6322 	 * complete the DP tunnel BW request for the latter connector/encoder
6323 	 * waiting for this encoder's DPRX read, perform a dummy read here.
6324 	 */
6325 	if (long_hpd)
6326 		intel_dp_read_dprx_caps(intel_dp, dpcd);
6327 
6328 	if (long_hpd) {
6329 		intel_dp->reset_link_params = true;
6330 		return IRQ_NONE;
6331 	}
6332 
6333 	if (intel_dp->is_mst) {
6334 		if (!intel_dp_check_mst_status(intel_dp))
6335 			return IRQ_NONE;
6336 	} else if (!intel_dp_short_pulse(intel_dp)) {
6337 		return IRQ_NONE;
6338 	}
6339 
6340 	return IRQ_HANDLED;
6341 }
6342 
6343 static bool _intel_dp_is_port_edp(struct drm_i915_private *dev_priv,
6344 				  const struct intel_bios_encoder_data *devdata,
6345 				  enum port port)
6346 {
6347 	/*
6348 	 * eDP not supported on g4x. so bail out early just
6349 	 * for a bit extra safety in case the VBT is bonkers.
6350 	 */
6351 	if (DISPLAY_VER(dev_priv) < 5)
6352 		return false;
6353 
6354 	if (DISPLAY_VER(dev_priv) < 9 && port == PORT_A)
6355 		return true;
6356 
6357 	return devdata && intel_bios_encoder_supports_edp(devdata);
6358 }
6359 
6360 bool intel_dp_is_port_edp(struct drm_i915_private *i915, enum port port)
6361 {
6362 	const struct intel_bios_encoder_data *devdata =
6363 		intel_bios_encoder_data_lookup(i915, port);
6364 
6365 	return _intel_dp_is_port_edp(i915, devdata, port);
6366 }
6367 
6368 static bool
6369 has_gamut_metadata_dip(struct intel_encoder *encoder)
6370 {
6371 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
6372 	enum port port = encoder->port;
6373 
6374 	if (intel_bios_encoder_is_lspcon(encoder->devdata))
6375 		return false;
6376 
6377 	if (DISPLAY_VER(i915) >= 11)
6378 		return true;
6379 
6380 	if (port == PORT_A)
6381 		return false;
6382 
6383 	if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
6384 	    DISPLAY_VER(i915) >= 9)
6385 		return true;
6386 
6387 	return false;
6388 }
6389 
/* Attach the connector properties common to all (e)DP connectors. */
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	enum port port = dp_to_dig_port(intel_dp)->base.port;

	/* Subconnector type only makes sense for external DP */
	if (!intel_dp_is_edp(intel_dp))
		drm_connector_attach_dp_subconnector_property(connector);

	if (!IS_G4X(dev_priv) && port != PORT_A)
		intel_attach_force_audio_property(connector);

	intel_attach_broadcast_rgb_property(connector);
	if (HAS_GMCH(dev_priv))
		drm_connector_attach_max_bpc_property(connector, 6, 10);
	else if (DISPLAY_VER(dev_priv) >= 5)
		drm_connector_attach_max_bpc_property(connector, 6, 12);

	/* Register HDMI colorspace for case of lspcon */
	if (intel_bios_encoder_is_lspcon(dp_to_dig_port(intel_dp)->base.devdata)) {
		drm_connector_attach_content_type_property(connector);
		intel_attach_hdmi_colorspace_property(connector);
	} else {
		intel_attach_dp_colorspace_property(connector);
	}

	if (has_gamut_metadata_dip(&dp_to_dig_port(intel_dp)->base))
		drm_connector_attach_hdr_output_metadata_property(connector);

	if (HAS_VRR(dev_priv))
		drm_connector_attach_vrr_capable_property(connector);
}
6422 
/* Attach the eDP-only properties: scaling mode and panel orientation. */
static void
intel_edp_add_properties(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	const struct drm_display_mode *fixed_mode =
		intel_panel_preferred_fixed_mode(connector);

	intel_attach_scaling_mode_property(&connector->base);

	/* Orientation comes from the VBT, quirks keyed on panel resolution */
	drm_connector_set_panel_orientation_with_quirk(&connector->base,
						       i915->display.vbt.orientation,
						       fixed_mode->hdisplay,
						       fixed_mode->vdisplay);
}
6438 
/* Set up eDP backlight control, resolving the controlling pipe on VLV/CHV. */
static void intel_edp_backlight_setup(struct intel_dp *intel_dp,
				      struct intel_connector *connector)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	enum pipe pipe = INVALID_PIPE;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		pipe = vlv_active_pipe(intel_dp);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps.pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;
	}

	intel_backlight_setup(connector, pipe);
}
6462 
6463 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
6464 				     struct intel_connector *intel_connector)
6465 {
6466 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6467 	struct drm_connector *connector = &intel_connector->base;
6468 	struct drm_display_mode *fixed_mode;
6469 	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
6470 	bool has_dpcd;
6471 	const struct drm_edid *drm_edid;
6472 
6473 	if (!intel_dp_is_edp(intel_dp))
6474 		return true;
6475 
6476 	/*
6477 	 * On IBX/CPT we may get here with LVDS already registered. Since the
6478 	 * driver uses the only internal power sequencer available for both
6479 	 * eDP and LVDS bail out early in this case to prevent interfering
6480 	 * with an already powered-on LVDS power sequencer.
6481 	 */
6482 	if (intel_get_lvds_encoder(dev_priv)) {
6483 		drm_WARN_ON(&dev_priv->drm,
6484 			    !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
6485 		drm_info(&dev_priv->drm,
6486 			 "LVDS was detected, not registering eDP\n");
6487 
6488 		return false;
6489 	}
6490 
6491 	intel_bios_init_panel_early(dev_priv, &intel_connector->panel,
6492 				    encoder->devdata);
6493 
6494 	if (!intel_pps_init(intel_dp)) {
6495 		drm_info(&dev_priv->drm,
6496 			 "[ENCODER:%d:%s] unusable PPS, disabling eDP\n",
6497 			 encoder->base.base.id, encoder->base.name);
6498 		/*
6499 		 * The BIOS may have still enabled VDD on the PPS even
6500 		 * though it's unusable. Make sure we turn it back off
6501 		 * and to release the power domain references/etc.
6502 		 */
6503 		goto out_vdd_off;
6504 	}
6505 
6506 	/*
6507 	 * Enable HPD sense for live status check.
6508 	 * intel_hpd_irq_setup() will turn it off again
6509 	 * if it's no longer needed later.
6510 	 *
6511 	 * The DPCD probe below will make sure VDD is on.
6512 	 */
6513 	intel_hpd_enable_detection(encoder);
6514 
6515 	/* Cache DPCD and EDID for edp. */
6516 	has_dpcd = intel_edp_init_dpcd(intel_dp, intel_connector);
6517 
6518 	if (!has_dpcd) {
6519 		/* if this fails, presume the device is a ghost */
6520 		drm_info(&dev_priv->drm,
6521 			 "[ENCODER:%d:%s] failed to retrieve link info, disabling eDP\n",
6522 			 encoder->base.base.id, encoder->base.name);
6523 		goto out_vdd_off;
6524 	}
6525 
6526 	/*
6527 	 * VBT and straps are liars. Also check HPD as that seems
6528 	 * to be the most reliable piece of information available.
6529 	 *
	 * ... except on devices that forgot to hook HPD up for eDP
6531 	 * (eg. Acer Chromebook C710), so we'll check it only if multiple
6532 	 * ports are attempting to use the same AUX CH, according to VBT.
6533 	 */
6534 	if (intel_bios_dp_has_shared_aux_ch(encoder->devdata)) {
6535 		/*
6536 		 * If this fails, presume the DPCD answer came
6537 		 * from some other port using the same AUX CH.
6538 		 *
6539 		 * FIXME maybe cleaner to check this before the
6540 		 * DPCD read? Would need sort out the VDD handling...
6541 		 */
6542 		if (!intel_digital_port_connected(encoder)) {
6543 			drm_info(&dev_priv->drm,
6544 				 "[ENCODER:%d:%s] HPD is down, disabling eDP\n",
6545 				 encoder->base.base.id, encoder->base.name);
6546 			goto out_vdd_off;
6547 		}
6548 
6549 		/*
6550 		 * Unfortunately even the HPD based detection fails on
6551 		 * eg. Asus B360M-A (CFL+CNP), so as a last resort fall
6552 		 * back to checking for a VGA branch device. Only do this
6553 		 * on known affected platforms to minimize false positives.
6554 		 */
6555 		if (DISPLAY_VER(dev_priv) == 9 && drm_dp_is_branch(intel_dp->dpcd) &&
6556 		    (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) ==
6557 		    DP_DWN_STRM_PORT_TYPE_ANALOG) {
6558 			drm_info(&dev_priv->drm,
6559 				 "[ENCODER:%d:%s] VGA converter detected, disabling eDP\n",
6560 				 encoder->base.base.id, encoder->base.name);
6561 			goto out_vdd_off;
6562 		}
6563 	}
6564 
6565 	mutex_lock(&dev_priv->drm.mode_config.mutex);
6566 	drm_edid = drm_edid_read_ddc(connector, connector->ddc);
6567 	if (!drm_edid) {
6568 		/* Fallback to EDID from ACPI OpRegion, if any */
6569 		drm_edid = intel_opregion_get_edid(intel_connector);
6570 		if (drm_edid)
6571 			drm_dbg_kms(&dev_priv->drm,
6572 				    "[CONNECTOR:%d:%s] Using OpRegion EDID\n",
6573 				    connector->base.id, connector->name);
6574 	}
6575 	if (drm_edid) {
6576 		if (drm_edid_connector_update(connector, drm_edid) ||
6577 		    !drm_edid_connector_add_modes(connector)) {
6578 			drm_edid_connector_update(connector, NULL);
6579 			drm_edid_free(drm_edid);
6580 			drm_edid = ERR_PTR(-EINVAL);
6581 		}
6582 	} else {
6583 		drm_edid = ERR_PTR(-ENOENT);
6584 	}
6585 
6586 	intel_bios_init_panel_late(dev_priv, &intel_connector->panel, encoder->devdata,
6587 				   IS_ERR(drm_edid) ? NULL : drm_edid);
6588 
6589 	intel_panel_add_edid_fixed_modes(intel_connector, true);
6590 
6591 	/* MSO requires information from the EDID */
6592 	intel_edp_mso_init(intel_dp);
6593 
6594 	/* multiply the mode clock and horizontal timings for MSO */
6595 	list_for_each_entry(fixed_mode, &intel_connector->panel.fixed_modes, head)
6596 		intel_edp_mso_mode_fixup(intel_connector, fixed_mode);
6597 
6598 	/* fallback to VBT if available for eDP */
6599 	if (!intel_panel_preferred_fixed_mode(intel_connector))
6600 		intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
6601 
6602 	mutex_unlock(&dev_priv->drm.mode_config.mutex);
6603 
6604 	if (!intel_panel_preferred_fixed_mode(intel_connector)) {
6605 		drm_info(&dev_priv->drm,
6606 			 "[ENCODER:%d:%s] failed to find fixed mode for the panel, disabling eDP\n",
6607 			 encoder->base.base.id, encoder->base.name);
6608 		goto out_vdd_off;
6609 	}
6610 
6611 	intel_panel_init(intel_connector, drm_edid);
6612 
6613 	intel_edp_backlight_setup(intel_dp, intel_connector);
6614 
6615 	intel_edp_add_properties(intel_dp);
6616 
6617 	intel_pps_init_late(intel_dp);
6618 
6619 	return true;
6620 
6621 out_vdd_off:
6622 	intel_pps_vdd_off_sync(intel_dp);
6623 
6624 	return false;
6625 }
6626 
6627 static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
6628 {
6629 	struct intel_connector *intel_connector;
6630 	struct drm_connector *connector;
6631 
6632 	intel_connector = container_of(work, typeof(*intel_connector),
6633 				       modeset_retry_work);
6634 	connector = &intel_connector->base;
6635 	drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s]\n", connector->base.id,
6636 		    connector->name);
6637 
6638 	/* Grab the locks before changing connector property*/
6639 	mutex_lock(&connector->dev->mode_config.mutex);
6640 	/* Set connector link status to BAD and send a Uevent to notify
6641 	 * userspace to do a modeset.
6642 	 */
6643 	drm_connector_set_link_status_property(connector,
6644 					       DRM_MODE_LINK_STATUS_BAD);
6645 	mutex_unlock(&connector->dev->mode_config.mutex);
6646 	/* Send Hotplug uevent so userspace can reprobe */
6647 	drm_kms_helper_connector_hotplug_event(connector);
6648 
6649 	drm_connector_put(connector);
6650 }
6651 
/*
 * Initialize the connector's modeset retry work. The work is queued by
 * the link training failure paths; see intel_dp_modeset_retry_work_fn().
 */
void intel_dp_init_modeset_retry_work(struct intel_connector *connector)
{
	INIT_WORK(&connector->modeset_retry_work,
		  intel_dp_modeset_retry_work_fn);
}
6657 
/*
 * intel_dp_init_connector - initialize the DP/eDP connector for a digital port
 * @dig_port: digital port the DP encoder belongs to
 * @intel_connector: pre-allocated connector to initialize and register
 *
 * Determines whether the port drives eDP or external DP, initializes the
 * AUX channel, registers the drm connector, probes the eDP panel (if any),
 * and sets up MST, HDCP and PSR support.
 *
 * Returns: true on success, false if the connector is unusable (no lanes,
 * invalid eDP port, or eDP panel probe failure); on failure the connector
 * is cleaned up and must not be used.
 *
 * NOTE(review): the statement order below matters (PPS state must be reset
 * before AUX init, eDP probe before source rate setup) — do not reorder.
 */
bool
intel_dp_init_connector(struct intel_digital_port *dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &dig_port->dp;
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_encoder->port;
	int type;

	/* Initialize the work for modeset in case of link train failure */
	intel_dp_init_modeset_retry_work(intel_connector);

	if (drm_WARN(dev, dig_port->max_lanes < 1,
		     "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
		     dig_port->max_lanes, intel_encoder->base.base.id,
		     intel_encoder->base.name))
		return false;

	/* Force a full link parameter recomputation on the first detect. */
	intel_dp->reset_link_params = true;
	intel_dp->pps.pps_pipe = INVALID_PIPE;
	intel_dp->pps.active_pipe = INVALID_PIPE;

	/* Preserve the current hw state. */
	intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (_intel_dp_is_port_edp(dev_priv, intel_encoder->devdata, port)) {
		/*
		 * Currently we don't support eDP on TypeC ports, although in
		 * theory it could work on TypeC legacy ports.
		 */
		drm_WARN_ON(dev, intel_encoder_is_tc(intel_encoder));
		type = DRM_MODE_CONNECTOR_eDP;
		intel_encoder->type = INTEL_OUTPUT_EDP;

		/* eDP only on port B and/or C on vlv/chv */
		if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
				      IS_CHERRYVIEW(dev_priv)) &&
				port != PORT_B && port != PORT_C))
			return false;
	} else {
		type = DRM_MODE_CONNECTOR_DisplayPort;
	}

	/* Conservative sink defaults until the DPCD has been read. */
	intel_dp_set_default_sink_rates(intel_dp);
	intel_dp_set_default_max_sink_lane_count(intel_dp);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);

	/* AUX must be up before the eDP probe / DPCD reads below. */
	intel_dp_aux_init(intel_dp);
	intel_connector->dp.dsc_decompression_aux = &intel_dp->aux;

	drm_dbg_kms(&dev_priv->drm,
		    "Adding %s connector on [ENCODER:%d:%s]\n",
		    type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
		    intel_encoder->base.base.id, intel_encoder->base.name);

	drm_connector_init_with_ddc(dev, connector, &intel_dp_connector_funcs,
				    type, &intel_dp->aux.ddc);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	if (!HAS_GMCH(dev_priv) && DISPLAY_VER(dev_priv) < 12)
		connector->interlace_allowed = true;

	intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
	intel_connector->base.polled = intel_connector->polled;

	intel_connector_attach_encoder(intel_connector, intel_encoder);

	if (HAS_DDI(dev_priv))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->sync_state = intel_dp_connector_sync_state;

	/* Probe the eDP panel; a false return means the port is unusable. */
	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		intel_dp_aux_fini(intel_dp);
		goto fail;
	}

	/*
	 * Source/common rates depend on the DPCD read during the eDP probe
	 * above, so they are only computed here.
	 */
	intel_dp_set_source_rates(intel_dp);
	intel_dp_set_common_rates(intel_dp);
	intel_dp_reset_max_link_params(intel_dp);

	/* init MST on ports that can support it */
	intel_dp_mst_encoder_init(dig_port,
				  intel_connector->base.base.id);

	intel_dp_add_properties(intel_dp, connector);

	/* HDCP is only supported on external DP, never on eDP panels. */
	if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
		int ret = intel_dp_hdcp_init(dig_port, intel_connector);
		if (ret)
			drm_dbg_kms(&dev_priv->drm,
				    "HDCP init failed, skipping.\n");
	}

	intel_dp->colorimetry_support =
		intel_dp_get_colorimetry_status(intel_dp);

	/* No HDMI FRL training has happened yet. */
	intel_dp->frl.is_trained = false;
	intel_dp->frl.trained_rate_gbps = 0;

	intel_psr_init(intel_dp);

	return true;

fail:
	/* Flush any pending power domain work before tearing down. */
	intel_display_power_flush_work(dev_priv);
	drm_connector_cleanup(connector);

	return false;
}
6775 
6776 void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
6777 {
6778 	struct intel_encoder *encoder;
6779 
6780 	if (!HAS_DISPLAY(dev_priv))
6781 		return;
6782 
6783 	for_each_intel_encoder(&dev_priv->drm, encoder) {
6784 		struct intel_dp *intel_dp;
6785 
6786 		if (encoder->type != INTEL_OUTPUT_DDI)
6787 			continue;
6788 
6789 		intel_dp = enc_to_intel_dp(encoder);
6790 
6791 		if (!intel_dp_mst_source_support(intel_dp))
6792 			continue;
6793 
6794 		if (intel_dp->is_mst)
6795 			drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
6796 	}
6797 }
6798 
6799 void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
6800 {
6801 	struct intel_encoder *encoder;
6802 
6803 	if (!HAS_DISPLAY(dev_priv))
6804 		return;
6805 
6806 	for_each_intel_encoder(&dev_priv->drm, encoder) {
6807 		struct intel_dp *intel_dp;
6808 		int ret;
6809 
6810 		if (encoder->type != INTEL_OUTPUT_DDI)
6811 			continue;
6812 
6813 		intel_dp = enc_to_intel_dp(encoder);
6814 
6815 		if (!intel_dp_mst_source_support(intel_dp))
6816 			continue;
6817 
6818 		ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
6819 						     true);
6820 		if (ret) {
6821 			intel_dp->is_mst = false;
6822 			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
6823 							false);
6824 		}
6825 	}
6826 }
6827