xref: /linux/drivers/gpu/drm/i915/display/intel_dp_mst.c (revision 566ab427f827b0256d3e8ce0235d088e6a9c28bd)
1 /*
2  * Copyright © 2008 Intel Corporation
3  *             2014 Red Hat Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22  * IN THE SOFTWARE.
23  *
24  */
25 
26 #include <drm/drm_atomic.h>
27 #include <drm/drm_atomic_helper.h>
28 #include <drm/drm_edid.h>
29 #include <drm/drm_fixed.h>
30 #include <drm/drm_probe_helper.h>
31 
32 #include "i915_drv.h"
33 #include "i915_reg.h"
34 #include "intel_atomic.h"
35 #include "intel_audio.h"
36 #include "intel_connector.h"
37 #include "intel_crtc.h"
38 #include "intel_ddi.h"
39 #include "intel_de.h"
40 #include "intel_display_driver.h"
41 #include "intel_display_types.h"
42 #include "intel_dp.h"
43 #include "intel_dp_hdcp.h"
44 #include "intel_dp_mst.h"
45 #include "intel_dp_tunnel.h"
46 #include "intel_dp_link_training.h"
47 #include "intel_dpio_phy.h"
48 #include "intel_hdcp.h"
49 #include "intel_hotplug.h"
50 #include "intel_link_bw.h"
51 #include "intel_psr.h"
52 #include "intel_vdsc.h"
53 #include "skl_scaler.h"
54 
/*
 * Return the maximum link bpp supported by the DSC->DPT interface for the
 * given configuration, or INT_MAX if no such limit applies (non-UHBR link,
 * no DSC, or display version 20+ where the interface is wide enough).
 */
static int intel_dp_mst_max_dpt_bpp(const struct intel_crtc_state *crtc_state,
				    bool dsc)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	/* The DPT interface limit only applies to DSC on UHBR links before LNL. */
	if (!intel_dp_is_uhbr(crtc_state) || DISPLAY_VER(i915) >= 20 || !dsc)
		return INT_MAX;

	/*
	 * DSC->DPT interface width:
	 *   ICL-MTL: 72 bits (each branch has 72 bits, only left branch is used)
	 *   LNL+:    144 bits (not a bottleneck in any config)
	 *
	 * Bspec/49259 suggests that the FEC overhead needs to be
	 * applied here, though HW people claim that neither this FEC
	 * or any other overhead is applicable here (that is the actual
	 * available_bw is just symbol_clock * 72). However based on
	 * testing on MTL-P the
	 * - DELL U3224KBA display
	 * - Unigraf UCD-500 CTS test sink
	 * devices the
	 * - 5120x2880/995.59Mhz
	 * - 6016x3384/1357.23Mhz
	 * - 6144x3456/1413.39Mhz
	 * modes (all the ones having a DPT limit on the above devices),
	 * both the channel coding efficiency and an additional 3%
	 * overhead needs to be accounted for.
	 */
	return div64_u64(mul_u32_u32(intel_dp_link_symbol_clock(crtc_state->port_clock) * 72,
				     drm_dp_bw_channel_coding_efficiency(true)),
			 mul_u32_u32(adjusted_mode->crtc_clock, 1030000));
}
89 
90 static int intel_dp_mst_bw_overhead(const struct intel_crtc_state *crtc_state,
91 				    const struct intel_connector *connector,
92 				    bool ssc, bool dsc, int bpp_x16)
93 {
94 	const struct drm_display_mode *adjusted_mode =
95 		&crtc_state->hw.adjusted_mode;
96 	unsigned long flags = DRM_DP_BW_OVERHEAD_MST;
97 	int dsc_slice_count = 0;
98 	int overhead;
99 
100 	flags |= intel_dp_is_uhbr(crtc_state) ? DRM_DP_BW_OVERHEAD_UHBR : 0;
101 	flags |= ssc ? DRM_DP_BW_OVERHEAD_SSC_REF_CLK : 0;
102 	flags |= crtc_state->fec_enable ? DRM_DP_BW_OVERHEAD_FEC : 0;
103 
104 	if (dsc) {
105 		flags |= DRM_DP_BW_OVERHEAD_DSC;
106 		dsc_slice_count = intel_dp_dsc_get_slice_count(connector,
107 							       adjusted_mode->clock,
108 							       adjusted_mode->hdisplay,
109 							       crtc_state->joiner_pipes);
110 	}
111 
112 	overhead = drm_dp_bw_overhead(crtc_state->lane_count,
113 				      adjusted_mode->hdisplay,
114 				      dsc_slice_count,
115 				      bpp_x16,
116 				      flags);
117 
118 	/*
119 	 * TODO: clarify whether a minimum required by the fixed FEC overhead
120 	 * in the bspec audio programming sequence is required here.
121 	 */
122 	return max(overhead, intel_dp_bw_fec_overhead(crtc_state->fec_enable));
123 }
124 
/*
 * Compute the data/link M/N values and the TU size for the stream, taking
 * the link BW allocation @overhead (from intel_dp_mst_bw_overhead()) into
 * account.
 *
 * NOTE(review): @connector is currently unused here.
 */
static void intel_dp_mst_compute_m_n(const struct intel_crtc_state *crtc_state,
				     const struct intel_connector *connector,
				     int overhead,
				     int bpp_x16,
				     struct intel_link_m_n *m_n)
{
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	/* TODO: Check WA 14013163432 to set data M/N for full BW utilization. */
	intel_link_compute_m_n(bpp_x16, crtc_state->lane_count,
			       adjusted_mode->crtc_clock,
			       crtc_state->port_clock,
			       overhead,
			       m_n);

	/* TU = ceil(64 * data_m / data_n), i.e. the 1/64 MTP units needed. */
	m_n->tu = DIV_ROUND_UP_ULL(mul_u32_u32(m_n->data_m, 64), m_n->data_n);
}
143 
/* Return the PBN value for the stream's effective data rate. */
static int intel_dp_mst_calc_pbn(int pixel_clock, int bpp_x16, int bw_overhead)
{
	/*
	 * TODO: Use drm_dp_calc_pbn_mode() instead, once it's converted
	 * to calculate PBN with the BW overhead passed to it.
	 */
	return DIV_ROUND_UP(intel_dp_effective_data_rate(pixel_clock, bpp_x16,
							 bw_overhead) * 64,
			    54 * 1000);
}
155 
156 static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
157 						struct intel_crtc_state *crtc_state,
158 						int max_bpp,
159 						int min_bpp,
160 						struct link_config_limits *limits,
161 						struct drm_connector_state *conn_state,
162 						int step,
163 						bool dsc)
164 {
165 	struct drm_atomic_state *state = crtc_state->uapi.state;
166 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
167 	struct intel_dp *intel_dp = &intel_mst->primary->dp;
168 	struct drm_dp_mst_topology_state *mst_state;
169 	struct intel_connector *connector =
170 		to_intel_connector(conn_state->connector);
171 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
172 	const struct drm_display_mode *adjusted_mode =
173 		&crtc_state->hw.adjusted_mode;
174 	int bpp, slots = -EINVAL;
175 	int max_dpt_bpp;
176 	int ret = 0;
177 
178 	mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst_mgr);
179 	if (IS_ERR(mst_state))
180 		return PTR_ERR(mst_state);
181 
182 	crtc_state->lane_count = limits->max_lane_count;
183 	crtc_state->port_clock = limits->max_rate;
184 
185 	if (dsc) {
186 		if (!intel_dp_supports_fec(intel_dp, connector, crtc_state))
187 			return -EINVAL;
188 
189 		crtc_state->fec_enable = !intel_dp_is_uhbr(crtc_state);
190 	}
191 
192 	mst_state->pbn_div = drm_dp_get_vc_payload_bw(&intel_dp->mst_mgr,
193 						      crtc_state->port_clock,
194 						      crtc_state->lane_count);
195 
196 	max_dpt_bpp = intel_dp_mst_max_dpt_bpp(crtc_state, dsc);
197 	if (max_bpp > max_dpt_bpp) {
198 		drm_dbg_kms(&i915->drm, "Limiting bpp to max DPT bpp (%d -> %d)\n",
199 			    max_bpp, max_dpt_bpp);
200 		max_bpp = max_dpt_bpp;
201 	}
202 
203 	drm_dbg_kms(&i915->drm, "Looking for slots in range min bpp %d max bpp %d\n",
204 		    min_bpp, max_bpp);
205 
206 	for (bpp = max_bpp; bpp >= min_bpp; bpp -= step) {
207 		int local_bw_overhead;
208 		int remote_bw_overhead;
209 		int link_bpp_x16;
210 		int remote_tu;
211 		fixed20_12 pbn;
212 
213 		drm_dbg_kms(&i915->drm, "Trying bpp %d\n", bpp);
214 
215 		link_bpp_x16 = fxp_q4_from_int(dsc ? bpp :
216 					       intel_dp_output_bpp(crtc_state->output_format, bpp));
217 
218 		local_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector,
219 							     false, dsc, link_bpp_x16);
220 		remote_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector,
221 							      true, dsc, link_bpp_x16);
222 
223 		intel_dp_mst_compute_m_n(crtc_state, connector,
224 					 local_bw_overhead,
225 					 link_bpp_x16,
226 					 &crtc_state->dp_m_n);
227 
228 		/*
229 		 * The TU size programmed to the HW determines which slots in
230 		 * an MTP frame are used for this stream, which needs to match
231 		 * the payload size programmed to the first downstream branch
232 		 * device's payload table.
233 		 *
234 		 * Note that atm the payload's PBN value DRM core sends via
235 		 * the ALLOCATE_PAYLOAD side-band message matches the payload
236 		 * size (which it calculates from the PBN value) it programs
237 		 * to the first branch device's payload table. The allocation
238 		 * in the payload table could be reduced though (to
239 		 * crtc_state->dp_m_n.tu), provided that the driver doesn't
240 		 * enable SSC on the corresponding link.
241 		 */
242 		pbn.full = dfixed_const(intel_dp_mst_calc_pbn(adjusted_mode->crtc_clock,
243 							      link_bpp_x16,
244 							      remote_bw_overhead));
245 		remote_tu = DIV_ROUND_UP(pbn.full, mst_state->pbn_div.full);
246 
247 		/*
248 		 * Aligning the TUs ensures that symbols consisting of multiple
249 		 * (4) symbol cycles don't get split between two consecutive
250 		 * MTPs, as required by Bspec.
251 		 * TODO: remove the alignment restriction for 128b/132b links
252 		 * on some platforms, where Bspec allows this.
253 		 */
254 		remote_tu = ALIGN(remote_tu, 4 / crtc_state->lane_count);
255 
256 		/*
257 		 * Also align PBNs accordingly, since MST core will derive its
258 		 * own copy of TU from the PBN in drm_dp_atomic_find_time_slots().
259 		 * The above comment about the difference between the PBN
260 		 * allocated for the whole path and the TUs allocated for the
261 		 * first branch device's link also applies here.
262 		 */
263 		pbn.full = remote_tu * mst_state->pbn_div.full;
264 		crtc_state->pbn = dfixed_trunc(pbn);
265 
266 		drm_WARN_ON(&i915->drm, remote_tu < crtc_state->dp_m_n.tu);
267 		crtc_state->dp_m_n.tu = remote_tu;
268 
269 		slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst_mgr,
270 						      connector->port,
271 						      crtc_state->pbn);
272 		if (slots == -EDEADLK)
273 			return slots;
274 
275 		if (slots >= 0) {
276 			drm_WARN_ON(&i915->drm, slots != crtc_state->dp_m_n.tu);
277 
278 			break;
279 		}
280 	}
281 
282 	/* We failed to find a proper bpp/timeslots, return error */
283 	if (ret)
284 		slots = ret;
285 
286 	if (slots < 0) {
287 		drm_dbg_kms(&i915->drm, "failed finding vcpi slots:%d\n",
288 			    slots);
289 	} else {
290 		if (!dsc)
291 			crtc_state->pipe_bpp = bpp;
292 		else
293 			crtc_state->dsc.compressed_bpp_x16 = fxp_q4_from_int(bpp);
294 		drm_dbg_kms(&i915->drm, "Got %d slots for pipe bpp %d dsc %d\n", slots, bpp, dsc);
295 	}
296 
297 	return slots;
298 }
299 
300 static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
301 					    struct intel_crtc_state *crtc_state,
302 					    struct drm_connector_state *conn_state,
303 					    struct link_config_limits *limits)
304 {
305 	int slots = -EINVAL;
306 
307 	/*
308 	 * FIXME: allocate the BW according to link_bpp, which in the case of
309 	 * YUV420 is only half of the pipe bpp value.
310 	 */
311 	slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state,
312 						     fxp_q4_to_int(limits->link.max_bpp_x16),
313 						     fxp_q4_to_int(limits->link.min_bpp_x16),
314 						     limits,
315 						     conn_state, 2 * 3, false);
316 
317 	if (slots < 0)
318 		return slots;
319 
320 	return 0;
321 }
322 
323 static int intel_dp_dsc_mst_compute_link_config(struct intel_encoder *encoder,
324 						struct intel_crtc_state *crtc_state,
325 						struct drm_connector_state *conn_state,
326 						struct link_config_limits *limits)
327 {
328 	struct intel_connector *connector =
329 		to_intel_connector(conn_state->connector);
330 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
331 	int slots = -EINVAL;
332 	int i, num_bpc;
333 	u8 dsc_bpc[3] = {};
334 	int min_bpp, max_bpp, sink_min_bpp, sink_max_bpp;
335 	u8 dsc_max_bpc;
336 	int min_compressed_bpp, max_compressed_bpp;
337 
338 	/* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
339 	if (DISPLAY_VER(i915) >= 12)
340 		dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc);
341 	else
342 		dsc_max_bpc = min_t(u8, 10, conn_state->max_requested_bpc);
343 
344 	max_bpp = min_t(u8, dsc_max_bpc * 3, limits->pipe.max_bpp);
345 	min_bpp = limits->pipe.min_bpp;
346 
347 	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(connector->dp.dsc_dpcd,
348 						       dsc_bpc);
349 
350 	drm_dbg_kms(&i915->drm, "DSC Source supported min bpp %d max bpp %d\n",
351 		    min_bpp, max_bpp);
352 
353 	sink_max_bpp = dsc_bpc[0] * 3;
354 	sink_min_bpp = sink_max_bpp;
355 
356 	for (i = 1; i < num_bpc; i++) {
357 		if (sink_min_bpp > dsc_bpc[i] * 3)
358 			sink_min_bpp = dsc_bpc[i] * 3;
359 		if (sink_max_bpp < dsc_bpc[i] * 3)
360 			sink_max_bpp = dsc_bpc[i] * 3;
361 	}
362 
363 	drm_dbg_kms(&i915->drm, "DSC Sink supported min bpp %d max bpp %d\n",
364 		    sink_min_bpp, sink_max_bpp);
365 
366 	if (min_bpp < sink_min_bpp)
367 		min_bpp = sink_min_bpp;
368 
369 	if (max_bpp > sink_max_bpp)
370 		max_bpp = sink_max_bpp;
371 
372 	crtc_state->pipe_bpp = max_bpp;
373 
374 	max_compressed_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
375 								  crtc_state,
376 								  max_bpp / 3);
377 	max_compressed_bpp = min(max_compressed_bpp,
378 				 fxp_q4_to_int(limits->link.max_bpp_x16));
379 
380 	min_compressed_bpp = intel_dp_dsc_sink_min_compressed_bpp(crtc_state);
381 	min_compressed_bpp = max(min_compressed_bpp,
382 				 fxp_q4_to_int_roundup(limits->link.min_bpp_x16));
383 
384 	drm_dbg_kms(&i915->drm, "DSC Sink supported compressed min bpp %d compressed max bpp %d\n",
385 		    min_compressed_bpp, max_compressed_bpp);
386 
387 	/* Align compressed bpps according to our own constraints */
388 	max_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(i915, max_compressed_bpp,
389 							    crtc_state->pipe_bpp);
390 	min_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(i915, min_compressed_bpp,
391 							    crtc_state->pipe_bpp);
392 
393 	slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state, max_compressed_bpp,
394 						     min_compressed_bpp, limits,
395 						     conn_state, 1, true);
396 
397 	if (slots < 0)
398 		return slots;
399 
400 	return 0;
401 }
402 static int intel_dp_mst_update_slots(struct intel_encoder *encoder,
403 				     struct intel_crtc_state *crtc_state,
404 				     struct drm_connector_state *conn_state)
405 {
406 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
407 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
408 	struct intel_dp *intel_dp = &intel_mst->primary->dp;
409 	struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst_mgr;
410 	struct drm_dp_mst_topology_state *topology_state;
411 	u8 link_coding_cap = intel_dp_is_uhbr(crtc_state) ?
412 		DP_CAP_ANSI_128B132B : DP_CAP_ANSI_8B10B;
413 
414 	topology_state = drm_atomic_get_mst_topology_state(conn_state->state, mgr);
415 	if (IS_ERR(topology_state)) {
416 		drm_dbg_kms(&i915->drm, "slot update failed\n");
417 		return PTR_ERR(topology_state);
418 	}
419 
420 	drm_dp_mst_update_slots(topology_state, link_coding_cap);
421 
422 	return 0;
423 }
424 
425 static int mode_hblank_period_ns(const struct drm_display_mode *mode)
426 {
427 	return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(mode->htotal - mode->hdisplay,
428 						 NSEC_PER_SEC / 1000),
429 				     mode->crtc_clock);
430 }
431 
432 static bool
433 hblank_expansion_quirk_needs_dsc(const struct intel_connector *connector,
434 				 const struct intel_crtc_state *crtc_state,
435 				 const struct link_config_limits *limits)
436 {
437 	const struct drm_display_mode *adjusted_mode =
438 		&crtc_state->hw.adjusted_mode;
439 	bool is_uhbr_sink = connector->mst_port &&
440 			    drm_dp_128b132b_supported(connector->mst_port->dpcd);
441 	int hblank_limit = is_uhbr_sink ? 500 : 300;
442 
443 	if (!connector->dp.dsc_hblank_expansion_quirk)
444 		return false;
445 
446 	if (is_uhbr_sink && !drm_dp_is_uhbr_rate(limits->max_rate))
447 		return false;
448 
449 	if (mode_hblank_period_ns(adjusted_mode) > hblank_limit)
450 		return false;
451 
452 	return true;
453 }
454 
/*
 * Adjust the link bpp limits to cope with the connector's hblank expansion
 * quirk (see hblank_expansion_quirk_needs_dsc()). Returns false if the
 * current limits can't satisfy the quirk - signalling the caller to fail
 * this attempt (and, in the non-DSC case, retry with DSC) - true otherwise.
 */
static bool
adjust_limits_for_dsc_hblank_expansion_quirk(const struct intel_connector *connector,
					     const struct intel_crtc_state *crtc_state,
					     struct link_config_limits *limits,
					     bool dsc)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	int min_bpp_x16 = limits->link.min_bpp_x16;

	if (!hblank_expansion_quirk_needs_dsc(connector, crtc_state, limits))
		return true;

	if (!dsc) {
		/* Prefer DSC if the source supports it: fail the non-DSC attempt. */
		if (intel_dp_supports_dsc(connector, crtc_state)) {
			drm_dbg_kms(&i915->drm,
				    "[CRTC:%d:%s][CONNECTOR:%d:%s] DSC needed by hblank expansion quirk\n",
				    crtc->base.base.id, crtc->base.name,
				    connector->base.base.id, connector->base.name);
			return false;
		}

		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to 24 due to hblank expansion quirk\n",
			    crtc->base.base.id, crtc->base.name,
			    connector->base.base.id, connector->base.name);

		if (limits->link.max_bpp_x16 < fxp_q4_from_int(24))
			return false;

		limits->link.min_bpp_x16 = fxp_q4_from_int(24);

		return true;
	}

	/* NOTE(review): the limits are expected to be fixed-rate here. */
	drm_WARN_ON(&i915->drm, limits->min_rate != limits->max_rate);

	/* Lower link rates require a higher minimum compressed bpp. */
	if (limits->max_rate < 540000)
		min_bpp_x16 = fxp_q4_from_int(13);
	else if (limits->max_rate < 810000)
		min_bpp_x16 = fxp_q4_from_int(10);

	if (limits->link.min_bpp_x16 >= min_bpp_x16)
		return true;

	drm_dbg_kms(&i915->drm,
		    "[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to " FXP_Q4_FMT " in DSC mode due to hblank expansion quirk\n",
		    crtc->base.base.id, crtc->base.name,
		    connector->base.base.id, connector->base.name,
		    FXP_Q4_ARGS(min_bpp_x16));

	if (limits->link.max_bpp_x16 < min_bpp_x16)
		return false;

	limits->link.min_bpp_x16 = min_bpp_x16;

	return true;
}
513 
514 static bool
515 intel_dp_mst_compute_config_limits(struct intel_dp *intel_dp,
516 				   const struct intel_connector *connector,
517 				   struct intel_crtc_state *crtc_state,
518 				   bool dsc,
519 				   struct link_config_limits *limits)
520 {
521 	/*
522 	 * for MST we always configure max link bw - the spec doesn't
523 	 * seem to suggest we should do otherwise.
524 	 */
525 	limits->min_rate = limits->max_rate =
526 		intel_dp_max_link_rate(intel_dp);
527 
528 	limits->min_lane_count = limits->max_lane_count =
529 		intel_dp_max_lane_count(intel_dp);
530 
531 	limits->pipe.min_bpp = intel_dp_min_bpp(crtc_state->output_format);
532 	/*
533 	 * FIXME: If all the streams can't fit into the link with
534 	 * their current pipe_bpp we should reduce pipe_bpp across
535 	 * the board until things start to fit. Until then we
536 	 * limit to <= 8bpc since that's what was hardcoded for all
537 	 * MST streams previously. This hack should be removed once
538 	 * we have the proper retry logic in place.
539 	 */
540 	limits->pipe.max_bpp = min(crtc_state->pipe_bpp, 24);
541 
542 	intel_dp_adjust_compliance_config(intel_dp, crtc_state, limits);
543 
544 	if (!intel_dp_compute_config_link_bpp_limits(intel_dp,
545 						     crtc_state,
546 						     dsc,
547 						     limits))
548 		return false;
549 
550 	return adjust_limits_for_dsc_hblank_expansion_quirk(connector,
551 							    crtc_state,
552 							    limits,
553 							    dsc);
554 }
555 
/*
 * Compute the full MST stream configuration for the atomic commit: link
 * parameters, pipe bpp and time slot allocation - falling back to DSC if
 * the mode doesn't fit the link uncompressed - followed by the dependent
 * color range, lane latency, audio, voltage level, PSR and tunnel BW state.
 */
static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
				       struct intel_crtc_state *pipe_config,
				       struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
	struct intel_dp *intel_dp = &intel_mst->primary->dp;
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	struct link_config_limits limits;
	bool dsc_needed, joiner_needs_dsc;
	int ret = 0;

	if (pipe_config->fec_enable &&
	    !intel_dp_supports_fec(intel_dp, connector, pipe_config))
		return -EINVAL;

	/* Doublescan modes are not supported on MST outputs. */
	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return -EINVAL;

	/* Join this pipe with the next one if the mode requires it. */
	if (intel_dp_need_joiner(intel_dp, connector,
				 adjusted_mode->crtc_hdisplay,
				 adjusted_mode->crtc_clock))
		pipe_config->joiner_pipes = GENMASK(crtc->pipe + 1, crtc->pipe);

	/* MST streams are configured as RGB output here. */
	pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->has_pch_encoder = false;

	joiner_needs_dsc = intel_dp_joiner_needs_dsc(dev_priv, pipe_config->joiner_pipes);

	/*
	 * DSC is required if the joiner configuration or the force_dsc_en
	 * debug knob demands it, or if no valid non-DSC limits exist.
	 */
	dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en ||
		     !intel_dp_mst_compute_config_limits(intel_dp,
							 connector,
							 pipe_config,
							 false,
							 &limits);

	if (!dsc_needed) {
		ret = intel_dp_mst_compute_link_config(encoder, pipe_config,
						       conn_state, &limits);

		if (ret == -EDEADLK)
			return ret;

		/* Fall back to DSC if the mode doesn't fit uncompressed. */
		if (ret)
			dsc_needed = true;
	}

	/* enable compression if the mode doesn't fit available BW */
	if (dsc_needed) {
		drm_dbg_kms(&dev_priv->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
			    str_yes_no(ret), str_yes_no(joiner_needs_dsc),
			    str_yes_no(intel_dp->force_dsc_en));

		if (!intel_dp_supports_dsc(connector, pipe_config))
			return -EINVAL;

		if (!intel_dp_mst_compute_config_limits(intel_dp,
							connector,
							pipe_config,
							true,
							&limits))
			return -EINVAL;

		/*
		 * FIXME: As bpc is hardcoded to 8, as mentioned above,
		 * WARN and ignore the debug flag force_dsc_bpc for now.
		 */
		drm_WARN(&dev_priv->drm, intel_dp->force_dsc_bpc, "Cannot Force BPC for MST\n");
		/*
		 * Try to get at least some timeslots and then see, if
		 * we can fit there with DSC.
		 */
		drm_dbg_kms(&dev_priv->drm, "Trying to find VCPI slots in DSC mode\n");

		ret = intel_dp_dsc_mst_compute_link_config(encoder, pipe_config,
							   conn_state, &limits);
		if (ret < 0)
			return ret;

		ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
						  conn_state, &limits,
						  pipe_config->dp_m_n.tu, false);
	}

	if (ret)
		return ret;

	ret = intel_dp_mst_update_slots(encoder, pipe_config, conn_state);
	if (ret)
		return ret;

	pipe_config->limited_color_range =
		intel_dp_limited_color_range(pipe_config, conn_state);

	/* GLK/BXT DDI PHYs need the lane latency optimization mask. */
	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		pipe_config->lane_lat_optim_mask =
			bxt_dpio_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);

	intel_dp_audio_compute_config(encoder, pipe_config, conn_state);

	intel_ddi_compute_min_voltage_level(pipe_config);

	intel_psr_compute_config(intel_dp, pipe_config, conn_state);

	return intel_dp_tunnel_atomic_compute_stream_bw(state, intel_dp, connector,
							pipe_config);
}
669 
670 /*
671  * Iterate over all connectors and return a mask of
672  * all CPU transcoders streaming over the same DP link.
673  */
674 static unsigned int
675 intel_dp_mst_transcoder_mask(struct intel_atomic_state *state,
676 			     struct intel_dp *mst_port)
677 {
678 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
679 	const struct intel_digital_connector_state *conn_state;
680 	struct intel_connector *connector;
681 	u8 transcoders = 0;
682 	int i;
683 
684 	if (DISPLAY_VER(dev_priv) < 12)
685 		return 0;
686 
687 	for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
688 		const struct intel_crtc_state *crtc_state;
689 		struct intel_crtc *crtc;
690 
691 		if (connector->mst_port != mst_port || !conn_state->base.crtc)
692 			continue;
693 
694 		crtc = to_intel_crtc(conn_state->base.crtc);
695 		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
696 
697 		if (!crtc_state->hw.active)
698 			continue;
699 
700 		transcoders |= BIT(crtc_state->cpu_transcoder);
701 	}
702 
703 	return transcoders;
704 }
705 
706 static u8 get_pipes_downstream_of_mst_port(struct intel_atomic_state *state,
707 					   struct drm_dp_mst_topology_mgr *mst_mgr,
708 					   struct drm_dp_mst_port *parent_port)
709 {
710 	const struct intel_digital_connector_state *conn_state;
711 	struct intel_connector *connector;
712 	u8 mask = 0;
713 	int i;
714 
715 	for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
716 		if (!conn_state->base.crtc)
717 			continue;
718 
719 		if (&connector->mst_port->mst_mgr != mst_mgr)
720 			continue;
721 
722 		if (connector->port != parent_port &&
723 		    !drm_dp_mst_port_downstream_of_parent(mst_mgr,
724 							  connector->port,
725 							  parent_port))
726 			continue;
727 
728 		mask |= BIT(to_intel_crtc(conn_state->base.crtc)->pipe);
729 	}
730 
731 	return mask;
732 }
733 
734 static int intel_dp_mst_check_fec_change(struct intel_atomic_state *state,
735 					 struct drm_dp_mst_topology_mgr *mst_mgr,
736 					 struct intel_link_bw_limits *limits)
737 {
738 	struct drm_i915_private *i915 = to_i915(state->base.dev);
739 	struct intel_crtc *crtc;
740 	u8 mst_pipe_mask;
741 	u8 fec_pipe_mask = 0;
742 	int ret;
743 
744 	mst_pipe_mask = get_pipes_downstream_of_mst_port(state, mst_mgr, NULL);
745 
746 	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, mst_pipe_mask) {
747 		struct intel_crtc_state *crtc_state =
748 			intel_atomic_get_new_crtc_state(state, crtc);
749 
750 		/* Atomic connector check should've added all the MST CRTCs. */
751 		if (drm_WARN_ON(&i915->drm, !crtc_state))
752 			return -EINVAL;
753 
754 		if (crtc_state->fec_enable)
755 			fec_pipe_mask |= BIT(crtc->pipe);
756 	}
757 
758 	if (!fec_pipe_mask || mst_pipe_mask == fec_pipe_mask)
759 		return 0;
760 
761 	limits->force_fec_pipes |= mst_pipe_mask;
762 
763 	ret = intel_modeset_pipes_in_mask_early(state, "MST FEC",
764 						mst_pipe_mask);
765 
766 	return ret ? : -EAGAIN;
767 }
768 
769 static int intel_dp_mst_check_bw(struct intel_atomic_state *state,
770 				 struct drm_dp_mst_topology_mgr *mst_mgr,
771 				 struct drm_dp_mst_topology_state *mst_state,
772 				 struct intel_link_bw_limits *limits)
773 {
774 	struct drm_dp_mst_port *mst_port;
775 	u8 mst_port_pipes;
776 	int ret;
777 
778 	ret = drm_dp_mst_atomic_check_mgr(&state->base, mst_mgr, mst_state, &mst_port);
779 	if (ret != -ENOSPC)
780 		return ret;
781 
782 	mst_port_pipes = get_pipes_downstream_of_mst_port(state, mst_mgr, mst_port);
783 
784 	ret = intel_link_bw_reduce_bpp(state, limits,
785 				       mst_port_pipes, "MST link BW");
786 
787 	return ret ? : -EAGAIN;
788 }
789 
/**
 * intel_dp_mst_atomic_check_link - check all modeset MST link configuration
 * @state: intel atomic state
 * @limits: link BW limits
 *
 * Check the link configuration for all modeset MST outputs. If the
 * configuration is invalid @limits will be updated if possible to
 * reduce the total BW, after which the configuration for all CRTCs in
 * @state must be recomputed with the updated @limits.
 *
 * Returns:
 *   - 0 if the configuration is valid
 *   - %-EAGAIN, if the configuration is invalid and @limits got updated
 *     with fallback values with which the configuration of all CRTCs in
 *     @state must be recomputed
 *   - Other negative error, if the configuration is invalid without a
 *     fallback possibility, or the check failed for another reason
 */
int intel_dp_mst_atomic_check_link(struct intel_atomic_state *state,
				   struct intel_link_bw_limits *limits)
{
	struct drm_dp_mst_topology_mgr *mgr;
	struct drm_dp_mst_topology_state *mst_state;
	int ret;
	int i;

	for_each_new_mst_mgr_in_state(&state->base, mgr, mst_state, i) {
		ret = intel_dp_mst_check_fec_change(state, mgr, limits);
		if (ret)
			return ret;

		ret = intel_dp_mst_check_bw(state, mgr, mst_state,
					    limits);
		if (ret)
			return ret;
	}

	return 0;
}
829 
830 static int intel_dp_mst_compute_config_late(struct intel_encoder *encoder,
831 					    struct intel_crtc_state *crtc_state,
832 					    struct drm_connector_state *conn_state)
833 {
834 	struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
835 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
836 	struct intel_dp *intel_dp = &intel_mst->primary->dp;
837 
838 	/* lowest numbered transcoder will be designated master */
839 	crtc_state->mst_master_transcoder =
840 		ffs(intel_dp_mst_transcoder_mask(state, intel_dp)) - 1;
841 
842 	return 0;
843 }
844 
/*
 * If one of the connectors in a MST stream needs a modeset, mark all CRTCs
 * that share the same MST stream as mode changed,
 * intel_modeset_pipe_config()+intel_crtc_check_fastset() will take care to do
 * a fastset when possible.
 *
 * On TGL+ this is required since each stream goes through a master
 * transcoder, so if the master transcoder needs a modeset, all other streams
 * in the topology need a modeset. All platforms need to add the atomic state
 * for all streams in the topology, since a modeset on one may require
 * changing the MST link BW usage of the others, which in turn needs a
 * recomputation of the corresponding CRTC states.
 */
static int
intel_dp_mst_atomic_topology_check(struct intel_connector *connector,
				   struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct drm_connector_list_iter connector_list_iter;
	struct intel_connector *connector_iter;
	int ret = 0;

	if (!intel_connector_needs_modeset(state, &connector->base))
		return 0;

	drm_connector_list_iter_begin(&dev_priv->drm, &connector_list_iter);
	for_each_intel_connector_iter(connector_iter, &connector_list_iter) {
		struct intel_digital_connector_state *conn_iter_state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		/* Only sibling connectors in the same MST topology matter. */
		if (connector_iter->mst_port != connector->mst_port ||
		    connector_iter == connector)
			continue;

		conn_iter_state = intel_atomic_get_digital_connector_state(state,
									   connector_iter);
		if (IS_ERR(conn_iter_state)) {
			ret = PTR_ERR(conn_iter_state);
			break;
		}

		if (!conn_iter_state->base.crtc)
			continue;

		/* Pull the sibling's CRTC into the state and force a recompute. */
		crtc = to_intel_crtc(conn_iter_state->base.crtc);
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			break;
		}

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			break;
		crtc_state->uapi.mode_changed = true;
	}
	drm_connector_list_iter_end(&connector_list_iter);

	return ret;
}
906 
907 static int
908 intel_dp_mst_atomic_check(struct drm_connector *connector,
909 			  struct drm_atomic_state *_state)
910 {
911 	struct intel_atomic_state *state = to_intel_atomic_state(_state);
912 	struct intel_connector *intel_connector =
913 		to_intel_connector(connector);
914 	int ret;
915 
916 	ret = intel_digital_connector_atomic_check(connector, &state->base);
917 	if (ret)
918 		return ret;
919 
920 	ret = intel_dp_mst_atomic_topology_check(intel_connector, state);
921 	if (ret)
922 		return ret;
923 
924 	if (intel_connector_needs_modeset(state, connector)) {
925 		ret = intel_dp_tunnel_atomic_check_state(state,
926 							 intel_connector->mst_port,
927 							 intel_connector);
928 		if (ret)
929 			return ret;
930 	}
931 
932 	return drm_dp_atomic_release_time_slots(&state->base,
933 						&intel_connector->mst_port->mst_mgr,
934 						intel_connector->port);
935 }
936 
937 static void clear_act_sent(struct intel_encoder *encoder,
938 			   const struct intel_crtc_state *crtc_state)
939 {
940 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
941 
942 	intel_de_write(i915, dp_tp_status_reg(encoder, crtc_state),
943 		       DP_TP_STATUS_ACT_SENT);
944 }
945 
946 static void wait_for_act_sent(struct intel_encoder *encoder,
947 			      const struct intel_crtc_state *crtc_state)
948 {
949 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
950 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
951 	struct intel_dp *intel_dp = &intel_mst->primary->dp;
952 
953 	if (intel_de_wait_for_set(i915, dp_tp_status_reg(encoder, crtc_state),
954 				  DP_TP_STATUS_ACT_SENT, 1))
955 		drm_err(&i915->drm, "Timed out waiting for ACT sent\n");
956 
957 	drm_dp_check_act_status(&intel_dp->mst_mgr);
958 }
959 
960 static void intel_mst_disable_dp(struct intel_atomic_state *state,
961 				 struct intel_encoder *encoder,
962 				 const struct intel_crtc_state *old_crtc_state,
963 				 const struct drm_connector_state *old_conn_state)
964 {
965 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
966 	struct intel_digital_port *dig_port = intel_mst->primary;
967 	struct intel_dp *intel_dp = &dig_port->dp;
968 	struct intel_connector *connector =
969 		to_intel_connector(old_conn_state->connector);
970 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
971 
972 	drm_dbg_kms(&i915->drm, "active links %d\n",
973 		    intel_dp->active_mst_links);
974 
975 	if (intel_dp->active_mst_links == 1)
976 		intel_dp->link_trained = false;
977 
978 	intel_hdcp_disable(intel_mst->connector);
979 
980 	intel_dp_sink_disable_decompression(state, connector, old_crtc_state);
981 }
982 
/*
 * Tear down an MST stream: stop the pipe(s) and transcoder, remove the
 * stream's payload from the MST link and power down the sink path. If
 * this was the last active stream on the link, also disable the common
 * port via the primary encoder's post_disable hook.
 */
static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
				      struct intel_encoder *encoder,
				      const struct intel_crtc_state *old_crtc_state,
				      const struct drm_connector_state *old_conn_state)
{
	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
	struct intel_digital_port *dig_port = intel_mst->primary;
	struct intel_dp *intel_dp = &dig_port->dp;
	struct intel_connector *connector =
		to_intel_connector(old_conn_state->connector);
	struct drm_dp_mst_topology_state *old_mst_state =
		drm_atomic_get_old_mst_topology_state(&state->base, &intel_dp->mst_mgr);
	struct drm_dp_mst_topology_state *new_mst_state =
		drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
	const struct drm_dp_mst_atomic_payload *old_payload =
		drm_atomic_get_mst_payload_state(old_mst_state, connector->port);
	struct drm_dp_mst_atomic_payload *new_payload =
		drm_atomic_get_mst_payload_state(new_mst_state, connector->port);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_crtc *pipe_crtc;
	bool last_mst_stream;

	intel_dp->active_mst_links--;
	last_mst_stream = intel_dp->active_mst_links == 0;
	/* On TGL+ the last stream must be on the master transcoder. */
	drm_WARN_ON(&dev_priv->drm,
		    DISPLAY_VER(dev_priv) >= 12 && last_mst_stream &&
		    !intel_dp_mst_is_master_trans(old_crtc_state));

	/* Turn off vblank handling on every joined pipe of this stream. */
	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
					 intel_crtc_joined_pipe_mask(old_crtc_state)) {
		const struct intel_crtc_state *old_pipe_crtc_state =
			intel_atomic_get_old_crtc_state(state, pipe_crtc);

		intel_crtc_vblank_off(old_pipe_crtc_state);
	}

	intel_disable_transcoder(old_crtc_state);

	/* Drop the payload's time slot allocation on the source side. */
	drm_dp_remove_payload_part1(&intel_dp->mst_mgr, new_mst_state, new_payload);

	clear_act_sent(encoder, old_crtc_state);

	/* Trigger the deallocation; completion is signalled via ACT. */
	intel_de_rmw(dev_priv,
		     TRANS_DDI_FUNC_CTL(dev_priv, old_crtc_state->cpu_transcoder),
		     TRANS_DDI_DP_VC_PAYLOAD_ALLOC, 0);

	wait_for_act_sent(encoder, old_crtc_state);

	/* Finish the payload removal towards the branch devices. */
	drm_dp_remove_payload_part2(&intel_dp->mst_mgr, new_mst_state,
				    old_payload, new_payload);

	intel_ddi_disable_transcoder_func(old_crtc_state);

	/* Per-pipe cleanup: DSC and panel fitter/scaler. */
	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
					 intel_crtc_joined_pipe_mask(old_crtc_state)) {
		const struct intel_crtc_state *old_pipe_crtc_state =
			intel_atomic_get_old_crtc_state(state, pipe_crtc);

		intel_dsc_disable(old_pipe_crtc_state);

		if (DISPLAY_VER(dev_priv) >= 9)
			skl_scaler_disable(old_pipe_crtc_state);
		else
			ilk_pfit_disable(old_pipe_crtc_state);
	}

	/*
	 * Power down mst path before disabling the port, otherwise we end
	 * up getting interrupts from the sink upon detecting link loss.
	 */
	drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port,
				     false);

	/*
	 * BSpec 4287: disable DIP after the transcoder is disabled and before
	 * the transcoder clock select is set to none.
	 */
	intel_dp_set_infoframes(&dig_port->base, false,
				old_crtc_state, NULL);
	/*
	 * From TGL spec: "If multi-stream slave transcoder: Configure
	 * Transcoder Clock Select to direct no clock to the transcoder"
	 *
	 * From older GENs spec: "Configure Transcoder Clock Select to direct
	 * no clock to the transcoder"
	 */
	if (DISPLAY_VER(dev_priv) < 12 || !last_mst_stream)
		intel_ddi_disable_transcoder_clock(old_crtc_state);


	intel_mst->connector = NULL;
	if (last_mst_stream)
		dig_port->base.post_disable(state, &dig_port->base,
						  old_crtc_state, NULL);

	drm_dbg_kms(&dev_priv->drm, "active links %d\n",
		    intel_dp->active_mst_links);
}
1081 
1082 static void intel_mst_post_pll_disable_dp(struct intel_atomic_state *state,
1083 					  struct intel_encoder *encoder,
1084 					  const struct intel_crtc_state *old_crtc_state,
1085 					  const struct drm_connector_state *old_conn_state)
1086 {
1087 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
1088 	struct intel_digital_port *dig_port = intel_mst->primary;
1089 	struct intel_dp *intel_dp = &dig_port->dp;
1090 
1091 	if (intel_dp->active_mst_links == 0 &&
1092 	    dig_port->base.post_pll_disable)
1093 		dig_port->base.post_pll_disable(state, encoder, old_crtc_state, old_conn_state);
1094 }
1095 
1096 static void intel_mst_pre_pll_enable_dp(struct intel_atomic_state *state,
1097 					struct intel_encoder *encoder,
1098 					const struct intel_crtc_state *pipe_config,
1099 					const struct drm_connector_state *conn_state)
1100 {
1101 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
1102 	struct intel_digital_port *dig_port = intel_mst->primary;
1103 	struct intel_dp *intel_dp = &dig_port->dp;
1104 
1105 	if (intel_dp->active_mst_links == 0)
1106 		dig_port->base.pre_pll_enable(state, &dig_port->base,
1107 						    pipe_config, NULL);
1108 	else
1109 		/*
1110 		 * The port PLL state needs to get updated for secondary
1111 		 * streams as for the primary stream.
1112 		 */
1113 		intel_ddi_update_active_dpll(state, &dig_port->base,
1114 					     to_intel_crtc(pipe_config->uapi.crtc));
1115 }
1116 
1117 static bool intel_mst_probed_link_params_valid(struct intel_dp *intel_dp,
1118 					       int link_rate, int lane_count)
1119 {
1120 	return intel_dp->link.mst_probed_rate == link_rate &&
1121 		intel_dp->link.mst_probed_lane_count == lane_count;
1122 }
1123 
1124 static void intel_mst_set_probed_link_params(struct intel_dp *intel_dp,
1125 					     int link_rate, int lane_count)
1126 {
1127 	intel_dp->link.mst_probed_rate = link_rate;
1128 	intel_dp->link.mst_probed_lane_count = lane_count;
1129 }
1130 
1131 static void intel_mst_reprobe_topology(struct intel_dp *intel_dp,
1132 				       const struct intel_crtc_state *crtc_state)
1133 {
1134 	if (intel_mst_probed_link_params_valid(intel_dp,
1135 					       crtc_state->port_clock, crtc_state->lane_count))
1136 		return;
1137 
1138 	drm_dp_mst_topology_queue_probe(&intel_dp->mst_mgr);
1139 
1140 	intel_mst_set_probed_link_params(intel_dp,
1141 					 crtc_state->port_clock, crtc_state->lane_count);
1142 }
1143 
/*
 * Bring up an MST stream prior to enabling the transcoder: power up the
 * sink path, enable the common port for the first stream on the link and
 * allocate the stream's payload time slots.
 */
static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
				    struct intel_encoder *encoder,
				    const struct intel_crtc_state *pipe_config,
				    const struct drm_connector_state *conn_state)
{
	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
	struct intel_digital_port *dig_port = intel_mst->primary;
	struct intel_dp *intel_dp = &dig_port->dp;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	struct drm_dp_mst_topology_state *mst_state =
		drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
	int ret;
	bool first_mst_stream;

	/* MST encoders are bound to a crtc, not to a connector,
	 * force the mapping here for get_hw_state.
	 */
	connector->encoder = encoder;
	intel_mst->connector = connector;
	first_mst_stream = intel_dp->active_mst_links == 0;
	/* On TGL+ the first stream must be on the master transcoder. */
	drm_WARN_ON(&dev_priv->drm,
		    DISPLAY_VER(dev_priv) >= 12 && first_mst_stream &&
		    !intel_dp_mst_is_master_trans(pipe_config));

	drm_dbg_kms(&dev_priv->drm, "active links %d\n",
		    intel_dp->active_mst_links);

	/* Wake the sink from D3 before any other AUX traffic. */
	if (first_mst_stream)
		intel_dp_set_power(intel_dp, DP_SET_POWER_D0);

	drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);

	intel_dp_sink_enable_decompression(state, connector, pipe_config);

	if (first_mst_stream) {
		/* Port-level enable (incl. link training) via the DDI hook. */
		dig_port->base.pre_enable(state, &dig_port->base,
						pipe_config, NULL);

		intel_mst_reprobe_topology(intel_dp, pipe_config);
	}

	intel_dp->active_mst_links++;

	/* Allocate the stream's time slots; on failure retry via hotplug. */
	ret = drm_dp_add_payload_part1(&intel_dp->mst_mgr, mst_state,
				       drm_atomic_get_mst_payload_state(mst_state, connector->port));
	if (ret < 0)
		intel_dp_queue_modeset_retry_for_link(state, &dig_port->base, pipe_config);

	/*
	 * Before Gen 12 this is not done as part of
	 * dig_port->base.pre_enable() and should be done here. For
	 * Gen 12+ the step in which this should be done is different for the
	 * first MST stream, so it's done on the DDI for the first stream and
	 * here for the following ones.
	 */
	if (DISPLAY_VER(dev_priv) < 12 || !first_mst_stream)
		intel_ddi_enable_transcoder_clock(encoder, pipe_config);

	intel_dsc_dp_pps_write(&dig_port->base, pipe_config);
	intel_ddi_set_dp_msa(pipe_config, conn_state);
}
1207 
1208 static void enable_bs_jitter_was(const struct intel_crtc_state *crtc_state)
1209 {
1210 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1211 	u32 clear = 0;
1212 	u32 set = 0;
1213 
1214 	if (!IS_ALDERLAKE_P(i915))
1215 		return;
1216 
1217 	if (!IS_DISPLAY_STEP(i915, STEP_D0, STEP_FOREVER))
1218 		return;
1219 
1220 	/* Wa_14013163432:adlp */
1221 	if (crtc_state->fec_enable || intel_dp_is_uhbr(crtc_state))
1222 		set |= DP_MST_FEC_BS_JITTER_WA(crtc_state->cpu_transcoder);
1223 
1224 	/* Wa_14014143976:adlp */
1225 	if (IS_DISPLAY_STEP(i915, STEP_E0, STEP_FOREVER)) {
1226 		if (intel_dp_is_uhbr(crtc_state))
1227 			set |= DP_MST_SHORT_HBLANK_WA(crtc_state->cpu_transcoder);
1228 		else if (crtc_state->fec_enable)
1229 			clear |= DP_MST_SHORT_HBLANK_WA(crtc_state->cpu_transcoder);
1230 
1231 		if (crtc_state->fec_enable || intel_dp_is_uhbr(crtc_state))
1232 			set |= DP_MST_DPT_DPTP_ALIGN_WA(crtc_state->cpu_transcoder);
1233 	}
1234 
1235 	if (!clear && !set)
1236 		return;
1237 
1238 	intel_de_rmw(i915, CHICKEN_MISC_3, clear, set);
1239 }
1240 
/*
 * Final enable step for an MST stream: program the transcoder, trigger
 * the payload allocation via ACT, complete the payload towards the sink
 * and start the pipe(s).
 */
static void intel_mst_enable_dp(struct intel_atomic_state *state,
				struct intel_encoder *encoder,
				const struct intel_crtc_state *pipe_config,
				const struct drm_connector_state *conn_state)
{
	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
	struct intel_digital_port *dig_port = intel_mst->primary;
	struct intel_dp *intel_dp = &dig_port->dp;
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct drm_dp_mst_topology_state *mst_state =
		drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
	enum transcoder trans = pipe_config->cpu_transcoder;
	/* active_mst_links was already bumped in pre_enable, hence == 1. */
	bool first_mst_stream = intel_dp->active_mst_links == 1;
	struct intel_crtc *pipe_crtc;
	int ret;

	drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder);

	/* For UHBR rates program the 48-bit pixel clock in Hz. */
	if (intel_dp_is_uhbr(pipe_config)) {
		const struct drm_display_mode *adjusted_mode =
			&pipe_config->hw.adjusted_mode;
		u64 crtc_clock_hz = KHz(adjusted_mode->crtc_clock);

		intel_de_write(dev_priv, TRANS_DP2_VFREQHIGH(pipe_config->cpu_transcoder),
			       TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz >> 24));
		intel_de_write(dev_priv, TRANS_DP2_VFREQLOW(pipe_config->cpu_transcoder),
			       TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz & 0xffffff));
	}

	enable_bs_jitter_was(pipe_config);

	intel_ddi_enable_transcoder_func(encoder, pipe_config);

	clear_act_sent(encoder, pipe_config);

	/* Trigger the payload allocation; completion is signalled via ACT. */
	intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, trans), 0,
		     TRANS_DDI_DP_VC_PAYLOAD_ALLOC);

	drm_dbg_kms(&dev_priv->drm, "active links %d\n",
		    intel_dp->active_mst_links);

	wait_for_act_sent(encoder, pipe_config);

	if (first_mst_stream)
		intel_ddi_wait_for_fec_status(encoder, pipe_config, true);

	/* Complete the payload towards the sink; on failure retry the link. */
	ret = drm_dp_add_payload_part2(&intel_dp->mst_mgr,
				       drm_atomic_get_mst_payload_state(mst_state,
									connector->port));
	if (ret < 0)
		intel_dp_queue_modeset_retry_for_link(state, &dig_port->base, pipe_config);

	if (DISPLAY_VER(dev_priv) >= 12)
		intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, trans),
			     FECSTALL_DIS_DPTSTREAM_DPTTG,
			     pipe_config->fec_enable ? FECSTALL_DIS_DPTSTREAM_DPTTG : 0);

	intel_audio_sdp_split_update(pipe_config);

	intel_enable_transcoder(pipe_config);

	/* Enable vblank handling on all joined pipes, slaves first. */
	for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
						 intel_crtc_joined_pipe_mask(pipe_config)) {
		const struct intel_crtc_state *pipe_crtc_state =
			intel_atomic_get_new_crtc_state(state, pipe_crtc);

		intel_crtc_vblank_on(pipe_crtc_state);
	}

	intel_hdcp_enable(state, encoder, pipe_config, conn_state);
}
1313 
1314 static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
1315 				      enum pipe *pipe)
1316 {
1317 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
1318 	*pipe = intel_mst->pipe;
1319 	if (intel_mst->connector)
1320 		return true;
1321 	return false;
1322 }
1323 
1324 static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
1325 					struct intel_crtc_state *pipe_config)
1326 {
1327 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
1328 	struct intel_digital_port *dig_port = intel_mst->primary;
1329 
1330 	dig_port->base.get_config(&dig_port->base, pipe_config);
1331 }
1332 
1333 static bool intel_dp_mst_initial_fastset_check(struct intel_encoder *encoder,
1334 					       struct intel_crtc_state *crtc_state)
1335 {
1336 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
1337 	struct intel_digital_port *dig_port = intel_mst->primary;
1338 
1339 	return intel_dp_initial_fastset_check(&dig_port->base, crtc_state);
1340 }
1341 
1342 static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
1343 {
1344 	struct intel_connector *intel_connector = to_intel_connector(connector);
1345 	struct drm_i915_private *i915 = to_i915(intel_connector->base.dev);
1346 	struct intel_dp *intel_dp = intel_connector->mst_port;
1347 	const struct drm_edid *drm_edid;
1348 	int ret;
1349 
1350 	if (drm_connector_is_unregistered(connector))
1351 		return intel_connector_update_modes(connector, NULL);
1352 
1353 	if (!intel_display_driver_check_access(i915))
1354 		return drm_edid_connector_add_modes(connector);
1355 
1356 	drm_edid = drm_dp_mst_edid_read(connector, &intel_dp->mst_mgr, intel_connector->port);
1357 
1358 	ret = intel_connector_update_modes(connector, drm_edid);
1359 
1360 	drm_edid_free(drm_edid);
1361 
1362 	return ret;
1363 }
1364 
1365 static int
1366 intel_dp_mst_connector_late_register(struct drm_connector *connector)
1367 {
1368 	struct intel_connector *intel_connector = to_intel_connector(connector);
1369 	int ret;
1370 
1371 	ret = drm_dp_mst_connector_late_register(connector,
1372 						 intel_connector->port);
1373 	if (ret < 0)
1374 		return ret;
1375 
1376 	ret = intel_connector_register(connector);
1377 	if (ret < 0)
1378 		drm_dp_mst_connector_early_unregister(connector,
1379 						      intel_connector->port);
1380 
1381 	return ret;
1382 }
1383 
1384 static void
1385 intel_dp_mst_connector_early_unregister(struct drm_connector *connector)
1386 {
1387 	struct intel_connector *intel_connector = to_intel_connector(connector);
1388 
1389 	intel_connector_unregister(connector);
1390 	drm_dp_mst_connector_early_unregister(connector,
1391 					      intel_connector->port);
1392 }
1393 
/* DRM connector funcs for MST connectors; state handling is shared with SST. */
static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_digital_connector_atomic_get_property,
	.atomic_set_property = intel_digital_connector_atomic_set_property,
	.late_register = intel_dp_mst_connector_late_register,
	.early_unregister = intel_dp_mst_connector_early_unregister,
	.destroy = intel_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
1404 
/* .get_modes hook: modes come solely from the sink's EDID over the MST AUX. */
static int intel_dp_mst_get_modes(struct drm_connector *connector)
{
	return intel_dp_mst_get_ddc_modes(connector);
}
1409 
/*
 * .mode_valid_ctx hook: filter modes that can't be carried over the MST
 * link given the max link rate/lane count, the branch device's available
 * PBN, the max dotclock and (where required for joined pipes) DSC
 * feasibility. Writes the verdict to *status and returns 0, or a
 * negative error (e.g. a modeset-lock contention code) to be retried.
 */
static int
intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
			    struct drm_display_mode *mode,
			    struct drm_modeset_acquire_ctx *ctx,
			    enum drm_mode_status *status)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_dp *intel_dp = intel_connector->mst_port;
	struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst_mgr;
	struct drm_dp_mst_port *port = intel_connector->port;
	const int min_bpp = 18;
	int max_dotclk = to_i915(connector->dev)->display.cdclk.max_dotclk_freq;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int ret;
	bool dsc = false, joiner = false;
	u16 dsc_max_compressed_bpp = 0;
	u8 dsc_slice_count = 0;
	int target_clock = mode->clock;

	if (drm_connector_is_unregistered(connector)) {
		*status = MODE_ERROR;
		return 0;
	}

	*status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
	if (*status != MODE_OK)
		return 0;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
		*status = MODE_H_ILLEGAL;
		return 0;
	}

	if (mode->clock < 10000) {
		*status = MODE_CLOCK_LOW;
		return 0;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_link_data_rate(intel_dp,
					       max_link_clock, max_lanes);
	/* Uncompressed data rate of the mode at the minimum (6 bpc) bpp. */
	mode_rate = intel_dp_link_required(mode->clock, min_bpp);

	/*
	 * TODO:
	 * - Also check if compression would allow for the mode
	 * - Calculate the overhead using drm_dp_bw_overhead() /
	 *   drm_dp_bw_channel_coding_efficiency(), similarly to the
	 *   compute config code, as drm_dp_calc_pbn_mode() doesn't
	 *   account with all the overheads.
	 * - Check here and during compute config the BW reported by
	 *   DFP_Link_Available_Payload_Bandwidth_Number (or the
	 *   corresponding link capabilities of the sink) in case the
	 *   stream is uncompressed for it by the last branch device.
	 */
	if (intel_dp_need_joiner(intel_dp, intel_connector,
				 mode->hdisplay, target_clock)) {
		joiner = true;
		max_dotclk *= 2;
	}

	/* port->full_pbn is protected by the topology manager's lock. */
	ret = drm_modeset_lock(&mgr->base.lock, ctx);
	if (ret)
		return ret;

	if (mode_rate > max_rate || mode->clock > max_dotclk ||
	    drm_dp_calc_pbn_mode(mode->clock, min_bpp << 4) > port->full_pbn) {
		*status = MODE_CLOCK_HIGH;
		return 0;
	}

	if (intel_dp_has_dsc(intel_connector)) {
		/*
		 * TBD pass the connector BPC,
		 * for now U8_MAX so that max BPC on that platform would be picked
		 */
		int pipe_bpp = intel_dp_dsc_compute_max_bpp(intel_connector, U8_MAX);

		if (drm_dp_sink_supports_fec(intel_connector->dp.fec_capability)) {
			dsc_max_compressed_bpp =
				intel_dp_dsc_get_max_compressed_bpp(dev_priv,
								    max_link_clock,
								    max_lanes,
								    target_clock,
								    mode->hdisplay,
								    joiner,
								    INTEL_OUTPUT_FORMAT_RGB,
								    pipe_bpp, 64);
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(intel_connector,
							     target_clock,
							     mode->hdisplay,
							     joiner);
		}

		dsc = dsc_max_compressed_bpp && dsc_slice_count;
	}

	if (intel_dp_joiner_needs_dsc(dev_priv, joiner) && !dsc) {
		*status = MODE_CLOCK_HIGH;
		return 0;
	}

	/*
	 * NOTE(review): mode_rate > max_rate already returned MODE_CLOCK_HIGH
	 * above, so this check appears unreachable and DSC cannot currently
	 * rescue such modes - seemingly what the TODO above refers to; confirm
	 * before relying on the DSC path here.
	 */
	if (mode_rate > max_rate && !dsc) {
		*status = MODE_CLOCK_HIGH;
		return 0;
	}

	*status = intel_mode_valid_max_plane_size(dev_priv, mode, joiner);
	return 0;
}
1524 
1525 static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector,
1526 							 struct drm_atomic_state *state)
1527 {
1528 	struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
1529 											 connector);
1530 	struct intel_connector *intel_connector = to_intel_connector(connector);
1531 	struct intel_dp *intel_dp = intel_connector->mst_port;
1532 	struct intel_crtc *crtc = to_intel_crtc(connector_state->crtc);
1533 
1534 	return &intel_dp->mst_encoders[crtc->pipe]->base.base;
1535 }
1536 
1537 static int
1538 intel_dp_mst_detect(struct drm_connector *connector,
1539 		    struct drm_modeset_acquire_ctx *ctx, bool force)
1540 {
1541 	struct drm_i915_private *i915 = to_i915(connector->dev);
1542 	struct intel_connector *intel_connector = to_intel_connector(connector);
1543 	struct intel_dp *intel_dp = intel_connector->mst_port;
1544 
1545 	if (!intel_display_device_enabled(i915))
1546 		return connector_status_disconnected;
1547 
1548 	if (drm_connector_is_unregistered(connector))
1549 		return connector_status_disconnected;
1550 
1551 	if (!intel_display_driver_check_access(i915))
1552 		return connector->status;
1553 
1554 	return drm_dp_mst_detect_port(connector, ctx, &intel_dp->mst_mgr,
1555 				      intel_connector->port);
1556 }
1557 
/* Probe/modeset helper hooks for MST connectors. */
static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
	.get_modes = intel_dp_mst_get_modes,
	.mode_valid_ctx = intel_dp_mst_mode_valid_ctx,
	.atomic_best_encoder = intel_mst_atomic_best_encoder,
	.atomic_check = intel_dp_mst_atomic_check,
	.detect_ctx = intel_dp_mst_detect,
};
1565 
/* Destroy a fake MST encoder; the drm encoder is embedded in intel_mst. */
static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_dp_mst_encoder *intel_mst =
		enc_to_mst(to_intel_encoder(encoder));

	drm_encoder_cleanup(encoder);
	kfree(intel_mst);
}
1573 
/* Encoder funcs for the per-pipe fake MST encoders. */
static const struct drm_encoder_funcs intel_dp_mst_enc_funcs = {
	.destroy = intel_dp_mst_encoder_destroy,
};
1577 
1578 static bool intel_dp_mst_get_hw_state(struct intel_connector *connector)
1579 {
1580 	if (intel_attached_encoder(connector) && connector->base.state->crtc) {
1581 		enum pipe pipe;
1582 		if (!intel_attached_encoder(connector)->get_hw_state(intel_attached_encoder(connector), &pipe))
1583 			return false;
1584 		return true;
1585 	}
1586 	return false;
1587 }
1588 
1589 static int intel_dp_mst_add_properties(struct intel_dp *intel_dp,
1590 				       struct drm_connector *connector,
1591 				       const char *pathprop)
1592 {
1593 	struct drm_i915_private *i915 = to_i915(connector->dev);
1594 
1595 	drm_object_attach_property(&connector->base,
1596 				   i915->drm.mode_config.path_property, 0);
1597 	drm_object_attach_property(&connector->base,
1598 				   i915->drm.mode_config.tile_property, 0);
1599 
1600 	intel_attach_force_audio_property(connector);
1601 	intel_attach_broadcast_rgb_property(connector);
1602 
1603 	/*
1604 	 * Reuse the prop from the SST connector because we're
1605 	 * not allowed to create new props after device registration.
1606 	 */
1607 	connector->max_bpc_property =
1608 		intel_dp->attached_connector->base.max_bpc_property;
1609 	if (connector->max_bpc_property)
1610 		drm_connector_attach_max_bpc_property(connector, 6, 12);
1611 
1612 	return drm_connector_set_path_property(connector, pathprop);
1613 }
1614 
1615 static void
1616 intel_dp_mst_read_decompression_port_dsc_caps(struct intel_dp *intel_dp,
1617 					      struct intel_connector *connector)
1618 {
1619 	u8 dpcd_caps[DP_RECEIVER_CAP_SIZE];
1620 
1621 	if (!connector->dp.dsc_decompression_aux)
1622 		return;
1623 
1624 	if (drm_dp_read_dpcd_caps(connector->dp.dsc_decompression_aux, dpcd_caps) < 0)
1625 		return;
1626 
1627 	intel_dp_get_dsc_sink_cap(dpcd_caps[DP_DPCD_REV], connector);
1628 }
1629 
/*
 * Detect sinks with the DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC quirk,
 * i.e. devices that need DSC enabled for modes with a short HBLANK.
 * Returns true if the quirk applies to this connector.
 */
static bool detect_dsc_hblank_expansion_quirk(const struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct drm_dp_aux *aux = connector->dp.dsc_decompression_aux;
	struct drm_dp_desc desc;
	u8 dpcd[DP_RECEIVER_CAP_SIZE];

	if (!aux)
		return false;

	/*
	 * A logical port's OUI (at least for affected sinks) is all 0, so
	 * instead of that the parent port's OUI is used for identification.
	 */
	if (drm_dp_mst_port_is_logical(connector->port)) {
		aux = drm_dp_mst_aux_for_parent(connector->port);
		if (!aux)
			aux = &connector->mst_port->aux;
	}

	if (drm_dp_read_dpcd_caps(aux, dpcd) < 0)
		return false;

	if (drm_dp_read_desc(aux, &desc, drm_dp_is_branch(dpcd)) < 0)
		return false;

	/* The quirk itself is matched by the device descriptor (OUI etc.). */
	if (!drm_dp_has_quirk(&desc,
			      DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC))
		return false;

	/*
	 * UHBR (MST sink) devices requiring this quirk don't advertise the
	 * HBLANK expansion support. Presuming that they perform HBLANK
	 * expansion internally, or are affected by this issue on modes with a
	 * short HBLANK for other reasons.
	 */
	if (!drm_dp_128b132b_supported(dpcd) &&
	    !(dpcd[DP_RECEIVE_PORT_0_CAP_0] & DP_HBLANK_EXPANSION_CAPABLE))
		return false;

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] DSC HBLANK expansion quirk detected\n",
		    connector->base.base.id, connector->base.name);

	return true;
}
1676 
/*
 * MST topology manager .add_connector callback: allocate and initialize a
 * connector for a newly discovered MST port, attach it to all per-pipe
 * fake encoders and set up its properties and HDCP. Returns the new
 * connector or NULL on failure.
 */
static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
							struct drm_dp_mst_port *port,
							const char *pathprop)
{
	struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_connector *intel_connector;
	struct drm_connector *connector;
	enum pipe pipe;
	int ret;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		return NULL;

	intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
	intel_connector->sync_state = intel_dp_connector_sync_state;
	intel_connector->mst_port = intel_dp;
	intel_connector->port = port;
	/* Hold a malloc reference on the port for the connector's lifetime. */
	drm_dp_mst_get_port_malloc(port);

	intel_dp_init_modeset_retry_work(intel_connector);

	intel_connector->dp.dsc_decompression_aux = drm_dp_mst_dsc_aux_for_port(port);
	intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, intel_connector);
	intel_connector->dp.dsc_hblank_expansion_quirk =
		detect_dsc_hblank_expansion_quirk(intel_connector);

	connector = &intel_connector->base;
	ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
				 DRM_MODE_CONNECTOR_DisplayPort);
	if (ret) {
		drm_dp_mst_put_port_malloc(port);
		intel_connector_free(intel_connector);
		return NULL;
	}

	drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);

	/* The connector may end up on any pipe's fake MST encoder. */
	for_each_pipe(dev_priv, pipe) {
		struct drm_encoder *enc =
			&intel_dp->mst_encoders[pipe]->base.base;

		ret = drm_connector_attach_encoder(&intel_connector->base, enc);
		if (ret)
			goto err;
	}

	ret = intel_dp_mst_add_properties(intel_dp, connector, pathprop);
	if (ret)
		goto err;

	/* HDCP is optional; carry on without it on failure. */
	ret = intel_dp_hdcp_init(dig_port, intel_connector);
	if (ret)
		drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP MST init failed, skipping.\n",
			    connector->name, connector->base.id);

	return connector;

err:
	drm_connector_cleanup(connector);
	return NULL;
}
1742 
1743 static void
1744 intel_dp_mst_poll_hpd_irq(struct drm_dp_mst_topology_mgr *mgr)
1745 {
1746 	struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
1747 
1748 	intel_hpd_trigger_irq(dp_to_dig_port(intel_dp));
1749 }
1750 
/* Topology manager callbacks: connector creation and HPD polling. */
static const struct drm_dp_mst_topology_cbs mst_cbs = {
	.add_connector = intel_dp_add_mst_connector,
	.poll_hpd_irq = intel_dp_mst_poll_hpd_irq,
};
1755 
1756 static struct intel_dp_mst_encoder *
1757 intel_dp_create_fake_mst_encoder(struct intel_digital_port *dig_port, enum pipe pipe)
1758 {
1759 	struct intel_dp_mst_encoder *intel_mst;
1760 	struct intel_encoder *intel_encoder;
1761 	struct drm_device *dev = dig_port->base.base.dev;
1762 
1763 	intel_mst = kzalloc(sizeof(*intel_mst), GFP_KERNEL);
1764 
1765 	if (!intel_mst)
1766 		return NULL;
1767 
1768 	intel_mst->pipe = pipe;
1769 	intel_encoder = &intel_mst->base;
1770 	intel_mst->primary = dig_port;
1771 
1772 	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs,
1773 			 DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe));
1774 
1775 	intel_encoder->type = INTEL_OUTPUT_DP_MST;
1776 	intel_encoder->power_domain = dig_port->base.power_domain;
1777 	intel_encoder->port = dig_port->base.port;
1778 	intel_encoder->cloneable = 0;
1779 	/*
1780 	 * This is wrong, but broken userspace uses the intersection
1781 	 * of possible_crtcs of all the encoders of a given connector
1782 	 * to figure out which crtcs can drive said connector. What
1783 	 * should be used instead is the union of possible_crtcs.
1784 	 * To keep such userspace functioning we must misconfigure
1785 	 * this to make sure the intersection is not empty :(
1786 	 */
1787 	intel_encoder->pipe_mask = ~0;
1788 
1789 	intel_encoder->compute_config = intel_dp_mst_compute_config;
1790 	intel_encoder->compute_config_late = intel_dp_mst_compute_config_late;
1791 	intel_encoder->disable = intel_mst_disable_dp;
1792 	intel_encoder->post_disable = intel_mst_post_disable_dp;
1793 	intel_encoder->post_pll_disable = intel_mst_post_pll_disable_dp;
1794 	intel_encoder->update_pipe = intel_ddi_update_pipe;
1795 	intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp;
1796 	intel_encoder->pre_enable = intel_mst_pre_enable_dp;
1797 	intel_encoder->enable = intel_mst_enable_dp;
1798 	intel_encoder->audio_enable = intel_audio_codec_enable;
1799 	intel_encoder->audio_disable = intel_audio_codec_disable;
1800 	intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state;
1801 	intel_encoder->get_config = intel_dp_mst_enc_get_config;
1802 	intel_encoder->initial_fastset_check = intel_dp_mst_initial_fastset_check;
1803 
1804 	return intel_mst;
1805 
1806 }
1807 
1808 static bool
1809 intel_dp_create_fake_mst_encoders(struct intel_digital_port *dig_port)
1810 {
1811 	struct intel_dp *intel_dp = &dig_port->dp;
1812 	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
1813 	enum pipe pipe;
1814 
1815 	for_each_pipe(dev_priv, pipe)
1816 		intel_dp->mst_encoders[pipe] = intel_dp_create_fake_mst_encoder(dig_port, pipe);
1817 	return true;
1818 }
1819 
1820 int
1821 intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port)
1822 {
1823 	return dig_port->dp.active_mst_links;
1824 }
1825 
1826 int
1827 intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
1828 {
1829 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
1830 	struct intel_dp *intel_dp = &dig_port->dp;
1831 	enum port port = dig_port->base.port;
1832 	int ret;
1833 
1834 	if (!HAS_DP_MST(i915) || intel_dp_is_edp(intel_dp))
1835 		return 0;
1836 
1837 	if (DISPLAY_VER(i915) < 12 && port == PORT_A)
1838 		return 0;
1839 
1840 	if (DISPLAY_VER(i915) < 11 && port == PORT_E)
1841 		return 0;
1842 
1843 	intel_dp->mst_mgr.cbs = &mst_cbs;
1844 
1845 	/* create encoders */
1846 	intel_dp_create_fake_mst_encoders(dig_port);
1847 	ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, &i915->drm,
1848 					   &intel_dp->aux, 16, 3, conn_base_id);
1849 	if (ret) {
1850 		intel_dp->mst_mgr.cbs = NULL;
1851 		return ret;
1852 	}
1853 
1854 	return 0;
1855 }
1856 
1857 bool intel_dp_mst_source_support(struct intel_dp *intel_dp)
1858 {
1859 	return intel_dp->mst_mgr.cbs;
1860 }
1861 
1862 void
1863 intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port)
1864 {
1865 	struct intel_dp *intel_dp = &dig_port->dp;
1866 
1867 	if (!intel_dp_mst_source_support(intel_dp))
1868 		return;
1869 
1870 	drm_dp_mst_topology_mgr_destroy(&intel_dp->mst_mgr);
1871 	/* encoders will get killed by normal cleanup */
1872 
1873 	intel_dp->mst_mgr.cbs = NULL;
1874 }
1875 
1876 bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state)
1877 {
1878 	return crtc_state->mst_master_transcoder == crtc_state->cpu_transcoder;
1879 }
1880 
1881 bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state)
1882 {
1883 	return crtc_state->mst_master_transcoder != INVALID_TRANSCODER &&
1884 	       crtc_state->mst_master_transcoder != crtc_state->cpu_transcoder;
1885 }
1886 
1887 /**
1888  * intel_dp_mst_add_topology_state_for_connector - add MST topology state for a connector
1889  * @state: atomic state
1890  * @connector: connector to add the state for
1891  * @crtc: the CRTC @connector is attached to
1892  *
1893  * Add the MST topology state for @connector to @state.
1894  *
1895  * Returns 0 on success, negative error code on failure.
1896  */
1897 static int
1898 intel_dp_mst_add_topology_state_for_connector(struct intel_atomic_state *state,
1899 					      struct intel_connector *connector,
1900 					      struct intel_crtc *crtc)
1901 {
1902 	struct drm_dp_mst_topology_state *mst_state;
1903 
1904 	if (!connector->mst_port)
1905 		return 0;
1906 
1907 	mst_state = drm_atomic_get_mst_topology_state(&state->base,
1908 						      &connector->mst_port->mst_mgr);
1909 	if (IS_ERR(mst_state))
1910 		return PTR_ERR(mst_state);
1911 
1912 	mst_state->pending_crtc_mask |= drm_crtc_mask(&crtc->base);
1913 
1914 	return 0;
1915 }
1916 
1917 /**
1918  * intel_dp_mst_add_topology_state_for_crtc - add MST topology state for a CRTC
1919  * @state: atomic state
1920  * @crtc: CRTC to add the state for
1921  *
1922  * Add the MST topology state for @crtc to @state.
1923  *
1924  * Returns 0 on success, negative error code on failure.
1925  */
1926 int intel_dp_mst_add_topology_state_for_crtc(struct intel_atomic_state *state,
1927 					     struct intel_crtc *crtc)
1928 {
1929 	struct drm_connector *_connector;
1930 	struct drm_connector_state *conn_state;
1931 	int i;
1932 
1933 	for_each_new_connector_in_state(&state->base, _connector, conn_state, i) {
1934 		struct intel_connector *connector = to_intel_connector(_connector);
1935 		int ret;
1936 
1937 		if (conn_state->crtc != &crtc->base)
1938 			continue;
1939 
1940 		ret = intel_dp_mst_add_topology_state_for_connector(state, connector, crtc);
1941 		if (ret)
1942 			return ret;
1943 	}
1944 
1945 	return 0;
1946 }
1947 
1948 static struct intel_connector *
1949 get_connector_in_state_for_crtc(struct intel_atomic_state *state,
1950 				const struct intel_crtc *crtc)
1951 {
1952 	struct drm_connector_state *old_conn_state;
1953 	struct drm_connector_state *new_conn_state;
1954 	struct drm_connector *_connector;
1955 	int i;
1956 
1957 	for_each_oldnew_connector_in_state(&state->base, _connector,
1958 					   old_conn_state, new_conn_state, i) {
1959 		struct intel_connector *connector =
1960 			to_intel_connector(_connector);
1961 
1962 		if (old_conn_state->crtc == &crtc->base ||
1963 		    new_conn_state->crtc == &crtc->base)
1964 			return connector;
1965 	}
1966 
1967 	return NULL;
1968 }
1969 
1970 /**
1971  * intel_dp_mst_crtc_needs_modeset - check if changes in topology need to modeset the given CRTC
1972  * @state: atomic state
1973  * @crtc: CRTC for which to check the modeset requirement
1974  *
1975  * Check if any change in a MST topology requires a forced modeset on @crtc in
1976  * this topology. One such change is enabling/disabling the DSC decompression
1977  * state in the first branch device's UFP DPCD as required by one CRTC, while
1978  * the other @crtc in the same topology is still active, requiring a full modeset
1979  * on @crtc.
1980  */
1981 bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state,
1982 				     struct intel_crtc *crtc)
1983 {
1984 	const struct intel_connector *crtc_connector;
1985 	const struct drm_connector_state *conn_state;
1986 	const struct drm_connector *_connector;
1987 	int i;
1988 
1989 	if (!intel_crtc_has_type(intel_atomic_get_new_crtc_state(state, crtc),
1990 				 INTEL_OUTPUT_DP_MST))
1991 		return false;
1992 
1993 	crtc_connector = get_connector_in_state_for_crtc(state, crtc);
1994 
1995 	if (!crtc_connector)
1996 		/* None of the connectors in the topology needs modeset */
1997 		return false;
1998 
1999 	for_each_new_connector_in_state(&state->base, _connector, conn_state, i) {
2000 		const struct intel_connector *connector =
2001 			to_intel_connector(_connector);
2002 		const struct intel_crtc_state *new_crtc_state;
2003 		const struct intel_crtc_state *old_crtc_state;
2004 		struct intel_crtc *crtc_iter;
2005 
2006 		if (connector->mst_port != crtc_connector->mst_port ||
2007 		    !conn_state->crtc)
2008 			continue;
2009 
2010 		crtc_iter = to_intel_crtc(conn_state->crtc);
2011 
2012 		new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc_iter);
2013 		old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc_iter);
2014 
2015 		if (!intel_crtc_needs_modeset(new_crtc_state))
2016 			continue;
2017 
2018 		if (old_crtc_state->dsc.compression_enable ==
2019 		    new_crtc_state->dsc.compression_enable)
2020 			continue;
2021 		/*
2022 		 * Toggling the decompression flag because of this stream in
2023 		 * the first downstream branch device's UFP DPCD may reset the
2024 		 * whole branch device. To avoid the reset while other streams
2025 		 * are also active modeset the whole MST topology in this
2026 		 * case.
2027 		 */
2028 		if (connector->dp.dsc_decompression_aux ==
2029 		    &connector->mst_port->aux)
2030 			return true;
2031 	}
2032 
2033 	return false;
2034 }
2035 
2036 /**
2037  * intel_dp_mst_prepare_probe - Prepare an MST link for topology probing
2038  * @intel_dp: DP port object
2039  *
2040  * Prepare an MST link for topology probing, programming the target
2041  * link parameters to DPCD. This step is a requirement of the enumaration
2042  * of path resources during probing.
2043  */
2044 void intel_dp_mst_prepare_probe(struct intel_dp *intel_dp)
2045 {
2046 	int link_rate = intel_dp_max_link_rate(intel_dp);
2047 	int lane_count = intel_dp_max_lane_count(intel_dp);
2048 	u8 rate_select;
2049 	u8 link_bw;
2050 
2051 	if (intel_dp->link_trained)
2052 		return;
2053 
2054 	if (intel_mst_probed_link_params_valid(intel_dp, link_rate, lane_count))
2055 		return;
2056 
2057 	intel_dp_compute_rate(intel_dp, link_rate, &link_bw, &rate_select);
2058 
2059 	intel_dp_link_training_set_mode(intel_dp, link_rate, false);
2060 	intel_dp_link_training_set_bw(intel_dp, link_bw, rate_select, lane_count,
2061 				      drm_dp_enhanced_frame_cap(intel_dp->dpcd));
2062 
2063 	intel_mst_set_probed_link_params(intel_dp, link_rate, lane_count);
2064 }
2065 
2066 /*
2067  * intel_dp_mst_verify_dpcd_state - verify the MST SW enabled state wrt. the DPCD
2068  * @intel_dp: DP port object
2069  *
2070  * Verify if @intel_dp's MST enabled SW state matches the corresponding DPCD
2071  * state. A long HPD pulse - not long enough to be detected as a disconnected
2072  * state - could've reset the DPCD state, which requires tearing
2073  * down/recreating the MST topology.
2074  *
2075  * Returns %true if the SW MST enabled and DPCD states match, %false
2076  * otherwise.
2077  */
2078 bool intel_dp_mst_verify_dpcd_state(struct intel_dp *intel_dp)
2079 {
2080 	struct intel_display *display = to_intel_display(intel_dp);
2081 	struct intel_connector *connector = intel_dp->attached_connector;
2082 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2083 	struct intel_encoder *encoder = &dig_port->base;
2084 	int ret;
2085 	u8 val;
2086 
2087 	if (!intel_dp->is_mst)
2088 		return true;
2089 
2090 	ret = drm_dp_dpcd_readb(intel_dp->mst_mgr.aux, DP_MSTM_CTRL, &val);
2091 
2092 	/* Adjust the expected register value for SST + SideBand. */
2093 	if (ret < 0 || val != (DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC)) {
2094 		drm_dbg_kms(display->drm,
2095 			    "[CONNECTOR:%d:%s][ENCODER:%d:%s] MST mode got reset, removing topology (ret=%d, ctrl=0x%02x)\n",
2096 			    connector->base.base.id, connector->base.name,
2097 			    encoder->base.base.id, encoder->base.name,
2098 			    ret, val);
2099 
2100 		return false;
2101 	}
2102 
2103 	return true;
2104 }
2105