xref: /linux/drivers/gpu/drm/i915/display/intel_dp_mst.c (revision 53597deca0e38c30e6cd4ba2114fa42d2bcd85bb)
1 /*
2  * Copyright © 2008 Intel Corporation
3  *             2014 Red Hat Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22  * IN THE SOFTWARE.
23  *
24  */
25 
26 #include <linux/log2.h>
27 #include <linux/math.h>
28 
29 #include <drm/drm_atomic.h>
30 #include <drm/drm_atomic_helper.h>
31 #include <drm/drm_edid.h>
32 #include <drm/drm_fixed.h>
33 #include <drm/drm_print.h>
34 #include <drm/drm_probe_helper.h>
35 
36 #include "intel_atomic.h"
37 #include "intel_audio.h"
38 #include "intel_connector.h"
39 #include "intel_crtc.h"
40 #include "intel_ddi.h"
41 #include "intel_de.h"
42 #include "intel_display_driver.h"
43 #include "intel_display_regs.h"
44 #include "intel_display_types.h"
45 #include "intel_display_utils.h"
46 #include "intel_display_wa.h"
47 #include "intel_dp.h"
48 #include "intel_dp_hdcp.h"
49 #include "intel_dp_link_training.h"
50 #include "intel_dp_mst.h"
51 #include "intel_dp_test.h"
52 #include "intel_dp_tunnel.h"
53 #include "intel_dpio_phy.h"
54 #include "intel_hdcp.h"
55 #include "intel_hotplug.h"
56 #include "intel_link_bw.h"
57 #include "intel_pfit.h"
58 #include "intel_psr.h"
59 #include "intel_step.h"
60 #include "intel_vdsc.h"
61 #include "intel_vrr.h"
62 #include "skl_scaler.h"
63 
64 /*
65  * DP MST (DisplayPort Multi-Stream Transport)
66  *
67  * MST support on the source depends on the platform and port. DP initialization
68  * sets up MST for each MST capable encoder. This will become the primary
69  * encoder for the port.
70  *
71  * MST initialization of each primary encoder creates MST stream encoders, one
72  * per pipe, and initializes the MST topology manager. The MST stream encoders
73  * are sometimes called "fake encoders", because they're virtual, not
74  * physical. Thus there are (number of MST capable ports) x (number of pipes)
75  * MST stream encoders in total.
76  *
77  * Decision to use MST for a sink happens at detect on the connector attached to
78  * the primary encoder, and this will not change while the sink is connected. We
79  * always use MST when possible, including for SST sinks with sideband messaging
80  * support.
81  *
82  * The connectors for the MST streams are added and removed dynamically by the
83  * topology manager. Their connection status is also determined by the topology
84  * manager.
85  *
86  * On hardware, each transcoder may be associated with a single DDI
87  * port. Multiple transcoders may be associated with the same DDI port only if
88  * the port is in MST mode.
89  *
90  * On TGL+, all the transcoders streaming on the same DDI port will indicate a
91  * primary transcoder; the TGL_DP_TP_CTL and TGL_DP_TP_STATUS registers are
92  * relevant only on the primary transcoder. Prior to that, they are port
93  * registers.
94  */
95 
96 /* From fake MST stream encoder to primary encoder */
97 static struct intel_encoder *to_primary_encoder(struct intel_encoder *encoder)
98 {
99 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
100 	struct intel_digital_port *dig_port = intel_mst->primary;
101 
102 	return &dig_port->base;
103 }
104 
105 /* From fake MST stream encoder to primary DP */
106 static struct intel_dp *to_primary_dp(struct intel_encoder *encoder)
107 {
108 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
109 	struct intel_digital_port *dig_port = intel_mst->primary;
110 
111 	return &dig_port->dp;
112 }
113 
/* Number of MST streams currently enabled on this DP link. */
int intel_dp_mst_active_streams(struct intel_dp *intel_dp)
{
	return intel_dp->mst.active_streams;
}
118 
119 static bool intel_dp_mst_dec_active_streams(struct intel_dp *intel_dp)
120 {
121 	struct intel_display *display = to_intel_display(intel_dp);
122 
123 	drm_dbg_kms(display->drm, "active MST streams %d -> %d\n",
124 		    intel_dp->mst.active_streams, intel_dp->mst.active_streams - 1);
125 
126 	if (drm_WARN_ON(display->drm, intel_dp->mst.active_streams == 0))
127 		return true;
128 
129 	return --intel_dp->mst.active_streams == 0;
130 }
131 
132 static bool intel_dp_mst_inc_active_streams(struct intel_dp *intel_dp)
133 {
134 	struct intel_display *display = to_intel_display(intel_dp);
135 
136 	drm_dbg_kms(display->drm, "active MST streams %d -> %d\n",
137 		    intel_dp->mst.active_streams, intel_dp->mst.active_streams + 1);
138 
139 	return intel_dp->mst.active_streams++ == 0;
140 }
141 
/*
 * Maximum link bpp (integer, not bpp_x16) imposed by the DSC->DPT
 * interface width on UHBR links before LNL. Returns 0 if the limit
 * doesn't apply (non-UHBR, display ver >= 20, or no DSC).
 */
/* TODO: return a bpp_x16 value */
static int intel_dp_mst_max_dpt_bpp(const struct intel_crtc_state *crtc_state,
				    bool dsc)
{
	struct intel_display *display = to_intel_display(crtc_state);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	if (!intel_dp_is_uhbr(crtc_state) || DISPLAY_VER(display) >= 20 || !dsc)
		return 0;

	/*
	 * DSC->DPT interface width:
	 *   ICL-MTL: 72 bits (each branch has 72 bits, only left branch is used)
	 *   LNL+:    144 bits (not a bottleneck in any config)
	 *
	 * Bspec/49259 suggests that the FEC overhead needs to be
	 * applied here, though HW people claim that neither this FEC
	 * or any other overhead is applicable here (that is the actual
	 * available_bw is just symbol_clock * 72). However based on
	 * testing on MTL-P the
	 * - DELL U3224KBA display
	 * - Unigraf UCD-500 CTS test sink
	 * devices the
	 * - 5120x2880/995.59Mhz
	 * - 6016x3384/1357.23Mhz
	 * - 6144x3456/1413.39Mhz
	 * modes (all the ones having a DPT limit on the above devices),
	 * both the channel coding efficiency and an additional 3%
	 * overhead needs to be accounted for.
	 */
	return div64_u64(mul_u32_u32(intel_dp_link_symbol_clock(crtc_state->port_clock) * 72,
				     drm_dp_bw_channel_coding_efficiency(true)),
			 mul_u32_u32(adjusted_mode->crtc_clock, 1030000));
}
177 
178 static int intel_dp_mst_bw_overhead(const struct intel_crtc_state *crtc_state,
179 				    bool ssc, int dsc_slice_count, int bpp_x16)
180 {
181 	const struct drm_display_mode *adjusted_mode =
182 		&crtc_state->hw.adjusted_mode;
183 	unsigned long flags = DRM_DP_BW_OVERHEAD_MST;
184 
185 	flags |= ssc ? DRM_DP_BW_OVERHEAD_SSC_REF_CLK : 0;
186 	flags |= crtc_state->fec_enable ? DRM_DP_BW_OVERHEAD_FEC : 0;
187 
188 	return intel_dp_link_bw_overhead(crtc_state->port_clock,
189 					 crtc_state->lane_count,
190 					 adjusted_mode->hdisplay,
191 					 dsc_slice_count,
192 					 bpp_x16,
193 					 flags);
194 }
195 
/*
 * Compute the data/link M/N values for the stream and derive the TU size
 * from the resulting data M/N ratio (TU = data_m * 64 / data_n, rounded up).
 */
static void intel_dp_mst_compute_m_n(const struct intel_crtc_state *crtc_state,
				     int overhead,
				     int bpp_x16,
				     struct intel_link_m_n *m_n)
{
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	/* TODO: Check WA 14013163432 to set data M/N for full BW utilization. */
	intel_link_compute_m_n(bpp_x16, crtc_state->lane_count,
			       adjusted_mode->crtc_clock,
			       crtc_state->port_clock,
			       overhead,
			       m_n);

	m_n->tu = DIV_ROUND_UP_ULL(mul_u32_u32(m_n->data_m, 64), m_n->data_n);
}
213 
/*
 * Calculate the PBN value for the stream from its effective data rate,
 * taking the given BW overhead into account. One PBN corresponds to
 * 54/64 MBytes/sec, hence the * 64 / (54 * 1000) conversion.
 */
static int intel_dp_mst_calc_pbn(int pixel_clock, int bpp_x16, int bw_overhead)
{
	int effective_data_rate =
		intel_dp_effective_data_rate(pixel_clock, bpp_x16, bw_overhead);

	/*
	 * TODO: Use drm_dp_calc_pbn_mode() instead, once it's converted
	 * to calculate PBN with the BW overhead passed to it.
	 */
	return DIV_ROUND_UP(effective_data_rate * 64, 54 * 1000);
}
225 
226 static int intel_dp_mst_dsc_get_slice_count(const struct intel_connector *connector,
227 					    const struct intel_crtc_state *crtc_state)
228 {
229 	const struct drm_display_mode *adjusted_mode =
230 		&crtc_state->hw.adjusted_mode;
231 	int num_joined_pipes = intel_crtc_num_joined_pipes(crtc_state);
232 
233 	return intel_dp_dsc_get_slice_count(connector,
234 					    adjusted_mode->clock,
235 					    adjusted_mode->hdisplay,
236 					    num_joined_pipes);
237 }
238 
239 static void mst_stream_update_slots(const struct intel_crtc_state *crtc_state,
240 				    struct drm_dp_mst_topology_state *topology_state)
241 {
242 	u8 link_coding_cap = intel_dp_is_uhbr(crtc_state) ?
243 		DP_CAP_ANSI_128B132B : DP_CAP_ANSI_8B10B;
244 
245 	drm_dp_mst_update_slots(topology_state, link_coding_cap);
246 }
247 
/*
 * intel_dp_mtp_tu_compute_config - find a bpp/TU config fitting the link BW
 * @intel_dp: DP port (primary port for MST)
 * @crtc_state: stream's CRTC state; dp_m_n, fec_enable and
 *	pipe_bpp/dsc.compressed_bpp_x16 are updated on success
 * @conn_state: stream's connector state
 * @min_bpp_x16: minimum link bpp to try, in .4 fixed point
 * @max_bpp_x16: maximum link bpp to try, in .4 fixed point
 * @bpp_step_x16: bpp decrement per iteration (0 means try max_bpp_x16 only)
 * @dsc: whether the bpp values are DSC compressed bpps
 *
 * Walk bpp values from @max_bpp_x16 down to @min_bpp_x16, computing the
 * M/N/TU values for each, until one fits into the link's time slots
 * (checked via the MST topology state for MST, or against the 64 slot
 * maximum for SST). Used for both MST streams and 128b/132b SST.
 *
 * Returns 0 on success, -EDEADLK on an atomic-state lock contention that
 * must be propagated, or another negative error code if no bpp fits.
 */
int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
				   struct intel_crtc_state *crtc_state,
				   struct drm_connector_state *conn_state,
				   int min_bpp_x16, int max_bpp_x16, int bpp_step_x16, bool dsc)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_atomic_state *state = crtc_state->uapi.state;
	struct drm_dp_mst_topology_state *mst_state = NULL;
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
	int bpp_x16, slots = -EINVAL;
	int dsc_slice_count = 0;
	int max_dpt_bpp_x16;

	/* shouldn't happen, sanity check */
	drm_WARN_ON(display->drm, !dsc && (fxp_q4_to_frac(min_bpp_x16) ||
					   fxp_q4_to_frac(max_bpp_x16) ||
					   fxp_q4_to_frac(bpp_step_x16)));

	if (!bpp_step_x16) {
		/* Allow using zero step only to indicate single try for a given bpp. */
		drm_WARN_ON(display->drm, min_bpp_x16 != max_bpp_x16);
		bpp_step_x16 = 1;
	}

	if (is_mst) {
		mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst.mgr);
		if (IS_ERR(mst_state))
			return PTR_ERR(mst_state);

		mst_state->pbn_div = drm_dp_get_vc_payload_bw(crtc_state->port_clock,
							      crtc_state->lane_count);

		mst_stream_update_slots(crtc_state, mst_state);
	}

	/*
	 * NOTE: The following must reset crtc_state->fec_enable for UHBR/DSC
	 * after it was set by intel_dp_dsc_compute_config() ->
	 * intel_dp_needs_8b10b_fec().
	 */
	crtc_state->fec_enable = intel_dp_needs_8b10b_fec(crtc_state, dsc);
	/*
	 * If FEC gets enabled only because of another compressed stream, FEC
	 * may not be supported for this uncompressed stream on the whole link
	 * path until the sink DPRX. In this case a downstream branch device
	 * will disable FEC for the uncompressed stream as expected and so the
	 * FEC support doesn't need to be checked for this uncompressed stream.
	 */
	if (crtc_state->fec_enable && dsc &&
	    !intel_dp_supports_fec(intel_dp, connector, crtc_state))
		return -EINVAL;

	max_dpt_bpp_x16 = fxp_q4_from_int(intel_dp_mst_max_dpt_bpp(crtc_state, dsc));
	if (max_dpt_bpp_x16 && max_bpp_x16 > max_dpt_bpp_x16) {
		drm_dbg_kms(display->drm, "Limiting bpp to max DPT bpp (" FXP_Q4_FMT " -> " FXP_Q4_FMT ")\n",
			    FXP_Q4_ARGS(max_bpp_x16), FXP_Q4_ARGS(max_dpt_bpp_x16));
		max_bpp_x16 = max_dpt_bpp_x16;
	}

	drm_dbg_kms(display->drm, "Looking for slots in range min bpp " FXP_Q4_FMT " max bpp " FXP_Q4_FMT "\n",
		    FXP_Q4_ARGS(min_bpp_x16), FXP_Q4_ARGS(max_bpp_x16));

	if (dsc) {
		dsc_slice_count = intel_dp_mst_dsc_get_slice_count(connector, crtc_state);
		if (!dsc_slice_count) {
			drm_dbg_kms(display->drm, "Can't get valid DSC slice count\n");

			return -ENOSPC;
		}
	}

	drm_WARN_ON(display->drm, min_bpp_x16 % bpp_step_x16 || max_bpp_x16 % bpp_step_x16);

	/* Try each bpp from highest to lowest until one fits the link BW. */
	for (bpp_x16 = max_bpp_x16; bpp_x16 >= min_bpp_x16; bpp_x16 -= bpp_step_x16) {
		int local_bw_overhead;
		int link_bpp_x16;

		drm_dbg_kms(display->drm, "Trying bpp " FXP_Q4_FMT "\n", FXP_Q4_ARGS(bpp_x16));

		if (dsc && !intel_dp_dsc_valid_compressed_bpp(intel_dp, bpp_x16)) {
			/* SST must have validated the single bpp tried here already earlier. */
			drm_WARN_ON(display->drm, !is_mst);
			continue;
		}

		/*
		 * With DSC the link bpp is the compressed bpp itself;
		 * otherwise convert the pipe bpp to the link bpp based on the
		 * output format (e.g. halved for YUV420).
		 */
		link_bpp_x16 = dsc ? bpp_x16 :
			intel_dp_output_format_link_bpp_x16(crtc_state->output_format,
							    fxp_q4_to_int(bpp_x16));

		local_bw_overhead = intel_dp_mst_bw_overhead(crtc_state,
							     false, dsc_slice_count, link_bpp_x16);

		intel_dp_mst_compute_m_n(crtc_state,
					 local_bw_overhead,
					 link_bpp_x16,
					 &crtc_state->dp_m_n);

		if (is_mst) {
			int remote_bw_overhead;
			int remote_tu;
			fixed20_12 pbn;

			remote_bw_overhead = intel_dp_mst_bw_overhead(crtc_state,
								      true, dsc_slice_count, link_bpp_x16);

			/*
			 * The TU size programmed to the HW determines which slots in
			 * an MTP frame are used for this stream, which needs to match
			 * the payload size programmed to the first downstream branch
			 * device's payload table.
			 *
			 * Note that atm the payload's PBN value DRM core sends via
			 * the ALLOCATE_PAYLOAD side-band message matches the payload
			 * size (which it calculates from the PBN value) it programs
			 * to the first branch device's payload table. The allocation
			 * in the payload table could be reduced though (to
			 * crtc_state->dp_m_n.tu), provided that the driver doesn't
			 * enable SSC on the corresponding link.
			 */
			pbn.full = dfixed_const(intel_dp_mst_calc_pbn(adjusted_mode->crtc_clock,
								      link_bpp_x16,
								      remote_bw_overhead));
			remote_tu = DIV_ROUND_UP(pbn.full, mst_state->pbn_div.full);

			/*
			 * Aligning the TUs ensures that symbols consisting of multiple
			 * (4) symbol cycles don't get split between two consecutive
			 * MTPs, as required by Bspec.
			 * TODO: remove the alignment restriction for 128b/132b links
			 * on some platforms, where Bspec allows this.
			 */
			remote_tu = ALIGN(remote_tu, 4 / crtc_state->lane_count);

			/*
			 * Also align PBNs accordingly, since MST core will derive its
			 * own copy of TU from the PBN in drm_dp_atomic_find_time_slots().
			 * The above comment about the difference between the PBN
			 * allocated for the whole path and the TUs allocated for the
			 * first branch device's link also applies here.
			 */
			pbn.full = remote_tu * mst_state->pbn_div.full;

			drm_WARN_ON(display->drm, remote_tu < crtc_state->dp_m_n.tu);
			crtc_state->dp_m_n.tu = remote_tu;

			slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst.mgr,
							      connector->mst.port,
							      dfixed_trunc(pbn));

			/* TODO: Check this already in drm_dp_atomic_find_time_slots(). */
			if (slots > mst_state->total_avail_slots)
				slots = -EINVAL;
		} else {
			/* Same as above for remote_tu */
			crtc_state->dp_m_n.tu = ALIGN(crtc_state->dp_m_n.tu,
						      4 / crtc_state->lane_count);

			if (crtc_state->dp_m_n.tu <= 64)
				slots = crtc_state->dp_m_n.tu;
			else
				slots = -EINVAL;
		}

		/* A deadlock must be propagated so the atomic commit can be retried. */
		if (slots == -EDEADLK)
			return slots;

		if (slots >= 0) {
			drm_WARN_ON(display->drm, slots != crtc_state->dp_m_n.tu);

			break;
		}
	}

	if (slots < 0) {
		drm_dbg_kms(display->drm, "failed finding vcpi slots:%d\n",
			    slots);
		return slots;
	}

	/* Record the bpp that fit: pipe bpp for uncompressed, DSC bpp otherwise. */
	if (!dsc)
		crtc_state->pipe_bpp = fxp_q4_to_int(bpp_x16);
	else
		crtc_state->dsc.compressed_bpp_x16 = bpp_x16;

	drm_dbg_kms(display->drm, "Got %d slots for pipe bpp " FXP_Q4_FMT " dsc %d\n",
		    slots, FXP_Q4_ARGS(bpp_x16), dsc);

	return 0;
}
441 
/*
 * Compute the uncompressed (non-DSC) link config for an MST stream, using
 * the maximum link rate/lane count from @limits and stepping the bpp in
 * units of 2 * 3 (presumably 2 per color component for 3 components —
 * matching the integer pipe bpp granularity; confirm against the limits
 * computation).
 */
static int mst_stream_compute_link_config(struct intel_dp *intel_dp,
					  struct intel_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state,
					  const struct link_config_limits *limits)
{
	crtc_state->lane_count = limits->max_lane_count;
	crtc_state->port_clock = limits->max_rate;

	/*
	 * FIXME: allocate the BW according to link_bpp, which in the case of
	 * YUV420 is only half of the pipe bpp value.
	 */
	return intel_dp_mtp_tu_compute_config(intel_dp, crtc_state, conn_state,
					      limits->link.min_bpp_x16,
					      limits->link.max_bpp_x16,
					      fxp_q4_from_int(2 * 3), false);
}
459 
/*
 * Compute the DSC link config for an MST stream: fix the pipe bpp and
 * link rate/lane count to the maximums in @limits, then search for a
 * compressed bpp/TU allocation that fits the link BW.
 */
static int mst_stream_dsc_compute_link_config(struct intel_dp *intel_dp,
					      struct intel_crtc_state *crtc_state,
					      struct drm_connector_state *conn_state,
					      const struct link_config_limits *limits)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = to_intel_connector(conn_state->connector);

	crtc_state->pipe_bpp = limits->pipe.max_bpp;

	drm_dbg_kms(display->drm,
		    "DSC Sink supported compressed min bpp " FXP_Q4_FMT " compressed max bpp " FXP_Q4_FMT "\n",
		    FXP_Q4_ARGS(limits->link.min_bpp_x16), FXP_Q4_ARGS(limits->link.max_bpp_x16));

	crtc_state->lane_count = limits->max_lane_count;
	crtc_state->port_clock = limits->max_rate;

	return intel_dp_mtp_tu_compute_config(intel_dp, crtc_state, conn_state,
					      limits->link.min_bpp_x16,
					      limits->link.max_bpp_x16,
					      intel_dp_dsc_bpp_step_x16(connector),
					      true);
}
483 
484 static int mode_hblank_period_ns(const struct drm_display_mode *mode)
485 {
486 	return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(mode->htotal - mode->hdisplay,
487 						 NSEC_PER_SEC / 1000),
488 				     mode->crtc_clock);
489 }
490 
/*
 * Check whether the hblank expansion quirk on this connector requires DSC
 * for the given mode: the quirk must be flagged on the connector, the
 * hblank period must be short (below 300 ns, or 500 ns for UHBR-capable
 * sinks on a UHBR link) and a valid DSC slice count must exist.
 */
static bool
hblank_expansion_quirk_needs_dsc(const struct intel_connector *connector,
				 const struct intel_crtc_state *crtc_state,
				 const struct link_config_limits *limits)
{
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	bool is_uhbr_sink = connector->mst.dp &&
			    drm_dp_128b132b_supported(connector->mst.dp->dpcd);
	int hblank_limit = is_uhbr_sink ? 500 : 300;

	if (!connector->dp.dsc_hblank_expansion_quirk)
		return false;

	/* For UHBR-capable sinks the quirk only matters on UHBR link rates. */
	if (is_uhbr_sink && !drm_dp_is_uhbr_rate(limits->max_rate))
		return false;

	if (mode_hblank_period_ns(adjusted_mode) > hblank_limit)
		return false;

	if (!intel_dp_mst_dsc_get_slice_count(connector, crtc_state))
		return false;

	return true;
}
516 
/*
 * Adjust the link bpp limits for sinks with the DSC hblank expansion
 * quirk. In non-DSC mode either force a retry with DSC (return false if
 * DSC is supported) or raise the link min bpp to 24; in DSC mode raise
 * the compressed min bpp based on the link rate. Returns false if the
 * required minimum cannot fit within the current max bpp limit.
 */
static bool
adjust_limits_for_dsc_hblank_expansion_quirk(struct intel_dp *intel_dp,
					     const struct intel_connector *connector,
					     const struct intel_crtc_state *crtc_state,
					     struct link_config_limits *limits,
					     bool dsc)
{
	struct intel_display *display = to_intel_display(connector);
	const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	int min_bpp_x16 = limits->link.min_bpp_x16;

	if (!hblank_expansion_quirk_needs_dsc(connector, crtc_state, limits))
		return true;

	if (!dsc) {
		/* Prefer enabling DSC over raising the min bpp, if possible. */
		if (intel_dp_supports_dsc(intel_dp, connector, crtc_state)) {
			drm_dbg_kms(display->drm,
				    "[CRTC:%d:%s][CONNECTOR:%d:%s] DSC needed by hblank expansion quirk\n",
				    crtc->base.base.id, crtc->base.name,
				    connector->base.base.id, connector->base.name);
			return false;
		}

		drm_dbg_kms(display->drm,
			    "[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to 24 due to hblank expansion quirk\n",
			    crtc->base.base.id, crtc->base.name,
			    connector->base.base.id, connector->base.name);

		if (limits->link.max_bpp_x16 < fxp_q4_from_int(24))
			return false;

		limits->link.min_bpp_x16 = fxp_q4_from_int(24);

		return true;
	}

	drm_WARN_ON(display->drm, limits->min_rate != limits->max_rate);

	/* Lower link rates need a higher compressed min bpp for the quirk. */
	if (limits->max_rate < 540000)
		min_bpp_x16 = fxp_q4_from_int(13);
	else if (limits->max_rate < 810000)
		min_bpp_x16 = fxp_q4_from_int(10);

	if (limits->link.min_bpp_x16 >= min_bpp_x16)
		return true;

	drm_dbg_kms(display->drm,
		    "[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to " FXP_Q4_FMT " in DSC mode due to hblank expansion quirk\n",
		    crtc->base.base.id, crtc->base.name,
		    connector->base.base.id, connector->base.name,
		    FXP_Q4_ARGS(min_bpp_x16));

	if (limits->link.max_bpp_x16 < min_bpp_x16)
		return false;

	limits->link.min_bpp_x16 = min_bpp_x16;

	return true;
}
576 
577 static bool
578 mst_stream_compute_config_limits(struct intel_dp *intel_dp,
579 				 struct drm_connector_state *conn_state,
580 				 struct intel_crtc_state *crtc_state,
581 				 bool dsc,
582 				 struct link_config_limits *limits)
583 {
584 	struct intel_connector *connector =
585 		to_intel_connector(conn_state->connector);
586 
587 	if (!intel_dp_compute_config_limits(intel_dp, conn_state,
588 					    crtc_state, false, dsc,
589 					    limits))
590 		return false;
591 
592 	return adjust_limits_for_dsc_hblank_expansion_quirk(intel_dp,
593 							    connector,
594 							    crtc_state,
595 							    limits,
596 							    dsc);
597 }
598 
599 static int mst_stream_compute_link_for_joined_pipes(struct intel_encoder *encoder,
600 						    struct intel_crtc_state *pipe_config,
601 						    struct drm_connector_state *conn_state,
602 						    int num_joined_pipes)
603 {
604 	struct intel_display *display = to_intel_display(encoder);
605 	struct intel_dp *intel_dp = to_primary_dp(encoder);
606 	const struct drm_display_mode *adjusted_mode =
607 		&pipe_config->hw.adjusted_mode;
608 	struct intel_connector *connector =
609 		to_intel_connector(conn_state->connector);
610 	struct link_config_limits limits;
611 	bool dsc_needed, joiner_needs_dsc;
612 	int ret = 0;
613 
614 	intel_dp_dsc_reset_config(pipe_config);
615 
616 	joiner_needs_dsc = intel_dp_joiner_needs_dsc(display, num_joined_pipes);
617 
618 	dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en ||
619 		!mst_stream_compute_config_limits(intel_dp, conn_state,
620 						  pipe_config, false, &limits);
621 
622 	if (!dsc_needed) {
623 		ret = mst_stream_compute_link_config(intel_dp, pipe_config,
624 						     conn_state, &limits);
625 
626 		if (ret == -EDEADLK)
627 			return ret;
628 
629 		if (ret ||
630 		    !intel_dp_dotclk_valid(display,
631 					   adjusted_mode->clock,
632 					   adjusted_mode->htotal,
633 					   0,
634 					   num_joined_pipes))
635 			dsc_needed = true;
636 	}
637 
638 	if (dsc_needed && !intel_dp_supports_dsc(intel_dp, connector, pipe_config)) {
639 		drm_dbg_kms(display->drm, "DSC required but not available\n");
640 		return -EINVAL;
641 	}
642 
643 	/* enable compression if the mode doesn't fit available BW */
644 	if (dsc_needed) {
645 		int dsc_slice_count;
646 
647 		drm_dbg_kms(display->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
648 			    str_yes_no(ret), str_yes_no(joiner_needs_dsc),
649 			    str_yes_no(intel_dp->force_dsc_en));
650 
651 
652 		if (!mst_stream_compute_config_limits(intel_dp, conn_state,
653 						      pipe_config, true,
654 						      &limits))
655 			return -EINVAL;
656 
657 		/*
658 		 * FIXME: As bpc is hardcoded to 8, as mentioned above,
659 		 * WARN and ignore the debug flag force_dsc_bpc for now.
660 		 */
661 		drm_WARN(display->drm, intel_dp->force_dsc_bpc,
662 			 "Cannot Force BPC for MST\n");
663 		/*
664 		 * Try to get at least some timeslots and then see, if
665 		 * we can fit there with DSC.
666 		 */
667 		drm_dbg_kms(display->drm, "Trying to find VCPI slots in DSC mode\n");
668 
669 		ret = mst_stream_dsc_compute_link_config(intel_dp, pipe_config,
670 							 conn_state, &limits);
671 		if (ret < 0)
672 			return ret;
673 
674 		ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
675 						  conn_state, &limits,
676 						  pipe_config->dp_m_n.tu);
677 		if (ret)
678 			return ret;
679 
680 		dsc_slice_count = intel_dp_mst_dsc_get_slice_count(connector, pipe_config);
681 
682 		if (!intel_dp_dotclk_valid(display,
683 					   adjusted_mode->clock,
684 					   adjusted_mode->htotal,
685 					   dsc_slice_count,
686 					   num_joined_pipes))
687 			return -EINVAL;
688 	}
689 
690 	if (ret)
691 		return ret;
692 
693 	ret = intel_dp_compute_min_hblank(pipe_config, conn_state);
694 	if (ret)
695 		return ret;
696 
697 	return 0;
698 }
699 
/*
 * .compute_config() hook for MST stream encoders: validate the mode,
 * compute the link config (trying each joiner-pipe candidate until one
 * succeeds) and fill in the remaining stream state (color range, VRR,
 * audio, PSR, tunnel BW).
 */
static int mst_stream_compute_config(struct intel_encoder *encoder,
				     struct intel_crtc_state *pipe_config,
				     struct drm_connector_state *conn_state)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct intel_dp *intel_dp = to_primary_dp(encoder);
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	int num_joined_pipes;
	int ret = -EINVAL;

	if (pipe_config->fec_enable &&
	    !intel_dp_supports_fec(intel_dp, connector, pipe_config))
		return -EINVAL;

	/* Doublescan modes are not supported. */
	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return -EINVAL;

	pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->has_pch_encoder = false;

	/* Try each joiner candidate until a link config succeeds. */
	for_each_joiner_candidate(connector, adjusted_mode, num_joined_pipes) {
		if (num_joined_pipes > 1)
			pipe_config->joiner_pipes = GENMASK(crtc->pipe + num_joined_pipes - 1,
							    crtc->pipe);

		ret = mst_stream_compute_link_for_joined_pipes(encoder,
							       pipe_config,
							       conn_state,
							       num_joined_pipes);
		if (ret == 0 || ret == -EDEADLK)
			break;
	}

	if (ret)
		return ret;

	pipe_config->limited_color_range =
		intel_dp_limited_color_range(pipe_config, conn_state);

	if (display->platform.geminilake || display->platform.broxton)
		pipe_config->lane_lat_optim_mask =
			bxt_dpio_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);

	intel_vrr_compute_config(pipe_config, conn_state);

	intel_dp_audio_compute_config(encoder, pipe_config, conn_state);

	intel_ddi_compute_min_voltage_level(pipe_config);

	intel_psr_compute_config(intel_dp, pipe_config, conn_state);

	return intel_dp_tunnel_atomic_compute_stream_bw(state, intel_dp, connector,
							pipe_config);
}
760 
/*
 * Iterate over all connectors and return a mask of
 * all CPU transcoders streaming over the same DP link.
 */
static unsigned int
intel_dp_mst_transcoder_mask(struct intel_atomic_state *state,
			     struct intel_dp *mst_port)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_digital_connector_state *conn_state;
	struct intel_connector *connector;
	u8 transcoders = 0;
	int i;

	/* Only relevant on TGL+, where streams share a master transcoder. */
	if (DISPLAY_VER(display) < 12)
		return 0;

	for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
		const struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		/* Skip connectors on other DP links or without a CRTC. */
		if (connector->mst.dp != mst_port || !conn_state->base.crtc)
			continue;

		crtc = to_intel_crtc(conn_state->base.crtc);
		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

		if (!crtc_state->hw.active)
			continue;

		transcoders |= BIT(crtc_state->cpu_transcoder);
	}

	return transcoders;
}
796 
/*
 * Return a mask of the pipes driving streams downstream of @parent_port
 * in @mst_mgr's topology. A stream counts if its connector's port is
 * @parent_port itself or downstream of it; with a NULL @parent_port all
 * the topology's streams are included (presumably — the matching then
 * relies on drm_dp_mst_port_downstream_of_parent()'s NULL-parent
 * semantics; see its DRM core documentation).
 */
static u8 get_pipes_downstream_of_mst_port(struct intel_atomic_state *state,
					   struct drm_dp_mst_topology_mgr *mst_mgr,
					   struct drm_dp_mst_port *parent_port)
{
	const struct intel_digital_connector_state *conn_state;
	struct intel_connector *connector;
	u8 mask = 0;
	int i;

	for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
		if (!conn_state->base.crtc)
			continue;

		/* Skip connectors belonging to a different MST topology. */
		if (&connector->mst.dp->mst.mgr != mst_mgr)
			continue;

		if (connector->mst.port != parent_port &&
		    !drm_dp_mst_port_downstream_of_parent(mst_mgr,
							  connector->mst.port,
							  parent_port))
			continue;

		mask |= BIT(to_intel_crtc(conn_state->base.crtc)->pipe);
	}

	return mask;
}
824 
/*
 * Check whether the streams on an MST link have an inconsistent DSC
 * state: if some but not all pipes on the link enable DSC, mark all the
 * link's pipes for DSC in @limits, force a modeset on them and return
 * -EAGAIN so their configuration gets recomputed. Returns 0 if the DSC
 * state is consistent, or a negative error code.
 */
static int intel_dp_mst_check_dsc_change(struct intel_atomic_state *state,
					 struct drm_dp_mst_topology_mgr *mst_mgr,
					 struct intel_link_bw_limits *limits)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc *crtc;
	u8 mst_pipe_mask;
	u8 dsc_pipe_mask = 0;
	int ret;

	/* NULL parent port: all pipes streaming through this topology. */
	mst_pipe_mask = get_pipes_downstream_of_mst_port(state, mst_mgr, NULL);

	for_each_intel_crtc_in_pipe_mask(display->drm, crtc, mst_pipe_mask) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_new_crtc_state(state, crtc);

		/* Atomic connector check should've added all the MST CRTCs. */
		if (drm_WARN_ON(display->drm, !crtc_state))
			return -EINVAL;

		if (intel_dsc_enabled_on_link(crtc_state))
			dsc_pipe_mask |= BIT(crtc->pipe);
	}

	/* Either no DSC at all or DSC on every pipe is consistent. */
	if (!dsc_pipe_mask || mst_pipe_mask == dsc_pipe_mask)
		return 0;

	limits->link_dsc_pipes |= mst_pipe_mask;

	ret = intel_modeset_pipes_in_mask_early(state, "MST DSC",
						mst_pipe_mask);

	return ret ? : -EAGAIN;
}
859 
/*
 * Check the BW limit of an MST topology. If the DRM core check reports
 * an overallocation (-ENOSPC), reduce the bpp limits for the pipes
 * downstream of the failing port and return -EAGAIN so their
 * configuration gets recomputed. Any other error is passed through.
 */
static int intel_dp_mst_check_bw(struct intel_atomic_state *state,
				 struct drm_dp_mst_topology_mgr *mst_mgr,
				 struct drm_dp_mst_topology_state *mst_state,
				 struct intel_link_bw_limits *limits)
{
	struct drm_dp_mst_port *mst_port;
	u8 mst_port_pipes;
	int ret;

	ret = drm_dp_mst_atomic_check_mgr(&state->base, mst_mgr, mst_state, &mst_port);
	if (ret != -ENOSPC)
		return ret;

	/* mst_port identifies the branch whose link ran out of BW. */
	mst_port_pipes = get_pipes_downstream_of_mst_port(state, mst_mgr, mst_port);

	ret = intel_link_bw_reduce_bpp(state, limits,
				       mst_port_pipes, "MST link BW");

	return ret ? : -EAGAIN;
}
880 
881 /**
882  * intel_dp_mst_atomic_check_link - check all modeset MST link configuration
883  * @state: intel atomic state
884  * @limits: link BW limits
885  *
886  * Check the link configuration for all modeset MST outputs. If the
887  * configuration is invalid @limits will be updated if possible to
888  * reduce the total BW, after which the configuration for all CRTCs in
889  * @state must be recomputed with the updated @limits.
890  *
891  * Returns:
892  *   - 0 if the configuration is valid
893  *   - %-EAGAIN, if the configuration is invalid and @limits got updated
894  *     with fallback values with which the configuration of all CRTCs in
895  *     @state must be recomputed
896  *   - Other negative error, if the configuration is invalid without a
897  *     fallback possibility, or the check failed for another reason
898  */
899 int intel_dp_mst_atomic_check_link(struct intel_atomic_state *state,
900 				   struct intel_link_bw_limits *limits)
901 {
902 	struct drm_dp_mst_topology_mgr *mgr;
903 	struct drm_dp_mst_topology_state *mst_state;
904 	int ret;
905 	int i;
906 
907 	for_each_new_mst_mgr_in_state(&state->base, mgr, mst_state, i) {
908 		ret = intel_dp_mst_check_dsc_change(state, mgr, limits);
909 		if (ret)
910 			return ret;
911 
912 		ret = intel_dp_mst_check_bw(state, mgr, mst_state,
913 					    limits);
914 		if (ret)
915 			return ret;
916 	}
917 
918 	return 0;
919 }
920 
921 static int mst_stream_compute_config_late(struct intel_encoder *encoder,
922 					  struct intel_crtc_state *crtc_state,
923 					  struct drm_connector_state *conn_state)
924 {
925 	struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
926 	struct intel_dp *intel_dp = to_primary_dp(encoder);
927 
928 	/* lowest numbered transcoder will be designated master */
929 	crtc_state->mst_master_transcoder =
930 		ffs(intel_dp_mst_transcoder_mask(state, intel_dp)) - 1;
931 
932 	return 0;
933 }
934 
935 /*
936  * If one of the connectors in a MST stream needs a modeset, mark all CRTCs
937  * that shares the same MST stream as mode changed,
938  * intel_modeset_pipe_config()+intel_crtc_check_fastset() will take care to do
939  * a fastset when possible.
940  *
941  * On TGL+ this is required since each stream go through a master transcoder,
942  * so if the master transcoder needs modeset, all other streams in the
943  * topology need a modeset. All platforms need to add the atomic state
944  * for all streams in the topology, since a modeset on one may require
945  * changing the MST link BW usage of the others, which in turn needs a
946  * recomputation of the corresponding CRTC states.
947  */
static int
mst_connector_atomic_topology_check(struct intel_connector *connector,
				    struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(connector);
	struct drm_connector_list_iter connector_list_iter;
	struct intel_connector *connector_iter;
	int ret = 0;

	/* Nothing to pull in if @connector itself doesn't need a modeset. */
	if (!intel_connector_needs_modeset(state, &connector->base))
		return 0;

	/*
	 * Add the connector/CRTC state of every other stream on the same
	 * MST link and force a mode-changed recompute on those CRTCs.
	 */
	drm_connector_list_iter_begin(display->drm, &connector_list_iter);
	for_each_intel_connector_iter(connector_iter, &connector_list_iter) {
		struct intel_digital_connector_state *conn_iter_state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		/* Only siblings on the same primary DP port are affected. */
		if (connector_iter->mst.dp != connector->mst.dp ||
		    connector_iter == connector)
			continue;

		conn_iter_state = intel_atomic_get_digital_connector_state(state,
									   connector_iter);
		if (IS_ERR(conn_iter_state)) {
			ret = PTR_ERR(conn_iter_state);
			break;
		}

		/* Disabled sibling stream: no CRTC state to recompute. */
		if (!conn_iter_state->base.crtc)
			continue;

		crtc = to_intel_crtc(conn_iter_state->base.crtc);
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			break;
		}

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			break;
		crtc_state->uapi.mode_changed = true;
	}
	/* Must be ended even on the error paths above. */
	drm_connector_list_iter_end(&connector_list_iter);

	return ret;
}
996 
/*
 * MST connector ->atomic_check() hook: run the generic digital connector
 * check, pull in all sibling streams on the same MST link, check any DP
 * tunnel BW and finally release this connector's time slots on a modeset.
 */
static int
mst_connector_atomic_check(struct drm_connector *_connector,
			   struct drm_atomic_state *_state)
{
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct intel_connector *connector = to_intel_connector(_connector);
	int ret;

	ret = intel_digital_connector_atomic_check(&connector->base, &state->base);
	if (ret)
		return ret;

	ret = mst_connector_atomic_topology_check(connector, state);
	if (ret)
		return ret;

	/* DP tunnel BW needs rechecking only when this stream is modeset. */
	if (intel_connector_needs_modeset(state, &connector->base)) {
		ret = intel_dp_tunnel_atomic_check_state(state,
							 connector->mst.dp,
							 connector);
		if (ret)
			return ret;
	}

	return drm_dp_atomic_release_time_slots(&state->base,
						&connector->mst.dp->mst.mgr,
						connector->mst.port);
}
1025 
1026 static void mst_stream_disable(struct intel_atomic_state *state,
1027 			       struct intel_encoder *encoder,
1028 			       const struct intel_crtc_state *old_crtc_state,
1029 			       const struct drm_connector_state *old_conn_state)
1030 {
1031 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
1032 	struct intel_dp *intel_dp = to_primary_dp(encoder);
1033 	struct intel_connector *connector =
1034 		to_intel_connector(old_conn_state->connector);
1035 
1036 	if (intel_dp_mst_active_streams(intel_dp) == 1)
1037 		intel_dp->link.active = false;
1038 
1039 	intel_hdcp_disable(intel_mst->connector);
1040 
1041 	intel_dp_sink_disable_decompression(state, connector, old_crtc_state);
1042 }
1043 
/*
 * Post-disable step for an MST stream: removes the stream's payload
 * allocation, disables the transcoder and - for the last stream on the
 * link - calls the primary encoder's post_disable hook. The exact order of
 * the steps below follows the HW disable sequence and must not be changed.
 */
static void mst_stream_post_disable(struct intel_atomic_state *state,
				    struct intel_encoder *encoder,
				    const struct intel_crtc_state *old_crtc_state,
				    const struct drm_connector_state *old_conn_state)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
	struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
	struct intel_dp *intel_dp = to_primary_dp(encoder);
	struct intel_connector *connector =
		to_intel_connector(old_conn_state->connector);
	struct drm_dp_mst_topology_state *old_mst_state =
		drm_atomic_get_old_mst_topology_state(&state->base, &intel_dp->mst.mgr);
	struct drm_dp_mst_topology_state *new_mst_state =
		drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst.mgr);
	const struct drm_dp_mst_atomic_payload *old_payload =
		drm_atomic_get_mst_payload_state(old_mst_state, connector->mst.port);
	struct drm_dp_mst_atomic_payload *new_payload =
		drm_atomic_get_mst_payload_state(new_mst_state, connector->mst.port);
	struct intel_crtc *pipe_crtc;
	bool last_mst_stream;
	int i;

	last_mst_stream = intel_dp_mst_dec_active_streams(intel_dp);

	/* On TGL+ the last stream must go through the master transcoder. */
	drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 12 && last_mst_stream &&
		    !intel_dp_mst_is_master_trans(old_crtc_state));

	for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
		const struct intel_crtc_state *old_pipe_crtc_state =
			intel_atomic_get_old_crtc_state(state, pipe_crtc);

		intel_crtc_vblank_off(old_pipe_crtc_state);
	}

	intel_disable_transcoder(old_crtc_state);

	/* Remove the stream's time slot allocation from the payload table. */
	drm_dp_remove_payload_part1(&intel_dp->mst.mgr, new_mst_state, new_payload);

	intel_ddi_clear_act_sent(encoder, old_crtc_state);

	/* Trigger the payload table update / ACT on the sink. */
	intel_de_rmw(display,
		     TRANS_DDI_FUNC_CTL(display, old_crtc_state->cpu_transcoder),
		     TRANS_DDI_DP_VC_PAYLOAD_ALLOC, 0);

	intel_ddi_wait_for_act_sent(encoder, old_crtc_state);
	drm_dp_check_act_status(&intel_dp->mst.mgr);

	drm_dp_remove_payload_part2(&intel_dp->mst.mgr, new_mst_state,
				    old_payload, new_payload);

	intel_vrr_transcoder_disable(old_crtc_state);

	intel_ddi_disable_transcoder_func(old_crtc_state);

	/* Tear down DSC and the scaler/panel fitter on all joined pipes. */
	for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
		const struct intel_crtc_state *old_pipe_crtc_state =
			intel_atomic_get_old_crtc_state(state, pipe_crtc);

		intel_dsc_disable(old_pipe_crtc_state);

		if (DISPLAY_VER(display) >= 9)
			skl_scaler_disable(old_pipe_crtc_state);
		else
			ilk_pfit_disable(old_pipe_crtc_state);
	}

	/*
	 * Power down mst path before disabling the port, otherwise we end
	 * up getting interrupts from the sink upon detecting link loss.
	 */
	drm_dp_send_power_updown_phy(&intel_dp->mst.mgr, connector->mst.port,
				     false);

	/*
	 * BSpec 4287: disable DIP after the transcoder is disabled and before
	 * the transcoder clock select is set to none.
	 */
	intel_dp_set_infoframes(primary_encoder, false, old_crtc_state, NULL);
	/*
	 * From TGL spec: "If multi-stream slave transcoder: Configure
	 * Transcoder Clock Select to direct no clock to the transcoder"
	 *
	 * From older GENs spec: "Configure Transcoder Clock Select to direct
	 * no clock to the transcoder"
	 */
	if (DISPLAY_VER(display) < 12 || !last_mst_stream)
		intel_ddi_disable_transcoder_clock(old_crtc_state);


	intel_mst->connector = NULL;
	/* The primary encoder is torn down along with the last stream. */
	if (last_mst_stream)
		primary_encoder->post_disable(state, primary_encoder,
					      old_crtc_state, NULL);

}
1140 
1141 static void mst_stream_post_pll_disable(struct intel_atomic_state *state,
1142 					struct intel_encoder *encoder,
1143 					const struct intel_crtc_state *old_crtc_state,
1144 					const struct drm_connector_state *old_conn_state)
1145 {
1146 	struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
1147 	struct intel_dp *intel_dp = to_primary_dp(encoder);
1148 
1149 	if (intel_dp_mst_active_streams(intel_dp) == 0 &&
1150 	    primary_encoder->post_pll_disable)
1151 		primary_encoder->post_pll_disable(state, primary_encoder, old_crtc_state, old_conn_state);
1152 }
1153 
1154 static void mst_stream_pre_pll_enable(struct intel_atomic_state *state,
1155 				      struct intel_encoder *encoder,
1156 				      const struct intel_crtc_state *pipe_config,
1157 				      const struct drm_connector_state *conn_state)
1158 {
1159 	struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
1160 	struct intel_dp *intel_dp = to_primary_dp(encoder);
1161 
1162 	if (intel_dp_mst_active_streams(intel_dp) == 0)
1163 		primary_encoder->pre_pll_enable(state, primary_encoder,
1164 						pipe_config, NULL);
1165 	else
1166 		/*
1167 		 * The port PLL state needs to get updated for secondary
1168 		 * streams as for the primary stream.
1169 		 */
1170 		intel_ddi_update_active_dpll(state, primary_encoder,
1171 					     to_intel_crtc(pipe_config->uapi.crtc));
1172 }
1173 
1174 static bool intel_mst_probed_link_params_valid(struct intel_dp *intel_dp,
1175 					       int link_rate, int lane_count)
1176 {
1177 	return intel_dp->link.mst_probed_rate == link_rate &&
1178 		intel_dp->link.mst_probed_lane_count == lane_count;
1179 }
1180 
1181 static void intel_mst_set_probed_link_params(struct intel_dp *intel_dp,
1182 					     int link_rate, int lane_count)
1183 {
1184 	intel_dp->link.mst_probed_rate = link_rate;
1185 	intel_dp->link.mst_probed_lane_count = lane_count;
1186 }
1187 
1188 static void intel_mst_reprobe_topology(struct intel_dp *intel_dp,
1189 				       const struct intel_crtc_state *crtc_state)
1190 {
1191 	if (intel_mst_probed_link_params_valid(intel_dp,
1192 					       crtc_state->port_clock, crtc_state->lane_count))
1193 		return;
1194 
1195 	drm_dp_mst_topology_queue_probe(&intel_dp->mst.mgr);
1196 
1197 	intel_mst_set_probed_link_params(intel_dp,
1198 					 crtc_state->port_clock, crtc_state->lane_count);
1199 }
1200 
/*
 * Pre-enable step for an MST stream: powers up the sink path, allocates the
 * stream's payload time slots and - for the first stream on the link -
 * brings up the primary encoder. The step order follows the HW enable
 * sequence and must not be changed.
 */
static void mst_stream_pre_enable(struct intel_atomic_state *state,
				  struct intel_encoder *encoder,
				  const struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
	struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
	struct intel_dp *intel_dp = to_primary_dp(encoder);
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	struct drm_dp_mst_topology_state *mst_state =
		drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst.mgr);
	int ret;
	bool first_mst_stream;

	/* MST encoders are bound to a crtc, not to a connector,
	 * force the mapping here for get_hw_state.
	 */
	connector->encoder = encoder;
	intel_mst->connector = connector;

	first_mst_stream = intel_dp_mst_inc_active_streams(intel_dp);
	/* On TGL+ the first stream must go through the master transcoder. */
	drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 12 && first_mst_stream &&
		    !intel_dp_mst_is_master_trans(pipe_config));

	if (first_mst_stream)
		intel_dp_set_power(intel_dp, DP_SET_POWER_D0);

	drm_dp_send_power_updown_phy(&intel_dp->mst.mgr, connector->mst.port, true);

	intel_dp_sink_enable_decompression(state, connector, pipe_config);

	if (first_mst_stream) {
		primary_encoder->pre_enable(state, primary_encoder,
					    pipe_config, NULL);

		/* The link is now trained; reprobe if its params changed. */
		intel_mst_reprobe_topology(intel_dp, pipe_config);
	}

	/* Allocate this stream's time slots in the payload table. */
	ret = drm_dp_add_payload_part1(&intel_dp->mst.mgr, mst_state,
				       drm_atomic_get_mst_payload_state(mst_state, connector->mst.port));
	if (ret < 0)
		intel_dp_queue_modeset_retry_for_link(state, primary_encoder, pipe_config);

	/*
	 * Before Gen 12 this is not done as part of
	 * primary_encoder->pre_enable() and should be done here. For
	 * Gen 12+ the step in which this should be done is different for the
	 * first MST stream, so it's done on the DDI for the first stream and
	 * here for the following ones.
	 */
	if (DISPLAY_VER(display) < 12 || !first_mst_stream)
		intel_ddi_enable_transcoder_clock(encoder, pipe_config);

	if (DISPLAY_VER(display) >= 13 && !first_mst_stream)
		intel_ddi_config_transcoder_func(encoder, pipe_config);

	intel_dsc_dp_pps_write(primary_encoder, pipe_config);
	intel_ddi_set_dp_msa(pipe_config, conn_state);
}
1262 
1263 static void enable_bs_jitter_was(const struct intel_crtc_state *crtc_state)
1264 {
1265 	struct intel_display *display = to_intel_display(crtc_state);
1266 	u32 clear = 0;
1267 	u32 set = 0;
1268 
1269 	if (!display->platform.alderlake_p)
1270 		return;
1271 
1272 	if (!IS_DISPLAY_STEP(display, STEP_D0, STEP_FOREVER))
1273 		return;
1274 
1275 	/* Wa_14013163432:adlp */
1276 	if (crtc_state->fec_enable || intel_dp_is_uhbr(crtc_state))
1277 		set |= DP_MST_FEC_BS_JITTER_WA(crtc_state->cpu_transcoder);
1278 
1279 	/* Wa_14014143976:adlp */
1280 	if (intel_display_wa(display, INTEL_DISPLAY_WA_14014143976)) {
1281 		if (intel_dp_is_uhbr(crtc_state))
1282 			set |= DP_MST_SHORT_HBLANK_WA(crtc_state->cpu_transcoder);
1283 		else if (crtc_state->fec_enable)
1284 			clear |= DP_MST_SHORT_HBLANK_WA(crtc_state->cpu_transcoder);
1285 
1286 		if (crtc_state->fec_enable || intel_dp_is_uhbr(crtc_state))
1287 			set |= DP_MST_DPT_DPTP_ALIGN_WA(crtc_state->cpu_transcoder);
1288 	}
1289 
1290 	if (!clear && !set)
1291 		return;
1292 
1293 	intel_de_rmw(display, CHICKEN_MISC_3, clear, set);
1294 }
1295 
/*
 * Enable step for an MST stream: enables the transcoder, triggers the
 * payload table update / ACT on the sink, completes the payload allocation
 * and starts the stream. The step order follows the HW enable sequence.
 */
static void mst_stream_enable(struct intel_atomic_state *state,
			      struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
	struct intel_dp *intel_dp = to_primary_dp(encoder);
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	struct drm_dp_mst_topology_state *mst_state =
		drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst.mgr);
	enum transcoder trans = pipe_config->cpu_transcoder;
	/* The stream count was already bumped in the pre_enable hook. */
	bool first_mst_stream = intel_dp_mst_active_streams(intel_dp) == 1;
	struct intel_crtc *pipe_crtc;
	int ret, i;

	drm_WARN_ON(display->drm, pipe_config->has_pch_encoder);

	if (intel_dp_is_uhbr(pipe_config)) {
		const struct drm_display_mode *adjusted_mode =
			&pipe_config->hw.adjusted_mode;
		u64 crtc_clock_hz = KHz(adjusted_mode->crtc_clock);

		/* 128b/132b DPT requires the pixel clock in Hz, split in two. */
		intel_de_write(display, TRANS_DP2_VFREQHIGH(pipe_config->cpu_transcoder),
			       TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz >> 24));
		intel_de_write(display, TRANS_DP2_VFREQLOW(pipe_config->cpu_transcoder),
			       TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz & 0xffffff));
	}

	enable_bs_jitter_was(pipe_config);

	intel_ddi_enable_transcoder_func(encoder, pipe_config);

	intel_vrr_transcoder_enable(pipe_config);

	intel_ddi_clear_act_sent(encoder, pipe_config);

	/* Trigger the payload table update / ACT on the sink. */
	intel_de_rmw(display, TRANS_DDI_FUNC_CTL(display, trans), 0,
		     TRANS_DDI_DP_VC_PAYLOAD_ALLOC);

	intel_ddi_wait_for_act_sent(encoder, pipe_config);
	drm_dp_check_act_status(&intel_dp->mst.mgr);

	if (first_mst_stream)
		intel_ddi_wait_for_fec_status(encoder, pipe_config, true);

	ret = drm_dp_add_payload_part2(&intel_dp->mst.mgr,
				       drm_atomic_get_mst_payload_state(mst_state,
									connector->mst.port));
	if (ret < 0)
		intel_dp_queue_modeset_retry_for_link(state, primary_encoder, pipe_config);

	if (DISPLAY_VER(display) >= 12)
		intel_de_rmw(display, CHICKEN_TRANS(display, trans),
			     FECSTALL_DIS_DPTSTREAM_DPTTG,
			     pipe_config->fec_enable ? FECSTALL_DIS_DPTSTREAM_DPTTG : 0);

	intel_enable_transcoder(pipe_config);

	for_each_pipe_crtc_modeset_enable(display, pipe_crtc, pipe_config, i) {
		const struct intel_crtc_state *pipe_crtc_state =
			intel_atomic_get_new_crtc_state(state, pipe_crtc);

		intel_crtc_vblank_on(pipe_crtc_state);
	}

	intel_hdcp_enable(state, encoder, pipe_config, conn_state);
}
1364 
1365 static bool mst_stream_get_hw_state(struct intel_encoder *encoder,
1366 				    enum pipe *pipe)
1367 {
1368 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
1369 	*pipe = intel_mst->pipe;
1370 	if (intel_mst->connector)
1371 		return true;
1372 	return false;
1373 }
1374 
1375 static void mst_stream_get_config(struct intel_encoder *encoder,
1376 				  struct intel_crtc_state *pipe_config)
1377 {
1378 	struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
1379 
1380 	primary_encoder->get_config(primary_encoder, pipe_config);
1381 }
1382 
1383 static bool mst_stream_initial_fastset_check(struct intel_encoder *encoder,
1384 					     struct intel_crtc_state *crtc_state)
1385 {
1386 	struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
1387 
1388 	return intel_dp_initial_fastset_check(primary_encoder, crtc_state);
1389 }
1390 
1391 static int mst_connector_get_ddc_modes(struct drm_connector *_connector)
1392 {
1393 	struct intel_connector *connector = to_intel_connector(_connector);
1394 	struct intel_display *display = to_intel_display(connector);
1395 	struct intel_dp *intel_dp = connector->mst.dp;
1396 	const struct drm_edid *drm_edid;
1397 	int ret;
1398 
1399 	if (drm_connector_is_unregistered(&connector->base))
1400 		return intel_connector_update_modes(&connector->base, NULL);
1401 
1402 	if (!intel_display_driver_check_access(display))
1403 		return drm_edid_connector_add_modes(&connector->base);
1404 
1405 	drm_edid = drm_dp_mst_edid_read(&connector->base, &intel_dp->mst.mgr, connector->mst.port);
1406 
1407 	ret = intel_connector_update_modes(&connector->base, drm_edid);
1408 
1409 	drm_edid_free(drm_edid);
1410 
1411 	return ret;
1412 }
1413 
1414 static int
1415 mst_connector_late_register(struct drm_connector *_connector)
1416 {
1417 	struct intel_connector *connector = to_intel_connector(_connector);
1418 	int ret;
1419 
1420 	ret = drm_dp_mst_connector_late_register(&connector->base, connector->mst.port);
1421 	if (ret < 0)
1422 		return ret;
1423 
1424 	ret = intel_connector_register(&connector->base);
1425 	if (ret < 0)
1426 		drm_dp_mst_connector_early_unregister(&connector->base, connector->mst.port);
1427 
1428 	return ret;
1429 }
1430 
1431 static void
1432 mst_connector_early_unregister(struct drm_connector *_connector)
1433 {
1434 	struct intel_connector *connector = to_intel_connector(_connector);
1435 
1436 	intel_connector_unregister(&connector->base);
1437 	drm_dp_mst_connector_early_unregister(&connector->base, connector->mst.port);
1438 }
1439 
/* Core connector hooks for MST connectors; probing is in the helper funcs. */
static const struct drm_connector_funcs mst_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_digital_connector_atomic_get_property,
	.atomic_set_property = intel_digital_connector_atomic_set_property,
	.late_register = mst_connector_late_register,
	.early_unregister = mst_connector_early_unregister,
	.destroy = intel_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
1450 
/* ->get_modes() hook: mode probing boils down to the sideband EDID read. */
static int mst_connector_get_modes(struct drm_connector *_connector)
{
	return mst_connector_get_ddc_modes(_connector);
}
1457 
1458 static int
1459 mst_connector_mode_valid_ctx(struct drm_connector *_connector,
1460 			     const struct drm_display_mode *mode,
1461 			     struct drm_modeset_acquire_ctx *ctx,
1462 			     enum drm_mode_status *status)
1463 {
1464 	struct intel_connector *connector = to_intel_connector(_connector);
1465 	struct intel_display *display = to_intel_display(connector);
1466 	struct intel_dp *intel_dp = connector->mst.dp;
1467 	struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst.mgr;
1468 	struct drm_dp_mst_port *port = connector->mst.port;
1469 	int max_rate, mode_rate, max_lanes, max_link_clock;
1470 	unsigned long bw_overhead_flags =
1471 		DRM_DP_BW_OVERHEAD_MST | DRM_DP_BW_OVERHEAD_SSC_REF_CLK;
1472 	int min_link_bpp_x16 = fxp_q4_from_int(18);
1473 	static bool supports_dsc;
1474 	int ret;
1475 	bool dsc = false;
1476 	int target_clock = mode->clock;
1477 	int num_joined_pipes;
1478 
1479 	if (drm_connector_is_unregistered(&connector->base)) {
1480 		*status = MODE_ERROR;
1481 		return 0;
1482 	}
1483 
1484 	*status = intel_cpu_transcoder_mode_valid(display, mode);
1485 	if (*status != MODE_OK)
1486 		return 0;
1487 
1488 	if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
1489 		*status = MODE_H_ILLEGAL;
1490 		return 0;
1491 	}
1492 
1493 	if (mode->clock < 10000) {
1494 		*status = MODE_CLOCK_LOW;
1495 		return 0;
1496 	}
1497 
1498 	supports_dsc = intel_dp_has_dsc(connector) &&
1499 		       drm_dp_sink_supports_fec(connector->dp.fec_capability);
1500 
1501 	if (supports_dsc && connector->mst.port->passthrough_aux)
1502 		min_link_bpp_x16 = intel_dp_compute_min_compressed_bpp_x16(connector,
1503 									   INTEL_OUTPUT_FORMAT_RGB);
1504 
1505 	max_link_clock = intel_dp_max_link_rate(intel_dp);
1506 	max_lanes = intel_dp_max_lane_count(intel_dp);
1507 
1508 	max_rate = intel_dp_max_link_data_rate(intel_dp,
1509 					       max_link_clock, max_lanes);
1510 	mode_rate = intel_dp_link_required(max_link_clock, max_lanes,
1511 					   mode->clock, mode->hdisplay,
1512 					   min_link_bpp_x16,
1513 					   bw_overhead_flags);
1514 
1515 	/*
1516 	 * TODO:
1517 	 * - Also check if compression would allow for the mode
1518 	 *   in non-passthrough mode, i.e. the last branch device
1519 	 *   decompressing the stream. This makes a difference only if
1520 	 *   the BW on the link between the last branch device and the
1521 	 *   sink is higher than the BW on the whole MST path from the
1522 	 *   source to the last branch device. Relying on the extra BW
1523 	 *   this provides also requires the
1524 	 *   DFP_Link_Available_Payload_Bandwidth_Number described below.
1525 	 * - Calculate the overhead using drm_dp_bw_overhead() /
1526 	 *   drm_dp_bw_channel_coding_efficiency(), similarly to the
1527 	 *   compute config code, as drm_dp_calc_pbn_mode() doesn't
1528 	 *   account with all the overheads.
1529 	 * - Check here and during compute config the BW reported by
1530 	 *   DFP_Link_Available_Payload_Bandwidth_Number (or the
1531 	 *   corresponding link capabilities of the sink) in case the
1532 	 *   stream is uncompressed for it by the last branch device.
1533 	 */
1534 	ret = drm_modeset_lock(&mgr->base.lock, ctx);
1535 	if (ret)
1536 		return ret;
1537 
1538 	if (mode_rate > max_rate ||
1539 	    drm_dp_calc_pbn_mode(mode->clock, min_link_bpp_x16) > port->full_pbn) {
1540 		*status = MODE_CLOCK_HIGH;
1541 		return 0;
1542 	}
1543 
1544 	*status = MODE_CLOCK_HIGH;
1545 	for_each_joiner_candidate(connector, mode, num_joined_pipes) {
1546 		int dsc_slice_count = 0;
1547 
1548 		if (supports_dsc) {
1549 			/*
1550 			 * TBD pass the connector BPC,
1551 			 * for now U8_MAX so that max BPC on that platform would be picked
1552 			 */
1553 			int pipe_bpp = intel_dp_dsc_compute_max_bpp(connector, U8_MAX);
1554 
1555 			dsc_slice_count = intel_dp_dsc_get_slice_count(connector,
1556 								       mode->clock,
1557 								       mode->hdisplay,
1558 								       num_joined_pipes);
1559 
1560 			if (!drm_dp_is_uhbr_rate(max_link_clock))
1561 				bw_overhead_flags |= DRM_DP_BW_OVERHEAD_FEC;
1562 
1563 			dsc = intel_dp_mode_valid_with_dsc(connector,
1564 							   max_link_clock, max_lanes,
1565 							   target_clock, mode->hdisplay,
1566 							   num_joined_pipes,
1567 							   INTEL_OUTPUT_FORMAT_RGB, pipe_bpp,
1568 							   bw_overhead_flags);
1569 		}
1570 
1571 		if (intel_dp_joiner_needs_dsc(display, num_joined_pipes) && !dsc) {
1572 			*status = MODE_CLOCK_HIGH;
1573 			continue;
1574 		}
1575 
1576 		if (mode_rate > max_rate && !dsc) {
1577 			*status = MODE_CLOCK_HIGH;
1578 			continue;
1579 		}
1580 
1581 		*status = intel_mode_valid_max_plane_size(display, mode, num_joined_pipes);
1582 
1583 		if (*status != MODE_OK)
1584 			continue;
1585 
1586 		if (!dsc)
1587 			dsc_slice_count = 0;
1588 
1589 		if (!intel_dp_dotclk_valid(display,
1590 					   mode->clock,
1591 					   mode->htotal,
1592 					   dsc_slice_count,
1593 					   num_joined_pipes)) {
1594 			*status = MODE_CLOCK_HIGH;
1595 			continue;
1596 		}
1597 
1598 		break;
1599 	}
1600 
1601 	return 0;
1602 }
1603 
1604 static struct drm_encoder *
1605 mst_connector_atomic_best_encoder(struct drm_connector *_connector,
1606 				  struct drm_atomic_state *state)
1607 {
1608 	struct intel_connector *connector = to_intel_connector(_connector);
1609 	struct drm_connector_state *connector_state =
1610 		drm_atomic_get_new_connector_state(state, &connector->base);
1611 	struct intel_dp *intel_dp = connector->mst.dp;
1612 	struct intel_crtc *crtc = to_intel_crtc(connector_state->crtc);
1613 
1614 	return &intel_dp->mst.stream_encoders[crtc->pipe]->base.base;
1615 }
1616 
1617 static int
1618 mst_connector_detect_ctx(struct drm_connector *_connector,
1619 			 struct drm_modeset_acquire_ctx *ctx, bool force)
1620 {
1621 	struct intel_connector *connector = to_intel_connector(_connector);
1622 	struct intel_display *display = to_intel_display(connector);
1623 	struct intel_dp *intel_dp = connector->mst.dp;
1624 
1625 	if (!intel_display_device_enabled(display))
1626 		return connector_status_disconnected;
1627 
1628 	if (drm_connector_is_unregistered(&connector->base))
1629 		return connector_status_disconnected;
1630 
1631 	if (!intel_display_driver_check_access(display))
1632 		return connector->base.status;
1633 
1634 	intel_dp_flush_connector_commits(connector);
1635 
1636 	return drm_dp_mst_detect_port(&connector->base, ctx, &intel_dp->mst.mgr,
1637 				      connector->mst.port);
1638 }
1639 
/* Probe/atomic helper hooks for MST connectors. */
static const struct drm_connector_helper_funcs mst_connector_helper_funcs = {
	.get_modes = mst_connector_get_modes,
	.mode_valid_ctx = mst_connector_mode_valid_ctx,
	.atomic_best_encoder = mst_connector_atomic_best_encoder,
	.atomic_check = mst_connector_atomic_check,
	.detect_ctx = mst_connector_detect_ctx,
};
1647 
/* Free an MST stream encoder after cleaning up its DRM core state. */
static void mst_stream_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_dp_mst_encoder *intel_mst =
		enc_to_mst(to_intel_encoder(encoder));

	drm_encoder_cleanup(encoder);
	kfree(intel_mst);
}
1655 
/* Core encoder hooks for MST stream encoders. */
static const struct drm_encoder_funcs mst_stream_encoder_funcs = {
	.destroy = mst_stream_encoder_destroy,
};
1659 
1660 static bool mst_connector_get_hw_state(struct intel_connector *connector)
1661 {
1662 	/* This is the MST stream encoder set in ->pre_enable, if any */
1663 	struct intel_encoder *encoder = intel_attached_encoder(connector);
1664 	enum pipe pipe;
1665 
1666 	if (!encoder || !connector->base.state->crtc)
1667 		return false;
1668 
1669 	return encoder->get_hw_state(encoder, &pipe);
1670 }
1671 
/*
 * Attach the standard set of properties to a newly added MST connector.
 * Returns 0 on success or a negative error code.
 */
static int mst_topology_add_connector_properties(struct intel_dp *intel_dp,
						 struct drm_connector *_connector,
						 const char *pathprop)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = to_intel_connector(_connector);

	drm_object_attach_property(&connector->base.base,
				   display->drm->mode_config.path_property, 0);
	drm_object_attach_property(&connector->base.base,
				   display->drm->mode_config.tile_property, 0);

	intel_attach_force_audio_property(&connector->base);
	intel_attach_broadcast_rgb_property(&connector->base);

	/*
	 * Reuse the prop from the SST connector because we're
	 * not allowed to create new props after device registration.
	 */
	connector->base.max_bpc_property =
		intel_dp->attached_connector->base.max_bpc_property;
	if (connector->base.max_bpc_property)
		drm_connector_attach_max_bpc_property(&connector->base, 6, 12);

	return drm_connector_set_path_property(&connector->base, pathprop);
}
1698 
1699 static void
1700 intel_dp_mst_read_decompression_port_dsc_caps(struct intel_dp *intel_dp,
1701 					      struct intel_connector *connector)
1702 {
1703 	u8 dpcd_caps[DP_RECEIVER_CAP_SIZE];
1704 	struct drm_dp_desc desc;
1705 
1706 	if (!connector->dp.dsc_decompression_aux)
1707 		return;
1708 
1709 	if (drm_dp_read_dpcd_caps(connector->dp.dsc_decompression_aux, dpcd_caps) < 0)
1710 		return;
1711 
1712 	if (drm_dp_read_desc(connector->dp.dsc_decompression_aux, &desc,
1713 			     drm_dp_is_branch(dpcd_caps)) < 0)
1714 		return;
1715 
1716 	intel_dp_get_dsc_sink_cap(dpcd_caps[DP_DPCD_REV],
1717 				  &desc, drm_dp_is_branch(dpcd_caps),
1718 				  connector);
1719 }
1720 
/*
 * Detect sinks which require DSC to be enabled to perform the HBLANK
 * expansion needed for modes with a short HBLANK. Returns true if the
 * quirk applies to @connector.
 */
static bool detect_dsc_hblank_expansion_quirk(const struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct drm_dp_aux *aux = connector->dp.dsc_decompression_aux;
	struct drm_dp_desc desc;
	u8 dpcd[DP_RECEIVER_CAP_SIZE];

	if (!aux)
		return false;

	/*
	 * A logical port's OUI (at least for affected sinks) is all 0, so
	 * instead of that the parent port's OUI is used for identification.
	 */
	if (drm_dp_mst_port_is_logical(connector->mst.port)) {
		aux = drm_dp_mst_aux_for_parent(connector->mst.port);
		if (!aux)
			aux = &connector->mst.dp->aux;
	}

	if (drm_dp_read_dpcd_caps(aux, dpcd) < 0)
		return false;

	if (drm_dp_read_desc(aux, &desc, drm_dp_is_branch(dpcd)) < 0)
		return false;

	if (!drm_dp_has_quirk(&desc,
			      DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC))
		return false;

	/*
	 * UHBR (MST sink) devices requiring this quirk don't advertise the
	 * HBLANK expansion support. Presuming that they perform HBLANK
	 * expansion internally, or are affected by this issue on modes with a
	 * short HBLANK for other reasons.
	 */
	if (!drm_dp_128b132b_supported(dpcd) &&
	    !(dpcd[DP_RECEIVE_PORT_0_CAP_0] & DP_HBLANK_EXPANSION_CAPABLE))
		return false;

	drm_dbg_kms(display->drm,
		    "[CONNECTOR:%d:%s] DSC HBLANK expansion quirk detected\n",
		    connector->base.base.id, connector->base.name);

	return true;
}
1767 
/*
 * Topology manager callback: allocate and initialize the connector for a
 * newly detected MST port. Returns the new connector, or NULL on failure.
 */
static struct drm_connector *
mst_topology_add_connector(struct drm_dp_mst_topology_mgr *mgr,
			   struct drm_dp_mst_port *port,
			   const char *pathprop)
{
	struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst.mgr);
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_connector *connector;
	enum pipe pipe;
	int ret;

	connector = intel_connector_alloc();
	if (!connector)
		return NULL;

	connector->get_hw_state = mst_connector_get_hw_state;
	connector->sync_state = intel_dp_connector_sync_state;
	connector->mst.dp = intel_dp;
	connector->mst.port = port;
	/* Balanced by drm_dp_mst_put_port_malloc() on the error paths. */
	drm_dp_mst_get_port_malloc(port);

	ret = drm_connector_dynamic_init(display->drm, &connector->base, &mst_connector_funcs,
					 DRM_MODE_CONNECTOR_DisplayPort, NULL);
	if (ret)
		goto err_put_port;

	/* Probe DSC capabilities/quirks before exposing the connector. */
	connector->dp.dsc_decompression_aux = drm_dp_mst_dsc_aux_for_port(port);
	intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, connector);
	connector->dp.dsc_hblank_expansion_quirk =
		detect_dsc_hblank_expansion_quirk(connector);

	drm_connector_helper_add(&connector->base, &mst_connector_helper_funcs);

	/* Any pipe's fake stream encoder may drive this connector. */
	for_each_pipe(display, pipe) {
		struct drm_encoder *enc =
			&intel_dp->mst.stream_encoders[pipe]->base.base;

		ret = drm_connector_attach_encoder(&connector->base, enc);
		if (ret)
			goto err_cleanup_connector;
	}

	ret = mst_topology_add_connector_properties(intel_dp, &connector->base, pathprop);
	if (ret)
		goto err_cleanup_connector;

	/* An HDCP init failure is not fatal; continue without HDCP. */
	ret = intel_dp_hdcp_init(dig_port, connector);
	if (ret)
		drm_dbg_kms(display->drm, "[%s:%d] HDCP MST init failed, skipping.\n",
			    connector->base.name, connector->base.base.id);

	return &connector->base;

err_cleanup_connector:
	drm_connector_cleanup(&connector->base);
err_put_port:
	drm_dp_mst_put_port_malloc(port);
	intel_connector_free(connector);

	return NULL;
}
1830 
1831 static void
1832 mst_topology_poll_hpd_irq(struct drm_dp_mst_topology_mgr *mgr)
1833 {
1834 	struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst.mgr);
1835 
1836 	intel_hpd_trigger_irq(dp_to_dig_port(intel_dp));
1837 }
1838 
/* Callbacks invoked by the DP MST topology manager core. */
static const struct drm_dp_mst_topology_cbs mst_topology_cbs = {
	.add_connector = mst_topology_add_connector,
	.poll_hpd_irq = mst_topology_poll_hpd_irq,
};
1843 
1844 /* Create a fake encoder for an individual MST stream */
1845 static struct intel_dp_mst_encoder *
1846 mst_stream_encoder_create(struct intel_digital_port *dig_port, enum pipe pipe)
1847 {
1848 	struct intel_display *display = to_intel_display(dig_port);
1849 	struct intel_encoder *primary_encoder = &dig_port->base;
1850 	struct intel_dp_mst_encoder *intel_mst;
1851 	struct intel_encoder *encoder;
1852 
1853 	intel_mst = kzalloc_obj(*intel_mst);
1854 
1855 	if (!intel_mst)
1856 		return NULL;
1857 
1858 	intel_mst->pipe = pipe;
1859 	encoder = &intel_mst->base;
1860 	intel_mst->primary = dig_port;
1861 
1862 	drm_encoder_init(display->drm, &encoder->base, &mst_stream_encoder_funcs,
1863 			 DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe));
1864 
1865 	encoder->type = INTEL_OUTPUT_DP_MST;
1866 	encoder->power_domain = primary_encoder->power_domain;
1867 	encoder->port = primary_encoder->port;
1868 	encoder->cloneable = 0;
1869 	/*
1870 	 * This is wrong, but broken userspace uses the intersection
1871 	 * of possible_crtcs of all the encoders of a given connector
1872 	 * to figure out which crtcs can drive said connector. What
1873 	 * should be used instead is the union of possible_crtcs.
1874 	 * To keep such userspace functioning we must misconfigure
1875 	 * this to make sure the intersection is not empty :(
1876 	 */
1877 	encoder->pipe_mask = ~0;
1878 
1879 	encoder->compute_config = mst_stream_compute_config;
1880 	encoder->compute_config_late = mst_stream_compute_config_late;
1881 	encoder->disable = mst_stream_disable;
1882 	encoder->post_disable = mst_stream_post_disable;
1883 	encoder->post_pll_disable = mst_stream_post_pll_disable;
1884 	encoder->update_pipe = intel_ddi_update_pipe;
1885 	encoder->pre_pll_enable = mst_stream_pre_pll_enable;
1886 	encoder->pre_enable = mst_stream_pre_enable;
1887 	encoder->enable = mst_stream_enable;
1888 	encoder->audio_enable = intel_audio_codec_enable;
1889 	encoder->audio_disable = intel_audio_codec_disable;
1890 	encoder->get_hw_state = mst_stream_get_hw_state;
1891 	encoder->get_config = mst_stream_get_config;
1892 	encoder->initial_fastset_check = mst_stream_initial_fastset_check;
1893 
1894 	return intel_mst;
1895 
1896 }
1897 
/* Create the fake encoders for MST streams, one per pipe. */
static bool
mst_stream_encoders_create(struct intel_digital_port *dig_port)
{
	struct intel_display *display = to_intel_display(dig_port);
	struct intel_dp *intel_dp = &dig_port->dp;
	enum pipe pipe;

	/*
	 * NOTE(review): a failed mst_stream_encoder_create() leaves a NULL
	 * entry in stream_encoders[] which neither this function nor the
	 * users of the array check for - presumably relying on allocation
	 * not failing during driver init; worth confirming.
	 */
	for_each_pipe(display, pipe)
		intel_dp->mst.stream_encoders[pipe] = mst_stream_encoder_create(dig_port, pipe);
	return true;
}
1910 
/*
 * Initialize MST support for @dig_port: create the per-pipe fake stream
 * encoders and the topology manager. Returns 0 on success or when MST is
 * not supported on this port/platform, negative error code on failure.
 */
int
intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
{
	struct intel_display *display = to_intel_display(dig_port);
	struct intel_dp *intel_dp = &dig_port->dp;
	enum port port = dig_port->base.port;
	int ret;

	/* MST is never used on eDP or on platforms without MST support. */
	if (!HAS_DP_MST(display) || intel_dp_is_edp(intel_dp))
		return 0;

	/* No MST on port A before display ver 12. */
	if (DISPLAY_VER(display) < 12 && port == PORT_A)
		return 0;

	/* No MST on port E before display ver 11. */
	if (DISPLAY_VER(display) < 11 && port == PORT_E)
		return 0;

	/* A non-NULL cbs pointer doubles as the "MST supported" flag. */
	intel_dp->mst.mgr.cbs = &mst_topology_cbs;

	/* create encoders */
	mst_stream_encoders_create(dig_port);
	/* 16 byte max. sideband DPCD transactions, one payload per pipe. */
	ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst.mgr, display->drm,
					   &intel_dp->aux, 16,
					   INTEL_NUM_PIPES(display), conn_base_id);
	if (ret) {
		intel_dp->mst.mgr.cbs = NULL;
		return ret;
	}

	return 0;
}
1942 
1943 bool intel_dp_mst_source_support(struct intel_dp *intel_dp)
1944 {
1945 	return intel_dp->mst.mgr.cbs;
1946 }
1947 
1948 void
1949 intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port)
1950 {
1951 	struct intel_dp *intel_dp = &dig_port->dp;
1952 
1953 	if (!intel_dp_mst_source_support(intel_dp))
1954 		return;
1955 
1956 	drm_dp_mst_topology_mgr_destroy(&intel_dp->mst.mgr);
1957 	/* encoders will get killed by normal cleanup */
1958 
1959 	intel_dp->mst.mgr.cbs = NULL;
1960 }
1961 
1962 bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state)
1963 {
1964 	return crtc_state->mst_master_transcoder == crtc_state->cpu_transcoder;
1965 }
1966 
1967 bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state)
1968 {
1969 	return crtc_state->mst_master_transcoder != INVALID_TRANSCODER &&
1970 	       crtc_state->mst_master_transcoder != crtc_state->cpu_transcoder;
1971 }
1972 
1973 /**
1974  * intel_dp_mst_add_topology_state_for_connector - add MST topology state for a connector
1975  * @state: atomic state
1976  * @connector: connector to add the state for
1977  * @crtc: the CRTC @connector is attached to
1978  *
1979  * Add the MST topology state for @connector to @state.
1980  *
1981  * Returns 0 on success, negative error code on failure.
1982  */
1983 static int
1984 intel_dp_mst_add_topology_state_for_connector(struct intel_atomic_state *state,
1985 					      struct intel_connector *connector,
1986 					      struct intel_crtc *crtc)
1987 {
1988 	struct drm_dp_mst_topology_state *mst_state;
1989 
1990 	if (!connector->mst.dp)
1991 		return 0;
1992 
1993 	mst_state = drm_atomic_get_mst_topology_state(&state->base,
1994 						      &connector->mst.dp->mst.mgr);
1995 	if (IS_ERR(mst_state))
1996 		return PTR_ERR(mst_state);
1997 
1998 	mst_state->pending_crtc_mask |= drm_crtc_mask(&crtc->base);
1999 
2000 	return 0;
2001 }
2002 
2003 /**
2004  * intel_dp_mst_add_topology_state_for_crtc - add MST topology state for a CRTC
2005  * @state: atomic state
2006  * @crtc: CRTC to add the state for
2007  *
2008  * Add the MST topology state for @crtc to @state.
2009  *
2010  * Returns 0 on success, negative error code on failure.
2011  */
2012 int intel_dp_mst_add_topology_state_for_crtc(struct intel_atomic_state *state,
2013 					     struct intel_crtc *crtc)
2014 {
2015 	struct drm_connector *_connector;
2016 	struct drm_connector_state *conn_state;
2017 	int i;
2018 
2019 	for_each_new_connector_in_state(&state->base, _connector, conn_state, i) {
2020 		struct intel_connector *connector = to_intel_connector(_connector);
2021 		int ret;
2022 
2023 		if (conn_state->crtc != &crtc->base)
2024 			continue;
2025 
2026 		ret = intel_dp_mst_add_topology_state_for_connector(state, connector, crtc);
2027 		if (ret)
2028 			return ret;
2029 	}
2030 
2031 	return 0;
2032 }
2033 
2034 static struct intel_connector *
2035 get_connector_in_state_for_crtc(struct intel_atomic_state *state,
2036 				const struct intel_crtc *crtc)
2037 {
2038 	struct drm_connector_state *old_conn_state;
2039 	struct drm_connector_state *new_conn_state;
2040 	struct drm_connector *_connector;
2041 	int i;
2042 
2043 	for_each_oldnew_connector_in_state(&state->base, _connector,
2044 					   old_conn_state, new_conn_state, i) {
2045 		struct intel_connector *connector =
2046 			to_intel_connector(_connector);
2047 
2048 		if (old_conn_state->crtc == &crtc->base ||
2049 		    new_conn_state->crtc == &crtc->base)
2050 			return connector;
2051 	}
2052 
2053 	return NULL;
2054 }
2055 
2056 /**
2057  * intel_dp_mst_crtc_needs_modeset - check if changes in topology need to modeset the given CRTC
2058  * @state: atomic state
2059  * @crtc: CRTC for which to check the modeset requirement
2060  *
2061  * Check if any change in a MST topology requires a forced modeset on @crtc in
2062  * this topology. One such change is enabling/disabling the DSC decompression
2063  * state in the first branch device's UFP DPCD as required by one CRTC, while
2064  * the other @crtc in the same topology is still active, requiring a full modeset
2065  * on @crtc.
2066  */
2067 bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state,
2068 				     struct intel_crtc *crtc)
2069 {
2070 	const struct intel_connector *crtc_connector;
2071 	const struct drm_connector_state *conn_state;
2072 	const struct drm_connector *_connector;
2073 	int i;
2074 
2075 	if (!intel_crtc_has_type(intel_atomic_get_new_crtc_state(state, crtc),
2076 				 INTEL_OUTPUT_DP_MST))
2077 		return false;
2078 
2079 	crtc_connector = get_connector_in_state_for_crtc(state, crtc);
2080 
2081 	if (!crtc_connector)
2082 		/* None of the connectors in the topology needs modeset */
2083 		return false;
2084 
2085 	for_each_new_connector_in_state(&state->base, _connector, conn_state, i) {
2086 		const struct intel_connector *connector =
2087 			to_intel_connector(_connector);
2088 		const struct intel_crtc_state *new_crtc_state;
2089 		const struct intel_crtc_state *old_crtc_state;
2090 		struct intel_crtc *crtc_iter;
2091 
2092 		if (connector->mst.dp != crtc_connector->mst.dp ||
2093 		    !conn_state->crtc)
2094 			continue;
2095 
2096 		crtc_iter = to_intel_crtc(conn_state->crtc);
2097 
2098 		new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc_iter);
2099 		old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc_iter);
2100 
2101 		if (!intel_crtc_needs_modeset(new_crtc_state))
2102 			continue;
2103 
2104 		if (old_crtc_state->dsc.compression_enable ==
2105 		    new_crtc_state->dsc.compression_enable)
2106 			continue;
2107 		/*
2108 		 * Toggling the decompression flag because of this stream in
2109 		 * the first downstream branch device's UFP DPCD may reset the
2110 		 * whole branch device. To avoid the reset while other streams
2111 		 * are also active modeset the whole MST topology in this
2112 		 * case.
2113 		 */
2114 		if (connector->dp.dsc_decompression_aux ==
2115 		    &connector->mst.dp->aux)
2116 			return true;
2117 	}
2118 
2119 	return false;
2120 }
2121 
2122 /**
2123  * intel_dp_mst_prepare_probe - Prepare an MST link for topology probing
2124  * @intel_dp: DP port object
2125  *
2126  * Prepare an MST link for topology probing, programming the target
2127  * link parameters to DPCD. This step is a requirement of the enumeration
2128  * of path resources during probing.
2129  */
2130 void intel_dp_mst_prepare_probe(struct intel_dp *intel_dp)
2131 {
2132 	int link_rate = intel_dp_max_link_rate(intel_dp);
2133 	int lane_count = intel_dp_max_lane_count(intel_dp);
2134 	u8 rate_select;
2135 	u8 link_bw;
2136 
2137 	if (intel_dp->link.active)
2138 		return;
2139 
2140 	if (intel_mst_probed_link_params_valid(intel_dp, link_rate, lane_count))
2141 		return;
2142 
2143 	intel_dp_compute_rate(intel_dp, link_rate, &link_bw, &rate_select);
2144 
2145 	intel_dp_link_training_set_mode(intel_dp, link_rate, false);
2146 	intel_dp_link_training_set_bw(intel_dp, link_bw, rate_select, lane_count,
2147 				      drm_dp_enhanced_frame_cap(intel_dp->dpcd));
2148 
2149 	intel_mst_set_probed_link_params(intel_dp, link_rate, lane_count);
2150 }
2151 
2152 /*
2153  * intel_dp_mst_verify_dpcd_state - verify the MST SW enabled state wrt. the DPCD
2154  * @intel_dp: DP port object
2155  *
2156  * Verify if @intel_dp's MST enabled SW state matches the corresponding DPCD
2157  * state. A long HPD pulse - not long enough to be detected as a disconnected
2158  * state - could've reset the DPCD state, which requires tearing
2159  * down/recreating the MST topology.
2160  *
2161  * Returns %true if the SW MST enabled and DPCD states match, %false
2162  * otherwise.
2163  */
2164 bool intel_dp_mst_verify_dpcd_state(struct intel_dp *intel_dp)
2165 {
2166 	struct intel_display *display = to_intel_display(intel_dp);
2167 	struct intel_connector *connector = intel_dp->attached_connector;
2168 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2169 	struct intel_encoder *encoder = &dig_port->base;
2170 	int ret;
2171 	u8 val;
2172 
2173 	if (!intel_dp->is_mst)
2174 		return true;
2175 
2176 	ret = drm_dp_dpcd_readb(intel_dp->mst.mgr.aux, DP_MSTM_CTRL, &val);
2177 
2178 	/* Adjust the expected register value for SST + SideBand. */
2179 	if (ret < 0 || val != (DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC)) {
2180 		drm_dbg_kms(display->drm,
2181 			    "[CONNECTOR:%d:%s][ENCODER:%d:%s] MST mode got reset, removing topology (ret=%d, ctrl=0x%02x)\n",
2182 			    connector->base.base.id, connector->base.name,
2183 			    encoder->base.base.id, encoder->base.name,
2184 			    ret, val);
2185 
2186 		return false;
2187 	}
2188 
2189 	return true;
2190 }
2191