1 /* 2 * Copyright © 2008 Intel Corporation 3 * 2014 Red Hat Inc. 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice (including the next 13 * paragraph) shall be included in all copies or substantial portions of the 14 * Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 22 * IN THE SOFTWARE. 
23 * 24 */ 25 26 #include <linux/log2.h> 27 #include <linux/math.h> 28 29 #include <drm/drm_atomic.h> 30 #include <drm/drm_atomic_helper.h> 31 #include <drm/drm_edid.h> 32 #include <drm/drm_fixed.h> 33 #include <drm/drm_print.h> 34 #include <drm/drm_probe_helper.h> 35 36 #include "intel_atomic.h" 37 #include "intel_audio.h" 38 #include "intel_connector.h" 39 #include "intel_crtc.h" 40 #include "intel_ddi.h" 41 #include "intel_de.h" 42 #include "intel_display_driver.h" 43 #include "intel_display_regs.h" 44 #include "intel_display_types.h" 45 #include "intel_display_utils.h" 46 #include "intel_dp.h" 47 #include "intel_dp_hdcp.h" 48 #include "intel_dp_link_training.h" 49 #include "intel_dp_mst.h" 50 #include "intel_dp_test.h" 51 #include "intel_dp_tunnel.h" 52 #include "intel_dpio_phy.h" 53 #include "intel_hdcp.h" 54 #include "intel_hotplug.h" 55 #include "intel_link_bw.h" 56 #include "intel_pfit.h" 57 #include "intel_psr.h" 58 #include "intel_step.h" 59 #include "intel_vdsc.h" 60 #include "intel_vrr.h" 61 #include "skl_scaler.h" 62 63 /* 64 * DP MST (DisplayPort Multi-Stream Transport) 65 * 66 * MST support on the source depends on the platform and port. DP initialization 67 * sets up MST for each MST capable encoder. This will become the primary 68 * encoder for the port. 69 * 70 * MST initialization of each primary encoder creates MST stream encoders, one 71 * per pipe, and initializes the MST topology manager. The MST stream encoders 72 * are sometimes called "fake encoders", because they're virtual, not 73 * physical. Thus there are (number of MST capable ports) x (number of pipes) 74 * MST stream encoders in total. 75 * 76 * Decision to use MST for a sink happens at detect on the connector attached to 77 * the primary encoder, and this will not change while the sink is connected. We 78 * always use MST when possible, including for SST sinks with sideband messaging 79 * support. 
 * The connectors for the MST streams are added and removed dynamically by the
 * topology manager. Their connection status is also determined by the topology
 * manager.
 *
 * On hardware, each transcoder may be associated with a single DDI
 * port. Multiple transcoders may be associated with the same DDI port only if
 * the port is in MST mode.
 *
 * On TGL+, all the transcoders streaming on the same DDI port will indicate a
 * primary transcoder; the TGL_DP_TP_CTL and TGL_DP_TP_STATUS registers are
 * relevant only on the primary transcoder. Prior to that, they are port
 * registers.
 */

/* From fake MST stream encoder to primary encoder */
static struct intel_encoder *to_primary_encoder(struct intel_encoder *encoder)
{
	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
	struct intel_digital_port *dig_port = intel_mst->primary;

	return &dig_port->base;
}

/* From fake MST stream encoder to primary DP */
static struct intel_dp *to_primary_dp(struct intel_encoder *encoder)
{
	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
	struct intel_digital_port *dig_port = intel_mst->primary;

	return &dig_port->dp;
}

/* Number of MST streams currently active on @intel_dp's link */
int intel_dp_mst_active_streams(struct intel_dp *intel_dp)
{
	return intel_dp->mst.active_streams;
}

/*
 * Decrement the active stream count.
 *
 * Returns true if the stream being disabled was the last active one on the
 * link (also when the count is already 0, after a WARN).
 */
static bool intel_dp_mst_dec_active_streams(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);

	drm_dbg_kms(display->drm, "active MST streams %d -> %d\n",
		    intel_dp->mst.active_streams, intel_dp->mst.active_streams - 1);

	if (drm_WARN_ON(display->drm, intel_dp->mst.active_streams == 0))
		return true;

	return --intel_dp->mst.active_streams == 0;
}

/*
 * Increment the active stream count.
 *
 * Returns true if the stream being enabled is the first active one on the
 * link.
 */
static bool intel_dp_mst_inc_active_streams(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);

	drm_dbg_kms(display->drm, "active MST streams %d -> %d\n",
		    intel_dp->mst.active_streams, intel_dp->mst.active_streams + 1);

	return intel_dp->mst.active_streams++ == 0;
}

/*
 * Maximum bpp allowed by the DSC->DPT interface on UHBR links before LNL.
 * Returns 0 when no such limit applies (non-UHBR, LNL+ or uncompressed).
 */
/* TODO: return a bpp_x16 value */
static int intel_dp_mst_max_dpt_bpp(const struct intel_crtc_state *crtc_state,
				    bool dsc)
{
	struct intel_display *display = to_intel_display(crtc_state);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	if (!intel_dp_is_uhbr(crtc_state) || DISPLAY_VER(display) >= 20 || !dsc)
		return 0;

	/*
	 * DSC->DPT interface width:
	 * ICL-MTL: 72 bits (each branch has 72 bits, only left branch is used)
	 * LNL+: 144 bits (not a bottleneck in any config)
	 *
	 * Bspec/49259 suggests that the FEC overhead needs to be
	 * applied here, though HW people claim that neither this FEC
	 * or any other overhead is applicable here (that is the actual
	 * available_bw is just symbol_clock * 72). However based on
	 * testing on MTL-P the
	 * - DELL U3224KBA display
	 * - Unigraf UCD-500 CTS test sink
	 * devices the
	 * - 5120x2880/995.59Mhz
	 * - 6016x3384/1357.23Mhz
	 * - 6144x3456/1413.39Mhz
	 * modes (all the ones having a DPT limit on the above devices),
	 * both the channel coding efficiency and an additional 3%
	 * overhead needs to be accounted for.
	 */
	return div64_u64(mul_u32_u32(intel_dp_link_symbol_clock(crtc_state->port_clock) * 72,
				     drm_dp_bw_channel_coding_efficiency(true)),
			 mul_u32_u32(adjusted_mode->crtc_clock, 1030000));
}

/*
 * Link BW overhead for the stream, always including the MST and - if enabled -
 * the FEC overhead; @ssc selects whether the SSC reference clock overhead is
 * accounted for as well.
 */
static int intel_dp_mst_bw_overhead(const struct intel_crtc_state *crtc_state,
				    bool ssc, int dsc_slice_count, int bpp_x16)
{
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	unsigned long flags = DRM_DP_BW_OVERHEAD_MST;

	flags |= ssc ? DRM_DP_BW_OVERHEAD_SSC_REF_CLK : 0;
	flags |= crtc_state->fec_enable ? DRM_DP_BW_OVERHEAD_FEC : 0;

	return intel_dp_link_bw_overhead(crtc_state->port_clock,
					 crtc_state->lane_count,
					 adjusted_mode->hdisplay,
					 dsc_slice_count,
					 bpp_x16,
					 flags);
}

/* Compute the stream's data/link M/N values and the derived TU size. */
static void intel_dp_mst_compute_m_n(const struct intel_crtc_state *crtc_state,
				     int overhead,
				     int bpp_x16,
				     struct intel_link_m_n *m_n)
{
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	/* TODO: Check WA 14013163432 to set data M/N for full BW utilization. */
	intel_link_compute_m_n(bpp_x16, crtc_state->lane_count,
			       adjusted_mode->crtc_clock,
			       crtc_state->port_clock,
			       overhead,
			       m_n);

	m_n->tu = DIV_ROUND_UP_ULL(mul_u32_u32(m_n->data_m, 64), m_n->data_n);
}

/* PBN value for the stream, based on its effective data rate. */
static int intel_dp_mst_calc_pbn(int pixel_clock, int bpp_x16, int bw_overhead)
{
	int effective_data_rate =
		intel_dp_effective_data_rate(pixel_clock, bpp_x16, bw_overhead);

	/*
	 * TODO: Use drm_dp_calc_pbn_mode() instead, once it's converted
	 * to calculate PBN with the BW overhead passed to it.
	 */
	return DIV_ROUND_UP(effective_data_rate * 64, 54 * 1000);
}

/* DSC slice count for the stream, taking joined pipes into account. */
static int intel_dp_mst_dsc_get_slice_count(const struct intel_connector *connector,
					    const struct intel_crtc_state *crtc_state)
{
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	int num_joined_pipes = intel_crtc_num_joined_pipes(crtc_state);

	return intel_dp_dsc_get_slice_count(connector,
					    adjusted_mode->clock,
					    adjusted_mode->hdisplay,
					    num_joined_pipes);
}

/*
 * Update the topology state's slot configuration for the link's channel
 * coding (128b/132b on UHBR, 8b/10b otherwise).
 */
static void mst_stream_update_slots(const struct intel_crtc_state *crtc_state,
				    struct drm_dp_mst_topology_state *topology_state)
{
	u8 link_coding_cap = intel_dp_is_uhbr(crtc_state) ?
		DP_CAP_ANSI_128B132B : DP_CAP_ANSI_8B10B;

	drm_dp_mst_update_slots(topology_state, link_coding_cap);
}

/*
 * intel_dp_mtp_tu_compute_config - compute the stream's TU/time-slot allocation
 * @intel_dp: primary DP encoder
 * @crtc_state: the stream's CRTC state
 * @conn_state: the stream's connector state
 * @min_bpp_x16: minimum link bpp in .4 fixed point format
 * @max_bpp_x16: maximum link bpp in .4 fixed point format
 * @bpp_step_x16: bpp iteration step in .4 fixed point format (0: single try,
 *	requiring @min_bpp_x16 == @max_bpp_x16)
 * @dsc: true if the stream is compressed
 *
 * Walk the [@min_bpp_x16, @max_bpp_x16] range downwards, looking for the
 * highest bpp for which the required number of time slots can be allocated,
 * setting the stream's M/N/TU values and - for MST - the atomic payload
 * allocation accordingly.
 *
 * Returns 0 on success, or a negative error code (-EDEADLK if the atomic
 * state must be backed off and retried, -ENOSPC/-EINVAL on an allocation
 * failure).
 */
int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
				   struct intel_crtc_state *crtc_state,
				   struct drm_connector_state *conn_state,
				   int min_bpp_x16, int max_bpp_x16, int bpp_step_x16, bool dsc)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_atomic_state *state = crtc_state->uapi.state;
	struct drm_dp_mst_topology_state *mst_state = NULL;
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
	int bpp_x16, slots = -EINVAL;
	int dsc_slice_count = 0;
	int max_dpt_bpp_x16;

	/* shouldn't happen, sanity check */
	drm_WARN_ON(display->drm, !dsc && (fxp_q4_to_frac(min_bpp_x16) ||
					   fxp_q4_to_frac(max_bpp_x16) ||
					   fxp_q4_to_frac(bpp_step_x16)));

	if (!bpp_step_x16) {
		/* Allow using zero step only to indicate single try for a given bpp. */
		drm_WARN_ON(display->drm, min_bpp_x16 != max_bpp_x16);
		bpp_step_x16 = 1;
	}

	if (is_mst) {
		mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst.mgr);
		if (IS_ERR(mst_state))
			return PTR_ERR(mst_state);

		mst_state->pbn_div = drm_dp_get_vc_payload_bw(crtc_state->port_clock,
							      crtc_state->lane_count);

		mst_stream_update_slots(crtc_state, mst_state);
	}

	/*
	 * NOTE: The following must reset crtc_state->fec_enable for UHBR/DSC
	 * after it was set by intel_dp_dsc_compute_config() ->
	 * intel_dp_needs_8b10b_fec().
	 */
	crtc_state->fec_enable = intel_dp_needs_8b10b_fec(crtc_state, dsc);
	/*
	 * If FEC gets enabled only because of another compressed stream, FEC
	 * may not be supported for this uncompressed stream on the whole link
	 * path until the sink DPRX. In this case a downstream branch device
	 * will disable FEC for the uncompressed stream as expected and so the
	 * FEC support doesn't need to be checked for this uncompressed stream.
	 */
	if (crtc_state->fec_enable && dsc &&
	    !intel_dp_supports_fec(intel_dp, connector, crtc_state))
		return -EINVAL;

	max_dpt_bpp_x16 = fxp_q4_from_int(intel_dp_mst_max_dpt_bpp(crtc_state, dsc));
	if (max_dpt_bpp_x16 && max_bpp_x16 > max_dpt_bpp_x16) {
		drm_dbg_kms(display->drm, "Limiting bpp to max DPT bpp (" FXP_Q4_FMT " -> " FXP_Q4_FMT ")\n",
			    FXP_Q4_ARGS(max_bpp_x16), FXP_Q4_ARGS(max_dpt_bpp_x16));
		max_bpp_x16 = max_dpt_bpp_x16;
	}

	drm_dbg_kms(display->drm, "Looking for slots in range min bpp " FXP_Q4_FMT " max bpp " FXP_Q4_FMT "\n",
		    FXP_Q4_ARGS(min_bpp_x16), FXP_Q4_ARGS(max_bpp_x16));

	if (dsc) {
		dsc_slice_count = intel_dp_mst_dsc_get_slice_count(connector, crtc_state);
		if (!dsc_slice_count) {
			drm_dbg_kms(display->drm, "Can't get valid DSC slice count\n");

			return -ENOSPC;
		}
	}

	drm_WARN_ON(display->drm, min_bpp_x16 % bpp_step_x16 || max_bpp_x16 % bpp_step_x16);

	/* Try bpps from the highest down, stopping at the first that fits. */
	for (bpp_x16 = max_bpp_x16; bpp_x16 >= min_bpp_x16; bpp_x16 -= bpp_step_x16) {
		int local_bw_overhead;
		int link_bpp_x16;

		drm_dbg_kms(display->drm, "Trying bpp " FXP_Q4_FMT "\n", FXP_Q4_ARGS(bpp_x16));

		if (dsc && !intel_dp_dsc_valid_compressed_bpp(intel_dp, bpp_x16)) {
			/* SST must have validated the single bpp tried here already earlier. */
			drm_WARN_ON(display->drm, !is_mst);
			continue;
		}

		link_bpp_x16 = dsc ? bpp_x16 :
			intel_dp_output_format_link_bpp_x16(crtc_state->output_format,
							    fxp_q4_to_int(bpp_x16));

		local_bw_overhead = intel_dp_mst_bw_overhead(crtc_state,
							     false, dsc_slice_count, link_bpp_x16);

		intel_dp_mst_compute_m_n(crtc_state,
					 local_bw_overhead,
					 link_bpp_x16,
					 &crtc_state->dp_m_n);

		if (is_mst) {
			int remote_bw_overhead;
			int remote_tu;
			fixed20_12 pbn;

			remote_bw_overhead = intel_dp_mst_bw_overhead(crtc_state,
								      true, dsc_slice_count, link_bpp_x16);

			/*
			 * The TU size programmed to the HW determines which slots in
			 * an MTP frame are used for this stream, which needs to match
			 * the payload size programmed to the first downstream branch
			 * device's payload table.
			 *
			 * Note that atm the payload's PBN value DRM core sends via
			 * the ALLOCATE_PAYLOAD side-band message matches the payload
			 * size (which it calculates from the PBN value) it programs
			 * to the first branch device's payload table. The allocation
			 * in the payload table could be reduced though (to
			 * crtc_state->dp_m_n.tu), provided that the driver doesn't
			 * enable SSC on the corresponding link.
			 */
			pbn.full = dfixed_const(intel_dp_mst_calc_pbn(adjusted_mode->crtc_clock,
								      link_bpp_x16,
								      remote_bw_overhead));
			remote_tu = DIV_ROUND_UP(pbn.full, mst_state->pbn_div.full);

			/*
			 * Aligning the TUs ensures that symbols consisting of multiple
			 * (4) symbol cycles don't get split between two consecutive
			 * MTPs, as required by Bspec.
			 * TODO: remove the alignment restriction for 128b/132b links
			 * on some platforms, where Bspec allows this.
			 */
			remote_tu = ALIGN(remote_tu, 4 / crtc_state->lane_count);

			/*
			 * Also align PBNs accordingly, since MST core will derive its
			 * own copy of TU from the PBN in drm_dp_atomic_find_time_slots().
			 * The above comment about the difference between the PBN
			 * allocated for the whole path and the TUs allocated for the
			 * first branch device's link also applies here.
			 */
			pbn.full = remote_tu * mst_state->pbn_div.full;

			drm_WARN_ON(display->drm, remote_tu < crtc_state->dp_m_n.tu);
			crtc_state->dp_m_n.tu = remote_tu;

			slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst.mgr,
							      connector->mst.port,
							      dfixed_trunc(pbn));

			/* TODO: Check this already in drm_dp_atomic_find_time_slots(). */
			if (slots > mst_state->total_avail_slots)
				slots = -EINVAL;
		} else {
			/* Same as above for remote_tu */
			crtc_state->dp_m_n.tu = ALIGN(crtc_state->dp_m_n.tu,
						      4 / crtc_state->lane_count);

			if (crtc_state->dp_m_n.tu <= 64)
				slots = crtc_state->dp_m_n.tu;
			else
				slots = -EINVAL;
		}

		if (slots == -EDEADLK)
			return slots;

		if (slots >= 0) {
			drm_WARN_ON(display->drm, slots != crtc_state->dp_m_n.tu);

			break;
		}
	}

	if (slots < 0) {
		drm_dbg_kms(display->drm, "failed finding vcpi slots:%d\n",
			    slots);
		return slots;
	}

	if (!dsc)
		crtc_state->pipe_bpp = fxp_q4_to_int(bpp_x16);
	else
		crtc_state->dsc.compressed_bpp_x16 = bpp_x16;

	drm_dbg_kms(display->drm, "Got %d slots for pipe bpp " FXP_Q4_FMT " dsc %d\n",
		    slots, FXP_Q4_ARGS(bpp_x16), dsc);

	return 0;
}

/* Compute the stream's link config for the uncompressed case. */
static int mst_stream_compute_link_config(struct intel_dp *intel_dp,
					  struct intel_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state,
					  const struct link_config_limits *limits)
{
	crtc_state->lane_count = limits->max_lane_count;
	crtc_state->port_clock = limits->max_rate;

	/*
	 * FIXME: allocate the BW according to link_bpp, which in the case of
	 * YUV420 is only half of the pipe bpp value.
	 */
	/* NOTE(review): step presumably 2 (bpc step) * 3 (components) - confirm */
	return intel_dp_mtp_tu_compute_config(intel_dp, crtc_state, conn_state,
					      limits->link.min_bpp_x16,
					      limits->link.max_bpp_x16,
					      fxp_q4_from_int(2 * 3), false);
}

/* Compute the stream's link config for the DSC-compressed case. */
static int mst_stream_dsc_compute_link_config(struct intel_dp *intel_dp,
					      struct intel_crtc_state *crtc_state,
					      struct drm_connector_state *conn_state,
					      const struct link_config_limits *limits)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = to_intel_connector(conn_state->connector);

	crtc_state->pipe_bpp = limits->pipe.max_bpp;

	drm_dbg_kms(display->drm,
		    "DSC Sink supported compressed min bpp " FXP_Q4_FMT " compressed max bpp " FXP_Q4_FMT "\n",
		    FXP_Q4_ARGS(limits->link.min_bpp_x16), FXP_Q4_ARGS(limits->link.max_bpp_x16));

	crtc_state->lane_count = limits->max_lane_count;
	crtc_state->port_clock = limits->max_rate;

	return intel_dp_mtp_tu_compute_config(intel_dp, crtc_state, conn_state,
					      limits->link.min_bpp_x16,
					      limits->link.max_bpp_x16,
					      intel_dp_dsc_bpp_step_x16(connector),
					      true);
}

/* Duration of the mode's horizontal blanking period in nsec. */
static int mode_hblank_period_ns(const struct drm_display_mode *mode)
{
	return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(mode->htotal - mode->hdisplay,
						 NSEC_PER_SEC / 1000),
				     mode->crtc_clock);
}

/*
 * Whether a sink with the hblank expansion quirk needs DSC for the given
 * config: short hblank periods require compression on such sinks.
 */
static bool
hblank_expansion_quirk_needs_dsc(const struct intel_connector *connector,
				 const struct intel_crtc_state *crtc_state,
				 const struct link_config_limits *limits)
{
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	bool is_uhbr_sink = connector->mst.dp &&
		drm_dp_128b132b_supported(connector->mst.dp->dpcd);
	int hblank_limit = is_uhbr_sink ? 500 : 300;

	if (!connector->dp.dsc_hblank_expansion_quirk)
		return false;

	if (is_uhbr_sink && !drm_dp_is_uhbr_rate(limits->max_rate))
		return false;

	if (mode_hblank_period_ns(adjusted_mode) > hblank_limit)
		return false;

	if (!intel_dp_mst_dsc_get_slice_count(connector, crtc_state))
		return false;

	return true;
}

/*
 * Bump the link's minimum bpp as required by the hblank expansion quirk.
 *
 * Returns false if the config can't satisfy the quirk (DSC required but this
 * is the uncompressed attempt, or the required min bpp exceeds the max).
 */
static bool
adjust_limits_for_dsc_hblank_expansion_quirk(struct intel_dp *intel_dp,
					     const struct intel_connector *connector,
					     const struct intel_crtc_state *crtc_state,
					     struct link_config_limits *limits,
					     bool dsc)
{
	struct intel_display *display = to_intel_display(connector);
	const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	int min_bpp_x16 = limits->link.min_bpp_x16;

	if (!hblank_expansion_quirk_needs_dsc(connector, crtc_state, limits))
		return true;

	if (!dsc) {
		if (intel_dp_supports_dsc(intel_dp, connector, crtc_state)) {
			drm_dbg_kms(display->drm,
				    "[CRTC:%d:%s][CONNECTOR:%d:%s] DSC needed by hblank expansion quirk\n",
				    crtc->base.base.id, crtc->base.name,
				    connector->base.base.id, connector->base.name);
			return false;
		}

		drm_dbg_kms(display->drm,
			    "[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to 24 due to hblank expansion quirk\n",
			    crtc->base.base.id, crtc->base.name,
			    connector->base.base.id, connector->base.name);

		if (limits->link.max_bpp_x16 < fxp_q4_from_int(24))
			return false;

		limits->link.min_bpp_x16 = fxp_q4_from_int(24);

		return true;
	}

	drm_WARN_ON(display->drm, limits->min_rate != limits->max_rate);

	if (limits->max_rate < 540000)
		min_bpp_x16 = fxp_q4_from_int(13);
	else if (limits->max_rate < 810000)
		min_bpp_x16 = fxp_q4_from_int(10);

	if (limits->link.min_bpp_x16 >= min_bpp_x16)
		return true;

	drm_dbg_kms(display->drm,
		    "[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to " FXP_Q4_FMT " in DSC mode due to hblank expansion quirk\n",
		    crtc->base.base.id, crtc->base.name,
		    connector->base.base.id, connector->base.name,
		    FXP_Q4_ARGS(min_bpp_x16));

	if (limits->link.max_bpp_x16 < min_bpp_x16)
		return false;

	limits->link.min_bpp_x16 = min_bpp_x16;

	return true;
}

/* Compute the stream's link config limits, applying the hblank quirk. */
static bool
mst_stream_compute_config_limits(struct intel_dp *intel_dp,
				 struct drm_connector_state *conn_state,
				 struct intel_crtc_state *crtc_state,
				 bool dsc,
				 struct link_config_limits *limits)
{
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);

	if (!intel_dp_compute_config_limits(intel_dp, conn_state,
					    crtc_state, false, dsc,
					    limits))
		return false;

	return adjust_limits_for_dsc_hblank_expansion_quirk(intel_dp,
							    connector,
							    crtc_state,
							    limits,
							    dsc);
}

/*
 * Compute the CRTC state for an MST stream, first trying uncompressed
 * output and falling back to DSC if the mode doesn't fit the available BW.
 */
static int mst_stream_compute_config(struct intel_encoder *encoder,
				     struct intel_crtc_state *pipe_config,
				     struct drm_connector_state *conn_state)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct intel_dp *intel_dp = to_primary_dp(encoder);
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	struct link_config_limits limits;
	bool dsc_needed, joiner_needs_dsc;
	int num_joined_pipes;
	int ret = 0;

	if (pipe_config->fec_enable &&
	    !intel_dp_supports_fec(intel_dp, connector, pipe_config))
		return -EINVAL;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return -EINVAL;

	num_joined_pipes = intel_dp_num_joined_pipes(intel_dp, connector,
						     adjusted_mode->crtc_hdisplay,
						     adjusted_mode->crtc_clock);
	if (num_joined_pipes > 1)
		pipe_config->joiner_pipes = GENMASK(crtc->pipe + num_joined_pipes - 1, crtc->pipe);

	pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->has_pch_encoder = false;

	joiner_needs_dsc = intel_dp_joiner_needs_dsc(display, num_joined_pipes);

	dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en ||
		!mst_stream_compute_config_limits(intel_dp, conn_state,
						  pipe_config, false, &limits);

	if (!dsc_needed) {
		ret = mst_stream_compute_link_config(intel_dp, pipe_config,
						     conn_state, &limits);

		if (ret == -EDEADLK)
			return ret;

		if (ret)
			dsc_needed = true;
	}

	if (dsc_needed && !intel_dp_supports_dsc(intel_dp, connector, pipe_config)) {
		drm_dbg_kms(display->drm, "DSC required but not available\n");
		return -EINVAL;
	}

	/* enable compression if the mode doesn't fit available BW */
	if (dsc_needed) {
		drm_dbg_kms(display->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
			    str_yes_no(ret), str_yes_no(joiner_needs_dsc),
			    str_yes_no(intel_dp->force_dsc_en));


		if (!mst_stream_compute_config_limits(intel_dp, conn_state,
						      pipe_config, true,
						      &limits))
			return -EINVAL;

		/*
		 * FIXME: As bpc is hardcoded to 8, as mentioned above,
		 * WARN and ignore the debug flag force_dsc_bpc for now.
		 */
		drm_WARN(display->drm, intel_dp->force_dsc_bpc,
			 "Cannot Force BPC for MST\n");
		/*
		 * Try to get at least some timeslots and then see, if
		 * we can fit there with DSC.
		 */
		drm_dbg_kms(display->drm, "Trying to find VCPI slots in DSC mode\n");

		ret = mst_stream_dsc_compute_link_config(intel_dp, pipe_config,
							 conn_state, &limits);
		if (ret < 0)
			return ret;

		ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
						  conn_state, &limits,
						  pipe_config->dp_m_n.tu);
	}

	if (ret)
		return ret;

	pipe_config->limited_color_range =
		intel_dp_limited_color_range(pipe_config, conn_state);

	if (display->platform.geminilake || display->platform.broxton)
		pipe_config->lane_lat_optim_mask =
			bxt_dpio_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);

	ret = intel_dp_compute_min_hblank(pipe_config, conn_state);
	if (ret)
		return ret;

	intel_vrr_compute_config(pipe_config, conn_state);

	intel_dp_audio_compute_config(encoder, pipe_config, conn_state);

	intel_ddi_compute_min_voltage_level(pipe_config);

	intel_psr_compute_config(intel_dp, pipe_config, conn_state);

	return intel_dp_tunnel_atomic_compute_stream_bw(state, intel_dp, connector,
							pipe_config);
}

/*
 * Iterate over all connectors and return a mask of
 * all CPU transcoders streaming over the same DP link.
 */
static unsigned int
intel_dp_mst_transcoder_mask(struct intel_atomic_state *state,
			     struct intel_dp *mst_port)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_digital_connector_state *conn_state;
	struct intel_connector *connector;
	u8 transcoders = 0;
	int i;

	/* Only relevant on TGL+ where streams share a master transcoder. */
	if (DISPLAY_VER(display) < 12)
		return 0;

	for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
		const struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (connector->mst.dp != mst_port || !conn_state->base.crtc)
			continue;

		crtc = to_intel_crtc(conn_state->base.crtc);
		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

		if (!crtc_state->hw.active)
			continue;

		transcoders |= BIT(crtc_state->cpu_transcoder);
	}

	return transcoders;
}

/*
 * Mask of pipes in @state whose connector is @parent_port or any port
 * downstream of it in the given topology. @parent_port == NULL matches all
 * ports of the topology.
 */
static u8 get_pipes_downstream_of_mst_port(struct intel_atomic_state *state,
					   struct drm_dp_mst_topology_mgr *mst_mgr,
					   struct drm_dp_mst_port *parent_port)
{
	const struct intel_digital_connector_state *conn_state;
	struct intel_connector *connector;
	u8 mask = 0;
	int i;

	for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
		if (!conn_state->base.crtc)
			continue;

		if (&connector->mst.dp->mst.mgr != mst_mgr)
			continue;

		if (connector->mst.port != parent_port &&
		    !drm_dp_mst_port_downstream_of_parent(mst_mgr,
							  connector->mst.port,
							  parent_port))
			continue;

		mask |= BIT(to_intel_crtc(conn_state->base.crtc)->pipe);
	}

	return mask;
}

/*
 * Force a modeset on all streams of the topology if DSC is enabled on only
 * some of them, so all streams get their link config recomputed coherently.
 *
 * Returns 0 if no change is needed, -EAGAIN after updating @limits, or
 * another negative error code on failure.
 */
static int intel_dp_mst_check_dsc_change(struct intel_atomic_state *state,
					 struct drm_dp_mst_topology_mgr *mst_mgr,
					 struct intel_link_bw_limits *limits)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc *crtc;
	u8 mst_pipe_mask;
	u8 dsc_pipe_mask = 0;
	int ret;

	/* NULL parent port: consider every stream in the topology. */
	mst_pipe_mask = get_pipes_downstream_of_mst_port(state, mst_mgr, NULL);

	for_each_intel_crtc_in_pipe_mask(display->drm, crtc, mst_pipe_mask) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_new_crtc_state(state, crtc);

		/* Atomic connector check should've added all the MST CRTCs. */
		if (drm_WARN_ON(display->drm, !crtc_state))
			return -EINVAL;

		if (intel_dsc_enabled_on_link(crtc_state))
			dsc_pipe_mask |= BIT(crtc->pipe);
	}

	/* No change needed if DSC is off or on for all streams uniformly. */
	if (!dsc_pipe_mask || mst_pipe_mask == dsc_pipe_mask)
		return 0;

	limits->link_dsc_pipes |= mst_pipe_mask;

	ret = intel_modeset_pipes_in_mask_early(state, "MST DSC",
						mst_pipe_mask);

	return ret ? : -EAGAIN;
}

/*
 * Check the topology's total BW usage, reducing the bpp limits of the
 * streams downstream of an overallocated MST port if needed.
 *
 * Returns 0 if the BW fits, -EAGAIN after updating @limits, or another
 * negative error code on failure.
 */
static int intel_dp_mst_check_bw(struct intel_atomic_state *state,
				 struct drm_dp_mst_topology_mgr *mst_mgr,
				 struct drm_dp_mst_topology_state *mst_state,
				 struct intel_link_bw_limits *limits)
{
	struct drm_dp_mst_port *mst_port;
	u8 mst_port_pipes;
	int ret;

	ret = drm_dp_mst_atomic_check_mgr(&state->base, mst_mgr, mst_state, &mst_port);
	if (ret != -ENOSPC)
		return ret;

	mst_port_pipes = get_pipes_downstream_of_mst_port(state, mst_mgr, mst_port);

	ret = intel_link_bw_reduce_bpp(state, limits,
				       mst_port_pipes, "MST link BW");

	return ret ? : -EAGAIN;
}

/**
 * intel_dp_mst_atomic_check_link - check all modeset MST link configuration
 * @state: intel atomic state
 * @limits: link BW limits
 *
 * Check the link configuration for all modeset MST outputs. If the
 * configuration is invalid @limits will be updated if possible to
 * reduce the total BW, after which the configuration for all CRTCs in
 * @state must be recomputed with the updated @limits.
843 * 844 * Returns: 845 * - 0 if the configuration is valid 846 * - %-EAGAIN, if the configuration is invalid and @limits got updated 847 * with fallback values with which the configuration of all CRTCs in 848 * @state must be recomputed 849 * - Other negative error, if the configuration is invalid without a 850 * fallback possibility, or the check failed for another reason 851 */ 852 int intel_dp_mst_atomic_check_link(struct intel_atomic_state *state, 853 struct intel_link_bw_limits *limits) 854 { 855 struct drm_dp_mst_topology_mgr *mgr; 856 struct drm_dp_mst_topology_state *mst_state; 857 int ret; 858 int i; 859 860 for_each_new_mst_mgr_in_state(&state->base, mgr, mst_state, i) { 861 ret = intel_dp_mst_check_dsc_change(state, mgr, limits); 862 if (ret) 863 return ret; 864 865 ret = intel_dp_mst_check_bw(state, mgr, mst_state, 866 limits); 867 if (ret) 868 return ret; 869 } 870 871 return 0; 872 } 873 874 static int mst_stream_compute_config_late(struct intel_encoder *encoder, 875 struct intel_crtc_state *crtc_state, 876 struct drm_connector_state *conn_state) 877 { 878 struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state); 879 struct intel_dp *intel_dp = to_primary_dp(encoder); 880 881 /* lowest numbered transcoder will be designated master */ 882 crtc_state->mst_master_transcoder = 883 ffs(intel_dp_mst_transcoder_mask(state, intel_dp)) - 1; 884 885 return 0; 886 } 887 888 /* 889 * If one of the connectors in a MST stream needs a modeset, mark all CRTCs 890 * that shares the same MST stream as mode changed, 891 * intel_modeset_pipe_config()+intel_crtc_check_fastset() will take care to do 892 * a fastset when possible. 893 * 894 * On TGL+ this is required since each stream go through a master transcoder, 895 * so if the master transcoder needs modeset, all other streams in the 896 * topology need a modeset. 
 * All platforms need to add the atomic state
 * for all streams in the topology, since a modeset on one may require
 * changing the MST link BW usage of the others, which in turn needs a
 * recomputation of the corresponding CRTC states.
 */
static int
mst_connector_atomic_topology_check(struct intel_connector *connector,
				    struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(connector);
	struct drm_connector_list_iter connector_list_iter;
	struct intel_connector *connector_iter;
	int ret = 0;

	if (!intel_connector_needs_modeset(state, &connector->base))
		return 0;

	/* Pull every other connector of the same topology into @state. */
	drm_connector_list_iter_begin(display->drm, &connector_list_iter);
	for_each_intel_connector_iter(connector_iter, &connector_list_iter) {
		struct intel_digital_connector_state *conn_iter_state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (connector_iter->mst.dp != connector->mst.dp ||
		    connector_iter == connector)
			continue;

		conn_iter_state = intel_atomic_get_digital_connector_state(state,
									   connector_iter);
		if (IS_ERR(conn_iter_state)) {
			ret = PTR_ERR(conn_iter_state);
			break;
		}

		if (!conn_iter_state->base.crtc)
			continue;

		crtc = to_intel_crtc(conn_iter_state->base.crtc);
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			break;
		}

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			break;
		/* Let the fastset check decide whether a full modeset is needed. */
		crtc_state->uapi.mode_changed = true;
	}
	drm_connector_list_iter_end(&connector_list_iter);

	return ret;
}

/* Atomic check for MST stream connectors. */
static int
mst_connector_atomic_check(struct drm_connector *_connector,
			   struct drm_atomic_state *_state)
{
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct intel_connector *connector = to_intel_connector(_connector);
	int ret;

	ret = intel_digital_connector_atomic_check(&connector->base, &state->base);
	if (ret)
		return ret;

	ret = mst_connector_atomic_topology_check(connector, state);
	if (ret)
		return ret;

	if (intel_connector_needs_modeset(state, &connector->base)) {
		ret = intel_dp_tunnel_atomic_check_state(state,
							 connector->mst.dp,
							 connector);
		if (ret)
			return ret;
	}

	return drm_dp_atomic_release_time_slots(&state->base,
						&connector->mst.dp->mst.mgr,
						connector->mst.port);
}

/* Disable an MST stream: HDCP and sink-side decompression teardown. */
static void mst_stream_disable(struct intel_atomic_state *state,
			       struct intel_encoder *encoder,
			       const struct intel_crtc_state *old_crtc_state,
			       const struct drm_connector_state *old_conn_state)
{
	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
	struct intel_dp *intel_dp = to_primary_dp(encoder);
	struct intel_connector *connector =
		to_intel_connector(old_conn_state->connector);

	/* Mark the link for retraining once the last stream goes away. */
	if (intel_dp_mst_active_streams(intel_dp) == 1)
		intel_dp->link.active = false;

	intel_hdcp_disable(intel_mst->connector);

	intel_dp_sink_disable_decompression(state, connector, old_crtc_state);
}

/*
 * Post-disable an MST stream: remove its payload allocation and disable the
 * transcoder(s) feeding it.
 */
static void mst_stream_post_disable(struct intel_atomic_state *state,
				    struct intel_encoder *encoder,
				    const struct intel_crtc_state *old_crtc_state,
				    const struct drm_connector_state *old_conn_state)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
	struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
	struct intel_dp *intel_dp = to_primary_dp(encoder);
	struct intel_connector *connector =
		to_intel_connector(old_conn_state->connector);
	struct drm_dp_mst_topology_state *old_mst_state =
		drm_atomic_get_old_mst_topology_state(&state->base, &intel_dp->mst.mgr);
	struct drm_dp_mst_topology_state *new_mst_state =
		drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst.mgr);
	const struct drm_dp_mst_atomic_payload *old_payload =
		drm_atomic_get_mst_payload_state(old_mst_state, connector->mst.port);
	struct drm_dp_mst_atomic_payload *new_payload =
		drm_atomic_get_mst_payload_state(new_mst_state, connector->mst.port);
	struct intel_crtc *pipe_crtc;
	bool last_mst_stream;
	int i;

	last_mst_stream = intel_dp_mst_dec_active_streams(intel_dp);

	/* On TGL+ the last stream must be on the master transcoder. */
	drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 12 && last_mst_stream &&
		    !intel_dp_mst_is_master_trans(old_crtc_state));

	for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
		const struct intel_crtc_state *old_pipe_crtc_state =
			intel_atomic_get_old_crtc_state(state, pipe_crtc);

		intel_crtc_vblank_off(old_pipe_crtc_state);
	}

	intel_disable_transcoder(old_crtc_state);

	drm_dp_remove_payload_part1(&intel_dp->mst.mgr, new_mst_state, new_payload);

	intel_ddi_clear_act_sent(encoder, old_crtc_state);

	intel_de_rmw(display,
		     TRANS_DDI_FUNC_CTL(display, old_crtc_state->cpu_transcoder),
		     TRANS_DDI_DP_VC_PAYLOAD_ALLOC, 0);

	intel_ddi_wait_for_act_sent(encoder, old_crtc_state);
	drm_dp_check_act_status(&intel_dp->mst.mgr);

	drm_dp_remove_payload_part2(&intel_dp->mst.mgr, new_mst_state,
				    old_payload, new_payload);

	intel_vrr_transcoder_disable(old_crtc_state);

	intel_ddi_disable_transcoder_func(old_crtc_state);

	for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
		const struct intel_crtc_state *old_pipe_crtc_state =
			intel_atomic_get_old_crtc_state(state, pipe_crtc);

		intel_dsc_disable(old_pipe_crtc_state);

		if (DISPLAY_VER(display) >= 9)
			skl_scaler_disable(old_pipe_crtc_state);
		else
			ilk_pfit_disable(old_pipe_crtc_state);
	}

	/*
	 * Power down mst path before
disabling the port, otherwise we end 1066 * up getting interrupts from the sink upon detecting link loss. 1067 */ 1068 drm_dp_send_power_updown_phy(&intel_dp->mst.mgr, connector->mst.port, 1069 false); 1070 1071 /* 1072 * BSpec 4287: disable DIP after the transcoder is disabled and before 1073 * the transcoder clock select is set to none. 1074 */ 1075 intel_dp_set_infoframes(primary_encoder, false, old_crtc_state, NULL); 1076 /* 1077 * From TGL spec: "If multi-stream slave transcoder: Configure 1078 * Transcoder Clock Select to direct no clock to the transcoder" 1079 * 1080 * From older GENs spec: "Configure Transcoder Clock Select to direct 1081 * no clock to the transcoder" 1082 */ 1083 if (DISPLAY_VER(display) < 12 || !last_mst_stream) 1084 intel_ddi_disable_transcoder_clock(old_crtc_state); 1085 1086 1087 intel_mst->connector = NULL; 1088 if (last_mst_stream) 1089 primary_encoder->post_disable(state, primary_encoder, 1090 old_crtc_state, NULL); 1091 1092 } 1093 1094 static void mst_stream_post_pll_disable(struct intel_atomic_state *state, 1095 struct intel_encoder *encoder, 1096 const struct intel_crtc_state *old_crtc_state, 1097 const struct drm_connector_state *old_conn_state) 1098 { 1099 struct intel_encoder *primary_encoder = to_primary_encoder(encoder); 1100 struct intel_dp *intel_dp = to_primary_dp(encoder); 1101 1102 if (intel_dp_mst_active_streams(intel_dp) == 0 && 1103 primary_encoder->post_pll_disable) 1104 primary_encoder->post_pll_disable(state, primary_encoder, old_crtc_state, old_conn_state); 1105 } 1106 1107 static void mst_stream_pre_pll_enable(struct intel_atomic_state *state, 1108 struct intel_encoder *encoder, 1109 const struct intel_crtc_state *pipe_config, 1110 const struct drm_connector_state *conn_state) 1111 { 1112 struct intel_encoder *primary_encoder = to_primary_encoder(encoder); 1113 struct intel_dp *intel_dp = to_primary_dp(encoder); 1114 1115 if (intel_dp_mst_active_streams(intel_dp) == 0) 1116 
primary_encoder->pre_pll_enable(state, primary_encoder, 1117 pipe_config, NULL); 1118 else 1119 /* 1120 * The port PLL state needs to get updated for secondary 1121 * streams as for the primary stream. 1122 */ 1123 intel_ddi_update_active_dpll(state, primary_encoder, 1124 to_intel_crtc(pipe_config->uapi.crtc)); 1125 } 1126 1127 static bool intel_mst_probed_link_params_valid(struct intel_dp *intel_dp, 1128 int link_rate, int lane_count) 1129 { 1130 return intel_dp->link.mst_probed_rate == link_rate && 1131 intel_dp->link.mst_probed_lane_count == lane_count; 1132 } 1133 1134 static void intel_mst_set_probed_link_params(struct intel_dp *intel_dp, 1135 int link_rate, int lane_count) 1136 { 1137 intel_dp->link.mst_probed_rate = link_rate; 1138 intel_dp->link.mst_probed_lane_count = lane_count; 1139 } 1140 1141 static void intel_mst_reprobe_topology(struct intel_dp *intel_dp, 1142 const struct intel_crtc_state *crtc_state) 1143 { 1144 if (intel_mst_probed_link_params_valid(intel_dp, 1145 crtc_state->port_clock, crtc_state->lane_count)) 1146 return; 1147 1148 drm_dp_mst_topology_queue_probe(&intel_dp->mst.mgr); 1149 1150 intel_mst_set_probed_link_params(intel_dp, 1151 crtc_state->port_clock, crtc_state->lane_count); 1152 } 1153 1154 static void mst_stream_pre_enable(struct intel_atomic_state *state, 1155 struct intel_encoder *encoder, 1156 const struct intel_crtc_state *pipe_config, 1157 const struct drm_connector_state *conn_state) 1158 { 1159 struct intel_display *display = to_intel_display(state); 1160 struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); 1161 struct intel_encoder *primary_encoder = to_primary_encoder(encoder); 1162 struct intel_dp *intel_dp = to_primary_dp(encoder); 1163 struct intel_connector *connector = 1164 to_intel_connector(conn_state->connector); 1165 struct drm_dp_mst_topology_state *mst_state = 1166 drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst.mgr); 1167 int ret; 1168 bool first_mst_stream; 1169 1170 /* MST 
encoders are bound to a crtc, not to a connector, 1171 * force the mapping here for get_hw_state. 1172 */ 1173 connector->encoder = encoder; 1174 intel_mst->connector = connector; 1175 1176 first_mst_stream = intel_dp_mst_inc_active_streams(intel_dp); 1177 drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 12 && first_mst_stream && 1178 !intel_dp_mst_is_master_trans(pipe_config)); 1179 1180 if (first_mst_stream) 1181 intel_dp_set_power(intel_dp, DP_SET_POWER_D0); 1182 1183 drm_dp_send_power_updown_phy(&intel_dp->mst.mgr, connector->mst.port, true); 1184 1185 intel_dp_sink_enable_decompression(state, connector, pipe_config); 1186 1187 if (first_mst_stream) { 1188 primary_encoder->pre_enable(state, primary_encoder, 1189 pipe_config, NULL); 1190 1191 intel_mst_reprobe_topology(intel_dp, pipe_config); 1192 } 1193 1194 ret = drm_dp_add_payload_part1(&intel_dp->mst.mgr, mst_state, 1195 drm_atomic_get_mst_payload_state(mst_state, connector->mst.port)); 1196 if (ret < 0) 1197 intel_dp_queue_modeset_retry_for_link(state, primary_encoder, pipe_config); 1198 1199 /* 1200 * Before Gen 12 this is not done as part of 1201 * primary_encoder->pre_enable() and should be done here. For 1202 * Gen 12+ the step in which this should be done is different for the 1203 * first MST stream, so it's done on the DDI for the first stream and 1204 * here for the following ones. 
1205 */ 1206 if (DISPLAY_VER(display) < 12 || !first_mst_stream) 1207 intel_ddi_enable_transcoder_clock(encoder, pipe_config); 1208 1209 if (DISPLAY_VER(display) >= 13 && !first_mst_stream) 1210 intel_ddi_config_transcoder_func(encoder, pipe_config); 1211 1212 intel_dsc_dp_pps_write(primary_encoder, pipe_config); 1213 intel_ddi_set_dp_msa(pipe_config, conn_state); 1214 } 1215 1216 static void enable_bs_jitter_was(const struct intel_crtc_state *crtc_state) 1217 { 1218 struct intel_display *display = to_intel_display(crtc_state); 1219 u32 clear = 0; 1220 u32 set = 0; 1221 1222 if (!display->platform.alderlake_p) 1223 return; 1224 1225 if (!IS_DISPLAY_STEP(display, STEP_D0, STEP_FOREVER)) 1226 return; 1227 1228 /* Wa_14013163432:adlp */ 1229 if (crtc_state->fec_enable || intel_dp_is_uhbr(crtc_state)) 1230 set |= DP_MST_FEC_BS_JITTER_WA(crtc_state->cpu_transcoder); 1231 1232 /* Wa_14014143976:adlp */ 1233 if (IS_DISPLAY_STEP(display, STEP_E0, STEP_FOREVER)) { 1234 if (intel_dp_is_uhbr(crtc_state)) 1235 set |= DP_MST_SHORT_HBLANK_WA(crtc_state->cpu_transcoder); 1236 else if (crtc_state->fec_enable) 1237 clear |= DP_MST_SHORT_HBLANK_WA(crtc_state->cpu_transcoder); 1238 1239 if (crtc_state->fec_enable || intel_dp_is_uhbr(crtc_state)) 1240 set |= DP_MST_DPT_DPTP_ALIGN_WA(crtc_state->cpu_transcoder); 1241 } 1242 1243 if (!clear && !set) 1244 return; 1245 1246 intel_de_rmw(display, CHICKEN_MISC_3, clear, set); 1247 } 1248 1249 static void mst_stream_enable(struct intel_atomic_state *state, 1250 struct intel_encoder *encoder, 1251 const struct intel_crtc_state *pipe_config, 1252 const struct drm_connector_state *conn_state) 1253 { 1254 struct intel_display *display = to_intel_display(encoder); 1255 struct intel_encoder *primary_encoder = to_primary_encoder(encoder); 1256 struct intel_dp *intel_dp = to_primary_dp(encoder); 1257 struct intel_connector *connector = to_intel_connector(conn_state->connector); 1258 struct drm_dp_mst_topology_state *mst_state = 1259 
drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst.mgr); 1260 enum transcoder trans = pipe_config->cpu_transcoder; 1261 bool first_mst_stream = intel_dp_mst_active_streams(intel_dp) == 1; 1262 struct intel_crtc *pipe_crtc; 1263 int ret, i; 1264 1265 drm_WARN_ON(display->drm, pipe_config->has_pch_encoder); 1266 1267 if (intel_dp_is_uhbr(pipe_config)) { 1268 const struct drm_display_mode *adjusted_mode = 1269 &pipe_config->hw.adjusted_mode; 1270 u64 crtc_clock_hz = KHz(adjusted_mode->crtc_clock); 1271 1272 intel_de_write(display, TRANS_DP2_VFREQHIGH(pipe_config->cpu_transcoder), 1273 TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz >> 24)); 1274 intel_de_write(display, TRANS_DP2_VFREQLOW(pipe_config->cpu_transcoder), 1275 TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz & 0xffffff)); 1276 } 1277 1278 enable_bs_jitter_was(pipe_config); 1279 1280 intel_ddi_enable_transcoder_func(encoder, pipe_config); 1281 1282 intel_vrr_transcoder_enable(pipe_config); 1283 1284 intel_ddi_clear_act_sent(encoder, pipe_config); 1285 1286 intel_de_rmw(display, TRANS_DDI_FUNC_CTL(display, trans), 0, 1287 TRANS_DDI_DP_VC_PAYLOAD_ALLOC); 1288 1289 intel_ddi_wait_for_act_sent(encoder, pipe_config); 1290 drm_dp_check_act_status(&intel_dp->mst.mgr); 1291 1292 if (first_mst_stream) 1293 intel_ddi_wait_for_fec_status(encoder, pipe_config, true); 1294 1295 ret = drm_dp_add_payload_part2(&intel_dp->mst.mgr, 1296 drm_atomic_get_mst_payload_state(mst_state, 1297 connector->mst.port)); 1298 if (ret < 0) 1299 intel_dp_queue_modeset_retry_for_link(state, primary_encoder, pipe_config); 1300 1301 if (DISPLAY_VER(display) >= 12) 1302 intel_de_rmw(display, CHICKEN_TRANS(display, trans), 1303 FECSTALL_DIS_DPTSTREAM_DPTTG, 1304 pipe_config->fec_enable ? 
FECSTALL_DIS_DPTSTREAM_DPTTG : 0); 1305 1306 intel_enable_transcoder(pipe_config); 1307 1308 for_each_pipe_crtc_modeset_enable(display, pipe_crtc, pipe_config, i) { 1309 const struct intel_crtc_state *pipe_crtc_state = 1310 intel_atomic_get_new_crtc_state(state, pipe_crtc); 1311 1312 intel_crtc_vblank_on(pipe_crtc_state); 1313 } 1314 1315 intel_hdcp_enable(state, encoder, pipe_config, conn_state); 1316 } 1317 1318 static bool mst_stream_get_hw_state(struct intel_encoder *encoder, 1319 enum pipe *pipe) 1320 { 1321 struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); 1322 *pipe = intel_mst->pipe; 1323 if (intel_mst->connector) 1324 return true; 1325 return false; 1326 } 1327 1328 static void mst_stream_get_config(struct intel_encoder *encoder, 1329 struct intel_crtc_state *pipe_config) 1330 { 1331 struct intel_encoder *primary_encoder = to_primary_encoder(encoder); 1332 1333 primary_encoder->get_config(primary_encoder, pipe_config); 1334 } 1335 1336 static bool mst_stream_initial_fastset_check(struct intel_encoder *encoder, 1337 struct intel_crtc_state *crtc_state) 1338 { 1339 struct intel_encoder *primary_encoder = to_primary_encoder(encoder); 1340 1341 return intel_dp_initial_fastset_check(primary_encoder, crtc_state); 1342 } 1343 1344 static int mst_connector_get_ddc_modes(struct drm_connector *_connector) 1345 { 1346 struct intel_connector *connector = to_intel_connector(_connector); 1347 struct intel_display *display = to_intel_display(connector); 1348 struct intel_dp *intel_dp = connector->mst.dp; 1349 const struct drm_edid *drm_edid; 1350 int ret; 1351 1352 if (drm_connector_is_unregistered(&connector->base)) 1353 return intel_connector_update_modes(&connector->base, NULL); 1354 1355 if (!intel_display_driver_check_access(display)) 1356 return drm_edid_connector_add_modes(&connector->base); 1357 1358 drm_edid = drm_dp_mst_edid_read(&connector->base, &intel_dp->mst.mgr, connector->mst.port); 1359 1360 ret = 
intel_connector_update_modes(&connector->base, drm_edid); 1361 1362 drm_edid_free(drm_edid); 1363 1364 return ret; 1365 } 1366 1367 static int 1368 mst_connector_late_register(struct drm_connector *_connector) 1369 { 1370 struct intel_connector *connector = to_intel_connector(_connector); 1371 int ret; 1372 1373 ret = drm_dp_mst_connector_late_register(&connector->base, connector->mst.port); 1374 if (ret < 0) 1375 return ret; 1376 1377 ret = intel_connector_register(&connector->base); 1378 if (ret < 0) 1379 drm_dp_mst_connector_early_unregister(&connector->base, connector->mst.port); 1380 1381 return ret; 1382 } 1383 1384 static void 1385 mst_connector_early_unregister(struct drm_connector *_connector) 1386 { 1387 struct intel_connector *connector = to_intel_connector(_connector); 1388 1389 intel_connector_unregister(&connector->base); 1390 drm_dp_mst_connector_early_unregister(&connector->base, connector->mst.port); 1391 } 1392 1393 static const struct drm_connector_funcs mst_connector_funcs = { 1394 .fill_modes = drm_helper_probe_single_connector_modes, 1395 .atomic_get_property = intel_digital_connector_atomic_get_property, 1396 .atomic_set_property = intel_digital_connector_atomic_set_property, 1397 .late_register = mst_connector_late_register, 1398 .early_unregister = mst_connector_early_unregister, 1399 .destroy = intel_connector_destroy, 1400 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 1401 .atomic_duplicate_state = intel_digital_connector_duplicate_state, 1402 }; 1403 1404 static int mst_connector_get_modes(struct drm_connector *_connector) 1405 { 1406 struct intel_connector *connector = to_intel_connector(_connector); 1407 1408 return mst_connector_get_ddc_modes(&connector->base); 1409 } 1410 1411 static int 1412 mst_connector_mode_valid_ctx(struct drm_connector *_connector, 1413 const struct drm_display_mode *mode, 1414 struct drm_modeset_acquire_ctx *ctx, 1415 enum drm_mode_status *status) 1416 { 1417 struct intel_connector 
*connector = to_intel_connector(_connector); 1418 struct intel_display *display = to_intel_display(connector); 1419 struct intel_dp *intel_dp = connector->mst.dp; 1420 struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst.mgr; 1421 struct drm_dp_mst_port *port = connector->mst.port; 1422 const int min_bpp = 18; 1423 int max_dotclk = display->cdclk.max_dotclk_freq; 1424 int max_rate, mode_rate, max_lanes, max_link_clock; 1425 unsigned long bw_overhead_flags = 1426 DRM_DP_BW_OVERHEAD_MST | DRM_DP_BW_OVERHEAD_SSC_REF_CLK; 1427 int ret; 1428 bool dsc = false; 1429 int target_clock = mode->clock; 1430 int num_joined_pipes; 1431 1432 if (drm_connector_is_unregistered(&connector->base)) { 1433 *status = MODE_ERROR; 1434 return 0; 1435 } 1436 1437 *status = intel_cpu_transcoder_mode_valid(display, mode); 1438 if (*status != MODE_OK) 1439 return 0; 1440 1441 if (mode->flags & DRM_MODE_FLAG_DBLCLK) { 1442 *status = MODE_H_ILLEGAL; 1443 return 0; 1444 } 1445 1446 if (mode->clock < 10000) { 1447 *status = MODE_CLOCK_LOW; 1448 return 0; 1449 } 1450 1451 max_link_clock = intel_dp_max_link_rate(intel_dp); 1452 max_lanes = intel_dp_max_lane_count(intel_dp); 1453 1454 max_rate = intel_dp_max_link_data_rate(intel_dp, 1455 max_link_clock, max_lanes); 1456 mode_rate = intel_dp_link_required(max_link_clock, max_lanes, 1457 mode->clock, mode->hdisplay, 1458 fxp_q4_from_int(min_bpp), 1459 bw_overhead_flags); 1460 1461 /* 1462 * TODO: 1463 * - Also check if compression would allow for the mode 1464 * - Calculate the overhead using drm_dp_bw_overhead() / 1465 * drm_dp_bw_channel_coding_efficiency(), similarly to the 1466 * compute config code, as drm_dp_calc_pbn_mode() doesn't 1467 * account with all the overheads. 1468 * - Check here and during compute config the BW reported by 1469 * DFP_Link_Available_Payload_Bandwidth_Number (or the 1470 * corresponding link capabilities of the sink) in case the 1471 * stream is uncompressed for it by the last branch device. 
1472 */ 1473 num_joined_pipes = intel_dp_num_joined_pipes(intel_dp, connector, 1474 mode->hdisplay, target_clock); 1475 max_dotclk *= num_joined_pipes; 1476 1477 ret = drm_modeset_lock(&mgr->base.lock, ctx); 1478 if (ret) 1479 return ret; 1480 1481 if (mode_rate > max_rate || mode->clock > max_dotclk || 1482 drm_dp_calc_pbn_mode(mode->clock, min_bpp << 4) > port->full_pbn) { 1483 *status = MODE_CLOCK_HIGH; 1484 return 0; 1485 } 1486 1487 if (intel_dp_has_dsc(connector) && drm_dp_sink_supports_fec(connector->dp.fec_capability)) { 1488 /* 1489 * TBD pass the connector BPC, 1490 * for now U8_MAX so that max BPC on that platform would be picked 1491 */ 1492 int pipe_bpp = intel_dp_dsc_compute_max_bpp(connector, U8_MAX); 1493 1494 if (!drm_dp_is_uhbr_rate(max_link_clock)) 1495 bw_overhead_flags |= DRM_DP_BW_OVERHEAD_FEC; 1496 1497 dsc = intel_dp_mode_valid_with_dsc(connector, 1498 max_link_clock, max_lanes, 1499 target_clock, mode->hdisplay, 1500 num_joined_pipes, 1501 INTEL_OUTPUT_FORMAT_RGB, pipe_bpp, 1502 bw_overhead_flags); 1503 } 1504 1505 if (intel_dp_joiner_needs_dsc(display, num_joined_pipes) && !dsc) { 1506 *status = MODE_CLOCK_HIGH; 1507 return 0; 1508 } 1509 1510 if (mode_rate > max_rate && !dsc) { 1511 *status = MODE_CLOCK_HIGH; 1512 return 0; 1513 } 1514 1515 *status = intel_mode_valid_max_plane_size(display, mode, num_joined_pipes); 1516 return 0; 1517 } 1518 1519 static struct drm_encoder * 1520 mst_connector_atomic_best_encoder(struct drm_connector *_connector, 1521 struct drm_atomic_state *state) 1522 { 1523 struct intel_connector *connector = to_intel_connector(_connector); 1524 struct drm_connector_state *connector_state = 1525 drm_atomic_get_new_connector_state(state, &connector->base); 1526 struct intel_dp *intel_dp = connector->mst.dp; 1527 struct intel_crtc *crtc = to_intel_crtc(connector_state->crtc); 1528 1529 return &intel_dp->mst.stream_encoders[crtc->pipe]->base.base; 1530 } 1531 1532 static int 1533 mst_connector_detect_ctx(struct 
drm_connector *_connector, 1534 struct drm_modeset_acquire_ctx *ctx, bool force) 1535 { 1536 struct intel_connector *connector = to_intel_connector(_connector); 1537 struct intel_display *display = to_intel_display(connector); 1538 struct intel_dp *intel_dp = connector->mst.dp; 1539 1540 if (!intel_display_device_enabled(display)) 1541 return connector_status_disconnected; 1542 1543 if (drm_connector_is_unregistered(&connector->base)) 1544 return connector_status_disconnected; 1545 1546 if (!intel_display_driver_check_access(display)) 1547 return connector->base.status; 1548 1549 intel_dp_flush_connector_commits(connector); 1550 1551 return drm_dp_mst_detect_port(&connector->base, ctx, &intel_dp->mst.mgr, 1552 connector->mst.port); 1553 } 1554 1555 static const struct drm_connector_helper_funcs mst_connector_helper_funcs = { 1556 .get_modes = mst_connector_get_modes, 1557 .mode_valid_ctx = mst_connector_mode_valid_ctx, 1558 .atomic_best_encoder = mst_connector_atomic_best_encoder, 1559 .atomic_check = mst_connector_atomic_check, 1560 .detect_ctx = mst_connector_detect_ctx, 1561 }; 1562 1563 static void mst_stream_encoder_destroy(struct drm_encoder *encoder) 1564 { 1565 struct intel_dp_mst_encoder *intel_mst = enc_to_mst(to_intel_encoder(encoder)); 1566 1567 drm_encoder_cleanup(encoder); 1568 kfree(intel_mst); 1569 } 1570 1571 static const struct drm_encoder_funcs mst_stream_encoder_funcs = { 1572 .destroy = mst_stream_encoder_destroy, 1573 }; 1574 1575 static bool mst_connector_get_hw_state(struct intel_connector *connector) 1576 { 1577 /* This is the MST stream encoder set in ->pre_enable, if any */ 1578 struct intel_encoder *encoder = intel_attached_encoder(connector); 1579 enum pipe pipe; 1580 1581 if (!encoder || !connector->base.state->crtc) 1582 return false; 1583 1584 return encoder->get_hw_state(encoder, &pipe); 1585 } 1586 1587 static int mst_topology_add_connector_properties(struct intel_dp *intel_dp, 1588 struct drm_connector *_connector, 1589 const char 
*pathprop) 1590 { 1591 struct intel_display *display = to_intel_display(intel_dp); 1592 struct intel_connector *connector = to_intel_connector(_connector); 1593 1594 drm_object_attach_property(&connector->base.base, 1595 display->drm->mode_config.path_property, 0); 1596 drm_object_attach_property(&connector->base.base, 1597 display->drm->mode_config.tile_property, 0); 1598 1599 intel_attach_force_audio_property(&connector->base); 1600 intel_attach_broadcast_rgb_property(&connector->base); 1601 1602 /* 1603 * Reuse the prop from the SST connector because we're 1604 * not allowed to create new props after device registration. 1605 */ 1606 connector->base.max_bpc_property = 1607 intel_dp->attached_connector->base.max_bpc_property; 1608 if (connector->base.max_bpc_property) 1609 drm_connector_attach_max_bpc_property(&connector->base, 6, 12); 1610 1611 return drm_connector_set_path_property(&connector->base, pathprop); 1612 } 1613 1614 static void 1615 intel_dp_mst_read_decompression_port_dsc_caps(struct intel_dp *intel_dp, 1616 struct intel_connector *connector) 1617 { 1618 u8 dpcd_caps[DP_RECEIVER_CAP_SIZE]; 1619 struct drm_dp_desc desc; 1620 1621 if (!connector->dp.dsc_decompression_aux) 1622 return; 1623 1624 if (drm_dp_read_dpcd_caps(connector->dp.dsc_decompression_aux, dpcd_caps) < 0) 1625 return; 1626 1627 if (drm_dp_read_desc(connector->dp.dsc_decompression_aux, &desc, 1628 drm_dp_is_branch(dpcd_caps)) < 0) 1629 return; 1630 1631 intel_dp_get_dsc_sink_cap(dpcd_caps[DP_DPCD_REV], 1632 &desc, drm_dp_is_branch(dpcd_caps), 1633 connector); 1634 } 1635 1636 static bool detect_dsc_hblank_expansion_quirk(const struct intel_connector *connector) 1637 { 1638 struct intel_display *display = to_intel_display(connector); 1639 struct drm_dp_aux *aux = connector->dp.dsc_decompression_aux; 1640 struct drm_dp_desc desc; 1641 u8 dpcd[DP_RECEIVER_CAP_SIZE]; 1642 1643 if (!aux) 1644 return false; 1645 1646 /* 1647 * A logical port's OUI (at least for affected sinks) is all 0, so 
1648 * instead of that the parent port's OUI is used for identification. 1649 */ 1650 if (drm_dp_mst_port_is_logical(connector->mst.port)) { 1651 aux = drm_dp_mst_aux_for_parent(connector->mst.port); 1652 if (!aux) 1653 aux = &connector->mst.dp->aux; 1654 } 1655 1656 if (drm_dp_read_dpcd_caps(aux, dpcd) < 0) 1657 return false; 1658 1659 if (drm_dp_read_desc(aux, &desc, drm_dp_is_branch(dpcd)) < 0) 1660 return false; 1661 1662 if (!drm_dp_has_quirk(&desc, 1663 DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC)) 1664 return false; 1665 1666 /* 1667 * UHBR (MST sink) devices requiring this quirk don't advertise the 1668 * HBLANK expansion support. Presuming that they perform HBLANK 1669 * expansion internally, or are affected by this issue on modes with a 1670 * short HBLANK for other reasons. 1671 */ 1672 if (!drm_dp_128b132b_supported(dpcd) && 1673 !(dpcd[DP_RECEIVE_PORT_0_CAP_0] & DP_HBLANK_EXPANSION_CAPABLE)) 1674 return false; 1675 1676 drm_dbg_kms(display->drm, 1677 "[CONNECTOR:%d:%s] DSC HBLANK expansion quirk detected\n", 1678 connector->base.base.id, connector->base.name); 1679 1680 return true; 1681 } 1682 1683 static struct drm_connector * 1684 mst_topology_add_connector(struct drm_dp_mst_topology_mgr *mgr, 1685 struct drm_dp_mst_port *port, 1686 const char *pathprop) 1687 { 1688 struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst.mgr); 1689 struct intel_display *display = to_intel_display(intel_dp); 1690 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1691 struct intel_connector *connector; 1692 enum pipe pipe; 1693 int ret; 1694 1695 connector = intel_connector_alloc(); 1696 if (!connector) 1697 return NULL; 1698 1699 connector->get_hw_state = mst_connector_get_hw_state; 1700 connector->sync_state = intel_dp_connector_sync_state; 1701 connector->mst.dp = intel_dp; 1702 connector->mst.port = port; 1703 drm_dp_mst_get_port_malloc(port); 1704 1705 ret = drm_connector_dynamic_init(display->drm, &connector->base, &mst_connector_funcs, 
					   DRM_MODE_CONNECTOR_DisplayPort, NULL);
	if (ret)
		goto err_put_port;

	connector->dp.dsc_decompression_aux = drm_dp_mst_dsc_aux_for_port(port);
	intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, connector);
	connector->dp.dsc_hblank_expansion_quirk =
		detect_dsc_hblank_expansion_quirk(connector);

	drm_connector_helper_add(&connector->base, &mst_connector_helper_funcs);

	/*
	 * Attach the connector to the stream encoder of every pipe, so any
	 * pipe can drive this MST stream.
	 */
	for_each_pipe(display, pipe) {
		struct drm_encoder *enc =
			&intel_dp->mst.stream_encoders[pipe]->base.base;

		ret = drm_connector_attach_encoder(&connector->base, enc);
		if (ret)
			goto err_cleanup_connector;
	}

	ret = mst_topology_add_connector_properties(intel_dp, &connector->base, pathprop);
	if (ret)
		goto err_cleanup_connector;

	/* HDCP init failure is non-fatal: the connector works without HDCP. */
	ret = intel_dp_hdcp_init(dig_port, connector);
	if (ret)
		drm_dbg_kms(display->drm, "[%s:%d] HDCP MST init failed, skipping.\n",
			    connector->base.name, connector->base.base.id);

	return &connector->base;

err_cleanup_connector:
	drm_connector_cleanup(&connector->base);
err_put_port:
	drm_dp_mst_put_port_malloc(port);
	intel_connector_free(connector);

	return NULL;
}

/* MST manager callback: trigger HPD IRQ handling for the port owning @mgr. */
static void
mst_topology_poll_hpd_irq(struct drm_dp_mst_topology_mgr *mgr)
{
	struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst.mgr);

	intel_hpd_trigger_irq(dp_to_dig_port(intel_dp));
}

/* Callbacks the DRM MST topology manager invokes on this driver. */
static const struct drm_dp_mst_topology_cbs mst_topology_cbs = {
	.add_connector = mst_topology_add_connector,
	.poll_hpd_irq = mst_topology_poll_hpd_irq,
};

/* Create a fake encoder for an individual MST stream */
static struct intel_dp_mst_encoder *
mst_stream_encoder_create(struct intel_digital_port *dig_port, enum pipe pipe)
{
	struct intel_display *display = to_intel_display(dig_port);
	struct intel_encoder *primary_encoder = &dig_port->base;
	struct intel_dp_mst_encoder *intel_mst;
	struct intel_encoder *encoder;

	intel_mst = kzalloc_obj(*intel_mst);

	if (!intel_mst)
		return NULL;

	intel_mst->pipe = pipe;
	encoder = &intel_mst->base;
	intel_mst->primary = dig_port;

	/* NOTE(review): drm_encoder_init() return value is not checked here. */
	drm_encoder_init(display->drm, &encoder->base, &mst_stream_encoder_funcs,
			 DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe));

	encoder->type = INTEL_OUTPUT_DP_MST;
	/* The stream encoder inherits power domain and port from the primary. */
	encoder->power_domain = primary_encoder->power_domain;
	encoder->port = primary_encoder->port;
	encoder->cloneable = 0;
	/*
	 * This is wrong, but broken userspace uses the intersection
	 * of possible_crtcs of all the encoders of a given connector
	 * to figure out which crtcs can drive said connector. What
	 * should be used instead is the union of possible_crtcs.
	 * To keep such userspace functioning we must misconfigure
	 * this to make sure the intersection is not empty :(
	 */
	encoder->pipe_mask = ~0;

	encoder->compute_config = mst_stream_compute_config;
	encoder->compute_config_late = mst_stream_compute_config_late;
	encoder->disable = mst_stream_disable;
	encoder->post_disable = mst_stream_post_disable;
	encoder->post_pll_disable = mst_stream_post_pll_disable;
	encoder->update_pipe = intel_ddi_update_pipe;
	encoder->pre_pll_enable = mst_stream_pre_pll_enable;
	encoder->pre_enable = mst_stream_pre_enable;
	encoder->enable = mst_stream_enable;
	encoder->audio_enable = intel_audio_codec_enable;
	encoder->audio_disable = intel_audio_codec_disable;
	encoder->get_hw_state = mst_stream_get_hw_state;
	encoder->get_config = mst_stream_get_config;
	encoder->initial_fastset_check = mst_stream_initial_fastset_check;

	return intel_mst;

}

/* Create the fake encoders for MST streams */
static bool
mst_stream_encoders_create(struct intel_digital_port *dig_port)
{
	struct intel_display *display = to_intel_display(dig_port);
	struct intel_dp *intel_dp = &dig_port->dp;
	enum pipe pipe;

	/*
	 * NOTE(review): mst_stream_encoder_create() failures are not checked,
	 * so a NULL entry could end up in stream_encoders[]; the function
	 * always returns true.
	 */
	for_each_pipe(display, pipe)
		intel_dp->mst.stream_encoders[pipe] = mst_stream_encoder_create(dig_port, pipe);
	return true;
}

int
intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
{
	struct intel_display *display = to_intel_display(dig_port);
	struct intel_dp *intel_dp = &dig_port->dp;
	enum port port = dig_port->base.port;
	int ret;

	/* No MST without platform support; eDP never uses MST. */
	if (!HAS_DP_MST(display) || intel_dp_is_edp(intel_dp))
		return 0;

	/* Port A only supports MST on display version 12+. */
	if (DISPLAY_VER(display) < 12 && port == PORT_A)
		return 0;

	/* Port E only supports MST on display version 11+. */
	if (DISPLAY_VER(display) < 11 && port == PORT_E)
		return 0;

	intel_dp->mst.mgr.cbs = &mst_topology_cbs;

	/* create encoders */
	mst_stream_encoders_create(dig_port);
	/*
	 * 16 is the max DPCD transaction size, one payload slot per pipe
	 * (see drm_dp_mst_topology_mgr_init() kerneldoc).
	 */
	ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst.mgr, display->drm,
					   &intel_dp->aux, 16,
					   INTEL_NUM_PIPES(display), conn_base_id);
	if (ret) {
		/* A NULL cbs pointer marks MST as unsupported on this port. */
		intel_dp->mst.mgr.cbs = NULL;
		return ret;
	}

	return 0;
}

/* MST source support is flagged by a non-NULL topology manager cbs pointer. */
bool intel_dp_mst_source_support(struct intel_dp *intel_dp)
{
	return intel_dp->mst.mgr.cbs;
}

void
intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port)
{
	struct intel_dp *intel_dp = &dig_port->dp;

	if (!intel_dp_mst_source_support(intel_dp))
		return;

	drm_dp_mst_topology_mgr_destroy(&intel_dp->mst.mgr);
	/* encoders will get killed by normal cleanup */

	intel_dp->mst.mgr.cbs = NULL;
}

/* Is @crtc_state's transcoder the master transcoder of its MST stream group? */
bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->mst_master_transcoder == crtc_state->cpu_transcoder;
}

/* A slave transcoder has a valid master transcoder that is not itself. */
bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->mst_master_transcoder != INVALID_TRANSCODER &&
	       crtc_state->mst_master_transcoder != crtc_state->cpu_transcoder;
}

/**
 * intel_dp_mst_add_topology_state_for_connector - add MST topology state for a connector
 * @state: atomic state
 * @connector: connector to add the state for
 * @crtc: the CRTC @connector is attached to
 *
 * Add the MST topology state for @connector to @state.
 *
 * Returns 0 on success, negative error code on failure.
 */
static int
intel_dp_mst_add_topology_state_for_connector(struct intel_atomic_state *state,
					      struct intel_connector *connector,
					      struct intel_crtc *crtc)
{
	struct drm_dp_mst_topology_state *mst_state;

	/* Not an MST connector: no topology state to add. */
	if (!connector->mst.dp)
		return 0;

	mst_state = drm_atomic_get_mst_topology_state(&state->base,
						      &connector->mst.dp->mst.mgr);
	if (IS_ERR(mst_state))
		return PTR_ERR(mst_state);

	/* Mark @crtc as having a pending commit in this topology. */
	mst_state->pending_crtc_mask |= drm_crtc_mask(&crtc->base);

	return 0;
}

/**
 * intel_dp_mst_add_topology_state_for_crtc - add MST topology state for a CRTC
 * @state: atomic state
 * @crtc: CRTC to add the state for
 *
 * Add the MST topology state for @crtc to @state.
 *
 * Returns 0 on success, negative error code on failure.
1926 */ 1927 int intel_dp_mst_add_topology_state_for_crtc(struct intel_atomic_state *state, 1928 struct intel_crtc *crtc) 1929 { 1930 struct drm_connector *_connector; 1931 struct drm_connector_state *conn_state; 1932 int i; 1933 1934 for_each_new_connector_in_state(&state->base, _connector, conn_state, i) { 1935 struct intel_connector *connector = to_intel_connector(_connector); 1936 int ret; 1937 1938 if (conn_state->crtc != &crtc->base) 1939 continue; 1940 1941 ret = intel_dp_mst_add_topology_state_for_connector(state, connector, crtc); 1942 if (ret) 1943 return ret; 1944 } 1945 1946 return 0; 1947 } 1948 1949 static struct intel_connector * 1950 get_connector_in_state_for_crtc(struct intel_atomic_state *state, 1951 const struct intel_crtc *crtc) 1952 { 1953 struct drm_connector_state *old_conn_state; 1954 struct drm_connector_state *new_conn_state; 1955 struct drm_connector *_connector; 1956 int i; 1957 1958 for_each_oldnew_connector_in_state(&state->base, _connector, 1959 old_conn_state, new_conn_state, i) { 1960 struct intel_connector *connector = 1961 to_intel_connector(_connector); 1962 1963 if (old_conn_state->crtc == &crtc->base || 1964 new_conn_state->crtc == &crtc->base) 1965 return connector; 1966 } 1967 1968 return NULL; 1969 } 1970 1971 /** 1972 * intel_dp_mst_crtc_needs_modeset - check if changes in topology need to modeset the given CRTC 1973 * @state: atomic state 1974 * @crtc: CRTC for which to check the modeset requirement 1975 * 1976 * Check if any change in a MST topology requires a forced modeset on @crtc in 1977 * this topology. One such change is enabling/disabling the DSC decompression 1978 * state in the first branch device's UFP DPCD as required by one CRTC, while 1979 * the other @crtc in the same topology is still active, requiring a full modeset 1980 * on @crtc. 
 */
bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state,
				     struct intel_crtc *crtc)
{
	const struct intel_connector *crtc_connector;
	const struct drm_connector_state *conn_state;
	const struct drm_connector *_connector;
	int i;

	/* Only CRTCs driving an MST output are affected by topology changes. */
	if (!intel_crtc_has_type(intel_atomic_get_new_crtc_state(state, crtc),
				 INTEL_OUTPUT_DP_MST))
		return false;

	crtc_connector = get_connector_in_state_for_crtc(state, crtc);

	if (!crtc_connector)
		/* None of the connectors in the topology needs modeset */
		return false;

	/* Scan all connectors sharing @crtc's MST topology (same primary DP). */
	for_each_new_connector_in_state(&state->base, _connector, conn_state, i) {
		const struct intel_connector *connector =
			to_intel_connector(_connector);
		const struct intel_crtc_state *new_crtc_state;
		const struct intel_crtc_state *old_crtc_state;
		struct intel_crtc *crtc_iter;

		if (connector->mst.dp != crtc_connector->mst.dp ||
		    !conn_state->crtc)
			continue;

		crtc_iter = to_intel_crtc(conn_state->crtc);

		new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc_iter);
		old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc_iter);

		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		/* Only a DSC decompression state change forces the modeset. */
		if (old_crtc_state->dsc.compression_enable ==
		    new_crtc_state->dsc.compression_enable)
			continue;
		/*
		 * Toggling the decompression flag because of this stream in
		 * the first downstream branch device's UFP DPCD may reset the
		 * whole branch device. To avoid the reset while other streams
		 * are also active modeset the whole MST topology in this
		 * case.
		 */
		if (connector->dp.dsc_decompression_aux ==
		    &connector->mst.dp->aux)
			return true;
	}

	return false;
}

/**
 * intel_dp_mst_prepare_probe - Prepare an MST link for topology probing
 * @intel_dp: DP port object
 *
 * Prepare an MST link for topology probing, programming the target
 * link parameters to DPCD. This step is a requirement of the enumeration
 * of path resources during probing.
 */
void intel_dp_mst_prepare_probe(struct intel_dp *intel_dp)
{
	int link_rate = intel_dp_max_link_rate(intel_dp);
	int lane_count = intel_dp_max_lane_count(intel_dp);
	u8 rate_select;
	u8 link_bw;

	/* An active link already has its parameters programmed. */
	if (intel_dp->link.active)
		return;

	/* Skip the DPCD writes if the same parameters were already probed. */
	if (intel_mst_probed_link_params_valid(intel_dp, link_rate, lane_count))
		return;

	intel_dp_compute_rate(intel_dp, link_rate, &link_bw, &rate_select);

	intel_dp_link_training_set_mode(intel_dp, link_rate, false);
	intel_dp_link_training_set_bw(intel_dp, link_bw, rate_select, lane_count,
				      drm_dp_enhanced_frame_cap(intel_dp->dpcd));

	intel_mst_set_probed_link_params(intel_dp, link_rate, lane_count);
}

/*
 * intel_dp_mst_verify_dpcd_state - verify the MST SW enabled state wrt. the DPCD
 * @intel_dp: DP port object
 *
 * Verify if @intel_dp's MST enabled SW state matches the corresponding DPCD
 * state. A long HPD pulse - not long enough to be detected as a disconnected
 * state - could've reset the DPCD state, which requires tearing
 * down/recreating the MST topology.
 *
 * Returns %true if the SW MST enabled and DPCD states match, %false
 * otherwise.
2078 */ 2079 bool intel_dp_mst_verify_dpcd_state(struct intel_dp *intel_dp) 2080 { 2081 struct intel_display *display = to_intel_display(intel_dp); 2082 struct intel_connector *connector = intel_dp->attached_connector; 2083 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 2084 struct intel_encoder *encoder = &dig_port->base; 2085 int ret; 2086 u8 val; 2087 2088 if (!intel_dp->is_mst) 2089 return true; 2090 2091 ret = drm_dp_dpcd_readb(intel_dp->mst.mgr.aux, DP_MSTM_CTRL, &val); 2092 2093 /* Adjust the expected register value for SST + SideBand. */ 2094 if (ret < 0 || val != (DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC)) { 2095 drm_dbg_kms(display->drm, 2096 "[CONNECTOR:%d:%s][ENCODER:%d:%s] MST mode got reset, removing topology (ret=%d, ctrl=0x%02x)\n", 2097 connector->base.base.id, connector->base.name, 2098 encoder->base.base.id, encoder->base.name, 2099 ret, val); 2100 2101 return false; 2102 } 2103 2104 return true; 2105 } 2106