1 /* 2 * Copyright © 2008 Intel Corporation 3 * 2014 Red Hat Inc. 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice (including the next 13 * paragraph) shall be included in all copies or substantial portions of the 14 * Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 22 * IN THE SOFTWARE. 
23 * 24 */ 25 26 #include <drm/drm_atomic.h> 27 #include <drm/drm_atomic_helper.h> 28 #include <drm/drm_edid.h> 29 #include <drm/drm_fixed.h> 30 #include <drm/drm_probe_helper.h> 31 32 #include "i915_drv.h" 33 #include "i915_reg.h" 34 #include "intel_atomic.h" 35 #include "intel_audio.h" 36 #include "intel_connector.h" 37 #include "intel_crtc.h" 38 #include "intel_ddi.h" 39 #include "intel_de.h" 40 #include "intel_display_driver.h" 41 #include "intel_display_types.h" 42 #include "intel_dp.h" 43 #include "intel_dp_hdcp.h" 44 #include "intel_dp_link_training.h" 45 #include "intel_dp_mst.h" 46 #include "intel_dp_test.h" 47 #include "intel_dp_tunnel.h" 48 #include "intel_dpio_phy.h" 49 #include "intel_hdcp.h" 50 #include "intel_hotplug.h" 51 #include "intel_link_bw.h" 52 #include "intel_psr.h" 53 #include "intel_vdsc.h" 54 #include "skl_scaler.h" 55 56 /* 57 * DP MST (DisplayPort Multi-Stream Transport) 58 * 59 * MST support on the source depends on the platform and port. DP initialization 60 * sets up MST for each MST capable encoder. This will become the primary 61 * encoder for the port. 62 * 63 * MST initialization of each primary encoder creates MST stream encoders, one 64 * per pipe, and initializes the MST topology manager. The MST stream encoders 65 * are sometimes called "fake encoders", because they're virtual, not 66 * physical. Thus there are (number of MST capable ports) x (number of pipes) 67 * MST stream encoders in total. 68 * 69 * Decision to use MST for a sink happens at detect on the connector attached to 70 * the primary encoder, and this will not change while the sink is connected. We 71 * always use MST when possible, including for SST sinks with sideband messaging 72 * support. 73 * 74 * The connectors for the MST streams are added and removed dynamically by the 75 * topology manager. Their connection status is also determined by the topology 76 * manager. 77 * 78 * On hardware, each transcoder may be associated with a single DDI 79 * port. 
 * Multiple transcoders may be associated with the same DDI port only if
 * the port is in MST mode.
 *
 * On TGL+, all the transcoders streaming on the same DDI port will indicate a
 * primary transcoder; the TGL_DP_TP_CTL and TGL_DP_TP_STATUS registers are
 * relevant only on the primary transcoder. Prior to that, they are port
 * registers.
 */

/* From fake MST stream encoder to primary encoder */
static struct intel_encoder *to_primary_encoder(struct intel_encoder *encoder)
{
	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
	struct intel_digital_port *dig_port = intel_mst->primary;

	return &dig_port->base;
}

/* From fake MST stream encoder to primary DP */
static struct intel_dp *to_primary_dp(struct intel_encoder *encoder)
{
	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
	struct intel_digital_port *dig_port = intel_mst->primary;

	return &dig_port->dp;
}

/*
 * Return the maximum link bpp the DSC->DPT interface width allows for this
 * configuration, or INT_MAX when that interface is not a bottleneck (non-UHBR
 * link, display version 20+, or DSC not used).
 */
static int intel_dp_mst_max_dpt_bpp(const struct intel_crtc_state *crtc_state,
				    bool dsc)
{
	struct intel_display *display = to_intel_display(crtc_state);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	/* The DPT limit only applies to UHBR + DSC before display version 20. */
	if (!intel_dp_is_uhbr(crtc_state) || DISPLAY_VER(display) >= 20 || !dsc)
		return INT_MAX;

	/*
	 * DSC->DPT interface width:
	 *   ICL-MTL: 72 bits (each branch has 72 bits, only left branch is used)
	 *   LNL+:    144 bits (not a bottleneck in any config)
	 *
	 * Bspec/49259 suggests that the FEC overhead needs to be
	 * applied here, though HW people claim that neither this FEC
	 * or any other overhead is applicable here (that is the actual
	 * available_bw is just symbol_clock * 72).
	 * However based on
	 * testing on MTL-P the
	 *   - DELL U3224KBA display
	 *   - Unigraf UCD-500 CTS test sink
	 * devices the
	 *   - 5120x2880/995.59Mhz
	 *   - 6016x3384/1357.23Mhz
	 *   - 6144x3456/1413.39Mhz
	 * modes (all the ones having a DPT limit on the above devices),
	 * both the channel coding efficiency and an additional 3%
	 * overhead needs to be accounted for.
	 */
	return div64_u64(mul_u32_u32(intel_dp_link_symbol_clock(crtc_state->port_clock) * 72,
				     drm_dp_bw_channel_coding_efficiency(true)),
			 mul_u32_u32(adjusted_mode->crtc_clock, 1030000));
}

/*
 * Return the link BW allocation overhead for this stream, computed by
 * drm_dp_bw_overhead() from the MST/UHBR/SSC/FEC/DSC properties of the
 * link, but never less than the fixed FEC overhead returned by
 * intel_dp_bw_fec_overhead().
 */
static int intel_dp_mst_bw_overhead(const struct intel_crtc_state *crtc_state,
				    bool ssc, int dsc_slice_count, int bpp_x16)
{
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	unsigned long flags = DRM_DP_BW_OVERHEAD_MST;
	int overhead;

	flags |= intel_dp_is_uhbr(crtc_state) ? DRM_DP_BW_OVERHEAD_UHBR : 0;
	flags |= ssc ? DRM_DP_BW_OVERHEAD_SSC_REF_CLK : 0;
	flags |= crtc_state->fec_enable ? DRM_DP_BW_OVERHEAD_FEC : 0;

	if (dsc_slice_count)
		flags |= DRM_DP_BW_OVERHEAD_DSC;

	overhead = drm_dp_bw_overhead(crtc_state->lane_count,
				      adjusted_mode->hdisplay,
				      dsc_slice_count,
				      bpp_x16,
				      flags);

	/*
	 * TODO: clarify whether a minimum required by the fixed FEC overhead
	 * in the bspec audio programming sequence is required here.
	 */
	return max(overhead, intel_dp_bw_fec_overhead(crtc_state->fec_enable));
}

/* Compute the data/link M/N and TU values for the stream into @m_n. */
static void intel_dp_mst_compute_m_n(const struct intel_crtc_state *crtc_state,
				     int overhead,
				     int bpp_x16,
				     struct intel_link_m_n *m_n)
{
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	/* TODO: Check WA 14013163432 to set data M/N for full BW utilization.
	 */
	intel_link_compute_m_n(bpp_x16, crtc_state->lane_count,
			       adjusted_mode->crtc_clock,
			       crtc_state->port_clock,
			       overhead,
			       m_n);

	/* TU derived from the data M/N ratio, in 1/64 units, rounded up. */
	m_n->tu = DIV_ROUND_UP_ULL(mul_u32_u32(m_n->data_m, 64), m_n->data_n);
}

/* Convert the effective data rate of the stream to its PBN value. */
static int intel_dp_mst_calc_pbn(int pixel_clock, int bpp_x16, int bw_overhead)
{
	int effective_data_rate =
		intel_dp_effective_data_rate(pixel_clock, bpp_x16, bw_overhead);

	/*
	 * TODO: Use drm_dp_calc_pbn_mode() instead, once it's converted
	 * to calculate PBN with the BW overhead passed to it.
	 */
	return DIV_ROUND_UP(effective_data_rate * 64, 54 * 1000);
}

/* DSC slice count for @connector/@crtc_state, 0 if no valid count exists. */
static int intel_dp_mst_dsc_get_slice_count(const struct intel_connector *connector,
					    const struct intel_crtc_state *crtc_state)
{
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	int num_joined_pipes = intel_crtc_num_joined_pipes(crtc_state);

	return intel_dp_dsc_get_slice_count(connector,
					    adjusted_mode->clock,
					    adjusted_mode->hdisplay,
					    num_joined_pipes);
}

/*
 * Find a link bpp in the [min_bpp, max_bpp] range (tried from max to min in
 * @step decrements) for which the stream's time slot allocation fits on the
 * link, computing the stream's M/N/TU values and - on an MST link - the
 * atomic time slot allocation for it. On success crtc_state->pipe_bpp
 * (uncompressed) or crtc_state->dsc.compressed_bpp_x16 (DSC) is set to the
 * chosen bpp.
 *
 * Returns 0 on success, -EDEADLK on an atomic state backoff, -ENOSPC/-EINVAL
 * if no bpp in the given range fits the link bandwidth.
 */
int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
				   struct intel_crtc_state *crtc_state,
				   int max_bpp, int min_bpp,
				   struct drm_connector_state *conn_state,
				   int step, bool dsc)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_atomic_state *state = crtc_state->uapi.state;
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	fixed20_12 pbn_div;
	int bpp, slots = -EINVAL;
	int dsc_slice_count = 0;
	int max_dpt_bpp;

	if (dsc) {
		if (!intel_dp_supports_fec(intel_dp, connector, crtc_state))
			return -EINVAL;

		/* FEC is required for DSC, except on 128b/132b (UHBR) links. */
		crtc_state->fec_enable = !intel_dp_is_uhbr(crtc_state);
	}

	pbn_div = drm_dp_get_vc_payload_bw(crtc_state->port_clock,
					   crtc_state->lane_count);

	max_dpt_bpp = intel_dp_mst_max_dpt_bpp(crtc_state, dsc);
	if (max_bpp > max_dpt_bpp) {
		drm_dbg_kms(display->drm, "Limiting bpp to max DPT bpp (%d -> %d)\n",
			    max_bpp, max_dpt_bpp);
		max_bpp = max_dpt_bpp;
	}

	drm_dbg_kms(display->drm, "Looking for slots in range min bpp %d max bpp %d\n",
		    min_bpp, max_bpp);

	if (dsc) {
		dsc_slice_count = intel_dp_mst_dsc_get_slice_count(connector, crtc_state);
		if (!dsc_slice_count) {
			drm_dbg_kms(display->drm, "Can't get valid DSC slice count\n");

			return -ENOSPC;
		}
	}

	for (bpp = max_bpp; bpp >= min_bpp; bpp -= step) {
		int local_bw_overhead;
		int link_bpp_x16;

		drm_dbg_kms(display->drm, "Trying bpp %d\n", bpp);

		link_bpp_x16 = fxp_q4_from_int(dsc ? bpp :
					       intel_dp_output_bpp(crtc_state->output_format, bpp));

		local_bw_overhead = intel_dp_mst_bw_overhead(crtc_state,
							     false, dsc_slice_count, link_bpp_x16);
		intel_dp_mst_compute_m_n(crtc_state,
					 local_bw_overhead,
					 link_bpp_x16,
					 &crtc_state->dp_m_n);

		if (intel_dp->is_mst) {
			int remote_bw_overhead;
			int remote_tu;
			fixed20_12 pbn;

			/* The remote path overhead also accounts for SSC. */
			remote_bw_overhead = intel_dp_mst_bw_overhead(crtc_state,
								      true, dsc_slice_count, link_bpp_x16);

			/*
			 * The TU size programmed to the HW determines which slots in
			 * an MTP frame are used for this stream, which needs to match
			 * the payload size programmed to the first downstream branch
			 * device's payload table.
			 *
			 * Note that atm the payload's PBN value DRM core sends via
			 * the ALLOCATE_PAYLOAD side-band message matches the payload
			 * size (which it calculates from the PBN value) it programs
			 * to the first branch device's payload table. The allocation
			 * in the payload table could be reduced though (to
			 * crtc_state->dp_m_n.tu), provided that the driver doesn't
			 * enable SSC on the corresponding link.
			 */
			pbn.full = dfixed_const(intel_dp_mst_calc_pbn(adjusted_mode->crtc_clock,
								      link_bpp_x16,
								      remote_bw_overhead));
			remote_tu = DIV_ROUND_UP(pbn.full, pbn_div.full);

			/*
			 * Aligning the TUs ensures that symbols consisting of multiple
			 * (4) symbol cycles don't get split between two consecutive
			 * MTPs, as required by Bspec.
			 * TODO: remove the alignment restriction for 128b/132b links
			 * on some platforms, where Bspec allows this.
			 */
			remote_tu = ALIGN(remote_tu, 4 / crtc_state->lane_count);

			/*
			 * Also align PBNs accordingly, since MST core will derive its
			 * own copy of TU from the PBN in drm_dp_atomic_find_time_slots().
			 * The above comment about the difference between the PBN
			 * allocated for the whole path and the TUs allocated for the
			 * first branch device's link also applies here.
			 */
			pbn.full = remote_tu * pbn_div.full;

			drm_WARN_ON(display->drm, remote_tu < crtc_state->dp_m_n.tu);
			crtc_state->dp_m_n.tu = remote_tu;

			slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst_mgr,
							      connector->port,
							      dfixed_trunc(pbn));
		} else {
			/* Same as above for remote_tu */
			crtc_state->dp_m_n.tu = ALIGN(crtc_state->dp_m_n.tu,
						      4 / crtc_state->lane_count);

			if (crtc_state->dp_m_n.tu <= 64)
				slots = crtc_state->dp_m_n.tu;
			else
				slots = -EINVAL;
		}

		if (slots == -EDEADLK)
			return slots;

		/* A non-negative slot count means this bpp fits; stop searching. */
		if (slots >= 0) {
			drm_WARN_ON(display->drm, slots != crtc_state->dp_m_n.tu);

			break;
		}
	}

	if (slots < 0) {
		drm_dbg_kms(display->drm, "failed finding vcpi slots:%d\n",
			    slots);
		return slots;
	}

	if (!dsc)
		crtc_state->pipe_bpp = bpp;
	else
		crtc_state->dsc.compressed_bpp_x16 = fxp_q4_from_int(bpp);

	drm_dbg_kms(display->drm, "Got %d slots for pipe bpp %d dsc %d\n",
		    slots, bpp, dsc);

	return 0;
}

/*
 * Set up the link at the maximum rate/lane count from @limits, update the
 * topology state's pbn_div accordingly and search for a fitting bpp via
 * intel_dp_mtp_tu_compute_config().
 */
static int mst_stream_find_vcpi_slots_for_bpp(struct intel_dp *intel_dp,
					      struct intel_crtc_state *crtc_state,
					      int max_bpp, int min_bpp,
					      struct link_config_limits *limits,
					      struct drm_connector_state *conn_state,
					      int step, bool dsc)
{
	struct drm_atomic_state *state = crtc_state->uapi.state;
	struct drm_dp_mst_topology_state *mst_state;

	mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst_mgr);
	if (IS_ERR(mst_state))
		return PTR_ERR(mst_state);

	crtc_state->lane_count = limits->max_lane_count;
	crtc_state->port_clock = limits->max_rate;

	mst_state->pbn_div = drm_dp_get_vc_payload_bw(crtc_state->port_clock,
						      crtc_state->lane_count);

	return intel_dp_mtp_tu_compute_config(intel_dp, crtc_state,
					      max_bpp, min_bpp,
					      conn_state, step, dsc);
}

/* Compute the non-DSC link configuration for the stream. */
static int mst_stream_compute_link_config(struct intel_dp *intel_dp,
					  struct intel_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state,
					  struct link_config_limits *limits)
{
	/*
	 * FIXME: allocate the BW according to link_bpp, which in the case of
	 * YUV420 is only half of the pipe bpp value.
	 */
	return mst_stream_find_vcpi_slots_for_bpp(intel_dp, crtc_state,
						  fxp_q4_to_int(limits->link.max_bpp_x16),
						  fxp_q4_to_int(limits->link.min_bpp_x16),
						  limits,
						  conn_state, 2 * 3, false);
}

/*
 * Compute the DSC link configuration for the stream: clamp the pipe bpp range
 * to what the sink's DSC input bpcs support, then search the compressed bpp
 * range from @limits for a fitting time slot allocation.
 */
static int mst_stream_dsc_compute_link_config(struct intel_dp *intel_dp,
					      struct intel_crtc_state *crtc_state,
					      struct drm_connector_state *conn_state,
					      struct link_config_limits *limits)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	int i, num_bpc;
	u8 dsc_bpc[3] = {};
	int min_bpp, max_bpp, sink_min_bpp, sink_max_bpp;
	int min_compressed_bpp, max_compressed_bpp;

	max_bpp = limits->pipe.max_bpp;
	min_bpp = limits->pipe.min_bpp;

	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(connector->dp.dsc_dpcd,
						       dsc_bpc);

	drm_dbg_kms(display->drm, "DSC Source supported min bpp %d max bpp %d\n",
		    min_bpp, max_bpp);

	/* Derive the sink's min/max input bpp from the supported bpc list. */
	sink_max_bpp = dsc_bpc[0] * 3;
	sink_min_bpp = sink_max_bpp;

	for (i = 1; i < num_bpc; i++) {
		if (sink_min_bpp > dsc_bpc[i] * 3)
			sink_min_bpp = dsc_bpc[i] * 3;
		if (sink_max_bpp < dsc_bpc[i] * 3)
			sink_max_bpp = dsc_bpc[i] * 3;
	}

	drm_dbg_kms(display->drm, "DSC Sink supported min bpp %d max bpp %d\n",
		    sink_min_bpp, sink_max_bpp);

	if (min_bpp < sink_min_bpp)
		min_bpp = sink_min_bpp;

	if (max_bpp > sink_max_bpp)
		max_bpp = sink_max_bpp;

	crtc_state->pipe_bpp = max_bpp;

	max_compressed_bpp = fxp_q4_to_int(limits->link.max_bpp_x16);
	min_compressed_bpp = fxp_q4_to_int_roundup(limits->link.min_bpp_x16);

	drm_dbg_kms(display->drm, "DSC Sink supported compressed min bpp %d compressed max bpp %d\n",
		    min_compressed_bpp, max_compressed_bpp);

	/* Align compressed bpps according to our own constraints */
	max_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(display, max_compressed_bpp,
							    crtc_state->pipe_bpp);
	min_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(display, min_compressed_bpp,
							    crtc_state->pipe_bpp);

	return mst_stream_find_vcpi_slots_for_bpp(intel_dp, crtc_state, max_compressed_bpp,
						  min_compressed_bpp, limits,
						  conn_state, 1, true);
}

/* Update the topology state's slot config for the link's channel coding. */
static int mst_stream_update_slots(struct intel_dp *intel_dp,
				   struct intel_crtc_state *crtc_state,
				   struct drm_connector_state *conn_state)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst_mgr;
	struct drm_dp_mst_topology_state *topology_state;
	u8 link_coding_cap = intel_dp_is_uhbr(crtc_state) ?
		DP_CAP_ANSI_128B132B : DP_CAP_ANSI_8B10B;

	topology_state = drm_atomic_get_mst_topology_state(conn_state->state, mgr);
	if (IS_ERR(topology_state)) {
		drm_dbg_kms(display->drm, "slot update failed\n");
		return PTR_ERR(topology_state);
	}

	drm_dp_mst_update_slots(topology_state, link_coding_cap);

	return 0;
}

/*
 * Return the mode's hblank period in nsec. crtc_clock is in kHz, hence the
 * NSEC_PER_SEC / 1000 scaling.
 */
static int mode_hblank_period_ns(const struct drm_display_mode *mode)
{
	return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(mode->htotal - mode->hdisplay,
						 NSEC_PER_SEC / 1000),
				     mode->crtc_clock);
}

/*
 * Return whether the hblank expansion quirk requires enabling DSC for this
 * mode: the sink has the quirk, the mode's hblank period is below the
 * 300/500 ns limit and a valid DSC slice count exists.
 */
static bool
hblank_expansion_quirk_needs_dsc(const struct intel_connector *connector,
				 const struct intel_crtc_state *crtc_state,
				 const struct link_config_limits *limits)
{
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	bool is_uhbr_sink = connector->mst_port &&
		drm_dp_128b132b_supported(connector->mst_port->dpcd);
	int hblank_limit = is_uhbr_sink ? 500 : 300;

	if (!connector->dp.dsc_hblank_expansion_quirk)
		return false;

	if (is_uhbr_sink && !drm_dp_is_uhbr_rate(limits->max_rate))
		return false;

	if (mode_hblank_period_ns(adjusted_mode) > hblank_limit)
		return false;

	if (!intel_dp_mst_dsc_get_slice_count(connector, crtc_state))
		return false;

	return true;
}

/*
 * Apply the hblank expansion quirk's bpp constraints to @limits: without DSC
 * require a link bpp of at least 24, with DSC require a rate-dependent
 * minimum compressed bpp. Returns false if the limits can't accommodate the
 * quirk (the caller then has to fall back, e.g. to DSC).
 */
static bool
adjust_limits_for_dsc_hblank_expansion_quirk(struct intel_dp *intel_dp,
					     const struct intel_connector *connector,
					     const struct intel_crtc_state *crtc_state,
					     struct link_config_limits *limits,
					     bool dsc)
{
	struct intel_display *display = to_intel_display(connector);
	const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	int min_bpp_x16 = limits->link.min_bpp_x16;

	if (!hblank_expansion_quirk_needs_dsc(connector, crtc_state, limits))
		return true;

	if (!dsc) {
		/* Prefer enabling DSC over raising the link min bpp. */
		if (intel_dp_supports_dsc(intel_dp, connector, crtc_state)) {
			drm_dbg_kms(display->drm,
				    "[CRTC:%d:%s][CONNECTOR:%d:%s] DSC needed by hblank expansion quirk\n",
				    crtc->base.base.id, crtc->base.name,
				    connector->base.base.id, connector->base.name);
			return false;
		}

		drm_dbg_kms(display->drm,
			    "[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to 24 due to hblank expansion quirk\n",
			    crtc->base.base.id, crtc->base.name,
			    connector->base.base.id, connector->base.name);

		if (limits->link.max_bpp_x16 < fxp_q4_from_int(24))
			return false;

		limits->link.min_bpp_x16 = fxp_q4_from_int(24);

		return true;
	}

	drm_WARN_ON(display->drm, limits->min_rate != limits->max_rate);

	if (limits->max_rate < 540000)
		min_bpp_x16 = fxp_q4_from_int(13);
	else if (limits->max_rate < 810000)
		min_bpp_x16 = fxp_q4_from_int(10);

	if (limits->link.min_bpp_x16 >= min_bpp_x16)
		return true;

	drm_dbg_kms(display->drm,
		    "[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to " FXP_Q4_FMT " in DSC mode due to hblank expansion quirk\n",
		    crtc->base.base.id, crtc->base.name,
		    connector->base.base.id, connector->base.name,
		    FXP_Q4_ARGS(min_bpp_x16));

	if (limits->link.max_bpp_x16 < min_bpp_x16)
		return false;

	limits->link.min_bpp_x16 = min_bpp_x16;

	return true;
}

/* Compute the stream's link config limits, adjusted for the hblank quirk. */
static bool
mst_stream_compute_config_limits(struct intel_dp *intel_dp,
				 const struct intel_connector *connector,
				 struct intel_crtc_state *crtc_state,
				 bool dsc,
				 struct link_config_limits *limits)
{
	if (!intel_dp_compute_config_limits(intel_dp, crtc_state, false, dsc,
					    limits))
		return false;

	return adjust_limits_for_dsc_hblank_expansion_quirk(intel_dp,
							    connector,
							    crtc_state,
							    limits,
							    dsc);
}

/* Encoder .compute_config hook for MST stream encoders. */
static int mst_stream_compute_config(struct intel_encoder *encoder,
				     struct intel_crtc_state *pipe_config,
				     struct drm_connector_state *conn_state)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct intel_dp *intel_dp = to_primary_dp(encoder);
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	struct link_config_limits limits;
	bool dsc_needed, joiner_needs_dsc;
	int num_joined_pipes;
	int ret = 0;

	if (pipe_config->fec_enable &&
	    !intel_dp_supports_fec(intel_dp, connector, pipe_config))
		return -EINVAL;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return -EINVAL;

	num_joined_pipes = intel_dp_num_joined_pipes(intel_dp, connector,
						     adjusted_mode->crtc_hdisplay,
						     adjusted_mode->crtc_clock);
	if (num_joined_pipes > 1)
		pipe_config->joiner_pipes = GENMASK(crtc->pipe + num_joined_pipes - 1, crtc->pipe);

	pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->has_pch_encoder = false;

	joiner_needs_dsc = intel_dp_joiner_needs_dsc(display, num_joined_pipes);

	/* DSC is needed if forced, required by the joiner or no limits fit. */
	dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en ||
		!mst_stream_compute_config_limits(intel_dp, connector,
						  pipe_config, false, &limits);

	if (!dsc_needed) {
		ret = mst_stream_compute_link_config(intel_dp, pipe_config,
						     conn_state, &limits);

		if (ret == -EDEADLK)
			return ret;

		/* Fall back to DSC if the uncompressed config didn't fit. */
		if (ret)
			dsc_needed = true;
	}

	if (dsc_needed && !intel_dp_supports_dsc(intel_dp, connector, pipe_config)) {
		drm_dbg_kms(display->drm, "DSC required but not available\n");
		return -EINVAL;
	}

	/* enable compression if the mode doesn't fit available BW */
	if (dsc_needed) {
		drm_dbg_kms(display->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
			    str_yes_no(ret), str_yes_no(joiner_needs_dsc),
			    str_yes_no(intel_dp->force_dsc_en));

		if (!mst_stream_compute_config_limits(intel_dp, connector,
						      pipe_config, true,
						      &limits))
			return -EINVAL;

		/*
		 * FIXME: As bpc is hardcoded to 8, as mentioned above,
		 * WARN and ignore the debug flag force_dsc_bpc for now.
		 */
		drm_WARN(display->drm, intel_dp->force_dsc_bpc,
			 "Cannot Force BPC for MST\n");
		/*
		 * Try to get at least some timeslots and then see, if
		 * we can fit there with DSC.
		 */
		drm_dbg_kms(display->drm, "Trying to find VCPI slots in DSC mode\n");

		ret = mst_stream_dsc_compute_link_config(intel_dp, pipe_config,
							 conn_state, &limits);
		if (ret < 0)
			return ret;

		ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
						  conn_state, &limits,
						  pipe_config->dp_m_n.tu, false);
	}

	if (ret)
		return ret;

	ret = mst_stream_update_slots(intel_dp, pipe_config, conn_state);
	if (ret)
		return ret;

	pipe_config->limited_color_range =
		intel_dp_limited_color_range(pipe_config, conn_state);

	if (display->platform.geminilake || display->platform.broxton)
		pipe_config->lane_lat_optim_mask =
			bxt_dpio_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);

	intel_dp_audio_compute_config(encoder, pipe_config, conn_state);

	intel_ddi_compute_min_voltage_level(pipe_config);

	intel_psr_compute_config(intel_dp, pipe_config, conn_state);

	return intel_dp_tunnel_atomic_compute_stream_bw(state, intel_dp, connector,
							pipe_config);
}

/*
 * Iterate over all connectors and return a mask of
 * all CPU transcoders streaming over the same DP link.
 */
static unsigned int
intel_dp_mst_transcoder_mask(struct intel_atomic_state *state,
			     struct intel_dp *mst_port)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_digital_connector_state *conn_state;
	struct intel_connector *connector;
	u8 transcoders = 0;
	int i;

	/* Only relevant on TGL+ where streams share a master transcoder. */
	if (DISPLAY_VER(display) < 12)
		return 0;

	for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
		const struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (connector->mst_port != mst_port || !conn_state->base.crtc)
			continue;

		crtc = to_intel_crtc(conn_state->base.crtc);
		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

		if (!crtc_state->hw.active)
			continue;

		transcoders |= BIT(crtc_state->cpu_transcoder);
	}

	return transcoders;
}

/*
 * Return a mask of the pipes whose connector in @state is @parent_port or
 * downstream of @parent_port in @mst_mgr's topology. Passing a NULL
 * @parent_port is used to match all connectors on the topology - TODO
 * confirm against drm_dp_mst_port_downstream_of_parent()'s NULL semantics.
 */
static u8 get_pipes_downstream_of_mst_port(struct intel_atomic_state *state,
					   struct drm_dp_mst_topology_mgr *mst_mgr,
					   struct drm_dp_mst_port *parent_port)
{
	const struct intel_digital_connector_state *conn_state;
	struct intel_connector *connector;
	u8 mask = 0;
	int i;

	for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
		if (!conn_state->base.crtc)
			continue;

		if (&connector->mst_port->mst_mgr != mst_mgr)
			continue;

		if (connector->port != parent_port &&
		    !drm_dp_mst_port_downstream_of_parent(mst_mgr,
							  connector->port,
							  parent_port))
			continue;

		mask |= BIT(to_intel_crtc(conn_state->base.crtc)->pipe);
	}

	return mask;
}

/*
 * Check that FEC is enabled either on all or none of the pipes on this MST
 * topology. If not, force a modeset with FEC enabled on all of them and ask
 * for a config recomputation by returning -EAGAIN.
 */
static int intel_dp_mst_check_fec_change(struct intel_atomic_state *state,
					 struct drm_dp_mst_topology_mgr *mst_mgr,
					 struct intel_link_bw_limits *limits)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc *crtc;
	u8 mst_pipe_mask;
	u8 fec_pipe_mask = 0;
	int ret;

	mst_pipe_mask = get_pipes_downstream_of_mst_port(state, mst_mgr, NULL);

	for_each_intel_crtc_in_pipe_mask(display->drm, crtc, mst_pipe_mask) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_new_crtc_state(state, crtc);

		/* Atomic connector check should've added all the MST CRTCs. */
		if (drm_WARN_ON(display->drm, !crtc_state))
			return -EINVAL;

		if (crtc_state->fec_enable)
			fec_pipe_mask |= BIT(crtc->pipe);
	}

	if (!fec_pipe_mask || mst_pipe_mask == fec_pipe_mask)
		return 0;

	limits->force_fec_pipes |= mst_pipe_mask;

	ret = intel_modeset_pipes_in_mask_early(state, "MST FEC",
						mst_pipe_mask);

	return ret ? : -EAGAIN;
}

/*
 * Check the BW limits on @mst_mgr's topology. On a BW overallocation
 * (-ENOSPC from the MST core) reduce the bpp of the pipes downstream of the
 * first failing port and return -EAGAIN to request a recomputation.
 */
static int intel_dp_mst_check_bw(struct intel_atomic_state *state,
				 struct drm_dp_mst_topology_mgr *mst_mgr,
				 struct drm_dp_mst_topology_state *mst_state,
				 struct intel_link_bw_limits *limits)
{
	struct drm_dp_mst_port *mst_port;
	u8 mst_port_pipes;
	int ret;

	ret = drm_dp_mst_atomic_check_mgr(&state->base, mst_mgr, mst_state, &mst_port);
	if (ret != -ENOSPC)
		return ret;

	mst_port_pipes = get_pipes_downstream_of_mst_port(state, mst_mgr, mst_port);

	ret = intel_link_bw_reduce_bpp(state, limits,
				       mst_port_pipes, "MST link BW");

	return ret ? : -EAGAIN;
}

/**
 * intel_dp_mst_atomic_check_link - check all modeset MST link configuration
 * @state: intel atomic state
 * @limits: link BW limits
 *
 * Check the link configuration for all modeset MST outputs. If the
 * configuration is invalid @limits will be updated if possible to
 * reduce the total BW, after which the configuration for all CRTCs in
 * @state must be recomputed with the updated @limits.
 *
 * Returns:
 *   - 0 if the configuration is valid
 *   - %-EAGAIN, if the configuration is invalid and @limits got updated
 *     with fallback values with which the configuration of all CRTCs in
 *     @state must be recomputed
 *   - Other negative error, if the configuration is invalid without a
 *     fallback possibility, or the check failed for another reason
 */
int intel_dp_mst_atomic_check_link(struct intel_atomic_state *state,
				   struct intel_link_bw_limits *limits)
{
	struct drm_dp_mst_topology_mgr *mgr;
	struct drm_dp_mst_topology_state *mst_state;
	int ret;
	int i;

	for_each_new_mst_mgr_in_state(&state->base, mgr, mst_state, i) {
		ret = intel_dp_mst_check_fec_change(state, mgr, limits);
		if (ret)
			return ret;

		ret = intel_dp_mst_check_bw(state, mgr, mst_state,
					    limits);
		if (ret)
			return ret;
	}

	return 0;
}

/* Encoder .compute_config_late hook: pick the stream's master transcoder. */
static int mst_stream_compute_config_late(struct intel_encoder *encoder,
					  struct intel_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
	struct intel_dp *intel_dp = to_primary_dp(encoder);

	/* lowest numbered transcoder will be designated master */
	crtc_state->mst_master_transcoder =
		ffs(intel_dp_mst_transcoder_mask(state, intel_dp)) - 1;

	return 0;
}

/*
 * If one of the connectors in a MST stream needs a modeset, mark all CRTCs
 * that share the same MST stream as mode changed;
 * intel_modeset_pipe_config()+intel_crtc_check_fastset() will take care to do
 * a fastset when possible.
 *
 * On TGL+ this is required since each stream goes through a master
 * transcoder, so if the master transcoder needs modeset, all other streams
 * in the topology need a modeset.
All platforms need to add the atomic state 892 * for all streams in the topology, since a modeset on one may require 893 * changing the MST link BW usage of the others, which in turn needs a 894 * recomputation of the corresponding CRTC states. 895 */ 896 static int 897 mst_connector_atomic_topology_check(struct intel_connector *connector, 898 struct intel_atomic_state *state) 899 { 900 struct intel_display *display = to_intel_display(connector); 901 struct drm_connector_list_iter connector_list_iter; 902 struct intel_connector *connector_iter; 903 int ret = 0; 904 905 if (!intel_connector_needs_modeset(state, &connector->base)) 906 return 0; 907 908 drm_connector_list_iter_begin(display->drm, &connector_list_iter); 909 for_each_intel_connector_iter(connector_iter, &connector_list_iter) { 910 struct intel_digital_connector_state *conn_iter_state; 911 struct intel_crtc_state *crtc_state; 912 struct intel_crtc *crtc; 913 914 if (connector_iter->mst_port != connector->mst_port || 915 connector_iter == connector) 916 continue; 917 918 conn_iter_state = intel_atomic_get_digital_connector_state(state, 919 connector_iter); 920 if (IS_ERR(conn_iter_state)) { 921 ret = PTR_ERR(conn_iter_state); 922 break; 923 } 924 925 if (!conn_iter_state->base.crtc) 926 continue; 927 928 crtc = to_intel_crtc(conn_iter_state->base.crtc); 929 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 930 if (IS_ERR(crtc_state)) { 931 ret = PTR_ERR(crtc_state); 932 break; 933 } 934 935 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); 936 if (ret) 937 break; 938 crtc_state->uapi.mode_changed = true; 939 } 940 drm_connector_list_iter_end(&connector_list_iter); 941 942 return ret; 943 } 944 945 static int 946 mst_connector_atomic_check(struct drm_connector *connector, 947 struct drm_atomic_state *_state) 948 { 949 struct intel_atomic_state *state = to_intel_atomic_state(_state); 950 struct intel_connector *intel_connector = 951 to_intel_connector(connector); 952 int ret; 953 
954 ret = intel_digital_connector_atomic_check(connector, &state->base); 955 if (ret) 956 return ret; 957 958 ret = mst_connector_atomic_topology_check(intel_connector, state); 959 if (ret) 960 return ret; 961 962 if (intel_connector_needs_modeset(state, connector)) { 963 ret = intel_dp_tunnel_atomic_check_state(state, 964 intel_connector->mst_port, 965 intel_connector); 966 if (ret) 967 return ret; 968 } 969 970 return drm_dp_atomic_release_time_slots(&state->base, 971 &intel_connector->mst_port->mst_mgr, 972 intel_connector->port); 973 } 974 975 static void mst_stream_disable(struct intel_atomic_state *state, 976 struct intel_encoder *encoder, 977 const struct intel_crtc_state *old_crtc_state, 978 const struct drm_connector_state *old_conn_state) 979 { 980 struct intel_display *display = to_intel_display(state); 981 struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); 982 struct intel_dp *intel_dp = to_primary_dp(encoder); 983 struct intel_connector *connector = 984 to_intel_connector(old_conn_state->connector); 985 986 drm_dbg_kms(display->drm, "active links %d\n", 987 intel_dp->active_mst_links); 988 989 if (intel_dp->active_mst_links == 1) 990 intel_dp->link_trained = false; 991 992 intel_hdcp_disable(intel_mst->connector); 993 994 intel_dp_sink_disable_decompression(state, connector, old_crtc_state); 995 } 996 997 static void mst_stream_post_disable(struct intel_atomic_state *state, 998 struct intel_encoder *encoder, 999 const struct intel_crtc_state *old_crtc_state, 1000 const struct drm_connector_state *old_conn_state) 1001 { 1002 struct intel_display *display = to_intel_display(encoder); 1003 struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); 1004 struct intel_encoder *primary_encoder = to_primary_encoder(encoder); 1005 struct intel_dp *intel_dp = to_primary_dp(encoder); 1006 struct intel_connector *connector = 1007 to_intel_connector(old_conn_state->connector); 1008 struct drm_dp_mst_topology_state *old_mst_state = 1009 
		drm_atomic_get_old_mst_topology_state(&state->base, &intel_dp->mst_mgr);
	struct drm_dp_mst_topology_state *new_mst_state =
		drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
	const struct drm_dp_mst_atomic_payload *old_payload =
		drm_atomic_get_mst_payload_state(old_mst_state, connector->port);
	struct drm_dp_mst_atomic_payload *new_payload =
		drm_atomic_get_mst_payload_state(new_mst_state, connector->port);
	struct intel_crtc *pipe_crtc;
	bool last_mst_stream;
	int i;

	intel_dp->active_mst_links--;
	last_mst_stream = intel_dp->active_mst_links == 0;
	/* On DISPLAY_VER >= 12 the last stream must be on the master transcoder. */
	drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 12 && last_mst_stream &&
		    !intel_dp_mst_is_master_trans(old_crtc_state));

	for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
		const struct intel_crtc_state *old_pipe_crtc_state =
			intel_atomic_get_old_crtc_state(state, pipe_crtc);

		intel_crtc_vblank_off(old_pipe_crtc_state);
	}

	intel_disable_transcoder(old_crtc_state);

	/* Step 1 of removing the stream's payload allocation. */
	drm_dp_remove_payload_part1(&intel_dp->mst_mgr, new_mst_state, new_payload);

	intel_ddi_clear_act_sent(encoder, old_crtc_state);

	/* Deallocate the stream's VC payload on the source side. */
	intel_de_rmw(display,
		     TRANS_DDI_FUNC_CTL(display, old_crtc_state->cpu_transcoder),
		     TRANS_DDI_DP_VC_PAYLOAD_ALLOC, 0);

	intel_ddi_wait_for_act_sent(encoder, old_crtc_state);
	drm_dp_check_act_status(&intel_dp->mst_mgr);

	/* Step 2: complete the payload removal after ACT has been seen. */
	drm_dp_remove_payload_part2(&intel_dp->mst_mgr, new_mst_state,
				    old_payload, new_payload);

	intel_ddi_disable_transcoder_func(old_crtc_state);

	for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
		const struct intel_crtc_state *old_pipe_crtc_state =
			intel_atomic_get_old_crtc_state(state, pipe_crtc);

		intel_dsc_disable(old_pipe_crtc_state);

		if (DISPLAY_VER(display) >= 9)
			skl_scaler_disable(old_pipe_crtc_state);
		else
			ilk_pfit_disable(old_pipe_crtc_state);
	}

	/*
	 * Power down mst path before disabling the port, otherwise we end
	 * up getting interrupts from the sink upon detecting link loss.
	 */
	drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port,
				     false);

	/*
	 * BSpec 4287: disable DIP after the transcoder is disabled and before
	 * the transcoder clock select is set to none.
	 */
	intel_dp_set_infoframes(primary_encoder, false, old_crtc_state, NULL);
	/*
	 * From TGL spec: "If multi-stream slave transcoder: Configure
	 * Transcoder Clock Select to direct no clock to the transcoder"
	 *
	 * From older GENs spec: "Configure Transcoder Clock Select to direct
	 * no clock to the transcoder"
	 */
	if (DISPLAY_VER(display) < 12 || !last_mst_stream)
		intel_ddi_disable_transcoder_clock(old_crtc_state);

	intel_mst->connector = NULL;
	if (last_mst_stream)
		primary_encoder->post_disable(state, primary_encoder,
					      old_crtc_state, NULL);

	drm_dbg_kms(display->drm, "active links %d\n",
		    intel_dp->active_mst_links);
}

/*
 * Stream encoder ->post_pll_disable hook: forward to the primary encoder
 * once no MST streams are left on the link.
 */
static void mst_stream_post_pll_disable(struct intel_atomic_state *state,
					struct intel_encoder *encoder,
					const struct intel_crtc_state *old_crtc_state,
					const struct drm_connector_state *old_conn_state)
{
	struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
	struct intel_dp *intel_dp = to_primary_dp(encoder);

	if (intel_dp->active_mst_links == 0 &&
	    primary_encoder->post_pll_disable)
		primary_encoder->post_pll_disable(state, primary_encoder, old_crtc_state, old_conn_state);
}

/*
 * Stream encoder ->pre_pll_enable hook: enable the port PLL via the primary
 * encoder for the first stream on the link, or only update the active PLL
 * state for the following streams.
 */
static void mst_stream_pre_pll_enable(struct intel_atomic_state *state,
				      struct intel_encoder *encoder,
				      const struct intel_crtc_state *pipe_config,
				      const struct drm_connector_state *conn_state)
{
	struct intel_encoder *primary_encoder =
		to_primary_encoder(encoder);
	struct intel_dp *intel_dp = to_primary_dp(encoder);

	if (intel_dp->active_mst_links == 0)
		primary_encoder->pre_pll_enable(state, primary_encoder,
						pipe_config, NULL);
	else
		/*
		 * The port PLL state needs to get updated for secondary
		 * streams as for the primary stream.
		 */
		intel_ddi_update_active_dpll(state, primary_encoder,
					     to_intel_crtc(pipe_config->uapi.crtc));
}

/* Do the cached MST probe link parameters match the given rate/lane count? */
static bool intel_mst_probed_link_params_valid(struct intel_dp *intel_dp,
					       int link_rate, int lane_count)
{
	return intel_dp->link.mst_probed_rate == link_rate &&
		intel_dp->link.mst_probed_lane_count == lane_count;
}

/* Cache the link parameters the topology was last probed with. */
static void intel_mst_set_probed_link_params(struct intel_dp *intel_dp,
					     int link_rate, int lane_count)
{
	intel_dp->link.mst_probed_rate = link_rate;
	intel_dp->link.mst_probed_lane_count = lane_count;
}

/*
 * Queue a reprobe of the MST topology if the link parameters changed since
 * the last probe, caching the new parameters afterwards.
 */
static void intel_mst_reprobe_topology(struct intel_dp *intel_dp,
				       const struct intel_crtc_state *crtc_state)
{
	if (intel_mst_probed_link_params_valid(intel_dp,
					       crtc_state->port_clock, crtc_state->lane_count))
		return;

	drm_dp_mst_topology_queue_probe(&intel_dp->mst_mgr);

	intel_mst_set_probed_link_params(intel_dp,
					 crtc_state->port_clock, crtc_state->lane_count);
}

/*
 * Stream encoder ->pre_enable hook: power up the sink path, enable the
 * primary encoder for the first stream on the link and start the stream's
 * payload allocation (part 1).
 */
static void mst_stream_pre_enable(struct intel_atomic_state *state,
				  struct intel_encoder *encoder,
				  const struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
	struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
	struct intel_dp *intel_dp = to_primary_dp(encoder);
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	struct drm_dp_mst_topology_state *mst_state =
		drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
	int ret;
	bool first_mst_stream;

	/* MST encoders are bound to a crtc, not to a connector,
	 * force the mapping here for get_hw_state.
	 */
	connector->encoder = encoder;
	intel_mst->connector = connector;
	first_mst_stream = intel_dp->active_mst_links == 0;
	/* On DISPLAY_VER >= 12 the first stream must be on the master transcoder. */
	drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 12 && first_mst_stream &&
		    !intel_dp_mst_is_master_trans(pipe_config));

	drm_dbg_kms(display->drm, "active links %d\n",
		    intel_dp->active_mst_links);

	if (first_mst_stream)
		intel_dp_set_power(intel_dp, DP_SET_POWER_D0);

	drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);

	intel_dp_sink_enable_decompression(state, connector, pipe_config);

	if (first_mst_stream) {
		primary_encoder->pre_enable(state, primary_encoder,
					    pipe_config, NULL);

		intel_mst_reprobe_topology(intel_dp, pipe_config);
	}

	intel_dp->active_mst_links++;

	ret = drm_dp_add_payload_part1(&intel_dp->mst_mgr, mst_state,
				       drm_atomic_get_mst_payload_state(mst_state, connector->port));
	if (ret < 0)
		/* Payload allocation failed: schedule a retry with a reduced link BW. */
		intel_dp_queue_modeset_retry_for_link(state, primary_encoder, pipe_config);

	/*
	 * Before Gen 12 this is not done as part of
	 * primary_encoder->pre_enable() and should be done here. For
	 * Gen 12+ the step in which this should be done is different for the
	 * first MST stream, so it's done on the DDI for the first stream and
	 * here for the following ones.
	 */
	if (DISPLAY_VER(display) < 12 || !first_mst_stream)
		intel_ddi_enable_transcoder_clock(encoder, pipe_config);

	if (DISPLAY_VER(display) >= 13 && !first_mst_stream)
		intel_ddi_config_transcoder_func(encoder, pipe_config);

	intel_dsc_dp_pps_write(primary_encoder, pipe_config);
	intel_ddi_set_dp_msa(pipe_config, conn_state);
}

/*
 * Apply the ADL-P BS jitter related workarounds (Wa_14013163432,
 * Wa_14014143976) in CHICKEN_MISC_3, depending on FEC/UHBR use and the
 * display stepping.
 */
static void enable_bs_jitter_was(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	u32 clear = 0;
	u32 set = 0;

	if (!IS_ALDERLAKE_P(i915))
		return;

	if (!IS_DISPLAY_STEP(display, STEP_D0, STEP_FOREVER))
		return;

	/* Wa_14013163432:adlp */
	if (crtc_state->fec_enable || intel_dp_is_uhbr(crtc_state))
		set |= DP_MST_FEC_BS_JITTER_WA(crtc_state->cpu_transcoder);

	/* Wa_14014143976:adlp */
	if (IS_DISPLAY_STEP(display, STEP_E0, STEP_FOREVER)) {
		if (intel_dp_is_uhbr(crtc_state))
			set |= DP_MST_SHORT_HBLANK_WA(crtc_state->cpu_transcoder);
		else if (crtc_state->fec_enable)
			clear |= DP_MST_SHORT_HBLANK_WA(crtc_state->cpu_transcoder);

		if (crtc_state->fec_enable || intel_dp_is_uhbr(crtc_state))
			set |= DP_MST_DPT_DPTP_ALIGN_WA(crtc_state->cpu_transcoder);
	}

	if (!clear && !set)
		return;

	intel_de_rmw(display, CHICKEN_MISC_3, clear, set);
}

/*
 * Stream encoder ->enable hook: enable the transcoder, allocate the
 * stream's VC payload on the source side and complete the payload
 * allocation towards the sink (part 2).
 */
static void mst_stream_enable(struct intel_atomic_state *state,
			      struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
	struct intel_dp *intel_dp = to_primary_dp(encoder);
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	struct drm_dp_mst_topology_state *mst_state =
		drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
	enum transcoder trans = pipe_config->cpu_transcoder;
	/* Note: active_mst_links was already incremented in ->pre_enable. */
	bool first_mst_stream = intel_dp->active_mst_links == 1;
	struct intel_crtc *pipe_crtc;
	int ret, i;

	drm_WARN_ON(display->drm, pipe_config->has_pch_encoder);

	if (intel_dp_is_uhbr(pipe_config)) {
		const struct drm_display_mode *adjusted_mode =
			&pipe_config->hw.adjusted_mode;
		u64 crtc_clock_hz = KHz(adjusted_mode->crtc_clock);

		/* For UHBR links program the stream's pixel clock (in Hz). */
		intel_de_write(display, TRANS_DP2_VFREQHIGH(pipe_config->cpu_transcoder),
			       TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz >> 24));
		intel_de_write(display, TRANS_DP2_VFREQLOW(pipe_config->cpu_transcoder),
			       TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz & 0xffffff));
	}

	enable_bs_jitter_was(pipe_config);

	intel_ddi_enable_transcoder_func(encoder, pipe_config);

	intel_ddi_clear_act_sent(encoder, pipe_config);

	/* Allocate the stream's VC payload on the source side. */
	intel_de_rmw(display, TRANS_DDI_FUNC_CTL(display, trans), 0,
		     TRANS_DDI_DP_VC_PAYLOAD_ALLOC);

	drm_dbg_kms(display->drm, "active links %d\n",
		    intel_dp->active_mst_links);

	intel_ddi_wait_for_act_sent(encoder, pipe_config);
	drm_dp_check_act_status(&intel_dp->mst_mgr);

	if (first_mst_stream)
		intel_ddi_wait_for_fec_status(encoder, pipe_config, true);

	/* Complete the payload allocation (part 2) towards the sink. */
	ret = drm_dp_add_payload_part2(&intel_dp->mst_mgr,
				       drm_atomic_get_mst_payload_state(mst_state,
									connector->port));
	if (ret < 0)
		/* Payload allocation failed: schedule a retry with a reduced link BW. */
		intel_dp_queue_modeset_retry_for_link(state, primary_encoder, pipe_config);

	if (DISPLAY_VER(display) >= 12)
		intel_de_rmw(display, CHICKEN_TRANS(display, trans),
			     FECSTALL_DIS_DPTSTREAM_DPTTG,
			     pipe_config->fec_enable ? FECSTALL_DIS_DPTSTREAM_DPTTG : 0);

	intel_audio_sdp_split_update(pipe_config);

	intel_enable_transcoder(pipe_config);

	for_each_pipe_crtc_modeset_enable(display, pipe_crtc, pipe_config, i) {
		const struct intel_crtc_state *pipe_crtc_state =
			intel_atomic_get_new_crtc_state(state, pipe_crtc);

		intel_crtc_vblank_on(pipe_crtc_state);
	}

	intel_hdcp_enable(state, encoder, pipe_config, conn_state);
}

/*
 * Stream encoder ->get_hw_state hook: the encoder counts as active whenever
 * a connector was bound to it in ->pre_enable.
 */
static bool mst_stream_get_hw_state(struct intel_encoder *encoder,
				    enum pipe *pipe)
{
	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
	*pipe = intel_mst->pipe;
	if (intel_mst->connector)
		return true;
	return false;
}

/* Stream encoder ->get_config hook: state readout is done by the primary encoder. */
static void mst_stream_get_config(struct intel_encoder *encoder,
				  struct intel_crtc_state *pipe_config)
{
	struct intel_encoder *primary_encoder = to_primary_encoder(encoder);

	primary_encoder->get_config(primary_encoder, pipe_config);
}

/* Stream encoder ->initial_fastset_check hook: forwarded to the primary DP code. */
static bool mst_stream_initial_fastset_check(struct intel_encoder *encoder,
					     struct intel_crtc_state *crtc_state)
{
	struct intel_encoder *primary_encoder = to_primary_encoder(encoder);

	return intel_dp_initial_fastset_check(primary_encoder, crtc_state);
}

/*
 * Read the EDID of the sink behind an MST connector via sideband messaging
 * and update the connector's probed modes with it.
 */
static int mst_connector_get_ddc_modes(struct drm_connector *connector)
{
	struct intel_display *display = to_intel_display(connector->dev);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_dp *intel_dp = intel_connector->mst_port;
	const struct drm_edid *drm_edid;
	int ret;

	/* An unregistered connector gets its modes cleared (NULL EDID). */
	if (drm_connector_is_unregistered(connector))
		return intel_connector_update_modes(connector, NULL);

	/* Without HW access, fall back to the modes from the cached EDID. */
	if (!intel_display_driver_check_access(display))
		return drm_edid_connector_add_modes(connector);

	drm_edid = drm_dp_mst_edid_read(connector, &intel_dp->mst_mgr, intel_connector->port);

	ret =
		intel_connector_update_modes(connector, drm_edid);

	drm_edid_free(drm_edid);

	return ret;
}

/*
 * Connector ->late_register hook: register the connector with the MST
 * topology manager and then with the rest of the driver.
 */
static int
mst_connector_late_register(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	int ret;

	ret = drm_dp_mst_connector_late_register(connector,
						 intel_connector->port);
	if (ret < 0)
		return ret;

	ret = intel_connector_register(connector);
	if (ret < 0)
		/* Roll back the MST side registration on failure. */
		drm_dp_mst_connector_early_unregister(connector,
						      intel_connector->port);

	return ret;
}

/* Connector ->early_unregister hook: the inverse of ->late_register. */
static void
mst_connector_early_unregister(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	intel_connector_unregister(connector);
	drm_dp_mst_connector_early_unregister(connector,
					      intel_connector->port);
}

static const struct drm_connector_funcs mst_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_digital_connector_atomic_get_property,
	.atomic_set_property = intel_digital_connector_atomic_set_property,
	.late_register = mst_connector_late_register,
	.early_unregister = mst_connector_early_unregister,
	.destroy = intel_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};

/* Connector ->get_modes hook: modes come from the EDID read via sideband. */
static int mst_connector_get_modes(struct drm_connector *connector)
{
	return mst_connector_get_ddc_modes(connector);
}

/*
 * Connector ->mode_valid_ctx hook: reject modes that can't be supported due
 * to platform, link BW, branch device BW (port->full_pbn) or joiner/DSC
 * constraints.
 */
static int
mst_connector_mode_valid_ctx(struct drm_connector *connector,
			     struct drm_display_mode *mode,
			     struct drm_modeset_acquire_ctx *ctx,
			     enum drm_mode_status *status)
{
	struct intel_display *display = to_intel_display(connector->dev);
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_dp *intel_dp = intel_connector->mst_port;
	struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst_mgr;
	struct drm_dp_mst_port *port = intel_connector->port;
	const int min_bpp = 18;	/* 6 bpc RGB, the minimum link bpp considered */
	int max_dotclk = display->cdclk.max_dotclk_freq;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int ret;
	bool dsc = false;
	u16 dsc_max_compressed_bpp = 0;
	u8 dsc_slice_count = 0;
	int target_clock = mode->clock;
	int num_joined_pipes;

	if (drm_connector_is_unregistered(connector)) {
		*status = MODE_ERROR;
		return 0;
	}

	*status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
	if (*status != MODE_OK)
		return 0;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
		*status = MODE_H_ILLEGAL;
		return 0;
	}

	if (mode->clock < 10000) {
		*status = MODE_CLOCK_LOW;
		return 0;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_link_data_rate(intel_dp,
					       max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(mode->clock, min_bpp);

	/*
	 * TODO:
	 * - Also check if compression would allow for the mode
	 * - Calculate the overhead using drm_dp_bw_overhead() /
	 *   drm_dp_bw_channel_coding_efficiency(), similarly to the
	 *   compute config code, as drm_dp_calc_pbn_mode() doesn't
	 *   account with all the overheads.
	 * - Check here and during compute config the BW reported by
	 *   DFP_Link_Available_Payload_Bandwidth_Number (or the
	 *   corresponding link capabilities of the sink) in case the
	 *   stream is uncompressed for it by the last branch device.
	 */
	num_joined_pipes = intel_dp_num_joined_pipes(intel_dp, intel_connector,
						     mode->hdisplay, target_clock);
	max_dotclk *= num_joined_pipes;

	/* Take the topology state lock before reading port->full_pbn below. */
	ret = drm_modeset_lock(&mgr->base.lock, ctx);
	if (ret)
		return ret;

	if (mode_rate > max_rate || mode->clock > max_dotclk ||
	    drm_dp_calc_pbn_mode(mode->clock, min_bpp << 4) > port->full_pbn) {
		*status = MODE_CLOCK_HIGH;
		return 0;
	}

	if (intel_dp_has_dsc(intel_connector)) {
		/*
		 * TBD pass the connector BPC,
		 * for now U8_MAX so that max BPC on that platform would be picked
		 */
		int pipe_bpp = intel_dp_dsc_compute_max_bpp(intel_connector, U8_MAX);

		if (drm_dp_sink_supports_fec(intel_connector->dp.fec_capability)) {
			dsc_max_compressed_bpp =
				intel_dp_dsc_get_max_compressed_bpp(display,
								    max_link_clock,
								    max_lanes,
								    target_clock,
								    mode->hdisplay,
								    num_joined_pipes,
								    INTEL_OUTPUT_FORMAT_RGB,
								    pipe_bpp, 64);
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(intel_connector,
							     target_clock,
							     mode->hdisplay,
							     num_joined_pipes);
		}

		/* DSC is usable only if both a compressed bpp and a slice count were found. */
		dsc = dsc_max_compressed_bpp && dsc_slice_count;
	}

	if (intel_dp_joiner_needs_dsc(display, num_joined_pipes) && !dsc) {
		*status = MODE_CLOCK_HIGH;
		return 0;
	}

	if (mode_rate > max_rate && !dsc) {
		*status = MODE_CLOCK_HIGH;
		return 0;
	}

	*status = intel_mode_valid_max_plane_size(dev_priv, mode, num_joined_pipes);
	return 0;
}

/*
 * Connector ->atomic_best_encoder hook: pick the stream encoder created for
 * the pipe of the CRTC this connector is getting attached to.
 */
static struct drm_encoder *
mst_connector_atomic_best_encoder(struct drm_connector *connector,
				  struct drm_atomic_state *state)
{
	struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
											connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_dp *intel_dp = intel_connector->mst_port;
	struct intel_crtc *crtc =
		to_intel_crtc(connector_state->crtc);

	return &intel_dp->mst_encoders[crtc->pipe]->base.base;
}

/*
 * Connector ->detect_ctx hook: detection is delegated to the MST topology
 * manager, which tracks the port's connection status.
 */
static int
mst_connector_detect_ctx(struct drm_connector *connector,
			 struct drm_modeset_acquire_ctx *ctx, bool force)
{
	struct intel_display *display = to_intel_display(connector->dev);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_dp *intel_dp = intel_connector->mst_port;

	if (!intel_display_device_enabled(display))
		return connector_status_disconnected;

	if (drm_connector_is_unregistered(connector))
		return connector_status_disconnected;

	/* Without HW access keep reporting the last known status. */
	if (!intel_display_driver_check_access(display))
		return connector->status;

	intel_dp_flush_connector_commits(intel_connector);

	return drm_dp_mst_detect_port(connector, ctx, &intel_dp->mst_mgr,
				      intel_connector->port);
}

static const struct drm_connector_helper_funcs mst_connector_helper_funcs = {
	.get_modes = mst_connector_get_modes,
	.mode_valid_ctx = mst_connector_mode_valid_ctx,
	.atomic_best_encoder = mst_connector_atomic_best_encoder,
	.atomic_check = mst_connector_atomic_check,
	.detect_ctx = mst_connector_detect_ctx,
};

/* Encoder ->destroy hook: free the stream encoder allocated in mst_stream_encoder_create(). */
static void mst_stream_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(to_intel_encoder(encoder));

	drm_encoder_cleanup(encoder);
	kfree(intel_mst);
}

static const struct drm_encoder_funcs mst_stream_encoder_funcs = {
	.destroy = mst_stream_encoder_destroy,
};

/* Connector ->get_hw_state: derive the state from the attached stream encoder. */
static bool mst_connector_get_hw_state(struct intel_connector *connector)
{
	/* This is the MST stream encoder set in ->pre_enable, if any */
	struct intel_encoder *encoder = intel_attached_encoder(connector);
	enum pipe pipe;

	if (!encoder || !connector->base.state->crtc)
		return false;

	return encoder->get_hw_state(encoder, &pipe);
}

/* Attach the standard and DP specific properties to a newly added MST connector. */
static int mst_topology_add_connector_properties(struct intel_dp *intel_dp,
						 struct drm_connector *connector,
						 const char *pathprop)
{
	struct intel_display *display = to_intel_display(intel_dp);

	drm_object_attach_property(&connector->base,
				   display->drm->mode_config.path_property, 0);
	drm_object_attach_property(&connector->base,
				   display->drm->mode_config.tile_property, 0);

	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);

	/*
	 * Reuse the prop from the SST connector because we're
	 * not allowed to create new props after device registration.
	 */
	connector->max_bpc_property =
		intel_dp->attached_connector->base.max_bpc_property;
	if (connector->max_bpc_property)
		drm_connector_attach_max_bpc_property(connector, 6, 12);

	return drm_connector_set_path_property(connector, pathprop);
}

/*
 * Read the DSC capabilities from the DPCD of the stream's DSC decompression
 * engine, if any, and store them in the connector's DSC sink caps.
 */
static void
intel_dp_mst_read_decompression_port_dsc_caps(struct intel_dp *intel_dp,
					      struct intel_connector *connector)
{
	u8 dpcd_caps[DP_RECEIVER_CAP_SIZE];

	if (!connector->dp.dsc_decompression_aux)
		return;

	if (drm_dp_read_dpcd_caps(connector->dp.dsc_decompression_aux, dpcd_caps) < 0)
		return;

	intel_dp_get_dsc_sink_cap(dpcd_caps[DP_DPCD_REV], connector);
}

/*
 * Detect sinks with the DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC quirk,
 * i.e. sinks that require DSC to be enabled for modes with a short HBLANK.
 */
static bool detect_dsc_hblank_expansion_quirk(const struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct drm_dp_aux *aux = connector->dp.dsc_decompression_aux;
	struct drm_dp_desc desc;
	u8 dpcd[DP_RECEIVER_CAP_SIZE];

	if (!aux)
		return false;

	/*
	 * A logical port's OUI (at least for affected sinks) is all 0, so
	 * instead of that the parent port's OUI is used for identification.
	 */
	if (drm_dp_mst_port_is_logical(connector->port)) {
		aux = drm_dp_mst_aux_for_parent(connector->port);
		if (!aux)
			aux = &connector->mst_port->aux;
	}

	if (drm_dp_read_dpcd_caps(aux, dpcd) < 0)
		return false;

	if (drm_dp_read_desc(aux, &desc, drm_dp_is_branch(dpcd)) < 0)
		return false;

	if (!drm_dp_has_quirk(&desc,
			      DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC))
		return false;

	/*
	 * UHBR (MST sink) devices requiring this quirk don't advertise the
	 * HBLANK expansion support. Presuming that they perform HBLANK
	 * expansion internally, or are affected by this issue on modes with a
	 * short HBLANK for other reasons.
	 */
	if (!drm_dp_128b132b_supported(dpcd) &&
	    !(dpcd[DP_RECEIVE_PORT_0_CAP_0] & DP_HBLANK_EXPANSION_CAPABLE))
		return false;

	drm_dbg_kms(display->drm,
		    "[CONNECTOR:%d:%s] DSC HBLANK expansion quirk detected\n",
		    connector->base.base.id, connector->base.name);

	return true;
}

/*
 * Topology manager ->add_connector hook: allocate and initialize a
 * connector for a newly detected MST port.
 *
 * Returns the new connector, or NULL on failure.
 */
static struct drm_connector *
mst_topology_add_connector(struct drm_dp_mst_topology_mgr *mgr,
			   struct drm_dp_mst_port *port,
			   const char *pathprop)
{
	struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_connector *intel_connector;
	struct drm_connector *connector;
	enum pipe pipe;
	int ret;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		return NULL;

	connector = &intel_connector->base;

	intel_connector->get_hw_state = mst_connector_get_hw_state;
	intel_connector->sync_state = intel_dp_connector_sync_state;
	intel_connector->mst_port = intel_dp;
	intel_connector->port = port;
	/* Hold a malloc reference on the port for the connector's lifetime. */
	drm_dp_mst_get_port_malloc(port);

	intel_dp_init_modeset_retry_work(intel_connector);

	ret = drm_connector_dynamic_init(display->drm, connector, &mst_connector_funcs,
					 DRM_MODE_CONNECTOR_DisplayPort, NULL);
	if (ret) {
		drm_dp_mst_put_port_malloc(port);
		intel_connector_free(intel_connector);
		return NULL;
	}

	intel_connector->dp.dsc_decompression_aux = drm_dp_mst_dsc_aux_for_port(port);
	intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, intel_connector);
	intel_connector->dp.dsc_hblank_expansion_quirk =
		detect_dsc_hblank_expansion_quirk(intel_connector);

	drm_connector_helper_add(connector, &mst_connector_helper_funcs);

	/* The connector can be driven by the stream encoder of any pipe. */
	for_each_pipe(display, pipe) {
		struct drm_encoder *enc =
			&intel_dp->mst_encoders[pipe]->base.base;

		ret = drm_connector_attach_encoder(&intel_connector->base, enc);
		if (ret)
			goto err;
	}

	ret = mst_topology_add_connector_properties(intel_dp, connector, pathprop);
	if (ret)
		goto err;

	/* HDCP init failure is not fatal; continue without HDCP. */
	ret = intel_dp_hdcp_init(dig_port, intel_connector);
	if (ret)
		drm_dbg_kms(display->drm, "[%s:%d] HDCP MST init failed, skipping.\n",
			    connector->name, connector->base.id);

	return connector;

err:
	drm_connector_cleanup(connector);
	return NULL;
}

/*
 * Topology manager ->poll_hpd_irq hook: synthesize an HPD IRQ event on the
 * primary port.
 */
static void
mst_topology_poll_hpd_irq(struct drm_dp_mst_topology_mgr *mgr)
{
	struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);

	intel_hpd_trigger_irq(dp_to_dig_port(intel_dp));
}

static const struct drm_dp_mst_topology_cbs mst_topology_cbs = {
	.add_connector = mst_topology_add_connector,
	.poll_hpd_irq = mst_topology_poll_hpd_irq,
};

/* Create a fake encoder for an individual MST stream */
static struct intel_dp_mst_encoder *
mst_stream_encoder_create(struct intel_digital_port *dig_port, enum pipe pipe)
{
	struct intel_display *display = to_intel_display(dig_port);
	struct intel_encoder *primary_encoder = &dig_port->base;
	struct intel_dp_mst_encoder *intel_mst;
	struct intel_encoder *encoder;

	intel_mst = kzalloc(sizeof(*intel_mst), GFP_KERNEL);

	if (!intel_mst)
		return NULL;

	intel_mst->pipe = pipe;
	encoder = &intel_mst->base;
	intel_mst->primary = dig_port;

	drm_encoder_init(display->drm, &encoder->base, &mst_stream_encoder_funcs,
			 DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe));

	encoder->type = INTEL_OUTPUT_DP_MST;
	/* Power domain and port are inherited from the primary (DDI) encoder. */
	encoder->power_domain = primary_encoder->power_domain;
	encoder->port = primary_encoder->port;
	encoder->cloneable = 0;
	/*
	 * This is wrong, but broken userspace uses the intersection
	 * of possible_crtcs of all the encoders of a given connector
	 * to figure out which crtcs can drive said connector. What
	 * should be used instead is the union of possible_crtcs.
	 * To keep such userspace functioning we must misconfigure
	 * this to make sure the intersection is not empty :(
	 */
	encoder->pipe_mask = ~0;

	encoder->compute_config = mst_stream_compute_config;
	encoder->compute_config_late = mst_stream_compute_config_late;
	encoder->disable = mst_stream_disable;
	encoder->post_disable = mst_stream_post_disable;
	encoder->post_pll_disable = mst_stream_post_pll_disable;
	encoder->update_pipe = intel_ddi_update_pipe;
	encoder->pre_pll_enable = mst_stream_pre_pll_enable;
	encoder->pre_enable = mst_stream_pre_enable;
	encoder->enable = mst_stream_enable;
	encoder->audio_enable = intel_audio_codec_enable;
	encoder->audio_disable = intel_audio_codec_disable;
	encoder->get_hw_state = mst_stream_get_hw_state;
	encoder->get_config = mst_stream_get_config;
	encoder->initial_fastset_check = mst_stream_initial_fastset_check;

	return intel_mst;

}

/* Create the fake encoders for MST streams */
static bool
mst_stream_encoders_create(struct intel_digital_port *dig_port)
{
	struct intel_display *display = to_intel_display(dig_port);
	struct intel_dp *intel_dp = &dig_port->dp;
	enum pipe pipe;

	/* One stream encoder per pipe, so every pipe can drive an MST stream. */
	for_each_pipe(display, pipe)
		intel_dp->mst_encoders[pipe] = mst_stream_encoder_create(dig_port, pipe);
	return true;
}

/* Number of currently active MST streams on @dig_port's link. */
int
intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port)
{
	return dig_port->dp.active_mst_links;
}

/*
 * Initialize MST support for @dig_port if the platform and port are MST
 * capable: create the per-pipe stream encoders and set up the topology
 * manager.
 *
 * Returns 0 on success (including when MST isn't supported on the port),
 * negative error code on failure.
 */
int
intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
{
	struct intel_display *display = to_intel_display(dig_port);
	struct intel_dp *intel_dp = &dig_port->dp;
	enum port port = dig_port->base.port;
	int ret;

	if (!HAS_DP_MST(display) || intel_dp_is_edp(intel_dp))
		return 0;

	if (DISPLAY_VER(display) < 12 && port == PORT_A)
		return 0;

	if (DISPLAY_VER(display) < 11 && port == PORT_E)
		return 0;

	intel_dp->mst_mgr.cbs = &mst_topology_cbs;

	/* create encoders */
	mst_stream_encoders_create(dig_port);
	ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, display->drm,
					   &intel_dp->aux, 16, 3, conn_base_id);
	if (ret) {
		/* A NULL cbs pointer marks MST source support as unavailable. */
		intel_dp->mst_mgr.cbs = NULL;
		return ret;
	}

	return 0;
}

/* Check whether MST was successfully initialized for @intel_dp's port. */
bool intel_dp_mst_source_support(struct intel_dp *intel_dp)
{
	return intel_dp->mst_mgr.cbs;
}

/* Tear down the MST state set up by intel_dp_mst_encoder_init(). */
void
intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port)
{
	struct intel_dp *intel_dp = &dig_port->dp;

	if (!intel_dp_mst_source_support(intel_dp))
		return;

	drm_dp_mst_topology_mgr_destroy(&intel_dp->mst_mgr);
	/* encoders will get killed by normal cleanup */

	intel_dp->mst_mgr.cbs = NULL;
}

/* Is this CRTC's transcoder the master transcoder of its MST stream group? */
bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->mst_master_transcoder ==
		crtc_state->cpu_transcoder;
}

/* Is this CRTC's transcoder a slave transcoder in an MST stream group? */
bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->mst_master_transcoder != INVALID_TRANSCODER &&
	       crtc_state->mst_master_transcoder != crtc_state->cpu_transcoder;
}

/**
 * intel_dp_mst_add_topology_state_for_connector - add MST topology state for a connector
 * @state: atomic state
 * @connector: connector to add the state for
 * @crtc: the CRTC @connector is attached to
 *
 * Add the MST topology state for @connector to @state.
 *
 * Returns 0 on success, negative error code on failure.
 */
static int
intel_dp_mst_add_topology_state_for_connector(struct intel_atomic_state *state,
					      struct intel_connector *connector,
					      struct intel_crtc *crtc)
{
	struct drm_dp_mst_topology_state *mst_state;

	/* Not an MST connector: nothing to add. */
	if (!connector->mst_port)
		return 0;

	mst_state = drm_atomic_get_mst_topology_state(&state->base,
						      &connector->mst_port->mst_mgr);
	if (IS_ERR(mst_state))
		return PTR_ERR(mst_state);

	/* Record that @crtc's state is part of this topology's state. */
	mst_state->pending_crtc_mask |= drm_crtc_mask(&crtc->base);

	return 0;
}

/**
 * intel_dp_mst_add_topology_state_for_crtc - add MST topology state for a CRTC
 * @state: atomic state
 * @crtc: CRTC to add the state for
 *
 * Add the MST topology state for @crtc to @state.
 *
 * Returns 0 on success, negative error code on failure.
 */
int intel_dp_mst_add_topology_state_for_crtc(struct intel_atomic_state *state,
					     struct intel_crtc *crtc)
{
	struct drm_connector *_connector;
	struct drm_connector_state *conn_state;
	int i;

	for_each_new_connector_in_state(&state->base, _connector, conn_state, i) {
		struct intel_connector *connector = to_intel_connector(_connector);
		int ret;

		if (conn_state->crtc != &crtc->base)
			continue;

		ret = intel_dp_mst_add_topology_state_for_connector(state, connector, crtc);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Return the first connector in @state whose old or new state uses @crtc,
 * or NULL if there is none.
 */
static struct intel_connector *
get_connector_in_state_for_crtc(struct intel_atomic_state *state,
				const struct intel_crtc *crtc)
{
	struct drm_connector_state *old_conn_state;
	struct drm_connector_state *new_conn_state;
	struct drm_connector *_connector;
	int i;

	for_each_oldnew_connector_in_state(&state->base, _connector,
					   old_conn_state, new_conn_state, i) {
		struct intel_connector *connector =
			to_intel_connector(_connector);

		if (old_conn_state->crtc == &crtc->base ||
		    new_conn_state->crtc == &crtc->base)
			return connector;
	}

	return NULL;
}

/**
 * intel_dp_mst_crtc_needs_modeset - check if changes in topology need to modeset the given CRTC
 * @state: atomic state
 * @crtc: CRTC for which to check the modeset requirement
 *
 * Check if any change in a MST topology requires a forced modeset on @crtc in
 * this topology. One such change is enabling/disabling the DSC decompression
 * state in the first branch device's UFP DPCD as required by one CRTC, while
 * the other @crtc in the same topology is still active, requiring a full modeset
 * on @crtc.
 */
bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state,
				     struct intel_crtc *crtc)
{
	const struct intel_connector *crtc_connector;
	const struct drm_connector_state *conn_state;
	const struct drm_connector *_connector;
	int i;

	/* Only MST streams can be affected by topology-wide changes. */
	if (!intel_crtc_has_type(intel_atomic_get_new_crtc_state(state, crtc),
				 INTEL_OUTPUT_DP_MST))
		return false;

	/* Any connector in @state that is or was attached to @crtc. */
	crtc_connector = get_connector_in_state_for_crtc(state, crtc);

	if (!crtc_connector)
		/* None of the connectors in the topology needs modeset */
		return false;

	/*
	 * Scan all connectors in the same topology (i.e. sharing
	 * @crtc_connector's mst_port) for a modeset that toggles their DSC
	 * decompression state.
	 */
	for_each_new_connector_in_state(&state->base, _connector, conn_state, i) {
		const struct intel_connector *connector =
			to_intel_connector(_connector);
		const struct intel_crtc_state *new_crtc_state;
		const struct intel_crtc_state *old_crtc_state;
		struct intel_crtc *crtc_iter;

		/* Skip connectors in other topologies and detached ones. */
		if (connector->mst_port != crtc_connector->mst_port ||
		    !conn_state->crtc)
			continue;

		crtc_iter = to_intel_crtc(conn_state->crtc);

		new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc_iter);
		old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc_iter);

		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		if (old_crtc_state->dsc.compression_enable ==
		    new_crtc_state->dsc.compression_enable)
			continue;
		/*
		 * Toggling the decompression flag because of this stream in
		 * the first downstream branch device's UFP DPCD may reset the
		 * whole branch device. To avoid the reset while other streams
		 * are also active modeset the whole MST topology in this
		 * case.
		 */
		if (connector->dp.dsc_decompression_aux ==
		    &connector->mst_port->aux)
			return true;
	}

	return false;
}

/**
 * intel_dp_mst_prepare_probe - Prepare an MST link for topology probing
 * @intel_dp: DP port object
 *
 * Prepare an MST link for topology probing, programming the target
 * link parameters to DPCD. This step is a requirement of the enumeration
 * of path resources during probing.
 */
void intel_dp_mst_prepare_probe(struct intel_dp *intel_dp)
{
	int link_rate = intel_dp_max_link_rate(intel_dp);
	int lane_count = intel_dp_max_lane_count(intel_dp);
	u8 rate_select;
	u8 link_bw;

	/* Nothing to do if the link is already trained. */
	if (intel_dp->link_trained)
		return;

	/* Skip the DPCD writes if the same parameters were already programmed. */
	if (intel_mst_probed_link_params_valid(intel_dp, link_rate, lane_count))
		return;

	intel_dp_compute_rate(intel_dp, link_rate, &link_bw, &rate_select);

	/* Program the target link parameters to DPCD. */
	intel_dp_link_training_set_mode(intel_dp, link_rate, false);
	intel_dp_link_training_set_bw(intel_dp, link_bw, rate_select, lane_count,
				      drm_dp_enhanced_frame_cap(intel_dp->dpcd));

	intel_mst_set_probed_link_params(intel_dp, link_rate, lane_count);
}

/**
 * intel_dp_mst_verify_dpcd_state - verify the MST SW enabled state wrt. the DPCD
 * @intel_dp: DP port object
 *
 * Verify if @intel_dp's MST enabled SW state matches the corresponding DPCD
 * state. A long HPD pulse - not long enough to be detected as a disconnected
 * state - could've reset the DPCD state, which requires tearing
 * down/recreating the MST topology.
 *
 * Returns %true if the SW MST enabled and DPCD states match, %false
 * otherwise.
2095 */ 2096 bool intel_dp_mst_verify_dpcd_state(struct intel_dp *intel_dp) 2097 { 2098 struct intel_display *display = to_intel_display(intel_dp); 2099 struct intel_connector *connector = intel_dp->attached_connector; 2100 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 2101 struct intel_encoder *encoder = &dig_port->base; 2102 int ret; 2103 u8 val; 2104 2105 if (!intel_dp->is_mst) 2106 return true; 2107 2108 ret = drm_dp_dpcd_readb(intel_dp->mst_mgr.aux, DP_MSTM_CTRL, &val); 2109 2110 /* Adjust the expected register value for SST + SideBand. */ 2111 if (ret < 0 || val != (DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC)) { 2112 drm_dbg_kms(display->drm, 2113 "[CONNECTOR:%d:%s][ENCODER:%d:%s] MST mode got reset, removing topology (ret=%d, ctrl=0x%02x)\n", 2114 connector->base.base.id, connector->base.name, 2115 encoder->base.base.id, encoder->base.name, 2116 ret, val); 2117 2118 return false; 2119 } 2120 2121 return true; 2122 } 2123