1 /* 2 * Copyright © 2008 Intel Corporation 3 * 2014 Red Hat Inc. 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice (including the next 13 * paragraph) shall be included in all copies or substantial portions of the 14 * Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 22 * IN THE SOFTWARE. 
23 * 24 */ 25 26 #include <drm/drm_atomic.h> 27 #include <drm/drm_atomic_helper.h> 28 #include <drm/drm_edid.h> 29 #include <drm/drm_fixed.h> 30 #include <drm/drm_probe_helper.h> 31 32 #include "i915_drv.h" 33 #include "i915_reg.h" 34 #include "intel_atomic.h" 35 #include "intel_audio.h" 36 #include "intel_connector.h" 37 #include "intel_crtc.h" 38 #include "intel_ddi.h" 39 #include "intel_de.h" 40 #include "intel_display_driver.h" 41 #include "intel_display_types.h" 42 #include "intel_dp.h" 43 #include "intel_dp_hdcp.h" 44 #include "intel_dp_link_training.h" 45 #include "intel_dp_mst.h" 46 #include "intel_dp_test.h" 47 #include "intel_dp_tunnel.h" 48 #include "intel_dpio_phy.h" 49 #include "intel_hdcp.h" 50 #include "intel_hotplug.h" 51 #include "intel_link_bw.h" 52 #include "intel_pfit.h" 53 #include "intel_psr.h" 54 #include "intel_vdsc.h" 55 #include "intel_vrr.h" 56 #include "skl_scaler.h" 57 58 /* 59 * DP MST (DisplayPort Multi-Stream Transport) 60 * 61 * MST support on the source depends on the platform and port. DP initialization 62 * sets up MST for each MST capable encoder. This will become the primary 63 * encoder for the port. 64 * 65 * MST initialization of each primary encoder creates MST stream encoders, one 66 * per pipe, and initializes the MST topology manager. The MST stream encoders 67 * are sometimes called "fake encoders", because they're virtual, not 68 * physical. Thus there are (number of MST capable ports) x (number of pipes) 69 * MST stream encoders in total. 70 * 71 * Decision to use MST for a sink happens at detect on the connector attached to 72 * the primary encoder, and this will not change while the sink is connected. We 73 * always use MST when possible, including for SST sinks with sideband messaging 74 * support. 75 * 76 * The connectors for the MST streams are added and removed dynamically by the 77 * topology manager. Their connection status is also determined by the topology 78 * manager. 
 *
 * On hardware, each transcoder may be associated with a single DDI
 * port. Multiple transcoders may be associated with the same DDI port only if
 * the port is in MST mode.
 *
 * On TGL+, all the transcoders streaming on the same DDI port will indicate a
 * primary transcoder; the TGL_DP_TP_CTL and TGL_DP_TP_STATUS registers are
 * relevant only on the primary transcoder. Prior to that, they are port
 * registers.
 */

/* From fake MST stream encoder to primary encoder */
static struct intel_encoder *to_primary_encoder(struct intel_encoder *encoder)
{
	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
	struct intel_digital_port *dig_port = intel_mst->primary;

	return &dig_port->base;
}

/* From fake MST stream encoder to primary DP */
static struct intel_dp *to_primary_dp(struct intel_encoder *encoder)
{
	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
	struct intel_digital_port *dig_port = intel_mst->primary;

	return &dig_port->dp;
}

/* Number of MST streams currently active on the primary DP link */
int intel_dp_mst_active_streams(struct intel_dp *intel_dp)
{
	return intel_dp->mst.active_streams;
}

/*
 * Decrement the active stream count.
 *
 * Returns true if this was the last active stream. An underflow (decrement
 * while the count is already 0) is WARNed about and also reported as "last
 * stream" so callers still tear the link down.
 */
static bool intel_dp_mst_dec_active_streams(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);

	drm_dbg_kms(display->drm, "active MST streams %d -> %d\n",
		    intel_dp->mst.active_streams, intel_dp->mst.active_streams - 1);

	if (drm_WARN_ON(display->drm, intel_dp->mst.active_streams == 0))
		return true;

	return --intel_dp->mst.active_streams == 0;
}

/*
 * Increment the active stream count.
 *
 * Returns true if this is the first active stream on the link.
 */
static bool intel_dp_mst_inc_active_streams(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);

	drm_dbg_kms(display->drm, "active MST streams %d -> %d\n",
		    intel_dp->mst.active_streams, intel_dp->mst.active_streams + 1);

	return intel_dp->mst.active_streams++ == 0;
}

/*
 * Maximum link bpp limited by the DSC->DPT interface width.
 *
 * Returns 0 if no such limit applies, i.e. on non-UHBR links, on display
 * version 20+ or when DSC is not used.
 */
static int intel_dp_mst_max_dpt_bpp(const struct intel_crtc_state *crtc_state,
				    bool dsc)
{
	struct intel_display *display = to_intel_display(crtc_state);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	if (!intel_dp_is_uhbr(crtc_state) || DISPLAY_VER(display) >= 20 || !dsc)
		return 0;

	/*
	 * DSC->DPT interface width:
	 * ICL-MTL: 72 bits (each branch has 72 bits, only left branch is used)
	 * LNL+: 144 bits (not a bottleneck in any config)
	 *
	 * Bspec/49259 suggests that the FEC overhead needs to be
	 * applied here, though HW people claim that neither this FEC
	 * or any other overhead is applicable here (that is the actual
	 * available_bw is just symbol_clock * 72). However based on
	 * testing on MTL-P the
	 * - DELL U3224KBA display
	 * - Unigraf UCD-500 CTS test sink
	 * devices the
	 * - 5120x2880/995.59Mhz
	 * - 6016x3384/1357.23Mhz
	 * - 6144x3456/1413.39Mhz
	 * modes (all the ones having a DPT limit on the above devices),
	 * both the channel coding efficiency and an additional 3%
	 * overhead needs to be accounted for.
	 */
	return div64_u64(mul_u32_u32(intel_dp_link_symbol_clock(crtc_state->port_clock) * 72,
				     drm_dp_bw_channel_coding_efficiency(true)),
			 mul_u32_u32(adjusted_mode->crtc_clock, 1030000));
}

/*
 * Link BW overhead for the given config as computed by drm_dp_bw_overhead(),
 * clamped to be at least the fixed FEC overhead.
 */
static int intel_dp_mst_bw_overhead(const struct intel_crtc_state *crtc_state,
				    bool ssc, int dsc_slice_count, int bpp_x16)
{
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	unsigned long flags = DRM_DP_BW_OVERHEAD_MST;
	int overhead;

	flags |= intel_dp_is_uhbr(crtc_state) ? DRM_DP_BW_OVERHEAD_UHBR : 0;
	flags |= ssc ? DRM_DP_BW_OVERHEAD_SSC_REF_CLK : 0;
	flags |= crtc_state->fec_enable ? DRM_DP_BW_OVERHEAD_FEC : 0;

	if (dsc_slice_count)
		flags |= DRM_DP_BW_OVERHEAD_DSC;

	overhead = drm_dp_bw_overhead(crtc_state->lane_count,
				      adjusted_mode->hdisplay,
				      dsc_slice_count,
				      bpp_x16,
				      flags);

	/*
	 * TODO: clarify whether a minimum required by the fixed FEC overhead
	 * in the bspec audio programming sequence is required here.
	 */
	return max(overhead, intel_dp_bw_fec_overhead(crtc_state->fec_enable));
}

/* Compute the data/link M/N values and the TU size for the stream. */
static void intel_dp_mst_compute_m_n(const struct intel_crtc_state *crtc_state,
				     int overhead,
				     int bpp_x16,
				     struct intel_link_m_n *m_n)
{
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	/* TODO: Check WA 14013163432 to set data M/N for full BW utilization. */
	intel_link_compute_m_n(bpp_x16, crtc_state->lane_count,
			       adjusted_mode->crtc_clock,
			       crtc_state->port_clock,
			       overhead,
			       m_n);

	/* TU size: time slots needed out of the 64 in an MTP, rounded up */
	m_n->tu = DIV_ROUND_UP_ULL(mul_u32_u32(m_n->data_m, 64), m_n->data_n);
}

/* PBN value for the stream's effective data rate (1 PBN = 54/64 MBps) */
static int intel_dp_mst_calc_pbn(int pixel_clock, int bpp_x16, int bw_overhead)
{
	int effective_data_rate =
		intel_dp_effective_data_rate(pixel_clock, bpp_x16, bw_overhead);

	/*
	 * TODO: Use drm_dp_calc_pbn_mode() instead, once it's converted
	 * to calculate PBN with the BW overhead passed to it.
	 */
	return DIV_ROUND_UP(effective_data_rate * 64, 54 * 1000);
}

/* DSC slice count for the mode, taking joined pipes into account */
static int intel_dp_mst_dsc_get_slice_count(const struct intel_connector *connector,
					    const struct intel_crtc_state *crtc_state)
{
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	int num_joined_pipes = intel_crtc_num_joined_pipes(crtc_state);

	return intel_dp_dsc_get_slice_count(connector,
					    adjusted_mode->clock,
					    adjusted_mode->hdisplay,
					    num_joined_pipes);
}

/*
 * Compute the minimum hblank limit in link layer symbol cycles. Only
 * relevant on display version 20+ (no-op on older platforms).
 */
static void intel_dp_mst_compute_min_hblank(struct intel_crtc_state *crtc_state,
					    int bpp_x16)
{
	struct intel_display *display = to_intel_display(crtc_state);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	/* 32 bit symbols on UHBR (128b/132b) links, 8 bit ones on 8b/10b links */
	int symbol_size = intel_dp_is_uhbr(crtc_state) ? 32 : 8;
	int hblank;

	if (DISPLAY_VER(display) < 20)
		return;

	/* Calculate min Hblank Link Layer Symbol Cycle Count for 8b/10b MST & 128b/132b */
	hblank = DIV_ROUND_UP((DIV_ROUND_UP
			       (adjusted_mode->htotal - adjusted_mode->hdisplay, 4) * bpp_x16),
			      symbol_size);

	crtc_state->min_hblank = hblank;
}

/*
 * Find a usable link bpp in the [@min_bpp_x16, @max_bpp_x16] range, stepping
 * down by @bpp_step_x16, computing the M/N values and the TU (MTP time slot
 * count) for it. For MST the time slots are also allocated from the topology
 * state. On success the resulting bpp is stored in the crtc state (pipe_bpp,
 * or dsc.compressed_bpp_x16 if @dsc).
 *
 * Returns 0 on success, -EDEADLK if the atomic state must be backed off and
 * retried, or another negative error code if no bpp in the range fits.
 */
int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
				   struct intel_crtc_state *crtc_state,
				   struct drm_connector_state *conn_state,
				   int min_bpp_x16, int max_bpp_x16, int bpp_step_x16, bool dsc)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_atomic_state *state = crtc_state->uapi.state;
	struct drm_dp_mst_topology_state *mst_state = NULL;
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	bool is_mst = intel_dp->is_mst;
	int bpp_x16, slots = -EINVAL;
	int dsc_slice_count = 0;
	int max_dpt_bpp_x16;

	/* shouldn't happen, sanity check: non-DSC bpps must be integers */
	drm_WARN_ON(display->drm, !dsc && (fxp_q4_to_frac(min_bpp_x16) ||
					   fxp_q4_to_frac(max_bpp_x16) ||
					   fxp_q4_to_frac(bpp_step_x16)));

	if (is_mst) {
		mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst.mgr);
		if (IS_ERR(mst_state))
			return PTR_ERR(mst_state);

		mst_state->pbn_div = drm_dp_get_vc_payload_bw(crtc_state->port_clock,
							      crtc_state->lane_count);
	}

	if (dsc) {
		if (!intel_dp_supports_fec(intel_dp, connector, crtc_state))
			return -EINVAL;

		/* FEC is needed for DSC, except on UHBR (128b/132b) links */
		crtc_state->fec_enable = !intel_dp_is_uhbr(crtc_state);
	}

	max_dpt_bpp_x16 = fxp_q4_from_int(intel_dp_mst_max_dpt_bpp(crtc_state, dsc));
	if (max_dpt_bpp_x16 && max_bpp_x16 > max_dpt_bpp_x16) {
		drm_dbg_kms(display->drm, "Limiting bpp to max DPT bpp (" FXP_Q4_FMT " -> " FXP_Q4_FMT ")\n",
			    FXP_Q4_ARGS(max_bpp_x16), FXP_Q4_ARGS(max_dpt_bpp_x16));
		max_bpp_x16 = max_dpt_bpp_x16;
	}

	drm_dbg_kms(display->drm, "Looking for slots in range min bpp " FXP_Q4_FMT " max bpp " FXP_Q4_FMT "\n",
		    FXP_Q4_ARGS(min_bpp_x16), FXP_Q4_ARGS(max_bpp_x16));

	if (dsc) {
		dsc_slice_count = intel_dp_mst_dsc_get_slice_count(connector, crtc_state);
		if (!dsc_slice_count) {
			drm_dbg_kms(display->drm, "Can't get valid DSC slice count\n");

			return -ENOSPC;
		}
	}

	for (bpp_x16 = max_bpp_x16; bpp_x16 >= min_bpp_x16; bpp_x16 -= bpp_step_x16) {
		int local_bw_overhead;
		int link_bpp_x16;

		drm_dbg_kms(display->drm, "Trying bpp " FXP_Q4_FMT "\n", FXP_Q4_ARGS(bpp_x16));

		link_bpp_x16 = dsc ? bpp_x16 :
			fxp_q4_from_int(intel_dp_output_bpp(crtc_state->output_format,
							    fxp_q4_to_int(bpp_x16)));

		local_bw_overhead = intel_dp_mst_bw_overhead(crtc_state,
							     false, dsc_slice_count, link_bpp_x16);

		intel_dp_mst_compute_min_hblank(crtc_state, link_bpp_x16);

		intel_dp_mst_compute_m_n(crtc_state,
					 local_bw_overhead,
					 link_bpp_x16,
					 &crtc_state->dp_m_n);

		if (is_mst) {
			int remote_bw_overhead;
			int remote_tu;
			fixed20_12 pbn;

			remote_bw_overhead = intel_dp_mst_bw_overhead(crtc_state,
								      true, dsc_slice_count, link_bpp_x16);

			/*
			 * The TU size programmed to the HW determines which slots in
			 * an MTP frame are used for this stream, which needs to match
			 * the payload size programmed to the first downstream branch
			 * device's payload table.
			 *
			 * Note that atm the payload's PBN value DRM core sends via
			 * the ALLOCATE_PAYLOAD side-band message matches the payload
			 * size (which it calculates from the PBN value) it programs
			 * to the first branch device's payload table. The allocation
			 * in the payload table could be reduced though (to
			 * crtc_state->dp_m_n.tu), provided that the driver doesn't
			 * enable SSC on the corresponding link.
			 */
			pbn.full = dfixed_const(intel_dp_mst_calc_pbn(adjusted_mode->crtc_clock,
								      link_bpp_x16,
								      remote_bw_overhead));
			remote_tu = DIV_ROUND_UP(pbn.full, mst_state->pbn_div.full);

			/*
			 * Aligning the TUs ensures that symbols consisting of multiple
			 * (4) symbol cycles don't get split between two consecutive
			 * MTPs, as required by Bspec.
			 * TODO: remove the alignment restriction for 128b/132b links
			 * on some platforms, where Bspec allows this.
			 */
			remote_tu = ALIGN(remote_tu, 4 / crtc_state->lane_count);

			/*
			 * Also align PBNs accordingly, since MST core will derive its
			 * own copy of TU from the PBN in drm_dp_atomic_find_time_slots().
			 * The above comment about the difference between the PBN
			 * allocated for the whole path and the TUs allocated for the
			 * first branch device's link also applies here.
			 */
			pbn.full = remote_tu * mst_state->pbn_div.full;

			drm_WARN_ON(display->drm, remote_tu < crtc_state->dp_m_n.tu);
			crtc_state->dp_m_n.tu = remote_tu;

			slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst.mgr,
							      connector->mst.port,
							      dfixed_trunc(pbn));
		} else {
			/* Same as above for remote_tu */
			crtc_state->dp_m_n.tu = ALIGN(crtc_state->dp_m_n.tu,
						      4 / crtc_state->lane_count);

			if (crtc_state->dp_m_n.tu <= 64)
				slots = crtc_state->dp_m_n.tu;
			else
				slots = -EINVAL;
		}

		if (slots == -EDEADLK)
			return slots;

		if (slots >= 0) {
			drm_WARN_ON(display->drm, slots != crtc_state->dp_m_n.tu);

			break;
		}

		/* Allow using zero step to indicate one try */
		if (!bpp_step_x16)
			break;
	}

	if (slots < 0) {
		drm_dbg_kms(display->drm, "failed finding vcpi slots:%d\n",
			    slots);
		return slots;
	}

	if (!dsc)
		crtc_state->pipe_bpp = fxp_q4_to_int(bpp_x16);
	else
		crtc_state->dsc.compressed_bpp_x16 = bpp_x16;

	drm_dbg_kms(display->drm, "Got %d slots for pipe bpp " FXP_Q4_FMT " dsc %d\n",
		    slots, FXP_Q4_ARGS(bpp_x16), dsc);

	return 0;
}

/*
 * Uncompressed link config: try bpps from the limits' max down to min in
 * 2 * 3 steps (presumably 2 bpc across 3 color components — confirm).
 */
static int mst_stream_compute_link_config(struct intel_dp *intel_dp,
					  struct intel_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state,
					  const struct link_config_limits *limits)
{
	crtc_state->lane_count = limits->max_lane_count;
	crtc_state->port_clock = limits->max_rate;

	/*
	 * FIXME: allocate the BW according to link_bpp, which in the case of
	 * YUV420 is only half of the pipe bpp value.
	 */
	return intel_dp_mtp_tu_compute_config(intel_dp, crtc_state, conn_state,
					      limits->link.min_bpp_x16,
					      limits->link.max_bpp_x16,
					      fxp_q4_from_int(2 * 3), false);
}

/*
 * DSC link config: intersect the source and sink input bpc limits, clamp the
 * compressed bpp range to valid values and find a usable compressed bpp via
 * intel_dp_mtp_tu_compute_config().
 */
static int mst_stream_dsc_compute_link_config(struct intel_dp *intel_dp,
					      struct intel_crtc_state *crtc_state,
					      struct drm_connector_state *conn_state,
					      const struct link_config_limits *limits)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	int num_bpc;
	u8 dsc_bpc[3] = {};
	int min_bpp, max_bpp, sink_min_bpp, sink_max_bpp;
	int min_compressed_bpp, max_compressed_bpp;

	max_bpp = limits->pipe.max_bpp;
	min_bpp = limits->pipe.min_bpp;

	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(connector->dp.dsc_dpcd,
						       dsc_bpc);

	drm_dbg_kms(display->drm, "DSC Source supported min bpp %d max bpp %d\n",
		    min_bpp, max_bpp);

	sink_min_bpp = min_array(dsc_bpc, num_bpc) * 3;
	sink_max_bpp = max_array(dsc_bpc, num_bpc) * 3;

	drm_dbg_kms(display->drm, "DSC Sink supported min bpp %d max bpp %d\n",
		    sink_min_bpp, sink_max_bpp);

	if (min_bpp < sink_min_bpp)
		min_bpp = sink_min_bpp;

	if (max_bpp > sink_max_bpp)
		max_bpp = sink_max_bpp;

	crtc_state->pipe_bpp = max_bpp;

	max_compressed_bpp = fxp_q4_to_int(limits->link.max_bpp_x16);
	min_compressed_bpp = fxp_q4_to_int_roundup(limits->link.min_bpp_x16);

	drm_dbg_kms(display->drm, "DSC Sink supported compressed min bpp %d compressed max bpp %d\n",
		    min_compressed_bpp, max_compressed_bpp);

	/* Align compressed bpps according to our own constraints */
	max_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(display, max_compressed_bpp,
							    crtc_state->pipe_bpp);
	min_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(display, min_compressed_bpp,
							    crtc_state->pipe_bpp);

	crtc_state->lane_count = limits->max_lane_count;
	crtc_state->port_clock = limits->max_rate;

	return intel_dp_mtp_tu_compute_config(intel_dp, crtc_state, conn_state,
					      fxp_q4_from_int(min_compressed_bpp),
					      fxp_q4_from_int(max_compressed_bpp),
					      fxp_q4_from_int(1), true);
}

/* Update the topology state's slot config for the link coding (8b/10b vs 128b/132b) */
static int mst_stream_update_slots(struct intel_dp *intel_dp,
				   struct intel_crtc_state *crtc_state,
				   struct drm_connector_state *conn_state)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst.mgr;
	struct drm_dp_mst_topology_state *topology_state;
	u8 link_coding_cap = intel_dp_is_uhbr(crtc_state) ?
		DP_CAP_ANSI_128B132B : DP_CAP_ANSI_8B10B;

	topology_state = drm_atomic_get_mst_topology_state(conn_state->state, mgr);
	if (IS_ERR(topology_state)) {
		drm_dbg_kms(display->drm, "slot update failed\n");
		return PTR_ERR(topology_state);
	}

	drm_dp_mst_update_slots(topology_state, link_coding_cap);

	return 0;
}

/* Hblank period of the mode in nanoseconds (crtc_clock is in kHz) */
static int mode_hblank_period_ns(const struct drm_display_mode *mode)
{
	return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(mode->htotal - mode->hdisplay,
						 NSEC_PER_SEC / 1000),
				     mode->crtc_clock);
}

/*
 * Whether the sink's DSC hblank expansion quirk requires enabling DSC (or a
 * higher link bpp) for this mode: applies only to quirky sinks with a short
 * enough hblank period and a valid DSC slice count.
 */
static bool
hblank_expansion_quirk_needs_dsc(const struct intel_connector *connector,
				 const struct intel_crtc_state *crtc_state,
				 const struct link_config_limits *limits)
{
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	bool is_uhbr_sink = connector->mst.dp &&
		drm_dp_128b132b_supported(connector->mst.dp->dpcd);
	int hblank_limit = is_uhbr_sink ? 500 : 300;

	if (!connector->dp.dsc_hblank_expansion_quirk)
		return false;

	if (is_uhbr_sink && !drm_dp_is_uhbr_rate(limits->max_rate))
		return false;

	if (mode_hblank_period_ns(adjusted_mode) > hblank_limit)
		return false;

	if (!intel_dp_mst_dsc_get_slice_count(connector, crtc_state))
		return false;

	return true;
}

/*
 * Apply the DSC hblank expansion quirk to @limits: in non-DSC mode require
 * a 24 bpp minimum (or report that DSC is needed), in DSC mode raise the
 * minimum compressed bpp depending on the link rate.
 *
 * Returns false if the current (DSC/non-DSC) config can't satisfy the quirk,
 * in which case the caller must fall back (to DSC or fail).
 */
static bool
adjust_limits_for_dsc_hblank_expansion_quirk(struct intel_dp *intel_dp,
					     const struct intel_connector *connector,
					     const struct intel_crtc_state *crtc_state,
					     struct link_config_limits *limits,
					     bool dsc)
{
	struct intel_display *display = to_intel_display(connector);
	const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	int min_bpp_x16 = limits->link.min_bpp_x16;

	if (!hblank_expansion_quirk_needs_dsc(connector, crtc_state, limits))
		return true;

	if (!dsc) {
		if (intel_dp_supports_dsc(intel_dp, connector, crtc_state)) {
			drm_dbg_kms(display->drm,
				    "[CRTC:%d:%s][CONNECTOR:%d:%s] DSC needed by hblank expansion quirk\n",
				    crtc->base.base.id, crtc->base.name,
				    connector->base.base.id, connector->base.name);
			return false;
		}

		drm_dbg_kms(display->drm,
			    "[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to 24 due to hblank expansion quirk\n",
			    crtc->base.base.id, crtc->base.name,
			    connector->base.base.id, connector->base.name);

		if (limits->link.max_bpp_x16 < fxp_q4_from_int(24))
			return false;

		limits->link.min_bpp_x16 = fxp_q4_from_int(24);

		return true;
	}

	drm_WARN_ON(display->drm, limits->min_rate != limits->max_rate);

	if (limits->max_rate < 540000)
		min_bpp_x16 = fxp_q4_from_int(13);
	else if (limits->max_rate < 810000)
		min_bpp_x16 = fxp_q4_from_int(10);

	if (limits->link.min_bpp_x16 >= min_bpp_x16)
		return true;

	drm_dbg_kms(display->drm,
		    "[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to " FXP_Q4_FMT " in DSC mode due to hblank expansion quirk\n",
		    crtc->base.base.id, crtc->base.name,
		    connector->base.base.id, connector->base.name,
		    FXP_Q4_ARGS(min_bpp_x16));

	if (limits->link.max_bpp_x16 < min_bpp_x16)
		return false;

	limits->link.min_bpp_x16 = min_bpp_x16;

	return true;
}

/* Compute the stream's link config limits, adjusted for the hblank expansion quirk */
static bool
mst_stream_compute_config_limits(struct intel_dp *intel_dp,
				 const struct intel_connector *connector,
				 struct intel_crtc_state *crtc_state,
				 bool dsc,
				 struct link_config_limits *limits)
{
	if (!intel_dp_compute_config_limits(intel_dp, crtc_state, false, dsc,
					    limits))
		return false;

	return adjust_limits_for_dsc_hblank_expansion_quirk(intel_dp,
							    connector,
							    crtc_state,
							    limits,
							    dsc);
}

/*
 * Compute the config for an MST stream: try an uncompressed link config
 * first, falling back to DSC if that fails or if DSC is forced (by the
 * joiner or debugfs), then finish the stream config (slots, audio, PSR etc.).
 */
static int mst_stream_compute_config(struct intel_encoder *encoder,
				     struct intel_crtc_state *pipe_config,
				     struct drm_connector_state *conn_state)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct intel_dp *intel_dp = to_primary_dp(encoder);
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	struct link_config_limits limits;
	bool dsc_needed, joiner_needs_dsc;
	int num_joined_pipes;
	int ret = 0;

	if (pipe_config->fec_enable &&
	    !intel_dp_supports_fec(intel_dp, connector, pipe_config))
		return -EINVAL;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return -EINVAL;

	num_joined_pipes = intel_dp_num_joined_pipes(intel_dp, connector,
						     adjusted_mode->crtc_hdisplay,
						     adjusted_mode->crtc_clock);
	if (num_joined_pipes > 1)
		pipe_config->joiner_pipes = GENMASK(crtc->pipe + num_joined_pipes - 1, crtc->pipe);

	pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->has_pch_encoder = false;

	joiner_needs_dsc = intel_dp_joiner_needs_dsc(display, num_joined_pipes);

	dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en ||
		     !mst_stream_compute_config_limits(intel_dp, connector,
						       pipe_config, false, &limits);

	if (!dsc_needed) {
		ret = mst_stream_compute_link_config(intel_dp, pipe_config,
						     conn_state, &limits);

		if (ret == -EDEADLK)
			return ret;

		if (ret)
			dsc_needed = true;
	}

	if (dsc_needed && !intel_dp_supports_dsc(intel_dp, connector, pipe_config)) {
		drm_dbg_kms(display->drm, "DSC required but not available\n");
		return -EINVAL;
	}

	/* enable compression if the mode doesn't fit available BW */
	if (dsc_needed) {
		drm_dbg_kms(display->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
			    str_yes_no(ret), str_yes_no(joiner_needs_dsc),
			    str_yes_no(intel_dp->force_dsc_en));

		if (!mst_stream_compute_config_limits(intel_dp, connector,
						      pipe_config, true,
						      &limits))
			return -EINVAL;

		/*
		 * FIXME: As bpc is hardcoded to 8, as mentioned above,
		 * WARN and ignore the debug flag force_dsc_bpc for now.
		 */
		drm_WARN(display->drm, intel_dp->force_dsc_bpc,
			 "Cannot Force BPC for MST\n");
		/*
		 * Try to get at least some timeslots and then see, if
		 * we can fit there with DSC.
		 */
		drm_dbg_kms(display->drm, "Trying to find VCPI slots in DSC mode\n");

		ret = mst_stream_dsc_compute_link_config(intel_dp, pipe_config,
							 conn_state, &limits);
		if (ret < 0)
			return ret;

		ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
						  conn_state, &limits,
						  pipe_config->dp_m_n.tu);
	}

	if (ret)
		return ret;

	ret = mst_stream_update_slots(intel_dp, pipe_config, conn_state);
	if (ret)
		return ret;

	pipe_config->limited_color_range =
		intel_dp_limited_color_range(pipe_config, conn_state);

	if (display->platform.geminilake || display->platform.broxton)
		pipe_config->lane_lat_optim_mask =
			bxt_dpio_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);

	intel_vrr_compute_config(pipe_config, conn_state);

	intel_dp_audio_compute_config(encoder, pipe_config, conn_state);

	intel_ddi_compute_min_voltage_level(pipe_config);

	intel_psr_compute_config(intel_dp, pipe_config, conn_state);

	return intel_dp_tunnel_atomic_compute_stream_bw(state, intel_dp, connector,
							pipe_config);
}

/*
 * Iterate over all connectors and return a mask of
 * all CPU transcoders streaming over the same DP link.
 */
static unsigned int
intel_dp_mst_transcoder_mask(struct intel_atomic_state *state,
			     struct intel_dp *mst_port)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_digital_connector_state *conn_state;
	struct intel_connector *connector;
	u8 transcoders = 0;
	int i;

	/* Only relevant on display version 12+, see the master transcoder use */
	if (DISPLAY_VER(display) < 12)
		return 0;

	for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
		const struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (connector->mst.dp != mst_port || !conn_state->base.crtc)
			continue;

		crtc = to_intel_crtc(conn_state->base.crtc);
		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

		if (!crtc_state->hw.active)
			continue;

		transcoders |= BIT(crtc_state->cpu_transcoder);
	}

	return transcoders;
}

/*
 * Mask of pipes driven by connectors on @mst_mgr's topology at or downstream
 * of @parent_port. A NULL @parent_port selects all of the topology's pipes
 * (see the NULL use in intel_dp_mst_check_fec_change() — presumably all ports
 * count as downstream of a NULL parent; confirm in the DRM MST helper).
 */
static u8 get_pipes_downstream_of_mst_port(struct intel_atomic_state *state,
					   struct drm_dp_mst_topology_mgr *mst_mgr,
					   struct drm_dp_mst_port *parent_port)
{
	const struct intel_digital_connector_state *conn_state;
	struct intel_connector *connector;
	u8 mask = 0;
	int i;

	for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
		if (!conn_state->base.crtc)
			continue;

		if (&connector->mst.dp->mst.mgr != mst_mgr)
			continue;

		if (connector->mst.port != parent_port &&
		    !drm_dp_mst_port_downstream_of_parent(mst_mgr,
							  connector->mst.port,
							  parent_port))
			continue;

		mask |= BIT(to_intel_crtc(conn_state->base.crtc)->pipe);
	}

	return mask;
}

/*
 * FEC must be consistent across all the streams on an MST link. If only some
 * pipes have FEC enabled, force a modeset with FEC on for all of them and
 * return -EAGAIN so the configuration gets recomputed.
 */
static int intel_dp_mst_check_fec_change(struct intel_atomic_state *state,
					 struct drm_dp_mst_topology_mgr *mst_mgr,
					 struct intel_link_bw_limits *limits)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc *crtc;
	u8 mst_pipe_mask;
	u8 fec_pipe_mask = 0;
	int ret;

	mst_pipe_mask = get_pipes_downstream_of_mst_port(state, mst_mgr, NULL);

	for_each_intel_crtc_in_pipe_mask(display->drm, crtc, mst_pipe_mask) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_new_crtc_state(state, crtc);

		/* Atomic connector check should've added all the MST CRTCs. */
		if (drm_WARN_ON(display->drm, !crtc_state))
			return -EINVAL;

		if (crtc_state->fec_enable)
			fec_pipe_mask |= BIT(crtc->pipe);
	}

	if (!fec_pipe_mask || mst_pipe_mask == fec_pipe_mask)
		return 0;

	limits->force_fec_pipes |= mst_pipe_mask;

	ret = intel_modeset_pipes_in_mask_early(state, "MST FEC",
						mst_pipe_mask);

	return ret ? : -EAGAIN;
}

/*
 * Check the MST topology's BW allocation. If a link runs out of BW
 * (-ENOSPC), reduce the link bpp of the pipes downstream of the limiting
 * port and return -EAGAIN so the configuration gets recomputed.
 */
static int intel_dp_mst_check_bw(struct intel_atomic_state *state,
				 struct drm_dp_mst_topology_mgr *mst_mgr,
				 struct drm_dp_mst_topology_state *mst_state,
				 struct intel_link_bw_limits *limits)
{
	struct drm_dp_mst_port *mst_port;
	u8 mst_port_pipes;
	int ret;

	ret = drm_dp_mst_atomic_check_mgr(&state->base, mst_mgr, mst_state, &mst_port);
	if (ret != -ENOSPC)
		return ret;

	mst_port_pipes = get_pipes_downstream_of_mst_port(state, mst_mgr, mst_port);

	ret = intel_link_bw_reduce_bpp(state, limits,
				       mst_port_pipes, "MST link BW");

	return ret ? : -EAGAIN;
}

/**
 * intel_dp_mst_atomic_check_link - check all modeset MST link configuration
 * @state: intel atomic state
 * @limits: link BW limits
 *
 * Check the link configuration for all modeset MST outputs. If the
 * configuration is invalid @limits will be updated if possible to
 * reduce the total BW, after which the configuration for all CRTCs in
 * @state must be recomputed with the updated @limits.
883 * 884 * Returns: 885 * - 0 if the configuration is valid 886 * - %-EAGAIN, if the configuration is invalid and @limits got updated 887 * with fallback values with which the configuration of all CRTCs in 888 * @state must be recomputed 889 * - Other negative error, if the configuration is invalid without a 890 * fallback possibility, or the check failed for another reason 891 */ 892 int intel_dp_mst_atomic_check_link(struct intel_atomic_state *state, 893 struct intel_link_bw_limits *limits) 894 { 895 struct drm_dp_mst_topology_mgr *mgr; 896 struct drm_dp_mst_topology_state *mst_state; 897 int ret; 898 int i; 899 900 for_each_new_mst_mgr_in_state(&state->base, mgr, mst_state, i) { 901 ret = intel_dp_mst_check_fec_change(state, mgr, limits); 902 if (ret) 903 return ret; 904 905 ret = intel_dp_mst_check_bw(state, mgr, mst_state, 906 limits); 907 if (ret) 908 return ret; 909 } 910 911 return 0; 912 } 913 914 static int mst_stream_compute_config_late(struct intel_encoder *encoder, 915 struct intel_crtc_state *crtc_state, 916 struct drm_connector_state *conn_state) 917 { 918 struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state); 919 struct intel_dp *intel_dp = to_primary_dp(encoder); 920 921 /* lowest numbered transcoder will be designated master */ 922 crtc_state->mst_master_transcoder = 923 ffs(intel_dp_mst_transcoder_mask(state, intel_dp)) - 1; 924 925 return 0; 926 } 927 928 /* 929 * If one of the connectors in a MST stream needs a modeset, mark all CRTCs 930 * that shares the same MST stream as mode changed, 931 * intel_modeset_pipe_config()+intel_crtc_check_fastset() will take care to do 932 * a fastset when possible. 933 * 934 * On TGL+ this is required since each stream go through a master transcoder, 935 * so if the master transcoder needs modeset, all other streams in the 936 * topology need a modeset. 
 All platforms need to add the atomic state
 * for all streams in the topology, since a modeset on one may require
 * changing the MST link BW usage of the others, which in turn needs a
 * recomputation of the corresponding CRTC states.
 */
static int
mst_connector_atomic_topology_check(struct intel_connector *connector,
				    struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(connector);
	struct drm_connector_list_iter connector_list_iter;
	struct intel_connector *connector_iter;
	int ret = 0;

	if (!intel_connector_needs_modeset(state, &connector->base))
		return 0;

	drm_connector_list_iter_begin(display->drm, &connector_list_iter);
	for_each_intel_connector_iter(connector_iter, &connector_list_iter) {
		struct intel_digital_connector_state *conn_iter_state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		/* Only other streams on the same MST link are affected */
		if (connector_iter->mst.dp != connector->mst.dp ||
		    connector_iter == connector)
			continue;

		conn_iter_state = intel_atomic_get_digital_connector_state(state,
									   connector_iter);
		if (IS_ERR(conn_iter_state)) {
			ret = PTR_ERR(conn_iter_state);
			break;
		}

		if (!conn_iter_state->base.crtc)
			continue;

		crtc = to_intel_crtc(conn_iter_state->base.crtc);
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			break;
		}

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			break;
		crtc_state->uapi.mode_changed = true;
	}
	drm_connector_list_iter_end(&connector_list_iter);

	return ret;
}

/*
 * Atomic check hook for MST stream connectors: run the generic digital
 * connector check, pull in all CRTCs sharing the MST link, check the DP
 * tunnel state on a modeset and release the connector's time slots.
 */
static int
mst_connector_atomic_check(struct drm_connector *_connector,
			   struct drm_atomic_state *_state)
{
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct intel_connector *connector = to_intel_connector(_connector);
	int ret;

	ret = intel_digital_connector_atomic_check(&connector->base, &state->base);
	if (ret)
		return ret;

	ret = mst_connector_atomic_topology_check(connector, state);
	if (ret)
		return ret;

	if (intel_connector_needs_modeset(state, &connector->base)) {
		ret = intel_dp_tunnel_atomic_check_state(state,
							 connector->mst.dp,
							 connector);
		if (ret)
			return ret;
	}

	return drm_dp_atomic_release_time_slots(&state->base,
						&connector->mst.dp->mst.mgr,
						connector->mst.port);
}

/* Disable sequence for an MST stream: HDCP, sink decompression, min hblank */
static void mst_stream_disable(struct intel_atomic_state *state,
			       struct intel_encoder *encoder,
			       const struct intel_crtc_state *old_crtc_state,
			       const struct drm_connector_state *old_conn_state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
	struct intel_dp *intel_dp = to_primary_dp(encoder);
	struct intel_connector *connector =
		to_intel_connector(old_conn_state->connector);
	enum transcoder trans = old_crtc_state->cpu_transcoder;

	/* The last stream going away deactivates the whole link */
	if (intel_dp_mst_active_streams(intel_dp) == 1)
		intel_dp->link.active = false;

	intel_hdcp_disable(intel_mst->connector);

	intel_dp_sink_disable_decompression(state, connector, old_crtc_state);

	/* Reset the min hblank limit, programmed only on display version 20+ */
	if (DISPLAY_VER(display) >= 20)
		intel_de_write(display, DP_MIN_HBLANK_CTL(trans), 0);
}

static void mst_stream_post_disable(struct intel_atomic_state *state,
				    struct intel_encoder *encoder,
				    const struct intel_crtc_state *old_crtc_state,
				    const struct drm_connector_state *old_conn_state)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
	struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
	struct intel_dp *intel_dp = to_primary_dp(encoder);
	struct intel_connector *connector =
		to_intel_connector(old_conn_state->connector);
	struct drm_dp_mst_topology_state *old_mst_state =
		drm_atomic_get_old_mst_topology_state(&state->base, &intel_dp->mst.mgr);
	struct drm_dp_mst_topology_state *new_mst_state =
		drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst.mgr);
	const struct drm_dp_mst_atomic_payload *old_payload =
		drm_atomic_get_mst_payload_state(old_mst_state, connector->mst.port);
	struct drm_dp_mst_atomic_payload *new_payload =
		drm_atomic_get_mst_payload_state(new_mst_state, connector->mst.port);
	struct intel_crtc *pipe_crtc;
	bool last_mst_stream;
	int i;

	last_mst_stream = intel_dp_mst_dec_active_streams(intel_dp);

	/*
	 * On TGL+ the last stream to go down must be the one on the master
	 * transcoder; warn if that invariant is violated.
	 */
	drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 12 && last_mst_stream &&
		    !intel_dp_mst_is_master_trans(old_crtc_state));

	for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
		const struct intel_crtc_state *old_pipe_crtc_state =
			intel_atomic_get_old_crtc_state(state, pipe_crtc);

		intel_crtc_vblank_off(old_pipe_crtc_state);
	}

	intel_disable_transcoder(old_crtc_state);

	/* Free this stream's time slots in the payload table. */
	drm_dp_remove_payload_part1(&intel_dp->mst.mgr, new_mst_state, new_payload);

	intel_ddi_clear_act_sent(encoder, old_crtc_state);

	/* Trigger the allocation change and wait for the sink to ACT. */
	intel_de_rmw(display,
		     TRANS_DDI_FUNC_CTL(display, old_crtc_state->cpu_transcoder),
		     TRANS_DDI_DP_VC_PAYLOAD_ALLOC, 0);

	intel_ddi_wait_for_act_sent(encoder, old_crtc_state);
	drm_dp_check_act_status(&intel_dp->mst.mgr);

	drm_dp_remove_payload_part2(&intel_dp->mst.mgr, new_mst_state,
				    old_payload, new_payload);

	intel_vrr_transcoder_disable(old_crtc_state);

	intel_ddi_disable_transcoder_func(old_crtc_state);

	for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
		const struct intel_crtc_state *old_pipe_crtc_state =
			intel_atomic_get_old_crtc_state(state, pipe_crtc);

		intel_dsc_disable(old_pipe_crtc_state);

		if (DISPLAY_VER(display) >= 9)
			skl_scaler_disable(old_pipe_crtc_state);
		else
			ilk_pfit_disable(old_pipe_crtc_state);
	}

	/*
	 * Power down mst path before disabling the port, otherwise we end
	 * up getting interrupts from the sink upon detecting link loss.
	 */
	drm_dp_send_power_updown_phy(&intel_dp->mst.mgr, connector->mst.port,
				     false);

	/*
	 * BSpec 4287: disable DIP after the transcoder is disabled and before
	 * the transcoder clock select is set to none.
	 */
	intel_dp_set_infoframes(primary_encoder, false, old_crtc_state, NULL);
	/*
	 * From TGL spec: "If multi-stream slave transcoder: Configure
	 * Transcoder Clock Select to direct no clock to the transcoder"
	 *
	 * From older GENs spec: "Configure Transcoder Clock Select to direct
	 * no clock to the transcoder"
	 */
	if (DISPLAY_VER(display) < 12 || !last_mst_stream)
		intel_ddi_disable_transcoder_clock(old_crtc_state);


	intel_mst->connector = NULL;
	/* The primary (DDI) encoder is torn down only with the last stream. */
	if (last_mst_stream)
		primary_encoder->post_disable(state, primary_encoder,
					      old_crtc_state, NULL);

}

static void mst_stream_post_pll_disable(struct intel_atomic_state *state,
					struct intel_encoder *encoder,
					const struct intel_crtc_state *old_crtc_state,
					const struct drm_connector_state *old_conn_state)
{
	struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
	struct intel_dp *intel_dp = to_primary_dp(encoder);

	/* Forward to the primary encoder only once no stream remains active. */
	if (intel_dp_mst_active_streams(intel_dp) == 0 &&
	    primary_encoder->post_pll_disable)
		primary_encoder->post_pll_disable(state, primary_encoder, old_crtc_state, old_conn_state);
}

static void mst_stream_pre_pll_enable(struct intel_atomic_state *state,
				      struct intel_encoder *encoder,
				      const struct intel_crtc_state *pipe_config,
				      const struct drm_connector_state *conn_state)
{
	struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
	struct intel_dp *intel_dp = to_primary_dp(encoder);

	/* Only the first stream brings up the port PLL via the primary encoder. */
	if (intel_dp_mst_active_streams(intel_dp) == 0)
		primary_encoder->pre_pll_enable(state, primary_encoder,
						pipe_config, NULL);
	else
		/*
		 * The port PLL state needs to get updated for secondary
		 * streams as for the primary stream.
		 */
		intel_ddi_update_active_dpll(state, primary_encoder,
					     to_intel_crtc(pipe_config->uapi.crtc));
}

/* Do the cached probed link parameters match the given rate/lane count? */
static bool intel_mst_probed_link_params_valid(struct intel_dp *intel_dp,
					       int link_rate, int lane_count)
{
	return intel_dp->link.mst_probed_rate == link_rate &&
		intel_dp->link.mst_probed_lane_count == lane_count;
}

/* Cache the link parameters the topology was last probed with. */
static void intel_mst_set_probed_link_params(struct intel_dp *intel_dp,
					     int link_rate, int lane_count)
{
	intel_dp->link.mst_probed_rate = link_rate;
	intel_dp->link.mst_probed_lane_count = lane_count;
}

/*
 * Queue a topology reprobe if the link parameters changed since the last
 * probe, then record the new parameters.
 */
static void intel_mst_reprobe_topology(struct intel_dp *intel_dp,
				       const struct intel_crtc_state *crtc_state)
{
	if (intel_mst_probed_link_params_valid(intel_dp,
					       crtc_state->port_clock, crtc_state->lane_count))
		return;

	drm_dp_mst_topology_queue_probe(&intel_dp->mst.mgr);

	intel_mst_set_probed_link_params(intel_dp,
					 crtc_state->port_clock, crtc_state->lane_count);
}

static void mst_stream_pre_enable(struct intel_atomic_state *state,
				  struct intel_encoder *encoder,
				  const struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
	struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
	struct intel_dp *intel_dp = to_primary_dp(encoder);
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	struct drm_dp_mst_topology_state *mst_state =
		drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst.mgr);
	int ret;
	bool first_mst_stream;

	/* MST encoders are bound to a crtc, not to a connector,
	 * force the mapping here for get_hw_state.
	 */
	connector->encoder = encoder;
	intel_mst->connector = connector;

	first_mst_stream = intel_dp_mst_inc_active_streams(intel_dp);
	/* On TGL+ the first stream must come up on the master transcoder. */
	drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 12 && first_mst_stream &&
		    !intel_dp_mst_is_master_trans(pipe_config));

	if (first_mst_stream)
		intel_dp_set_power(intel_dp, DP_SET_POWER_D0);

	drm_dp_send_power_updown_phy(&intel_dp->mst.mgr, connector->mst.port, true);

	intel_dp_sink_enable_decompression(state, connector, pipe_config);

	if (first_mst_stream) {
		primary_encoder->pre_enable(state, primary_encoder,
					    pipe_config, NULL);

		intel_mst_reprobe_topology(intel_dp, pipe_config);
	}

	/* Allocate this stream's time slots in the payload table. */
	ret = drm_dp_add_payload_part1(&intel_dp->mst.mgr, mst_state,
				       drm_atomic_get_mst_payload_state(mst_state, connector->mst.port));
	if (ret < 0)
		intel_dp_queue_modeset_retry_for_link(state, primary_encoder, pipe_config);

	/*
	 * Before Gen 12 this is not done as part of
	 * primary_encoder->pre_enable() and should be done here. For
	 * Gen 12+ the step in which this should be done is different for the
	 * first MST stream, so it's done on the DDI for the first stream and
	 * here for the following ones.
	 */
	if (DISPLAY_VER(display) < 12 || !first_mst_stream)
		intel_ddi_enable_transcoder_clock(encoder, pipe_config);

	if (DISPLAY_VER(display) >= 13 && !first_mst_stream)
		intel_ddi_config_transcoder_func(encoder, pipe_config);

	intel_dsc_dp_pps_write(primary_encoder, pipe_config);
	intel_ddi_set_dp_msa(pipe_config, conn_state);
}

/* Apply the ADL-P BS jitter workarounds (Wa_14013163432, Wa_14014143976). */
static void enable_bs_jitter_was(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	u32 clear = 0;
	u32 set = 0;

	if (!display->platform.alderlake_p)
		return;

	if (!IS_DISPLAY_STEP(display, STEP_D0, STEP_FOREVER))
		return;

	/* Wa_14013163432:adlp */
	if (crtc_state->fec_enable || intel_dp_is_uhbr(crtc_state))
		set |= DP_MST_FEC_BS_JITTER_WA(crtc_state->cpu_transcoder);

	/* Wa_14014143976:adlp */
	if (IS_DISPLAY_STEP(display, STEP_E0, STEP_FOREVER)) {
		if (intel_dp_is_uhbr(crtc_state))
			set |= DP_MST_SHORT_HBLANK_WA(crtc_state->cpu_transcoder);
		else if (crtc_state->fec_enable)
			clear |= DP_MST_SHORT_HBLANK_WA(crtc_state->cpu_transcoder);

		if (crtc_state->fec_enable || intel_dp_is_uhbr(crtc_state))
			set |= DP_MST_DPT_DPTP_ALIGN_WA(crtc_state->cpu_transcoder);
	}

	if (!clear && !set)
		return;

	intel_de_rmw(display, CHICKEN_MISC_3, clear, set);
}

static void mst_stream_enable(struct intel_atomic_state *state,
			      struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
	struct intel_dp *intel_dp = to_primary_dp(encoder);
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	struct drm_dp_mst_topology_state *mst_state =
		drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst.mgr);
	enum transcoder trans = pipe_config->cpu_transcoder;
	bool first_mst_stream = intel_dp_mst_active_streams(intel_dp) == 1;
	struct intel_crtc *pipe_crtc;
	int ret, i, min_hblank;

	drm_WARN_ON(display->drm, pipe_config->has_pch_encoder);

	if (intel_dp_is_uhbr(pipe_config)) {
		const struct drm_display_mode *adjusted_mode =
			&pipe_config->hw.adjusted_mode;
		u64 crtc_clock_hz = KHz(adjusted_mode->crtc_clock);

		/* 128b/132b: program the pixel clock (Hz) high/low registers. */
		intel_de_write(display, TRANS_DP2_VFREQHIGH(pipe_config->cpu_transcoder),
			       TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz >> 24));
		intel_de_write(display, TRANS_DP2_VFREQLOW(pipe_config->cpu_transcoder),
			       TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz & 0xffffff));
	}

	if (DISPLAY_VER(display) >= 20) {
		/*
		 * adjust the BlankingStart/BlankingEnd framing control from
		 * the calculated value
		 */
		min_hblank = pipe_config->min_hblank - 2;

		/* Maximum value to be programmed is limited to 0x10 */
		min_hblank = min(0x10, min_hblank);

		/*
		 * Minimum hblank accepted for 128b/132b would be 5 and for
		 * 8b/10b would be 3 symbol count
		 */
		if (intel_dp_is_uhbr(pipe_config))
			min_hblank = max(min_hblank, 5);
		else
			min_hblank = max(min_hblank, 3);

		intel_de_write(display, DP_MIN_HBLANK_CTL(trans),
			       min_hblank);
	}

	enable_bs_jitter_was(pipe_config);

	intel_ddi_enable_transcoder_func(encoder, pipe_config);

	intel_vrr_transcoder_enable(pipe_config);

	intel_ddi_clear_act_sent(encoder, pipe_config);

	/* Trigger the payload allocation and wait for the sink to ACT. */
	intel_de_rmw(display, TRANS_DDI_FUNC_CTL(display, trans), 0,
		     TRANS_DDI_DP_VC_PAYLOAD_ALLOC);

	intel_ddi_wait_for_act_sent(encoder, pipe_config);
	drm_dp_check_act_status(&intel_dp->mst.mgr);

	if (first_mst_stream)
		intel_ddi_wait_for_fec_status(encoder, pipe_config, true);

	ret = drm_dp_add_payload_part2(&intel_dp->mst.mgr,
				       drm_atomic_get_mst_payload_state(mst_state,
									connector->mst.port));
	if (ret < 0)
		intel_dp_queue_modeset_retry_for_link(state, primary_encoder, pipe_config);

	if (DISPLAY_VER(display) >= 12)
		intel_de_rmw(display, CHICKEN_TRANS(display, trans),
			     FECSTALL_DIS_DPTSTREAM_DPTTG,
			     pipe_config->fec_enable ? FECSTALL_DIS_DPTSTREAM_DPTTG : 0);

	intel_audio_sdp_split_update(pipe_config);

	intel_enable_transcoder(pipe_config);

	for_each_pipe_crtc_modeset_enable(display, pipe_crtc, pipe_config, i) {
		const struct intel_crtc_state *pipe_crtc_state =
			intel_atomic_get_new_crtc_state(state, pipe_crtc);

		intel_crtc_vblank_on(pipe_crtc_state);
	}

	intel_hdcp_enable(state, encoder, pipe_config, conn_state);
}

static bool mst_stream_get_hw_state(struct intel_encoder *encoder,
				    enum pipe *pipe)
{
	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
	*pipe = intel_mst->pipe;
	/* A stream encoder is active iff a connector was bound in pre_enable. */
	if (intel_mst->connector)
		return true;
	return false;
}

static void mst_stream_get_config(struct intel_encoder *encoder,
				  struct intel_crtc_state *pipe_config)
{
	struct intel_encoder *primary_encoder = to_primary_encoder(encoder);

	/* HW state readout is delegated to the primary (DDI) encoder. */
	primary_encoder->get_config(primary_encoder, pipe_config);
}

static bool mst_stream_initial_fastset_check(struct intel_encoder *encoder,
					     struct intel_crtc_state *crtc_state)
{
	struct intel_encoder *primary_encoder = to_primary_encoder(encoder);

	return intel_dp_initial_fastset_check(primary_encoder, crtc_state);
}

static int mst_connector_get_ddc_modes(struct drm_connector *_connector)
{
	struct intel_connector *connector = to_intel_connector(_connector);
	struct intel_display *display = to_intel_display(connector);
	struct intel_dp *intel_dp = connector->mst.dp;
	const
struct drm_edid *drm_edid;
	int ret;

	/* A removed connector gets its mode list cleared. */
	if (drm_connector_is_unregistered(&connector->base))
		return intel_connector_update_modes(&connector->base, NULL);

	/* Without HW access, reuse the previously cached EDID modes. */
	if (!intel_display_driver_check_access(display))
		return drm_edid_connector_add_modes(&connector->base);

	drm_edid = drm_dp_mst_edid_read(&connector->base, &intel_dp->mst.mgr, connector->mst.port);

	ret = intel_connector_update_modes(&connector->base, drm_edid);

	drm_edid_free(drm_edid);

	return ret;
}

static int
mst_connector_late_register(struct drm_connector *_connector)
{
	struct intel_connector *connector = to_intel_connector(_connector);
	int ret;

	ret = drm_dp_mst_connector_late_register(&connector->base, connector->mst.port);
	if (ret < 0)
		return ret;

	ret = intel_connector_register(&connector->base);
	/* Undo the MST late-register step if our own registration failed. */
	if (ret < 0)
		drm_dp_mst_connector_early_unregister(&connector->base, connector->mst.port);

	return ret;
}

static void
mst_connector_early_unregister(struct drm_connector *_connector)
{
	struct intel_connector *connector = to_intel_connector(_connector);

	intel_connector_unregister(&connector->base);
	drm_dp_mst_connector_early_unregister(&connector->base, connector->mst.port);
}

static const struct drm_connector_funcs mst_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_digital_connector_atomic_get_property,
	.atomic_set_property = intel_digital_connector_atomic_set_property,
	.late_register = mst_connector_late_register,
	.early_unregister = mst_connector_early_unregister,
	.destroy = intel_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};

static int mst_connector_get_modes(struct drm_connector *_connector)
{
	struct intel_connector *connector = to_intel_connector(_connector);

	return mst_connector_get_ddc_modes(&connector->base);
}

static int
mst_connector_mode_valid_ctx(struct drm_connector *_connector,
			     const struct drm_display_mode *mode,
			     struct drm_modeset_acquire_ctx *ctx,
			     enum drm_mode_status *status)
{
	struct intel_connector *connector = to_intel_connector(_connector);
	struct intel_display *display = to_intel_display(connector);
	struct intel_dp *intel_dp = connector->mst.dp;
	struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst.mgr;
	struct drm_dp_mst_port *port = connector->mst.port;
	const int min_bpp = 18;
	int max_dotclk = display->cdclk.max_dotclk_freq;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int ret;
	bool dsc = false;
	u16 dsc_max_compressed_bpp = 0;
	u8 dsc_slice_count = 0;
	int target_clock = mode->clock;
	int num_joined_pipes;

	if (drm_connector_is_unregistered(&connector->base)) {
		*status = MODE_ERROR;
		return 0;
	}

	*status = intel_cpu_transcoder_mode_valid(display, mode);
	if (*status != MODE_OK)
		return 0;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
		*status = MODE_H_ILLEGAL;
		return 0;
	}

	if (mode->clock < 10000) {
		*status = MODE_CLOCK_LOW;
		return 0;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_link_data_rate(intel_dp,
					       max_link_clock, max_lanes);
	/* Required BW assuming the minimum (18 bpp) uncompressed format. */
	mode_rate = intel_dp_link_required(mode->clock, min_bpp);

	/*
	 * TODO:
	 * - Also check if compression would allow for the mode
	 * - Calculate the overhead using drm_dp_bw_overhead() /
	 *   drm_dp_bw_channel_coding_efficiency(), similarly to the
	 *   compute config code, as drm_dp_calc_pbn_mode() doesn't
	 *   account with all the overheads.
	 * - Check here and during compute config the BW reported by
	 *   DFP_Link_Available_Payload_Bandwidth_Number (or the
	 *   corresponding link capabilities of the sink) in case the
	 *   stream is uncompressed for it by the last branch device.
	 */
	num_joined_pipes = intel_dp_num_joined_pipes(intel_dp, connector,
						     mode->hdisplay, target_clock);
	max_dotclk *= num_joined_pipes;

	/* full_pbn is protected by the topology manager's lock. */
	ret = drm_modeset_lock(&mgr->base.lock, ctx);
	if (ret)
		return ret;

	if (mode_rate > max_rate || mode->clock > max_dotclk ||
	    drm_dp_calc_pbn_mode(mode->clock, min_bpp << 4) > port->full_pbn) {
		*status = MODE_CLOCK_HIGH;
		return 0;
	}

	if (intel_dp_has_dsc(connector)) {
		/*
		 * TBD pass the connector BPC,
		 * for now U8_MAX so that max BPC on that platform would be picked
		 */
		int pipe_bpp = intel_dp_dsc_compute_max_bpp(connector, U8_MAX);

		if (drm_dp_sink_supports_fec(connector->dp.fec_capability)) {
			dsc_max_compressed_bpp =
				intel_dp_dsc_get_max_compressed_bpp(display,
								    max_link_clock,
								    max_lanes,
								    target_clock,
								    mode->hdisplay,
								    num_joined_pipes,
								    INTEL_OUTPUT_FORMAT_RGB,
								    pipe_bpp, 64);
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(connector,
							     target_clock,
							     mode->hdisplay,
							     num_joined_pipes);
		}

		/* DSC is usable only if both a bpp and a slice count were found. */
		dsc = dsc_max_compressed_bpp && dsc_slice_count;
	}

	if (intel_dp_joiner_needs_dsc(display, num_joined_pipes) && !dsc) {
		*status = MODE_CLOCK_HIGH;
		return 0;
	}

	if (mode_rate > max_rate && !dsc) {
		*status = MODE_CLOCK_HIGH;
		return 0;
	}

	*status = intel_mode_valid_max_plane_size(display, mode, num_joined_pipes);
	return 0;
}

static struct drm_encoder *
mst_connector_atomic_best_encoder(struct drm_connector *_connector,
				  struct drm_atomic_state *state)
{
	struct intel_connector *connector = to_intel_connector(_connector);
struct drm_connector_state *connector_state = 1601 drm_atomic_get_new_connector_state(state, &connector->base); 1602 struct intel_dp *intel_dp = connector->mst.dp; 1603 struct intel_crtc *crtc = to_intel_crtc(connector_state->crtc); 1604 1605 return &intel_dp->mst.stream_encoders[crtc->pipe]->base.base; 1606 } 1607 1608 static int 1609 mst_connector_detect_ctx(struct drm_connector *_connector, 1610 struct drm_modeset_acquire_ctx *ctx, bool force) 1611 { 1612 struct intel_connector *connector = to_intel_connector(_connector); 1613 struct intel_display *display = to_intel_display(connector); 1614 struct intel_dp *intel_dp = connector->mst.dp; 1615 1616 if (!intel_display_device_enabled(display)) 1617 return connector_status_disconnected; 1618 1619 if (drm_connector_is_unregistered(&connector->base)) 1620 return connector_status_disconnected; 1621 1622 if (!intel_display_driver_check_access(display)) 1623 return connector->base.status; 1624 1625 intel_dp_flush_connector_commits(connector); 1626 1627 return drm_dp_mst_detect_port(&connector->base, ctx, &intel_dp->mst.mgr, 1628 connector->mst.port); 1629 } 1630 1631 static const struct drm_connector_helper_funcs mst_connector_helper_funcs = { 1632 .get_modes = mst_connector_get_modes, 1633 .mode_valid_ctx = mst_connector_mode_valid_ctx, 1634 .atomic_best_encoder = mst_connector_atomic_best_encoder, 1635 .atomic_check = mst_connector_atomic_check, 1636 .detect_ctx = mst_connector_detect_ctx, 1637 }; 1638 1639 static void mst_stream_encoder_destroy(struct drm_encoder *encoder) 1640 { 1641 struct intel_dp_mst_encoder *intel_mst = enc_to_mst(to_intel_encoder(encoder)); 1642 1643 drm_encoder_cleanup(encoder); 1644 kfree(intel_mst); 1645 } 1646 1647 static const struct drm_encoder_funcs mst_stream_encoder_funcs = { 1648 .destroy = mst_stream_encoder_destroy, 1649 }; 1650 1651 static bool mst_connector_get_hw_state(struct intel_connector *connector) 1652 { 1653 /* This is the MST stream encoder set in ->pre_enable, if any */ 
1654 struct intel_encoder *encoder = intel_attached_encoder(connector); 1655 enum pipe pipe; 1656 1657 if (!encoder || !connector->base.state->crtc) 1658 return false; 1659 1660 return encoder->get_hw_state(encoder, &pipe); 1661 } 1662 1663 static int mst_topology_add_connector_properties(struct intel_dp *intel_dp, 1664 struct drm_connector *_connector, 1665 const char *pathprop) 1666 { 1667 struct intel_display *display = to_intel_display(intel_dp); 1668 struct intel_connector *connector = to_intel_connector(_connector); 1669 1670 drm_object_attach_property(&connector->base.base, 1671 display->drm->mode_config.path_property, 0); 1672 drm_object_attach_property(&connector->base.base, 1673 display->drm->mode_config.tile_property, 0); 1674 1675 intel_attach_force_audio_property(&connector->base); 1676 intel_attach_broadcast_rgb_property(&connector->base); 1677 1678 /* 1679 * Reuse the prop from the SST connector because we're 1680 * not allowed to create new props after device registration. 
1681 */ 1682 connector->base.max_bpc_property = 1683 intel_dp->attached_connector->base.max_bpc_property; 1684 if (connector->base.max_bpc_property) 1685 drm_connector_attach_max_bpc_property(&connector->base, 6, 12); 1686 1687 return drm_connector_set_path_property(&connector->base, pathprop); 1688 } 1689 1690 static void 1691 intel_dp_mst_read_decompression_port_dsc_caps(struct intel_dp *intel_dp, 1692 struct intel_connector *connector) 1693 { 1694 u8 dpcd_caps[DP_RECEIVER_CAP_SIZE]; 1695 1696 if (!connector->dp.dsc_decompression_aux) 1697 return; 1698 1699 if (drm_dp_read_dpcd_caps(connector->dp.dsc_decompression_aux, dpcd_caps) < 0) 1700 return; 1701 1702 intel_dp_get_dsc_sink_cap(dpcd_caps[DP_DPCD_REV], connector); 1703 } 1704 1705 static bool detect_dsc_hblank_expansion_quirk(const struct intel_connector *connector) 1706 { 1707 struct intel_display *display = to_intel_display(connector); 1708 struct drm_dp_aux *aux = connector->dp.dsc_decompression_aux; 1709 struct drm_dp_desc desc; 1710 u8 dpcd[DP_RECEIVER_CAP_SIZE]; 1711 1712 if (!aux) 1713 return false; 1714 1715 /* 1716 * A logical port's OUI (at least for affected sinks) is all 0, so 1717 * instead of that the parent port's OUI is used for identification. 1718 */ 1719 if (drm_dp_mst_port_is_logical(connector->mst.port)) { 1720 aux = drm_dp_mst_aux_for_parent(connector->mst.port); 1721 if (!aux) 1722 aux = &connector->mst.dp->aux; 1723 } 1724 1725 if (drm_dp_read_dpcd_caps(aux, dpcd) < 0) 1726 return false; 1727 1728 if (drm_dp_read_desc(aux, &desc, drm_dp_is_branch(dpcd)) < 0) 1729 return false; 1730 1731 if (!drm_dp_has_quirk(&desc, 1732 DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC)) 1733 return false; 1734 1735 /* 1736 * UHBR (MST sink) devices requiring this quirk don't advertise the 1737 * HBLANK expansion support. Presuming that they perform HBLANK 1738 * expansion internally, or are affected by this issue on modes with a 1739 * short HBLANK for other reasons. 
1740 */ 1741 if (!drm_dp_128b132b_supported(dpcd) && 1742 !(dpcd[DP_RECEIVE_PORT_0_CAP_0] & DP_HBLANK_EXPANSION_CAPABLE)) 1743 return false; 1744 1745 drm_dbg_kms(display->drm, 1746 "[CONNECTOR:%d:%s] DSC HBLANK expansion quirk detected\n", 1747 connector->base.base.id, connector->base.name); 1748 1749 return true; 1750 } 1751 1752 static struct drm_connector * 1753 mst_topology_add_connector(struct drm_dp_mst_topology_mgr *mgr, 1754 struct drm_dp_mst_port *port, 1755 const char *pathprop) 1756 { 1757 struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst.mgr); 1758 struct intel_display *display = to_intel_display(intel_dp); 1759 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1760 struct intel_connector *connector; 1761 enum pipe pipe; 1762 int ret; 1763 1764 connector = intel_connector_alloc(); 1765 if (!connector) 1766 return NULL; 1767 1768 connector->get_hw_state = mst_connector_get_hw_state; 1769 connector->sync_state = intel_dp_connector_sync_state; 1770 connector->mst.dp = intel_dp; 1771 connector->mst.port = port; 1772 drm_dp_mst_get_port_malloc(port); 1773 1774 ret = drm_connector_dynamic_init(display->drm, &connector->base, &mst_connector_funcs, 1775 DRM_MODE_CONNECTOR_DisplayPort, NULL); 1776 if (ret) 1777 goto err_put_port; 1778 1779 connector->dp.dsc_decompression_aux = drm_dp_mst_dsc_aux_for_port(port); 1780 intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, connector); 1781 connector->dp.dsc_hblank_expansion_quirk = 1782 detect_dsc_hblank_expansion_quirk(connector); 1783 1784 drm_connector_helper_add(&connector->base, &mst_connector_helper_funcs); 1785 1786 for_each_pipe(display, pipe) { 1787 struct drm_encoder *enc = 1788 &intel_dp->mst.stream_encoders[pipe]->base.base; 1789 1790 ret = drm_connector_attach_encoder(&connector->base, enc); 1791 if (ret) 1792 goto err_cleanup_connector; 1793 } 1794 1795 ret = mst_topology_add_connector_properties(intel_dp, &connector->base, pathprop); 1796 if (ret) 1797 goto 
err_cleanup_connector;

	/*
	 * HDCP init failure is non-fatal for the connector: log it and
	 * continue without HDCP support.
	 */
	ret = intel_dp_hdcp_init(dig_port, connector);
	if (ret)
		drm_dbg_kms(display->drm, "[%s:%d] HDCP MST init failed, skipping.\n",
			    connector->base.name, connector->base.base.id);

	return &connector->base;

err_cleanup_connector:
	drm_connector_cleanup(&connector->base);
err_put_port:
	drm_dp_mst_put_port_malloc(port);
	intel_connector_free(connector);

	return NULL;
}

/* Forward a topology manager HPD poll request to the i915 hotplug code. */
static void
mst_topology_poll_hpd_irq(struct drm_dp_mst_topology_mgr *mgr)
{
	struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst.mgr);

	intel_hpd_trigger_irq(dp_to_dig_port(intel_dp));
}

/* Callbacks invoked by the DRM MST topology manager core. */
static const struct drm_dp_mst_topology_cbs mst_topology_cbs = {
	.add_connector = mst_topology_add_connector,
	.poll_hpd_irq = mst_topology_poll_hpd_irq,
};

/* Create a fake encoder for an individual MST stream */
static struct intel_dp_mst_encoder *
mst_stream_encoder_create(struct intel_digital_port *dig_port, enum pipe pipe)
{
	struct intel_display *display = to_intel_display(dig_port);
	struct intel_encoder *primary_encoder = &dig_port->base;
	struct intel_dp_mst_encoder *intel_mst;
	struct intel_encoder *encoder;

	intel_mst = kzalloc(sizeof(*intel_mst), GFP_KERNEL);

	if (!intel_mst)
		return NULL;

	intel_mst->pipe = pipe;
	encoder = &intel_mst->base;
	intel_mst->primary = dig_port;

	/*
	 * NOTE(review): the return value of drm_encoder_init() is not
	 * checked here - presumably it cannot fail for this configuration;
	 * confirm before relying on that.
	 */
	drm_encoder_init(display->drm, &encoder->base, &mst_stream_encoder_funcs,
			 DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe));

	encoder->type = INTEL_OUTPUT_DP_MST;
	encoder->power_domain = primary_encoder->power_domain;
	encoder->port = primary_encoder->port;
	encoder->cloneable = 0;
	/*
	 * This is wrong, but broken userspace uses the intersection
	 * of possible_crtcs of all the encoders of a given connector
	 * to figure out which crtcs can drive said connector. What
	 * should be used instead is the union of possible_crtcs.
	 * To keep such userspace functioning we must misconfigure
	 * this to make sure the intersection is not empty :(
	 */
	encoder->pipe_mask = ~0;

	/* Modeset/audio/state-readout hooks for the fake stream encoder. */
	encoder->compute_config = mst_stream_compute_config;
	encoder->compute_config_late = mst_stream_compute_config_late;
	encoder->disable = mst_stream_disable;
	encoder->post_disable = mst_stream_post_disable;
	encoder->post_pll_disable = mst_stream_post_pll_disable;
	encoder->update_pipe = intel_ddi_update_pipe;
	encoder->pre_pll_enable = mst_stream_pre_pll_enable;
	encoder->pre_enable = mst_stream_pre_enable;
	encoder->enable = mst_stream_enable;
	encoder->audio_enable = intel_audio_codec_enable;
	encoder->audio_disable = intel_audio_codec_disable;
	encoder->get_hw_state = mst_stream_get_hw_state;
	encoder->get_config = mst_stream_get_config;
	encoder->initial_fastset_check = mst_stream_initial_fastset_check;

	return intel_mst;

}

/* Create the fake encoders for MST streams */
static bool
mst_stream_encoders_create(struct intel_digital_port *dig_port)
{
	struct intel_display *display = to_intel_display(dig_port);
	struct intel_dp *intel_dp = &dig_port->dp;
	enum pipe pipe;

	/*
	 * One stream encoder per pipe. NOTE(review): a NULL result from
	 * mst_stream_encoder_create() (allocation failure) is stored
	 * without being checked, and the function always returns true.
	 */
	for_each_pipe(display, pipe)
		intel_dp->mst.stream_encoders[pipe] = mst_stream_encoder_create(dig_port, pipe);
	return true;
}

/*
 * Set up MST for @dig_port: create the per-pipe fake stream encoders and
 * initialize the MST topology manager. Returns 0 also when MST is simply
 * not applicable to this platform/port, a negative error code on failure.
 */
int
intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
{
	struct intel_display *display = to_intel_display(dig_port);
	struct intel_dp *intel_dp = &dig_port->dp;
	enum port port = dig_port->base.port;
	int ret;

	/* MST is never used for eDP, nor without platform support. */
	if (!HAS_DP_MST(display) || intel_dp_is_edp(intel_dp))
		return 0;

	/* No MST on port A before display version 12. */
	if (DISPLAY_VER(display) < 12 && port == PORT_A)
		return 0;

	/* No MST on port E before display version 11. */
	if (DISPLAY_VER(display) < 11 && port == PORT_E)
		return 0;

	/* A non-NULL cbs pointer doubles as the "MST supported" flag. */
	intel_dp->mst.mgr.cbs = &mst_topology_cbs;

	/* create encoders */
	mst_stream_encoders_create(dig_port);
	/*
	 * 16 byte max DPCD transaction size, one payload per pipe - TODO
	 * confirm against drm_dp_mst_topology_mgr_init()'s parameter docs.
	 */
	ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst.mgr, display->drm,
					   &intel_dp->aux, 16,
					   INTEL_NUM_PIPES(display), conn_base_id);
	if (ret) {
		/* Clear the support flag again on failure. */
		intel_dp->mst.mgr.cbs = NULL;
		return ret;
	}

	return 0;
}

/* MST source support is flagged by a non-NULL topology manager cbs pointer. */
bool intel_dp_mst_source_support(struct intel_dp *intel_dp)
{
	return intel_dp->mst.mgr.cbs;
}

/* Tear down the MST state set up by intel_dp_mst_encoder_init(). */
void
intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port)
{
	struct intel_dp *intel_dp = &dig_port->dp;

	if (!intel_dp_mst_source_support(intel_dp))
		return;

	drm_dp_mst_topology_mgr_destroy(&intel_dp->mst.mgr);
	/* encoders will get killed by normal cleanup */

	intel_dp->mst.mgr.cbs = NULL;
}

/* True if @crtc_state's own transcoder is the MST master transcoder. */
bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->mst_master_transcoder == crtc_state->cpu_transcoder;
}

/* True if @crtc_state has a valid MST master transcoder other than its own. */
bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->mst_master_transcoder != INVALID_TRANSCODER &&
	       crtc_state->mst_master_transcoder != crtc_state->cpu_transcoder;
}

/**
 * intel_dp_mst_add_topology_state_for_connector - add MST topology state for a connector
 * @state: atomic state
 * @connector: connector to add the state for
 * @crtc: the CRTC @connector is attached to
 *
 * Add the MST topology state for @connector to @state.
 *
 * Returns 0 on success, negative error code on failure.
 */
static int
intel_dp_mst_add_topology_state_for_connector(struct intel_atomic_state *state,
					      struct intel_connector *connector,
					      struct intel_crtc *crtc)
{
	struct drm_dp_mst_topology_state *mst_state;

	/* Nothing to do for non-MST connectors. */
	if (!connector->mst.dp)
		return 0;

	mst_state = drm_atomic_get_mst_topology_state(&state->base,
						      &connector->mst.dp->mst.mgr);
	if (IS_ERR(mst_state))
		return PTR_ERR(mst_state);

	/* Record that @crtc has a pending change in this topology. */
	mst_state->pending_crtc_mask |= drm_crtc_mask(&crtc->base);

	return 0;
}

/**
 * intel_dp_mst_add_topology_state_for_crtc - add MST topology state for a CRTC
 * @state: atomic state
 * @crtc: CRTC to add the state for
 *
 * Add the MST topology state for @crtc to @state.
 *
 * Returns 0 on success, negative error code on failure.
 */
int intel_dp_mst_add_topology_state_for_crtc(struct intel_atomic_state *state,
					     struct intel_crtc *crtc)
{
	struct drm_connector *_connector;
	struct drm_connector_state *conn_state;
	int i;

	for_each_new_connector_in_state(&state->base, _connector, conn_state, i) {
		struct intel_connector *connector = to_intel_connector(_connector);
		int ret;

		/* Only connectors driven by @crtc are of interest. */
		if (conn_state->crtc != &crtc->base)
			continue;

		ret = intel_dp_mst_add_topology_state_for_connector(state, connector, crtc);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Find a connector in @state that is or was attached to @crtc, or NULL if
 * the atomic state contains no such connector.
 */
static struct intel_connector *
get_connector_in_state_for_crtc(struct intel_atomic_state *state,
				const struct intel_crtc *crtc)
{
	struct drm_connector_state *old_conn_state;
	struct drm_connector_state *new_conn_state;
	struct drm_connector *_connector;
	int i;

	for_each_oldnew_connector_in_state(&state->base, _connector,
					   old_conn_state, new_conn_state, i) {
		struct intel_connector *connector =
			to_intel_connector(_connector);

		if (old_conn_state->crtc == &crtc->base ||
		    new_conn_state->crtc == &crtc->base)
			return connector;
	}

	return NULL;
}

/**
 * intel_dp_mst_crtc_needs_modeset - check if changes in topology need to modeset the given CRTC
 * @state: atomic state
 * @crtc: CRTC for which to check the modeset requirement
 *
 * Check if any change in a MST topology requires a forced modeset on @crtc in
 * this topology. One such change is enabling/disabling the DSC decompression
 * state in the first branch device's UFP DPCD as required by one CRTC, while
 * the other @crtc in the same topology is still active, requiring a full modeset
 * on @crtc.
 */
bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state,
				     struct intel_crtc *crtc)
{
	const struct intel_connector *crtc_connector;
	const struct drm_connector_state *conn_state;
	const struct drm_connector *_connector;
	int i;

	/* Only MST outputs can be affected by topology-wide changes. */
	if (!intel_crtc_has_type(intel_atomic_get_new_crtc_state(state, crtc),
				 INTEL_OUTPUT_DP_MST))
		return false;

	crtc_connector = get_connector_in_state_for_crtc(state, crtc);

	if (!crtc_connector)
		/* None of the connectors in the topology needs modeset */
		return false;

	for_each_new_connector_in_state(&state->base, _connector, conn_state, i) {
		const struct intel_connector *connector =
			to_intel_connector(_connector);
		const struct intel_crtc_state *new_crtc_state;
		const struct intel_crtc_state *old_crtc_state;
		struct intel_crtc *crtc_iter;

		/* Only active streams in the same topology matter. */
		if (connector->mst.dp != crtc_connector->mst.dp ||
		    !conn_state->crtc)
			continue;

		crtc_iter = to_intel_crtc(conn_state->crtc);

		new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc_iter);
		old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc_iter);

		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		/* Only a DSC decompression toggle is of interest here. */
		if (old_crtc_state->dsc.compression_enable ==
		    new_crtc_state->dsc.compression_enable)
			continue;
		/*
		 * Toggling the decompression flag because of this stream in
		 * the first downstream branch device's UFP DPCD may reset the
		 * whole branch device. To avoid the reset while other streams
		 * are also active modeset the whole MST topology in this
		 * case.
		 */
		if (connector->dp.dsc_decompression_aux ==
		    &connector->mst.dp->aux)
			return true;
	}

	return false;
}

/**
 * intel_dp_mst_prepare_probe - Prepare an MST link for topology probing
 * @intel_dp: DP port object
 *
 * Prepare an MST link for topology probing, programming the target
 * link parameters to DPCD. This step is a requirement of the enumeration
 * of path resources during probing.
 */
void intel_dp_mst_prepare_probe(struct intel_dp *intel_dp)
{
	int link_rate = intel_dp_max_link_rate(intel_dp);
	int lane_count = intel_dp_max_lane_count(intel_dp);
	u8 rate_select;
	u8 link_bw;

	/* An active link already has its parameters programmed. */
	if (intel_dp->link.active)
		return;

	/* Skip the DPCD writes if the same parameters were already probed. */
	if (intel_mst_probed_link_params_valid(intel_dp, link_rate, lane_count))
		return;

	intel_dp_compute_rate(intel_dp, link_rate, &link_bw, &rate_select);

	intel_dp_link_training_set_mode(intel_dp, link_rate, false);
	intel_dp_link_training_set_bw(intel_dp, link_bw, rate_select, lane_count,
				      drm_dp_enhanced_frame_cap(intel_dp->dpcd));

	intel_mst_set_probed_link_params(intel_dp, link_rate, lane_count);
}

/*
 * intel_dp_mst_verify_dpcd_state - verify the MST SW enabled state wrt. the DPCD
 * @intel_dp: DP port object
 *
 * Verify if @intel_dp's MST enabled SW state matches the corresponding DPCD
 * state. A long HPD pulse - not long enough to be detected as a disconnected
 * state - could've reset the DPCD state, which requires tearing
 * down/recreating the MST topology.
2144 * 2145 * Returns %true if the SW MST enabled and DPCD states match, %false 2146 * otherwise. 2147 */ 2148 bool intel_dp_mst_verify_dpcd_state(struct intel_dp *intel_dp) 2149 { 2150 struct intel_display *display = to_intel_display(intel_dp); 2151 struct intel_connector *connector = intel_dp->attached_connector; 2152 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 2153 struct intel_encoder *encoder = &dig_port->base; 2154 int ret; 2155 u8 val; 2156 2157 if (!intel_dp->is_mst) 2158 return true; 2159 2160 ret = drm_dp_dpcd_readb(intel_dp->mst.mgr.aux, DP_MSTM_CTRL, &val); 2161 2162 /* Adjust the expected register value for SST + SideBand. */ 2163 if (ret < 0 || val != (DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC)) { 2164 drm_dbg_kms(display->drm, 2165 "[CONNECTOR:%d:%s][ENCODER:%d:%s] MST mode got reset, removing topology (ret=%d, ctrl=0x%02x)\n", 2166 connector->base.base.id, connector->base.name, 2167 encoder->base.base.id, encoder->base.name, 2168 ret, val); 2169 2170 return false; 2171 } 2172 2173 return true; 2174 } 2175