1 /* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 
22 * 23 * Authors: 24 * Keith Packard <keithp@keithp.com> 25 * 26 */ 27 28 #include <linux/export.h> 29 #include <linux/i2c.h> 30 #include <linux/notifier.h> 31 #include <linux/slab.h> 32 #include <linux/sort.h> 33 #include <linux/string_helpers.h> 34 #include <linux/timekeeping.h> 35 #include <linux/types.h> 36 37 #include <asm/byteorder.h> 38 39 #include <drm/display/drm_dp_helper.h> 40 #include <drm/display/drm_dp_tunnel.h> 41 #include <drm/display/drm_dsc_helper.h> 42 #include <drm/display/drm_hdmi_helper.h> 43 #include <drm/drm_atomic_helper.h> 44 #include <drm/drm_crtc.h> 45 #include <drm/drm_edid.h> 46 #include <drm/drm_fixed.h> 47 #include <drm/drm_probe_helper.h> 48 49 #include "g4x_dp.h" 50 #include "i915_drv.h" 51 #include "i915_irq.h" 52 #include "i915_reg.h" 53 #include "intel_alpm.h" 54 #include "intel_atomic.h" 55 #include "intel_audio.h" 56 #include "intel_backlight.h" 57 #include "intel_combo_phy_regs.h" 58 #include "intel_connector.h" 59 #include "intel_crtc.h" 60 #include "intel_cx0_phy.h" 61 #include "intel_ddi.h" 62 #include "intel_de.h" 63 #include "intel_display_driver.h" 64 #include "intel_display_types.h" 65 #include "intel_dp.h" 66 #include "intel_dp_aux.h" 67 #include "intel_dp_hdcp.h" 68 #include "intel_dp_link_training.h" 69 #include "intel_dp_mst.h" 70 #include "intel_dp_tunnel.h" 71 #include "intel_dpio_phy.h" 72 #include "intel_dpll.h" 73 #include "intel_drrs.h" 74 #include "intel_encoder.h" 75 #include "intel_fifo_underrun.h" 76 #include "intel_hdcp.h" 77 #include "intel_hdmi.h" 78 #include "intel_hotplug.h" 79 #include "intel_hotplug_irq.h" 80 #include "intel_lspcon.h" 81 #include "intel_lvds.h" 82 #include "intel_modeset_lock.h" 83 #include "intel_panel.h" 84 #include "intel_pch_display.h" 85 #include "intel_pps.h" 86 #include "intel_psr.h" 87 #include "intel_tc.h" 88 #include "intel_vdsc.h" 89 #include "intel_vrr.h" 90 #include "intel_crtc_state_dump.h" 91 92 /* DP DSC throughput values used for slice count calculations KPixels/s 
 */
#define DP_DSC_PEAK_PIXEL_RATE			2720000
/*
 * Max DSC encoder throughput at or below DP_DSC_PEAK_PIXEL_RATE (_0) and
 * above it (_1); see intel_dp_dsc_get_slice_count().
 */
#define DP_DSC_MAX_ENC_THROUGHPUT_0		340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1		400000

/* Max DSC line buffer depth supported by HW. */
#define INTEL_DP_DSC_MAX_LINE_BUF_DEPTH		13

/* DP DSC FEC Overhead factor in ppm = 1/(0.972261) = 1.028530 */
#define DP_DSC_FEC_OVERHEAD_FACTOR		1028530

/* Compliance test status bits  */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)


/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/* With Single pipe configuration, HW is capable of supporting maximum
 * of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};

/**
 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 *
 * This function is not safe to use prior to encoder type being set.
 */
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	return dig_port->base.type == INTEL_OUTPUT_EDP;
}

static void intel_dp_unset_edid(struct intel_dp *intel_dp);

/* Is link rate UHBR and thus 128b/132b? */
bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state)
{
	return drm_dp_is_uhbr_rate(crtc_state->port_clock);
}

/**
 * intel_dp_link_symbol_size - get the link symbol size for a given link rate
 * @rate: link rate in 10kbit/s units
 *
 * Returns the link symbol size in bits/symbol units depending on the link
 * rate -> channel coding.
 */
int intel_dp_link_symbol_size(int rate)
{
	/* 32 bits/symbol for 128b/132b (UHBR), 10 bits/symbol for 8b/10b. */
	return drm_dp_is_uhbr_rate(rate) ? 32 : 10;
}

/**
 * intel_dp_link_symbol_clock - convert link rate to link symbol clock
 * @rate: link rate in 10kbit/s units
 *
 * Returns the link symbol clock frequency in kHz units depending on the
 * link rate and channel coding.
 */
int intel_dp_link_symbol_clock(int rate)
{
	return DIV_ROUND_CLOSEST(rate * 10, intel_dp_link_symbol_size(rate));
}

/*
 * Max link rate of the DPRX: the tunnel-granted rate when DP tunnel BW
 * allocation is enabled, otherwise the rate advertised in the DPCD.
 */
static int max_dprx_rate(struct intel_dp *intel_dp)
{
	if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
		return drm_dp_tunnel_max_dprx_rate(intel_dp->tunnel);

	return drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
}

/* Max lane count of the DPRX, with the same tunnel consideration as above. */
static int max_dprx_lane_count(struct intel_dp *intel_dp)
{
	if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
		return drm_dp_tunnel_max_dprx_lane_count(intel_dp->tunnel);

	return drm_dp_max_lane_count(intel_dp->dpcd);
}

/* Fall back to the single lowest (162000 in 10 kbit/s units) link rate. */
static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp)
{
	intel_dp->sink_rates[0] = 162000;
	intel_dp->num_sink_rates = 1;
}

/* update sink rates from dpcd */
static void intel_dp_set_dpcd_sink_rates(struct intel_dp *intel_dp)
{
	static const int dp_rates[] = {
		162000, 270000, 540000, 810000
	};
	int i, max_rate;
	int max_lttpr_rate;

	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
		/* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
		static const int quirk_rates[] = { 162000, 270000, 324000 };

		memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates));
		intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates);

		return;
	}

	/*
	 * Sink rates for 8b/10b.
	 */
	max_rate = max_dprx_rate(intel_dp);
	max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps);
	if (max_lttpr_rate)
		max_rate = min(max_rate, max_lttpr_rate);

	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
		if (dp_rates[i] > max_rate)
			break;
		intel_dp->sink_rates[i] = dp_rates[i];
	}

	/*
	 * Sink rates for 128b/132b. If set, sink should support all 8b/10b
	 * rates and 10 Gbps.
	 */
	if (drm_dp_128b132b_supported(intel_dp->dpcd)) {
		u8 uhbr_rates = 0;

		BUILD_BUG_ON(ARRAY_SIZE(intel_dp->sink_rates) < ARRAY_SIZE(dp_rates) + 3);

		drm_dp_dpcd_readb(&intel_dp->aux,
				  DP_128B132B_SUPPORTED_LINK_RATES, &uhbr_rates);

		if (drm_dp_lttpr_count(intel_dp->lttpr_common_caps)) {
			/* We have a repeater */
			if (intel_dp->lttpr_common_caps[0] >= 0x20 &&
			    intel_dp->lttpr_common_caps[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
							DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] &
			    DP_PHY_REPEATER_128B132B_SUPPORTED) {
				/* Repeater supports 128b/132b, valid UHBR rates */
				uhbr_rates &= intel_dp->lttpr_common_caps[DP_PHY_REPEATER_128B132B_RATES -
									  DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
			} else {
				/* Does not support 128b/132b */
				uhbr_rates = 0;
			}
		}

		/* Note: i carries over from the 8b/10b loop above on purpose. */
		if (uhbr_rates & DP_UHBR10)
			intel_dp->sink_rates[i++] = 1000000;
		if (uhbr_rates & DP_UHBR13_5)
			intel_dp->sink_rates[i++] = 1350000;
		if (uhbr_rates & DP_UHBR20)
			intel_dp->sink_rates[i++] = 2000000;
	}

	intel_dp->num_sink_rates = i;
}

/*
 * Read the sink rates from the DPCD, falling back to the default single-rate
 * list (with an error message) if the DPCD advertised none.
 */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;

	intel_dp_set_dpcd_sink_rates(intel_dp);

	if (intel_dp->num_sink_rates)
		return;

	drm_err(&dp_to_i915(intel_dp)->drm,
		"[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD with no link rates, using defaults\n",
		connector->base.base.id, connector->base.name,
		encoder->base.base.id, encoder->base.name);

	intel_dp_set_default_sink_rates(intel_dp);
}

static void intel_dp_set_default_max_sink_lane_count(struct intel_dp *intel_dp)
{
	intel_dp->max_sink_lane_count = 1;
}

/*
 * Read the max sink lane count from the DPCD, falling back to 1 lane (with
 * an error message) on an invalid (non 1/2/4) value.
 */
static void intel_dp_set_max_sink_lane_count(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;

	intel_dp->max_sink_lane_count = max_dprx_lane_count(intel_dp);

	switch (intel_dp->max_sink_lane_count) {
	case 1:
	case 2:
	case 4:
		return;
	}

	drm_err(&dp_to_i915(intel_dp)->drm,
		"[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD max lane count (%d), using default\n",
		connector->base.base.id, connector->base.name,
		encoder->base.base.id, encoder->base.name,
		intel_dp->max_sink_lane_count);

	intel_dp_set_default_max_sink_lane_count(intel_dp);
}

/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int i;

	/* Limit results by potentially reduced max rate */
	for (i = 0; i < len; i++) {
		/* rates[] is sorted ascending, so scan from the top down. */
		if (rates[len - i - 1] <= max_rate)
			return len - i;
	}

	return 0;
}

/* Get length of common rates array potentially limited by max_rate.
 */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
					  int max_rate)
{
	return intel_dp_rate_limit_len(intel_dp->common_rates,
				       intel_dp->num_common_rates, max_rate);
}

/* Return the common (source x sink) rate at @index, or 162000 on a bogus index. */
int intel_dp_common_rate(struct intel_dp *intel_dp, int index)
{
	if (drm_WARN_ON(&dp_to_i915(intel_dp)->drm,
			index < 0 || index >= intel_dp->num_common_rates))
		return 162000;

	return intel_dp->common_rates[index];
}

/* Theoretical max between source and sink */
int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
	return intel_dp_common_rate(intel_dp, intel_dp->num_common_rates - 1);
}

/* Max source lane count: the port's lane count, possibly capped by the VBT. */
int intel_dp_max_source_lane_count(struct intel_digital_port *dig_port)
{
	int vbt_max_lanes = intel_bios_dp_max_lane_count(dig_port->base.devdata);
	int max_lanes = dig_port->max_lanes;

	if (vbt_max_lanes)
		max_lanes = min(max_lanes, vbt_max_lanes);

	return max_lanes;
}

/* Theoretical max between source and sink */
int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	int source_max = intel_dp_max_source_lane_count(dig_port);
	int sink_max = intel_dp->max_sink_lane_count;
	int lane_max = intel_tc_port_max_lane_count(dig_port);
	int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps);

	/* An LTTPR on the link can further limit the sink's lane count. */
	if (lttpr_max)
		sink_max = min(sink_max, lttpr_max);

	return min3(source_max, sink_max, lane_max);
}

/* The forced lane count (link.force_lane_count) clamped to a valid range. */
static int forced_lane_count(struct intel_dp *intel_dp)
{
	return clamp(intel_dp->link.force_lane_count, 1, intel_dp_max_common_lane_count(intel_dp));
}

int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	int lane_count;

	if (intel_dp->link.force_lane_count)
		lane_count = forced_lane_count(intel_dp);
	else
		lane_count = intel_dp->link.max_lane_count;

	switch (lane_count) {
	case 1:
	case 2:
	case 4:
		return lane_count;
	default:
		MISSING_CASE(lane_count);
		return 1;
	}
}

static int intel_dp_min_lane_count(struct intel_dp *intel_dp)
{
	/* A forced lane count pins both the min and the max. */
	if (intel_dp->link.force_lane_count)
		return forced_lane_count(intel_dp);

	return 1;
}

/*
 * The required data bandwidth for a mode with given pixel clock and bpp. This
 * is the required net bandwidth independent of the data bandwidth efficiency.
 *
 * TODO: check if callers of this function should use
 * intel_dp_effective_data_rate() instead.
 */
int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
	return DIV_ROUND_UP(pixel_clock * bpp, 8);
}

/**
 * intel_dp_effective_data_rate - Return the pixel data rate accounting for BW allocation overhead
 * @pixel_clock: pixel clock in kHz
 * @bpp_x16: bits per pixel .4 fixed point format
 * @bw_overhead: BW allocation overhead in 1ppm units
 *
 * Return the effective pixel data rate in kB/sec units taking into account
 * the provided SSC, FEC, DSC BW allocation overhead.
 */
int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16,
				 int bw_overhead)
{
	return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_clock * bpp_x16, bw_overhead),
				1000000 * 16 * 8);
}

/**
 * intel_dp_max_link_data_rate: Calculate the maximum rate for the given link params
 * @intel_dp: Intel DP object
 * @max_dprx_rate: Maximum data rate of the DPRX
 * @max_dprx_lanes: Maximum lane count of the DPRX
 *
 * Calculate the maximum data rate for the provided link parameters taking into
 * account any BW limitations by a DP tunnel attached to @intel_dp.
 *
 * Returns the maximum data rate in kBps units.
 */
int intel_dp_max_link_data_rate(struct intel_dp *intel_dp,
				int max_dprx_rate, int max_dprx_lanes)
{
	int max_rate = drm_dp_max_dprx_data_rate(max_dprx_rate, max_dprx_lanes);

	if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
		max_rate = min(max_rate,
			       drm_dp_tunnel_available_bw(intel_dp->tunnel));

	return max_rate;
}

/* Can this port use the (big/small) joiner? Display 12+, or 11 on non-A ports. */
bool intel_dp_has_joiner(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	/* eDP MSO is not compatible with joiner */
	if (intel_dp->mso_link_count)
		return false;

	return DISPLAY_VER(dev_priv) >= 12 ||
		(DISPLAY_VER(dev_priv) == 11 &&
		 encoder->port != PORT_A);
}

/* Per-platform max source rate limits, in 10 kbit/s units. */
static int dg2_max_source_rate(struct intel_dp *intel_dp)
{
	return intel_dp_is_edp(intel_dp) ? 810000 : 1350000;
}

static int icl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;

	if (intel_encoder_is_combo(encoder) && !intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}

static int ehl_max_source_rate(struct intel_dp *intel_dp)
{
	if (intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}

static int mtl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;

	if (intel_encoder_is_c10phy(encoder))
		return 810000;

	if (DISPLAY_VER_FULL(to_i915(encoder->base.dev)) == IP_VER(14, 1))
		return 1350000;

	return 2000000;
}

/*
 * Max link rate from the VBT, combining the port limit with the eDP panel
 * limit when both are set. 0 means no VBT limit.
 */
static int vbt_max_link_rate(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	int max_rate;

	max_rate = intel_bios_dp_max_link_rate(encoder->devdata);

	if (intel_dp_is_edp(intel_dp)) {
		struct intel_connector *connector = intel_dp->attached_connector;
		int edp_max_rate = connector->panel.vbt.edp.max_link_rate;

		if (max_rate && edp_max_rate)
			max_rate = min(max_rate, edp_max_rate);
		else if (edp_max_rate)
			max_rate = edp_max_rate;
	}

	return max_rate;
}

/*
 * Pick the platform's link rate table, then trim it by the platform specific
 * and VBT max rate limits. Called once at init (see the WARN below).
 */
static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The values must be in increasing order */
	static const int mtl_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000, 675000,
		810000, 1000000, 2000000,
	};
	static const int icl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000,
		1000000, 1350000,
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate;

	/* This should only be done once */
	drm_WARN_ON(&dev_priv->drm,
		    intel_dp->source_rates || intel_dp->num_source_rates);

	if (DISPLAY_VER(dev_priv) >= 14) {
		source_rates = mtl_rates;
		size = ARRAY_SIZE(mtl_rates);
		max_rate = mtl_max_source_rate(intel_dp);
	} else if (DISPLAY_VER(dev_priv) >= 11) {
		source_rates = icl_rates;
		size = ARRAY_SIZE(icl_rates);
		if (IS_DG2(dev_priv))
			max_rate = dg2_max_source_rate(intel_dp);
		else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
			 IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
			max_rate = 810000;
		else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
			max_rate = ehl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
	} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (DISPLAY_VER(dev_priv) == 9) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HASWELL_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	vbt_max_rate = vbt_max_link_rate(intel_dp);
	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}

/*
 * Intersect two ascending-sorted rate arrays into common_rates, returning the
 * number of common entries (bounded by DP_MAX_SUPPORTED_RATES).
 */
static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}

/* return index of rate in rates array, or -1 if not found */
int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int i;

	for (i = 0; i < len; i++)
		if (rate == rates[i])
			return i;

	return -1;
}

static int intel_dp_link_config_rate(struct intel_dp *intel_dp,
				     const struct intel_dp_link_config *lc)
{
	return intel_dp_common_rate(intel_dp, lc->link_rate_idx);
}

/* Lane count is stored as its log2 (0/1/2 -> 1/2/4 lanes). */
static int intel_dp_link_config_lane_count(const struct intel_dp_link_config *lc)
{
	return 1 << lc->lane_count_exp;
}

static int intel_dp_link_config_bw(struct intel_dp *intel_dp,
				   const struct intel_dp_link_config *lc)
{
	return drm_dp_max_dprx_data_rate(intel_dp_link_config_rate(intel_dp, lc),
					 intel_dp_link_config_lane_count(lc));
}

/* sort_r() comparator: ascending by BW, ties broken by link rate. */
static int link_config_cmp_by_bw(const void *a, const void *b, const void *p)
{
	struct intel_dp *intel_dp = (struct intel_dp *)p;	/* remove const */
	const struct intel_dp_link_config *lc_a = a;
	const struct intel_dp_link_config *lc_b = b;
	int bw_a = intel_dp_link_config_bw(intel_dp, lc_a);
	int bw_b = intel_dp_link_config_bw(intel_dp, lc_b);

	if (bw_a != bw_b)
		return bw_a - bw_b;

	return intel_dp_link_config_rate(intel_dp, lc_a) -
	       intel_dp_link_config_rate(intel_dp, lc_b);
}

/*
 * Build the table of all valid (link rate, lane count) configurations and
 * sort it in ascending bandwidth order.
 */
static void intel_dp_link_config_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_dp_link_config *lc;
	int num_common_lane_configs;
	int i;
	int j;

	if (drm_WARN_ON(&i915->drm, !is_power_of_2(intel_dp_max_common_lane_count(intel_dp))))
		return;

	num_common_lane_configs = ilog2(intel_dp_max_common_lane_count(intel_dp)) + 1;

	if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates * num_common_lane_configs >
				    ARRAY_SIZE(intel_dp->link.configs)))
		return;

	intel_dp->link.num_configs = intel_dp->num_common_rates * num_common_lane_configs;

	lc = &intel_dp->link.configs[0];
	for (i = 0; i < intel_dp->num_common_rates; i++) {
		for (j = 0; j < num_common_lane_configs; j++) {
			lc->lane_count_exp = j;
			lc->link_rate_idx = i;

			lc++;
		}
	}

	sort_r(intel_dp->link.configs, intel_dp->link.num_configs,
	       sizeof(intel_dp->link.configs[0]),
	       link_config_cmp_by_bw, NULL,
	       intel_dp);
}

/* Look up link rate and lane count for config @idx (clamped to 0 if bogus). */
void intel_dp_link_config_get(struct intel_dp *intel_dp, int idx, int *link_rate, int *lane_count)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	const struct intel_dp_link_config *lc;

	if (drm_WARN_ON(&i915->drm, idx < 0 || idx >= intel_dp->link.num_configs))
		idx = 0;

	lc = &intel_dp->link.configs[idx];

	*link_rate = intel_dp_link_config_rate(intel_dp, lc);
	*lane_count = intel_dp_link_config_lane_count(lc);
}

/* Reverse lookup of a (rate, lane count) pair's config index, -1 if not found. */
int intel_dp_link_config_index(struct intel_dp *intel_dp, int link_rate, int lane_count)
{
	int link_rate_idx = intel_dp_rate_index(intel_dp->common_rates, intel_dp->num_common_rates,
						link_rate);
	int lane_count_exp = ilog2(lane_count);
	int i;

	for (i = 0; i < intel_dp->link.num_configs; i++) {
		const struct intel_dp_link_config *lc = &intel_dp->link.configs[i];

		if (lc->lane_count_exp == lane_count_exp &&
		    lc->link_rate_idx == link_rate_idx)
			return i;
	}

	return -1;
}

static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	drm_WARN_ON(&i915->drm,
		    !intel_dp->num_source_rates || !intel_dp->num_sink_rates);

	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
						     intel_dp->num_source_rates,
						     intel_dp->sink_rates,
						     intel_dp->num_sink_rates,
						     intel_dp->common_rates);

	/* Paranoia, there should always be something in common. */
	if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) {
		intel_dp->common_rates[0] = 162000;
		intel_dp->num_common_rates = 1;
	}

	intel_dp_link_config_init(intel_dp);
}

static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
				       u8 lane_count)
{
	/*
	 * FIXME: we need to synchronize the current link parameters with
	 * hardware readout. Currently fast link training doesn't work on
	 * boot-up.
 */
	if (link_rate == 0 ||
	    link_rate > intel_dp->link.max_rate)
		return false;

	if (lane_count == 0 ||
	    lane_count > intel_dp_max_lane_count(intel_dp))
		return false;

	return true;
}

/* Scale the mode clock by the DSC/FEC overhead factor (result in the same units). */
u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
{
	return div_u64(mul_u32_u32(mode_clock, DP_DSC_FEC_OVERHEAD_FACTOR),
		       1000000U);
}

/* BW overhead in 1ppm units: the FEC overhead factor if FEC is on, else 1x. */
int intel_dp_bw_fec_overhead(bool fec_enabled)
{
	/*
	 * TODO: Calculate the actual overhead for a given mode.
	 * The hard-coded 1/0.972261=2.853% overhead factor
	 * corresponds (for instance) to the 8b/10b DP FEC 2.4% +
	 * 0.453% DSC overhead. This is enough for a 3840 width mode,
	 * which has a DSC overhead of up to ~0.2%, but may not be
	 * enough for a 1024 width mode where this is ~0.8% (on a 4
	 * lane DP link, with 2 DSC slices and 8 bpp color depth).
	 */
	return fec_enabled ? DP_DSC_FEC_OVERHEAD_FACTOR : 1000000;
}

/* Per-platform small joiner RAM size in bits. */
static int
small_joiner_ram_size_bits(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 13)
		return 17280 * 8;
	else if (DISPLAY_VER(i915) >= 11)
		return 7680 * 8;
	else
		return 6144 * 8;
}

/*
 * Clamp/snap @bpp to a DSC output bpp the HW supports; returns 0 if @bpp is
 * below the minimum supported value.
 */
u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 pipe_bpp)
{
	u32 bits_per_pixel = bpp;
	int i;

	/* Error out if the max bpp is less than smallest allowed valid bpp */
	if (bits_per_pixel < valid_dsc_bpp[0]) {
		drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
			    bits_per_pixel, valid_dsc_bpp[0]);
		return 0;
	}

	/* From XE_LPD onwards we support from bpc upto uncompressed bpp-1 BPPs */
	if (DISPLAY_VER(i915) >= 13) {
		bits_per_pixel = min(bits_per_pixel, pipe_bpp - 1);

		/*
		 * According to BSpec, 27 is the max DSC output bpp,
		 * 8 is the min DSC output bpp.
		 * While we can still clamp higher bpp values to 27, saving bandwidth,
		 * if it is required to compress up to bpp < 8, means we can't do
		 * that and probably means we can't fit the required mode, even with
		 * DSC enabled.
		 */
		if (bits_per_pixel < 8) {
			drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min 8\n",
				    bits_per_pixel);
			return 0;
		}
		bits_per_pixel = min_t(u32, bits_per_pixel, 27);
	} else {
		/* Find the nearest match in the array of known BPPs from VESA */
		for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
			if (bits_per_pixel < valid_dsc_bpp[i + 1])
				break;
		}
		drm_dbg_kms(&i915->drm, "Set dsc bpp from %d to VESA %d\n",
			    bits_per_pixel, valid_dsc_bpp[i]);

		bits_per_pixel = valid_dsc_bpp[i];
	}

	return bits_per_pixel;
}

/* Max compressed bpp the (big/small) joiner RAM and interface BW allow. */
static
u32 get_max_compressed_bpp_with_joiner(struct drm_i915_private *i915,
				       u32 mode_clock, u32 mode_hdisplay,
				       bool bigjoiner)
{
	u32 max_bpp_small_joiner_ram;

	/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
	max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) / mode_hdisplay;

	if (bigjoiner) {
		int bigjoiner_interface_bits = DISPLAY_VER(i915) >= 14 ? 36 : 24;
		/* With bigjoiner multiple dsc engines are used in parallel so PPC is 2 */
		int ppc = 2;
		u32 max_bpp_bigjoiner =
			i915->display.cdclk.max_cdclk_freq * ppc * bigjoiner_interface_bits /
			intel_dp_mode_to_fec_clock(mode_clock);

		/* Each pipe's small joiner RAM covers half the line. */
		max_bpp_small_joiner_ram *= 2;

		return min(max_bpp_small_joiner_ram, max_bpp_bigjoiner);
	}

	return max_bpp_small_joiner_ram;
}

u16 intel_dp_dsc_get_max_compressed_bpp(struct drm_i915_private *i915,
					u32 link_clock, u32 lane_count,
					u32 mode_clock, u32 mode_hdisplay,
					bool bigjoiner,
					enum intel_output_format output_format,
					u32 pipe_bpp,
					u32 timeslots)
{
	u32 bits_per_pixel, joiner_max_bpp;

	/*
	 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
	 * (LinkSymbolClock)* 8 * (TimeSlots / 64)
	 * for SST -> TimeSlots is 64(i.e all TimeSlots that are available)
	 * for MST -> TimeSlots has to be calculated, based on mode requirements
	 *
	 * Due to FEC overhead, the available bw is reduced to 97.2261%.
	 * To support the given mode:
	 * Bandwidth required should be <= Available link Bandwidth * FEC Overhead
	 * =>ModeClock * bits_per_pixel <= Available Link Bandwidth * FEC Overhead
	 * =>bits_per_pixel <= Available link Bandwidth * FEC Overhead / ModeClock
	 * =>bits_per_pixel <= (NumberOfLanes * LinkSymbolClock) * 8 (TimeSlots / 64) /
	 *		       (ModeClock / FEC Overhead)
	 * =>bits_per_pixel <= (NumberOfLanes * LinkSymbolClock * TimeSlots) /
	 *		       (ModeClock / FEC Overhead * 8)
	 */
	bits_per_pixel = ((link_clock * lane_count) * timeslots) /
			 (intel_dp_mode_to_fec_clock(mode_clock) * 8);

	/* Bandwidth required for 420 is half, that of 444 format */
	if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		bits_per_pixel *= 2;

	/*
	 * According to DSC 1.2a Section 4.1.1 Table 4.1 the maximum
	 * supported PPS value can be 63.9375 and with the further
	 * mention that for 420, 422 formats, bpp should be programmed double
	 * the target bpp restricting our target bpp to be 31.9375 at max.
	 */
	if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		bits_per_pixel = min_t(u32, bits_per_pixel, 31);

	drm_dbg_kms(&i915->drm, "Max link bpp is %u for %u timeslots "
		    "total bw %u pixel clock %u\n",
		    bits_per_pixel, timeslots,
		    (link_clock * lane_count * 8),
		    intel_dp_mode_to_fec_clock(mode_clock));

	joiner_max_bpp = get_max_compressed_bpp_with_joiner(i915, mode_clock,
							    mode_hdisplay, bigjoiner);
	bits_per_pixel = min(bits_per_pixel, joiner_max_bpp);

	bits_per_pixel = intel_dp_dsc_nearest_valid_bpp(i915, bits_per_pixel, pipe_bpp);

	return bits_per_pixel;
}

/*
 * Pick the smallest valid DSC slice count satisfying the encoder throughput,
 * CDCLK headroom and sink max slice width limits; returns 0 if none fits.
 */
u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
				int mode_clock, int mode_hdisplay,
				bool bigjoiner)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	u8 min_slice_count, i;
	int max_slice_width;

	if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_0);
	else
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_1);

	/*
	 * Due to some DSC engine BW limitations, we need to enable second
	 * slice and VDSC engine, whenever we approach close enough to max CDCLK
	 */
	if (mode_clock >= ((i915->display.cdclk.max_cdclk_freq * 85) / 100))
		min_slice_count = max_t(u8, min_slice_count, 2);

	max_slice_width = drm_dp_dsc_sink_max_slice_width(connector->dp.dsc_dpcd);
	if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
		drm_dbg_kms(&i915->drm,
			    "Unsupported slice width %d by DP DSC Sink device\n",
			    max_slice_width);
		return 0;
	}
	/* Also take into account max slice width */
	min_slice_count = max_t(u8, min_slice_count,
				DIV_ROUND_UP(mode_hdisplay,
					     max_slice_width));

	/* Find the closest match to the valid slice count values */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
		/* Bigjoiner doubles the effective slice count. */
		u8 test_slice_count = valid_dsc_slicecount[i] << bigjoiner;

		if (test_slice_count >
		    drm_dp_dsc_sink_max_slice_count(connector->dp.dsc_dpcd, false))
			break;

		/* big joiner needs small joiner to be enabled */
		if (bigjoiner && test_slice_count < 4)
			continue;

		if (min_slice_count <= test_slice_count)
			return test_slice_count;
	}

	drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
		    min_slice_count);
	return 0;
}

/* Can the source (this platform) output the given pixel format at all? */
static bool source_can_output(struct intel_dp *intel_dp,
			      enum intel_output_format format)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	switch (format) {
	case INTEL_OUTPUT_FORMAT_RGB:
		return true;

	case INTEL_OUTPUT_FORMAT_YCBCR444:
		/*
		 * No YCbCr output support on gmch platforms.
		 * Also, ILK doesn't seem capable of DP YCbCr output.
		 * The displayed image is severely corrupted. SNB+ is fine.
		 */
		return !HAS_GMCH(i915) && !IS_IRONLAKE(i915);

	case INTEL_OUTPUT_FORMAT_YCBCR420:
		/* Platform < Gen 11 cannot output YCbCr420 format */
		return DISPLAY_VER(i915) >= 11;

	default:
		MISSING_CASE(format);
		return false;
	}
}

/* Can a branch device (DFP) convert an RGB source format to @sink_format? */
static bool
dfp_can_convert_from_rgb(struct intel_dp *intel_dp,
			 enum intel_output_format sink_format)
{
	if (!drm_dp_is_branch(intel_dp->dpcd))
		return false;

	if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR444)
		return intel_dp->dfp.rgb_to_ycbcr;

	if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		return intel_dp->dfp.rgb_to_ycbcr &&
		       intel_dp->dfp.ycbcr_444_to_420;

	return false;
}

/* Can a branch device (DFP) convert a YCbCr444 source format to @sink_format? */
static bool
dfp_can_convert_from_ycbcr444(struct intel_dp *intel_dp,
			      enum intel_output_format sink_format)
{
	if (!drm_dp_is_branch(intel_dp->dpcd))
		return false;

	if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		return intel_dp->dfp.ycbcr_444_to_420;

	return false;
}
1051 static bool 1052 dfp_can_convert(struct intel_dp *intel_dp, 1053 enum intel_output_format output_format, 1054 enum intel_output_format sink_format) 1055 { 1056 switch (output_format) { 1057 case INTEL_OUTPUT_FORMAT_RGB: 1058 return dfp_can_convert_from_rgb(intel_dp, sink_format); 1059 case INTEL_OUTPUT_FORMAT_YCBCR444: 1060 return dfp_can_convert_from_ycbcr444(intel_dp, sink_format); 1061 default: 1062 MISSING_CASE(output_format); 1063 return false; 1064 } 1065 1066 return false; 1067 } 1068 1069 static enum intel_output_format 1070 intel_dp_output_format(struct intel_connector *connector, 1071 enum intel_output_format sink_format) 1072 { 1073 struct intel_dp *intel_dp = intel_attached_dp(connector); 1074 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1075 enum intel_output_format force_dsc_output_format = 1076 intel_dp->force_dsc_output_format; 1077 enum intel_output_format output_format; 1078 if (force_dsc_output_format) { 1079 if (source_can_output(intel_dp, force_dsc_output_format) && 1080 (!drm_dp_is_branch(intel_dp->dpcd) || 1081 sink_format != force_dsc_output_format || 1082 dfp_can_convert(intel_dp, force_dsc_output_format, sink_format))) 1083 return force_dsc_output_format; 1084 1085 drm_dbg_kms(&i915->drm, "Cannot force DSC output format\n"); 1086 } 1087 1088 if (sink_format == INTEL_OUTPUT_FORMAT_RGB || 1089 dfp_can_convert_from_rgb(intel_dp, sink_format)) 1090 output_format = INTEL_OUTPUT_FORMAT_RGB; 1091 1092 else if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR444 || 1093 dfp_can_convert_from_ycbcr444(intel_dp, sink_format)) 1094 output_format = INTEL_OUTPUT_FORMAT_YCBCR444; 1095 1096 else 1097 output_format = INTEL_OUTPUT_FORMAT_YCBCR420; 1098 1099 drm_WARN_ON(&i915->drm, !source_can_output(intel_dp, output_format)); 1100 1101 return output_format; 1102 } 1103 1104 int intel_dp_min_bpp(enum intel_output_format output_format) 1105 { 1106 if (output_format == INTEL_OUTPUT_FORMAT_RGB) 1107 return 6 * 3; 1108 else 1109 return 8 * 3; 1110 } 1111 
1112 int intel_dp_output_bpp(enum intel_output_format output_format, int bpp) 1113 { 1114 /* 1115 * bpp value was assumed to RGB format. And YCbCr 4:2:0 output 1116 * format of the number of bytes per pixel will be half the number 1117 * of bytes of RGB pixel. 1118 */ 1119 if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 1120 bpp /= 2; 1121 1122 return bpp; 1123 } 1124 1125 static enum intel_output_format 1126 intel_dp_sink_format(struct intel_connector *connector, 1127 const struct drm_display_mode *mode) 1128 { 1129 const struct drm_display_info *info = &connector->base.display_info; 1130 1131 if (drm_mode_is_420_only(info, mode)) 1132 return INTEL_OUTPUT_FORMAT_YCBCR420; 1133 1134 return INTEL_OUTPUT_FORMAT_RGB; 1135 } 1136 1137 static int 1138 intel_dp_mode_min_output_bpp(struct intel_connector *connector, 1139 const struct drm_display_mode *mode) 1140 { 1141 enum intel_output_format output_format, sink_format; 1142 1143 sink_format = intel_dp_sink_format(connector, mode); 1144 1145 output_format = intel_dp_output_format(connector, sink_format); 1146 1147 return intel_dp_output_bpp(output_format, intel_dp_min_bpp(output_format)); 1148 } 1149 1150 static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv, 1151 int hdisplay) 1152 { 1153 /* 1154 * Older platforms don't like hdisplay==4096 with DP. 1155 * 1156 * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline 1157 * and frame counter increment), but we don't get vblank interrupts, 1158 * and the pipe underruns immediately. The link also doesn't seem 1159 * to get trained properly. 1160 * 1161 * On CHV the vblank interrupts don't seem to disappear but 1162 * otherwise the symptoms are similar. 
1163 * 1164 * TODO: confirm the behaviour on HSW+ 1165 */ 1166 return hdisplay == 4096 && !HAS_DDI(dev_priv); 1167 } 1168 1169 static int intel_dp_max_tmds_clock(struct intel_dp *intel_dp) 1170 { 1171 struct intel_connector *connector = intel_dp->attached_connector; 1172 const struct drm_display_info *info = &connector->base.display_info; 1173 int max_tmds_clock = intel_dp->dfp.max_tmds_clock; 1174 1175 /* Only consider the sink's max TMDS clock if we know this is a HDMI DFP */ 1176 if (max_tmds_clock && info->max_tmds_clock) 1177 max_tmds_clock = min(max_tmds_clock, info->max_tmds_clock); 1178 1179 return max_tmds_clock; 1180 } 1181 1182 static enum drm_mode_status 1183 intel_dp_tmds_clock_valid(struct intel_dp *intel_dp, 1184 int clock, int bpc, 1185 enum intel_output_format sink_format, 1186 bool respect_downstream_limits) 1187 { 1188 int tmds_clock, min_tmds_clock, max_tmds_clock; 1189 1190 if (!respect_downstream_limits) 1191 return MODE_OK; 1192 1193 tmds_clock = intel_hdmi_tmds_clock(clock, bpc, sink_format); 1194 1195 min_tmds_clock = intel_dp->dfp.min_tmds_clock; 1196 max_tmds_clock = intel_dp_max_tmds_clock(intel_dp); 1197 1198 if (min_tmds_clock && tmds_clock < min_tmds_clock) 1199 return MODE_CLOCK_LOW; 1200 1201 if (max_tmds_clock && tmds_clock > max_tmds_clock) 1202 return MODE_CLOCK_HIGH; 1203 1204 return MODE_OK; 1205 } 1206 1207 static enum drm_mode_status 1208 intel_dp_mode_valid_downstream(struct intel_connector *connector, 1209 const struct drm_display_mode *mode, 1210 int target_clock) 1211 { 1212 struct intel_dp *intel_dp = intel_attached_dp(connector); 1213 const struct drm_display_info *info = &connector->base.display_info; 1214 enum drm_mode_status status; 1215 enum intel_output_format sink_format; 1216 1217 /* If PCON supports FRL MODE, check FRL bandwidth constraints */ 1218 if (intel_dp->dfp.pcon_max_frl_bw) { 1219 int target_bw; 1220 int max_frl_bw; 1221 int bpp = intel_dp_mode_min_output_bpp(connector, mode); 1222 1223 target_bw = bpp 
* target_clock; 1224 1225 max_frl_bw = intel_dp->dfp.pcon_max_frl_bw; 1226 1227 /* converting bw from Gbps to Kbps*/ 1228 max_frl_bw = max_frl_bw * 1000000; 1229 1230 if (target_bw > max_frl_bw) 1231 return MODE_CLOCK_HIGH; 1232 1233 return MODE_OK; 1234 } 1235 1236 if (intel_dp->dfp.max_dotclock && 1237 target_clock > intel_dp->dfp.max_dotclock) 1238 return MODE_CLOCK_HIGH; 1239 1240 sink_format = intel_dp_sink_format(connector, mode); 1241 1242 /* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */ 1243 status = intel_dp_tmds_clock_valid(intel_dp, target_clock, 1244 8, sink_format, true); 1245 1246 if (status != MODE_OK) { 1247 if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420 || 1248 !connector->base.ycbcr_420_allowed || 1249 !drm_mode_is_420_also(info, mode)) 1250 return status; 1251 sink_format = INTEL_OUTPUT_FORMAT_YCBCR420; 1252 status = intel_dp_tmds_clock_valid(intel_dp, target_clock, 1253 8, sink_format, true); 1254 if (status != MODE_OK) 1255 return status; 1256 } 1257 1258 return MODE_OK; 1259 } 1260 1261 bool intel_dp_need_joiner(struct intel_dp *intel_dp, 1262 struct intel_connector *connector, 1263 int hdisplay, int clock) 1264 { 1265 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1266 1267 if (!intel_dp_has_joiner(intel_dp)) 1268 return false; 1269 1270 return clock > i915->display.cdclk.max_dotclk_freq || hdisplay > 5120 || 1271 connector->force_bigjoiner_enable; 1272 } 1273 1274 bool intel_dp_has_dsc(const struct intel_connector *connector) 1275 { 1276 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1277 1278 if (!HAS_DSC(i915)) 1279 return false; 1280 1281 if (connector->mst_port && !HAS_DSC_MST(i915)) 1282 return false; 1283 1284 if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP && 1285 connector->panel.vbt.edp.dsc_disable) 1286 return false; 1287 1288 if (!drm_dp_sink_supports_dsc(connector->dp.dsc_dpcd)) 1289 return false; 1290 1291 return true; 1292 } 1293 1294 static enum drm_mode_status 1295 
intel_dp_mode_valid(struct drm_connector *_connector, 1296 struct drm_display_mode *mode) 1297 { 1298 struct intel_connector *connector = to_intel_connector(_connector); 1299 struct intel_dp *intel_dp = intel_attached_dp(connector); 1300 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1301 const struct drm_display_mode *fixed_mode; 1302 int target_clock = mode->clock; 1303 int max_rate, mode_rate, max_lanes, max_link_clock; 1304 int max_dotclk = dev_priv->display.cdclk.max_dotclk_freq; 1305 u16 dsc_max_compressed_bpp = 0; 1306 u8 dsc_slice_count = 0; 1307 enum drm_mode_status status; 1308 bool dsc = false, joiner = false; 1309 1310 status = intel_cpu_transcoder_mode_valid(dev_priv, mode); 1311 if (status != MODE_OK) 1312 return status; 1313 1314 if (mode->flags & DRM_MODE_FLAG_DBLCLK) 1315 return MODE_H_ILLEGAL; 1316 1317 if (mode->clock < 10000) 1318 return MODE_CLOCK_LOW; 1319 1320 fixed_mode = intel_panel_fixed_mode(connector, mode); 1321 if (intel_dp_is_edp(intel_dp) && fixed_mode) { 1322 status = intel_panel_mode_valid(connector, mode); 1323 if (status != MODE_OK) 1324 return status; 1325 1326 target_clock = fixed_mode->clock; 1327 } 1328 1329 if (intel_dp_need_joiner(intel_dp, connector, 1330 mode->hdisplay, target_clock)) { 1331 joiner = true; 1332 max_dotclk *= 2; 1333 } 1334 if (target_clock > max_dotclk) 1335 return MODE_CLOCK_HIGH; 1336 1337 if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay)) 1338 return MODE_H_ILLEGAL; 1339 1340 max_link_clock = intel_dp_max_link_rate(intel_dp); 1341 max_lanes = intel_dp_max_lane_count(intel_dp); 1342 1343 max_rate = intel_dp_max_link_data_rate(intel_dp, max_link_clock, max_lanes); 1344 1345 mode_rate = intel_dp_link_required(target_clock, 1346 intel_dp_mode_min_output_bpp(connector, mode)); 1347 1348 if (intel_dp_has_dsc(connector)) { 1349 enum intel_output_format sink_format, output_format; 1350 int pipe_bpp; 1351 1352 sink_format = intel_dp_sink_format(connector, mode); 1353 output_format = 
intel_dp_output_format(connector, sink_format); 1354 /* 1355 * TBD pass the connector BPC, 1356 * for now U8_MAX so that max BPC on that platform would be picked 1357 */ 1358 pipe_bpp = intel_dp_dsc_compute_max_bpp(connector, U8_MAX); 1359 1360 /* 1361 * Output bpp is stored in 6.4 format so right shift by 4 to get the 1362 * integer value since we support only integer values of bpp. 1363 */ 1364 if (intel_dp_is_edp(intel_dp)) { 1365 dsc_max_compressed_bpp = 1366 drm_edp_dsc_sink_output_bpp(connector->dp.dsc_dpcd) >> 4; 1367 dsc_slice_count = 1368 drm_dp_dsc_sink_max_slice_count(connector->dp.dsc_dpcd, 1369 true); 1370 } else if (drm_dp_sink_supports_fec(connector->dp.fec_capability)) { 1371 dsc_max_compressed_bpp = 1372 intel_dp_dsc_get_max_compressed_bpp(dev_priv, 1373 max_link_clock, 1374 max_lanes, 1375 target_clock, 1376 mode->hdisplay, 1377 joiner, 1378 output_format, 1379 pipe_bpp, 64); 1380 dsc_slice_count = 1381 intel_dp_dsc_get_slice_count(connector, 1382 target_clock, 1383 mode->hdisplay, 1384 joiner); 1385 } 1386 1387 dsc = dsc_max_compressed_bpp && dsc_slice_count; 1388 } 1389 1390 if (intel_dp_joiner_needs_dsc(dev_priv, joiner) && !dsc) 1391 return MODE_CLOCK_HIGH; 1392 1393 if (mode_rate > max_rate && !dsc) 1394 return MODE_CLOCK_HIGH; 1395 1396 status = intel_dp_mode_valid_downstream(connector, mode, target_clock); 1397 if (status != MODE_OK) 1398 return status; 1399 1400 return intel_mode_valid_max_plane_size(dev_priv, mode, joiner); 1401 } 1402 1403 bool intel_dp_source_supports_tps3(struct drm_i915_private *i915) 1404 { 1405 return DISPLAY_VER(i915) >= 9 || IS_BROADWELL(i915) || IS_HASWELL(i915); 1406 } 1407 1408 bool intel_dp_source_supports_tps4(struct drm_i915_private *i915) 1409 { 1410 return DISPLAY_VER(i915) >= 10; 1411 } 1412 1413 static void snprintf_int_array(char *str, size_t len, 1414 const int *array, int nelem) 1415 { 1416 int i; 1417 1418 str[0] = '\0'; 1419 1420 for (i = 0; i < nelem; i++) { 1421 int r = snprintf(str, len, "%s%d", 
i ? ", " : "", array[i]); 1422 if (r >= len) 1423 return; 1424 str += r; 1425 len -= r; 1426 } 1427 } 1428 1429 static void intel_dp_print_rates(struct intel_dp *intel_dp) 1430 { 1431 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1432 char str[128]; /* FIXME: too big for stack? */ 1433 1434 if (!drm_debug_enabled(DRM_UT_KMS)) 1435 return; 1436 1437 snprintf_int_array(str, sizeof(str), 1438 intel_dp->source_rates, intel_dp->num_source_rates); 1439 drm_dbg_kms(&i915->drm, "source rates: %s\n", str); 1440 1441 snprintf_int_array(str, sizeof(str), 1442 intel_dp->sink_rates, intel_dp->num_sink_rates); 1443 drm_dbg_kms(&i915->drm, "sink rates: %s\n", str); 1444 1445 snprintf_int_array(str, sizeof(str), 1446 intel_dp->common_rates, intel_dp->num_common_rates); 1447 drm_dbg_kms(&i915->drm, "common rates: %s\n", str); 1448 } 1449 1450 static int forced_link_rate(struct intel_dp *intel_dp) 1451 { 1452 int len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->link.force_rate); 1453 1454 if (len == 0) 1455 return intel_dp_common_rate(intel_dp, 0); 1456 1457 return intel_dp_common_rate(intel_dp, len - 1); 1458 } 1459 1460 int 1461 intel_dp_max_link_rate(struct intel_dp *intel_dp) 1462 { 1463 int len; 1464 1465 if (intel_dp->link.force_rate) 1466 return forced_link_rate(intel_dp); 1467 1468 len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->link.max_rate); 1469 1470 return intel_dp_common_rate(intel_dp, len - 1); 1471 } 1472 1473 static int 1474 intel_dp_min_link_rate(struct intel_dp *intel_dp) 1475 { 1476 if (intel_dp->link.force_rate) 1477 return forced_link_rate(intel_dp); 1478 1479 return intel_dp_common_rate(intel_dp, 0); 1480 } 1481 1482 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate) 1483 { 1484 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1485 int i = intel_dp_rate_index(intel_dp->sink_rates, 1486 intel_dp->num_sink_rates, rate); 1487 1488 if (drm_WARN_ON(&i915->drm, i < 0)) 1489 i = 0; 1490 1491 return i; 1492 } 1493 1494 void 
intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, 1495 u8 *link_bw, u8 *rate_select) 1496 { 1497 /* eDP 1.4 rate select method. */ 1498 if (intel_dp->use_rate_select) { 1499 *link_bw = 0; 1500 *rate_select = 1501 intel_dp_rate_select(intel_dp, port_clock); 1502 } else { 1503 *link_bw = drm_dp_link_rate_to_bw_code(port_clock); 1504 *rate_select = 0; 1505 } 1506 } 1507 1508 bool intel_dp_has_hdmi_sink(struct intel_dp *intel_dp) 1509 { 1510 struct intel_connector *connector = intel_dp->attached_connector; 1511 1512 return connector->base.display_info.is_hdmi; 1513 } 1514 1515 static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp, 1516 const struct intel_crtc_state *pipe_config) 1517 { 1518 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 1519 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1520 1521 if (DISPLAY_VER(dev_priv) >= 12) 1522 return true; 1523 1524 if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A && 1525 !intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) 1526 return true; 1527 1528 return false; 1529 } 1530 1531 bool intel_dp_supports_fec(struct intel_dp *intel_dp, 1532 const struct intel_connector *connector, 1533 const struct intel_crtc_state *pipe_config) 1534 { 1535 return intel_dp_source_supports_fec(intel_dp, pipe_config) && 1536 drm_dp_sink_supports_fec(connector->dp.fec_capability); 1537 } 1538 1539 bool intel_dp_supports_dsc(const struct intel_connector *connector, 1540 const struct intel_crtc_state *crtc_state) 1541 { 1542 if (!intel_dp_has_dsc(connector)) 1543 return false; 1544 1545 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP) && !crtc_state->fec_enable) 1546 return false; 1547 1548 return intel_dsc_source_support(crtc_state); 1549 } 1550 1551 static int intel_dp_hdmi_compute_bpc(struct intel_dp *intel_dp, 1552 const struct intel_crtc_state *crtc_state, 1553 int bpc, bool respect_downstream_limits) 1554 { 1555 int clock = crtc_state->hw.adjusted_mode.crtc_clock; 1556 
1557 /* 1558 * Current bpc could already be below 8bpc due to 1559 * FDI bandwidth constraints or other limits. 1560 * HDMI minimum is 8bpc however. 1561 */ 1562 bpc = max(bpc, 8); 1563 1564 /* 1565 * We will never exceed downstream TMDS clock limits while 1566 * attempting deep color. If the user insists on forcing an 1567 * out of spec mode they will have to be satisfied with 8bpc. 1568 */ 1569 if (!respect_downstream_limits) 1570 bpc = 8; 1571 1572 for (; bpc >= 8; bpc -= 2) { 1573 if (intel_hdmi_bpc_possible(crtc_state, bpc, 1574 intel_dp_has_hdmi_sink(intel_dp)) && 1575 intel_dp_tmds_clock_valid(intel_dp, clock, bpc, crtc_state->sink_format, 1576 respect_downstream_limits) == MODE_OK) 1577 return bpc; 1578 } 1579 1580 return -EINVAL; 1581 } 1582 1583 static int intel_dp_max_bpp(struct intel_dp *intel_dp, 1584 const struct intel_crtc_state *crtc_state, 1585 bool respect_downstream_limits) 1586 { 1587 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1588 struct intel_connector *intel_connector = intel_dp->attached_connector; 1589 int bpp, bpc; 1590 1591 bpc = crtc_state->pipe_bpp / 3; 1592 1593 if (intel_dp->dfp.max_bpc) 1594 bpc = min_t(int, bpc, intel_dp->dfp.max_bpc); 1595 1596 if (intel_dp->dfp.min_tmds_clock) { 1597 int max_hdmi_bpc; 1598 1599 max_hdmi_bpc = intel_dp_hdmi_compute_bpc(intel_dp, crtc_state, bpc, 1600 respect_downstream_limits); 1601 if (max_hdmi_bpc < 0) 1602 return 0; 1603 1604 bpc = min(bpc, max_hdmi_bpc); 1605 } 1606 1607 bpp = bpc * 3; 1608 if (intel_dp_is_edp(intel_dp)) { 1609 /* Get bpp from vbt only for panels that dont have bpp in edid */ 1610 if (intel_connector->base.display_info.bpc == 0 && 1611 intel_connector->panel.vbt.edp.bpp && 1612 intel_connector->panel.vbt.edp.bpp < bpp) { 1613 drm_dbg_kms(&dev_priv->drm, 1614 "clamping bpp for eDP panel to BIOS-provided %i\n", 1615 intel_connector->panel.vbt.edp.bpp); 1616 bpp = intel_connector->panel.vbt.edp.bpp; 1617 } 1618 } 1619 1620 return bpp; 1621 } 1622 1623 /* Adjust link 
config limits based on compliance test requests. */ 1624 void 1625 intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, 1626 struct intel_crtc_state *pipe_config, 1627 struct link_config_limits *limits) 1628 { 1629 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1630 1631 /* For DP Compliance we override the computed bpp for the pipe */ 1632 if (intel_dp->compliance.test_data.bpc != 0) { 1633 int bpp = 3 * intel_dp->compliance.test_data.bpc; 1634 1635 limits->pipe.min_bpp = limits->pipe.max_bpp = bpp; 1636 pipe_config->dither_force_disable = bpp == 6 * 3; 1637 1638 drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp); 1639 } 1640 1641 /* Use values requested by Compliance Test Request */ 1642 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { 1643 int index; 1644 1645 /* Validate the compliance test data since max values 1646 * might have changed due to link train fallback. 1647 */ 1648 if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate, 1649 intel_dp->compliance.test_lane_count)) { 1650 index = intel_dp_rate_index(intel_dp->common_rates, 1651 intel_dp->num_common_rates, 1652 intel_dp->compliance.test_link_rate); 1653 if (index >= 0) 1654 limits->min_rate = limits->max_rate = 1655 intel_dp->compliance.test_link_rate; 1656 limits->min_lane_count = limits->max_lane_count = 1657 intel_dp->compliance.test_lane_count; 1658 } 1659 } 1660 } 1661 1662 static bool has_seamless_m_n(struct intel_connector *connector) 1663 { 1664 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1665 1666 /* 1667 * Seamless M/N reprogramming only implemented 1668 * for BDW+ double buffered M/N registers so far. 
1669 */ 1670 return HAS_DOUBLE_BUFFERED_M_N(i915) && 1671 intel_panel_drrs_type(connector) == DRRS_TYPE_SEAMLESS; 1672 } 1673 1674 static int intel_dp_mode_clock(const struct intel_crtc_state *crtc_state, 1675 const struct drm_connector_state *conn_state) 1676 { 1677 struct intel_connector *connector = to_intel_connector(conn_state->connector); 1678 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 1679 1680 /* FIXME a bit of a mess wrt clock vs. crtc_clock */ 1681 if (has_seamless_m_n(connector)) 1682 return intel_panel_highest_mode(connector, adjusted_mode)->clock; 1683 else 1684 return adjusted_mode->crtc_clock; 1685 } 1686 1687 /* Optimize link config in order: max bpp, min clock, min lanes */ 1688 static int 1689 intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, 1690 struct intel_crtc_state *pipe_config, 1691 const struct drm_connector_state *conn_state, 1692 const struct link_config_limits *limits) 1693 { 1694 int bpp, i, lane_count, clock = intel_dp_mode_clock(pipe_config, conn_state); 1695 int mode_rate, link_rate, link_avail; 1696 1697 for (bpp = fxp_q4_to_int(limits->link.max_bpp_x16); 1698 bpp >= fxp_q4_to_int(limits->link.min_bpp_x16); 1699 bpp -= 2 * 3) { 1700 int link_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp); 1701 1702 mode_rate = intel_dp_link_required(clock, link_bpp); 1703 1704 for (i = 0; i < intel_dp->num_common_rates; i++) { 1705 link_rate = intel_dp_common_rate(intel_dp, i); 1706 if (link_rate < limits->min_rate || 1707 link_rate > limits->max_rate) 1708 continue; 1709 1710 for (lane_count = limits->min_lane_count; 1711 lane_count <= limits->max_lane_count; 1712 lane_count <<= 1) { 1713 link_avail = intel_dp_max_link_data_rate(intel_dp, 1714 link_rate, 1715 lane_count); 1716 1717 1718 if (mode_rate <= link_avail) { 1719 pipe_config->lane_count = lane_count; 1720 pipe_config->pipe_bpp = bpp; 1721 pipe_config->port_clock = link_rate; 1722 1723 return 0; 1724 } 1725 } 1726 } 1727 } 1728 1729 
return -EINVAL; 1730 } 1731 1732 static 1733 u8 intel_dp_dsc_max_src_input_bpc(struct drm_i915_private *i915) 1734 { 1735 /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */ 1736 if (DISPLAY_VER(i915) >= 12) 1737 return 12; 1738 if (DISPLAY_VER(i915) == 11) 1739 return 10; 1740 1741 return 0; 1742 } 1743 1744 int intel_dp_dsc_compute_max_bpp(const struct intel_connector *connector, 1745 u8 max_req_bpc) 1746 { 1747 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1748 int i, num_bpc; 1749 u8 dsc_bpc[3] = {}; 1750 u8 dsc_max_bpc; 1751 1752 dsc_max_bpc = intel_dp_dsc_max_src_input_bpc(i915); 1753 1754 if (!dsc_max_bpc) 1755 return dsc_max_bpc; 1756 1757 dsc_max_bpc = min_t(u8, dsc_max_bpc, max_req_bpc); 1758 1759 num_bpc = drm_dp_dsc_sink_supported_input_bpcs(connector->dp.dsc_dpcd, 1760 dsc_bpc); 1761 for (i = 0; i < num_bpc; i++) { 1762 if (dsc_max_bpc >= dsc_bpc[i]) 1763 return dsc_bpc[i] * 3; 1764 } 1765 1766 return 0; 1767 } 1768 1769 static int intel_dp_source_dsc_version_minor(struct drm_i915_private *i915) 1770 { 1771 return DISPLAY_VER(i915) >= 14 ? 2 : 1; 1772 } 1773 1774 static int intel_dp_sink_dsc_version_minor(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) 1775 { 1776 return (dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & DP_DSC_MINOR_MASK) >> 1777 DP_DSC_MINOR_SHIFT; 1778 } 1779 1780 static int intel_dp_get_slice_height(int vactive) 1781 { 1782 int slice_height; 1783 1784 /* 1785 * VDSC 1.2a spec in Section 3.8 Options for Slices implies that 108 1786 * lines is an optimal slice height, but any size can be used as long as 1787 * vertical active integer multiple and maximum vertical slice count 1788 * requirements are met. 
1789 */ 1790 for (slice_height = 108; slice_height <= vactive; slice_height += 2) 1791 if (vactive % slice_height == 0) 1792 return slice_height; 1793 1794 /* 1795 * Highly unlikely we reach here as most of the resolutions will end up 1796 * finding appropriate slice_height in above loop but returning 1797 * slice_height as 2 here as it should work with all resolutions. 1798 */ 1799 return 2; 1800 } 1801 1802 static int intel_dp_dsc_compute_params(const struct intel_connector *connector, 1803 struct intel_crtc_state *crtc_state) 1804 { 1805 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1806 struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; 1807 int ret; 1808 1809 /* 1810 * RC_MODEL_SIZE is currently a constant across all configurations. 1811 * 1812 * FIXME: Look into using sink defined DPCD DP_DSC_RC_BUF_BLK_SIZE and 1813 * DP_DSC_RC_BUF_SIZE for this. 1814 */ 1815 vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST; 1816 vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay; 1817 1818 vdsc_cfg->slice_height = intel_dp_get_slice_height(vdsc_cfg->pic_height); 1819 1820 ret = intel_dsc_compute_params(crtc_state); 1821 if (ret) 1822 return ret; 1823 1824 vdsc_cfg->dsc_version_major = 1825 (connector->dp.dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & 1826 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT; 1827 vdsc_cfg->dsc_version_minor = 1828 min(intel_dp_source_dsc_version_minor(i915), 1829 intel_dp_sink_dsc_version_minor(connector->dp.dsc_dpcd)); 1830 if (vdsc_cfg->convert_rgb) 1831 vdsc_cfg->convert_rgb = 1832 connector->dp.dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] & 1833 DP_DSC_RGB; 1834 1835 vdsc_cfg->line_buf_depth = min(INTEL_DP_DSC_MAX_LINE_BUF_DEPTH, 1836 drm_dp_dsc_sink_line_buf_depth(connector->dp.dsc_dpcd)); 1837 if (!vdsc_cfg->line_buf_depth) { 1838 drm_dbg_kms(&i915->drm, 1839 "DSC Sink Line Buffer Depth invalid\n"); 1840 return -EINVAL; 1841 } 1842 1843 vdsc_cfg->block_pred_enable = 1844 
connector->dp.dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] & 1845 DP_DSC_BLK_PREDICTION_IS_SUPPORTED; 1846 1847 return drm_dsc_compute_rc_parameters(vdsc_cfg); 1848 } 1849 1850 static bool intel_dp_dsc_supports_format(const struct intel_connector *connector, 1851 enum intel_output_format output_format) 1852 { 1853 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1854 u8 sink_dsc_format; 1855 1856 switch (output_format) { 1857 case INTEL_OUTPUT_FORMAT_RGB: 1858 sink_dsc_format = DP_DSC_RGB; 1859 break; 1860 case INTEL_OUTPUT_FORMAT_YCBCR444: 1861 sink_dsc_format = DP_DSC_YCbCr444; 1862 break; 1863 case INTEL_OUTPUT_FORMAT_YCBCR420: 1864 if (min(intel_dp_source_dsc_version_minor(i915), 1865 intel_dp_sink_dsc_version_minor(connector->dp.dsc_dpcd)) < 2) 1866 return false; 1867 sink_dsc_format = DP_DSC_YCbCr420_Native; 1868 break; 1869 default: 1870 return false; 1871 } 1872 1873 return drm_dp_dsc_sink_supports_format(connector->dp.dsc_dpcd, sink_dsc_format); 1874 } 1875 1876 static bool is_bw_sufficient_for_dsc_config(u16 compressed_bppx16, u32 link_clock, 1877 u32 lane_count, u32 mode_clock, 1878 enum intel_output_format output_format, 1879 int timeslots) 1880 { 1881 u32 available_bw, required_bw; 1882 1883 available_bw = (link_clock * lane_count * timeslots * 16) / 8; 1884 required_bw = compressed_bppx16 * (intel_dp_mode_to_fec_clock(mode_clock)); 1885 1886 return available_bw > required_bw; 1887 } 1888 1889 static int dsc_compute_link_config(struct intel_dp *intel_dp, 1890 struct intel_crtc_state *pipe_config, 1891 struct link_config_limits *limits, 1892 u16 compressed_bppx16, 1893 int timeslots) 1894 { 1895 const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 1896 int link_rate, lane_count; 1897 int i; 1898 1899 for (i = 0; i < intel_dp->num_common_rates; i++) { 1900 link_rate = intel_dp_common_rate(intel_dp, i); 1901 if (link_rate < limits->min_rate || link_rate > limits->max_rate) 1902 continue; 1903 1904 for 
(lane_count = limits->min_lane_count; 1905 lane_count <= limits->max_lane_count; 1906 lane_count <<= 1) { 1907 if (!is_bw_sufficient_for_dsc_config(compressed_bppx16, link_rate, 1908 lane_count, adjusted_mode->clock, 1909 pipe_config->output_format, 1910 timeslots)) 1911 continue; 1912 1913 pipe_config->lane_count = lane_count; 1914 pipe_config->port_clock = link_rate; 1915 1916 return 0; 1917 } 1918 } 1919 1920 return -EINVAL; 1921 } 1922 1923 static 1924 u16 intel_dp_dsc_max_sink_compressed_bppx16(const struct intel_connector *connector, 1925 struct intel_crtc_state *pipe_config, 1926 int bpc) 1927 { 1928 u16 max_bppx16 = drm_edp_dsc_sink_output_bpp(connector->dp.dsc_dpcd); 1929 1930 if (max_bppx16) 1931 return max_bppx16; 1932 /* 1933 * If support not given in DPCD 67h, 68h use the Maximum Allowed bit rate 1934 * values as given in spec Table 2-157 DP v2.0 1935 */ 1936 switch (pipe_config->output_format) { 1937 case INTEL_OUTPUT_FORMAT_RGB: 1938 case INTEL_OUTPUT_FORMAT_YCBCR444: 1939 return (3 * bpc) << 4; 1940 case INTEL_OUTPUT_FORMAT_YCBCR420: 1941 return (3 * (bpc / 2)) << 4; 1942 default: 1943 MISSING_CASE(pipe_config->output_format); 1944 break; 1945 } 1946 1947 return 0; 1948 } 1949 1950 int intel_dp_dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config) 1951 { 1952 /* From Mandatory bit rate range Support Table 2-157 (DP v2.0) */ 1953 switch (pipe_config->output_format) { 1954 case INTEL_OUTPUT_FORMAT_RGB: 1955 case INTEL_OUTPUT_FORMAT_YCBCR444: 1956 return 8; 1957 case INTEL_OUTPUT_FORMAT_YCBCR420: 1958 return 6; 1959 default: 1960 MISSING_CASE(pipe_config->output_format); 1961 break; 1962 } 1963 1964 return 0; 1965 } 1966 1967 int intel_dp_dsc_sink_max_compressed_bpp(const struct intel_connector *connector, 1968 struct intel_crtc_state *pipe_config, 1969 int bpc) 1970 { 1971 return intel_dp_dsc_max_sink_compressed_bppx16(connector, 1972 pipe_config, bpc) >> 4; 1973 } 1974 1975 static int dsc_src_min_compressed_bpp(void) 1976 { 1977 /* Min 
Compressed bpp supported by source is 8 */ 1978 return 8; 1979 } 1980 1981 static int dsc_src_max_compressed_bpp(struct intel_dp *intel_dp) 1982 { 1983 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1984 1985 /* 1986 * Max Compressed bpp for Gen 13+ is 27bpp. 1987 * For earlier platform is 23bpp. (Bspec:49259). 1988 */ 1989 if (DISPLAY_VER(i915) < 13) 1990 return 23; 1991 else 1992 return 27; 1993 } 1994 1995 /* 1996 * From a list of valid compressed bpps try different compressed bpp and find a 1997 * suitable link configuration that can support it. 1998 */ 1999 static int 2000 icl_dsc_compute_link_config(struct intel_dp *intel_dp, 2001 struct intel_crtc_state *pipe_config, 2002 struct link_config_limits *limits, 2003 int dsc_max_bpp, 2004 int dsc_min_bpp, 2005 int pipe_bpp, 2006 int timeslots) 2007 { 2008 int i, ret; 2009 2010 /* Compressed BPP should be less than the Input DSC bpp */ 2011 dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1); 2012 2013 for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp); i++) { 2014 if (valid_dsc_bpp[i] < dsc_min_bpp) 2015 continue; 2016 if (valid_dsc_bpp[i] > dsc_max_bpp) 2017 break; 2018 2019 ret = dsc_compute_link_config(intel_dp, 2020 pipe_config, 2021 limits, 2022 valid_dsc_bpp[i] << 4, 2023 timeslots); 2024 if (ret == 0) { 2025 pipe_config->dsc.compressed_bpp_x16 = 2026 fxp_q4_from_int(valid_dsc_bpp[i]); 2027 return 0; 2028 } 2029 } 2030 2031 return -EINVAL; 2032 } 2033 2034 /* 2035 * From XE_LPD onwards we supports compression bpps in steps of 1 up to 2036 * uncompressed bpp-1. So we start from max compressed bpp and see if any 2037 * link configuration is able to support that compressed bpp, if not we 2038 * step down and check for lower compressed bpp. 
 */
static int
xelpd_dsc_compute_link_config(struct intel_dp *intel_dp,
			      const struct intel_connector *connector,
			      struct intel_crtc_state *pipe_config,
			      struct link_config_limits *limits,
			      int dsc_max_bpp,
			      int dsc_min_bpp,
			      int pipe_bpp,
			      int timeslots)
{
	u8 bppx16_incr = drm_dp_dsc_sink_bpp_incr(connector->dp.dsc_dpcd);
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u16 compressed_bppx16;
	u8 bppx16_step;
	int ret;

	/*
	 * All bpp values below are in .4 fixed point (x16). Step in whole
	 * bpp units (16) unless the platform is display 14+ and the sink
	 * reports a finer bpp increment (bppx16_incr steps per 1 bpp).
	 */
	if (DISPLAY_VER(i915) < 14 || bppx16_incr <= 1)
		bppx16_step = 16;
	else
		bppx16_step = 16 / bppx16_incr;

	/* Compressed BPP should be less than the Input DSC bpp */
	dsc_max_bpp = min(dsc_max_bpp << 4, (pipe_bpp << 4) - bppx16_step);
	dsc_min_bpp = dsc_min_bpp << 4;

	/* Walk down from the max compressed bpp until a link config fits. */
	for (compressed_bppx16 = dsc_max_bpp;
	     compressed_bppx16 >= dsc_min_bpp;
	     compressed_bppx16 -= bppx16_step) {
		/* Debugfs knob: skip integer bpp values when fractional bpp is forced. */
		if (intel_dp->force_dsc_fractional_bpp_en &&
		    !fxp_q4_to_frac(compressed_bppx16))
			continue;
		ret = dsc_compute_link_config(intel_dp,
					      pipe_config,
					      limits,
					      compressed_bppx16,
					      timeslots);
		if (ret == 0) {
			pipe_config->dsc.compressed_bpp_x16 = compressed_bppx16;
			if (intel_dp->force_dsc_fractional_bpp_en &&
			    fxp_q4_to_frac(compressed_bppx16))
				drm_dbg_kms(&i915->drm, "Forcing DSC fractional bpp\n");

			return 0;
		}
	}
	return -EINVAL;
}

/*
 * Compute a DSC compressed bpp and matching link configuration: derive the
 * min/max compressed bpp range from source, sink, link and joiner limits,
 * then delegate to the platform-specific (ICL vs. XeLPD+) search helper.
 */
static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp,
				      const struct intel_connector *connector,
				      struct intel_crtc_state *pipe_config,
				      struct link_config_limits *limits,
				      int pipe_bpp,
				      int timeslots)
{
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int dsc_src_min_bpp, dsc_sink_min_bpp, dsc_min_bpp;
	int dsc_src_max_bpp, dsc_sink_max_bpp, dsc_max_bpp;
	int dsc_joiner_max_bpp;

	dsc_src_min_bpp = dsc_src_min_compressed_bpp();
	dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config);
	dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp);
	dsc_min_bpp = max(dsc_min_bpp, fxp_q4_to_int_roundup(limits->link.min_bpp_x16));

	dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp);
	dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
								pipe_config,
								pipe_bpp / 3);
	/* A zero sink limit means the sink reported no maximum. */
	dsc_max_bpp = dsc_sink_max_bpp ? min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp;

	dsc_joiner_max_bpp = get_max_compressed_bpp_with_joiner(i915, adjusted_mode->clock,
								adjusted_mode->hdisplay,
								pipe_config->joiner_pipes);
	dsc_max_bpp = min(dsc_max_bpp, dsc_joiner_max_bpp);
	dsc_max_bpp = min(dsc_max_bpp, fxp_q4_to_int(limits->link.max_bpp_x16));

	if (DISPLAY_VER(i915) >= 13)
		return xelpd_dsc_compute_link_config(intel_dp, connector, pipe_config, limits,
						     dsc_max_bpp, dsc_min_bpp, pipe_bpp, timeslots);
	return icl_dsc_compute_link_config(intel_dp, pipe_config, limits,
					   dsc_max_bpp, dsc_min_bpp, pipe_bpp, timeslots);
}

/* Minimum DSC input bpc supported by the source; 0 when DSC is unsupported. */
static
u8 intel_dp_dsc_min_src_input_bpc(struct drm_i915_private *i915)
{
	/* Min DSC Input BPC for ICL+ is 8 */
	return HAS_DSC(i915) ?
8 : 0;
}

/*
 * Check that @pipe_bpp lies within both the pipe bpp limits and the source's
 * DSC input bpc range, clamped by the connector's max_requested_bpc property.
 */
static
bool is_dsc_pipe_bpp_sufficient(struct drm_i915_private *i915,
				struct drm_connector_state *conn_state,
				struct link_config_limits *limits,
				int pipe_bpp)
{
	u8 dsc_max_bpc, dsc_min_bpc, dsc_max_pipe_bpp, dsc_min_pipe_bpp;

	dsc_max_bpc = min(intel_dp_dsc_max_src_input_bpc(i915), conn_state->max_requested_bpc);
	dsc_min_bpc = intel_dp_dsc_min_src_input_bpc(i915);

	dsc_max_pipe_bpp = min(dsc_max_bpc * 3, limits->pipe.max_bpp);
	dsc_min_pipe_bpp = max(dsc_min_bpc * 3, limits->pipe.min_bpp);

	return pipe_bpp >= dsc_min_pipe_bpp &&
	       pipe_bpp <= dsc_max_pipe_bpp;
}

/*
 * Honour a debugfs-forced DSC input bpc when it is within the valid range.
 * Returns the forced pipe bpp (bpc * 3), or 0 when nothing is forced or the
 * forced value is out of range.
 */
static
int intel_dp_force_dsc_pipe_bpp(struct intel_dp *intel_dp,
				struct drm_connector_state *conn_state,
				struct link_config_limits *limits)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int forced_bpp;

	if (!intel_dp->force_dsc_bpc)
		return 0;

	forced_bpp = intel_dp->force_dsc_bpc * 3;

	if (is_dsc_pipe_bpp_sufficient(i915, conn_state, limits, forced_bpp)) {
		drm_dbg_kms(&i915->drm, "Input DSC BPC forced to %d\n", intel_dp->force_dsc_bpc);
		return forced_bpp;
	}

	drm_dbg_kms(&i915->drm, "Cannot force DSC BPC:%d, due to DSC BPC limits\n",
		    intel_dp->force_dsc_bpc);

	return 0;
}

/*
 * Find a pipe (DSC input) bpp for which a valid compressed bpp and link
 * configuration exists, trying a forced bpp first, then each sink-supported
 * input bpc from highest to lowest.
 */
static int intel_dp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
					 struct intel_crtc_state *pipe_config,
					 struct drm_connector_state *conn_state,
					 struct link_config_limits *limits,
					 int timeslots)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	const struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	u8 max_req_bpc = conn_state->max_requested_bpc;
	u8 dsc_max_bpc, dsc_max_bpp;
	u8 dsc_min_bpc, dsc_min_bpp;
	u8 dsc_bpc[3] = {};
	int forced_bpp, pipe_bpp;
	int num_bpc, i, ret;

	forced_bpp = intel_dp_force_dsc_pipe_bpp(intel_dp, conn_state, limits);

	if (forced_bpp) {
		ret = dsc_compute_compressed_bpp(intel_dp, connector, pipe_config,
						 limits, forced_bpp, timeslots);
		if (ret == 0) {
			pipe_config->pipe_bpp = forced_bpp;
			return 0;
		}
	}

	dsc_max_bpc = intel_dp_dsc_max_src_input_bpc(i915);
	if (!dsc_max_bpc)
		return -EINVAL;

	dsc_max_bpc = min_t(u8, dsc_max_bpc, max_req_bpc);
	dsc_max_bpp = min(dsc_max_bpc * 3, limits->pipe.max_bpp);

	dsc_min_bpc = intel_dp_dsc_min_src_input_bpc(i915);
	dsc_min_bpp = max(dsc_min_bpc * 3, limits->pipe.min_bpp);

	/*
	 * Get the maximum DSC bpc that will be supported by any valid
	 * link configuration and compressed bpp.
	 */
	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(connector->dp.dsc_dpcd, dsc_bpc);
	/*
	 * NOTE(review): the break below assumes dsc_bpc[] is ordered
	 * high-to-low — confirm against drm_dp_dsc_sink_supported_input_bpcs().
	 */
	for (i = 0; i < num_bpc; i++) {
		pipe_bpp = dsc_bpc[i] * 3;
		if (pipe_bpp < dsc_min_bpp)
			break;
		if (pipe_bpp > dsc_max_bpp)
			continue;
		ret = dsc_compute_compressed_bpp(intel_dp, connector, pipe_config,
						 limits, pipe_bpp, timeslots);
		if (ret == 0) {
			pipe_config->pipe_bpp = pipe_bpp;
			return 0;
		}
	}

	return -EINVAL;
}

/*
 * eDP variant of the pipe bpp computation: picks the max DSC-capable bpp
 * (or a forced value) rather than iterating over link configurations.
 */
static int intel_edp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
					  struct intel_crtc_state *pipe_config,
					  struct drm_connector_state *conn_state,
					  struct link_config_limits *limits)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	int pipe_bpp, forced_bpp;
	int dsc_src_min_bpp, dsc_sink_min_bpp, dsc_min_bpp;
	int dsc_src_max_bpp, dsc_sink_max_bpp, dsc_max_bpp;

	forced_bpp = intel_dp_force_dsc_pipe_bpp(intel_dp, conn_state, limits);

	if (forced_bpp) {
		pipe_bpp = forced_bpp;
	} else {
		int max_bpc = min(limits->pipe.max_bpp / 3, (int)conn_state->max_requested_bpc);

		/* For eDP use max bpp that can be supported with DSC. */
		pipe_bpp = intel_dp_dsc_compute_max_bpp(connector, max_bpc);
		if (!is_dsc_pipe_bpp_sufficient(i915, conn_state, limits, pipe_bpp)) {
			drm_dbg_kms(&i915->drm,
				    "Computed BPC is not in DSC BPC limits\n");
			return -EINVAL;
		}
	}
	/* eDP with DSC always uses the maximum link parameters. */
	pipe_config->port_clock = limits->max_rate;
	pipe_config->lane_count = limits->max_lane_count;

	dsc_src_min_bpp = dsc_src_min_compressed_bpp();
	dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config);
	dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp);
	dsc_min_bpp = max(dsc_min_bpp, fxp_q4_to_int_roundup(limits->link.min_bpp_x16));

	dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp);
	dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
								pipe_config,
								pipe_bpp / 3);
	/* A zero sink limit means the sink reported no maximum. */
	dsc_max_bpp = dsc_sink_max_bpp ? min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp;
	dsc_max_bpp = min(dsc_max_bpp, fxp_q4_to_int(limits->link.max_bpp_x16));

	/* Compressed BPP should be less than the Input DSC bpp */
	dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);

	pipe_config->dsc.compressed_bpp_x16 =
		fxp_q4_from_int(max(dsc_min_bpp, dsc_max_bpp));

	pipe_config->pipe_bpp = pipe_bpp;

	return 0;
}

/*
 * Compute the full DSC configuration: FEC, pipe bpp (unless the MST path
 * computes it separately), compressed bpp, slice count and DSC parameters.
 * Returns 0 on success or a negative error code.
 */
int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
				struct intel_crtc_state *pipe_config,
				struct drm_connector_state *conn_state,
				struct link_config_limits *limits,
				int timeslots,
				bool compute_pipe_bpp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	int ret;

	/* FEC is only enabled here for (non-eDP) DP when supported. */
	pipe_config->fec_enable = pipe_config->fec_enable ||
		(!intel_dp_is_edp(intel_dp) &&
		 intel_dp_supports_fec(intel_dp, connector, pipe_config));

	if (!intel_dp_supports_dsc(connector, pipe_config))
		return -EINVAL;

	if (!intel_dp_dsc_supports_format(connector, pipe_config->output_format))
		return -EINVAL;

	/*
	 * compute pipe bpp is set to false for DP MST DSC case
	 * and compressed_bpp is calculated same time once
	 * vpci timeslots are allocated, because overall bpp
	 * calculation procedure is bit different for MST case.
	 */
	if (compute_pipe_bpp) {
		if (intel_dp_is_edp(intel_dp))
			ret = intel_edp_dsc_compute_pipe_bpp(intel_dp, pipe_config,
							     conn_state, limits);
		else
			ret = intel_dp_dsc_compute_pipe_bpp(intel_dp, pipe_config,
							    conn_state, limits, timeslots);
		if (ret) {
			drm_dbg_kms(&dev_priv->drm,
				    "No Valid pipe bpp for given mode ret = %d\n", ret);
			return ret;
		}
	}

	/* Calculate Slice count */
	if (intel_dp_is_edp(intel_dp)) {
		pipe_config->dsc.slice_count =
			drm_dp_dsc_sink_max_slice_count(connector->dp.dsc_dpcd,
							true);
		if (!pipe_config->dsc.slice_count) {
			drm_dbg_kms(&dev_priv->drm, "Unsupported Slice Count %d\n",
				    pipe_config->dsc.slice_count);
			return -EINVAL;
		}
	} else {
		u8 dsc_dp_slice_count;

		dsc_dp_slice_count =
			intel_dp_dsc_get_slice_count(connector,
						     adjusted_mode->crtc_clock,
						     adjusted_mode->crtc_hdisplay,
						     pipe_config->joiner_pipes);
		if (!dsc_dp_slice_count) {
			drm_dbg_kms(&dev_priv->drm,
				    "Compressed Slice Count not supported\n");
			return -EINVAL;
		}

		pipe_config->dsc.slice_count = dsc_dp_slice_count;
	}
	/*
	 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
	 * is greater than the maximum Cdclock and if slice count is even
	 * then we need to use 2 VDSC instances.
	 */
	if (pipe_config->joiner_pipes || pipe_config->dsc.slice_count > 1)
		pipe_config->dsc.dsc_split = true;

	ret = intel_dp_dsc_compute_params(connector, pipe_config);
	if (ret < 0) {
		/*
		 * NOTE(review): the two adjacent string literals concatenate
		 * without a separator, printing "...Input Bpp = %dCompressed
		 * BPP = ...".
		 */
		drm_dbg_kms(&dev_priv->drm,
			    "Cannot compute valid DSC parameters for Input Bpp = %d"
			    "Compressed BPP = " FXP_Q4_FMT "\n",
			    pipe_config->pipe_bpp,
			    FXP_Q4_ARGS(pipe_config->dsc.compressed_bpp_x16));
		return ret;
	}

	pipe_config->dsc.compression_enable = true;
	drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
		    "Compressed Bpp = " FXP_Q4_FMT " Slice Count = %d\n",
		    pipe_config->pipe_bpp,
		    FXP_Q4_ARGS(pipe_config->dsc.compressed_bpp_x16),
		    pipe_config->dsc.slice_count);

	return 0;
}

/**
 * intel_dp_compute_config_link_bpp_limits - compute output link bpp limits
 * @intel_dp: intel DP
 * @crtc_state: crtc state
 * @dsc: DSC compression mode
 * @limits: link configuration limits
 *
 * Calculates the output link min, max bpp values in @limits based on the
 * pipe bpp range, @crtc_state and @dsc mode.
 *
 * Returns %true in case of success.
 */
bool
intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
					const struct intel_crtc_state *crtc_state,
					bool dsc,
					struct link_config_limits *limits)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	int max_link_bpp_x16;

	max_link_bpp_x16 = min(crtc_state->max_link_bpp_x16,
			       fxp_q4_from_int(limits->pipe.max_bpp));

	if (!dsc) {
		/* Without DSC the link bpp is rounded down to a multiple of 2 * 3. */
		max_link_bpp_x16 = rounddown(max_link_bpp_x16, fxp_q4_from_int(2 * 3));

		if (max_link_bpp_x16 < fxp_q4_from_int(limits->pipe.min_bpp))
			return false;

		limits->link.min_bpp_x16 = fxp_q4_from_int(limits->pipe.min_bpp);
	} else {
		/*
		 * TODO: set the DSC link limits already here, atm these are
		 * initialized only later in intel_edp_dsc_compute_pipe_bpp() /
		 * intel_dp_dsc_compute_pipe_bpp()
		 */
		limits->link.min_bpp_x16 = 0;
	}

	limits->link.max_bpp_x16 = max_link_bpp_x16;

	drm_dbg_kms(&i915->drm,
		    "[ENCODER:%d:%s][CRTC:%d:%s] DP link limits: pixel clock %d kHz DSC %s max lanes %d max rate %d max pipe_bpp %d max link_bpp " FXP_Q4_FMT "\n",
		    encoder->base.base.id, encoder->base.name,
		    crtc->base.base.id, crtc->base.name,
		    adjusted_mode->crtc_clock,
		    dsc ? "on" : "off",
		    limits->max_lane_count,
		    limits->max_rate,
		    limits->pipe.max_bpp,
		    FXP_Q4_ARGS(limits->link.max_bpp_x16));

	return true;
}

/*
 * Initialize the link rate/lane-count/bpp limits for link configuration
 * computation. Returns false when no valid bpp range exists.
 */
static bool
intel_dp_compute_config_limits(struct intel_dp *intel_dp,
			       struct intel_crtc_state *crtc_state,
			       bool respect_downstream_limits,
			       bool dsc,
			       struct link_config_limits *limits)
{
	limits->min_rate = intel_dp_min_link_rate(intel_dp);
	limits->max_rate = intel_dp_max_link_rate(intel_dp);

	/* FIXME 128b/132b SST support missing */
	limits->max_rate = min(limits->max_rate, 810000);
	limits->min_rate = min(limits->min_rate, limits->max_rate);

	limits->min_lane_count = intel_dp_min_lane_count(intel_dp);
	limits->max_lane_count = intel_dp_max_lane_count(intel_dp);

	limits->pipe.min_bpp = intel_dp_min_bpp(crtc_state->output_format);
	limits->pipe.max_bpp = intel_dp_max_bpp(intel_dp, crtc_state,
						respect_downstream_limits);

	if (intel_dp->use_max_params) {
		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertizes being capable of in case the initial fast
		 * optimal params failed us. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically on older panels these
		 * values correspond to the native resolution of the panel.
		 */
		limits->min_lane_count = limits->max_lane_count;
		limits->min_rate = limits->max_rate;
	}

	intel_dp_adjust_compliance_config(intel_dp, crtc_state, limits);

	return intel_dp_compute_config_link_bpp_limits(intel_dp,
						       crtc_state,
						       dsc,
						       limits);
}

/* Link data rate required by the mode at the effective (compressed or pipe) bpp. */
int intel_dp_config_required_rate(const struct intel_crtc_state *crtc_state)
{
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	int bpp = crtc_state->dsc.compression_enable ?
		fxp_q4_to_int_roundup(crtc_state->dsc.compressed_bpp_x16) :
		crtc_state->pipe_bpp;

	return intel_dp_link_required(adjusted_mode->crtc_clock, bpp);
}

bool intel_dp_joiner_needs_dsc(struct drm_i915_private *i915, bool use_joiner)
{
	/*
	 * Pipe joiner needs compression up to display 12 due to bandwidth
	 * limitation. DG2 onwards pipe joiner can be enabled without
	 * compression.
	 */
	return DISPLAY_VER(i915) < 13 && use_joiner;
}

/*
 * Pick a lane count / link rate / bpp combination for the mode, enabling
 * the pipe joiner and/or DSC when required (or forced via debugfs).
 */
static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
			     struct intel_crtc_state *pipe_config,
			     struct drm_connector_state *conn_state,
			     bool respect_downstream_limits)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct link_config_limits limits;
	bool dsc_needed, joiner_needs_dsc;
	int ret = 0;

	/* FEC was requested but isn't supported for this configuration. */
	if (pipe_config->fec_enable &&
	    !intel_dp_supports_fec(intel_dp, connector, pipe_config))
		return -EINVAL;

	if (intel_dp_need_joiner(intel_dp, connector,
				 adjusted_mode->crtc_hdisplay,
				 adjusted_mode->crtc_clock))
		pipe_config->joiner_pipes = GENMASK(crtc->pipe + 1, crtc->pipe);

	joiner_needs_dsc = intel_dp_joiner_needs_dsc(i915, pipe_config->joiner_pipes);

	/* DSC is needed if forced, required by the joiner, or no uncompressed limits exist. */
	dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en ||
		     !intel_dp_compute_config_limits(intel_dp, pipe_config,
						     respect_downstream_limits,
						     false,
						     &limits);

	if (!dsc_needed) {
		/*
		 * Optimize for slow and wide for everything, because there are some
		 * eDP 1.3 and 1.4 panels don't work well with fast and narrow.
		 */
		ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config,
							conn_state, &limits);
		if (ret)
			dsc_needed = true;
	}

	if (dsc_needed) {
		drm_dbg_kms(&i915->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
			    str_yes_no(ret), str_yes_no(joiner_needs_dsc),
			    str_yes_no(intel_dp->force_dsc_en));

		/* Recompute the limits with DSC enabled. */
		if (!intel_dp_compute_config_limits(intel_dp, pipe_config,
						    respect_downstream_limits,
						    true,
						    &limits))
			return -EINVAL;

		ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
						  conn_state, &limits, 64, true);
		if (ret < 0)
			return ret;
	}

	drm_dbg_kms(&i915->drm,
		    "DP lane count %d clock %d bpp input %d compressed " FXP_Q4_FMT " link rate required %d available %d\n",
		    pipe_config->lane_count, pipe_config->port_clock,
		    pipe_config->pipe_bpp,
		    FXP_Q4_ARGS(pipe_config->dsc.compressed_bpp_x16),
		    intel_dp_config_required_rate(pipe_config),
		    intel_dp_max_link_data_rate(intel_dp,
						pipe_config->port_clock,
						pipe_config->lane_count));

	return 0;
}

bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
				  const struct drm_connector_state *conn_state)
{
	const struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	/*
	 * Our YCbCr output is always limited range.
	 * crtc_state->limited_color_range only applies to RGB,
	 * and it must never be set for YCbCr or we risk setting
	 * some conflicting bits in TRANSCONF which will mess up
	 * the colors on the monitor.
	 */
	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		return false;

	if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		return crtc_state->pipe_bpp != 18 &&
			drm_default_rgb_quant_range(adjusted_mode) ==
			HDMI_QUANTIZATION_RANGE_LIMITED;
	} else {
		return intel_conn_state->broadcast_rgb ==
			INTEL_BROADCAST_RGB_LIMITED;
	}
}

/* No audio on G4X, nor on port A before display 12. */
static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
				    enum port port)
{
	if (IS_G4X(dev_priv))
		return false;
	if (DISPLAY_VER(dev_priv) < 12 && port == PORT_A)
		return false;

	return true;
}

/* Fill out the colorimetry related fields of the VSC SDP. */
static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state,
					     const struct drm_connector_state *conn_state,
					     struct drm_dp_vsc_sdp *vsc)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (crtc_state->has_panel_replay) {
		/*
		 * Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223
		 * VSC SDP supporting 3D stereo, Panel Replay, and Pixel
		 * Encoding/Colorimetry Format indication.
		 */
		vsc->revision = 0x7;
	} else {
		/*
		 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
		 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
		 * Colorimetry Format indication.
		 */
		vsc->revision = 0x5;
	}

	vsc->length = 0x13;

	/* DP 1.4a spec, Table 2-120 */
	switch (crtc_state->output_format) {
	case INTEL_OUTPUT_FORMAT_YCBCR444:
		vsc->pixelformat = DP_PIXELFORMAT_YUV444;
		break;
	case INTEL_OUTPUT_FORMAT_YCBCR420:
		vsc->pixelformat = DP_PIXELFORMAT_YUV420;
		break;
	case INTEL_OUTPUT_FORMAT_RGB:
	default:
		vsc->pixelformat = DP_PIXELFORMAT_RGB;
	}

	switch (conn_state->colorspace) {
	case DRM_MODE_COLORIMETRY_BT709_YCC:
		vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_XVYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_709:
		vsc->colorimetry = DP_COLORIMETRY_XVYCC_709;
		break;
	case DRM_MODE_COLORIMETRY_SYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_SYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_OPYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_OPYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_CYCC:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_RGB:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_YCC:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC;
		break;
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
		vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB;
		break;
	default:
		/*
		 * RGB->YCBCR color conversion uses the BT.709
		 * color space.
2698 */ 2699 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 2700 vsc->colorimetry = DP_COLORIMETRY_BT709_YCC; 2701 else 2702 vsc->colorimetry = DP_COLORIMETRY_DEFAULT; 2703 break; 2704 } 2705 2706 vsc->bpc = crtc_state->pipe_bpp / 3; 2707 2708 /* only RGB pixelformat supports 6 bpc */ 2709 drm_WARN_ON(&dev_priv->drm, 2710 vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB); 2711 2712 /* all YCbCr are always limited range */ 2713 vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA; 2714 vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED; 2715 } 2716 2717 static void intel_dp_compute_as_sdp(struct intel_dp *intel_dp, 2718 struct intel_crtc_state *crtc_state) 2719 { 2720 struct drm_dp_as_sdp *as_sdp = &crtc_state->infoframes.as_sdp; 2721 const struct drm_display_mode *adjusted_mode = 2722 &crtc_state->hw.adjusted_mode; 2723 2724 if (!crtc_state->vrr.enable || !intel_dp->as_sdp_supported) 2725 return; 2726 2727 crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC); 2728 2729 /* Currently only DP_AS_SDP_AVT_FIXED_VTOTAL mode supported */ 2730 as_sdp->sdp_type = DP_SDP_ADAPTIVE_SYNC; 2731 as_sdp->length = 0x9; 2732 as_sdp->duration_incr_ms = 0; 2733 as_sdp->duration_incr_ms = 0; 2734 2735 if (crtc_state->cmrr.enable) { 2736 as_sdp->mode = DP_AS_SDP_FAVT_TRR_REACHED; 2737 as_sdp->vtotal = adjusted_mode->vtotal; 2738 as_sdp->target_rr = drm_mode_vrefresh(adjusted_mode); 2739 as_sdp->target_rr_divider = true; 2740 } else { 2741 as_sdp->mode = DP_AS_SDP_AVT_FIXED_VTOTAL; 2742 as_sdp->vtotal = adjusted_mode->vtotal; 2743 as_sdp->target_rr = 0; 2744 } 2745 } 2746 2747 static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp, 2748 struct intel_crtc_state *crtc_state, 2749 const struct drm_connector_state *conn_state) 2750 { 2751 struct drm_dp_vsc_sdp *vsc; 2752 2753 if ((!intel_dp->colorimetry_support || 2754 !intel_dp_needs_vsc_sdp(crtc_state, conn_state)) && 2755 !crtc_state->has_psr) 2756 return; 2757 2758 vsc = 
&crtc_state->infoframes.vsc;

	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
	vsc->sdp_type = DP_SDP_VSC;

	/* Needs colorimetry */
	if (intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
		intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
						 vsc);
	} else if (crtc_state->has_panel_replay) {
		/*
		 * [Panel Replay without colorimetry info]
		 * Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223
		 * VSC SDP supporting 3D stereo + Panel Replay.
		 */
		vsc->revision = 0x6;
		vsc->length = 0x10;
	} else if (crtc_state->has_sel_update) {
		/*
		 * [PSR2 without colorimetry]
		 * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11
		 * 3D stereo + PSR/PSR2 + Y-coordinate.
		 */
		vsc->revision = 0x4;
		vsc->length = 0xe;
	} else {
		/*
		 * [PSR1]
		 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
		 * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or
		 * higher).
		 */
		vsc->revision = 0x2;
		vsc->length = 0x8;
	}
}

/* Build the HDR metadata infoframe from the connector's hdr_output_metadata. */
static void
intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
					    struct intel_crtc_state *crtc_state,
					    const struct drm_connector_state *conn_state)
{
	int ret;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm;

	if (!conn_state->hdr_output_metadata)
		return;

	ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state);

	if (ret) {
		drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n");
		return;
	}

	crtc_state->infoframes.enable |=
		intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
}

/* Check all the preconditions for enabling DRRS on this config. */
static bool can_enable_drrs(struct intel_connector *connector,
			    const struct intel_crtc_state *pipe_config,
			    const struct drm_display_mode *downclock_mode)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	if (pipe_config->vrr.enable)
		return false;

	/*
	 * DRRS and PSR can't be enable together, so giving preference to PSR
	 * as it allows more power-savings by complete shutting down display,
	 * so to guarantee this, intel_drrs_compute_config() must be called
	 * after intel_psr_compute_config().
	 */
	if (pipe_config->has_psr)
		return false;

	/* FIXME missing FDI M2/N2 etc.
 */
	if (pipe_config->has_pch_encoder)
		return false;

	if (!intel_cpu_transcoder_has_drrs(i915, pipe_config->cpu_transcoder))
		return false;

	return downclock_mode &&
		intel_panel_drrs_type(connector) == DRRS_TYPE_SEAMLESS;
}

/* Compute the downclocked (M2/N2) link parameters for seamless DRRS. */
static void
intel_dp_drrs_compute_config(struct intel_connector *connector,
			     struct intel_crtc_state *pipe_config,
			     int link_bpp_x16)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	const struct drm_display_mode *downclock_mode =
		intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode);
	int pixel_clock;

	/*
	 * FIXME all joined pipes share the same transcoder.
	 * Need to account for that when updating M/N live.
	 */
	if (has_seamless_m_n(connector) && !pipe_config->joiner_pipes)
		pipe_config->update_m_n = true;

	if (!can_enable_drrs(connector, pipe_config, downclock_mode)) {
		if (intel_cpu_transcoder_has_m2_n2(i915, pipe_config->cpu_transcoder))
			intel_zero_m_n(&pipe_config->dp_m2_n2);
		return;
	}

	if (IS_IRONLAKE(i915) || IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915))
		pipe_config->msa_timing_delay = connector->panel.vbt.edp.drrs_msa_timing_delay;

	pipe_config->has_drrs = true;

	pixel_clock = downclock_mode->clock;
	if (pipe_config->splitter.enable)
		pixel_clock /= pipe_config->splitter.link_count;

	intel_link_compute_m_n(link_bpp_x16, pipe_config->lane_count, pixel_clock,
			       pipe_config->port_clock,
			       intel_dp_bw_fec_overhead(pipe_config->fec_enable),
			       &pipe_config->dp_m2_n2);

	/* FIXME: abstract this better */
	if (pipe_config->splitter.enable)
		pipe_config->dp_m2_n2.data_m *= pipe_config->splitter.link_count;
}

/* Resolve the force_audio property against the port and sink capabilities. */
static bool intel_dp_has_audio(struct intel_encoder *encoder,
			       const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	const struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);

	if (!intel_dp_port_has_audio(i915, encoder->port))
		return false;

	if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
		return connector->base.display_info.has_audio;
	else
		return intel_conn_state->force_audio == HDMI_AUDIO_ON;
}

/*
 * Pick the sink/output formats for the mode, then compute the link config,
 * retrying with YCbCr 4:2:0 when the preferred format doesn't fit.
 */
static int
intel_dp_compute_output_format(struct intel_encoder *encoder,
			       struct intel_crtc_state *crtc_state,
			       struct drm_connector_state *conn_state,
			       bool respect_downstream_limits)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_connector *connector = intel_dp->attached_connector;
	const struct drm_display_info *info = &connector->base.display_info;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	bool ycbcr_420_only;
	int ret;

	ycbcr_420_only = drm_mode_is_420_only(info, adjusted_mode);

	if (ycbcr_420_only && !connector->base.ycbcr_420_allowed) {
		drm_dbg_kms(&i915->drm,
			    "YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible.
Falling back to RGB.\n"); 2926 crtc_state->sink_format = INTEL_OUTPUT_FORMAT_RGB; 2927 } else { 2928 crtc_state->sink_format = intel_dp_sink_format(connector, adjusted_mode); 2929 } 2930 2931 crtc_state->output_format = intel_dp_output_format(connector, crtc_state->sink_format); 2932 2933 ret = intel_dp_compute_link_config(encoder, crtc_state, conn_state, 2934 respect_downstream_limits); 2935 if (ret) { 2936 if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR420 || 2937 !connector->base.ycbcr_420_allowed || 2938 !drm_mode_is_420_also(info, adjusted_mode)) 2939 return ret; 2940 2941 crtc_state->sink_format = INTEL_OUTPUT_FORMAT_YCBCR420; 2942 crtc_state->output_format = intel_dp_output_format(connector, 2943 crtc_state->sink_format); 2944 ret = intel_dp_compute_link_config(encoder, crtc_state, conn_state, 2945 respect_downstream_limits); 2946 } 2947 2948 return ret; 2949 } 2950 2951 void 2952 intel_dp_audio_compute_config(struct intel_encoder *encoder, 2953 struct intel_crtc_state *pipe_config, 2954 struct drm_connector_state *conn_state) 2955 { 2956 pipe_config->has_audio = 2957 intel_dp_has_audio(encoder, conn_state) && 2958 intel_audio_compute_config(encoder, pipe_config, conn_state); 2959 2960 pipe_config->sdp_split_enable = pipe_config->has_audio && 2961 intel_dp_is_uhbr(pipe_config); 2962 } 2963 2964 static void intel_dp_queue_modeset_retry_work(struct intel_connector *connector) 2965 { 2966 struct drm_i915_private *i915 = to_i915(connector->base.dev); 2967 2968 drm_connector_get(&connector->base); 2969 if (!queue_work(i915->unordered_wq, &connector->modeset_retry_work)) 2970 drm_connector_put(&connector->base); 2971 } 2972 2973 void 2974 intel_dp_queue_modeset_retry_for_link(struct intel_atomic_state *state, 2975 struct intel_encoder *encoder, 2976 const struct intel_crtc_state *crtc_state) 2977 { 2978 struct intel_connector *connector; 2979 struct intel_digital_connector_state *conn_state; 2980 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2981 
	int i;

	if (intel_dp->needs_modeset_retry)
		return;

	intel_dp->needs_modeset_retry = true;

	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
		/* SST: only the single attached connector needs the retry. */
		intel_dp_queue_modeset_retry_work(intel_dp->attached_connector);

		return;
	}

	/* MST: queue the retry for every enabled connector on this link. */
	for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
		if (!conn_state->base.crtc)
			continue;

		if (connector->mst_port == intel_dp)
			intel_dp_queue_modeset_retry_work(connector);
	}
}

/*
 * Main atomic compute_config hook for DP/eDP encoders: validates the
 * adjusted mode, picks the output format and link configuration, and
 * derives all dependent state (M/N values, PSR/VRR/DRRS, SDPs, audio).
 * Returns 0 on success or a negative error code to fail the modeset.
 */
int
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config,
			struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	const struct drm_display_mode *fixed_mode;
	struct intel_connector *connector = intel_dp->attached_connector;
	int ret = 0, link_bpp_x16;

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && encoder->port != PORT_A)
		pipe_config->has_pch_encoder = true;

	/* eDP panels with a fixed mode get the panel-specific config first. */
	fixed_mode = intel_panel_fixed_mode(connector, adjusted_mode);
	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		ret = intel_panel_compute_config(connector, adjusted_mode);
		if (ret)
			return ret;
	}

	/* Mode flags and timings not supported on DP outputs. */
	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return -EINVAL;

	if (!connector->base.interlace_allowed &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
		return -EINVAL;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return -EINVAL;

	if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay))
		return -EINVAL;

	/*
	 * Try to respect downstream TMDS clock limits first, if
	 * that fails assume the user might know something we don't.
	 */
	ret = intel_dp_compute_output_format(encoder, pipe_config, conn_state, true);
	if (ret)
		ret = intel_dp_compute_output_format(encoder, pipe_config, conn_state, false);
	if (ret)
		return ret;

	if ((intel_dp_is_edp(intel_dp) && fixed_mode) ||
	    pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
		ret = intel_panel_fitting(pipe_config, conn_state);
		if (ret)
			return ret;
	}

	pipe_config->limited_color_range =
		intel_dp_limited_color_range(pipe_config, conn_state);

	pipe_config->enhanced_framing =
		drm_dp_enhanced_frame_cap(intel_dp->dpcd);

	/* Link bpp in .4 fixed point: compressed bpp if DSC, else pipe bpp. */
	if (pipe_config->dsc.compression_enable)
		link_bpp_x16 = pipe_config->dsc.compressed_bpp_x16;
	else
		link_bpp_x16 = fxp_q4_from_int(intel_dp_output_bpp(pipe_config->output_format,
								   pipe_config->pipe_bpp));

	if (intel_dp->mso_link_count) {
		int n = intel_dp->mso_link_count;
		int overlap = intel_dp->mso_pixel_overlap;

		pipe_config->splitter.enable = true;
		pipe_config->splitter.link_count = n;
		pipe_config->splitter.pixel_overlap = overlap;

		drm_dbg_kms(&dev_priv->drm, "MSO link count %d, pixel overlap %d\n",
			    n, overlap);

		/*
		 * With MSO each link transmits 1/n of the mode width plus the
		 * pixel overlap; adjust the per-link CRTC timings accordingly.
		 */
		adjusted_mode->crtc_hdisplay = adjusted_mode->crtc_hdisplay / n + overlap;
		adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hblank_start / n + overlap;
		adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_end / n + overlap;
		adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hsync_start / n + overlap;
		adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_end / n + overlap;
		adjusted_mode->crtc_htotal = adjusted_mode->crtc_htotal / n + overlap;
		adjusted_mode->crtc_clock /= n;
	}

	intel_dp_audio_compute_config(encoder, pipe_config, conn_state);

	intel_link_compute_m_n(link_bpp_x16,
			       pipe_config->lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       intel_dp_bw_fec_overhead(pipe_config->fec_enable),
			       &pipe_config->dp_m_n);

	/* FIXME: abstract this better */
	if (pipe_config->splitter.enable)
		pipe_config->dp_m_n.data_m *= pipe_config->splitter.link_count;

	if (!HAS_DDI(dev_priv))
		g4x_dp_set_clock(encoder, pipe_config);

	intel_vrr_compute_config(pipe_config, conn_state);
	intel_dp_compute_as_sdp(intel_dp, pipe_config);
	intel_psr_compute_config(intel_dp, pipe_config, conn_state);
	intel_alpm_lobf_compute_config(intel_dp, pipe_config, conn_state);
	intel_dp_drrs_compute_config(connector, pipe_config, link_bpp_x16);
	intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
	intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);

	return intel_dp_tunnel_atomic_compute_stream_bw(state, intel_dp, connector,
							pipe_config);
}

/*
 * Record the link parameters to be used for link training and reset the
 * training/retry state for a fresh training attempt.
 */
void intel_dp_set_link_params(struct intel_dp *intel_dp,
			      int link_rate, int lane_count)
{
	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp->link_trained = false;
	intel_dp->needs_modeset_retry = false;
	intel_dp->link_rate = link_rate;
	intel_dp->lane_count = lane_count;
}

/*
 * Reset the link limits and MST probing state to the maximum the source
 * and sink have in common, clearing any earlier training-failure fallback.
 */
void intel_dp_reset_link_params(struct intel_dp *intel_dp)
{
	intel_dp->link.max_lane_count = intel_dp_max_common_lane_count(intel_dp);
	intel_dp->link.max_rate = intel_dp_max_common_rate(intel_dp);
	intel_dp->link.mst_probed_lane_count = 0;
	intel_dp->link.mst_probed_rate = 0;
	intel_dp->link.retrain_disabled = false;
	intel_dp->link.seq_train_failures = 0;
}

/* Enable backlight PWM and backlight PP control.
 */
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/* No-op for external DP; only eDP has a driver-controlled backlight. */
	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&i915->drm, "\n");

	intel_backlight_enable(crtc_state, conn_state);
	intel_pps_backlight_on(intel_dp);
}

/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&i915->drm, "\n");

	/* Reverse order of intel_edp_backlight_on(). */
	intel_pps_backlight_off(intel_dp);
	intel_backlight_disable(old_conn_state);
}

/*
 * Whether the sink must be kept in D0 for downstream HPD notifications
 * to work (pre-1.2 DPCD branch devices with an HPD-capable port).
 */
static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
{
	/*
	 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
	 * be capable of signalling downstream hpd with a long pulse.
	 * Whether or not that means D3 is safe to use is not clear,
	 * but let's assume so until proven otherwise.
	 *
	 * FIXME should really check all downstream ports...
	 */
	return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
		drm_dp_is_branch(intel_dp->dpcd) &&
		intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
}

/*
 * Read-modify-write a single flag in the DP_DSC_ENABLE DPCD register via
 * @aux. Callers treat a negative return value as failure.
 */
static int
write_dsc_decompression_flag(struct drm_dp_aux *aux, u8 flag, bool set)
{
	int err;
	u8 val;

	err = drm_dp_dpcd_readb(aux, DP_DSC_ENABLE, &val);
	if (err < 0)
		return err;

	if (set)
		val |= flag;
	else
		val &= ~flag;

	return drm_dp_dpcd_writeb(aux, DP_DSC_ENABLE, val);
}

/*
 * Toggle DSC decompression in the device addressed by the connector's
 * decompression AUX (sink on SST, sink or last branch device on MST).
 * Failure is only logged; there is no way to recover here.
 */
static void
intel_dp_sink_set_dsc_decompression(struct intel_connector *connector,
				    bool enable)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	if (write_dsc_decompression_flag(connector->dp.dsc_decompression_aux,
					 DP_DECOMPRESSION_EN, enable) < 0)
		drm_dbg_kms(&i915->drm,
			    "Failed to %s sink decompression state\n",
			    str_enable_disable(enable));
}

/*
 * Toggle DSC passthrough in the branch device ahead of the sink. Only
 * relevant on MST (connector->port set); no-op otherwise.
 */
static void
intel_dp_sink_set_dsc_passthrough(const struct intel_connector *connector,
				  bool enable)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct drm_dp_aux *aux = connector->port ?
				 connector->port->passthrough_aux : NULL;

	if (!aux)
		return;

	if (write_dsc_decompression_flag(aux,
					 DP_DSC_PASSTHROUGH_EN, enable) < 0)
		drm_dbg_kms(&i915->drm,
			    "Failed to %s sink compression passthrough state\n",
			    str_enable_disable(enable));
}

/*
 * Count the connectors in @state sharing @connector's decompression AUX
 * that have decompression enabled. Used to decide whether this connector
 * is the first user (get) or last user (put) of the shared AUX device.
 */
static int intel_dp_dsc_aux_ref_count(struct intel_atomic_state *state,
				      const struct intel_connector *connector,
				      bool for_get_ref)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct drm_connector *_connector_iter;
	struct drm_connector_state *old_conn_state;
	struct drm_connector_state *new_conn_state;
	int ref_count = 0;
	int i;

	/*
	 * On SST the decompression AUX device won't be shared, each connector
	 * uses for this its own AUX targeting the sink device.
	 */
	if (!connector->mst_port)
		return connector->dp.dsc_decompression_enabled ? 1 : 0;

	for_each_oldnew_connector_in_state(&state->base, _connector_iter,
					   old_conn_state, new_conn_state, i) {
		const struct intel_connector *
			connector_iter = to_intel_connector(_connector_iter);

		/* Only connectors on the same MST link can share the AUX. */
		if (connector_iter->mst_port != connector->mst_port)
			continue;

		if (!connector_iter->dp.dsc_decompression_enabled)
			continue;

		drm_WARN_ON(&i915->drm,
			    (for_get_ref && !new_conn_state->crtc) ||
			    (!for_get_ref && !old_conn_state->crtc));

		if (connector_iter->dp.dsc_decompression_aux ==
		    connector->dp.dsc_decompression_aux)
			ref_count++;
	}

	return ref_count;
}

/*
 * Mark decompression enabled for @connector; returns true if this was the
 * first reference on the shared AUX, i.e. the caller must program the sink.
 */
static bool intel_dp_dsc_aux_get_ref(struct intel_atomic_state *state,
				     struct intel_connector *connector)
{
	bool ret = intel_dp_dsc_aux_ref_count(state, connector, true) == 0;

	connector->dp.dsc_decompression_enabled = true;

	return ret;
}

/*
 * Drop @connector's decompression reference; returns true if it was the
 * last one, i.e. the caller must disable decompression in the sink.
 */
static bool intel_dp_dsc_aux_put_ref(struct intel_atomic_state *state,
				     struct intel_connector *connector)
{
	connector->dp.dsc_decompression_enabled = false;

	return intel_dp_dsc_aux_ref_count(state, connector, false) == 0;
}

/**
 * intel_dp_sink_enable_decompression - Enable DSC decompression in sink/last branch device
 * @state: atomic state
 * @connector: connector to enable the decompression for
 * @new_crtc_state: new state for the CRTC driving @connector
 *
 * Enable the DSC decompression if required in the %DP_DSC_ENABLE DPCD
 * register of the appropriate sink/branch device. On SST this is always the
 * sink device, whereas on MST based on each device's DSC capabilities it's
 * either the last branch device (enabling decompression in it) or both the
 * last branch device (enabling passthrough in it) and the sink device
 * (enabling decompression in it).
 */
void intel_dp_sink_enable_decompression(struct intel_atomic_state *state,
					struct intel_connector *connector,
					const struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	if (!new_crtc_state->dsc.compression_enable)
		return;

	if (drm_WARN_ON(&i915->drm,
			!connector->dp.dsc_decompression_aux ||
			connector->dp.dsc_decompression_enabled))
		return;

	/* Only the first user of the shared AUX programs the device. */
	if (!intel_dp_dsc_aux_get_ref(state, connector))
		return;

	intel_dp_sink_set_dsc_passthrough(connector, true);
	intel_dp_sink_set_dsc_decompression(connector, true);
}

/**
 * intel_dp_sink_disable_decompression - Disable DSC decompression in sink/last branch device
 * @state: atomic state
 * @connector: connector to disable the decompression for
 * @old_crtc_state: old state for the CRTC driving @connector
 *
 * Disable the DSC decompression if required in the %DP_DSC_ENABLE DPCD
 * register of the appropriate sink/branch device, corresponding to the
 * sequence in
 * intel_dp_sink_enable_decompression().
 */
void intel_dp_sink_disable_decompression(struct intel_atomic_state *state,
					 struct intel_connector *connector,
					 const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	if (!old_crtc_state->dsc.compression_enable)
		return;

	if (drm_WARN_ON(&i915->drm,
			!connector->dp.dsc_decompression_aux ||
			!connector->dp.dsc_decompression_enabled))
		return;

	/* Only the last user of the shared AUX programs the device. */
	if (!intel_dp_dsc_aux_put_ref(state, connector))
		return;

	/* Reverse order of intel_dp_sink_enable_decompression(). */
	intel_dp_sink_set_dsc_decompression(connector, false);
	intel_dp_sink_set_dsc_passthrough(connector, false);
}

/*
 * Program the Intel source OUI into the eDP sink, which unlocks
 * vendor-specific sink features. last_oui_write is recorded so callers
 * can wait out the sink's post-write settle time (see
 * intel_dp_wait_source_oui()).
 */
static void
intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 oui[] = { 0x00, 0xaa, 0x01 };
	u8 buf[3] = {};

	/*
	 * During driver init, we want to be careful and avoid changing the source OUI if it's
	 * already set to what we want, so as to avoid clearing any state by accident
	 */
	if (careful) {
		/*
		 * NOTE(review): on a failed read buf stays zeroed, so the
		 * memcmp() below fails and we fall through to the write.
		 */
		if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0)
			drm_err(&i915->drm, "Failed to read source OUI\n");

		if (memcmp(oui, buf, sizeof(oui)) == 0)
			return;
	}

	if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0)
		drm_err(&i915->drm, "Failed to write source OUI\n");

	intel_dp->last_oui_write = jiffies;
}

/*
 * Wait until the VBT-provided refresh timeout has elapsed since the last
 * source OUI write, before touching OUI-gated sink registers.
 */
void intel_dp_wait_source_oui(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Performing OUI wait (%u ms)\n",
		    connector->base.base.id, connector->base.name,
		    connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout);

	wait_remaining_ms_from_jiffies(intel_dp->last_oui_write,
				       connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout);
}

/* If the device supports it, try to set the power state appropriately */
void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DP_SET_POWER_D0) {
		/* Keep the sink awake if downstream HPD depends on it. */
		if (downstream_hpd_needs_d0(intel_dp))
			return;

		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
	} else {
		struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

		lspcon_resume(dp_to_dig_port(intel_dp));

		/* Write the source OUI as early as possible */
		if (intel_dp_is_edp(intel_dp))
			intel_edp_init_source_oui(intel_dp, false);

		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
			if (ret == 1)
				break;
			msleep(1);
		}

		if (ret == 1 && lspcon->active)
			lspcon_wait_pcon_mode(lspcon);
	}

	/* drm_dp_dpcd_writeb() returns the byte count (1) on success. */
	if (ret != 1)
		drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Set power to %s failed\n",
			    encoder->base.base.id, encoder->base.name,
			    mode == DP_SET_POWER_D0 ? "D0" : "D3");
}

static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp);

/**
 * intel_dp_sync_state - sync the encoder state during init/resume
 * @encoder: intel encoder to sync
 * @crtc_state: state for the CRTC connected to the encoder
 *
 * Sync any state stored in the encoder wrt. HW state during driver init
 * and system resume.
 */
void intel_dp_sync_state(struct intel_encoder *encoder,
			 const struct intel_crtc_state *crtc_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	bool dpcd_updated = false;

	/*
	 * Don't clobber DPCD if it's been already read out during output
	 * setup (eDP) or detect.
	 */
	if (crtc_state && intel_dp->dpcd[DP_DPCD_REV] == 0) {
		intel_dp_get_dpcd(intel_dp);
		dpcd_updated = true;
	}

	intel_dp_tunnel_resume(intel_dp, crtc_state, dpcd_updated);

	/* With an active CRTC, adopt its link parameters as already trained. */
	if (crtc_state) {
		intel_dp_reset_link_params(intel_dp);
		intel_dp_set_link_params(intel_dp, crtc_state->port_clock, crtc_state->lane_count);
		intel_dp->link_trained = true;
	}
}

/*
 * Decide whether the BIOS-programmed state is usable for a fastset, or a
 * full modeset must be forced. Returns false (and flags the appropriate
 * uapi state as changed) when recomputation is required.
 */
bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	bool fastset = true;

	/*
	 * If BIOS has set an unsupported or non-standard link rate for some
	 * reason force an encoder recompute and full modeset.
	 */
	if (intel_dp_rate_index(intel_dp->source_rates, intel_dp->num_source_rates,
				crtc_state->port_clock) < 0) {
		drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset due to unsupported link rate\n",
			    encoder->base.base.id, encoder->base.name);
		crtc_state->uapi.connectors_changed = true;
		fastset = false;
	}

	/*
	 * FIXME hack to force full modeset when DSC is being used.
	 *
	 * As long as we do not have full state readout and config comparison
	 * of crtc_state->dsc, we have no way to ensure reliable fastset.
	 * Remove once we have readout for DSC.
	 */
	if (crtc_state->dsc.compression_enable) {
		drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset due to DSC being enabled\n",
			    encoder->base.base.id, encoder->base.name);
		crtc_state->uapi.mode_changed = true;
		fastset = false;
	}

	if (CAN_PANEL_REPLAY(intel_dp)) {
		drm_dbg_kms(&i915->drm,
			    "[ENCODER:%d:%s] Forcing full modeset to compute panel replay state\n",
			    encoder->base.base.id, encoder->base.name);
		crtc_state->uapi.mode_changed = true;
		fastset = false;
	}

	return fastset;
}

/*
 * Cache the PCON's DSC encoder capability registers; logs and leaves the
 * cache zeroed on read failure.
 */
static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/* Clear the cached register set to avoid using stale values */

	memset(intel_dp->pcon_dsc_dpcd, 0, sizeof(intel_dp->pcon_dsc_dpcd));

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER,
			     intel_dp->pcon_dsc_dpcd,
			     sizeof(intel_dp->pcon_dsc_dpcd)) < 0)
		drm_err(&i915->drm, "Failed to read DPCD register 0x%x\n",
			DP_PCON_DSC_ENCODER);

	drm_dbg_kms(&i915->drm, "PCON ENCODER DSC DPCD: %*ph\n",
		    (int)sizeof(intel_dp->pcon_dsc_dpcd), intel_dp->pcon_dsc_dpcd);
}

/*
 * Convert an FRL bandwidth bitmask to the highest bandwidth it contains,
 * in Gbps; 0 if no bit is set. Bit i corresponds to bw_gbps[i].
 */
static int intel_dp_pcon_get_frl_mask(u8 frl_bw_mask)
{
	static const int bw_gbps[] = {9, 18, 24, 32, 40, 48};
	int i;

	for (i = ARRAY_SIZE(bw_gbps) - 1; i >= 0; i--) {
		if (frl_bw_mask & (1 << i))
			return bw_gbps[i];
	}
	return 0;
}

/*
 * Inverse of intel_dp_pcon_get_frl_mask(): map an FRL bandwidth in Gbps
 * to its DPCD mask bit; 0 for an unknown bandwidth.
 */
static int intel_dp_pcon_set_frl_mask(int max_frl)
{
	switch (max_frl) {
	case 48:
		return DP_PCON_FRL_BW_MASK_48GBPS;
	case 40:
		return DP_PCON_FRL_BW_MASK_40GBPS;
	case 32:
		return DP_PCON_FRL_BW_MASK_32GBPS;
	case 24:
		return DP_PCON_FRL_BW_MASK_24GBPS;
	case 18:
		return DP_PCON_FRL_BW_MASK_18GBPS;
	case 9:
		return DP_PCON_FRL_BW_MASK_9GBPS;
	}

	return 0;
}

static
int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;
	int max_frl_rate;
	int max_lanes, rate_per_lane;
	int max_dsc_lanes, dsc_rate_per_lane;

	/* Uncompressed FRL limit from the sink's EDID (Gbps). */
	max_lanes = connector->display_info.hdmi.max_lanes;
	rate_per_lane = connector->display_info.hdmi.max_frl_rate_per_lane;
	max_frl_rate = max_lanes * rate_per_lane;

	/* With HDMI DSC 1.2, the DSC lane/rate caps may lower the limit. */
	if (connector->display_info.hdmi.dsc_cap.v_1p2) {
		max_dsc_lanes = connector->display_info.hdmi.dsc_cap.max_lanes;
		dsc_rate_per_lane = connector->display_info.hdmi.dsc_cap.max_frl_rate_per_lane;
		if (max_dsc_lanes && dsc_rate_per_lane)
			max_frl_rate = min(max_frl_rate, max_dsc_lanes * dsc_rate_per_lane);
	}

	return max_frl_rate;
}

/*
 * Check whether the PCON's HDMI link is already up in FRL mode at a
 * bandwidth covering @max_frl_bw_mask; *frl_trained_mask is filled with
 * the trained mask read back from the PCON.
 */
static bool
intel_dp_pcon_is_frl_trained(struct intel_dp *intel_dp,
			     u8 max_frl_bw_mask, u8 *frl_trained_mask)
{
	if (drm_dp_pcon_hdmi_link_active(&intel_dp->aux) &&
	    drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, frl_trained_mask) == DP_PCON_HDMI_MODE_FRL &&
	    *frl_trained_mask >= max_frl_bw_mask)
		return true;

	return false;
}

/*
 * Run the DP2.0-HDMI2.1 PCON FRL training sequence: prepare, wait for FRL
 * ready, configure the target bandwidth, enable FRL and wait for the HDMI
 * link to come up. Returns 0 on success (recording the trained rate in
 * intel_dp->frl), or a negative error code.
 */
static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
{
#define TIMEOUT_FRL_READY_MS 500
#define TIMEOUT_HDMI_LINK_ACTIVE_MS 1000

	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int max_frl_bw, max_pcon_frl_bw, max_edid_frl_bw, ret;
	u8 max_frl_bw_mask = 0, frl_trained_mask;
	bool is_active;

	max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
	drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw);

	max_edid_frl_bw = intel_dp_hdmi_sink_max_frl(intel_dp);
	drm_dbg(&i915->drm, "Sink max rate from EDID = %d Gbps\n", max_edid_frl_bw);

	/* Train to the lower of the PCON and sink limits. */
	max_frl_bw = min(max_edid_frl_bw, max_pcon_frl_bw);

	if (max_frl_bw <= 0)
		return -EINVAL;

	max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw);
	drm_dbg(&i915->drm, "MAX_FRL_BW_MASK = %u\n", max_frl_bw_mask);

	/* Skip training if the link is already up at sufficient bandwidth. */
	if (intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask))
		goto frl_trained;

	ret = drm_dp_pcon_frl_prepare(&intel_dp->aux, false);
	if (ret < 0)
		return ret;
	/* Wait for PCON to be FRL Ready */
	wait_for(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux) == true, TIMEOUT_FRL_READY_MS);

	if (!is_active)
		return -ETIMEDOUT;

	ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw,
					  DP_PCON_ENABLE_SEQUENTIAL_LINK);
	if (ret < 0)
		return ret;
	ret = drm_dp_pcon_frl_configure_2(&intel_dp->aux, max_frl_bw_mask,
					  DP_PCON_FRL_LINK_TRAIN_NORMAL);
	if (ret < 0)
		return ret;
	ret = drm_dp_pcon_frl_enable(&intel_dp->aux);
	if (ret < 0)
		return ret;
	/*
	 * Wait for FRL to be completed
	 * Check if the HDMI Link is up and active.
	 */
	wait_for(is_active =
		 intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask),
		 TIMEOUT_HDMI_LINK_ACTIVE_MS);

	if (!is_active)
		return -ETIMEDOUT;

frl_trained:
	drm_dbg(&i915->drm, "FRL_TRAINED_MASK = %u\n", frl_trained_mask);
	intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask);
	intel_dp->frl.is_trained = true;
	drm_dbg(&i915->drm, "FRL trained with : %d Gbps\n", intel_dp->frl.trained_rate_gbps);

	return 0;
}

/* An HDMI 2.1 sink: a branch device with an HDMI sink reporting FRL support. */
static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp)
{
	if (drm_dp_is_branch(intel_dp->dpcd) &&
	    intel_dp_has_hdmi_sink(intel_dp) &&
	    intel_dp_hdmi_sink_max_frl(intel_dp) > 0)
		return true;

	return false;
}

/*
 * Put the PCON's HDMI link into source-control TMDS mode, the fallback
 * when FRL training fails. Returns 0 on success or a negative AUX error.
 */
static
int intel_dp_pcon_set_tmds_mode(struct intel_dp *intel_dp)
{
	int ret;
	u8 buf = 0;

	/* Set PCON source control mode */
	buf |= DP_PCON_ENABLE_SOURCE_CTL_MODE;

	ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
	if (ret < 0)
		return ret;

	/* Set HDMI LINK ENABLE */
	buf |= DP_PCON_ENABLE_HDMI_LINK;
	ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
	if (ret < 0)
		return ret;

	return 0;
}

void intel_dp_check_frl_training(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/*
	 * Always go for FRL training if:
	 * -PCON supports SRC_CTL_MODE (VESA DP2.0-HDMI2.1 PCON Spec Draft-1 Sec-7)
	 * -sink is HDMI2.1
	 */
	if (!(intel_dp->downstream_ports[2] & DP_PCON_SOURCE_CTL_MODE) ||
	    !intel_dp_is_hdmi_2_1_sink(intel_dp) ||
	    intel_dp->frl.is_trained)
		return;

	if (intel_dp_pcon_start_frl_training(intel_dp) < 0) {
		int ret, mode;

		/* FRL failed: fall back to TMDS and verify the mode took. */
		drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n");
		ret = intel_dp_pcon_set_tmds_mode(intel_dp);
		mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL);

		if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS)
			drm_dbg(&dev_priv->drm, "Issue with PCON, cannot set TMDS mode\n");
	} else {
		drm_dbg(&dev_priv->drm, "FRL training Completed\n");
	}
}

/* DSC slice height for the PCON encoder, derived from the active vdisplay. */
static int
intel_dp_pcon_dsc_enc_slice_height(const struct intel_crtc_state *crtc_state)
{
	int vactive = crtc_state->hw.adjusted_mode.vdisplay;

	return intel_hdmi_dsc_get_slice_height(vactive);
}

/*
 * Number of DSC slices for the PCON encoder, honouring both the PCON's
 * and the HDMI sink's slice count/width/throughput limits.
 */
static int
intel_dp_pcon_dsc_enc_slices(struct intel_dp *intel_dp,
			     const struct intel_crtc_state *crtc_state)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;
	int hdmi_throughput = connector->display_info.hdmi.dsc_cap.clk_per_slice;
	int hdmi_max_slices = connector->display_info.hdmi.dsc_cap.max_slices;
	int pcon_max_slices = drm_dp_pcon_dsc_max_slices(intel_dp->pcon_dsc_dpcd);
	int pcon_max_slice_width = drm_dp_pcon_dsc_max_slice_width(intel_dp->pcon_dsc_dpcd);

	return intel_hdmi_dsc_get_num_slices(crtc_state, pcon_max_slices,
					     pcon_max_slice_width,
					     hdmi_max_slices, hdmi_throughput);
}

/*
 * DSC bits-per-pixel for the PCON encoder given the chosen slice layout,
 * within the PCON's fractional-bpp and the sink's chunk-size limits.
 */
static int
intel_dp_pcon_dsc_enc_bpp(struct intel_dp *intel_dp,
			  const struct intel_crtc_state *crtc_state,
			  int num_slices, int slice_width)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;
	int output_format = crtc_state->output_format;
	bool hdmi_all_bpp = connector->display_info.hdmi.dsc_cap.all_bpp;
	int pcon_fractional_bpp = drm_dp_pcon_dsc_bpp_incr(intel_dp->pcon_dsc_dpcd);
	int hdmi_max_chunk_bytes =
		connector->display_info.hdmi.dsc_cap.total_chunk_kbytes * 1024;

	return intel_hdmi_dsc_get_bpp(pcon_fractional_bpp, slice_width,
3767 num_slices, output_format, hdmi_all_bpp, 3768 hdmi_max_chunk_bytes); 3769 } 3770 3771 void 3772 intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp, 3773 const struct intel_crtc_state *crtc_state) 3774 { 3775 u8 pps_param[6]; 3776 int slice_height; 3777 int slice_width; 3778 int num_slices; 3779 int bits_per_pixel; 3780 int ret; 3781 struct intel_connector *intel_connector = intel_dp->attached_connector; 3782 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3783 struct drm_connector *connector; 3784 bool hdmi_is_dsc_1_2; 3785 3786 if (!intel_dp_is_hdmi_2_1_sink(intel_dp)) 3787 return; 3788 3789 if (!intel_connector) 3790 return; 3791 connector = &intel_connector->base; 3792 hdmi_is_dsc_1_2 = connector->display_info.hdmi.dsc_cap.v_1p2; 3793 3794 if (!drm_dp_pcon_enc_is_dsc_1_2(intel_dp->pcon_dsc_dpcd) || 3795 !hdmi_is_dsc_1_2) 3796 return; 3797 3798 slice_height = intel_dp_pcon_dsc_enc_slice_height(crtc_state); 3799 if (!slice_height) 3800 return; 3801 3802 num_slices = intel_dp_pcon_dsc_enc_slices(intel_dp, crtc_state); 3803 if (!num_slices) 3804 return; 3805 3806 slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay, 3807 num_slices); 3808 3809 bits_per_pixel = intel_dp_pcon_dsc_enc_bpp(intel_dp, crtc_state, 3810 num_slices, slice_width); 3811 if (!bits_per_pixel) 3812 return; 3813 3814 pps_param[0] = slice_height & 0xFF; 3815 pps_param[1] = slice_height >> 8; 3816 pps_param[2] = slice_width & 0xFF; 3817 pps_param[3] = slice_width >> 8; 3818 pps_param[4] = bits_per_pixel & 0xFF; 3819 pps_param[5] = (bits_per_pixel >> 8) & 0x3; 3820 3821 ret = drm_dp_pcon_pps_override_param(&intel_dp->aux, pps_param); 3822 if (ret < 0) 3823 drm_dbg_kms(&i915->drm, "Failed to set pcon DSC\n"); 3824 } 3825 3826 void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp, 3827 const struct intel_crtc_state *crtc_state) 3828 { 3829 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3830 bool ycbcr444_to_420 = false; 3831 bool rgb_to_ycbcr = false; 
3832 u8 tmp; 3833 3834 if (intel_dp->dpcd[DP_DPCD_REV] < 0x13) 3835 return; 3836 3837 if (!drm_dp_is_branch(intel_dp->dpcd)) 3838 return; 3839 3840 tmp = intel_dp_has_hdmi_sink(intel_dp) ? DP_HDMI_DVI_OUTPUT_CONFIG : 0; 3841 3842 if (drm_dp_dpcd_writeb(&intel_dp->aux, 3843 DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1) 3844 drm_dbg_kms(&i915->drm, "Failed to %s protocol converter HDMI mode\n", 3845 str_enable_disable(intel_dp_has_hdmi_sink(intel_dp))); 3846 3847 if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR420) { 3848 switch (crtc_state->output_format) { 3849 case INTEL_OUTPUT_FORMAT_YCBCR420: 3850 break; 3851 case INTEL_OUTPUT_FORMAT_YCBCR444: 3852 ycbcr444_to_420 = true; 3853 break; 3854 case INTEL_OUTPUT_FORMAT_RGB: 3855 rgb_to_ycbcr = true; 3856 ycbcr444_to_420 = true; 3857 break; 3858 default: 3859 MISSING_CASE(crtc_state->output_format); 3860 break; 3861 } 3862 } else if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR444) { 3863 switch (crtc_state->output_format) { 3864 case INTEL_OUTPUT_FORMAT_YCBCR444: 3865 break; 3866 case INTEL_OUTPUT_FORMAT_RGB: 3867 rgb_to_ycbcr = true; 3868 break; 3869 default: 3870 MISSING_CASE(crtc_state->output_format); 3871 break; 3872 } 3873 } 3874 3875 tmp = ycbcr444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0; 3876 3877 if (drm_dp_dpcd_writeb(&intel_dp->aux, 3878 DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1) 3879 drm_dbg_kms(&i915->drm, 3880 "Failed to %s protocol converter YCbCr 4:2:0 conversion mode\n", 3881 str_enable_disable(intel_dp->dfp.ycbcr_444_to_420)); 3882 3883 tmp = rgb_to_ycbcr ? 
DP_CONVERSION_BT709_RGB_YCBCR_ENABLE : 0; 3884 3885 if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0) 3886 drm_dbg_kms(&i915->drm, 3887 "Failed to %s protocol converter RGB->YCbCr conversion mode\n", 3888 str_enable_disable(tmp)); 3889 } 3890 3891 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) 3892 { 3893 u8 dprx = 0; 3894 3895 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST, 3896 &dprx) != 1) 3897 return false; 3898 return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED; 3899 } 3900 3901 static void intel_dp_read_dsc_dpcd(struct drm_dp_aux *aux, 3902 u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) 3903 { 3904 if (drm_dp_dpcd_read(aux, DP_DSC_SUPPORT, dsc_dpcd, 3905 DP_DSC_RECEIVER_CAP_SIZE) < 0) { 3906 drm_err(aux->drm_dev, 3907 "Failed to read DPCD register 0x%x\n", 3908 DP_DSC_SUPPORT); 3909 return; 3910 } 3911 3912 drm_dbg_kms(aux->drm_dev, "DSC DPCD: %*ph\n", 3913 DP_DSC_RECEIVER_CAP_SIZE, 3914 dsc_dpcd); 3915 } 3916 3917 void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, struct intel_connector *connector) 3918 { 3919 struct drm_i915_private *i915 = to_i915(connector->base.dev); 3920 3921 /* 3922 * Clear the cached register set to avoid using stale values 3923 * for the sinks that do not support DSC. 
	 */
	memset(connector->dp.dsc_dpcd, 0, sizeof(connector->dp.dsc_dpcd));

	/* Clear fec_capable to avoid using stale values */
	connector->dp.fec_capability = 0;

	if (dpcd_rev < DP_DPCD_REV_14)
		return;

	intel_dp_read_dsc_dpcd(connector->dp.dsc_decompression_aux,
			       connector->dp.dsc_dpcd);

	if (drm_dp_dpcd_readb(connector->dp.dsc_decompression_aux, DP_FEC_CAPABILITY,
			      &connector->dp.fec_capability) < 0) {
		drm_err(&i915->drm, "Failed to read FEC DPCD register\n");
		return;
	}

	drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n",
		    connector->dp.fec_capability);
}

/* eDP variant: DSC caps are only read for eDP 1.4+ panels (no FEC on eDP). */
static void intel_edp_get_dsc_sink_cap(u8 edp_dpcd_rev, struct intel_connector *connector)
{
	if (edp_dpcd_rev < DP_EDP_14)
		return;

	intel_dp_read_dsc_dpcd(connector->dp.dsc_decompression_aux, connector->dp.dsc_dpcd);
}

/*
 * Expand a per-link MSO panel mode to the full panel width: the EDID/VBT
 * mode describes a single link segment, so undo the per-link split
 * (inverse of the MSO timing adjustment in intel_dp_compute_config()).
 */
static void intel_edp_mso_mode_fixup(struct intel_connector *connector,
				     struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	int n = intel_dp->mso_link_count;
	int overlap = intel_dp->mso_pixel_overlap;

	if (!mode || !n)
		return;

	mode->hdisplay = (mode->hdisplay - overlap) * n;
	mode->hsync_start = (mode->hsync_start - overlap) * n;
	mode->hsync_end = (mode->hsync_end - overlap) * n;
	mode->htotal = (mode->htotal - overlap) * n;
	mode->clock *= n;

	drm_mode_set_name(mode);

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] using generated MSO mode: " DRM_MODE_FMT "\n",
		    connector->base.base.id, connector->base.name,
		    DRM_MODE_ARG(mode));
}

void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_connector *connector = intel_dp->attached_connector;

	if (connector->panel.vbt.edp.bpp && pipe_bpp > connector->panel.vbt.edp.bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		drm_dbg_kms(&dev_priv->drm,
			    "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			    pipe_bpp, connector->panel.vbt.edp.bpp);
		connector->panel.vbt.edp.bpp = pipe_bpp;
	}
}

/*
 * Read and validate the eDP sink's MSO (Multi-SST Operation) link count
 * and pixel overlap capabilities; an invalid or unsupported configuration
 * resolves to mso = 0 (plain SST).
 */
static void intel_edp_mso_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_display_info *info = &connector->base.display_info;
	u8 mso;

	if (intel_dp->edp_dpcd[0] < DP_EDP_14)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_MSO_LINK_CAPABILITIES, &mso) != 1) {
		drm_err(&i915->drm, "Failed to read MSO cap\n");
		return;
	}

	/* Valid configurations are SST or MSO 2x1, 2x2, 4x1 */
	mso &= DP_EDP_MSO_NUMBER_OF_LINKS_MASK;
	if (mso % 2 || mso > drm_dp_max_lane_count(intel_dp->dpcd)) {
		drm_err(&i915->drm, "Invalid MSO link count cap %u\n", mso);
		mso = 0;
	}

	if (mso) {
		drm_dbg_kms(&i915->drm, "Sink MSO %ux%u configuration, pixel overlap %u\n",
			    mso, drm_dp_max_lane_count(intel_dp->dpcd) / mso,
			    info->mso_pixel_overlap);
		if (!HAS_MSO(i915)) {
			drm_err(&i915->drm, "No source MSO support, disabling\n");
			mso = 0;
		}
	}
intel_dp->mso_link_count = mso; 4039 intel_dp->mso_pixel_overlap = mso ? info->mso_pixel_overlap : 0; 4040 } 4041 4042 static bool 4043 intel_edp_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector) 4044 { 4045 struct drm_i915_private *dev_priv = 4046 to_i915(dp_to_dig_port(intel_dp)->base.base.dev); 4047 4048 /* this function is meant to be called only once */ 4049 drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0); 4050 4051 if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0) 4052 return false; 4053 4054 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, 4055 drm_dp_is_branch(intel_dp->dpcd)); 4056 4057 /* 4058 * Read the eDP display control registers. 4059 * 4060 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in 4061 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it 4062 * set, but require eDP 1.4+ detection (e.g. for supported link rates 4063 * method). The display control registers should read zero if they're 4064 * not supported anyway. 4065 */ 4066 if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, 4067 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) == 4068 sizeof(intel_dp->edp_dpcd)) { 4069 drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n", 4070 (int)sizeof(intel_dp->edp_dpcd), 4071 intel_dp->edp_dpcd); 4072 4073 intel_dp->use_max_params = intel_dp->edp_dpcd[0] < DP_EDP_14; 4074 } 4075 4076 /* 4077 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks 4078 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1] 4079 */ 4080 intel_psr_init_dpcd(intel_dp); 4081 4082 /* Clear the default sink rates */ 4083 intel_dp->num_sink_rates = 0; 4084 4085 /* Read the eDP 1.4+ supported link rates. 
*/ 4086 if (intel_dp->edp_dpcd[0] >= DP_EDP_14) { 4087 __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; 4088 int i; 4089 4090 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES, 4091 sink_rates, sizeof(sink_rates)); 4092 4093 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) { 4094 int val = le16_to_cpu(sink_rates[i]); 4095 4096 if (val == 0) 4097 break; 4098 4099 /* Value read multiplied by 200kHz gives the per-lane 4100 * link rate in kHz. The source rates are, however, 4101 * stored in terms of LS_Clk kHz. The full conversion 4102 * back to symbols is 4103 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte) 4104 */ 4105 intel_dp->sink_rates[i] = (val * 200) / 10; 4106 } 4107 intel_dp->num_sink_rates = i; 4108 } 4109 4110 /* 4111 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available, 4112 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise. 4113 */ 4114 if (intel_dp->num_sink_rates) 4115 intel_dp->use_rate_select = true; 4116 else 4117 intel_dp_set_sink_rates(intel_dp); 4118 intel_dp_set_max_sink_lane_count(intel_dp); 4119 4120 /* Read the eDP DSC DPCD registers */ 4121 if (HAS_DSC(dev_priv)) 4122 intel_edp_get_dsc_sink_cap(intel_dp->edp_dpcd[0], 4123 connector); 4124 4125 /* 4126 * If needed, program our source OUI so we can make various Intel-specific AUX services 4127 * available (such as HDR backlight controls) 4128 */ 4129 intel_edp_init_source_oui(intel_dp, true); 4130 4131 return true; 4132 } 4133 4134 static bool 4135 intel_dp_has_sink_count(struct intel_dp *intel_dp) 4136 { 4137 if (!intel_dp->attached_connector) 4138 return false; 4139 4140 return drm_dp_read_sink_count_cap(&intel_dp->attached_connector->base, 4141 intel_dp->dpcd, 4142 &intel_dp->desc); 4143 } 4144 4145 void intel_dp_update_sink_caps(struct intel_dp *intel_dp) 4146 { 4147 intel_dp_set_sink_rates(intel_dp); 4148 intel_dp_set_max_sink_lane_count(intel_dp); 4149 intel_dp_set_common_rates(intel_dp); 4150 } 4151 4152 static bool 4153 intel_dp_get_dpcd(struct intel_dp 
*intel_dp) 4154 { 4155 int ret; 4156 4157 if (intel_dp_init_lttpr_and_dprx_caps(intel_dp) < 0) 4158 return false; 4159 4160 /* 4161 * Don't clobber cached eDP rates. Also skip re-reading 4162 * the OUI/ID since we know it won't change. 4163 */ 4164 if (!intel_dp_is_edp(intel_dp)) { 4165 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, 4166 drm_dp_is_branch(intel_dp->dpcd)); 4167 4168 intel_dp_update_sink_caps(intel_dp); 4169 } 4170 4171 if (intel_dp_has_sink_count(intel_dp)) { 4172 ret = drm_dp_read_sink_count(&intel_dp->aux); 4173 if (ret < 0) 4174 return false; 4175 4176 /* 4177 * Sink count can change between short pulse hpd hence 4178 * a member variable in intel_dp will track any changes 4179 * between short pulse interrupts. 4180 */ 4181 intel_dp->sink_count = ret; 4182 4183 /* 4184 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that 4185 * a dongle is present but no display. Unless we require to know 4186 * if a dongle is present or not, we don't need to update 4187 * downstream port information. So, an early return here saves 4188 * time from performing other operations which are not required. 
4189 */ 4190 if (!intel_dp->sink_count) 4191 return false; 4192 } 4193 4194 return drm_dp_read_downstream_info(&intel_dp->aux, intel_dp->dpcd, 4195 intel_dp->downstream_ports) == 0; 4196 } 4197 4198 static const char *intel_dp_mst_mode_str(enum drm_dp_mst_mode mst_mode) 4199 { 4200 if (mst_mode == DRM_DP_MST) 4201 return "MST"; 4202 else if (mst_mode == DRM_DP_SST_SIDEBAND_MSG) 4203 return "SST w/ sideband messaging"; 4204 else 4205 return "SST"; 4206 } 4207 4208 static enum drm_dp_mst_mode 4209 intel_dp_mst_mode_choose(struct intel_dp *intel_dp, 4210 enum drm_dp_mst_mode sink_mst_mode) 4211 { 4212 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4213 4214 if (!i915->display.params.enable_dp_mst) 4215 return DRM_DP_SST; 4216 4217 if (!intel_dp_mst_source_support(intel_dp)) 4218 return DRM_DP_SST; 4219 4220 if (sink_mst_mode == DRM_DP_SST_SIDEBAND_MSG && 4221 !(intel_dp->dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_128B132B)) 4222 return DRM_DP_SST; 4223 4224 return sink_mst_mode; 4225 } 4226 4227 static enum drm_dp_mst_mode 4228 intel_dp_mst_detect(struct intel_dp *intel_dp) 4229 { 4230 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4231 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 4232 enum drm_dp_mst_mode sink_mst_mode; 4233 enum drm_dp_mst_mode mst_detect; 4234 4235 sink_mst_mode = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd); 4236 4237 mst_detect = intel_dp_mst_mode_choose(intel_dp, sink_mst_mode); 4238 4239 drm_dbg_kms(&i915->drm, 4240 "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s -> enable: %s\n", 4241 encoder->base.base.id, encoder->base.name, 4242 str_yes_no(intel_dp_mst_source_support(intel_dp)), 4243 intel_dp_mst_mode_str(sink_mst_mode), 4244 str_yes_no(i915->display.params.enable_dp_mst), 4245 intel_dp_mst_mode_str(mst_detect)); 4246 4247 return mst_detect; 4248 } 4249 4250 static void 4251 intel_dp_mst_configure(struct intel_dp *intel_dp) 4252 { 4253 if (!intel_dp_mst_source_support(intel_dp)) 
4254 return; 4255 4256 intel_dp->is_mst = intel_dp->mst_detect != DRM_DP_SST; 4257 4258 if (intel_dp->is_mst) 4259 intel_dp_mst_prepare_probe(intel_dp); 4260 4261 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); 4262 4263 /* Avoid stale info on the next detect cycle. */ 4264 intel_dp->mst_detect = DRM_DP_SST; 4265 } 4266 4267 static void 4268 intel_dp_mst_disconnect(struct intel_dp *intel_dp) 4269 { 4270 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4271 4272 if (!intel_dp->is_mst) 4273 return; 4274 4275 drm_dbg_kms(&i915->drm, "MST device may have disappeared %d vs %d\n", 4276 intel_dp->is_mst, intel_dp->mst_mgr.mst_state); 4277 intel_dp->is_mst = false; 4278 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); 4279 } 4280 4281 static bool 4282 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *esi) 4283 { 4284 return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, esi, 4) == 4; 4285 } 4286 4287 static bool intel_dp_ack_sink_irq_esi(struct intel_dp *intel_dp, u8 esi[4]) 4288 { 4289 int retry; 4290 4291 for (retry = 0; retry < 3; retry++) { 4292 if (drm_dp_dpcd_write(&intel_dp->aux, DP_SINK_COUNT_ESI + 1, 4293 &esi[1], 3) == 3) 4294 return true; 4295 } 4296 4297 return false; 4298 } 4299 4300 bool 4301 intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state, 4302 const struct drm_connector_state *conn_state) 4303 { 4304 /* 4305 * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication 4306 * of Color Encoding Format and Content Color Gamut], in order to 4307 * sending YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP. 
4308 */ 4309 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 4310 return true; 4311 4312 switch (conn_state->colorspace) { 4313 case DRM_MODE_COLORIMETRY_SYCC_601: 4314 case DRM_MODE_COLORIMETRY_OPYCC_601: 4315 case DRM_MODE_COLORIMETRY_BT2020_YCC: 4316 case DRM_MODE_COLORIMETRY_BT2020_RGB: 4317 case DRM_MODE_COLORIMETRY_BT2020_CYCC: 4318 return true; 4319 default: 4320 break; 4321 } 4322 4323 return false; 4324 } 4325 4326 static ssize_t intel_dp_as_sdp_pack(const struct drm_dp_as_sdp *as_sdp, 4327 struct dp_sdp *sdp, size_t size) 4328 { 4329 size_t length = sizeof(struct dp_sdp); 4330 4331 if (size < length) 4332 return -ENOSPC; 4333 4334 memset(sdp, 0, size); 4335 4336 /* Prepare AS (Adaptive Sync) SDP Header */ 4337 sdp->sdp_header.HB0 = 0; 4338 sdp->sdp_header.HB1 = as_sdp->sdp_type; 4339 sdp->sdp_header.HB2 = 0x02; 4340 sdp->sdp_header.HB3 = as_sdp->length; 4341 4342 /* Fill AS (Adaptive Sync) SDP Payload */ 4343 sdp->db[0] = as_sdp->mode; 4344 sdp->db[1] = as_sdp->vtotal & 0xFF; 4345 sdp->db[2] = (as_sdp->vtotal >> 8) & 0xFF; 4346 sdp->db[3] = as_sdp->target_rr & 0xFF; 4347 sdp->db[4] = (as_sdp->target_rr >> 8) & 0x3; 4348 4349 if (as_sdp->target_rr_divider) 4350 sdp->db[4] |= 0x20; 4351 4352 return length; 4353 } 4354 4355 static ssize_t 4356 intel_dp_hdr_metadata_infoframe_sdp_pack(struct drm_i915_private *i915, 4357 const struct hdmi_drm_infoframe *drm_infoframe, 4358 struct dp_sdp *sdp, 4359 size_t size) 4360 { 4361 size_t length = sizeof(struct dp_sdp); 4362 const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE; 4363 unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE]; 4364 ssize_t len; 4365 4366 if (size < length) 4367 return -ENOSPC; 4368 4369 memset(sdp, 0, size); 4370 4371 len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf)); 4372 if (len < 0) { 4373 drm_dbg_kms(&i915->drm, "buffer size is smaller than hdr metadata infoframe\n"); 4374 return -ENOSPC; 4375 } 4376 4377 if (len 
!= infoframe_size) { 4378 drm_dbg_kms(&i915->drm, "wrong static hdr metadata size\n"); 4379 return -ENOSPC; 4380 } 4381 4382 /* 4383 * Set up the infoframe sdp packet for HDR static metadata. 4384 * Prepare VSC Header for SU as per DP 1.4a spec, 4385 * Table 2-100 and Table 2-101 4386 */ 4387 4388 /* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */ 4389 sdp->sdp_header.HB0 = 0; 4390 /* 4391 * Packet Type 80h + Non-audio INFOFRAME Type value 4392 * HDMI_INFOFRAME_TYPE_DRM: 0x87 4393 * - 80h + Non-audio INFOFRAME Type value 4394 * - InfoFrame Type: 0x07 4395 * [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame] 4396 */ 4397 sdp->sdp_header.HB1 = drm_infoframe->type; 4398 /* 4399 * Least Significant Eight Bits of (Data Byte Count – 1) 4400 * infoframe_size - 1 4401 */ 4402 sdp->sdp_header.HB2 = 0x1D; 4403 /* INFOFRAME SDP Version Number */ 4404 sdp->sdp_header.HB3 = (0x13 << 2); 4405 /* CTA Header Byte 2 (INFOFRAME Version Number) */ 4406 sdp->db[0] = drm_infoframe->version; 4407 /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */ 4408 sdp->db[1] = drm_infoframe->length; 4409 /* 4410 * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after 4411 * HDMI_INFOFRAME_HEADER_SIZE 4412 */ 4413 BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2); 4414 memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE], 4415 HDMI_DRM_INFOFRAME_SIZE); 4416 4417 /* 4418 * Size of DP infoframe sdp packet for HDR static metadata consists of 4419 * - DP SDP Header(struct dp_sdp_header): 4 bytes 4420 * - Two Data Blocks: 2 bytes 4421 * CTA Header Byte2 (INFOFRAME Version Number) 4422 * CTA Header Byte3 (Length of INFOFRAME) 4423 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes 4424 * 4425 * Prior to GEN11's GMP register size is identical to DP HDR static metadata 4426 * infoframe size. But GEN11+ has larger than that size, write_infoframe 4427 * will pad rest of the size. 
4428 */ 4429 return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE; 4430 } 4431 4432 static void intel_write_dp_sdp(struct intel_encoder *encoder, 4433 const struct intel_crtc_state *crtc_state, 4434 unsigned int type) 4435 { 4436 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 4437 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4438 struct dp_sdp sdp = {}; 4439 ssize_t len; 4440 4441 if ((crtc_state->infoframes.enable & 4442 intel_hdmi_infoframe_enable(type)) == 0) 4443 return; 4444 4445 switch (type) { 4446 case DP_SDP_VSC: 4447 len = drm_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp); 4448 break; 4449 case HDMI_PACKET_TYPE_GAMUT_METADATA: 4450 len = intel_dp_hdr_metadata_infoframe_sdp_pack(dev_priv, 4451 &crtc_state->infoframes.drm.drm, 4452 &sdp, sizeof(sdp)); 4453 break; 4454 case DP_SDP_ADAPTIVE_SYNC: 4455 len = intel_dp_as_sdp_pack(&crtc_state->infoframes.as_sdp, &sdp, 4456 sizeof(sdp)); 4457 break; 4458 default: 4459 MISSING_CASE(type); 4460 return; 4461 } 4462 4463 if (drm_WARN_ON(&dev_priv->drm, len < 0)) 4464 return; 4465 4466 dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len); 4467 } 4468 4469 void intel_dp_set_infoframes(struct intel_encoder *encoder, 4470 bool enable, 4471 const struct intel_crtc_state *crtc_state, 4472 const struct drm_connector_state *conn_state) 4473 { 4474 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4475 i915_reg_t reg = HSW_TVIDEO_DIP_CTL(dev_priv, 4476 crtc_state->cpu_transcoder); 4477 u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW | 4478 VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW | 4479 VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK; 4480 4481 if (HAS_AS_SDP(dev_priv)) 4482 dip_enable |= VIDEO_DIP_ENABLE_AS_ADL; 4483 4484 u32 val = intel_de_read(dev_priv, reg) & ~dip_enable; 4485 4486 /* TODO: Sanitize DSC enabling wrt. intel_dsc_dp_pps_write(). 
*/ 4487 if (!enable && HAS_DSC(dev_priv)) 4488 val &= ~VDIP_ENABLE_PPS; 4489 4490 /* 4491 * This routine disables VSC DIP if the function is called 4492 * to disable SDP or if it does not have PSR 4493 */ 4494 if (!enable || !crtc_state->has_psr) 4495 val &= ~VIDEO_DIP_ENABLE_VSC_HSW; 4496 4497 intel_de_write(dev_priv, reg, val); 4498 intel_de_posting_read(dev_priv, reg); 4499 4500 if (!enable) 4501 return; 4502 4503 intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC); 4504 intel_write_dp_sdp(encoder, crtc_state, DP_SDP_ADAPTIVE_SYNC); 4505 4506 intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA); 4507 } 4508 4509 static 4510 int intel_dp_as_sdp_unpack(struct drm_dp_as_sdp *as_sdp, 4511 const void *buffer, size_t size) 4512 { 4513 const struct dp_sdp *sdp = buffer; 4514 4515 if (size < sizeof(struct dp_sdp)) 4516 return -EINVAL; 4517 4518 memset(as_sdp, 0, sizeof(*as_sdp)); 4519 4520 if (sdp->sdp_header.HB0 != 0) 4521 return -EINVAL; 4522 4523 if (sdp->sdp_header.HB1 != DP_SDP_ADAPTIVE_SYNC) 4524 return -EINVAL; 4525 4526 if (sdp->sdp_header.HB2 != 0x02) 4527 return -EINVAL; 4528 4529 if ((sdp->sdp_header.HB3 & 0x3F) != 9) 4530 return -EINVAL; 4531 4532 as_sdp->length = sdp->sdp_header.HB3 & DP_ADAPTIVE_SYNC_SDP_LENGTH; 4533 as_sdp->mode = sdp->db[0] & DP_ADAPTIVE_SYNC_SDP_OPERATION_MODE; 4534 as_sdp->vtotal = (sdp->db[2] << 8) | sdp->db[1]; 4535 as_sdp->target_rr = (u64)sdp->db[3] | ((u64)sdp->db[4] & 0x3); 4536 as_sdp->target_rr_divider = sdp->db[4] & 0x20 ? 
true : false; 4537 4538 return 0; 4539 } 4540 4541 static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc, 4542 const void *buffer, size_t size) 4543 { 4544 const struct dp_sdp *sdp = buffer; 4545 4546 if (size < sizeof(struct dp_sdp)) 4547 return -EINVAL; 4548 4549 memset(vsc, 0, sizeof(*vsc)); 4550 4551 if (sdp->sdp_header.HB0 != 0) 4552 return -EINVAL; 4553 4554 if (sdp->sdp_header.HB1 != DP_SDP_VSC) 4555 return -EINVAL; 4556 4557 vsc->sdp_type = sdp->sdp_header.HB1; 4558 vsc->revision = sdp->sdp_header.HB2; 4559 vsc->length = sdp->sdp_header.HB3; 4560 4561 if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) || 4562 (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe) || 4563 (sdp->sdp_header.HB2 == 0x6 && sdp->sdp_header.HB3 == 0x10)) { 4564 /* 4565 * - HB2 = 0x2, HB3 = 0x8 4566 * VSC SDP supporting 3D stereo + PSR 4567 * - HB2 = 0x4, HB3 = 0xe 4568 * VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of 4569 * first scan line of the SU region (applies to eDP v1.4b 4570 * and higher). 4571 * - HB2 = 0x6, HB3 = 0x10 4572 * VSC SDP supporting 3D stereo + Panel Replay. 4573 */ 4574 return 0; 4575 } else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) { 4576 /* 4577 * - HB2 = 0x5, HB3 = 0x13 4578 * VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry 4579 * Format. 
4580 */ 4581 vsc->pixelformat = (sdp->db[16] >> 4) & 0xf; 4582 vsc->colorimetry = sdp->db[16] & 0xf; 4583 vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1; 4584 4585 switch (sdp->db[17] & 0x7) { 4586 case 0x0: 4587 vsc->bpc = 6; 4588 break; 4589 case 0x1: 4590 vsc->bpc = 8; 4591 break; 4592 case 0x2: 4593 vsc->bpc = 10; 4594 break; 4595 case 0x3: 4596 vsc->bpc = 12; 4597 break; 4598 case 0x4: 4599 vsc->bpc = 16; 4600 break; 4601 default: 4602 MISSING_CASE(sdp->db[17] & 0x7); 4603 return -EINVAL; 4604 } 4605 4606 vsc->content_type = sdp->db[18] & 0x7; 4607 } else { 4608 return -EINVAL; 4609 } 4610 4611 return 0; 4612 } 4613 4614 static void 4615 intel_read_dp_as_sdp(struct intel_encoder *encoder, 4616 struct intel_crtc_state *crtc_state, 4617 struct drm_dp_as_sdp *as_sdp) 4618 { 4619 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 4620 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4621 unsigned int type = DP_SDP_ADAPTIVE_SYNC; 4622 struct dp_sdp sdp = {}; 4623 int ret; 4624 4625 if ((crtc_state->infoframes.enable & 4626 intel_hdmi_infoframe_enable(type)) == 0) 4627 return; 4628 4629 dig_port->read_infoframe(encoder, crtc_state, type, &sdp, 4630 sizeof(sdp)); 4631 4632 ret = intel_dp_as_sdp_unpack(as_sdp, &sdp, sizeof(sdp)); 4633 if (ret) 4634 drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP AS SDP\n"); 4635 } 4636 4637 static int 4638 intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe, 4639 const void *buffer, size_t size) 4640 { 4641 int ret; 4642 4643 const struct dp_sdp *sdp = buffer; 4644 4645 if (size < sizeof(struct dp_sdp)) 4646 return -EINVAL; 4647 4648 if (sdp->sdp_header.HB0 != 0) 4649 return -EINVAL; 4650 4651 if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM) 4652 return -EINVAL; 4653 4654 /* 4655 * Least Significant Eight Bits of (Data Byte Count – 1) 4656 * 1Dh (i.e., Data Byte Count = 30 bytes). 
4657 */ 4658 if (sdp->sdp_header.HB2 != 0x1D) 4659 return -EINVAL; 4660 4661 /* Most Significant Two Bits of (Data Byte Count – 1), Clear to 00b. */ 4662 if ((sdp->sdp_header.HB3 & 0x3) != 0) 4663 return -EINVAL; 4664 4665 /* INFOFRAME SDP Version Number */ 4666 if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13) 4667 return -EINVAL; 4668 4669 /* CTA Header Byte 2 (INFOFRAME Version Number) */ 4670 if (sdp->db[0] != 1) 4671 return -EINVAL; 4672 4673 /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */ 4674 if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE) 4675 return -EINVAL; 4676 4677 ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2], 4678 HDMI_DRM_INFOFRAME_SIZE); 4679 4680 return ret; 4681 } 4682 4683 static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder, 4684 struct intel_crtc_state *crtc_state, 4685 struct drm_dp_vsc_sdp *vsc) 4686 { 4687 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 4688 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4689 unsigned int type = DP_SDP_VSC; 4690 struct dp_sdp sdp = {}; 4691 int ret; 4692 4693 if ((crtc_state->infoframes.enable & 4694 intel_hdmi_infoframe_enable(type)) == 0) 4695 return; 4696 4697 dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp)); 4698 4699 ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp)); 4700 4701 if (ret) 4702 drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n"); 4703 } 4704 4705 static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder, 4706 struct intel_crtc_state *crtc_state, 4707 struct hdmi_drm_infoframe *drm_infoframe) 4708 { 4709 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 4710 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4711 unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA; 4712 struct dp_sdp sdp = {}; 4713 int ret; 4714 4715 if ((crtc_state->infoframes.enable & 4716 intel_hdmi_infoframe_enable(type)) == 0) 4717 return; 4718 4719 
dig_port->read_infoframe(encoder, crtc_state, type, &sdp, 4720 sizeof(sdp)); 4721 4722 ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp, 4723 sizeof(sdp)); 4724 4725 if (ret) 4726 drm_dbg_kms(&dev_priv->drm, 4727 "Failed to unpack DP HDR Metadata Infoframe SDP\n"); 4728 } 4729 4730 void intel_read_dp_sdp(struct intel_encoder *encoder, 4731 struct intel_crtc_state *crtc_state, 4732 unsigned int type) 4733 { 4734 switch (type) { 4735 case DP_SDP_VSC: 4736 intel_read_dp_vsc_sdp(encoder, crtc_state, 4737 &crtc_state->infoframes.vsc); 4738 break; 4739 case HDMI_PACKET_TYPE_GAMUT_METADATA: 4740 intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state, 4741 &crtc_state->infoframes.drm.drm); 4742 break; 4743 case DP_SDP_ADAPTIVE_SYNC: 4744 intel_read_dp_as_sdp(encoder, crtc_state, 4745 &crtc_state->infoframes.as_sdp); 4746 break; 4747 default: 4748 MISSING_CASE(type); 4749 break; 4750 } 4751 } 4752 4753 static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp) 4754 { 4755 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4756 int status = 0; 4757 int test_link_rate; 4758 u8 test_lane_count, test_link_bw; 4759 /* (DP CTS 1.2) 4760 * 4.3.1.11 4761 */ 4762 /* Read the TEST_LANE_COUNT and TEST_LINK_RTAE fields (DP CTS 3.1.4) */ 4763 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT, 4764 &test_lane_count); 4765 4766 if (status <= 0) { 4767 drm_dbg_kms(&i915->drm, "Lane count read failed\n"); 4768 return DP_TEST_NAK; 4769 } 4770 test_lane_count &= DP_MAX_LANE_COUNT_MASK; 4771 4772 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE, 4773 &test_link_bw); 4774 if (status <= 0) { 4775 drm_dbg_kms(&i915->drm, "Link Rate read failed\n"); 4776 return DP_TEST_NAK; 4777 } 4778 test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw); 4779 4780 /* Validate the requested link rate and lane count */ 4781 if (!intel_dp_link_params_valid(intel_dp, test_link_rate, 4782 test_lane_count)) 4783 return DP_TEST_NAK; 4784 4785 
intel_dp->compliance.test_lane_count = test_lane_count; 4786 intel_dp->compliance.test_link_rate = test_link_rate; 4787 4788 return DP_TEST_ACK; 4789 } 4790 4791 static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp) 4792 { 4793 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4794 u8 test_pattern; 4795 u8 test_misc; 4796 __be16 h_width, v_height; 4797 int status = 0; 4798 4799 /* Read the TEST_PATTERN (DP CTS 3.1.5) */ 4800 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN, 4801 &test_pattern); 4802 if (status <= 0) { 4803 drm_dbg_kms(&i915->drm, "Test pattern read failed\n"); 4804 return DP_TEST_NAK; 4805 } 4806 if (test_pattern != DP_COLOR_RAMP) 4807 return DP_TEST_NAK; 4808 4809 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI, 4810 &h_width, 2); 4811 if (status <= 0) { 4812 drm_dbg_kms(&i915->drm, "H Width read failed\n"); 4813 return DP_TEST_NAK; 4814 } 4815 4816 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI, 4817 &v_height, 2); 4818 if (status <= 0) { 4819 drm_dbg_kms(&i915->drm, "V Height read failed\n"); 4820 return DP_TEST_NAK; 4821 } 4822 4823 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0, 4824 &test_misc); 4825 if (status <= 0) { 4826 drm_dbg_kms(&i915->drm, "TEST MISC read failed\n"); 4827 return DP_TEST_NAK; 4828 } 4829 if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB) 4830 return DP_TEST_NAK; 4831 if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA) 4832 return DP_TEST_NAK; 4833 switch (test_misc & DP_TEST_BIT_DEPTH_MASK) { 4834 case DP_TEST_BIT_DEPTH_6: 4835 intel_dp->compliance.test_data.bpc = 6; 4836 break; 4837 case DP_TEST_BIT_DEPTH_8: 4838 intel_dp->compliance.test_data.bpc = 8; 4839 break; 4840 default: 4841 return DP_TEST_NAK; 4842 } 4843 4844 intel_dp->compliance.test_data.video_pattern = test_pattern; 4845 intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width); 4846 intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height); 4847 /* Set test active flag 
here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return DP_TEST_ACK;
}

/*
 * Handle the DP CTS EDID-read automated test: report either a failsafe or
 * preferred resolution depending on whether the EDID read saw NAKs/DEFERs
 * or corruption, and write back the last EDID block's checksum on success.
 */
static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 test_result = DP_TEST_ACK;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;

	if (intel_connector->detect_edid == NULL ||
	    connector->edid_corrupt ||
	    intel_dp->aux.i2c_defer_count > 6) {
		/* Check EDID read for NACKs, DEFERs and corruption
		 * (DP CTS 1.2 Core r1.1)
		 *    4.2.2.4 : Failed EDID read, I2C_NAK
		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
		 *    4.2.2.6 : EDID corruption detected
		 * Use failsafe mode for all cases
		 */
		if (intel_dp->aux.i2c_nack_count > 0 ||
		    intel_dp->aux.i2c_defer_count > 0)
			drm_dbg_kms(&i915->drm,
				    "EDID read had %d NACKs, %d DEFERs\n",
				    intel_dp->aux.i2c_nack_count,
				    intel_dp->aux.i2c_defer_count);
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
	} else {
		/* FIXME: Get rid of drm_edid_raw() */
		const struct edid *block = drm_edid_raw(intel_connector->detect_edid);

		/* We have to write the checksum of the last block read */
		block += block->extensions;

		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
				       block->checksum) <= 0)
			drm_dbg_kms(&i915->drm,
				    "Failed to write EDID checksum\n");

		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return test_result;
}

/*
 * Program the source-side PHY compliance pattern generator (DDI_DP_COMP_CTL
 * and friends) according to the pattern requested by the sink in the PHY
 * test parameters cached in intel_dp->compliance.
 */
static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp,
					const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
	struct drm_dp_phy_test_params *data =
		&intel_dp->compliance.test_data.phytest;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	enum pipe pipe = crtc->pipe;
	u32 pattern_val;

	switch (data->phy_pattern) {
	case DP_LINK_QUAL_PATTERN_DISABLE:
		drm_dbg_kms(&dev_priv->drm, "Disable Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
		if (DISPLAY_VER(dev_priv) >= 10)
			intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
				     DP_TP_CTL_TRAIN_PAT4_SEL_MASK | DP_TP_CTL_LINK_TRAIN_MASK,
				     DP_TP_CTL_LINK_TRAIN_NORMAL);
		break;
	case DP_LINK_QUAL_PATTERN_D10_2:
		drm_dbg_kms(&dev_priv->drm, "Set D10.2 Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
		break;
	case DP_LINK_QUAL_PATTERN_ERROR_RATE:
		drm_dbg_kms(&dev_priv->drm, "Set Error Count Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE |
			       DDI_DP_COMP_CTL_SCRAMBLED_0);
		break;
	case DP_LINK_QUAL_PATTERN_PRBS7:
		drm_dbg_kms(&dev_priv->drm, "Set PRBS7 Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
		break;
	case DP_LINK_QUAL_PATTERN_80BIT_CUSTOM:
		/*
		 * FIXME: Ideally pattern should come from DPCD 0x250. As
		 * current firmware of DPR-100 could not set it, so hardcoding
		 * now for compliance test.
		 */
		drm_dbg_kms(&dev_priv->drm,
			    "Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n");
		pattern_val = 0x3e0f83e0;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val);
		pattern_val = 0x0f83e0f8;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val);
		pattern_val = 0x0000f83e;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val);
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE |
			       DDI_DP_COMP_CTL_CUSTOM80);
		break;
	case DP_LINK_QUAL_PATTERN_CP2520_PAT_1:
		/*
		 * FIXME: Ideally pattern should come from DPCD 0x24A. As
		 * current firmware of DPR-100 could not set it, so hardcoding
		 * now for compliance test.
		 */
		drm_dbg_kms(&dev_priv->drm, "Set HBR2 compliance Phy Test Pattern\n");
		pattern_val = 0xFB;
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 |
			       pattern_val);
		break;
	case DP_LINK_QUAL_PATTERN_CP2520_PAT_3:
		if (DISPLAY_VER(dev_priv) < 10) {
			drm_warn(&dev_priv->drm, "Platform does not support TPS4\n");
			break;
		}
		drm_dbg_kms(&dev_priv->drm, "Set TPS4 compliance Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
		intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
			     DP_TP_CTL_TRAIN_PAT4_SEL_MASK | DP_TP_CTL_LINK_TRAIN_MASK,
			     DP_TP_CTL_TRAIN_PAT4_SEL_TP4A | DP_TP_CTL_LINK_TRAIN_PAT4);
		break;
	default:
		drm_warn(&dev_priv->drm, "Invalid Phy Test Pattern\n");
	}
}

/*
 * Service a pending PHY compliance test request: read the current link
 * status, update vswing/pre-emphasis, program the requested pattern on the
 * source, and mirror the training/pattern settings to the sink over AUX.
 */
static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_dp_phy_test_params *data =
		&intel_dp->compliance.test_data.phytest;
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux,
DP_PHY_DPRX, 4991 link_status) < 0) { 4992 drm_dbg_kms(&i915->drm, "failed to get link status\n"); 4993 return; 4994 } 4995 4996 /* retrieve vswing & pre-emphasis setting */ 4997 intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, 4998 link_status); 4999 5000 intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX); 5001 5002 intel_dp_phy_pattern_update(intel_dp, crtc_state); 5003 5004 drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET, 5005 intel_dp->train_set, crtc_state->lane_count); 5006 5007 drm_dp_set_phy_test_pattern(&intel_dp->aux, data, 5008 intel_dp->dpcd[DP_DPCD_REV]); 5009 } 5010 5011 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) 5012 { 5013 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5014 struct drm_dp_phy_test_params *data = 5015 &intel_dp->compliance.test_data.phytest; 5016 5017 if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) { 5018 drm_dbg_kms(&i915->drm, "DP Phy Test pattern AUX read failure\n"); 5019 return DP_TEST_NAK; 5020 } 5021 5022 /* Set test active flag here so userspace doesn't interrupt things */ 5023 intel_dp->compliance.test_active = true; 5024 5025 return DP_TEST_ACK; 5026 } 5027 5028 static void intel_dp_handle_test_request(struct intel_dp *intel_dp) 5029 { 5030 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5031 u8 response = DP_TEST_NAK; 5032 u8 request = 0; 5033 int status; 5034 5035 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request); 5036 if (status <= 0) { 5037 drm_dbg_kms(&i915->drm, 5038 "Could not read test request from sink\n"); 5039 goto update_status; 5040 } 5041 5042 switch (request) { 5043 case DP_TEST_LINK_TRAINING: 5044 drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n"); 5045 response = intel_dp_autotest_link_training(intel_dp); 5046 break; 5047 case DP_TEST_LINK_VIDEO_PATTERN: 5048 drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n"); 5049 response = intel_dp_autotest_video_pattern(intel_dp); 5050 break; 5051 case 
DP_TEST_LINK_EDID_READ: 5052 drm_dbg_kms(&i915->drm, "EDID test requested\n"); 5053 response = intel_dp_autotest_edid(intel_dp); 5054 break; 5055 case DP_TEST_LINK_PHY_TEST_PATTERN: 5056 drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n"); 5057 response = intel_dp_autotest_phy_pattern(intel_dp); 5058 break; 5059 default: 5060 drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n", 5061 request); 5062 break; 5063 } 5064 5065 if (response & DP_TEST_ACK) 5066 intel_dp->compliance.test_type = request; 5067 5068 update_status: 5069 status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response); 5070 if (status <= 0) 5071 drm_dbg_kms(&i915->drm, 5072 "Could not write test response to sink\n"); 5073 } 5074 5075 static bool intel_dp_link_ok(struct intel_dp *intel_dp, 5076 u8 link_status[DP_LINK_STATUS_SIZE]) 5077 { 5078 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 5079 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 5080 bool uhbr = intel_dp->link_rate >= 1000000; 5081 bool ok; 5082 5083 if (uhbr) 5084 ok = drm_dp_128b132b_lane_channel_eq_done(link_status, 5085 intel_dp->lane_count); 5086 else 5087 ok = drm_dp_channel_eq_ok(link_status, intel_dp->lane_count); 5088 5089 if (ok) 5090 return true; 5091 5092 intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status); 5093 drm_dbg_kms(&i915->drm, 5094 "[ENCODER:%d:%s] %s link not ok, retraining\n", 5095 encoder->base.base.id, encoder->base.name, 5096 uhbr ? 
"128b/132b" : "8b/10b"); 5097 5098 return false; 5099 } 5100 5101 static void 5102 intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, u8 *ack) 5103 { 5104 bool handled = false; 5105 5106 drm_dp_mst_hpd_irq_handle_event(&intel_dp->mst_mgr, esi, ack, &handled); 5107 5108 if (esi[1] & DP_CP_IRQ) { 5109 intel_hdcp_handle_cp_irq(intel_dp->attached_connector); 5110 ack[1] |= DP_CP_IRQ; 5111 } 5112 } 5113 5114 static bool intel_dp_mst_link_status(struct intel_dp *intel_dp) 5115 { 5116 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 5117 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 5118 u8 link_status[DP_LINK_STATUS_SIZE] = {}; 5119 const size_t esi_link_status_size = DP_LINK_STATUS_SIZE - 2; 5120 5121 if (drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS_ESI, link_status, 5122 esi_link_status_size) != esi_link_status_size) { 5123 drm_err(&i915->drm, 5124 "[ENCODER:%d:%s] Failed to read link status\n", 5125 encoder->base.base.id, encoder->base.name); 5126 return false; 5127 } 5128 5129 return intel_dp_link_ok(intel_dp, link_status); 5130 } 5131 5132 /** 5133 * intel_dp_check_mst_status - service any pending MST interrupts, check link status 5134 * @intel_dp: Intel DP struct 5135 * 5136 * Read any pending MST interrupts, call MST core to handle these and ack the 5137 * interrupts. Check if the main and AUX link state is ok. 5138 * 5139 * Returns: 5140 * - %true if pending interrupts were serviced (or no interrupts were 5141 * pending) w/o detecting an error condition. 5142 * - %false if an error condition - like AUX failure or a loss of link - is 5143 * detected, or another condition - like a DP tunnel BW state change - needs 5144 * servicing from the hotplug work. 
 */
static bool
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	bool link_ok = true;
	bool reprobe_needed = false;

	drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);

	/* Keep servicing ESI events until the sink has nothing left to ack. */
	for (;;) {
		u8 esi[4] = {};
		u8 ack[4] = {};

		if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) {
			drm_dbg_kms(&i915->drm,
				    "failed to get ESI - device may have failed\n");
			link_ok = false;

			break;
		}

		drm_dbg_kms(&i915->drm, "DPRX ESI: %4ph\n", esi);

		/* Check the main link state only while streams are active */
		if (intel_dp->active_mst_links > 0 && link_ok &&
		    esi[3] & LINK_STATUS_CHANGED) {
			if (!intel_dp_mst_link_status(intel_dp))
				link_ok = false;
			ack[3] |= LINK_STATUS_CHANGED;
		}

		intel_dp_mst_hpd_irq(intel_dp, esi, ack);

		if (esi[3] & DP_TUNNELING_IRQ) {
			if (drm_dp_tunnel_handle_irq(i915->display.dp_tunnel_mgr,
						     &intel_dp->aux))
				reprobe_needed = true;
			ack[3] |= DP_TUNNELING_IRQ;
		}

		/* Nothing got handled this iteration: all events serviced */
		if (mem_is_zero(ack, sizeof(ack)))
			break;

		if (!intel_dp_ack_sink_irq_esi(intel_dp, ack))
			drm_dbg_kms(&i915->drm, "Failed to ack ESI\n");

		if (ack[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY))
			drm_dp_mst_hpd_irq_send_new_request(&intel_dp->mst_mgr);
	}

	if (!link_ok || intel_dp->link.force_retrain)
		intel_encoder_link_check_queue_work(encoder, 0);

	return !reprobe_needed;
}

/*
 * If the PCON reports its HDMI (FRL) link as inactive while we still
 * consider it trained, disable the HDMI link on the PCON, read out the FRL
 * error counters and retry FRL training (or fall back to TMDS mode).
 */
static void
intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp)
{
	bool is_active;
	u8 buf = 0;

	is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux);
	if (intel_dp->frl.is_trained && !is_active) {
		if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf) < 0)
			return;

		buf &= ~DP_PCON_ENABLE_HDMI_LINK;
		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf) < 0)
			return;

		drm_dp_pcon_hdmi_frl_link_error_count(&intel_dp->aux, &intel_dp->attached_connector->base);

		intel_dp->frl.is_trained = false;

		/* Restart FRL training or fall back to TMDS mode */
		intel_dp_check_frl_training(intel_dp);
	}
}

/*
 * Decide whether the main link needs retraining: only for an already
 * trained link, never while PSR controls the main link, and only when the
 * cached link parameters are still valid and either a retrain was forced,
 * sequential training failures occurred, or the DPRX link status is bad.
 */
static bool
intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_dp->link_trained)
		return false;

	/*
	 * While PSR source HW is enabled, it will control main-link sending
	 * frames, enabling and disabling it so trying to do a retrain will fail
	 * as the link would or not be on or it could mix training patterns
	 * and frame data at the same time causing retrain to fail.
	 * Also when exiting PSR, HW will retrain the link anyways fixing
	 * any link status error.
	 */
	if (intel_psr_enabled(intel_dp))
		return false;

	if (intel_dp->link.force_retrain)
		return true;

	if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
					     link_status) < 0)
		return false;

	/*
	 * Validate the cached values of intel_dp->link_rate and
	 * intel_dp->lane_count before attempting to retrain.
	 *
	 * FIXME would be nice to use the crtc state here, but since
	 * we need to call this from the short HPD handler that seems
	 * a bit hard.
	 */
	if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
					intel_dp->lane_count))
		return false;

	if (intel_dp->link.retrain_disabled)
		return false;

	if (intel_dp->link.seq_train_failures)
		return true;

	/* Retrain if link not ok */
	return !intel_dp_link_ok(intel_dp, link_status);
}

/*
 * Check whether the given connector state routes to this DP port, either
 * via the SST encoder or via one of its per-pipe MST stream encoders.
 */
static bool intel_dp_has_connector(struct intel_dp *intel_dp,
				   const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_encoder *encoder;
	enum pipe pipe;

	if (!conn_state->best_encoder)
		return false;

	/* SST */
	encoder = &dp_to_dig_port(intel_dp)->base;
	if (conn_state->best_encoder == &encoder->base)
		return true;

	/* MST */
	for_each_pipe(i915, pipe) {
		encoder = &intel_dp->mst_encoders[pipe]->base;
		if (conn_state->best_encoder == &encoder->base)
			return true;
	}

	return false;
}

/*
 * Collect the mask of active pipes currently driven by this DP port,
 * locking each affected crtc via @ctx. Returns 0 or a negative error
 * (including -EDEADLK, in which case the caller must back off and retry).
 */
int intel_dp_get_active_pipes(struct intel_dp *intel_dp,
			      struct drm_modeset_acquire_ctx *ctx,
			      u8 *pipe_mask)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	int ret = 0;

	*pipe_mask = 0;

	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state =
			connector->base.state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!intel_dp_has_connector(intel_dp, conn_state))
			continue;

		crtc = to_intel_crtc(conn_state->crtc);
		if (!crtc)
			continue;

		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			break;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));

		if (!crtc_state->hw.active)
			continue;

		/* Wait (bounded) for any in-flight commit on this connector */
		if (conn_state->commit)
			drm_WARN_ON(&i915->drm,
				    !wait_for_completion_timeout(&conn_state->commit->hw_done,
								 msecs_to_jiffies(5000)));

		*pipe_mask |= BIT(crtc->pipe);
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}

/* An MST port counts as connected regardless of the connector status. */
static bool intel_dp_is_connected(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;

	return connector->base.status == connector_status_connected ||
		intel_dp->is_mst;
}

/*
 * Retrain the link on this encoder if needed: take the connection mutex,
 * collect the active pipes (re-checking the retrain condition after the
 * locks are held) and commit the affected pipes to retrain. Clears the
 * force_retrain flag once a commit was attempted (unless it deadlocked).
 */
static int intel_dp_retrain_link(struct intel_encoder *encoder,
				 struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	u8 pipe_mask;
	int ret;

	if (!intel_dp_is_connected(intel_dp))
		return 0;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	if (!intel_dp_needs_link_retrain(intel_dp))
		return 0;

	ret = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask);
	if (ret)
		return ret;

	if (pipe_mask == 0)
		return 0;

	/* Re-check now that the crtc locks are held */
	if (!intel_dp_needs_link_retrain(intel_dp))
		return 0;

	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link (forced %s)\n",
		    encoder->base.base.id, encoder->base.name,
		    str_yes_no(intel_dp->link.force_retrain));

	ret = intel_modeset_commit_pipes(dev_priv, pipe_mask, ctx);
	if (ret == -EDEADLK)
		return ret;

	intel_dp->link.force_retrain = false;

	if (ret)
		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] link retraining failed: %pe\n",
			    encoder->base.base.id, encoder->base.name,
			    ERR_PTR(ret));

	return ret;
}

/* Run a link retrain under a deadlock-retrying modeset lock context. */
void intel_dp_link_check(struct intel_encoder *encoder)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	intel_modeset_lock_ctx_retry(&ctx, NULL, 0, ret)
		ret = intel_dp_retrain_link(encoder, &ctx);
}

/* Queue the link-check work if the link looks like it needs retraining. */
void intel_dp_check_link_state(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;

	if (!intel_dp_is_connected(intel_dp))
		return;

	if (!intel_dp_needs_link_retrain(intel_dp))
		return;

	intel_encoder_link_check_queue_work(encoder, 0);
}

/*
 * Like intel_dp_get_active_pipes(), but for the PHY compliance test:
 * crtcs with a still-pending commit are skipped instead of waited for.
 */
static int intel_dp_prep_phy_test(struct intel_dp *intel_dp,
				  struct drm_modeset_acquire_ctx *ctx,
				  u8 *pipe_mask)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	int ret = 0;

	*pipe_mask = 0;

	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state =
			connector->base.state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!intel_dp_has_connector(intel_dp, conn_state))
			continue;

		crtc = to_intel_crtc(conn_state->crtc);
		if (!crtc)
			continue;

		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			break;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));

		if (!crtc_state->hw.active)
			continue;

		if (conn_state->commit &&
		    !try_wait_for_completion(&conn_state->commit->hw_done))
			continue;

		*pipe_mask |= BIT(crtc->pipe);
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}

/*
 * Apply the requested PHY compliance test on this encoder's active pipe.
 * For MST on display version 12+ the test is run on the master transcoder
 * only. May return -EDEADLK, in which case the caller backs off and retries.
 */
static int intel_dp_do_phy_test(struct intel_encoder *encoder,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc;
	u8 pipe_mask;
	int ret;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	ret = intel_dp_prep_phy_test(intel_dp, ctx, &pipe_mask);
	if (ret)
		return ret;

	if (pipe_mask == 0)
		return 0;

	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] PHY test\n",
		    encoder->base.base.id, encoder->base.name);

	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* test on the MST master transcoder */
		if (DISPLAY_VER(dev_priv) >= 12 &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
		    !intel_dp_mst_is_master_trans(crtc_state))
			continue;

		intel_dp_process_phy_request(intel_dp, crtc_state);
		break;
	}

	return 0;
}

/* Run the PHY compliance test, retrying on modeset lock contention. */
void intel_dp_phy_test(struct intel_encoder *encoder)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);

	for (;;) {
		ret = intel_dp_do_phy_test(encoder, &ctx);

		if (ret == -EDEADLK) {
			drm_modeset_backoff(&ctx);
			continue;
		}

		break;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_WARN(encoder->base.dev, ret,
		 "Acquiring modeset locks failed with %i\n", ret);
}

/*
 * Read and ack the device service IRQ vector (DPCD 1.1+) and dispatch
 * automated test requests and content protection IRQs.
 */
static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 val;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
		return;

	/* Ack the IRQ bits before handling them */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);

	if (val & DP_AUTOMATED_TEST_REQUEST)
		intel_dp_handle_test_request(intel_dp);

	if (val & DP_CP_IRQ)
		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);

	if (val & DP_SINK_SPECIFIC_IRQ)
		drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
}

/*
 * Read and ack the link service IRQ vector (ESI0, DPCD 1.1+), handling DP
 * tunnel BW changes and HDMI (PCON FRL) link status changes.
 *
 * Returns %true if a DP tunnel IRQ requires reprobing from the hotplug work.
 */
static bool intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	bool reprobe_needed = false;
	u8 val;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return false;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val)
		return false;

	if ((val & DP_TUNNELING_IRQ) &&
	    drm_dp_tunnel_handle_irq(i915->display.dp_tunnel_mgr,
				     &intel_dp->aux))
		reprobe_needed = true;

	/* Ack the handled IRQ bits; bail if the ack write fails */
	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1)
		return reprobe_needed;

	if (val & HDMI_LINK_STATUS_CHANGED)
		intel_dp_handle_hdmi_link_status_change(intel_dp);

	return reprobe_needed;
}

/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 *
 * intel_dp_short_pulse -  handles short pulse interrupts
 * when full detection is not required.
 * Returns %true if short pulse is handled and full detection
 * is NOT required and %false otherwise.
 */
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 old_sink_count = intel_dp->sink_count;
	bool reprobe_needed = false;
	bool ret;

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

	/*
	 * Now read the DPCD to see if it's actually running
	 * If the current value of sink count doesn't match with
	 * the value that was stored earlier or dpcd read failed
	 * we need to do full detection
	 */
	ret = intel_dp_get_dpcd(intel_dp);

	if ((old_sink_count != intel_dp->sink_count) || !ret) {
		/* No need to proceed if we are going to do full detect */
		return false;
	}

	intel_dp_check_device_service_irq(intel_dp);
	reprobe_needed = intel_dp_check_link_service_irq(intel_dp);

	/* Handle CEC interrupts, if any */
	drm_dp_cec_irq(&intel_dp->aux);

	intel_dp_check_link_state(intel_dp);

	intel_psr_short_pulse(intel_dp);

	switch (intel_dp->compliance.test_type) {
	case DP_TEST_LINK_TRAINING:
		drm_dbg_kms(&dev_priv->drm,
			    "Link Training Compliance Test requested\n");
		/* Send a Hotplug Uevent to userspace to start modeset */
		drm_kms_helper_hotplug_event(&dev_priv->drm);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		drm_dbg_kms(&dev_priv->drm,
			    "PHY test pattern Compliance Test requested\n");
		/*
		 * Schedule long hpd to do the test
		 *
		 * FIXME get rid of the ad-hoc phy test modeset code
		 * and properly incorporate it into the normal modeset.
		 */
		reprobe_needed = true;
	}

	return !reprobe_needed;
}

/* XXX this is probably wrong for multiple downstream ports */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u8 *dpcd = intel_dp->dpcd;
	u8 type;

	if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp)))
		return connector_status_connected;

	lspcon_resume(dig_port);

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	intel_dp->mst_detect = intel_dp_mst_detect(intel_dp);

	/* if there's no downstream port, we're done */
	if (!drm_dp_is_branch(dpcd))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp_has_sink_count(intel_dp) &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		return intel_dp->sink_count ?
			connector_status_connected : connector_status_disconnected;
	}

	if (intel_dp->mst_detect == DRM_DP_MST)
		return connector_status_connected;

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}

/* eDP panels are always reported as connected. */
static enum drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
	return connector_status_connected;
}

/* Take the digital port's HPD lock, if the port provides one. */
void intel_digital_port_lock(struct intel_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);

	if (dig_port->lock)
		dig_port->lock(dig_port);
}

/* Release the digital port's HPD lock, if the port provides one. */
void intel_digital_port_unlock(struct intel_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);

	if (dig_port->unlock)
		dig_port->unlock(dig_port);
}

/*
 * intel_digital_port_connected_locked - is the specified port connected?
 * @encoder: intel_encoder
 *
 * In cases where there's a connector physically connected but it can't be used
 * by our hardware we also return false, since the rest of the driver should
 * pretty much treat the port as disconnected.
 * This is relevant for type-C (starting on ICL) where there's ownership
 * involved.
 *
 * The caller must hold the lock acquired by calling intel_digital_port_lock()
 * when calling this function.
 *
 * Return %true if port is connected, %false otherwise.
 */
bool intel_digital_port_connected_locked(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	bool is_glitch_free = intel_tc_port_handles_hpd_glitches(dig_port);
	bool is_connected = false;
	intel_wakeref_t wakeref;

	with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
		/* Poll for up to ~4ms to ride out HPD glitches the TC port
		 * doesn't filter itself. */
		unsigned long wait_expires = jiffies + msecs_to_jiffies_timeout(4);

		do {
			is_connected = dig_port->connected(encoder);
			if (is_connected || is_glitch_free)
				break;
			usleep_range(10, 30);
		} while (time_before(jiffies, wait_expires));
	}

	return is_connected;
}

/* Locked wrapper around intel_digital_port_connected_locked(). */
bool intel_digital_port_connected(struct intel_encoder *encoder)
{
	bool ret;

	intel_digital_port_lock(encoder);
	ret = intel_digital_port_connected_locked(encoder);
	intel_digital_port_unlock(encoder);

	return ret;
}

/*
 * Return a fresh reference to the connector's EDID: a copy of the panel's
 * fixed EDID when one exists, otherwise a fresh DDC read. The caller owns
 * the returned EDID and must free it.
 */
static const struct drm_edid *
intel_dp_get_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	const struct drm_edid *fixed_edid = connector->panel.fixed_edid;

	/* Use panel fixed edid if we have one */
	if (fixed_edid) {
		/* invalid edid */
		if (IS_ERR(fixed_edid))
			return NULL;

		return drm_edid_dup(fixed_edid);
	}

	return drm_edid_read_ddc(&connector->base, &intel_dp->aux.ddc);
}

/*
 * Refresh the cached downstream facing port (DFP) capabilities from the
 * branch device DPCD and the downstream sink's EDID.
 */
static void
intel_dp_update_dfp(struct intel_dp *intel_dp,
		    const struct drm_edid *drm_edid)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;

	intel_dp->dfp.max_bpc =
		drm_dp_downstream_max_bpc(intel_dp->dpcd,
					  intel_dp->downstream_ports, drm_edid);

	intel_dp->dfp.max_dotclock =
		drm_dp_downstream_max_dotclock(intel_dp->dpcd,
					       intel_dp->downstream_ports);

	intel_dp->dfp.min_tmds_clock =
		drm_dp_downstream_min_tmds_clock(intel_dp->dpcd,
						 intel_dp->downstream_ports,
						 drm_edid);
	intel_dp->dfp.max_tmds_clock =
		drm_dp_downstream_max_tmds_clock(intel_dp->dpcd,
						 intel_dp->downstream_ports,
						 drm_edid);

	intel_dp->dfp.pcon_max_frl_bw =
		drm_dp_get_pcon_max_frl_bw(intel_dp->dpcd,
					   intel_dp->downstream_ports);

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d, PCON Max FRL BW %dGbps\n",
		    connector->base.base.id, connector->base.name,
		    intel_dp->dfp.max_bpc,
		    intel_dp->dfp.max_dotclock,
		    intel_dp->dfp.min_tmds_clock,
		    intel_dp->dfp.max_tmds_clock,
		    intel_dp->dfp.pcon_max_frl_bw);

	intel_dp_get_pcon_dsc_cap(intel_dp);
}

/*
 * YCbCr 4:2:0 output is possible when the source can emit it natively (and
 * any branch device passes it through), or when a format the source can
 * emit (RGB or YCbCr 4:4:4) is convertible to 4:2:0 by the DFP.
 */
static bool
intel_dp_can_ycbcr420(struct intel_dp *intel_dp)
{
	if (source_can_output(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR420) &&
	    (!drm_dp_is_branch(intel_dp->dpcd) || intel_dp->dfp.ycbcr420_passthrough))
		return true;

	if (source_can_output(intel_dp, INTEL_OUTPUT_FORMAT_RGB) &&
	    dfp_can_convert_from_rgb(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR420))
		return true;

	if (source_can_output(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR444) &&
	    dfp_can_convert_from_ycbcr444(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR420))
		return true;

	return false;
}

/*
 * Refresh the cached DFP YCbCr conversion capabilities and derive whether
 * the connector may use YCbCr 4:2:0 output.
 */
static void
intel_dp_update_420(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;

	intel_dp->dfp.ycbcr420_passthrough =
		drm_dp_downstream_420_passthrough(intel_dp->dpcd,
						  intel_dp->downstream_ports);
	/* on-board LSPCON always assumed to support 4:4:4->4:2:0 conversion */
	intel_dp->dfp.ycbcr_444_to_420 =
		dp_to_dig_port(intel_dp)->lspcon.active ||
		drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd,
							intel_dp->downstream_ports);
	intel_dp->dfp.rgb_to_ycbcr =
		drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
							  intel_dp->downstream_ports,
							  DP_DS_HDMI_BT709_RGB_YCBCR_CONV);

	connector->base.ycbcr_420_allowed = intel_dp_can_ycbcr420(intel_dp);

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] RGB->YcbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
		    connector->base.base.id, connector->base.name,
		    str_yes_no(intel_dp->dfp.rgb_to_ycbcr),
		    str_yes_no(connector->base.ycbcr_420_allowed),
		    str_yes_no(intel_dp->dfp.ycbcr_444_to_420));
}

/*
 * Read the EDID and cache it on the connector, then update all EDID-derived
 * state: display info, VRR capability, DFP capabilities, 4:2:0 support and
 * the CEC physical address.
 */
static void
intel_dp_set_edid(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	const struct drm_edid *drm_edid;
	bool vrr_capable;

	intel_dp_unset_edid(intel_dp);
	drm_edid = intel_dp_get_edid(intel_dp);
	connector->detect_edid = drm_edid;

	/* Below we depend on display info having been updated */
	drm_edid_connector_update(&connector->base, drm_edid);

	vrr_capable = intel_vrr_is_capable(connector);
	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] VRR capable: %s\n",
		    connector->base.base.id, connector->base.name, str_yes_no(vrr_capable));
	drm_connector_set_vrr_capable_property(&connector->base, vrr_capable);

	intel_dp_update_dfp(intel_dp, drm_edid);
	intel_dp_update_420(intel_dp);

	drm_dp_cec_attach(&intel_dp->aux,
			  connector->base.display_info.source_physical_address);
}

static void
intel_dp_unset_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;

	/*
	 * Free the cached EDID and reset all EDID/DFP-derived state, the
	 * inverse of intel_dp_set_edid()/intel_dp_update_dfp().
	 */
	drm_dp_cec_unset_edid(&intel_dp->aux);
	drm_edid_free(connector->detect_edid);
	connector->detect_edid = NULL;

	intel_dp->dfp.max_bpc = 0;
	intel_dp->dfp.max_dotclock = 0;
	intel_dp->dfp.min_tmds_clock = 0;
	intel_dp->dfp.max_tmds_clock = 0;

	intel_dp->dfp.pcon_max_frl_bw = 0;

	intel_dp->dfp.ycbcr_444_to_420 = false;
	connector->base.ycbcr_420_allowed = false;

	drm_connector_set_vrr_capable_property(&connector->base,
					       false);
}

/* Read the sink's DSC capabilities into the connector, if the HW has DSC. */
static void
intel_dp_detect_dsc_caps(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
	if (!HAS_DSC(i915))
		return;

	/* eDP and DP use different DPCD revision fields to key the read */
	if (intel_dp_is_edp(intel_dp))
		intel_edp_get_dsc_sink_cap(intel_dp->edp_dpcd[0],
					   connector);
	else
		intel_dp_get_dsc_sink_cap(intel_dp->dpcd[DP_DPCD_REV],
					  connector);
}

/* Cache whether both source HW and sink support the Adaptive Sync SDP. */
static void
intel_dp_detect_sdp_caps(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	intel_dp->as_sdp_supported = HAS_AS_SDP(i915) &&
		drm_dp_as_sdp_supported(&intel_dp->aux, intel_dp->dpcd);
}

/*
 * ->detect_ctx() hook: determine connector status, and on a connected
 * sink (re)read DPCD-derived capabilities, rates and the EDID.
 * Returns a drm_connector_status value, or -EDEADLK for ctx backoff.
 */
static int
intel_dp_detect(struct drm_connector *connector,
		struct drm_modeset_acquire_ctx *ctx,
		bool force)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_connector *intel_connector =
		to_intel_connector(connector);
	struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum drm_connector_status status;
	int ret;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.id, connector->name);
	drm_WARN_ON(&dev_priv->drm,
		    !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));

	if (!intel_display_device_enabled(dev_priv))
		return connector_status_disconnected;

	/* Without HW access, keep reporting the last known status */
	if (!intel_display_driver_check_access(dev_priv))
		return connector->status;

	/* Can't disconnect eDP */
	if (intel_dp_is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(encoder))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status != connector_status_disconnected &&
	    !intel_dp_mst_verify_dpcd_state(intel_dp))
		/*
		 * This requires retrying detection for instance to re-enable
		 * the MST mode that got reset via a long HPD pulse. The retry
		 * will happen either via the hotplug handler's retry logic,
		 * ensured by setting the connector here to SST/disconnected,
		 * or via a userspace connector probing in response to the
		 * hotplug uevent sent when removing the MST connectors.
		 */
		status = connector_status_disconnected;

	if (status == connector_status_disconnected) {
		/* Forget everything learned from the previous sink */
		memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
		memset(intel_connector->dp.dsc_dpcd, 0, sizeof(intel_connector->dp.dsc_dpcd));
		intel_dp->psr.sink_panel_replay_support = false;
		intel_dp->psr.sink_panel_replay_su_support = false;

		intel_dp_mst_disconnect(intel_dp);

		intel_dp_tunnel_disconnect(intel_dp);

		goto out;
	}

	ret = intel_dp_tunnel_detect(intel_dp, ctx);
	if (ret == -EDEADLK)
		return ret;

	/* ret == 1: tunnel state changed, force userspace to reprobe */
	if (ret == 1)
		intel_connector->base.epoch_counter++;

	if (!intel_dp_is_edp(intel_dp))
		intel_psr_init_dpcd(intel_dp);

	intel_dp_detect_dsc_caps(intel_dp, intel_connector);

	intel_dp_detect_sdp_caps(intel_dp);

	if (intel_dp->reset_link_params) {
		intel_dp_reset_link_params(intel_dp);
		intel_dp->reset_link_params = false;
	}

	intel_dp_mst_configure(intel_dp);

	intel_dp_print_rates(intel_dp);

	if (intel_dp->is_mst) {
		/*
		 * If we are in MST mode then this connector
		 * won't appear connected or have anything
		 * with EDID on it
		 */
		status = connector_status_disconnected;
		goto out;
	}

	/*
	 * Some external monitors do not signal loss of link synchronization
	 * with an IRQ_HPD, so force a link status check.
	 *
	 * TODO: this probably became redundant, so remove it: the link state
	 * is rechecked/recovered now after modesets, where the loss of
	 * synchronization tends to occur.
	 */
	if (!intel_dp_is_edp(intel_dp))
		intel_dp_check_link_state(intel_dp);

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);
	if (intel_dp_is_edp(intel_dp) ||
	    to_intel_connector(connector)->detect_edid)
		status = connector_status_connected;

	intel_dp_check_device_service_irq(intel_dp);

out:
	/* Keep the EDID for MST; its connectors manage their own */
	if (status != connector_status_connected && !intel_dp->is_mst)
		intel_dp_unset_edid(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		drm_dp_set_subconnector_property(connector,
						 status,
						 intel_dp->dpcd,
						 intel_dp->downstream_ports);
	return status;
}

/*
 * ->force() hook: refresh the cached EDID for a connector whose status
 * userspace has forced, without performing a full detect cycle.
 */
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.id, connector->name);

	if (!intel_display_driver_check_access(dev_priv))
		return;

	intel_dp_unset_edid(intel_dp);

	if (connector->status != connector_status_connected)
		return;

	intel_dp_set_edid(intel_dp);
}

/* ->get_modes() hook: EDID modes plus fixed/downstream fallback modes. */
static int intel_dp_get_modes(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	int num_modes;

	/* drm_edid_connector_update() done in ->detect() or ->force() */
	num_modes = drm_edid_connector_add_modes(connector);

	/* Also add fixed mode, which may or may not be present in EDID */
	if
(intel_dp_is_edp(intel_attached_dp(intel_connector)))
		num_modes += intel_panel_get_modes(intel_connector);

	if (num_modes)
		return num_modes;

	/* No EDID modes: fall back to a mode from the downstream port caps */
	if (!intel_connector->detect_edid) {
		struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
		struct drm_display_mode *mode;

		mode = drm_dp_downstream_mode(connector->dev,
					      intel_dp->dpcd,
					      intel_dp->downstream_ports);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			num_modes++;
		}
	}

	return num_modes;
}

/*
 * ->late_register() hook: register the AUX channel and CEC adapter once
 * the connector's sysfs device exists, and set up LSPCON if present.
 */
static int
intel_dp_connector_register(struct drm_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->dev);
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_lspcon *lspcon = &dig_port->lspcon;
	int ret;

	ret = intel_connector_register(connector);
	if (ret)
		return ret;

	drm_dbg_kms(&i915->drm, "registering %s bus for %s\n",
		    intel_dp->aux.name, connector->kdev->kobj.name);

	/* Parent the AUX device under the connector's kernel device */
	intel_dp->aux.dev = connector->kdev;
	ret = drm_dp_aux_register(&intel_dp->aux);
	if (!ret)
		drm_dp_cec_register_connector(&intel_dp->aux, connector);

	if (!intel_bios_encoder_is_lspcon(dig_port->base.devdata))
		return ret;

	/*
	 * ToDo: Clean this up to handle lspcon init and resume more
	 * efficiently and streamlined.
	 */
	if (lspcon_init(dig_port)) {
		lspcon_detect_hdr_capability(lspcon);
		if (lspcon->hdr_supported)
			drm_connector_attach_hdr_output_metadata_property(connector);
	}

	return ret;
}

/* ->early_unregister() hook: tear down what _register() set up. */
static void
intel_dp_connector_unregister(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));

	drm_dp_cec_unregister_connector(&intel_dp->aux);
	drm_dp_aux_unregister(&intel_dp->aux);
	intel_connector_unregister(connector);
}

/*
 * Sync the connector's DSC decompression bookkeeping with the committed
 * crtc state (called during HW state readout/sanitization).
 */
void intel_dp_connector_sync_state(struct intel_connector *connector,
				   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	if (crtc_state && crtc_state->dsc.compression_enable) {
		drm_WARN_ON(&i915->drm, !connector->dp.dsc_decompression_aux);
		connector->dp.dsc_decompression_enabled = true;
	} else {
		connector->dp.dsc_decompression_enabled = false;
	}
}

/* Encoder teardown: flush pending work and release DP resources. */
void intel_dp_encoder_flush_work(struct drm_encoder *_encoder)
{
	struct intel_encoder *encoder = to_intel_encoder(_encoder);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &dig_port->dp;

	intel_encoder_link_check_flush_work(encoder);

	intel_dp_mst_encoder_cleanup(dig_port);

	intel_dp_tunnel_destroy(intel_dp);

	intel_pps_vdd_off_sync(intel_dp);

	/*
	 * Ensure power off delay is respected on module remove, so that we can
	 * reduce delays at driver probe. See pps_init_timestamps().
	 */
	intel_pps_wait_power_cycle(intel_dp);

	intel_dp_aux_fini(intel_dp);
}

/* Encoder suspend: make sure VDD is off and suspend any DP tunnel. */
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);

	intel_pps_vdd_off_sync(intel_dp);

	intel_dp_tunnel_suspend(intel_dp);
}

/* Encoder shutdown: let the panel power cycle complete before power-off. */
void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);

	intel_pps_wait_power_cycle(intel_dp);
}

/*
 * Add all connectors of the given tile group (and their planes) to the
 * atomic state and flag their crtcs for a full modeset, so tiled
 * displays are modeset as a unit. Returns 0 or a negative error code.
 */
static int intel_modeset_tile_group(struct intel_atomic_state *state,
				    int tile_group_id)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;
	int ret = 0;

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!connector->has_tile ||
		    connector->tile_group->id != tile_group_id)
			continue;

		conn_state = drm_atomic_get_connector_state(&state->base,
							    connector);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			break;
		}

		crtc = to_intel_crtc(conn_state->crtc);

		if (!crtc)
			continue;

		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			break;
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}

/*
 * Flag every enabled crtc whose cpu_transcoder is in the @transcoders
 * bitmask for a modeset, pulling its connectors and planes into @state.
 * Each bit is cleared as it is handled; all bits must be consumed.
 */
static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	if (transcoders == 0)
		return 0;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state;
		int ret;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->hw.enable)
			continue;

		if (!(transcoders & BIT(crtc_state->cpu_transcoder)))
			continue;

		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			return ret;

		transcoders &= ~BIT(crtc_state->cpu_transcoder);
	}

	/* Every requested transcoder should have been matched above */
	drm_WARN_ON(&dev_priv->drm, transcoders != 0);

	return 0;
}

/*
 * For a connector on a port-sync master/slave crtc, pull all synced
 * transcoders into the atomic state so they are modeset together.
 */
static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
				      struct drm_connector *connector)
{
	const struct drm_connector_state *old_conn_state =
		drm_atomic_get_old_connector_state(&state->base, connector);
	const struct intel_crtc_state *old_crtc_state;
	struct intel_crtc *crtc;
	u8 transcoders;

	crtc = to_intel_crtc(old_conn_state->crtc);
	if (!crtc)
		return 0;

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);

	if (!old_crtc_state->hw.active)
		return 0;

	transcoders = old_crtc_state->sync_mode_slaves_mask;
	if (old_crtc_state->master_transcoder != INVALID_TRANSCODER)
		transcoders |= BIT(old_crtc_state->master_transcoder);

	return intel_modeset_affected_transcoders(state,
						  transcoders);
}

/*
 * ->atomic_check() hook: standard digital connector checks plus MST BW,
 * DP tunnel, tile group and port-sync handling on a modeset.
 */
static int intel_dp_connector_atomic_check(struct drm_connector *conn,
					   struct drm_atomic_state *_state)
{
	struct drm_i915_private *dev_priv = to_i915(conn->dev);
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(_state, conn);
	struct
intel_connector *intel_conn = to_intel_connector(conn);
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_conn->encoder);
	int ret;

	ret = intel_digital_connector_atomic_check(conn, &state->base);
	if (ret)
		return ret;

	if (intel_dp_mst_source_support(intel_dp)) {
		ret = drm_dp_mst_root_conn_atomic_check(conn_state, &intel_dp->mst_mgr);
		if (ret)
			return ret;
	}

	/* The checks below only matter when this connector is modeset */
	if (!intel_connector_needs_modeset(state, conn))
		return 0;

	ret = intel_dp_tunnel_atomic_check_state(state,
						 intel_dp,
						 intel_conn);
	if (ret)
		return ret;

	/*
	 * We don't enable port sync on BDW due to missing w/as and
	 * due to not having adjusted the modeset sequence appropriately.
	 */
	if (DISPLAY_VER(dev_priv) < 9)
		return 0;

	if (conn->has_tile) {
		ret = intel_modeset_tile_group(state, conn->tile_group->id);
		if (ret)
			return ret;
	}

	return intel_modeset_synced_crtcs(state, conn);
}

/*
 * Out-of-band hotplug notification (e.g. from a Type-C controller):
 * record the new state under irq_lock and kick the hotplug work only
 * when the state actually changed.
 */
static void intel_dp_oob_hotplug_event(struct drm_connector *connector,
				       enum drm_connector_status hpd_state)
{
	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
	struct drm_i915_private *i915 = to_i915(connector->dev);
	bool hpd_high = hpd_state == connector_status_connected;
	unsigned int hpd_pin = encoder->hpd_pin;
	bool need_work = false;

	spin_lock_irq(&i915->irq_lock);
	if (hpd_high != test_bit(hpd_pin, &i915->display.hotplug.oob_hotplug_last_state)) {
		i915->display.hotplug.event_bits |= BIT(hpd_pin);

		__assign_bit(hpd_pin, &i915->display.hotplug.oob_hotplug_last_state, hpd_high);
		need_work = true;
	}
	spin_unlock_irq(&i915->irq_lock);

	if (need_work)
		intel_hpd_schedule_detection(i915);
}

static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_digital_connector_atomic_get_property,
	.atomic_set_property = intel_digital_connector_atomic_set_property,
	.late_register = intel_dp_connector_register,
	.early_unregister = intel_dp_connector_unregister,
	.destroy = intel_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
	.oob_hotplug_event = intel_dp_oob_hotplug_event,
};

static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.detect_ctx = intel_dp_detect,
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.atomic_check = intel_dp_connector_atomic_check,
};

/*
 * HPD IRQ handler for this port. Returns IRQ_HANDLED when the pulse was
 * fully dealt with here, IRQ_NONE when further processing (full detect)
 * is needed by the caller.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_dp *intel_dp = &dig_port->dp;
	u8 dpcd[DP_RECEIVER_CAP_SIZE];

	if (dig_port->base.type == INTEL_OUTPUT_EDP &&
	    (long_hpd || !intel_pps_have_panel_power_or_vdd(intel_dp))) {
		/*
		 * vdd off can generate a long/short pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		drm_dbg_kms(&i915->drm,
			    "ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
			    long_hpd ? "long" : "short",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		return IRQ_HANDLED;
	}

	drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name,
		    long_hpd ? "long" : "short");

	/*
	 * TBT DP tunnels require the GFX driver to read out the DPRX caps in
	 * response to long HPD pulses.
The DP hotplug handler does that,
	 * however the hotplug handler may be blocked by another
	 * connector's/encoder's hotplug handler. Since the TBT CM may not
	 * complete the DP tunnel BW request for the latter connector/encoder
	 * waiting for this encoder's DPRX read, perform a dummy read here.
	 */
	if (long_hpd)
		intel_dp_read_dprx_caps(intel_dp, dpcd);

	/* Long pulse: defer to a full detect, forcing link param reset */
	if (long_hpd) {
		intel_dp->reset_link_params = true;
		return IRQ_NONE;
	}

	if (intel_dp->is_mst) {
		if (!intel_dp_check_mst_status(intel_dp))
			return IRQ_NONE;
	} else if (!intel_dp_short_pulse(intel_dp)) {
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

/*
 * Determine from platform and VBT whether @port is wired up as eDP.
 * Display version < 9 assumes port A is always eDP; otherwise trust VBT.
 */
static bool _intel_dp_is_port_edp(struct drm_i915_private *dev_priv,
				  const struct intel_bios_encoder_data *devdata,
				  enum port port)
{
	/*
	 * eDP not supported on g4x. so bail out early just
	 * for a bit extra safety in case the VBT is bonkers.
	 */
	if (DISPLAY_VER(dev_priv) < 5)
		return false;

	if (DISPLAY_VER(dev_priv) < 9 && port == PORT_A)
		return true;

	return devdata && intel_bios_encoder_supports_edp(devdata);
}

bool intel_dp_is_port_edp(struct drm_i915_private *i915, enum port port)
{
	struct intel_display *display = &i915->display;
	const struct intel_bios_encoder_data *devdata =
		intel_bios_encoder_data_lookup(display, port);

	return _intel_dp_is_port_edp(i915, devdata, port);
}

/*
 * Whether the encoder can emit the gamut metadata (HDR) DIP. LSPCON
 * handles HDR metadata itself, and older platforms lack support on
 * some ports.
 */
bool
intel_dp_has_gamut_metadata_dip(struct intel_encoder *encoder)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	enum port port = encoder->port;

	if (intel_bios_encoder_is_lspcon(encoder->devdata))
		return false;

	if (DISPLAY_VER(i915) >= 11)
		return true;

	if (port == PORT_A)
		return false;

	if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
	    DISPLAY_VER(i915) >= 9)
		return true;

	return false;
}

/* Attach the drm properties supported by this DP/eDP connector. */
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	enum port port = dp_to_dig_port(intel_dp)->base.port;

	if (!intel_dp_is_edp(intel_dp))
		drm_connector_attach_dp_subconnector_property(connector);

	if (!IS_G4X(dev_priv) && port != PORT_A)
		intel_attach_force_audio_property(connector);

	intel_attach_broadcast_rgb_property(connector);
	if (HAS_GMCH(dev_priv))
		drm_connector_attach_max_bpc_property(connector, 6, 10);
	else if (DISPLAY_VER(dev_priv) >= 5)
		drm_connector_attach_max_bpc_property(connector, 6, 12);

	/* Register HDMI colorspace for case of lspcon */
	if (intel_bios_encoder_is_lspcon(dp_to_dig_port(intel_dp)->base.devdata)) {
		drm_connector_attach_content_type_property(connector);
		intel_attach_hdmi_colorspace_property(connector);
	} else {
		intel_attach_dp_colorspace_property(connector);
	}

	if (intel_dp_has_gamut_metadata_dip(&dp_to_dig_port(intel_dp)->base))
		drm_connector_attach_hdr_output_metadata_property(connector);

	if (HAS_VRR(dev_priv))
		drm_connector_attach_vrr_capable_property(connector);
}

/* eDP-only properties: scaling mode and VBT panel orientation. */
static void
intel_edp_add_properties(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	const struct drm_display_mode *fixed_mode =
		intel_panel_preferred_fixed_mode(connector);

	intel_attach_scaling_mode_property(&connector->base);

	drm_connector_set_panel_orientation_with_quirk(&connector->base,
						       i915->display.vbt.orientation,
						       fixed_mode->hdisplay,
						       fixed_mode->vdisplay);
}

/* Set up eDP backlight control, picking the pipe to read it from. */
static void intel_edp_backlight_setup(struct intel_dp *intel_dp,
				      struct intel_connector *connector)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	enum pipe pipe = INVALID_PIPE;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		pipe = vlv_active_pipe(intel_dp);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps.pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;
	}

	intel_backlight_setup(connector, pipe);
}

/*
 * One-time eDP connector setup: PPS, DPCD/EDID caching, fixed mode
 * discovery, MSO, backlight and eDP properties. Returns false (after
 * syncing VDD off) when the port cannot be used as eDP; returns true
 * immediately for non-eDP ports.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_connector *connector = &intel_connector->base;
	struct drm_display_mode *fixed_mode;
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	bool has_dpcd;
	const struct drm_edid *drm_edid;

	if (!intel_dp_is_edp(intel_dp))
		return true;

	/*
	 * On IBX/CPT we may get here with LVDS already registered. Since the
	 * driver uses the only internal power sequencer available for both
	 * eDP and LVDS bail out early in this case to prevent interfering
	 * with an already powered-on LVDS power sequencer.
	 */
	if (intel_get_lvds_encoder(dev_priv)) {
		drm_WARN_ON(&dev_priv->drm,
			    !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
		drm_info(&dev_priv->drm,
			 "LVDS was detected, not registering eDP\n");

		return false;
	}

	intel_bios_init_panel_early(display, &intel_connector->panel,
				    encoder->devdata);

	if (!intel_pps_init(intel_dp)) {
		drm_info(&dev_priv->drm,
			 "[ENCODER:%d:%s] unusable PPS, disabling eDP\n",
			 encoder->base.base.id, encoder->base.name);
		/*
		 * The BIOS may have still enabled VDD on the PPS even
		 * though it's unusable. Make sure we turn it back off
		 * and to release the power domain references/etc.
		 */
		goto out_vdd_off;
	}

	/*
	 * Enable HPD sense for live status check.
	 * intel_hpd_irq_setup() will turn it off again
	 * if it's no longer needed later.
	 *
	 * The DPCD probe below will make sure VDD is on.
	 */
	intel_hpd_enable_detection(encoder);

	intel_alpm_init_dpcd(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_edp_init_dpcd(intel_dp, intel_connector);

	if (!has_dpcd) {
		/* if this fails, presume the device is a ghost */
		drm_info(&dev_priv->drm,
			 "[ENCODER:%d:%s] failed to retrieve link info, disabling eDP\n",
			 encoder->base.base.id, encoder->base.name);
		goto out_vdd_off;
	}

	/*
	 * VBT and straps are liars. Also check HPD as that seems
	 * to be the most reliable piece of information available.
	 *
	 * ... except on devices that forgot to hook HPD up for eDP
	 * (eg. Acer Chromebook C710), so we'll check it only if multiple
	 * ports are attempting to use the same AUX CH, according to VBT.
	 */
	if (intel_bios_dp_has_shared_aux_ch(encoder->devdata)) {
		/*
		 * If this fails, presume the DPCD answer came
		 * from some other port using the same AUX CH.
		 *
		 * FIXME maybe cleaner to check this before the
		 * DPCD read? Would need sort out the VDD handling...
		 */
		if (!intel_digital_port_connected(encoder)) {
			drm_info(&dev_priv->drm,
				 "[ENCODER:%d:%s] HPD is down, disabling eDP\n",
				 encoder->base.base.id, encoder->base.name);
			goto out_vdd_off;
		}

		/*
		 * Unfortunately even the HPD based detection fails on
		 * eg. Asus B360M-A (CFL+CNP), so as a last resort fall
		 * back to checking for a VGA branch device. Only do this
		 * on known affected platforms to minimize false positives.
		 */
		if (DISPLAY_VER(dev_priv) == 9 && drm_dp_is_branch(intel_dp->dpcd) &&
		    (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) ==
		    DP_DWN_STRM_PORT_TYPE_ANALOG) {
			drm_info(&dev_priv->drm,
				 "[ENCODER:%d:%s] VGA converter detected, disabling eDP\n",
				 encoder->base.base.id, encoder->base.name);
			goto out_vdd_off;
		}
	}

	mutex_lock(&dev_priv->drm.mode_config.mutex);
	drm_edid = drm_edid_read_ddc(connector, connector->ddc);
	if (!drm_edid) {
		/* Fallback to EDID from ACPI OpRegion, if any */
		drm_edid = intel_opregion_get_edid(intel_connector);
		if (drm_edid)
			drm_dbg_kms(&dev_priv->drm,
				    "[CONNECTOR:%d:%s] Using OpRegion EDID\n",
				    connector->base.id, connector->name);
	}
	if (drm_edid) {
		/* An EDID with no usable modes is treated as invalid */
		if (drm_edid_connector_update(connector, drm_edid) ||
		    !drm_edid_connector_add_modes(connector)) {
			drm_edid_connector_update(connector, NULL);
			drm_edid_free(drm_edid);
			drm_edid = ERR_PTR(-EINVAL);
		}
	} else {
		drm_edid = ERR_PTR(-ENOENT);
	}

	intel_bios_init_panel_late(display, &intel_connector->panel, encoder->devdata,
				   IS_ERR(drm_edid) ? NULL : drm_edid);

	intel_panel_add_edid_fixed_modes(intel_connector, true);

	/* MSO requires information from the EDID */
	intel_edp_mso_init(intel_dp);

	/* multiply the mode clock and horizontal timings for MSO */
	list_for_each_entry(fixed_mode, &intel_connector->panel.fixed_modes, head)
		intel_edp_mso_mode_fixup(intel_connector, fixed_mode);

	/* fallback to VBT if available for eDP */
	if (!intel_panel_preferred_fixed_mode(intel_connector))
		intel_panel_add_vbt_lfp_fixed_mode(intel_connector);

	mutex_unlock(&dev_priv->drm.mode_config.mutex);

	if (!intel_panel_preferred_fixed_mode(intel_connector)) {
		drm_info(&dev_priv->drm,
			 "[ENCODER:%d:%s] failed to find fixed mode for the panel, disabling eDP\n",
			 encoder->base.base.id, encoder->base.name);
		goto out_vdd_off;
	}

	intel_panel_init(intel_connector, drm_edid);

	intel_edp_backlight_setup(intel_dp, intel_connector);

	intel_edp_add_properties(intel_dp);

	intel_pps_init_late(intel_dp);

	return true;

out_vdd_off:
	intel_pps_vdd_off_sync(intel_dp);

	return false;
}

/*
 * Deferred work run after a link training failure: mark the link BAD
 * and send a hotplug uevent so userspace retries with a new modeset.
 */
static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
{
	struct intel_connector *intel_connector;
	struct drm_connector *connector;

	intel_connector = container_of(work, typeof(*intel_connector),
				       modeset_retry_work);
	connector = &intel_connector->base;
	drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s]\n", connector->base.id,
		    connector->name);

	/* Grab the locks before changing connector property */
	mutex_lock(&connector->dev->mode_config.mutex);
	/*
	 * Set connector link status to BAD and send a Uevent to notify
	 * userspace to do a modeset.
	 */
	drm_connector_set_link_status_property(connector,
					       DRM_MODE_LINK_STATUS_BAD);
	mutex_unlock(&connector->dev->mode_config.mutex);
	/* Send Hotplug uevent so userspace can reprobe */
	drm_kms_helper_connector_hotplug_event(connector);

	/* Drop the reference taken when the work was queued */
	drm_connector_put(connector);
}

void intel_dp_init_modeset_retry_work(struct intel_connector *connector)
{
	INIT_WORK(&connector->modeset_retry_work,
		  intel_dp_modeset_retry_work_fn);
}

/*
 * Create and initialize the DP/eDP connector for @dig_port: determine
 * eDP vs DP, set up AUX, register the connector, init the panel (eDP),
 * rates, MST, properties, HDCP and PSR. Returns false on failure, with
 * the partially initialized connector cleaned up.
 */
bool
intel_dp_init_connector(struct intel_digital_port *dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &dig_port->dp;
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_encoder->port;
	int type;

	/* Initialize the work for modeset in case of link train failure */
	intel_dp_init_modeset_retry_work(intel_connector);

	if (drm_WARN(dev, dig_port->max_lanes < 1,
		     "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
		     dig_port->max_lanes, intel_encoder->base.base.id,
		     intel_encoder->base.name))
		return false;

	intel_dp->reset_link_params = true;
	intel_dp->pps.pps_pipe = INVALID_PIPE;
	intel_dp->pps.active_pipe = INVALID_PIPE;

	/* Preserve the current hw state. */
	intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (_intel_dp_is_port_edp(dev_priv, intel_encoder->devdata, port)) {
		/*
		 * Currently we don't support eDP on TypeC ports, although in
		 * theory it could work on TypeC legacy ports.
		 */
		drm_WARN_ON(dev, intel_encoder_is_tc(intel_encoder));
		type = DRM_MODE_CONNECTOR_eDP;
		intel_encoder->type = INTEL_OUTPUT_EDP;

		/* eDP only on port B and/or C on vlv/chv */
		if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
				      IS_CHERRYVIEW(dev_priv)) &&
				port != PORT_B && port != PORT_C))
			return false;
	} else {
		type = DRM_MODE_CONNECTOR_DisplayPort;
	}

	intel_dp_set_default_sink_rates(intel_dp);
	intel_dp_set_default_max_sink_lane_count(intel_dp);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);

	intel_dp_aux_init(intel_dp);
	intel_connector->dp.dsc_decompression_aux = &intel_dp->aux;

	drm_dbg_kms(&dev_priv->drm,
		    "Adding %s connector on [ENCODER:%d:%s]\n",
		    type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
		    intel_encoder->base.base.id, intel_encoder->base.name);

	drm_connector_init_with_ddc(dev, connector, &intel_dp_connector_funcs,
				    type, &intel_dp->aux.ddc);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	if (!HAS_GMCH(dev_priv) && DISPLAY_VER(dev_priv) < 12)
		connector->interlace_allowed = true;

	intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
	intel_connector->base.polled = intel_connector->polled;

	intel_connector_attach_encoder(intel_connector, intel_encoder);

	if (HAS_DDI(dev_priv))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->sync_state = intel_dp_connector_sync_state;

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		intel_dp_aux_fini(intel_dp);
		goto fail;
	}

	intel_dp_set_source_rates(intel_dp);
	intel_dp_set_common_rates(intel_dp);
	intel_dp_reset_link_params(intel_dp);

	/* init MST on ports that can support it */
	intel_dp_mst_encoder_init(dig_port,
				  intel_connector->base.base.id);

	intel_dp_add_properties(intel_dp, connector);

	if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
		int ret = intel_dp_hdcp_init(dig_port, intel_connector);
		if (ret)
			drm_dbg_kms(&dev_priv->drm,
				    "HDCP init failed, skipping.\n");
	}

	intel_dp->colorimetry_support =
		intel_dp_get_colorimetry_status(intel_dp);

	intel_dp->frl.is_trained = false;
	intel_dp->frl.trained_rate_gbps = 0;

	intel_psr_init(intel_dp);

	return true;

fail:
	intel_display_power_flush_work(dev_priv);
	drm_connector_cleanup(connector);

	return false;
}

/* Suspend the MST topology manager on every MST-capable DDI encoder. */
void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (!HAS_DISPLAY(dev_priv))
		return;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(encoder);

		if (!intel_dp_mst_source_support(intel_dp))
			continue;

		if (intel_dp->is_mst)
			drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
	}
}

/*
 * Resume MST topology managers; fall back to SST on any encoder whose
 * topology could not be resumed.
 */
void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (!HAS_DISPLAY(dev_priv))
		return;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;
		int ret;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(encoder);

		if (!intel_dp_mst_source_support(intel_dp))
			continue;

		ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
						     true);
		if (ret) {
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							false);
		}
	}
}