1 /* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Keith Packard <keithp@keithp.com> 25 * 26 */ 27 28 #include <linux/export.h> 29 #include <linux/i2c.h> 30 #include <linux/notifier.h> 31 #include <linux/slab.h> 32 #include <linux/string_helpers.h> 33 #include <linux/timekeeping.h> 34 #include <linux/types.h> 35 36 #include <asm/byteorder.h> 37 38 #include <drm/display/drm_dp_helper.h> 39 #include <drm/display/drm_dp_tunnel.h> 40 #include <drm/display/drm_dsc_helper.h> 41 #include <drm/display/drm_hdmi_helper.h> 42 #include <drm/drm_atomic_helper.h> 43 #include <drm/drm_crtc.h> 44 #include <drm/drm_edid.h> 45 #include <drm/drm_probe_helper.h> 46 47 #include "g4x_dp.h" 48 #include "i915_drv.h" 49 #include "i915_irq.h" 50 #include "i915_reg.h" 51 #include "intel_alpm.h" 52 #include "intel_atomic.h" 53 #include "intel_audio.h" 54 #include "intel_backlight.h" 55 #include "intel_combo_phy_regs.h" 56 #include "intel_connector.h" 57 #include "intel_crtc.h" 58 #include "intel_cx0_phy.h" 59 #include "intel_ddi.h" 60 #include "intel_de.h" 61 #include "intel_display_driver.h" 62 #include "intel_display_types.h" 63 #include "intel_dp.h" 64 #include "intel_dp_aux.h" 65 #include "intel_dp_hdcp.h" 66 #include "intel_dp_link_training.h" 67 #include "intel_dp_mst.h" 68 #include "intel_dp_tunnel.h" 69 #include "intel_dpio_phy.h" 70 #include "intel_dpll.h" 71 #include "intel_drrs.h" 72 #include "intel_encoder.h" 73 #include "intel_fifo_underrun.h" 74 #include "intel_hdcp.h" 75 #include "intel_hdmi.h" 76 #include "intel_hotplug.h" 77 #include "intel_hotplug_irq.h" 78 #include "intel_lspcon.h" 79 #include "intel_lvds.h" 80 #include "intel_modeset_lock.h" 81 #include "intel_panel.h" 82 #include "intel_pch_display.h" 83 #include "intel_pps.h" 84 #include "intel_psr.h" 85 #include "intel_tc.h" 86 #include "intel_vdsc.h" 87 #include "intel_vrr.h" 88 #include "intel_crtc_state_dump.h" 89 90 /* DP DSC throughput values used for slice count calculations KPixels/s */ 91 #define DP_DSC_PEAK_PIXEL_RATE 2720000 92 #define DP_DSC_MAX_ENC_THROUGHPUT_0 340000 93 #define DP_DSC_MAX_ENC_THROUGHPUT_1 400000 94 95 /* Max DSC line buffer depth supported by HW. */ 96 #define INTEL_DP_DSC_MAX_LINE_BUF_DEPTH 13 97 98 /* DP DSC FEC Overhead factor in ppm = 1/(0.972261) = 1.028530 */ 99 #define DP_DSC_FEC_OVERHEAD_FACTOR 1028530 100 101 /* Compliance test status bits */ 102 #define INTEL_DP_RESOLUTION_SHIFT_MASK 0 103 #define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK) 104 #define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK) 105 #define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK) 106 107 108 /* Constants for DP DSC configurations */ 109 static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15}; 110 111 /* With Single pipe configuration, HW is capable of supporting maximum 112 * of 4 slices per line. 113 */ 114 static const u8 valid_dsc_slicecount[] = {1, 2, 4}; 115 116 /** 117 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH) 118 * @intel_dp: DP struct 119 * 120 * If a CPU or PCH DP output is attached to an eDP panel, this function 121 * will return true, and false otherwise. 122 * 123 * This function is not safe to use prior to encoder type being set. 124 */ 125 bool intel_dp_is_edp(struct intel_dp *intel_dp) 126 { 127 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 128 129 return dig_port->base.type == INTEL_OUTPUT_EDP; 130 } 131 132 bool intel_dp_as_sdp_supported(struct intel_dp *intel_dp) 133 { 134 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 135 136 return HAS_AS_SDP(i915) && 137 drm_dp_as_sdp_supported(&intel_dp->aux, intel_dp->dpcd); 138 } 139 140 static void intel_dp_unset_edid(struct intel_dp *intel_dp); 141 142 /* Is link rate UHBR and thus 128b/132b? */ 143 bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state) 144 { 145 return drm_dp_is_uhbr_rate(crtc_state->port_clock); 146 } 147 148 /** 149 * intel_dp_link_symbol_size - get the link symbol size for a given link rate 150 * @rate: link rate in 10kbit/s units 151 * 152 * Returns the link symbol size in bits/symbol units depending on the link 153 * rate -> channel coding. 154 */ 155 int intel_dp_link_symbol_size(int rate) 156 { 157 return drm_dp_is_uhbr_rate(rate) ? 32 : 10; 158 } 159 160 /** 161 * intel_dp_link_symbol_clock - convert link rate to link symbol clock 162 * @rate: link rate in 10kbit/s units 163 * 164 * Returns the link symbol clock frequency in kHz units depending on the 165 * link rate and channel coding. 166 */ 167 int intel_dp_link_symbol_clock(int rate) 168 { 169 return DIV_ROUND_CLOSEST(rate * 10, intel_dp_link_symbol_size(rate)); 170 } 171 172 static int max_dprx_rate(struct intel_dp *intel_dp) 173 { 174 if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp)) 175 return drm_dp_tunnel_max_dprx_rate(intel_dp->tunnel); 176 177 return drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]); 178 } 179 180 static int max_dprx_lane_count(struct intel_dp *intel_dp) 181 { 182 if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp)) 183 return drm_dp_tunnel_max_dprx_lane_count(intel_dp->tunnel); 184 185 return drm_dp_max_lane_count(intel_dp->dpcd); 186 } 187 188 static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp) 189 { 190 intel_dp->sink_rates[0] = 162000; 191 intel_dp->num_sink_rates = 1; 192 } 193 194 /* update sink rates from dpcd */ 195 static void intel_dp_set_dpcd_sink_rates(struct intel_dp *intel_dp) 196 { 197 static const int dp_rates[] = { 198 162000, 270000, 540000, 810000 199 }; 200 int i, max_rate; 201 int max_lttpr_rate; 202 203 if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) { 204 /* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */ 205 static const int quirk_rates[] = { 162000, 270000, 324000 }; 206 207 memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates)); 208 intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates); 209 210 return; 211 } 212 213 /* 214 * Sink rates for 8b/10b. 215 */ 216 max_rate = max_dprx_rate(intel_dp); 217 max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps); 218 if (max_lttpr_rate) 219 max_rate = min(max_rate, max_lttpr_rate); 220 221 for (i = 0; i < ARRAY_SIZE(dp_rates); i++) { 222 if (dp_rates[i] > max_rate) 223 break; 224 intel_dp->sink_rates[i] = dp_rates[i]; 225 } 226 227 /* 228 * Sink rates for 128b/132b. If set, sink should support all 8b/10b 229 * rates and 10 Gbps. 230 */ 231 if (drm_dp_128b132b_supported(intel_dp->dpcd)) { 232 u8 uhbr_rates = 0; 233 234 BUILD_BUG_ON(ARRAY_SIZE(intel_dp->sink_rates) < ARRAY_SIZE(dp_rates) + 3); 235 236 drm_dp_dpcd_readb(&intel_dp->aux, 237 DP_128B132B_SUPPORTED_LINK_RATES, &uhbr_rates); 238 239 if (drm_dp_lttpr_count(intel_dp->lttpr_common_caps)) { 240 /* We have a repeater */ 241 if (intel_dp->lttpr_common_caps[0] >= 0x20 && 242 intel_dp->lttpr_common_caps[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER - 243 DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] & 244 DP_PHY_REPEATER_128B132B_SUPPORTED) { 245 /* Repeater supports 128b/132b, valid UHBR rates */ 246 uhbr_rates &= intel_dp->lttpr_common_caps[DP_PHY_REPEATER_128B132B_RATES - 247 DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; 248 } else { 249 /* Does not support 128b/132b */ 250 uhbr_rates = 0; 251 } 252 } 253 254 if (uhbr_rates & DP_UHBR10) 255 intel_dp->sink_rates[i++] = 1000000; 256 if (uhbr_rates & DP_UHBR13_5) 257 intel_dp->sink_rates[i++] = 1350000; 258 if (uhbr_rates & DP_UHBR20) 259 intel_dp->sink_rates[i++] = 2000000; 260 } 261 262 intel_dp->num_sink_rates = i; 263 } 264 265 static void intel_dp_set_sink_rates(struct intel_dp *intel_dp) 266 { 267 struct intel_connector *connector = intel_dp->attached_connector; 268 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 269 struct intel_encoder *encoder = &intel_dig_port->base; 270 271 intel_dp_set_dpcd_sink_rates(intel_dp); 272 273 if (intel_dp->num_sink_rates) 274 return; 275 276 drm_err(&dp_to_i915(intel_dp)->drm, 277 "[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD with no link rates, using defaults\n", 278 connector->base.base.id, connector->base.name, 279 encoder->base.base.id, encoder->base.name); 280 281 intel_dp_set_default_sink_rates(intel_dp); 282 } 283 284 static void intel_dp_set_default_max_sink_lane_count(struct intel_dp *intel_dp) 285 { 286 intel_dp->max_sink_lane_count = 1; 287 } 288 289 static void intel_dp_set_max_sink_lane_count(struct intel_dp *intel_dp) 290 { 291 struct intel_connector *connector = intel_dp->attached_connector; 292 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 293 struct intel_encoder *encoder = &intel_dig_port->base; 294 295 intel_dp->max_sink_lane_count = max_dprx_lane_count(intel_dp); 296 297 switch (intel_dp->max_sink_lane_count) { 298 case 1: 299 case 2: 300 case 4: 301 return; 302 } 303 304 drm_err(&dp_to_i915(intel_dp)->drm, 305 "[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD max lane count (%d), using default\n", 306 connector->base.base.id, connector->base.name, 307 encoder->base.base.id, encoder->base.name, 308 intel_dp->max_sink_lane_count); 309 310 intel_dp_set_default_max_sink_lane_count(intel_dp); 311 } 312 313 /* Get length of rates array potentially limited by max_rate. */ 314 static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate) 315 { 316 int i; 317 318 /* Limit results by potentially reduced max rate */ 319 for (i = 0; i < len; i++) { 320 if (rates[len - i - 1] <= max_rate) 321 return len - i; 322 } 323 324 return 0; 325 } 326 327 /* Get length of common rates array potentially limited by max_rate. */ 328 static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp, 329 int max_rate) 330 { 331 return intel_dp_rate_limit_len(intel_dp->common_rates, 332 intel_dp->num_common_rates, max_rate); 333 } 334 335 int intel_dp_common_rate(struct intel_dp *intel_dp, int index) 336 { 337 if (drm_WARN_ON(&dp_to_i915(intel_dp)->drm, 338 index < 0 || index >= intel_dp->num_common_rates)) 339 return 162000; 340 341 return intel_dp->common_rates[index]; 342 } 343 344 /* Theoretical max between source and sink */ 345 int intel_dp_max_common_rate(struct intel_dp *intel_dp) 346 { 347 return intel_dp_common_rate(intel_dp, intel_dp->num_common_rates - 1); 348 } 349 350 int intel_dp_max_source_lane_count(struct intel_digital_port *dig_port) 351 { 352 int vbt_max_lanes = intel_bios_dp_max_lane_count(dig_port->base.devdata); 353 int max_lanes = dig_port->max_lanes; 354 355 if (vbt_max_lanes) 356 max_lanes = min(max_lanes, vbt_max_lanes); 357 358 return max_lanes; 359 } 360 361 /* Theoretical max between source and sink */ 362 int intel_dp_max_common_lane_count(struct intel_dp *intel_dp) 363 { 364 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 365 int source_max = intel_dp_max_source_lane_count(dig_port); 366 int sink_max = intel_dp->max_sink_lane_count; 367 int lane_max = intel_tc_port_max_lane_count(dig_port); 368 int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps); 369 370 if (lttpr_max) 371 sink_max = min(sink_max, lttpr_max); 372 373 return min3(source_max, sink_max, lane_max); 374 } 375 376 static int forced_lane_count(struct intel_dp *intel_dp) 377 { 378 return clamp(intel_dp->link.force_lane_count, 1, intel_dp_max_common_lane_count(intel_dp)); 379 } 380 381 int intel_dp_max_lane_count(struct intel_dp *intel_dp) 382 { 383 int lane_count; 384 385 if (intel_dp->link.force_lane_count) 386 lane_count = forced_lane_count(intel_dp); 387 else 388 lane_count = intel_dp->link.max_lane_count; 389 390 switch (lane_count) { 391 case 1: 392 case 2: 393 case 4: 394 return lane_count; 395 default: 396 MISSING_CASE(lane_count); 397 return 1; 398 } 399 } 400 401 static int intel_dp_min_lane_count(struct intel_dp *intel_dp) 402 { 403 if (intel_dp->link.force_lane_count) 404 return forced_lane_count(intel_dp); 405 406 return 1; 407 } 408 409 /* 410 * The required data bandwidth for a mode with given pixel clock and bpp. This 411 * is the required net bandwidth independent of the data bandwidth efficiency. 412 * 413 * TODO: check if callers of this functions should use 414 * intel_dp_effective_data_rate() instead. 415 */ 416 int 417 intel_dp_link_required(int pixel_clock, int bpp) 418 { 419 /* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */ 420 return DIV_ROUND_UP(pixel_clock * bpp, 8); 421 } 422 423 /** 424 * intel_dp_effective_data_rate - Return the pixel data rate accounting for BW allocation overhead 425 * @pixel_clock: pixel clock in kHz 426 * @bpp_x16: bits per pixel .4 fixed point format 427 * @bw_overhead: BW allocation overhead in 1ppm units 428 * 429 * Return the effective pixel data rate in kB/sec units taking into account 430 * the provided SSC, FEC, DSC BW allocation overhead. 431 */ 432 int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16, 433 int bw_overhead) 434 { 435 return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_clock * bpp_x16, bw_overhead), 436 1000000 * 16 * 8); 437 } 438 439 /** 440 * intel_dp_max_link_data_rate: Calculate the maximum rate for the given link params 441 * @intel_dp: Intel DP object 442 * @max_dprx_rate: Maximum data rate of the DPRX 443 * @max_dprx_lanes: Maximum lane count of the DPRX 444 * 445 * Calculate the maximum data rate for the provided link parameters taking into 446 * account any BW limitations by a DP tunnel attached to @intel_dp. 447 * 448 * Returns the maximum data rate in kBps units. 449 */ 450 int intel_dp_max_link_data_rate(struct intel_dp *intel_dp, 451 int max_dprx_rate, int max_dprx_lanes) 452 { 453 int max_rate = drm_dp_max_dprx_data_rate(max_dprx_rate, max_dprx_lanes); 454 455 if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp)) 456 max_rate = min(max_rate, 457 drm_dp_tunnel_available_bw(intel_dp->tunnel)); 458 459 return max_rate; 460 } 461 462 bool intel_dp_has_joiner(struct intel_dp *intel_dp) 463 { 464 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 465 struct intel_encoder *encoder = &intel_dig_port->base; 466 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 467 468 /* eDP MSO is not compatible with joiner */ 469 if (intel_dp->mso_link_count) 470 return false; 471 472 return DISPLAY_VER(dev_priv) >= 12 || 473 (DISPLAY_VER(dev_priv) == 11 && 474 encoder->port != PORT_A); 475 } 476 477 static int dg2_max_source_rate(struct intel_dp *intel_dp) 478 { 479 return intel_dp_is_edp(intel_dp) ? 810000 : 1350000; 480 } 481 482 static int icl_max_source_rate(struct intel_dp *intel_dp) 483 { 484 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 485 486 if (intel_encoder_is_combo(encoder) && !intel_dp_is_edp(intel_dp)) 487 return 540000; 488 489 return 810000; 490 } 491 492 static int ehl_max_source_rate(struct intel_dp *intel_dp) 493 { 494 if (intel_dp_is_edp(intel_dp)) 495 return 540000; 496 497 return 810000; 498 } 499 500 static int mtl_max_source_rate(struct intel_dp *intel_dp) 501 { 502 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 503 504 if (intel_encoder_is_c10phy(encoder)) 505 return 810000; 506 507 if (DISPLAY_VER_FULL(to_i915(encoder->base.dev)) == IP_VER(14, 1)) 508 return 1350000; 509 510 return 2000000; 511 } 512 513 static int vbt_max_link_rate(struct intel_dp *intel_dp) 514 { 515 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 516 int max_rate; 517 518 max_rate = intel_bios_dp_max_link_rate(encoder->devdata); 519 520 if (intel_dp_is_edp(intel_dp)) { 521 struct intel_connector *connector = intel_dp->attached_connector; 522 int edp_max_rate = connector->panel.vbt.edp.max_link_rate; 523 524 if (max_rate && edp_max_rate) 525 max_rate = min(max_rate, edp_max_rate); 526 else if (edp_max_rate) 527 max_rate = edp_max_rate; 528 } 529 530 return max_rate; 531 } 532 533 static void 534 intel_dp_set_source_rates(struct intel_dp *intel_dp) 535 { 536 /* The values must be in increasing order */ 537 static const int mtl_rates[] = { 538 162000, 216000, 243000, 270000, 324000, 432000, 540000, 675000, 539 810000, 1000000, 2000000, 540 }; 541 static const int icl_rates[] = { 542 162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000, 543 1000000, 1350000, 544 }; 545 static const int bxt_rates[] = { 546 162000, 216000, 243000, 270000, 324000, 432000, 540000 547 }; 548 static const int skl_rates[] = { 549 162000, 216000, 270000, 324000, 432000, 540000 550 }; 551 static const int hsw_rates[] = { 552 162000, 270000, 540000 553 }; 554 static const int g4x_rates[] = { 555 162000, 270000 556 }; 557 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 558 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 559 const int *source_rates; 560 int size, max_rate = 0, vbt_max_rate; 561 562 /* This should only be done once */ 563 drm_WARN_ON(&dev_priv->drm, 564 intel_dp->source_rates || intel_dp->num_source_rates); 565 566 if (DISPLAY_VER(dev_priv) >= 14) { 567 source_rates = mtl_rates; 568 size = ARRAY_SIZE(mtl_rates); 569 max_rate = mtl_max_source_rate(intel_dp); 570 } else if (DISPLAY_VER(dev_priv) >= 11) { 571 source_rates = icl_rates; 572 size = ARRAY_SIZE(icl_rates); 573 if (IS_DG2(dev_priv)) 574 max_rate = dg2_max_source_rate(intel_dp); 575 else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) || 576 IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) 577 max_rate = 810000; 578 else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) 579 max_rate = ehl_max_source_rate(intel_dp); 580 else 581 max_rate = icl_max_source_rate(intel_dp); 582 } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { 583 source_rates = bxt_rates; 584 size = ARRAY_SIZE(bxt_rates); 585 } else if (DISPLAY_VER(dev_priv) == 9) { 586 source_rates = skl_rates; 587 size = ARRAY_SIZE(skl_rates); 588 } else if ((IS_HASWELL(dev_priv) && !IS_HASWELL_ULX(dev_priv)) || 589 IS_BROADWELL(dev_priv)) { 590 source_rates = hsw_rates; 591 size = ARRAY_SIZE(hsw_rates); 592 } else { 593 source_rates = g4x_rates; 594 size = ARRAY_SIZE(g4x_rates); 595 } 596 597 vbt_max_rate = vbt_max_link_rate(intel_dp); 598 if (max_rate && vbt_max_rate) 599 max_rate = min(max_rate, vbt_max_rate); 600 else if (vbt_max_rate) 601 max_rate = vbt_max_rate; 602 603 if (max_rate) 604 size = intel_dp_rate_limit_len(source_rates, size, max_rate); 605 606 intel_dp->source_rates = source_rates; 607 intel_dp->num_source_rates = size; 608 } 609 610 static int intersect_rates(const int *source_rates, int source_len, 611 const int *sink_rates, int sink_len, 612 int *common_rates) 613 { 614 int i = 0, j = 0, k = 0; 615 616 while (i < source_len && j < sink_len) { 617 if (source_rates[i] == sink_rates[j]) { 618 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES)) 619 return k; 620 common_rates[k] = source_rates[i]; 621 ++k; 622 ++i; 623 ++j; 624 } else if (source_rates[i] < sink_rates[j]) { 625 ++i; 626 } else { 627 ++j; 628 } 629 } 630 return k; 631 } 632 633 /* return index of rate in rates array, or -1 if not found */ 634 int intel_dp_rate_index(const int *rates, int len, int rate) 635 { 636 int i; 637 638 for (i = 0; i < len; i++) 639 if (rate == rates[i]) 640 return i; 641 642 return -1; 643 } 644 645 static void intel_dp_set_common_rates(struct intel_dp *intel_dp) 646 { 647 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 648 649 drm_WARN_ON(&i915->drm, 650 !intel_dp->num_source_rates || !intel_dp->num_sink_rates); 651 652 intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates, 653 intel_dp->num_source_rates, 654 intel_dp->sink_rates, 655 intel_dp->num_sink_rates, 656 intel_dp->common_rates); 657 658 /* Paranoia, there should always be something in common. */ 659 if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) { 660 intel_dp->common_rates[0] = 162000; 661 intel_dp->num_common_rates = 1; 662 } 663 } 664 665 static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate, 666 u8 lane_count) 667 { 668 /* 669 * FIXME: we need to synchronize the current link parameters with 670 * hardware readout. Currently fast link training doesn't work on 671 * boot-up. 672 */ 673 if (link_rate == 0 || 674 link_rate > intel_dp->link.max_rate) 675 return false; 676 677 if (lane_count == 0 || 678 lane_count > intel_dp_max_lane_count(intel_dp)) 679 return false; 680 681 return true; 682 } 683 684 u32 intel_dp_mode_to_fec_clock(u32 mode_clock) 685 { 686 return div_u64(mul_u32_u32(mode_clock, DP_DSC_FEC_OVERHEAD_FACTOR), 687 1000000U); 688 } 689 690 int intel_dp_bw_fec_overhead(bool fec_enabled) 691 { 692 /* 693 * TODO: Calculate the actual overhead for a given mode. 694 * The hard-coded 1/0.972261=2.853% overhead factor 695 * corresponds (for instance) to the 8b/10b DP FEC 2.4% + 696 * 0.453% DSC overhead. This is enough for a 3840 width mode, 697 * which has a DSC overhead of up to ~0.2%, but may not be 698 * enough for a 1024 width mode where this is ~0.8% (on a 4 699 * lane DP link, with 2 DSC slices and 8 bpp color depth). 700 */ 701 return fec_enabled ? DP_DSC_FEC_OVERHEAD_FACTOR : 1000000; 702 } 703 704 static int 705 small_joiner_ram_size_bits(struct drm_i915_private *i915) 706 { 707 if (DISPLAY_VER(i915) >= 13) 708 return 17280 * 8; 709 else if (DISPLAY_VER(i915) >= 11) 710 return 7680 * 8; 711 else 712 return 6144 * 8; 713 } 714 715 u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 pipe_bpp) 716 { 717 u32 bits_per_pixel = bpp; 718 int i; 719 720 /* Error out if the max bpp is less than smallest allowed valid bpp */ 721 if (bits_per_pixel < valid_dsc_bpp[0]) { 722 drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n", 723 bits_per_pixel, valid_dsc_bpp[0]); 724 return 0; 725 } 726 727 /* From XE_LPD onwards we support from bpc upto uncompressed bpp-1 BPPs */ 728 if (DISPLAY_VER(i915) >= 13) { 729 bits_per_pixel = min(bits_per_pixel, pipe_bpp - 1); 730 731 /* 732 * According to BSpec, 27 is the max DSC output bpp, 733 * 8 is the min DSC output bpp. 734 * While we can still clamp higher bpp values to 27, saving bandwidth, 735 * if it is required to oompress up to bpp < 8, means we can't do 736 * that and probably means we can't fit the required mode, even with 737 * DSC enabled. 738 */ 739 if (bits_per_pixel < 8) { 740 drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min 8\n", 741 bits_per_pixel); 742 return 0; 743 } 744 bits_per_pixel = min_t(u32, bits_per_pixel, 27); 745 } else { 746 /* Find the nearest match in the array of known BPPs from VESA */ 747 for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) { 748 if (bits_per_pixel < valid_dsc_bpp[i + 1]) 749 break; 750 } 751 drm_dbg_kms(&i915->drm, "Set dsc bpp from %d to VESA %d\n", 752 bits_per_pixel, valid_dsc_bpp[i]); 753 754 bits_per_pixel = valid_dsc_bpp[i]; 755 } 756 757 return bits_per_pixel; 758 } 759 760 static 761 u32 get_max_compressed_bpp_with_joiner(struct drm_i915_private *i915, 762 u32 mode_clock, u32 mode_hdisplay, 763 bool bigjoiner) 764 { 765 u32 max_bpp_small_joiner_ram; 766 767 /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */ 768 max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) / mode_hdisplay; 769 770 if (bigjoiner) { 771 int bigjoiner_interface_bits = DISPLAY_VER(i915) >= 14 ? 36 : 24; 772 /* With bigjoiner multiple dsc engines are used in parallel so PPC is 2 */ 773 int ppc = 2; 774 u32 max_bpp_bigjoiner = 775 i915->display.cdclk.max_cdclk_freq * ppc * bigjoiner_interface_bits / 776 intel_dp_mode_to_fec_clock(mode_clock); 777 778 max_bpp_small_joiner_ram *= 2; 779 780 return min(max_bpp_small_joiner_ram, max_bpp_bigjoiner); 781 } 782 783 return max_bpp_small_joiner_ram; 784 } 785 786 u16 intel_dp_dsc_get_max_compressed_bpp(struct drm_i915_private *i915, 787 u32 link_clock, u32 lane_count, 788 u32 mode_clock, u32 mode_hdisplay, 789 bool bigjoiner, 790 enum intel_output_format output_format, 791 u32 pipe_bpp, 792 u32 timeslots) 793 { 794 u32 bits_per_pixel, joiner_max_bpp; 795 796 /* 797 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)* 798 * (LinkSymbolClock)* 8 * (TimeSlots / 64) 799 * for SST -> TimeSlots is 64(i.e all TimeSlots that are available) 800 * for MST -> TimeSlots has to be calculated, based on mode requirements 801 * 802 * Due to FEC overhead, the available bw is reduced to 97.2261%. 803 * To support the given mode: 804 * Bandwidth required should be <= Available link Bandwidth * FEC Overhead 805 * =>ModeClock * bits_per_pixel <= Available Link Bandwidth * FEC Overhead 806 * =>bits_per_pixel <= Available link Bandwidth * FEC Overhead / ModeClock 807 * =>bits_per_pixel <= (NumberOfLanes * LinkSymbolClock) * 8 (TimeSlots / 64) / 808 * (ModeClock / FEC Overhead) 809 * =>bits_per_pixel <= (NumberOfLanes * LinkSymbolClock * TimeSlots) / 810 * (ModeClock / FEC Overhead * 8) 811 */ 812 bits_per_pixel = ((link_clock * lane_count) * timeslots) / 813 (intel_dp_mode_to_fec_clock(mode_clock) * 8); 814 815 /* Bandwidth required for 420 is half, that of 444 format */ 816 if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 817 bits_per_pixel *= 2; 818 819 /* 820 * According to DSC 1.2a Section 4.1.1 Table 4.1 the maximum 821 * supported PPS value can be 63.9375 and with the further 822 * mention that for 420, 422 formats, bpp should be programmed double 823 * the target bpp restricting our target bpp to be 31.9375 at max. 824 */ 825 if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 826 bits_per_pixel = min_t(u32, bits_per_pixel, 31); 827 828 drm_dbg_kms(&i915->drm, "Max link bpp is %u for %u timeslots " 829 "total bw %u pixel clock %u\n", 830 bits_per_pixel, timeslots, 831 (link_clock * lane_count * 8), 832 intel_dp_mode_to_fec_clock(mode_clock)); 833 834 joiner_max_bpp = get_max_compressed_bpp_with_joiner(i915, mode_clock, 835 mode_hdisplay, bigjoiner); 836 bits_per_pixel = min(bits_per_pixel, joiner_max_bpp); 837 838 bits_per_pixel = intel_dp_dsc_nearest_valid_bpp(i915, bits_per_pixel, pipe_bpp); 839 840 return bits_per_pixel; 841 } 842 843 u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector, 844 int mode_clock, int mode_hdisplay, 845 bool bigjoiner) 846 { 847 struct drm_i915_private *i915 = to_i915(connector->base.dev); 848 u8 min_slice_count, i; 849 int max_slice_width; 850 851 if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE) 852 min_slice_count = DIV_ROUND_UP(mode_clock, 853 DP_DSC_MAX_ENC_THROUGHPUT_0); 854 else 855 min_slice_count = DIV_ROUND_UP(mode_clock, 856 DP_DSC_MAX_ENC_THROUGHPUT_1); 857 858 /* 859 * Due to some DSC engine BW limitations, we need to enable second 860 * slice and VDSC engine, whenever we approach close enough to max CDCLK 861 */ 862 if (mode_clock >= ((i915->display.cdclk.max_cdclk_freq * 85) / 100)) 863 min_slice_count = max_t(u8, min_slice_count, 2); 864 865 max_slice_width = drm_dp_dsc_sink_max_slice_width(connector->dp.dsc_dpcd); 866 if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) { 867 drm_dbg_kms(&i915->drm, 868 "Unsupported slice width %d by DP DSC Sink device\n", 869 max_slice_width); 870 return 0; 871 } 872 /* Also take into account max slice width */ 873 min_slice_count = max_t(u8, min_slice_count, 874 DIV_ROUND_UP(mode_hdisplay, 875 max_slice_width)); 876 877 /* Find the closest match to the valid slice count values */ 878 for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) { 879 u8 test_slice_count = valid_dsc_slicecount[i] << bigjoiner; 880 881 if (test_slice_count > 882 drm_dp_dsc_sink_max_slice_count(connector->dp.dsc_dpcd, false)) 883 break; 884 885 /* big joiner needs small joiner to be enabled */ 886 if (bigjoiner && test_slice_count < 4) 887 continue; 888 889 if (min_slice_count <= test_slice_count) 890 return test_slice_count; 891 } 892 893 drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n", 894 min_slice_count); 895 return 0; 896 } 897 898 static bool source_can_output(struct intel_dp *intel_dp, 899 enum intel_output_format format) 900 { 901 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 902 903 switch (format) { 904 case INTEL_OUTPUT_FORMAT_RGB: 905 return true; 906 907 case INTEL_OUTPUT_FORMAT_YCBCR444: 908 /* 909 * No YCbCr output support on gmch platforms. 910 * Also, ILK doesn't seem capable of DP YCbCr output. 911 * The displayed image is severly corrupted. SNB+ is fine. 912 */ 913 return !HAS_GMCH(i915) && !IS_IRONLAKE(i915); 914 915 case INTEL_OUTPUT_FORMAT_YCBCR420: 916 /* Platform < Gen 11 cannot output YCbCr420 format */ 917 return DISPLAY_VER(i915) >= 11; 918 919 default: 920 MISSING_CASE(format); 921 return false; 922 } 923 } 924 925 static bool 926 dfp_can_convert_from_rgb(struct intel_dp *intel_dp, 927 enum intel_output_format sink_format) 928 { 929 if (!drm_dp_is_branch(intel_dp->dpcd)) 930 return false; 931 932 if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR444) 933 return intel_dp->dfp.rgb_to_ycbcr; 934 935 if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420) 936 return intel_dp->dfp.rgb_to_ycbcr && 937 intel_dp->dfp.ycbcr_444_to_420; 938 939 return false; 940 } 941 942 static bool 943 dfp_can_convert_from_ycbcr444(struct intel_dp *intel_dp, 944 enum intel_output_format sink_format) 945 { 946 if (!drm_dp_is_branch(intel_dp->dpcd)) 947 return false; 948 949 if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420) 950 return intel_dp->dfp.ycbcr_444_to_420; 951 952 return false; 953 } 954 955 static bool 956 dfp_can_convert(struct intel_dp *intel_dp, 957 enum intel_output_format output_format, 958 enum intel_output_format sink_format) 959 { 960 switch (output_format) { 961 case INTEL_OUTPUT_FORMAT_RGB: 962 return dfp_can_convert_from_rgb(intel_dp, sink_format); 963 case INTEL_OUTPUT_FORMAT_YCBCR444: 964 return dfp_can_convert_from_ycbcr444(intel_dp, sink_format); 965 default: 966 MISSING_CASE(output_format); 967 return false; 968 } 969 970 return false; 971 } 972 973 static enum intel_output_format 974 intel_dp_output_format(struct intel_connector *connector, 975 enum intel_output_format sink_format) 976 { 977 struct intel_dp *intel_dp = intel_attached_dp(connector); 978 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 979 enum intel_output_format force_dsc_output_format = 980 intel_dp->force_dsc_output_format; 981 enum intel_output_format output_format; 982 if (force_dsc_output_format) { 983 if (source_can_output(intel_dp, force_dsc_output_format) && 984 (!drm_dp_is_branch(intel_dp->dpcd) || 985 sink_format != force_dsc_output_format || 986 dfp_can_convert(intel_dp, force_dsc_output_format, sink_format))) 987 return force_dsc_output_format; 988 989 drm_dbg_kms(&i915->drm, "Cannot force DSC output format\n"); 990 } 991 992 if (sink_format == INTEL_OUTPUT_FORMAT_RGB || 993 dfp_can_convert_from_rgb(intel_dp, sink_format)) 994 output_format = INTEL_OUTPUT_FORMAT_RGB; 995 996 else if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR444 || 997 dfp_can_convert_from_ycbcr444(intel_dp, sink_format)) 998 output_format = INTEL_OUTPUT_FORMAT_YCBCR444; 999 1000 else 1001 output_format = INTEL_OUTPUT_FORMAT_YCBCR420; 1002 1003 drm_WARN_ON(&i915->drm, !source_can_output(intel_dp, output_format)); 1004 1005 return output_format; 1006 } 1007 1008 int intel_dp_min_bpp(enum intel_output_format output_format) 1009 { 1010 if (output_format == INTEL_OUTPUT_FORMAT_RGB) 1011 return 6 * 3; 1012 else 1013 return 8 * 3; 1014 } 1015 1016 int intel_dp_output_bpp(enum intel_output_format output_format, int bpp) 1017 { 1018 /* 1019 * bpp value was assumed to RGB format. And YCbCr 4:2:0 output 1020 * format of the number of bytes per pixel will be half the number 1021 * of bytes of RGB pixel. 1022 */ 1023 if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 1024 bpp /= 2; 1025 1026 return bpp; 1027 } 1028 1029 static enum intel_output_format 1030 intel_dp_sink_format(struct intel_connector *connector, 1031 const struct drm_display_mode *mode) 1032 { 1033 const struct drm_display_info *info = &connector->base.display_info; 1034 1035 if (drm_mode_is_420_only(info, mode)) 1036 return INTEL_OUTPUT_FORMAT_YCBCR420; 1037 1038 return INTEL_OUTPUT_FORMAT_RGB; 1039 } 1040 1041 static int 1042 intel_dp_mode_min_output_bpp(struct intel_connector *connector, 1043 const struct drm_display_mode *mode) 1044 { 1045 enum intel_output_format output_format, sink_format; 1046 1047 sink_format = intel_dp_sink_format(connector, mode); 1048 1049 output_format = intel_dp_output_format(connector, sink_format); 1050 1051 return intel_dp_output_bpp(output_format, intel_dp_min_bpp(output_format)); 1052 } 1053 1054 static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv, 1055 int hdisplay) 1056 { 1057 /* 1058 * Older platforms don't like hdisplay==4096 with DP. 1059 * 1060 * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline 1061 * and frame counter increment), but we don't get vblank interrupts, 1062 * and the pipe underruns immediately. The link also doesn't seem 1063 * to get trained properly. 1064 * 1065 * On CHV the vblank interrupts don't seem to disappear but 1066 * otherwise the symptoms are similar. 1067 * 1068 * TODO: confirm the behaviour on HSW+ 1069 */ 1070 return hdisplay == 4096 && !HAS_DDI(dev_priv); 1071 } 1072 1073 static int intel_dp_max_tmds_clock(struct intel_dp *intel_dp) 1074 { 1075 struct intel_connector *connector = intel_dp->attached_connector; 1076 const struct drm_display_info *info = &connector->base.display_info; 1077 int max_tmds_clock = intel_dp->dfp.max_tmds_clock; 1078 1079 /* Only consider the sink's max TMDS clock if we know this is a HDMI DFP */ 1080 if (max_tmds_clock && info->max_tmds_clock) 1081 max_tmds_clock = min(max_tmds_clock, info->max_tmds_clock); 1082 1083 return max_tmds_clock; 1084 } 1085 1086 static enum drm_mode_status 1087 intel_dp_tmds_clock_valid(struct intel_dp *intel_dp, 1088 int clock, int bpc, 1089 enum intel_output_format sink_format, 1090 bool respect_downstream_limits) 1091 { 1092 int tmds_clock, min_tmds_clock, max_tmds_clock; 1093 1094 if (!respect_downstream_limits) 1095 return MODE_OK; 1096 1097 tmds_clock = intel_hdmi_tmds_clock(clock, bpc, sink_format); 1098 1099 min_tmds_clock = intel_dp->dfp.min_tmds_clock; 1100 max_tmds_clock = intel_dp_max_tmds_clock(intel_dp); 1101 1102 if (min_tmds_clock && tmds_clock < min_tmds_clock) 1103 return MODE_CLOCK_LOW; 1104 1105 if (max_tmds_clock && tmds_clock > max_tmds_clock) 1106 return MODE_CLOCK_HIGH; 1107 1108 return MODE_OK; 1109 } 1110 1111 static enum drm_mode_status 1112 intel_dp_mode_valid_downstream(struct intel_connector *connector, 1113 const struct drm_display_mode *mode, 1114 int target_clock) 1115 { 1116 struct intel_dp *intel_dp = intel_attached_dp(connector); 1117 const struct drm_display_info *info = &connector->base.display_info; 1118 enum drm_mode_status status; 1119 enum intel_output_format sink_format; 1120 1121 /* If PCON supports FRL MODE, check FRL bandwidth constraints */ 1122 if (intel_dp->dfp.pcon_max_frl_bw) { 1123 int target_bw; 1124 int max_frl_bw; 1125 int bpp = intel_dp_mode_min_output_bpp(connector, mode); 1126 1127 target_bw = bpp * target_clock; 1128 1129 max_frl_bw = intel_dp->dfp.pcon_max_frl_bw; 1130 1131 /* converting bw from Gbps to Kbps*/ 1132 max_frl_bw = max_frl_bw * 1000000; 1133 1134 if (target_bw > max_frl_bw) 1135 return MODE_CLOCK_HIGH; 1136 1137 return MODE_OK; 1138 } 1139 1140 if (intel_dp->dfp.max_dotclock && 1141 target_clock > intel_dp->dfp.max_dotclock) 1142 return MODE_CLOCK_HIGH; 1143 1144 sink_format = intel_dp_sink_format(connector, mode); 1145 1146 /* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */ 1147 status = intel_dp_tmds_clock_valid(intel_dp, target_clock, 1148 8, sink_format, true); 1149 1150 if (status != MODE_OK) { 1151 if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420 || 1152 !connector->base.ycbcr_420_allowed || 1153 !drm_mode_is_420_also(info, mode)) 1154 return status; 1155 sink_format = INTEL_OUTPUT_FORMAT_YCBCR420; 1156 status = intel_dp_tmds_clock_valid(intel_dp, target_clock, 1157 8, sink_format, true); 1158 if (status != MODE_OK) 1159 return status; 1160 } 1161 1162 return MODE_OK; 1163 } 1164 1165 bool intel_dp_need_joiner(struct intel_dp *intel_dp, 1166 struct intel_connector *connector, 1167 int hdisplay, int clock) 1168 { 1169 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1170 1171 if (!intel_dp_has_joiner(intel_dp)) 1172 return false; 1173 1174 return clock > i915->display.cdclk.max_dotclk_freq || hdisplay > 5120 || 1175 connector->force_bigjoiner_enable; 1176 } 1177 1178 bool intel_dp_has_dsc(const struct intel_connector *connector) 1179 { 1180 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1181 1182 if (!HAS_DSC(i915)) 1183 return false; 1184 1185 if (connector->mst_port && !HAS_DSC_MST(i915)) 1186 return false; 1187 1188 if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP && 1189 connector->panel.vbt.edp.dsc_disable) 1190 return false; 1191 1192 if (!drm_dp_sink_supports_dsc(connector->dp.dsc_dpcd)) 1193 return false; 1194 1195 return true; 1196 } 1197 1198 static enum drm_mode_status 1199 intel_dp_mode_valid(struct drm_connector *_connector, 1200 struct drm_display_mode *mode) 1201 { 1202 struct intel_connector *connector = to_intel_connector(_connector); 1203 struct intel_dp *intel_dp = intel_attached_dp(connector); 1204 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1205 const struct drm_display_mode *fixed_mode; 1206 int target_clock = mode->clock; 1207 int max_rate, mode_rate, max_lanes, max_link_clock; 1208 int max_dotclk = dev_priv->display.cdclk.max_dotclk_freq; 1209 u16 dsc_max_compressed_bpp = 0; 1210 u8 dsc_slice_count = 0; 1211 enum drm_mode_status status; 1212 bool dsc = false, joiner = false; 1213 1214 status = intel_cpu_transcoder_mode_valid(dev_priv, mode); 1215 if (status != MODE_OK) 1216 return status; 1217 1218 if (mode->flags & DRM_MODE_FLAG_DBLCLK) 1219 return MODE_H_ILLEGAL; 1220 1221 if (mode->clock < 10000) 1222 return MODE_CLOCK_LOW; 1223 1224 fixed_mode = intel_panel_fixed_mode(connector, mode); 1225 if (intel_dp_is_edp(intel_dp) && fixed_mode) { 1226 status = intel_panel_mode_valid(connector, mode); 1227 if (status != MODE_OK) 1228 return status; 1229 1230 target_clock = fixed_mode->clock; 1231 } 1232 1233 if (intel_dp_need_joiner(intel_dp, connector, 1234 mode->hdisplay, target_clock)) { 1235 joiner = true; 1236 max_dotclk *= 2; 1237 } 1238 if (target_clock > max_dotclk) 1239 return MODE_CLOCK_HIGH; 1240 1241 if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay)) 1242 return MODE_H_ILLEGAL; 1243 1244 max_link_clock = intel_dp_max_link_rate(intel_dp); 1245 max_lanes = intel_dp_max_lane_count(intel_dp); 1246 1247 max_rate = intel_dp_max_link_data_rate(intel_dp, max_link_clock, max_lanes); 1248 1249 mode_rate = intel_dp_link_required(target_clock, 1250 intel_dp_mode_min_output_bpp(connector, mode)); 1251 1252 if (intel_dp_has_dsc(connector)) { 1253 enum intel_output_format sink_format, output_format; 1254 int pipe_bpp; 1255 1256 sink_format = intel_dp_sink_format(connector, mode); 1257 output_format = intel_dp_output_format(connector, sink_format); 1258 /* 1259 * TBD pass the connector BPC, 1260 * for now U8_MAX so that max BPC on that platform would be picked 1261 */ 1262 pipe_bpp = intel_dp_dsc_compute_max_bpp(connector, U8_MAX); 1263 1264 /* 1265 * Output bpp is stored in 6.4 format so right shift by 4 to get the 1266 * integer value since we support only integer values of bpp. 1267 */ 1268 if (intel_dp_is_edp(intel_dp)) { 1269 dsc_max_compressed_bpp = 1270 drm_edp_dsc_sink_output_bpp(connector->dp.dsc_dpcd) >> 4; 1271 dsc_slice_count = 1272 drm_dp_dsc_sink_max_slice_count(connector->dp.dsc_dpcd, 1273 true); 1274 } else if (drm_dp_sink_supports_fec(connector->dp.fec_capability)) { 1275 dsc_max_compressed_bpp = 1276 intel_dp_dsc_get_max_compressed_bpp(dev_priv, 1277 max_link_clock, 1278 max_lanes, 1279 target_clock, 1280 mode->hdisplay, 1281 joiner, 1282 output_format, 1283 pipe_bpp, 64); 1284 dsc_slice_count = 1285 intel_dp_dsc_get_slice_count(connector, 1286 target_clock, 1287 mode->hdisplay, 1288 joiner); 1289 } 1290 1291 dsc = dsc_max_compressed_bpp && dsc_slice_count; 1292 } 1293 1294 if (intel_dp_joiner_needs_dsc(dev_priv, joiner) && !dsc) 1295 return MODE_CLOCK_HIGH; 1296 1297 if (mode_rate > max_rate && !dsc) 1298 return MODE_CLOCK_HIGH; 1299 1300 status = intel_dp_mode_valid_downstream(connector, mode, target_clock); 1301 if (status != MODE_OK) 1302 return status; 1303 1304 return intel_mode_valid_max_plane_size(dev_priv, mode, joiner); 1305 } 1306 1307 bool intel_dp_source_supports_tps3(struct drm_i915_private *i915) 1308 { 1309 return DISPLAY_VER(i915) >= 9 || IS_BROADWELL(i915) || IS_HASWELL(i915); 1310 } 1311 1312 bool intel_dp_source_supports_tps4(struct drm_i915_private *i915) 1313 { 1314 return DISPLAY_VER(i915) >= 10; 1315 } 1316 1317 static void snprintf_int_array(char *str, size_t len, 1318 const int *array, int nelem) 1319 { 1320 int i; 1321 1322 str[0] = '\0'; 1323 1324 for (i = 0; i < nelem; i++) { 1325 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]); 1326 if (r >= len) 1327 return; 1328 str += r; 1329 len -= r; 1330 } 1331 } 1332 1333 static void intel_dp_print_rates(struct intel_dp *intel_dp) 1334 { 1335 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1336 char str[128]; /* FIXME: too big for stack? */ 1337 1338 if (!drm_debug_enabled(DRM_UT_KMS)) 1339 return; 1340 1341 snprintf_int_array(str, sizeof(str), 1342 intel_dp->source_rates, intel_dp->num_source_rates); 1343 drm_dbg_kms(&i915->drm, "source rates: %s\n", str); 1344 1345 snprintf_int_array(str, sizeof(str), 1346 intel_dp->sink_rates, intel_dp->num_sink_rates); 1347 drm_dbg_kms(&i915->drm, "sink rates: %s\n", str); 1348 1349 snprintf_int_array(str, sizeof(str), 1350 intel_dp->common_rates, intel_dp->num_common_rates); 1351 drm_dbg_kms(&i915->drm, "common rates: %s\n", str); 1352 } 1353 1354 static int forced_link_rate(struct intel_dp *intel_dp) 1355 { 1356 int len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->link.force_rate); 1357 1358 if (len == 0) 1359 return intel_dp_common_rate(intel_dp, 0); 1360 1361 return intel_dp_common_rate(intel_dp, len - 1); 1362 } 1363 1364 int 1365 intel_dp_max_link_rate(struct intel_dp *intel_dp) 1366 { 1367 int len; 1368 1369 if (intel_dp->link.force_rate) 1370 return forced_link_rate(intel_dp); 1371 1372 len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->link.max_rate); 1373 1374 return intel_dp_common_rate(intel_dp, len - 1); 1375 } 1376 1377 static int 1378 intel_dp_min_link_rate(struct intel_dp *intel_dp) 1379 { 1380 if (intel_dp->link.force_rate) 1381 return forced_link_rate(intel_dp); 1382 1383 return intel_dp_common_rate(intel_dp, 0); 1384 } 1385 1386 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate) 1387 { 1388 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1389 int i = intel_dp_rate_index(intel_dp->sink_rates, 1390 intel_dp->num_sink_rates, rate); 1391 1392 if (drm_WARN_ON(&i915->drm, i < 0)) 1393 i = 0; 1394 1395 return i; 1396 } 1397 1398 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, 1399 u8 *link_bw, u8 *rate_select) 1400 { 1401 /* eDP 1.4 rate select method. */ 1402 if (intel_dp->use_rate_select) { 1403 *link_bw = 0; 1404 *rate_select = 1405 intel_dp_rate_select(intel_dp, port_clock); 1406 } else { 1407 *link_bw = drm_dp_link_rate_to_bw_code(port_clock); 1408 *rate_select = 0; 1409 } 1410 } 1411 1412 bool intel_dp_has_hdmi_sink(struct intel_dp *intel_dp) 1413 { 1414 struct intel_connector *connector = intel_dp->attached_connector; 1415 1416 return connector->base.display_info.is_hdmi; 1417 } 1418 1419 static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp, 1420 const struct intel_crtc_state *pipe_config) 1421 { 1422 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 1423 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1424 1425 if (DISPLAY_VER(dev_priv) >= 12) 1426 return true; 1427 1428 if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A && 1429 !intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) 1430 return true; 1431 1432 return false; 1433 } 1434 1435 bool intel_dp_supports_fec(struct intel_dp *intel_dp, 1436 const struct intel_connector *connector, 1437 const struct intel_crtc_state *pipe_config) 1438 { 1439 return intel_dp_source_supports_fec(intel_dp, pipe_config) && 1440 drm_dp_sink_supports_fec(connector->dp.fec_capability); 1441 } 1442 1443 bool intel_dp_supports_dsc(const struct intel_connector *connector, 1444 const struct intel_crtc_state *crtc_state) 1445 { 1446 if (!intel_dp_has_dsc(connector)) 1447 return false; 1448 1449 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP) && !crtc_state->fec_enable) 1450 return false; 1451 1452 return intel_dsc_source_support(crtc_state); 1453 } 1454 1455 static int intel_dp_hdmi_compute_bpc(struct intel_dp *intel_dp, 1456 const struct intel_crtc_state *crtc_state, 1457 int bpc, bool respect_downstream_limits) 1458 { 1459 int clock = crtc_state->hw.adjusted_mode.crtc_clock; 1460 1461 /* 1462 * Current bpc could already be below 8bpc due to 1463 * FDI bandwidth constraints or other limits. 1464 * HDMI minimum is 8bpc however. 1465 */ 1466 bpc = max(bpc, 8); 1467 1468 /* 1469 * We will never exceed downstream TMDS clock limits while 1470 * attempting deep color. If the user insists on forcing an 1471 * out of spec mode they will have to be satisfied with 8bpc. 1472 */ 1473 if (!respect_downstream_limits) 1474 bpc = 8; 1475 1476 for (; bpc >= 8; bpc -= 2) { 1477 if (intel_hdmi_bpc_possible(crtc_state, bpc, 1478 intel_dp_has_hdmi_sink(intel_dp)) && 1479 intel_dp_tmds_clock_valid(intel_dp, clock, bpc, crtc_state->sink_format, 1480 respect_downstream_limits) == MODE_OK) 1481 return bpc; 1482 } 1483 1484 return -EINVAL; 1485 } 1486 1487 static int intel_dp_max_bpp(struct intel_dp *intel_dp, 1488 const struct intel_crtc_state *crtc_state, 1489 bool respect_downstream_limits) 1490 { 1491 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1492 struct intel_connector *intel_connector = intel_dp->attached_connector; 1493 int bpp, bpc; 1494 1495 bpc = crtc_state->pipe_bpp / 3; 1496 1497 if (intel_dp->dfp.max_bpc) 1498 bpc = min_t(int, bpc, intel_dp->dfp.max_bpc); 1499 1500 if (intel_dp->dfp.min_tmds_clock) { 1501 int max_hdmi_bpc; 1502 1503 max_hdmi_bpc = intel_dp_hdmi_compute_bpc(intel_dp, crtc_state, bpc, 1504 respect_downstream_limits); 1505 if (max_hdmi_bpc < 0) 1506 return 0; 1507 1508 bpc = min(bpc, max_hdmi_bpc); 1509 } 1510 1511 bpp = bpc * 3; 1512 if (intel_dp_is_edp(intel_dp)) { 1513 /* Get bpp from vbt only for panels that dont have bpp in edid */ 1514 if (intel_connector->base.display_info.bpc == 0 && 1515 intel_connector->panel.vbt.edp.bpp && 1516 intel_connector->panel.vbt.edp.bpp < bpp) { 1517 drm_dbg_kms(&dev_priv->drm, 1518 "clamping bpp for eDP panel to BIOS-provided %i\n", 1519 intel_connector->panel.vbt.edp.bpp); 1520 bpp = intel_connector->panel.vbt.edp.bpp; 1521 } 1522 } 1523 1524 return bpp; 1525 } 1526 1527 /* Adjust link config limits based on compliance test requests. */ 1528 void 1529 intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, 1530 struct intel_crtc_state *pipe_config, 1531 struct link_config_limits *limits) 1532 { 1533 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1534 1535 /* For DP Compliance we override the computed bpp for the pipe */ 1536 if (intel_dp->compliance.test_data.bpc != 0) { 1537 int bpp = 3 * intel_dp->compliance.test_data.bpc; 1538 1539 limits->pipe.min_bpp = limits->pipe.max_bpp = bpp; 1540 pipe_config->dither_force_disable = bpp == 6 * 3; 1541 1542 drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp); 1543 } 1544 1545 /* Use values requested by Compliance Test Request */ 1546 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { 1547 int index; 1548 1549 /* Validate the compliance test data since max values 1550 * might have changed due to link train fallback. 1551 */ 1552 if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate, 1553 intel_dp->compliance.test_lane_count)) { 1554 index = intel_dp_rate_index(intel_dp->common_rates, 1555 intel_dp->num_common_rates, 1556 intel_dp->compliance.test_link_rate); 1557 if (index >= 0) 1558 limits->min_rate = limits->max_rate = 1559 intel_dp->compliance.test_link_rate; 1560 limits->min_lane_count = limits->max_lane_count = 1561 intel_dp->compliance.test_lane_count; 1562 } 1563 } 1564 } 1565 1566 static bool has_seamless_m_n(struct intel_connector *connector) 1567 { 1568 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1569 1570 /* 1571 * Seamless M/N reprogramming only implemented 1572 * for BDW+ double buffered M/N registers so far. 1573 */ 1574 return HAS_DOUBLE_BUFFERED_M_N(i915) && 1575 intel_panel_drrs_type(connector) == DRRS_TYPE_SEAMLESS; 1576 } 1577 1578 static int intel_dp_mode_clock(const struct intel_crtc_state *crtc_state, 1579 const struct drm_connector_state *conn_state) 1580 { 1581 struct intel_connector *connector = to_intel_connector(conn_state->connector); 1582 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 1583 1584 /* FIXME a bit of a mess wrt clock vs. crtc_clock */ 1585 if (has_seamless_m_n(connector)) 1586 return intel_panel_highest_mode(connector, adjusted_mode)->clock; 1587 else 1588 return adjusted_mode->crtc_clock; 1589 } 1590 1591 /* Optimize link config in order: max bpp, min clock, min lanes */ 1592 static int 1593 intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, 1594 struct intel_crtc_state *pipe_config, 1595 const struct drm_connector_state *conn_state, 1596 const struct link_config_limits *limits) 1597 { 1598 int bpp, i, lane_count, clock = intel_dp_mode_clock(pipe_config, conn_state); 1599 int mode_rate, link_rate, link_avail; 1600 1601 for (bpp = to_bpp_int(limits->link.max_bpp_x16); 1602 bpp >= to_bpp_int(limits->link.min_bpp_x16); 1603 bpp -= 2 * 3) { 1604 int link_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp); 1605 1606 mode_rate = intel_dp_link_required(clock, link_bpp); 1607 1608 for (i = 0; i < intel_dp->num_common_rates; i++) { 1609 link_rate = intel_dp_common_rate(intel_dp, i); 1610 if (link_rate < limits->min_rate || 1611 link_rate > limits->max_rate) 1612 continue; 1613 1614 for (lane_count = limits->min_lane_count; 1615 lane_count <= limits->max_lane_count; 1616 lane_count <<= 1) { 1617 link_avail = intel_dp_max_link_data_rate(intel_dp, 1618 link_rate, 1619 lane_count); 1620 1621 1622 if (mode_rate <= link_avail) { 1623 pipe_config->lane_count = lane_count; 1624 pipe_config->pipe_bpp = bpp; 1625 pipe_config->port_clock = link_rate; 1626 1627 return 0; 1628 } 1629 } 1630 } 1631 } 1632 1633 return -EINVAL; 1634 } 1635 1636 static 1637 u8 intel_dp_dsc_max_src_input_bpc(struct drm_i915_private *i915) 1638 { 1639 /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */ 1640 if (DISPLAY_VER(i915) >= 12) 1641 return 12; 1642 if (DISPLAY_VER(i915) == 11) 1643 return 10; 1644 1645 return 0; 1646 } 1647 1648 int intel_dp_dsc_compute_max_bpp(const struct intel_connector *connector, 1649 u8 max_req_bpc) 1650 { 1651 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1652 int i, num_bpc; 1653 u8 dsc_bpc[3] = {}; 1654 u8 dsc_max_bpc; 1655 1656 dsc_max_bpc = intel_dp_dsc_max_src_input_bpc(i915); 1657 1658 if (!dsc_max_bpc) 1659 return dsc_max_bpc; 1660 1661 dsc_max_bpc = min_t(u8, dsc_max_bpc, max_req_bpc); 1662 1663 num_bpc = drm_dp_dsc_sink_supported_input_bpcs(connector->dp.dsc_dpcd, 1664 dsc_bpc); 1665 for (i = 0; i < num_bpc; i++) { 1666 if (dsc_max_bpc >= dsc_bpc[i]) 1667 return dsc_bpc[i] * 3; 1668 } 1669 1670 return 0; 1671 } 1672 1673 static int intel_dp_source_dsc_version_minor(struct drm_i915_private *i915) 1674 { 1675 return DISPLAY_VER(i915) >= 14 ? 2 : 1; 1676 } 1677 1678 static int intel_dp_sink_dsc_version_minor(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) 1679 { 1680 return (dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & DP_DSC_MINOR_MASK) >> 1681 DP_DSC_MINOR_SHIFT; 1682 } 1683 1684 static int intel_dp_get_slice_height(int vactive) 1685 { 1686 int slice_height; 1687 1688 /* 1689 * VDSC 1.2a spec in Section 3.8 Options for Slices implies that 108 1690 * lines is an optimal slice height, but any size can be used as long as 1691 * vertical active integer multiple and maximum vertical slice count 1692 * requirements are met. 1693 */ 1694 for (slice_height = 108; slice_height <= vactive; slice_height += 2) 1695 if (vactive % slice_height == 0) 1696 return slice_height; 1697 1698 /* 1699 * Highly unlikely we reach here as most of the resolutions will end up 1700 * finding appropriate slice_height in above loop but returning 1701 * slice_height as 2 here as it should work with all resolutions. 1702 */ 1703 return 2; 1704 } 1705 1706 static int intel_dp_dsc_compute_params(const struct intel_connector *connector, 1707 struct intel_crtc_state *crtc_state) 1708 { 1709 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1710 struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; 1711 int ret; 1712 1713 /* 1714 * RC_MODEL_SIZE is currently a constant across all configurations. 1715 * 1716 * FIXME: Look into using sink defined DPCD DP_DSC_RC_BUF_BLK_SIZE and 1717 * DP_DSC_RC_BUF_SIZE for this. 1718 */ 1719 vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST; 1720 vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay; 1721 1722 vdsc_cfg->slice_height = intel_dp_get_slice_height(vdsc_cfg->pic_height); 1723 1724 ret = intel_dsc_compute_params(crtc_state); 1725 if (ret) 1726 return ret; 1727 1728 vdsc_cfg->dsc_version_major = 1729 (connector->dp.dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & 1730 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT; 1731 vdsc_cfg->dsc_version_minor = 1732 min(intel_dp_source_dsc_version_minor(i915), 1733 intel_dp_sink_dsc_version_minor(connector->dp.dsc_dpcd)); 1734 if (vdsc_cfg->convert_rgb) 1735 vdsc_cfg->convert_rgb = 1736 connector->dp.dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] & 1737 DP_DSC_RGB; 1738 1739 vdsc_cfg->line_buf_depth = min(INTEL_DP_DSC_MAX_LINE_BUF_DEPTH, 1740 drm_dp_dsc_sink_line_buf_depth(connector->dp.dsc_dpcd)); 1741 if (!vdsc_cfg->line_buf_depth) { 1742 drm_dbg_kms(&i915->drm, 1743 "DSC Sink Line Buffer Depth invalid\n"); 1744 return -EINVAL; 1745 } 1746 1747 vdsc_cfg->block_pred_enable = 1748 connector->dp.dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] & 1749 DP_DSC_BLK_PREDICTION_IS_SUPPORTED; 1750 1751 return drm_dsc_compute_rc_parameters(vdsc_cfg); 1752 } 1753 1754 static bool intel_dp_dsc_supports_format(const struct intel_connector *connector, 1755 enum intel_output_format output_format) 1756 { 1757 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1758 u8 sink_dsc_format; 1759 1760 switch (output_format) { 1761 case INTEL_OUTPUT_FORMAT_RGB: 1762 sink_dsc_format = DP_DSC_RGB; 1763 break; 1764 case INTEL_OUTPUT_FORMAT_YCBCR444: 1765 sink_dsc_format = DP_DSC_YCbCr444; 1766 break; 1767 case INTEL_OUTPUT_FORMAT_YCBCR420: 1768 if (min(intel_dp_source_dsc_version_minor(i915), 1769 intel_dp_sink_dsc_version_minor(connector->dp.dsc_dpcd)) < 2) 1770 return false; 1771 sink_dsc_format = DP_DSC_YCbCr420_Native; 1772 break; 1773 default: 1774 return false; 1775 } 1776 1777 return drm_dp_dsc_sink_supports_format(connector->dp.dsc_dpcd, sink_dsc_format); 1778 } 1779 1780 static bool is_bw_sufficient_for_dsc_config(u16 compressed_bppx16, u32 link_clock, 1781 u32 lane_count, u32 mode_clock, 1782 enum intel_output_format output_format, 1783 int timeslots) 1784 { 1785 u32 available_bw, required_bw; 1786 1787 available_bw = (link_clock * lane_count * timeslots * 16) / 8; 1788 required_bw = compressed_bppx16 * (intel_dp_mode_to_fec_clock(mode_clock)); 1789 1790 return available_bw > required_bw; 1791 } 1792 1793 static int dsc_compute_link_config(struct intel_dp *intel_dp, 1794 struct intel_crtc_state *pipe_config, 1795 struct link_config_limits *limits, 1796 u16 compressed_bppx16, 1797 int timeslots) 1798 { 1799 const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 1800 int link_rate, lane_count; 1801 int i; 1802 1803 for (i = 0; i < intel_dp->num_common_rates; i++) { 1804 link_rate = intel_dp_common_rate(intel_dp, i); 1805 if (link_rate < limits->min_rate || link_rate > limits->max_rate) 1806 continue; 1807 1808 for (lane_count = limits->min_lane_count; 1809 lane_count <= limits->max_lane_count; 1810 lane_count <<= 1) { 1811 if (!is_bw_sufficient_for_dsc_config(compressed_bppx16, link_rate, 1812 lane_count, adjusted_mode->clock, 1813 pipe_config->output_format, 1814 timeslots)) 1815 continue; 1816 1817 pipe_config->lane_count = lane_count; 1818 pipe_config->port_clock = link_rate; 1819 1820 return 0; 1821 } 1822 } 1823 1824 return -EINVAL; 1825 } 1826 1827 static 1828 u16 intel_dp_dsc_max_sink_compressed_bppx16(const struct intel_connector *connector, 1829 struct intel_crtc_state *pipe_config, 1830 int bpc) 1831 { 1832 u16 max_bppx16 = drm_edp_dsc_sink_output_bpp(connector->dp.dsc_dpcd); 1833 1834 if (max_bppx16) 1835 return max_bppx16; 1836 /* 1837 * If support not given in DPCD 67h, 68h use the Maximum Allowed bit rate 1838 * values as given in spec Table 2-157 DP v2.0 1839 */ 1840 switch (pipe_config->output_format) { 1841 case INTEL_OUTPUT_FORMAT_RGB: 1842 case INTEL_OUTPUT_FORMAT_YCBCR444: 1843 return (3 * bpc) << 4; 1844 case INTEL_OUTPUT_FORMAT_YCBCR420: 1845 return (3 * (bpc / 2)) << 4; 1846 default: 1847 MISSING_CASE(pipe_config->output_format); 1848 break; 1849 } 1850 1851 return 0; 1852 } 1853 1854 int intel_dp_dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config) 1855 { 1856 /* From Mandatory bit rate range Support Table 2-157 (DP v2.0) */ 1857 switch (pipe_config->output_format) { 1858 case INTEL_OUTPUT_FORMAT_RGB: 1859 case INTEL_OUTPUT_FORMAT_YCBCR444: 1860 return 8; 1861 case INTEL_OUTPUT_FORMAT_YCBCR420: 1862 return 6; 1863 default: 1864 MISSING_CASE(pipe_config->output_format); 1865 break; 1866 } 1867 1868 return 0; 1869 } 1870 1871 int intel_dp_dsc_sink_max_compressed_bpp(const struct intel_connector *connector, 1872 struct intel_crtc_state *pipe_config, 1873 int bpc) 1874 { 1875 return intel_dp_dsc_max_sink_compressed_bppx16(connector, 1876 pipe_config, bpc) >> 4; 1877 } 1878 1879 static int dsc_src_min_compressed_bpp(void) 1880 { 1881 /* Min Compressed bpp supported by source is 8 */ 1882 return 8; 1883 } 1884 1885 static int dsc_src_max_compressed_bpp(struct intel_dp *intel_dp) 1886 { 1887 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1888 1889 /* 1890 * Max Compressed bpp for Gen 13+ is 27bpp. 1891 * For earlier platform is 23bpp. (Bspec:49259). 1892 */ 1893 if (DISPLAY_VER(i915) < 13) 1894 return 23; 1895 else 1896 return 27; 1897 } 1898 1899 /* 1900 * From a list of valid compressed bpps try different compressed bpp and find a 1901 * suitable link configuration that can support it. 1902 */ 1903 static int 1904 icl_dsc_compute_link_config(struct intel_dp *intel_dp, 1905 struct intel_crtc_state *pipe_config, 1906 struct link_config_limits *limits, 1907 int dsc_max_bpp, 1908 int dsc_min_bpp, 1909 int pipe_bpp, 1910 int timeslots) 1911 { 1912 int i, ret; 1913 1914 /* Compressed BPP should be less than the Input DSC bpp */ 1915 dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1); 1916 1917 for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp); i++) { 1918 if (valid_dsc_bpp[i] < dsc_min_bpp) 1919 continue; 1920 if (valid_dsc_bpp[i] > dsc_max_bpp) 1921 break; 1922 1923 ret = dsc_compute_link_config(intel_dp, 1924 pipe_config, 1925 limits, 1926 valid_dsc_bpp[i] << 4, 1927 timeslots); 1928 if (ret == 0) { 1929 pipe_config->dsc.compressed_bpp_x16 = 1930 to_bpp_x16(valid_dsc_bpp[i]); 1931 return 0; 1932 } 1933 } 1934 1935 return -EINVAL; 1936 } 1937 1938 /* 1939 * From XE_LPD onwards we supports compression bpps in steps of 1 up to 1940 * uncompressed bpp-1. So we start from max compressed bpp and see if any 1941 * link configuration is able to support that compressed bpp, if not we 1942 * step down and check for lower compressed bpp. 1943 */ 1944 static int 1945 xelpd_dsc_compute_link_config(struct intel_dp *intel_dp, 1946 const struct intel_connector *connector, 1947 struct intel_crtc_state *pipe_config, 1948 struct link_config_limits *limits, 1949 int dsc_max_bpp, 1950 int dsc_min_bpp, 1951 int pipe_bpp, 1952 int timeslots) 1953 { 1954 u8 bppx16_incr = drm_dp_dsc_sink_bpp_incr(connector->dp.dsc_dpcd); 1955 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1956 u16 compressed_bppx16; 1957 u8 bppx16_step; 1958 int ret; 1959 1960 if (DISPLAY_VER(i915) < 14 || bppx16_incr <= 1) 1961 bppx16_step = 16; 1962 else 1963 bppx16_step = 16 / bppx16_incr; 1964 1965 /* Compressed BPP should be less than the Input DSC bpp */ 1966 dsc_max_bpp = min(dsc_max_bpp << 4, (pipe_bpp << 4) - bppx16_step); 1967 dsc_min_bpp = dsc_min_bpp << 4; 1968 1969 for (compressed_bppx16 = dsc_max_bpp; 1970 compressed_bppx16 >= dsc_min_bpp; 1971 compressed_bppx16 -= bppx16_step) { 1972 if (intel_dp->force_dsc_fractional_bpp_en && 1973 !to_bpp_frac(compressed_bppx16)) 1974 continue; 1975 ret = dsc_compute_link_config(intel_dp, 1976 pipe_config, 1977 limits, 1978 compressed_bppx16, 1979 timeslots); 1980 if (ret == 0) { 1981 pipe_config->dsc.compressed_bpp_x16 = compressed_bppx16; 1982 if (intel_dp->force_dsc_fractional_bpp_en && 1983 to_bpp_frac(compressed_bppx16)) 1984 drm_dbg_kms(&i915->drm, "Forcing DSC fractional bpp\n"); 1985 1986 return 0; 1987 } 1988 } 1989 return -EINVAL; 1990 } 1991 1992 static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp, 1993 const struct intel_connector *connector, 1994 struct intel_crtc_state *pipe_config, 1995 struct link_config_limits *limits, 1996 int pipe_bpp, 1997 int timeslots) 1998 { 1999 const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2000 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2001 int dsc_src_min_bpp, dsc_sink_min_bpp, dsc_min_bpp; 2002 int dsc_src_max_bpp, dsc_sink_max_bpp, dsc_max_bpp; 2003 int dsc_joiner_max_bpp; 2004 2005 dsc_src_min_bpp = dsc_src_min_compressed_bpp(); 2006 dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config); 2007 dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp); 2008 dsc_min_bpp = max(dsc_min_bpp, to_bpp_int_roundup(limits->link.min_bpp_x16)); 2009 2010 dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp); 2011 dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector, 2012 pipe_config, 2013 pipe_bpp / 3); 2014 dsc_max_bpp = dsc_sink_max_bpp ? min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp; 2015 2016 dsc_joiner_max_bpp = get_max_compressed_bpp_with_joiner(i915, adjusted_mode->clock, 2017 adjusted_mode->hdisplay, 2018 pipe_config->joiner_pipes); 2019 dsc_max_bpp = min(dsc_max_bpp, dsc_joiner_max_bpp); 2020 dsc_max_bpp = min(dsc_max_bpp, to_bpp_int(limits->link.max_bpp_x16)); 2021 2022 if (DISPLAY_VER(i915) >= 13) 2023 return xelpd_dsc_compute_link_config(intel_dp, connector, pipe_config, limits, 2024 dsc_max_bpp, dsc_min_bpp, pipe_bpp, timeslots); 2025 return icl_dsc_compute_link_config(intel_dp, pipe_config, limits, 2026 dsc_max_bpp, dsc_min_bpp, pipe_bpp, timeslots); 2027 } 2028 2029 static 2030 u8 intel_dp_dsc_min_src_input_bpc(struct drm_i915_private *i915) 2031 { 2032 /* Min DSC Input BPC for ICL+ is 8 */ 2033 return HAS_DSC(i915) ? 8 : 0; 2034 } 2035 2036 static 2037 bool is_dsc_pipe_bpp_sufficient(struct drm_i915_private *i915, 2038 struct drm_connector_state *conn_state, 2039 struct link_config_limits *limits, 2040 int pipe_bpp) 2041 { 2042 u8 dsc_max_bpc, dsc_min_bpc, dsc_max_pipe_bpp, dsc_min_pipe_bpp; 2043 2044 dsc_max_bpc = min(intel_dp_dsc_max_src_input_bpc(i915), conn_state->max_requested_bpc); 2045 dsc_min_bpc = intel_dp_dsc_min_src_input_bpc(i915); 2046 2047 dsc_max_pipe_bpp = min(dsc_max_bpc * 3, limits->pipe.max_bpp); 2048 dsc_min_pipe_bpp = max(dsc_min_bpc * 3, limits->pipe.min_bpp); 2049 2050 return pipe_bpp >= dsc_min_pipe_bpp && 2051 pipe_bpp <= dsc_max_pipe_bpp; 2052 } 2053 2054 static 2055 int intel_dp_force_dsc_pipe_bpp(struct intel_dp *intel_dp, 2056 struct drm_connector_state *conn_state, 2057 struct link_config_limits *limits) 2058 { 2059 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2060 int forced_bpp; 2061 2062 if (!intel_dp->force_dsc_bpc) 2063 return 0; 2064 2065 forced_bpp = intel_dp->force_dsc_bpc * 3; 2066 2067 if (is_dsc_pipe_bpp_sufficient(i915, conn_state, limits, forced_bpp)) { 2068 drm_dbg_kms(&i915->drm, "Input DSC BPC forced to %d\n", intel_dp->force_dsc_bpc); 2069 return forced_bpp; 2070 } 2071 2072 drm_dbg_kms(&i915->drm, "Cannot force DSC BPC:%d, due to DSC BPC limits\n", 2073 intel_dp->force_dsc_bpc); 2074 2075 return 0; 2076 } 2077 2078 static int intel_dp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp, 2079 struct intel_crtc_state *pipe_config, 2080 struct drm_connector_state *conn_state, 2081 struct link_config_limits *limits, 2082 int timeslots) 2083 { 2084 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2085 const struct intel_connector *connector = 2086 to_intel_connector(conn_state->connector); 2087 u8 max_req_bpc = conn_state->max_requested_bpc; 2088 u8 dsc_max_bpc, dsc_max_bpp; 2089 u8 dsc_min_bpc, dsc_min_bpp; 2090 u8 dsc_bpc[3] = {}; 2091 int forced_bpp, pipe_bpp; 2092 int num_bpc, i, ret; 2093 2094 forced_bpp = intel_dp_force_dsc_pipe_bpp(intel_dp, conn_state, limits); 2095 2096 if (forced_bpp) { 2097 ret = dsc_compute_compressed_bpp(intel_dp, connector, pipe_config, 2098 limits, forced_bpp, timeslots); 2099 if (ret == 0) { 2100 pipe_config->pipe_bpp = forced_bpp; 2101 return 0; 2102 } 2103 } 2104 2105 dsc_max_bpc = intel_dp_dsc_max_src_input_bpc(i915); 2106 if (!dsc_max_bpc) 2107 return -EINVAL; 2108 2109 dsc_max_bpc = min_t(u8, dsc_max_bpc, max_req_bpc); 2110 dsc_max_bpp = min(dsc_max_bpc * 3, limits->pipe.max_bpp); 2111 2112 dsc_min_bpc = intel_dp_dsc_min_src_input_bpc(i915); 2113 dsc_min_bpp = max(dsc_min_bpc * 3, limits->pipe.min_bpp); 2114 2115 /* 2116 * Get the maximum DSC bpc that will be supported by any valid 2117 * link configuration and compressed bpp. 2118 */ 2119 num_bpc = drm_dp_dsc_sink_supported_input_bpcs(connector->dp.dsc_dpcd, dsc_bpc); 2120 for (i = 0; i < num_bpc; i++) { 2121 pipe_bpp = dsc_bpc[i] * 3; 2122 if (pipe_bpp < dsc_min_bpp) 2123 break; 2124 if (pipe_bpp > dsc_max_bpp) 2125 continue; 2126 ret = dsc_compute_compressed_bpp(intel_dp, connector, pipe_config, 2127 limits, pipe_bpp, timeslots); 2128 if (ret == 0) { 2129 pipe_config->pipe_bpp = pipe_bpp; 2130 return 0; 2131 } 2132 } 2133 2134 return -EINVAL; 2135 } 2136 2137 static int intel_edp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp, 2138 struct intel_crtc_state *pipe_config, 2139 struct drm_connector_state *conn_state, 2140 struct link_config_limits *limits) 2141 { 2142 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2143 struct intel_connector *connector = 2144 to_intel_connector(conn_state->connector); 2145 int pipe_bpp, forced_bpp; 2146 int dsc_src_min_bpp, dsc_sink_min_bpp, dsc_min_bpp; 2147 int dsc_src_max_bpp, dsc_sink_max_bpp, dsc_max_bpp; 2148 2149 forced_bpp = intel_dp_force_dsc_pipe_bpp(intel_dp, conn_state, limits); 2150 2151 if (forced_bpp) { 2152 pipe_bpp = forced_bpp; 2153 } else { 2154 int max_bpc = min(limits->pipe.max_bpp / 3, (int)conn_state->max_requested_bpc); 2155 2156 /* For eDP use max bpp that can be supported with DSC. */ 2157 pipe_bpp = intel_dp_dsc_compute_max_bpp(connector, max_bpc); 2158 if (!is_dsc_pipe_bpp_sufficient(i915, conn_state, limits, pipe_bpp)) { 2159 drm_dbg_kms(&i915->drm, 2160 "Computed BPC is not in DSC BPC limits\n"); 2161 return -EINVAL; 2162 } 2163 } 2164 pipe_config->port_clock = limits->max_rate; 2165 pipe_config->lane_count = limits->max_lane_count; 2166 2167 dsc_src_min_bpp = dsc_src_min_compressed_bpp(); 2168 dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config); 2169 dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp); 2170 dsc_min_bpp = max(dsc_min_bpp, to_bpp_int_roundup(limits->link.min_bpp_x16)); 2171 2172 dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp); 2173 dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector, 2174 pipe_config, 2175 pipe_bpp / 3); 2176 dsc_max_bpp = dsc_sink_max_bpp ? min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp; 2177 dsc_max_bpp = min(dsc_max_bpp, to_bpp_int(limits->link.max_bpp_x16)); 2178 2179 /* Compressed BPP should be less than the Input DSC bpp */ 2180 dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1); 2181 2182 pipe_config->dsc.compressed_bpp_x16 = 2183 to_bpp_x16(max(dsc_min_bpp, dsc_max_bpp)); 2184 2185 pipe_config->pipe_bpp = pipe_bpp; 2186 2187 return 0; 2188 } 2189 2190 int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, 2191 struct intel_crtc_state *pipe_config, 2192 struct drm_connector_state *conn_state, 2193 struct link_config_limits *limits, 2194 int timeslots, 2195 bool compute_pipe_bpp) 2196 { 2197 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 2198 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 2199 const struct intel_connector *connector = 2200 to_intel_connector(conn_state->connector); 2201 const struct drm_display_mode *adjusted_mode = 2202 &pipe_config->hw.adjusted_mode; 2203 int ret; 2204 2205 pipe_config->fec_enable = pipe_config->fec_enable || 2206 (!intel_dp_is_edp(intel_dp) && 2207 intel_dp_supports_fec(intel_dp, connector, pipe_config)); 2208 2209 if (!intel_dp_supports_dsc(connector, pipe_config)) 2210 return -EINVAL; 2211 2212 if (!intel_dp_dsc_supports_format(connector, pipe_config->output_format)) 2213 return -EINVAL; 2214 2215 /* 2216 * compute pipe bpp is set to false for DP MST DSC case 2217 * and compressed_bpp is calculated same time once 2218 * vpci timeslots are allocated, because overall bpp 2219 * calculation procedure is bit different for MST case. 2220 */ 2221 if (compute_pipe_bpp) { 2222 if (intel_dp_is_edp(intel_dp)) 2223 ret = intel_edp_dsc_compute_pipe_bpp(intel_dp, pipe_config, 2224 conn_state, limits); 2225 else 2226 ret = intel_dp_dsc_compute_pipe_bpp(intel_dp, pipe_config, 2227 conn_state, limits, timeslots); 2228 if (ret) { 2229 drm_dbg_kms(&dev_priv->drm, 2230 "No Valid pipe bpp for given mode ret = %d\n", ret); 2231 return ret; 2232 } 2233 } 2234 2235 /* Calculate Slice count */ 2236 if (intel_dp_is_edp(intel_dp)) { 2237 pipe_config->dsc.slice_count = 2238 drm_dp_dsc_sink_max_slice_count(connector->dp.dsc_dpcd, 2239 true); 2240 if (!pipe_config->dsc.slice_count) { 2241 drm_dbg_kms(&dev_priv->drm, "Unsupported Slice Count %d\n", 2242 pipe_config->dsc.slice_count); 2243 return -EINVAL; 2244 } 2245 } else { 2246 u8 dsc_dp_slice_count; 2247 2248 dsc_dp_slice_count = 2249 intel_dp_dsc_get_slice_count(connector, 2250 adjusted_mode->crtc_clock, 2251 adjusted_mode->crtc_hdisplay, 2252 pipe_config->joiner_pipes); 2253 if (!dsc_dp_slice_count) { 2254 drm_dbg_kms(&dev_priv->drm, 2255 "Compressed Slice Count not supported\n"); 2256 return -EINVAL; 2257 } 2258 2259 pipe_config->dsc.slice_count = dsc_dp_slice_count; 2260 } 2261 /* 2262 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate 2263 * is greater than the maximum Cdclock and if slice count is even 2264 * then we need to use 2 VDSC instances. 2265 */ 2266 if (pipe_config->joiner_pipes || pipe_config->dsc.slice_count > 1) 2267 pipe_config->dsc.dsc_split = true; 2268 2269 ret = intel_dp_dsc_compute_params(connector, pipe_config); 2270 if (ret < 0) { 2271 drm_dbg_kms(&dev_priv->drm, 2272 "Cannot compute valid DSC parameters for Input Bpp = %d" 2273 "Compressed BPP = " BPP_X16_FMT "\n", 2274 pipe_config->pipe_bpp, 2275 BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16)); 2276 return ret; 2277 } 2278 2279 pipe_config->dsc.compression_enable = true; 2280 drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d " 2281 "Compressed Bpp = " BPP_X16_FMT " Slice Count = %d\n", 2282 pipe_config->pipe_bpp, 2283 BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16), 2284 pipe_config->dsc.slice_count); 2285 2286 return 0; 2287 } 2288 2289 /** 2290 * intel_dp_compute_config_link_bpp_limits - compute output link bpp limits 2291 * @intel_dp: intel DP 2292 * @crtc_state: crtc state 2293 * @dsc: DSC compression mode 2294 * @limits: link configuration limits 2295 * 2296 * Calculates the output link min, max bpp values in @limits based on the 2297 * pipe bpp range, @crtc_state and @dsc mode. 2298 * 2299 * Returns %true in case of success. 2300 */ 2301 bool 2302 intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp, 2303 const struct intel_crtc_state *crtc_state, 2304 bool dsc, 2305 struct link_config_limits *limits) 2306 { 2307 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); 2308 const struct drm_display_mode *adjusted_mode = 2309 &crtc_state->hw.adjusted_mode; 2310 const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2311 const struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 2312 int max_link_bpp_x16; 2313 2314 max_link_bpp_x16 = min(crtc_state->max_link_bpp_x16, 2315 to_bpp_x16(limits->pipe.max_bpp)); 2316 2317 if (!dsc) { 2318 max_link_bpp_x16 = rounddown(max_link_bpp_x16, to_bpp_x16(2 * 3)); 2319 2320 if (max_link_bpp_x16 < to_bpp_x16(limits->pipe.min_bpp)) 2321 return false; 2322 2323 limits->link.min_bpp_x16 = to_bpp_x16(limits->pipe.min_bpp); 2324 } else { 2325 /* 2326 * TODO: set the DSC link limits already here, atm these are 2327 * initialized only later in intel_edp_dsc_compute_pipe_bpp() / 2328 * intel_dp_dsc_compute_pipe_bpp() 2329 */ 2330 limits->link.min_bpp_x16 = 0; 2331 } 2332 2333 limits->link.max_bpp_x16 = max_link_bpp_x16; 2334 2335 drm_dbg_kms(&i915->drm, 2336 "[ENCODER:%d:%s][CRTC:%d:%s] DP link limits: pixel clock %d kHz DSC %s max lanes %d max rate %d max pipe_bpp %d max link_bpp " BPP_X16_FMT "\n", 2337 encoder->base.base.id, encoder->base.name, 2338 crtc->base.base.id, crtc->base.name, 2339 adjusted_mode->crtc_clock, 2340 dsc ? "on" : "off", 2341 limits->max_lane_count, 2342 limits->max_rate, 2343 limits->pipe.max_bpp, 2344 BPP_X16_ARGS(limits->link.max_bpp_x16)); 2345 2346 return true; 2347 } 2348 2349 static bool 2350 intel_dp_compute_config_limits(struct intel_dp *intel_dp, 2351 struct intel_crtc_state *crtc_state, 2352 bool respect_downstream_limits, 2353 bool dsc, 2354 struct link_config_limits *limits) 2355 { 2356 limits->min_rate = intel_dp_min_link_rate(intel_dp); 2357 limits->max_rate = intel_dp_max_link_rate(intel_dp); 2358 2359 /* FIXME 128b/132b SST support missing */ 2360 limits->max_rate = min(limits->max_rate, 810000); 2361 limits->min_rate = min(limits->min_rate, limits->max_rate); 2362 2363 limits->min_lane_count = intel_dp_min_lane_count(intel_dp); 2364 limits->max_lane_count = intel_dp_max_lane_count(intel_dp); 2365 2366 limits->pipe.min_bpp = intel_dp_min_bpp(crtc_state->output_format); 2367 limits->pipe.max_bpp = intel_dp_max_bpp(intel_dp, crtc_state, 2368 respect_downstream_limits); 2369 2370 if (intel_dp->use_max_params) { 2371 /* 2372 * Use the maximum clock and number of lanes the eDP panel 2373 * advertizes being capable of in case the initial fast 2374 * optimal params failed us. The panels are generally 2375 * designed to support only a single clock and lane 2376 * configuration, and typically on older panels these 2377 * values correspond to the native resolution of the panel. 2378 */ 2379 limits->min_lane_count = limits->max_lane_count; 2380 limits->min_rate = limits->max_rate; 2381 } 2382 2383 intel_dp_adjust_compliance_config(intel_dp, crtc_state, limits); 2384 2385 return intel_dp_compute_config_link_bpp_limits(intel_dp, 2386 crtc_state, 2387 dsc, 2388 limits); 2389 } 2390 2391 int intel_dp_config_required_rate(const struct intel_crtc_state *crtc_state) 2392 { 2393 const struct drm_display_mode *adjusted_mode = 2394 &crtc_state->hw.adjusted_mode; 2395 int bpp = crtc_state->dsc.compression_enable ? 2396 to_bpp_int_roundup(crtc_state->dsc.compressed_bpp_x16) : 2397 crtc_state->pipe_bpp; 2398 2399 return intel_dp_link_required(adjusted_mode->crtc_clock, bpp); 2400 } 2401 2402 bool intel_dp_joiner_needs_dsc(struct drm_i915_private *i915, bool use_joiner) 2403 { 2404 /* 2405 * Pipe joiner needs compression up to display 12 due to bandwidth 2406 * limitation. DG2 onwards pipe joiner can be enabled without 2407 * compression. 2408 */ 2409 return DISPLAY_VER(i915) < 13 && use_joiner; 2410 } 2411 2412 static int 2413 intel_dp_compute_link_config(struct intel_encoder *encoder, 2414 struct intel_crtc_state *pipe_config, 2415 struct drm_connector_state *conn_state, 2416 bool respect_downstream_limits) 2417 { 2418 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2419 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 2420 struct intel_connector *connector = 2421 to_intel_connector(conn_state->connector); 2422 const struct drm_display_mode *adjusted_mode = 2423 &pipe_config->hw.adjusted_mode; 2424 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2425 struct link_config_limits limits; 2426 bool dsc_needed, joiner_needs_dsc; 2427 int ret = 0; 2428 2429 if (pipe_config->fec_enable && 2430 !intel_dp_supports_fec(intel_dp, connector, pipe_config)) 2431 return -EINVAL; 2432 2433 if (intel_dp_need_joiner(intel_dp, connector, 2434 adjusted_mode->crtc_hdisplay, 2435 adjusted_mode->crtc_clock)) 2436 pipe_config->joiner_pipes = GENMASK(crtc->pipe + 1, crtc->pipe); 2437 2438 joiner_needs_dsc = intel_dp_joiner_needs_dsc(i915, pipe_config->joiner_pipes); 2439 2440 dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en || 2441 !intel_dp_compute_config_limits(intel_dp, pipe_config, 2442 respect_downstream_limits, 2443 false, 2444 &limits); 2445 2446 if (!dsc_needed) { 2447 /* 2448 * Optimize for slow and wide for everything, because there are some 2449 * eDP 1.3 and 1.4 panels don't work well with fast and narrow. 2450 */ 2451 ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, 2452 conn_state, &limits); 2453 if (ret) 2454 dsc_needed = true; 2455 } 2456 2457 if (dsc_needed) { 2458 drm_dbg_kms(&i915->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n", 2459 str_yes_no(ret), str_yes_no(joiner_needs_dsc), 2460 str_yes_no(intel_dp->force_dsc_en)); 2461 2462 if (!intel_dp_compute_config_limits(intel_dp, pipe_config, 2463 respect_downstream_limits, 2464 true, 2465 &limits)) 2466 return -EINVAL; 2467 2468 ret = intel_dp_dsc_compute_config(intel_dp, pipe_config, 2469 conn_state, &limits, 64, true); 2470 if (ret < 0) 2471 return ret; 2472 } 2473 2474 drm_dbg_kms(&i915->drm, 2475 "DP lane count %d clock %d bpp input %d compressed " BPP_X16_FMT " link rate required %d available %d\n", 2476 pipe_config->lane_count, pipe_config->port_clock, 2477 pipe_config->pipe_bpp, 2478 BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16), 2479 intel_dp_config_required_rate(pipe_config), 2480 intel_dp_max_link_data_rate(intel_dp, 2481 pipe_config->port_clock, 2482 pipe_config->lane_count)); 2483 2484 return 0; 2485 } 2486 2487 bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, 2488 const struct drm_connector_state *conn_state) 2489 { 2490 const struct intel_digital_connector_state *intel_conn_state = 2491 to_intel_digital_connector_state(conn_state); 2492 const struct drm_display_mode *adjusted_mode = 2493 &crtc_state->hw.adjusted_mode; 2494 2495 /* 2496 * Our YCbCr output is always limited range. 2497 * crtc_state->limited_color_range only applies to RGB, 2498 * and it must never be set for YCbCr or we risk setting 2499 * some conflicting bits in TRANSCONF which will mess up 2500 * the colors on the monitor. 2501 */ 2502 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 2503 return false; 2504 2505 if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { 2506 /* 2507 * See: 2508 * CEA-861-E - 5.1 Default Encoding Parameters 2509 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry 2510 */ 2511 return crtc_state->pipe_bpp != 18 && 2512 drm_default_rgb_quant_range(adjusted_mode) == 2513 HDMI_QUANTIZATION_RANGE_LIMITED; 2514 } else { 2515 return intel_conn_state->broadcast_rgb == 2516 INTEL_BROADCAST_RGB_LIMITED; 2517 } 2518 } 2519 2520 static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv, 2521 enum port port) 2522 { 2523 if (IS_G4X(dev_priv)) 2524 return false; 2525 if (DISPLAY_VER(dev_priv) < 12 && port == PORT_A) 2526 return false; 2527 2528 return true; 2529 } 2530 2531 static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state, 2532 const struct drm_connector_state *conn_state, 2533 struct drm_dp_vsc_sdp *vsc) 2534 { 2535 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2536 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2537 2538 if (crtc_state->has_panel_replay) { 2539 /* 2540 * Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223 2541 * VSC SDP supporting 3D stereo, Panel Replay, and Pixel 2542 * Encoding/Colorimetry Format indication. 2543 */ 2544 vsc->revision = 0x7; 2545 } else { 2546 /* 2547 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118 2548 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/ 2549 * Colorimetry Format indication. 2550 */ 2551 vsc->revision = 0x5; 2552 } 2553 2554 vsc->length = 0x13; 2555 2556 /* DP 1.4a spec, Table 2-120 */ 2557 switch (crtc_state->output_format) { 2558 case INTEL_OUTPUT_FORMAT_YCBCR444: 2559 vsc->pixelformat = DP_PIXELFORMAT_YUV444; 2560 break; 2561 case INTEL_OUTPUT_FORMAT_YCBCR420: 2562 vsc->pixelformat = DP_PIXELFORMAT_YUV420; 2563 break; 2564 case INTEL_OUTPUT_FORMAT_RGB: 2565 default: 2566 vsc->pixelformat = DP_PIXELFORMAT_RGB; 2567 } 2568 2569 switch (conn_state->colorspace) { 2570 case DRM_MODE_COLORIMETRY_BT709_YCC: 2571 vsc->colorimetry = DP_COLORIMETRY_BT709_YCC; 2572 break; 2573 case DRM_MODE_COLORIMETRY_XVYCC_601: 2574 vsc->colorimetry = DP_COLORIMETRY_XVYCC_601; 2575 break; 2576 case DRM_MODE_COLORIMETRY_XVYCC_709: 2577 vsc->colorimetry = DP_COLORIMETRY_XVYCC_709; 2578 break; 2579 case DRM_MODE_COLORIMETRY_SYCC_601: 2580 vsc->colorimetry = DP_COLORIMETRY_SYCC_601; 2581 break; 2582 case DRM_MODE_COLORIMETRY_OPYCC_601: 2583 vsc->colorimetry = DP_COLORIMETRY_OPYCC_601; 2584 break; 2585 case DRM_MODE_COLORIMETRY_BT2020_CYCC: 2586 vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC; 2587 break; 2588 case DRM_MODE_COLORIMETRY_BT2020_RGB: 2589 vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB; 2590 break; 2591 case DRM_MODE_COLORIMETRY_BT2020_YCC: 2592 vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC; 2593 break; 2594 case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65: 2595 case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER: 2596 vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB; 2597 break; 2598 default: 2599 /* 2600 * RGB->YCBCR color conversion uses the BT.709 2601 * color space. 2602 */ 2603 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 2604 vsc->colorimetry = DP_COLORIMETRY_BT709_YCC; 2605 else 2606 vsc->colorimetry = DP_COLORIMETRY_DEFAULT; 2607 break; 2608 } 2609 2610 vsc->bpc = crtc_state->pipe_bpp / 3; 2611 2612 /* only RGB pixelformat supports 6 bpc */ 2613 drm_WARN_ON(&dev_priv->drm, 2614 vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB); 2615 2616 /* all YCbCr are always limited range */ 2617 vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA; 2618 vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED; 2619 } 2620 2621 static void intel_dp_compute_as_sdp(struct intel_dp *intel_dp, 2622 struct intel_crtc_state *crtc_state) 2623 { 2624 struct drm_dp_as_sdp *as_sdp = &crtc_state->infoframes.as_sdp; 2625 const struct drm_display_mode *adjusted_mode = 2626 &crtc_state->hw.adjusted_mode; 2627 2628 if (!crtc_state->vrr.enable || 2629 !intel_dp_as_sdp_supported(intel_dp)) 2630 return; 2631 2632 crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC); 2633 2634 /* Currently only DP_AS_SDP_AVT_FIXED_VTOTAL mode supported */ 2635 as_sdp->sdp_type = DP_SDP_ADAPTIVE_SYNC; 2636 as_sdp->length = 0x9; 2637 as_sdp->duration_incr_ms = 0; 2638 as_sdp->duration_incr_ms = 0; 2639 2640 if (crtc_state->cmrr.enable) { 2641 as_sdp->mode = DP_AS_SDP_FAVT_TRR_REACHED; 2642 as_sdp->vtotal = adjusted_mode->vtotal; 2643 as_sdp->target_rr = drm_mode_vrefresh(adjusted_mode); 2644 as_sdp->target_rr_divider = true; 2645 } else { 2646 as_sdp->mode = DP_AS_SDP_AVT_FIXED_VTOTAL; 2647 as_sdp->vtotal = adjusted_mode->vtotal; 2648 as_sdp->target_rr = 0; 2649 } 2650 } 2651 2652 static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp, 2653 struct intel_crtc_state *crtc_state, 2654 const struct drm_connector_state *conn_state) 2655 { 2656 struct drm_dp_vsc_sdp *vsc; 2657 2658 if ((!intel_dp->colorimetry_support || 2659 !intel_dp_needs_vsc_sdp(crtc_state, conn_state)) && 2660 !crtc_state->has_psr) 2661 return; 2662 2663 vsc = &crtc_state->infoframes.vsc; 2664 2665 crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC); 2666 vsc->sdp_type = DP_SDP_VSC; 2667 2668 /* Needs colorimetry */ 2669 if (intel_dp_needs_vsc_sdp(crtc_state, conn_state)) { 2670 intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, 2671 vsc); 2672 } else if (crtc_state->has_panel_replay) { 2673 /* 2674 * [Panel Replay without colorimetry info] 2675 * Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223 2676 * VSC SDP supporting 3D stereo + Panel Replay. 2677 */ 2678 vsc->revision = 0x6; 2679 vsc->length = 0x10; 2680 } else if (crtc_state->has_sel_update) { 2681 /* 2682 * [PSR2 without colorimetry] 2683 * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11 2684 * 3D stereo + PSR/PSR2 + Y-coordinate. 2685 */ 2686 vsc->revision = 0x4; 2687 vsc->length = 0xe; 2688 } else { 2689 /* 2690 * [PSR1] 2691 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118 2692 * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or 2693 * higher). 2694 */ 2695 vsc->revision = 0x2; 2696 vsc->length = 0x8; 2697 } 2698 } 2699 2700 static void 2701 intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp, 2702 struct intel_crtc_state *crtc_state, 2703 const struct drm_connector_state *conn_state) 2704 { 2705 int ret; 2706 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2707 struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm; 2708 2709 if (!conn_state->hdr_output_metadata) 2710 return; 2711 2712 ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state); 2713 2714 if (ret) { 2715 drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n"); 2716 return; 2717 } 2718 2719 crtc_state->infoframes.enable |= 2720 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA); 2721 } 2722 2723 static bool can_enable_drrs(struct intel_connector *connector, 2724 const struct intel_crtc_state *pipe_config, 2725 const struct drm_display_mode *downclock_mode) 2726 { 2727 struct drm_i915_private *i915 = to_i915(connector->base.dev); 2728 2729 if (pipe_config->vrr.enable) 2730 return false; 2731 2732 /* 2733 * DRRS and PSR can't be enable together, so giving preference to PSR 2734 * as it allows more power-savings by complete shutting down display, 2735 * so to guarantee this, intel_drrs_compute_config() must be called 2736 * after intel_psr_compute_config(). 2737 */ 2738 if (pipe_config->has_psr) 2739 return false; 2740 2741 /* FIXME missing FDI M2/N2 etc. */ 2742 if (pipe_config->has_pch_encoder) 2743 return false; 2744 2745 if (!intel_cpu_transcoder_has_drrs(i915, pipe_config->cpu_transcoder)) 2746 return false; 2747 2748 return downclock_mode && 2749 intel_panel_drrs_type(connector) == DRRS_TYPE_SEAMLESS; 2750 } 2751 2752 static void 2753 intel_dp_drrs_compute_config(struct intel_connector *connector, 2754 struct intel_crtc_state *pipe_config, 2755 int link_bpp_x16) 2756 { 2757 struct drm_i915_private *i915 = to_i915(connector->base.dev); 2758 const struct drm_display_mode *downclock_mode = 2759 intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode); 2760 int pixel_clock; 2761 2762 /* 2763 * FIXME all joined pipes share the same transcoder. 2764 * Need to account for that when updating M/N live. 2765 */ 2766 if (has_seamless_m_n(connector) && !pipe_config->joiner_pipes) 2767 pipe_config->update_m_n = true; 2768 2769 if (!can_enable_drrs(connector, pipe_config, downclock_mode)) { 2770 if (intel_cpu_transcoder_has_m2_n2(i915, pipe_config->cpu_transcoder)) 2771 intel_zero_m_n(&pipe_config->dp_m2_n2); 2772 return; 2773 } 2774 2775 if (IS_IRONLAKE(i915) || IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) 2776 pipe_config->msa_timing_delay = connector->panel.vbt.edp.drrs_msa_timing_delay; 2777 2778 pipe_config->has_drrs = true; 2779 2780 pixel_clock = downclock_mode->clock; 2781 if (pipe_config->splitter.enable) 2782 pixel_clock /= pipe_config->splitter.link_count; 2783 2784 intel_link_compute_m_n(link_bpp_x16, pipe_config->lane_count, pixel_clock, 2785 pipe_config->port_clock, 2786 intel_dp_bw_fec_overhead(pipe_config->fec_enable), 2787 &pipe_config->dp_m2_n2); 2788 2789 /* FIXME: abstract this better */ 2790 if (pipe_config->splitter.enable) 2791 pipe_config->dp_m2_n2.data_m *= pipe_config->splitter.link_count; 2792 } 2793 2794 static bool intel_dp_has_audio(struct intel_encoder *encoder, 2795 const struct drm_connector_state *conn_state) 2796 { 2797 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2798 const struct intel_digital_connector_state *intel_conn_state = 2799 to_intel_digital_connector_state(conn_state); 2800 struct intel_connector *connector = 2801 to_intel_connector(conn_state->connector); 2802 2803 if (!intel_dp_port_has_audio(i915, encoder->port)) 2804 return false; 2805 2806 if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) 2807 return connector->base.display_info.has_audio; 2808 else 2809 return intel_conn_state->force_audio == HDMI_AUDIO_ON; 2810 } 2811 2812 static int 2813 intel_dp_compute_output_format(struct intel_encoder *encoder, 2814 struct intel_crtc_state *crtc_state, 2815 struct drm_connector_state *conn_state, 2816 bool respect_downstream_limits) 2817 { 2818 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2819 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2820 struct intel_connector *connector = intel_dp->attached_connector; 2821 const struct drm_display_info *info = &connector->base.display_info; 2822 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 2823 bool ycbcr_420_only; 2824 int ret; 2825 2826 ycbcr_420_only = drm_mode_is_420_only(info, adjusted_mode); 2827 2828 if (ycbcr_420_only && !connector->base.ycbcr_420_allowed) { 2829 drm_dbg_kms(&i915->drm, 2830 "YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. Falling back to RGB.\n"); 2831 crtc_state->sink_format = INTEL_OUTPUT_FORMAT_RGB; 2832 } else { 2833 crtc_state->sink_format = intel_dp_sink_format(connector, adjusted_mode); 2834 } 2835 2836 crtc_state->output_format = intel_dp_output_format(connector, crtc_state->sink_format); 2837 2838 ret = intel_dp_compute_link_config(encoder, crtc_state, conn_state, 2839 respect_downstream_limits); 2840 if (ret) { 2841 if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR420 || 2842 !connector->base.ycbcr_420_allowed || 2843 !drm_mode_is_420_also(info, adjusted_mode)) 2844 return ret; 2845 2846 crtc_state->sink_format = INTEL_OUTPUT_FORMAT_YCBCR420; 2847 crtc_state->output_format = intel_dp_output_format(connector, 2848 crtc_state->sink_format); 2849 ret = intel_dp_compute_link_config(encoder, crtc_state, conn_state, 2850 respect_downstream_limits); 2851 } 2852 2853 return ret; 2854 } 2855 2856 void 2857 intel_dp_audio_compute_config(struct intel_encoder *encoder, 2858 struct intel_crtc_state *pipe_config, 2859 struct drm_connector_state *conn_state) 2860 { 2861 pipe_config->has_audio = 2862 intel_dp_has_audio(encoder, conn_state) && 2863 intel_audio_compute_config(encoder, pipe_config, conn_state); 2864 2865 pipe_config->sdp_split_enable = pipe_config->has_audio && 2866 intel_dp_is_uhbr(pipe_config); 2867 } 2868 2869 static void intel_dp_queue_modeset_retry_work(struct intel_connector *connector) 2870 { 2871 struct drm_i915_private *i915 = to_i915(connector->base.dev); 2872 2873 drm_connector_get(&connector->base); 2874 if (!queue_work(i915->unordered_wq, &connector->modeset_retry_work)) 2875 drm_connector_put(&connector->base); 2876 } 2877 2878 /* NOTE: @state is only valid for MST links and can be %NULL for SST. */ 2879 void 2880 intel_dp_queue_modeset_retry_for_link(struct intel_atomic_state *state, 2881 struct intel_encoder *encoder, 2882 const struct intel_crtc_state *crtc_state) 2883 { 2884 struct intel_connector *connector; 2885 struct intel_digital_connector_state *conn_state; 2886 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2887 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2888 int i; 2889 2890 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) { 2891 intel_dp_queue_modeset_retry_work(intel_dp->attached_connector); 2892 2893 return; 2894 } 2895 2896 if (drm_WARN_ON(&i915->drm, !state)) 2897 return; 2898 2899 for_each_new_intel_connector_in_state(state, connector, conn_state, i) { 2900 if (!conn_state->base.crtc) 2901 continue; 2902 2903 if (connector->mst_port == intel_dp) 2904 intel_dp_queue_modeset_retry_work(connector); 2905 } 2906 } 2907 2908 int 2909 intel_dp_compute_config(struct intel_encoder *encoder, 2910 struct intel_crtc_state *pipe_config, 2911 struct drm_connector_state *conn_state) 2912 { 2913 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2914 struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state); 2915 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2916 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2917 const struct drm_display_mode *fixed_mode; 2918 struct intel_connector *connector = intel_dp->attached_connector; 2919 int ret = 0, link_bpp_x16; 2920 2921 if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && encoder->port != PORT_A) 2922 pipe_config->has_pch_encoder = true; 2923 2924 fixed_mode = intel_panel_fixed_mode(connector, adjusted_mode); 2925 if (intel_dp_is_edp(intel_dp) && fixed_mode) { 2926 ret = intel_panel_compute_config(connector, adjusted_mode); 2927 if (ret) 2928 return ret; 2929 } 2930 2931 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) 2932 return -EINVAL; 2933 2934 if (!connector->base.interlace_allowed && 2935 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) 2936 return -EINVAL; 2937 2938 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) 2939 return -EINVAL; 2940 2941 if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay)) 2942 return -EINVAL; 2943 2944 /* 2945 * Try to respect downstream TMDS clock limits first, if 2946 * that fails assume the user might know something we don't. 2947 */ 2948 ret = intel_dp_compute_output_format(encoder, pipe_config, conn_state, true); 2949 if (ret) 2950 ret = intel_dp_compute_output_format(encoder, pipe_config, conn_state, false); 2951 if (ret) 2952 return ret; 2953 2954 if ((intel_dp_is_edp(intel_dp) && fixed_mode) || 2955 pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) { 2956 ret = intel_panel_fitting(pipe_config, conn_state); 2957 if (ret) 2958 return ret; 2959 } 2960 2961 pipe_config->limited_color_range = 2962 intel_dp_limited_color_range(pipe_config, conn_state); 2963 2964 pipe_config->enhanced_framing = 2965 drm_dp_enhanced_frame_cap(intel_dp->dpcd); 2966 2967 if (pipe_config->dsc.compression_enable) 2968 link_bpp_x16 = pipe_config->dsc.compressed_bpp_x16; 2969 else 2970 link_bpp_x16 = to_bpp_x16(intel_dp_output_bpp(pipe_config->output_format, 2971 pipe_config->pipe_bpp)); 2972 2973 if (intel_dp->mso_link_count) { 2974 int n = intel_dp->mso_link_count; 2975 int overlap = intel_dp->mso_pixel_overlap; 2976 2977 pipe_config->splitter.enable = true; 2978 pipe_config->splitter.link_count = n; 2979 pipe_config->splitter.pixel_overlap = overlap; 2980 2981 drm_dbg_kms(&dev_priv->drm, "MSO link count %d, pixel overlap %d\n", 2982 n, overlap); 2983 2984 adjusted_mode->crtc_hdisplay = adjusted_mode->crtc_hdisplay / n + overlap; 2985 adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hblank_start / n + overlap; 2986 adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_end / n + overlap; 2987 adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hsync_start / n + overlap; 2988 adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_end / n + overlap; 2989 adjusted_mode->crtc_htotal = adjusted_mode->crtc_htotal / n + overlap; 2990 adjusted_mode->crtc_clock /= n; 2991 } 2992 2993 intel_dp_audio_compute_config(encoder, pipe_config, conn_state); 2994 2995 intel_link_compute_m_n(link_bpp_x16, 2996 pipe_config->lane_count, 2997 adjusted_mode->crtc_clock, 2998 pipe_config->port_clock, 2999 intel_dp_bw_fec_overhead(pipe_config->fec_enable), 3000 &pipe_config->dp_m_n); 3001 3002 /* FIXME: abstract this better */ 3003 if (pipe_config->splitter.enable) 3004 pipe_config->dp_m_n.data_m *= pipe_config->splitter.link_count; 3005 3006 if (!HAS_DDI(dev_priv)) 3007 g4x_dp_set_clock(encoder, pipe_config); 3008 3009 intel_vrr_compute_config(pipe_config, conn_state); 3010 intel_dp_compute_as_sdp(intel_dp, pipe_config); 3011 intel_psr_compute_config(intel_dp, pipe_config, conn_state); 3012 intel_alpm_lobf_compute_config(intel_dp, pipe_config, conn_state); 3013 intel_dp_drrs_compute_config(connector, pipe_config, link_bpp_x16); 3014 intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state); 3015 intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state); 3016 3017 return intel_dp_tunnel_atomic_compute_stream_bw(state, intel_dp, connector, 3018 pipe_config); 3019 } 3020 3021 void intel_dp_set_link_params(struct intel_dp *intel_dp, 3022 int link_rate, int lane_count) 3023 { 3024 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set)); 3025 intel_dp->link_trained = false; 3026 intel_dp->link_rate = link_rate; 3027 intel_dp->lane_count = lane_count; 3028 } 3029 3030 void intel_dp_reset_link_params(struct intel_dp *intel_dp) 3031 { 3032 intel_dp->link.max_lane_count = intel_dp_max_common_lane_count(intel_dp); 3033 intel_dp->link.max_rate = intel_dp_max_common_rate(intel_dp); 3034 intel_dp->link.retrain_disabled = false; 3035 intel_dp->link.seq_train_failures = 0; 3036 } 3037 3038 /* Enable backlight PWM and backlight PP control. */ 3039 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state, 3040 const struct drm_connector_state *conn_state) 3041 { 3042 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder)); 3043 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3044 3045 if (!intel_dp_is_edp(intel_dp)) 3046 return; 3047 3048 drm_dbg_kms(&i915->drm, "\n"); 3049 3050 intel_backlight_enable(crtc_state, conn_state); 3051 intel_pps_backlight_on(intel_dp); 3052 } 3053 3054 /* Disable backlight PP control and backlight PWM. */ 3055 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state) 3056 { 3057 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)); 3058 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3059 3060 if (!intel_dp_is_edp(intel_dp)) 3061 return; 3062 3063 drm_dbg_kms(&i915->drm, "\n"); 3064 3065 intel_pps_backlight_off(intel_dp); 3066 intel_backlight_disable(old_conn_state); 3067 } 3068 3069 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp) 3070 { 3071 /* 3072 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus 3073 * be capable of signalling downstream hpd with a long pulse. 3074 * Whether or not that means D3 is safe to use is not clear, 3075 * but let's assume so until proven otherwise. 3076 * 3077 * FIXME should really check all downstream ports... 3078 */ 3079 return intel_dp->dpcd[DP_DPCD_REV] == 0x11 && 3080 drm_dp_is_branch(intel_dp->dpcd) && 3081 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD; 3082 } 3083 3084 static int 3085 write_dsc_decompression_flag(struct drm_dp_aux *aux, u8 flag, bool set) 3086 { 3087 int err; 3088 u8 val; 3089 3090 err = drm_dp_dpcd_readb(aux, DP_DSC_ENABLE, &val); 3091 if (err < 0) 3092 return err; 3093 3094 if (set) 3095 val |= flag; 3096 else 3097 val &= ~flag; 3098 3099 return drm_dp_dpcd_writeb(aux, DP_DSC_ENABLE, val); 3100 } 3101 3102 static void 3103 intel_dp_sink_set_dsc_decompression(struct intel_connector *connector, 3104 bool enable) 3105 { 3106 struct drm_i915_private *i915 = to_i915(connector->base.dev); 3107 3108 if (write_dsc_decompression_flag(connector->dp.dsc_decompression_aux, 3109 DP_DECOMPRESSION_EN, enable) < 0) 3110 drm_dbg_kms(&i915->drm, 3111 "Failed to %s sink decompression state\n", 3112 str_enable_disable(enable)); 3113 } 3114 3115 static void 3116 intel_dp_sink_set_dsc_passthrough(const struct intel_connector *connector, 3117 bool enable) 3118 { 3119 struct drm_i915_private *i915 = to_i915(connector->base.dev); 3120 struct drm_dp_aux *aux = connector->port ? 3121 connector->port->passthrough_aux : NULL; 3122 3123 if (!aux) 3124 return; 3125 3126 if (write_dsc_decompression_flag(aux, 3127 DP_DSC_PASSTHROUGH_EN, enable) < 0) 3128 drm_dbg_kms(&i915->drm, 3129 "Failed to %s sink compression passthrough state\n", 3130 str_enable_disable(enable)); 3131 } 3132 3133 static int intel_dp_dsc_aux_ref_count(struct intel_atomic_state *state, 3134 const struct intel_connector *connector, 3135 bool for_get_ref) 3136 { 3137 struct drm_i915_private *i915 = to_i915(state->base.dev); 3138 struct drm_connector *_connector_iter; 3139 struct drm_connector_state *old_conn_state; 3140 struct drm_connector_state *new_conn_state; 3141 int ref_count = 0; 3142 int i; 3143 3144 /* 3145 * On SST the decompression AUX device won't be shared, each connector 3146 * uses for this its own AUX targeting the sink device. 3147 */ 3148 if (!connector->mst_port) 3149 return connector->dp.dsc_decompression_enabled ? 1 : 0; 3150 3151 for_each_oldnew_connector_in_state(&state->base, _connector_iter, 3152 old_conn_state, new_conn_state, i) { 3153 const struct intel_connector * 3154 connector_iter = to_intel_connector(_connector_iter); 3155 3156 if (connector_iter->mst_port != connector->mst_port) 3157 continue; 3158 3159 if (!connector_iter->dp.dsc_decompression_enabled) 3160 continue; 3161 3162 drm_WARN_ON(&i915->drm, 3163 (for_get_ref && !new_conn_state->crtc) || 3164 (!for_get_ref && !old_conn_state->crtc)); 3165 3166 if (connector_iter->dp.dsc_decompression_aux == 3167 connector->dp.dsc_decompression_aux) 3168 ref_count++; 3169 } 3170 3171 return ref_count; 3172 } 3173 3174 static bool intel_dp_dsc_aux_get_ref(struct intel_atomic_state *state, 3175 struct intel_connector *connector) 3176 { 3177 bool ret = intel_dp_dsc_aux_ref_count(state, connector, true) == 0; 3178 3179 connector->dp.dsc_decompression_enabled = true; 3180 3181 return ret; 3182 } 3183 3184 static bool intel_dp_dsc_aux_put_ref(struct intel_atomic_state *state, 3185 struct intel_connector *connector) 3186 { 3187 connector->dp.dsc_decompression_enabled = false; 3188 3189 return intel_dp_dsc_aux_ref_count(state, connector, false) == 0; 3190 } 3191 3192 /** 3193 * intel_dp_sink_enable_decompression - Enable DSC decompression in sink/last branch device 3194 * @state: atomic state 3195 * @connector: connector to enable the decompression for 3196 * @new_crtc_state: new state for the CRTC driving @connector 3197 * 3198 * Enable the DSC decompression if required in the %DP_DSC_ENABLE DPCD 3199 * register of the appropriate sink/branch device. On SST this is always the 3200 * sink device, whereas on MST based on each device's DSC capabilities it's 3201 * either the last branch device (enabling decompression in it) or both the 3202 * last branch device (enabling passthrough in it) and the sink device 3203 * (enabling decompression in it). 3204 */ 3205 void intel_dp_sink_enable_decompression(struct intel_atomic_state *state, 3206 struct intel_connector *connector, 3207 const struct intel_crtc_state *new_crtc_state) 3208 { 3209 struct drm_i915_private *i915 = to_i915(state->base.dev); 3210 3211 if (!new_crtc_state->dsc.compression_enable) 3212 return; 3213 3214 if (drm_WARN_ON(&i915->drm, 3215 !connector->dp.dsc_decompression_aux || 3216 connector->dp.dsc_decompression_enabled)) 3217 return; 3218 3219 if (!intel_dp_dsc_aux_get_ref(state, connector)) 3220 return; 3221 3222 intel_dp_sink_set_dsc_passthrough(connector, true); 3223 intel_dp_sink_set_dsc_decompression(connector, true); 3224 } 3225 3226 /** 3227 * intel_dp_sink_disable_decompression - Disable DSC decompression in sink/last branch device 3228 * @state: atomic state 3229 * @connector: connector to disable the decompression for 3230 * @old_crtc_state: old state for the CRTC driving @connector 3231 * 3232 * Disable the DSC decompression if required in the %DP_DSC_ENABLE DPCD 3233 * register of the appropriate sink/branch device, corresponding to the 3234 * sequence in intel_dp_sink_enable_decompression(). 3235 */ 3236 void intel_dp_sink_disable_decompression(struct intel_atomic_state *state, 3237 struct intel_connector *connector, 3238 const struct intel_crtc_state *old_crtc_state) 3239 { 3240 struct drm_i915_private *i915 = to_i915(state->base.dev); 3241 3242 if (!old_crtc_state->dsc.compression_enable) 3243 return; 3244 3245 if (drm_WARN_ON(&i915->drm, 3246 !connector->dp.dsc_decompression_aux || 3247 !connector->dp.dsc_decompression_enabled)) 3248 return; 3249 3250 if (!intel_dp_dsc_aux_put_ref(state, connector)) 3251 return; 3252 3253 intel_dp_sink_set_dsc_decompression(connector, false); 3254 intel_dp_sink_set_dsc_passthrough(connector, false); 3255 } 3256 3257 static void 3258 intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful) 3259 { 3260 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3261 u8 oui[] = { 0x00, 0xaa, 0x01 }; 3262 u8 buf[3] = {}; 3263 3264 /* 3265 * During driver init, we want to be careful and avoid changing the source OUI if it's 3266 * already set to what we want, so as to avoid clearing any state by accident 3267 */ 3268 if (careful) { 3269 if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0) 3270 drm_err(&i915->drm, "Failed to read source OUI\n"); 3271 3272 if (memcmp(oui, buf, sizeof(oui)) == 0) 3273 return; 3274 } 3275 3276 if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0) 3277 drm_err(&i915->drm, "Failed to write source OUI\n"); 3278 3279 intel_dp->last_oui_write = jiffies; 3280 } 3281 3282 void intel_dp_wait_source_oui(struct intel_dp *intel_dp) 3283 { 3284 struct intel_connector *connector = intel_dp->attached_connector; 3285 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3286 3287 drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Performing OUI wait (%u ms)\n", 3288 connector->base.base.id, connector->base.name, 3289 connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout); 3290 3291 wait_remaining_ms_from_jiffies(intel_dp->last_oui_write, 3292 connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout); 3293 } 3294 3295 /* If the device supports it, try to set the power state appropriately */ 3296 void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode) 3297 { 3298 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 3299 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 3300 int ret, i; 3301 3302 /* Should have a valid DPCD by this point */ 3303 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 3304 return; 3305 3306 if (mode != DP_SET_POWER_D0) { 3307 if (downstream_hpd_needs_d0(intel_dp)) 3308 return; 3309 3310 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode); 3311 } else { 3312 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); 3313 3314 lspcon_resume(dp_to_dig_port(intel_dp)); 3315 3316 /* Write the source OUI as early as possible */ 3317 if (intel_dp_is_edp(intel_dp)) 3318 intel_edp_init_source_oui(intel_dp, false); 3319 3320 /* 3321 * When turning on, we need to retry for 1ms to give the sink 3322 * time to wake up. 3323 */ 3324 for (i = 0; i < 3; i++) { 3325 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode); 3326 if (ret == 1) 3327 break; 3328 msleep(1); 3329 } 3330 3331 if (ret == 1 && lspcon->active) 3332 lspcon_wait_pcon_mode(lspcon); 3333 } 3334 3335 if (ret != 1) 3336 drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Set power to %s failed\n", 3337 encoder->base.base.id, encoder->base.name, 3338 mode == DP_SET_POWER_D0 ? "D0" : "D3"); 3339 } 3340 3341 static bool 3342 intel_dp_get_dpcd(struct intel_dp *intel_dp); 3343 3344 /** 3345 * intel_dp_sync_state - sync the encoder state during init/resume 3346 * @encoder: intel encoder to sync 3347 * @crtc_state: state for the CRTC connected to the encoder 3348 * 3349 * Sync any state stored in the encoder wrt. HW state during driver init 3350 * and system resume. 3351 */ 3352 void intel_dp_sync_state(struct intel_encoder *encoder, 3353 const struct intel_crtc_state *crtc_state) 3354 { 3355 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3356 bool dpcd_updated = false; 3357 3358 /* 3359 * Don't clobber DPCD if it's been already read out during output 3360 * setup (eDP) or detect. 3361 */ 3362 if (crtc_state && intel_dp->dpcd[DP_DPCD_REV] == 0) { 3363 intel_dp_get_dpcd(intel_dp); 3364 dpcd_updated = true; 3365 } 3366 3367 intel_dp_tunnel_resume(intel_dp, crtc_state, dpcd_updated); 3368 3369 if (crtc_state) 3370 intel_dp_reset_link_params(intel_dp); 3371 } 3372 3373 bool intel_dp_initial_fastset_check(struct intel_encoder *encoder, 3374 struct intel_crtc_state *crtc_state) 3375 { 3376 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 3377 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3378 bool fastset = true; 3379 3380 /* 3381 * If BIOS has set an unsupported or non-standard link rate for some 3382 * reason force an encoder recompute and full modeset. 3383 */ 3384 if (intel_dp_rate_index(intel_dp->source_rates, intel_dp->num_source_rates, 3385 crtc_state->port_clock) < 0) { 3386 drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset due to unsupported link rate\n", 3387 encoder->base.base.id, encoder->base.name); 3388 crtc_state->uapi.connectors_changed = true; 3389 fastset = false; 3390 } 3391 3392 /* 3393 * FIXME hack to force full modeset when DSC is being used. 3394 * 3395 * As long as we do not have full state readout and config comparison 3396 * of crtc_state->dsc, we have no way to ensure reliable fastset. 3397 * Remove once we have readout for DSC. 3398 */ 3399 if (crtc_state->dsc.compression_enable) { 3400 drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset due to DSC being enabled\n", 3401 encoder->base.base.id, encoder->base.name); 3402 crtc_state->uapi.mode_changed = true; 3403 fastset = false; 3404 } 3405 3406 if (CAN_PANEL_REPLAY(intel_dp)) { 3407 drm_dbg_kms(&i915->drm, 3408 "[ENCODER:%d:%s] Forcing full modeset to compute panel replay state\n", 3409 encoder->base.base.id, encoder->base.name); 3410 crtc_state->uapi.mode_changed = true; 3411 fastset = false; 3412 } 3413 3414 return fastset; 3415 } 3416 3417 static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp) 3418 { 3419 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3420 3421 /* Clear the cached register set to avoid using stale values */ 3422 3423 memset(intel_dp->pcon_dsc_dpcd, 0, sizeof(intel_dp->pcon_dsc_dpcd)); 3424 3425 if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER, 3426 intel_dp->pcon_dsc_dpcd, 3427 sizeof(intel_dp->pcon_dsc_dpcd)) < 0) 3428 drm_err(&i915->drm, "Failed to read DPCD register 0x%x\n", 3429 DP_PCON_DSC_ENCODER); 3430 3431 drm_dbg_kms(&i915->drm, "PCON ENCODER DSC DPCD: %*ph\n", 3432 (int)sizeof(intel_dp->pcon_dsc_dpcd), intel_dp->pcon_dsc_dpcd); 3433 } 3434 3435 static int intel_dp_pcon_get_frl_mask(u8 frl_bw_mask) 3436 { 3437 int bw_gbps[] = {9, 18, 24, 32, 40, 48}; 3438 int i; 3439 3440 for (i = ARRAY_SIZE(bw_gbps) - 1; i >= 0; i--) { 3441 if (frl_bw_mask & (1 << i)) 3442 return bw_gbps[i]; 3443 } 3444 return 0; 3445 } 3446 3447 static int intel_dp_pcon_set_frl_mask(int max_frl) 3448 { 3449 switch (max_frl) { 3450 case 48: 3451 return DP_PCON_FRL_BW_MASK_48GBPS; 3452 case 40: 3453 return DP_PCON_FRL_BW_MASK_40GBPS; 3454 case 32: 3455 return DP_PCON_FRL_BW_MASK_32GBPS; 3456 case 24: 3457 return DP_PCON_FRL_BW_MASK_24GBPS; 3458 case 18: 3459 return DP_PCON_FRL_BW_MASK_18GBPS; 3460 case 9: 3461 return DP_PCON_FRL_BW_MASK_9GBPS; 3462 } 3463 3464 return 0; 3465 } 3466 3467 static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp) 3468 { 3469 struct intel_connector *intel_connector = intel_dp->attached_connector; 3470 struct drm_connector *connector = &intel_connector->base; 3471 int max_frl_rate; 3472 int max_lanes, rate_per_lane; 3473 int max_dsc_lanes, dsc_rate_per_lane; 3474 3475 max_lanes = connector->display_info.hdmi.max_lanes; 3476 rate_per_lane = connector->display_info.hdmi.max_frl_rate_per_lane; 3477 max_frl_rate = max_lanes * rate_per_lane; 3478 3479 if (connector->display_info.hdmi.dsc_cap.v_1p2) { 3480 max_dsc_lanes = connector->display_info.hdmi.dsc_cap.max_lanes; 3481 dsc_rate_per_lane = connector->display_info.hdmi.dsc_cap.max_frl_rate_per_lane; 3482 if (max_dsc_lanes && dsc_rate_per_lane) 3483 max_frl_rate = min(max_frl_rate, max_dsc_lanes * dsc_rate_per_lane); 3484 } 3485 3486 return max_frl_rate; 3487 } 3488 3489 static bool 3490 intel_dp_pcon_is_frl_trained(struct intel_dp *intel_dp, 3491 u8 max_frl_bw_mask, u8 *frl_trained_mask) 3492 { 3493 if (drm_dp_pcon_hdmi_link_active(&intel_dp->aux) && 3494 drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, frl_trained_mask) == DP_PCON_HDMI_MODE_FRL && 3495 *frl_trained_mask >= max_frl_bw_mask) 3496 return true; 3497 3498 return false; 3499 } 3500 3501 static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp) 3502 { 3503 #define TIMEOUT_FRL_READY_MS 500 3504 #define TIMEOUT_HDMI_LINK_ACTIVE_MS 1000 3505 3506 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3507 int max_frl_bw, max_pcon_frl_bw, max_edid_frl_bw, ret; 3508 u8 max_frl_bw_mask = 0, frl_trained_mask; 3509 bool is_active; 3510 3511 max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw; 3512 drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw); 3513 3514 max_edid_frl_bw = intel_dp_hdmi_sink_max_frl(intel_dp); 3515 drm_dbg(&i915->drm, "Sink max rate from EDID = %d Gbps\n", max_edid_frl_bw); 3516 3517 max_frl_bw = min(max_edid_frl_bw, max_pcon_frl_bw); 3518 3519 if (max_frl_bw <= 0) 3520 return -EINVAL; 3521 3522 max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw); 3523 drm_dbg(&i915->drm, "MAX_FRL_BW_MASK = %u\n", max_frl_bw_mask); 3524 3525 if (intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask)) 3526 goto frl_trained; 3527 3528 ret = drm_dp_pcon_frl_prepare(&intel_dp->aux, false); 3529 if (ret < 0) 3530 return ret; 3531 /* Wait for PCON to be FRL Ready */ 3532 wait_for(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux) == true, TIMEOUT_FRL_READY_MS); 3533 3534 if (!is_active) 3535 return -ETIMEDOUT; 3536 3537 ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw, 3538 DP_PCON_ENABLE_SEQUENTIAL_LINK); 3539 if (ret < 0) 3540 return ret; 3541 ret = drm_dp_pcon_frl_configure_2(&intel_dp->aux, max_frl_bw_mask, 3542 DP_PCON_FRL_LINK_TRAIN_NORMAL); 3543 if (ret < 0) 3544 return ret; 3545 ret = drm_dp_pcon_frl_enable(&intel_dp->aux); 3546 if (ret < 0) 3547 return ret; 3548 /* 3549 * Wait for FRL to be completed 3550 * Check if the HDMI Link is up and active. 3551 */ 3552 wait_for(is_active = 3553 intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask), 3554 TIMEOUT_HDMI_LINK_ACTIVE_MS); 3555 3556 if (!is_active) 3557 return -ETIMEDOUT; 3558 3559 frl_trained: 3560 drm_dbg(&i915->drm, "FRL_TRAINED_MASK = %u\n", frl_trained_mask); 3561 intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask); 3562 intel_dp->frl.is_trained = true; 3563 drm_dbg(&i915->drm, "FRL trained with : %d Gbps\n", intel_dp->frl.trained_rate_gbps); 3564 3565 return 0; 3566 } 3567 3568 static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp) 3569 { 3570 if (drm_dp_is_branch(intel_dp->dpcd) && 3571 intel_dp_has_hdmi_sink(intel_dp) && 3572 intel_dp_hdmi_sink_max_frl(intel_dp) > 0) 3573 return true; 3574 3575 return false; 3576 } 3577 3578 static 3579 int intel_dp_pcon_set_tmds_mode(struct intel_dp *intel_dp) 3580 { 3581 int ret; 3582 u8 buf = 0; 3583 3584 /* Set PCON source control mode */ 3585 buf |= DP_PCON_ENABLE_SOURCE_CTL_MODE; 3586 3587 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf); 3588 if (ret < 0) 3589 return ret; 3590 3591 /* Set HDMI LINK ENABLE */ 3592 buf |= DP_PCON_ENABLE_HDMI_LINK; 3593 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf); 3594 if (ret < 0) 3595 return ret; 3596 3597 return 0; 3598 } 3599 3600 void intel_dp_check_frl_training(struct intel_dp *intel_dp) 3601 { 3602 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3603 3604 /* 3605 * Always go for FRL training if: 3606 * -PCON supports SRC_CTL_MODE (VESA DP2.0-HDMI2.1 PCON Spec Draft-1 Sec-7) 3607 * -sink is HDMI2.1 3608 */ 3609 if (!(intel_dp->downstream_ports[2] & DP_PCON_SOURCE_CTL_MODE) || 3610 !intel_dp_is_hdmi_2_1_sink(intel_dp) || 3611 intel_dp->frl.is_trained) 3612 return; 3613 3614 if (intel_dp_pcon_start_frl_training(intel_dp) < 0) { 3615 int ret, mode; 3616 3617 drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n"); 3618 ret = intel_dp_pcon_set_tmds_mode(intel_dp); 3619 mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL); 3620 3621 if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS) 3622 drm_dbg(&dev_priv->drm, "Issue with PCON, cannot set TMDS mode\n"); 3623 } else { 3624 drm_dbg(&dev_priv->drm, "FRL training Completed\n"); 3625 } 3626 } 3627 3628 static int 3629 intel_dp_pcon_dsc_enc_slice_height(const struct intel_crtc_state *crtc_state) 3630 { 3631 int vactive = crtc_state->hw.adjusted_mode.vdisplay; 3632 3633 return intel_hdmi_dsc_get_slice_height(vactive); 3634 } 3635 3636 static int 3637 intel_dp_pcon_dsc_enc_slices(struct intel_dp *intel_dp, 3638 const struct intel_crtc_state *crtc_state) 3639 { 3640 struct intel_connector *intel_connector = intel_dp->attached_connector; 3641 struct drm_connector *connector = &intel_connector->base; 3642 int hdmi_throughput = connector->display_info.hdmi.dsc_cap.clk_per_slice; 3643 int hdmi_max_slices = connector->display_info.hdmi.dsc_cap.max_slices; 3644 int pcon_max_slices = drm_dp_pcon_dsc_max_slices(intel_dp->pcon_dsc_dpcd); 3645 int pcon_max_slice_width = drm_dp_pcon_dsc_max_slice_width(intel_dp->pcon_dsc_dpcd); 3646 3647 return intel_hdmi_dsc_get_num_slices(crtc_state, pcon_max_slices, 3648 pcon_max_slice_width, 3649 hdmi_max_slices, hdmi_throughput); 3650 } 3651 3652 static int 3653 intel_dp_pcon_dsc_enc_bpp(struct intel_dp *intel_dp, 3654 const struct intel_crtc_state *crtc_state, 3655 int num_slices, int slice_width) 3656 { 3657 struct intel_connector *intel_connector = intel_dp->attached_connector; 3658 struct drm_connector *connector = &intel_connector->base; 3659 int output_format = crtc_state->output_format; 3660 bool hdmi_all_bpp = connector->display_info.hdmi.dsc_cap.all_bpp; 3661 int pcon_fractional_bpp = drm_dp_pcon_dsc_bpp_incr(intel_dp->pcon_dsc_dpcd); 3662 int hdmi_max_chunk_bytes = 3663 connector->display_info.hdmi.dsc_cap.total_chunk_kbytes * 1024; 3664 3665 return intel_hdmi_dsc_get_bpp(pcon_fractional_bpp, slice_width, 3666 num_slices, output_format, hdmi_all_bpp, 3667 hdmi_max_chunk_bytes); 3668 } 3669 3670 void 3671 intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp, 3672 const struct intel_crtc_state *crtc_state) 3673 { 3674 u8 pps_param[6]; 3675 int slice_height; 3676 int slice_width; 3677 int num_slices; 3678 int bits_per_pixel; 3679 int ret; 3680 struct intel_connector *intel_connector = intel_dp->attached_connector; 3681 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3682 struct drm_connector *connector; 3683 bool hdmi_is_dsc_1_2; 3684 3685 if (!intel_dp_is_hdmi_2_1_sink(intel_dp)) 3686 return; 3687 3688 if (!intel_connector) 3689 return; 3690 connector = &intel_connector->base; 3691 hdmi_is_dsc_1_2 = connector->display_info.hdmi.dsc_cap.v_1p2; 3692 3693 if (!drm_dp_pcon_enc_is_dsc_1_2(intel_dp->pcon_dsc_dpcd) || 3694 !hdmi_is_dsc_1_2) 3695 return; 3696 3697 slice_height = intel_dp_pcon_dsc_enc_slice_height(crtc_state); 3698 if (!slice_height) 3699 return; 3700 3701 num_slices = intel_dp_pcon_dsc_enc_slices(intel_dp, crtc_state); 3702 if (!num_slices) 3703 return; 3704 3705 slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay, 3706 num_slices); 3707 3708 bits_per_pixel = intel_dp_pcon_dsc_enc_bpp(intel_dp, crtc_state, 3709 num_slices, slice_width); 3710 if (!bits_per_pixel) 3711 return; 3712 3713 pps_param[0] = slice_height & 0xFF; 3714 pps_param[1] = slice_height >> 8; 3715 pps_param[2] = slice_width & 0xFF; 3716 pps_param[3] = slice_width >> 8; 3717 pps_param[4] = bits_per_pixel & 0xFF; 3718 pps_param[5] = (bits_per_pixel >> 8) & 0x3; 3719 3720 ret = drm_dp_pcon_pps_override_param(&intel_dp->aux, pps_param); 3721 if (ret < 0) 3722 drm_dbg_kms(&i915->drm, "Failed to set pcon DSC\n"); 3723 } 3724 3725 void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp, 3726 const struct intel_crtc_state *crtc_state) 3727 { 3728 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3729 bool ycbcr444_to_420 = false; 3730 bool rgb_to_ycbcr = false; 3731 u8 tmp; 3732 3733 if (intel_dp->dpcd[DP_DPCD_REV] < 0x13) 3734 return; 3735 3736 if (!drm_dp_is_branch(intel_dp->dpcd)) 3737 return; 3738 3739 tmp = intel_dp_has_hdmi_sink(intel_dp) ? DP_HDMI_DVI_OUTPUT_CONFIG : 0; 3740 3741 if (drm_dp_dpcd_writeb(&intel_dp->aux, 3742 DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1) 3743 drm_dbg_kms(&i915->drm, "Failed to %s protocol converter HDMI mode\n", 3744 str_enable_disable(intel_dp_has_hdmi_sink(intel_dp))); 3745 3746 if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR420) { 3747 switch (crtc_state->output_format) { 3748 case INTEL_OUTPUT_FORMAT_YCBCR420: 3749 break; 3750 case INTEL_OUTPUT_FORMAT_YCBCR444: 3751 ycbcr444_to_420 = true; 3752 break; 3753 case INTEL_OUTPUT_FORMAT_RGB: 3754 rgb_to_ycbcr = true; 3755 ycbcr444_to_420 = true; 3756 break; 3757 default: 3758 MISSING_CASE(crtc_state->output_format); 3759 break; 3760 } 3761 } else if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR444) { 3762 switch (crtc_state->output_format) { 3763 case INTEL_OUTPUT_FORMAT_YCBCR444: 3764 break; 3765 case INTEL_OUTPUT_FORMAT_RGB: 3766 rgb_to_ycbcr = true; 3767 break; 3768 default: 3769 MISSING_CASE(crtc_state->output_format); 3770 break; 3771 } 3772 } 3773 3774 tmp = ycbcr444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0; 3775 3776 if (drm_dp_dpcd_writeb(&intel_dp->aux, 3777 DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1) 3778 drm_dbg_kms(&i915->drm, 3779 "Failed to %s protocol converter YCbCr 4:2:0 conversion mode\n", 3780 str_enable_disable(intel_dp->dfp.ycbcr_444_to_420)); 3781 3782 tmp = rgb_to_ycbcr ? DP_CONVERSION_BT709_RGB_YCBCR_ENABLE : 0; 3783 3784 if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0) 3785 drm_dbg_kms(&i915->drm, 3786 "Failed to %s protocol converter RGB->YCbCr conversion mode\n", 3787 str_enable_disable(tmp)); 3788 } 3789 3790 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) 3791 { 3792 u8 dprx = 0; 3793 3794 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST, 3795 &dprx) != 1) 3796 return false; 3797 return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED; 3798 } 3799 3800 static void intel_dp_read_dsc_dpcd(struct drm_dp_aux *aux, 3801 u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) 3802 { 3803 if (drm_dp_dpcd_read(aux, DP_DSC_SUPPORT, dsc_dpcd, 3804 DP_DSC_RECEIVER_CAP_SIZE) < 0) { 3805 drm_err(aux->drm_dev, 3806 "Failed to read DPCD register 0x%x\n", 3807 DP_DSC_SUPPORT); 3808 return; 3809 } 3810 3811 drm_dbg_kms(aux->drm_dev, "DSC DPCD: %*ph\n", 3812 DP_DSC_RECEIVER_CAP_SIZE, 3813 dsc_dpcd); 3814 } 3815 3816 void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, struct intel_connector *connector) 3817 { 3818 struct drm_i915_private *i915 = to_i915(connector->base.dev); 3819 3820 /* 3821 * Clear the cached register set to avoid using stale values 3822 * for the sinks that do not support DSC. 3823 */ 3824 memset(connector->dp.dsc_dpcd, 0, sizeof(connector->dp.dsc_dpcd)); 3825 3826 /* Clear fec_capable to avoid using stale values */ 3827 connector->dp.fec_capability = 0; 3828 3829 if (dpcd_rev < DP_DPCD_REV_14) 3830 return; 3831 3832 intel_dp_read_dsc_dpcd(connector->dp.dsc_decompression_aux, 3833 connector->dp.dsc_dpcd); 3834 3835 if (drm_dp_dpcd_readb(connector->dp.dsc_decompression_aux, DP_FEC_CAPABILITY, 3836 &connector->dp.fec_capability) < 0) { 3837 drm_err(&i915->drm, "Failed to read FEC DPCD register\n"); 3838 return; 3839 } 3840 3841 drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n", 3842 connector->dp.fec_capability); 3843 } 3844 3845 static void intel_edp_get_dsc_sink_cap(u8 edp_dpcd_rev, struct intel_connector *connector) 3846 { 3847 if (edp_dpcd_rev < DP_EDP_14) 3848 return; 3849 3850 intel_dp_read_dsc_dpcd(connector->dp.dsc_decompression_aux, connector->dp.dsc_dpcd); 3851 } 3852 3853 static void intel_edp_mso_mode_fixup(struct intel_connector *connector, 3854 struct drm_display_mode *mode) 3855 { 3856 struct intel_dp *intel_dp = intel_attached_dp(connector); 3857 struct drm_i915_private *i915 = to_i915(connector->base.dev); 3858 int n = intel_dp->mso_link_count; 3859 int overlap = intel_dp->mso_pixel_overlap; 3860 3861 if (!mode || !n) 3862 return; 3863 3864 mode->hdisplay = (mode->hdisplay - overlap) * n; 3865 mode->hsync_start = (mode->hsync_start - overlap) * n; 3866 mode->hsync_end = (mode->hsync_end - overlap) * n; 3867 mode->htotal = (mode->htotal - overlap) * n; 3868 mode->clock *= n; 3869 3870 drm_mode_set_name(mode); 3871 3872 drm_dbg_kms(&i915->drm, 3873 "[CONNECTOR:%d:%s] using generated MSO mode: " DRM_MODE_FMT "\n", 3874 connector->base.base.id, connector->base.name, 3875 DRM_MODE_ARG(mode)); 3876 } 3877 3878 void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp) 3879 { 3880 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3881 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3882 struct intel_connector *connector = intel_dp->attached_connector; 3883 3884 if (connector->panel.vbt.edp.bpp && pipe_bpp > connector->panel.vbt.edp.bpp) { 3885 /* 3886 * This is a big fat ugly hack. 3887 * 3888 * Some machines in UEFI boot mode provide us a VBT that has 18 3889 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons 3890 * unknown we fail to light up. Yet the same BIOS boots up with 3891 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as 3892 * max, not what it tells us to use. 3893 * 3894 * Note: This will still be broken if the eDP panel is not lit 3895 * up by the BIOS, and thus we can't get the mode at module 3896 * load. 3897 */ 3898 drm_dbg_kms(&dev_priv->drm, 3899 "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n", 3900 pipe_bpp, connector->panel.vbt.edp.bpp); 3901 connector->panel.vbt.edp.bpp = pipe_bpp; 3902 } 3903 } 3904 3905 static void intel_edp_mso_init(struct intel_dp *intel_dp) 3906 { 3907 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3908 struct intel_connector *connector = intel_dp->attached_connector; 3909 struct drm_display_info *info = &connector->base.display_info; 3910 u8 mso; 3911 3912 if (intel_dp->edp_dpcd[0] < DP_EDP_14) 3913 return; 3914 3915 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_MSO_LINK_CAPABILITIES, &mso) != 1) { 3916 drm_err(&i915->drm, "Failed to read MSO cap\n"); 3917 return; 3918 } 3919 3920 /* Valid configurations are SST or MSO 2x1, 2x2, 4x1 */ 3921 mso &= DP_EDP_MSO_NUMBER_OF_LINKS_MASK; 3922 if (mso % 2 || mso > drm_dp_max_lane_count(intel_dp->dpcd)) { 3923 drm_err(&i915->drm, "Invalid MSO link count cap %u\n", mso); 3924 mso = 0; 3925 } 3926 3927 if (mso) { 3928 drm_dbg_kms(&i915->drm, "Sink MSO %ux%u configuration, pixel overlap %u\n", 3929 mso, drm_dp_max_lane_count(intel_dp->dpcd) / mso, 3930 info->mso_pixel_overlap); 3931 if (!HAS_MSO(i915)) { 3932 drm_err(&i915->drm, "No source MSO support, disabling\n"); 3933 mso = 0; 3934 } 3935 } 3936 3937 intel_dp->mso_link_count = mso; 3938 intel_dp->mso_pixel_overlap = mso ? info->mso_pixel_overlap : 0; 3939 } 3940 3941 static bool 3942 intel_edp_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector) 3943 { 3944 struct drm_i915_private *dev_priv = 3945 to_i915(dp_to_dig_port(intel_dp)->base.base.dev); 3946 3947 /* this function is meant to be called only once */ 3948 drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0); 3949 3950 if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0) 3951 return false; 3952 3953 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, 3954 drm_dp_is_branch(intel_dp->dpcd)); 3955 3956 /* 3957 * Read the eDP display control registers. 3958 * 3959 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in 3960 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it 3961 * set, but require eDP 1.4+ detection (e.g. for supported link rates 3962 * method). The display control registers should read zero if they're 3963 * not supported anyway. 3964 */ 3965 if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, 3966 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) == 3967 sizeof(intel_dp->edp_dpcd)) { 3968 drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n", 3969 (int)sizeof(intel_dp->edp_dpcd), 3970 intel_dp->edp_dpcd); 3971 3972 intel_dp->use_max_params = intel_dp->edp_dpcd[0] < DP_EDP_14; 3973 } 3974 3975 /* 3976 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks 3977 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1] 3978 */ 3979 intel_psr_init_dpcd(intel_dp); 3980 3981 /* Clear the default sink rates */ 3982 intel_dp->num_sink_rates = 0; 3983 3984 /* Read the eDP 1.4+ supported link rates. */ 3985 if (intel_dp->edp_dpcd[0] >= DP_EDP_14) { 3986 __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; 3987 int i; 3988 3989 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES, 3990 sink_rates, sizeof(sink_rates)); 3991 3992 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) { 3993 int val = le16_to_cpu(sink_rates[i]); 3994 3995 if (val == 0) 3996 break; 3997 3998 /* Value read multiplied by 200kHz gives the per-lane 3999 * link rate in kHz. The source rates are, however, 4000 * stored in terms of LS_Clk kHz. The full conversion 4001 * back to symbols is 4002 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte) 4003 */ 4004 intel_dp->sink_rates[i] = (val * 200) / 10; 4005 } 4006 intel_dp->num_sink_rates = i; 4007 } 4008 4009 /* 4010 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available, 4011 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise. 4012 */ 4013 if (intel_dp->num_sink_rates) 4014 intel_dp->use_rate_select = true; 4015 else 4016 intel_dp_set_sink_rates(intel_dp); 4017 intel_dp_set_max_sink_lane_count(intel_dp); 4018 4019 /* Read the eDP DSC DPCD registers */ 4020 if (HAS_DSC(dev_priv)) 4021 intel_edp_get_dsc_sink_cap(intel_dp->edp_dpcd[0], 4022 connector); 4023 4024 /* 4025 * If needed, program our source OUI so we can make various Intel-specific AUX services 4026 * available (such as HDR backlight controls) 4027 */ 4028 intel_edp_init_source_oui(intel_dp, true); 4029 4030 return true; 4031 } 4032 4033 static bool 4034 intel_dp_has_sink_count(struct intel_dp *intel_dp) 4035 { 4036 if (!intel_dp->attached_connector) 4037 return false; 4038 4039 return drm_dp_read_sink_count_cap(&intel_dp->attached_connector->base, 4040 intel_dp->dpcd, 4041 &intel_dp->desc); 4042 } 4043 4044 void intel_dp_update_sink_caps(struct intel_dp *intel_dp) 4045 { 4046 intel_dp_set_sink_rates(intel_dp); 4047 intel_dp_set_max_sink_lane_count(intel_dp); 4048 intel_dp_set_common_rates(intel_dp); 4049 } 4050 4051 static bool 4052 intel_dp_get_dpcd(struct intel_dp *intel_dp) 4053 { 4054 int ret; 4055 4056 if (intel_dp_init_lttpr_and_dprx_caps(intel_dp) < 0) 4057 return false; 4058 4059 /* 4060 * Don't clobber cached eDP rates. Also skip re-reading 4061 * the OUI/ID since we know it won't change. 4062 */ 4063 if (!intel_dp_is_edp(intel_dp)) { 4064 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, 4065 drm_dp_is_branch(intel_dp->dpcd)); 4066 4067 intel_dp_update_sink_caps(intel_dp); 4068 } 4069 4070 if (intel_dp_has_sink_count(intel_dp)) { 4071 ret = drm_dp_read_sink_count(&intel_dp->aux); 4072 if (ret < 0) 4073 return false; 4074 4075 /* 4076 * Sink count can change between short pulse hpd hence 4077 * a member variable in intel_dp will track any changes 4078 * between short pulse interrupts. 4079 */ 4080 intel_dp->sink_count = ret; 4081 4082 /* 4083 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that 4084 * a dongle is present but no display. Unless we require to know 4085 * if a dongle is present or not, we don't need to update 4086 * downstream port information. So, an early return here saves 4087 * time from performing other operations which are not required. 4088 */ 4089 if (!intel_dp->sink_count) 4090 return false; 4091 } 4092 4093 return drm_dp_read_downstream_info(&intel_dp->aux, intel_dp->dpcd, 4094 intel_dp->downstream_ports) == 0; 4095 } 4096 4097 static const char *intel_dp_mst_mode_str(enum drm_dp_mst_mode mst_mode) 4098 { 4099 if (mst_mode == DRM_DP_MST) 4100 return "MST"; 4101 else if (mst_mode == DRM_DP_SST_SIDEBAND_MSG) 4102 return "SST w/ sideband messaging"; 4103 else 4104 return "SST"; 4105 } 4106 4107 static enum drm_dp_mst_mode 4108 intel_dp_mst_mode_choose(struct intel_dp *intel_dp, 4109 enum drm_dp_mst_mode sink_mst_mode) 4110 { 4111 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4112 4113 if (!i915->display.params.enable_dp_mst) 4114 return DRM_DP_SST; 4115 4116 if (!intel_dp_mst_source_support(intel_dp)) 4117 return DRM_DP_SST; 4118 4119 if (sink_mst_mode == DRM_DP_SST_SIDEBAND_MSG && 4120 !(intel_dp->dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_128B132B)) 4121 return DRM_DP_SST; 4122 4123 return sink_mst_mode; 4124 } 4125 4126 static enum drm_dp_mst_mode 4127 intel_dp_mst_detect(struct intel_dp *intel_dp) 4128 { 4129 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4130 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 4131 enum drm_dp_mst_mode sink_mst_mode; 4132 enum drm_dp_mst_mode mst_detect; 4133 4134 sink_mst_mode = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd); 4135 4136 mst_detect = intel_dp_mst_mode_choose(intel_dp, sink_mst_mode); 4137 4138 drm_dbg_kms(&i915->drm, 4139 "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s -> enable: %s\n", 4140 encoder->base.base.id, encoder->base.name, 4141 str_yes_no(intel_dp_mst_source_support(intel_dp)), 4142 intel_dp_mst_mode_str(sink_mst_mode), 4143 str_yes_no(i915->display.params.enable_dp_mst), 4144 intel_dp_mst_mode_str(mst_detect)); 4145 4146 return mst_detect; 4147 } 4148 4149 static void 4150 intel_dp_mst_configure(struct intel_dp *intel_dp) 4151 { 4152 if (!intel_dp_mst_source_support(intel_dp)) 4153 return; 4154 4155 intel_dp->is_mst = intel_dp->mst_detect != DRM_DP_SST; 4156 4157 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); 4158 4159 /* Avoid stale info on the next detect cycle. */ 4160 intel_dp->mst_detect = DRM_DP_SST; 4161 } 4162 4163 static void 4164 intel_dp_mst_disconnect(struct intel_dp *intel_dp) 4165 { 4166 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4167 4168 if (!intel_dp->is_mst) 4169 return; 4170 4171 drm_dbg_kms(&i915->drm, "MST device may have disappeared %d vs %d\n", 4172 intel_dp->is_mst, intel_dp->mst_mgr.mst_state); 4173 intel_dp->is_mst = false; 4174 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); 4175 } 4176 4177 static bool 4178 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *esi) 4179 { 4180 return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, esi, 4) == 4; 4181 } 4182 4183 static bool intel_dp_ack_sink_irq_esi(struct intel_dp *intel_dp, u8 esi[4]) 4184 { 4185 int retry; 4186 4187 for (retry = 0; retry < 3; retry++) { 4188 if (drm_dp_dpcd_write(&intel_dp->aux, DP_SINK_COUNT_ESI + 1, 4189 &esi[1], 3) == 3) 4190 return true; 4191 } 4192 4193 return false; 4194 } 4195 4196 bool 4197 intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state, 4198 const struct drm_connector_state *conn_state) 4199 { 4200 /* 4201 * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication 4202 * of Color Encoding Format and Content Color Gamut], in order to 4203 * sending YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP. 4204 */ 4205 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 4206 return true; 4207 4208 switch (conn_state->colorspace) { 4209 case DRM_MODE_COLORIMETRY_SYCC_601: 4210 case DRM_MODE_COLORIMETRY_OPYCC_601: 4211 case DRM_MODE_COLORIMETRY_BT2020_YCC: 4212 case DRM_MODE_COLORIMETRY_BT2020_RGB: 4213 case DRM_MODE_COLORIMETRY_BT2020_CYCC: 4214 return true; 4215 default: 4216 break; 4217 } 4218 4219 return false; 4220 } 4221 4222 static ssize_t intel_dp_as_sdp_pack(const struct drm_dp_as_sdp *as_sdp, 4223 struct dp_sdp *sdp, size_t size) 4224 { 4225 size_t length = sizeof(struct dp_sdp); 4226 4227 if (size < length) 4228 return -ENOSPC; 4229 4230 memset(sdp, 0, size); 4231 4232 /* Prepare AS (Adaptive Sync) SDP Header */ 4233 sdp->sdp_header.HB0 = 0; 4234 sdp->sdp_header.HB1 = as_sdp->sdp_type; 4235 sdp->sdp_header.HB2 = 0x02; 4236 sdp->sdp_header.HB3 = as_sdp->length; 4237 4238 /* Fill AS (Adaptive Sync) SDP Payload */ 4239 sdp->db[0] = as_sdp->mode; 4240 sdp->db[1] = as_sdp->vtotal & 0xFF; 4241 sdp->db[2] = (as_sdp->vtotal >> 8) & 0xFF; 4242 sdp->db[3] = as_sdp->target_rr & 0xFF; 4243 sdp->db[4] = (as_sdp->target_rr >> 8) & 0x3; 4244 4245 if (as_sdp->target_rr_divider) 4246 sdp->db[4] |= 0x20; 4247 4248 return length; 4249 } 4250 4251 static ssize_t 4252 intel_dp_hdr_metadata_infoframe_sdp_pack(struct drm_i915_private *i915, 4253 const struct hdmi_drm_infoframe *drm_infoframe, 4254 struct dp_sdp *sdp, 4255 size_t size) 4256 { 4257 size_t length = sizeof(struct dp_sdp); 4258 const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE; 4259 unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE]; 4260 ssize_t len; 4261 4262 if (size < length) 4263 return -ENOSPC; 4264 4265 memset(sdp, 0, size); 4266 4267 len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf)); 4268 if (len < 0) { 4269 drm_dbg_kms(&i915->drm, "buffer size is smaller than hdr metadata infoframe\n"); 4270 return -ENOSPC; 4271 } 4272 4273 if (len != infoframe_size) { 4274 drm_dbg_kms(&i915->drm, "wrong static hdr metadata size\n"); 4275 return -ENOSPC; 4276 } 4277 4278 /* 4279 * Set up the infoframe sdp packet for HDR static metadata. 4280 * Prepare VSC Header for SU as per DP 1.4a spec, 4281 * Table 2-100 and Table 2-101 4282 */ 4283 4284 /* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */ 4285 sdp->sdp_header.HB0 = 0; 4286 /* 4287 * Packet Type 80h + Non-audio INFOFRAME Type value 4288 * HDMI_INFOFRAME_TYPE_DRM: 0x87 4289 * - 80h + Non-audio INFOFRAME Type value 4290 * - InfoFrame Type: 0x07 4291 * [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame] 4292 */ 4293 sdp->sdp_header.HB1 = drm_infoframe->type; 4294 /* 4295 * Least Significant Eight Bits of (Data Byte Count – 1) 4296 * infoframe_size - 1 4297 */ 4298 sdp->sdp_header.HB2 = 0x1D; 4299 /* INFOFRAME SDP Version Number */ 4300 sdp->sdp_header.HB3 = (0x13 << 2); 4301 /* CTA Header Byte 2 (INFOFRAME Version Number) */ 4302 sdp->db[0] = drm_infoframe->version; 4303 /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */ 4304 sdp->db[1] = drm_infoframe->length; 4305 /* 4306 * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after 4307 * HDMI_INFOFRAME_HEADER_SIZE 4308 */ 4309 BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2); 4310 memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE], 4311 HDMI_DRM_INFOFRAME_SIZE); 4312 4313 /* 4314 * Size of DP infoframe sdp packet for HDR static metadata consists of 4315 * - DP SDP Header(struct dp_sdp_header): 4 bytes 4316 * - Two Data Blocks: 2 bytes 4317 * CTA Header Byte2 (INFOFRAME Version Number) 4318 * CTA Header Byte3 (Length of INFOFRAME) 4319 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes 4320 * 4321 * Prior to GEN11's GMP register size is identical to DP HDR static metadata 4322 * infoframe size. But GEN11+ has larger than that size, write_infoframe 4323 * will pad rest of the size. 4324 */ 4325 return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE; 4326 } 4327 4328 static void intel_write_dp_sdp(struct intel_encoder *encoder, 4329 const struct intel_crtc_state *crtc_state, 4330 unsigned int type) 4331 { 4332 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 4333 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4334 struct dp_sdp sdp = {}; 4335 ssize_t len; 4336 4337 if ((crtc_state->infoframes.enable & 4338 intel_hdmi_infoframe_enable(type)) == 0) 4339 return; 4340 4341 switch (type) { 4342 case DP_SDP_VSC: 4343 len = drm_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp); 4344 break; 4345 case HDMI_PACKET_TYPE_GAMUT_METADATA: 4346 len = intel_dp_hdr_metadata_infoframe_sdp_pack(dev_priv, 4347 &crtc_state->infoframes.drm.drm, 4348 &sdp, sizeof(sdp)); 4349 break; 4350 case DP_SDP_ADAPTIVE_SYNC: 4351 len = intel_dp_as_sdp_pack(&crtc_state->infoframes.as_sdp, &sdp, 4352 sizeof(sdp)); 4353 break; 4354 default: 4355 MISSING_CASE(type); 4356 return; 4357 } 4358 4359 if (drm_WARN_ON(&dev_priv->drm, len < 0)) 4360 return; 4361 4362 dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len); 4363 } 4364 4365 void intel_dp_set_infoframes(struct intel_encoder *encoder, 4366 bool enable, 4367 const struct intel_crtc_state *crtc_state, 4368 const struct drm_connector_state *conn_state) 4369 { 4370 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4371 i915_reg_t reg = HSW_TVIDEO_DIP_CTL(dev_priv, 4372 crtc_state->cpu_transcoder); 4373 u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW | 4374 VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW | 4375 VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK; 4376 4377 if (HAS_AS_SDP(dev_priv)) 4378 dip_enable |= VIDEO_DIP_ENABLE_AS_ADL; 4379 4380 u32 val = intel_de_read(dev_priv, reg) & ~dip_enable; 4381 4382 /* TODO: Sanitize DSC enabling wrt. intel_dsc_dp_pps_write(). */ 4383 if (!enable && HAS_DSC(dev_priv)) 4384 val &= ~VDIP_ENABLE_PPS; 4385 4386 /* When PSR is enabled, this routine doesn't disable VSC DIP */ 4387 if (!crtc_state->has_psr) 4388 val &= ~VIDEO_DIP_ENABLE_VSC_HSW; 4389 4390 intel_de_write(dev_priv, reg, val); 4391 intel_de_posting_read(dev_priv, reg); 4392 4393 if (!enable) 4394 return; 4395 4396 intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC); 4397 intel_write_dp_sdp(encoder, crtc_state, DP_SDP_ADAPTIVE_SYNC); 4398 4399 intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA); 4400 } 4401 4402 static 4403 int intel_dp_as_sdp_unpack(struct drm_dp_as_sdp *as_sdp, 4404 const void *buffer, size_t size) 4405 { 4406 const struct dp_sdp *sdp = buffer; 4407 4408 if (size < sizeof(struct dp_sdp)) 4409 return -EINVAL; 4410 4411 memset(as_sdp, 0, sizeof(*as_sdp)); 4412 4413 if (sdp->sdp_header.HB0 != 0) 4414 return -EINVAL; 4415 4416 if (sdp->sdp_header.HB1 != DP_SDP_ADAPTIVE_SYNC) 4417 return -EINVAL; 4418 4419 if (sdp->sdp_header.HB2 != 0x02) 4420 return -EINVAL; 4421 4422 if ((sdp->sdp_header.HB3 & 0x3F) != 9) 4423 return -EINVAL; 4424 4425 as_sdp->length = sdp->sdp_header.HB3 & DP_ADAPTIVE_SYNC_SDP_LENGTH; 4426 as_sdp->mode = sdp->db[0] & DP_ADAPTIVE_SYNC_SDP_OPERATION_MODE; 4427 as_sdp->vtotal = (sdp->db[2] << 8) | sdp->db[1]; 4428 as_sdp->target_rr = (u64)sdp->db[3] | ((u64)sdp->db[4] & 0x3); 4429 as_sdp->target_rr_divider = sdp->db[4] & 0x20 ? true : false; 4430 4431 return 0; 4432 } 4433 4434 static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc, 4435 const void *buffer, size_t size) 4436 { 4437 const struct dp_sdp *sdp = buffer; 4438 4439 if (size < sizeof(struct dp_sdp)) 4440 return -EINVAL; 4441 4442 memset(vsc, 0, sizeof(*vsc)); 4443 4444 if (sdp->sdp_header.HB0 != 0) 4445 return -EINVAL; 4446 4447 if (sdp->sdp_header.HB1 != DP_SDP_VSC) 4448 return -EINVAL; 4449 4450 vsc->sdp_type = sdp->sdp_header.HB1; 4451 vsc->revision = sdp->sdp_header.HB2; 4452 vsc->length = sdp->sdp_header.HB3; 4453 4454 if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) || 4455 (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe) || 4456 (sdp->sdp_header.HB2 == 0x6 && sdp->sdp_header.HB3 == 0x10)) { 4457 /* 4458 * - HB2 = 0x2, HB3 = 0x8 4459 * VSC SDP supporting 3D stereo + PSR 4460 * - HB2 = 0x4, HB3 = 0xe 4461 * VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of 4462 * first scan line of the SU region (applies to eDP v1.4b 4463 * and higher). 4464 * - HB2 = 0x6, HB3 = 0x10 4465 * VSC SDP supporting 3D stereo + Panel Replay. 4466 */ 4467 return 0; 4468 } else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) { 4469 /* 4470 * - HB2 = 0x5, HB3 = 0x13 4471 * VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry 4472 * Format. 4473 */ 4474 vsc->pixelformat = (sdp->db[16] >> 4) & 0xf; 4475 vsc->colorimetry = sdp->db[16] & 0xf; 4476 vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1; 4477 4478 switch (sdp->db[17] & 0x7) { 4479 case 0x0: 4480 vsc->bpc = 6; 4481 break; 4482 case 0x1: 4483 vsc->bpc = 8; 4484 break; 4485 case 0x2: 4486 vsc->bpc = 10; 4487 break; 4488 case 0x3: 4489 vsc->bpc = 12; 4490 break; 4491 case 0x4: 4492 vsc->bpc = 16; 4493 break; 4494 default: 4495 MISSING_CASE(sdp->db[17] & 0x7); 4496 return -EINVAL; 4497 } 4498 4499 vsc->content_type = sdp->db[18] & 0x7; 4500 } else { 4501 return -EINVAL; 4502 } 4503 4504 return 0; 4505 } 4506 4507 static void 4508 intel_read_dp_as_sdp(struct intel_encoder *encoder, 4509 struct intel_crtc_state *crtc_state, 4510 struct drm_dp_as_sdp *as_sdp) 4511 { 4512 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 4513 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4514 unsigned int type = DP_SDP_ADAPTIVE_SYNC; 4515 struct dp_sdp sdp = {}; 4516 int ret; 4517 4518 if ((crtc_state->infoframes.enable & 4519 intel_hdmi_infoframe_enable(type)) == 0) 4520 return; 4521 4522 dig_port->read_infoframe(encoder, crtc_state, type, &sdp, 4523 sizeof(sdp)); 4524 4525 ret = intel_dp_as_sdp_unpack(as_sdp, &sdp, sizeof(sdp)); 4526 if (ret) 4527 drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP AS SDP\n"); 4528 } 4529 4530 static int 4531 intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe, 4532 const void *buffer, size_t size) 4533 { 4534 int ret; 4535 4536 const struct dp_sdp *sdp = buffer; 4537 4538 if (size < sizeof(struct dp_sdp)) 4539 return -EINVAL; 4540 4541 if (sdp->sdp_header.HB0 != 0) 4542 return -EINVAL; 4543 4544 if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM) 4545 return -EINVAL; 4546 4547 /* 4548 * Least Significant Eight Bits of (Data Byte Count – 1) 4549 * 1Dh (i.e., Data Byte Count = 30 bytes). 4550 */ 4551 if (sdp->sdp_header.HB2 != 0x1D) 4552 return -EINVAL; 4553 4554 /* Most Significant Two Bits of (Data Byte Count – 1), Clear to 00b. */ 4555 if ((sdp->sdp_header.HB3 & 0x3) != 0) 4556 return -EINVAL; 4557 4558 /* INFOFRAME SDP Version Number */ 4559 if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13) 4560 return -EINVAL; 4561 4562 /* CTA Header Byte 2 (INFOFRAME Version Number) */ 4563 if (sdp->db[0] != 1) 4564 return -EINVAL; 4565 4566 /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */ 4567 if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE) 4568 return -EINVAL; 4569 4570 ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2], 4571 HDMI_DRM_INFOFRAME_SIZE); 4572 4573 return ret; 4574 } 4575 4576 static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder, 4577 struct intel_crtc_state *crtc_state, 4578 struct drm_dp_vsc_sdp *vsc) 4579 { 4580 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 4581 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4582 unsigned int type = DP_SDP_VSC; 4583 struct dp_sdp sdp = {}; 4584 int ret; 4585 4586 if ((crtc_state->infoframes.enable & 4587 intel_hdmi_infoframe_enable(type)) == 0) 4588 return; 4589 4590 dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp)); 4591 4592 ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp)); 4593 4594 if (ret) 4595 drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n"); 4596 } 4597 4598 static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder, 4599 struct intel_crtc_state *crtc_state, 4600 struct hdmi_drm_infoframe *drm_infoframe) 4601 { 4602 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 4603 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4604 unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA; 4605 struct dp_sdp sdp = {}; 4606 int ret; 4607 4608 if ((crtc_state->infoframes.enable & 4609 intel_hdmi_infoframe_enable(type)) == 0) 4610 return; 4611 4612 dig_port->read_infoframe(encoder, crtc_state, type, &sdp, 4613 sizeof(sdp)); 4614 4615 ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp, 4616 sizeof(sdp)); 4617 4618 if (ret) 4619 drm_dbg_kms(&dev_priv->drm, 4620 "Failed to unpack DP HDR Metadata Infoframe SDP\n"); 4621 } 4622 4623 void intel_read_dp_sdp(struct intel_encoder *encoder, 4624 struct intel_crtc_state *crtc_state, 4625 unsigned int type) 4626 { 4627 switch (type) { 4628 case DP_SDP_VSC: 4629 intel_read_dp_vsc_sdp(encoder, crtc_state, 4630 &crtc_state->infoframes.vsc); 4631 break; 4632 case HDMI_PACKET_TYPE_GAMUT_METADATA: 4633 intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state, 4634 &crtc_state->infoframes.drm.drm); 4635 break; 4636 case DP_SDP_ADAPTIVE_SYNC: 4637 intel_read_dp_as_sdp(encoder, crtc_state, 4638 &crtc_state->infoframes.as_sdp); 4639 break; 4640 default: 4641 MISSING_CASE(type); 4642 break; 4643 } 4644 } 4645 4646 static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp) 4647 { 4648 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4649 int status = 0; 4650 int test_link_rate; 4651 u8 test_lane_count, test_link_bw; 4652 /* (DP CTS 1.2) 4653 * 4.3.1.11 4654 */ 4655 /* Read the TEST_LANE_COUNT and TEST_LINK_RTAE fields (DP CTS 3.1.4) */ 4656 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT, 4657 &test_lane_count); 4658 4659 if (status <= 0) { 4660 drm_dbg_kms(&i915->drm, "Lane count read failed\n"); 4661 return DP_TEST_NAK; 4662 } 4663 test_lane_count &= DP_MAX_LANE_COUNT_MASK; 4664 4665 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE, 4666 &test_link_bw); 4667 if (status <= 0) { 4668 drm_dbg_kms(&i915->drm, "Link Rate read failed\n"); 4669 return DP_TEST_NAK; 4670 } 4671 test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw); 4672 4673 /* Validate the requested link rate and lane count */ 4674 if (!intel_dp_link_params_valid(intel_dp, test_link_rate, 4675 test_lane_count)) 4676 return DP_TEST_NAK; 4677 4678 intel_dp->compliance.test_lane_count = test_lane_count; 4679 intel_dp->compliance.test_link_rate = test_link_rate; 4680 4681 return DP_TEST_ACK; 4682 } 4683 4684 static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp) 4685 { 4686 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4687 u8 test_pattern; 4688 u8 test_misc; 4689 __be16 h_width, v_height; 4690 int status = 0; 4691 4692 /* Read the TEST_PATTERN (DP CTS 3.1.5) */ 4693 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN, 4694 &test_pattern); 4695 if (status <= 0) { 4696 drm_dbg_kms(&i915->drm, "Test pattern read failed\n"); 4697 return DP_TEST_NAK; 4698 } 4699 if (test_pattern != DP_COLOR_RAMP) 4700 return DP_TEST_NAK; 4701 4702 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI, 4703 &h_width, 2); 4704 if (status <= 0) { 4705 drm_dbg_kms(&i915->drm, "H Width read failed\n"); 4706 return DP_TEST_NAK; 4707 } 4708 4709 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI, 4710 &v_height, 2); 4711 if (status <= 0) { 4712 drm_dbg_kms(&i915->drm, "V Height read failed\n"); 4713 return DP_TEST_NAK; 4714 } 4715 4716 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0, 4717 &test_misc); 4718 if (status <= 0) { 4719 drm_dbg_kms(&i915->drm, "TEST MISC read failed\n"); 4720 return DP_TEST_NAK; 4721 } 4722 if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB) 4723 return DP_TEST_NAK; 4724 if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA) 4725 return DP_TEST_NAK; 4726 switch (test_misc & DP_TEST_BIT_DEPTH_MASK) { 4727 case DP_TEST_BIT_DEPTH_6: 4728 intel_dp->compliance.test_data.bpc = 6; 4729 break; 4730 case DP_TEST_BIT_DEPTH_8: 4731 intel_dp->compliance.test_data.bpc = 8; 4732 break; 4733 default: 4734 return DP_TEST_NAK; 4735 } 4736 4737 intel_dp->compliance.test_data.video_pattern = test_pattern; 4738 intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width); 4739 intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height); 4740 /* Set test active flag here so userspace doesn't interrupt things */ 4741 intel_dp->compliance.test_active = true; 4742 4743 return DP_TEST_ACK; 4744 } 4745 4746 static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp) 4747 { 4748 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4749 u8 test_result = DP_TEST_ACK; 4750 struct intel_connector *intel_connector = intel_dp->attached_connector; 4751 struct drm_connector *connector = &intel_connector->base; 4752 4753 if (intel_connector->detect_edid == NULL || 4754 connector->edid_corrupt || 4755 intel_dp->aux.i2c_defer_count > 6) { 4756 /* Check EDID read for NACKs, DEFERs and corruption 4757 * (DP CTS 1.2 Core r1.1) 4758 * 4.2.2.4 : Failed EDID read, I2C_NAK 4759 * 4.2.2.5 : Failed EDID read, I2C_DEFER 4760 * 4.2.2.6 : EDID corruption detected 4761 * Use failsafe mode for all cases 4762 */ 4763 if (intel_dp->aux.i2c_nack_count > 0 || 4764 intel_dp->aux.i2c_defer_count > 0) 4765 drm_dbg_kms(&i915->drm, 4766 "EDID read had %d NACKs, %d DEFERs\n", 4767 intel_dp->aux.i2c_nack_count, 4768 intel_dp->aux.i2c_defer_count); 4769 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE; 4770 } else { 4771 /* FIXME: Get rid of drm_edid_raw() */ 4772 const struct edid *block = drm_edid_raw(intel_connector->detect_edid); 4773 4774 /* We have to write the checksum of the last block read */ 4775 block += block->extensions; 4776 4777 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM, 4778 block->checksum) <= 0) 4779 drm_dbg_kms(&i915->drm, 4780 "Failed to write EDID checksum\n"); 4781 4782 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE; 4783 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED; 4784 } 4785 4786 /* Set test active flag here so userspace doesn't interrupt things */ 4787 intel_dp->compliance.test_active = true; 4788 4789 return test_result; 4790 } 4791 4792 static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp, 4793 const struct intel_crtc_state *crtc_state) 4794 { 4795 struct drm_i915_private *dev_priv = 4796 to_i915(dp_to_dig_port(intel_dp)->base.base.dev); 4797 struct drm_dp_phy_test_params *data = 4798 &intel_dp->compliance.test_data.phytest; 4799 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 4800 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 4801 enum pipe pipe = crtc->pipe; 4802 u32 pattern_val; 4803 4804 switch (data->phy_pattern) { 4805 case DP_LINK_QUAL_PATTERN_DISABLE: 4806 drm_dbg_kms(&dev_priv->drm, "Disable Phy Test Pattern\n"); 4807 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0); 4808 if (DISPLAY_VER(dev_priv) >= 10) 4809 intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), 4810 DP_TP_CTL_TRAIN_PAT4_SEL_MASK | DP_TP_CTL_LINK_TRAIN_MASK, 4811 DP_TP_CTL_LINK_TRAIN_NORMAL); 4812 break; 4813 case DP_LINK_QUAL_PATTERN_D10_2: 4814 drm_dbg_kms(&dev_priv->drm, "Set D10.2 Phy Test Pattern\n"); 4815 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 4816 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2); 4817 break; 4818 case DP_LINK_QUAL_PATTERN_ERROR_RATE: 4819 drm_dbg_kms(&dev_priv->drm, "Set Error Count Phy Test Pattern\n"); 4820 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 4821 DDI_DP_COMP_CTL_ENABLE | 4822 DDI_DP_COMP_CTL_SCRAMBLED_0); 4823 break; 4824 case DP_LINK_QUAL_PATTERN_PRBS7: 4825 drm_dbg_kms(&dev_priv->drm, "Set PRBS7 Phy Test Pattern\n"); 4826 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 4827 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7); 4828 break; 4829 case DP_LINK_QUAL_PATTERN_80BIT_CUSTOM: 4830 /* 4831 * FIXME: Ideally pattern should come from DPCD 0x250. As 4832 * current firmware of DPR-100 could not set it, so hardcoding 4833 * now for complaince test. 4834 */ 4835 drm_dbg_kms(&dev_priv->drm, 4836 "Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n"); 4837 pattern_val = 0x3e0f83e0; 4838 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val); 4839 pattern_val = 0x0f83e0f8; 4840 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val); 4841 pattern_val = 0x0000f83e; 4842 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val); 4843 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 4844 DDI_DP_COMP_CTL_ENABLE | 4845 DDI_DP_COMP_CTL_CUSTOM80); 4846 break; 4847 case DP_LINK_QUAL_PATTERN_CP2520_PAT_1: 4848 /* 4849 * FIXME: Ideally pattern should come from DPCD 0x24A. As 4850 * current firmware of DPR-100 could not set it, so hardcoding 4851 * now for complaince test. 4852 */ 4853 drm_dbg_kms(&dev_priv->drm, "Set HBR2 compliance Phy Test Pattern\n"); 4854 pattern_val = 0xFB; 4855 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 4856 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 | 4857 pattern_val); 4858 break; 4859 case DP_LINK_QUAL_PATTERN_CP2520_PAT_3: 4860 if (DISPLAY_VER(dev_priv) < 10) { 4861 drm_warn(&dev_priv->drm, "Platform does not support TPS4\n"); 4862 break; 4863 } 4864 drm_dbg_kms(&dev_priv->drm, "Set TPS4 compliance Phy Test Pattern\n"); 4865 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0); 4866 intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), 4867 DP_TP_CTL_TRAIN_PAT4_SEL_MASK | DP_TP_CTL_LINK_TRAIN_MASK, 4868 DP_TP_CTL_TRAIN_PAT4_SEL_TP4A | DP_TP_CTL_LINK_TRAIN_PAT4); 4869 break; 4870 default: 4871 drm_warn(&dev_priv->drm, "Invalid Phy Test Pattern\n"); 4872 } 4873 } 4874 4875 static void intel_dp_process_phy_request(struct intel_dp *intel_dp, 4876 const struct intel_crtc_state *crtc_state) 4877 { 4878 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4879 struct drm_dp_phy_test_params *data = 4880 &intel_dp->compliance.test_data.phytest; 4881 u8 link_status[DP_LINK_STATUS_SIZE]; 4882 4883 if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX, 4884 link_status) < 0) { 4885 drm_dbg_kms(&i915->drm, "failed to get link status\n"); 4886 return; 4887 } 4888 4889 /* retrieve vswing & pre-emphasis setting */ 4890 intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, 4891 link_status); 4892 4893 intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX); 4894 4895 intel_dp_phy_pattern_update(intel_dp, crtc_state); 4896 4897 drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET, 4898 intel_dp->train_set, crtc_state->lane_count); 4899 4900 drm_dp_set_phy_test_pattern(&intel_dp->aux, data, 4901 intel_dp->dpcd[DP_DPCD_REV]); 4902 } 4903 4904 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) 4905 { 4906 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4907 struct drm_dp_phy_test_params *data = 4908 &intel_dp->compliance.test_data.phytest; 4909 4910 if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) { 4911 drm_dbg_kms(&i915->drm, "DP Phy Test pattern AUX read failure\n"); 4912 return DP_TEST_NAK; 4913 } 4914 4915 /* Set test active flag here so userspace doesn't interrupt things */ 4916 intel_dp->compliance.test_active = true; 4917 4918 return DP_TEST_ACK; 4919 } 4920 4921 static void intel_dp_handle_test_request(struct intel_dp *intel_dp) 4922 { 4923 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4924 u8 response = DP_TEST_NAK; 4925 u8 request = 0; 4926 int status; 4927 4928 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request); 4929 if (status <= 0) { 4930 drm_dbg_kms(&i915->drm, 4931 "Could not read test request from sink\n"); 4932 goto update_status; 4933 } 4934 4935 switch (request) { 4936 case DP_TEST_LINK_TRAINING: 4937 drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n"); 4938 response = intel_dp_autotest_link_training(intel_dp); 4939 break; 4940 case DP_TEST_LINK_VIDEO_PATTERN: 4941 drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n"); 4942 response = intel_dp_autotest_video_pattern(intel_dp); 4943 break; 4944 case DP_TEST_LINK_EDID_READ: 4945 drm_dbg_kms(&i915->drm, "EDID test requested\n"); 4946 response = intel_dp_autotest_edid(intel_dp); 4947 break; 4948 case DP_TEST_LINK_PHY_TEST_PATTERN: 4949 drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n"); 4950 response = intel_dp_autotest_phy_pattern(intel_dp); 4951 break; 4952 default: 4953 drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n", 4954 request); 4955 break; 4956 } 4957 4958 if (response & DP_TEST_ACK) 4959 intel_dp->compliance.test_type = request; 4960 4961 update_status: 4962 status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response); 4963 if (status <= 0) 4964 drm_dbg_kms(&i915->drm, 4965 "Could not write test response to sink\n"); 4966 } 4967 4968 static bool intel_dp_link_ok(struct intel_dp *intel_dp, 4969 u8 link_status[DP_LINK_STATUS_SIZE]) 4970 { 4971 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 4972 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 4973 bool uhbr = intel_dp->link_rate >= 1000000; 4974 bool ok; 4975 4976 if (uhbr) 4977 ok = drm_dp_128b132b_lane_channel_eq_done(link_status, 4978 intel_dp->lane_count); 4979 else 4980 ok = drm_dp_channel_eq_ok(link_status, intel_dp->lane_count); 4981 4982 if (ok) 4983 return true; 4984 4985 intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status); 4986 drm_dbg_kms(&i915->drm, 4987 "[ENCODER:%d:%s] %s link not ok, retraining\n", 4988 encoder->base.base.id, encoder->base.name, 4989 uhbr ? "128b/132b" : "8b/10b"); 4990 4991 return false; 4992 } 4993 4994 static void 4995 intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, u8 *ack) 4996 { 4997 bool handled = false; 4998 4999 drm_dp_mst_hpd_irq_handle_event(&intel_dp->mst_mgr, esi, ack, &handled); 5000 5001 if (esi[1] & DP_CP_IRQ) { 5002 intel_hdcp_handle_cp_irq(intel_dp->attached_connector); 5003 ack[1] |= DP_CP_IRQ; 5004 } 5005 } 5006 5007 static bool intel_dp_mst_link_status(struct intel_dp *intel_dp) 5008 { 5009 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 5010 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 5011 u8 link_status[DP_LINK_STATUS_SIZE] = {}; 5012 const size_t esi_link_status_size = DP_LINK_STATUS_SIZE - 2; 5013 5014 if (drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS_ESI, link_status, 5015 esi_link_status_size) != esi_link_status_size) { 5016 drm_err(&i915->drm, 5017 "[ENCODER:%d:%s] Failed to read link status\n", 5018 encoder->base.base.id, encoder->base.name); 5019 return false; 5020 } 5021 5022 return intel_dp_link_ok(intel_dp, link_status); 5023 } 5024 5025 /** 5026 * intel_dp_check_mst_status - service any pending MST interrupts, check link status 5027 * @intel_dp: Intel DP struct 5028 * 5029 * Read any pending MST interrupts, call MST core to handle these and ack the 5030 * interrupts. Check if the main and AUX link state is ok. 5031 * 5032 * Returns: 5033 * - %true if pending interrupts were serviced (or no interrupts were 5034 * pending) w/o detecting an error condition. 5035 * - %false if an error condition - like AUX failure or a loss of link - is 5036 * detected, or another condition - like a DP tunnel BW state change - needs 5037 * servicing from the hotplug work. 5038 */ 5039 static bool 5040 intel_dp_check_mst_status(struct intel_dp *intel_dp) 5041 { 5042 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5043 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 5044 struct intel_encoder *encoder = &dig_port->base; 5045 bool link_ok = true; 5046 bool reprobe_needed = false; 5047 5048 drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0); 5049 5050 for (;;) { 5051 u8 esi[4] = {}; 5052 u8 ack[4] = {}; 5053 5054 if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) { 5055 drm_dbg_kms(&i915->drm, 5056 "failed to get ESI - device may have failed\n"); 5057 link_ok = false; 5058 5059 break; 5060 } 5061 5062 drm_dbg_kms(&i915->drm, "DPRX ESI: %4ph\n", esi); 5063 5064 if (intel_dp->active_mst_links > 0 && link_ok && 5065 esi[3] & LINK_STATUS_CHANGED) { 5066 if (!intel_dp_mst_link_status(intel_dp)) 5067 link_ok = false; 5068 ack[3] |= LINK_STATUS_CHANGED; 5069 } 5070 5071 intel_dp_mst_hpd_irq(intel_dp, esi, ack); 5072 5073 if (esi[3] & DP_TUNNELING_IRQ) { 5074 if (drm_dp_tunnel_handle_irq(i915->display.dp_tunnel_mgr, 5075 &intel_dp->aux)) 5076 reprobe_needed = true; 5077 ack[3] |= DP_TUNNELING_IRQ; 5078 } 5079 5080 if (!memchr_inv(ack, 0, sizeof(ack))) 5081 break; 5082 5083 if (!intel_dp_ack_sink_irq_esi(intel_dp, ack)) 5084 drm_dbg_kms(&i915->drm, "Failed to ack ESI\n"); 5085 5086 if (ack[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY)) 5087 drm_dp_mst_hpd_irq_send_new_request(&intel_dp->mst_mgr); 5088 } 5089 5090 if (!link_ok || intel_dp->link.force_retrain) 5091 intel_encoder_link_check_queue_work(encoder, 0); 5092 5093 return !reprobe_needed; 5094 } 5095 5096 static void 5097 intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp) 5098 { 5099 bool is_active; 5100 u8 buf = 0; 5101 5102 is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux); 5103 if (intel_dp->frl.is_trained && !is_active) { 5104 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf) < 0) 5105 return; 5106 5107 buf &= ~DP_PCON_ENABLE_HDMI_LINK; 5108 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf) < 0) 5109 return; 5110 5111 drm_dp_pcon_hdmi_frl_link_error_count(&intel_dp->aux, &intel_dp->attached_connector->base); 5112 5113 intel_dp->frl.is_trained = false; 5114 5115 /* Restart FRL training or fall back to TMDS mode */ 5116 intel_dp_check_frl_training(intel_dp); 5117 } 5118 } 5119 5120 static bool 5121 intel_dp_needs_link_retrain(struct intel_dp *intel_dp) 5122 { 5123 u8 link_status[DP_LINK_STATUS_SIZE]; 5124 5125 if (!intel_dp->link_trained) 5126 return false; 5127 5128 /* 5129 * While PSR source HW is enabled, it will control main-link sending 5130 * frames, enabling and disabling it so trying to do a retrain will fail 5131 * as the link would or not be on or it could mix training patterns 5132 * and frame data at the same time causing retrain to fail. 5133 * Also when exiting PSR, HW will retrain the link anyways fixing 5134 * any link status error. 5135 */ 5136 if (intel_psr_enabled(intel_dp)) 5137 return false; 5138 5139 if (intel_dp->link.force_retrain) 5140 return true; 5141 5142 if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX, 5143 link_status) < 0) 5144 return false; 5145 5146 /* 5147 * Validate the cached values of intel_dp->link_rate and 5148 * intel_dp->lane_count before attempting to retrain. 5149 * 5150 * FIXME would be nice to user the crtc state here, but since 5151 * we need to call this from the short HPD handler that seems 5152 * a bit hard. 5153 */ 5154 if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate, 5155 intel_dp->lane_count)) 5156 return false; 5157 5158 if (intel_dp->link.retrain_disabled) 5159 return false; 5160 5161 if (intel_dp->link.seq_train_failures) 5162 return true; 5163 5164 /* Retrain if link not ok */ 5165 return !intel_dp_link_ok(intel_dp, link_status); 5166 } 5167 5168 static bool intel_dp_has_connector(struct intel_dp *intel_dp, 5169 const struct drm_connector_state *conn_state) 5170 { 5171 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5172 struct intel_encoder *encoder; 5173 enum pipe pipe; 5174 5175 if (!conn_state->best_encoder) 5176 return false; 5177 5178 /* SST */ 5179 encoder = &dp_to_dig_port(intel_dp)->base; 5180 if (conn_state->best_encoder == &encoder->base) 5181 return true; 5182 5183 /* MST */ 5184 for_each_pipe(i915, pipe) { 5185 encoder = &intel_dp->mst_encoders[pipe]->base; 5186 if (conn_state->best_encoder == &encoder->base) 5187 return true; 5188 } 5189 5190 return false; 5191 } 5192 5193 int intel_dp_get_active_pipes(struct intel_dp *intel_dp, 5194 struct drm_modeset_acquire_ctx *ctx, 5195 u8 *pipe_mask) 5196 { 5197 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5198 struct drm_connector_list_iter conn_iter; 5199 struct intel_connector *connector; 5200 int ret = 0; 5201 5202 *pipe_mask = 0; 5203 5204 drm_connector_list_iter_begin(&i915->drm, &conn_iter); 5205 for_each_intel_connector_iter(connector, &conn_iter) { 5206 struct drm_connector_state *conn_state = 5207 connector->base.state; 5208 struct intel_crtc_state *crtc_state; 5209 struct intel_crtc *crtc; 5210 5211 if (!intel_dp_has_connector(intel_dp, conn_state)) 5212 continue; 5213 5214 crtc = to_intel_crtc(conn_state->crtc); 5215 if (!crtc) 5216 continue; 5217 5218 ret = drm_modeset_lock(&crtc->base.mutex, ctx); 5219 if (ret) 5220 break; 5221 5222 crtc_state = to_intel_crtc_state(crtc->base.state); 5223 5224 drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state)); 5225 5226 if (!crtc_state->hw.active) 5227 continue; 5228 5229 if (conn_state->commit) 5230 drm_WARN_ON(&i915->drm, 5231 !wait_for_completion_timeout(&conn_state->commit->hw_done, 5232 msecs_to_jiffies(5000))); 5233 5234 *pipe_mask |= BIT(crtc->pipe); 5235 } 5236 drm_connector_list_iter_end(&conn_iter); 5237 5238 return ret; 5239 } 5240 5241 static bool intel_dp_is_connected(struct intel_dp *intel_dp) 5242 { 5243 struct intel_connector *connector = intel_dp->attached_connector; 5244 5245 return connector->base.status == connector_status_connected || 5246 intel_dp->is_mst; 5247 } 5248 5249 static int intel_dp_retrain_link(struct intel_encoder *encoder, 5250 struct drm_modeset_acquire_ctx *ctx) 5251 { 5252 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5253 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 5254 struct intel_crtc *crtc; 5255 bool mst_output = false; 5256 u8 pipe_mask; 5257 int ret; 5258 5259 if (!intel_dp_is_connected(intel_dp)) 5260 return 0; 5261 5262 ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, 5263 ctx); 5264 if (ret) 5265 return ret; 5266 5267 if (!intel_dp_needs_link_retrain(intel_dp)) 5268 return 0; 5269 5270 ret = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask); 5271 if (ret) 5272 return ret; 5273 5274 if (pipe_mask == 0) 5275 return 0; 5276 5277 if (!intel_dp_needs_link_retrain(intel_dp)) 5278 return 0; 5279 5280 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link (forced %s)\n", 5281 encoder->base.base.id, encoder->base.name, 5282 str_yes_no(intel_dp->link.force_retrain)); 5283 5284 for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) { 5285 const struct intel_crtc_state *crtc_state = 5286 to_intel_crtc_state(crtc->base.state); 5287 5288 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) { 5289 mst_output = true; 5290 break; 5291 } 5292 5293 /* Suppress underruns caused by re-training */ 5294 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); 5295 if (crtc_state->has_pch_encoder) 5296 intel_set_pch_fifo_underrun_reporting(dev_priv, 5297 intel_crtc_pch_transcoder(crtc), false); 5298 } 5299 5300 /* TODO: use a modeset for SST as well. */ 5301 if (mst_output) { 5302 ret = intel_modeset_commit_pipes(dev_priv, pipe_mask, ctx); 5303 5304 if (ret && ret != -EDEADLK) 5305 drm_dbg_kms(&dev_priv->drm, 5306 "[ENCODER:%d:%s] link retraining failed: %pe\n", 5307 encoder->base.base.id, encoder->base.name, 5308 ERR_PTR(ret)); 5309 5310 goto out; 5311 } 5312 5313 for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) { 5314 const struct intel_crtc_state *crtc_state = 5315 to_intel_crtc_state(crtc->base.state); 5316 5317 intel_dp->link_trained = false; 5318 5319 intel_dp_check_frl_training(intel_dp); 5320 intel_dp_pcon_dsc_configure(intel_dp, crtc_state); 5321 intel_dp_start_link_train(NULL, intel_dp, crtc_state); 5322 intel_dp_stop_link_train(intel_dp, crtc_state); 5323 break; 5324 } 5325 5326 for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) { 5327 const struct intel_crtc_state *crtc_state = 5328 to_intel_crtc_state(crtc->base.state); 5329 5330 /* Keep underrun reporting disabled until things are stable */ 5331 intel_crtc_wait_for_next_vblank(crtc); 5332 5333 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); 5334 if (crtc_state->has_pch_encoder) 5335 intel_set_pch_fifo_underrun_reporting(dev_priv, 5336 intel_crtc_pch_transcoder(crtc), true); 5337 } 5338 5339 out: 5340 if (ret != -EDEADLK) 5341 intel_dp->link.force_retrain = false; 5342 5343 return ret; 5344 } 5345 5346 void intel_dp_link_check(struct intel_encoder *encoder) 5347 { 5348 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 5349 struct drm_modeset_acquire_ctx ctx; 5350 int ret; 5351 5352 intel_modeset_lock_ctx_retry(&ctx, NULL, 0, ret) 5353 ret = intel_dp_retrain_link(encoder, &ctx); 5354 5355 drm_WARN_ON(&i915->drm, ret); 5356 } 5357 5358 void intel_dp_check_link_state(struct intel_dp *intel_dp) 5359 { 5360 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 5361 struct intel_encoder *encoder = &dig_port->base; 5362 5363 if (!intel_dp_is_connected(intel_dp)) 5364 return; 5365 5366 if (!intel_dp_needs_link_retrain(intel_dp)) 5367 return; 5368 5369 intel_encoder_link_check_queue_work(encoder, 0); 5370 } 5371 5372 static int intel_dp_prep_phy_test(struct intel_dp *intel_dp, 5373 struct drm_modeset_acquire_ctx *ctx, 5374 u8 *pipe_mask) 5375 { 5376 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5377 struct drm_connector_list_iter conn_iter; 5378 struct intel_connector *connector; 5379 int ret = 0; 5380 5381 *pipe_mask = 0; 5382 5383 drm_connector_list_iter_begin(&i915->drm, &conn_iter); 5384 for_each_intel_connector_iter(connector, &conn_iter) { 5385 struct drm_connector_state *conn_state = 5386 connector->base.state; 5387 struct intel_crtc_state *crtc_state; 5388 struct intel_crtc *crtc; 5389 5390 if (!intel_dp_has_connector(intel_dp, conn_state)) 5391 continue; 5392 5393 crtc = to_intel_crtc(conn_state->crtc); 5394 if (!crtc) 5395 continue; 5396 5397 ret = drm_modeset_lock(&crtc->base.mutex, ctx); 5398 if (ret) 5399 break; 5400 5401 crtc_state = to_intel_crtc_state(crtc->base.state); 5402 5403 drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state)); 5404 5405 if (!crtc_state->hw.active) 5406 continue; 5407 5408 if (conn_state->commit && 5409 !try_wait_for_completion(&conn_state->commit->hw_done)) 5410 continue; 5411 5412 *pipe_mask |= BIT(crtc->pipe); 5413 } 5414 drm_connector_list_iter_end(&conn_iter); 5415 5416 return ret; 5417 } 5418 5419 static int intel_dp_do_phy_test(struct intel_encoder *encoder, 5420 struct drm_modeset_acquire_ctx *ctx) 5421 { 5422 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5423 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 5424 struct intel_crtc *crtc; 5425 u8 pipe_mask; 5426 int ret; 5427 5428 ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, 5429 ctx); 5430 if (ret) 5431 return ret; 5432 5433 ret = intel_dp_prep_phy_test(intel_dp, ctx, &pipe_mask); 5434 if (ret) 5435 return ret; 5436 5437 if (pipe_mask == 0) 5438 return 0; 5439 5440 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] PHY test\n", 5441 encoder->base.base.id, encoder->base.name); 5442 5443 for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) { 5444 const struct intel_crtc_state *crtc_state = 5445 to_intel_crtc_state(crtc->base.state); 5446 5447 /* test on the MST master transcoder */ 5448 if (DISPLAY_VER(dev_priv) >= 12 && 5449 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) && 5450 !intel_dp_mst_is_master_trans(crtc_state)) 5451 continue; 5452 5453 intel_dp_process_phy_request(intel_dp, crtc_state); 5454 break; 5455 } 5456 5457 return 0; 5458 } 5459 5460 void intel_dp_phy_test(struct intel_encoder *encoder) 5461 { 5462 struct drm_modeset_acquire_ctx ctx; 5463 int ret; 5464 5465 drm_modeset_acquire_init(&ctx, 0); 5466 5467 for (;;) { 5468 ret = intel_dp_do_phy_test(encoder, &ctx); 5469 5470 if (ret == -EDEADLK) { 5471 drm_modeset_backoff(&ctx); 5472 continue; 5473 } 5474 5475 break; 5476 } 5477 5478 drm_modeset_drop_locks(&ctx); 5479 drm_modeset_acquire_fini(&ctx); 5480 drm_WARN(encoder->base.dev, ret, 5481 "Acquiring modeset locks failed with %i\n", ret); 5482 } 5483 5484 static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp) 5485 { 5486 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5487 u8 val; 5488 5489 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 5490 return; 5491 5492 if (drm_dp_dpcd_readb(&intel_dp->aux, 5493 DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val) 5494 return; 5495 5496 drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val); 5497 5498 if (val & DP_AUTOMATED_TEST_REQUEST) 5499 intel_dp_handle_test_request(intel_dp); 5500 5501 if (val & DP_CP_IRQ) 5502 intel_hdcp_handle_cp_irq(intel_dp->attached_connector); 5503 5504 if (val & DP_SINK_SPECIFIC_IRQ) 5505 drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n"); 5506 } 5507 5508 static bool intel_dp_check_link_service_irq(struct intel_dp *intel_dp) 5509 { 5510 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5511 bool reprobe_needed = false; 5512 u8 val; 5513 5514 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 5515 return false; 5516 5517 if (drm_dp_dpcd_readb(&intel_dp->aux, 5518 DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val) 5519 return false; 5520 5521 if ((val & DP_TUNNELING_IRQ) && 5522 drm_dp_tunnel_handle_irq(i915->display.dp_tunnel_mgr, 5523 &intel_dp->aux)) 5524 reprobe_needed = true; 5525 5526 if (drm_dp_dpcd_writeb(&intel_dp->aux, 5527 DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1) 5528 return reprobe_needed; 5529 5530 if (val & HDMI_LINK_STATUS_CHANGED) 5531 intel_dp_handle_hdmi_link_status_change(intel_dp); 5532 5533 return reprobe_needed; 5534 } 5535 5536 /* 5537 * According to DP spec 5538 * 5.1.2: 5539 * 1. Read DPCD 5540 * 2. Configure link according to Receiver Capabilities 5541 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3 5542 * 4. Check link status on receipt of hot-plug interrupt 5543 * 5544 * intel_dp_short_pulse - handles short pulse interrupts 5545 * when full detection is not required. 5546 * Returns %true if short pulse is handled and full detection 5547 * is NOT required and %false otherwise. 5548 */ 5549 static bool 5550 intel_dp_short_pulse(struct intel_dp *intel_dp) 5551 { 5552 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 5553 u8 old_sink_count = intel_dp->sink_count; 5554 bool reprobe_needed = false; 5555 bool ret; 5556 5557 /* 5558 * Clearing compliance test variables to allow capturing 5559 * of values for next automated test request. 5560 */ 5561 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); 5562 5563 /* 5564 * Now read the DPCD to see if it's actually running 5565 * If the current value of sink count doesn't match with 5566 * the value that was stored earlier or dpcd read failed 5567 * we need to do full detection 5568 */ 5569 ret = intel_dp_get_dpcd(intel_dp); 5570 5571 if ((old_sink_count != intel_dp->sink_count) || !ret) { 5572 /* No need to proceed if we are going to do full detect */ 5573 return false; 5574 } 5575 5576 intel_dp_check_device_service_irq(intel_dp); 5577 reprobe_needed = intel_dp_check_link_service_irq(intel_dp); 5578 5579 /* Handle CEC interrupts, if any */ 5580 drm_dp_cec_irq(&intel_dp->aux); 5581 5582 intel_dp_check_link_state(intel_dp); 5583 5584 intel_psr_short_pulse(intel_dp); 5585 5586 switch (intel_dp->compliance.test_type) { 5587 case DP_TEST_LINK_TRAINING: 5588 drm_dbg_kms(&dev_priv->drm, 5589 "Link Training Compliance Test requested\n"); 5590 /* Send a Hotplug Uevent to userspace to start modeset */ 5591 drm_kms_helper_hotplug_event(&dev_priv->drm); 5592 break; 5593 case DP_TEST_LINK_PHY_TEST_PATTERN: 5594 drm_dbg_kms(&dev_priv->drm, 5595 "PHY test pattern Compliance Test requested\n"); 5596 /* 5597 * Schedule long hpd to do the test 5598 * 5599 * FIXME get rid of the ad-hoc phy test modeset code 5600 * and properly incorporate it into the normal modeset. 5601 */ 5602 reprobe_needed = true; 5603 } 5604 5605 return !reprobe_needed; 5606 } 5607 5608 /* XXX this is probably wrong for multiple downstream ports */ 5609 static enum drm_connector_status 5610 intel_dp_detect_dpcd(struct intel_dp *intel_dp) 5611 { 5612 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5613 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 5614 u8 *dpcd = intel_dp->dpcd; 5615 u8 type; 5616 5617 if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp))) 5618 return connector_status_connected; 5619 5620 lspcon_resume(dig_port); 5621 5622 if (!intel_dp_get_dpcd(intel_dp)) 5623 return connector_status_disconnected; 5624 5625 intel_dp->mst_detect = intel_dp_mst_detect(intel_dp); 5626 5627 /* if there's no downstream port, we're done */ 5628 if (!drm_dp_is_branch(dpcd)) 5629 return connector_status_connected; 5630 5631 /* If we're HPD-aware, SINK_COUNT changes dynamically */ 5632 if (intel_dp_has_sink_count(intel_dp) && 5633 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) { 5634 return intel_dp->sink_count ? 5635 connector_status_connected : connector_status_disconnected; 5636 } 5637 5638 if (intel_dp->mst_detect == DRM_DP_MST) 5639 return connector_status_connected; 5640 5641 /* If no HPD, poke DDC gently */ 5642 if (drm_probe_ddc(&intel_dp->aux.ddc)) 5643 return connector_status_connected; 5644 5645 /* Well we tried, say unknown for unreliable port types */ 5646 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) { 5647 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; 5648 if (type == DP_DS_PORT_TYPE_VGA || 5649 type == DP_DS_PORT_TYPE_NON_EDID) 5650 return connector_status_unknown; 5651 } else { 5652 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & 5653 DP_DWN_STRM_PORT_TYPE_MASK; 5654 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG || 5655 type == DP_DWN_STRM_PORT_TYPE_OTHER) 5656 return connector_status_unknown; 5657 } 5658 5659 /* Anything else is out of spec, warn and ignore */ 5660 drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n"); 5661 return connector_status_disconnected; 5662 } 5663 5664 static enum drm_connector_status 5665 edp_detect(struct intel_dp *intel_dp) 5666 { 5667 return connector_status_connected; 5668 } 5669 5670 void intel_digital_port_lock(struct intel_encoder *encoder) 5671 { 5672 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 5673 5674 if (dig_port->lock) 5675 dig_port->lock(dig_port); 5676 } 5677 5678 void intel_digital_port_unlock(struct intel_encoder *encoder) 5679 { 5680 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 5681 5682 if (dig_port->unlock) 5683 dig_port->unlock(dig_port); 5684 } 5685 5686 /* 5687 * intel_digital_port_connected_locked - is the specified port connected? 5688 * @encoder: intel_encoder 5689 * 5690 * In cases where there's a connector physically connected but it can't be used 5691 * by our hardware we also return false, since the rest of the driver should 5692 * pretty much treat the port as disconnected. This is relevant for type-C 5693 * (starting on ICL) where there's ownership involved. 5694 * 5695 * The caller must hold the lock acquired by calling intel_digital_port_lock() 5696 * when calling this function. 5697 * 5698 * Return %true if port is connected, %false otherwise. 5699 */ 5700 bool intel_digital_port_connected_locked(struct intel_encoder *encoder) 5701 { 5702 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5703 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 5704 bool is_glitch_free = intel_tc_port_handles_hpd_glitches(dig_port); 5705 bool is_connected = false; 5706 intel_wakeref_t wakeref; 5707 5708 with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref) { 5709 unsigned long wait_expires = jiffies + msecs_to_jiffies_timeout(4); 5710 5711 do { 5712 is_connected = dig_port->connected(encoder); 5713 if (is_connected || is_glitch_free) 5714 break; 5715 usleep_range(10, 30); 5716 } while (time_before(jiffies, wait_expires)); 5717 } 5718 5719 return is_connected; 5720 } 5721 5722 bool intel_digital_port_connected(struct intel_encoder *encoder) 5723 { 5724 bool ret; 5725 5726 intel_digital_port_lock(encoder); 5727 ret = intel_digital_port_connected_locked(encoder); 5728 intel_digital_port_unlock(encoder); 5729 5730 return ret; 5731 } 5732 5733 static const struct drm_edid * 5734 intel_dp_get_edid(struct intel_dp *intel_dp) 5735 { 5736 struct intel_connector *connector = intel_dp->attached_connector; 5737 const struct drm_edid *fixed_edid = connector->panel.fixed_edid; 5738 5739 /* Use panel fixed edid if we have one */ 5740 if (fixed_edid) { 5741 /* invalid edid */ 5742 if (IS_ERR(fixed_edid)) 5743 return NULL; 5744 5745 return drm_edid_dup(fixed_edid); 5746 } 5747 5748 return drm_edid_read_ddc(&connector->base, &intel_dp->aux.ddc); 5749 } 5750 5751 static void 5752 intel_dp_update_dfp(struct intel_dp *intel_dp, 5753 const struct drm_edid *drm_edid) 5754 { 5755 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5756 struct intel_connector *connector = intel_dp->attached_connector; 5757 5758 intel_dp->dfp.max_bpc = 5759 drm_dp_downstream_max_bpc(intel_dp->dpcd, 5760 intel_dp->downstream_ports, drm_edid); 5761 5762 intel_dp->dfp.max_dotclock = 5763 drm_dp_downstream_max_dotclock(intel_dp->dpcd, 5764 intel_dp->downstream_ports); 5765 5766 intel_dp->dfp.min_tmds_clock = 5767 drm_dp_downstream_min_tmds_clock(intel_dp->dpcd, 5768 intel_dp->downstream_ports, 5769 drm_edid); 5770 intel_dp->dfp.max_tmds_clock = 5771 drm_dp_downstream_max_tmds_clock(intel_dp->dpcd, 5772 intel_dp->downstream_ports, 5773 drm_edid); 5774 5775 intel_dp->dfp.pcon_max_frl_bw = 5776 drm_dp_get_pcon_max_frl_bw(intel_dp->dpcd, 5777 intel_dp->downstream_ports); 5778 5779 drm_dbg_kms(&i915->drm, 5780 "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d, PCON Max FRL BW %dGbps\n", 5781 connector->base.base.id, connector->base.name, 5782 intel_dp->dfp.max_bpc, 5783 intel_dp->dfp.max_dotclock, 5784 intel_dp->dfp.min_tmds_clock, 5785 intel_dp->dfp.max_tmds_clock, 5786 intel_dp->dfp.pcon_max_frl_bw); 5787 5788 intel_dp_get_pcon_dsc_cap(intel_dp); 5789 } 5790 5791 static bool 5792 intel_dp_can_ycbcr420(struct intel_dp *intel_dp) 5793 { 5794 if (source_can_output(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR420) && 5795 (!drm_dp_is_branch(intel_dp->dpcd) || intel_dp->dfp.ycbcr420_passthrough)) 5796 return true; 5797 5798 if (source_can_output(intel_dp, INTEL_OUTPUT_FORMAT_RGB) && 5799 dfp_can_convert_from_rgb(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR420)) 5800 return true; 5801 5802 if (source_can_output(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR444) && 5803 dfp_can_convert_from_ycbcr444(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR420)) 5804 return true; 5805 5806 return false; 5807 } 5808 5809 static void 5810 intel_dp_update_420(struct intel_dp *intel_dp) 5811 { 5812 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5813 struct intel_connector *connector = intel_dp->attached_connector; 5814 5815 intel_dp->dfp.ycbcr420_passthrough = 5816 drm_dp_downstream_420_passthrough(intel_dp->dpcd, 5817 intel_dp->downstream_ports); 5818 /* on-board LSPCON always assumed to support 4:4:4->4:2:0 conversion */ 5819 intel_dp->dfp.ycbcr_444_to_420 = 5820 dp_to_dig_port(intel_dp)->lspcon.active || 5821 drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd, 5822 intel_dp->downstream_ports); 5823 intel_dp->dfp.rgb_to_ycbcr = 5824 drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd, 5825 intel_dp->downstream_ports, 5826 DP_DS_HDMI_BT709_RGB_YCBCR_CONV); 5827 5828 connector->base.ycbcr_420_allowed = intel_dp_can_ycbcr420(intel_dp); 5829 5830 drm_dbg_kms(&i915->drm, 5831 "[CONNECTOR:%d:%s] RGB->YcbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n", 5832 connector->base.base.id, connector->base.name, 5833 str_yes_no(intel_dp->dfp.rgb_to_ycbcr), 5834 str_yes_no(connector->base.ycbcr_420_allowed), 5835 str_yes_no(intel_dp->dfp.ycbcr_444_to_420)); 5836 } 5837 5838 static void 5839 intel_dp_set_edid(struct intel_dp *intel_dp) 5840 { 5841 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5842 struct intel_connector *connector = intel_dp->attached_connector; 5843 const struct drm_edid *drm_edid; 5844 bool vrr_capable; 5845 5846 intel_dp_unset_edid(intel_dp); 5847 drm_edid = intel_dp_get_edid(intel_dp); 5848 connector->detect_edid = drm_edid; 5849 5850 /* Below we depend on display info having been updated */ 5851 drm_edid_connector_update(&connector->base, drm_edid); 5852 5853 vrr_capable = intel_vrr_is_capable(connector); 5854 drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] VRR capable: %s\n", 5855 connector->base.base.id, connector->base.name, str_yes_no(vrr_capable)); 5856 drm_connector_set_vrr_capable_property(&connector->base, vrr_capable); 5857 5858 intel_dp_update_dfp(intel_dp, drm_edid); 5859 intel_dp_update_420(intel_dp); 5860 5861 drm_dp_cec_attach(&intel_dp->aux, 5862 connector->base.display_info.source_physical_address); 5863 } 5864 5865 static void 5866 intel_dp_unset_edid(struct intel_dp *intel_dp) 5867 { 5868 struct intel_connector *connector = intel_dp->attached_connector; 5869 5870 drm_dp_cec_unset_edid(&intel_dp->aux); 5871 drm_edid_free(connector->detect_edid); 5872 connector->detect_edid = NULL; 5873 5874 intel_dp->dfp.max_bpc = 0; 5875 intel_dp->dfp.max_dotclock = 0; 5876 intel_dp->dfp.min_tmds_clock = 0; 5877 intel_dp->dfp.max_tmds_clock = 0; 5878 5879 intel_dp->dfp.pcon_max_frl_bw = 0; 5880 5881 intel_dp->dfp.ycbcr_444_to_420 = false; 5882 connector->base.ycbcr_420_allowed = false; 5883 5884 drm_connector_set_vrr_capable_property(&connector->base, 5885 false); 5886 } 5887 5888 static void 5889 intel_dp_detect_dsc_caps(struct intel_dp *intel_dp, struct intel_connector *connector) 5890 { 5891 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5892 5893 /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */ 5894 if (!HAS_DSC(i915)) 5895 return; 5896 5897 if (intel_dp_is_edp(intel_dp)) 5898 intel_edp_get_dsc_sink_cap(intel_dp->edp_dpcd[0], 5899 connector); 5900 else 5901 intel_dp_get_dsc_sink_cap(intel_dp->dpcd[DP_DPCD_REV], 5902 connector); 5903 } 5904 5905 static int 5906 intel_dp_detect(struct drm_connector *connector, 5907 struct drm_modeset_acquire_ctx *ctx, 5908 bool force) 5909 { 5910 struct drm_i915_private *dev_priv = to_i915(connector->dev); 5911 struct intel_connector *intel_connector = 5912 to_intel_connector(connector); 5913 struct intel_dp *intel_dp = intel_attached_dp(intel_connector); 5914 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 5915 struct intel_encoder *encoder = &dig_port->base; 5916 enum drm_connector_status status; 5917 int ret; 5918 5919 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", 5920 connector->base.id, connector->name); 5921 drm_WARN_ON(&dev_priv->drm, 5922 !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); 5923 5924 if (!intel_display_device_enabled(dev_priv)) 5925 return connector_status_disconnected; 5926 5927 if (!intel_display_driver_check_access(dev_priv)) 5928 return connector->status; 5929 5930 /* Can't disconnect eDP */ 5931 if (intel_dp_is_edp(intel_dp)) 5932 status = edp_detect(intel_dp); 5933 else if (intel_digital_port_connected(encoder)) 5934 status = intel_dp_detect_dpcd(intel_dp); 5935 else 5936 status = connector_status_disconnected; 5937 5938 if (status == connector_status_disconnected) { 5939 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); 5940 memset(intel_connector->dp.dsc_dpcd, 0, sizeof(intel_connector->dp.dsc_dpcd)); 5941 intel_dp->psr.sink_panel_replay_support = false; 5942 intel_dp->psr.sink_panel_replay_su_support = false; 5943 5944 intel_dp_mst_disconnect(intel_dp); 5945 5946 intel_dp_tunnel_disconnect(intel_dp); 5947 5948 goto out; 5949 } 5950 5951 ret = intel_dp_tunnel_detect(intel_dp, ctx); 5952 if (ret == -EDEADLK) 5953 return ret; 5954 5955 if (ret == 1) 5956 intel_connector->base.epoch_counter++; 5957 5958 if (!intel_dp_is_edp(intel_dp)) 5959 intel_psr_init_dpcd(intel_dp); 5960 5961 intel_dp_detect_dsc_caps(intel_dp, intel_connector); 5962 5963 intel_dp_mst_configure(intel_dp); 5964 5965 if (intel_dp->reset_link_params) { 5966 intel_dp_reset_link_params(intel_dp); 5967 intel_dp->reset_link_params = false; 5968 } 5969 5970 intel_dp_print_rates(intel_dp); 5971 5972 if (intel_dp->is_mst) { 5973 /* 5974 * If we are in MST mode then this connector 5975 * won't appear connected or have anything 5976 * with EDID on it 5977 */ 5978 status = connector_status_disconnected; 5979 goto out; 5980 } 5981 5982 /* 5983 * Some external monitors do not signal loss of link synchronization 5984 * with an IRQ_HPD, so force a link status check. 5985 * 5986 * TODO: this probably became redundant, so remove it: the link state 5987 * is rechecked/recovered now after modesets, where the loss of 5988 * synchronization tends to occur. 5989 */ 5990 if (!intel_dp_is_edp(intel_dp)) 5991 intel_dp_check_link_state(intel_dp); 5992 5993 /* 5994 * Clearing NACK and defer counts to get their exact values 5995 * while reading EDID which are required by Compliance tests 5996 * 4.2.2.4 and 4.2.2.5 5997 */ 5998 intel_dp->aux.i2c_nack_count = 0; 5999 intel_dp->aux.i2c_defer_count = 0; 6000 6001 intel_dp_set_edid(intel_dp); 6002 if (intel_dp_is_edp(intel_dp) || 6003 to_intel_connector(connector)->detect_edid) 6004 status = connector_status_connected; 6005 6006 intel_dp_check_device_service_irq(intel_dp); 6007 6008 out: 6009 if (status != connector_status_connected && !intel_dp->is_mst) 6010 intel_dp_unset_edid(intel_dp); 6011 6012 if (!intel_dp_is_edp(intel_dp)) 6013 drm_dp_set_subconnector_property(connector, 6014 status, 6015 intel_dp->dpcd, 6016 intel_dp->downstream_ports); 6017 return status; 6018 } 6019 6020 static void 6021 intel_dp_force(struct drm_connector *connector) 6022 { 6023 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 6024 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 6025 struct intel_encoder *intel_encoder = &dig_port->base; 6026 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); 6027 6028 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", 6029 connector->base.id, connector->name); 6030 6031 if (!intel_display_driver_check_access(dev_priv)) 6032 return; 6033 6034 intel_dp_unset_edid(intel_dp); 6035 6036 if (connector->status != connector_status_connected) 6037 return; 6038 6039 intel_dp_set_edid(intel_dp); 6040 } 6041 6042 static int intel_dp_get_modes(struct drm_connector *connector) 6043 { 6044 struct intel_connector *intel_connector = to_intel_connector(connector); 6045 int num_modes; 6046 6047 /* drm_edid_connector_update() done in ->detect() or ->force() */ 6048 num_modes = drm_edid_connector_add_modes(connector); 6049 6050 /* Also add fixed mode, which may or may not be present in EDID */ 6051 if (intel_dp_is_edp(intel_attached_dp(intel_connector))) 6052 num_modes += intel_panel_get_modes(intel_connector); 6053 6054 if (num_modes) 6055 return num_modes; 6056 6057 if (!intel_connector->detect_edid) { 6058 struct intel_dp *intel_dp = intel_attached_dp(intel_connector); 6059 struct drm_display_mode *mode; 6060 6061 mode = drm_dp_downstream_mode(connector->dev, 6062 intel_dp->dpcd, 6063 intel_dp->downstream_ports); 6064 if (mode) { 6065 drm_mode_probed_add(connector, mode); 6066 num_modes++; 6067 } 6068 } 6069 6070 return num_modes; 6071 } 6072 6073 static int 6074 intel_dp_connector_register(struct drm_connector *connector) 6075 { 6076 struct drm_i915_private *i915 = to_i915(connector->dev); 6077 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 6078 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 6079 struct intel_lspcon *lspcon = &dig_port->lspcon; 6080 int ret; 6081 6082 ret = intel_connector_register(connector); 6083 if (ret) 6084 return ret; 6085 6086 drm_dbg_kms(&i915->drm, "registering %s bus for %s\n", 6087 intel_dp->aux.name, connector->kdev->kobj.name); 6088 6089 intel_dp->aux.dev = connector->kdev; 6090 ret = drm_dp_aux_register(&intel_dp->aux); 6091 if (!ret) 6092 drm_dp_cec_register_connector(&intel_dp->aux, connector); 6093 6094 if (!intel_bios_encoder_is_lspcon(dig_port->base.devdata)) 6095 return ret; 6096 6097 /* 6098 * ToDo: Clean this up to handle lspcon init and resume more 6099 * efficiently and streamlined. 6100 */ 6101 if (lspcon_init(dig_port)) { 6102 lspcon_detect_hdr_capability(lspcon); 6103 if (lspcon->hdr_supported) 6104 drm_connector_attach_hdr_output_metadata_property(connector); 6105 } 6106 6107 return ret; 6108 } 6109 6110 static void 6111 intel_dp_connector_unregister(struct drm_connector *connector) 6112 { 6113 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 6114 6115 drm_dp_cec_unregister_connector(&intel_dp->aux); 6116 drm_dp_aux_unregister(&intel_dp->aux); 6117 intel_connector_unregister(connector); 6118 } 6119 6120 void intel_dp_connector_sync_state(struct intel_connector *connector, 6121 const struct intel_crtc_state *crtc_state) 6122 { 6123 struct drm_i915_private *i915 = to_i915(connector->base.dev); 6124 6125 if (crtc_state && crtc_state->dsc.compression_enable) { 6126 drm_WARN_ON(&i915->drm, !connector->dp.dsc_decompression_aux); 6127 connector->dp.dsc_decompression_enabled = true; 6128 } else { 6129 connector->dp.dsc_decompression_enabled = false; 6130 } 6131 } 6132 6133 void intel_dp_encoder_flush_work(struct drm_encoder *_encoder) 6134 { 6135 struct intel_encoder *encoder = to_intel_encoder(_encoder); 6136 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 6137 struct intel_dp *intel_dp = &dig_port->dp; 6138 6139 intel_encoder_link_check_flush_work(encoder); 6140 6141 intel_dp_mst_encoder_cleanup(dig_port); 6142 6143 intel_dp_tunnel_destroy(intel_dp); 6144 6145 intel_pps_vdd_off_sync(intel_dp); 6146 6147 /* 6148 * Ensure power off delay is respected on module remove, so that we can 6149 * reduce delays at driver probe. See pps_init_timestamps(). 6150 */ 6151 intel_pps_wait_power_cycle(intel_dp); 6152 6153 intel_dp_aux_fini(intel_dp); 6154 } 6155 6156 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) 6157 { 6158 struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder); 6159 6160 intel_pps_vdd_off_sync(intel_dp); 6161 6162 intel_dp_tunnel_suspend(intel_dp); 6163 } 6164 6165 void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder) 6166 { 6167 struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder); 6168 6169 intel_pps_wait_power_cycle(intel_dp); 6170 } 6171 6172 static int intel_modeset_tile_group(struct intel_atomic_state *state, 6173 int tile_group_id) 6174 { 6175 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6176 struct drm_connector_list_iter conn_iter; 6177 struct drm_connector *connector; 6178 int ret = 0; 6179 6180 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); 6181 drm_for_each_connector_iter(connector, &conn_iter) { 6182 struct drm_connector_state *conn_state; 6183 struct intel_crtc_state *crtc_state; 6184 struct intel_crtc *crtc; 6185 6186 if (!connector->has_tile || 6187 connector->tile_group->id != tile_group_id) 6188 continue; 6189 6190 conn_state = drm_atomic_get_connector_state(&state->base, 6191 connector); 6192 if (IS_ERR(conn_state)) { 6193 ret = PTR_ERR(conn_state); 6194 break; 6195 } 6196 6197 crtc = to_intel_crtc(conn_state->crtc); 6198 6199 if (!crtc) 6200 continue; 6201 6202 crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 6203 crtc_state->uapi.mode_changed = true; 6204 6205 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); 6206 if (ret) 6207 break; 6208 } 6209 drm_connector_list_iter_end(&conn_iter); 6210 6211 return ret; 6212 } 6213 6214 static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders) 6215 { 6216 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6217 struct intel_crtc *crtc; 6218 6219 if (transcoders == 0) 6220 return 0; 6221 6222 for_each_intel_crtc(&dev_priv->drm, crtc) { 6223 struct intel_crtc_state *crtc_state; 6224 int ret; 6225 6226 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 6227 if (IS_ERR(crtc_state)) 6228 return PTR_ERR(crtc_state); 6229 6230 if (!crtc_state->hw.enable) 6231 continue; 6232 6233 if (!(transcoders & BIT(crtc_state->cpu_transcoder))) 6234 continue; 6235 6236 crtc_state->uapi.mode_changed = true; 6237 6238 ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base); 6239 if (ret) 6240 return ret; 6241 6242 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); 6243 if (ret) 6244 return ret; 6245 6246 transcoders &= ~BIT(crtc_state->cpu_transcoder); 6247 } 6248 6249 drm_WARN_ON(&dev_priv->drm, transcoders != 0); 6250 6251 return 0; 6252 } 6253 6254 static int intel_modeset_synced_crtcs(struct intel_atomic_state *state, 6255 struct drm_connector *connector) 6256 { 6257 const struct drm_connector_state *old_conn_state = 6258 drm_atomic_get_old_connector_state(&state->base, connector); 6259 const struct intel_crtc_state *old_crtc_state; 6260 struct intel_crtc *crtc; 6261 u8 transcoders; 6262 6263 crtc = to_intel_crtc(old_conn_state->crtc); 6264 if (!crtc) 6265 return 0; 6266 6267 old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); 6268 6269 if (!old_crtc_state->hw.active) 6270 return 0; 6271 6272 transcoders = old_crtc_state->sync_mode_slaves_mask; 6273 if (old_crtc_state->master_transcoder != INVALID_TRANSCODER) 6274 transcoders |= BIT(old_crtc_state->master_transcoder); 6275 6276 return intel_modeset_affected_transcoders(state, 6277 transcoders); 6278 } 6279 6280 static int intel_dp_connector_atomic_check(struct drm_connector *conn, 6281 struct drm_atomic_state *_state) 6282 { 6283 struct drm_i915_private *dev_priv = to_i915(conn->dev); 6284 struct intel_atomic_state *state = to_intel_atomic_state(_state); 6285 struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(_state, conn); 6286 struct intel_connector *intel_conn = to_intel_connector(conn); 6287 struct intel_dp *intel_dp = enc_to_intel_dp(intel_conn->encoder); 6288 int ret; 6289 6290 ret = intel_digital_connector_atomic_check(conn, &state->base); 6291 if (ret) 6292 return ret; 6293 6294 if (intel_dp_mst_source_support(intel_dp)) { 6295 ret = drm_dp_mst_root_conn_atomic_check(conn_state, &intel_dp->mst_mgr); 6296 if (ret) 6297 return ret; 6298 } 6299 6300 if (!intel_connector_needs_modeset(state, conn)) 6301 return 0; 6302 6303 ret = intel_dp_tunnel_atomic_check_state(state, 6304 intel_dp, 6305 intel_conn); 6306 if (ret) 6307 return ret; 6308 6309 /* 6310 * We don't enable port sync on BDW due to missing w/as and 6311 * due to not having adjusted the modeset sequence appropriately. 6312 */ 6313 if (DISPLAY_VER(dev_priv) < 9) 6314 return 0; 6315 6316 if (conn->has_tile) { 6317 ret = intel_modeset_tile_group(state, conn->tile_group->id); 6318 if (ret) 6319 return ret; 6320 } 6321 6322 return intel_modeset_synced_crtcs(state, conn); 6323 } 6324 6325 static void intel_dp_oob_hotplug_event(struct drm_connector *connector, 6326 enum drm_connector_status hpd_state) 6327 { 6328 struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector)); 6329 struct drm_i915_private *i915 = to_i915(connector->dev); 6330 bool hpd_high = hpd_state == connector_status_connected; 6331 unsigned int hpd_pin = encoder->hpd_pin; 6332 bool need_work = false; 6333 6334 spin_lock_irq(&i915->irq_lock); 6335 if (hpd_high != test_bit(hpd_pin, &i915->display.hotplug.oob_hotplug_last_state)) { 6336 i915->display.hotplug.event_bits |= BIT(hpd_pin); 6337 6338 __assign_bit(hpd_pin, &i915->display.hotplug.oob_hotplug_last_state, hpd_high); 6339 need_work = true; 6340 } 6341 spin_unlock_irq(&i915->irq_lock); 6342 6343 if (need_work) 6344 intel_hpd_schedule_detection(i915); 6345 } 6346 6347 static const struct drm_connector_funcs intel_dp_connector_funcs = { 6348 .force = intel_dp_force, 6349 .fill_modes = drm_helper_probe_single_connector_modes, 6350 .atomic_get_property = intel_digital_connector_atomic_get_property, 6351 .atomic_set_property = intel_digital_connector_atomic_set_property, 6352 .late_register = intel_dp_connector_register, 6353 .early_unregister = intel_dp_connector_unregister, 6354 .destroy = intel_connector_destroy, 6355 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 6356 .atomic_duplicate_state = intel_digital_connector_duplicate_state, 6357 .oob_hotplug_event = intel_dp_oob_hotplug_event, 6358 }; 6359 6360 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { 6361 .detect_ctx = intel_dp_detect, 6362 .get_modes = intel_dp_get_modes, 6363 .mode_valid = intel_dp_mode_valid, 6364 .atomic_check = intel_dp_connector_atomic_check, 6365 }; 6366 6367 enum irqreturn 6368 intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd) 6369 { 6370 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 6371 struct intel_dp *intel_dp = &dig_port->dp; 6372 u8 dpcd[DP_RECEIVER_CAP_SIZE]; 6373 6374 if (dig_port->base.type == INTEL_OUTPUT_EDP && 6375 (long_hpd || !intel_pps_have_panel_power_or_vdd(intel_dp))) { 6376 /* 6377 * vdd off can generate a long/short pulse on eDP which 6378 * would require vdd on to handle it, and thus we 6379 * would end up in an endless cycle of 6380 * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..." 6381 */ 6382 drm_dbg_kms(&i915->drm, 6383 "ignoring %s hpd on eDP [ENCODER:%d:%s]\n", 6384 long_hpd ? "long" : "short", 6385 dig_port->base.base.base.id, 6386 dig_port->base.base.name); 6387 return IRQ_HANDLED; 6388 } 6389 6390 drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n", 6391 dig_port->base.base.base.id, 6392 dig_port->base.base.name, 6393 long_hpd ? "long" : "short"); 6394 6395 /* 6396 * TBT DP tunnels require the GFX driver to read out the DPRX caps in 6397 * response to long HPD pulses. The DP hotplug handler does that, 6398 * however the hotplug handler may be blocked by another 6399 * connector's/encoder's hotplug handler. Since the TBT CM may not 6400 * complete the DP tunnel BW request for the latter connector/encoder 6401 * waiting for this encoder's DPRX read, perform a dummy read here. 6402 */ 6403 if (long_hpd) 6404 intel_dp_read_dprx_caps(intel_dp, dpcd); 6405 6406 if (long_hpd) { 6407 intel_dp->reset_link_params = true; 6408 return IRQ_NONE; 6409 } 6410 6411 if (intel_dp->is_mst) { 6412 if (!intel_dp_check_mst_status(intel_dp)) 6413 return IRQ_NONE; 6414 } else if (!intel_dp_short_pulse(intel_dp)) { 6415 return IRQ_NONE; 6416 } 6417 6418 return IRQ_HANDLED; 6419 } 6420 6421 static bool _intel_dp_is_port_edp(struct drm_i915_private *dev_priv, 6422 const struct intel_bios_encoder_data *devdata, 6423 enum port port) 6424 { 6425 /* 6426 * eDP not supported on g4x. so bail out early just 6427 * for a bit extra safety in case the VBT is bonkers. 6428 */ 6429 if (DISPLAY_VER(dev_priv) < 5) 6430 return false; 6431 6432 if (DISPLAY_VER(dev_priv) < 9 && port == PORT_A) 6433 return true; 6434 6435 return devdata && intel_bios_encoder_supports_edp(devdata); 6436 } 6437 6438 bool intel_dp_is_port_edp(struct drm_i915_private *i915, enum port port) 6439 { 6440 const struct intel_bios_encoder_data *devdata = 6441 intel_bios_encoder_data_lookup(i915, port); 6442 6443 return _intel_dp_is_port_edp(i915, devdata, port); 6444 } 6445 6446 bool 6447 intel_dp_has_gamut_metadata_dip(struct intel_encoder *encoder) 6448 { 6449 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 6450 enum port port = encoder->port; 6451 6452 if (intel_bios_encoder_is_lspcon(encoder->devdata)) 6453 return false; 6454 6455 if (DISPLAY_VER(i915) >= 11) 6456 return true; 6457 6458 if (port == PORT_A) 6459 return false; 6460 6461 if (IS_HASWELL(i915) || IS_BROADWELL(i915) || 6462 DISPLAY_VER(i915) >= 9) 6463 return true; 6464 6465 return false; 6466 } 6467 6468 static void 6469 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) 6470 { 6471 struct drm_i915_private *dev_priv = to_i915(connector->dev); 6472 enum port port = dp_to_dig_port(intel_dp)->base.port; 6473 6474 if (!intel_dp_is_edp(intel_dp)) 6475 drm_connector_attach_dp_subconnector_property(connector); 6476 6477 if (!IS_G4X(dev_priv) && port != PORT_A) 6478 intel_attach_force_audio_property(connector); 6479 6480 intel_attach_broadcast_rgb_property(connector); 6481 if (HAS_GMCH(dev_priv)) 6482 drm_connector_attach_max_bpc_property(connector, 6, 10); 6483 else if (DISPLAY_VER(dev_priv) >= 5) 6484 drm_connector_attach_max_bpc_property(connector, 6, 12); 6485 6486 /* Register HDMI colorspace for case of lspcon */ 6487 if (intel_bios_encoder_is_lspcon(dp_to_dig_port(intel_dp)->base.devdata)) { 6488 drm_connector_attach_content_type_property(connector); 6489 intel_attach_hdmi_colorspace_property(connector); 6490 } else { 6491 intel_attach_dp_colorspace_property(connector); 6492 } 6493 6494 if (intel_dp_has_gamut_metadata_dip(&dp_to_dig_port(intel_dp)->base)) 6495 drm_connector_attach_hdr_output_metadata_property(connector); 6496 6497 if (HAS_VRR(dev_priv)) 6498 drm_connector_attach_vrr_capable_property(connector); 6499 } 6500 6501 static void 6502 intel_edp_add_properties(struct intel_dp *intel_dp) 6503 { 6504 struct intel_connector *connector = intel_dp->attached_connector; 6505 struct drm_i915_private *i915 = to_i915(connector->base.dev); 6506 const struct drm_display_mode *fixed_mode = 6507 intel_panel_preferred_fixed_mode(connector); 6508 6509 intel_attach_scaling_mode_property(&connector->base); 6510 6511 drm_connector_set_panel_orientation_with_quirk(&connector->base, 6512 i915->display.vbt.orientation, 6513 fixed_mode->hdisplay, 6514 fixed_mode->vdisplay); 6515 } 6516 6517 static void intel_edp_backlight_setup(struct intel_dp *intel_dp, 6518 struct intel_connector *connector) 6519 { 6520 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 6521 enum pipe pipe = INVALID_PIPE; 6522 6523 if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { 6524 /* 6525 * Figure out the current pipe for the initial backlight setup. 6526 * If the current pipe isn't valid, try the PPS pipe, and if that 6527 * fails just assume pipe A. 6528 */ 6529 pipe = vlv_active_pipe(intel_dp); 6530 6531 if (pipe != PIPE_A && pipe != PIPE_B) 6532 pipe = intel_dp->pps.pps_pipe; 6533 6534 if (pipe != PIPE_A && pipe != PIPE_B) 6535 pipe = PIPE_A; 6536 } 6537 6538 intel_backlight_setup(connector, pipe); 6539 } 6540 6541 static bool intel_edp_init_connector(struct intel_dp *intel_dp, 6542 struct intel_connector *intel_connector) 6543 { 6544 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 6545 struct drm_connector *connector = &intel_connector->base; 6546 struct drm_display_mode *fixed_mode; 6547 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 6548 bool has_dpcd; 6549 const struct drm_edid *drm_edid; 6550 6551 if (!intel_dp_is_edp(intel_dp)) 6552 return true; 6553 6554 /* 6555 * On IBX/CPT we may get here with LVDS already registered. Since the 6556 * driver uses the only internal power sequencer available for both 6557 * eDP and LVDS bail out early in this case to prevent interfering 6558 * with an already powered-on LVDS power sequencer. 6559 */ 6560 if (intel_get_lvds_encoder(dev_priv)) { 6561 drm_WARN_ON(&dev_priv->drm, 6562 !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))); 6563 drm_info(&dev_priv->drm, 6564 "LVDS was detected, not registering eDP\n"); 6565 6566 return false; 6567 } 6568 6569 intel_bios_init_panel_early(dev_priv, &intel_connector->panel, 6570 encoder->devdata); 6571 6572 if (!intel_pps_init(intel_dp)) { 6573 drm_info(&dev_priv->drm, 6574 "[ENCODER:%d:%s] unusable PPS, disabling eDP\n", 6575 encoder->base.base.id, encoder->base.name); 6576 /* 6577 * The BIOS may have still enabled VDD on the PPS even 6578 * though it's unusable. Make sure we turn it back off 6579 * and to release the power domain references/etc. 6580 */ 6581 goto out_vdd_off; 6582 } 6583 6584 /* 6585 * Enable HPD sense for live status check. 6586 * intel_hpd_irq_setup() will turn it off again 6587 * if it's no longer needed later. 6588 * 6589 * The DPCD probe below will make sure VDD is on. 6590 */ 6591 intel_hpd_enable_detection(encoder); 6592 6593 intel_alpm_init_dpcd(intel_dp); 6594 6595 /* Cache DPCD and EDID for edp. */ 6596 has_dpcd = intel_edp_init_dpcd(intel_dp, intel_connector); 6597 6598 if (!has_dpcd) { 6599 /* if this fails, presume the device is a ghost */ 6600 drm_info(&dev_priv->drm, 6601 "[ENCODER:%d:%s] failed to retrieve link info, disabling eDP\n", 6602 encoder->base.base.id, encoder->base.name); 6603 goto out_vdd_off; 6604 } 6605 6606 /* 6607 * VBT and straps are liars. Also check HPD as that seems 6608 * to be the most reliable piece of information available. 6609 * 6610 * ... expect on devices that forgot to hook HPD up for eDP 6611 * (eg. Acer Chromebook C710), so we'll check it only if multiple 6612 * ports are attempting to use the same AUX CH, according to VBT. 6613 */ 6614 if (intel_bios_dp_has_shared_aux_ch(encoder->devdata)) { 6615 /* 6616 * If this fails, presume the DPCD answer came 6617 * from some other port using the same AUX CH. 6618 * 6619 * FIXME maybe cleaner to check this before the 6620 * DPCD read? Would need sort out the VDD handling... 6621 */ 6622 if (!intel_digital_port_connected(encoder)) { 6623 drm_info(&dev_priv->drm, 6624 "[ENCODER:%d:%s] HPD is down, disabling eDP\n", 6625 encoder->base.base.id, encoder->base.name); 6626 goto out_vdd_off; 6627 } 6628 6629 /* 6630 * Unfortunately even the HPD based detection fails on 6631 * eg. Asus B360M-A (CFL+CNP), so as a last resort fall 6632 * back to checking for a VGA branch device. Only do this 6633 * on known affected platforms to minimize false positives. 6634 */ 6635 if (DISPLAY_VER(dev_priv) == 9 && drm_dp_is_branch(intel_dp->dpcd) && 6636 (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) == 6637 DP_DWN_STRM_PORT_TYPE_ANALOG) { 6638 drm_info(&dev_priv->drm, 6639 "[ENCODER:%d:%s] VGA converter detected, disabling eDP\n", 6640 encoder->base.base.id, encoder->base.name); 6641 goto out_vdd_off; 6642 } 6643 } 6644 6645 mutex_lock(&dev_priv->drm.mode_config.mutex); 6646 drm_edid = drm_edid_read_ddc(connector, connector->ddc); 6647 if (!drm_edid) { 6648 /* Fallback to EDID from ACPI OpRegion, if any */ 6649 drm_edid = intel_opregion_get_edid(intel_connector); 6650 if (drm_edid) 6651 drm_dbg_kms(&dev_priv->drm, 6652 "[CONNECTOR:%d:%s] Using OpRegion EDID\n", 6653 connector->base.id, connector->name); 6654 } 6655 if (drm_edid) { 6656 if (drm_edid_connector_update(connector, drm_edid) || 6657 !drm_edid_connector_add_modes(connector)) { 6658 drm_edid_connector_update(connector, NULL); 6659 drm_edid_free(drm_edid); 6660 drm_edid = ERR_PTR(-EINVAL); 6661 } 6662 } else { 6663 drm_edid = ERR_PTR(-ENOENT); 6664 } 6665 6666 intel_bios_init_panel_late(dev_priv, &intel_connector->panel, encoder->devdata, 6667 IS_ERR(drm_edid) ? NULL : drm_edid); 6668 6669 intel_panel_add_edid_fixed_modes(intel_connector, true); 6670 6671 /* MSO requires information from the EDID */ 6672 intel_edp_mso_init(intel_dp); 6673 6674 /* multiply the mode clock and horizontal timings for MSO */ 6675 list_for_each_entry(fixed_mode, &intel_connector->panel.fixed_modes, head) 6676 intel_edp_mso_mode_fixup(intel_connector, fixed_mode); 6677 6678 /* fallback to VBT if available for eDP */ 6679 if (!intel_panel_preferred_fixed_mode(intel_connector)) 6680 intel_panel_add_vbt_lfp_fixed_mode(intel_connector); 6681 6682 mutex_unlock(&dev_priv->drm.mode_config.mutex); 6683 6684 if (!intel_panel_preferred_fixed_mode(intel_connector)) { 6685 drm_info(&dev_priv->drm, 6686 "[ENCODER:%d:%s] failed to find fixed mode for the panel, disabling eDP\n", 6687 encoder->base.base.id, encoder->base.name); 6688 goto out_vdd_off; 6689 } 6690 6691 intel_panel_init(intel_connector, drm_edid); 6692 6693 intel_edp_backlight_setup(intel_dp, intel_connector); 6694 6695 intel_edp_add_properties(intel_dp); 6696 6697 intel_pps_init_late(intel_dp); 6698 6699 return true; 6700 6701 out_vdd_off: 6702 intel_pps_vdd_off_sync(intel_dp); 6703 6704 return false; 6705 } 6706 6707 static void intel_dp_modeset_retry_work_fn(struct work_struct *work) 6708 { 6709 struct intel_connector *intel_connector; 6710 struct drm_connector *connector; 6711 6712 intel_connector = container_of(work, typeof(*intel_connector), 6713 modeset_retry_work); 6714 connector = &intel_connector->base; 6715 drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s]\n", connector->base.id, 6716 connector->name); 6717 6718 /* Grab the locks before changing connector property*/ 6719 mutex_lock(&connector->dev->mode_config.mutex); 6720 /* Set connector link status to BAD and send a Uevent to notify 6721 * userspace to do a modeset. 6722 */ 6723 drm_connector_set_link_status_property(connector, 6724 DRM_MODE_LINK_STATUS_BAD); 6725 mutex_unlock(&connector->dev->mode_config.mutex); 6726 /* Send Hotplug uevent so userspace can reprobe */ 6727 drm_kms_helper_connector_hotplug_event(connector); 6728 6729 drm_connector_put(connector); 6730 } 6731 6732 void intel_dp_init_modeset_retry_work(struct intel_connector *connector) 6733 { 6734 INIT_WORK(&connector->modeset_retry_work, 6735 intel_dp_modeset_retry_work_fn); 6736 } 6737 6738 bool 6739 intel_dp_init_connector(struct intel_digital_port *dig_port, 6740 struct intel_connector *intel_connector) 6741 { 6742 struct drm_connector *connector = &intel_connector->base; 6743 struct intel_dp *intel_dp = &dig_port->dp; 6744 struct intel_encoder *intel_encoder = &dig_port->base; 6745 struct drm_device *dev = intel_encoder->base.dev; 6746 struct drm_i915_private *dev_priv = to_i915(dev); 6747 enum port port = intel_encoder->port; 6748 int type; 6749 6750 /* Initialize the work for modeset in case of link train failure */ 6751 intel_dp_init_modeset_retry_work(intel_connector); 6752 6753 if (drm_WARN(dev, dig_port->max_lanes < 1, 6754 "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n", 6755 dig_port->max_lanes, intel_encoder->base.base.id, 6756 intel_encoder->base.name)) 6757 return false; 6758 6759 intel_dp->reset_link_params = true; 6760 intel_dp->pps.pps_pipe = INVALID_PIPE; 6761 intel_dp->pps.active_pipe = INVALID_PIPE; 6762 6763 /* Preserve the current hw state. */ 6764 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg); 6765 intel_dp->attached_connector = intel_connector; 6766 6767 if (_intel_dp_is_port_edp(dev_priv, intel_encoder->devdata, port)) { 6768 /* 6769 * Currently we don't support eDP on TypeC ports, although in 6770 * theory it could work on TypeC legacy ports. 6771 */ 6772 drm_WARN_ON(dev, intel_encoder_is_tc(intel_encoder)); 6773 type = DRM_MODE_CONNECTOR_eDP; 6774 intel_encoder->type = INTEL_OUTPUT_EDP; 6775 6776 /* eDP only on port B and/or C on vlv/chv */ 6777 if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) || 6778 IS_CHERRYVIEW(dev_priv)) && 6779 port != PORT_B && port != PORT_C)) 6780 return false; 6781 } else { 6782 type = DRM_MODE_CONNECTOR_DisplayPort; 6783 } 6784 6785 intel_dp_set_default_sink_rates(intel_dp); 6786 intel_dp_set_default_max_sink_lane_count(intel_dp); 6787 6788 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 6789 intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp); 6790 6791 intel_dp_aux_init(intel_dp); 6792 intel_connector->dp.dsc_decompression_aux = &intel_dp->aux; 6793 6794 drm_dbg_kms(&dev_priv->drm, 6795 "Adding %s connector on [ENCODER:%d:%s]\n", 6796 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP", 6797 intel_encoder->base.base.id, intel_encoder->base.name); 6798 6799 drm_connector_init_with_ddc(dev, connector, &intel_dp_connector_funcs, 6800 type, &intel_dp->aux.ddc); 6801 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); 6802 6803 if (!HAS_GMCH(dev_priv) && DISPLAY_VER(dev_priv) < 12) 6804 connector->interlace_allowed = true; 6805 6806 intel_connector->polled = DRM_CONNECTOR_POLL_HPD; 6807 intel_connector->base.polled = intel_connector->polled; 6808 6809 intel_connector_attach_encoder(intel_connector, intel_encoder); 6810 6811 if (HAS_DDI(dev_priv)) 6812 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; 6813 else 6814 intel_connector->get_hw_state = intel_connector_get_hw_state; 6815 intel_connector->sync_state = intel_dp_connector_sync_state; 6816 6817 if (!intel_edp_init_connector(intel_dp, intel_connector)) { 6818 intel_dp_aux_fini(intel_dp); 6819 goto fail; 6820 } 6821 6822 intel_dp_set_source_rates(intel_dp); 6823 intel_dp_set_common_rates(intel_dp); 6824 intel_dp_reset_link_params(intel_dp); 6825 6826 /* init MST on ports that can support it */ 6827 intel_dp_mst_encoder_init(dig_port, 6828 intel_connector->base.base.id); 6829 6830 intel_dp_add_properties(intel_dp, connector); 6831 6832 if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) { 6833 int ret = intel_dp_hdcp_init(dig_port, intel_connector); 6834 if (ret) 6835 drm_dbg_kms(&dev_priv->drm, 6836 "HDCP init failed, skipping.\n"); 6837 } 6838 6839 intel_dp->colorimetry_support = 6840 intel_dp_get_colorimetry_status(intel_dp); 6841 6842 intel_dp->frl.is_trained = false; 6843 intel_dp->frl.trained_rate_gbps = 0; 6844 6845 intel_psr_init(intel_dp); 6846 6847 return true; 6848 6849 fail: 6850 intel_display_power_flush_work(dev_priv); 6851 drm_connector_cleanup(connector); 6852 6853 return false; 6854 } 6855 6856 void intel_dp_mst_suspend(struct drm_i915_private *dev_priv) 6857 { 6858 struct intel_encoder *encoder; 6859 6860 if (!HAS_DISPLAY(dev_priv)) 6861 return; 6862 6863 for_each_intel_encoder(&dev_priv->drm, encoder) { 6864 struct intel_dp *intel_dp; 6865 6866 if (encoder->type != INTEL_OUTPUT_DDI) 6867 continue; 6868 6869 intel_dp = enc_to_intel_dp(encoder); 6870 6871 if (!intel_dp_mst_source_support(intel_dp)) 6872 continue; 6873 6874 if (intel_dp->is_mst) 6875 drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr); 6876 } 6877 } 6878 6879 void intel_dp_mst_resume(struct drm_i915_private *dev_priv) 6880 { 6881 struct intel_encoder *encoder; 6882 6883 if (!HAS_DISPLAY(dev_priv)) 6884 return; 6885 6886 for_each_intel_encoder(&dev_priv->drm, encoder) { 6887 struct intel_dp *intel_dp; 6888 int ret; 6889 6890 if (encoder->type != INTEL_OUTPUT_DDI) 6891 continue; 6892 6893 intel_dp = enc_to_intel_dp(encoder); 6894 6895 if (!intel_dp_mst_source_support(intel_dp)) 6896 continue; 6897 6898 ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr, 6899 true); 6900 if (ret) { 6901 intel_dp->is_mst = false; 6902 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 6903 false); 6904 } 6905 } 6906 } 6907