1 /* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 
22 * 23 * Authors: 24 * Keith Packard <keithp@keithp.com> 25 * 26 */ 27 28 #include <linux/export.h> 29 #include <linux/i2c.h> 30 #include <linux/notifier.h> 31 #include <linux/slab.h> 32 #include <linux/string_helpers.h> 33 #include <linux/timekeeping.h> 34 #include <linux/types.h> 35 36 #include <asm/byteorder.h> 37 38 #include <drm/display/drm_dp_helper.h> 39 #include <drm/display/drm_dp_tunnel.h> 40 #include <drm/display/drm_dsc_helper.h> 41 #include <drm/display/drm_hdmi_helper.h> 42 #include <drm/drm_atomic_helper.h> 43 #include <drm/drm_crtc.h> 44 #include <drm/drm_edid.h> 45 #include <drm/drm_probe_helper.h> 46 47 #include "g4x_dp.h" 48 #include "i915_drv.h" 49 #include "i915_irq.h" 50 #include "i915_reg.h" 51 #include "intel_atomic.h" 52 #include "intel_audio.h" 53 #include "intel_backlight.h" 54 #include "intel_combo_phy_regs.h" 55 #include "intel_connector.h" 56 #include "intel_crtc.h" 57 #include "intel_cx0_phy.h" 58 #include "intel_ddi.h" 59 #include "intel_de.h" 60 #include "intel_display_driver.h" 61 #include "intel_display_types.h" 62 #include "intel_dp.h" 63 #include "intel_dp_aux.h" 64 #include "intel_dp_hdcp.h" 65 #include "intel_dp_link_training.h" 66 #include "intel_dp_mst.h" 67 #include "intel_dp_tunnel.h" 68 #include "intel_dpio_phy.h" 69 #include "intel_dpll.h" 70 #include "intel_drrs.h" 71 #include "intel_fifo_underrun.h" 72 #include "intel_hdcp.h" 73 #include "intel_hdmi.h" 74 #include "intel_hotplug.h" 75 #include "intel_hotplug_irq.h" 76 #include "intel_lspcon.h" 77 #include "intel_lvds.h" 78 #include "intel_panel.h" 79 #include "intel_pch_display.h" 80 #include "intel_pps.h" 81 #include "intel_psr.h" 82 #include "intel_tc.h" 83 #include "intel_vdsc.h" 84 #include "intel_vrr.h" 85 #include "intel_crtc_state_dump.h" 86 87 /* DP DSC throughput values used for slice count calculations KPixels/s */ 88 #define DP_DSC_PEAK_PIXEL_RATE 2720000 89 #define DP_DSC_MAX_ENC_THROUGHPUT_0 340000 90 #define DP_DSC_MAX_ENC_THROUGHPUT_1 400000 91 92 /* DP 
DSC FEC Overhead factor in ppm = 1/(0.972261) = 1.028530 */ 93 #define DP_DSC_FEC_OVERHEAD_FACTOR 1028530 94 95 /* Compliance test status bits */ 96 #define INTEL_DP_RESOLUTION_SHIFT_MASK 0 97 #define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK) 98 #define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK) 99 #define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK) 100 101 102 /* Constants for DP DSC configurations */ 103 static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15}; 104 105 /* With Single pipe configuration, HW is capable of supporting maximum 106 * of 4 slices per line. 107 */ 108 static const u8 valid_dsc_slicecount[] = {1, 2, 4}; 109 110 /** 111 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH) 112 * @intel_dp: DP struct 113 * 114 * If a CPU or PCH DP output is attached to an eDP panel, this function 115 * will return true, and false otherwise. 116 * 117 * This function is not safe to use prior to encoder type being set. 118 */ 119 bool intel_dp_is_edp(struct intel_dp *intel_dp) 120 { 121 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 122 123 return dig_port->base.type == INTEL_OUTPUT_EDP; 124 } 125 126 static void intel_dp_unset_edid(struct intel_dp *intel_dp); 127 128 /* Is link rate UHBR and thus 128b/132b? */ 129 bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state) 130 { 131 return drm_dp_is_uhbr_rate(crtc_state->port_clock); 132 } 133 134 /** 135 * intel_dp_link_symbol_size - get the link symbol size for a given link rate 136 * @rate: link rate in 10kbit/s units 137 * 138 * Returns the link symbol size in bits/symbol units depending on the link 139 * rate -> channel coding. 140 */ 141 int intel_dp_link_symbol_size(int rate) 142 { 143 return drm_dp_is_uhbr_rate(rate) ? 
32 : 10; 144 } 145 146 /** 147 * intel_dp_link_symbol_clock - convert link rate to link symbol clock 148 * @rate: link rate in 10kbit/s units 149 * 150 * Returns the link symbol clock frequency in kHz units depending on the 151 * link rate and channel coding. 152 */ 153 int intel_dp_link_symbol_clock(int rate) 154 { 155 return DIV_ROUND_CLOSEST(rate * 10, intel_dp_link_symbol_size(rate)); 156 } 157 158 static int max_dprx_rate(struct intel_dp *intel_dp) 159 { 160 if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp)) 161 return drm_dp_tunnel_max_dprx_rate(intel_dp->tunnel); 162 163 return drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]); 164 } 165 166 static int max_dprx_lane_count(struct intel_dp *intel_dp) 167 { 168 if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp)) 169 return drm_dp_tunnel_max_dprx_lane_count(intel_dp->tunnel); 170 171 return drm_dp_max_lane_count(intel_dp->dpcd); 172 } 173 174 static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp) 175 { 176 intel_dp->sink_rates[0] = 162000; 177 intel_dp->num_sink_rates = 1; 178 } 179 180 /* update sink rates from dpcd */ 181 static void intel_dp_set_dpcd_sink_rates(struct intel_dp *intel_dp) 182 { 183 static const int dp_rates[] = { 184 162000, 270000, 540000, 810000 185 }; 186 int i, max_rate; 187 int max_lttpr_rate; 188 189 if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) { 190 /* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */ 191 static const int quirk_rates[] = { 162000, 270000, 324000 }; 192 193 memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates)); 194 intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates); 195 196 return; 197 } 198 199 /* 200 * Sink rates for 8b/10b. 
201 */ 202 max_rate = max_dprx_rate(intel_dp); 203 max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps); 204 if (max_lttpr_rate) 205 max_rate = min(max_rate, max_lttpr_rate); 206 207 for (i = 0; i < ARRAY_SIZE(dp_rates); i++) { 208 if (dp_rates[i] > max_rate) 209 break; 210 intel_dp->sink_rates[i] = dp_rates[i]; 211 } 212 213 /* 214 * Sink rates for 128b/132b. If set, sink should support all 8b/10b 215 * rates and 10 Gbps. 216 */ 217 if (intel_dp->dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_128B132B) { 218 u8 uhbr_rates = 0; 219 220 BUILD_BUG_ON(ARRAY_SIZE(intel_dp->sink_rates) < ARRAY_SIZE(dp_rates) + 3); 221 222 drm_dp_dpcd_readb(&intel_dp->aux, 223 DP_128B132B_SUPPORTED_LINK_RATES, &uhbr_rates); 224 225 if (drm_dp_lttpr_count(intel_dp->lttpr_common_caps)) { 226 /* We have a repeater */ 227 if (intel_dp->lttpr_common_caps[0] >= 0x20 && 228 intel_dp->lttpr_common_caps[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER - 229 DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] & 230 DP_PHY_REPEATER_128B132B_SUPPORTED) { 231 /* Repeater supports 128b/132b, valid UHBR rates */ 232 uhbr_rates &= intel_dp->lttpr_common_caps[DP_PHY_REPEATER_128B132B_RATES - 233 DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; 234 } else { 235 /* Does not support 128b/132b */ 236 uhbr_rates = 0; 237 } 238 } 239 240 if (uhbr_rates & DP_UHBR10) 241 intel_dp->sink_rates[i++] = 1000000; 242 if (uhbr_rates & DP_UHBR13_5) 243 intel_dp->sink_rates[i++] = 1350000; 244 if (uhbr_rates & DP_UHBR20) 245 intel_dp->sink_rates[i++] = 2000000; 246 } 247 248 intel_dp->num_sink_rates = i; 249 } 250 251 static void intel_dp_set_sink_rates(struct intel_dp *intel_dp) 252 { 253 struct intel_connector *connector = intel_dp->attached_connector; 254 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 255 struct intel_encoder *encoder = &intel_dig_port->base; 256 257 intel_dp_set_dpcd_sink_rates(intel_dp); 258 259 if (intel_dp->num_sink_rates) 260 return; 261 262 
drm_err(&dp_to_i915(intel_dp)->drm, 263 "[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD with no link rates, using defaults\n", 264 connector->base.base.id, connector->base.name, 265 encoder->base.base.id, encoder->base.name); 266 267 intel_dp_set_default_sink_rates(intel_dp); 268 } 269 270 static void intel_dp_set_default_max_sink_lane_count(struct intel_dp *intel_dp) 271 { 272 intel_dp->max_sink_lane_count = 1; 273 } 274 275 static void intel_dp_set_max_sink_lane_count(struct intel_dp *intel_dp) 276 { 277 struct intel_connector *connector = intel_dp->attached_connector; 278 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 279 struct intel_encoder *encoder = &intel_dig_port->base; 280 281 intel_dp->max_sink_lane_count = max_dprx_lane_count(intel_dp); 282 283 switch (intel_dp->max_sink_lane_count) { 284 case 1: 285 case 2: 286 case 4: 287 return; 288 } 289 290 drm_err(&dp_to_i915(intel_dp)->drm, 291 "[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD max lane count (%d), using default\n", 292 connector->base.base.id, connector->base.name, 293 encoder->base.base.id, encoder->base.name, 294 intel_dp->max_sink_lane_count); 295 296 intel_dp_set_default_max_sink_lane_count(intel_dp); 297 } 298 299 /* Get length of rates array potentially limited by max_rate. */ 300 static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate) 301 { 302 int i; 303 304 /* Limit results by potentially reduced max rate */ 305 for (i = 0; i < len; i++) { 306 if (rates[len - i - 1] <= max_rate) 307 return len - i; 308 } 309 310 return 0; 311 } 312 313 /* Get length of common rates array potentially limited by max_rate. 
*/ 314 static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp, 315 int max_rate) 316 { 317 return intel_dp_rate_limit_len(intel_dp->common_rates, 318 intel_dp->num_common_rates, max_rate); 319 } 320 321 static int intel_dp_common_rate(struct intel_dp *intel_dp, int index) 322 { 323 if (drm_WARN_ON(&dp_to_i915(intel_dp)->drm, 324 index < 0 || index >= intel_dp->num_common_rates)) 325 return 162000; 326 327 return intel_dp->common_rates[index]; 328 } 329 330 /* Theoretical max between source and sink */ 331 int intel_dp_max_common_rate(struct intel_dp *intel_dp) 332 { 333 return intel_dp_common_rate(intel_dp, intel_dp->num_common_rates - 1); 334 } 335 336 static int intel_dp_max_source_lane_count(struct intel_digital_port *dig_port) 337 { 338 int vbt_max_lanes = intel_bios_dp_max_lane_count(dig_port->base.devdata); 339 int max_lanes = dig_port->max_lanes; 340 341 if (vbt_max_lanes) 342 max_lanes = min(max_lanes, vbt_max_lanes); 343 344 return max_lanes; 345 } 346 347 /* Theoretical max between source and sink */ 348 int intel_dp_max_common_lane_count(struct intel_dp *intel_dp) 349 { 350 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 351 int source_max = intel_dp_max_source_lane_count(dig_port); 352 int sink_max = intel_dp->max_sink_lane_count; 353 int lane_max = intel_tc_port_max_lane_count(dig_port); 354 int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps); 355 356 if (lttpr_max) 357 sink_max = min(sink_max, lttpr_max); 358 359 return min3(source_max, sink_max, lane_max); 360 } 361 362 int intel_dp_max_lane_count(struct intel_dp *intel_dp) 363 { 364 switch (intel_dp->max_link_lane_count) { 365 case 1: 366 case 2: 367 case 4: 368 return intel_dp->max_link_lane_count; 369 default: 370 MISSING_CASE(intel_dp->max_link_lane_count); 371 return 1; 372 } 373 } 374 375 /* 376 * The required data bandwidth for a mode with given pixel clock and bpp. 
This 377 * is the required net bandwidth independent of the data bandwidth efficiency. 378 * 379 * TODO: check if callers of this functions should use 380 * intel_dp_effective_data_rate() instead. 381 */ 382 int 383 intel_dp_link_required(int pixel_clock, int bpp) 384 { 385 /* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */ 386 return DIV_ROUND_UP(pixel_clock * bpp, 8); 387 } 388 389 /** 390 * intel_dp_effective_data_rate - Return the pixel data rate accounting for BW allocation overhead 391 * @pixel_clock: pixel clock in kHz 392 * @bpp_x16: bits per pixel .4 fixed point format 393 * @bw_overhead: BW allocation overhead in 1ppm units 394 * 395 * Return the effective pixel data rate in kB/sec units taking into account 396 * the provided SSC, FEC, DSC BW allocation overhead. 397 */ 398 int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16, 399 int bw_overhead) 400 { 401 return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_clock * bpp_x16, bw_overhead), 402 1000000 * 16 * 8); 403 } 404 405 /** 406 * intel_dp_max_link_data_rate: Calculate the maximum rate for the given link params 407 * @intel_dp: Intel DP object 408 * @max_dprx_rate: Maximum data rate of the DPRX 409 * @max_dprx_lanes: Maximum lane count of the DPRX 410 * 411 * Calculate the maximum data rate for the provided link parameters taking into 412 * account any BW limitations by a DP tunnel attached to @intel_dp. 413 * 414 * Returns the maximum data rate in kBps units. 
415 */ 416 int intel_dp_max_link_data_rate(struct intel_dp *intel_dp, 417 int max_dprx_rate, int max_dprx_lanes) 418 { 419 int max_rate = drm_dp_max_dprx_data_rate(max_dprx_rate, max_dprx_lanes); 420 421 if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp)) 422 max_rate = min(max_rate, 423 drm_dp_tunnel_available_bw(intel_dp->tunnel)); 424 425 return max_rate; 426 } 427 428 bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp) 429 { 430 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 431 struct intel_encoder *encoder = &intel_dig_port->base; 432 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 433 434 return DISPLAY_VER(dev_priv) >= 12 || 435 (DISPLAY_VER(dev_priv) == 11 && 436 encoder->port != PORT_A); 437 } 438 439 static int dg2_max_source_rate(struct intel_dp *intel_dp) 440 { 441 return intel_dp_is_edp(intel_dp) ? 810000 : 1350000; 442 } 443 444 static int icl_max_source_rate(struct intel_dp *intel_dp) 445 { 446 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 447 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 448 enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); 449 450 if (intel_phy_is_combo(dev_priv, phy) && !intel_dp_is_edp(intel_dp)) 451 return 540000; 452 453 return 810000; 454 } 455 456 static int ehl_max_source_rate(struct intel_dp *intel_dp) 457 { 458 if (intel_dp_is_edp(intel_dp)) 459 return 540000; 460 461 return 810000; 462 } 463 464 static int mtl_max_source_rate(struct intel_dp *intel_dp) 465 { 466 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 467 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 468 enum phy phy = intel_port_to_phy(i915, dig_port->base.port); 469 470 if (intel_is_c10phy(i915, phy)) 471 return 810000; 472 473 return 2000000; 474 } 475 476 static int vbt_max_link_rate(struct intel_dp *intel_dp) 477 { 478 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 479 int max_rate; 480 481 max_rate = 
intel_bios_dp_max_link_rate(encoder->devdata); 482 483 if (intel_dp_is_edp(intel_dp)) { 484 struct intel_connector *connector = intel_dp->attached_connector; 485 int edp_max_rate = connector->panel.vbt.edp.max_link_rate; 486 487 if (max_rate && edp_max_rate) 488 max_rate = min(max_rate, edp_max_rate); 489 else if (edp_max_rate) 490 max_rate = edp_max_rate; 491 } 492 493 return max_rate; 494 } 495 496 static void 497 intel_dp_set_source_rates(struct intel_dp *intel_dp) 498 { 499 /* The values must be in increasing order */ 500 static const int mtl_rates[] = { 501 162000, 216000, 243000, 270000, 324000, 432000, 540000, 675000, 502 810000, 1000000, 1350000, 2000000, 503 }; 504 static const int icl_rates[] = { 505 162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000, 506 1000000, 1350000, 507 }; 508 static const int bxt_rates[] = { 509 162000, 216000, 243000, 270000, 324000, 432000, 540000 510 }; 511 static const int skl_rates[] = { 512 162000, 216000, 270000, 324000, 432000, 540000 513 }; 514 static const int hsw_rates[] = { 515 162000, 270000, 540000 516 }; 517 static const int g4x_rates[] = { 518 162000, 270000 519 }; 520 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 521 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 522 const int *source_rates; 523 int size, max_rate = 0, vbt_max_rate; 524 525 /* This should only be done once */ 526 drm_WARN_ON(&dev_priv->drm, 527 intel_dp->source_rates || intel_dp->num_source_rates); 528 529 if (DISPLAY_VER(dev_priv) >= 14) { 530 source_rates = mtl_rates; 531 size = ARRAY_SIZE(mtl_rates); 532 max_rate = mtl_max_source_rate(intel_dp); 533 } else if (DISPLAY_VER(dev_priv) >= 11) { 534 source_rates = icl_rates; 535 size = ARRAY_SIZE(icl_rates); 536 if (IS_DG2(dev_priv)) 537 max_rate = dg2_max_source_rate(intel_dp); 538 else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) || 539 IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) 540 max_rate = 810000; 541 else if 
(IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) 542 max_rate = ehl_max_source_rate(intel_dp); 543 else 544 max_rate = icl_max_source_rate(intel_dp); 545 } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { 546 source_rates = bxt_rates; 547 size = ARRAY_SIZE(bxt_rates); 548 } else if (DISPLAY_VER(dev_priv) == 9) { 549 source_rates = skl_rates; 550 size = ARRAY_SIZE(skl_rates); 551 } else if ((IS_HASWELL(dev_priv) && !IS_HASWELL_ULX(dev_priv)) || 552 IS_BROADWELL(dev_priv)) { 553 source_rates = hsw_rates; 554 size = ARRAY_SIZE(hsw_rates); 555 } else { 556 source_rates = g4x_rates; 557 size = ARRAY_SIZE(g4x_rates); 558 } 559 560 vbt_max_rate = vbt_max_link_rate(intel_dp); 561 if (max_rate && vbt_max_rate) 562 max_rate = min(max_rate, vbt_max_rate); 563 else if (vbt_max_rate) 564 max_rate = vbt_max_rate; 565 566 if (max_rate) 567 size = intel_dp_rate_limit_len(source_rates, size, max_rate); 568 569 intel_dp->source_rates = source_rates; 570 intel_dp->num_source_rates = size; 571 } 572 573 static int intersect_rates(const int *source_rates, int source_len, 574 const int *sink_rates, int sink_len, 575 int *common_rates) 576 { 577 int i = 0, j = 0, k = 0; 578 579 while (i < source_len && j < sink_len) { 580 if (source_rates[i] == sink_rates[j]) { 581 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES)) 582 return k; 583 common_rates[k] = source_rates[i]; 584 ++k; 585 ++i; 586 ++j; 587 } else if (source_rates[i] < sink_rates[j]) { 588 ++i; 589 } else { 590 ++j; 591 } 592 } 593 return k; 594 } 595 596 /* return index of rate in rates array, or -1 if not found */ 597 static int intel_dp_rate_index(const int *rates, int len, int rate) 598 { 599 int i; 600 601 for (i = 0; i < len; i++) 602 if (rate == rates[i]) 603 return i; 604 605 return -1; 606 } 607 608 static void intel_dp_set_common_rates(struct intel_dp *intel_dp) 609 { 610 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 611 612 drm_WARN_ON(&i915->drm, 613 !intel_dp->num_source_rates || 
!intel_dp->num_sink_rates); 614 615 intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates, 616 intel_dp->num_source_rates, 617 intel_dp->sink_rates, 618 intel_dp->num_sink_rates, 619 intel_dp->common_rates); 620 621 /* Paranoia, there should always be something in common. */ 622 if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) { 623 intel_dp->common_rates[0] = 162000; 624 intel_dp->num_common_rates = 1; 625 } 626 } 627 628 static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate, 629 u8 lane_count) 630 { 631 /* 632 * FIXME: we need to synchronize the current link parameters with 633 * hardware readout. Currently fast link training doesn't work on 634 * boot-up. 635 */ 636 if (link_rate == 0 || 637 link_rate > intel_dp->max_link_rate) 638 return false; 639 640 if (lane_count == 0 || 641 lane_count > intel_dp_max_lane_count(intel_dp)) 642 return false; 643 644 return true; 645 } 646 647 static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp, 648 int link_rate, 649 u8 lane_count) 650 { 651 /* FIXME figure out what we actually want here */ 652 const struct drm_display_mode *fixed_mode = 653 intel_panel_preferred_fixed_mode(intel_dp->attached_connector); 654 int mode_rate, max_rate; 655 656 mode_rate = intel_dp_link_required(fixed_mode->clock, 18); 657 max_rate = intel_dp_max_link_data_rate(intel_dp, link_rate, lane_count); 658 if (mode_rate > max_rate) 659 return false; 660 661 return true; 662 } 663 664 int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, 665 int link_rate, u8 lane_count) 666 { 667 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 668 int index; 669 670 /* 671 * TODO: Enable fallback on MST links once MST link compute can handle 672 * the fallback params. 
673 */ 674 if (intel_dp->is_mst) { 675 drm_err(&i915->drm, "Link Training Unsuccessful\n"); 676 return -1; 677 } 678 679 if (intel_dp_is_edp(intel_dp) && !intel_dp->use_max_params) { 680 drm_dbg_kms(&i915->drm, 681 "Retrying Link training for eDP with max parameters\n"); 682 intel_dp->use_max_params = true; 683 return 0; 684 } 685 686 index = intel_dp_rate_index(intel_dp->common_rates, 687 intel_dp->num_common_rates, 688 link_rate); 689 if (index > 0) { 690 if (intel_dp_is_edp(intel_dp) && 691 !intel_dp_can_link_train_fallback_for_edp(intel_dp, 692 intel_dp_common_rate(intel_dp, index - 1), 693 lane_count)) { 694 drm_dbg_kms(&i915->drm, 695 "Retrying Link training for eDP with same parameters\n"); 696 return 0; 697 } 698 intel_dp->max_link_rate = intel_dp_common_rate(intel_dp, index - 1); 699 intel_dp->max_link_lane_count = lane_count; 700 } else if (lane_count > 1) { 701 if (intel_dp_is_edp(intel_dp) && 702 !intel_dp_can_link_train_fallback_for_edp(intel_dp, 703 intel_dp_max_common_rate(intel_dp), 704 lane_count >> 1)) { 705 drm_dbg_kms(&i915->drm, 706 "Retrying Link training for eDP with same parameters\n"); 707 return 0; 708 } 709 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); 710 intel_dp->max_link_lane_count = lane_count >> 1; 711 } else { 712 drm_err(&i915->drm, "Link Training Unsuccessful\n"); 713 return -1; 714 } 715 716 return 0; 717 } 718 719 u32 intel_dp_mode_to_fec_clock(u32 mode_clock) 720 { 721 return div_u64(mul_u32_u32(mode_clock, DP_DSC_FEC_OVERHEAD_FACTOR), 722 1000000U); 723 } 724 725 int intel_dp_bw_fec_overhead(bool fec_enabled) 726 { 727 /* 728 * TODO: Calculate the actual overhead for a given mode. 729 * The hard-coded 1/0.972261=2.853% overhead factor 730 * corresponds (for instance) to the 8b/10b DP FEC 2.4% + 731 * 0.453% DSC overhead. 
This is enough for a 3840 width mode, 732 * which has a DSC overhead of up to ~0.2%, but may not be 733 * enough for a 1024 width mode where this is ~0.8% (on a 4 734 * lane DP link, with 2 DSC slices and 8 bpp color depth). 735 */ 736 return fec_enabled ? DP_DSC_FEC_OVERHEAD_FACTOR : 1000000; 737 } 738 739 static int 740 small_joiner_ram_size_bits(struct drm_i915_private *i915) 741 { 742 if (DISPLAY_VER(i915) >= 13) 743 return 17280 * 8; 744 else if (DISPLAY_VER(i915) >= 11) 745 return 7680 * 8; 746 else 747 return 6144 * 8; 748 } 749 750 u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 pipe_bpp) 751 { 752 u32 bits_per_pixel = bpp; 753 int i; 754 755 /* Error out if the max bpp is less than smallest allowed valid bpp */ 756 if (bits_per_pixel < valid_dsc_bpp[0]) { 757 drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n", 758 bits_per_pixel, valid_dsc_bpp[0]); 759 return 0; 760 } 761 762 /* From XE_LPD onwards we support from bpc upto uncompressed bpp-1 BPPs */ 763 if (DISPLAY_VER(i915) >= 13) { 764 bits_per_pixel = min(bits_per_pixel, pipe_bpp - 1); 765 766 /* 767 * According to BSpec, 27 is the max DSC output bpp, 768 * 8 is the min DSC output bpp. 769 * While we can still clamp higher bpp values to 27, saving bandwidth, 770 * if it is required to oompress up to bpp < 8, means we can't do 771 * that and probably means we can't fit the required mode, even with 772 * DSC enabled. 
773 */ 774 if (bits_per_pixel < 8) { 775 drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min 8\n", 776 bits_per_pixel); 777 return 0; 778 } 779 bits_per_pixel = min_t(u32, bits_per_pixel, 27); 780 } else { 781 /* Find the nearest match in the array of known BPPs from VESA */ 782 for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) { 783 if (bits_per_pixel < valid_dsc_bpp[i + 1]) 784 break; 785 } 786 drm_dbg_kms(&i915->drm, "Set dsc bpp from %d to VESA %d\n", 787 bits_per_pixel, valid_dsc_bpp[i]); 788 789 bits_per_pixel = valid_dsc_bpp[i]; 790 } 791 792 return bits_per_pixel; 793 } 794 795 static 796 u32 get_max_compressed_bpp_with_joiner(struct drm_i915_private *i915, 797 u32 mode_clock, u32 mode_hdisplay, 798 bool bigjoiner) 799 { 800 u32 max_bpp_small_joiner_ram; 801 802 /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */ 803 max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) / mode_hdisplay; 804 805 if (bigjoiner) { 806 int bigjoiner_interface_bits = DISPLAY_VER(i915) >= 14 ? 
36 : 24; 807 /* With bigjoiner multiple dsc engines are used in parallel so PPC is 2 */ 808 int ppc = 2; 809 u32 max_bpp_bigjoiner = 810 i915->display.cdclk.max_cdclk_freq * ppc * bigjoiner_interface_bits / 811 intel_dp_mode_to_fec_clock(mode_clock); 812 813 max_bpp_small_joiner_ram *= 2; 814 815 return min(max_bpp_small_joiner_ram, max_bpp_bigjoiner); 816 } 817 818 return max_bpp_small_joiner_ram; 819 } 820 821 u16 intel_dp_dsc_get_max_compressed_bpp(struct drm_i915_private *i915, 822 u32 link_clock, u32 lane_count, 823 u32 mode_clock, u32 mode_hdisplay, 824 bool bigjoiner, 825 enum intel_output_format output_format, 826 u32 pipe_bpp, 827 u32 timeslots) 828 { 829 u32 bits_per_pixel, joiner_max_bpp; 830 831 /* 832 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)* 833 * (LinkSymbolClock)* 8 * (TimeSlots / 64) 834 * for SST -> TimeSlots is 64(i.e all TimeSlots that are available) 835 * for MST -> TimeSlots has to be calculated, based on mode requirements 836 * 837 * Due to FEC overhead, the available bw is reduced to 97.2261%. 
838 * To support the given mode: 839 * Bandwidth required should be <= Available link Bandwidth * FEC Overhead 840 * =>ModeClock * bits_per_pixel <= Available Link Bandwidth * FEC Overhead 841 * =>bits_per_pixel <= Available link Bandwidth * FEC Overhead / ModeClock 842 * =>bits_per_pixel <= (NumberOfLanes * LinkSymbolClock) * 8 (TimeSlots / 64) / 843 * (ModeClock / FEC Overhead) 844 * =>bits_per_pixel <= (NumberOfLanes * LinkSymbolClock * TimeSlots) / 845 * (ModeClock / FEC Overhead * 8) 846 */ 847 bits_per_pixel = ((link_clock * lane_count) * timeslots) / 848 (intel_dp_mode_to_fec_clock(mode_clock) * 8); 849 850 /* Bandwidth required for 420 is half, that of 444 format */ 851 if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 852 bits_per_pixel *= 2; 853 854 /* 855 * According to DSC 1.2a Section 4.1.1 Table 4.1 the maximum 856 * supported PPS value can be 63.9375 and with the further 857 * mention that for 420, 422 formats, bpp should be programmed double 858 * the target bpp restricting our target bpp to be 31.9375 at max. 
859 */ 860 if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 861 bits_per_pixel = min_t(u32, bits_per_pixel, 31); 862 863 drm_dbg_kms(&i915->drm, "Max link bpp is %u for %u timeslots " 864 "total bw %u pixel clock %u\n", 865 bits_per_pixel, timeslots, 866 (link_clock * lane_count * 8), 867 intel_dp_mode_to_fec_clock(mode_clock)); 868 869 joiner_max_bpp = get_max_compressed_bpp_with_joiner(i915, mode_clock, 870 mode_hdisplay, bigjoiner); 871 bits_per_pixel = min(bits_per_pixel, joiner_max_bpp); 872 873 bits_per_pixel = intel_dp_dsc_nearest_valid_bpp(i915, bits_per_pixel, pipe_bpp); 874 875 return bits_per_pixel; 876 } 877 878 u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector, 879 int mode_clock, int mode_hdisplay, 880 bool bigjoiner) 881 { 882 struct drm_i915_private *i915 = to_i915(connector->base.dev); 883 u8 min_slice_count, i; 884 int max_slice_width; 885 886 if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE) 887 min_slice_count = DIV_ROUND_UP(mode_clock, 888 DP_DSC_MAX_ENC_THROUGHPUT_0); 889 else 890 min_slice_count = DIV_ROUND_UP(mode_clock, 891 DP_DSC_MAX_ENC_THROUGHPUT_1); 892 893 /* 894 * Due to some DSC engine BW limitations, we need to enable second 895 * slice and VDSC engine, whenever we approach close enough to max CDCLK 896 */ 897 if (mode_clock >= ((i915->display.cdclk.max_cdclk_freq * 85) / 100)) 898 min_slice_count = max_t(u8, min_slice_count, 2); 899 900 max_slice_width = drm_dp_dsc_sink_max_slice_width(connector->dp.dsc_dpcd); 901 if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) { 902 drm_dbg_kms(&i915->drm, 903 "Unsupported slice width %d by DP DSC Sink device\n", 904 max_slice_width); 905 return 0; 906 } 907 /* Also take into account max slice width */ 908 min_slice_count = max_t(u8, min_slice_count, 909 DIV_ROUND_UP(mode_hdisplay, 910 max_slice_width)); 911 912 /* Find the closest match to the valid slice count values */ 913 for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) { 914 u8 test_slice_count = 
valid_dsc_slicecount[i] << bigjoiner; 915 916 if (test_slice_count > 917 drm_dp_dsc_sink_max_slice_count(connector->dp.dsc_dpcd, false)) 918 break; 919 920 /* big joiner needs small joiner to be enabled */ 921 if (bigjoiner && test_slice_count < 4) 922 continue; 923 924 if (min_slice_count <= test_slice_count) 925 return test_slice_count; 926 } 927 928 drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n", 929 min_slice_count); 930 return 0; 931 } 932 933 static bool source_can_output(struct intel_dp *intel_dp, 934 enum intel_output_format format) 935 { 936 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 937 938 switch (format) { 939 case INTEL_OUTPUT_FORMAT_RGB: 940 return true; 941 942 case INTEL_OUTPUT_FORMAT_YCBCR444: 943 /* 944 * No YCbCr output support on gmch platforms. 945 * Also, ILK doesn't seem capable of DP YCbCr output. 946 * The displayed image is severly corrupted. SNB+ is fine. 947 */ 948 return !HAS_GMCH(i915) && !IS_IRONLAKE(i915); 949 950 case INTEL_OUTPUT_FORMAT_YCBCR420: 951 /* Platform < Gen 11 cannot output YCbCr420 format */ 952 return DISPLAY_VER(i915) >= 11; 953 954 default: 955 MISSING_CASE(format); 956 return false; 957 } 958 } 959 960 static bool 961 dfp_can_convert_from_rgb(struct intel_dp *intel_dp, 962 enum intel_output_format sink_format) 963 { 964 if (!drm_dp_is_branch(intel_dp->dpcd)) 965 return false; 966 967 if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR444) 968 return intel_dp->dfp.rgb_to_ycbcr; 969 970 if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420) 971 return intel_dp->dfp.rgb_to_ycbcr && 972 intel_dp->dfp.ycbcr_444_to_420; 973 974 return false; 975 } 976 977 static bool 978 dfp_can_convert_from_ycbcr444(struct intel_dp *intel_dp, 979 enum intel_output_format sink_format) 980 { 981 if (!drm_dp_is_branch(intel_dp->dpcd)) 982 return false; 983 984 if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420) 985 return intel_dp->dfp.ycbcr_444_to_420; 986 987 return false; 988 } 989 990 static bool 991 dfp_can_convert(struct intel_dp 
*intel_dp, 992 enum intel_output_format output_format, 993 enum intel_output_format sink_format) 994 { 995 switch (output_format) { 996 case INTEL_OUTPUT_FORMAT_RGB: 997 return dfp_can_convert_from_rgb(intel_dp, sink_format); 998 case INTEL_OUTPUT_FORMAT_YCBCR444: 999 return dfp_can_convert_from_ycbcr444(intel_dp, sink_format); 1000 default: 1001 MISSING_CASE(output_format); 1002 return false; 1003 } 1004 1005 return false; 1006 } 1007 1008 static enum intel_output_format 1009 intel_dp_output_format(struct intel_connector *connector, 1010 enum intel_output_format sink_format) 1011 { 1012 struct intel_dp *intel_dp = intel_attached_dp(connector); 1013 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1014 enum intel_output_format force_dsc_output_format = 1015 intel_dp->force_dsc_output_format; 1016 enum intel_output_format output_format; 1017 if (force_dsc_output_format) { 1018 if (source_can_output(intel_dp, force_dsc_output_format) && 1019 (!drm_dp_is_branch(intel_dp->dpcd) || 1020 sink_format != force_dsc_output_format || 1021 dfp_can_convert(intel_dp, force_dsc_output_format, sink_format))) 1022 return force_dsc_output_format; 1023 1024 drm_dbg_kms(&i915->drm, "Cannot force DSC output format\n"); 1025 } 1026 1027 if (sink_format == INTEL_OUTPUT_FORMAT_RGB || 1028 dfp_can_convert_from_rgb(intel_dp, sink_format)) 1029 output_format = INTEL_OUTPUT_FORMAT_RGB; 1030 1031 else if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR444 || 1032 dfp_can_convert_from_ycbcr444(intel_dp, sink_format)) 1033 output_format = INTEL_OUTPUT_FORMAT_YCBCR444; 1034 1035 else 1036 output_format = INTEL_OUTPUT_FORMAT_YCBCR420; 1037 1038 drm_WARN_ON(&i915->drm, !source_can_output(intel_dp, output_format)); 1039 1040 return output_format; 1041 } 1042 1043 int intel_dp_min_bpp(enum intel_output_format output_format) 1044 { 1045 if (output_format == INTEL_OUTPUT_FORMAT_RGB) 1046 return 6 * 3; 1047 else 1048 return 8 * 3; 1049 } 1050 1051 int intel_dp_output_bpp(enum intel_output_format 
output_format, int bpp) 1052 { 1053 /* 1054 * bpp value was assumed to RGB format. And YCbCr 4:2:0 output 1055 * format of the number of bytes per pixel will be half the number 1056 * of bytes of RGB pixel. 1057 */ 1058 if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 1059 bpp /= 2; 1060 1061 return bpp; 1062 } 1063 1064 static enum intel_output_format 1065 intel_dp_sink_format(struct intel_connector *connector, 1066 const struct drm_display_mode *mode) 1067 { 1068 const struct drm_display_info *info = &connector->base.display_info; 1069 1070 if (drm_mode_is_420_only(info, mode)) 1071 return INTEL_OUTPUT_FORMAT_YCBCR420; 1072 1073 return INTEL_OUTPUT_FORMAT_RGB; 1074 } 1075 1076 static int 1077 intel_dp_mode_min_output_bpp(struct intel_connector *connector, 1078 const struct drm_display_mode *mode) 1079 { 1080 enum intel_output_format output_format, sink_format; 1081 1082 sink_format = intel_dp_sink_format(connector, mode); 1083 1084 output_format = intel_dp_output_format(connector, sink_format); 1085 1086 return intel_dp_output_bpp(output_format, intel_dp_min_bpp(output_format)); 1087 } 1088 1089 static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv, 1090 int hdisplay) 1091 { 1092 /* 1093 * Older platforms don't like hdisplay==4096 with DP. 1094 * 1095 * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline 1096 * and frame counter increment), but we don't get vblank interrupts, 1097 * and the pipe underruns immediately. The link also doesn't seem 1098 * to get trained properly. 1099 * 1100 * On CHV the vblank interrupts don't seem to disappear but 1101 * otherwise the symptoms are similar. 
1102 * 1103 * TODO: confirm the behaviour on HSW+ 1104 */ 1105 return hdisplay == 4096 && !HAS_DDI(dev_priv); 1106 } 1107 1108 static int intel_dp_max_tmds_clock(struct intel_dp *intel_dp) 1109 { 1110 struct intel_connector *connector = intel_dp->attached_connector; 1111 const struct drm_display_info *info = &connector->base.display_info; 1112 int max_tmds_clock = intel_dp->dfp.max_tmds_clock; 1113 1114 /* Only consider the sink's max TMDS clock if we know this is a HDMI DFP */ 1115 if (max_tmds_clock && info->max_tmds_clock) 1116 max_tmds_clock = min(max_tmds_clock, info->max_tmds_clock); 1117 1118 return max_tmds_clock; 1119 } 1120 1121 static enum drm_mode_status 1122 intel_dp_tmds_clock_valid(struct intel_dp *intel_dp, 1123 int clock, int bpc, 1124 enum intel_output_format sink_format, 1125 bool respect_downstream_limits) 1126 { 1127 int tmds_clock, min_tmds_clock, max_tmds_clock; 1128 1129 if (!respect_downstream_limits) 1130 return MODE_OK; 1131 1132 tmds_clock = intel_hdmi_tmds_clock(clock, bpc, sink_format); 1133 1134 min_tmds_clock = intel_dp->dfp.min_tmds_clock; 1135 max_tmds_clock = intel_dp_max_tmds_clock(intel_dp); 1136 1137 if (min_tmds_clock && tmds_clock < min_tmds_clock) 1138 return MODE_CLOCK_LOW; 1139 1140 if (max_tmds_clock && tmds_clock > max_tmds_clock) 1141 return MODE_CLOCK_HIGH; 1142 1143 return MODE_OK; 1144 } 1145 1146 static enum drm_mode_status 1147 intel_dp_mode_valid_downstream(struct intel_connector *connector, 1148 const struct drm_display_mode *mode, 1149 int target_clock) 1150 { 1151 struct intel_dp *intel_dp = intel_attached_dp(connector); 1152 const struct drm_display_info *info = &connector->base.display_info; 1153 enum drm_mode_status status; 1154 enum intel_output_format sink_format; 1155 1156 /* If PCON supports FRL MODE, check FRL bandwidth constraints */ 1157 if (intel_dp->dfp.pcon_max_frl_bw) { 1158 int target_bw; 1159 int max_frl_bw; 1160 int bpp = intel_dp_mode_min_output_bpp(connector, mode); 1161 1162 target_bw = bpp 
* target_clock; 1163 1164 max_frl_bw = intel_dp->dfp.pcon_max_frl_bw; 1165 1166 /* converting bw from Gbps to Kbps*/ 1167 max_frl_bw = max_frl_bw * 1000000; 1168 1169 if (target_bw > max_frl_bw) 1170 return MODE_CLOCK_HIGH; 1171 1172 return MODE_OK; 1173 } 1174 1175 if (intel_dp->dfp.max_dotclock && 1176 target_clock > intel_dp->dfp.max_dotclock) 1177 return MODE_CLOCK_HIGH; 1178 1179 sink_format = intel_dp_sink_format(connector, mode); 1180 1181 /* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */ 1182 status = intel_dp_tmds_clock_valid(intel_dp, target_clock, 1183 8, sink_format, true); 1184 1185 if (status != MODE_OK) { 1186 if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420 || 1187 !connector->base.ycbcr_420_allowed || 1188 !drm_mode_is_420_also(info, mode)) 1189 return status; 1190 sink_format = INTEL_OUTPUT_FORMAT_YCBCR420; 1191 status = intel_dp_tmds_clock_valid(intel_dp, target_clock, 1192 8, sink_format, true); 1193 if (status != MODE_OK) 1194 return status; 1195 } 1196 1197 return MODE_OK; 1198 } 1199 1200 bool intel_dp_need_bigjoiner(struct intel_dp *intel_dp, 1201 int hdisplay, int clock) 1202 { 1203 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1204 struct intel_connector *connector = intel_dp->attached_connector; 1205 1206 if (!intel_dp_can_bigjoiner(intel_dp)) 1207 return false; 1208 1209 return clock > i915->max_dotclk_freq || hdisplay > 5120 || 1210 connector->force_bigjoiner_enable; 1211 } 1212 1213 static enum drm_mode_status 1214 intel_dp_mode_valid(struct drm_connector *_connector, 1215 struct drm_display_mode *mode) 1216 { 1217 struct intel_connector *connector = to_intel_connector(_connector); 1218 struct intel_dp *intel_dp = intel_attached_dp(connector); 1219 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1220 const struct drm_display_mode *fixed_mode; 1221 int target_clock = mode->clock; 1222 int max_rate, mode_rate, max_lanes, max_link_clock; 1223 int max_dotclk = dev_priv->max_dotclk_freq; 1224 u16 
dsc_max_compressed_bpp = 0; 1225 u8 dsc_slice_count = 0; 1226 enum drm_mode_status status; 1227 bool dsc = false, bigjoiner = false; 1228 1229 status = intel_cpu_transcoder_mode_valid(dev_priv, mode); 1230 if (status != MODE_OK) 1231 return status; 1232 1233 if (mode->flags & DRM_MODE_FLAG_DBLCLK) 1234 return MODE_H_ILLEGAL; 1235 1236 fixed_mode = intel_panel_fixed_mode(connector, mode); 1237 if (intel_dp_is_edp(intel_dp) && fixed_mode) { 1238 status = intel_panel_mode_valid(connector, mode); 1239 if (status != MODE_OK) 1240 return status; 1241 1242 target_clock = fixed_mode->clock; 1243 } 1244 1245 if (mode->clock < 10000) 1246 return MODE_CLOCK_LOW; 1247 1248 if (intel_dp_need_bigjoiner(intel_dp, mode->hdisplay, target_clock)) { 1249 bigjoiner = true; 1250 max_dotclk *= 2; 1251 } 1252 if (target_clock > max_dotclk) 1253 return MODE_CLOCK_HIGH; 1254 1255 if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay)) 1256 return MODE_H_ILLEGAL; 1257 1258 max_link_clock = intel_dp_max_link_rate(intel_dp); 1259 max_lanes = intel_dp_max_lane_count(intel_dp); 1260 1261 max_rate = intel_dp_max_link_data_rate(intel_dp, max_link_clock, max_lanes); 1262 1263 mode_rate = intel_dp_link_required(target_clock, 1264 intel_dp_mode_min_output_bpp(connector, mode)); 1265 1266 if (HAS_DSC(dev_priv) && 1267 drm_dp_sink_supports_dsc(connector->dp.dsc_dpcd)) { 1268 enum intel_output_format sink_format, output_format; 1269 int pipe_bpp; 1270 1271 sink_format = intel_dp_sink_format(connector, mode); 1272 output_format = intel_dp_output_format(connector, sink_format); 1273 /* 1274 * TBD pass the connector BPC, 1275 * for now U8_MAX so that max BPC on that platform would be picked 1276 */ 1277 pipe_bpp = intel_dp_dsc_compute_max_bpp(connector, U8_MAX); 1278 1279 /* 1280 * Output bpp is stored in 6.4 format so right shift by 4 to get the 1281 * integer value since we support only integer values of bpp. 
1282 */ 1283 if (intel_dp_is_edp(intel_dp)) { 1284 dsc_max_compressed_bpp = 1285 drm_edp_dsc_sink_output_bpp(connector->dp.dsc_dpcd) >> 4; 1286 dsc_slice_count = 1287 drm_dp_dsc_sink_max_slice_count(connector->dp.dsc_dpcd, 1288 true); 1289 } else if (drm_dp_sink_supports_fec(connector->dp.fec_capability)) { 1290 dsc_max_compressed_bpp = 1291 intel_dp_dsc_get_max_compressed_bpp(dev_priv, 1292 max_link_clock, 1293 max_lanes, 1294 target_clock, 1295 mode->hdisplay, 1296 bigjoiner, 1297 output_format, 1298 pipe_bpp, 64); 1299 dsc_slice_count = 1300 intel_dp_dsc_get_slice_count(connector, 1301 target_clock, 1302 mode->hdisplay, 1303 bigjoiner); 1304 } 1305 1306 dsc = dsc_max_compressed_bpp && dsc_slice_count; 1307 } 1308 1309 /* 1310 * Big joiner configuration needs DSC for TGL which is not true for 1311 * XE_LPD where uncompressed joiner is supported. 1312 */ 1313 if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc) 1314 return MODE_CLOCK_HIGH; 1315 1316 if (mode_rate > max_rate && !dsc) 1317 return MODE_CLOCK_HIGH; 1318 1319 status = intel_dp_mode_valid_downstream(connector, mode, target_clock); 1320 if (status != MODE_OK) 1321 return status; 1322 1323 return intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner); 1324 } 1325 1326 bool intel_dp_source_supports_tps3(struct drm_i915_private *i915) 1327 { 1328 return DISPLAY_VER(i915) >= 9 || IS_BROADWELL(i915) || IS_HASWELL(i915); 1329 } 1330 1331 bool intel_dp_source_supports_tps4(struct drm_i915_private *i915) 1332 { 1333 return DISPLAY_VER(i915) >= 10; 1334 } 1335 1336 static void snprintf_int_array(char *str, size_t len, 1337 const int *array, int nelem) 1338 { 1339 int i; 1340 1341 str[0] = '\0'; 1342 1343 for (i = 0; i < nelem; i++) { 1344 int r = snprintf(str, len, "%s%d", i ? 
", " : "", array[i]); 1345 if (r >= len) 1346 return; 1347 str += r; 1348 len -= r; 1349 } 1350 } 1351 1352 static void intel_dp_print_rates(struct intel_dp *intel_dp) 1353 { 1354 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1355 char str[128]; /* FIXME: too big for stack? */ 1356 1357 if (!drm_debug_enabled(DRM_UT_KMS)) 1358 return; 1359 1360 snprintf_int_array(str, sizeof(str), 1361 intel_dp->source_rates, intel_dp->num_source_rates); 1362 drm_dbg_kms(&i915->drm, "source rates: %s\n", str); 1363 1364 snprintf_int_array(str, sizeof(str), 1365 intel_dp->sink_rates, intel_dp->num_sink_rates); 1366 drm_dbg_kms(&i915->drm, "sink rates: %s\n", str); 1367 1368 snprintf_int_array(str, sizeof(str), 1369 intel_dp->common_rates, intel_dp->num_common_rates); 1370 drm_dbg_kms(&i915->drm, "common rates: %s\n", str); 1371 } 1372 1373 int 1374 intel_dp_max_link_rate(struct intel_dp *intel_dp) 1375 { 1376 int len; 1377 1378 len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate); 1379 1380 return intel_dp_common_rate(intel_dp, len - 1); 1381 } 1382 1383 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate) 1384 { 1385 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1386 int i = intel_dp_rate_index(intel_dp->sink_rates, 1387 intel_dp->num_sink_rates, rate); 1388 1389 if (drm_WARN_ON(&i915->drm, i < 0)) 1390 i = 0; 1391 1392 return i; 1393 } 1394 1395 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, 1396 u8 *link_bw, u8 *rate_select) 1397 { 1398 /* eDP 1.4 rate select method. 
*/ 1399 if (intel_dp->use_rate_select) { 1400 *link_bw = 0; 1401 *rate_select = 1402 intel_dp_rate_select(intel_dp, port_clock); 1403 } else { 1404 *link_bw = drm_dp_link_rate_to_bw_code(port_clock); 1405 *rate_select = 0; 1406 } 1407 } 1408 1409 bool intel_dp_has_hdmi_sink(struct intel_dp *intel_dp) 1410 { 1411 struct intel_connector *connector = intel_dp->attached_connector; 1412 1413 return connector->base.display_info.is_hdmi; 1414 } 1415 1416 static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp, 1417 const struct intel_crtc_state *pipe_config) 1418 { 1419 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 1420 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1421 1422 if (DISPLAY_VER(dev_priv) >= 12) 1423 return true; 1424 1425 if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A) 1426 return true; 1427 1428 return false; 1429 } 1430 1431 bool intel_dp_supports_fec(struct intel_dp *intel_dp, 1432 const struct intel_connector *connector, 1433 const struct intel_crtc_state *pipe_config) 1434 { 1435 return intel_dp_source_supports_fec(intel_dp, pipe_config) && 1436 drm_dp_sink_supports_fec(connector->dp.fec_capability); 1437 } 1438 1439 static bool intel_dp_supports_dsc(const struct intel_connector *connector, 1440 const struct intel_crtc_state *crtc_state) 1441 { 1442 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP) && !crtc_state->fec_enable) 1443 return false; 1444 1445 return intel_dsc_source_support(crtc_state) && 1446 connector->dp.dsc_decompression_aux && 1447 drm_dp_sink_supports_dsc(connector->dp.dsc_dpcd); 1448 } 1449 1450 static int intel_dp_hdmi_compute_bpc(struct intel_dp *intel_dp, 1451 const struct intel_crtc_state *crtc_state, 1452 int bpc, bool respect_downstream_limits) 1453 { 1454 int clock = crtc_state->hw.adjusted_mode.crtc_clock; 1455 1456 /* 1457 * Current bpc could already be below 8bpc due to 1458 * FDI bandwidth constraints or other limits. 1459 * HDMI minimum is 8bpc however. 
1460 */ 1461 bpc = max(bpc, 8); 1462 1463 /* 1464 * We will never exceed downstream TMDS clock limits while 1465 * attempting deep color. If the user insists on forcing an 1466 * out of spec mode they will have to be satisfied with 8bpc. 1467 */ 1468 if (!respect_downstream_limits) 1469 bpc = 8; 1470 1471 for (; bpc >= 8; bpc -= 2) { 1472 if (intel_hdmi_bpc_possible(crtc_state, bpc, 1473 intel_dp_has_hdmi_sink(intel_dp)) && 1474 intel_dp_tmds_clock_valid(intel_dp, clock, bpc, crtc_state->sink_format, 1475 respect_downstream_limits) == MODE_OK) 1476 return bpc; 1477 } 1478 1479 return -EINVAL; 1480 } 1481 1482 static int intel_dp_max_bpp(struct intel_dp *intel_dp, 1483 const struct intel_crtc_state *crtc_state, 1484 bool respect_downstream_limits) 1485 { 1486 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1487 struct intel_connector *intel_connector = intel_dp->attached_connector; 1488 int bpp, bpc; 1489 1490 bpc = crtc_state->pipe_bpp / 3; 1491 1492 if (intel_dp->dfp.max_bpc) 1493 bpc = min_t(int, bpc, intel_dp->dfp.max_bpc); 1494 1495 if (intel_dp->dfp.min_tmds_clock) { 1496 int max_hdmi_bpc; 1497 1498 max_hdmi_bpc = intel_dp_hdmi_compute_bpc(intel_dp, crtc_state, bpc, 1499 respect_downstream_limits); 1500 if (max_hdmi_bpc < 0) 1501 return 0; 1502 1503 bpc = min(bpc, max_hdmi_bpc); 1504 } 1505 1506 bpp = bpc * 3; 1507 if (intel_dp_is_edp(intel_dp)) { 1508 /* Get bpp from vbt only for panels that dont have bpp in edid */ 1509 if (intel_connector->base.display_info.bpc == 0 && 1510 intel_connector->panel.vbt.edp.bpp && 1511 intel_connector->panel.vbt.edp.bpp < bpp) { 1512 drm_dbg_kms(&dev_priv->drm, 1513 "clamping bpp for eDP panel to BIOS-provided %i\n", 1514 intel_connector->panel.vbt.edp.bpp); 1515 bpp = intel_connector->panel.vbt.edp.bpp; 1516 } 1517 } 1518 1519 return bpp; 1520 } 1521 1522 /* Adjust link config limits based on compliance test requests. 
*/ 1523 void 1524 intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, 1525 struct intel_crtc_state *pipe_config, 1526 struct link_config_limits *limits) 1527 { 1528 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1529 1530 /* For DP Compliance we override the computed bpp for the pipe */ 1531 if (intel_dp->compliance.test_data.bpc != 0) { 1532 int bpp = 3 * intel_dp->compliance.test_data.bpc; 1533 1534 limits->pipe.min_bpp = limits->pipe.max_bpp = bpp; 1535 pipe_config->dither_force_disable = bpp == 6 * 3; 1536 1537 drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp); 1538 } 1539 1540 /* Use values requested by Compliance Test Request */ 1541 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { 1542 int index; 1543 1544 /* Validate the compliance test data since max values 1545 * might have changed due to link train fallback. 1546 */ 1547 if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate, 1548 intel_dp->compliance.test_lane_count)) { 1549 index = intel_dp_rate_index(intel_dp->common_rates, 1550 intel_dp->num_common_rates, 1551 intel_dp->compliance.test_link_rate); 1552 if (index >= 0) 1553 limits->min_rate = limits->max_rate = 1554 intel_dp->compliance.test_link_rate; 1555 limits->min_lane_count = limits->max_lane_count = 1556 intel_dp->compliance.test_lane_count; 1557 } 1558 } 1559 } 1560 1561 static bool has_seamless_m_n(struct intel_connector *connector) 1562 { 1563 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1564 1565 /* 1566 * Seamless M/N reprogramming only implemented 1567 * for BDW+ double buffered M/N registers so far. 
1568 */ 1569 return HAS_DOUBLE_BUFFERED_M_N(i915) && 1570 intel_panel_drrs_type(connector) == DRRS_TYPE_SEAMLESS; 1571 } 1572 1573 static int intel_dp_mode_clock(const struct intel_crtc_state *crtc_state, 1574 const struct drm_connector_state *conn_state) 1575 { 1576 struct intel_connector *connector = to_intel_connector(conn_state->connector); 1577 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 1578 1579 /* FIXME a bit of a mess wrt clock vs. crtc_clock */ 1580 if (has_seamless_m_n(connector)) 1581 return intel_panel_highest_mode(connector, adjusted_mode)->clock; 1582 else 1583 return adjusted_mode->crtc_clock; 1584 } 1585 1586 /* Optimize link config in order: max bpp, min clock, min lanes */ 1587 static int 1588 intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, 1589 struct intel_crtc_state *pipe_config, 1590 const struct drm_connector_state *conn_state, 1591 const struct link_config_limits *limits) 1592 { 1593 int bpp, i, lane_count, clock = intel_dp_mode_clock(pipe_config, conn_state); 1594 int mode_rate, link_rate, link_avail; 1595 1596 for (bpp = to_bpp_int(limits->link.max_bpp_x16); 1597 bpp >= to_bpp_int(limits->link.min_bpp_x16); 1598 bpp -= 2 * 3) { 1599 int link_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp); 1600 1601 mode_rate = intel_dp_link_required(clock, link_bpp); 1602 1603 for (i = 0; i < intel_dp->num_common_rates; i++) { 1604 link_rate = intel_dp_common_rate(intel_dp, i); 1605 if (link_rate < limits->min_rate || 1606 link_rate > limits->max_rate) 1607 continue; 1608 1609 for (lane_count = limits->min_lane_count; 1610 lane_count <= limits->max_lane_count; 1611 lane_count <<= 1) { 1612 link_avail = intel_dp_max_link_data_rate(intel_dp, 1613 link_rate, 1614 lane_count); 1615 1616 1617 if (mode_rate <= link_avail) { 1618 pipe_config->lane_count = lane_count; 1619 pipe_config->pipe_bpp = bpp; 1620 pipe_config->port_clock = link_rate; 1621 1622 return 0; 1623 } 1624 } 1625 } 1626 } 1627 1628 
return -EINVAL; 1629 } 1630 1631 static 1632 u8 intel_dp_dsc_max_src_input_bpc(struct drm_i915_private *i915) 1633 { 1634 /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */ 1635 if (DISPLAY_VER(i915) >= 12) 1636 return 12; 1637 if (DISPLAY_VER(i915) == 11) 1638 return 10; 1639 1640 return 0; 1641 } 1642 1643 int intel_dp_dsc_compute_max_bpp(const struct intel_connector *connector, 1644 u8 max_req_bpc) 1645 { 1646 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1647 int i, num_bpc; 1648 u8 dsc_bpc[3] = {}; 1649 u8 dsc_max_bpc; 1650 1651 dsc_max_bpc = intel_dp_dsc_max_src_input_bpc(i915); 1652 1653 if (!dsc_max_bpc) 1654 return dsc_max_bpc; 1655 1656 dsc_max_bpc = min_t(u8, dsc_max_bpc, max_req_bpc); 1657 1658 num_bpc = drm_dp_dsc_sink_supported_input_bpcs(connector->dp.dsc_dpcd, 1659 dsc_bpc); 1660 for (i = 0; i < num_bpc; i++) { 1661 if (dsc_max_bpc >= dsc_bpc[i]) 1662 return dsc_bpc[i] * 3; 1663 } 1664 1665 return 0; 1666 } 1667 1668 static int intel_dp_source_dsc_version_minor(struct drm_i915_private *i915) 1669 { 1670 return DISPLAY_VER(i915) >= 14 ? 2 : 1; 1671 } 1672 1673 static int intel_dp_sink_dsc_version_minor(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) 1674 { 1675 return (dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & DP_DSC_MINOR_MASK) >> 1676 DP_DSC_MINOR_SHIFT; 1677 } 1678 1679 static int intel_dp_get_slice_height(int vactive) 1680 { 1681 int slice_height; 1682 1683 /* 1684 * VDSC 1.2a spec in Section 3.8 Options for Slices implies that 108 1685 * lines is an optimal slice height, but any size can be used as long as 1686 * vertical active integer multiple and maximum vertical slice count 1687 * requirements are met. 
1688 */ 1689 for (slice_height = 108; slice_height <= vactive; slice_height += 2) 1690 if (vactive % slice_height == 0) 1691 return slice_height; 1692 1693 /* 1694 * Highly unlikely we reach here as most of the resolutions will end up 1695 * finding appropriate slice_height in above loop but returning 1696 * slice_height as 2 here as it should work with all resolutions. 1697 */ 1698 return 2; 1699 } 1700 1701 static int intel_dp_dsc_compute_params(const struct intel_connector *connector, 1702 struct intel_crtc_state *crtc_state) 1703 { 1704 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1705 struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; 1706 u8 line_buf_depth; 1707 int ret; 1708 1709 /* 1710 * RC_MODEL_SIZE is currently a constant across all configurations. 1711 * 1712 * FIXME: Look into using sink defined DPCD DP_DSC_RC_BUF_BLK_SIZE and 1713 * DP_DSC_RC_BUF_SIZE for this. 1714 */ 1715 vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST; 1716 vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay; 1717 1718 vdsc_cfg->slice_height = intel_dp_get_slice_height(vdsc_cfg->pic_height); 1719 1720 ret = intel_dsc_compute_params(crtc_state); 1721 if (ret) 1722 return ret; 1723 1724 vdsc_cfg->dsc_version_major = 1725 (connector->dp.dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & 1726 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT; 1727 vdsc_cfg->dsc_version_minor = 1728 min(intel_dp_source_dsc_version_minor(i915), 1729 intel_dp_sink_dsc_version_minor(connector->dp.dsc_dpcd)); 1730 if (vdsc_cfg->convert_rgb) 1731 vdsc_cfg->convert_rgb = 1732 connector->dp.dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] & 1733 DP_DSC_RGB; 1734 1735 line_buf_depth = drm_dp_dsc_sink_line_buf_depth(connector->dp.dsc_dpcd); 1736 if (!line_buf_depth) { 1737 drm_dbg_kms(&i915->drm, 1738 "DSC Sink Line Buffer Depth invalid\n"); 1739 return -EINVAL; 1740 } 1741 1742 if (vdsc_cfg->dsc_version_minor == 2) 1743 vdsc_cfg->line_buf_depth = (line_buf_depth == 
DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ? 1744 DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth; 1745 else 1746 vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ? 1747 DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth; 1748 1749 vdsc_cfg->block_pred_enable = 1750 connector->dp.dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] & 1751 DP_DSC_BLK_PREDICTION_IS_SUPPORTED; 1752 1753 return drm_dsc_compute_rc_parameters(vdsc_cfg); 1754 } 1755 1756 static bool intel_dp_dsc_supports_format(const struct intel_connector *connector, 1757 enum intel_output_format output_format) 1758 { 1759 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1760 u8 sink_dsc_format; 1761 1762 switch (output_format) { 1763 case INTEL_OUTPUT_FORMAT_RGB: 1764 sink_dsc_format = DP_DSC_RGB; 1765 break; 1766 case INTEL_OUTPUT_FORMAT_YCBCR444: 1767 sink_dsc_format = DP_DSC_YCbCr444; 1768 break; 1769 case INTEL_OUTPUT_FORMAT_YCBCR420: 1770 if (min(intel_dp_source_dsc_version_minor(i915), 1771 intel_dp_sink_dsc_version_minor(connector->dp.dsc_dpcd)) < 2) 1772 return false; 1773 sink_dsc_format = DP_DSC_YCbCr420_Native; 1774 break; 1775 default: 1776 return false; 1777 } 1778 1779 return drm_dp_dsc_sink_supports_format(connector->dp.dsc_dpcd, sink_dsc_format); 1780 } 1781 1782 static bool is_bw_sufficient_for_dsc_config(u16 compressed_bppx16, u32 link_clock, 1783 u32 lane_count, u32 mode_clock, 1784 enum intel_output_format output_format, 1785 int timeslots) 1786 { 1787 u32 available_bw, required_bw; 1788 1789 available_bw = (link_clock * lane_count * timeslots * 16) / 8; 1790 required_bw = compressed_bppx16 * (intel_dp_mode_to_fec_clock(mode_clock)); 1791 1792 return available_bw > required_bw; 1793 } 1794 1795 static int dsc_compute_link_config(struct intel_dp *intel_dp, 1796 struct intel_crtc_state *pipe_config, 1797 struct link_config_limits *limits, 1798 u16 compressed_bppx16, 1799 int timeslots) 1800 { 1801 const struct drm_display_mode *adjusted_mode = 
&pipe_config->hw.adjusted_mode; 1802 int link_rate, lane_count; 1803 int i; 1804 1805 for (i = 0; i < intel_dp->num_common_rates; i++) { 1806 link_rate = intel_dp_common_rate(intel_dp, i); 1807 if (link_rate < limits->min_rate || link_rate > limits->max_rate) 1808 continue; 1809 1810 for (lane_count = limits->min_lane_count; 1811 lane_count <= limits->max_lane_count; 1812 lane_count <<= 1) { 1813 if (!is_bw_sufficient_for_dsc_config(compressed_bppx16, link_rate, 1814 lane_count, adjusted_mode->clock, 1815 pipe_config->output_format, 1816 timeslots)) 1817 continue; 1818 1819 pipe_config->lane_count = lane_count; 1820 pipe_config->port_clock = link_rate; 1821 1822 return 0; 1823 } 1824 } 1825 1826 return -EINVAL; 1827 } 1828 1829 static 1830 u16 intel_dp_dsc_max_sink_compressed_bppx16(const struct intel_connector *connector, 1831 struct intel_crtc_state *pipe_config, 1832 int bpc) 1833 { 1834 u16 max_bppx16 = drm_edp_dsc_sink_output_bpp(connector->dp.dsc_dpcd); 1835 1836 if (max_bppx16) 1837 return max_bppx16; 1838 /* 1839 * If support not given in DPCD 67h, 68h use the Maximum Allowed bit rate 1840 * values as given in spec Table 2-157 DP v2.0 1841 */ 1842 switch (pipe_config->output_format) { 1843 case INTEL_OUTPUT_FORMAT_RGB: 1844 case INTEL_OUTPUT_FORMAT_YCBCR444: 1845 return (3 * bpc) << 4; 1846 case INTEL_OUTPUT_FORMAT_YCBCR420: 1847 return (3 * (bpc / 2)) << 4; 1848 default: 1849 MISSING_CASE(pipe_config->output_format); 1850 break; 1851 } 1852 1853 return 0; 1854 } 1855 1856 int intel_dp_dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config) 1857 { 1858 /* From Mandatory bit rate range Support Table 2-157 (DP v2.0) */ 1859 switch (pipe_config->output_format) { 1860 case INTEL_OUTPUT_FORMAT_RGB: 1861 case INTEL_OUTPUT_FORMAT_YCBCR444: 1862 return 8; 1863 case INTEL_OUTPUT_FORMAT_YCBCR420: 1864 return 6; 1865 default: 1866 MISSING_CASE(pipe_config->output_format); 1867 break; 1868 } 1869 1870 return 0; 1871 } 1872 1873 int 
intel_dp_dsc_sink_max_compressed_bpp(const struct intel_connector *connector, 1874 struct intel_crtc_state *pipe_config, 1875 int bpc) 1876 { 1877 return intel_dp_dsc_max_sink_compressed_bppx16(connector, 1878 pipe_config, bpc) >> 4; 1879 } 1880 1881 static int dsc_src_min_compressed_bpp(void) 1882 { 1883 /* Min Compressed bpp supported by source is 8 */ 1884 return 8; 1885 } 1886 1887 static int dsc_src_max_compressed_bpp(struct intel_dp *intel_dp) 1888 { 1889 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1890 1891 /* 1892 * Max Compressed bpp for Gen 13+ is 27bpp. 1893 * For earlier platform is 23bpp. (Bspec:49259). 1894 */ 1895 if (DISPLAY_VER(i915) < 13) 1896 return 23; 1897 else 1898 return 27; 1899 } 1900 1901 /* 1902 * From a list of valid compressed bpps try different compressed bpp and find a 1903 * suitable link configuration that can support it. 1904 */ 1905 static int 1906 icl_dsc_compute_link_config(struct intel_dp *intel_dp, 1907 struct intel_crtc_state *pipe_config, 1908 struct link_config_limits *limits, 1909 int dsc_max_bpp, 1910 int dsc_min_bpp, 1911 int pipe_bpp, 1912 int timeslots) 1913 { 1914 int i, ret; 1915 1916 /* Compressed BPP should be less than the Input DSC bpp */ 1917 dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1); 1918 1919 for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp); i++) { 1920 if (valid_dsc_bpp[i] < dsc_min_bpp || 1921 valid_dsc_bpp[i] > dsc_max_bpp) 1922 break; 1923 1924 ret = dsc_compute_link_config(intel_dp, 1925 pipe_config, 1926 limits, 1927 valid_dsc_bpp[i] << 4, 1928 timeslots); 1929 if (ret == 0) { 1930 pipe_config->dsc.compressed_bpp_x16 = 1931 to_bpp_x16(valid_dsc_bpp[i]); 1932 return 0; 1933 } 1934 } 1935 1936 return -EINVAL; 1937 } 1938 1939 /* 1940 * From XE_LPD onwards we supports compression bpps in steps of 1 up to 1941 * uncompressed bpp-1. 
So we start from max compressed bpp and see if any 1942 * link configuration is able to support that compressed bpp, if not we 1943 * step down and check for lower compressed bpp. 1944 */ 1945 static int 1946 xelpd_dsc_compute_link_config(struct intel_dp *intel_dp, 1947 const struct intel_connector *connector, 1948 struct intel_crtc_state *pipe_config, 1949 struct link_config_limits *limits, 1950 int dsc_max_bpp, 1951 int dsc_min_bpp, 1952 int pipe_bpp, 1953 int timeslots) 1954 { 1955 u8 bppx16_incr = drm_dp_dsc_sink_bpp_incr(connector->dp.dsc_dpcd); 1956 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1957 u16 compressed_bppx16; 1958 u8 bppx16_step; 1959 int ret; 1960 1961 if (DISPLAY_VER(i915) < 14 || bppx16_incr <= 1) 1962 bppx16_step = 16; 1963 else 1964 bppx16_step = 16 / bppx16_incr; 1965 1966 /* Compressed BPP should be less than the Input DSC bpp */ 1967 dsc_max_bpp = min(dsc_max_bpp << 4, (pipe_bpp << 4) - bppx16_step); 1968 dsc_min_bpp = dsc_min_bpp << 4; 1969 1970 for (compressed_bppx16 = dsc_max_bpp; 1971 compressed_bppx16 >= dsc_min_bpp; 1972 compressed_bppx16 -= bppx16_step) { 1973 if (intel_dp->force_dsc_fractional_bpp_en && 1974 !to_bpp_frac(compressed_bppx16)) 1975 continue; 1976 ret = dsc_compute_link_config(intel_dp, 1977 pipe_config, 1978 limits, 1979 compressed_bppx16, 1980 timeslots); 1981 if (ret == 0) { 1982 pipe_config->dsc.compressed_bpp_x16 = compressed_bppx16; 1983 if (intel_dp->force_dsc_fractional_bpp_en && 1984 to_bpp_frac(compressed_bppx16)) 1985 drm_dbg_kms(&i915->drm, "Forcing DSC fractional bpp\n"); 1986 1987 return 0; 1988 } 1989 } 1990 return -EINVAL; 1991 } 1992 1993 static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp, 1994 const struct intel_connector *connector, 1995 struct intel_crtc_state *pipe_config, 1996 struct link_config_limits *limits, 1997 int pipe_bpp, 1998 int timeslots) 1999 { 2000 const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2001 struct drm_i915_private *i915 = 
dp_to_i915(intel_dp); 2002 int dsc_src_min_bpp, dsc_sink_min_bpp, dsc_min_bpp; 2003 int dsc_src_max_bpp, dsc_sink_max_bpp, dsc_max_bpp; 2004 int dsc_joiner_max_bpp; 2005 2006 dsc_src_min_bpp = dsc_src_min_compressed_bpp(); 2007 dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config); 2008 dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp); 2009 dsc_min_bpp = max(dsc_min_bpp, to_bpp_int_roundup(limits->link.min_bpp_x16)); 2010 2011 dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp); 2012 dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector, 2013 pipe_config, 2014 pipe_bpp / 3); 2015 dsc_max_bpp = dsc_sink_max_bpp ? min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp; 2016 2017 dsc_joiner_max_bpp = get_max_compressed_bpp_with_joiner(i915, adjusted_mode->clock, 2018 adjusted_mode->hdisplay, 2019 pipe_config->bigjoiner_pipes); 2020 dsc_max_bpp = min(dsc_max_bpp, dsc_joiner_max_bpp); 2021 dsc_max_bpp = min(dsc_max_bpp, to_bpp_int(limits->link.max_bpp_x16)); 2022 2023 if (DISPLAY_VER(i915) >= 13) 2024 return xelpd_dsc_compute_link_config(intel_dp, connector, pipe_config, limits, 2025 dsc_max_bpp, dsc_min_bpp, pipe_bpp, timeslots); 2026 return icl_dsc_compute_link_config(intel_dp, pipe_config, limits, 2027 dsc_max_bpp, dsc_min_bpp, pipe_bpp, timeslots); 2028 } 2029 2030 static 2031 u8 intel_dp_dsc_min_src_input_bpc(struct drm_i915_private *i915) 2032 { 2033 /* Min DSC Input BPC for ICL+ is 8 */ 2034 return HAS_DSC(i915) ? 
8 : 0; 2035 } 2036 2037 static 2038 bool is_dsc_pipe_bpp_sufficient(struct drm_i915_private *i915, 2039 struct drm_connector_state *conn_state, 2040 struct link_config_limits *limits, 2041 int pipe_bpp) 2042 { 2043 u8 dsc_max_bpc, dsc_min_bpc, dsc_max_pipe_bpp, dsc_min_pipe_bpp; 2044 2045 dsc_max_bpc = min(intel_dp_dsc_max_src_input_bpc(i915), conn_state->max_requested_bpc); 2046 dsc_min_bpc = intel_dp_dsc_min_src_input_bpc(i915); 2047 2048 dsc_max_pipe_bpp = min(dsc_max_bpc * 3, limits->pipe.max_bpp); 2049 dsc_min_pipe_bpp = max(dsc_min_bpc * 3, limits->pipe.min_bpp); 2050 2051 return pipe_bpp >= dsc_min_pipe_bpp && 2052 pipe_bpp <= dsc_max_pipe_bpp; 2053 } 2054 2055 static 2056 int intel_dp_force_dsc_pipe_bpp(struct intel_dp *intel_dp, 2057 struct drm_connector_state *conn_state, 2058 struct link_config_limits *limits) 2059 { 2060 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2061 int forced_bpp; 2062 2063 if (!intel_dp->force_dsc_bpc) 2064 return 0; 2065 2066 forced_bpp = intel_dp->force_dsc_bpc * 3; 2067 2068 if (is_dsc_pipe_bpp_sufficient(i915, conn_state, limits, forced_bpp)) { 2069 drm_dbg_kms(&i915->drm, "Input DSC BPC forced to %d\n", intel_dp->force_dsc_bpc); 2070 return forced_bpp; 2071 } 2072 2073 drm_dbg_kms(&i915->drm, "Cannot force DSC BPC:%d, due to DSC BPC limits\n", 2074 intel_dp->force_dsc_bpc); 2075 2076 return 0; 2077 } 2078 2079 static int intel_dp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp, 2080 struct intel_crtc_state *pipe_config, 2081 struct drm_connector_state *conn_state, 2082 struct link_config_limits *limits, 2083 int timeslots) 2084 { 2085 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2086 const struct intel_connector *connector = 2087 to_intel_connector(conn_state->connector); 2088 u8 max_req_bpc = conn_state->max_requested_bpc; 2089 u8 dsc_max_bpc, dsc_max_bpp; 2090 u8 dsc_min_bpc, dsc_min_bpp; 2091 u8 dsc_bpc[3] = {}; 2092 int forced_bpp, pipe_bpp; 2093 int num_bpc, i, ret; 2094 2095 forced_bpp = 
intel_dp_force_dsc_pipe_bpp(intel_dp, conn_state, limits);

	/* A forced bpp is tried first; fall through to the search if it fails. */
	if (forced_bpp) {
		ret = dsc_compute_compressed_bpp(intel_dp, connector, pipe_config,
						 limits, forced_bpp, timeslots);
		if (ret == 0) {
			pipe_config->pipe_bpp = forced_bpp;
			return 0;
		}
	}

	dsc_max_bpc = intel_dp_dsc_max_src_input_bpc(i915);
	if (!dsc_max_bpc)
		return -EINVAL;

	dsc_max_bpc = min_t(u8, dsc_max_bpc, max_req_bpc);
	dsc_max_bpp = min(dsc_max_bpc * 3, limits->pipe.max_bpp);

	dsc_min_bpc = intel_dp_dsc_min_src_input_bpc(i915);
	dsc_min_bpp = max(dsc_min_bpc * 3, limits->pipe.min_bpp);

	/*
	 * Get the maximum DSC bpc that will be supported by any valid
	 * link configuration and compressed bpp.
	 */
	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(connector->dp.dsc_dpcd, dsc_bpc);
	for (i = 0; i < num_bpc; i++) {
		pipe_bpp = dsc_bpc[i] * 3;
		/* NOTE(review): early break assumes dsc_bpc[] is sorted high-to-low — confirm */
		if (pipe_bpp < dsc_min_bpp)
			break;
		if (pipe_bpp > dsc_max_bpp)
			continue;
		ret = dsc_compute_compressed_bpp(intel_dp, connector, pipe_config,
						 limits, pipe_bpp, timeslots);
		if (ret == 0) {
			pipe_config->pipe_bpp = pipe_bpp;
			return 0;
		}
	}

	return -EINVAL;
}

/*
 * eDP variant: pick the max supported pipe bpp directly (no bpc iteration)
 * and derive the compressed bpp range; port clock and lane count are taken
 * from the limits' maximums.
 */
static int intel_edp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
					  struct intel_crtc_state *pipe_config,
					  struct drm_connector_state *conn_state,
					  struct link_config_limits *limits)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	int pipe_bpp, forced_bpp;
	int dsc_src_min_bpp, dsc_sink_min_bpp, dsc_min_bpp;
	int dsc_src_max_bpp, dsc_sink_max_bpp, dsc_max_bpp;

	forced_bpp = intel_dp_force_dsc_pipe_bpp(intel_dp, conn_state, limits);

	if (forced_bpp) {
		pipe_bpp = forced_bpp;
	} else {
		int max_bpc = min(limits->pipe.max_bpp / 3, (int)conn_state->max_requested_bpc);

		/* For eDP use max bpp that can be supported with DSC. */
		pipe_bpp = intel_dp_dsc_compute_max_bpp(connector, max_bpc);
		if (!is_dsc_pipe_bpp_sufficient(i915, conn_state, limits, pipe_bpp)) {
			drm_dbg_kms(&i915->drm,
				    "Computed BPC is not in DSC BPC limits\n");
			return -EINVAL;
		}
	}
	pipe_config->port_clock = limits->max_rate;
	pipe_config->lane_count = limits->max_lane_count;

	/* Lower bound: largest of source, sink and link minimum compressed bpp. */
	dsc_src_min_bpp = dsc_src_min_compressed_bpp();
	dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config);
	dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp);
	dsc_min_bpp = max(dsc_min_bpp, to_bpp_int_roundup(limits->link.min_bpp_x16));

	dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp);
	dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
								pipe_config,
								pipe_bpp / 3);
	/* A zero sink limit means no limit was reported; use the source limit alone. */
	dsc_max_bpp = dsc_sink_max_bpp ? min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp;
	dsc_max_bpp = min(dsc_max_bpp, to_bpp_int(limits->link.max_bpp_x16));

	/* Compressed BPP should be less than the Input DSC bpp */
	dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);

	pipe_config->dsc.compressed_bpp_x16 =
		to_bpp_x16(max(dsc_min_bpp, dsc_max_bpp));

	pipe_config->pipe_bpp = pipe_bpp;

	return 0;
}

/*
 * Compute the full DSC configuration (FEC, pipe bpp, compressed bpp, slice
 * count, DSC parameters) for @pipe_config. @compute_pipe_bpp is false for
 * the DP MST case, where the bpp is derived during timeslot allocation.
 */
int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
				struct intel_crtc_state *pipe_config,
				struct drm_connector_state *conn_state,
				struct link_config_limits *limits,
				int timeslots,
				bool compute_pipe_bpp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	int ret;

	/* Enable FEC on non-eDP links that support it; keep it if already set. */
	pipe_config->fec_enable = pipe_config->fec_enable ||
(!intel_dp_is_edp(intel_dp) &&
				  intel_dp_supports_fec(intel_dp, connector, pipe_config));

	if (!intel_dp_supports_dsc(connector, pipe_config))
		return -EINVAL;

	if (!intel_dp_dsc_supports_format(connector, pipe_config->output_format))
		return -EINVAL;

	/*
	 * compute pipe bpp is set to false for DP MST DSC case
	 * and compressed_bpp is calculated same time once
	 * vpci timeslots are allocated, because overall bpp
	 * calculation procedure is bit different for MST case.
	 */
	if (compute_pipe_bpp) {
		if (intel_dp_is_edp(intel_dp))
			ret = intel_edp_dsc_compute_pipe_bpp(intel_dp, pipe_config,
							     conn_state, limits);
		else
			ret = intel_dp_dsc_compute_pipe_bpp(intel_dp, pipe_config,
							    conn_state, limits, timeslots);
		if (ret) {
			drm_dbg_kms(&dev_priv->drm,
				    "No Valid pipe bpp for given mode ret = %d\n", ret);
			return ret;
		}
	}

	/* Calculate Slice count */
	if (intel_dp_is_edp(intel_dp)) {
		pipe_config->dsc.slice_count =
			drm_dp_dsc_sink_max_slice_count(connector->dp.dsc_dpcd,
							true);
		if (!pipe_config->dsc.slice_count) {
			drm_dbg_kms(&dev_priv->drm, "Unsupported Slice Count %d\n",
				    pipe_config->dsc.slice_count);
			return -EINVAL;
		}
	} else {
		u8 dsc_dp_slice_count;

		dsc_dp_slice_count =
			intel_dp_dsc_get_slice_count(connector,
						     adjusted_mode->crtc_clock,
						     adjusted_mode->crtc_hdisplay,
						     pipe_config->bigjoiner_pipes);
		if (!dsc_dp_slice_count) {
			drm_dbg_kms(&dev_priv->drm,
				    "Compressed Slice Count not supported\n");
			return -EINVAL;
		}

		pipe_config->dsc.slice_count = dsc_dp_slice_count;
	}
	/*
	 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
	 * is greater than the maximum Cdclock and if slice count is even
	 * then we need to use 2 VDSC instances.
	 */
	if (pipe_config->bigjoiner_pipes || pipe_config->dsc.slice_count > 1)
		pipe_config->dsc.dsc_split = true;

	ret = intel_dp_dsc_compute_params(connector, pipe_config);
	if (ret < 0) {
		/*
		 * NOTE(review): the two adjacent string literals join as
		 * "...Input Bpp = %dCompressed BPP..." — a space appears to be
		 * missing before "Compressed".
		 */
		drm_dbg_kms(&dev_priv->drm,
			    "Cannot compute valid DSC parameters for Input Bpp = %d"
			    "Compressed BPP = " BPP_X16_FMT "\n",
			    pipe_config->pipe_bpp,
			    BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16));
		return ret;
	}

	pipe_config->dsc.compression_enable = true;
	drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
		    "Compressed Bpp = " BPP_X16_FMT " Slice Count = %d\n",
		    pipe_config->pipe_bpp,
		    BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16),
		    pipe_config->dsc.slice_count);

	return 0;
}

/**
 * intel_dp_compute_config_link_bpp_limits - compute output link bpp limits
 * @intel_dp: intel DP
 * @crtc_state: crtc state
 * @dsc: DSC compression mode
 * @limits: link configuration limits
 *
 * Calculates the output link min, max bpp values in @limits based on the
 * pipe bpp range, @crtc_state and @dsc mode.
 *
 * Returns %true in case of success.
 */
bool
intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
					const struct intel_crtc_state *crtc_state,
					bool dsc,
					struct link_config_limits *limits)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	int max_link_bpp_x16;

	max_link_bpp_x16 = min(crtc_state->max_link_bpp_x16,
			       to_bpp_x16(limits->pipe.max_bpp));

	if (!dsc) {
		/* Uncompressed link bpp is rounded down to a multiple of 2*3 (even bpc). */
		max_link_bpp_x16 = rounddown(max_link_bpp_x16, to_bpp_x16(2 * 3));

		if (max_link_bpp_x16 < to_bpp_x16(limits->pipe.min_bpp))
			return false;

		limits->link.min_bpp_x16 = to_bpp_x16(limits->pipe.min_bpp);
	} else {
		/*
		 * TODO: set the DSC link limits already here, atm these are
		 * initialized only later in intel_edp_dsc_compute_pipe_bpp() /
		 * intel_dp_dsc_compute_pipe_bpp()
		 */
		limits->link.min_bpp_x16 = 0;
	}

	limits->link.max_bpp_x16 = max_link_bpp_x16;

	drm_dbg_kms(&i915->drm,
		    "[ENCODER:%d:%s][CRTC:%d:%s] DP link limits: pixel clock %d kHz DSC %s max lanes %d max rate %d max pipe_bpp %d max link_bpp " BPP_X16_FMT "\n",
		    encoder->base.base.id, encoder->base.name,
		    crtc->base.base.id, crtc->base.name,
		    adjusted_mode->crtc_clock,
		    dsc ? "on" : "off",
		    limits->max_lane_count,
		    limits->max_rate,
		    limits->pipe.max_bpp,
		    BPP_X16_ARGS(limits->link.max_bpp_x16));

	return true;
}

/*
 * Fill @limits with the link rate / lane count / pipe bpp search bounds for
 * @crtc_state and derive the link bpp limits from them.
 */
static bool
intel_dp_compute_config_limits(struct intel_dp *intel_dp,
			       struct intel_crtc_state *crtc_state,
			       bool respect_downstream_limits,
			       bool dsc,
			       struct link_config_limits *limits)
{
	limits->min_rate = intel_dp_common_rate(intel_dp, 0);
	limits->max_rate = intel_dp_max_link_rate(intel_dp);

	/* FIXME 128b/132b SST support missing */
	limits->max_rate = min(limits->max_rate, 810000);

	limits->min_lane_count = 1;
	limits->max_lane_count = intel_dp_max_lane_count(intel_dp);

	limits->pipe.min_bpp = intel_dp_min_bpp(crtc_state->output_format);
	limits->pipe.max_bpp = intel_dp_max_bpp(intel_dp, crtc_state,
						respect_downstream_limits);

	if (intel_dp->use_max_params) {
		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertizes being capable of in case the initial fast
		 * optimal params failed us. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically on older panels these
		 * values correspond to the native resolution of the panel.
		 */
		limits->min_lane_count = limits->max_lane_count;
		limits->min_rate = limits->max_rate;
	}

	intel_dp_adjust_compliance_config(intel_dp, crtc_state, limits);

	return intel_dp_compute_config_link_bpp_limits(intel_dp,
						       crtc_state,
						       dsc,
						       limits);
}

/*
 * Return the link rate required to drive @crtc_state; uses the compressed
 * bpp when DSC is enabled, the pipe bpp otherwise.
 */
int intel_dp_config_required_rate(const struct intel_crtc_state *crtc_state)
{
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	int bpp = crtc_state->dsc.compression_enable ?
to_bpp_int_roundup(crtc_state->dsc.compressed_bpp_x16) :
		crtc_state->pipe_bpp;

	return intel_dp_link_required(adjusted_mode->crtc_clock, bpp);
}

/*
 * Pick link rate, lane count and (possibly compressed) bpp for @pipe_config,
 * falling back to DSC when the uncompressed configuration does not fit.
 */
static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
			     struct intel_crtc_state *pipe_config,
			     struct drm_connector_state *conn_state,
			     bool respect_downstream_limits)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	const struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct link_config_limits limits;
	bool joiner_needs_dsc = false;
	bool dsc_needed;
	int ret = 0;

	if (pipe_config->fec_enable &&
	    !intel_dp_supports_fec(intel_dp, connector, pipe_config))
		return -EINVAL;

	/* Big joiner uses this pipe and the next one. */
	if (intel_dp_need_bigjoiner(intel_dp, adjusted_mode->crtc_hdisplay,
				    adjusted_mode->crtc_clock))
		pipe_config->bigjoiner_pipes = GENMASK(crtc->pipe + 1, crtc->pipe);

	/*
	 * Pipe joiner needs compression up to display 12 due to bandwidth
	 * limitation. DG2 onwards pipe joiner can be enabled without
	 * compression.
	 */
	joiner_needs_dsc = DISPLAY_VER(i915) < 13 && pipe_config->bigjoiner_pipes;

	dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en ||
		     !intel_dp_compute_config_limits(intel_dp, pipe_config,
						     respect_downstream_limits,
						     false,
						     &limits);

	if (!dsc_needed) {
		/*
		 * Optimize for slow and wide for everything, because there are some
		 * eDP 1.3 and 1.4 panels don't work well with fast and narrow.
		 */
		ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config,
							conn_state, &limits);
		if (ret)
			dsc_needed = true;
	}

	if (dsc_needed) {
		drm_dbg_kms(&i915->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
			    str_yes_no(ret), str_yes_no(joiner_needs_dsc),
			    str_yes_no(intel_dp->force_dsc_en));

		/* Recompute the limits with DSC in mind before trying DSC. */
		if (!intel_dp_compute_config_limits(intel_dp, pipe_config,
						    respect_downstream_limits,
						    true,
						    &limits))
			return -EINVAL;

		ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
						  conn_state, &limits, 64, true);
		if (ret < 0)
			return ret;
	}

	drm_dbg_kms(&i915->drm,
		    "DP lane count %d clock %d bpp input %d compressed " BPP_X16_FMT " link rate required %d available %d\n",
		    pipe_config->lane_count, pipe_config->port_clock,
		    pipe_config->pipe_bpp,
		    BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16),
		    intel_dp_config_required_rate(pipe_config),
		    intel_dp_max_link_data_rate(intel_dp,
						pipe_config->port_clock,
						pipe_config->lane_count));

	return 0;
}

/* Whether RGB output should use the limited (CTA) quantization range. */
bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
				  const struct drm_connector_state *conn_state)
{
	const struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	/*
	 * Our YCbCr output is always limited range.
	 * crtc_state->limited_color_range only applies to RGB,
	 * and it must never be set for YCbCr or we risk setting
	 * some conflicting bits in TRANSCONF which will mess up
	 * the colors on the monitor.
	 */
	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		return false;

	if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		return crtc_state->pipe_bpp != 18 &&
			drm_default_rgb_quant_range(adjusted_mode) ==
			HDMI_QUANTIZATION_RANGE_LIMITED;
	} else {
		return intel_conn_state->broadcast_rgb ==
			INTEL_BROADCAST_RGB_LIMITED;
	}
}

/* Audio-capable ports: not on G4X, and not on port A before display 12. */
static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
				    enum port port)
{
	if (IS_G4X(dev_priv))
		return false;
	if (DISPLAY_VER(dev_priv) < 12 && port == PORT_A)
		return false;

	return true;
}

/*
 * Fill the VSC SDP header and pixel-format/colorimetry fields from the CRTC
 * and connector state (mapping per DP 1.4a Table 2-120).
 */
static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state,
					     const struct drm_connector_state *conn_state,
					     struct drm_dp_vsc_sdp *vsc)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (crtc_state->has_panel_replay) {
		/*
		 * Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223
		 * VSC SDP supporting 3D stereo, Panel Replay, and Pixel
		 * Encoding/Colorimetry Format indication.
		 */
		vsc->revision = 0x7;
	} else {
		/*
		 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
		 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
		 * Colorimetry Format indication.
		 */
		vsc->revision = 0x5;
	}

	vsc->length = 0x13;

	/* DP 1.4a spec, Table 2-120 */
	switch (crtc_state->output_format) {
	case INTEL_OUTPUT_FORMAT_YCBCR444:
		vsc->pixelformat = DP_PIXELFORMAT_YUV444;
		break;
	case INTEL_OUTPUT_FORMAT_YCBCR420:
		vsc->pixelformat = DP_PIXELFORMAT_YUV420;
		break;
	case INTEL_OUTPUT_FORMAT_RGB:
	default:
		vsc->pixelformat = DP_PIXELFORMAT_RGB;
	}

	switch (conn_state->colorspace) {
	case DRM_MODE_COLORIMETRY_BT709_YCC:
		vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_XVYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_709:
		vsc->colorimetry = DP_COLORIMETRY_XVYCC_709;
		break;
	case DRM_MODE_COLORIMETRY_SYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_SYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_OPYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_OPYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_CYCC:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_RGB:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_YCC:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC;
		break;
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
		vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB;
		break;
	default:
		/*
		 * RGB->YCBCR color conversion uses the BT.709
		 * color space.
		 */
		if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
			vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
		else
			vsc->colorimetry = DP_COLORIMETRY_DEFAULT;
		break;
	}

	vsc->bpc = crtc_state->pipe_bpp / 3;

	/* only RGB pixelformat supports 6 bpc */
	drm_WARN_ON(&dev_priv->drm,
		    vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB);

	/* all YCbCr are always limited range */
	vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA;
	vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED;
}

/*
 * Build the VSC SDP infoframe when colorimetry and/or PSR / Panel Replay
 * requires one; picks the SDP revision/length per the active feature.
 */
static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
				     struct intel_crtc_state *crtc_state,
				     const struct drm_connector_state *conn_state)
{
	struct drm_dp_vsc_sdp *vsc;

	/* Nothing to do without colorimetry support/need, unless PSR is on. */
	if ((!intel_dp->colorimetry_support ||
	     !intel_dp_needs_vsc_sdp(crtc_state, conn_state)) &&
	    !crtc_state->has_psr)
		return;

	vsc = &crtc_state->infoframes.vsc;

	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
	vsc->sdp_type = DP_SDP_VSC;

	/* Needs colorimetry */
	if (intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
		intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
						 vsc);
	} else if (crtc_state->has_psr2) {
		/*
		 * [PSR2 without colorimetry]
		 * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11
		 * 3D stereo + PSR/PSR2 + Y-coordinate.
		 */
		vsc->revision = 0x4;
		vsc->length = 0xe;
	} else if (crtc_state->has_panel_replay) {
		/*
		 * [Panel Replay without colorimetry info]
		 * Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223
		 * VSC SDP supporting 3D stereo + Panel Replay.
		 */
		vsc->revision = 0x6;
		vsc->length = 0x10;
	} else {
		/*
		 * [PSR1]
		 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
		 * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or
		 * higher).
		 */
		vsc->revision = 0x2;
		vsc->length = 0x8;
	}
}

/* Attach an HDR DRM infoframe when the connector state carries HDR metadata. */
static void
intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
					    struct intel_crtc_state *crtc_state,
					    const struct drm_connector_state *conn_state)
{
	int ret;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm;

	if (!conn_state->hdr_output_metadata)
		return;

	ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state);

	if (ret) {
		drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n");
		return;
	}

	crtc_state->infoframes.enable |=
		intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
}

/* DRRS eligibility: mutually exclusive with VRR and PSR; needs transcoder support. */
static bool can_enable_drrs(struct intel_connector *connector,
			    const struct intel_crtc_state *pipe_config,
			    const struct drm_display_mode *downclock_mode)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	if (pipe_config->vrr.enable)
		return false;

	/*
	 * DRRS and PSR can't be enable together, so giving preference to PSR
	 * as it allows more power-savings by complete shutting down display,
	 * so to guarantee this, intel_drrs_compute_config() must be called
	 * after intel_psr_compute_config().
	 */
	if (pipe_config->has_psr)
		return false;

	/* FIXME missing FDI M2/N2 etc.
 */
	if (pipe_config->has_pch_encoder)
		return false;

	if (!intel_cpu_transcoder_has_drrs(i915, pipe_config->cpu_transcoder))
		return false;

	return downclock_mode &&
		intel_panel_drrs_type(connector) == DRRS_TYPE_SEAMLESS;
}

/*
 * Configure seamless DRRS: compute the second set of M/N values for the
 * panel's downclock mode when DRRS can be enabled, clear stale M2/N2
 * otherwise.
 */
static void
intel_dp_drrs_compute_config(struct intel_connector *connector,
			     struct intel_crtc_state *pipe_config,
			     int link_bpp_x16)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	const struct drm_display_mode *downclock_mode =
		intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode);
	int pixel_clock;

	if (has_seamless_m_n(connector))
		pipe_config->update_m_n = true;

	if (!can_enable_drrs(connector, pipe_config, downclock_mode)) {
		/* Clear any stale M2/N2 so the hardware doesn't pick it up. */
		if (intel_cpu_transcoder_has_m2_n2(i915, pipe_config->cpu_transcoder))
			intel_zero_m_n(&pipe_config->dp_m2_n2);
		return;
	}

	if (IS_IRONLAKE(i915) || IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915))
		pipe_config->msa_timing_delay = connector->panel.vbt.edp.drrs_msa_timing_delay;

	pipe_config->has_drrs = true;

	pixel_clock = downclock_mode->clock;
	if (pipe_config->splitter.enable)
		pixel_clock /= pipe_config->splitter.link_count;

	intel_link_compute_m_n(link_bpp_x16, pipe_config->lane_count, pixel_clock,
			       pipe_config->port_clock,
			       intel_dp_bw_fec_overhead(pipe_config->fec_enable),
			       &pipe_config->dp_m2_n2);

	/* FIXME: abstract this better */
	if (pipe_config->splitter.enable)
		pipe_config->dp_m2_n2.data_m *= pipe_config->splitter.link_count;
}

/* Whether audio should be enabled for this config, honoring force_audio. */
static bool intel_dp_has_audio(struct intel_encoder *encoder,
			       struct intel_crtc_state *crtc_state,
			       const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	const struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);

	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
	    !intel_dp_port_has_audio(i915, encoder->port))
		return false;

	if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
		return connector->base.display_info.has_audio;
	else
		return intel_conn_state->force_audio == HDMI_AUDIO_ON;
}

/*
 * Pick the output format (RGB/YCbCr) and compute the link configuration,
 * retrying with YCbCr 4:2:0 if the preferred format doesn't fit the link.
 */
static int
intel_dp_compute_output_format(struct intel_encoder *encoder,
			       struct intel_crtc_state *crtc_state,
			       struct drm_connector_state *conn_state,
			       bool respect_downstream_limits)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_connector *connector = intel_dp->attached_connector;
	const struct drm_display_info *info = &connector->base.display_info;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	bool ycbcr_420_only;
	int ret;

	ycbcr_420_only = drm_mode_is_420_only(info, adjusted_mode);

	if (ycbcr_420_only && !connector->base.ycbcr_420_allowed) {
		drm_dbg_kms(&i915->drm,
			    "YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. Falling back to RGB.\n");
		crtc_state->sink_format = INTEL_OUTPUT_FORMAT_RGB;
	} else {
		crtc_state->sink_format = intel_dp_sink_format(connector, adjusted_mode);
	}

	crtc_state->output_format = intel_dp_output_format(connector, crtc_state->sink_format);

	ret = intel_dp_compute_link_config(encoder, crtc_state, conn_state,
					   respect_downstream_limits);
	if (ret) {
		/* Only retry if a 4:2:0 fallback is actually possible. */
		if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
		    !connector->base.ycbcr_420_allowed ||
		    !drm_mode_is_420_also(info, adjusted_mode))
			return ret;

		crtc_state->sink_format = INTEL_OUTPUT_FORMAT_YCBCR420;
		crtc_state->output_format = intel_dp_output_format(connector,
								   crtc_state->sink_format);
		ret = intel_dp_compute_link_config(encoder, crtc_state, conn_state,
						   respect_downstream_limits);
	}

	return ret;
}

/* Compute has_audio and whether SDP splitting is needed (audio on UHBR links). */
void
intel_dp_audio_compute_config(struct intel_encoder *encoder,
			      struct intel_crtc_state *pipe_config,
			      struct drm_connector_state *conn_state)
{
	pipe_config->has_audio =
		intel_dp_has_audio(encoder, pipe_config, conn_state) &&
		intel_audio_compute_config(encoder, pipe_config, conn_state);

	pipe_config->sdp_split_enable = pipe_config->has_audio &&
					intel_dp_is_uhbr(pipe_config);
}

/*
 * Queue the connector's modeset-retry work. A connector reference is taken
 * for the work and dropped immediately if the work was already queued.
 */
void intel_dp_queue_modeset_retry_work(struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	drm_connector_get(&connector->base);
	if (!queue_work(i915->unordered_wq, &connector->modeset_retry_work))
		drm_connector_put(&connector->base);
}

/* Queue modeset-retry work for every connector driven over this DP link. */
void
intel_dp_queue_modeset_retry_for_link(struct intel_atomic_state *state,
				      struct intel_encoder *encoder,
				      const struct intel_crtc_state *crtc_state)
{
	struct intel_connector *connector;
	struct intel_digital_connector_state *conn_state;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	int i;

	/* SST: only the single attached connector needs a retry. */
	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
		intel_dp_queue_modeset_retry_work(intel_dp->attached_connector);

		return;
	}

	/* MST: retry every enabled connector in @state sitting on this port. */
	for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
		if (!conn_state->base.crtc)
			continue;

		if (connector->mst_port == intel_dp)
			intel_dp_queue_modeset_retry_work(connector);
	}
}

/*
 * Top-level DP encoder .compute_config(): validates the mode, picks output
 * format and link configuration, then derives M/N values, audio, VRR, PSR,
 * DRRS and the SDP infoframes.
 */
int
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config,
			struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	const struct drm_display_mode *fixed_mode;
	struct intel_connector *connector = intel_dp->attached_connector;
	int ret = 0, link_bpp_x16;

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && encoder->port != PORT_A)
		pipe_config->has_pch_encoder = true;

	fixed_mode = intel_panel_fixed_mode(connector, adjusted_mode);
	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		ret = intel_panel_compute_config(connector, adjusted_mode);
		if (ret)
			return ret;
	}

	/* Unsupported timing flags. */
	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return -EINVAL;

	if (!connector->base.interlace_allowed &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
		return -EINVAL;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return -EINVAL;

	if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay))
		return -EINVAL;

	/*
	 * Try to respect downstream TMDS clock limits first, if
	 * that fails assume the user might know something we don't.
	 */
	ret = intel_dp_compute_output_format(encoder, pipe_config, conn_state, true);
	if (ret)
		ret = intel_dp_compute_output_format(encoder, pipe_config, conn_state, false);
	if (ret)
		return ret;

	if ((intel_dp_is_edp(intel_dp) && fixed_mode) ||
	    pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
		ret = intel_panel_fitting(pipe_config, conn_state);
		if (ret)
			return ret;
	}

	pipe_config->limited_color_range =
		intel_dp_limited_color_range(pipe_config, conn_state);

	pipe_config->enhanced_framing =
		drm_dp_enhanced_frame_cap(intel_dp->dpcd);

	/* Link bpp: compressed stream bpp with DSC, output bpp otherwise. */
	if (pipe_config->dsc.compression_enable)
		link_bpp_x16 = pipe_config->dsc.compressed_bpp_x16;
	else
		link_bpp_x16 = to_bpp_x16(intel_dp_output_bpp(pipe_config->output_format,
							      pipe_config->pipe_bpp));

	if (intel_dp->mso_link_count) {
		int n = intel_dp->mso_link_count;
		int overlap = intel_dp->mso_pixel_overlap;

		pipe_config->splitter.enable = true;
		pipe_config->splitter.link_count = n;
		pipe_config->splitter.pixel_overlap = overlap;

		drm_dbg_kms(&dev_priv->drm, "MSO link count %d, pixel overlap %d\n",
			    n, overlap);

		/* Each MSO segment carries 1/n of the width plus the overlap. */
		adjusted_mode->crtc_hdisplay = adjusted_mode->crtc_hdisplay / n + overlap;
		adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hblank_start / n + overlap;
		adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_end / n + overlap;
		adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hsync_start / n + overlap;
		adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_end / n + overlap;
		adjusted_mode->crtc_htotal = adjusted_mode->crtc_htotal / n + overlap;
		adjusted_mode->crtc_clock /= n;
	}

	intel_dp_audio_compute_config(encoder, pipe_config, conn_state);

	intel_link_compute_m_n(link_bpp_x16,
			       pipe_config->lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       intel_dp_bw_fec_overhead(pipe_config->fec_enable),
			       &pipe_config->dp_m_n);

	/* FIXME: abstract this better */
	if (pipe_config->splitter.enable)
		pipe_config->dp_m_n.data_m *= pipe_config->splitter.link_count;

	if (!HAS_DDI(dev_priv))
		g4x_dp_set_clock(encoder, pipe_config);

	/* Order matters: DRRS must be computed after PSR, see can_enable_drrs(). */
	intel_vrr_compute_config(pipe_config, conn_state);
	intel_psr_compute_config(intel_dp, pipe_config, conn_state);
	intel_dp_drrs_compute_config(connector, pipe_config, link_bpp_x16);
	intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
	intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);

	return intel_dp_tunnel_atomic_compute_stream_bw(state, intel_dp, connector,
							pipe_config);
}

/* Record the link parameters to be trained next; marks the link as untrained. */
void intel_dp_set_link_params(struct intel_dp *intel_dp,
			      int link_rate, int lane_count)
{
	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp->link_trained = false;
	intel_dp->link_rate = link_rate;
	intel_dp->lane_count = lane_count;
}

/* Reset the max link parameters to the source/sink common maximums. */
static void intel_dp_reset_max_link_params(struct intel_dp *intel_dp)
{
	intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
	intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
}

/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&i915->drm, "\n");

	intel_backlight_enable(crtc_state, conn_state);
	intel_pps_backlight_on(intel_dp);
}

/* Disable backlight PP control and backlight PWM.
*/ 3008 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state) 3009 { 3010 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)); 3011 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3012 3013 if (!intel_dp_is_edp(intel_dp)) 3014 return; 3015 3016 drm_dbg_kms(&i915->drm, "\n"); 3017 3018 intel_pps_backlight_off(intel_dp); 3019 intel_backlight_disable(old_conn_state); 3020 } 3021 3022 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp) 3023 { 3024 /* 3025 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus 3026 * be capable of signalling downstream hpd with a long pulse. 3027 * Whether or not that means D3 is safe to use is not clear, 3028 * but let's assume so until proven otherwise. 3029 * 3030 * FIXME should really check all downstream ports... 3031 */ 3032 return intel_dp->dpcd[DP_DPCD_REV] == 0x11 && 3033 drm_dp_is_branch(intel_dp->dpcd) && 3034 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD; 3035 } 3036 3037 static int 3038 write_dsc_decompression_flag(struct drm_dp_aux *aux, u8 flag, bool set) 3039 { 3040 int err; 3041 u8 val; 3042 3043 err = drm_dp_dpcd_readb(aux, DP_DSC_ENABLE, &val); 3044 if (err < 0) 3045 return err; 3046 3047 if (set) 3048 val |= flag; 3049 else 3050 val &= ~flag; 3051 3052 return drm_dp_dpcd_writeb(aux, DP_DSC_ENABLE, val); 3053 } 3054 3055 static void 3056 intel_dp_sink_set_dsc_decompression(struct intel_connector *connector, 3057 bool enable) 3058 { 3059 struct drm_i915_private *i915 = to_i915(connector->base.dev); 3060 3061 if (write_dsc_decompression_flag(connector->dp.dsc_decompression_aux, 3062 DP_DECOMPRESSION_EN, enable) < 0) 3063 drm_dbg_kms(&i915->drm, 3064 "Failed to %s sink decompression state\n", 3065 str_enable_disable(enable)); 3066 } 3067 3068 static void 3069 intel_dp_sink_set_dsc_passthrough(const struct intel_connector *connector, 3070 bool enable) 3071 { 3072 struct drm_i915_private *i915 = to_i915(connector->base.dev); 3073 struct 
	drm_dp_aux *aux = connector->port ?
		connector->port->passthrough_aux : NULL;

	/* No passthrough AUX (e.g. SST): nothing to program. */
	if (!aux)
		return;

	if (write_dsc_decompression_flag(aux,
					 DP_DSC_PASSTHROUGH_EN, enable) < 0)
		drm_dbg_kms(&i915->drm,
			    "Failed to %s sink compression passthrough state\n",
			    str_enable_disable(enable));
}

/*
 * Count the connectors in @state sharing @connector's DSC decompression AUX
 * that have decompression enabled. Used as a reference count so the
 * decompression state is only toggled by the first get / last put.
 */
static int intel_dp_dsc_aux_ref_count(struct intel_atomic_state *state,
				      const struct intel_connector *connector,
				      bool for_get_ref)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct drm_connector *_connector_iter;
	struct drm_connector_state *old_conn_state;
	struct drm_connector_state *new_conn_state;
	int ref_count = 0;
	int i;

	/*
	 * On SST the decompression AUX device won't be shared, each connector
	 * uses for this its own AUX targeting the sink device.
	 */
	if (!connector->mst_port)
		return connector->dp.dsc_decompression_enabled ? 1 : 0;

	for_each_oldnew_connector_in_state(&state->base, _connector_iter,
					   old_conn_state, new_conn_state, i) {
		const struct intel_connector *
			connector_iter = to_intel_connector(_connector_iter);

		if (connector_iter->mst_port != connector->mst_port)
			continue;

		if (!connector_iter->dp.dsc_decompression_enabled)
			continue;

		drm_WARN_ON(&i915->drm,
			    (for_get_ref && !new_conn_state->crtc) ||
			    (!for_get_ref && !old_conn_state->crtc));

		if (connector_iter->dp.dsc_decompression_aux ==
		    connector->dp.dsc_decompression_aux)
			ref_count++;
	}

	return ref_count;
}

/* Take a decompression AUX reference; returns true if this was the first one. */
static bool intel_dp_dsc_aux_get_ref(struct intel_atomic_state *state,
				     struct intel_connector *connector)
{
	bool ret = intel_dp_dsc_aux_ref_count(state, connector, true) == 0;

	connector->dp.dsc_decompression_enabled = true;

	return ret;
}

static bool intel_dp_dsc_aux_put_ref(struct intel_atomic_state
				     *state,
				     struct intel_connector *connector)
{
	/* Drop a decompression AUX reference; returns true if it was the last one. */
	connector->dp.dsc_decompression_enabled = false;

	return intel_dp_dsc_aux_ref_count(state, connector, false) == 0;
}

/**
 * intel_dp_sink_enable_decompression - Enable DSC decompression in sink/last branch device
 * @state: atomic state
 * @connector: connector to enable the decompression for
 * @new_crtc_state: new state for the CRTC driving @connector
 *
 * Enable the DSC decompression if required in the %DP_DSC_ENABLE DPCD
 * register of the appropriate sink/branch device. On SST this is always the
 * sink device, whereas on MST based on each device's DSC capabilities it's
 * either the last branch device (enabling decompression in it) or both the
 * last branch device (enabling passthrough in it) and the sink device
 * (enabling decompression in it).
 */
void intel_dp_sink_enable_decompression(struct intel_atomic_state *state,
					struct intel_connector *connector,
					const struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	if (!new_crtc_state->dsc.compression_enable)
		return;

	if (drm_WARN_ON(&i915->drm,
			!connector->dp.dsc_decompression_aux ||
			connector->dp.dsc_decompression_enabled))
		return;

	/* Only the first user of a shared decompression AUX programs the sink. */
	if (!intel_dp_dsc_aux_get_ref(state, connector))
		return;

	intel_dp_sink_set_dsc_passthrough(connector, true);
	intel_dp_sink_set_dsc_decompression(connector, true);
}

/**
 * intel_dp_sink_disable_decompression - Disable DSC decompression in sink/last branch device
 * @state: atomic state
 * @connector: connector to disable the decompression for
 * @old_crtc_state: old state for the CRTC driving @connector
 *
 * Disable the DSC decompression if required in the %DP_DSC_ENABLE DPCD
 * register of the appropriate sink/branch device, corresponding to the
 * sequence in
 * intel_dp_sink_enable_decompression().
 */
void intel_dp_sink_disable_decompression(struct intel_atomic_state *state,
					 struct intel_connector *connector,
					 const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	if (!old_crtc_state->dsc.compression_enable)
		return;

	if (drm_WARN_ON(&i915->drm,
			!connector->dp.dsc_decompression_aux ||
			!connector->dp.dsc_decompression_enabled))
		return;

	/* Only the last user of a shared decompression AUX deprograms the sink. */
	if (!intel_dp_dsc_aux_put_ref(state, connector))
		return;

	intel_dp_sink_set_dsc_decompression(connector, false);
	intel_dp_sink_set_dsc_passthrough(connector, false);
}

/*
 * Program the Intel source OUI in the sink so Intel-specific AUX services
 * (e.g. HDR backlight control) become available. With @careful the current
 * value is compared first to avoid clobbering sink state during driver init.
 */
static void
intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 oui[] = { 0x00, 0xaa, 0x01 };
	u8 buf[3] = {};

	/*
	 * During driver init, we want to be careful and avoid changing the source OUI if it's
	 * already set to what we want, so as to avoid clearing any state by accident
	 */
	if (careful) {
		if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0)
			drm_err(&i915->drm, "Failed to read source OUI\n");

		if (memcmp(oui, buf, sizeof(oui)) == 0)
			return;
	}

	if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0)
		drm_err(&i915->drm, "Failed to write source OUI\n");

	/* Timestamp for intel_dp_wait_source_oui(). */
	intel_dp->last_oui_write = jiffies;
}

void intel_dp_wait_source_oui(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Performing OUI wait (%u ms)\n",
		    connector->base.base.id, connector->base.name,
		    connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout);

	wait_remaining_ms_from_jiffies(intel_dp->last_oui_write,
				       connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout);
}

/* If the device supports it, try to set the power state appropriately */
void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DP_SET_POWER_D0) {
		/* Keep the sink in D0 if it must signal downstream HPD. */
		if (downstream_hpd_needs_d0(intel_dp))
			return;

		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
	} else {
		struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

		lspcon_resume(dp_to_dig_port(intel_dp));

		/* Write the source OUI as early as possible */
		if (intel_dp_is_edp(intel_dp))
			intel_edp_init_source_oui(intel_dp, false);

		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
			if (ret == 1)
				break;
			msleep(1);
		}

		if (ret == 1 && lspcon->active)
			lspcon_wait_pcon_mode(lspcon);
	}

	if (ret != 1)
		drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Set power to %s failed\n",
			    encoder->base.base.id, encoder->base.name,
			    mode == DP_SET_POWER_D0 ? "D0" : "D3");
}

/* Forward declaration; defined further down in this file. */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp);

/**
 * intel_dp_sync_state - sync the encoder state during init/resume
 * @encoder: intel encoder to sync
 * @crtc_state: state for the CRTC connected to the encoder
 *
 * Sync any state stored in the encoder wrt. HW state during driver init
 * and system resume.
 */
void intel_dp_sync_state(struct intel_encoder *encoder,
			 const struct intel_crtc_state *crtc_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	bool dpcd_updated = false;

	/*
	 * Don't clobber DPCD if it's been already read out during output
	 * setup (eDP) or detect.
	 */
	if (crtc_state && intel_dp->dpcd[DP_DPCD_REV] == 0) {
		intel_dp_get_dpcd(intel_dp);
		dpcd_updated = true;
	}

	intel_dp_tunnel_resume(intel_dp, crtc_state, dpcd_updated);

	if (crtc_state)
		intel_dp_reset_max_link_params(intel_dp);
}

bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	bool fastset = true;

	/*
	 * If BIOS has set an unsupported or non-standard link rate for some
	 * reason force an encoder recompute and full modeset.
	 */
	if (intel_dp_rate_index(intel_dp->source_rates, intel_dp->num_source_rates,
				crtc_state->port_clock) < 0) {
		drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset due to unsupported link rate\n",
			    encoder->base.base.id, encoder->base.name);
		crtc_state->uapi.connectors_changed = true;
		fastset = false;
	}

	/*
	 * FIXME hack to force full modeset when DSC is being used.
	 *
	 * As long as we do not have full state readout and config comparison
	 * of crtc_state->dsc, we have no way to ensure reliable fastset.
	 * Remove once we have readout for DSC.
3351 */ 3352 if (crtc_state->dsc.compression_enable) { 3353 drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset due to DSC being enabled\n", 3354 encoder->base.base.id, encoder->base.name); 3355 crtc_state->uapi.mode_changed = true; 3356 fastset = false; 3357 } 3358 3359 return fastset; 3360 } 3361 3362 static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp) 3363 { 3364 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3365 3366 /* Clear the cached register set to avoid using stale values */ 3367 3368 memset(intel_dp->pcon_dsc_dpcd, 0, sizeof(intel_dp->pcon_dsc_dpcd)); 3369 3370 if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER, 3371 intel_dp->pcon_dsc_dpcd, 3372 sizeof(intel_dp->pcon_dsc_dpcd)) < 0) 3373 drm_err(&i915->drm, "Failed to read DPCD register 0x%x\n", 3374 DP_PCON_DSC_ENCODER); 3375 3376 drm_dbg_kms(&i915->drm, "PCON ENCODER DSC DPCD: %*ph\n", 3377 (int)sizeof(intel_dp->pcon_dsc_dpcd), intel_dp->pcon_dsc_dpcd); 3378 } 3379 3380 static int intel_dp_pcon_get_frl_mask(u8 frl_bw_mask) 3381 { 3382 int bw_gbps[] = {9, 18, 24, 32, 40, 48}; 3383 int i; 3384 3385 for (i = ARRAY_SIZE(bw_gbps) - 1; i >= 0; i--) { 3386 if (frl_bw_mask & (1 << i)) 3387 return bw_gbps[i]; 3388 } 3389 return 0; 3390 } 3391 3392 static int intel_dp_pcon_set_frl_mask(int max_frl) 3393 { 3394 switch (max_frl) { 3395 case 48: 3396 return DP_PCON_FRL_BW_MASK_48GBPS; 3397 case 40: 3398 return DP_PCON_FRL_BW_MASK_40GBPS; 3399 case 32: 3400 return DP_PCON_FRL_BW_MASK_32GBPS; 3401 case 24: 3402 return DP_PCON_FRL_BW_MASK_24GBPS; 3403 case 18: 3404 return DP_PCON_FRL_BW_MASK_18GBPS; 3405 case 9: 3406 return DP_PCON_FRL_BW_MASK_9GBPS; 3407 } 3408 3409 return 0; 3410 } 3411 3412 static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp) 3413 { 3414 struct intel_connector *intel_connector = intel_dp->attached_connector; 3415 struct drm_connector *connector = &intel_connector->base; 3416 int max_frl_rate; 3417 int max_lanes, rate_per_lane; 3418 int max_dsc_lanes, 
	    dsc_rate_per_lane;

	max_lanes = connector->display_info.hdmi.max_lanes;
	rate_per_lane = connector->display_info.hdmi.max_frl_rate_per_lane;
	max_frl_rate = max_lanes * rate_per_lane;

	/* A DSC-capable sink may support a different (lower) FRL limit for DSC. */
	if (connector->display_info.hdmi.dsc_cap.v_1p2) {
		max_dsc_lanes = connector->display_info.hdmi.dsc_cap.max_lanes;
		dsc_rate_per_lane = connector->display_info.hdmi.dsc_cap.max_frl_rate_per_lane;
		if (max_dsc_lanes && dsc_rate_per_lane)
			max_frl_rate = min(max_frl_rate, max_dsc_lanes * dsc_rate_per_lane);
	}

	return max_frl_rate;
}

/* True if the PCON HDMI link is up in FRL mode at (at least) the wanted bandwidth. */
static bool
intel_dp_pcon_is_frl_trained(struct intel_dp *intel_dp,
			     u8 max_frl_bw_mask, u8 *frl_trained_mask)
{
	if (drm_dp_pcon_hdmi_link_active(&intel_dp->aux) &&
	    drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, frl_trained_mask) == DP_PCON_HDMI_MODE_FRL &&
	    *frl_trained_mask >= max_frl_bw_mask)
		return true;

	return false;
}

/*
 * Train the PCON's HDMI FRL link at the highest bandwidth both the PCON and
 * the sink support. Returns 0 on success, -EINVAL if no usable bandwidth,
 * -ETIMEDOUT if the PCON never becomes ready/active, or an AUX error code.
 */
static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
{
#define TIMEOUT_FRL_READY_MS 500
#define TIMEOUT_HDMI_LINK_ACTIVE_MS 1000

	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int max_frl_bw, max_pcon_frl_bw, max_edid_frl_bw, ret;
	u8 max_frl_bw_mask = 0, frl_trained_mask;
	bool is_active;

	max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
	drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw);

	max_edid_frl_bw = intel_dp_hdmi_sink_max_frl(intel_dp);
	drm_dbg(&i915->drm, "Sink max rate from EDID = %d Gbps\n", max_edid_frl_bw);

	max_frl_bw = min(max_edid_frl_bw, max_pcon_frl_bw);

	if (max_frl_bw <= 0)
		return -EINVAL;

	max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw);
	drm_dbg(&i915->drm, "MAX_FRL_BW_MASK = %u\n", max_frl_bw_mask);

	/* Already trained at the wanted bandwidth: skip retraining. */
	if (intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask))
		goto frl_trained;

	ret =
	      drm_dp_pcon_frl_prepare(&intel_dp->aux, false);
	if (ret < 0)
		return ret;
	/* Wait for PCON to be FRL Ready */
	wait_for(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux) == true, TIMEOUT_FRL_READY_MS);

	if (!is_active)
		return -ETIMEDOUT;

	ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw,
					  DP_PCON_ENABLE_SEQUENTIAL_LINK);
	if (ret < 0)
		return ret;
	ret = drm_dp_pcon_frl_configure_2(&intel_dp->aux, max_frl_bw_mask,
					  DP_PCON_FRL_LINK_TRAIN_NORMAL);
	if (ret < 0)
		return ret;
	ret = drm_dp_pcon_frl_enable(&intel_dp->aux);
	if (ret < 0)
		return ret;
	/*
	 * Wait for FRL to be completed
	 * Check if the HDMI Link is up and active.
	 */
	wait_for(is_active =
		 intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask),
		 TIMEOUT_HDMI_LINK_ACTIVE_MS);

	if (!is_active)
		return -ETIMEDOUT;

frl_trained:
	/* Cache the trained rate for later queries/teardown. */
	drm_dbg(&i915->drm, "FRL_TRAINED_MASK = %u\n", frl_trained_mask);
	intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask);
	intel_dp->frl.is_trained = true;
	drm_dbg(&i915->drm, "FRL trained with : %d Gbps\n", intel_dp->frl.trained_rate_gbps);

	return 0;
}

/* True if we're driving a branch device with an HDMI sink that supports FRL. */
static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp)
{
	if (drm_dp_is_branch(intel_dp->dpcd) &&
	    intel_dp_has_hdmi_sink(intel_dp) &&
	    intel_dp_hdmi_sink_max_frl(intel_dp) > 0)
		return true;

	return false;
}

/*
 * Put the PCON into source control mode and enable the HDMI link in
 * TMDS mode (the non-FRL fallback). Two separate writes are used,
 * first enabling source control, then the link.
 */
static
int intel_dp_pcon_set_tmds_mode(struct intel_dp *intel_dp)
{
	int ret;
	u8 buf = 0;

	/* Set PCON source control mode */
	buf |= DP_PCON_ENABLE_SOURCE_CTL_MODE;

	ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
	if (ret < 0)
		return ret;

	/* Set HDMI LINK ENABLE */
	buf |= DP_PCON_ENABLE_HDMI_LINK;
	ret = drm_dp_dpcd_writeb(&intel_dp->aux,
				 DP_PCON_HDMI_LINK_CONFIG_1, buf);
	if (ret < 0)
		return ret;

	return 0;
}

void intel_dp_check_frl_training(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/*
	 * Always go for FRL training if:
	 * -PCON supports SRC_CTL_MODE (VESA DP2.0-HDMI2.1 PCON Spec Draft-1 Sec-7)
	 * -sink is HDMI2.1
	 */
	if (!(intel_dp->downstream_ports[2] & DP_PCON_SOURCE_CTL_MODE) ||
	    !intel_dp_is_hdmi_2_1_sink(intel_dp) ||
	    intel_dp->frl.is_trained)
		return;

	if (intel_dp_pcon_start_frl_training(intel_dp) < 0) {
		int ret, mode;

		/* FRL failed: fall back to TMDS and verify the PCON accepted it. */
		drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n");
		ret = intel_dp_pcon_set_tmds_mode(intel_dp);
		mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL);

		if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS)
			drm_dbg(&dev_priv->drm, "Issue with PCON, cannot set TMDS mode\n");
	} else {
		drm_dbg(&dev_priv->drm, "FRL training Completed\n");
	}
}

/* DSC slice height for the PCON encoder, derived from the active vertical size. */
static int
intel_dp_pcon_dsc_enc_slice_height(const struct intel_crtc_state *crtc_state)
{
	int vactive = crtc_state->hw.adjusted_mode.vdisplay;

	return intel_hdmi_dsc_get_slice_height(vactive);
}

/*
 * Number of DSC slices for the PCON encoder, bounded by both the PCON's and
 * the HDMI sink's DSC capabilities.
 */
static int
intel_dp_pcon_dsc_enc_slices(struct intel_dp *intel_dp,
			     const struct intel_crtc_state *crtc_state)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;
	int hdmi_throughput = connector->display_info.hdmi.dsc_cap.clk_per_slice;
	int hdmi_max_slices = connector->display_info.hdmi.dsc_cap.max_slices;
	int pcon_max_slices = drm_dp_pcon_dsc_max_slices(intel_dp->pcon_dsc_dpcd);
	int pcon_max_slice_width = drm_dp_pcon_dsc_max_slice_width(intel_dp->pcon_dsc_dpcd);

	return intel_hdmi_dsc_get_num_slices(crtc_state, pcon_max_slices,
					     pcon_max_slice_width,
					     hdmi_max_slices, hdmi_throughput);
}

/*
 * DSC bits-per-pixel for the PCON encoder, given the chosen slice layout and
 * the sink's DSC constraints.
 */
static int
intel_dp_pcon_dsc_enc_bpp(struct intel_dp *intel_dp,
			  const struct intel_crtc_state *crtc_state,
			  int num_slices, int slice_width)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;
	int output_format = crtc_state->output_format;
	bool hdmi_all_bpp = connector->display_info.hdmi.dsc_cap.all_bpp;
	int pcon_fractional_bpp = drm_dp_pcon_dsc_bpp_incr(intel_dp->pcon_dsc_dpcd);
	int hdmi_max_chunk_bytes =
		connector->display_info.hdmi.dsc_cap.total_chunk_kbytes * 1024;

	return intel_hdmi_dsc_get_bpp(pcon_fractional_bpp, slice_width,
				      num_slices, output_format, hdmi_all_bpp,
				      hdmi_max_chunk_bytes);
}

/*
 * Compute and program the PCON's DSC PPS override parameters (slice height,
 * slice width, bpp) for an HDMI2.1 sink driven through a DSC 1.2 capable
 * PCON. Bails out silently if any prerequisite or computed value is missing.
 */
void
intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	u8 pps_param[6];
	int slice_height;
	int slice_width;
	int num_slices;
	int bits_per_pixel;
	int ret;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector *connector;
	bool hdmi_is_dsc_1_2;

	if (!intel_dp_is_hdmi_2_1_sink(intel_dp))
		return;

	if (!intel_connector)
		return;
	connector = &intel_connector->base;
	hdmi_is_dsc_1_2 = connector->display_info.hdmi.dsc_cap.v_1p2;

	/* Both the PCON encoder and the HDMI sink must support DSC 1.2. */
	if (!drm_dp_pcon_enc_is_dsc_1_2(intel_dp->pcon_dsc_dpcd) ||
	    !hdmi_is_dsc_1_2)
		return;

	slice_height = intel_dp_pcon_dsc_enc_slice_height(crtc_state);
	if (!slice_height)
		return;

	num_slices = intel_dp_pcon_dsc_enc_slices(intel_dp, crtc_state);
	if (!num_slices)
		return;

	slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay,
				   num_slices);

	bits_per_pixel = intel_dp_pcon_dsc_enc_bpp(intel_dp, crtc_state,
						   num_slices, slice_width);
	if (!bits_per_pixel)
		return;

	/* Pack the parameters little-endian into the 6-byte PPS override blob. */
	pps_param[0] = slice_height & 0xFF;
	pps_param[1] = slice_height >> 8;
	pps_param[2] = slice_width & 0xFF;
	pps_param[3] = slice_width >> 8;
	pps_param[4] = bits_per_pixel & 0xFF;
	pps_param[5] = (bits_per_pixel >> 8) & 0x3;

	ret = drm_dp_pcon_pps_override_param(&intel_dp->aux, pps_param);
	if (ret < 0)
		drm_dbg_kms(&i915->drm, "Failed to set pcon DSC\n");
}

/*
 * Program a DP branch device's protocol converter controls: HDMI/DVI output
 * mode and any needed YCbCr 4:4:4->4:2:0 / RGB->YCbCr conversion, based on
 * the output vs. sink format in @crtc_state. DPCD failures are only logged.
 */
void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
					   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	bool ycbcr444_to_420 = false;
	bool rgb_to_ycbcr = false;
	u8 tmp;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x13)
		return;

	if (!drm_dp_is_branch(intel_dp->dpcd))
		return;

	tmp = intel_dp_has_hdmi_sink(intel_dp) ? DP_HDMI_DVI_OUTPUT_CONFIG : 0;

	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1)
		drm_dbg_kms(&i915->drm, "Failed to %s protocol converter HDMI mode\n",
			    str_enable_disable(intel_dp_has_hdmi_sink(intel_dp)));

	if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
		/* Sink wants 4:2:0: decide which conversion(s) the converter must do. */
		switch (crtc_state->output_format) {
		case INTEL_OUTPUT_FORMAT_YCBCR420:
			break;
		case INTEL_OUTPUT_FORMAT_YCBCR444:
			ycbcr444_to_420 = true;
			break;
		case INTEL_OUTPUT_FORMAT_RGB:
			rgb_to_ycbcr = true;
			ycbcr444_to_420 = true;
			break;
		default:
			MISSING_CASE(crtc_state->output_format);
			break;
		}
	} else if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
		switch (crtc_state->output_format) {
		case INTEL_OUTPUT_FORMAT_YCBCR444:
			break;
		case INTEL_OUTPUT_FORMAT_RGB:
			rgb_to_ycbcr = true;
			break;
		default:
			MISSING_CASE(crtc_state->output_format);
			break;
		}
	}

	tmp = ycbcr444_to_420 ?
DP_CONVERSION_TO_YCBCR420_ENABLE : 0; 3720 3721 if (drm_dp_dpcd_writeb(&intel_dp->aux, 3722 DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1) 3723 drm_dbg_kms(&i915->drm, 3724 "Failed to %s protocol converter YCbCr 4:2:0 conversion mode\n", 3725 str_enable_disable(intel_dp->dfp.ycbcr_444_to_420)); 3726 3727 tmp = rgb_to_ycbcr ? DP_CONVERSION_BT709_RGB_YCBCR_ENABLE : 0; 3728 3729 if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0) 3730 drm_dbg_kms(&i915->drm, 3731 "Failed to %s protocol converter RGB->YCbCr conversion mode\n", 3732 str_enable_disable(tmp)); 3733 } 3734 3735 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) 3736 { 3737 u8 dprx = 0; 3738 3739 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST, 3740 &dprx) != 1) 3741 return false; 3742 return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED; 3743 } 3744 3745 static void intel_dp_read_dsc_dpcd(struct drm_dp_aux *aux, 3746 u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) 3747 { 3748 if (drm_dp_dpcd_read(aux, DP_DSC_SUPPORT, dsc_dpcd, 3749 DP_DSC_RECEIVER_CAP_SIZE) < 0) { 3750 drm_err(aux->drm_dev, 3751 "Failed to read DPCD register 0x%x\n", 3752 DP_DSC_SUPPORT); 3753 return; 3754 } 3755 3756 drm_dbg_kms(aux->drm_dev, "DSC DPCD: %*ph\n", 3757 DP_DSC_RECEIVER_CAP_SIZE, 3758 dsc_dpcd); 3759 } 3760 3761 void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, struct intel_connector *connector) 3762 { 3763 struct drm_i915_private *i915 = to_i915(connector->base.dev); 3764 3765 /* 3766 * Clear the cached register set to avoid using stale values 3767 * for the sinks that do not support DSC. 
	 */
	memset(connector->dp.dsc_dpcd, 0, sizeof(connector->dp.dsc_dpcd));

	/* Clear fec_capable to avoid using stale values */
	connector->dp.fec_capability = 0;

	/* DSC support requires DPCD 1.4+. */
	if (dpcd_rev < DP_DPCD_REV_14)
		return;

	intel_dp_read_dsc_dpcd(connector->dp.dsc_decompression_aux,
			       connector->dp.dsc_dpcd);

	if (drm_dp_dpcd_readb(connector->dp.dsc_decompression_aux, DP_FEC_CAPABILITY,
			      &connector->dp.fec_capability) < 0) {
		drm_err(&i915->drm, "Failed to read FEC DPCD register\n");
		return;
	}

	drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n",
		    connector->dp.fec_capability);
}

/* eDP variant: DSC caps are only read for eDP 1.4+ panels. */
static void intel_edp_get_dsc_sink_cap(u8 edp_dpcd_rev, struct intel_connector *connector)
{
	if (edp_dpcd_rev < DP_EDP_14)
		return;

	intel_dp_read_dsc_dpcd(connector->dp.dsc_decompression_aux, connector->dp.dsc_dpcd);
}

/*
 * Adjust a per-segment panel mode to the full-panel MSO mode: scale the
 * horizontal timings and clock by the link count, accounting for pixel overlap.
 */
static void intel_edp_mso_mode_fixup(struct intel_connector *connector,
				     struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	int n = intel_dp->mso_link_count;
	int overlap = intel_dp->mso_pixel_overlap;

	if (!mode || !n)
		return;

	mode->hdisplay = (mode->hdisplay - overlap) * n;
	mode->hsync_start = (mode->hsync_start - overlap) * n;
	mode->hsync_end = (mode->hsync_end - overlap) * n;
	mode->htotal = (mode->htotal - overlap) * n;
	mode->clock *= n;

	drm_mode_set_name(mode);

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] using generated MSO mode: " DRM_MODE_FMT "\n",
		    connector->base.base.id, connector->base.name,
		    DRM_MODE_ARG(mode));
}

void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct
	       intel_connector *connector = intel_dp->attached_connector;

	if (connector->panel.vbt.edp.bpp && pipe_bpp > connector->panel.vbt.edp.bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		drm_dbg_kms(&dev_priv->drm,
			    "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			    pipe_bpp, connector->panel.vbt.edp.bpp);
		connector->panel.vbt.edp.bpp = pipe_bpp;
	}
}

/*
 * Read and validate the panel's MSO (Multi-SST Operation) link capabilities,
 * caching the link count and pixel overlap for mode fixups.
 */
static void intel_edp_mso_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_display_info *info = &connector->base.display_info;
	u8 mso;

	if (intel_dp->edp_dpcd[0] < DP_EDP_14)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_MSO_LINK_CAPABILITIES, &mso) != 1) {
		drm_err(&i915->drm, "Failed to read MSO cap\n");
		return;
	}

	/* Valid configurations are SST or MSO 2x1, 2x2, 4x1 */
	mso &= DP_EDP_MSO_NUMBER_OF_LINKS_MASK;
	if (mso % 2 || mso > drm_dp_max_lane_count(intel_dp->dpcd)) {
		drm_err(&i915->drm, "Invalid MSO link count cap %u\n", mso);
		mso = 0;
	}

	if (mso) {
		drm_dbg_kms(&i915->drm, "Sink MSO %ux%u configuration, pixel overlap %u\n",
			    mso, drm_dp_max_lane_count(intel_dp->dpcd) / mso,
			    info->mso_pixel_overlap);
		if (!HAS_MSO(i915)) {
			drm_err(&i915->drm, "No source MSO support, disabling\n");
			mso = 0;
		}
	}

	intel_dp->mso_link_count = mso;
	intel_dp->mso_pixel_overlap = mso ? info->mso_pixel_overlap : 0;
}

/*
 * One-time DPCD setup for an eDP panel: base caps, device descriptor, eDP
 * display control registers, PSR caps and the source OUI. Returns false if
 * the base DPCD read fails.
 */
static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	/* this function is meant to be called only once */
	drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);

	if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0)
		return false;

	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
			 drm_dp_is_branch(intel_dp->dpcd));

	/*
	 * Read the eDP display control registers.
	 *
	 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
	 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
	 * set, but require eDP 1.4+ detection (e.g. for supported link rates
	 * method). The display control registers should read zero if they're
	 * not supported anyway.
	 */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
	    sizeof(intel_dp->edp_dpcd)) {
		drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
			    (int)sizeof(intel_dp->edp_dpcd),
			    intel_dp->edp_dpcd);

		intel_dp->use_max_params = intel_dp->edp_dpcd[0] < DP_EDP_14;
	}

	/*
	 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
	 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
	 */
	intel_psr_init_dpcd(intel_dp);

	/* Clear the default sink rates */
	intel_dp->num_sink_rates = 0;

	/* Read the eDP 1.4+ supported link rates.
	 */
	if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				 sink_rates, sizeof(sink_rates));

		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			/* The rate table is zero-terminated. */
			if (val == 0)
				break;

			/* Value read multiplied by 200kHz gives the per-lane
			 * link rate in kHz. The source rates are, however,
			 * stored in terms of LS_Clk kHz. The full conversion
			 * back to symbols is
			 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
			 */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	/*
	 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
	 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
	 */
	if (intel_dp->num_sink_rates)
		intel_dp->use_rate_select = true;
	else
		intel_dp_set_sink_rates(intel_dp);
	intel_dp_set_max_sink_lane_count(intel_dp);

	/* Read the eDP DSC DPCD registers */
	if (HAS_DSC(dev_priv))
		intel_edp_get_dsc_sink_cap(intel_dp->edp_dpcd[0],
					   connector);

	/*
	 * If needed, program our source OUI so we can make various Intel-specific AUX services
	 * available (such as HDR backlight controls)
	 */
	intel_edp_init_source_oui(intel_dp, true);

	return true;
}

/* True if the sink reports a usable SINK_COUNT (needs an attached connector). */
static bool
intel_dp_has_sink_count(struct intel_dp *intel_dp)
{
	if (!intel_dp->attached_connector)
		return false;

	return drm_dp_read_sink_count_cap(&intel_dp->attached_connector->base,
					  intel_dp->dpcd,
					  &intel_dp->desc);
}

/* Refresh the cached sink link rates/lane count and the source/sink common rates. */
void intel_dp_update_sink_caps(struct intel_dp *intel_dp)
{
	intel_dp_set_sink_rates(intel_dp);
	intel_dp_set_max_sink_lane_count(intel_dp);
	intel_dp_set_common_rates(intel_dp);
}

static bool
intel_dp_get_dpcd(struct intel_dp
		  *intel_dp)
{
	/*
	 * (Re-)read the sink's DPCD caps, descriptor, sink count and
	 * downstream port info. Returns false on AUX failure or when a
	 * dongle reports no attached display.
	 */
	int ret;

	if (intel_dp_init_lttpr_and_dprx_caps(intel_dp) < 0)
		return false;

	/*
	 * Don't clobber cached eDP rates. Also skip re-reading
	 * the OUI/ID since we know it won't change.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
				 drm_dp_is_branch(intel_dp->dpcd));

		intel_dp_update_sink_caps(intel_dp);
	}

	if (intel_dp_has_sink_count(intel_dp)) {
		ret = drm_dp_read_sink_count(&intel_dp->aux);
		if (ret < 0)
			return false;

		/*
		 * Sink count can change between short pulse hpd hence
		 * a member variable in intel_dp will track any changes
		 * between short pulse interrupts.
		 */
		intel_dp->sink_count = ret;

		/*
		 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
		 * a dongle is present but no display. Unless we require to know
		 * if a dongle is present or not, we don't need to update
		 * downstream port information. So, an early return here saves
		 * time from performing other operations which are not required.
		 */
		if (!intel_dp->sink_count)
			return false;
	}

	return drm_dp_read_downstream_info(&intel_dp->aux, intel_dp->dpcd,
					   intel_dp->downstream_ports) == 0;
}

/* MST is usable only when the module param, source and sink all allow it. */
static bool
intel_dp_can_mst(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	return i915->display.params.enable_dp_mst &&
		intel_dp_mst_source_support(intel_dp) &&
		drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
}

static void
intel_dp_configure_mst(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_encoder *encoder =
		&dp_to_dig_port(intel_dp)->base;
	bool sink_can_mst = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);

	drm_dbg_kms(&i915->drm,
		    "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
		    encoder->base.base.id, encoder->base.name,
		    str_yes_no(intel_dp_mst_source_support(intel_dp)),
		    str_yes_no(sink_can_mst),
		    str_yes_no(i915->display.params.enable_dp_mst));

	if (!intel_dp_mst_source_support(intel_dp))
		return;

	intel_dp->is_mst = sink_can_mst &&
		i915->display.params.enable_dp_mst;

	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
					intel_dp->is_mst);
}

/* Read the 4-byte ESI (event status indicator) block; true on full read. */
static bool
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *esi)
{
	return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, esi, 4) == 4;
}

/* Ack the ESI events (bytes 1-3), retrying the DPCD write up to 3 times. */
static bool intel_dp_ack_sink_irq_esi(struct intel_dp *intel_dp, u8 esi[4])
{
	int retry;

	for (retry = 0; retry < 3; retry++) {
		if (drm_dp_dpcd_write(&intel_dp->aux, DP_SINK_COUNT_ESI + 1,
				      &esi[1], 3) == 3)
			return true;
	}

	return false;
}

bool
intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
		       const struct drm_connector_state *conn_state)
{
	/*
	 * As per DP 1.4a spec section
2.2.4.3 [MSA Field for Indication 4102 * of Color Encoding Format and Content Color Gamut], in order to 4103 * sending YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP. 4104 */ 4105 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 4106 return true; 4107 4108 switch (conn_state->colorspace) { 4109 case DRM_MODE_COLORIMETRY_SYCC_601: 4110 case DRM_MODE_COLORIMETRY_OPYCC_601: 4111 case DRM_MODE_COLORIMETRY_BT2020_YCC: 4112 case DRM_MODE_COLORIMETRY_BT2020_RGB: 4113 case DRM_MODE_COLORIMETRY_BT2020_CYCC: 4114 return true; 4115 default: 4116 break; 4117 } 4118 4119 return false; 4120 } 4121 4122 static ssize_t 4123 intel_dp_hdr_metadata_infoframe_sdp_pack(struct drm_i915_private *i915, 4124 const struct hdmi_drm_infoframe *drm_infoframe, 4125 struct dp_sdp *sdp, 4126 size_t size) 4127 { 4128 size_t length = sizeof(struct dp_sdp); 4129 const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE; 4130 unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE]; 4131 ssize_t len; 4132 4133 if (size < length) 4134 return -ENOSPC; 4135 4136 memset(sdp, 0, size); 4137 4138 len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf)); 4139 if (len < 0) { 4140 drm_dbg_kms(&i915->drm, "buffer size is smaller than hdr metadata infoframe\n"); 4141 return -ENOSPC; 4142 } 4143 4144 if (len != infoframe_size) { 4145 drm_dbg_kms(&i915->drm, "wrong static hdr metadata size\n"); 4146 return -ENOSPC; 4147 } 4148 4149 /* 4150 * Set up the infoframe sdp packet for HDR static metadata. 
4151 * Prepare VSC Header for SU as per DP 1.4a spec, 4152 * Table 2-100 and Table 2-101 4153 */ 4154 4155 /* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */ 4156 sdp->sdp_header.HB0 = 0; 4157 /* 4158 * Packet Type 80h + Non-audio INFOFRAME Type value 4159 * HDMI_INFOFRAME_TYPE_DRM: 0x87 4160 * - 80h + Non-audio INFOFRAME Type value 4161 * - InfoFrame Type: 0x07 4162 * [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame] 4163 */ 4164 sdp->sdp_header.HB1 = drm_infoframe->type; 4165 /* 4166 * Least Significant Eight Bits of (Data Byte Count – 1) 4167 * infoframe_size - 1 4168 */ 4169 sdp->sdp_header.HB2 = 0x1D; 4170 /* INFOFRAME SDP Version Number */ 4171 sdp->sdp_header.HB3 = (0x13 << 2); 4172 /* CTA Header Byte 2 (INFOFRAME Version Number) */ 4173 sdp->db[0] = drm_infoframe->version; 4174 /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */ 4175 sdp->db[1] = drm_infoframe->length; 4176 /* 4177 * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after 4178 * HDMI_INFOFRAME_HEADER_SIZE 4179 */ 4180 BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2); 4181 memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE], 4182 HDMI_DRM_INFOFRAME_SIZE); 4183 4184 /* 4185 * Size of DP infoframe sdp packet for HDR static metadata consists of 4186 * - DP SDP Header(struct dp_sdp_header): 4 bytes 4187 * - Two Data Blocks: 2 bytes 4188 * CTA Header Byte2 (INFOFRAME Version Number) 4189 * CTA Header Byte3 (Length of INFOFRAME) 4190 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes 4191 * 4192 * Prior to GEN11's GMP register size is identical to DP HDR static metadata 4193 * infoframe size. But GEN11+ has larger than that size, write_infoframe 4194 * will pad rest of the size. 
4195 */ 4196 return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE; 4197 } 4198 4199 static void intel_write_dp_sdp(struct intel_encoder *encoder, 4200 const struct intel_crtc_state *crtc_state, 4201 unsigned int type) 4202 { 4203 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 4204 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4205 struct dp_sdp sdp = {}; 4206 ssize_t len; 4207 4208 if ((crtc_state->infoframes.enable & 4209 intel_hdmi_infoframe_enable(type)) == 0) 4210 return; 4211 4212 switch (type) { 4213 case DP_SDP_VSC: 4214 len = drm_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp); 4215 break; 4216 case HDMI_PACKET_TYPE_GAMUT_METADATA: 4217 len = intel_dp_hdr_metadata_infoframe_sdp_pack(dev_priv, 4218 &crtc_state->infoframes.drm.drm, 4219 &sdp, sizeof(sdp)); 4220 break; 4221 default: 4222 MISSING_CASE(type); 4223 return; 4224 } 4225 4226 if (drm_WARN_ON(&dev_priv->drm, len < 0)) 4227 return; 4228 4229 dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len); 4230 } 4231 4232 void intel_dp_set_infoframes(struct intel_encoder *encoder, 4233 bool enable, 4234 const struct intel_crtc_state *crtc_state, 4235 const struct drm_connector_state *conn_state) 4236 { 4237 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4238 i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder); 4239 u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW | 4240 VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW | 4241 VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK; 4242 u32 val = intel_de_read(dev_priv, reg) & ~dip_enable; 4243 4244 /* TODO: Sanitize DSC enabling wrt. intel_dsc_dp_pps_write(). 
*/ 4245 if (!enable && HAS_DSC(dev_priv)) 4246 val &= ~VDIP_ENABLE_PPS; 4247 4248 /* When PSR is enabled, this routine doesn't disable VSC DIP */ 4249 if (!crtc_state->has_psr) 4250 val &= ~VIDEO_DIP_ENABLE_VSC_HSW; 4251 4252 intel_de_write(dev_priv, reg, val); 4253 intel_de_posting_read(dev_priv, reg); 4254 4255 if (!enable) 4256 return; 4257 4258 intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC); 4259 4260 intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA); 4261 } 4262 4263 static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc, 4264 const void *buffer, size_t size) 4265 { 4266 const struct dp_sdp *sdp = buffer; 4267 4268 if (size < sizeof(struct dp_sdp)) 4269 return -EINVAL; 4270 4271 memset(vsc, 0, sizeof(*vsc)); 4272 4273 if (sdp->sdp_header.HB0 != 0) 4274 return -EINVAL; 4275 4276 if (sdp->sdp_header.HB1 != DP_SDP_VSC) 4277 return -EINVAL; 4278 4279 vsc->sdp_type = sdp->sdp_header.HB1; 4280 vsc->revision = sdp->sdp_header.HB2; 4281 vsc->length = sdp->sdp_header.HB3; 4282 4283 if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) || 4284 (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) { 4285 /* 4286 * - HB2 = 0x2, HB3 = 0x8 4287 * VSC SDP supporting 3D stereo + PSR 4288 * - HB2 = 0x4, HB3 = 0xe 4289 * VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of 4290 * first scan line of the SU region (applies to eDP v1.4b 4291 * and higher). 4292 */ 4293 return 0; 4294 } else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) { 4295 /* 4296 * - HB2 = 0x5, HB3 = 0x13 4297 * VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry 4298 * Format. 
4299 */ 4300 vsc->pixelformat = (sdp->db[16] >> 4) & 0xf; 4301 vsc->colorimetry = sdp->db[16] & 0xf; 4302 vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1; 4303 4304 switch (sdp->db[17] & 0x7) { 4305 case 0x0: 4306 vsc->bpc = 6; 4307 break; 4308 case 0x1: 4309 vsc->bpc = 8; 4310 break; 4311 case 0x2: 4312 vsc->bpc = 10; 4313 break; 4314 case 0x3: 4315 vsc->bpc = 12; 4316 break; 4317 case 0x4: 4318 vsc->bpc = 16; 4319 break; 4320 default: 4321 MISSING_CASE(sdp->db[17] & 0x7); 4322 return -EINVAL; 4323 } 4324 4325 vsc->content_type = sdp->db[18] & 0x7; 4326 } else { 4327 return -EINVAL; 4328 } 4329 4330 return 0; 4331 } 4332 4333 static int 4334 intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe, 4335 const void *buffer, size_t size) 4336 { 4337 int ret; 4338 4339 const struct dp_sdp *sdp = buffer; 4340 4341 if (size < sizeof(struct dp_sdp)) 4342 return -EINVAL; 4343 4344 if (sdp->sdp_header.HB0 != 0) 4345 return -EINVAL; 4346 4347 if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM) 4348 return -EINVAL; 4349 4350 /* 4351 * Least Significant Eight Bits of (Data Byte Count – 1) 4352 * 1Dh (i.e., Data Byte Count = 30 bytes). 4353 */ 4354 if (sdp->sdp_header.HB2 != 0x1D) 4355 return -EINVAL; 4356 4357 /* Most Significant Two Bits of (Data Byte Count – 1), Clear to 00b. 
*/ 4358 if ((sdp->sdp_header.HB3 & 0x3) != 0) 4359 return -EINVAL; 4360 4361 /* INFOFRAME SDP Version Number */ 4362 if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13) 4363 return -EINVAL; 4364 4365 /* CTA Header Byte 2 (INFOFRAME Version Number) */ 4366 if (sdp->db[0] != 1) 4367 return -EINVAL; 4368 4369 /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */ 4370 if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE) 4371 return -EINVAL; 4372 4373 ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2], 4374 HDMI_DRM_INFOFRAME_SIZE); 4375 4376 return ret; 4377 } 4378 4379 static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder, 4380 struct intel_crtc_state *crtc_state, 4381 struct drm_dp_vsc_sdp *vsc) 4382 { 4383 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 4384 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4385 unsigned int type = DP_SDP_VSC; 4386 struct dp_sdp sdp = {}; 4387 int ret; 4388 4389 if ((crtc_state->infoframes.enable & 4390 intel_hdmi_infoframe_enable(type)) == 0) 4391 return; 4392 4393 dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp)); 4394 4395 ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp)); 4396 4397 if (ret) 4398 drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n"); 4399 } 4400 4401 static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder, 4402 struct intel_crtc_state *crtc_state, 4403 struct hdmi_drm_infoframe *drm_infoframe) 4404 { 4405 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 4406 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4407 unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA; 4408 struct dp_sdp sdp = {}; 4409 int ret; 4410 4411 if ((crtc_state->infoframes.enable & 4412 intel_hdmi_infoframe_enable(type)) == 0) 4413 return; 4414 4415 dig_port->read_infoframe(encoder, crtc_state, type, &sdp, 4416 sizeof(sdp)); 4417 4418 ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, 
&sdp, 4419 sizeof(sdp)); 4420 4421 if (ret) 4422 drm_dbg_kms(&dev_priv->drm, 4423 "Failed to unpack DP HDR Metadata Infoframe SDP\n"); 4424 } 4425 4426 void intel_read_dp_sdp(struct intel_encoder *encoder, 4427 struct intel_crtc_state *crtc_state, 4428 unsigned int type) 4429 { 4430 switch (type) { 4431 case DP_SDP_VSC: 4432 intel_read_dp_vsc_sdp(encoder, crtc_state, 4433 &crtc_state->infoframes.vsc); 4434 break; 4435 case HDMI_PACKET_TYPE_GAMUT_METADATA: 4436 intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state, 4437 &crtc_state->infoframes.drm.drm); 4438 break; 4439 default: 4440 MISSING_CASE(type); 4441 break; 4442 } 4443 } 4444 4445 static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp) 4446 { 4447 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4448 int status = 0; 4449 int test_link_rate; 4450 u8 test_lane_count, test_link_bw; 4451 /* (DP CTS 1.2) 4452 * 4.3.1.11 4453 */ 4454 /* Read the TEST_LANE_COUNT and TEST_LINK_RTAE fields (DP CTS 3.1.4) */ 4455 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT, 4456 &test_lane_count); 4457 4458 if (status <= 0) { 4459 drm_dbg_kms(&i915->drm, "Lane count read failed\n"); 4460 return DP_TEST_NAK; 4461 } 4462 test_lane_count &= DP_MAX_LANE_COUNT_MASK; 4463 4464 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE, 4465 &test_link_bw); 4466 if (status <= 0) { 4467 drm_dbg_kms(&i915->drm, "Link Rate read failed\n"); 4468 return DP_TEST_NAK; 4469 } 4470 test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw); 4471 4472 /* Validate the requested link rate and lane count */ 4473 if (!intel_dp_link_params_valid(intel_dp, test_link_rate, 4474 test_lane_count)) 4475 return DP_TEST_NAK; 4476 4477 intel_dp->compliance.test_lane_count = test_lane_count; 4478 intel_dp->compliance.test_link_rate = test_link_rate; 4479 4480 return DP_TEST_ACK; 4481 } 4482 4483 static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp) 4484 { 4485 struct drm_i915_private *i915 = 
dp_to_i915(intel_dp); 4486 u8 test_pattern; 4487 u8 test_misc; 4488 __be16 h_width, v_height; 4489 int status = 0; 4490 4491 /* Read the TEST_PATTERN (DP CTS 3.1.5) */ 4492 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN, 4493 &test_pattern); 4494 if (status <= 0) { 4495 drm_dbg_kms(&i915->drm, "Test pattern read failed\n"); 4496 return DP_TEST_NAK; 4497 } 4498 if (test_pattern != DP_COLOR_RAMP) 4499 return DP_TEST_NAK; 4500 4501 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI, 4502 &h_width, 2); 4503 if (status <= 0) { 4504 drm_dbg_kms(&i915->drm, "H Width read failed\n"); 4505 return DP_TEST_NAK; 4506 } 4507 4508 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI, 4509 &v_height, 2); 4510 if (status <= 0) { 4511 drm_dbg_kms(&i915->drm, "V Height read failed\n"); 4512 return DP_TEST_NAK; 4513 } 4514 4515 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0, 4516 &test_misc); 4517 if (status <= 0) { 4518 drm_dbg_kms(&i915->drm, "TEST MISC read failed\n"); 4519 return DP_TEST_NAK; 4520 } 4521 if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB) 4522 return DP_TEST_NAK; 4523 if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA) 4524 return DP_TEST_NAK; 4525 switch (test_misc & DP_TEST_BIT_DEPTH_MASK) { 4526 case DP_TEST_BIT_DEPTH_6: 4527 intel_dp->compliance.test_data.bpc = 6; 4528 break; 4529 case DP_TEST_BIT_DEPTH_8: 4530 intel_dp->compliance.test_data.bpc = 8; 4531 break; 4532 default: 4533 return DP_TEST_NAK; 4534 } 4535 4536 intel_dp->compliance.test_data.video_pattern = test_pattern; 4537 intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width); 4538 intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height); 4539 /* Set test active flag here so userspace doesn't interrupt things */ 4540 intel_dp->compliance.test_active = true; 4541 4542 return DP_TEST_ACK; 4543 } 4544 4545 static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp) 4546 { 4547 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4548 u8 
test_result = DP_TEST_ACK; 4549 struct intel_connector *intel_connector = intel_dp->attached_connector; 4550 struct drm_connector *connector = &intel_connector->base; 4551 4552 if (intel_connector->detect_edid == NULL || 4553 connector->edid_corrupt || 4554 intel_dp->aux.i2c_defer_count > 6) { 4555 /* Check EDID read for NACKs, DEFERs and corruption 4556 * (DP CTS 1.2 Core r1.1) 4557 * 4.2.2.4 : Failed EDID read, I2C_NAK 4558 * 4.2.2.5 : Failed EDID read, I2C_DEFER 4559 * 4.2.2.6 : EDID corruption detected 4560 * Use failsafe mode for all cases 4561 */ 4562 if (intel_dp->aux.i2c_nack_count > 0 || 4563 intel_dp->aux.i2c_defer_count > 0) 4564 drm_dbg_kms(&i915->drm, 4565 "EDID read had %d NACKs, %d DEFERs\n", 4566 intel_dp->aux.i2c_nack_count, 4567 intel_dp->aux.i2c_defer_count); 4568 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE; 4569 } else { 4570 /* FIXME: Get rid of drm_edid_raw() */ 4571 const struct edid *block = drm_edid_raw(intel_connector->detect_edid); 4572 4573 /* We have to write the checksum of the last block read */ 4574 block += block->extensions; 4575 4576 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM, 4577 block->checksum) <= 0) 4578 drm_dbg_kms(&i915->drm, 4579 "Failed to write EDID checksum\n"); 4580 4581 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE; 4582 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED; 4583 } 4584 4585 /* Set test active flag here so userspace doesn't interrupt things */ 4586 intel_dp->compliance.test_active = true; 4587 4588 return test_result; 4589 } 4590 4591 static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp, 4592 const struct intel_crtc_state *crtc_state) 4593 { 4594 struct drm_i915_private *dev_priv = 4595 to_i915(dp_to_dig_port(intel_dp)->base.base.dev); 4596 struct drm_dp_phy_test_params *data = 4597 &intel_dp->compliance.test_data.phytest; 4598 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 4599 struct intel_encoder *encoder 
		= &dp_to_dig_port(intel_dp)->base;
	enum pipe pipe = crtc->pipe;
	u32 pattern_val;

	/* Program the source-side compliance pattern requested by the sink. */
	switch (data->phy_pattern) {
	case DP_LINK_QUAL_PATTERN_DISABLE:
		drm_dbg_kms(&dev_priv->drm, "Disable Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
		/* Restore normal (non-test) link training pattern selection. */
		if (DISPLAY_VER(dev_priv) >= 10)
			intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
				     DP_TP_CTL_TRAIN_PAT4_SEL_MASK | DP_TP_CTL_LINK_TRAIN_MASK,
				     DP_TP_CTL_LINK_TRAIN_NORMAL);
		break;
	case DP_LINK_QUAL_PATTERN_D10_2:
		drm_dbg_kms(&dev_priv->drm, "Set D10.2 Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
		break;
	case DP_LINK_QUAL_PATTERN_ERROR_RATE:
		drm_dbg_kms(&dev_priv->drm, "Set Error Count Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE |
			       DDI_DP_COMP_CTL_SCRAMBLED_0);
		break;
	case DP_LINK_QUAL_PATTERN_PRBS7:
		drm_dbg_kms(&dev_priv->drm, "Set PRBS7 Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
		break;
	case DP_LINK_QUAL_PATTERN_80BIT_CUSTOM:
		/*
		 * FIXME: Ideally pattern should come from DPCD 0x250. As
		 * current firmware of DPR-100 could not set it, so hardcoding
		 * now for compliance test.
		 */
		drm_dbg_kms(&dev_priv->drm,
			    "Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n");
		pattern_val = 0x3e0f83e0;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val);
		pattern_val = 0x0f83e0f8;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val);
		pattern_val = 0x0000f83e;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val);
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE |
			       DDI_DP_COMP_CTL_CUSTOM80);
		break;
	case DP_LINK_QUAL_PATTERN_CP2520_PAT_1:
		/*
		 * FIXME: Ideally pattern should come from DPCD 0x24A. As
		 * current firmware of DPR-100 could not set it, so hardcoding
		 * now for compliance test.
		 */
		drm_dbg_kms(&dev_priv->drm, "Set HBR2 compliance Phy Test Pattern\n");
		pattern_val = 0xFB;
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 |
			       pattern_val);
		break;
	case DP_LINK_QUAL_PATTERN_CP2520_PAT_3:
		/* TPS4 requires DISPLAY_VER >= 10 on this path. */
		if (DISPLAY_VER(dev_priv) < 10) {
			drm_warn(&dev_priv->drm, "Platform does not support TPS4\n");
			break;
		}
		drm_dbg_kms(&dev_priv->drm, "Set TPS4 compliance Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
		intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
			     DP_TP_CTL_TRAIN_PAT4_SEL_MASK | DP_TP_CTL_LINK_TRAIN_MASK,
			     DP_TP_CTL_TRAIN_PAT4_SEL_TP4A | DP_TP_CTL_LINK_TRAIN_PAT4);
		break;
	default:
		drm_warn(&dev_priv->drm, "Invalid Phy Test Pattern\n");
	}
}

/*
 * Apply a sink-requested PHY compliance test: read the current link
 * status, recompute vswing/pre-emphasis from it, program the test
 * pattern on the source, then mirror the drive settings and pattern to
 * the sink via DPCD.
 */
static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_dp_phy_test_params *data =
		&intel_dp->compliance.test_data.phytest;
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux,
DP_PHY_DPRX, 4683 link_status) < 0) { 4684 drm_dbg_kms(&i915->drm, "failed to get link status\n"); 4685 return; 4686 } 4687 4688 /* retrieve vswing & pre-emphasis setting */ 4689 intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, 4690 link_status); 4691 4692 intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX); 4693 4694 intel_dp_phy_pattern_update(intel_dp, crtc_state); 4695 4696 drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET, 4697 intel_dp->train_set, crtc_state->lane_count); 4698 4699 drm_dp_set_phy_test_pattern(&intel_dp->aux, data, 4700 intel_dp->dpcd[DP_DPCD_REV]); 4701 } 4702 4703 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) 4704 { 4705 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4706 struct drm_dp_phy_test_params *data = 4707 &intel_dp->compliance.test_data.phytest; 4708 4709 if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) { 4710 drm_dbg_kms(&i915->drm, "DP Phy Test pattern AUX read failure\n"); 4711 return DP_TEST_NAK; 4712 } 4713 4714 /* Set test active flag here so userspace doesn't interrupt things */ 4715 intel_dp->compliance.test_active = true; 4716 4717 return DP_TEST_ACK; 4718 } 4719 4720 static void intel_dp_handle_test_request(struct intel_dp *intel_dp) 4721 { 4722 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4723 u8 response = DP_TEST_NAK; 4724 u8 request = 0; 4725 int status; 4726 4727 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request); 4728 if (status <= 0) { 4729 drm_dbg_kms(&i915->drm, 4730 "Could not read test request from sink\n"); 4731 goto update_status; 4732 } 4733 4734 switch (request) { 4735 case DP_TEST_LINK_TRAINING: 4736 drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n"); 4737 response = intel_dp_autotest_link_training(intel_dp); 4738 break; 4739 case DP_TEST_LINK_VIDEO_PATTERN: 4740 drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n"); 4741 response = intel_dp_autotest_video_pattern(intel_dp); 4742 break; 4743 case 
DP_TEST_LINK_EDID_READ: 4744 drm_dbg_kms(&i915->drm, "EDID test requested\n"); 4745 response = intel_dp_autotest_edid(intel_dp); 4746 break; 4747 case DP_TEST_LINK_PHY_TEST_PATTERN: 4748 drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n"); 4749 response = intel_dp_autotest_phy_pattern(intel_dp); 4750 break; 4751 default: 4752 drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n", 4753 request); 4754 break; 4755 } 4756 4757 if (response & DP_TEST_ACK) 4758 intel_dp->compliance.test_type = request; 4759 4760 update_status: 4761 status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response); 4762 if (status <= 0) 4763 drm_dbg_kms(&i915->drm, 4764 "Could not write test response to sink\n"); 4765 } 4766 4767 static bool intel_dp_link_ok(struct intel_dp *intel_dp, 4768 u8 link_status[DP_LINK_STATUS_SIZE]) 4769 { 4770 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 4771 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 4772 bool uhbr = intel_dp->link_rate >= 1000000; 4773 bool ok; 4774 4775 if (uhbr) 4776 ok = drm_dp_128b132b_lane_channel_eq_done(link_status, 4777 intel_dp->lane_count); 4778 else 4779 ok = drm_dp_channel_eq_ok(link_status, intel_dp->lane_count); 4780 4781 if (ok) 4782 return true; 4783 4784 intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status); 4785 drm_dbg_kms(&i915->drm, 4786 "[ENCODER:%d:%s] %s link not ok, retraining\n", 4787 encoder->base.base.id, encoder->base.name, 4788 uhbr ? 
"128b/132b" : "8b/10b"); 4789 4790 return false; 4791 } 4792 4793 static void 4794 intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, u8 *ack) 4795 { 4796 bool handled = false; 4797 4798 drm_dp_mst_hpd_irq_handle_event(&intel_dp->mst_mgr, esi, ack, &handled); 4799 4800 if (esi[1] & DP_CP_IRQ) { 4801 intel_hdcp_handle_cp_irq(intel_dp->attached_connector); 4802 ack[1] |= DP_CP_IRQ; 4803 } 4804 } 4805 4806 static bool intel_dp_mst_link_status(struct intel_dp *intel_dp) 4807 { 4808 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 4809 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 4810 u8 link_status[DP_LINK_STATUS_SIZE] = {}; 4811 const size_t esi_link_status_size = DP_LINK_STATUS_SIZE - 2; 4812 4813 if (drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS_ESI, link_status, 4814 esi_link_status_size) != esi_link_status_size) { 4815 drm_err(&i915->drm, 4816 "[ENCODER:%d:%s] Failed to read link status\n", 4817 encoder->base.base.id, encoder->base.name); 4818 return false; 4819 } 4820 4821 return intel_dp_link_ok(intel_dp, link_status); 4822 } 4823 4824 /** 4825 * intel_dp_check_mst_status - service any pending MST interrupts, check link status 4826 * @intel_dp: Intel DP struct 4827 * 4828 * Read any pending MST interrupts, call MST core to handle these and ack the 4829 * interrupts. Check if the main and AUX link state is ok. 4830 * 4831 * Returns: 4832 * - %true if pending interrupts were serviced (or no interrupts were 4833 * pending) w/o detecting an error condition. 4834 * - %false if an error condition - like AUX failure or a loss of link - is 4835 * detected, or another condition - like a DP tunnel BW state change - needs 4836 * servicing from the hotplug work. 
4837 */ 4838 static bool 4839 intel_dp_check_mst_status(struct intel_dp *intel_dp) 4840 { 4841 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4842 bool link_ok = true; 4843 bool reprobe_needed = false; 4844 4845 drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0); 4846 4847 for (;;) { 4848 u8 esi[4] = {}; 4849 u8 ack[4] = {}; 4850 4851 if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) { 4852 drm_dbg_kms(&i915->drm, 4853 "failed to get ESI - device may have failed\n"); 4854 link_ok = false; 4855 4856 break; 4857 } 4858 4859 drm_dbg_kms(&i915->drm, "DPRX ESI: %4ph\n", esi); 4860 4861 if (intel_dp->active_mst_links > 0 && link_ok && 4862 esi[3] & LINK_STATUS_CHANGED) { 4863 if (!intel_dp_mst_link_status(intel_dp)) 4864 link_ok = false; 4865 ack[3] |= LINK_STATUS_CHANGED; 4866 } 4867 4868 intel_dp_mst_hpd_irq(intel_dp, esi, ack); 4869 4870 if (esi[3] & DP_TUNNELING_IRQ) { 4871 if (drm_dp_tunnel_handle_irq(i915->display.dp_tunnel_mgr, 4872 &intel_dp->aux)) 4873 reprobe_needed = true; 4874 ack[3] |= DP_TUNNELING_IRQ; 4875 } 4876 4877 if (!memchr_inv(ack, 0, sizeof(ack))) 4878 break; 4879 4880 if (!intel_dp_ack_sink_irq_esi(intel_dp, ack)) 4881 drm_dbg_kms(&i915->drm, "Failed to ack ESI\n"); 4882 4883 if (ack[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY)) 4884 drm_dp_mst_hpd_irq_send_new_request(&intel_dp->mst_mgr); 4885 } 4886 4887 return link_ok && !reprobe_needed; 4888 } 4889 4890 static void 4891 intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp) 4892 { 4893 bool is_active; 4894 u8 buf = 0; 4895 4896 is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux); 4897 if (intel_dp->frl.is_trained && !is_active) { 4898 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf) < 0) 4899 return; 4900 4901 buf &= ~DP_PCON_ENABLE_HDMI_LINK; 4902 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf) < 0) 4903 return; 4904 4905 drm_dp_pcon_hdmi_frl_link_error_count(&intel_dp->aux, &intel_dp->attached_connector->base); 

		intel_dp->frl.is_trained = false;

		/* Restart FRL training or fall back to TMDS mode */
		intel_dp_check_frl_training(intel_dp);
	}
}

/*
 * Decide whether the link needs retraining based on the DPRX link
 * status, after validating that the cached link parameters are still
 * usable. Returns %false whenever retraining is impossible or pointless.
 */
static bool
intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];

	/* Never trained yet: nothing to retrain. */
	if (!intel_dp->link_trained)
		return false;

	/*
	 * While PSR source HW is enabled, it will control main-link sending
	 * frames, enabling and disabling it so trying to do a retrain will
	 * fail as the link may not be on, or it could mix training patterns
	 * and frame data at the same time causing retrain to fail.
	 * Also when exiting PSR, HW will retrain the link anyways fixing
	 * any link status error.
	 */
	if (intel_psr_enabled(intel_dp))
		return false;

	if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
					     link_status) < 0)
		return false;

	/*
	 * Validate the cached values of intel_dp->link_rate and
	 * intel_dp->lane_count before attempting to retrain.
	 *
	 * FIXME would be nice to use the crtc state here, but since
	 * we need to call this from the short HPD handler that seems
	 * a bit hard.
	 */
	if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
					intel_dp->lane_count))
		return false;

	/* Retrain if link not ok */
	return !intel_dp_link_ok(intel_dp, link_status);
}

/*
 * Does @conn_state drive one of this DP port's encoders - either the
 * SST encoder or any of the per-pipe MST stream encoders?
 */
static bool intel_dp_has_connector(struct intel_dp *intel_dp,
				   const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_encoder *encoder;
	enum pipe pipe;

	if (!conn_state->best_encoder)
		return false;

	/* SST */
	encoder = &dp_to_dig_port(intel_dp)->base;
	if (conn_state->best_encoder == &encoder->base)
		return true;

	/* MST */
	for_each_pipe(i915, pipe) {
		encoder = &intel_dp->mst_encoders[pipe]->base;
		if (conn_state->best_encoder == &encoder->base)
			return true;
	}

	return false;
}

/*
 * Collect the active pipes currently driven by @intel_dp into
 * @pipe_mask, taking each crtc's modeset lock via @ctx. Returns 0 or an
 * error from drm_modeset_lock() (e.g. -EDEADLK, caller must back off).
 */
int intel_dp_get_active_pipes(struct intel_dp *intel_dp,
			      struct drm_modeset_acquire_ctx *ctx,
			      u8 *pipe_mask)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	int ret = 0;

	*pipe_mask = 0;

	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state =
			connector->base.state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!intel_dp_has_connector(intel_dp, conn_state))
			continue;

		crtc = to_intel_crtc(conn_state->crtc);
		if (!crtc)
			continue;

		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			break;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));

		if (!crtc_state->hw.active)
			continue;

		/* Wait for any pending commit to reach the HW first. */
		if (conn_state->commit)
			drm_WARN_ON(&i915->drm,
!wait_for_completion_timeout(&conn_state->commit->hw_done, 5017 msecs_to_jiffies(5000))); 5018 5019 *pipe_mask |= BIT(crtc->pipe); 5020 } 5021 drm_connector_list_iter_end(&conn_iter); 5022 5023 return ret; 5024 } 5025 5026 static bool intel_dp_is_connected(struct intel_dp *intel_dp) 5027 { 5028 struct intel_connector *connector = intel_dp->attached_connector; 5029 5030 return connector->base.status == connector_status_connected || 5031 intel_dp->is_mst; 5032 } 5033 5034 int intel_dp_retrain_link(struct intel_encoder *encoder, 5035 struct drm_modeset_acquire_ctx *ctx) 5036 { 5037 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5038 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 5039 struct intel_crtc *crtc; 5040 u8 pipe_mask; 5041 int ret; 5042 5043 if (!intel_dp_is_connected(intel_dp)) 5044 return 0; 5045 5046 ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, 5047 ctx); 5048 if (ret) 5049 return ret; 5050 5051 if (!intel_dp_needs_link_retrain(intel_dp)) 5052 return 0; 5053 5054 ret = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask); 5055 if (ret) 5056 return ret; 5057 5058 if (pipe_mask == 0) 5059 return 0; 5060 5061 if (!intel_dp_needs_link_retrain(intel_dp)) 5062 return 0; 5063 5064 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n", 5065 encoder->base.base.id, encoder->base.name); 5066 5067 for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) { 5068 const struct intel_crtc_state *crtc_state = 5069 to_intel_crtc_state(crtc->base.state); 5070 5071 /* Suppress underruns caused by re-training */ 5072 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); 5073 if (crtc_state->has_pch_encoder) 5074 intel_set_pch_fifo_underrun_reporting(dev_priv, 5075 intel_crtc_pch_transcoder(crtc), false); 5076 } 5077 5078 for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) { 5079 const struct intel_crtc_state *crtc_state = 5080 to_intel_crtc_state(crtc->base.state); 5081 5082 /* retrain 
on the MST master transcoder */
		if (DISPLAY_VER(dev_priv) >= 12 &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
		    !intel_dp_mst_is_master_trans(crtc_state))
			continue;

		intel_dp_check_frl_training(intel_dp);
		intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
		intel_dp_start_link_train(intel_dp, crtc_state);
		intel_dp_stop_link_train(intel_dp, crtc_state);
		break;
	}

	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Keep underrun reporting disabled until things are stable */
		intel_crtc_wait_for_next_vblank(crtc);

		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      intel_crtc_pch_transcoder(crtc), true);
	}

	return 0;
}

/*
 * Collect into @pipe_mask the pipes of all active, fully committed CRTCs
 * driven by @intel_dp, locking each CRTC via @ctx. Returns 0 on success or
 * a -errno from drm_modeset_lock() (e.g. -EDEADLK, which the caller must
 * back off on and retry).
 */
static int intel_dp_prep_phy_test(struct intel_dp *intel_dp,
				  struct drm_modeset_acquire_ctx *ctx,
				  u8 *pipe_mask)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	int ret = 0;

	*pipe_mask = 0;

	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state =
			connector->base.state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!intel_dp_has_connector(intel_dp, conn_state))
			continue;

		crtc = to_intel_crtc(conn_state->crtc);
		if (!crtc)
			continue;

		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			break;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));

		if (!crtc_state->hw.active)
			continue;

		/* Skip CRTCs whose last commit has not reached the HW yet */
		if (conn_state->commit &&
		    !try_wait_for_completion(&conn_state->commit->hw_done))
			continue;

		*pipe_mask |= BIT(crtc->pipe);
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}

/*
 * Run the requested DP PHY compliance test pattern on the first relevant
 * CRTC driven by @encoder, taking the required modeset locks through @ctx.
 */
static int intel_dp_do_phy_test(struct intel_encoder *encoder,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc;
	u8 pipe_mask;
	int ret;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	ret = intel_dp_prep_phy_test(intel_dp, ctx, &pipe_mask);
	if (ret)
		return ret;

	if (pipe_mask == 0)
		return 0;

	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] PHY test\n",
		    encoder->base.base.id, encoder->base.name);

	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* test on the MST master transcoder */
		if (DISPLAY_VER(dev_priv) >= 12 &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
		    !intel_dp_mst_is_master_trans(crtc_state))
			continue;

		intel_dp_process_phy_request(intel_dp, crtc_state);
		break;
	}

	return 0;
}

/*
 * Entry point for servicing a pending DP PHY compliance test request,
 * retrying with modeset-lock backoff until all locks are acquired.
 */
void intel_dp_phy_test(struct intel_encoder *encoder)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);

	for (;;) {
		ret = intel_dp_do_phy_test(encoder, &ctx);

		if (ret == -EDEADLK) {
			drm_modeset_backoff(&ctx);
			continue;
		}

		break;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_WARN(encoder->base.dev, ret,
		 "Acquiring modeset locks failed with %i\n", ret);
}

static void
intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 val;

	/* DEVICE_SERVICE_IRQ_VECTOR only exists on DPCD 1.1+ sinks */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
		return;

	/* Ack the pending IRQs by writing the read value back */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);

	if (val & DP_AUTOMATED_TEST_REQUEST)
		intel_dp_handle_test_request(intel_dp);

	if (val & DP_CP_IRQ)
		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);

	if (val & DP_SINK_SPECIFIC_IRQ)
		drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
}

/*
 * Handle DP_LINK_SERVICE_IRQ_VECTOR_ESI0 events (DP tunneling, HDMI link
 * status change). Returns %true if a full connector reprobe is needed.
 */
static bool intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	bool reprobe_needed = false;
	u8 val;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return false;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val)
		return false;

	if ((val & DP_TUNNELING_IRQ) &&
	    drm_dp_tunnel_handle_irq(i915->display.dp_tunnel_mgr,
				     &intel_dp->aux))
		reprobe_needed = true;

	/* Ack the IRQs; bail with the current verdict if the ack fails */
	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1)
		return reprobe_needed;

	if (val & HDMI_LINK_STATUS_CHANGED)
		intel_dp_handle_hdmi_link_status_change(intel_dp);

	return reprobe_needed;
}

/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 *
 * intel_dp_short_pulse - handles short pulse interrupts
 * when full detection is not required.
 * Returns %true if short pulse is handled and full detection
 * is NOT required and %false otherwise.
 */
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 old_sink_count = intel_dp->sink_count;
	bool reprobe_needed = false;
	bool ret;

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

	/*
	 * Now read the DPCD to see if it's actually running
	 * If the current value of sink count doesn't match with
	 * the value that was stored earlier or dpcd read failed
	 * we need to do full detection
	 */
	ret = intel_dp_get_dpcd(intel_dp);

	if ((old_sink_count != intel_dp->sink_count) || !ret) {
		/* No need to proceed if we are going to do full detect */
		return false;
	}

	intel_dp_check_device_service_irq(intel_dp);
	reprobe_needed = intel_dp_check_link_service_irq(intel_dp);

	/* Handle CEC interrupts, if any */
	drm_dp_cec_irq(&intel_dp->aux);

	/* defer to the hotplug work for link retraining if needed */
	if (intel_dp_needs_link_retrain(intel_dp))
		return false;

	intel_psr_short_pulse(intel_dp);

	switch (intel_dp->compliance.test_type) {
	case DP_TEST_LINK_TRAINING:
		drm_dbg_kms(&dev_priv->drm,
			    "Link Training Compliance Test requested\n");
		/* Send a Hotplug Uevent to userspace to start modeset */
		drm_kms_helper_hotplug_event(&dev_priv->drm);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		drm_dbg_kms(&dev_priv->drm,
			    "PHY test pattern Compliance Test requested\n");
		/*
		 * Schedule long hpd to do the test
		 *
		 * FIXME get rid of the ad-hoc phy test modeset code
		 * and properly incorporate it into the normal modeset.
		 */
		reprobe_needed = true;
	}

	return !reprobe_needed;
}

/* XXX this is probably wrong for multiple downstream ports */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u8 *dpcd = intel_dp->dpcd;
	u8 type;

	if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp)))
		return connector_status_connected;

	lspcon_resume(dig_port);

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!drm_dp_is_branch(dpcd))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp_has_sink_count(intel_dp) &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		return intel_dp->sink_count ?
			connector_status_connected : connector_status_disconnected;
	}

	if (intel_dp_can_mst(intel_dp))
		return connector_status_connected;

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}

/* eDP panels are permanently attached; nothing to probe */
static enum drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
	return connector_status_connected;
}

void intel_digital_port_lock(struct intel_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);

	/* lock() is optional; only some port types provide it */
	if (dig_port->lock)
		dig_port->lock(dig_port);
}

void intel_digital_port_unlock(struct intel_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);

	if (dig_port->unlock)
		dig_port->unlock(dig_port);
}

/*
 * intel_digital_port_connected_locked - is the specified port connected?
 * @encoder: intel_encoder
 *
 * In cases where there's a connector physically connected but it can't be used
 * by our hardware we also return false, since the rest of the driver should
 * pretty much treat the port as disconnected. This is relevant for type-C
 * (starting on ICL) where there's ownership involved.
 *
 * The caller must hold the lock acquired by calling intel_digital_port_lock()
 * when calling this function.
 *
 * Return %true if port is connected, %false otherwise.
 */
bool intel_digital_port_connected_locked(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	bool is_glitch_free = intel_tc_port_handles_hpd_glitches(dig_port);
	bool is_connected = false;
	intel_wakeref_t wakeref;

	with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
		unsigned long wait_expires = jiffies + msecs_to_jiffies_timeout(4);

		/*
		 * Poll for up to ~4ms unless the port handles HPD glitches
		 * itself, to ride out a momentarily deasserted live state.
		 */
		do {
			is_connected = dig_port->connected(encoder);
			if (is_connected || is_glitch_free)
				break;
			usleep_range(10, 30);
		} while (time_before(jiffies, wait_expires));
	}

	return is_connected;
}

/* Locked wrapper around intel_digital_port_connected_locked() */
bool intel_digital_port_connected(struct intel_encoder *encoder)
{
	bool ret;

	intel_digital_port_lock(encoder);
	ret = intel_digital_port_connected_locked(encoder);
	intel_digital_port_unlock(encoder);

	return ret;
}

static const struct drm_edid *
intel_dp_get_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	const struct drm_edid *fixed_edid = connector->panel.fixed_edid;

	/* Use panel fixed edid if we have one */
	if (fixed_edid) {
		/* invalid edid */
		if (IS_ERR(fixed_edid))
			return NULL;

		return drm_edid_dup(fixed_edid);
	}

	return drm_edid_read_ddc(&connector->base, &intel_dp->aux.ddc);
}

/*
 * Cache the downstream facing port (DFP) limits used for mode validation,
 * derived from the branch device DPCD and the sink's EDID.
 */
static void
intel_dp_update_dfp(struct intel_dp *intel_dp,
		    const struct drm_edid *drm_edid)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;

	intel_dp->dfp.max_bpc =
		drm_dp_downstream_max_bpc(intel_dp->dpcd,
					  intel_dp->downstream_ports, drm_edid);

	intel_dp->dfp.max_dotclock =
		drm_dp_downstream_max_dotclock(intel_dp->dpcd,
					       intel_dp->downstream_ports);

	intel_dp->dfp.min_tmds_clock =
		drm_dp_downstream_min_tmds_clock(intel_dp->dpcd,
						 intel_dp->downstream_ports,
						 drm_edid);
	intel_dp->dfp.max_tmds_clock =
		drm_dp_downstream_max_tmds_clock(intel_dp->dpcd,
						 intel_dp->downstream_ports,
						 drm_edid);

	intel_dp->dfp.pcon_max_frl_bw =
		drm_dp_get_pcon_max_frl_bw(intel_dp->dpcd,
					   intel_dp->downstream_ports);

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d, PCON Max FRL BW %dGbps\n",
		    connector->base.base.id, connector->base.name,
		    intel_dp->dfp.max_bpc,
		    intel_dp->dfp.max_dotclock,
		    intel_dp->dfp.min_tmds_clock,
		    intel_dp->dfp.max_tmds_clock,
		    intel_dp->dfp.pcon_max_frl_bw);

	intel_dp_get_pcon_dsc_cap(intel_dp);
}

/*
 * Can this connector output YCbCr 4:2:0 — natively (or passed through a
 * branch device), via a DFP RGB->4:2:0 converter, or via a DFP
 * 4:4:4->4:2:0 converter?
 */
static bool
intel_dp_can_ycbcr420(struct intel_dp *intel_dp)
{
	if (source_can_output(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR420) &&
	    (!drm_dp_is_branch(intel_dp->dpcd) || intel_dp->dfp.ycbcr420_passthrough))
		return true;

	if (source_can_output(intel_dp, INTEL_OUTPUT_FORMAT_RGB) &&
	    dfp_can_convert_from_rgb(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR420))
		return true;

	if (source_can_output(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR444) &&
	    dfp_can_convert_from_ycbcr444(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR420))
		return true;

	return false;
}

/* Refresh the cached DFP YCbCr conversion capabilities */
static void
intel_dp_update_420(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;

	intel_dp->dfp.ycbcr420_passthrough =
		drm_dp_downstream_420_passthrough(intel_dp->dpcd,
						  intel_dp->downstream_ports);
	/* on-board LSPCON always assumed to support 4:4:4->4:2:0 conversion */
	intel_dp->dfp.ycbcr_444_to_420 =
		dp_to_dig_port(intel_dp)->lspcon.active ||
		drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd,
							intel_dp->downstream_ports);
	intel_dp->dfp.rgb_to_ycbcr =
		drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
							  intel_dp->downstream_ports,
							  DP_DS_HDMI_BT709_RGB_YCBCR_CONV);

	connector->base.ycbcr_420_allowed = intel_dp_can_ycbcr420(intel_dp);

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] RGB->YcbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
		    connector->base.base.id, connector->base.name,
		    str_yes_no(intel_dp->dfp.rgb_to_ycbcr),
		    str_yes_no(connector->base.ycbcr_420_allowed),
		    str_yes_no(intel_dp->dfp.ycbcr_444_to_420));
}

/*
 * Read the sink's EDID, update the connector's display info from it and
 * refresh all state derived from it (DFP limits, YCbCr caps, VRR, CEC).
 */
static void
intel_dp_set_edid(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	const struct drm_edid *drm_edid;
	bool vrr_capable;

	intel_dp_unset_edid(intel_dp);
	drm_edid = intel_dp_get_edid(intel_dp);
	connector->detect_edid = drm_edid;

	/* Below we depend on display info having been updated */
	drm_edid_connector_update(&connector->base, drm_edid);

	vrr_capable = intel_vrr_is_capable(connector);
	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] VRR capable: %s\n",
		    connector->base.base.id, connector->base.name, str_yes_no(vrr_capable));
	drm_connector_set_vrr_capable_property(&connector->base, vrr_capable);

	intel_dp_update_dfp(intel_dp, drm_edid);
	intel_dp_update_420(intel_dp);

	drm_dp_cec_attach(&intel_dp->aux,
			  connector->base.display_info.source_physical_address);
}

/* Drop the cached EDID and reset all state derived from it */
static void
intel_dp_unset_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;

	drm_dp_cec_unset_edid(&intel_dp->aux);
	drm_edid_free(connector->detect_edid);
	connector->detect_edid = NULL;

	intel_dp->dfp.max_bpc = 0;
	intel_dp->dfp.max_dotclock = 0;
	intel_dp->dfp.min_tmds_clock = 0;
	intel_dp->dfp.max_tmds_clock = 0;

	intel_dp->dfp.pcon_max_frl_bw = 0;

	intel_dp->dfp.ycbcr_444_to_420 = false;
	connector->base.ycbcr_420_allowed = false;

	drm_connector_set_vrr_capable_property(&connector->base,
					       false);
}

/* Read the sink's DSC capabilities if the platform supports DSC at all */
static void
intel_dp_detect_dsc_caps(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
	if (!HAS_DSC(i915))
		return;

	if (intel_dp_is_edp(intel_dp))
		intel_edp_get_dsc_sink_cap(intel_dp->edp_dpcd[0],
					   connector);
	else
		intel_dp_get_dsc_sink_cap(intel_dp->dpcd[DP_DPCD_REV],
					  connector);
}

/*
 * drm_connector_helper_funcs .detect_ctx hook: probe the sink and refresh
 * all cached capabilities (DPCD, DSC, tunnels, MST, EDID). Returns the
 * connector status, or a negative error such as -EDEADLK which the caller
 * must propagate for modeset-lock backoff.
 */
static int
intel_dp_detect(struct drm_connector *connector,
		struct drm_modeset_acquire_ctx *ctx,
		bool force)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_connector *intel_connector =
		to_intel_connector(connector);
	struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum drm_connector_status status;
	int ret;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.id, connector->name);
	drm_WARN_ON(&dev_priv->drm,
		    !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));

	if (!intel_display_device_enabled(dev_priv))
		return connector_status_disconnected;

	if (!intel_display_driver_check_access(dev_priv))
		return connector->status;

	/* Can't disconnect eDP */
	if (intel_dp_is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(encoder))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status == connector_status_disconnected) {
		/* Reset all sink-derived state on disconnect */
		memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
		memset(intel_connector->dp.dsc_dpcd, 0, sizeof(intel_connector->dp.dsc_dpcd));
		intel_dp->psr.sink_panel_replay_support = false;

		if (intel_dp->is_mst) {
			drm_dbg_kms(&dev_priv->drm,
				    "MST device may have disappeared %d vs %d\n",
				    intel_dp->is_mst,
				    intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
		}

		intel_dp_tunnel_disconnect(intel_dp);

		goto out;
	}

	ret = intel_dp_tunnel_detect(intel_dp, ctx);
	if (ret == -EDEADLK)
		return ret;

	/* A newly detected tunnel invalidates the previous probe state */
	if (ret == 1)
		intel_connector->base.epoch_counter++;

	if (!intel_dp_is_edp(intel_dp))
		intel_psr_init_dpcd(intel_dp);

	intel_dp_detect_dsc_caps(intel_dp, intel_connector);

	intel_dp_configure_mst(intel_dp);

	/*
	 * TODO: Reset link params when switching to MST mode, until MST
	 * supports link training fallback params.
	 */
	if (intel_dp->reset_link_params || intel_dp->is_mst) {
		intel_dp_reset_max_link_params(intel_dp);
		intel_dp->reset_link_params = false;
	}

	intel_dp_print_rates(intel_dp);

	if (intel_dp->is_mst) {
		/*
		 * If we are in MST mode then this connector
		 * won't appear connected or have anything
		 * with EDID on it
		 */
		status = connector_status_disconnected;
		goto out;
	}

	/*
	 * Some external monitors do not signal loss of link synchronization
	 * with an IRQ_HPD, so force a link status check.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		ret = intel_dp_retrain_link(encoder, ctx);
		if (ret)
			return ret;
	}

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);
	if (intel_dp_is_edp(intel_dp) ||
	    to_intel_connector(connector)->detect_edid)
		status = connector_status_connected;

	intel_dp_check_device_service_irq(intel_dp);

out:
	if (status != connector_status_connected && !intel_dp->is_mst)
		intel_dp_unset_edid(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		drm_dp_set_subconnector_property(connector,
						 status,
						 intel_dp->dpcd,
						 intel_dp->downstream_ports);
	return status;
}

/* drm_connector_funcs .force hook: refresh the EDID of a forced connector */
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.id, connector->name);

	if (!intel_display_driver_check_access(dev_priv))
		return;

	intel_dp_unset_edid(intel_dp);

	if (connector->status != connector_status_connected)
		return;

	intel_dp_set_edid(intel_dp);
}

static int intel_dp_get_modes(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	int num_modes;

	/* drm_edid_connector_update() done in ->detect() or ->force() */
	num_modes = drm_edid_connector_add_modes(connector);

	/* Also add fixed mode, which may or may not be
	   present in EDID */
	if (intel_dp_is_edp(intel_attached_dp(intel_connector)))
		num_modes += intel_panel_get_modes(intel_connector);

	if (num_modes)
		return num_modes;

	/* No EDID: fall back to the branch device's reported downstream mode */
	if (!intel_connector->detect_edid) {
		struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
		struct drm_display_mode *mode;

		mode = drm_dp_downstream_mode(connector->dev,
					      intel_dp->dpcd,
					      intel_dp->downstream_ports);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			num_modes++;
		}
	}

	return num_modes;
}

/*
 * drm_connector_funcs .late_register hook: register the AUX channel, CEC
 * adapter and (for LSPCON) the HDR output metadata property.
 */
static int
intel_dp_connector_register(struct drm_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->dev);
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_lspcon *lspcon = &dig_port->lspcon;
	int ret;

	ret = intel_connector_register(connector);
	if (ret)
		return ret;

	drm_dbg_kms(&i915->drm, "registering %s bus for %s\n",
		    intel_dp->aux.name, connector->kdev->kobj.name);

	intel_dp->aux.dev = connector->kdev;
	ret = drm_dp_aux_register(&intel_dp->aux);
	if (!ret)
		drm_dp_cec_register_connector(&intel_dp->aux, connector);

	if (!intel_bios_encoder_is_lspcon(dig_port->base.devdata))
		return ret;

	/*
	 * ToDo: Clean this up to handle lspcon init and resume more
	 * efficiently and streamlined.
	 */
	if (lspcon_init(dig_port)) {
		lspcon_detect_hdr_capability(lspcon);
		if (lspcon->hdr_supported)
			drm_connector_attach_hdr_output_metadata_property(connector);
	}

	return ret;
}

static void
intel_dp_connector_unregister(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));

	drm_dp_cec_unregister_connector(&intel_dp->aux);
	drm_dp_aux_unregister(&intel_dp->aux);
	intel_connector_unregister(connector);
}

/* Mirror the committed DSC state into the connector for state readout */
void intel_dp_connector_sync_state(struct intel_connector *connector,
				   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	if (crtc_state && crtc_state->dsc.compression_enable) {
		drm_WARN_ON(&i915->drm, !connector->dp.dsc_decompression_aux);
		connector->dp.dsc_decompression_enabled = true;
	} else {
		connector->dp.dsc_decompression_enabled = false;
	}
}

void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
	struct intel_dp *intel_dp = &dig_port->dp;

	intel_dp_mst_encoder_cleanup(dig_port);

	intel_dp_tunnel_destroy(intel_dp);

	intel_pps_vdd_off_sync(intel_dp);

	/*
	 * Ensure power off delay is respected on module remove, so that we can
	 * reduce delays at driver probe. See pps_init_timestamps().
	 */
	intel_pps_wait_power_cycle(intel_dp);

	intel_dp_aux_fini(intel_dp);
}

void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);

	intel_pps_vdd_off_sync(intel_dp);

	intel_dp_tunnel_suspend(intel_dp);
}

void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);

	/* Let the panel power sequencer finish its power-off cycle */
	intel_pps_wait_power_cycle(intel_dp);
}

/*
 * Pull every connector belonging to @tile_group_id into @state and flag
 * its CRTC for a modeset, so a tiled display is reconfigured as one unit.
 */
static int intel_modeset_tile_group(struct intel_atomic_state *state,
				    int tile_group_id)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;
	int ret = 0;

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!connector->has_tile ||
		    connector->tile_group->id != tile_group_id)
			continue;

		conn_state = drm_atomic_get_connector_state(&state->base,
							    connector);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			break;
		}

		crtc = to_intel_crtc(conn_state->crtc);

		if (!crtc)
			continue;

		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			break;
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}

/*
 * Flag every enabled CRTC whose transcoder is in @transcoders for a
 * modeset, pulling its connectors and planes into @state.
 */
static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	if (transcoders == 0)
		return 0;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state;
		int ret;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->hw.enable)
			continue;

		if (!(transcoders & BIT(crtc_state->cpu_transcoder)))
			continue;

		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			return ret;

		/* Each transcoder belongs to at most one CRTC */
		transcoders &= ~BIT(crtc_state->cpu_transcoder);
	}

	drm_WARN_ON(&dev_priv->drm, transcoders != 0);

	return 0;
}

/*
 * For a connector that was part of a port-sync group, flag the CRTCs of
 * its old master and slave transcoders for a modeset.
 */
static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
				      struct drm_connector *connector)
{
	const struct drm_connector_state *old_conn_state =
		drm_atomic_get_old_connector_state(&state->base, connector);
	const struct intel_crtc_state *old_crtc_state;
	struct intel_crtc *crtc;
	u8 transcoders;

	crtc = to_intel_crtc(old_conn_state->crtc);
	if (!crtc)
		return 0;

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);

	if (!old_crtc_state->hw.active)
		return 0;

	transcoders = old_crtc_state->sync_mode_slaves_mask;
	if (old_crtc_state->master_transcoder != INVALID_TRANSCODER)
		transcoders |= BIT(old_crtc_state->master_transcoder);

	return intel_modeset_affected_transcoders(state,
						  transcoders);
}

/*
 * drm_connector_helper_funcs .atomic_check hook: validate connector state
 * and pull in the extra CRTCs (tile group, port sync) a change affects.
 */
static int intel_dp_connector_atomic_check(struct drm_connector *conn,
					   struct drm_atomic_state *_state)
{
	struct drm_i915_private *dev_priv = to_i915(conn->dev);
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(_state, conn);
	struct
		intel_connector *intel_conn = to_intel_connector(conn);
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_conn->encoder);
	int ret;

	ret = intel_digital_connector_atomic_check(conn, &state->base);
	if (ret)
		return ret;

	if (intel_dp_mst_source_support(intel_dp)) {
		ret = drm_dp_mst_root_conn_atomic_check(conn_state, &intel_dp->mst_mgr);
		if (ret)
			return ret;
	}

	if (!intel_connector_needs_modeset(state, conn))
		return 0;

	ret = intel_dp_tunnel_atomic_check_state(state,
						 intel_dp,
						 intel_conn);
	if (ret)
		return ret;

	/*
	 * We don't enable port sync on BDW due to missing w/as and
	 * due to not having adjusted the modeset sequence appropriately.
	 */
	if (DISPLAY_VER(dev_priv) < 9)
		return 0;

	if (conn->has_tile) {
		ret = intel_modeset_tile_group(state, conn->tile_group->id);
		if (ret)
			return ret;
	}

	return intel_modeset_synced_crtcs(state, conn);
}

/*
 * Out-of-band hotplug notification (e.g. from the Type-C subsystem):
 * record the new state and kick the hotplug worker if it changed.
 */
static void intel_dp_oob_hotplug_event(struct drm_connector *connector,
				       enum drm_connector_status hpd_state)
{
	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
	struct drm_i915_private *i915 = to_i915(connector->dev);
	bool hpd_high = hpd_state == connector_status_connected;
	unsigned int hpd_pin = encoder->hpd_pin;
	bool need_work = false;

	spin_lock_irq(&i915->irq_lock);
	if (hpd_high != test_bit(hpd_pin, &i915->display.hotplug.oob_hotplug_last_state)) {
		i915->display.hotplug.event_bits |= BIT(hpd_pin);

		__assign_bit(hpd_pin, &i915->display.hotplug.oob_hotplug_last_state, hpd_high);
		need_work = true;
	}
	spin_unlock_irq(&i915->irq_lock);

	/* Schedule outside the irq_lock critical section */
	if (need_work)
		intel_hpd_schedule_detection(i915);
}

static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_digital_connector_atomic_get_property,
	.atomic_set_property = intel_digital_connector_atomic_set_property,
	.late_register = intel_dp_connector_register,
	.early_unregister = intel_dp_connector_unregister,
	.destroy = intel_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
	.oob_hotplug_event = intel_dp_oob_hotplug_event,
};

static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.detect_ctx = intel_dp_detect,
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.atomic_check = intel_dp_connector_atomic_check,
};

/*
 * HPD IRQ handler for this digital port. Returns IRQ_NONE when the hotplug
 * work must run a full detect cycle, IRQ_HANDLED otherwise.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_dp *intel_dp = &dig_port->dp;
	u8 dpcd[DP_RECEIVER_CAP_SIZE];

	if (dig_port->base.type == INTEL_OUTPUT_EDP &&
	    (long_hpd || !intel_pps_have_panel_power_or_vdd(intel_dp))) {
		/*
		 * vdd off can generate a long/short pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		drm_dbg_kms(&i915->drm,
			    "ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
			    long_hpd ? "long" : "short",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		return IRQ_HANDLED;
	}

	drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name,
		    long_hpd ? "long" : "short");

	/*
	 * TBT DP tunnels require the GFX driver to read out the DPRX caps in
	 * response to long HPD pulses. The DP hotplug handler does that,
	 * however the hotplug handler may be blocked by another
	 * connector's/encoder's hotplug handler. Since the TBT CM may not
	 * complete the DP tunnel BW request for the latter connector/encoder
	 * waiting for this encoder's DPRX read, perform a dummy read here.
	 */
	if (long_hpd)
		intel_dp_read_dprx_caps(intel_dp, dpcd);

	if (long_hpd) {
		intel_dp->reset_link_params = true;
		return IRQ_NONE;
	}

	if (intel_dp->is_mst) {
		if (!intel_dp_check_mst_status(intel_dp))
			return IRQ_NONE;
	} else if (!intel_dp_short_pulse(intel_dp)) {
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

static bool _intel_dp_is_port_edp(struct drm_i915_private *dev_priv,
				  const struct intel_bios_encoder_data *devdata,
				  enum port port)
{
	/*
	 * eDP not supported on g4x. so bail out early just
	 * for a bit extra safety in case the VBT is bonkers.
	 */
	if (DISPLAY_VER(dev_priv) < 5)
		return false;

	/* Before gen9, port A is always eDP */
	if (DISPLAY_VER(dev_priv) < 9 && port == PORT_A)
		return true;

	return devdata && intel_bios_encoder_supports_edp(devdata);
}

bool intel_dp_is_port_edp(struct drm_i915_private *i915, enum port port)
{
	const struct intel_bios_encoder_data *devdata =
		intel_bios_encoder_data_lookup(i915, port);

	return _intel_dp_is_port_edp(i915, devdata, port);
}

/* Does this platform/port combination support the gamut metadata DIP? */
static bool
has_gamut_metadata_dip(struct intel_encoder *encoder)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	enum port port = encoder->port;

	if (intel_bios_encoder_is_lspcon(encoder->devdata))
		return false;

	if (DISPLAY_VER(i915) >= 11)
		return true;

	if (port == PORT_A)
		return false;

	if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
	    DISPLAY_VER(i915) >= 9)
		return true;

	return false;
}

static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) 6216 { 6217 struct drm_i915_private *dev_priv = to_i915(connector->dev); 6218 enum port port = dp_to_dig_port(intel_dp)->base.port; 6219 6220 if (!intel_dp_is_edp(intel_dp)) 6221 drm_connector_attach_dp_subconnector_property(connector); 6222 6223 if (!IS_G4X(dev_priv) && port != PORT_A) 6224 intel_attach_force_audio_property(connector); 6225 6226 intel_attach_broadcast_rgb_property(connector); 6227 if (HAS_GMCH(dev_priv)) 6228 drm_connector_attach_max_bpc_property(connector, 6, 10); 6229 else if (DISPLAY_VER(dev_priv) >= 5) 6230 drm_connector_attach_max_bpc_property(connector, 6, 12); 6231 6232 /* Register HDMI colorspace for case of lspcon */ 6233 if (intel_bios_encoder_is_lspcon(dp_to_dig_port(intel_dp)->base.devdata)) { 6234 drm_connector_attach_content_type_property(connector); 6235 intel_attach_hdmi_colorspace_property(connector); 6236 } else { 6237 intel_attach_dp_colorspace_property(connector); 6238 } 6239 6240 if (has_gamut_metadata_dip(&dp_to_dig_port(intel_dp)->base)) 6241 drm_connector_attach_hdr_output_metadata_property(connector); 6242 6243 if (HAS_VRR(dev_priv)) 6244 drm_connector_attach_vrr_capable_property(connector); 6245 } 6246 6247 static void 6248 intel_edp_add_properties(struct intel_dp *intel_dp) 6249 { 6250 struct intel_connector *connector = intel_dp->attached_connector; 6251 struct drm_i915_private *i915 = to_i915(connector->base.dev); 6252 const struct drm_display_mode *fixed_mode = 6253 intel_panel_preferred_fixed_mode(connector); 6254 6255 intel_attach_scaling_mode_property(&connector->base); 6256 6257 drm_connector_set_panel_orientation_with_quirk(&connector->base, 6258 i915->display.vbt.orientation, 6259 fixed_mode->hdisplay, 6260 fixed_mode->vdisplay); 6261 } 6262 6263 static void intel_edp_backlight_setup(struct intel_dp *intel_dp, 6264 struct intel_connector *connector) 6265 { 6266 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 6267 enum pipe 
pipe = INVALID_PIPE; 6268 6269 if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { 6270 /* 6271 * Figure out the current pipe for the initial backlight setup. 6272 * If the current pipe isn't valid, try the PPS pipe, and if that 6273 * fails just assume pipe A. 6274 */ 6275 pipe = vlv_active_pipe(intel_dp); 6276 6277 if (pipe != PIPE_A && pipe != PIPE_B) 6278 pipe = intel_dp->pps.pps_pipe; 6279 6280 if (pipe != PIPE_A && pipe != PIPE_B) 6281 pipe = PIPE_A; 6282 } 6283 6284 intel_backlight_setup(connector, pipe); 6285 } 6286 6287 static bool intel_edp_init_connector(struct intel_dp *intel_dp, 6288 struct intel_connector *intel_connector) 6289 { 6290 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 6291 struct drm_connector *connector = &intel_connector->base; 6292 struct drm_display_mode *fixed_mode; 6293 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 6294 bool has_dpcd; 6295 const struct drm_edid *drm_edid; 6296 6297 if (!intel_dp_is_edp(intel_dp)) 6298 return true; 6299 6300 /* 6301 * On IBX/CPT we may get here with LVDS already registered. Since the 6302 * driver uses the only internal power sequencer available for both 6303 * eDP and LVDS bail out early in this case to prevent interfering 6304 * with an already powered-on LVDS power sequencer. 6305 */ 6306 if (intel_get_lvds_encoder(dev_priv)) { 6307 drm_WARN_ON(&dev_priv->drm, 6308 !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))); 6309 drm_info(&dev_priv->drm, 6310 "LVDS was detected, not registering eDP\n"); 6311 6312 return false; 6313 } 6314 6315 intel_bios_init_panel_early(dev_priv, &intel_connector->panel, 6316 encoder->devdata); 6317 6318 if (!intel_pps_init(intel_dp)) { 6319 drm_info(&dev_priv->drm, 6320 "[ENCODER:%d:%s] unusable PPS, disabling eDP\n", 6321 encoder->base.base.id, encoder->base.name); 6322 /* 6323 * The BIOS may have still enabled VDD on the PPS even 6324 * though it's unusable. Make sure we turn it back off 6325 * and to release the power domain references/etc. 
6326 */ 6327 goto out_vdd_off; 6328 } 6329 6330 /* 6331 * Enable HPD sense for live status check. 6332 * intel_hpd_irq_setup() will turn it off again 6333 * if it's no longer needed later. 6334 * 6335 * The DPCD probe below will make sure VDD is on. 6336 */ 6337 intel_hpd_enable_detection(encoder); 6338 6339 /* Cache DPCD and EDID for edp. */ 6340 has_dpcd = intel_edp_init_dpcd(intel_dp, intel_connector); 6341 6342 if (!has_dpcd) { 6343 /* if this fails, presume the device is a ghost */ 6344 drm_info(&dev_priv->drm, 6345 "[ENCODER:%d:%s] failed to retrieve link info, disabling eDP\n", 6346 encoder->base.base.id, encoder->base.name); 6347 goto out_vdd_off; 6348 } 6349 6350 /* 6351 * VBT and straps are liars. Also check HPD as that seems 6352 * to be the most reliable piece of information available. 6353 * 6354 * ... expect on devices that forgot to hook HPD up for eDP 6355 * (eg. Acer Chromebook C710), so we'll check it only if multiple 6356 * ports are attempting to use the same AUX CH, according to VBT. 6357 */ 6358 if (intel_bios_dp_has_shared_aux_ch(encoder->devdata)) { 6359 /* 6360 * If this fails, presume the DPCD answer came 6361 * from some other port using the same AUX CH. 6362 * 6363 * FIXME maybe cleaner to check this before the 6364 * DPCD read? Would need sort out the VDD handling... 6365 */ 6366 if (!intel_digital_port_connected(encoder)) { 6367 drm_info(&dev_priv->drm, 6368 "[ENCODER:%d:%s] HPD is down, disabling eDP\n", 6369 encoder->base.base.id, encoder->base.name); 6370 goto out_vdd_off; 6371 } 6372 6373 /* 6374 * Unfortunately even the HPD based detection fails on 6375 * eg. Asus B360M-A (CFL+CNP), so as a last resort fall 6376 * back to checking for a VGA branch device. Only do this 6377 * on known affected platforms to minimize false positives. 
6378 */ 6379 if (DISPLAY_VER(dev_priv) == 9 && drm_dp_is_branch(intel_dp->dpcd) && 6380 (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) == 6381 DP_DWN_STRM_PORT_TYPE_ANALOG) { 6382 drm_info(&dev_priv->drm, 6383 "[ENCODER:%d:%s] VGA converter detected, disabling eDP\n", 6384 encoder->base.base.id, encoder->base.name); 6385 goto out_vdd_off; 6386 } 6387 } 6388 6389 mutex_lock(&dev_priv->drm.mode_config.mutex); 6390 drm_edid = drm_edid_read_ddc(connector, connector->ddc); 6391 if (!drm_edid) { 6392 /* Fallback to EDID from ACPI OpRegion, if any */ 6393 drm_edid = intel_opregion_get_edid(intel_connector); 6394 if (drm_edid) 6395 drm_dbg_kms(&dev_priv->drm, 6396 "[CONNECTOR:%d:%s] Using OpRegion EDID\n", 6397 connector->base.id, connector->name); 6398 } 6399 if (drm_edid) { 6400 if (drm_edid_connector_update(connector, drm_edid) || 6401 !drm_edid_connector_add_modes(connector)) { 6402 drm_edid_connector_update(connector, NULL); 6403 drm_edid_free(drm_edid); 6404 drm_edid = ERR_PTR(-EINVAL); 6405 } 6406 } else { 6407 drm_edid = ERR_PTR(-ENOENT); 6408 } 6409 6410 intel_bios_init_panel_late(dev_priv, &intel_connector->panel, encoder->devdata, 6411 IS_ERR(drm_edid) ? 
NULL : drm_edid); 6412 6413 intel_panel_add_edid_fixed_modes(intel_connector, true); 6414 6415 /* MSO requires information from the EDID */ 6416 intel_edp_mso_init(intel_dp); 6417 6418 /* multiply the mode clock and horizontal timings for MSO */ 6419 list_for_each_entry(fixed_mode, &intel_connector->panel.fixed_modes, head) 6420 intel_edp_mso_mode_fixup(intel_connector, fixed_mode); 6421 6422 /* fallback to VBT if available for eDP */ 6423 if (!intel_panel_preferred_fixed_mode(intel_connector)) 6424 intel_panel_add_vbt_lfp_fixed_mode(intel_connector); 6425 6426 mutex_unlock(&dev_priv->drm.mode_config.mutex); 6427 6428 if (!intel_panel_preferred_fixed_mode(intel_connector)) { 6429 drm_info(&dev_priv->drm, 6430 "[ENCODER:%d:%s] failed to find fixed mode for the panel, disabling eDP\n", 6431 encoder->base.base.id, encoder->base.name); 6432 goto out_vdd_off; 6433 } 6434 6435 intel_panel_init(intel_connector, drm_edid); 6436 6437 intel_edp_backlight_setup(intel_dp, intel_connector); 6438 6439 intel_edp_add_properties(intel_dp); 6440 6441 intel_pps_init_late(intel_dp); 6442 6443 return true; 6444 6445 out_vdd_off: 6446 intel_pps_vdd_off_sync(intel_dp); 6447 6448 return false; 6449 } 6450 6451 static void intel_dp_modeset_retry_work_fn(struct work_struct *work) 6452 { 6453 struct intel_connector *intel_connector; 6454 struct drm_connector *connector; 6455 6456 intel_connector = container_of(work, typeof(*intel_connector), 6457 modeset_retry_work); 6458 connector = &intel_connector->base; 6459 drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s]\n", connector->base.id, 6460 connector->name); 6461 6462 /* Grab the locks before changing connector property*/ 6463 mutex_lock(&connector->dev->mode_config.mutex); 6464 /* Set connector link status to BAD and send a Uevent to notify 6465 * userspace to do a modeset. 
6466 */ 6467 drm_connector_set_link_status_property(connector, 6468 DRM_MODE_LINK_STATUS_BAD); 6469 mutex_unlock(&connector->dev->mode_config.mutex); 6470 /* Send Hotplug uevent so userspace can reprobe */ 6471 drm_kms_helper_connector_hotplug_event(connector); 6472 6473 drm_connector_put(connector); 6474 } 6475 6476 void intel_dp_init_modeset_retry_work(struct intel_connector *connector) 6477 { 6478 INIT_WORK(&connector->modeset_retry_work, 6479 intel_dp_modeset_retry_work_fn); 6480 } 6481 6482 bool 6483 intel_dp_init_connector(struct intel_digital_port *dig_port, 6484 struct intel_connector *intel_connector) 6485 { 6486 struct drm_connector *connector = &intel_connector->base; 6487 struct intel_dp *intel_dp = &dig_port->dp; 6488 struct intel_encoder *intel_encoder = &dig_port->base; 6489 struct drm_device *dev = intel_encoder->base.dev; 6490 struct drm_i915_private *dev_priv = to_i915(dev); 6491 enum port port = intel_encoder->port; 6492 enum phy phy = intel_port_to_phy(dev_priv, port); 6493 int type; 6494 6495 /* Initialize the work for modeset in case of link train failure */ 6496 intel_dp_init_modeset_retry_work(intel_connector); 6497 6498 if (drm_WARN(dev, dig_port->max_lanes < 1, 6499 "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n", 6500 dig_port->max_lanes, intel_encoder->base.base.id, 6501 intel_encoder->base.name)) 6502 return false; 6503 6504 intel_dp->reset_link_params = true; 6505 intel_dp->pps.pps_pipe = INVALID_PIPE; 6506 intel_dp->pps.active_pipe = INVALID_PIPE; 6507 6508 /* Preserve the current hw state. */ 6509 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg); 6510 intel_dp->attached_connector = intel_connector; 6511 6512 if (_intel_dp_is_port_edp(dev_priv, intel_encoder->devdata, port)) { 6513 /* 6514 * Currently we don't support eDP on TypeC ports, although in 6515 * theory it could work on TypeC legacy ports. 
6516 */ 6517 drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy)); 6518 type = DRM_MODE_CONNECTOR_eDP; 6519 intel_encoder->type = INTEL_OUTPUT_EDP; 6520 6521 /* eDP only on port B and/or C on vlv/chv */ 6522 if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) || 6523 IS_CHERRYVIEW(dev_priv)) && 6524 port != PORT_B && port != PORT_C)) 6525 return false; 6526 } else { 6527 type = DRM_MODE_CONNECTOR_DisplayPort; 6528 } 6529 6530 intel_dp_set_default_sink_rates(intel_dp); 6531 intel_dp_set_default_max_sink_lane_count(intel_dp); 6532 6533 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 6534 intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp); 6535 6536 intel_dp_aux_init(intel_dp); 6537 intel_connector->dp.dsc_decompression_aux = &intel_dp->aux; 6538 6539 drm_dbg_kms(&dev_priv->drm, 6540 "Adding %s connector on [ENCODER:%d:%s]\n", 6541 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP", 6542 intel_encoder->base.base.id, intel_encoder->base.name); 6543 6544 drm_connector_init_with_ddc(dev, connector, &intel_dp_connector_funcs, 6545 type, &intel_dp->aux.ddc); 6546 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); 6547 6548 if (!HAS_GMCH(dev_priv) && DISPLAY_VER(dev_priv) < 12) 6549 connector->interlace_allowed = true; 6550 6551 intel_connector->polled = DRM_CONNECTOR_POLL_HPD; 6552 intel_connector->base.polled = intel_connector->polled; 6553 6554 intel_connector_attach_encoder(intel_connector, intel_encoder); 6555 6556 if (HAS_DDI(dev_priv)) 6557 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; 6558 else 6559 intel_connector->get_hw_state = intel_connector_get_hw_state; 6560 6561 if (!intel_edp_init_connector(intel_dp, intel_connector)) { 6562 intel_dp_aux_fini(intel_dp); 6563 goto fail; 6564 } 6565 6566 intel_dp_set_source_rates(intel_dp); 6567 intel_dp_set_common_rates(intel_dp); 6568 intel_dp_reset_max_link_params(intel_dp); 6569 6570 /* init MST on ports that can support it */ 6571 intel_dp_mst_encoder_init(dig_port, 6572 
intel_connector->base.base.id); 6573 6574 intel_dp_add_properties(intel_dp, connector); 6575 6576 if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) { 6577 int ret = intel_dp_hdcp_init(dig_port, intel_connector); 6578 if (ret) 6579 drm_dbg_kms(&dev_priv->drm, 6580 "HDCP init failed, skipping.\n"); 6581 } 6582 6583 intel_dp->colorimetry_support = 6584 intel_dp_get_colorimetry_status(intel_dp); 6585 6586 intel_dp->frl.is_trained = false; 6587 intel_dp->frl.trained_rate_gbps = 0; 6588 6589 intel_psr_init(intel_dp); 6590 6591 return true; 6592 6593 fail: 6594 intel_display_power_flush_work(dev_priv); 6595 drm_connector_cleanup(connector); 6596 6597 return false; 6598 } 6599 6600 void intel_dp_mst_suspend(struct drm_i915_private *dev_priv) 6601 { 6602 struct intel_encoder *encoder; 6603 6604 if (!HAS_DISPLAY(dev_priv)) 6605 return; 6606 6607 for_each_intel_encoder(&dev_priv->drm, encoder) { 6608 struct intel_dp *intel_dp; 6609 6610 if (encoder->type != INTEL_OUTPUT_DDI) 6611 continue; 6612 6613 intel_dp = enc_to_intel_dp(encoder); 6614 6615 if (!intel_dp_mst_source_support(intel_dp)) 6616 continue; 6617 6618 if (intel_dp->is_mst) 6619 drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr); 6620 } 6621 } 6622 6623 void intel_dp_mst_resume(struct drm_i915_private *dev_priv) 6624 { 6625 struct intel_encoder *encoder; 6626 6627 if (!HAS_DISPLAY(dev_priv)) 6628 return; 6629 6630 for_each_intel_encoder(&dev_priv->drm, encoder) { 6631 struct intel_dp *intel_dp; 6632 int ret; 6633 6634 if (encoder->type != INTEL_OUTPUT_DDI) 6635 continue; 6636 6637 intel_dp = enc_to_intel_dp(encoder); 6638 6639 if (!intel_dp_mst_source_support(intel_dp)) 6640 continue; 6641 6642 ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr, 6643 true); 6644 if (ret) { 6645 intel_dp->is_mst = false; 6646 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 6647 false); 6648 } 6649 } 6650 } 6651