/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/byteorder.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_probe_helper.h>

#include "i915_debugfs.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_lvds.h"
#include "intel_panel.h"
#include "intel_psr.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vdsc.h"

#define DP_DPRX_ESI_LEN 14

/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE			2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0		340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1		400000

/* DP DSC FEC Overhead factor = 1/(0.972261) */
#define DP_DSC_FEC_OVERHEAD_FACTOR		972261

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll g4x_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which allows more link rates.
 * Below we provide only the fixed rates and exclude the variable rates.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming fractional division for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 * E.g. 0x819999a = (32 << 22) | 1677722.
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};

/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/* With a single-pipe configuration, the HW is capable of supporting a
 * maximum of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};

/**
 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, false otherwise.
 */
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static void intel_dp_link_down(struct intel_encoder *encoder,
			       const struct intel_crtc_state *old_crtc_state);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state);
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);

/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
	static const int dp_rates[] = {
		162000, 270000, 540000, 810000
	};
	int i, max_rate;

	if (drm_dp_has_quirk(&intel_dp->desc, 0,
			     DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
		/* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
		static const int quirk_rates[] = { 162000, 270000, 324000 };

		memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates));
		intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates);

		return;
	}

	max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);

	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
		if (dp_rates[i] > max_rate)
			break;
		intel_dp->sink_rates[i] = dp_rates[i];
	}

	intel_dp->num_sink_rates = i;
}

/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int i;

	/* Limit results by potentially reduced max rate */
	for (i = 0; i < len; i++) {
		if (rates[len - i - 1] <= max_rate)
			return len - i;
	}

	return 0;
}

/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
					  int max_rate)
{
	return intel_dp_rate_limit_len(intel_dp->common_rates,
				       intel_dp->num_common_rates, max_rate);
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
	return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	int source_max = intel_dig_port->max_lanes;
	int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
	int fia_max = intel_tc_port_fia_max_lane_count(intel_dig_port);

	return min3(source_max, sink_max, fia_max);
}

int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	return intel_dp->max_link_lane_count;
}

int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
	return DIV_ROUND_UP(pixel_clock * bpp, 8);
}

int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	/* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
	 * link rate that is generally expressed in Gbps. Since 8 bits of data
	 * are transmitted every LS_Clk per lane, there is no need to account
	 * for the channel encoding that is done in the PHY layer here.
	 */

	return max_link_clock * max_lanes;
}
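/*
 * Worked example for the two helpers above, with illustrative numbers not
 * tied to any particular platform: a 1920x1080@60 mode with a 148500 kHz
 * dot clock at 24 bpp needs intel_dp_link_required(148500, 24) =
 * 148500 * 24 / 8 = 445500 kB/s, while an HBR2 x4 link provides
 * intel_dp_max_data_rate(540000, 4) = 2160000 kB/s, so the mode fits.
 */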
static int
intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	int max_dotclk = dev_priv->max_dotclk_freq;
	int ds_max_dotclk;

	int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;

	if (type != DP_DS_PORT_TYPE_VGA)
		return max_dotclk;

	ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
						    intel_dp->downstream_ports);

	if (ds_max_dotclk != 0)
		max_dotclk = min(max_dotclk, ds_max_dotclk);

	return max_dotclk;
}

static int cnl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;

	/* Low voltage SKUs are limited to max of 5.4G */
	if (voltage == VOLTAGE_INFO_0_85V)
		return 540000;

	/* For this SKU 8.1G is supported in all ports */
	if (IS_CNL_WITH_PORT_F(dev_priv))
		return 810000;

	/* For other SKUs, max rate on ports A and D is 5.4G */
	if (port == PORT_A || port == PORT_D)
		return 540000;

	return 810000;
}

static int icl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_combo(dev_priv, phy) &&
	    !IS_ELKHARTLAKE(dev_priv) &&
	    !intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}

static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The values must be in increasing order */
	static const int cnl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate;

	/* This should only be done once */
	drm_WARN_ON(&dev_priv->drm,
		    intel_dp->source_rates || intel_dp->num_source_rates);

	if (INTEL_GEN(dev_priv) >= 10) {
		source_rates = cnl_rates;
		size = ARRAY_SIZE(cnl_rates);
		if (IS_GEN(dev_priv, 10))
			max_rate = cnl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
	} else if (IS_GEN9_LP(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_GEN9_BC(dev_priv)) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	vbt_max_rate = intel_bios_dp_max_link_rate(encoder);
	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}

static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}
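/*
 * Illustrative example: intersect_rates() walks both sorted arrays in
 * lock-step, so source rates {162000, 270000, 540000} and sink rates
 * {162000, 270000, 324000} yield common rates {162000, 270000}.
 */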
/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int i;

	for (i = 0; i < len; i++)
		if (rate == rates[i])
			return i;

	return -1;
}

static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	drm_WARN_ON(&i915->drm,
		    !intel_dp->num_source_rates || !intel_dp->num_sink_rates);

	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
						     intel_dp->num_source_rates,
						     intel_dp->sink_rates,
						     intel_dp->num_sink_rates,
						     intel_dp->common_rates);

	/* Paranoia, there should always be something in common. */
	if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) {
		intel_dp->common_rates[0] = 162000;
		intel_dp->num_common_rates = 1;
	}
}

static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
				       u8 lane_count)
{
	/*
	 * FIXME: we need to synchronize the current link parameters with
	 * hardware readout. Currently fast link training doesn't work on
	 * boot-up.
	 */
	if (link_rate == 0 ||
	    link_rate > intel_dp->max_link_rate)
		return false;

	if (lane_count == 0 ||
	    lane_count > intel_dp_max_lane_count(intel_dp))
		return false;

	return true;
}

static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
						     int link_rate,
						     u8 lane_count)
{
	const struct drm_display_mode *fixed_mode =
		intel_dp->attached_connector->panel.fixed_mode;
	int mode_rate, max_rate;

	mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
	max_rate = intel_dp_max_data_rate(link_rate, lane_count);
	if (mode_rate > max_rate)
		return false;

	return true;
}

int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, u8 lane_count)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int index;

	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
				    link_rate);
	if (index > 0) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp->common_rates[index - 1],
							      lane_count)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp_max_common_rate(intel_dp),
							      lane_count >> 1)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}

u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
{
	return div_u64(mul_u32_u32(mode_clock, 1000000U),
		       DP_DSC_FEC_OVERHEAD_FACTOR);
}
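/*
 * E.g. a 148500 kHz mode clock becomes 148500 * 1000000 / 972261 =
 * 152736 kHz, i.e. roughly 2.85% higher (1/0.972261 ~= 1.0285), which is
 * the effective pixel rate once the FEC overhead is accounted for.
 */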
static int
small_joiner_ram_size_bits(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 11)
		return 7680 * 8;
	else
		return 6144 * 8;
}

static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
				       u32 link_clock, u32 lane_count,
				       u32 mode_clock, u32 mode_hdisplay)
{
	u32 bits_per_pixel, max_bpp_small_joiner_ram;
	int i;

	/*
	 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
	 * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
	 * for SST -> TimeSlotsPerMTP is 1,
	 * for MST -> TimeSlotsPerMTP has to be calculated
	 */
	bits_per_pixel = (link_clock * lane_count * 8) /
			 intel_dp_mode_to_fec_clock(mode_clock);
	drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel);

	/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
	max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
		mode_hdisplay;
	drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n",
		    max_bpp_small_joiner_ram);

	/*
	 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
	 * check, output bpp from small joiner RAM check)
	 */
	bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);

	/* Error out if the max bpp is less than smallest allowed valid bpp */
	if (bits_per_pixel < valid_dsc_bpp[0]) {
		drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
			    bits_per_pixel, valid_dsc_bpp[0]);
		return 0;
	}

	/* Find the nearest match in the array of known BPPs from VESA */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
		if (bits_per_pixel < valid_dsc_bpp[i + 1])
			break;
	}
	bits_per_pixel = valid_dsc_bpp[i];

	/*
	 * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
	 * fractional part is 0
	 */
	return bits_per_pixel << 4;
}

static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
				       int mode_clock, int mode_hdisplay)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 min_slice_count, i;
	int max_slice_width;

	if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_0);
	else
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_1);

	max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
	if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
		drm_dbg_kms(&i915->drm,
			    "Unsupported slice width %d by DP DSC Sink device\n",
			    max_slice_width);
		return 0;
	}
	/* Also take into account max slice width */
	min_slice_count = min_t(u8, min_slice_count,
				DIV_ROUND_UP(mode_hdisplay,
					     max_slice_width));

	/* Find the closest match to the valid slice count values */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
		if (valid_dsc_slicecount[i] >
		    drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
						    false))
			break;
		if (min_slice_count <= valid_dsc_slicecount[i])
			return valid_dsc_slicecount[i];
	}

	drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
		    min_slice_count);
	return 0;
}
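/*
 * Worked example (illustrative): a 4k mode with a 533250 kHz pixel rate is
 * below DP_DSC_PEAK_PIXEL_RATE, so min_slice_count =
 * DIV_ROUND_UP(533250, 340000) = 2, and the loop above then returns the
 * smallest valid slice count >= 2 that the sink also supports.
 */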
static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
				  int hdisplay)
{
	/*
	 * Older platforms don't like hdisplay==4096 with DP.
	 *
	 * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
	 * and frame counter increment), but we don't get vblank interrupts,
	 * and the pipe underruns immediately. The link also doesn't seem
	 * to get trained properly.
	 *
	 * On CHV the vblank interrupts don't seem to disappear but
	 * otherwise the symptoms are similar.
	 *
	 * TODO: confirm the behaviour on HSW+
	 */
	return hdisplay == 4096 && !HAS_DDI(dev_priv);
}

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk;
	u16 dsc_max_output_bpp = 0;
	u8 dsc_slice_count = 0;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);

	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
		return MODE_H_ILLEGAL;

	/*
	 * Output bpp is stored in 6.4 format so right shift by 4 to get the
	 * integer value since we support only integer values of bpp.
	 */
	if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
	    drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
		if (intel_dp_is_edp(intel_dp)) {
			dsc_max_output_bpp =
				drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
			dsc_slice_count =
				drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
								true);
		} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
			dsc_max_output_bpp =
				intel_dp_dsc_get_output_bpp(dev_priv,
							    max_link_clock,
							    max_lanes,
							    target_clock,
							    mode->hdisplay) >> 4;
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(intel_dp,
							     target_clock,
							     mode->hdisplay);
		}
	}

	if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
	    target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return intel_mode_valid_max_plane_size(dev_priv, mode);
}

u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
{
	int i;
	u32 v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((u32)src[i]) << ((3 - i) * 8);
	return v;
}

static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3 - i) * 8);
}
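/*
 * E.g. intel_dp_pack_aux({0x12, 0x34, 0x56, 0x78}, 4) = 0x12345678, and a
 * shorter buffer lands in the high bytes:
 * intel_dp_pack_aux({0xaa, 0xbb}, 2) = 0xaabb0000.
 * intel_dp_unpack_aux() performs the inverse transformation.
 */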
static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd);
static void
intel_dp_pps_init(struct intel_dp *intel_dp);

static intel_wakeref_t
pps_lock(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * See intel_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	wakeref = intel_display_power_get(dev_priv,
					  intel_aux_power_domain(dp_to_dig_port(intel_dp)));

	mutex_lock(&dev_priv->pps_mutex);

	return wakeref;
}

static intel_wakeref_t
pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	mutex_unlock(&dev_priv->pps_mutex);
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dp_to_dig_port(intel_dp)),
				wakeref);
	return 0;
}

#define with_pps_lock(dp, wf)						\
	for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))
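/*
 * Typical usage of the macro above (sketch): the loop body runs exactly
 * once with the PPS mutex and the AUX power domain reference held, and
 * both are dropped when pps_unlock() returns 0 and terminates the loop.
 *
 *	intel_wakeref_t wakeref;
 *
 *	with_pps_lock(intel_dp, wakeref) {
 *		... access PPS state or registers ...
 *	}
 */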
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	u32 DP;

	if (drm_WARN(&dev_priv->drm,
		     intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
		     "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
		     pipe_name(pipe), intel_dig_port->base.base.base.id,
		     intel_dig_port->base.base.name))
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(pipe), intel_dig_port->base.base.base.id,
		    intel_dig_port->base.base.name);

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev_priv))
		DP |= DP_PIPE_SEL_CHV(pipe);
	else
		DP |= DP_PIPE_SEL(pipe);

	pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			drm_err(&dev_priv->drm,
				"Failed to force on pll for pipe %c!\n",
				pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	intel_de_write(dev_priv, intel_dp->output_reg, DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev_priv, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}

static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

	/*
	 * We don't have a power sequencer assigned yet.
	 * Pick one that's not used by another port.
	 */
	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		if (encoder->type == INTEL_OUTPUT_EDP) {
			drm_WARN_ON(&dev_priv->drm,
				    intel_dp->active_pipe != INVALID_PIPE &&
				    intel_dp->active_pipe !=
				    intel_dp->pps_pipe);

			if (intel_dp->pps_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->pps_pipe);
		} else {
			drm_WARN_ON(&dev_priv->drm,
				    intel_dp->pps_pipe != INVALID_PIPE);

			if (intel_dp->active_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->active_pipe);
		}
	}

	if (pipes == 0)
		return INVALID_PIPE;

	return ffs(pipes) - 1;
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));

	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE &&
		    intel_dp->active_pipe != intel_dp->pps_pipe);

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	pipe = vlv_find_free_pps(dev_priv);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
		pipe = PIPE_A;

	vlv_steal_power_sequencer(dev_priv, pipe);
	intel_dp->pps_pipe = pipe;

	drm_dbg_kms(&dev_priv->drm,
		    "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(intel_dp->pps_pipe),
		    intel_dig_port->base.base.base.id,
		    intel_dig_port->base.base.name);

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int backlight_controller = dev_priv->vbt.backlight.controller;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));

	if (!intel_dp->pps_reset)
		return backlight_controller;

	intel_dp->pps_reset = false;

	/*
	 * Only the HW needs to be reprogrammed, the SW state is fixed and
	 * has been set up during connector init.
	 */
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);

	return backlight_controller;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->base.port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		drm_dbg_kms(&dev_priv->drm,
			    "no initial power sequencer for [ENCODER:%d:%s]\n",
			    intel_dig_port->base.base.base.id,
			    intel_dig_port->base.base.name);
		return;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
		    intel_dig_port->base.base.base.id,
		    intel_dig_port->base.base.name,
		    pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}

void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (drm_WARN_ON(&dev_priv->drm,
			!(IS_VALLEYVIEW(dev_priv) ||
			  IS_CHERRYVIEW(dev_priv) ||
			  IS_GEN9_LP(dev_priv))))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */
	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_WARN_ON(&dev_priv->drm,
			    intel_dp->active_pipe != INVALID_PIPE);

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		if (IS_GEN9_LP(dev_priv))
			intel_dp->pps_reset = true;
		else
			intel_dp->pps_pipe = INVALID_PIPE;
	}
}

struct pps_registers {
	i915_reg_t pp_ctrl;
	i915_reg_t pp_stat;
	i915_reg_t pp_on;
	i915_reg_t pp_off;
	i915_reg_t pp_div;
};

static void intel_pps_get_registers(struct intel_dp *intel_dp,
				    struct pps_registers *regs)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int pps_idx = 0;

	memset(regs, 0, sizeof(*regs));

	if (IS_GEN9_LP(dev_priv))
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_idx = vlv_power_sequencer_pipe(intel_dp);

	regs->pp_ctrl = PP_CONTROL(pps_idx);
	regs->pp_stat = PP_STATUS(pps_idx);
	regs->pp_on = PP_ON_DELAYS(pps_idx);
	regs->pp_off = PP_OFF_DELAYS(pps_idx);

	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
	if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		regs->pp_div = INVALID_MMIO_REG;
	else
		regs->pp_div = PP_DIVISOR(pps_idx);
}

static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_ctrl;
}

static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_stat;
}

/*
 * Reboot notifier handler to shut down panel power to guarantee T12 timing.
 * This function is only applicable when the panel PM state is not to be
 * tracked.
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(*intel_dp),
						 edp_notifier);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
			i915_reg_t pp_ctrl_reg, pp_div_reg;
			u32 pp_div;

			pp_ctrl_reg = PP_CONTROL(pipe);
			pp_div_reg = PP_DIVISOR(pipe);
			pp_div = intel_de_read(dev_priv, pp_div_reg);
			pp_div &= PP_REFERENCE_DIVIDER_MASK;

			/* 0x1F write to PP_DIV_REG sets max cycle delay */
			intel_de_write(dev_priv, pp_div_reg, pp_div | 0x1F);
			intel_de_write(dev_priv, pp_ctrl_reg,
				       PANEL_UNLOCK_REGS);
			msleep(intel_dp->panel_power_cycle_delay);
		}
	}

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		drm_WARN(&dev_priv->drm, 1,
			 "eDP powered off while attempting aux channel communication.\n");
		drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
			    intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
			    intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
	}
}

static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	const unsigned int timeout_ms = 10;
	u32 status;
	bool done;

#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	done = wait_event_timeout(i915->gmbus_wait_queue, C,
				  msecs_to_jiffies_timeout(timeout_ms));

	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (!done)
		drm_err(&i915->drm,
			"%s: did not complete or timeout within %ums (status 0x%08x)\n",
			intel_dp->aux.name, timeout_ms, status);
#undef C

	return status;
}

static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (index)
		return 0;

	/*
	 * The clock divider is based on the hrawclk, and we would like to run
	 * the AUX channel at 2 MHz. So take the hrawclk value, divide by 2000,
	 * and use that.
	 */
	return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
}

static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 freq;

	if (index)
		return 0;

	/*
	 * The clock divider is based on the cdclk or PCH rawclk, and we would
	 * like to run the AUX channel at 2 MHz. So take the cdclk or PCH
	 * rawclk value, divide by 2000, and use that.
	 */
	if (dig_port->aux_ch == AUX_CH_A)
		freq = dev_priv->cdclk.hw.cdclk;
	else
		freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
	return DIV_ROUND_CLOSEST(freq, 2000);
}

static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	}

	return ilk_get_aux_clock_divider(intel_dp, index);
}
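/*
 * E.g. with a 24 MHz rawclk (rawclk_freq == 24000, in kHz), the helpers
 * above return DIV_ROUND_CLOSEST(24000, 2000) = 12, and 24 MHz / 12 gives
 * the desired 2 MHz AUX bit clock.
 */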
static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug into the existing code.
	 */
	return index ? 0 : 1;
}

static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(intel_dig_port->base.base.dev);
	u32 precharge, timeout;

	if (IS_GEN(dev_priv, 6))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev_priv))
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       DP_AUX_CH_CTL_INTERRUPT |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 unused)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(intel_dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
	u32 ret;

	ret = DP_AUX_CH_CTL_SEND_BUSY |
	      DP_AUX_CH_CTL_DONE |
	      DP_AUX_CH_CTL_INTERRUPT |
	      DP_AUX_CH_CTL_TIME_OUT_ERROR |
	      DP_AUX_CH_CTL_TIME_OUT_MAX |
	      DP_AUX_CH_CTL_RECEIVE_ERROR |
	      (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);

	if (intel_phy_is_tc(i915, phy) &&
	    intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
		ret |= DP_AUX_CH_CTL_TBT_IO;

	return ret;
}

static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(intel_dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
	bool is_tc_port = intel_phy_is_tc(i915, phy);
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	enum intel_display_power_domain aux_domain;
	intel_wakeref_t aux_wakeref;
	intel_wakeref_t pps_wakeref;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	if (is_tc_port)
		intel_tc_port_lock(intel_dig_port);

	aux_domain = intel_aux_power_domain(intel_dig_port);

	aux_wakeref = intel_display_power_get(i915, aux_domain);
	pps_wakeref = pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/*
	 * DP AUX is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going
	 * into deep sleep states.
	 */
	cpu_latency_qos_update_request(&i915->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = intel_uncore_read_notrace(uncore, ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (try == 3) {
		const u32 status = intel_uncore_read(uncore, ch_ctl);

		if (status != intel_dp->aux_busy_last_status) {
			drm_WARN(&i915->drm, 1,
				 "%s: not started (status 0x%08x)\n",
				 intel_dp->aux.name, status);
			intel_dp->aux_busy_last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				intel_uncore_write(uncore,
						   ch_data[i >> 2],
						   intel_dp_pack_aux(send + i,
								     send_bytes - i));

			/* Send the command and wait for it to complete */
			intel_uncore_write(uncore, ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			intel_uncore_write(uncore,
					   ch_ctl,
					   status |
					   DP_AUX_CH_CTL_DONE |
					   DP_AUX_CH_CTL_TIME_OUT_ERROR |
					   DP_AUX_CH_CTL_RECEIVE_ERROR);

			/*
			 * DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2:
			 * a 400us delay is required for errors and timeouts.
			 * Timeout errors from the HW already meet this
			 * requirement, so skip to the next iteration.
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EBUSY;
		goto out;
	}

done:
	/*
	 * Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected.
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EIO;
		goto out;
	}

	/*
	 * Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these.
	 */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
			    intel_dp->aux.name, status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea what happened, so we return -EBUSY so that the
	 * drm layer takes care of the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		drm_dbg_kms(&i915->drm,
			    "%s: Forbidden recv_bytes = %d on aux transaction\n",
			    intel_dp->aux.name, recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	cpu_latency_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp, pps_wakeref);
	intel_display_power_put_async(i915, aux_domain, aux_wakeref);

	if (is_tc_port)
		intel_tc_port_unlock(intel_dig_port);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)

static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
		    const struct drm_dp_aux_msg *msg)
{
	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;
}
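/*
 * E.g. a native AUX read (request 0x9) of DPCD address 0x00000 with
 * msg->size == 15 yields the header {0x90, 0x00, 0x00, 0x0e}: the request
 * sits in the high nibble of byte 0, the 20-bit address spans the low
 * nibble of byte 0 through byte 2, and byte 3 carries size - 1.
 */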
1615 */ 1616 ret--; 1617 memcpy(msg->buffer, rxbuf + 1, ret); 1618 } 1619 break; 1620 1621 default: 1622 ret = -EINVAL; 1623 break; 1624 } 1625 1626 return ret; 1627 } 1628 1629 1630 static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp) 1631 { 1632 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1633 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1634 enum aux_ch aux_ch = dig_port->aux_ch; 1635 1636 switch (aux_ch) { 1637 case AUX_CH_B: 1638 case AUX_CH_C: 1639 case AUX_CH_D: 1640 return DP_AUX_CH_CTL(aux_ch); 1641 default: 1642 MISSING_CASE(aux_ch); 1643 return DP_AUX_CH_CTL(AUX_CH_B); 1644 } 1645 } 1646 1647 static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index) 1648 { 1649 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1650 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1651 enum aux_ch aux_ch = dig_port->aux_ch; 1652 1653 switch (aux_ch) { 1654 case AUX_CH_B: 1655 case AUX_CH_C: 1656 case AUX_CH_D: 1657 return DP_AUX_CH_DATA(aux_ch, index); 1658 default: 1659 MISSING_CASE(aux_ch); 1660 return DP_AUX_CH_DATA(AUX_CH_B, index); 1661 } 1662 } 1663 1664 static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp) 1665 { 1666 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1667 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1668 enum aux_ch aux_ch = dig_port->aux_ch; 1669 1670 switch (aux_ch) { 1671 case AUX_CH_A: 1672 return DP_AUX_CH_CTL(aux_ch); 1673 case AUX_CH_B: 1674 case AUX_CH_C: 1675 case AUX_CH_D: 1676 return PCH_DP_AUX_CH_CTL(aux_ch); 1677 default: 1678 MISSING_CASE(aux_ch); 1679 return DP_AUX_CH_CTL(AUX_CH_A); 1680 } 1681 } 1682 1683 static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index) 1684 { 1685 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1686 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1687 enum aux_ch aux_ch = dig_port->aux_ch; 1688 1689 switch (aux_ch) { 1690 case AUX_CH_A: 1691 return DP_AUX_CH_DATA(aux_ch, index); 1692 case AUX_CH_B: 1693 case AUX_CH_C: 1694 case AUX_CH_D: 1695 return PCH_DP_AUX_CH_DATA(aux_ch, index); 1696 default: 1697 MISSING_CASE(aux_ch); 1698 return DP_AUX_CH_DATA(AUX_CH_A, index); 1699 } 1700 } 1701 1702 static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp) 1703 { 1704 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1705 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1706 enum aux_ch aux_ch = dig_port->aux_ch; 1707 1708 switch (aux_ch) { 1709 case AUX_CH_A: 1710 case AUX_CH_B: 1711 case AUX_CH_C: 1712 case AUX_CH_D: 1713 case AUX_CH_E: 1714 case AUX_CH_F: 1715 case AUX_CH_G: 1716 return DP_AUX_CH_CTL(aux_ch); 1717 default: 1718 MISSING_CASE(aux_ch); 1719 return DP_AUX_CH_CTL(AUX_CH_A); 1720 } 1721 } 1722 1723 static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index) 1724 { 1725 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1726 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1727 enum aux_ch aux_ch = dig_port->aux_ch; 1728 1729 switch (aux_ch) { 1730 case AUX_CH_A: 1731 case AUX_CH_B: 1732 case AUX_CH_C: 1733 case AUX_CH_D: 1734 case AUX_CH_E: 1735 case AUX_CH_F: 1736 case AUX_CH_G: 1737 return DP_AUX_CH_DATA(aux_ch, index); 1738 default: 1739 MISSING_CASE(aux_ch); 1740 return DP_AUX_CH_DATA(AUX_CH_A, index); 1741 } 1742 } 1743 1744 static void 1745 intel_dp_aux_fini(struct intel_dp *intel_dp) 1746 { 1747 kfree(intel_dp->aux.name); 1748 } 1749 1750 static void 1751 intel_dp_aux_init(struct intel_dp *intel_dp) 1752 { 
static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;

	if (INTEL_GEN(dev_priv) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev_priv))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/port %c",
				       aux_ch_name(dig_port->aux_ch),
				       port_name(encoder->port));
	intel_dp->aux.transfer = intel_dp_aux_transfer;
}

bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 540000;
}

bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 810000;
}

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev_priv)) {
		divisor = g4x_dpll;
		count = ARRAY_SIZE(g4x_dpll);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (pipe_config->port_clock == divisor[i].clock) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);

		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}
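/*
 * E.g. for array = {162000, 270000, 540000} and nelem = 3 the helper above
 * produces "162000, 270000, 540000"; output is silently truncated once the
 * buffer is exhausted.
 */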
 */

	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	snprintf_int_array(str, sizeof(str),
			   intel_dp->source_rates, intel_dp->num_source_rates);
	drm_dbg_kms(&i915->drm, "source rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->sink_rates, intel_dp->num_sink_rates);
	drm_dbg_kms(&i915->drm, "sink rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->common_rates, intel_dp->num_common_rates);
	drm_dbg_kms(&i915->drm, "common rates: %s\n", str);
}

int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int len;

	len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
	if (drm_WARN_ON(&i915->drm, len <= 0))
		return 162000;

	return intel_dp->common_rates[len - 1];
}

int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int i = intel_dp_rate_index(intel_dp->sink_rates,
				    intel_dp->num_sink_rates, rate);

	if (drm_WARN_ON(&i915->drm, i < 0))
		i = 0;

	return i;
}

void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
			   u8 *link_bw, u8 *rate_select)
{
	/* eDP 1.4 rate select method. */
	if (intel_dp->use_rate_select) {
		*link_bw = 0;
		*rate_select =
			intel_dp_rate_select(intel_dp, port_clock);
	} else {
		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
		*rate_select = 0;
	}
}

static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/* On TGL, FEC is supported on all Pipes */
	if (INTEL_GEN(dev_priv) >= 12)
		return true;

	if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A)
		return true;

	return false;
}

static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *pipe_config)
{
	return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
		drm_dp_sink_supports_fec(intel_dp->fec_capable);
}

static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;

	if (!intel_dp_is_edp(intel_dp) && !crtc_state->fec_enable)
		return false;

	return intel_dsc_source_support(encoder, crtc_state) &&
		drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
}

static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int bpp, bpc;

	bpp = pipe_config->pipe_bpp;
	bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);

	if (bpc > 0)
		bpp = min(bpp, 3*bpc);

	if (intel_dp_is_edp(intel_dp)) {
		/* Get bpp from VBT only for panels that don't have bpp in EDID */
		if (intel_connector->base.display_info.bpc == 0 &&
		    dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
			drm_dbg_kms(&dev_priv->drm,
				    "clamping bpp for eDP panel to BIOS-provided %i\n",
				    dev_priv->vbt.edp.bpp);
			bpp = dev_priv->vbt.edp.bpp;
		}
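	/*
	 * Illustrative example (made-up numbers): a pipe configured for
	 * 30 bpp behind a branch device whose DPCD reports at most 8 bpc
	 * is clamped by the min() above to 3 * 8 = 24 bpp; an eDP panel
	 * with no bpc in its EDID but an 18 bpp VBT value is clamped
	 * further to 18 bpp.
	 */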
	}

	return bpp;
}

/* Adjust link config limits based on compliance test requests. */
void
intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  struct link_config_limits *limits)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/* For DP Compliance we override the computed bpp for the pipe */
	if (intel_dp->compliance.test_data.bpc != 0) {
		int bpp = 3 * intel_dp->compliance.test_data.bpc;

		limits->min_bpp = limits->max_bpp = bpp;
		pipe_config->dither_force_disable = bpp == 6 * 3;

		drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp);
	}

	/* Use values requested by Compliance Test Request */
	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		int index;

		/* Validate the compliance test data since max values
		 * might have changed due to link train fallback.
		 */
		if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
					       intel_dp->compliance.test_lane_count)) {
			index = intel_dp_rate_index(intel_dp->common_rates,
						    intel_dp->num_common_rates,
						    intel_dp->compliance.test_link_rate);
			if (index >= 0)
				limits->min_clock = limits->max_clock = index;
			limits->min_lane_count = limits->max_lane_count =
				intel_dp->compliance.test_lane_count;
		}
	}
}

static int intel_dp_output_bpp(const struct intel_crtc_state *crtc_state, int bpp)
{
	/*
	 * The bpp value was computed assuming RGB output. For YCbCr 4:2:0
	 * output the effective number of bits per pixel is half that of
	 * an RGB pixel.
	 */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		bpp /= 2;

	return bpp;
}

/* Optimize link config in order: max bpp, min clock, min lanes */
static int
intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  const struct link_config_limits *limits)
{
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int bpp, clock, lane_count;
	int mode_rate, link_clock, link_avail;

	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
		int output_bpp = intel_dp_output_bpp(pipe_config, bpp);

		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   output_bpp);

		for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
			for (lane_count = limits->min_lane_count;
			     lane_count <= limits->max_lane_count;
			     lane_count <<= 1) {
				link_clock = intel_dp->common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					pipe_config->lane_count = lane_count;
					pipe_config->pipe_bpp = bpp;
					pipe_config->port_clock = link_clock;

					return 0;
				}
			}
		}
	}

	return -EINVAL;
}

static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
{
	int i, num_bpc;
	u8 dsc_bpc[3] = {0};

	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
						       dsc_bpc);
	for (i = 0; i < num_bpc; i++) {
		if (dsc_max_bpc >= dsc_bpc[i])
			return dsc_bpc[i] * 3;
	}

	return 0;
}

#define DSC_SUPPORTED_VERSION_MIN 1

static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
				       struct intel_crtc_state
*crtc_state) 2083 { 2084 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2085 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2086 struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; 2087 u8 line_buf_depth; 2088 int ret; 2089 2090 ret = intel_dsc_compute_params(encoder, crtc_state); 2091 if (ret) 2092 return ret; 2093 2094 /* 2095 * Slice Height of 8 works for all currently available panels. So start 2096 * with that if pic_height is an integral multiple of 8. Eventually add 2097 * logic to try multiple slice heights. 2098 */ 2099 if (vdsc_cfg->pic_height % 8 == 0) 2100 vdsc_cfg->slice_height = 8; 2101 else if (vdsc_cfg->pic_height % 4 == 0) 2102 vdsc_cfg->slice_height = 4; 2103 else 2104 vdsc_cfg->slice_height = 2; 2105 2106 vdsc_cfg->dsc_version_major = 2107 (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & 2108 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT; 2109 vdsc_cfg->dsc_version_minor = 2110 min(DSC_SUPPORTED_VERSION_MIN, 2111 (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & 2112 DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT); 2113 2114 vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] & 2115 DP_DSC_RGB; 2116 2117 line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd); 2118 if (!line_buf_depth) { 2119 drm_dbg_kms(&i915->drm, 2120 "DSC Sink Line Buffer Depth invalid\n"); 2121 return -EINVAL; 2122 } 2123 2124 if (vdsc_cfg->dsc_version_minor == 2) 2125 vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ? 2126 DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth; 2127 else 2128 vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ? 2129 DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth; 2130 2131 vdsc_cfg->block_pred_enable = 2132 intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] & 2133 DP_DSC_BLK_PREDICTION_IS_SUPPORTED; 2134 2135 return drm_dsc_compute_rc_parameters(vdsc_cfg); 2136 } 2137 2138 static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, 2139 struct intel_crtc_state *pipe_config, 2140 struct drm_connector_state *conn_state, 2141 struct link_config_limits *limits) 2142 { 2143 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 2144 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 2145 const struct drm_display_mode *adjusted_mode = 2146 &pipe_config->hw.adjusted_mode; 2147 u8 dsc_max_bpc; 2148 int pipe_bpp; 2149 int ret; 2150 2151 pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) && 2152 intel_dp_supports_fec(intel_dp, pipe_config); 2153 2154 if (!intel_dp_supports_dsc(intel_dp, pipe_config)) 2155 return -EINVAL; 2156 2157 /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */ 2158 if (INTEL_GEN(dev_priv) >= 12) 2159 dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc); 2160 else 2161 dsc_max_bpc = min_t(u8, 10, 2162 conn_state->max_requested_bpc); 2163 2164 pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc); 2165 2166 /* Min Input BPC for ICL+ is 8 */ 2167 if (pipe_bpp < 8 * 3) { 2168 drm_dbg_kms(&dev_priv->drm, 2169 "No DSC support for less than 8bpc\n"); 2170 return -EINVAL; 2171 } 2172 2173 /* 2174 * For now enable DSC for max bpp, max link rate, max lane count. 2175 * Optimize this later for the minimum possible link rate/lane count 2176 * with DSC enabled for the requested mode. 
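	 *
	 * Rough illustrative numbers: if limits->max_clock selects HBR2,
	 * port_clock = 540000 (link symbol clock in kHz); with 4 lanes and
	 * one byte per symbol per lane after 8b/10b coding that amounts to
	 * intel_dp_max_data_rate() = 540000 * 4 = 2160000 kB/s of payload.
	 * Note the sink's maximum DSC output bpp is reported in DPCD in
	 * units of 1/16 bpp, hence the >> 4 below.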
2177 */ 2178 pipe_config->pipe_bpp = pipe_bpp; 2179 pipe_config->port_clock = intel_dp->common_rates[limits->max_clock]; 2180 pipe_config->lane_count = limits->max_lane_count; 2181 2182 if (intel_dp_is_edp(intel_dp)) { 2183 pipe_config->dsc.compressed_bpp = 2184 min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4, 2185 pipe_config->pipe_bpp); 2186 pipe_config->dsc.slice_count = 2187 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, 2188 true); 2189 } else { 2190 u16 dsc_max_output_bpp; 2191 u8 dsc_dp_slice_count; 2192 2193 dsc_max_output_bpp = 2194 intel_dp_dsc_get_output_bpp(dev_priv, 2195 pipe_config->port_clock, 2196 pipe_config->lane_count, 2197 adjusted_mode->crtc_clock, 2198 adjusted_mode->crtc_hdisplay); 2199 dsc_dp_slice_count = 2200 intel_dp_dsc_get_slice_count(intel_dp, 2201 adjusted_mode->crtc_clock, 2202 adjusted_mode->crtc_hdisplay); 2203 if (!dsc_max_output_bpp || !dsc_dp_slice_count) { 2204 drm_dbg_kms(&dev_priv->drm, 2205 "Compressed BPP/Slice Count not supported\n"); 2206 return -EINVAL; 2207 } 2208 pipe_config->dsc.compressed_bpp = min_t(u16, 2209 dsc_max_output_bpp >> 4, 2210 pipe_config->pipe_bpp); 2211 pipe_config->dsc.slice_count = dsc_dp_slice_count; 2212 } 2213 /* 2214 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate 2215 * is greater than the maximum Cdclock and if slice count is even 2216 * then we need to use 2 VDSC instances. 2217 */ 2218 if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) { 2219 if (pipe_config->dsc.slice_count > 1) { 2220 pipe_config->dsc.dsc_split = true; 2221 } else { 2222 drm_dbg_kms(&dev_priv->drm, 2223 "Cannot split stream to use 2 VDSC instances\n"); 2224 return -EINVAL; 2225 } 2226 } 2227 2228 ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config); 2229 if (ret < 0) { 2230 drm_dbg_kms(&dev_priv->drm, 2231 "Cannot compute valid DSC parameters for Input Bpp = %d " 2232 "Compressed BPP = %d\n", 2233 pipe_config->pipe_bpp, 2234 pipe_config->dsc.compressed_bpp); 2235 return ret; 2236 } 2237 2238 pipe_config->dsc.compression_enable = true; 2239 drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d " 2240 "Compressed Bpp = %d Slice Count = %d\n", 2241 pipe_config->pipe_bpp, 2242 pipe_config->dsc.compressed_bpp, 2243 pipe_config->dsc.slice_count); 2244 2245 return 0; 2246 } 2247 2248 int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state) 2249 { 2250 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) 2251 return 6 * 3; 2252 else 2253 return 8 * 3; 2254 } 2255 2256 static int 2257 intel_dp_compute_link_config(struct intel_encoder *encoder, 2258 struct intel_crtc_state *pipe_config, 2259 struct drm_connector_state *conn_state) 2260 { 2261 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2262 const struct drm_display_mode *adjusted_mode = 2263 &pipe_config->hw.adjusted_mode; 2264 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2265 struct link_config_limits limits; 2266 int common_len; 2267 int ret; 2268 2269 common_len = intel_dp_common_len_rate_limit(intel_dp, 2270 intel_dp->max_link_rate); 2271 2272 /* No common link rates between source and sink */ 2273 drm_WARN_ON(encoder->base.dev, common_len <= 0); 2274 2275 limits.min_clock = 0; 2276 limits.max_clock = common_len - 1; 2277 2278 limits.min_lane_count = 1; 2279 limits.max_lane_count = intel_dp_max_lane_count(intel_dp); 2280 2281 limits.min_bpp = intel_dp_min_bpp(pipe_config); 2282 limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config); 2283 2284 if (intel_dp_is_edp(intel_dp)) { 2285 /* 2286 * Use 
the maximum clock and number of lanes the eDP panel
		 * advertises being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		limits.min_lane_count = limits.max_lane_count;
		limits.min_clock = limits.max_clock;
	}

	intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);

	drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i "
		    "max rate %d max bpp %d pixel clock %iKHz\n",
		    limits.max_lane_count,
		    intel_dp->common_rates[limits.max_clock],
		    limits.max_bpp, adjusted_mode->crtc_clock);

	/*
	 * Optimize for slow and wide. This is the place to add alternative
	 * optimization policy.
	 */
	ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);

	/* enable compression if the mode doesn't fit available BW */
	drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
	if (ret || intel_dp->force_dsc_en) {
		ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
						  conn_state, &limits);
		if (ret < 0)
			return ret;
	}

	if (pipe_config->dsc.compression_enable) {
		drm_dbg_kms(&i915->drm,
			    "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
			    pipe_config->lane_count, pipe_config->port_clock,
			    pipe_config->pipe_bpp,
			    pipe_config->dsc.compressed_bpp);

		drm_dbg_kms(&i915->drm,
			    "DP link rate required %i available %i\n",
			    intel_dp_link_required(adjusted_mode->crtc_clock,
						   pipe_config->dsc.compressed_bpp),
			    intel_dp_max_data_rate(pipe_config->port_clock,
						   pipe_config->lane_count));
	} else {
		drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n",
			    pipe_config->lane_count, pipe_config->port_clock,
			    pipe_config->pipe_bpp);

		drm_dbg_kms(&i915->drm,
			    "DP link rate required %i available %i\n",
			    intel_dp_link_required(adjusted_mode->crtc_clock,
						   pipe_config->pipe_bpp),
			    intel_dp_max_data_rate(pipe_config->port_clock,
						   pipe_config->lane_count));
	}
	return 0;
}

static int
intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
			 struct intel_crtc_state *crtc_state,
			 const struct drm_connector_state *conn_state)
{
	struct drm_connector *connector = conn_state->connector;
	const struct drm_display_info *info = &connector->display_info;
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	if (!drm_mode_is_420_only(info, adjusted_mode) ||
	    !intel_dp_get_colorimetry_status(intel_dp) ||
	    !connector->ycbcr_420_allowed)
		return 0;

	crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;

	return intel_pch_panel_fitting(crtc_state, conn_state);
}

bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
				  const struct drm_connector_state *conn_state)
{
	const struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	/*
	 * Our YCbCr output is always limited range.
	 * crtc_state->limited_color_range only applies to RGB,
	 * and it must never be set for YCbCr or we risk setting
	 * some conflicting bits in PIPECONF which will mess up
	 * the colors on the monitor.
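	 *
	 * For BROADCAST_RGB_AUTO below this boils down to: CEA video modes
	 * (e.g. 1920x1080@60) default to limited range, IT/panel-native
	 * modes to full range, and 6 bpc (18 bpp) output is never limited,
	 * presumably since a reduced range would crush too few levels.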
2381 */ 2382 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 2383 return false; 2384 2385 if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { 2386 /* 2387 * See: 2388 * CEA-861-E - 5.1 Default Encoding Parameters 2389 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry 2390 */ 2391 return crtc_state->pipe_bpp != 18 && 2392 drm_default_rgb_quant_range(adjusted_mode) == 2393 HDMI_QUANTIZATION_RANGE_LIMITED; 2394 } else { 2395 return intel_conn_state->broadcast_rgb == 2396 INTEL_BROADCAST_RGB_LIMITED; 2397 } 2398 } 2399 2400 static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv, 2401 enum port port) 2402 { 2403 if (IS_G4X(dev_priv)) 2404 return false; 2405 if (INTEL_GEN(dev_priv) < 12 && port == PORT_A) 2406 return false; 2407 2408 return true; 2409 } 2410 2411 static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state, 2412 const struct drm_connector_state *conn_state, 2413 struct drm_dp_vsc_sdp *vsc) 2414 { 2415 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2416 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2417 2418 /* 2419 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118 2420 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/ 2421 * Colorimetry Format indication. 2422 */ 2423 vsc->revision = 0x5; 2424 vsc->length = 0x13; 2425 2426 /* DP 1.4a spec, Table 2-120 */ 2427 switch (crtc_state->output_format) { 2428 case INTEL_OUTPUT_FORMAT_YCBCR444: 2429 vsc->pixelformat = DP_PIXELFORMAT_YUV444; 2430 break; 2431 case INTEL_OUTPUT_FORMAT_YCBCR420: 2432 vsc->pixelformat = DP_PIXELFORMAT_YUV420; 2433 break; 2434 case INTEL_OUTPUT_FORMAT_RGB: 2435 default: 2436 vsc->pixelformat = DP_PIXELFORMAT_RGB; 2437 } 2438 2439 switch (conn_state->colorspace) { 2440 case DRM_MODE_COLORIMETRY_BT709_YCC: 2441 vsc->colorimetry = DP_COLORIMETRY_BT709_YCC; 2442 break; 2443 case DRM_MODE_COLORIMETRY_XVYCC_601: 2444 vsc->colorimetry = DP_COLORIMETRY_XVYCC_601; 2445 break; 2446 case DRM_MODE_COLORIMETRY_XVYCC_709: 2447 vsc->colorimetry = DP_COLORIMETRY_XVYCC_709; 2448 break; 2449 case DRM_MODE_COLORIMETRY_SYCC_601: 2450 vsc->colorimetry = DP_COLORIMETRY_SYCC_601; 2451 break; 2452 case DRM_MODE_COLORIMETRY_OPYCC_601: 2453 vsc->colorimetry = DP_COLORIMETRY_OPYCC_601; 2454 break; 2455 case DRM_MODE_COLORIMETRY_BT2020_CYCC: 2456 vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC; 2457 break; 2458 case DRM_MODE_COLORIMETRY_BT2020_RGB: 2459 vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB; 2460 break; 2461 case DRM_MODE_COLORIMETRY_BT2020_YCC: 2462 vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC; 2463 break; 2464 case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65: 2465 case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER: 2466 vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB; 2467 break; 2468 default: 2469 /* 2470 * RGB->YCBCR color conversion uses the BT.709 2471 * color space. 
2472 */ 2473 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 2474 vsc->colorimetry = DP_COLORIMETRY_BT709_YCC; 2475 else 2476 vsc->colorimetry = DP_COLORIMETRY_DEFAULT; 2477 break; 2478 } 2479 2480 vsc->bpc = crtc_state->pipe_bpp / 3; 2481 2482 /* only RGB pixelformat supports 6 bpc */ 2483 drm_WARN_ON(&dev_priv->drm, 2484 vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB); 2485 2486 /* all YCbCr are always limited range */ 2487 vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA; 2488 vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED; 2489 } 2490 2491 static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp, 2492 struct intel_crtc_state *crtc_state, 2493 const struct drm_connector_state *conn_state) 2494 { 2495 struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc; 2496 2497 /* When a crtc state has PSR, VSC SDP will be handled by PSR routine */ 2498 if (crtc_state->has_psr) 2499 return; 2500 2501 if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state)) 2502 return; 2503 2504 crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC); 2505 vsc->sdp_type = DP_SDP_VSC; 2506 intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, 2507 &crtc_state->infoframes.vsc); 2508 } 2509 2510 void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp, 2511 const struct intel_crtc_state *crtc_state, 2512 const struct drm_connector_state *conn_state, 2513 struct drm_dp_vsc_sdp *vsc) 2514 { 2515 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2516 2517 vsc->sdp_type = DP_SDP_VSC; 2518 2519 if (dev_priv->psr.psr2_enabled) { 2520 if (dev_priv->psr.colorimetry_support && 2521 intel_dp_needs_vsc_sdp(crtc_state, conn_state)) { 2522 /* [PSR2, +Colorimetry] */ 2523 intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, 2524 vsc); 2525 } else { 2526 /* 2527 * [PSR2, -Colorimetry] 2528 * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11 2529 * 3D stereo + PSR/PSR2 + Y-coordinate. 2530 */ 2531 vsc->revision = 0x4; 2532 vsc->length = 0xe; 2533 } 2534 } else { 2535 /* 2536 * [PSR1] 2537 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118 2538 * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or 2539 * higher). 
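		 *
		 * Summary of the VSC SDP headers chosen in this function:
		 *   PSR1:                  revision 0x2, length 0x8
		 *   PSR2 w/o colorimetry:  revision 0x4, length 0xe
		 *   PSR2 w/ colorimetry:   revision 0x5, length 0x13
		 *                          (via intel_dp_compute_vsc_colorimetry())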
2540 */ 2541 vsc->revision = 0x2; 2542 vsc->length = 0x8; 2543 } 2544 } 2545 2546 static void 2547 intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp, 2548 struct intel_crtc_state *crtc_state, 2549 const struct drm_connector_state *conn_state) 2550 { 2551 int ret; 2552 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2553 struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm; 2554 2555 if (!conn_state->hdr_output_metadata) 2556 return; 2557 2558 ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state); 2559 2560 if (ret) { 2561 drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n"); 2562 return; 2563 } 2564 2565 crtc_state->infoframes.enable |= 2566 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA); 2567 } 2568 2569 int 2570 intel_dp_compute_config(struct intel_encoder *encoder, 2571 struct intel_crtc_state *pipe_config, 2572 struct drm_connector_state *conn_state) 2573 { 2574 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2575 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2576 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2577 struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder); 2578 enum port port = encoder->port; 2579 struct intel_connector *intel_connector = intel_dp->attached_connector; 2580 struct intel_digital_connector_state *intel_conn_state = 2581 to_intel_digital_connector_state(conn_state); 2582 bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0, 2583 DP_DPCD_QUIRK_CONSTANT_N); 2584 int ret = 0, output_bpp; 2585 2586 if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A) 2587 pipe_config->has_pch_encoder = true; 2588 2589 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 2590 2591 if (lspcon->active) 2592 lspcon_ycbcr420_config(&intel_connector->base, pipe_config); 2593 else 2594 ret = intel_dp_ycbcr420_config(intel_dp, pipe_config, 2595 conn_state); 2596 if (ret) 2597 return ret; 2598 2599 pipe_config->has_drrs = false; 2600 if (!intel_dp_port_has_audio(dev_priv, port)) 2601 pipe_config->has_audio = false; 2602 else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) 2603 pipe_config->has_audio = intel_dp->has_audio; 2604 else 2605 pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON; 2606 2607 if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) { 2608 intel_fixed_panel_mode(intel_connector->panel.fixed_mode, 2609 adjusted_mode); 2610 2611 if (HAS_GMCH(dev_priv)) 2612 ret = intel_gmch_panel_fitting(pipe_config, conn_state); 2613 else 2614 ret = intel_pch_panel_fitting(pipe_config, conn_state); 2615 if (ret) 2616 return ret; 2617 } 2618 2619 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) 2620 return -EINVAL; 2621 2622 if (HAS_GMCH(dev_priv) && 2623 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) 2624 return -EINVAL; 2625 2626 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) 2627 return -EINVAL; 2628 2629 if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay)) 2630 return -EINVAL; 2631 2632 ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state); 2633 if (ret < 0) 2634 return ret; 2635 2636 pipe_config->limited_color_range = 2637 intel_dp_limited_color_range(pipe_config, conn_state); 2638 2639 if (pipe_config->dsc.compression_enable) 2640 output_bpp = pipe_config->dsc.compressed_bpp; 2641 else 2642 output_bpp = intel_dp_output_bpp(pipe_config, pipe_config->pipe_bpp); 2643 2644 intel_link_compute_m_n(output_bpp, 2645 pipe_config->lane_count, 2646 
adjusted_mode->crtc_clock, 2647 pipe_config->port_clock, 2648 &pipe_config->dp_m_n, 2649 constant_n, pipe_config->fec_enable); 2650 2651 if (intel_connector->panel.downclock_mode != NULL && 2652 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) { 2653 pipe_config->has_drrs = true; 2654 intel_link_compute_m_n(output_bpp, 2655 pipe_config->lane_count, 2656 intel_connector->panel.downclock_mode->clock, 2657 pipe_config->port_clock, 2658 &pipe_config->dp_m2_n2, 2659 constant_n, pipe_config->fec_enable); 2660 } 2661 2662 if (!HAS_DDI(dev_priv)) 2663 intel_dp_set_clock(encoder, pipe_config); 2664 2665 intel_psr_compute_config(intel_dp, pipe_config); 2666 intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state); 2667 intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state); 2668 2669 return 0; 2670 } 2671 2672 void intel_dp_set_link_params(struct intel_dp *intel_dp, 2673 int link_rate, u8 lane_count, 2674 bool link_mst) 2675 { 2676 intel_dp->link_trained = false; 2677 intel_dp->link_rate = link_rate; 2678 intel_dp->lane_count = lane_count; 2679 intel_dp->link_mst = link_mst; 2680 } 2681 2682 static void intel_dp_prepare(struct intel_encoder *encoder, 2683 const struct intel_crtc_state *pipe_config) 2684 { 2685 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2686 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2687 enum port port = encoder->port; 2688 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 2689 const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2690 2691 intel_dp_set_link_params(intel_dp, pipe_config->port_clock, 2692 pipe_config->lane_count, 2693 intel_crtc_has_type(pipe_config, 2694 INTEL_OUTPUT_DP_MST)); 2695 2696 /* 2697 * There are four kinds of DP registers: 2698 * 2699 * IBX PCH 2700 * SNB CPU 2701 * IVB CPU 2702 * CPT PCH 2703 * 2704 * IBX PCH and CPU are the same for almost everything, 2705 * except that the CPU DP PLL is configured in this 2706 * register 2707 * 2708 * CPT PCH is quite different, having many bits moved 2709 * to the TRANS_DP_CTL register instead. That 2710 * configuration happens (oddly) in ilk_pch_enable 2711 */ 2712 2713 /* Preserve the BIOS-computed detected bit. This is 2714 * supposed to be read-only. 
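	 * We therefore rebuild intel_dp->DP from scratch below and carry
	 * over only DP_DETECTED from the current register value.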
2715 */ 2716 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED; 2717 2718 /* Handle DP bits in common between all three register formats */ 2719 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 2720 intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count); 2721 2722 /* Split out the IBX/CPU vs CPT settings */ 2723 2724 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) { 2725 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 2726 intel_dp->DP |= DP_SYNC_HS_HIGH; 2727 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 2728 intel_dp->DP |= DP_SYNC_VS_HIGH; 2729 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 2730 2731 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 2732 intel_dp->DP |= DP_ENHANCED_FRAMING; 2733 2734 intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe); 2735 } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) { 2736 u32 trans_dp; 2737 2738 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 2739 2740 trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe)); 2741 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 2742 trans_dp |= TRANS_DP_ENH_FRAMING; 2743 else 2744 trans_dp &= ~TRANS_DP_ENH_FRAMING; 2745 intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp); 2746 } else { 2747 if (IS_G4X(dev_priv) && pipe_config->limited_color_range) 2748 intel_dp->DP |= DP_COLOR_RANGE_16_235; 2749 2750 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 2751 intel_dp->DP |= DP_SYNC_HS_HIGH; 2752 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 2753 intel_dp->DP |= DP_SYNC_VS_HIGH; 2754 intel_dp->DP |= DP_LINK_TRAIN_OFF; 2755 2756 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 2757 intel_dp->DP |= DP_ENHANCED_FRAMING; 2758 2759 if (IS_CHERRYVIEW(dev_priv)) 2760 intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe); 2761 else 2762 intel_dp->DP |= DP_PIPE_SEL(crtc->pipe); 2763 } 2764 } 2765 2766 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) 2767 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE) 2768 2769 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0) 2770 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0) 2771 2772 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK) 2773 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) 2774 2775 static void intel_pps_verify_state(struct intel_dp *intel_dp); 2776 2777 static void wait_panel_status(struct intel_dp *intel_dp, 2778 u32 mask, 2779 u32 value) 2780 { 2781 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2782 i915_reg_t pp_stat_reg, pp_ctrl_reg; 2783 2784 lockdep_assert_held(&dev_priv->pps_mutex); 2785 2786 intel_pps_verify_state(intel_dp); 2787 2788 pp_stat_reg = _pp_stat_reg(intel_dp); 2789 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2790 2791 drm_dbg_kms(&dev_priv->drm, 2792 "mask %08x value %08x status %08x control %08x\n", 2793 mask, value, 2794 intel_de_read(dev_priv, pp_stat_reg), 2795 intel_de_read(dev_priv, pp_ctrl_reg)); 2796 2797 if (intel_de_wait_for_register(dev_priv, pp_stat_reg, 2798 mask, value, 5000)) 2799 drm_err(&dev_priv->drm, 2800 "Panel status timeout: status %08x control %08x\n", 2801 intel_de_read(dev_priv, pp_stat_reg), 2802 intel_de_read(dev_priv, pp_ctrl_reg)); 2803 2804 drm_dbg_kms(&dev_priv->drm, "Wait complete\n"); 2805 } 2806 2807 static void wait_panel_on(struct intel_dp *intel_dp) 2808 { 2809 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2810 2811 drm_dbg_kms(&i915->drm, "Wait for panel power on\n"); 2812 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE); 2813 } 2814 
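/*
 * The IDLE_*_MASK/IDLE_*_VALUE pairs above encode "wait until
 * (PP_STATUS & mask) == value". A minimal usage sketch, mirroring the
 * helpers surrounding this comment:
 *
 *	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
 *		- returns once PP_ON is set and the sequencer is idle
 *	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
 *		- returns once PP_ON is clear and no sequence is running
 *
 * wait_panel_status() polls via intel_de_wait_for_register() with a
 * 5 second timeout and logs an error if the state is never reached.
 */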
static void wait_panel_off(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	drm_dbg_kms(&i915->drm, "Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	ktime_t panel_power_on_time;
	s64 panel_power_off_duration;

	drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");

	/* take the difference of current time and panel power off time
	 * and then make panel wait for t11_t12 if needed. */
	panel_power_on_time = ktime_get_boottime();
	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
		wait_remaining_ms_from_jiffies(jiffies,
					       intel_dp->panel_power_cycle_delay - panel_power_off_duration);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}

static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}

static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}

/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 control;

	lockdep_assert_held(&dev_priv->pps_mutex);

	control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
	if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
			(control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
		control &= ~PANEL_UNLOCK_MASK;
		control |= PANEL_UNLOCK_REGS;
	}
	return control;
}

/*
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
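 *
 * A typical call sequence, sketching what intel_enable_dp() does later
 * in this file:
 *
 *	with_pps_lock(intel_dp, wakeref) {
 *		edp_panel_vdd_on(intel_dp);
 *		edp_panel_on(intel_dp);
 *		edp_panel_vdd_off(intel_dp, true);
 *	}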
2881 */ 2882 static bool edp_panel_vdd_on(struct intel_dp *intel_dp) 2883 { 2884 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2885 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2886 u32 pp; 2887 i915_reg_t pp_stat_reg, pp_ctrl_reg; 2888 bool need_to_disable = !intel_dp->want_panel_vdd; 2889 2890 lockdep_assert_held(&dev_priv->pps_mutex); 2891 2892 if (!intel_dp_is_edp(intel_dp)) 2893 return false; 2894 2895 cancel_delayed_work(&intel_dp->panel_vdd_work); 2896 intel_dp->want_panel_vdd = true; 2897 2898 if (edp_have_panel_vdd(intel_dp)) 2899 return need_to_disable; 2900 2901 intel_display_power_get(dev_priv, 2902 intel_aux_power_domain(intel_dig_port)); 2903 2904 drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n", 2905 intel_dig_port->base.base.base.id, 2906 intel_dig_port->base.base.name); 2907 2908 if (!edp_have_panel_power(intel_dp)) 2909 wait_panel_power_cycle(intel_dp); 2910 2911 pp = ilk_get_pp_control(intel_dp); 2912 pp |= EDP_FORCE_VDD; 2913 2914 pp_stat_reg = _pp_stat_reg(intel_dp); 2915 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2916 2917 intel_de_write(dev_priv, pp_ctrl_reg, pp); 2918 intel_de_posting_read(dev_priv, pp_ctrl_reg); 2919 drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", 2920 intel_de_read(dev_priv, pp_stat_reg), 2921 intel_de_read(dev_priv, pp_ctrl_reg)); 2922 /* 2923 * If the panel wasn't on, delay before accessing aux channel 2924 */ 2925 if (!edp_have_panel_power(intel_dp)) { 2926 drm_dbg_kms(&dev_priv->drm, 2927 "[ENCODER:%d:%s] panel power wasn't enabled\n", 2928 intel_dig_port->base.base.base.id, 2929 intel_dig_port->base.base.name); 2930 msleep(intel_dp->panel_power_up_delay); 2931 } 2932 2933 return need_to_disable; 2934 } 2935 2936 /* 2937 * Must be paired with intel_edp_panel_vdd_off() or 2938 * intel_edp_panel_off(). 2939 * Nested calls to these functions are not allowed since 2940 * we drop the lock. Caller must use some higher level 2941 * locking to prevent nested calls from other threads. 
2942 */ 2943 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp) 2944 { 2945 intel_wakeref_t wakeref; 2946 bool vdd; 2947 2948 if (!intel_dp_is_edp(intel_dp)) 2949 return; 2950 2951 vdd = false; 2952 with_pps_lock(intel_dp, wakeref) 2953 vdd = edp_panel_vdd_on(intel_dp); 2954 I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n", 2955 dp_to_dig_port(intel_dp)->base.base.base.id, 2956 dp_to_dig_port(intel_dp)->base.base.name); 2957 } 2958 2959 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) 2960 { 2961 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2962 struct intel_digital_port *intel_dig_port = 2963 dp_to_dig_port(intel_dp); 2964 u32 pp; 2965 i915_reg_t pp_stat_reg, pp_ctrl_reg; 2966 2967 lockdep_assert_held(&dev_priv->pps_mutex); 2968 2969 drm_WARN_ON(&dev_priv->drm, intel_dp->want_panel_vdd); 2970 2971 if (!edp_have_panel_vdd(intel_dp)) 2972 return; 2973 2974 drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n", 2975 intel_dig_port->base.base.base.id, 2976 intel_dig_port->base.base.name); 2977 2978 pp = ilk_get_pp_control(intel_dp); 2979 pp &= ~EDP_FORCE_VDD; 2980 2981 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2982 pp_stat_reg = _pp_stat_reg(intel_dp); 2983 2984 intel_de_write(dev_priv, pp_ctrl_reg, pp); 2985 intel_de_posting_read(dev_priv, pp_ctrl_reg); 2986 2987 /* Make sure sequencer is idle before allowing subsequent activity */ 2988 drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", 2989 intel_de_read(dev_priv, pp_stat_reg), 2990 intel_de_read(dev_priv, pp_ctrl_reg)); 2991 2992 if ((pp & PANEL_POWER_ON) == 0) 2993 intel_dp->panel_power_off_time = ktime_get_boottime(); 2994 2995 intel_display_power_put_unchecked(dev_priv, 2996 intel_aux_power_domain(intel_dig_port)); 2997 } 2998 2999 static void edp_panel_vdd_work(struct work_struct *__work) 3000 { 3001 struct intel_dp *intel_dp = 3002 container_of(to_delayed_work(__work), 3003 struct intel_dp, panel_vdd_work); 3004 intel_wakeref_t wakeref; 3005 3006 with_pps_lock(intel_dp, wakeref) { 3007 if (!intel_dp->want_panel_vdd) 3008 edp_panel_vdd_off_sync(intel_dp); 3009 } 3010 } 3011 3012 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp) 3013 { 3014 unsigned long delay; 3015 3016 /* 3017 * Queue the timer to fire a long time from now (relative to the power 3018 * down delay) to keep the panel power up across a sequence of 3019 * operations. 3020 */ 3021 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5); 3022 schedule_delayed_work(&intel_dp->panel_vdd_work, delay); 3023 } 3024 3025 /* 3026 * Must be paired with edp_panel_vdd_on(). 3027 * Must hold pps_mutex around the whole on/off sequence. 3028 * Can be nested with intel_edp_panel_vdd_{on,off}() calls. 
3029 */ 3030 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) 3031 { 3032 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3033 3034 lockdep_assert_held(&dev_priv->pps_mutex); 3035 3036 if (!intel_dp_is_edp(intel_dp)) 3037 return; 3038 3039 I915_STATE_WARN(!intel_dp->want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on", 3040 dp_to_dig_port(intel_dp)->base.base.base.id, 3041 dp_to_dig_port(intel_dp)->base.base.name); 3042 3043 intel_dp->want_panel_vdd = false; 3044 3045 if (sync) 3046 edp_panel_vdd_off_sync(intel_dp); 3047 else 3048 edp_panel_vdd_schedule_off(intel_dp); 3049 } 3050 3051 static void edp_panel_on(struct intel_dp *intel_dp) 3052 { 3053 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3054 u32 pp; 3055 i915_reg_t pp_ctrl_reg; 3056 3057 lockdep_assert_held(&dev_priv->pps_mutex); 3058 3059 if (!intel_dp_is_edp(intel_dp)) 3060 return; 3061 3062 drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n", 3063 dp_to_dig_port(intel_dp)->base.base.base.id, 3064 dp_to_dig_port(intel_dp)->base.base.name); 3065 3066 if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp), 3067 "[ENCODER:%d:%s] panel power already on\n", 3068 dp_to_dig_port(intel_dp)->base.base.base.id, 3069 dp_to_dig_port(intel_dp)->base.base.name)) 3070 return; 3071 3072 wait_panel_power_cycle(intel_dp); 3073 3074 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 3075 pp = ilk_get_pp_control(intel_dp); 3076 if (IS_GEN(dev_priv, 5)) { 3077 /* ILK workaround: disable reset around power sequence */ 3078 pp &= ~PANEL_POWER_RESET; 3079 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3080 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3081 } 3082 3083 pp |= PANEL_POWER_ON; 3084 if (!IS_GEN(dev_priv, 5)) 3085 pp |= PANEL_POWER_RESET; 3086 3087 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3088 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3089 3090 wait_panel_on(intel_dp); 3091 intel_dp->last_power_on = jiffies; 3092 3093 if (IS_GEN(dev_priv, 5)) { 3094 pp |= PANEL_POWER_RESET; /* restore panel reset bit */ 3095 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3096 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3097 } 3098 } 3099 3100 void intel_edp_panel_on(struct intel_dp *intel_dp) 3101 { 3102 intel_wakeref_t wakeref; 3103 3104 if (!intel_dp_is_edp(intel_dp)) 3105 return; 3106 3107 with_pps_lock(intel_dp, wakeref) 3108 edp_panel_on(intel_dp); 3109 } 3110 3111 3112 static void edp_panel_off(struct intel_dp *intel_dp) 3113 { 3114 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3115 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3116 u32 pp; 3117 i915_reg_t pp_ctrl_reg; 3118 3119 lockdep_assert_held(&dev_priv->pps_mutex); 3120 3121 if (!intel_dp_is_edp(intel_dp)) 3122 return; 3123 3124 drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n", 3125 dig_port->base.base.base.id, dig_port->base.base.name); 3126 3127 drm_WARN(&dev_priv->drm, !intel_dp->want_panel_vdd, 3128 "Need [ENCODER:%d:%s] VDD to turn off panel\n", 3129 dig_port->base.base.base.id, dig_port->base.base.name); 3130 3131 pp = ilk_get_pp_control(intel_dp); 3132 /* We need to switch off panel power _and_ force vdd, for otherwise some 3133 * panels get very unhappy and cease to work. 
*/ 3134 pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD | 3135 EDP_BLC_ENABLE); 3136 3137 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 3138 3139 intel_dp->want_panel_vdd = false; 3140 3141 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3142 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3143 3144 wait_panel_off(intel_dp); 3145 intel_dp->panel_power_off_time = ktime_get_boottime(); 3146 3147 /* We got a reference when we enabled the VDD. */ 3148 intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port)); 3149 } 3150 3151 void intel_edp_panel_off(struct intel_dp *intel_dp) 3152 { 3153 intel_wakeref_t wakeref; 3154 3155 if (!intel_dp_is_edp(intel_dp)) 3156 return; 3157 3158 with_pps_lock(intel_dp, wakeref) 3159 edp_panel_off(intel_dp); 3160 } 3161 3162 /* Enable backlight in the panel power control. */ 3163 static void _intel_edp_backlight_on(struct intel_dp *intel_dp) 3164 { 3165 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3166 intel_wakeref_t wakeref; 3167 3168 /* 3169 * If we enable the backlight right away following a panel power 3170 * on, we may see slight flicker as the panel syncs with the eDP 3171 * link. So delay a bit to make sure the image is solid before 3172 * allowing it to appear. 3173 */ 3174 wait_backlight_on(intel_dp); 3175 3176 with_pps_lock(intel_dp, wakeref) { 3177 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 3178 u32 pp; 3179 3180 pp = ilk_get_pp_control(intel_dp); 3181 pp |= EDP_BLC_ENABLE; 3182 3183 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3184 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3185 } 3186 } 3187 3188 /* Enable backlight PWM and backlight PP control. */ 3189 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state, 3190 const struct drm_connector_state *conn_state) 3191 { 3192 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder)); 3193 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3194 3195 if (!intel_dp_is_edp(intel_dp)) 3196 return; 3197 3198 drm_dbg_kms(&i915->drm, "\n"); 3199 3200 intel_panel_enable_backlight(crtc_state, conn_state); 3201 _intel_edp_backlight_on(intel_dp); 3202 } 3203 3204 /* Disable backlight in the panel power control. */ 3205 static void _intel_edp_backlight_off(struct intel_dp *intel_dp) 3206 { 3207 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3208 intel_wakeref_t wakeref; 3209 3210 if (!intel_dp_is_edp(intel_dp)) 3211 return; 3212 3213 with_pps_lock(intel_dp, wakeref) { 3214 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 3215 u32 pp; 3216 3217 pp = ilk_get_pp_control(intel_dp); 3218 pp &= ~EDP_BLC_ENABLE; 3219 3220 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3221 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3222 } 3223 3224 intel_dp->last_backlight_off = jiffies; 3225 edp_wait_backlight_off(intel_dp); 3226 } 3227 3228 /* Disable backlight PP control and backlight PWM. */ 3229 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state) 3230 { 3231 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)); 3232 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3233 3234 if (!intel_dp_is_edp(intel_dp)) 3235 return; 3236 3237 drm_dbg_kms(&i915->drm, "\n"); 3238 3239 _intel_edp_backlight_off(intel_dp); 3240 intel_panel_disable_backlight(old_conn_state); 3241 } 3242 3243 /* 3244 * Hook for controlling the panel power control backlight through the bl_power 3245 * sysfs attribute. Take care to handle multiple calls. 
3246 */ 3247 static void intel_edp_backlight_power(struct intel_connector *connector, 3248 bool enable) 3249 { 3250 struct drm_i915_private *i915 = to_i915(connector->base.dev); 3251 struct intel_dp *intel_dp = intel_attached_dp(connector); 3252 intel_wakeref_t wakeref; 3253 bool is_enabled; 3254 3255 is_enabled = false; 3256 with_pps_lock(intel_dp, wakeref) 3257 is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE; 3258 if (is_enabled == enable) 3259 return; 3260 3261 drm_dbg_kms(&i915->drm, "panel power control backlight %s\n", 3262 enable ? "enable" : "disable"); 3263 3264 if (enable) 3265 _intel_edp_backlight_on(intel_dp); 3266 else 3267 _intel_edp_backlight_off(intel_dp); 3268 } 3269 3270 static void assert_dp_port(struct intel_dp *intel_dp, bool state) 3271 { 3272 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3273 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 3274 bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN; 3275 3276 I915_STATE_WARN(cur_state != state, 3277 "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n", 3278 dig_port->base.base.base.id, dig_port->base.base.name, 3279 onoff(state), onoff(cur_state)); 3280 } 3281 #define assert_dp_port_disabled(d) assert_dp_port((d), false) 3282 3283 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state) 3284 { 3285 bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE; 3286 3287 I915_STATE_WARN(cur_state != state, 3288 "eDP PLL state assertion failure (expected %s, current %s)\n", 3289 onoff(state), onoff(cur_state)); 3290 } 3291 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true) 3292 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false) 3293 3294 static void ilk_edp_pll_on(struct intel_dp *intel_dp, 3295 const struct intel_crtc_state *pipe_config) 3296 { 3297 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 3298 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3299 3300 assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder); 3301 assert_dp_port_disabled(intel_dp); 3302 assert_edp_pll_disabled(dev_priv); 3303 3304 drm_dbg_kms(&dev_priv->drm, "enabling eDP PLL for clock %d\n", 3305 pipe_config->port_clock); 3306 3307 intel_dp->DP &= ~DP_PLL_FREQ_MASK; 3308 3309 if (pipe_config->port_clock == 162000) 3310 intel_dp->DP |= DP_PLL_FREQ_162MHZ; 3311 else 3312 intel_dp->DP |= DP_PLL_FREQ_270MHZ; 3313 3314 intel_de_write(dev_priv, DP_A, intel_dp->DP); 3315 intel_de_posting_read(dev_priv, DP_A); 3316 udelay(500); 3317 3318 /* 3319 * [DevILK] Work around required when enabling DP PLL 3320 * while a pipe is enabled going to FDI: 3321 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI 3322 * 2. 
Program DP PLL enable 3323 */ 3324 if (IS_GEN(dev_priv, 5)) 3325 intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe); 3326 3327 intel_dp->DP |= DP_PLL_ENABLE; 3328 3329 intel_de_write(dev_priv, DP_A, intel_dp->DP); 3330 intel_de_posting_read(dev_priv, DP_A); 3331 udelay(200); 3332 } 3333 3334 static void ilk_edp_pll_off(struct intel_dp *intel_dp, 3335 const struct intel_crtc_state *old_crtc_state) 3336 { 3337 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 3338 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3339 3340 assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder); 3341 assert_dp_port_disabled(intel_dp); 3342 assert_edp_pll_enabled(dev_priv); 3343 3344 drm_dbg_kms(&dev_priv->drm, "disabling eDP PLL\n"); 3345 3346 intel_dp->DP &= ~DP_PLL_ENABLE; 3347 3348 intel_de_write(dev_priv, DP_A, intel_dp->DP); 3349 intel_de_posting_read(dev_priv, DP_A); 3350 udelay(200); 3351 } 3352 3353 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp) 3354 { 3355 /* 3356 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus 3357 * be capable of signalling downstream hpd with a long pulse. 3358 * Whether or not that means D3 is safe to use is not clear, 3359 * but let's assume so until proven otherwise. 3360 * 3361 * FIXME should really check all downstream ports... 3362 */ 3363 return intel_dp->dpcd[DP_DPCD_REV] == 0x11 && 3364 drm_dp_is_branch(intel_dp->dpcd) && 3365 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD; 3366 } 3367 3368 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, 3369 const struct intel_crtc_state *crtc_state, 3370 bool enable) 3371 { 3372 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3373 int ret; 3374 3375 if (!crtc_state->dsc.compression_enable) 3376 return; 3377 3378 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE, 3379 enable ? DP_DECOMPRESSION_EN : 0); 3380 if (ret < 0) 3381 drm_dbg_kms(&i915->drm, 3382 "Failed to %s sink decompression state\n", 3383 enable ? "enable" : "disable"); 3384 } 3385 3386 /* If the sink supports it, try to set the power state appropriately */ 3387 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) 3388 { 3389 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3390 int ret, i; 3391 3392 /* Should have a valid DPCD by this point */ 3393 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 3394 return; 3395 3396 if (mode != DRM_MODE_DPMS_ON) { 3397 if (downstream_hpd_needs_d0(intel_dp)) 3398 return; 3399 3400 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, 3401 DP_SET_POWER_D3); 3402 } else { 3403 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); 3404 3405 /* 3406 * When turning on, we need to retry for 1ms to give the sink 3407 * time to wake up. 3408 */ 3409 for (i = 0; i < 3; i++) { 3410 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, 3411 DP_SET_POWER_D0); 3412 if (ret == 1) 3413 break; 3414 msleep(1); 3415 } 3416 3417 if (ret == 1 && lspcon->active) 3418 lspcon_wait_pcon_mode(lspcon); 3419 } 3420 3421 if (ret != 1) 3422 drm_dbg_kms(&i915->drm, "failed to %s sink power state\n", 3423 mode == DRM_MODE_DPMS_ON ? 
"enable" : "disable"); 3424 } 3425 3426 static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv, 3427 enum port port, enum pipe *pipe) 3428 { 3429 enum pipe p; 3430 3431 for_each_pipe(dev_priv, p) { 3432 u32 val = intel_de_read(dev_priv, TRANS_DP_CTL(p)); 3433 3434 if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) { 3435 *pipe = p; 3436 return true; 3437 } 3438 } 3439 3440 drm_dbg_kms(&dev_priv->drm, "No pipe for DP port %c found\n", 3441 port_name(port)); 3442 3443 /* must initialize pipe to something for the asserts */ 3444 *pipe = PIPE_A; 3445 3446 return false; 3447 } 3448 3449 bool intel_dp_port_enabled(struct drm_i915_private *dev_priv, 3450 i915_reg_t dp_reg, enum port port, 3451 enum pipe *pipe) 3452 { 3453 bool ret; 3454 u32 val; 3455 3456 val = intel_de_read(dev_priv, dp_reg); 3457 3458 ret = val & DP_PORT_EN; 3459 3460 /* asserts want to know the pipe even if the port is disabled */ 3461 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) 3462 *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB; 3463 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) 3464 ret &= cpt_dp_port_selected(dev_priv, port, pipe); 3465 else if (IS_CHERRYVIEW(dev_priv)) 3466 *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV; 3467 else 3468 *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT; 3469 3470 return ret; 3471 } 3472 3473 static bool intel_dp_get_hw_state(struct intel_encoder *encoder, 3474 enum pipe *pipe) 3475 { 3476 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3477 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3478 intel_wakeref_t wakeref; 3479 bool ret; 3480 3481 wakeref = intel_display_power_get_if_enabled(dev_priv, 3482 encoder->power_domain); 3483 if (!wakeref) 3484 return false; 3485 3486 ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg, 3487 encoder->port, pipe); 3488 3489 intel_display_power_put(dev_priv, encoder->power_domain, wakeref); 3490 3491 return ret; 3492 } 3493 3494 static void intel_dp_get_config(struct intel_encoder *encoder, 3495 struct intel_crtc_state *pipe_config) 3496 { 3497 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3498 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3499 u32 tmp, flags = 0; 3500 enum port port = encoder->port; 3501 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 3502 3503 if (encoder->type == INTEL_OUTPUT_EDP) 3504 pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP); 3505 else 3506 pipe_config->output_types |= BIT(INTEL_OUTPUT_DP); 3507 3508 tmp = intel_de_read(dev_priv, intel_dp->output_reg); 3509 3510 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A; 3511 3512 if (HAS_PCH_CPT(dev_priv) && port != PORT_A) { 3513 u32 trans_dp = intel_de_read(dev_priv, 3514 TRANS_DP_CTL(crtc->pipe)); 3515 3516 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH) 3517 flags |= DRM_MODE_FLAG_PHSYNC; 3518 else 3519 flags |= DRM_MODE_FLAG_NHSYNC; 3520 3521 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH) 3522 flags |= DRM_MODE_FLAG_PVSYNC; 3523 else 3524 flags |= DRM_MODE_FLAG_NVSYNC; 3525 } else { 3526 if (tmp & DP_SYNC_HS_HIGH) 3527 flags |= DRM_MODE_FLAG_PHSYNC; 3528 else 3529 flags |= DRM_MODE_FLAG_NHSYNC; 3530 3531 if (tmp & DP_SYNC_VS_HIGH) 3532 flags |= DRM_MODE_FLAG_PVSYNC; 3533 else 3534 flags |= DRM_MODE_FLAG_NVSYNC; 3535 } 3536 3537 pipe_config->hw.adjusted_mode.flags |= flags; 3538 3539 if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235) 3540 pipe_config->limited_color_range = true; 3541 3542 pipe_config->lane_count = 3543 ((tmp & 
DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1; 3544 3545 intel_dp_get_m_n(crtc, pipe_config); 3546 3547 if (port == PORT_A) { 3548 if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ) 3549 pipe_config->port_clock = 162000; 3550 else 3551 pipe_config->port_clock = 270000; 3552 } 3553 3554 pipe_config->hw.adjusted_mode.crtc_clock = 3555 intel_dotclock_calculate(pipe_config->port_clock, 3556 &pipe_config->dp_m_n); 3557 3558 if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp && 3559 pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) { 3560 /* 3561 * This is a big fat ugly hack. 3562 * 3563 * Some machines in UEFI boot mode provide us a VBT that has 18 3564 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons 3565 * unknown we fail to light up. Yet the same BIOS boots up with 3566 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as 3567 * max, not what it tells us to use. 3568 * 3569 * Note: This will still be broken if the eDP panel is not lit 3570 * up by the BIOS, and thus we can't get the mode at module 3571 * load. 3572 */ 3573 drm_dbg_kms(&dev_priv->drm, 3574 "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n", 3575 pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp); 3576 dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp; 3577 } 3578 } 3579 3580 static void intel_disable_dp(struct intel_atomic_state *state, 3581 struct intel_encoder *encoder, 3582 const struct intel_crtc_state *old_crtc_state, 3583 const struct drm_connector_state *old_conn_state) 3584 { 3585 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3586 3587 intel_dp->link_trained = false; 3588 3589 if (old_crtc_state->has_audio) 3590 intel_audio_codec_disable(encoder, 3591 old_crtc_state, old_conn_state); 3592 3593 /* Make sure the panel is off before trying to change the mode. But also 3594 * ensure that we have vdd while we switch off the panel. */ 3595 intel_edp_panel_vdd_on(intel_dp); 3596 intel_edp_backlight_off(old_conn_state); 3597 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); 3598 intel_edp_panel_off(intel_dp); 3599 } 3600 3601 static void g4x_disable_dp(struct intel_atomic_state *state, 3602 struct intel_encoder *encoder, 3603 const struct intel_crtc_state *old_crtc_state, 3604 const struct drm_connector_state *old_conn_state) 3605 { 3606 intel_disable_dp(state, encoder, old_crtc_state, old_conn_state); 3607 } 3608 3609 static void vlv_disable_dp(struct intel_atomic_state *state, 3610 struct intel_encoder *encoder, 3611 const struct intel_crtc_state *old_crtc_state, 3612 const struct drm_connector_state *old_conn_state) 3613 { 3614 intel_disable_dp(state, encoder, old_crtc_state, old_conn_state); 3615 } 3616 3617 static void g4x_post_disable_dp(struct intel_atomic_state *state, 3618 struct intel_encoder *encoder, 3619 const struct intel_crtc_state *old_crtc_state, 3620 const struct drm_connector_state *old_conn_state) 3621 { 3622 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3623 enum port port = encoder->port; 3624 3625 /* 3626 * Bspec does not list a specific disable sequence for g4x DP. 3627 * Follow the ilk+ sequence (disable pipe before the port) for 3628 * g4x DP as it does not suffer from underruns like the normal 3629 * g4x modeset sequence (disable pipe after the port). 
3630 */ 3631 intel_dp_link_down(encoder, old_crtc_state); 3632 3633 /* Only ilk+ has port A */ 3634 if (port == PORT_A) 3635 ilk_edp_pll_off(intel_dp, old_crtc_state); 3636 } 3637 3638 static void vlv_post_disable_dp(struct intel_atomic_state *state, 3639 struct intel_encoder *encoder, 3640 const struct intel_crtc_state *old_crtc_state, 3641 const struct drm_connector_state *old_conn_state) 3642 { 3643 intel_dp_link_down(encoder, old_crtc_state); 3644 } 3645 3646 static void chv_post_disable_dp(struct intel_atomic_state *state, 3647 struct intel_encoder *encoder, 3648 const struct intel_crtc_state *old_crtc_state, 3649 const struct drm_connector_state *old_conn_state) 3650 { 3651 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3652 3653 intel_dp_link_down(encoder, old_crtc_state); 3654 3655 vlv_dpio_get(dev_priv); 3656 3657 /* Assert data lane reset */ 3658 chv_data_lane_soft_reset(encoder, old_crtc_state, true); 3659 3660 vlv_dpio_put(dev_priv); 3661 } 3662 3663 static void 3664 cpt_set_link_train(struct intel_dp *intel_dp, 3665 u8 dp_train_pat) 3666 { 3667 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3668 u32 *DP = &intel_dp->DP; 3669 3670 *DP &= ~DP_LINK_TRAIN_MASK_CPT; 3671 3672 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 3673 case DP_TRAINING_PATTERN_DISABLE: 3674 *DP |= DP_LINK_TRAIN_OFF_CPT; 3675 break; 3676 case DP_TRAINING_PATTERN_1: 3677 *DP |= DP_LINK_TRAIN_PAT_1_CPT; 3678 break; 3679 case DP_TRAINING_PATTERN_2: 3680 *DP |= DP_LINK_TRAIN_PAT_2_CPT; 3681 break; 3682 case DP_TRAINING_PATTERN_3: 3683 drm_dbg_kms(&dev_priv->drm, 3684 "TPS3 not supported, using TPS2 instead\n"); 3685 *DP |= DP_LINK_TRAIN_PAT_2_CPT; 3686 break; 3687 } 3688 3689 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 3690 intel_de_posting_read(dev_priv, intel_dp->output_reg); 3691 } 3692 3693 static void 3694 g4x_set_link_train(struct intel_dp *intel_dp, 3695 u8 dp_train_pat) 3696 { 3697 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3698 u32 *DP = &intel_dp->DP; 3699 3700 *DP &= ~DP_LINK_TRAIN_MASK; 3701 3702 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 3703 case DP_TRAINING_PATTERN_DISABLE: 3704 *DP |= DP_LINK_TRAIN_OFF; 3705 break; 3706 case DP_TRAINING_PATTERN_1: 3707 *DP |= DP_LINK_TRAIN_PAT_1; 3708 break; 3709 case DP_TRAINING_PATTERN_2: 3710 *DP |= DP_LINK_TRAIN_PAT_2; 3711 break; 3712 case DP_TRAINING_PATTERN_3: 3713 drm_dbg_kms(&dev_priv->drm, 3714 "TPS3 not supported, using TPS2 instead\n"); 3715 *DP |= DP_LINK_TRAIN_PAT_2; 3716 break; 3717 } 3718 3719 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 3720 intel_de_posting_read(dev_priv, intel_dp->output_reg); 3721 } 3722 3723 static void intel_dp_enable_port(struct intel_dp *intel_dp, 3724 const struct intel_crtc_state *old_crtc_state) 3725 { 3726 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3727 3728 /* enable with pattern 1 (as per spec) */ 3729 3730 intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1); 3731 3732 /* 3733 * Magic for VLV/CHV. We _must_ first set up the register 3734 * without actually enabling the port, and then do another 3735 * write to enable the port. Otherwise link training will 3736 * fail when the power sequencer is freshly used for this port. 
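* In the sequence below the first write is done by
* intel_dp_program_link_training_pattern() above with DP_PORT_EN
* still clear; only the second write, after DP_PORT_EN has been
* OR'ed in, actually enables the port.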
3737 */ 3738 intel_dp->DP |= DP_PORT_EN; 3739 if (old_crtc_state->has_audio) 3740 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; 3741 3742 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 3743 intel_de_posting_read(dev_priv, intel_dp->output_reg); 3744 } 3745 3746 static void intel_enable_dp(struct intel_atomic_state *state, 3747 struct intel_encoder *encoder, 3748 const struct intel_crtc_state *pipe_config, 3749 const struct drm_connector_state *conn_state) 3750 { 3751 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3752 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3753 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 3754 u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg); 3755 enum pipe pipe = crtc->pipe; 3756 intel_wakeref_t wakeref; 3757 3758 if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN)) 3759 return; 3760 3761 with_pps_lock(intel_dp, wakeref) { 3762 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 3763 vlv_init_panel_power_sequencer(encoder, pipe_config); 3764 3765 intel_dp_enable_port(intel_dp, pipe_config); 3766 3767 edp_panel_vdd_on(intel_dp); 3768 edp_panel_on(intel_dp); 3769 edp_panel_vdd_off(intel_dp, true); 3770 } 3771 3772 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 3773 unsigned int lane_mask = 0x0; 3774 3775 if (IS_CHERRYVIEW(dev_priv)) 3776 lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count); 3777 3778 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp), 3779 lane_mask); 3780 } 3781 3782 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 3783 intel_dp_start_link_train(intel_dp); 3784 intel_dp_stop_link_train(intel_dp); 3785 3786 if (pipe_config->has_audio) { 3787 drm_dbg(&dev_priv->drm, "Enabling DP audio on pipe %c\n", 3788 pipe_name(pipe)); 3789 intel_audio_codec_enable(encoder, pipe_config, conn_state); 3790 } 3791 } 3792 3793 static void g4x_enable_dp(struct intel_atomic_state *state, 3794 struct intel_encoder *encoder, 3795 const struct intel_crtc_state *pipe_config, 3796 const struct drm_connector_state *conn_state) 3797 { 3798 intel_enable_dp(state, encoder, pipe_config, conn_state); 3799 intel_edp_backlight_on(pipe_config, conn_state); 3800 } 3801 3802 static void vlv_enable_dp(struct intel_atomic_state *state, 3803 struct intel_encoder *encoder, 3804 const struct intel_crtc_state *pipe_config, 3805 const struct drm_connector_state *conn_state) 3806 { 3807 intel_edp_backlight_on(pipe_config, conn_state); 3808 } 3809 3810 static void g4x_pre_enable_dp(struct intel_atomic_state *state, 3811 struct intel_encoder *encoder, 3812 const struct intel_crtc_state *pipe_config, 3813 const struct drm_connector_state *conn_state) 3814 { 3815 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3816 enum port port = encoder->port; 3817 3818 intel_dp_prepare(encoder, pipe_config); 3819 3820 /* Only ilk+ has port A */ 3821 if (port == PORT_A) 3822 ilk_edp_pll_on(intel_dp, pipe_config); 3823 } 3824 3825 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) 3826 { 3827 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3828 struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); 3829 enum pipe pipe = intel_dp->pps_pipe; 3830 i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe); 3831 3832 drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE); 3833 3834 if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B)) 3835 return; 3836 3837 edp_panel_vdd_off_sync(intel_dp); 3838 3839 /* 3840 * VLV seems to get confused when multiple power 
sequencers 3841 * have the same port selected (even if only one has power/vdd 3842 * enabled). The failure manifests as vlv_wait_port_ready() failing. 3843 * CHV on the other hand doesn't seem to mind having the same port 3844 * selected in multiple power sequencers, but let's clear the 3845 * port select always when logically disconnecting a power sequencer 3846 * from a port. 3847 */ 3848 drm_dbg_kms(&dev_priv->drm, 3849 "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n", 3850 pipe_name(pipe), intel_dig_port->base.base.base.id, 3851 intel_dig_port->base.base.name); 3852 intel_de_write(dev_priv, pp_on_reg, 0); 3853 intel_de_posting_read(dev_priv, pp_on_reg); 3854 3855 intel_dp->pps_pipe = INVALID_PIPE; 3856 } 3857 3858 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv, 3859 enum pipe pipe) 3860 { 3861 struct intel_encoder *encoder; 3862 3863 lockdep_assert_held(&dev_priv->pps_mutex); 3864 3865 for_each_intel_dp(&dev_priv->drm, encoder) { 3866 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3867 3868 drm_WARN(&dev_priv->drm, intel_dp->active_pipe == pipe, 3869 "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n", 3870 pipe_name(pipe), encoder->base.base.id, 3871 encoder->base.name); 3872 3873 if (intel_dp->pps_pipe != pipe) 3874 continue; 3875 3876 drm_dbg_kms(&dev_priv->drm, 3877 "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n", 3878 pipe_name(pipe), encoder->base.base.id, 3879 encoder->base.name); 3880 3881 /* make sure vdd is off before we steal it */ 3882 vlv_detach_power_sequencer(intel_dp); 3883 } 3884 } 3885 3886 static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder, 3887 const struct intel_crtc_state *crtc_state) 3888 { 3889 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3890 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3891 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3892 3893 lockdep_assert_held(&dev_priv->pps_mutex); 3894 3895 drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE); 3896 3897 if (intel_dp->pps_pipe != INVALID_PIPE && 3898 intel_dp->pps_pipe != crtc->pipe) { 3899 /* 3900 * If another power sequencer was being used on this 3901 * port previously, make sure to turn off vdd there while 3902 * we still have control of it. 3903 */ 3904 vlv_detach_power_sequencer(intel_dp); 3905 } 3906 3907 /* 3908 * We may be stealing the power 3909 * sequencer from another port.
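* vlv_steal_power_sequencer() below detaches it from whichever
* encoder currently claims this pipe before we take it over.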
3910 */ 3911 vlv_steal_power_sequencer(dev_priv, crtc->pipe); 3912 3913 intel_dp->active_pipe = crtc->pipe; 3914 3915 if (!intel_dp_is_edp(intel_dp)) 3916 return; 3917 3918 /* now it's all ours */ 3919 intel_dp->pps_pipe = crtc->pipe; 3920 3921 drm_dbg_kms(&dev_priv->drm, 3922 "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n", 3923 pipe_name(intel_dp->pps_pipe), encoder->base.base.id, 3924 encoder->base.name); 3925 3926 /* init power sequencer on this pipe and port */ 3927 intel_dp_init_panel_power_sequencer(intel_dp); 3928 intel_dp_init_panel_power_sequencer_registers(intel_dp, true); 3929 } 3930 3931 static void vlv_pre_enable_dp(struct intel_atomic_state *state, 3932 struct intel_encoder *encoder, 3933 const struct intel_crtc_state *pipe_config, 3934 const struct drm_connector_state *conn_state) 3935 { 3936 vlv_phy_pre_encoder_enable(encoder, pipe_config); 3937 3938 intel_enable_dp(state, encoder, pipe_config, conn_state); 3939 } 3940 3941 static void vlv_dp_pre_pll_enable(struct intel_atomic_state *state, 3942 struct intel_encoder *encoder, 3943 const struct intel_crtc_state *pipe_config, 3944 const struct drm_connector_state *conn_state) 3945 { 3946 intel_dp_prepare(encoder, pipe_config); 3947 3948 vlv_phy_pre_pll_enable(encoder, pipe_config); 3949 } 3950 3951 static void chv_pre_enable_dp(struct intel_atomic_state *state, 3952 struct intel_encoder *encoder, 3953 const struct intel_crtc_state *pipe_config, 3954 const struct drm_connector_state *conn_state) 3955 { 3956 chv_phy_pre_encoder_enable(encoder, pipe_config); 3957 3958 intel_enable_dp(state, encoder, pipe_config, conn_state); 3959 3960 /* Second common lane will stay alive on its own now */ 3961 chv_phy_release_cl2_override(encoder); 3962 } 3963 3964 static void chv_dp_pre_pll_enable(struct intel_atomic_state *state, 3965 struct intel_encoder *encoder, 3966 const struct intel_crtc_state *pipe_config, 3967 const struct drm_connector_state *conn_state) 3968 { 3969 intel_dp_prepare(encoder, pipe_config); 3970 3971 chv_phy_pre_pll_enable(encoder, pipe_config); 3972 } 3973 3974 static void chv_dp_post_pll_disable(struct intel_atomic_state *state, 3975 struct intel_encoder *encoder, 3976 const struct intel_crtc_state *old_crtc_state, 3977 const struct drm_connector_state *old_conn_state) 3978 { 3979 chv_phy_post_pll_disable(encoder, old_crtc_state); 3980 } 3981 3982 /* 3983 * Fetch AUX CH registers 0x202 - 0x207 which contain 3984 * link status information 3985 */ 3986 bool 3987 intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE]) 3988 { 3989 return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status, 3990 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE; 3991 } 3992 3993 static u8 intel_dp_voltage_max_2(struct intel_dp *intel_dp) 3994 { 3995 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; 3996 } 3997 3998 static u8 intel_dp_voltage_max_3(struct intel_dp *intel_dp) 3999 { 4000 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; 4001 } 4002 4003 static u8 intel_dp_preemph_max_2(struct intel_dp *intel_dp) 4004 { 4005 return DP_TRAIN_PRE_EMPH_LEVEL_2; 4006 } 4007 4008 static u8 intel_dp_preemph_max_3(struct intel_dp *intel_dp) 4009 { 4010 return DP_TRAIN_PRE_EMPH_LEVEL_3; 4011 } 4012 4013 static void vlv_set_signal_levels(struct intel_dp *intel_dp) 4014 { 4015 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 4016 unsigned long demph_reg_value, preemph_reg_value, 4017 uniqtranscale_reg_value; 4018 u8 train_set = intel_dp->train_set[0]; 4019 4020 switch (train_set &
DP_TRAIN_PRE_EMPHASIS_MASK) { 4021 case DP_TRAIN_PRE_EMPH_LEVEL_0: 4022 preemph_reg_value = 0x0004000; 4023 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4024 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4025 demph_reg_value = 0x2B405555; 4026 uniqtranscale_reg_value = 0x552AB83A; 4027 break; 4028 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4029 demph_reg_value = 0x2B404040; 4030 uniqtranscale_reg_value = 0x5548B83A; 4031 break; 4032 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4033 demph_reg_value = 0x2B245555; 4034 uniqtranscale_reg_value = 0x5560B83A; 4035 break; 4036 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 4037 demph_reg_value = 0x2B405555; 4038 uniqtranscale_reg_value = 0x5598DA3A; 4039 break; 4040 default: 4041 return; 4042 } 4043 break; 4044 case DP_TRAIN_PRE_EMPH_LEVEL_1: 4045 preemph_reg_value = 0x0002000; 4046 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4047 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4048 demph_reg_value = 0x2B404040; 4049 uniqtranscale_reg_value = 0x5552B83A; 4050 break; 4051 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4052 demph_reg_value = 0x2B404848; 4053 uniqtranscale_reg_value = 0x5580B83A; 4054 break; 4055 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4056 demph_reg_value = 0x2B404040; 4057 uniqtranscale_reg_value = 0x55ADDA3A; 4058 break; 4059 default: 4060 return; 4061 } 4062 break; 4063 case DP_TRAIN_PRE_EMPH_LEVEL_2: 4064 preemph_reg_value = 0x0000000; 4065 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4066 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4067 demph_reg_value = 0x2B305555; 4068 uniqtranscale_reg_value = 0x5570B83A; 4069 break; 4070 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4071 demph_reg_value = 0x2B2B4040; 4072 uniqtranscale_reg_value = 0x55ADDA3A; 4073 break; 4074 default: 4075 return; 4076 } 4077 break; 4078 case DP_TRAIN_PRE_EMPH_LEVEL_3: 4079 preemph_reg_value = 0x0006000; 4080 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4081 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4082 demph_reg_value = 0x1B405555; 4083 uniqtranscale_reg_value = 0x55ADDA3A; 4084 break; 4085 default: 4086 return; 4087 } 4088 break; 4089 default: 4090 return; 4091 } 4092 4093 vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value, 4094 uniqtranscale_reg_value, 0); 4095 } 4096 4097 static void chv_set_signal_levels(struct intel_dp *intel_dp) 4098 { 4099 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 4100 u32 deemph_reg_value, margin_reg_value; 4101 bool uniq_trans_scale = false; 4102 u8 train_set = intel_dp->train_set[0]; 4103 4104 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 4105 case DP_TRAIN_PRE_EMPH_LEVEL_0: 4106 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4107 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4108 deemph_reg_value = 128; 4109 margin_reg_value = 52; 4110 break; 4111 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4112 deemph_reg_value = 128; 4113 margin_reg_value = 77; 4114 break; 4115 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4116 deemph_reg_value = 128; 4117 margin_reg_value = 102; 4118 break; 4119 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 4120 deemph_reg_value = 128; 4121 margin_reg_value = 154; 4122 uniq_trans_scale = true; 4123 break; 4124 default: 4125 return; 4126 } 4127 break; 4128 case DP_TRAIN_PRE_EMPH_LEVEL_1: 4129 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4130 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4131 deemph_reg_value = 85; 4132 margin_reg_value = 78; 4133 break; 4134 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4135 deemph_reg_value = 85; 4136 margin_reg_value = 116; 4137 break; 4138 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4139 deemph_reg_value = 85; 4140 
margin_reg_value = 154; 4141 break; 4142 default: 4143 return; 4144 } 4145 break; 4146 case DP_TRAIN_PRE_EMPH_LEVEL_2: 4147 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4148 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4149 deemph_reg_value = 64; 4150 margin_reg_value = 104; 4151 break; 4152 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4153 deemph_reg_value = 64; 4154 margin_reg_value = 154; 4155 break; 4156 default: 4157 return; 4158 } 4159 break; 4160 case DP_TRAIN_PRE_EMPH_LEVEL_3: 4161 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4162 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4163 deemph_reg_value = 43; 4164 margin_reg_value = 154; 4165 break; 4166 default: 4167 return; 4168 } 4169 break; 4170 default: 4171 return; 4172 } 4173 4174 chv_set_phy_signal_level(encoder, deemph_reg_value, 4175 margin_reg_value, uniq_trans_scale); 4176 } 4177 4178 static u32 g4x_signal_levels(u8 train_set) 4179 { 4180 u32 signal_levels = 0; 4181 4182 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4183 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4184 default: 4185 signal_levels |= DP_VOLTAGE_0_4; 4186 break; 4187 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4188 signal_levels |= DP_VOLTAGE_0_6; 4189 break; 4190 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4191 signal_levels |= DP_VOLTAGE_0_8; 4192 break; 4193 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 4194 signal_levels |= DP_VOLTAGE_1_2; 4195 break; 4196 } 4197 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 4198 case DP_TRAIN_PRE_EMPH_LEVEL_0: 4199 default: 4200 signal_levels |= DP_PRE_EMPHASIS_0; 4201 break; 4202 case DP_TRAIN_PRE_EMPH_LEVEL_1: 4203 signal_levels |= DP_PRE_EMPHASIS_3_5; 4204 break; 4205 case DP_TRAIN_PRE_EMPH_LEVEL_2: 4206 signal_levels |= DP_PRE_EMPHASIS_6; 4207 break; 4208 case DP_TRAIN_PRE_EMPH_LEVEL_3: 4209 signal_levels |= DP_PRE_EMPHASIS_9_5; 4210 break; 4211 } 4212 return signal_levels; 4213 } 4214 4215 static void 4216 g4x_set_signal_levels(struct intel_dp *intel_dp) 4217 { 4218 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4219 u8 train_set = intel_dp->train_set[0]; 4220 u32 signal_levels; 4221 4222 signal_levels = g4x_signal_levels(train_set); 4223 4224 drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", 4225 signal_levels); 4226 4227 intel_dp->DP &= ~(DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK); 4228 intel_dp->DP |= signal_levels; 4229 4230 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 4231 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4232 } 4233 4234 /* SNB CPU eDP voltage swing and pre-emphasis control */ 4235 static u32 snb_cpu_edp_signal_levels(u8 train_set) 4236 { 4237 u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 4238 DP_TRAIN_PRE_EMPHASIS_MASK); 4239 4240 switch (signal_levels) { 4241 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4242 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4243 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 4244 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4245 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B; 4246 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2: 4247 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2: 4248 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B; 4249 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4250 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4251 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B; 4252 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4253 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4254 return 
EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B; 4255 default: 4256 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 4257 "0x%x\n", signal_levels); 4258 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 4259 } 4260 } 4261 4262 static void 4263 snb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp) 4264 { 4265 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4266 u8 train_set = intel_dp->train_set[0]; 4267 u32 signal_levels; 4268 4269 signal_levels = snb_cpu_edp_signal_levels(train_set); 4270 4271 drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", 4272 signal_levels); 4273 4274 intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB; 4275 intel_dp->DP |= signal_levels; 4276 4277 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 4278 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4279 } 4280 4281 /* IVB CPU eDP voltage swing and pre-emphasis control */ 4282 static u32 ivb_cpu_edp_signal_levels(u8 train_set) 4283 { 4284 u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 4285 DP_TRAIN_PRE_EMPHASIS_MASK); 4286 4287 switch (signal_levels) { 4288 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4289 return EDP_LINK_TRAIN_400MV_0DB_IVB; 4290 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4291 return EDP_LINK_TRAIN_400MV_3_5DB_IVB; 4292 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2: 4293 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2: 4294 return EDP_LINK_TRAIN_400MV_6DB_IVB; 4295 4296 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4297 return EDP_LINK_TRAIN_600MV_0DB_IVB; 4298 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4299 return EDP_LINK_TRAIN_600MV_3_5DB_IVB; 4300 4301 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4302 return EDP_LINK_TRAIN_800MV_0DB_IVB; 4303 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4304 return EDP_LINK_TRAIN_800MV_3_5DB_IVB; 4305 4306 default: 4307 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 4308 "0x%x\n", signal_levels); 4309 return EDP_LINK_TRAIN_500MV_0DB_IVB; 4310 } 4311 } 4312 4313 static void 4314 ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp) 4315 { 4316 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4317 u8 train_set = intel_dp->train_set[0]; 4318 u32 signal_levels; 4319 4320 signal_levels = ivb_cpu_edp_signal_levels(train_set); 4321 4322 drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", 4323 signal_levels); 4324 4325 intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB; 4326 intel_dp->DP |= signal_levels; 4327 4328 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 4329 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4330 } 4331 4332 void intel_dp_set_signal_levels(struct intel_dp *intel_dp) 4333 { 4334 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4335 u8 train_set = intel_dp->train_set[0]; 4336 4337 drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n", 4338 train_set & DP_TRAIN_VOLTAGE_SWING_MASK, 4339 train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : ""); 4340 drm_dbg_kms(&dev_priv->drm, "Using pre-emphasis level %d%s\n", 4341 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >> 4342 DP_TRAIN_PRE_EMPHASIS_SHIFT, 4343 train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ? 
4344 " (max)" : ""); 4345 4346 intel_dp->set_signal_levels(intel_dp); 4347 } 4348 4349 void 4350 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, 4351 u8 dp_train_pat) 4352 { 4353 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4354 u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd); 4355 4356 if (dp_train_pat & train_pat_mask) 4357 drm_dbg_kms(&dev_priv->drm, 4358 "Using DP training pattern TPS%d\n", 4359 dp_train_pat & train_pat_mask); 4360 4361 intel_dp->set_link_train(intel_dp, dp_train_pat); 4362 } 4363 4364 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) 4365 { 4366 if (intel_dp->set_idle_link_train) 4367 intel_dp->set_idle_link_train(intel_dp); 4368 } 4369 4370 static void 4371 intel_dp_link_down(struct intel_encoder *encoder, 4372 const struct intel_crtc_state *old_crtc_state) 4373 { 4374 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4375 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 4376 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 4377 enum port port = encoder->port; 4378 u32 DP = intel_dp->DP; 4379 4380 if (drm_WARN_ON(&dev_priv->drm, 4381 (intel_de_read(dev_priv, intel_dp->output_reg) & 4382 DP_PORT_EN) == 0)) 4383 return; 4384 4385 drm_dbg_kms(&dev_priv->drm, "\n"); 4386 4387 if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) || 4388 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) { 4389 DP &= ~DP_LINK_TRAIN_MASK_CPT; 4390 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT; 4391 } else { 4392 DP &= ~DP_LINK_TRAIN_MASK; 4393 DP |= DP_LINK_TRAIN_PAT_IDLE; 4394 } 4395 intel_de_write(dev_priv, intel_dp->output_reg, DP); 4396 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4397 4398 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE); 4399 intel_de_write(dev_priv, intel_dp->output_reg, DP); 4400 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4401 4402 /* 4403 * HW workaround for IBX, we need to move the port 4404 * to transcoder A after disabling it to allow the 4405 * matching HDMI port to be enabled on transcoder A. 4406 */ 4407 if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) { 4408 /* 4409 * We get CPU/PCH FIFO underruns on the other pipe when 4410 * doing the workaround. Sweep them under the rug. 
4411 */ 4412 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false); 4413 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false); 4414 4415 /* always enable with pattern 1 (as per spec) */ 4416 DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK); 4417 DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) | 4418 DP_LINK_TRAIN_PAT_1; 4419 intel_de_write(dev_priv, intel_dp->output_reg, DP); 4420 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4421 4422 DP &= ~DP_PORT_EN; 4423 intel_de_write(dev_priv, intel_dp->output_reg, DP); 4424 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4425 4426 intel_wait_for_vblank_if_active(dev_priv, PIPE_A); 4427 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true); 4428 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); 4429 } 4430 4431 msleep(intel_dp->panel_power_down_delay); 4432 4433 intel_dp->DP = DP; 4434 4435 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 4436 intel_wakeref_t wakeref; 4437 4438 with_pps_lock(intel_dp, wakeref) 4439 intel_dp->active_pipe = INVALID_PIPE; 4440 } 4441 } 4442 4443 static void 4444 intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp) 4445 { 4446 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4447 u8 dpcd_ext[6]; 4448 4449 /* 4450 * Prior to DP1.3 the bit represented by 4451 * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved. 4452 * If it is set, DP_DPCD_REV at 0000h could be at a value less than 4453 * the true capability of the panel. The only way to check is to 4454 * then compare 0000h and 2200h. 4455 */ 4456 if (!(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] & 4457 DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT)) 4458 return; 4459 4460 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV, 4461 &dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) { 4462 drm_err(&i915->drm, 4463 "DPCD failed read at extended capabilities\n"); 4464 return; 4465 } 4466 4467 if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) { 4468 drm_dbg_kms(&i915->drm, 4469 "DPCD extended DPCD rev less than base DPCD rev\n"); 4470 return; 4471 } 4472 4473 if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext))) 4474 return; 4475 4476 drm_dbg_kms(&i915->drm, "Base DPCD: %*ph\n", 4477 (int)sizeof(intel_dp->dpcd), intel_dp->dpcd); 4478 4479 memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)); 4480 } 4481 4482 bool 4483 intel_dp_read_dpcd(struct intel_dp *intel_dp) 4484 { 4485 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4486 4487 if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd, 4488 sizeof(intel_dp->dpcd)) < 0) 4489 return false; /* aux transfer failed */ 4490 4491 intel_dp_extended_receiver_capabilities(intel_dp); 4492 4493 drm_dbg_kms(&i915->drm, "DPCD: %*ph\n", (int)sizeof(intel_dp->dpcd), 4494 intel_dp->dpcd); 4495 4496 return intel_dp->dpcd[DP_DPCD_REV] != 0; 4497 } 4498 4499 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) 4500 { 4501 u8 dprx = 0; 4502 4503 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST, 4504 &dprx) != 1) 4505 return false; 4506 return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED; 4507 } 4508 4509 static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp) 4510 { 4511 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4512 4513 /* 4514 * Clear the cached register set to avoid using stale values 4515 * for the sinks that do not support DSC.
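* (e.g. when a DSC capable sink is replaced by one without DSC,
* the old capabilities would otherwise linger in dsc_dpcd)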
4516 */ 4517 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); 4518 4519 /* Clear fec_capable to avoid using stale values */ 4520 intel_dp->fec_capable = 0; 4521 4522 /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */ 4523 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 || 4524 intel_dp->edp_dpcd[0] >= DP_EDP_14) { 4525 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT, 4526 intel_dp->dsc_dpcd, 4527 sizeof(intel_dp->dsc_dpcd)) < 0) 4528 drm_err(&i915->drm, 4529 "Failed to read DPCD register 0x%x\n", 4530 DP_DSC_SUPPORT); 4531 4532 drm_dbg_kms(&i915->drm, "DSC DPCD: %*ph\n", 4533 (int)sizeof(intel_dp->dsc_dpcd), 4534 intel_dp->dsc_dpcd); 4535 4536 /* FEC is supported only on DP 1.4 */ 4537 if (!intel_dp_is_edp(intel_dp) && 4538 drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY, 4539 &intel_dp->fec_capable) < 0) 4540 drm_err(&i915->drm, 4541 "Failed to read FEC DPCD register\n"); 4542 4543 drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n", 4544 intel_dp->fec_capable); 4545 } 4546 } 4547 4548 static bool 4549 intel_edp_init_dpcd(struct intel_dp *intel_dp) 4550 { 4551 struct drm_i915_private *dev_priv = 4552 to_i915(dp_to_dig_port(intel_dp)->base.base.dev); 4553 4554 /* this function is meant to be called only once */ 4555 drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0); 4556 4557 if (!intel_dp_read_dpcd(intel_dp)) 4558 return false; 4559 4560 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, 4561 drm_dp_is_branch(intel_dp->dpcd)); 4562 4563 /* 4564 * Read the eDP display control registers. 4565 * 4566 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in 4567 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it 4568 * set, but require eDP 1.4+ detection (e.g. for supported link rates 4569 * method). The display control registers should read zero if they're 4570 * not supported anyway. 4571 */ 4572 if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, 4573 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) == 4574 sizeof(intel_dp->edp_dpcd)) 4575 drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n", 4576 (int)sizeof(intel_dp->edp_dpcd), 4577 intel_dp->edp_dpcd); 4578 4579 /* 4580 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks 4581 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1] 4582 */ 4583 intel_psr_init_dpcd(intel_dp); 4584 4585 /* Read the eDP 1.4+ supported link rates. */ 4586 if (intel_dp->edp_dpcd[0] >= DP_EDP_14) { 4587 __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; 4588 int i; 4589 4590 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES, 4591 sink_rates, sizeof(sink_rates)); 4592 4593 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) { 4594 int val = le16_to_cpu(sink_rates[i]); 4595 4596 if (val == 0) 4597 break; 4598 4599 /* Value read multiplied by 200kHz gives the per-lane 4600 * link rate in kHz. The source rates are, however, 4601 * stored in terms of LS_Clk kHz. The full conversion 4602 * back to symbols is 4603 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte) 4604 */ 4605 intel_dp->sink_rates[i] = (val * 200) / 10; 4606 } 4607 intel_dp->num_sink_rates = i; 4608 } 4609 4610 /* 4611 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available, 4612 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise. 
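* Illustrative example: a panel advertising 0x1FA4 (8100) in
* DP_SUPPORTED_LINK_RATES yields 8100 * 200 / 10 = 162000 kHz (RBR)
* in sink_rates[], and the link is then configured by writing that
* entry's index to DP_LINK_RATE_SET instead of a BW code to
* DP_LINK_BW_SET.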
4613 */ 4614 if (intel_dp->num_sink_rates) 4615 intel_dp->use_rate_select = true; 4616 else 4617 intel_dp_set_sink_rates(intel_dp); 4618 4619 intel_dp_set_common_rates(intel_dp); 4620 4621 /* Read the eDP DSC DPCD registers */ 4622 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) 4623 intel_dp_get_dsc_sink_cap(intel_dp); 4624 4625 return true; 4626 } 4627 4628 4629 static bool 4630 intel_dp_get_dpcd(struct intel_dp *intel_dp) 4631 { 4632 if (!intel_dp_read_dpcd(intel_dp)) 4633 return false; 4634 4635 /* 4636 * Don't clobber cached eDP rates. Also skip re-reading 4637 * the OUI/ID since we know it won't change. 4638 */ 4639 if (!intel_dp_is_edp(intel_dp)) { 4640 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, 4641 drm_dp_is_branch(intel_dp->dpcd)); 4642 4643 intel_dp_set_sink_rates(intel_dp); 4644 intel_dp_set_common_rates(intel_dp); 4645 } 4646 4647 /* 4648 * Some eDP panels do not set a valid value for sink count, which is 4649 * why we don't bother reading it here or in intel_edp_init_dpcd(). 4650 */ 4651 if (!intel_dp_is_edp(intel_dp) && 4652 !drm_dp_has_quirk(&intel_dp->desc, 0, 4653 DP_DPCD_QUIRK_NO_SINK_COUNT)) { 4654 u8 count; 4655 ssize_t r; 4656 4657 r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count); 4658 if (r < 1) 4659 return false; 4660 4661 /* 4662 * Sink count can change between short pulse hpd interrupts, 4663 * hence a member variable in intel_dp will track any changes 4664 * between short pulse interrupts. 4665 */ 4666 intel_dp->sink_count = DP_GET_SINK_COUNT(count); 4667 4668 /* 4669 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that 4670 * a dongle is present but no display. Unless we need to know 4671 * whether a dongle is present, we don't need to update 4672 * downstream port information. So, an early return here saves 4673 * time from performing other operations which are not required.
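* Returning false in that case makes the caller treat the port as
* disconnected.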
4674 */ 4675 if (!intel_dp->sink_count) 4676 return false; 4677 } 4678 4679 if (!drm_dp_is_branch(intel_dp->dpcd)) 4680 return true; /* native DP sink */ 4681 4682 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10) 4683 return true; /* no per-port downstream info */ 4684 4685 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0, 4686 intel_dp->downstream_ports, 4687 DP_MAX_DOWNSTREAM_PORTS) < 0) 4688 return false; /* downstream port status fetch failed */ 4689 4690 return true; 4691 } 4692 4693 static bool 4694 intel_dp_sink_can_mst(struct intel_dp *intel_dp) 4695 { 4696 u8 mstm_cap; 4697 4698 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12) 4699 return false; 4700 4701 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1) 4702 return false; 4703 4704 return mstm_cap & DP_MST_CAP; 4705 } 4706 4707 static bool 4708 intel_dp_can_mst(struct intel_dp *intel_dp) 4709 { 4710 return i915_modparams.enable_dp_mst && 4711 intel_dp->can_mst && 4712 intel_dp_sink_can_mst(intel_dp); 4713 } 4714 4715 static void 4716 intel_dp_configure_mst(struct intel_dp *intel_dp) 4717 { 4718 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4719 struct intel_encoder *encoder = 4720 &dp_to_dig_port(intel_dp)->base; 4721 bool sink_can_mst = intel_dp_sink_can_mst(intel_dp); 4722 4723 drm_dbg_kms(&i915->drm, 4724 "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n", 4725 encoder->base.base.id, encoder->base.name, 4726 yesno(intel_dp->can_mst), yesno(sink_can_mst), 4727 yesno(i915_modparams.enable_dp_mst)); 4728 4729 if (!intel_dp->can_mst) 4730 return; 4731 4732 intel_dp->is_mst = sink_can_mst && 4733 i915_modparams.enable_dp_mst; 4734 4735 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 4736 intel_dp->is_mst); 4737 } 4738 4739 static bool 4740 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector) 4741 { 4742 return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, 4743 sink_irq_vector, DP_DPRX_ESI_LEN) == 4744 DP_DPRX_ESI_LEN; 4745 } 4746 4747 bool 4748 intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state, 4749 const struct drm_connector_state *conn_state) 4750 { 4751 /* 4752 * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication 4753 * of Color Encoding Format and Content Color Gamut], when 4754 * sending YCBCR 420 or HDR BT.2020 signals we should use a DP VSC SDP. 4755 */ 4756 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 4757 return true; 4758 4759 switch (conn_state->colorspace) { 4760 case DRM_MODE_COLORIMETRY_SYCC_601: 4761 case DRM_MODE_COLORIMETRY_OPYCC_601: 4762 case DRM_MODE_COLORIMETRY_BT2020_YCC: 4763 case DRM_MODE_COLORIMETRY_BT2020_RGB: 4764 case DRM_MODE_COLORIMETRY_BT2020_CYCC: 4765 return true; 4766 default: 4767 break; 4768 } 4769 4770 return false; 4771 } 4772 4773 static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc, 4774 struct dp_sdp *sdp, size_t size) 4775 { 4776 size_t length = sizeof(struct dp_sdp); 4777 4778 if (size < length) 4779 return -ENOSPC; 4780 4781 memset(sdp, 0, size); 4782 4783 /* 4784 * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119 4785 * VSC SDP Header Bytes 4786 */ 4787 sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */ 4788 sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */ 4789 sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */ 4790 sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */ 4791 4792 /* 4793 * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as 4794 * per DP 1.4a spec.
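* For earlier revisions we bail out below and send only the header,
* leaving the payload bytes zeroed.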
4795 */ 4796 if (vsc->revision != 0x5) 4797 goto out; 4798 4799 /* VSC SDP Payload for DB16 through DB18 */ 4800 /* Pixel Encoding and Colorimetry Formats */ 4801 sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */ 4802 sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */ 4803 4804 switch (vsc->bpc) { 4805 case 6: 4806 /* 6bpc: 0x0 */ 4807 break; 4808 case 8: 4809 sdp->db[17] = 0x1; /* DB17[3:0] */ 4810 break; 4811 case 10: 4812 sdp->db[17] = 0x2; 4813 break; 4814 case 12: 4815 sdp->db[17] = 0x3; 4816 break; 4817 case 16: 4818 sdp->db[17] = 0x4; 4819 break; 4820 default: 4821 MISSING_CASE(vsc->bpc); 4822 break; 4823 } 4824 /* Dynamic Range and Component Bit Depth */ 4825 if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA) 4826 sdp->db[17] |= 0x80; /* DB17[7] */ 4827 4828 /* Content Type */ 4829 sdp->db[18] = vsc->content_type & 0x7; 4830 4831 out: 4832 return length; 4833 } 4834 4835 static ssize_t 4836 intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_infoframe, 4837 struct dp_sdp *sdp, 4838 size_t size) 4839 { 4840 size_t length = sizeof(struct dp_sdp); 4841 const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE; 4842 unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE]; 4843 ssize_t len; 4844 4845 if (size < length) 4846 return -ENOSPC; 4847 4848 memset(sdp, 0, size); 4849 4850 len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf)); 4851 if (len < 0) { 4852 DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n"); 4853 return -ENOSPC; 4854 } 4855 4856 if (len != infoframe_size) { 4857 DRM_DEBUG_KMS("wrong static hdr metadata size\n"); 4858 return -ENOSPC; 4859 } 4860 4861 /* 4862 * Set up the infoframe sdp packet for HDR static metadata. 4863 * Prepare VSC Header for SU as per DP 1.4a spec, 4864 * Table 2-100 and Table 2-101 4865 */ 4866 4867 /* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */ 4868 sdp->sdp_header.HB0 = 0; 4869 /* 4870 * Packet Type 80h + Non-audio INFOFRAME Type value 4871 * HDMI_INFOFRAME_TYPE_DRM: 0x87 4872 * - 80h + Non-audio INFOFRAME Type value 4873 * - InfoFrame Type: 0x07 4874 * [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame] 4875 */ 4876 sdp->sdp_header.HB1 = drm_infoframe->type; 4877 /* 4878 * Least Significant Eight Bits of (Data Byte Count - 1) 4879 * infoframe_size - 1 4880 */ 4881 sdp->sdp_header.HB2 = 0x1D; 4882 /* INFOFRAME SDP Version Number */ 4883 sdp->sdp_header.HB3 = (0x13 << 2); 4884 /* CTA Header Byte 2 (INFOFRAME Version Number) */ 4885 sdp->db[0] = drm_infoframe->version; 4886 /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */ 4887 sdp->db[1] = drm_infoframe->length; 4888 /* 4889 * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after 4890 * HDMI_INFOFRAME_HEADER_SIZE 4891 */ 4892 BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2); 4893 memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE], 4894 HDMI_DRM_INFOFRAME_SIZE); 4895 4896 /* 4897 * Size of DP infoframe sdp packet for HDR static metadata consists of 4898 * - DP SDP Header(struct dp_sdp_header): 4 bytes 4899 * - Two Data Blocks: 2 bytes 4900 * CTA Header Byte2 (INFOFRAME Version Number) 4901 * CTA Header Byte3 (Length of INFOFRAME) 4902 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes 4903 * 4904 * Prior to GEN11, the GMP register size is identical to the DP HDR 4905 * static metadata infoframe size. But GEN11+ has a larger GMP 4906 * register, and write_infoframe() will pad the rest of it.
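* The value returned below therefore works out to
* sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE =
* 4 + 2 + 26 = 32 bytes.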
4907 */ 4908 return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE; 4909 } 4910 4911 static void intel_write_dp_sdp(struct intel_encoder *encoder, 4912 const struct intel_crtc_state *crtc_state, 4913 unsigned int type) 4914 { 4915 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 4916 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4917 struct dp_sdp sdp = {}; 4918 ssize_t len; 4919 4920 if ((crtc_state->infoframes.enable & 4921 intel_hdmi_infoframe_enable(type)) == 0) 4922 return; 4923 4924 switch (type) { 4925 case DP_SDP_VSC: 4926 len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp, 4927 sizeof(sdp)); 4928 break; 4929 case HDMI_PACKET_TYPE_GAMUT_METADATA: 4930 len = intel_dp_hdr_metadata_infoframe_sdp_pack(&crtc_state->infoframes.drm.drm, 4931 &sdp, sizeof(sdp)); 4932 break; 4933 default: 4934 MISSING_CASE(type); 4935 return; 4936 } 4937 4938 if (drm_WARN_ON(&dev_priv->drm, len < 0)) 4939 return; 4940 4941 intel_dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len); 4942 } 4943 4944 void intel_write_dp_vsc_sdp(struct intel_encoder *encoder, 4945 const struct intel_crtc_state *crtc_state, 4946 struct drm_dp_vsc_sdp *vsc) 4947 { 4948 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 4949 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4950 struct dp_sdp sdp = {}; 4951 ssize_t len; 4952 4953 len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp)); 4954 4955 if (drm_WARN_ON(&dev_priv->drm, len < 0)) 4956 return; 4957 4958 intel_dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC, 4959 &sdp, len); 4960 } 4961 4962 void intel_dp_set_infoframes(struct intel_encoder *encoder, 4963 bool enable, 4964 const struct intel_crtc_state *crtc_state, 4965 const struct drm_connector_state *conn_state) 4966 { 4967 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4968 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 4969 i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder); 4970 u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW | 4971 VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW | 4972 VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK; 4973 u32 val = intel_de_read(dev_priv, reg); 4974 4975 /* TODO: Add DSC case (DIP_ENABLE_PPS) */ 4976 /* When PSR is enabled, this routine doesn't disable VSC DIP */ 4977 if (intel_psr_enabled(intel_dp)) 4978 val &= ~dip_enable; 4979 else 4980 val &= ~(dip_enable | VIDEO_DIP_ENABLE_VSC_HSW); 4981 4982 if (!enable) { 4983 intel_de_write(dev_priv, reg, val); 4984 intel_de_posting_read(dev_priv, reg); 4985 return; 4986 } 4987 4988 intel_de_write(dev_priv, reg, val); 4989 intel_de_posting_read(dev_priv, reg); 4990 4991 /* When PSR is enabled, VSC SDP is handled by PSR routine */ 4992 if (!intel_psr_enabled(intel_dp)) 4993 intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC); 4994 4995 intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA); 4996 } 4997 4998 static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc, 4999 const void *buffer, size_t size) 5000 { 5001 const struct dp_sdp *sdp = buffer; 5002 5003 if (size < sizeof(struct dp_sdp)) 5004 return -EINVAL; 5005 5006 memset(vsc, 0, size); 5007 5008 if (sdp->sdp_header.HB0 != 0) 5009 return -EINVAL; 5010 5011 if (sdp->sdp_header.HB1 != DP_SDP_VSC) 5012 return -EINVAL; 5013 5014 vsc->sdp_type = sdp->sdp_header.HB1; 5015 vsc->revision = sdp->sdp_header.HB2; 5016 vsc->length = sdp->sdp_header.HB3; 5017 5018 if ((sdp->sdp_header.HB2 == 0x2 && 
sdp->sdp_header.HB3 == 0x8) || 5019 (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) { 5020 /* 5021 * - HB2 = 0x2, HB3 = 0x8 5022 * VSC SDP supporting 3D stereo + PSR 5023 * - HB2 = 0x4, HB3 = 0xe 5024 * VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of 5025 * first scan line of the SU region (applies to eDP v1.4b 5026 * and higher). 5027 */ 5028 return 0; 5029 } else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) { 5030 /* 5031 * - HB2 = 0x5, HB3 = 0x13 5032 * VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry 5033 * Format. 5034 */ 5035 vsc->pixelformat = (sdp->db[16] >> 4) & 0xf; 5036 vsc->colorimetry = sdp->db[16] & 0xf; 5037 vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1; 5038 5039 switch (sdp->db[17] & 0x7) { 5040 case 0x0: 5041 vsc->bpc = 6; 5042 break; 5043 case 0x1: 5044 vsc->bpc = 8; 5045 break; 5046 case 0x2: 5047 vsc->bpc = 10; 5048 break; 5049 case 0x3: 5050 vsc->bpc = 12; 5051 break; 5052 case 0x4: 5053 vsc->bpc = 16; 5054 break; 5055 default: 5056 MISSING_CASE(sdp->db[17] & 0x7); 5057 return -EINVAL; 5058 } 5059 5060 vsc->content_type = sdp->db[18] & 0x7; 5061 } else { 5062 return -EINVAL; 5063 } 5064 5065 return 0; 5066 } 5067 5068 static int 5069 intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe, 5070 const void *buffer, size_t size) 5071 { 5072 int ret; 5073 5074 const struct dp_sdp *sdp = buffer; 5075 5076 if (size < sizeof(struct dp_sdp)) 5077 return -EINVAL; 5078 5079 if (sdp->sdp_header.HB0 != 0) 5080 return -EINVAL; 5081 5082 if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM) 5083 return -EINVAL; 5084 5085 /* 5086 * Least Significant Eight Bits of (Data Byte Count – 1) 5087 * 1Dh (i.e., Data Byte Count = 30 bytes). 5088 */ 5089 if (sdp->sdp_header.HB2 != 0x1D) 5090 return -EINVAL; 5091 5092 /* Most Significant Two Bits of (Data Byte Count – 1), Clear to 00b. 
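* (HB3[1:0] hold bits 9:8 of Data Byte Count - 1; with a 30 byte
* count, i.e. 0x1D in HB2, they must read as zero)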
*/ 5093 if ((sdp->sdp_header.HB3 & 0x3) != 0) 5094 return -EINVAL; 5095 5096 /* INFOFRAME SDP Version Number */ 5097 if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13) 5098 return -EINVAL; 5099 5100 /* CTA Header Byte 2 (INFOFRAME Version Number) */ 5101 if (sdp->db[0] != 1) 5102 return -EINVAL; 5103 5104 /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */ 5105 if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE) 5106 return -EINVAL; 5107 5108 ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2], 5109 HDMI_DRM_INFOFRAME_SIZE); 5110 5111 return ret; 5112 } 5113 5114 static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder, 5115 struct intel_crtc_state *crtc_state, 5116 struct drm_dp_vsc_sdp *vsc) 5117 { 5118 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 5119 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 5120 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5121 unsigned int type = DP_SDP_VSC; 5122 struct dp_sdp sdp = {}; 5123 int ret; 5124 5125 /* When PSR is enabled, VSC SDP is handled by PSR routine */ 5126 if (intel_psr_enabled(intel_dp)) 5127 return; 5128 5129 if ((crtc_state->infoframes.enable & 5130 intel_hdmi_infoframe_enable(type)) == 0) 5131 return; 5132 5133 intel_dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp)); 5134 5135 ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp)); 5136 5137 if (ret) 5138 drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n"); 5139 } 5140 5141 static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder, 5142 struct intel_crtc_state *crtc_state, 5143 struct hdmi_drm_infoframe *drm_infoframe) 5144 { 5145 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 5146 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5147 unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA; 5148 struct dp_sdp sdp = {}; 5149 int ret; 5150 5151 if ((crtc_state->infoframes.enable & 5152 intel_hdmi_infoframe_enable(type)) == 0) 5153 return; 5154 5155 intel_dig_port->read_infoframe(encoder, crtc_state, type, &sdp, 5156 sizeof(sdp)); 5157 5158 ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp, 5159 sizeof(sdp)); 5160 5161 if (ret) 5162 drm_dbg_kms(&dev_priv->drm, 5163 "Failed to unpack DP HDR Metadata Infoframe SDP\n"); 5164 } 5165 5166 void intel_read_dp_sdp(struct intel_encoder *encoder, 5167 struct intel_crtc_state *crtc_state, 5168 unsigned int type) 5169 { 5170 if (encoder->type != INTEL_OUTPUT_DDI) 5171 return; 5172 5173 switch (type) { 5174 case DP_SDP_VSC: 5175 intel_read_dp_vsc_sdp(encoder, crtc_state, 5176 &crtc_state->infoframes.vsc); 5177 break; 5178 case HDMI_PACKET_TYPE_GAMUT_METADATA: 5179 intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state, 5180 &crtc_state->infoframes.drm.drm); 5181 break; 5182 default: 5183 MISSING_CASE(type); 5184 break; 5185 } 5186 } 5187 5188 static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp) 5189 { 5190 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5191 int status = 0; 5192 int test_link_rate; 5193 u8 test_lane_count, test_link_bw; 5194 /* (DP CTS 1.2) 5195 * 4.3.1.11 5196 */ 5197 /* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */ 5198 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT, 5199 &test_lane_count); 5200 5201 if (status <= 0) { 5202 drm_dbg_kms(&i915->drm, "Lane count read failed\n"); 5203 return DP_TEST_NAK; 5204 } 5205 test_lane_count &= DP_MAX_LANE_COUNT_MASK; 5206 5207 status =
drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE, 5208 &test_link_bw); 5209 if (status <= 0) { 5210 drm_dbg_kms(&i915->drm, "Link Rate read failed\n"); 5211 return DP_TEST_NAK; 5212 } 5213 test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw); 5214 5215 /* Validate the requested link rate and lane count */ 5216 if (!intel_dp_link_params_valid(intel_dp, test_link_rate, 5217 test_lane_count)) 5218 return DP_TEST_NAK; 5219 5220 intel_dp->compliance.test_lane_count = test_lane_count; 5221 intel_dp->compliance.test_link_rate = test_link_rate; 5222 5223 return DP_TEST_ACK; 5224 } 5225 5226 static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp) 5227 { 5228 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5229 u8 test_pattern; 5230 u8 test_misc; 5231 __be16 h_width, v_height; 5232 int status = 0; 5233 5234 /* Read the TEST_PATTERN (DP CTS 3.1.5) */ 5235 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN, 5236 &test_pattern); 5237 if (status <= 0) { 5238 drm_dbg_kms(&i915->drm, "Test pattern read failed\n"); 5239 return DP_TEST_NAK; 5240 } 5241 if (test_pattern != DP_COLOR_RAMP) 5242 return DP_TEST_NAK; 5243 5244 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI, 5245 &h_width, 2); 5246 if (status <= 0) { 5247 drm_dbg_kms(&i915->drm, "H Width read failed\n"); 5248 return DP_TEST_NAK; 5249 } 5250 5251 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI, 5252 &v_height, 2); 5253 if (status <= 0) { 5254 drm_dbg_kms(&i915->drm, "V Height read failed\n"); 5255 return DP_TEST_NAK; 5256 } 5257 5258 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0, 5259 &test_misc); 5260 if (status <= 0) { 5261 drm_dbg_kms(&i915->drm, "TEST MISC read failed\n"); 5262 return DP_TEST_NAK; 5263 } 5264 if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB) 5265 return DP_TEST_NAK; 5266 if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA) 5267 return DP_TEST_NAK; 5268 switch (test_misc & DP_TEST_BIT_DEPTH_MASK) { 5269 case DP_TEST_BIT_DEPTH_6: 5270 intel_dp->compliance.test_data.bpc = 6; 5271 break; 5272 case DP_TEST_BIT_DEPTH_8: 5273 intel_dp->compliance.test_data.bpc = 8; 5274 break; 5275 default: 5276 return DP_TEST_NAK; 5277 } 5278 5279 intel_dp->compliance.test_data.video_pattern = test_pattern; 5280 intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width); 5281 intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height); 5282 /* Set test active flag here so userspace doesn't interrupt things */ 5283 intel_dp->compliance.test_active = true; 5284 5285 return DP_TEST_ACK; 5286 } 5287 5288 static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp) 5289 { 5290 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5291 u8 test_result = DP_TEST_ACK; 5292 struct intel_connector *intel_connector = intel_dp->attached_connector; 5293 struct drm_connector *connector = &intel_connector->base; 5294 5295 if (intel_connector->detect_edid == NULL || 5296 connector->edid_corrupt || 5297 intel_dp->aux.i2c_defer_count > 6) { 5298 /* Check EDID read for NACKs, DEFERs and corruption 5299 * (DP CTS 1.2 Core r1.1) 5300 * 4.2.2.4 : Failed EDID read, I2C_NAK 5301 * 4.2.2.5 : Failed EDID read, I2C_DEFER 5302 * 4.2.2.6 : EDID corruption detected 5303 * Use failsafe mode for all cases 5304 */ 5305 if (intel_dp->aux.i2c_nack_count > 0 || 5306 intel_dp->aux.i2c_defer_count > 0) 5307 drm_dbg_kms(&i915->drm, 5308 "EDID read had %d NACKs, %d DEFERs\n", 5309 intel_dp->aux.i2c_nack_count, 5310 intel_dp->aux.i2c_defer_count); 5311 intel_dp->compliance.test_data.edid = 
INTEL_DP_RESOLUTION_FAILSAFE; 5312 } else { 5313 struct edid *block = intel_connector->detect_edid; 5314 5315 /* We have to write the checksum 5316 * of the last block read 5317 */ 5318 block += intel_connector->detect_edid->extensions; 5319 5320 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM, 5321 block->checksum) <= 0) 5322 drm_dbg_kms(&i915->drm, 5323 "Failed to write EDID checksum\n"); 5324 5325 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE; 5326 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED; 5327 } 5328 5329 /* Set test active flag here so userspace doesn't interrupt things */ 5330 intel_dp->compliance.test_active = true; 5331 5332 return test_result; 5333 } 5334 5335 static u8 intel_dp_prepare_phytest(struct intel_dp *intel_dp) 5336 { 5337 struct drm_dp_phy_test_params *data = 5338 &intel_dp->compliance.test_data.phytest; 5339 5340 if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) { 5341 DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n"); 5342 return DP_TEST_NAK; 5343 } 5344 5345 /* 5346 * link_mst is set to false to avoid executing mst related code 5347 * during compliance testing. 5348 */ 5349 intel_dp->link_mst = false; 5350 5351 return DP_TEST_ACK; 5352 } 5353 5354 static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp) 5355 { 5356 struct drm_i915_private *dev_priv = 5357 to_i915(dp_to_dig_port(intel_dp)->base.base.dev); 5358 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 5359 struct drm_dp_phy_test_params *data = 5360 &intel_dp->compliance.test_data.phytest; 5361 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc); 5362 enum pipe pipe = crtc->pipe; 5363 u32 pattern_val; 5364 5365 switch (data->phy_pattern) { 5366 case DP_PHY_TEST_PATTERN_NONE: 5367 DRM_DEBUG_KMS("Disable Phy Test Pattern\n"); 5368 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0); 5369 break; 5370 case DP_PHY_TEST_PATTERN_D10_2: 5371 DRM_DEBUG_KMS("Set D10.2 Phy Test Pattern\n"); 5372 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 5373 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2); 5374 break; 5375 case DP_PHY_TEST_PATTERN_ERROR_COUNT: 5376 DRM_DEBUG_KMS("Set Error Count Phy Test Pattern\n"); 5377 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 5378 DDI_DP_COMP_CTL_ENABLE | 5379 DDI_DP_COMP_CTL_SCRAMBLED_0); 5380 break; 5381 case DP_PHY_TEST_PATTERN_PRBS7: 5382 DRM_DEBUG_KMS("Set PRBS7 Phy Test Pattern\n"); 5383 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 5384 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7); 5385 break; 5386 case DP_PHY_TEST_PATTERN_80BIT_CUSTOM: 5387 /* 5388 * FIXME: Ideally the pattern should come from DPCD 0x250. As the 5389 * current firmware of DPR-100 could not set it, it is hardcoded 5390 * for now for the compliance test. 5391 */ 5392 DRM_DEBUG_KMS("Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n"); 5393 pattern_val = 0x3e0f83e0; 5394 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val); 5395 pattern_val = 0x0f83e0f8; 5396 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val); 5397 pattern_val = 0x0000f83e; 5398 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val); 5399 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 5400 DDI_DP_COMP_CTL_ENABLE | 5401 DDI_DP_COMP_CTL_CUSTOM80); 5402 break; 5403 case DP_PHY_TEST_PATTERN_CP2520: 5404 /* 5405 * FIXME: Ideally the pattern should come from DPCD 0x24A. As the 5406 * current firmware of DPR-100 could not set it, it is hardcoded 5407 * for now for the compliance test.
5408 */ 5409 DRM_DEBUG_KMS("Set HBR2 compliance Phy Test Pattern\n"); 5410 pattern_val = 0xFB; 5411 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 5412 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 | 5413 pattern_val); 5414 break; 5415 default: 5416 WARN(1, "Invalid Phy Test Pattern\n"); 5417 } 5418 } 5419 5420 static void 5421 intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp) 5422 { 5423 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 5424 struct drm_device *dev = intel_dig_port->base.base.dev; 5425 struct drm_i915_private *dev_priv = to_i915(dev); 5426 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc); 5427 enum pipe pipe = crtc->pipe; 5428 u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; 5429 5430 trans_ddi_func_ctl_value = intel_de_read(dev_priv, 5431 TRANS_DDI_FUNC_CTL(pipe)); 5432 trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe)); 5433 dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe)); 5434 5435 trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE | 5436 TGL_TRANS_DDI_PORT_MASK); 5437 trans_conf_value &= ~PIPECONF_ENABLE; 5438 dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE; 5439 5440 intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value); 5441 intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), 5442 trans_ddi_func_ctl_value); 5443 intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value); 5444 } 5445 5446 static void 5447 intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp, uint8_t lane_cnt) 5448 { 5449 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 5450 struct drm_device *dev = intel_dig_port->base.base.dev; 5451 struct drm_i915_private *dev_priv = to_i915(dev); 5452 enum port port = intel_dig_port->base.port; 5453 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc); 5454 enum pipe pipe = crtc->pipe; 5455 u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; 5456 5457 trans_ddi_func_ctl_value = intel_de_read(dev_priv, 5458 TRANS_DDI_FUNC_CTL(pipe)); 5459 trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe)); 5460 dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe)); 5461 5462 trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE | 5463 TGL_TRANS_DDI_SELECT_PORT(port); 5464 trans_conf_value |= PIPECONF_ENABLE; 5465 dp_tp_ctl_value |= DP_TP_CTL_ENABLE; 5466 5467 intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value); 5468 intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value); 5469 intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), 5470 trans_ddi_func_ctl_value); 5471 } 5472 5473 void intel_dp_process_phy_request(struct intel_dp *intel_dp) 5474 { 5475 struct drm_dp_phy_test_params *data = 5476 &intel_dp->compliance.test_data.phytest; 5477 u8 link_status[DP_LINK_STATUS_SIZE]; 5478 5479 if (!intel_dp_get_link_status(intel_dp, link_status)) { 5480 DRM_DEBUG_KMS("failed to get link status\n"); 5481 return; 5482 } 5483 5484 /* retrieve vswing & pre-emphasis setting */ 5485 intel_dp_get_adjust_train(intel_dp, link_status); 5486 5487 intel_dp_autotest_phy_ddi_disable(intel_dp); 5488 5489 intel_dp_set_signal_levels(intel_dp); 5490 5491 intel_dp_phy_pattern_update(intel_dp); 5492 5493 intel_dp_autotest_phy_ddi_enable(intel_dp, data->num_lanes); 5494 5495 drm_dp_set_phy_test_pattern(&intel_dp->aux, data, 5496 link_status[DP_DPCD_REV]); 5497 } 5498 5499 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) 5500 { 5501 u8 test_result; 5502 5503 test_result = intel_dp_prepare_phytest(intel_dp); 5504 if 
(test_result != DP_TEST_ACK) 5505 DRM_ERROR("Phy test preparation failed\n"); 5506 5507 intel_dp_process_phy_request(intel_dp); 5508 5509 return test_result; 5510 } 5511 5512 static void intel_dp_handle_test_request(struct intel_dp *intel_dp) 5513 { 5514 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5515 u8 response = DP_TEST_NAK; 5516 u8 request = 0; 5517 int status; 5518 5519 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request); 5520 if (status <= 0) { 5521 drm_dbg_kms(&i915->drm, 5522 "Could not read test request from sink\n"); 5523 goto update_status; 5524 } 5525 5526 switch (request) { 5527 case DP_TEST_LINK_TRAINING: 5528 drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n"); 5529 response = intel_dp_autotest_link_training(intel_dp); 5530 break; 5531 case DP_TEST_LINK_VIDEO_PATTERN: 5532 drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n"); 5533 response = intel_dp_autotest_video_pattern(intel_dp); 5534 break; 5535 case DP_TEST_LINK_EDID_READ: 5536 drm_dbg_kms(&i915->drm, "EDID test requested\n"); 5537 response = intel_dp_autotest_edid(intel_dp); 5538 break; 5539 case DP_TEST_LINK_PHY_TEST_PATTERN: 5540 drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n"); 5541 response = intel_dp_autotest_phy_pattern(intel_dp); 5542 break; 5543 default: 5544 drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n", 5545 request); 5546 break; 5547 } 5548 5549 if (response & DP_TEST_ACK) 5550 intel_dp->compliance.test_type = request; 5551 5552 update_status: 5553 status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response); 5554 if (status <= 0) 5555 drm_dbg_kms(&i915->drm, 5556 "Could not write test response to sink\n"); 5557 } 5558 5559 /** 5560 * intel_dp_check_mst_status - service any pending MST interrupts, check link status 5561 * @intel_dp: Intel DP struct 5562 * 5563 * Read any pending MST interrupts, call MST core to handle these and ack the 5564 * interrupts. Check if the main and AUX link state is ok. 5565 * 5566 * Returns: 5567 * - %true if pending interrupts were serviced (or no interrupts were 5568 * pending) w/o detecting an error condition. 5569 * - %false if an error condition - like AUX failure or a loss of link - is 5570 * detected, which needs servicing from the hotplug work. 
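 *
 * The loop below, in outline: read the ESI bytes, verify channel EQ on
 * the main link, hand the ESI to the MST core via drm_dp_mst_hpd_irq(),
 * and ack the serviced bits back starting at DP_SINK_COUNT_ESI + 1,
 * until no more interrupts are pending.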
 */
static bool
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	bool link_ok = true;

	drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);

	for (;;) {
		u8 esi[DP_DPRX_ESI_LEN] = {};
		bool handled;
		int retry;

		if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) {
			drm_dbg_kms(&i915->drm,
				    "failed to get ESI - device may have failed\n");
			link_ok = false;

			break;
		}

		/* check link status - esi[10] = 0x200c */
		if (intel_dp->active_mst_links > 0 && link_ok &&
		    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
			drm_dbg_kms(&i915->drm,
				    "channel EQ not ok, retraining\n");
			link_ok = false;
		}

		drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi);

		drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
		if (!handled)
			break;

		for (retry = 0; retry < 3; retry++) {
			int wret;

			wret = drm_dp_dpcd_write(&intel_dp->aux,
						 DP_SINK_COUNT_ESI + 1,
						 &esi[1], 3);
			if (wret == 3)
				break;
		}
	}

	return link_ok;
}

static bool
intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_dp->link_trained)
		return false;

	/*
	 * While the PSR source hardware is enabled it controls the main
	 * link itself, enabling and disabling frame transmission as it
	 * sees fit. Attempting a retrain in that state would fail: the
	 * link may not even be on, or training patterns could get mixed
	 * with frame data, either of which makes retraining fail. Also,
	 * when exiting PSR the hardware retrains the link anyway, fixing
	 * up any link status errors.
	 */
	if (intel_psr_enabled(intel_dp))
		return false;

	if (!intel_dp_get_link_status(intel_dp, link_status))
		return false;

	/*
	 * Validate the cached values of intel_dp->link_rate and
	 * intel_dp->lane_count before attempting to retrain.
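	 * They can be stale if, for example, the sink was swapped while
	 * the link parameters were not re-read.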
5646 */ 5647 if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate, 5648 intel_dp->lane_count)) 5649 return false; 5650 5651 /* Retrain if Channel EQ or CR not ok */ 5652 return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count); 5653 } 5654 5655 static bool intel_dp_has_connector(struct intel_dp *intel_dp, 5656 const struct drm_connector_state *conn_state) 5657 { 5658 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5659 struct intel_encoder *encoder; 5660 enum pipe pipe; 5661 5662 if (!conn_state->best_encoder) 5663 return false; 5664 5665 /* SST */ 5666 encoder = &dp_to_dig_port(intel_dp)->base; 5667 if (conn_state->best_encoder == &encoder->base) 5668 return true; 5669 5670 /* MST */ 5671 for_each_pipe(i915, pipe) { 5672 encoder = &intel_dp->mst_encoders[pipe]->base; 5673 if (conn_state->best_encoder == &encoder->base) 5674 return true; 5675 } 5676 5677 return false; 5678 } 5679 5680 static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp, 5681 struct drm_modeset_acquire_ctx *ctx, 5682 u32 *crtc_mask) 5683 { 5684 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5685 struct drm_connector_list_iter conn_iter; 5686 struct intel_connector *connector; 5687 int ret = 0; 5688 5689 *crtc_mask = 0; 5690 5691 if (!intel_dp_needs_link_retrain(intel_dp)) 5692 return 0; 5693 5694 drm_connector_list_iter_begin(&i915->drm, &conn_iter); 5695 for_each_intel_connector_iter(connector, &conn_iter) { 5696 struct drm_connector_state *conn_state = 5697 connector->base.state; 5698 struct intel_crtc_state *crtc_state; 5699 struct intel_crtc *crtc; 5700 5701 if (!intel_dp_has_connector(intel_dp, conn_state)) 5702 continue; 5703 5704 crtc = to_intel_crtc(conn_state->crtc); 5705 if (!crtc) 5706 continue; 5707 5708 ret = drm_modeset_lock(&crtc->base.mutex, ctx); 5709 if (ret) 5710 break; 5711 5712 crtc_state = to_intel_crtc_state(crtc->base.state); 5713 5714 drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state)); 5715 5716 if (!crtc_state->hw.active) 5717 continue; 5718 5719 if (conn_state->commit && 5720 !try_wait_for_completion(&conn_state->commit->hw_done)) 5721 continue; 5722 5723 *crtc_mask |= drm_crtc_mask(&crtc->base); 5724 } 5725 drm_connector_list_iter_end(&conn_iter); 5726 5727 if (!intel_dp_needs_link_retrain(intel_dp)) 5728 *crtc_mask = 0; 5729 5730 return ret; 5731 } 5732 5733 static bool intel_dp_is_connected(struct intel_dp *intel_dp) 5734 { 5735 struct intel_connector *connector = intel_dp->attached_connector; 5736 5737 return connector->base.status == connector_status_connected || 5738 intel_dp->is_mst; 5739 } 5740 5741 int intel_dp_retrain_link(struct intel_encoder *encoder, 5742 struct drm_modeset_acquire_ctx *ctx) 5743 { 5744 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5745 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 5746 struct intel_crtc *crtc; 5747 u32 crtc_mask; 5748 int ret; 5749 5750 if (!intel_dp_is_connected(intel_dp)) 5751 return 0; 5752 5753 ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, 5754 ctx); 5755 if (ret) 5756 return ret; 5757 5758 ret = intel_dp_prep_link_retrain(intel_dp, ctx, &crtc_mask); 5759 if (ret) 5760 return ret; 5761 5762 if (crtc_mask == 0) 5763 return 0; 5764 5765 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n", 5766 encoder->base.base.id, encoder->base.name); 5767 5768 for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { 5769 const struct intel_crtc_state *crtc_state = 5770 to_intel_crtc_state(crtc->base.state); 5771 5772 /* Suppress underruns caused 
by re-training */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      intel_crtc_pch_transcoder(crtc), false);
	}

	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Keep underrun reporting disabled until things are stable */
		intel_wait_for_vblank(dev_priv, crtc->pipe);

		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      intel_crtc_pch_transcoder(crtc), true);
	}

	return 0;
}

/*
 * If the display is now connected, check the link status, since there
 * have been known issues of link loss triggering a long pulse.
 *
 * Some sinks (e.g. the ASUS PB287Q) seem to perform some weird HPD
 * ping pong during modesets: HPD can apparently go low during a
 * modeset and come back up soon after. Once that happens we must
 * retrain the link to get a picture, in case no userspace component
 * reacted to the intermittent HPD dip.
 */
static enum intel_hotplug_state
intel_dp_hotplug(struct intel_encoder *encoder,
		 struct intel_connector *connector)
{
	struct drm_modeset_acquire_ctx ctx;
	enum intel_hotplug_state state;
	int ret;

	state = intel_encoder_hotplug(encoder, connector);

	drm_modeset_acquire_init(&ctx, 0);

	for (;;) {
		ret = intel_dp_retrain_link(encoder, &ctx);

		if (ret == -EDEADLK) {
			drm_modeset_backoff(&ctx);
			continue;
		}

		break;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_WARN(encoder->base.dev, ret,
		 "Acquiring modeset locks failed with %i\n", ret);

	/*
	 * Keeping it consistent with intel_ddi_hotplug() and
	 * intel_hdmi_hotplug().
	 */
	if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries)
		state = INTEL_HOTPLUG_RETRY;

	return state;
}

static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 val;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
		return;

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);

	if (val & DP_AUTOMATED_TEST_REQUEST)
		intel_dp_handle_test_request(intel_dp);

	if (val & DP_CP_IRQ)
		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);

	if (val & DP_SINK_SPECIFIC_IRQ)
		drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
}
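/*
 * Short pulse servicing, in outline (intel_dp_short_pulse() below):
 * clear the compliance state, re-read the DPCD, service any pending
 * sink and CEC interrupts, and punt back to full detection when the
 * sink count changed or the link needs retraining.
 */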
/*
 * According to DP spec 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 *
 * intel_dp_short_pulse - handles short pulse interrupts
 * when full detection is not required.
 * Returns %true if the short pulse was handled and full detection
 * is NOT required, %false otherwise.
 */
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 old_sink_count = intel_dp->sink_count;
	bool ret;

	/*
	 * Clear the compliance test variables to allow capturing
	 * the values for the next automated test request.
	 */
	memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

	/*
	 * Now read the DPCD to see if it's actually running. If the
	 * current value of the sink count doesn't match the value that
	 * was stored earlier, or if the DPCD read failed, we need to do
	 * full detection.
	 */
	ret = intel_dp_get_dpcd(intel_dp);

	if ((old_sink_count != intel_dp->sink_count) || !ret) {
		/* No need to proceed if we are going to do full detect */
		return false;
	}

	intel_dp_check_service_irq(intel_dp);

	/* Handle CEC interrupts, if any */
	drm_dp_cec_irq(&intel_dp->aux);

	/* defer to the hotplug work for link retraining if needed */
	if (intel_dp_needs_link_retrain(intel_dp))
		return false;

	intel_psr_short_pulse(intel_dp);

	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		drm_dbg_kms(&dev_priv->drm,
			    "Link Training Compliance Test requested\n");
		/* Send a Hotplug Uevent to userspace to start modeset */
		drm_kms_helper_hotplug_event(&dev_priv->drm);
	}

	return true;
}

/* XXX this is probably wrong for multiple downstream ports */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
	u8 *dpcd = intel_dp->dpcd;
	u8 type;

	if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp)))
		return connector_status_connected;

	if (lspcon->active)
		lspcon_resume(lspcon);

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!drm_dp_is_branch(dpcd))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {

		return intel_dp->sink_count ?
5959 connector_status_connected : connector_status_disconnected; 5960 } 5961 5962 if (intel_dp_can_mst(intel_dp)) 5963 return connector_status_connected; 5964 5965 /* If no HPD, poke DDC gently */ 5966 if (drm_probe_ddc(&intel_dp->aux.ddc)) 5967 return connector_status_connected; 5968 5969 /* Well we tried, say unknown for unreliable port types */ 5970 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) { 5971 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; 5972 if (type == DP_DS_PORT_TYPE_VGA || 5973 type == DP_DS_PORT_TYPE_NON_EDID) 5974 return connector_status_unknown; 5975 } else { 5976 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & 5977 DP_DWN_STRM_PORT_TYPE_MASK; 5978 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG || 5979 type == DP_DWN_STRM_PORT_TYPE_OTHER) 5980 return connector_status_unknown; 5981 } 5982 5983 /* Anything else is out of spec, warn and ignore */ 5984 drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n"); 5985 return connector_status_disconnected; 5986 } 5987 5988 static enum drm_connector_status 5989 edp_detect(struct intel_dp *intel_dp) 5990 { 5991 return connector_status_connected; 5992 } 5993 5994 static bool ibx_digital_port_connected(struct intel_encoder *encoder) 5995 { 5996 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5997 u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin]; 5998 5999 return intel_de_read(dev_priv, SDEISR) & bit; 6000 } 6001 6002 static bool g4x_digital_port_connected(struct intel_encoder *encoder) 6003 { 6004 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 6005 u32 bit; 6006 6007 switch (encoder->hpd_pin) { 6008 case HPD_PORT_B: 6009 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X; 6010 break; 6011 case HPD_PORT_C: 6012 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X; 6013 break; 6014 case HPD_PORT_D: 6015 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X; 6016 break; 6017 default: 6018 MISSING_CASE(encoder->hpd_pin); 6019 return false; 6020 } 6021 6022 return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit; 6023 } 6024 6025 static bool gm45_digital_port_connected(struct intel_encoder *encoder) 6026 { 6027 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 6028 u32 bit; 6029 6030 switch (encoder->hpd_pin) { 6031 case HPD_PORT_B: 6032 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45; 6033 break; 6034 case HPD_PORT_C: 6035 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45; 6036 break; 6037 case HPD_PORT_D: 6038 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45; 6039 break; 6040 default: 6041 MISSING_CASE(encoder->hpd_pin); 6042 return false; 6043 } 6044 6045 return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit; 6046 } 6047 6048 static bool ilk_digital_port_connected(struct intel_encoder *encoder) 6049 { 6050 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 6051 u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin]; 6052 6053 return intel_de_read(dev_priv, DEISR) & bit; 6054 } 6055 6056 /* 6057 * intel_digital_port_connected - is the specified port connected? 6058 * @encoder: intel_encoder 6059 * 6060 * In cases where there's a connector physically connected but it can't be used 6061 * by our hardware we also return false, since the rest of the driver should 6062 * pretty much treat the port as disconnected. This is relevant for type-C 6063 * (starting on ICL) where there's ownership involved. 6064 * 6065 * Return %true if port is connected, %false otherwise. 
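 *
 * The check itself is delegated to the port-specific ->connected()
 * hook, with a display power reference held only for the duration of
 * the readout.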
6066 */ 6067 bool intel_digital_port_connected(struct intel_encoder *encoder) 6068 { 6069 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 6070 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 6071 bool is_connected = false; 6072 intel_wakeref_t wakeref; 6073 6074 with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref) 6075 is_connected = dig_port->connected(encoder); 6076 6077 return is_connected; 6078 } 6079 6080 static struct edid * 6081 intel_dp_get_edid(struct intel_dp *intel_dp) 6082 { 6083 struct intel_connector *intel_connector = intel_dp->attached_connector; 6084 6085 /* use cached edid if we have one */ 6086 if (intel_connector->edid) { 6087 /* invalid edid */ 6088 if (IS_ERR(intel_connector->edid)) 6089 return NULL; 6090 6091 return drm_edid_duplicate(intel_connector->edid); 6092 } else 6093 return drm_get_edid(&intel_connector->base, 6094 &intel_dp->aux.ddc); 6095 } 6096 6097 static void 6098 intel_dp_set_edid(struct intel_dp *intel_dp) 6099 { 6100 struct intel_connector *intel_connector = intel_dp->attached_connector; 6101 struct edid *edid; 6102 6103 intel_dp_unset_edid(intel_dp); 6104 edid = intel_dp_get_edid(intel_dp); 6105 intel_connector->detect_edid = edid; 6106 6107 intel_dp->has_audio = drm_detect_monitor_audio(edid); 6108 drm_dp_cec_set_edid(&intel_dp->aux, edid); 6109 intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid); 6110 } 6111 6112 static void 6113 intel_dp_unset_edid(struct intel_dp *intel_dp) 6114 { 6115 struct intel_connector *intel_connector = intel_dp->attached_connector; 6116 6117 drm_dp_cec_unset_edid(&intel_dp->aux); 6118 kfree(intel_connector->detect_edid); 6119 intel_connector->detect_edid = NULL; 6120 6121 intel_dp->has_audio = false; 6122 intel_dp->edid_quirks = 0; 6123 } 6124 6125 static int 6126 intel_dp_detect(struct drm_connector *connector, 6127 struct drm_modeset_acquire_ctx *ctx, 6128 bool force) 6129 { 6130 struct drm_i915_private *dev_priv = to_i915(connector->dev); 6131 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 6132 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 6133 struct intel_encoder *encoder = &dig_port->base; 6134 enum drm_connector_status status; 6135 6136 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", 6137 connector->base.id, connector->name); 6138 drm_WARN_ON(&dev_priv->drm, 6139 !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); 6140 6141 /* Can't disconnect eDP */ 6142 if (intel_dp_is_edp(intel_dp)) 6143 status = edp_detect(intel_dp); 6144 else if (intel_digital_port_connected(encoder)) 6145 status = intel_dp_detect_dpcd(intel_dp); 6146 else 6147 status = connector_status_disconnected; 6148 6149 if (status == connector_status_disconnected) { 6150 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); 6151 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); 6152 6153 if (intel_dp->is_mst) { 6154 drm_dbg_kms(&dev_priv->drm, 6155 "MST device may have disappeared %d vs %d\n", 6156 intel_dp->is_mst, 6157 intel_dp->mst_mgr.mst_state); 6158 intel_dp->is_mst = false; 6159 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 6160 intel_dp->is_mst); 6161 } 6162 6163 goto out; 6164 } 6165 6166 if (intel_dp->reset_link_params) { 6167 /* Initial max link lane count */ 6168 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp); 6169 6170 /* Initial max link rate */ 6171 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); 6172 6173 intel_dp->reset_link_params = false; 6174 } 6175 
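	/*
	 * Link parameters are settled at this point: log the supported
	 * rates and, on gen11+, refresh the sink's DSC capabilities
	 * before deciding between MST and SST below.
	 */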
6176 intel_dp_print_rates(intel_dp); 6177 6178 /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */ 6179 if (INTEL_GEN(dev_priv) >= 11) 6180 intel_dp_get_dsc_sink_cap(intel_dp); 6181 6182 intel_dp_configure_mst(intel_dp); 6183 6184 if (intel_dp->is_mst) { 6185 /* 6186 * If we are in MST mode then this connector 6187 * won't appear connected or have anything 6188 * with EDID on it 6189 */ 6190 status = connector_status_disconnected; 6191 goto out; 6192 } 6193 6194 /* 6195 * Some external monitors do not signal loss of link synchronization 6196 * with an IRQ_HPD, so force a link status check. 6197 */ 6198 if (!intel_dp_is_edp(intel_dp)) { 6199 int ret; 6200 6201 ret = intel_dp_retrain_link(encoder, ctx); 6202 if (ret) 6203 return ret; 6204 } 6205 6206 /* 6207 * Clearing NACK and defer counts to get their exact values 6208 * while reading EDID which are required by Compliance tests 6209 * 4.2.2.4 and 4.2.2.5 6210 */ 6211 intel_dp->aux.i2c_nack_count = 0; 6212 intel_dp->aux.i2c_defer_count = 0; 6213 6214 intel_dp_set_edid(intel_dp); 6215 if (intel_dp_is_edp(intel_dp) || 6216 to_intel_connector(connector)->detect_edid) 6217 status = connector_status_connected; 6218 6219 intel_dp_check_service_irq(intel_dp); 6220 6221 out: 6222 if (status != connector_status_connected && !intel_dp->is_mst) 6223 intel_dp_unset_edid(intel_dp); 6224 6225 /* 6226 * Make sure the refs for power wells enabled during detect are 6227 * dropped to avoid a new detect cycle triggered by HPD polling. 6228 */ 6229 intel_display_power_flush_work(dev_priv); 6230 6231 return status; 6232 } 6233 6234 static void 6235 intel_dp_force(struct drm_connector *connector) 6236 { 6237 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 6238 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 6239 struct intel_encoder *intel_encoder = &dig_port->base; 6240 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); 6241 enum intel_display_power_domain aux_domain = 6242 intel_aux_power_domain(dig_port); 6243 intel_wakeref_t wakeref; 6244 6245 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", 6246 connector->base.id, connector->name); 6247 intel_dp_unset_edid(intel_dp); 6248 6249 if (connector->status != connector_status_connected) 6250 return; 6251 6252 wakeref = intel_display_power_get(dev_priv, aux_domain); 6253 6254 intel_dp_set_edid(intel_dp); 6255 6256 intel_display_power_put(dev_priv, aux_domain, wakeref); 6257 } 6258 6259 static int intel_dp_get_modes(struct drm_connector *connector) 6260 { 6261 struct intel_connector *intel_connector = to_intel_connector(connector); 6262 struct edid *edid; 6263 6264 edid = intel_connector->detect_edid; 6265 if (edid) { 6266 int ret = intel_connector_update_modes(connector, edid); 6267 if (ret) 6268 return ret; 6269 } 6270 6271 /* if eDP has no EDID, fall back to fixed mode */ 6272 if (intel_dp_is_edp(intel_attached_dp(to_intel_connector(connector))) && 6273 intel_connector->panel.fixed_mode) { 6274 struct drm_display_mode *mode; 6275 6276 mode = drm_mode_duplicate(connector->dev, 6277 intel_connector->panel.fixed_mode); 6278 if (mode) { 6279 drm_mode_probed_add(connector, mode); 6280 return 1; 6281 } 6282 } 6283 6284 return 0; 6285 } 6286 6287 static int 6288 intel_dp_connector_register(struct drm_connector *connector) 6289 { 6290 struct drm_i915_private *i915 = to_i915(connector->dev); 6291 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 6292 int ret; 6293 6294 ret = intel_connector_register(connector); 6295 if (ret) 6296 
		return ret;

	drm_dbg_kms(&i915->drm, "registering %s bus for %s\n",
		    intel_dp->aux.name, connector->kdev->kobj.name);

	intel_dp->aux.dev = connector->kdev;
	ret = drm_dp_aux_register(&intel_dp->aux);
	if (!ret)
		drm_dp_cec_register_connector(&intel_dp->aux, connector);
	return ret;
}

static void
intel_dp_connector_unregister(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));

	drm_dp_cec_unregister_connector(&intel_dp->aux);
	drm_dp_aux_unregister(&intel_dp->aux);
	intel_connector_unregister(connector);
}

void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(to_intel_encoder(encoder));
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (intel_dp_is_edp(intel_dp)) {
		intel_wakeref_t wakeref;

		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		with_pps_lock(intel_dp, wakeref)
			edp_panel_vdd_off_sync(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}

	intel_dp_aux_fini(intel_dp);
}

static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	intel_dp_encoder_flush_work(encoder);

	drm_encoder_cleanup(encoder);
	kfree(enc_to_dig_port(to_intel_encoder(encoder)));
}

void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	with_pps_lock(intel_dp, wakeref)
		edp_panel_vdd_off_sync(intel_dp);
}

static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
{
	long ret;

#define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count))
	ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C,
					       msecs_to_jiffies(timeout));

	if (!ret)
		DRM_DEBUG_KMS("Timed out waiting for CP_IRQ\n");
}

static
int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
				u8 *an)
{
	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(&intel_dig_port->base.base));
	static const struct drm_dp_aux_msg msg = {
		.request = DP_AUX_NATIVE_WRITE,
		.address = DP_AUX_HDCP_AKSV,
		.size = DRM_HDCP_KSV_LEN,
	};
	u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0;
	ssize_t dpcd_ret;
	int ret;

	/* Output An first, that's easy */
	dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
				     an, DRM_HDCP_AN_LEN);
	if (dpcd_ret != DRM_HDCP_AN_LEN) {
		drm_dbg_kms(&i915->drm,
			    "Failed to write An over DP/AUX (%zd)\n",
			    dpcd_ret);
		return dpcd_ret >= 0 ?
-EIO : dpcd_ret; 6404 } 6405 6406 /* 6407 * Since Aksv is Oh-So-Secret, we can't access it in software. So in 6408 * order to get it on the wire, we need to create the AUX header as if 6409 * we were writing the data, and then tickle the hardware to output the 6410 * data once the header is sent out. 6411 */ 6412 intel_dp_aux_header(txbuf, &msg); 6413 6414 ret = intel_dp_aux_xfer(intel_dp, txbuf, HEADER_SIZE + msg.size, 6415 rxbuf, sizeof(rxbuf), 6416 DP_AUX_CH_CTL_AUX_AKSV_SELECT); 6417 if (ret < 0) { 6418 drm_dbg_kms(&i915->drm, 6419 "Write Aksv over DP/AUX failed (%d)\n", ret); 6420 return ret; 6421 } else if (ret == 0) { 6422 drm_dbg_kms(&i915->drm, "Aksv write over DP/AUX was empty\n"); 6423 return -EIO; 6424 } 6425 6426 reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK; 6427 if (reply != DP_AUX_NATIVE_REPLY_ACK) { 6428 drm_dbg_kms(&i915->drm, 6429 "Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n", 6430 reply); 6431 return -EIO; 6432 } 6433 return 0; 6434 } 6435 6436 static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port, 6437 u8 *bksv) 6438 { 6439 struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); 6440 ssize_t ret; 6441 6442 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv, 6443 DRM_HDCP_KSV_LEN); 6444 if (ret != DRM_HDCP_KSV_LEN) { 6445 drm_dbg_kms(&i915->drm, 6446 "Read Bksv from DP/AUX failed (%zd)\n", ret); 6447 return ret >= 0 ? -EIO : ret; 6448 } 6449 return 0; 6450 } 6451 6452 static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port, 6453 u8 *bstatus) 6454 { 6455 struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); 6456 ssize_t ret; 6457 6458 /* 6459 * For some reason the HDMI and DP HDCP specs call this register 6460 * definition by different names. In the HDMI spec, it's called BSTATUS, 6461 * but in DP it's called BINFO. 6462 */ 6463 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO, 6464 bstatus, DRM_HDCP_BSTATUS_LEN); 6465 if (ret != DRM_HDCP_BSTATUS_LEN) { 6466 drm_dbg_kms(&i915->drm, 6467 "Read bstatus from DP/AUX failed (%zd)\n", ret); 6468 return ret >= 0 ? -EIO : ret; 6469 } 6470 return 0; 6471 } 6472 6473 static 6474 int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port, 6475 u8 *bcaps) 6476 { 6477 struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); 6478 ssize_t ret; 6479 6480 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS, 6481 bcaps, 1); 6482 if (ret != 1) { 6483 drm_dbg_kms(&i915->drm, 6484 "Read bcaps from DP/AUX failed (%zd)\n", ret); 6485 return ret >= 0 ? -EIO : ret; 6486 } 6487 6488 return 0; 6489 } 6490 6491 static 6492 int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port, 6493 bool *repeater_present) 6494 { 6495 ssize_t ret; 6496 u8 bcaps; 6497 6498 ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps); 6499 if (ret) 6500 return ret; 6501 6502 *repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT; 6503 return 0; 6504 } 6505 6506 static 6507 int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port, 6508 u8 *ri_prime) 6509 { 6510 struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); 6511 ssize_t ret; 6512 6513 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME, 6514 ri_prime, DRM_HDCP_RI_LEN); 6515 if (ret != DRM_HDCP_RI_LEN) { 6516 drm_dbg_kms(&i915->drm, "Read Ri' from DP/AUX failed (%zd)\n", 6517 ret); 6518 return ret >= 0 ? 
-EIO : ret; 6519 } 6520 return 0; 6521 } 6522 6523 static 6524 int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port, 6525 bool *ksv_ready) 6526 { 6527 struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); 6528 ssize_t ret; 6529 u8 bstatus; 6530 6531 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, 6532 &bstatus, 1); 6533 if (ret != 1) { 6534 drm_dbg_kms(&i915->drm, 6535 "Read bstatus from DP/AUX failed (%zd)\n", ret); 6536 return ret >= 0 ? -EIO : ret; 6537 } 6538 *ksv_ready = bstatus & DP_BSTATUS_READY; 6539 return 0; 6540 } 6541 6542 static 6543 int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port, 6544 int num_downstream, u8 *ksv_fifo) 6545 { 6546 struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); 6547 ssize_t ret; 6548 int i; 6549 6550 /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */ 6551 for (i = 0; i < num_downstream; i += 3) { 6552 size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN; 6553 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, 6554 DP_AUX_HDCP_KSV_FIFO, 6555 ksv_fifo + i * DRM_HDCP_KSV_LEN, 6556 len); 6557 if (ret != len) { 6558 drm_dbg_kms(&i915->drm, 6559 "Read ksv[%d] from DP/AUX failed (%zd)\n", 6560 i, ret); 6561 return ret >= 0 ? -EIO : ret; 6562 } 6563 } 6564 return 0; 6565 } 6566 6567 static 6568 int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port, 6569 int i, u32 *part) 6570 { 6571 struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); 6572 ssize_t ret; 6573 6574 if (i >= DRM_HDCP_V_PRIME_NUM_PARTS) 6575 return -EINVAL; 6576 6577 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, 6578 DP_AUX_HDCP_V_PRIME(i), part, 6579 DRM_HDCP_V_PRIME_PART_LEN); 6580 if (ret != DRM_HDCP_V_PRIME_PART_LEN) { 6581 drm_dbg_kms(&i915->drm, 6582 "Read v'[%d] from DP/AUX failed (%zd)\n", i, ret); 6583 return ret >= 0 ? 
-EIO : ret; 6584 } 6585 return 0; 6586 } 6587 6588 static 6589 int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port, 6590 bool enable) 6591 { 6592 /* Not used for single stream DisplayPort setups */ 6593 return 0; 6594 } 6595 6596 static 6597 bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port) 6598 { 6599 struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); 6600 ssize_t ret; 6601 u8 bstatus; 6602 6603 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, 6604 &bstatus, 1); 6605 if (ret != 1) { 6606 drm_dbg_kms(&i915->drm, 6607 "Read bstatus from DP/AUX failed (%zd)\n", ret); 6608 return false; 6609 } 6610 6611 return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ)); 6612 } 6613 6614 static 6615 int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port, 6616 bool *hdcp_capable) 6617 { 6618 ssize_t ret; 6619 u8 bcaps; 6620 6621 ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps); 6622 if (ret) 6623 return ret; 6624 6625 *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE; 6626 return 0; 6627 } 6628 6629 struct hdcp2_dp_errata_stream_type { 6630 u8 msg_id; 6631 u8 stream_type; 6632 } __packed; 6633 6634 struct hdcp2_dp_msg_data { 6635 u8 msg_id; 6636 u32 offset; 6637 bool msg_detectable; 6638 u32 timeout; 6639 u32 timeout2; /* Added for non_paired situation */ 6640 }; 6641 6642 static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = { 6643 { HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0 }, 6644 { HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET, 6645 false, HDCP_2_2_CERT_TIMEOUT_MS, 0 }, 6646 { HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET, 6647 false, 0, 0 }, 6648 { HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET, 6649 false, 0, 0 }, 6650 { HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET, 6651 true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS, 6652 HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS }, 6653 { HDCP_2_2_AKE_SEND_PAIRING_INFO, 6654 DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true, 6655 HDCP_2_2_PAIRING_TIMEOUT_MS, 0 }, 6656 { HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0 }, 6657 { HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET, 6658 false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0 }, 6659 { HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false, 6660 0, 0 }, 6661 { HDCP_2_2_REP_SEND_RECVID_LIST, 6662 DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true, 6663 HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 }, 6664 { HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false, 6665 0, 0 }, 6666 { HDCP_2_2_REP_STREAM_MANAGE, 6667 DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false, 6668 0, 0 }, 6669 { HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET, 6670 false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 }, 6671 /* local define to shovel this through the write_2_2 interface */ 6672 #define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50 6673 { HDCP_2_2_ERRATA_DP_STREAM_TYPE, 6674 DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false, 6675 0, 0 }, 6676 }; 6677 6678 static int 6679 intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port, 6680 u8 *rx_status) 6681 { 6682 struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); 6683 ssize_t ret; 6684 6685 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, 6686 DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status, 6687 HDCP_2_2_DP_RXSTATUS_LEN); 6688 if (ret != HDCP_2_2_DP_RXSTATUS_LEN) { 6689 drm_dbg_kms(&i915->drm, 6690 "Read bstatus from DP/AUX failed (%zd)\n", ret); 6691 return ret >= 0 ? 
-EIO : ret; 6692 } 6693 6694 return 0; 6695 } 6696 6697 static 6698 int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port, 6699 u8 msg_id, bool *msg_ready) 6700 { 6701 u8 rx_status; 6702 int ret; 6703 6704 *msg_ready = false; 6705 ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status); 6706 if (ret < 0) 6707 return ret; 6708 6709 switch (msg_id) { 6710 case HDCP_2_2_AKE_SEND_HPRIME: 6711 if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status)) 6712 *msg_ready = true; 6713 break; 6714 case HDCP_2_2_AKE_SEND_PAIRING_INFO: 6715 if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status)) 6716 *msg_ready = true; 6717 break; 6718 case HDCP_2_2_REP_SEND_RECVID_LIST: 6719 if (HDCP_2_2_DP_RXSTATUS_READY(rx_status)) 6720 *msg_ready = true; 6721 break; 6722 default: 6723 DRM_ERROR("Unidentified msg_id: %d\n", msg_id); 6724 return -EINVAL; 6725 } 6726 6727 return 0; 6728 } 6729 6730 static ssize_t 6731 intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port, 6732 const struct hdcp2_dp_msg_data *hdcp2_msg_data) 6733 { 6734 struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); 6735 struct intel_dp *dp = &intel_dig_port->dp; 6736 struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; 6737 u8 msg_id = hdcp2_msg_data->msg_id; 6738 int ret, timeout; 6739 bool msg_ready = false; 6740 6741 if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired) 6742 timeout = hdcp2_msg_data->timeout2; 6743 else 6744 timeout = hdcp2_msg_data->timeout; 6745 6746 /* 6747 * There is no way to detect the CERT, LPRIME and STREAM_READY 6748 * availability. So Wait for timeout and read the msg. 6749 */ 6750 if (!hdcp2_msg_data->msg_detectable) { 6751 mdelay(timeout); 6752 ret = 0; 6753 } else { 6754 /* 6755 * As we want to check the msg availability at timeout, Ignoring 6756 * the timeout at wait for CP_IRQ. 6757 */ 6758 intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout); 6759 ret = hdcp2_detect_msg_availability(intel_dig_port, 6760 msg_id, &msg_ready); 6761 if (!msg_ready) 6762 ret = -ETIMEDOUT; 6763 } 6764 6765 if (ret) 6766 drm_dbg_kms(&i915->drm, 6767 "msg_id %d, ret %d, timeout(mSec): %d\n", 6768 hdcp2_msg_data->msg_id, ret, timeout); 6769 6770 return ret; 6771 } 6772 6773 static const struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id) 6774 { 6775 int i; 6776 6777 for (i = 0; i < ARRAY_SIZE(hdcp2_dp_msg_data); i++) 6778 if (hdcp2_dp_msg_data[i].msg_id == msg_id) 6779 return &hdcp2_dp_msg_data[i]; 6780 6781 return NULL; 6782 } 6783 6784 static 6785 int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port, 6786 void *buf, size_t size) 6787 { 6788 struct intel_dp *dp = &intel_dig_port->dp; 6789 struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; 6790 unsigned int offset; 6791 u8 *byte = buf; 6792 ssize_t ret, bytes_to_write, len; 6793 const struct hdcp2_dp_msg_data *hdcp2_msg_data; 6794 6795 hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte); 6796 if (!hdcp2_msg_data) 6797 return -EINVAL; 6798 6799 offset = hdcp2_msg_data->offset; 6800 6801 /* No msg_id in DP HDCP2.2 msgs */ 6802 bytes_to_write = size - 1; 6803 byte++; 6804 6805 hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count); 6806 6807 while (bytes_to_write) { 6808 len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ? 
		      DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write;

		ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux,
					offset, (void *)byte, len);
		if (ret < 0)
			return ret;

		bytes_to_write -= ret;
		byte += ret;
		offset += ret;
	}

	return size;
}

static
ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port)
{
	u8 rx_info[HDCP_2_2_RXINFO_LEN];
	u32 dev_cnt;
	ssize_t ret;

	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
			       DP_HDCP_2_2_REG_RXINFO_OFFSET,
			       (void *)rx_info, HDCP_2_2_RXINFO_LEN);
	if (ret != HDCP_2_2_RXINFO_LEN)
		return ret >= 0 ? -EIO : ret;

	dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
		   HDCP_2_2_DEV_COUNT_LO(rx_info[1]));

	if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT)
		dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT;

	ret = sizeof(struct hdcp2_rep_send_receiverid_list) -
	      HDCP_2_2_RECEIVER_IDS_MAX_LEN +
	      (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN);

	return ret;
}

static
int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
			    u8 msg_id, void *buf, size_t size)
{
	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
	unsigned int offset;
	u8 *byte = buf;
	ssize_t ret, bytes_to_recv, len;
	const struct hdcp2_dp_msg_data *hdcp2_msg_data;

	hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
	if (!hdcp2_msg_data)
		return -EINVAL;
	offset = hdcp2_msg_data->offset;

	ret = intel_dp_hdcp2_wait_for_msg(intel_dig_port, hdcp2_msg_data);
	if (ret < 0)
		return ret;

	if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) {
		ret = get_receiver_id_list_size(intel_dig_port);
		if (ret < 0)
			return ret;

		size = ret;
	}
	bytes_to_recv = size - 1;

	/* DP adaptation messages have no msg_id */
	byte++;

	while (bytes_to_recv) {
		len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
		      DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv;

		ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset,
				       (void *)byte, len);
		if (ret < 0) {
			drm_dbg_kms(&i915->drm, "msg_id %d, ret %zd\n",
				    msg_id, ret);
			return ret;
		}

		bytes_to_recv -= ret;
		byte += ret;
		offset += ret;
	}
	byte = buf;
	*byte = msg_id;

	return size;
}

static
int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
				      bool is_repeater, u8 content_type)
{
	int ret;
	struct hdcp2_dp_errata_stream_type stream_type_msg;

	if (is_repeater)
		return 0;

	/*
	 * Errata for DP: since the stream type is used for encryption,
	 * the receiver must be told the stream type so it can decrypt
	 * the content. A repeater is told the stream type as part of
	 * its authentication later on.
	 */
	stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
	stream_type_msg.stream_type = content_type;

	ret = intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
				       sizeof(stream_type_msg));

	return ret < 0 ?
ret : 0; 6927 6928 } 6929 6930 static 6931 int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port) 6932 { 6933 u8 rx_status; 6934 int ret; 6935 6936 ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status); 6937 if (ret) 6938 return ret; 6939 6940 if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status)) 6941 ret = HDCP_REAUTH_REQUEST; 6942 else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status)) 6943 ret = HDCP_LINK_INTEGRITY_FAILURE; 6944 else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status)) 6945 ret = HDCP_TOPOLOGY_CHANGE; 6946 6947 return ret; 6948 } 6949 6950 static 6951 int intel_dp_hdcp2_capable(struct intel_digital_port *intel_dig_port, 6952 bool *capable) 6953 { 6954 u8 rx_caps[3]; 6955 int ret; 6956 6957 *capable = false; 6958 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, 6959 DP_HDCP_2_2_REG_RX_CAPS_OFFSET, 6960 rx_caps, HDCP_2_2_RXCAPS_LEN); 6961 if (ret != HDCP_2_2_RXCAPS_LEN) 6962 return ret >= 0 ? -EIO : ret; 6963 6964 if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL && 6965 HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2])) 6966 *capable = true; 6967 6968 return 0; 6969 } 6970 6971 static const struct intel_hdcp_shim intel_dp_hdcp_shim = { 6972 .write_an_aksv = intel_dp_hdcp_write_an_aksv, 6973 .read_bksv = intel_dp_hdcp_read_bksv, 6974 .read_bstatus = intel_dp_hdcp_read_bstatus, 6975 .repeater_present = intel_dp_hdcp_repeater_present, 6976 .read_ri_prime = intel_dp_hdcp_read_ri_prime, 6977 .read_ksv_ready = intel_dp_hdcp_read_ksv_ready, 6978 .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo, 6979 .read_v_prime_part = intel_dp_hdcp_read_v_prime_part, 6980 .toggle_signalling = intel_dp_hdcp_toggle_signalling, 6981 .check_link = intel_dp_hdcp_check_link, 6982 .hdcp_capable = intel_dp_hdcp_capable, 6983 .write_2_2_msg = intel_dp_hdcp2_write_msg, 6984 .read_2_2_msg = intel_dp_hdcp2_read_msg, 6985 .config_stream_type = intel_dp_hdcp2_config_stream_type, 6986 .check_2_2_link = intel_dp_hdcp2_check_link, 6987 .hdcp_2_2_capable = intel_dp_hdcp2_capable, 6988 .protocol = HDCP_PROTOCOL_DP, 6989 }; 6990 6991 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) 6992 { 6993 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 6994 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 6995 6996 lockdep_assert_held(&dev_priv->pps_mutex); 6997 6998 if (!edp_have_panel_vdd(intel_dp)) 6999 return; 7000 7001 /* 7002 * The VDD bit needs a power domain reference, so if the bit is 7003 * already enabled when we boot or resume, grab this reference and 7004 * schedule a vdd off, so we don't hold on to the reference 7005 * indefinitely. 
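	 * The scheduled vdd off work is then what eventually drops the
	 * reference again.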
7006 */ 7007 drm_dbg_kms(&dev_priv->drm, 7008 "VDD left on by BIOS, adjusting state tracking\n"); 7009 intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port)); 7010 7011 edp_panel_vdd_schedule_off(intel_dp); 7012 } 7013 7014 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp) 7015 { 7016 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7017 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 7018 enum pipe pipe; 7019 7020 if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg, 7021 encoder->port, &pipe)) 7022 return pipe; 7023 7024 return INVALID_PIPE; 7025 } 7026 7027 void intel_dp_encoder_reset(struct drm_encoder *encoder) 7028 { 7029 struct drm_i915_private *dev_priv = to_i915(encoder->dev); 7030 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder)); 7031 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); 7032 intel_wakeref_t wakeref; 7033 7034 if (!HAS_DDI(dev_priv)) 7035 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg); 7036 7037 if (lspcon->active) 7038 lspcon_resume(lspcon); 7039 7040 intel_dp->reset_link_params = true; 7041 7042 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && 7043 !intel_dp_is_edp(intel_dp)) 7044 return; 7045 7046 with_pps_lock(intel_dp, wakeref) { 7047 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 7048 intel_dp->active_pipe = vlv_active_pipe(intel_dp); 7049 7050 if (intel_dp_is_edp(intel_dp)) { 7051 /* 7052 * Reinit the power sequencer, in case BIOS did 7053 * something nasty with it. 7054 */ 7055 intel_dp_pps_init(intel_dp); 7056 intel_edp_panel_vdd_sanitize(intel_dp); 7057 } 7058 } 7059 } 7060 7061 static int intel_modeset_tile_group(struct intel_atomic_state *state, 7062 int tile_group_id) 7063 { 7064 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 7065 struct drm_connector_list_iter conn_iter; 7066 struct drm_connector *connector; 7067 int ret = 0; 7068 7069 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); 7070 drm_for_each_connector_iter(connector, &conn_iter) { 7071 struct drm_connector_state *conn_state; 7072 struct intel_crtc_state *crtc_state; 7073 struct intel_crtc *crtc; 7074 7075 if (!connector->has_tile || 7076 connector->tile_group->id != tile_group_id) 7077 continue; 7078 7079 conn_state = drm_atomic_get_connector_state(&state->base, 7080 connector); 7081 if (IS_ERR(conn_state)) { 7082 ret = PTR_ERR(conn_state); 7083 break; 7084 } 7085 7086 crtc = to_intel_crtc(conn_state->crtc); 7087 7088 if (!crtc) 7089 continue; 7090 7091 crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 7092 crtc_state->uapi.mode_changed = true; 7093 7094 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); 7095 if (ret) 7096 break; 7097 } 7098 drm_connector_list_iter_end(&conn_iter); 7099 7100 return ret; 7101 } 7102 7103 static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders) 7104 { 7105 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 7106 struct intel_crtc *crtc; 7107 7108 if (transcoders == 0) 7109 return 0; 7110 7111 for_each_intel_crtc(&dev_priv->drm, crtc) { 7112 struct intel_crtc_state *crtc_state; 7113 int ret; 7114 7115 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 7116 if (IS_ERR(crtc_state)) 7117 return PTR_ERR(crtc_state); 7118 7119 if (!crtc_state->hw.enable) 7120 continue; 7121 7122 if (!(transcoders & BIT(crtc_state->cpu_transcoder))) 7123 continue; 7124 7125 crtc_state->uapi.mode_changed = true; 7126 7127 ret = 
drm_atomic_add_affected_connectors(&state->base, &crtc->base); 7128 if (ret) 7129 return ret; 7130 7131 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); 7132 if (ret) 7133 return ret; 7134 7135 transcoders &= ~BIT(crtc_state->cpu_transcoder); 7136 } 7137 7138 drm_WARN_ON(&dev_priv->drm, transcoders != 0); 7139 7140 return 0; 7141 } 7142 7143 static int intel_modeset_synced_crtcs(struct intel_atomic_state *state, 7144 struct drm_connector *connector) 7145 { 7146 const struct drm_connector_state *old_conn_state = 7147 drm_atomic_get_old_connector_state(&state->base, connector); 7148 const struct intel_crtc_state *old_crtc_state; 7149 struct intel_crtc *crtc; 7150 u8 transcoders; 7151 7152 crtc = to_intel_crtc(old_conn_state->crtc); 7153 if (!crtc) 7154 return 0; 7155 7156 old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); 7157 7158 if (!old_crtc_state->hw.active) 7159 return 0; 7160 7161 transcoders = old_crtc_state->sync_mode_slaves_mask; 7162 if (old_crtc_state->master_transcoder != INVALID_TRANSCODER) 7163 transcoders |= BIT(old_crtc_state->master_transcoder); 7164 7165 return intel_modeset_affected_transcoders(state, 7166 transcoders); 7167 } 7168 7169 static int intel_dp_connector_atomic_check(struct drm_connector *conn, 7170 struct drm_atomic_state *_state) 7171 { 7172 struct drm_i915_private *dev_priv = to_i915(conn->dev); 7173 struct intel_atomic_state *state = to_intel_atomic_state(_state); 7174 int ret; 7175 7176 ret = intel_digital_connector_atomic_check(conn, &state->base); 7177 if (ret) 7178 return ret; 7179 7180 /* 7181 * We don't enable port sync on BDW due to missing w/as and 7182 * due to not having adjusted the modeset sequence appropriately. 7183 */ 7184 if (INTEL_GEN(dev_priv) < 9) 7185 return 0; 7186 7187 if (!intel_connector_needs_modeset(state, conn)) 7188 return 0; 7189 7190 if (conn->has_tile) { 7191 ret = intel_modeset_tile_group(state, conn->tile_group->id); 7192 if (ret) 7193 return ret; 7194 } 7195 7196 return intel_modeset_synced_crtcs(state, conn); 7197 } 7198 7199 static const struct drm_connector_funcs intel_dp_connector_funcs = { 7200 .force = intel_dp_force, 7201 .fill_modes = drm_helper_probe_single_connector_modes, 7202 .atomic_get_property = intel_digital_connector_atomic_get_property, 7203 .atomic_set_property = intel_digital_connector_atomic_set_property, 7204 .late_register = intel_dp_connector_register, 7205 .early_unregister = intel_dp_connector_unregister, 7206 .destroy = intel_connector_destroy, 7207 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 7208 .atomic_duplicate_state = intel_digital_connector_duplicate_state, 7209 }; 7210 7211 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { 7212 .detect_ctx = intel_dp_detect, 7213 .get_modes = intel_dp_get_modes, 7214 .mode_valid = intel_dp_mode_valid, 7215 .atomic_check = intel_dp_connector_atomic_check, 7216 }; 7217 7218 static const struct drm_encoder_funcs intel_dp_enc_funcs = { 7219 .reset = intel_dp_encoder_reset, 7220 .destroy = intel_dp_encoder_destroy, 7221 }; 7222 7223 static bool intel_edp_have_power(struct intel_dp *intel_dp) 7224 { 7225 intel_wakeref_t wakeref; 7226 bool have_power = false; 7227 7228 with_pps_lock(intel_dp, wakeref) { 7229 have_power = edp_have_panel_power(intel_dp) && 7230 edp_have_panel_vdd(intel_dp); 7231 } 7232 7233 return have_power; 7234 } 7235 7236 enum irqreturn 7237 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) 7238 { 7239 struct drm_i915_private *i915 = 
to_i915(intel_dig_port->base.base.dev); 7240 struct intel_dp *intel_dp = &intel_dig_port->dp; 7241 7242 if (intel_dig_port->base.type == INTEL_OUTPUT_EDP && 7243 (long_hpd || !intel_edp_have_power(intel_dp))) { 7244 /* 7245 * vdd off can generate a long/short pulse on eDP which 7246 * would require vdd on to handle it, and thus we 7247 * would end up in an endless cycle of 7248 * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..." 7249 */ 7250 drm_dbg_kms(&i915->drm, 7251 "ignoring %s hpd on eDP [ENCODER:%d:%s]\n", 7252 long_hpd ? "long" : "short", 7253 intel_dig_port->base.base.base.id, 7254 intel_dig_port->base.base.name); 7255 return IRQ_HANDLED; 7256 } 7257 7258 drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n", 7259 intel_dig_port->base.base.base.id, 7260 intel_dig_port->base.base.name, 7261 long_hpd ? "long" : "short"); 7262 7263 if (long_hpd) { 7264 intel_dp->reset_link_params = true; 7265 return IRQ_NONE; 7266 } 7267 7268 if (intel_dp->is_mst) { 7269 if (!intel_dp_check_mst_status(intel_dp)) 7270 return IRQ_NONE; 7271 } else if (!intel_dp_short_pulse(intel_dp)) { 7272 return IRQ_NONE; 7273 } 7274 7275 return IRQ_HANDLED; 7276 } 7277 7278 /* check the VBT to see whether the eDP is on another port */ 7279 bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port) 7280 { 7281 /* 7282 * eDP not supported on g4x. so bail out early just 7283 * for a bit extra safety in case the VBT is bonkers. 7284 */ 7285 if (INTEL_GEN(dev_priv) < 5) 7286 return false; 7287 7288 if (INTEL_GEN(dev_priv) < 9 && port == PORT_A) 7289 return true; 7290 7291 return intel_bios_is_port_edp(dev_priv, port); 7292 } 7293 7294 static void 7295 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) 7296 { 7297 struct drm_i915_private *dev_priv = to_i915(connector->dev); 7298 enum port port = dp_to_dig_port(intel_dp)->base.port; 7299 7300 if (!IS_G4X(dev_priv) && port != PORT_A) 7301 intel_attach_force_audio_property(connector); 7302 7303 intel_attach_broadcast_rgb_property(connector); 7304 if (HAS_GMCH(dev_priv)) 7305 drm_connector_attach_max_bpc_property(connector, 6, 10); 7306 else if (INTEL_GEN(dev_priv) >= 5) 7307 drm_connector_attach_max_bpc_property(connector, 6, 12); 7308 7309 intel_attach_colorspace_property(connector); 7310 7311 if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11) 7312 drm_object_attach_property(&connector->base, 7313 connector->dev->mode_config.hdr_output_metadata_property, 7314 0); 7315 7316 if (intel_dp_is_edp(intel_dp)) { 7317 u32 allowed_scalers; 7318 7319 allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN); 7320 if (!HAS_GMCH(dev_priv)) 7321 allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER); 7322 7323 drm_connector_attach_scaling_mode_property(connector, allowed_scalers); 7324 7325 connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT; 7326 7327 } 7328 } 7329 7330 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp) 7331 { 7332 intel_dp->panel_power_off_time = ktime_get_boottime(); 7333 intel_dp->last_power_on = jiffies; 7334 intel_dp->last_backlight_off = jiffies; 7335 } 7336 7337 static void 7338 intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq) 7339 { 7340 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7341 u32 pp_on, pp_off, pp_ctl; 7342 struct pps_registers regs; 7343 7344 intel_pps_get_registers(intel_dp, ®s); 7345 7346 pp_ctl = ilk_get_pp_control(intel_dp); 7347 7348 /* Ensure PPS is unlocked */ 7349 if 
(!HAS_DDI(dev_priv)) 7350 intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl); 7351 7352 pp_on = intel_de_read(dev_priv, regs.pp_on); 7353 pp_off = intel_de_read(dev_priv, regs.pp_off); 7354 7355 /* Pull timing values out of registers */ 7356 seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on); 7357 seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on); 7358 seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off); 7359 seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off); 7360 7361 if (i915_mmio_reg_valid(regs.pp_div)) { 7362 u32 pp_div; 7363 7364 pp_div = intel_de_read(dev_priv, regs.pp_div); 7365 7366 seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000; 7367 } else { 7368 seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000; 7369 } 7370 } 7371 7372 static void 7373 intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq) 7374 { 7375 DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", 7376 state_name, 7377 seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12); 7378 } 7379 7380 static void 7381 intel_pps_verify_state(struct intel_dp *intel_dp) 7382 { 7383 struct edp_power_seq hw; 7384 struct edp_power_seq *sw = &intel_dp->pps_delays; 7385 7386 intel_pps_readout_hw_state(intel_dp, &hw); 7387 7388 if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 || 7389 hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) { 7390 DRM_ERROR("PPS state mismatch\n"); 7391 intel_pps_dump_state("sw", sw); 7392 intel_pps_dump_state("hw", &hw); 7393 } 7394 } 7395 7396 static void 7397 intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp) 7398 { 7399 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7400 struct edp_power_seq cur, vbt, spec, 7401 *final = &intel_dp->pps_delays; 7402 7403 lockdep_assert_held(&dev_priv->pps_mutex); 7404 7405 /* already initialized? */ 7406 if (final->t11_t12 != 0) 7407 return; 7408 7409 intel_pps_readout_hw_state(intel_dp, &cur); 7410 7411 intel_pps_dump_state("cur", &cur); 7412 7413 vbt = dev_priv->vbt.edp.pps; 7414 /* On the Toshiba Satellite P50-C-18C system the VBT T12 delay 7415 * of 500ms appears to be too short. Occasionally the panel 7416 * just fails to power back on. Increasing the delay to 800ms 7417 * seems sufficient to avoid this problem. 7418 */ 7419 if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) { 7420 vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10); 7421 drm_dbg_kms(&dev_priv->drm, 7422 "Increasing T12 panel delay as per the quirk to %d\n", 7423 vbt.t11_t12); 7424 } 7425 /* T11_T12 delay is special and actually in units of 100ms, but zero 7426 * based in the hw (so we need to add 100 ms). But the sw vbt 7427 * table multiplies it by 1000 to make it in units of 100usec, 7428 * too. */ 7429 vbt.t11_t12 += 100 * 10; 7430 7431 /* Upper limits from the eDP 1.3 spec. Note that we use the clunky units of 7432 * our hw here, which are all in 100usec. */ 7433 spec.t1_t3 = 210 * 10; 7434 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */ 7435 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */ 7436 spec.t10 = 500 * 10; 7437 /* This one is special and actually in units of 100ms, but zero 7438 * based in the hw (so we need to add 100 ms). But the sw vbt 7439 * table multiplies it by 1000 to make it in units of 100usec, 7440 * too. */ 7441 spec.t11_t12 = (510 + 100) * 10; 7442 7443 intel_pps_dump_state("vbt", &vbt); 7444 7445 /* Use the max of the register settings and vbt. If both are 7446 * unset, fall back to the spec limits.
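 * For example, with purely illustrative values: if cur.t10 and vbt.t10 are
 * both zero, assign_final() below falls back to spec.t10, whereas cur.t10 ==
 * 4000 with vbt.t10 == 5000 yields the larger of the two, 5000.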
*/ 7447 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \ 7448 spec.field : \ 7449 max(cur.field, vbt.field)) 7450 assign_final(t1_t3); 7451 assign_final(t8); 7452 assign_final(t9); 7453 assign_final(t10); 7454 assign_final(t11_t12); 7455 #undef assign_final 7456 7457 #define get_delay(field) (DIV_ROUND_UP(final->field, 10)) 7458 intel_dp->panel_power_up_delay = get_delay(t1_t3); 7459 intel_dp->backlight_on_delay = get_delay(t8); 7460 intel_dp->backlight_off_delay = get_delay(t9); 7461 intel_dp->panel_power_down_delay = get_delay(t10); 7462 intel_dp->panel_power_cycle_delay = get_delay(t11_t12); 7463 #undef get_delay 7464 7465 drm_dbg_kms(&dev_priv->drm, 7466 "panel power up delay %d, power down delay %d, power cycle delay %d\n", 7467 intel_dp->panel_power_up_delay, 7468 intel_dp->panel_power_down_delay, 7469 intel_dp->panel_power_cycle_delay); 7470 7471 drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n", 7472 intel_dp->backlight_on_delay, 7473 intel_dp->backlight_off_delay); 7474 7475 /* 7476 * We override the HW backlight delays to 1 because we do manual waits 7477 * on them. For T8, even BSpec recommends doing it. For T9, if we 7478 * don't do this, we'll end up waiting for the backlight off delay 7479 * twice: once when we do the manual sleep, and once when we disable 7480 * the panel and wait for the PP_STATUS bit to become zero. 7481 */ 7482 final->t8 = 1; 7483 final->t9 = 1; 7484 7485 /* 7486 * HW has only a 100msec granularity for t11_t12 so round it up 7487 * accordingly. 7488 */ 7489 final->t11_t12 = roundup(final->t11_t12, 100 * 10); 7490 } 7491 7492 static void 7493 intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp, 7494 bool force_disable_vdd) 7495 { 7496 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7497 u32 pp_on, pp_off, port_sel = 0; 7498 int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000; 7499 struct pps_registers regs; 7500 enum port port = dp_to_dig_port(intel_dp)->base.port; 7501 const struct edp_power_seq *seq = &intel_dp->pps_delays; 7502 7503 lockdep_assert_held(&dev_priv->pps_mutex); 7504 7505 intel_pps_get_registers(intel_dp, &regs); 7506 7507 /* 7508 * On some VLV machines the BIOS can leave the VDD 7509 * enabled even on power sequencers which aren't 7510 * hooked up to any port. This would mess up the 7511 * power domain tracking the first time we pick 7512 * one of these power sequencers for use since 7513 * edp_panel_vdd_on() would notice that the VDD was 7514 * already on and therefore wouldn't grab the power 7515 * domain reference. Disable VDD first to avoid this. 7516 * This also avoids spuriously turning the VDD on as 7517 * soon as the new power sequencer gets initialized. 7518 */ 7519 if (force_disable_vdd) { 7520 u32 pp = ilk_get_pp_control(intel_dp); 7521 7522 drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON, 7523 "Panel power already on\n"); 7524 7525 if (pp & EDP_FORCE_VDD) 7526 drm_dbg_kms(&dev_priv->drm, 7527 "VDD already on, disabling first\n"); 7528 7529 pp &= ~EDP_FORCE_VDD; 7530 7531 intel_de_write(dev_priv, regs.pp_ctrl, pp); 7532 } 7533 7534 pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) | 7535 REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8); 7536 pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) | 7537 REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10); 7538 7539 /* Haswell doesn't have any port selection bits for the panel 7540 * power sequencer any more.
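 * On VLV/CHV the port is instead selected via PANEL_PORT_SELECT_VLV()
 * below, while IBX/CPT use the per-port PANEL_PORT_SELECT_DP{A,C,D} values.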
*/ 7541 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 7542 port_sel = PANEL_PORT_SELECT_VLV(port); 7543 } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) { 7544 switch (port) { 7545 case PORT_A: 7546 port_sel = PANEL_PORT_SELECT_DPA; 7547 break; 7548 case PORT_C: 7549 port_sel = PANEL_PORT_SELECT_DPC; 7550 break; 7551 case PORT_D: 7552 port_sel = PANEL_PORT_SELECT_DPD; 7553 break; 7554 default: 7555 MISSING_CASE(port); 7556 break; 7557 } 7558 } 7559 7560 pp_on |= port_sel; 7561 7562 intel_de_write(dev_priv, regs.pp_on, pp_on); 7563 intel_de_write(dev_priv, regs.pp_off, pp_off); 7564 7565 /* 7566 * Compute the divisor for the pp clock, simply match the Bspec formula. 7567 */ 7568 if (i915_mmio_reg_valid(regs.pp_div)) { 7569 intel_de_write(dev_priv, regs.pp_div, 7570 REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000))); 7571 } else { 7572 u32 pp_ctl; 7573 7574 pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl); 7575 pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK; 7576 pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)); 7577 intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl); 7578 } 7579 7580 drm_dbg_kms(&dev_priv->drm, 7581 "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", 7582 intel_de_read(dev_priv, regs.pp_on), 7583 intel_de_read(dev_priv, regs.pp_off), 7584 i915_mmio_reg_valid(regs.pp_div) ? 7585 intel_de_read(dev_priv, regs.pp_div) : 7586 (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK)); 7587 } 7588 7589 static void intel_dp_pps_init(struct intel_dp *intel_dp) 7590 { 7591 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7592 7593 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 7594 vlv_initial_power_sequencer_setup(intel_dp); 7595 } else { 7596 intel_dp_init_panel_power_sequencer(intel_dp); 7597 intel_dp_init_panel_power_sequencer_registers(intel_dp, false); 7598 } 7599 } 7600 7601 /** 7602 * intel_dp_set_drrs_state - program registers for RR switch to take effect 7603 * @dev_priv: i915 device 7604 * @crtc_state: a pointer to the active intel_crtc_state 7605 * @refresh_rate: RR to be programmed 7606 * 7607 * This function gets called when refresh rate (RR) has to be changed from 7608 * one frequency to another. Switches can be between high and low RR 7609 * supported by the panel or to any other RR based on media playback (in 7610 * this case, RR value needs to be passed from user space). 7611 * 7612 * The caller of this function needs to take a lock on dev_priv->drrs. 
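 * A minimal usage sketch (illustrative only; assumes the caller already
 * holds the DRRS lock and passes a DRRS-capable crtc_state): calling
 * intel_dp_set_drrs_state(dev_priv, crtc_state,
 * intel_dp->attached_connector->panel.downclock_mode->vrefresh) selects
 * DRRS_LOW_RR, while passing panel.fixed_mode->vrefresh switches back to
 * DRRS_HIGH_RR.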
7613 */ 7614 static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, 7615 const struct intel_crtc_state *crtc_state, 7616 int refresh_rate) 7617 { 7618 struct intel_dp *intel_dp = dev_priv->drrs.dp; 7619 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); 7620 enum drrs_refresh_rate_type index = DRRS_HIGH_RR; 7621 7622 if (refresh_rate <= 0) { 7623 drm_dbg_kms(&dev_priv->drm, 7624 "Refresh rate should be a positive non-zero value.\n"); 7625 return; 7626 } 7627 7628 if (intel_dp == NULL) { 7629 drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n"); 7630 return; 7631 } 7632 7633 if (!intel_crtc) { 7634 drm_dbg_kms(&dev_priv->drm, 7635 "DRRS: intel_crtc not initialized\n"); 7636 return; 7637 } 7638 7639 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) { 7640 drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n"); 7641 return; 7642 } 7643 7644 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh == 7645 refresh_rate) 7646 index = DRRS_LOW_RR; 7647 7648 if (index == dev_priv->drrs.refresh_rate_type) { 7649 drm_dbg_kms(&dev_priv->drm, 7650 "DRRS requested for previously set RR...ignoring\n"); 7651 return; 7652 } 7653 7654 if (!crtc_state->hw.active) { 7655 drm_dbg_kms(&dev_priv->drm, 7656 "eDP encoder disabled. CRTC not active\n"); 7657 return; 7658 } 7659 7660 if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) { 7661 switch (index) { 7662 case DRRS_HIGH_RR: 7663 intel_dp_set_m_n(crtc_state, M1_N1); 7664 break; 7665 case DRRS_LOW_RR: 7666 intel_dp_set_m_n(crtc_state, M2_N2); 7667 break; 7668 case DRRS_MAX_RR: 7669 default: 7670 drm_err(&dev_priv->drm, 7671 "Unsupported refresh rate type\n"); 7672 } 7673 } else if (INTEL_GEN(dev_priv) > 6) { 7674 i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder); 7675 u32 val; 7676 7677 val = intel_de_read(dev_priv, reg); 7678 if (index > DRRS_HIGH_RR) { 7679 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 7680 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV; 7681 else 7682 val |= PIPECONF_EDP_RR_MODE_SWITCH; 7683 } else { 7684 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 7685 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV; 7686 else 7687 val &= ~PIPECONF_EDP_RR_MODE_SWITCH; 7688 } 7689 intel_de_write(dev_priv, reg, val); 7690 } 7691 7692 dev_priv->drrs.refresh_rate_type = index; 7693 7694 drm_dbg_kms(&dev_priv->drm, "eDP refresh rate set to: %dHz\n", 7695 refresh_rate); 7696 } 7697 7698 /** 7699 * intel_edp_drrs_enable - init drrs struct if supported 7700 * @intel_dp: DP struct 7701 * @crtc_state: A pointer to the active crtc state. 7702 * 7703 * Initializes frontbuffer_bits and drrs.dp 7704 */ 7705 void intel_edp_drrs_enable(struct intel_dp *intel_dp, 7706 const struct intel_crtc_state *crtc_state) 7707 { 7708 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7709 7710 if (!crtc_state->has_drrs) { 7711 drm_dbg_kms(&dev_priv->drm, "Panel doesn't support DRRS\n"); 7712 return; 7713 } 7714 7715 if (dev_priv->psr.enabled) { 7716 drm_dbg_kms(&dev_priv->drm, 7717 "PSR enabled. Not enabling DRRS.\n"); 7718 return; 7719 } 7720 7721 mutex_lock(&dev_priv->drrs.mutex); 7722 if (dev_priv->drrs.dp) { 7723 drm_dbg_kms(&dev_priv->drm, "DRRS already enabled\n"); 7724 goto unlock; 7725 } 7726 7727 dev_priv->drrs.busy_frontbuffer_bits = 0; 7728 7729 dev_priv->drrs.dp = intel_dp; 7730 7731 unlock: 7732 mutex_unlock(&dev_priv->drrs.mutex); 7733 } 7734 7735 /** 7736 * intel_edp_drrs_disable - Disable DRRS 7737 * @intel_dp: DP struct 7738 * @old_crtc_state: Pointer to old crtc_state.
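 * If the panel is still running at the low refresh rate, this first restores
 * the fixed (high) refresh rate via intel_dp_set_drrs_state(), then detaches
 * drrs.dp and cancels any pending downclock work.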
7739 * 7740 */ 7741 void intel_edp_drrs_disable(struct intel_dp *intel_dp, 7742 const struct intel_crtc_state *old_crtc_state) 7743 { 7744 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7745 7746 if (!old_crtc_state->has_drrs) 7747 return; 7748 7749 mutex_lock(&dev_priv->drrs.mutex); 7750 if (!dev_priv->drrs.dp) { 7751 mutex_unlock(&dev_priv->drrs.mutex); 7752 return; 7753 } 7754 7755 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) 7756 intel_dp_set_drrs_state(dev_priv, old_crtc_state, 7757 intel_dp->attached_connector->panel.fixed_mode->vrefresh); 7758 7759 dev_priv->drrs.dp = NULL; 7760 mutex_unlock(&dev_priv->drrs.mutex); 7761 7762 cancel_delayed_work_sync(&dev_priv->drrs.work); 7763 } 7764 7765 static void intel_edp_drrs_downclock_work(struct work_struct *work) 7766 { 7767 struct drm_i915_private *dev_priv = 7768 container_of(work, typeof(*dev_priv), drrs.work.work); 7769 struct intel_dp *intel_dp; 7770 7771 mutex_lock(&dev_priv->drrs.mutex); 7772 7773 intel_dp = dev_priv->drrs.dp; 7774 7775 if (!intel_dp) 7776 goto unlock; 7777 7778 /* 7779 * The delayed work can race with an invalidate, hence we need to 7780 * recheck. 7781 */ 7782 7783 if (dev_priv->drrs.busy_frontbuffer_bits) 7784 goto unlock; 7785 7786 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) { 7787 struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc; 7788 7789 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, 7790 intel_dp->attached_connector->panel.downclock_mode->vrefresh); 7791 } 7792 7793 unlock: 7794 mutex_unlock(&dev_priv->drrs.mutex); 7795 } 7796 7797 /** 7798 * intel_edp_drrs_invalidate - Disable Idleness DRRS 7799 * @dev_priv: i915 device 7800 * @frontbuffer_bits: frontbuffer plane tracking bits 7801 * 7802 * This function gets called every time rendering on the given planes starts. 7803 * Hence DRRS needs to be upclocked (LOW_RR -> HIGH_RR). 7804 * 7805 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. 7806 */ 7807 void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, 7808 unsigned int frontbuffer_bits) 7809 { 7810 struct drm_crtc *crtc; 7811 enum pipe pipe; 7812 7813 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED) 7814 return; 7815 7816 cancel_delayed_work(&dev_priv->drrs.work); 7817 7818 mutex_lock(&dev_priv->drrs.mutex); 7819 if (!dev_priv->drrs.dp) { 7820 mutex_unlock(&dev_priv->drrs.mutex); 7821 return; 7822 } 7823 7824 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc; 7825 pipe = to_intel_crtc(crtc)->pipe; 7826 7827 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); 7828 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits; 7829 7830 /* invalidate means busy screen hence upclock */ 7831 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) 7832 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, 7833 dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh); 7834 7835 mutex_unlock(&dev_priv->drrs.mutex); 7836 } 7837 7838 /** 7839 * intel_edp_drrs_flush - Restart Idleness DRRS 7840 * @dev_priv: i915 device 7841 * @frontbuffer_bits: frontbuffer plane tracking bits 7842 * 7843 * This function gets called every time rendering on the given planes has 7844 * completed or a flip on a crtc is completed. So DRRS should be upclocked 7845 * (LOW_RR -> HIGH_RR). Idleness detection is also restarted, 7846 * if no other planes are dirty. 7847 * 7848 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
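 * Illustrative flow: a flip on the DRRS pipe clears that pipe's bits from
 * busy_frontbuffer_bits below; once no busy bits remain, the delayed work is
 * scheduled and drops back to the low refresh rate after roughly one second
 * of idleness.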
7849 */ 7850 void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, 7851 unsigned int frontbuffer_bits) 7852 { 7853 struct drm_crtc *crtc; 7854 enum pipe pipe; 7855 7856 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED) 7857 return; 7858 7859 cancel_delayed_work(&dev_priv->drrs.work); 7860 7861 mutex_lock(&dev_priv->drrs.mutex); 7862 if (!dev_priv->drrs.dp) { 7863 mutex_unlock(&dev_priv->drrs.mutex); 7864 return; 7865 } 7866 7867 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc; 7868 pipe = to_intel_crtc(crtc)->pipe; 7869 7870 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); 7871 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits; 7872 7873 /* flush means busy screen hence upclock */ 7874 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) 7875 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, 7876 dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh); 7877 7878 /* 7879 * flush also means no more activity, hence schedule downclock if all 7880 * other fbs are quiescent too 7881 */ 7882 if (!dev_priv->drrs.busy_frontbuffer_bits) 7883 schedule_delayed_work(&dev_priv->drrs.work, 7884 msecs_to_jiffies(1000)); 7885 mutex_unlock(&dev_priv->drrs.mutex); 7886 } 7887 7888 /** 7889 * DOC: Display Refresh Rate Switching (DRRS) 7890 * 7891 * Display Refresh Rate Switching (DRRS) is a power conservation feature 7892 * which enables switching between low and high refresh rates 7893 * dynamically, based on the usage scenario. This feature is applicable 7894 * to internal panels. 7895 * 7896 * Indication that the panel supports DRRS is given by the panel EDID, which 7897 * would list multiple refresh rates for one resolution. 7898 * 7899 * DRRS is of 2 types - static and seamless. 7900 * Static DRRS involves changing the refresh rate (RR) by doing a full modeset 7901 * (may appear as a blink on screen) and is used in dock-undock scenarios. 7902 * Seamless DRRS involves changing the RR without any visual effect to the user 7903 * and can be used during normal system usage. This is done by programming 7904 * certain registers. 7905 * 7906 * Support for static/seamless DRRS may be indicated in the VBT based on 7907 * inputs from the panel spec. 7908 * 7909 * DRRS saves power by switching to a low RR based on usage scenarios. 7910 * 7911 * The implementation is based on the frontbuffer tracking implementation. When 7912 * there is a disturbance on the screen triggered by user activity or a periodic 7913 * system activity, DRRS is disabled (RR is changed to high RR). When there is 7914 * no movement on screen, after a timeout of 1 second, a switch to low RR is 7915 * made. 7916 * 7917 * For integration with the frontbuffer tracking code, intel_edp_drrs_invalidate() 7918 * and intel_edp_drrs_flush() are called. 7919 * 7920 * DRRS can be further extended to support other internal panels and also 7921 * the scenario of video playback wherein RR is set based on the rate 7922 * requested by userspace. 7923 */ 7924 7925 /** 7926 * intel_dp_drrs_init - Init basic DRRS work and mutex. 7927 * @connector: eDP connector 7928 * @fixed_mode: preferred mode of panel 7929 * 7930 * This function is called only once at driver load to initialize basic 7931 * DRRS stuff. 7932 * 7933 * Returns: 7934 * Downclock mode if the panel supports it, else NULL. 7935 * DRRS support is determined by the presence of a downclock mode (apart 7936 * from the VBT setting).
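 * Hypothetical example: a panel whose EDID advertises both 60Hz and 40Hz
 * timings for its native resolution yields a 40Hz downclock mode here, so
 * seamless DRRS can be used whenever the VBT also reports
 * SEAMLESS_DRRS_SUPPORT.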
7937 */ 7938 static struct drm_display_mode * 7939 intel_dp_drrs_init(struct intel_connector *connector, 7940 struct drm_display_mode *fixed_mode) 7941 { 7942 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 7943 struct drm_display_mode *downclock_mode = NULL; 7944 7945 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work); 7946 mutex_init(&dev_priv->drrs.mutex); 7947 7948 if (INTEL_GEN(dev_priv) <= 6) { 7949 drm_dbg_kms(&dev_priv->drm, 7950 "DRRS is only supported for Gen7 and above\n"); 7951 return NULL; 7952 } 7953 7954 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) { 7955 drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n"); 7956 return NULL; 7957 } 7958 7959 downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode); 7960 if (!downclock_mode) { 7961 drm_dbg_kms(&dev_priv->drm, 7962 "Downclock mode not found. DRRS not supported\n"); 7963 return NULL; 7964 } 7965 7966 dev_priv->drrs.type = dev_priv->vbt.drrs_type; 7967 7968 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR; 7969 drm_dbg_kms(&dev_priv->drm, 7970 "seamless DRRS supported for eDP panel.\n"); 7971 return downclock_mode; 7972 } 7973 7974 static bool intel_edp_init_connector(struct intel_dp *intel_dp, 7975 struct intel_connector *intel_connector) 7976 { 7977 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7978 struct drm_device *dev = &dev_priv->drm; 7979 struct drm_connector *connector = &intel_connector->base; 7980 struct drm_display_mode *fixed_mode = NULL; 7981 struct drm_display_mode *downclock_mode = NULL; 7982 bool has_dpcd; 7983 enum pipe pipe = INVALID_PIPE; 7984 intel_wakeref_t wakeref; 7985 struct edid *edid; 7986 7987 if (!intel_dp_is_edp(intel_dp)) 7988 return true; 7989 7990 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work); 7991 7992 /* 7993 * On IBX/CPT we may get here with LVDS already registered. Since the 7994 * driver uses the only internal power sequencer available for both 7995 * eDP and LVDS, bail out early in this case to prevent interfering 7996 * with an already powered-on LVDS power sequencer. 7997 */ 7998 if (intel_get_lvds_encoder(dev_priv)) { 7999 drm_WARN_ON(dev, 8000 !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))); 8001 drm_info(&dev_priv->drm, 8002 "LVDS was detected, not registering eDP\n"); 8003 8004 return false; 8005 } 8006 8007 with_pps_lock(intel_dp, wakeref) { 8008 intel_dp_init_panel_power_timestamps(intel_dp); 8009 intel_dp_pps_init(intel_dp); 8010 intel_edp_panel_vdd_sanitize(intel_dp); 8011 } 8012 8013 /* Cache DPCD and EDID for eDP.
*/ 8014 has_dpcd = intel_edp_init_dpcd(intel_dp); 8015 8016 if (!has_dpcd) { 8017 /* if this fails, presume the device is a ghost */ 8018 drm_info(&dev_priv->drm, 8019 "failed to retrieve link info, disabling eDP\n"); 8020 goto out_vdd_off; 8021 } 8022 8023 mutex_lock(&dev->mode_config.mutex); 8024 edid = drm_get_edid(connector, &intel_dp->aux.ddc); 8025 if (edid) { 8026 if (drm_add_edid_modes(connector, edid)) { 8027 drm_connector_update_edid_property(connector, edid); 8028 intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid); 8029 } else { 8030 kfree(edid); 8031 edid = ERR_PTR(-EINVAL); 8032 } 8033 } else { 8034 edid = ERR_PTR(-ENOENT); 8035 } 8036 intel_connector->edid = edid; 8037 8038 fixed_mode = intel_panel_edid_fixed_mode(intel_connector); 8039 if (fixed_mode) 8040 downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode); 8041 8042 /* fall back to VBT if available for eDP */ 8043 if (!fixed_mode) 8044 fixed_mode = intel_panel_vbt_fixed_mode(intel_connector); 8045 mutex_unlock(&dev->mode_config.mutex); 8046 8047 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 8048 intel_dp->edp_notifier.notifier_call = edp_notify_handler; 8049 register_reboot_notifier(&intel_dp->edp_notifier); 8050 8051 /* 8052 * Figure out the current pipe for the initial backlight setup. 8053 * If the current pipe isn't valid, try the PPS pipe, and if that 8054 * fails just assume pipe A. 8055 */ 8056 pipe = vlv_active_pipe(intel_dp); 8057 8058 if (pipe != PIPE_A && pipe != PIPE_B) 8059 pipe = intel_dp->pps_pipe; 8060 8061 if (pipe != PIPE_A && pipe != PIPE_B) 8062 pipe = PIPE_A; 8063 8064 drm_dbg_kms(&dev_priv->drm, 8065 "using pipe %c for initial backlight setup\n", 8066 pipe_name(pipe)); 8067 } 8068 8069 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); 8070 intel_connector->panel.backlight.power = intel_edp_backlight_power; 8071 intel_panel_setup_backlight(connector, pipe); 8072 8073 if (fixed_mode) { 8074 drm_connector_set_panel_orientation_with_quirk(connector, 8075 dev_priv->vbt.orientation, 8076 fixed_mode->hdisplay, fixed_mode->vdisplay); 8077 } 8078 8079 return true; 8080 8081 out_vdd_off: 8082 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 8083 /* 8084 * vdd might still be enabled due to the delayed vdd off. 8085 * Make sure vdd is actually turned off here. 8086 */ 8087 with_pps_lock(intel_dp, wakeref) 8088 edp_panel_vdd_off_sync(intel_dp); 8089 8090 return false; 8091 } 8092 8093 static void intel_dp_modeset_retry_work_fn(struct work_struct *work) 8094 { 8095 struct intel_connector *intel_connector; 8096 struct drm_connector *connector; 8097 8098 intel_connector = container_of(work, typeof(*intel_connector), 8099 modeset_retry_work); 8100 connector = &intel_connector->base; 8101 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, 8102 connector->name); 8103 8104 /* Grab the locks before changing connector property */ 8105 mutex_lock(&connector->dev->mode_config.mutex); 8106 /* Set connector link status to BAD and send a Uevent to notify 8107 * userspace to do a modeset.
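 * Userspace (a compositor, for instance) is expected to react to the
 * hotplug uevent by reprobing the connector and retrying the modeset,
 * typically with parameters the degraded link can still carry.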
8108 */ 8109 drm_connector_set_link_status_property(connector, 8110 DRM_MODE_LINK_STATUS_BAD); 8111 mutex_unlock(&connector->dev->mode_config.mutex); 8112 /* Send Hotplug uevent so userspace can reprobe */ 8113 drm_kms_helper_hotplug_event(connector->dev); 8114 } 8115 8116 bool 8117 intel_dp_init_connector(struct intel_digital_port *intel_dig_port, 8118 struct intel_connector *intel_connector) 8119 { 8120 struct drm_connector *connector = &intel_connector->base; 8121 struct intel_dp *intel_dp = &intel_dig_port->dp; 8122 struct intel_encoder *intel_encoder = &intel_dig_port->base; 8123 struct drm_device *dev = intel_encoder->base.dev; 8124 struct drm_i915_private *dev_priv = to_i915(dev); 8125 enum port port = intel_encoder->port; 8126 enum phy phy = intel_port_to_phy(dev_priv, port); 8127 int type; 8128 8129 /* Initialize the work for modeset in case of link train failure */ 8130 INIT_WORK(&intel_connector->modeset_retry_work, 8131 intel_dp_modeset_retry_work_fn); 8132 8133 if (drm_WARN(dev, intel_dig_port->max_lanes < 1, 8134 "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n", 8135 intel_dig_port->max_lanes, intel_encoder->base.base.id, 8136 intel_encoder->base.name)) 8137 return false; 8138 8139 intel_dp_set_source_rates(intel_dp); 8140 8141 intel_dp->reset_link_params = true; 8142 intel_dp->pps_pipe = INVALID_PIPE; 8143 intel_dp->active_pipe = INVALID_PIPE; 8144 8145 /* Preserve the current hw state. */ 8146 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg); 8147 intel_dp->attached_connector = intel_connector; 8148 8149 if (intel_dp_is_port_edp(dev_priv, port)) { 8150 /* 8151 * Currently we don't support eDP on TypeC ports, although in 8152 * theory it could work on TypeC legacy ports. 8153 */ 8154 drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy)); 8155 type = DRM_MODE_CONNECTOR_eDP; 8156 } else { 8157 type = DRM_MODE_CONNECTOR_DisplayPort; 8158 } 8159 8160 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 8161 intel_dp->active_pipe = vlv_active_pipe(intel_dp); 8162 8163 /* 8164 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but 8165 * for DP the encoder type can be set by the caller to 8166 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it. 8167 */ 8168 if (type == DRM_MODE_CONNECTOR_eDP) 8169 intel_encoder->type = INTEL_OUTPUT_EDP; 8170 8171 /* eDP only on port B and/or C on vlv/chv */ 8172 if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) || 8173 IS_CHERRYVIEW(dev_priv)) && 8174 intel_dp_is_edp(intel_dp) && 8175 port != PORT_B && port != PORT_C)) 8176 return false; 8177 8178 drm_dbg_kms(&dev_priv->drm, 8179 "Adding %s connector on [ENCODER:%d:%s]\n", 8180 type == DRM_MODE_CONNECTOR_eDP ? 
"eDP" : "DP", 8181 intel_encoder->base.base.id, intel_encoder->base.name); 8182 8183 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); 8184 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); 8185 8186 if (!HAS_GMCH(dev_priv)) 8187 connector->interlace_allowed = true; 8188 connector->doublescan_allowed = 0; 8189 8190 if (INTEL_GEN(dev_priv) >= 11) 8191 connector->ycbcr_420_allowed = true; 8192 8193 intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port); 8194 intel_connector->polled = DRM_CONNECTOR_POLL_HPD; 8195 8196 intel_dp_aux_init(intel_dp); 8197 8198 intel_connector_attach_encoder(intel_connector, intel_encoder); 8199 8200 if (HAS_DDI(dev_priv)) 8201 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; 8202 else 8203 intel_connector->get_hw_state = intel_connector_get_hw_state; 8204 8205 /* init MST on ports that can support it */ 8206 intel_dp_mst_encoder_init(intel_dig_port, 8207 intel_connector->base.base.id); 8208 8209 if (!intel_edp_init_connector(intel_dp, intel_connector)) { 8210 intel_dp_aux_fini(intel_dp); 8211 intel_dp_mst_encoder_cleanup(intel_dig_port); 8212 goto fail; 8213 } 8214 8215 intel_dp_add_properties(intel_dp, connector); 8216 8217 if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) { 8218 int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim); 8219 if (ret) 8220 drm_dbg_kms(&dev_priv->drm, 8221 "HDCP init failed, skipping.\n"); 8222 } 8223 8224 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written 8225 * 0xd. Failure to do so will result in spurious interrupts being 8226 * generated on the port when a cable is not attached. 8227 */ 8228 if (IS_G45(dev_priv)) { 8229 u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA); 8230 intel_de_write(dev_priv, PEG_BAND_GAP_DATA, 8231 (temp & ~0xf) | 0xd); 8232 } 8233 8234 return true; 8235 8236 fail: 8237 drm_connector_cleanup(connector); 8238 8239 return false; 8240 } 8241 8242 bool intel_dp_init(struct drm_i915_private *dev_priv, 8243 i915_reg_t output_reg, 8244 enum port port) 8245 { 8246 struct intel_digital_port *intel_dig_port; 8247 struct intel_encoder *intel_encoder; 8248 struct drm_encoder *encoder; 8249 struct intel_connector *intel_connector; 8250 8251 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL); 8252 if (!intel_dig_port) 8253 return false; 8254 8255 intel_connector = intel_connector_alloc(); 8256 if (!intel_connector) 8257 goto err_connector_alloc; 8258 8259 intel_encoder = &intel_dig_port->base; 8260 encoder = &intel_encoder->base; 8261 8262 if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base, 8263 &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS, 8264 "DP %c", port_name(port))) 8265 goto err_encoder_init; 8266 8267 intel_encoder->hotplug = intel_dp_hotplug; 8268 intel_encoder->compute_config = intel_dp_compute_config; 8269 intel_encoder->get_hw_state = intel_dp_get_hw_state; 8270 intel_encoder->get_config = intel_dp_get_config; 8271 intel_encoder->update_pipe = intel_panel_update_backlight; 8272 intel_encoder->suspend = intel_dp_encoder_suspend; 8273 if (IS_CHERRYVIEW(dev_priv)) { 8274 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable; 8275 intel_encoder->pre_enable = chv_pre_enable_dp; 8276 intel_encoder->enable = vlv_enable_dp; 8277 intel_encoder->disable = vlv_disable_dp; 8278 intel_encoder->post_disable = chv_post_disable_dp; 8279 intel_encoder->post_pll_disable = chv_dp_post_pll_disable; 8280 } else if (IS_VALLEYVIEW(dev_priv)) { 8281 intel_encoder->pre_pll_enable = 
vlv_dp_pre_pll_enable; 8282 intel_encoder->pre_enable = vlv_pre_enable_dp; 8283 intel_encoder->enable = vlv_enable_dp; 8284 intel_encoder->disable = vlv_disable_dp; 8285 intel_encoder->post_disable = vlv_post_disable_dp; 8286 } else { 8287 intel_encoder->pre_enable = g4x_pre_enable_dp; 8288 intel_encoder->enable = g4x_enable_dp; 8289 intel_encoder->disable = g4x_disable_dp; 8290 intel_encoder->post_disable = g4x_post_disable_dp; 8291 } 8292 8293 if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) || 8294 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) 8295 intel_dig_port->dp.set_link_train = cpt_set_link_train; 8296 else 8297 intel_dig_port->dp.set_link_train = g4x_set_link_train; 8298 8299 if (IS_CHERRYVIEW(dev_priv)) 8300 intel_dig_port->dp.set_signal_levels = chv_set_signal_levels; 8301 else if (IS_VALLEYVIEW(dev_priv)) 8302 intel_dig_port->dp.set_signal_levels = vlv_set_signal_levels; 8303 else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) 8304 intel_dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels; 8305 else if (IS_GEN(dev_priv, 6) && port == PORT_A) 8306 intel_dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels; 8307 else 8308 intel_dig_port->dp.set_signal_levels = g4x_set_signal_levels; 8309 8310 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) || 8311 (HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) { 8312 intel_dig_port->dp.preemph_max = intel_dp_preemph_max_3; 8313 intel_dig_port->dp.voltage_max = intel_dp_voltage_max_3; 8314 } else { 8315 intel_dig_port->dp.preemph_max = intel_dp_preemph_max_2; 8316 intel_dig_port->dp.voltage_max = intel_dp_voltage_max_2; 8317 } 8318 8319 intel_dig_port->dp.output_reg = output_reg; 8320 intel_dig_port->max_lanes = 4; 8321 intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port); 8322 intel_dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port); 8323 8324 intel_encoder->type = INTEL_OUTPUT_DP; 8325 intel_encoder->power_domain = intel_port_to_power_domain(port); 8326 if (IS_CHERRYVIEW(dev_priv)) { 8327 if (port == PORT_D) 8328 intel_encoder->pipe_mask = BIT(PIPE_C); 8329 else 8330 intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B); 8331 } else { 8332 intel_encoder->pipe_mask = ~0; 8333 } 8334 intel_encoder->cloneable = 0; 8335 intel_encoder->port = port; 8336 8337 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse; 8338 8339 if (HAS_GMCH(dev_priv)) { 8340 if (IS_GM45(dev_priv)) 8341 intel_dig_port->connected = gm45_digital_port_connected; 8342 else 8343 intel_dig_port->connected = g4x_digital_port_connected; 8344 } else { 8345 if (port == PORT_A) 8346 intel_dig_port->connected = ilk_digital_port_connected; 8347 else 8348 intel_dig_port->connected = ibx_digital_port_connected; 8349 } 8350 8351 if (port != PORT_A) 8352 intel_infoframe_init(intel_dig_port); 8353 8354 intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); 8355 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) 8356 goto err_init_connector; 8357 8358 return true; 8359 8360 err_init_connector: 8361 drm_encoder_cleanup(encoder); 8362 err_encoder_init: 8363 kfree(intel_connector); 8364 err_connector_alloc: 8365 kfree(intel_dig_port); 8366 return false; 8367 } 8368 8369 void intel_dp_mst_suspend(struct drm_i915_private *dev_priv) 8370 { 8371 struct intel_encoder *encoder; 8372 8373 for_each_intel_encoder(&dev_priv->drm, encoder) { 8374 struct intel_dp *intel_dp; 8375 8376 if (encoder->type != INTEL_OUTPUT_DDI) 8377 continue; 8378 8379 intel_dp = enc_to_intel_dp(encoder); 8380 8381 if (!intel_dp->can_mst) 8382 continue; 8383 8384 if (intel_dp->is_mst)
8385 drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr); 8386 } 8387 } 8388 8389 void intel_dp_mst_resume(struct drm_i915_private *dev_priv) 8390 { 8391 struct intel_encoder *encoder; 8392 8393 for_each_intel_encoder(&dev_priv->drm, encoder) { 8394 struct intel_dp *intel_dp; 8395 int ret; 8396 8397 if (encoder->type != INTEL_OUTPUT_DDI) 8398 continue; 8399 8400 intel_dp = enc_to_intel_dp(encoder); 8401 8402 if (!intel_dp->can_mst) 8403 continue; 8404 8405 ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr, 8406 true); 8407 if (ret) { 8408 intel_dp->is_mst = false; 8409 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 8410 false); 8411 } 8412 } 8413 } 8414
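/*
 * Usage sketch (illustrative only): platform display setup code registers
 * one DP encoder per port by passing that port's output register to
 * intel_dp_init(), e.g.:
 *
 *	if (!intel_dp_init(dev_priv, DP_B, PORT_B))
 *		drm_dbg_kms(&dev_priv->drm, "DP B init failed\n");
 *
 * On failure intel_dp_init() cleans up the encoder and frees the digital
 * port and connector it allocated, so the caller has nothing to unwind.
 */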