1 /* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/iopoll.h>
#include <linux/log2.h>
#include <linux/math.h>
#include <linux/notifier.h>
#include <linux/seq_buf.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/string_helpers.h>
#include <linux/timekeeping.h>
#include <linux/types.h>
#include <asm/byteorder.h>

#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dp_tunnel.h>
#include <drm/display/drm_dsc_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_fixed.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include "g4x_dp.h"
#include "intel_alpm.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_backlight.h"
#include "intel_combo_phy_regs.h"
#include "intel_connector.h"
#include "intel_crtc.h"
#include "intel_crtc_state_dump.h"
#include "intel_cx0_phy.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_driver.h"
#include "intel_display_jiffies.h"
#include "intel_display_utils.h"
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
#include "intel_dp_hdcp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dp_test.h"
#include "intel_dp_tunnel.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_drrs.h"
#include "intel_encoder.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_hotplug_irq.h"
#include "intel_lspcon.h"
#include "intel_lvds.h"
#include "intel_modeset_lock.h"
#include "intel_panel.h"
#include "intel_pch_display.h"
#include "intel_pfit.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_tc.h"
#include "intel_vblank.h"
#include "intel_vdsc.h"
#include "intel_vrr.h"

/* Max DSC line buffer depth supported by HW. */
#define INTEL_DP_DSC_MAX_LINE_BUF_DEPTH		13

/* DP DSC FEC Overhead factor in ppm = 1/(0.972261) = 1.028530 */
#define DP_DSC_FEC_OVERHEAD_FACTOR		1028530

/*
 * Constants for DP DSC configurations: the discrete compressed bpp values
 * allowed by the VESA DSC spec, used by the align_*_vesa_compressed_bpp_x16()
 * helpers below. Must be kept in increasing order.
 */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/**
 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 *
 * This function is not safe to use prior to encoder type being set.
 */
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	return dig_port->base.type == INTEL_OUTPUT_EDP;
}

static void intel_dp_unset_edid(struct intel_dp *intel_dp);

/* Is link rate UHBR and thus 128b/132b? */
bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state)
{
	return drm_dp_is_uhbr_rate(crtc_state->port_clock);
}

/**
 * intel_dp_link_symbol_size - get the link symbol size for a given link rate
 * @rate: link rate in 10kbit/s units
 *
 * Returns the link symbol size in bits/symbol units depending on the link
 * rate -> channel coding.
 */
int intel_dp_link_symbol_size(int rate)
{
	/* 32 bits/symbol for 128b/132b (UHBR), 10 bits/symbol for 8b/10b */
	return drm_dp_is_uhbr_rate(rate) ? 32 : 10;
}

/**
 * intel_dp_link_symbol_clock - convert link rate to link symbol clock
 * @rate: link rate in 10kbit/s units
 *
 * Returns the link symbol clock frequency in kHz units depending on the
 * link rate and channel coding.
 */
int intel_dp_link_symbol_clock(int rate)
{
	return DIV_ROUND_CLOSEST(rate * 10, intel_dp_link_symbol_size(rate));
}

/*
 * Maximum link rate (in 10kbit/s units) advertised by the DPRX, taking a
 * bandwidth-allocating DP tunnel and known-broken eDP panels into account.
 */
static int max_dprx_rate(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	int max_rate;

	if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
		max_rate = drm_dp_tunnel_max_dprx_rate(intel_dp->tunnel);
	else
		max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);

	/*
	 * Some platforms + eDP panels may not reliably support HBR3
	 * due to signal integrity limitations, despite advertising it.
	 * Cap the link rate to HBR2 to avoid unstable configurations for the
	 * known machines.
	 */
	if (intel_dp_is_edp(intel_dp) && intel_has_quirk(display, QUIRK_EDP_LIMIT_RATE_HBR2))
		max_rate = min(max_rate, 540000);

	return max_rate;
}

/* Maximum lane count advertised by the DPRX (or the DP tunnel, if enabled). */
static int max_dprx_lane_count(struct intel_dp *intel_dp)
{
	if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
		return drm_dp_tunnel_max_dprx_lane_count(intel_dp->tunnel);

	return drm_dp_max_lane_count(intel_dp->dpcd);
}

/* Fall back to RBR only, used when the DPCD advertises no valid rates. */
static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp)
{
	intel_dp->sink_rates[0] = 162000;
	intel_dp->num_sink_rates = 1;
}

/* update sink rates from dpcd */
static void intel_dp_set_dpcd_sink_rates(struct intel_dp *intel_dp)
{
	static const int dp_rates[] = {
		162000, 270000, 540000, 810000
	};
	int i, max_rate;
	int max_lttpr_rate;

	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
		/* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
		static const int quirk_rates[] = { 162000, 270000, 324000 };

		memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates));
		intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates);

		return;
	}

	/*
	 * Sink rates for 8b/10b.
	 */
	max_rate = max_dprx_rate(intel_dp);
	max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps);
	if (max_lttpr_rate)
		max_rate = min(max_rate, max_lttpr_rate);

	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
		if (dp_rates[i] > max_rate)
			break;
		intel_dp->sink_rates[i] = dp_rates[i];
	}

	/*
	 * Sink rates for 128b/132b. If set, sink should support all 8b/10b
	 * rates and 10 Gbps.
	 */
	if (drm_dp_128b132b_supported(intel_dp->dpcd)) {
		u8 uhbr_rates = 0;

		BUILD_BUG_ON(ARRAY_SIZE(intel_dp->sink_rates) < ARRAY_SIZE(dp_rates) + 3);

		drm_dp_dpcd_readb(&intel_dp->aux,
				  DP_128B132B_SUPPORTED_LINK_RATES, &uhbr_rates);

		if (drm_dp_lttpr_count(intel_dp->lttpr_common_caps)) {
			/* We have a repeater */
			if (intel_dp->lttpr_common_caps[0] >= 0x20 &&
			    intel_dp->lttpr_common_caps[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
							DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] &
			    DP_PHY_REPEATER_128B132B_SUPPORTED) {
				/* Repeater supports 128b/132b, valid UHBR rates */
				uhbr_rates &= intel_dp->lttpr_common_caps[DP_PHY_REPEATER_128B132B_RATES -
									  DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
			} else {
				/* Does not support 128b/132b */
				uhbr_rates = 0;
			}
		}

		if (uhbr_rates & DP_UHBR10)
			intel_dp->sink_rates[i++] = 1000000;
		if (uhbr_rates & DP_UHBR13_5)
			intel_dp->sink_rates[i++] = 1350000;
		if (uhbr_rates & DP_UHBR20)
			intel_dp->sink_rates[i++] = 2000000;
	}

	/* 'i' has accumulated the count of both 8b/10b and UHBR rates above */
	intel_dp->num_sink_rates = i;
}

/*
 * Populate sink_rates[] from the DPCD, falling back to the RBR default
 * (with an error logged) when the DPCD advertises no valid link rates.
 */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;

	intel_dp_set_dpcd_sink_rates(intel_dp);

	if (intel_dp->num_sink_rates)
		return;

	drm_err(display->drm,
		"[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD with no link rates, using defaults\n",
		connector->base.base.id, connector->base.name,
		encoder->base.base.id, encoder->base.name);

	intel_dp_set_default_sink_rates(intel_dp);
}

/* Fall back to a single lane, used when the DPCD lane count is invalid. */
static void intel_dp_set_default_max_sink_lane_count(struct intel_dp *intel_dp)
{
	intel_dp->max_sink_lane_count = 1;
}

/*
 * Read the max sink lane count from the DPCD; only 1, 2 and 4 are valid
 * per the DP spec, anything else falls back to 1 with an error logged.
 */
static void intel_dp_set_max_sink_lane_count(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;

	intel_dp->max_sink_lane_count = max_dprx_lane_count(intel_dp);

	switch (intel_dp->max_sink_lane_count) {
	case 1:
	case 2:
	case 4:
		return;
	}

	drm_err(display->drm,
		"[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD max lane count (%d), using default\n",
		connector->base.base.id, connector->base.name,
		encoder->base.base.id, encoder->base.name,
		intel_dp->max_sink_lane_count);

	intel_dp_set_default_max_sink_lane_count(intel_dp);
}

/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int i;

	/*
	 * Limit results by potentially reduced max rate: rates[] is sorted
	 * in increasing order, so scan from the top for the first rate
	 * that fits.
	 */
	for (i = 0; i < len; i++) {
		if (rates[len - i - 1] <= max_rate)
			return len - i;
	}

	return 0;
}

/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
					  int max_rate)
{
	return intel_dp_rate_limit_len(intel_dp->common_rates,
				       intel_dp->num_common_rates, max_rate);
}

/*
 * Return the common (source ∩ sink) rate at @index, or RBR (162000)
 * with a WARN if the index is out of range.
 */
int intel_dp_common_rate(struct intel_dp *intel_dp, int index)
{
	struct intel_display *display = to_intel_display(intel_dp);

	if (drm_WARN_ON(display->drm,
			index < 0 || index >= intel_dp->num_common_rates))
		return 162000;

	return intel_dp->common_rates[index];
}

/* Theoretical max between source and sink */
int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
	return intel_dp_common_rate(intel_dp, intel_dp->num_common_rates - 1);
}

/* Max source lane count: the port's lane count, possibly capped by VBT. */
int intel_dp_max_source_lane_count(struct intel_digital_port *dig_port)
{
	int vbt_max_lanes = intel_bios_dp_max_lane_count(dig_port->base.devdata);
	int max_lanes = dig_port->max_lanes;

	if (vbt_max_lanes)
		max_lanes = min(max_lanes, vbt_max_lanes);

	return max_lanes;
}

/* Theoretical max between source and sink */
int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	int source_max = intel_dp_max_source_lane_count(dig_port);
	int sink_max = intel_dp->max_sink_lane_count;
	int lane_max = intel_tc_port_max_lane_count(dig_port);
	int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps);

	/* An LTTPR (if present) may further limit the sink lane count */
	if (lttpr_max)
		sink_max = min(sink_max, lttpr_max);

	return min3(source_max, sink_max, lane_max);
}

/* Debug-forced lane count, clamped to the valid common range. */
static int forced_lane_count(struct intel_dp *intel_dp)
{
	return clamp(intel_dp->link.force_lane_count, 1, intel_dp_max_common_lane_count(intel_dp));
}

/*
 * Max lane count to use for the link: the (possibly forced) limit,
 * sanitized to the valid 1/2/4 values.
 */
int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	int lane_count;

	if (intel_dp->link.force_lane_count)
		lane_count = forced_lane_count(intel_dp);
	else
		lane_count = intel_dp->link.max_lane_count;

	switch (lane_count) {
	case 1:
	case 2:
	case 4:
		return lane_count;
	default:
		MISSING_CASE(lane_count);
		return 1;
	}
}

/* Min lane count: 1, unless a lane count is being forced for debugging. */
static int intel_dp_min_lane_count(struct intel_dp *intel_dp)
{
	if (intel_dp->link.force_lane_count)
		return forced_lane_count(intel_dp);

	return 1;
}

/*
 * Compute the total link BW overhead (in 1ppm units) for the given link
 * parameters, adding the UHBR/DSC overhead flags implied by them.
 */
int intel_dp_link_bw_overhead(int link_clock, int lane_count, int hdisplay,
			      int dsc_slice_count, int bpp_x16, unsigned long flags)
{
	int overhead;

	WARN_ON(flags & ~(DRM_DP_BW_OVERHEAD_MST | DRM_DP_BW_OVERHEAD_SSC_REF_CLK |
			  DRM_DP_BW_OVERHEAD_FEC));

	if (drm_dp_is_uhbr_rate(link_clock))
		flags |= DRM_DP_BW_OVERHEAD_UHBR;

	if (dsc_slice_count)
		flags |= DRM_DP_BW_OVERHEAD_DSC;

	overhead = drm_dp_bw_overhead(lane_count, hdisplay,
				      dsc_slice_count,
				      bpp_x16,
				      flags);

	/*
	 * TODO: clarify whether a minimum required by the fixed FEC overhead
	 * in the bspec audio programming sequence is required here.
	 */
	return max(overhead, intel_dp_bw_fec_overhead(flags & DRM_DP_BW_OVERHEAD_FEC));
}

/*
 * The required data bandwidth for a mode with given pixel clock and bpp. This
 * is the required net bandwidth independent of the data bandwidth efficiency.
 */
int intel_dp_link_required(int link_clock, int lane_count,
			   int mode_clock, int mode_hdisplay,
			   int link_bpp_x16, unsigned long bw_overhead_flags)
{
	int bw_overhead = intel_dp_link_bw_overhead(link_clock, lane_count, mode_hdisplay,
						    0, link_bpp_x16, bw_overhead_flags);

	return intel_dp_effective_data_rate(mode_clock, link_bpp_x16, bw_overhead);
}

/**
 * intel_dp_effective_data_rate - Return the pixel data rate accounting for BW allocation overhead
 * @pixel_clock: pixel clock in kHz
 * @bpp_x16: bits per pixel .4 fixed point format
 * @bw_overhead: BW allocation overhead in 1ppm units
 *
 * Return the effective pixel data rate in kB/sec units taking into account
 * the provided SSC, FEC, DSC BW allocation overhead.
 */
int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16,
				 int bw_overhead)
{
	/* divide out the ppm scale (1000000), the .4 fixpoint (16) and bits->bytes (8) */
	return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_clock * bpp_x16, bw_overhead),
				1000000 * 16 * 8);
}

/**
 * intel_dp_max_link_data_rate: Calculate the maximum rate for the given link params
 * @intel_dp: Intel DP object
 * @max_dprx_rate: Maximum data rate of the DPRX
 * @max_dprx_lanes: Maximum lane count of the DPRX
 *
 * Calculate the maximum data rate for the provided link parameters taking into
 * account any BW limitations by a DP tunnel attached to @intel_dp.
 *
 * Returns the maximum data rate in kBps units.
 */
int intel_dp_max_link_data_rate(struct intel_dp *intel_dp,
				int max_dprx_rate, int max_dprx_lanes)
{
	int max_rate = drm_dp_max_dprx_data_rate(max_dprx_rate, max_dprx_lanes);

	if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
		max_rate = min(max_rate,
			       drm_dp_tunnel_available_bw(intel_dp->tunnel));

	return max_rate;
}

/* Can this output use the pipe joiner (display version / port permitting)? */
bool intel_dp_has_joiner(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct intel_connector *connector = intel_dp->attached_connector;

	/* eDP MSO is not compatible with joiner */
	if (intel_dp->mso_link_count)
		return false;

	if (intel_dp_is_edp(intel_dp) &&
	    !connector->panel.vbt.edp.pipe_joiner_enable)
		return false;

	return DISPLAY_VER(display) >= 12 ||
		(DISPLAY_VER(display) == 11 &&
		 encoder->port != PORT_A);
}

/* Platform max source rates below, all in 10kbit/s units. */

static int dg2_max_source_rate(struct intel_dp *intel_dp)
{
	return intel_dp_is_edp(intel_dp) ? 810000 : 1350000;
}

static int icl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;

	/* Combo PHY external DP tops out at HBR2; otherwise HBR3 */
	if (intel_encoder_is_combo(encoder) && !intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}

static int ehl_max_source_rate(struct intel_dp *intel_dp)
{
	if (intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}

static int mtl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;

	if (intel_encoder_is_c10phy(encoder) ||
	    display->platform.pantherlake_wildcatlake)
		return 810000;

	if (DISPLAY_VERx100(display) == 1401)
		return 1350000;

	return 2000000;
}

/*
 * Max link rate allowed by the VBT, combining the port-level and (for eDP)
 * panel-level limits; 0 means no VBT limit.
 */
static int vbt_max_link_rate(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	int max_rate;

	max_rate = intel_bios_dp_max_link_rate(encoder->devdata);

	if (intel_dp_is_edp(intel_dp)) {
		struct intel_connector *connector = intel_dp->attached_connector;
		int edp_max_rate = connector->panel.vbt.edp.max_link_rate;

		if (max_rate && edp_max_rate)
			max_rate = min(max_rate, edp_max_rate);
		else if (edp_max_rate)
			max_rate = edp_max_rate;
	}

	return max_rate;
}

/*
 * Select the per-platform source rate table and trim it to the platform
 * and VBT max rate limits. Called once per encoder.
 */
static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The values must be in increasing order */
	static const int bmg_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000, 675000,
		810000, 1000000, 1350000,
	};
	static const int mtl_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000, 675000,
		810000, 1000000, 2000000,
	};
	static const int icl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000,
		1000000, 1350000,
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_display *display = to_intel_display(intel_dp);
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate;

	/* This should only be done once */
	drm_WARN_ON(display->drm,
		    intel_dp->source_rates || intel_dp->num_source_rates);

	if (DISPLAY_VER(display) >= 14) {
		if (display->platform.battlemage) {
			source_rates = bmg_rates;
			size = ARRAY_SIZE(bmg_rates);
		} else {
			source_rates = mtl_rates;
			size = ARRAY_SIZE(mtl_rates);
		}
		max_rate = mtl_max_source_rate(intel_dp);
	} else if (DISPLAY_VER(display) >= 11) {
		source_rates = icl_rates;
		size = ARRAY_SIZE(icl_rates);
		if (display->platform.dg2)
			max_rate = dg2_max_source_rate(intel_dp);
		else if (display->platform.alderlake_p || display->platform.alderlake_s ||
			 display->platform.dg1 || display->platform.rocketlake)
			max_rate = 810000;
		else if (display->platform.jasperlake || display->platform.elkhartlake)
			max_rate = ehl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
	} else if (display->platform.geminilake || display->platform.broxton) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (DISPLAY_VER(display) == 9) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((display->platform.haswell && !display->platform.haswell_ulx) ||
		   display->platform.broadwell) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	vbt_max_rate = vbt_max_link_rate(intel_dp);
	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}

/*
 * Merge-style intersection of two sorted (increasing) rate arrays into
 * common_rates[]; returns the number of common rates found.
 */
static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}

/* return index of rate in rates array, or -1 if not found */
int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int i;

	for (i = 0; i < len; i++)
		if (rate == rates[i])
			return i;

	return -1;
}

/* Link rate (in 10kbit/s units) of a link config table entry. */
static int intel_dp_link_config_rate(struct intel_dp *intel_dp,
				     const struct intel_dp_link_config *lc)
{
	return intel_dp_common_rate(intel_dp, lc->link_rate_idx);
}

/* Lane count of a link config table entry (stored as log2). */
static int intel_dp_link_config_lane_count(const struct intel_dp_link_config *lc)
{
	return 1 << lc->lane_count_exp;
}

/* Total data BW (in kBps) of a link config table entry. */
static int intel_dp_link_config_bw(struct intel_dp *intel_dp,
				   const struct intel_dp_link_config *lc)
{
	return drm_dp_max_dprx_data_rate(intel_dp_link_config_rate(intel_dp, lc),
					 intel_dp_link_config_lane_count(lc));
}

/*
 * sort_r() comparator: order link configs by total BW, breaking ties by
 * link rate (i.e. preferring fewer lanes at a higher rate first in a tie).
 */
static int link_config_cmp_by_bw(const void *a, const void *b, const void *p)
{
	struct intel_dp *intel_dp = (struct intel_dp *)p; /* remove const */
	const struct intel_dp_link_config *lc_a = a;
	const struct intel_dp_link_config *lc_b = b;
	int bw_a = intel_dp_link_config_bw(intel_dp, lc_a);
	int bw_b = intel_dp_link_config_bw(intel_dp, lc_b);

	if (bw_a != bw_b)
		return bw_a - bw_b;

	return intel_dp_link_config_rate(intel_dp, lc_a) -
	       intel_dp_link_config_rate(intel_dp, lc_b);
}

/*
 * Build the table of all valid (rate, lane count) combinations, sorted by
 * increasing bandwidth, for stepwise link parameter fallback.
 */
static void intel_dp_link_config_init(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_dp_link_config *lc;
	int num_common_lane_configs;
	int i;
	int j;

	if (drm_WARN_ON(display->drm, !is_power_of_2(intel_dp_max_common_lane_count(intel_dp))))
		return;

	/* Lane counts 1 << 0 .. 1 << ilog2(max), i.e. 1, 2, 4 */
	num_common_lane_configs = ilog2(intel_dp_max_common_lane_count(intel_dp)) + 1;

	if (drm_WARN_ON(display->drm, intel_dp->num_common_rates * num_common_lane_configs >
				      ARRAY_SIZE(intel_dp->link.configs)))
		return;

	intel_dp->link.num_configs = intel_dp->num_common_rates * num_common_lane_configs;

	lc = &intel_dp->link.configs[0];
	for (i = 0; i < intel_dp->num_common_rates; i++) {
		for (j = 0; j < num_common_lane_configs; j++) {
			lc->lane_count_exp = j;
			lc->link_rate_idx = i;

			lc++;
		}
	}

	sort_r(intel_dp->link.configs, intel_dp->link.num_configs,
	       sizeof(intel_dp->link.configs[0]),
	       link_config_cmp_by_bw, NULL,
	       intel_dp);
}

/* Look up the (rate, lane count) pair at @idx in the sorted config table. */
void intel_dp_link_config_get(struct intel_dp *intel_dp, int idx, int *link_rate, int *lane_count)
{
	struct intel_display *display = to_intel_display(intel_dp);
	const struct intel_dp_link_config *lc;

	if (drm_WARN_ON(display->drm, idx < 0 || idx >= intel_dp->link.num_configs))
		idx = 0;

	lc = &intel_dp->link.configs[idx];

	*link_rate = intel_dp_link_config_rate(intel_dp, lc);
	*lane_count = intel_dp_link_config_lane_count(lc);
}

/* Find the config table index for a (rate, lane count) pair, or -1. */
int intel_dp_link_config_index(struct intel_dp *intel_dp, int link_rate, int lane_count)
{
	int link_rate_idx = intel_dp_rate_index(intel_dp->common_rates, intel_dp->num_common_rates,
						link_rate);
	int lane_count_exp = ilog2(lane_count);
	int i;

	for (i = 0; i < intel_dp->link.num_configs; i++) {
		const struct intel_dp_link_config *lc = &intel_dp->link.configs[i];

		if (lc->lane_count_exp == lane_count_exp &&
		    lc->link_rate_idx == link_rate_idx)
			return i;
	}

	return -1;
}

/* Intersect source and sink rates and rebuild the link config table. */
static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);

	drm_WARN_ON(display->drm,
		    !intel_dp->num_source_rates || !intel_dp->num_sink_rates);

	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
						     intel_dp->num_source_rates,
						     intel_dp->sink_rates,
						     intel_dp->num_sink_rates,
						     intel_dp->common_rates);

	/* Paranoia, there should always be something in common. */
	if (drm_WARN_ON(display->drm, intel_dp->num_common_rates == 0)) {
		intel_dp->common_rates[0] = 162000;
		intel_dp->num_common_rates = 1;
	}

	intel_dp_link_config_init(intel_dp);
}

/* Are the given link parameters within the currently valid limits? */
bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
				u8 lane_count)
{
	/*
	 * FIXME: we need to synchronize the current link parameters with
	 * hardware readout. Currently fast link training doesn't work on
	 * boot-up.
	 */
	if (link_rate == 0 ||
	    link_rate > intel_dp->link.max_rate)
		return false;

	if (lane_count == 0 ||
	    lane_count > intel_dp_max_lane_count(intel_dp))
		return false;

	return true;
}

/* Scale a mode clock by the fixed FEC overhead factor (ppm). */
u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
{
	return div_u64(mul_u32_u32(mode_clock, DP_DSC_FEC_OVERHEAD_FACTOR),
		       1000000U);
}

/* BW overhead in 1ppm units: the FEC factor when enabled, else unity. */
int intel_dp_bw_fec_overhead(bool fec_enabled)
{
	/*
	 * TODO: Calculate the actual overhead for a given mode.
	 * The hard-coded 1/0.972261=2.853% overhead factor
	 * corresponds (for instance) to the 8b/10b DP FEC 2.4% +
	 * 0.453% DSC overhead. This is enough for a 3840 width mode,
	 * which has a DSC overhead of up to ~0.2%, but may not be
	 * enough for a 1024 width mode where this is ~0.8% (on a 4
	 * lane DP link, with 2 DSC slices and 8 bpp color depth).
	 */
	return fec_enabled ? DP_DSC_FEC_OVERHEAD_FACTOR : 1000000;
}

/* Small joiner RAM size in bits, per display version. */
static int
small_joiner_ram_size_bits(struct intel_display *display)
{
	if (DISPLAY_VER(display) >= 13)
		return 17280 * 8;
	else if (DISPLAY_VER(display) >= 11)
		return 7680 * 8;
	else
		return 6144 * 8;
}

/*
 * Round @min_link_bpp_x16 up to the smallest valid VESA DSC bpp value
 * (in .4 fixed point); returns 0 if no valid value is large enough.
 */
static int align_min_vesa_compressed_bpp_x16(int min_link_bpp_x16)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp); i++) {
		int vesa_bpp_x16 = fxp_q4_from_int(valid_dsc_bpp[i]);

		if (vesa_bpp_x16 >= min_link_bpp_x16)
			return vesa_bpp_x16;
	}

	return 0;
}

/*
 * Round @max_link_bpp_x16 down to the largest valid VESA DSC bpp value
 * (in .4 fixed point); returns 0 if no valid value is small enough.
 */
static int align_max_vesa_compressed_bpp_x16(int max_link_bpp_x16)
{
	int i;

	for (i = ARRAY_SIZE(valid_dsc_bpp) - 1; i >= 0; i--) {
		int vesa_bpp_x16 = fxp_q4_from_int(valid_dsc_bpp[i]);

		if (vesa_bpp_x16 <= max_link_bpp_x16)
			return vesa_bpp_x16;
	}

	return 0;
}

/* Width in bits of the bigjoiner pipe interface, per display version. */
static int bigjoiner_interface_bits(struct intel_display *display)
{
	return DISPLAY_VER(display) >= 14 ? 36 : 24;
}

/* Max compressed bpp limited by the bigjoiner interface bandwidth. */
static u32 bigjoiner_bw_max_bpp(struct intel_display *display, u32 mode_clock,
				int num_joined_pipes)
{
	u32 max_bpp;
	/* With bigjoiner multiple dsc engines are used in parallel so PPC is 2 */
	int ppc = 2;
	int num_big_joiners = num_joined_pipes / 2;

	max_bpp = display->cdclk.max_cdclk_freq * ppc * bigjoiner_interface_bits(display) /
		  intel_dp_mode_to_fec_clock(mode_clock);

	max_bpp *= num_big_joiners;

	return max_bpp;

}

/* Max compressed bpp limited by the small joiner line buffer RAM. */
static u32 small_joiner_ram_max_bpp(struct intel_display *display,
				    u32 mode_hdisplay,
				    int num_joined_pipes)
{
	u32 max_bpp;

	/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
	max_bpp = small_joiner_ram_size_bits(display) / mode_hdisplay;

	max_bpp *= num_joined_pipes;

	return max_bpp;
}

/* Ultrajoiner line buffer RAM size in bits. */
static int ultrajoiner_ram_bits(void)
{
	return 4 * 72 * 512;
}

/* Max compressed bpp limited by the ultrajoiner line buffer RAM. */
static u32 ultrajoiner_ram_max_bpp(u32 mode_hdisplay)
{
	return ultrajoiner_ram_bits() / mode_hdisplay;
}

/* TODO: return a bpp_x16 value */
static
u32 get_max_compressed_bpp_with_joiner(struct intel_display *display,
				       u32 mode_clock, u32 mode_hdisplay,
				       int num_joined_pipes)
{
	u32 max_bpp = small_joiner_ram_max_bpp(display, mode_hdisplay, num_joined_pipes);

	if (num_joined_pipes > 1)
		max_bpp = min(max_bpp, bigjoiner_bw_max_bpp(display, mode_clock,
							    num_joined_pipes));
	if (num_joined_pipes == 4)
		max_bpp = min(max_bpp, ultrajoiner_ram_max_bpp(mode_hdisplay));

	return max_bpp;
}

/*
 * Minimum DSC slice count required for the mode by the sink's throughput
 * and max slice width limits (and the source's CDCLK headroom); 0 means
 * the mode is not supported.
 */
static int intel_dp_dsc_min_slice_count(const struct intel_connector *connector,
					int mode_clock, int mode_hdisplay)
{
	struct intel_display *display = to_intel_display(connector);
	bool is_edp =
		connector->base.connector_type == DRM_MODE_CONNECTOR_eDP;
	int min_slice_count;
	int max_slice_width;
	int tp_rgb_yuv444;
	int tp_yuv422_420;

	/*
	 * TODO: allow using less than the maximum number of slices
	 * supported by the eDP sink, to allow using fewer DSC engines.
	 */
	if (is_edp)
		return drm_dp_dsc_sink_max_slice_count(connector->dp.dsc_dpcd, true);

	/*
	 * TODO: Use the throughput value specific to the actual RGB/YUV
	 * format of the output.
	 * The RGB/YUV444 throughput value should be always either equal
	 * or smaller than the YUV422/420 value, but let's not depend on
	 * this assumption.
	 */
	if (mode_clock > max(connector->dp.dsc_branch_caps.overall_throughput.rgb_yuv444,
			     connector->dp.dsc_branch_caps.overall_throughput.yuv422_420))
		return 0;

	if (mode_hdisplay > connector->dp.dsc_branch_caps.max_line_width)
		return 0;

	/*
	 * TODO: Pass the total pixel rate of all the streams transferred to
	 * an MST tiled display, calculate the total slice count for all tiles
	 * from this and the per-tile slice count from the total slice count.
	 */
	tp_rgb_yuv444 = drm_dp_dsc_sink_max_slice_throughput(connector->dp.dsc_dpcd,
							     mode_clock, true);
	tp_yuv422_420 = drm_dp_dsc_sink_max_slice_throughput(connector->dp.dsc_dpcd,
							     mode_clock, false);

	/*
	 * TODO: Use the throughput value specific to the actual RGB/YUV
	 * format of the output.
	 * For now use the smaller of these, which is ok, potentially
	 * resulting in a higher than required minimum slice count.
	 * The RGB/YUV444 throughput value should be always either equal
	 * or smaller than the YUV422/420 value, but let's not depend on
	 * this assumption.
	 */
	min_slice_count = DIV_ROUND_UP(mode_clock, min(tp_rgb_yuv444, tp_yuv422_420));

	/*
	 * Due to some DSC engine BW limitations, we need to enable second
	 * slice and VDSC engine, whenever we approach close enough to max CDCLK
	 */
	if (mode_clock >= ((display->cdclk.max_cdclk_freq * 85) / 100))
		min_slice_count = max(min_slice_count, 2);

	max_slice_width = drm_dp_dsc_sink_max_slice_width(connector->dp.dsc_dpcd);
	if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
		drm_dbg_kms(display->drm,
			    "Unsupported slice width %d by DP DSC Sink device\n",
			    max_slice_width);
		return 0;
	}
	/* Also take into account max slice width */
	min_slice_count = max(min_slice_count,
			      DIV_ROUND_UP(mode_hdisplay, max_slice_width));

	return min_slice_count;
}

/*
 * Find a DSC slice configuration supported by both the source HW and the
 * sink for the given mode; returns false (with a debug message) if none
 * of the valid slice counts works.
 */
static bool
intel_dp_dsc_get_slice_config(const struct intel_connector *connector,
			      int mode_clock, int mode_hdisplay,
			      int num_joined_pipes,
			      struct intel_dsc_slice_config *config_ret)
{
	struct intel_display *display = to_intel_display(connector);
	int min_slice_count =
		intel_dp_dsc_min_slice_count(connector, mode_clock, mode_hdisplay);
	bool is_edp =
		connector->base.connector_type == DRM_MODE_CONNECTOR_eDP;
	u32 sink_slice_count_mask =
		drm_dp_dsc_sink_slice_count_mask(connector->dp.dsc_dpcd, is_edp);
	int slices_per_pipe;

	/*
	 * Find the closest match to the valid slice count values
	 *
	 * Max HW DSC-per-pipe x slice-per-DSC (= slice-per-pipe) capability:
	 * ICL: 2x2
	 * BMG: 2x2, or for ultrajoined 4 pipes: 3x1
	 * TGL+: 2x4 (TODO: Add support for this)
	 *
	 * TODO: Explore if it's worth increasing the number of slices (from 1
	 * to 2 or 3), so that multiple VDSC engines can be used, thus
	 * reducing the minimum CDCLK requirement, which in turn is determined
	 * by the 1 pixel per clock VDSC engine throughput in
	 * intel_vdsc_min_cdclk().
	 */
	for (slices_per_pipe = 1; slices_per_pipe <= 4; slices_per_pipe++) {
		struct intel_dsc_slice_config config;
		int slices_per_line;

		if (!intel_dsc_get_slice_config(display,
						num_joined_pipes, slices_per_pipe,
						&config))
			continue;

		slices_per_line = intel_dsc_line_slice_count(&config);

		if (!(drm_dp_dsc_slice_count_to_mask(slices_per_line) &
		      sink_slice_count_mask))
			continue;

		/* Slices must evenly divide the active width */
		if (mode_hdisplay % slices_per_line)
			continue;

		if (min_slice_count <= slices_per_line) {
			*config_ret = config;

			return true;
		}
	}

	/* Print slice count 1,2,4,..24 if bit#0,1,3,..23 is set in the mask. */
	sink_slice_count_mask <<= 1;
	drm_dbg_kms(display->drm,
		    "[CONNECTOR:%d:%s] Unsupported slice count (min: %d, sink supported: %*pbl)\n",
		    connector->base.base.id, connector->base.name,
		    min_slice_count,
		    (int)BITS_PER_TYPE(sink_slice_count_mask), &sink_slice_count_mask);

	return false;
}

/* Per-line DSC slice count for the mode, or 0 if no valid config exists. */
u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
				int mode_clock, int mode_hdisplay,
				int num_joined_pipes)
{
	struct intel_dsc_slice_config config;

	if (!intel_dp_dsc_get_slice_config(connector,
					   mode_clock, mode_hdisplay,
					   num_joined_pipes, &config))
		return 0;

	return intel_dsc_line_slice_count(&config);
}

/* Can the source HW emit the given pixel format on this platform? */
static bool source_can_output(struct intel_dp *intel_dp,
			      enum intel_output_format format)
{
	struct intel_display *display = to_intel_display(intel_dp);

	switch (format) {
	case INTEL_OUTPUT_FORMAT_RGB:
		return true;

	case INTEL_OUTPUT_FORMAT_YCBCR444:
		/*
		 * No YCbCr output support on gmch platforms.
		 * Also, ILK doesn't seem capable of DP YCbCr output.
		 * The displayed image is severely corrupted. SNB+ is fine.
		 */
		return !HAS_GMCH(display) && !display->platform.ironlake;

	case INTEL_OUTPUT_FORMAT_YCBCR420:
		/* Platform < Gen 11 cannot output YCbCr420 format */
		return DISPLAY_VER(display) >= 11;

	default:
		MISSING_CASE(format);
		return false;
	}
}

/* Can a branch device (DFP) convert an RGB stream to @sink_format? */
static bool
dfp_can_convert_from_rgb(struct intel_dp *intel_dp,
			 enum intel_output_format sink_format)
{
	if (!drm_dp_is_branch(intel_dp->dpcd))
		return false;

	if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR444)
		return intel_dp->dfp.rgb_to_ycbcr;

	if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		return intel_dp->dfp.rgb_to_ycbcr &&
			intel_dp->dfp.ycbcr_444_to_420;

	return false;
}

/* Can a branch device (DFP) convert a YCbCr444 stream to @sink_format? */
static bool
dfp_can_convert_from_ycbcr444(struct intel_dp *intel_dp,
			      enum intel_output_format sink_format)
{
	if (!drm_dp_is_branch(intel_dp->dpcd))
		return false;

	if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		return intel_dp->dfp.ycbcr_444_to_420;

	return false;
}

/* Can a DFP convert from @output_format to @sink_format? */
static bool
dfp_can_convert(struct intel_dp *intel_dp,
		enum intel_output_format output_format,
		enum intel_output_format sink_format)
{
	switch (output_format) {
	case INTEL_OUTPUT_FORMAT_RGB:
		return dfp_can_convert_from_rgb(intel_dp, sink_format);
	case INTEL_OUTPUT_FORMAT_YCBCR444:
		return dfp_can_convert_from_ycbcr444(intel_dp, sink_format);
	default:
		MISSING_CASE(output_format);
		return false;
	}

	return false;
}

static enum intel_output_format
intel_dp_output_format(struct intel_connector *connector,
		       enum intel_output_format sink_format)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	enum intel_output_format force_dsc_output_format =
		intel_dp->force_dsc_output_format;
	enum intel_output_format
output_format; 1190 if (force_dsc_output_format) { 1191 if (source_can_output(intel_dp, force_dsc_output_format) && 1192 (!drm_dp_is_branch(intel_dp->dpcd) || 1193 sink_format != force_dsc_output_format || 1194 dfp_can_convert(intel_dp, force_dsc_output_format, sink_format))) 1195 return force_dsc_output_format; 1196 1197 drm_dbg_kms(display->drm, "Cannot force DSC output format\n"); 1198 } 1199 1200 if (sink_format == INTEL_OUTPUT_FORMAT_RGB || 1201 dfp_can_convert_from_rgb(intel_dp, sink_format)) 1202 output_format = INTEL_OUTPUT_FORMAT_RGB; 1203 1204 else if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR444 || 1205 dfp_can_convert_from_ycbcr444(intel_dp, sink_format)) 1206 output_format = INTEL_OUTPUT_FORMAT_YCBCR444; 1207 1208 else 1209 output_format = INTEL_OUTPUT_FORMAT_YCBCR420; 1210 1211 drm_WARN_ON(display->drm, !source_can_output(intel_dp, output_format)); 1212 1213 return output_format; 1214 } 1215 1216 int intel_dp_min_bpp(enum intel_output_format output_format) 1217 { 1218 if (output_format == INTEL_OUTPUT_FORMAT_RGB) 1219 return intel_display_min_pipe_bpp(); 1220 else 1221 return 8 * 3; 1222 } 1223 1224 int intel_dp_output_format_link_bpp_x16(enum intel_output_format output_format, int pipe_bpp) 1225 { 1226 /* 1227 * bpp value was assumed to RGB format. And YCbCr 4:2:0 output 1228 * format of the number of bytes per pixel will be half the number 1229 * of bytes of RGB pixel. 
	 */
	if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		pipe_bpp /= 2;

	return fxp_q4_from_int(pipe_bpp);
}

/*
 * The format the sink itself wants for this mode: 4:2:0 when the mode is
 * 4:2:0-only, RGB otherwise (4:2:0-also fallback handled by callers).
 */
static enum intel_output_format
intel_dp_sink_format(struct intel_connector *connector,
		     const struct drm_display_mode *mode)
{
	const struct drm_display_info *info = &connector->base.display_info;

	if (drm_mode_is_420_only(info, mode))
		return INTEL_OUTPUT_FORMAT_YCBCR420;

	return INTEL_OUTPUT_FORMAT_RGB;
}

/* Minimum link bpp (.4 fixed point) for the mode, given its output format. */
static int
intel_dp_mode_min_link_bpp_x16(struct intel_connector *connector,
			       const struct drm_display_mode *mode)
{
	enum intel_output_format output_format, sink_format;

	sink_format = intel_dp_sink_format(connector, mode);

	output_format = intel_dp_output_format(connector, sink_format);

	return intel_dp_output_format_link_bpp_x16(output_format,
						   intel_dp_min_bpp(output_format));
}

static bool intel_dp_hdisplay_bad(struct intel_display *display,
				  int hdisplay)
{
	/*
	 * Older platforms don't like hdisplay==4096 with DP.
	 *
	 * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
	 * and frame counter increment), but we don't get vblank interrupts,
	 * and the pipe underruns immediately. The link also doesn't seem
	 * to get trained properly.
	 *
	 * On CHV the vblank interrupts don't seem to disappear but
	 * otherwise the symptoms are similar.
	 *
	 * TODO: confirm the behaviour on HSW+
	 */
	return hdisplay == 4096 && !HAS_DDI(display);
}

/* Effective max TMDS clock: DFP limit further clamped by the sink's EDID limit. */
static int intel_dp_max_tmds_clock(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	const struct drm_display_info *info = &connector->base.display_info;
	int max_tmds_clock = intel_dp->dfp.max_tmds_clock;

	/* Only consider the sink's max TMDS clock if we know this is a HDMI DFP */
	if (max_tmds_clock && info->max_tmds_clock)
		max_tmds_clock = min(max_tmds_clock, info->max_tmds_clock);

	return max_tmds_clock;
}

/*
 * Check the DP++/HDMI/DVI DFP TMDS clock limits for @clock at @bpc.
 * A limit of 0 means "no known limit" and is skipped.
 */
static enum drm_mode_status
intel_dp_tmds_clock_valid(struct intel_dp *intel_dp,
			  int clock, int bpc,
			  enum intel_output_format sink_format,
			  bool respect_downstream_limits)
{
	int tmds_clock, min_tmds_clock, max_tmds_clock;

	if (!respect_downstream_limits)
		return MODE_OK;

	tmds_clock = intel_hdmi_tmds_clock(clock, bpc, sink_format);

	min_tmds_clock = intel_dp->dfp.min_tmds_clock;
	max_tmds_clock = intel_dp_max_tmds_clock(intel_dp);

	if (min_tmds_clock && tmds_clock < min_tmds_clock)
		return MODE_CLOCK_LOW;

	if (max_tmds_clock && tmds_clock > max_tmds_clock)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}

/*
 * Validate the mode against the downstream facing port (branch device)
 * limits: PCON FRL bandwidth, DFP max dotclock and TMDS clock range,
 * trying a 4:2:0 fallback for the TMDS check where allowed.
 */
static enum drm_mode_status
intel_dp_mode_valid_downstream(struct intel_connector *connector,
			       const struct drm_display_mode *mode,
			       int target_clock)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	const struct drm_display_info *info = &connector->base.display_info;
	enum drm_mode_status status;
	enum intel_output_format sink_format;

	/* If PCON supports FRL MODE, check FRL bandwidth constraints */
	if (intel_dp->dfp.pcon_max_frl_bw) {
		int link_bpp_x16 = intel_dp_mode_min_link_bpp_x16(connector, mode);
		int target_bw;
		int max_frl_bw;

		target_bw = fxp_q4_to_int_roundup(link_bpp_x16) * target_clock;

		max_frl_bw = intel_dp->dfp.pcon_max_frl_bw;

		/* converting bw from Gbps to Kbps */
		max_frl_bw = max_frl_bw * 1000000;

		if (target_bw > max_frl_bw)
			return MODE_CLOCK_HIGH;

		return MODE_OK;
	}

	if (intel_dp->dfp.max_dotclock &&
	    target_clock > intel_dp->dfp.max_dotclock)
		return MODE_CLOCK_HIGH;

	sink_format = intel_dp_sink_format(connector, mode);

	/* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */
	status = intel_dp_tmds_clock_valid(intel_dp, target_clock,
					   8, sink_format, true);

	if (status != MODE_OK) {
		/* Retry with 4:2:0 only if the mode/connector permit it. */
		if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
		    !connector->base.ycbcr_420_allowed ||
		    !drm_mode_is_420_also(info, mode))
			return status;
		sink_format = INTEL_OUTPUT_FORMAT_YCBCR420;
		status = intel_dp_tmds_clock_valid(intel_dp, target_clock,
						   8, sink_format, true);
		if (status != MODE_OK)
			return status;
	}

	return MODE_OK;
}

/* Max hdisplay a single pipe can scan out: 6k on Xe3+, 5k before. */
int intel_dp_max_hdisplay_per_pipe(struct intel_display *display)
{
	return DISPLAY_VER(display) >= 30 ?
		6144 : 5120;
}

/* Is DSC usable at all on this platform/connector/sink combination? */
bool intel_dp_has_dsc(const struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);

	if (!HAS_DSC(display))
		return false;

	if (connector->mst.dp && !HAS_DSC_MST(display))
		return false;

	/* VBT can disable DSC for eDP panels. */
	if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
	    connector->panel.vbt.edp.dsc_disable)
		return false;

	if (!drm_dp_sink_supports_dsc(connector->dp.dsc_dpcd))
		return false;

	return true;
}

/* Does the platform support joining @num_joined_pipes pipes for one output? */
static
bool intel_dp_can_join(struct intel_dp *intel_dp,
		       int num_joined_pipes)
{
	struct intel_display *display = to_intel_display(intel_dp);

	if (num_joined_pipes > 1 && !intel_dp_has_joiner(intel_dp))
		return false;

	switch (num_joined_pipes) {
	case 1:
		return true;
	case 2:
		return HAS_BIGJOINER(display) ||
			HAS_UNCOMPRESSED_JOINER(display);
	case 4:
		return HAS_ULTRAJOINER(display);
	default:
		return false;
	}
}

/*
 * Check the mode's pixel rate against the CDCLK-derived dotclock limit,
 * scaled up by the number of joined pipes. With DSC the rate is inflated
 * to account for DSC bubble overhead; without DSC the (lower) uncompressed
 * limit applies.
 */
bool intel_dp_dotclk_valid(struct intel_display *display,
			   int target_clock,
			   int htotal,
			   int dsc_slice_count,
			   int num_joined_pipes)
{
	int max_dotclk = display->cdclk.max_dotclk_freq;
	int effective_dotclk_limit;

	effective_dotclk_limit = max_dotclk * num_joined_pipes;

	if (dsc_slice_count)
		target_clock = intel_dsc_get_pixel_rate_with_dsc_bubbles(display,
									 target_clock,
									 htotal,
									 dsc_slice_count);
	else
		effective_dotclk_limit =
			intel_max_uncompressed_dotclock(display) * num_joined_pipes;

	return target_clock <= effective_dotclk_limit;
}

/*
 * Connector .mode_valid() hook: validate @mode against transcoder, link
 * bandwidth, joiner, DSC, plane size, dotclock and downstream limits.
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *_connector,
		    const struct drm_display_mode *mode)
{
	struct intel_display *display = to_intel_display(_connector->dev);
	struct intel_connector *connector = to_intel_connector(_connector);
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	enum intel_output_format sink_format, output_format;
	const struct drm_display_mode *fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	u16 dsc_max_compressed_bpp = 0;
	enum drm_mode_status status;
	bool dsc = false;
	int num_joined_pipes;
	int link_bpp_x16;

	status = intel_cpu_transcoder_mode_valid(display, mode);
	if (status != MODE_OK)
		return status;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (intel_dp_hdisplay_bad(display, mode->hdisplay))
		return MODE_H_ILLEGAL;

	/* eDP: validate against the panel's fixed mode and use its clock. */
	fixed_mode = intel_panel_fixed_mode(connector, mode);
	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		status = intel_panel_mode_valid(connector, mode);
		if (status != MODE_OK)
			return status;

		target_clock = fixed_mode->clock;
	}

	sink_format = intel_dp_sink_format(connector, mode);
	output_format = intel_dp_output_format(connector, sink_format);

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_link_data_rate(intel_dp, max_link_clock, max_lanes);

	link_bpp_x16 = intel_dp_mode_min_link_bpp_x16(connector, mode);
	mode_rate = intel_dp_link_required(max_link_clock, max_lanes,
					   target_clock, mode->hdisplay,
					   link_bpp_x16, 0);

	/*
	 * We cannot determine the required pipe-join count before knowing whether
	 * DSC is needed, nor can we determine DSC need without knowing the pipe
	 * count.
	 * Because of this dependency cycle, the only correct approach is to iterate
	 * over candidate pipe counts and evaluate each combination.
	 */
	status = MODE_CLOCK_HIGH;
	for_each_joiner_candidate(connector, mode, num_joined_pipes) {
		int dsc_slice_count = 0;

		status = intel_pfit_mode_valid(display, mode, output_format, num_joined_pipes);
		if (status != MODE_OK)
			continue;

		if (intel_dp_has_dsc(connector)) {
			int pipe_bpp;

			dsc_slice_count = intel_dp_dsc_get_slice_count(connector,
								       target_clock,
								       mode->hdisplay,
								       num_joined_pipes);

			/*
			 * TBD pass the connector BPC,
			 * for now U8_MAX so that max BPC on that platform would be picked
			 */
			pipe_bpp = intel_dp_dsc_compute_max_bpp(connector, U8_MAX);

			/*
			 * Output bpp is stored in 6.4 format so right shift by 4 to get the
			 * integer value since we support only integer values of bpp.
			 */
			if (intel_dp_is_edp(intel_dp)) {
				dsc_max_compressed_bpp =
					drm_edp_dsc_sink_output_bpp(connector->dp.dsc_dpcd) >> 4;

				dsc = dsc_max_compressed_bpp && dsc_slice_count;
			} else if (drm_dp_sink_supports_fec(connector->dp.fec_capability)) {
				unsigned long bw_overhead_flags = 0;

				/* FEC overhead applies on 8b/10b (non-UHBR) links only. */
				if (!drm_dp_is_uhbr_rate(max_link_clock))
					bw_overhead_flags |= DRM_DP_BW_OVERHEAD_FEC;

				dsc = intel_dp_mode_valid_with_dsc(connector,
								   max_link_clock, max_lanes,
								   target_clock, mode->hdisplay,
								   num_joined_pipes,
								   output_format, pipe_bpp,
								   bw_overhead_flags);
			}
		}

		/* Joined pipes may require DSC; reject the candidate otherwise. */
		if (intel_dp_joiner_needs_dsc(display, num_joined_pipes) && !dsc) {
			status = MODE_CLOCK_HIGH;
			continue;
		}

		if (mode_rate > max_rate && !dsc) {
			status = MODE_CLOCK_HIGH;
			continue;
		}

		status = intel_mode_valid_max_plane_size(display, mode, num_joined_pipes);
		if (status != MODE_OK)
			continue;

		if (!dsc)
			dsc_slice_count = 0;

		if (!intel_dp_dotclk_valid(display,
					   target_clock,
					   mode->htotal,
					   dsc_slice_count,
					   num_joined_pipes)) {
			status = MODE_CLOCK_HIGH;
			continue;
		}

		/* First (smallest) pipe count that works wins. */
		break;
	}

	if (status != MODE_OK)
		return status;

	return intel_dp_mode_valid_downstream(connector, mode, target_clock);
}

bool intel_dp_source_supports_tps3(struct intel_display *display)
{
	return DISPLAY_VER(display) >= 9 ||
		display->platform.broadwell || display->platform.haswell;
}

bool intel_dp_source_supports_tps4(struct intel_display *display)
{
	return DISPLAY_VER(display) >= 10;
}

/* Append @nelem ints from @array to @s as a comma separated list. */
static void seq_buf_print_array(struct seq_buf *s, const int *array, int nelem)
{
	int i;

	for (i = 0; i < nelem; i++)
		seq_buf_printf(s, "%s%d", i ? ", " : "", array[i]);
}

/* Dump source/sink/common link rates to the KMS debug log. */
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	DECLARE_SEQ_BUF(s, 128); /* FIXME: too big for stack? */

	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	seq_buf_print_array(&s, intel_dp->source_rates, intel_dp->num_source_rates);
	drm_dbg_kms(display->drm, "source rates: %s\n", seq_buf_str(&s));

	seq_buf_clear(&s);
	seq_buf_print_array(&s, intel_dp->sink_rates, intel_dp->num_sink_rates);
	drm_dbg_kms(display->drm, "sink rates: %s\n", seq_buf_str(&s));

	seq_buf_clear(&s);
	seq_buf_print_array(&s, intel_dp->common_rates, intel_dp->num_common_rates);
	drm_dbg_kms(display->drm, "common rates: %s\n", seq_buf_str(&s));
}

/*
 * Highest common rate not above the debugfs-forced rate; falls back to the
 * lowest common rate if the forced rate is below all of them.
 */
static int forced_link_rate(struct intel_dp *intel_dp)
{
	int len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->link.force_rate);

	if (len == 0)
		return intel_dp_common_rate(intel_dp, 0);

	return intel_dp_common_rate(intel_dp, len - 1);
}

int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int len;

	if (intel_dp->link.force_rate)
		return forced_link_rate(intel_dp);

	len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->link.max_rate);

	return intel_dp_common_rate(intel_dp, len - 1);
}

static int
intel_dp_min_link_rate(struct intel_dp *intel_dp)
{
	if (intel_dp->link.force_rate)
		return forced_link_rate(intel_dp);

	return intel_dp_common_rate(intel_dp, 0);
}

/* Index of @rate in the sink rate table (eDP 1.4 rate select value). */
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	struct intel_display *display = to_intel_display(intel_dp);
	int i = intel_dp_rate_index(intel_dp->sink_rates,
				    intel_dp->num_sink_rates, rate);

	if (drm_WARN_ON(display->drm, i < 0))
		i = 0;

	return i;
}

/*
 * Translate @port_clock into the DPCD link-bw/rate-select pair to write to
 * the sink: rate-select method for eDP 1.4, link-bw code otherwise.
 */
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
			   u8 *link_bw, u8 *rate_select)
{
	struct intel_display *display = to_intel_display(intel_dp);

	/* FIXME g4x can't generate an exact 2.7GHz with the 96MHz non-SSC refclk */
	if (display->platform.g4x && port_clock == 268800)
		port_clock = 270000;

	/* eDP 1.4 rate select method. */
	if (intel_dp->use_rate_select) {
		*link_bw = 0;
		*rate_select =
			intel_dp_rate_select(intel_dp, port_clock);
	} else {
		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
		*rate_select = 0;
	}
}

bool intel_dp_has_hdmi_sink(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;

	return connector->base.display_info.is_hdmi;
}

/* FEC is supported on TGL+, and on ICL except port A and MST. */
static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *pipe_config)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;

	if (DISPLAY_VER(display) >= 12)
		return true;

	if (DISPLAY_VER(display) == 11 && encoder->port != PORT_A &&
	    !intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
		return true;

	return false;
}

bool intel_dp_supports_fec(struct intel_dp *intel_dp,
			   const struct intel_connector *connector,
			   const struct intel_crtc_state *pipe_config)
{
	return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
		drm_dp_sink_supports_fec(connector->dp.fec_capability);
}

/*
 * DSC is usable for this state if both ends support it, the source DSC
 * engine supports the state, and (for SST, where it is mandated by spec)
 * FEC is also available.
 */
bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
			   const struct intel_connector *connector,
			   const struct intel_crtc_state *crtc_state)
{
	if (!intel_dp_has_dsc(connector))
		return false;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP) &&
	    !intel_dp_supports_fec(intel_dp, connector, crtc_state))
		return false;

	return intel_dsc_source_support(crtc_state);
}

/*
 * Highest bpc in [8..bpc] (stepping by 2) that satisfies both the HDMI
 * deep color capability and the DFP TMDS clock range, or -EINVAL.
 */
static int intel_dp_hdmi_compute_bpc(struct intel_dp *intel_dp,
				     const struct intel_crtc_state *crtc_state,
				     int bpc, bool respect_downstream_limits)
{
	int clock = crtc_state->hw.adjusted_mode.crtc_clock;

	/*
	 * Current bpc could already be below 8bpc due to
	 * FDI
	 * bandwidth constraints or other limits.
	 * HDMI minimum is 8bpc however.
	 */
	bpc = max(bpc, 8);

	/*
	 * We will never exceed downstream TMDS clock limits while
	 * attempting deep color. If the user insists on forcing an
	 * out of spec mode they will have to be satisfied with 8bpc.
	 */
	if (!respect_downstream_limits)
		bpc = 8;

	for (; bpc >= 8; bpc -= 2) {
		if (intel_hdmi_bpc_possible(crtc_state, bpc,
					    intel_dp_has_hdmi_sink(intel_dp)) &&
		    intel_dp_tmds_clock_valid(intel_dp, clock, bpc, crtc_state->sink_format,
					      respect_downstream_limits) == MODE_OK)
			return bpc;
	}

	return -EINVAL;
}

/*
 * Max pipe bpp for the state, clamped by the DFP max bpc, any HDMI DFP
 * TMDS constraints, and (for eDP without EDID bpc) the VBT panel bpp.
 * Returns 0 if no valid bpc exists for an HDMI DFP.
 */
static int intel_dp_max_bpp(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state,
			    bool respect_downstream_limits)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	int bpp, bpc;

	bpc = crtc_state->max_pipe_bpp / 3;

	if (intel_dp->dfp.max_bpc)
		bpc = min_t(int, bpc, intel_dp->dfp.max_bpc);

	/* A non-zero min TMDS clock identifies an HDMI DFP. */
	if (intel_dp->dfp.min_tmds_clock) {
		int max_hdmi_bpc;

		max_hdmi_bpc = intel_dp_hdmi_compute_bpc(intel_dp, crtc_state, bpc,
							 respect_downstream_limits);
		if (max_hdmi_bpc < 0)
			return 0;

		bpc = min(bpc, max_hdmi_bpc);
	}

	bpp = bpc * 3;
	if (intel_dp_is_edp(intel_dp)) {
		/* Get bpp from vbt only for panels that don't have bpp in edid */
		if (connector->base.display_info.bpc == 0 &&
		    connector->panel.vbt.edp.bpp &&
		    connector->panel.vbt.edp.bpp < bpp) {
			drm_dbg_kms(display->drm,
				    "clamping bpp for eDP panel to BIOS-provided %i\n",
				    connector->panel.vbt.edp.bpp);
			bpp = connector->panel.vbt.edp.bpp;
		}
	}

	return bpp;
}

static bool has_seamless_m_n(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);

	/*
	 * Seamless M/N reprogramming only implemented
	 * for BDW+ double buffered M/N registers so far.
	 */
	return HAS_DOUBLE_BUFFERED_M_N(display) &&
		intel_panel_drrs_type(connector) == DRRS_TYPE_SEAMLESS;
}

/*
 * Clock to size the link for: the highest DRRS mode clock when seamless
 * M/N switching is possible, the adjusted mode's clock otherwise.
 */
static int intel_dp_mode_clock(const struct intel_crtc_state *crtc_state,
			       const struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

	/* FIXME a bit of a mess wrt clock vs. crtc_clock */
	if (has_seamless_m_n(connector))
		return intel_panel_highest_mode(connector, adjusted_mode)->clock;
	else
		return adjusted_mode->crtc_clock;
}

/* Optimize link config in order: max bpp, min clock, min lanes */
static int
intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state,
				  const struct link_config_limits *limits)
{
	int bpp, i, lane_count, clock = intel_dp_mode_clock(pipe_config, conn_state);
	int link_rate, link_avail;

	for (bpp = fxp_q4_to_int(limits->link.max_bpp_x16);
	     bpp >= fxp_q4_to_int(limits->link.min_bpp_x16);
	     bpp -= 2 * 3) {
		int link_bpp_x16 =
			intel_dp_output_format_link_bpp_x16(pipe_config->output_format, bpp);

		for (i = 0; i < intel_dp->num_common_rates; i++) {
			link_rate = intel_dp_common_rate(intel_dp, i);
			if (link_rate < limits->min_rate ||
			    link_rate > limits->max_rate)
				continue;

			for (lane_count = limits->min_lane_count;
			     lane_count <= limits->max_lane_count;
			     lane_count <<= 1) {
				const struct drm_display_mode *adjusted_mode =
					&pipe_config->hw.adjusted_mode;
				int mode_rate =
					intel_dp_link_required(link_rate, lane_count,
							       clock, adjusted_mode->hdisplay,
							       link_bpp_x16, 0);

				link_avail = intel_dp_max_link_data_rate(intel_dp,
									 link_rate,
									 lane_count);

				/* First fit wins: highest bpp, lowest rate, fewest lanes. */
				if (mode_rate <= link_avail) {
					pipe_config->lane_count = lane_count;
					pipe_config->pipe_bpp = bpp;
					pipe_config->port_clock = link_rate;

					return 0;
				}
			}
		}
	}

	return -EINVAL;
}

int intel_dp_dsc_max_src_input_bpc(struct intel_display *display)
{
	/* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
	if (DISPLAY_VER(display) >= 12)
		return 12;
	if (DISPLAY_VER(display) == 11)
		return 10;

	return intel_dp_dsc_min_src_input_bpc();
}

/*
 * Smallest sink-supported DSC input bpp that is still >= @min_pipe_bpp,
 * or 0 if none. Assumes drm_dp_dsc_sink_supported_input_bpcs() returns
 * the bpc values in descending order — TODO confirm.
 */
static int align_min_sink_dsc_input_bpp(const struct intel_connector *connector,
					int min_pipe_bpp)
{
	u8 dsc_bpc[3];
	int num_bpc;
	int i;

	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(connector->dp.dsc_dpcd,
						       dsc_bpc);
	for (i = num_bpc - 1; i >= 0; i--) {
		if (dsc_bpc[i] * 3 >= min_pipe_bpp)
			return dsc_bpc[i] * 3;
	}

	return 0;
}

/*
 * Largest sink-supported DSC input bpp that is still <= @max_pipe_bpp,
 * or 0 if none.
 */
static int align_max_sink_dsc_input_bpp(const struct intel_connector *connector,
					int max_pipe_bpp)
{
	u8 dsc_bpc[3];
	int num_bpc;
	int i;

	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(connector->dp.dsc_dpcd,
						       dsc_bpc);
	for (i = 0; i < num_bpc; i++) {
		if (dsc_bpc[i] * 3 <= max_pipe_bpp)
			return dsc_bpc[i] * 3;
	}

	return 0;
}

/*
 * Max DSC input pipe bpp supported by both source and sink, further capped
 * by @max_req_bpc. Returns 0 if no common value exists.
 */
int intel_dp_dsc_compute_max_bpp(const struct intel_connector *connector,
				 u8 max_req_bpc)
{
	struct intel_display *display = to_intel_display(connector);
	int dsc_max_bpc;

	dsc_max_bpc = intel_dp_dsc_max_src_input_bpc(display);

	if (!dsc_max_bpc)
		return dsc_max_bpc;

	dsc_max_bpc = min(dsc_max_bpc, max_req_bpc);

	return align_max_sink_dsc_input_bpp(connector, dsc_max_bpc * 3);
}

/* DSC minor version the source supports: 1.2 on MTL+, 1.1 before. */
static int intel_dp_source_dsc_version_minor(struct intel_display *display)
{
	return DISPLAY_VER(display) >= 14 ? 2 : 1;
}

static int intel_dp_sink_dsc_version_minor(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
{
	return (dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & DP_DSC_MINOR_MASK) >>
		DP_DSC_MINOR_SHIFT;
}

/* Smallest even slice height >= 108 that divides @vactive evenly; 2 as last resort. */
static int intel_dp_get_slice_height(int vactive)
{
	int slice_height;

	/*
	 * VDSC 1.2a spec in Section 3.8 Options for Slices implies that 108
	 * lines is an optimal slice height, but any size can be used as long as
	 * vertical active integer multiple and maximum vertical slice count
	 * requirements are met.
	 */
	for (slice_height = 108; slice_height <= vactive; slice_height += 2)
		if (vactive % slice_height == 0)
			return slice_height;

	/*
	 * Highly unlikely we reach here as most of the resolutions will end up
	 * finding appropriate slice_height in above loop but returning
	 * slice_height as 2 here as it should work with all resolutions.
	 */
	return 2;
}

/*
 * Fill the DSC config from sink DPCD caps and compute the RC parameters.
 * Returns 0 on success or a negative error code.
 */
static int intel_dp_dsc_compute_params(const struct intel_connector *connector,
				       struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(connector);
	struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
	int ret;

	/*
	 * RC_MODEL_SIZE is currently a constant across all configurations.
	 *
	 * FIXME: Look into using sink defined DPCD DP_DSC_RC_BUF_BLK_SIZE and
	 * DP_DSC_RC_BUF_SIZE for this.
 */
	vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
	vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay;

	vdsc_cfg->slice_height = intel_dp_get_slice_height(vdsc_cfg->pic_height);

	ret = intel_dsc_compute_params(crtc_state);
	if (ret)
		return ret;

	/* DSC version: source major/minor from DPCD, minor capped to source support */
	vdsc_cfg->dsc_version_major =
		(connector->dp.dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
		 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
	vdsc_cfg->dsc_version_minor =
		min(intel_dp_source_dsc_version_minor(display),
		    intel_dp_sink_dsc_version_minor(connector->dp.dsc_dpcd));
	if (vdsc_cfg->convert_rgb)
		vdsc_cfg->convert_rgb =
			connector->dp.dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
			DP_DSC_RGB;

	vdsc_cfg->line_buf_depth = min(INTEL_DP_DSC_MAX_LINE_BUF_DEPTH,
				       drm_dp_dsc_sink_line_buf_depth(connector->dp.dsc_dpcd));
	if (!vdsc_cfg->line_buf_depth) {
		drm_dbg_kms(display->drm,
			    "DSC Sink Line Buffer Depth invalid\n");
		return -EINVAL;
	}

	vdsc_cfg->block_pred_enable =
		connector->dp.dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
		DP_DSC_BLK_PREDICTION_IS_SUPPORTED;

	return drm_dsc_compute_rc_parameters(vdsc_cfg);
}

/*
 * Check whether the sink's DSC decoder supports the given output format,
 * based on the DSC DPCD color format capability field.
 */
static bool intel_dp_dsc_supports_format(const struct intel_connector *connector,
					 enum intel_output_format output_format)
{
	struct intel_display *display = to_intel_display(connector);
	u8 sink_dsc_format;

	switch (output_format) {
	case INTEL_OUTPUT_FORMAT_RGB:
		sink_dsc_format = DP_DSC_RGB;
		break;
	case INTEL_OUTPUT_FORMAT_YCBCR444:
		sink_dsc_format = DP_DSC_YCbCr444;
		break;
	case INTEL_OUTPUT_FORMAT_YCBCR420:
		/* Native YCbCr420 needs DSC minor version >= 2 on both ends */
		if (min(intel_dp_source_dsc_version_minor(display),
			intel_dp_sink_dsc_version_minor(connector->dp.dsc_dpcd)) < 2)
			return false;
		sink_dsc_format = DP_DSC_YCbCr420_Native;
		break;
	default:
		return false;
	}

	return drm_dp_dsc_sink_supports_format(connector->dp.dsc_dpcd, sink_dsc_format);
}

/*
 * Check that the link's available data rate is at least the rate required by
 * the mode at the given compressed link bpp (.4 fixed point).
 */
static bool is_bw_sufficient_for_dsc_config(struct intel_dp *intel_dp,
					    int link_clock, int lane_count,
					    int mode_clock, int mode_hdisplay,
					    int dsc_slice_count, int link_bpp_x16,
					    unsigned long bw_overhead_flags)
{
	int available_bw;
	int required_bw;

	available_bw = intel_dp_max_link_data_rate(intel_dp, link_clock, lane_count);
	required_bw = intel_dp_link_required(link_clock, lane_count,
					     mode_clock, mode_hdisplay,
					     link_bpp_x16, bw_overhead_flags);

	return available_bw >= required_bw;
}

/*
 * Walk the common link rates (lowest first) and lane counts within limits,
 * returning 0 with ->port_clock/->lane_count set on the first configuration
 * that can carry the mode at dsc_bpp_x16, or -EINVAL if none can.
 */
static int dsc_compute_link_config(struct intel_dp *intel_dp,
				   struct intel_crtc_state *pipe_config,
				   struct drm_connector_state *conn_state,
				   const struct link_config_limits *limits,
				   int dsc_bpp_x16)
{
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int link_rate, lane_count;
	int i;

	for (i = 0; i < intel_dp->num_common_rates; i++) {
		link_rate = intel_dp_common_rate(intel_dp, i);
		if (link_rate < limits->min_rate || link_rate > limits->max_rate)
			continue;

		for (lane_count = limits->min_lane_count;
		     lane_count <= limits->max_lane_count;
		     lane_count <<= 1) {

			/*
			 * FIXME: intel_dp_mtp_tu_compute_config() requires
			 * ->lane_count and ->port_clock set before we know
			 * they'll work. If we end up failing altogether,
			 * they'll remain in crtc state. This shouldn't matter,
			 * as we'd then bail out from compute config, but it's
			 * just ugly.
			 */
			pipe_config->lane_count = lane_count;
			pipe_config->port_clock = link_rate;

			if (drm_dp_is_uhbr_rate(link_rate)) {
				int ret;

				ret = intel_dp_mtp_tu_compute_config(intel_dp,
								     pipe_config,
								     conn_state,
								     dsc_bpp_x16,
								     dsc_bpp_x16,
								     0, true);
				if (ret)
					continue;
			} else {
				unsigned long bw_overhead_flags =
					pipe_config->fec_enable ? DRM_DP_BW_OVERHEAD_FEC : 0;
				int line_slice_count =
					intel_dsc_line_slice_count(&pipe_config->dsc.slice_config);

				if (!is_bw_sufficient_for_dsc_config(intel_dp,
								     link_rate, lane_count,
								     adjusted_mode->crtc_clock,
								     adjusted_mode->hdisplay,
								     line_slice_count,
								     dsc_bpp_x16,
								     bw_overhead_flags))
					continue;
			}

			return 0;
		}
	}

	return -EINVAL;
}

/*
 * Maximum compressed bpp in .4 fixed point the sink can decode: the DPCD
 * advertised value if present, otherwise the DP spec mandated maximum for
 * the output format.
 */
static
u16 intel_dp_dsc_max_sink_compressed_bppx16(const struct intel_connector *connector,
					    enum intel_output_format output_format,
					    int bpc)
{
	u16 max_bppx16 = drm_edp_dsc_sink_output_bpp(connector->dp.dsc_dpcd);

	if (max_bppx16)
		return max_bppx16;
	/*
	 * If support not given in DPCD 67h, 68h use the Maximum Allowed bit rate
	 * values as given in spec Table 2-157 DP v2.0
	 */
	switch (output_format) {
	case INTEL_OUTPUT_FORMAT_RGB:
	case INTEL_OUTPUT_FORMAT_YCBCR444:
		return (3 * bpc) << 4;
	case INTEL_OUTPUT_FORMAT_YCBCR420:
		return (3 * (bpc / 2)) << 4;
	default:
		MISSING_CASE(output_format);
		break;
	}

	return 0;
}

/* Minimum compressed bpp the sink must support for the given output format. */
static int intel_dp_dsc_sink_min_compressed_bpp(enum intel_output_format output_format)
{
	/* From Mandatory bit rate range Support Table 2-157 (DP v2.0) */
	switch (output_format) {
	case INTEL_OUTPUT_FORMAT_RGB:
	case INTEL_OUTPUT_FORMAT_YCBCR444:
		return 8;
	case INTEL_OUTPUT_FORMAT_YCBCR420:
		return 6;
	default:
		MISSING_CASE(output_format);
		break;
	}

	return 0;
}

/* Max compressed bpp the sink can decode, as an integer bpp value. */
static int intel_dp_dsc_sink_max_compressed_bpp(const struct intel_connector *connector,
						enum intel_output_format output_format,
						int bpc)
{
	return intel_dp_dsc_max_sink_compressed_bppx16(connector,
						       output_format, bpc) >> 4;
}

int intel_dp_dsc_min_src_compressed_bpp(void)
{
	/* Min Compressed bpp supported by source is 8 */
	return 8;
}

/* Max compressed bpp the source (platform) supports. */
static int dsc_src_max_compressed_bpp(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);

	/*
	 * Forcing DSC and using the platform's max compressed bpp is seen to cause
	 * underruns. Since DSC isn't needed in these cases, limit the
	 * max compressed bpp to 18, which is a safe value across platforms with different
	 * pipe bpps.
	 */
	if (intel_dp->force_dsc_en)
		return 18;

	/*
	 * Max Compressed bpp for Gen 13+ is 27bpp.
	 * For earlier platform is 23bpp. (Bspec:49259).
	 */
	if (DISPLAY_VER(display) < 13)
		return 23;
	else
		return 27;
}

/*
 * Compressed bpp step size in .4 fixed point, derived from the sink's
 * bpp increment capability; 1 bpp steps when fractional bpp isn't usable.
 *
 * Note: for pre-13 display you still need to check the validity of each step.
 */
int intel_dp_dsc_bpp_step_x16(const struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	u8 incr = drm_dp_dsc_sink_bpp_incr(connector->dp.dsc_dpcd);

	if (DISPLAY_VER(display) < 14 || !incr)
		return fxp_q4_from_int(1);

	/* On MST only use fractional steps when forced or a bpp is forced */
	if (connector->mst.dp &&
	    !connector->link.force_bpp_x16 && !connector->mst.dp->force_dsc_fractional_bpp_en)
		return fxp_q4_from_int(1);

	/* fxp q4 */
	return fxp_q4_from_int(1) / incr;
}

/*
 * Check whether the given compressed bpp value is usable on this platform.
 *
 * Note: for bpp_x16 to be valid it must be also within the source/sink's
 * min..max bpp capability range.
 */
bool intel_dp_dsc_valid_compressed_bpp(struct intel_dp *intel_dp, int bpp_x16)
{
	struct intel_display *display = to_intel_display(intel_dp);

	if (DISPLAY_VER(display) >= 13) {
		/* When fractional bpp is forced, reject integer-only values */
		if (intel_dp->force_dsc_fractional_bpp_en && !fxp_q4_to_frac(bpp_x16))
			return false;

		return true;
	}

	/* Pre-13 platforms: integer bpp only, and it must be VESA aligned */
	if (fxp_q4_to_frac(bpp_x16))
		return false;

	return align_max_vesa_compressed_bpp_x16(bpp_x16) == bpp_x16;
}

/* Round min_bpp_x16 up to the platform's valid compressed bpp alignment. */
static int align_min_compressed_bpp_x16(const struct intel_connector *connector, int min_bpp_x16)
{
	struct intel_display *display = to_intel_display(connector);

	if (DISPLAY_VER(display) >= 13) {
		int bpp_step_x16 = intel_dp_dsc_bpp_step_x16(connector);

		drm_WARN_ON(display->drm, !is_power_of_2(bpp_step_x16));

		return round_up(min_bpp_x16, bpp_step_x16);
	} else {
		return align_min_vesa_compressed_bpp_x16(min_bpp_x16);
	}
}

/*
 * Round max_bpp_x16 down to the platform's valid compressed bpp alignment,
 * keeping it below the uncompressed link bpp by at least one step.
 */
static int align_max_compressed_bpp_x16(const struct intel_connector *connector,
					enum intel_output_format output_format,
					int pipe_bpp, int max_bpp_x16)
{
	struct intel_display *display = to_intel_display(connector);
	int link_bpp_x16 = intel_dp_output_format_link_bpp_x16(output_format, pipe_bpp);
	int bpp_step_x16 = intel_dp_dsc_bpp_step_x16(connector);

	max_bpp_x16 = min(max_bpp_x16, link_bpp_x16 - bpp_step_x16);

	if (DISPLAY_VER(display) >= 13) {
		drm_WARN_ON(display->drm, !is_power_of_2(bpp_step_x16));

		return round_down(max_bpp_x16, bpp_step_x16);
	} else {
		return align_max_vesa_compressed_bpp_x16(max_bpp_x16);
	}
}

/*
 * Find the max compressed BPP we can find a link configuration for. The BPPs to
 * try depend on the source (platform) and sink.
 */
static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp,
				      struct intel_crtc_state *pipe_config,
				      struct drm_connector_state *conn_state,
				      const struct link_config_limits *limits,
				      int pipe_bpp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	const struct intel_connector *connector = to_intel_connector(conn_state->connector);
	int min_bpp_x16, max_bpp_x16, bpp_step_x16;
	int bpp_x16;
	int ret;

	min_bpp_x16 = limits->link.min_bpp_x16;
	max_bpp_x16 = limits->link.max_bpp_x16;
	bpp_step_x16 = intel_dp_dsc_bpp_step_x16(connector);

	max_bpp_x16 = align_max_compressed_bpp_x16(connector, pipe_config->output_format,
						   pipe_bpp, max_bpp_x16);
	/* eDP: use the max link parameters and bpp without a search */
	if (intel_dp_is_edp(intel_dp)) {
		pipe_config->port_clock = limits->max_rate;
		pipe_config->lane_count = limits->max_lane_count;

		pipe_config->dsc.compressed_bpp_x16 = max_bpp_x16;

		return 0;
	}

	/* Try bpp values from max down to min, one step at a time */
	for (bpp_x16 = max_bpp_x16; bpp_x16 >= min_bpp_x16; bpp_x16 -= bpp_step_x16) {
		if (!intel_dp_dsc_valid_compressed_bpp(intel_dp, bpp_x16))
			continue;

		ret = dsc_compute_link_config(intel_dp,
					      pipe_config,
					      conn_state,
					      limits,
					      bpp_x16);
		if (ret == 0) {
			pipe_config->dsc.compressed_bpp_x16 = bpp_x16;
			if (intel_dp->force_dsc_fractional_bpp_en &&
			    fxp_q4_to_frac(bpp_x16))
				drm_dbg_kms(display->drm,
					    "Forcing DSC fractional bpp\n");

			return 0;
		}
	}

	return -EINVAL;
}

int intel_dp_dsc_min_src_input_bpc(void)
{
	/* Min DSC Input BPC for ICL+ is 8 */
	return 8;
}

/* Check that pipe_bpp falls within the allowed pipe bpp range. */
static
bool is_dsc_pipe_bpp_sufficient(const struct link_config_limits *limits,
				int pipe_bpp)
{
	return pipe_bpp >= limits->pipe.min_bpp &&
	       pipe_bpp <= limits->pipe.max_bpp;
}

/*
 * Return the forced DSC input bpp (force_dsc_bpc * 3) if set and within
 * limits, 0 otherwise.
 */
static
int intel_dp_force_dsc_pipe_bpp(struct intel_dp *intel_dp,
				const struct
				link_config_limits *limits)
{
	struct intel_display *display = to_intel_display(intel_dp);
	int forced_bpp;

	if (!intel_dp->force_dsc_bpc)
		return 0;

	forced_bpp = intel_dp->force_dsc_bpc * 3;

	if (is_dsc_pipe_bpp_sufficient(limits, forced_bpp)) {
		drm_dbg_kms(display->drm, "Input DSC BPC forced to %d\n",
			    intel_dp->force_dsc_bpc);
		return forced_bpp;
	}

	drm_dbg_kms(display->drm,
		    "Cannot force DSC BPC:%d, due to DSC BPC limits\n",
		    intel_dp->force_dsc_bpc);

	return 0;
}

/*
 * Pick the DSC input pipe bpp (the forced value if valid, otherwise the max
 * allowed) and compute the compressed bpp / link configuration for it.
 */
static int intel_dp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
					 struct intel_crtc_state *pipe_config,
					 struct drm_connector_state *conn_state,
					 const struct link_config_limits *limits)
{
	int forced_bpp, pipe_bpp;
	int ret;

	forced_bpp = intel_dp_force_dsc_pipe_bpp(intel_dp, limits);
	if (forced_bpp)
		pipe_bpp = forced_bpp;
	else
		pipe_bpp = limits->pipe.max_bpp;

	ret = dsc_compute_compressed_bpp(intel_dp, pipe_config, conn_state,
					 limits, pipe_bpp);
	if (ret)
		return -EINVAL;

	pipe_config->pipe_bpp = pipe_bpp;

	return 0;
}

/*
 * Return whether FEC must be enabled for 8b10b SST or MST links. On 128b132b
 * links FEC is always enabled implicitly by the HW, so this function returns
 * false for that case.
 */
bool intel_dp_needs_8b10b_fec(const struct intel_crtc_state *crtc_state,
			      bool dsc_enabled_on_crtc)
{
	if (intel_dp_is_uhbr(crtc_state))
		return false;

	/*
	 * Though eDP v1.5 supports FEC with DSC, unlike DP, it is optional.
	 * Since, FEC is a bandwidth overhead, continue to not enable it for
	 * eDP. Until, there is a good reason to do so.
	 */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
		return false;

	return dsc_enabled_on_crtc || intel_dsc_enabled_on_link(crtc_state);
}

/* Clear all DSC and FEC related fields in the crtc state. */
void intel_dp_dsc_reset_config(struct intel_crtc_state *crtc_state)
{
	crtc_state->fec_enable = false;

	crtc_state->dsc.compression_enable = false;
	crtc_state->dsc.compressed_bpp_x16 = 0;

	memset(&crtc_state->dsc.slice_config, 0, sizeof(crtc_state->dsc.slice_config));
	memset(&crtc_state->dsc.config, 0, sizeof(crtc_state->dsc.config));
}

/*
 * Compute the full DSC state for the crtc: FEC, pipe bpp, compressed bpp,
 * slice configuration and the DSC engine parameters. Returns 0 on success
 * or a negative error code.
 */
int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
				struct intel_crtc_state *pipe_config,
				struct drm_connector_state *conn_state,
				const struct link_config_limits *limits,
				int timeslots)
{
	struct intel_display *display = to_intel_display(intel_dp);
	const struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	int num_joined_pipes = intel_crtc_num_joined_pipes(pipe_config);
	bool is_mst = intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST);
	int ret;

	/*
	 * FIXME: set the FEC enabled state once pipe_config->port_clock is
	 * already known, so the UHBR/non-UHBR mode can be determined.
	 */
	pipe_config->fec_enable = intel_dp_needs_8b10b_fec(pipe_config, true);

	if (!intel_dp_dsc_supports_format(connector, pipe_config->output_format))
		return -EINVAL;

	/*
	 * Link parameters, pipe bpp and compressed bpp have already been
	 * figured out for DP MST DSC.
	 */
	if (!is_mst) {
		ret = intel_dp_dsc_compute_pipe_bpp(intel_dp, pipe_config,
						    conn_state, limits);
		if (ret) {
			drm_dbg_kms(display->drm,
				    "No Valid pipe bpp for given mode ret = %d\n", ret);
			return ret;
		}
	}

	if (!intel_dp_dsc_get_slice_config(connector, adjusted_mode->crtc_clock,
					   adjusted_mode->crtc_hdisplay, num_joined_pipes,
					   &pipe_config->dsc.slice_config))
		return -EINVAL;

	ret = intel_dp_dsc_compute_params(connector, pipe_config);
	if (ret < 0) {
		drm_dbg_kms(display->drm,
			    "Cannot compute valid DSC parameters for Input Bpp = %d"
			    "Compressed BPP = " FXP_Q4_FMT "\n",
			    pipe_config->pipe_bpp,
			    FXP_Q4_ARGS(pipe_config->dsc.compressed_bpp_x16));
		return ret;
	}

	intel_dsc_enable_on_crtc(pipe_config);

	drm_dbg_kms(display->drm, "DP DSC computed with Input Bpp = %d "
		    "Compressed Bpp = " FXP_Q4_FMT " Slice Count = %d\n",
		    pipe_config->pipe_bpp,
		    FXP_Q4_ARGS(pipe_config->dsc.compressed_bpp_x16),
		    intel_dsc_line_slice_count(&pipe_config->dsc.slice_config));

	return 0;
}

/*
 * Max link bpp limit imposed by a branch device DSC throughput quirk, or
 * INT_MAX when the quirk doesn't apply.
 */
static int
dsc_throughput_quirk_max_bpp_x16(const struct intel_connector *connector,
				 int mode_clock)
{
	if (!connector->dp.dsc_throughput_quirk)
		return INT_MAX;

	/*
	 * Synaptics Panamera branch devices have a problem decompressing a
	 * stream with a compressed link-bpp higher than 12, if the pixel
	 * clock is higher than ~50 % of the maximum overall throughput
	 * reported by the branch device. Work around this by limiting the
	 * maximum link bpp for such pixel clocks.
	 *
	 * TODO: Use the throughput value specific to the actual RGB/YUV
	 * format of the output, after determining the pixel clock limit for
	 * YUV modes.
	 * For now use the smaller of the throughput values, which
	 * may result in limiting the link-bpp value already at a lower than
	 * required mode clock in case of native YUV422/420 output formats.
	 * The RGB/YUV444 throughput value should be always either equal or
	 * smaller than the YUV422/420 value, but let's not depend on this
	 * assumption.
	 */
	if (mode_clock <
	    min(connector->dp.dsc_branch_caps.overall_throughput.rgb_yuv444,
		connector->dp.dsc_branch_caps.overall_throughput.yuv422_420) / 2)
		return INT_MAX;

	return fxp_q4_from_int(12);
}

/*
 * Minimum compressed link bpp in .4 fixed point: the larger of the source
 * and sink minimums, aligned to a valid bpp step.
 */
int intel_dp_compute_min_compressed_bpp_x16(struct intel_connector *connector,
					    enum intel_output_format output_format)
{
	int dsc_src_min_bpp, dsc_sink_min_bpp, dsc_min_bpp;
	int min_bpp_x16;

	dsc_src_min_bpp = intel_dp_dsc_min_src_compressed_bpp();
	dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(output_format);
	dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp);

	min_bpp_x16 = fxp_q4_from_int(dsc_min_bpp);

	min_bpp_x16 = align_min_compressed_bpp_x16(connector, min_bpp_x16);

	return min_bpp_x16;
}

/*
 * Maximum compressed link bpp in .4 fixed point, limited by the source, the
 * sink, the joiner configuration and any branch device throughput quirk,
 * aligned to a valid bpp step.
 */
static int compute_max_compressed_bpp_x16(struct intel_connector *connector,
					  int mode_clock, int mode_hdisplay,
					  int num_joined_pipes,
					  enum intel_output_format output_format,
					  int pipe_max_bpp, int max_link_bpp_x16)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	int dsc_src_max_bpp, dsc_sink_max_bpp, dsc_max_bpp;
	int throughput_max_bpp_x16;
	int joiner_max_bpp;

	dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp);
	joiner_max_bpp = get_max_compressed_bpp_with_joiner(display,
							    mode_clock,
							    mode_hdisplay,
							    num_joined_pipes);
	dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
								output_format,
								pipe_max_bpp / 3);
	dsc_max_bpp = min(dsc_sink_max_bpp, dsc_src_max_bpp);
	dsc_max_bpp = min(dsc_max_bpp, joiner_max_bpp);

	max_link_bpp_x16 = min(max_link_bpp_x16, fxp_q4_from_int(dsc_max_bpp));

	throughput_max_bpp_x16 = dsc_throughput_quirk_max_bpp_x16(connector,
								  mode_clock);
	if (throughput_max_bpp_x16 < max_link_bpp_x16) {
		max_link_bpp_x16 = throughput_max_bpp_x16;

		drm_dbg_kms(display->drm,
			    "[CONNECTOR:%d:%s] Decreasing link max bpp to " FXP_Q4_FMT " due to DSC throughput quirk\n",
			    connector->base.base.id, connector->base.name,
			    FXP_Q4_ARGS(max_link_bpp_x16));
	}

	max_link_bpp_x16 = align_max_compressed_bpp_x16(connector, output_format,
							pipe_max_bpp, max_link_bpp_x16);

	return max_link_bpp_x16;
}

/*
 * Check whether the mode can be carried over the given link with DSC, using
 * the minimum valid compressed bpp.
 */
bool intel_dp_mode_valid_with_dsc(struct intel_connector *connector,
				  int link_clock, int lane_count,
				  int mode_clock, int mode_hdisplay,
				  int num_joined_pipes,
				  enum intel_output_format output_format,
				  int pipe_bpp, unsigned long bw_overhead_flags)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	int min_bpp_x16 = intel_dp_compute_min_compressed_bpp_x16(connector,
								  output_format);
	int max_bpp_x16 = compute_max_compressed_bpp_x16(connector,
							 mode_clock, mode_hdisplay,
							 num_joined_pipes,
							 output_format,
							 pipe_bpp, INT_MAX);
	int dsc_slice_count = intel_dp_dsc_get_slice_count(connector,
							   mode_clock,
							   mode_hdisplay,
							   num_joined_pipes);

	if (min_bpp_x16 <= 0 || min_bpp_x16 > max_bpp_x16)
		return false;

	if (dsc_slice_count == 0)
		return false;

	return is_bw_sufficient_for_dsc_config(intel_dp,
					       link_clock, lane_count,
					       mode_clock, mode_hdisplay,
					       dsc_slice_count, min_bpp_x16,
					       bw_overhead_flags);
}

/*
 * Calculate the output link min, max bpp values in limits based on the pipe bpp
 * range, crtc_state and dsc mode. Return true on success.
 */
static bool
intel_dp_compute_config_link_bpp_limits(struct intel_connector *connector,
					const struct intel_crtc_state *crtc_state,
					bool dsc,
					struct link_config_limits *limits)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	int max_link_bpp_x16;

	max_link_bpp_x16 = min(crtc_state->max_link_bpp_x16,
			       fxp_q4_from_int(limits->pipe.max_bpp));

	if (!dsc) {
		max_link_bpp_x16 = rounddown(max_link_bpp_x16, fxp_q4_from_int(2 * 3));

		if (max_link_bpp_x16 < fxp_q4_from_int(limits->pipe.min_bpp))
			return false;

		limits->link.min_bpp_x16 = fxp_q4_from_int(limits->pipe.min_bpp);
	} else {
		limits->link.min_bpp_x16 =
			intel_dp_compute_min_compressed_bpp_x16(connector,
								crtc_state->output_format);

		max_link_bpp_x16 =
			compute_max_compressed_bpp_x16(connector,
						       adjusted_mode->crtc_clock,
						       adjusted_mode->hdisplay,
						       intel_crtc_num_joined_pipes(crtc_state),
						       crtc_state->output_format,
						       limits->pipe.max_bpp,
						       max_link_bpp_x16);
	}

	limits->link.max_bpp_x16 = max_link_bpp_x16;

	drm_dbg_kms(display->drm,
		    "[ENCODER:%d:%s][CRTC:%d:%s] DP link limits: pixel clock %d kHz DSC %s max lanes %d max rate %d max pipe_bpp %d min link_bpp " FXP_Q4_FMT " max link_bpp " FXP_Q4_FMT "\n",
		    encoder->base.base.id, encoder->base.name,
		    crtc->base.base.id, crtc->base.name,
		    adjusted_mode->crtc_clock,
		    str_on_off(dsc),
		    limits->max_lane_count,
		    limits->max_rate,
		    limits->pipe.max_bpp,
		    FXP_Q4_ARGS(limits->link.min_bpp_x16),
		    FXP_Q4_ARGS(limits->link.max_bpp_x16));

	if (limits->link.min_bpp_x16 <= 0 ||
	    limits->link.min_bpp_x16 > limits->link.max_bpp_x16)
		return false;

	return true;
}

/*
 * Clamp the pipe bpp limits to the DSC source/sink input bpp capabilities.
 * Return false if the resulting range is empty.
 */
static bool
intel_dp_dsc_compute_pipe_bpp_limits(struct intel_connector *connector,
				     struct link_config_limits *limits)
{
	struct intel_display *display = to_intel_display(connector);
	const struct link_config_limits orig_limits = *limits;
	int dsc_min_bpc = intel_dp_dsc_min_src_input_bpc();
	int dsc_max_bpc = intel_dp_dsc_max_src_input_bpc(display);

	limits->pipe.min_bpp = max(limits->pipe.min_bpp, dsc_min_bpc * 3);
	limits->pipe.min_bpp = align_min_sink_dsc_input_bpp(connector, limits->pipe.min_bpp);

	limits->pipe.max_bpp = min(limits->pipe.max_bpp, dsc_max_bpc * 3);
	limits->pipe.max_bpp = align_max_sink_dsc_input_bpp(connector, limits->pipe.max_bpp);

	if (limits->pipe.min_bpp <= 0 ||
	    limits->pipe.min_bpp > limits->pipe.max_bpp) {
		drm_dbg_kms(display->drm,
			    "[CONNECTOR:%d:%s] Invalid DSC src/sink input BPP (src:%d-%d pipe:%d-%d sink-align:%d-%d)\n",
			    connector->base.base.id, connector->base.name,
			    dsc_min_bpc * 3, dsc_max_bpc * 3,
			    orig_limits.pipe.min_bpp, orig_limits.pipe.max_bpp,
			    limits->pipe.min_bpp, limits->pipe.max_bpp);

		return false;
	}

	return true;
}

/*
 * Compute the link rate, lane count, pipe bpp and link bpp limits used by
 * link configuration computation. Return false if no valid configuration
 * is possible.
 */
bool
intel_dp_compute_config_limits(struct intel_dp *intel_dp,
			       struct drm_connector_state *conn_state,
			       struct intel_crtc_state *crtc_state,
			       bool respect_downstream_limits,
			       bool dsc,
			       struct link_config_limits *limits)
{
	struct intel_display *display = to_intel_display(intel_dp);
	bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);

	limits->min_rate = intel_dp_min_link_rate(intel_dp);
	limits->max_rate = intel_dp_max_link_rate(intel_dp);

	limits->min_rate = min(limits->min_rate, limits->max_rate);

	limits->min_lane_count = intel_dp_min_lane_count(intel_dp);
	limits->max_lane_count = intel_dp_max_lane_count(intel_dp);

	limits->pipe.min_bpp = intel_dp_min_bpp(crtc_state->output_format);
	if (is_mst) {
		/*
		 * FIXME: If all the streams can't fit into the link with their
		 * current pipe_bpp we should reduce pipe_bpp across the board
		 * until things start to fit. Until then we limit to <= 8bpc
		 * since that's what was hardcoded for all MST streams
		 * previously. This hack should be removed once we have the
		 * proper retry logic in place.
		 */
		limits->pipe.max_bpp = min(crtc_state->max_pipe_bpp, 24);
	} else {
		limits->pipe.max_bpp = intel_dp_max_bpp(intel_dp, crtc_state,
							respect_downstream_limits);
	}

	/* HDR without DSC: require at least 30 bpp when it can be supported */
	if (!dsc && intel_dp_in_hdr_mode(conn_state)) {
		if (intel_dp_supports_dsc(intel_dp, connector, crtc_state) &&
		    limits->pipe.max_bpp >= 30)
			limits->pipe.min_bpp = max(limits->pipe.min_bpp, 30);
		else
			drm_dbg_kms(display->drm,
				    "[CONNECTOR:%d:%s] Can't force 30 bpp for HDR (pipe bpp: %d-%d DSC-support: %s)\n",
				    connector->base.base.id, connector->base.name,
				    limits->pipe.min_bpp, limits->pipe.max_bpp,
				    str_yes_no(intel_dp_supports_dsc(intel_dp, connector,
								     crtc_state)));
	}

	if (limits->pipe.min_bpp <= 0 ||
	    limits->pipe.min_bpp > limits->pipe.max_bpp) {
		drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] Invalid pipe bpp range: %d-%d\n",
			    connector->base.base.id, connector->base.name,
			    limits->pipe.min_bpp, limits->pipe.max_bpp);

		return false;
	}

	if (dsc && !intel_dp_dsc_compute_pipe_bpp_limits(connector, limits))
		return false;

	/*
	 * crtc_state->pipe_bpp is the non-DP specific baseline (platform /
	 * EDID) maximum pipe BPP limited by the max-BPC connector property
	 * request. Since by now pipe.max_bpp is <= the above baseline
	 * maximum BPP, the only remaining reason for adjusting pipe.max_bpp
	 * is the max-BPC connector property request. Adjust pipe.max_bpp to
	 * this request within the current valid pipe.min_bpp .. pipe.max_bpp
	 * range.
	 */
	limits->pipe.max_bpp = clamp(crtc_state->pipe_bpp, limits->pipe.min_bpp,
				     limits->pipe.max_bpp);
	if (dsc)
		limits->pipe.max_bpp = align_max_sink_dsc_input_bpp(connector,
								    limits->pipe.max_bpp);

	if (limits->pipe.max_bpp != crtc_state->pipe_bpp)
		drm_dbg_kms(display->drm,
			    "[CONNECTOR:%d:%s] Adjusting requested max pipe bpp %d -> %d\n",
			    connector->base.base.id, connector->base.name,
			    crtc_state->pipe_bpp, limits->pipe.max_bpp);

	if (is_mst || intel_dp->use_max_params) {
		/*
		 * For MST we always configure max link bw - the spec doesn't
		 * seem to suggest we should do otherwise.
		 *
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertizes being capable of in case the initial fast
		 * optimal params failed us. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically on older panels these
		 * values correspond to the native resolution of the panel.
		 */
		limits->min_lane_count = limits->max_lane_count;
		limits->min_rate = limits->max_rate;
	}

	intel_dp_test_compute_config(intel_dp, crtc_state, limits);

	return intel_dp_compute_config_link_bpp_limits(connector,
						       crtc_state,
						       dsc,
						       limits);
}

/* Link data rate required by the crtc state's mode and link bpp. */
int intel_dp_config_required_rate(const struct intel_crtc_state *crtc_state)
{
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	int link_bpp_x16 = crtc_state->dsc.compression_enable ?
		crtc_state->dsc.compressed_bpp_x16 :
		fxp_q4_from_int(crtc_state->pipe_bpp);

	return intel_dp_link_required(crtc_state->port_clock, crtc_state->lane_count,
				      adjusted_mode->crtc_clock, adjusted_mode->hdisplay,
				      link_bpp_x16, 0);
}

/* Whether the given joiner configuration requires DSC on this platform. */
bool intel_dp_joiner_needs_dsc(struct intel_display *display,
			       int num_joined_pipes)
{
	/*
	 * Pipe joiner needs compression up to display 12 due to bandwidth
	 * limitation. DG2 onwards pipe joiner can be enabled without
	 * compression.
	 * Ultrajoiner always needs compression.
	 */
	return (!HAS_UNCOMPRESSED_JOINER(display) && num_joined_pipes == 2) ||
	       num_joined_pipes == 4;
}

/*
 * Compute the link configuration for the current joiner candidate, first
 * without DSC and then, if that fails or DSC is needed/forced, with DSC.
 */
static int
intel_dp_compute_link_for_joined_pipes(struct intel_encoder *encoder,
				       struct intel_crtc_state *pipe_config,
				       struct drm_connector_state *conn_state,
				       bool respect_downstream_limits)
{
	struct intel_display *display = to_intel_display(encoder);
	int num_joined_pipes = intel_crtc_num_joined_pipes(pipe_config);
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct link_config_limits limits;
	bool dsc_needed, joiner_needs_dsc;
	int ret = 0;

	intel_dp_dsc_reset_config(pipe_config);

	joiner_needs_dsc = intel_dp_joiner_needs_dsc(display, num_joined_pipes);

	dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en ||
		     !intel_dp_compute_config_limits(intel_dp, conn_state, pipe_config,
						     respect_downstream_limits,
						     false,
						     &limits);

	if (!dsc_needed) {
		/*
		 * Optimize for slow and wide for everything, because there are some
		 * eDP 1.3 and 1.4 panels don't work well with fast and narrow.
		 */
		ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config,
							conn_state, &limits);
		if (!ret && intel_dp_is_uhbr(pipe_config))
			ret = intel_dp_mtp_tu_compute_config(intel_dp,
							     pipe_config,
							     conn_state,
							     fxp_q4_from_int(pipe_config->pipe_bpp),
							     fxp_q4_from_int(pipe_config->pipe_bpp),
							     0, false);

		/* Fall back to DSC if the uncompressed config didn't work out */
		if (ret ||
		    !intel_dp_dotclk_valid(display,
					   adjusted_mode->crtc_clock,
					   adjusted_mode->crtc_htotal,
					   0,
					   num_joined_pipes))
			dsc_needed = true;
	}

	if (dsc_needed && !intel_dp_supports_dsc(intel_dp, connector, pipe_config)) {
		drm_dbg_kms(display->drm, "DSC required but not available\n");
		return -EINVAL;
	}

	if (dsc_needed) {
		int dsc_slice_count;

		drm_dbg_kms(display->drm,
			    "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
			    str_yes_no(ret), str_yes_no(joiner_needs_dsc),
			    str_yes_no(intel_dp->force_dsc_en));

		if (!intel_dp_compute_config_limits(intel_dp, conn_state, pipe_config,
						    respect_downstream_limits,
						    true,
						    &limits))
			return -EINVAL;

		ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
						  conn_state, &limits, 64);
		if (ret < 0)
			return ret;

		dsc_slice_count = intel_dsc_line_slice_count(&pipe_config->dsc.slice_config);

		if (!intel_dp_dotclk_valid(display,
					   adjusted_mode->crtc_clock,
					   adjusted_mode->crtc_htotal,
					   dsc_slice_count,
					   num_joined_pipes))
			return -EINVAL;
	}

	drm_dbg_kms(display->drm,
		    "DP lane count %d clock %d bpp input %d compressed " FXP_Q4_FMT " HDR %s link rate required %d available %d\n",
		    pipe_config->lane_count, pipe_config->port_clock,
		    pipe_config->pipe_bpp,
		    FXP_Q4_ARGS(pipe_config->dsc.compressed_bpp_x16),
		    str_yes_no(intel_dp_in_hdr_mode(conn_state)),
		    intel_dp_config_required_rate(pipe_config),
		    intel_dp_max_link_data_rate(intel_dp,
						pipe_config->port_clock,
						pipe_config->lane_count));

	return 0;
}

/*
 * Compute the link configuration, trying each joiner candidate (number of
 * joined pipes) in turn until one succeeds or deadlock backoff is needed.
 */
static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
			     struct intel_crtc_state *crtc_state,
			     struct drm_connector_state *conn_state,
			     bool respect_downstream_limits)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	int num_joined_pipes;
	int ret = -EINVAL;

	if (crtc_state->fec_enable &&
	    !intel_dp_supports_fec(intel_dp, connector, crtc_state))
		return -EINVAL;

	for_each_joiner_candidate(connector, adjusted_mode, num_joined_pipes) {
		/*
		 * NOTE:
		 * The crtc_state->joiner_pipes should have been set at the end
		 * only if all the conditions are met. However that would mean
		 * that num_joined_pipes is passed around to all helpers and
		 * make them use it instead of using crtc_state->joiner_pipes
		 * directly or indirectly (via intel_crtc_num_joined_pipes()).
		 *
		 * For now, setting crtc_state->joiner_pipes to the candidate
		 * value to avoid the above churn and resetting it to 0, in case
		 * no joiner candidate is found to be suitable for the given
		 * configuration.
	 */
		if (num_joined_pipes > 1)
			crtc_state->joiner_pipes = GENMASK(crtc->pipe + num_joined_pipes - 1,
							   crtc->pipe);

		ret = intel_dp_compute_link_for_joined_pipes(encoder, crtc_state, conn_state,
							     respect_downstream_limits);
		if (ret == 0 || ret == -EDEADLK)
			break;
	}

	/* No joiner config succeeded: clear any partially set joiner state. */
	if (ret < 0)
		crtc_state->joiner_pipes = 0;

	return ret;
}

/*
 * Decide whether RGB output should use the CTA limited quantization
 * range (16-235) for this state, honoring the connector's
 * "Broadcast RGB" property.
 */
bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
				  const struct drm_connector_state *conn_state)
{
	const struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	/*
	 * Our YCbCr output is always limited range.
	 * crtc_state->limited_color_range only applies to RGB,
	 * and it must never be set for YCbCr or we risk setting
	 * some conflicting bits in TRANSCONF which will mess up
	 * the colors on the monitor.
	 */
	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		return false;

	if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 *
		 * 6 bpc (pipe_bpp == 18) always stays full range.
		 */
		return crtc_state->pipe_bpp != 18 &&
			drm_default_rgb_quant_range(adjusted_mode) ==
			HDMI_QUANTIZATION_RANGE_LIMITED;
	} else {
		return intel_conn_state->broadcast_rgb ==
			INTEL_BROADCAST_RGB_LIMITED;
	}
}

/* g4x has no DP audio; before display 12, port A has no audio either. */
static bool intel_dp_port_has_audio(struct intel_display *display, enum port port)
{
	if (display->platform.g4x)
		return false;
	if (DISPLAY_VER(display) < 12 && port == PORT_A)
		return false;

	return true;
}

/* Fill out the pixel encoding/colorimetry fields of the VSC SDP. */
static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state,
					     const struct drm_connector_state *conn_state,
					     struct drm_dp_vsc_sdp *vsc)
{
	struct intel_display *display = to_intel_display(crtc_state);

	if (crtc_state->has_panel_replay) {
		/*
		 * Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223
		 * VSC SDP supporting 3D stereo, Panel Replay, and Pixel
		 * Encoding/Colorimetry Format indication.
		 */
		vsc->revision = 0x7;
	} else {
		/*
		 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
		 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
		 * Colorimetry Format indication.
		 */
		vsc->revision = 0x5;
	}

	vsc->length = 0x13;

	/* DP 1.4a spec, Table 2-120 */
	switch (crtc_state->output_format) {
	case INTEL_OUTPUT_FORMAT_YCBCR444:
		vsc->pixelformat = DP_PIXELFORMAT_YUV444;
		break;
	case INTEL_OUTPUT_FORMAT_YCBCR420:
		vsc->pixelformat = DP_PIXELFORMAT_YUV420;
		break;
	case INTEL_OUTPUT_FORMAT_RGB:
	default:
		vsc->pixelformat = DP_PIXELFORMAT_RGB;
	}

	switch (conn_state->colorspace) {
	case DRM_MODE_COLORIMETRY_BT709_YCC:
		vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_XVYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_709:
		vsc->colorimetry = DP_COLORIMETRY_XVYCC_709;
		break;
	case DRM_MODE_COLORIMETRY_SYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_SYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_OPYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_OPYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_CYCC:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_RGB:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_YCC:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC;
		break;
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
		vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB;
		break;
	default:
		/*
		 * RGB->YCBCR color conversion uses the BT.709
		 * color space.
		 */
		if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
			vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
		else
			vsc->colorimetry = DP_COLORIMETRY_DEFAULT;
		break;
	}

	vsc->bpc = crtc_state->pipe_bpp / 3;

	/* only RGB pixelformat supports 6 bpc */
	drm_WARN_ON(display->drm,
		    vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB);

	/* all YCbCr are always limited range */
	vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA;
	vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED;
}

/*
 * Build the Adaptive Sync SDP. Only emitted when VRR is enabled and the
 * sink supports the AS SDP.
 */
static void intel_dp_compute_as_sdp(struct intel_dp *intel_dp,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_dp_as_sdp *as_sdp = &crtc_state->infoframes.as_sdp;
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	if (!crtc_state->vrr.enable || !intel_dp->as_sdp_supported)
		return;

	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC);

	as_sdp->sdp_type = DP_SDP_ADAPTIVE_SYNC;
	as_sdp->length = 0x9;
	as_sdp->duration_incr_ms = 0;
	as_sdp->vtotal = intel_vrr_vmin_vtotal(crtc_state);

	if (crtc_state->cmrr.enable) {
		/* Fixed average vtotal mode: report the target refresh rate. */
		as_sdp->mode = DP_AS_SDP_FAVT_TRR_REACHED;
		as_sdp->target_rr = drm_mode_vrefresh(adjusted_mode);
		as_sdp->target_rr_divider = true;
	} else {
		as_sdp->mode = DP_AS_SDP_AVT_DYNAMIC_VTOTAL;
		as_sdp->target_rr = 0;
	}
}

/*
 * Build the VSC SDP, picking the header revision/length based on whether
 * colorimetry indication, Panel Replay, selective update (PSR2) or plain
 * PSR is in use.
 */
static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
				     struct intel_crtc_state *crtc_state,
				     const struct drm_connector_state *conn_state)
{
	struct drm_dp_vsc_sdp *vsc;

	if ((!intel_dp->colorimetry_support ||
	     !intel_dp_needs_vsc_sdp(crtc_state, conn_state)) &&
	    !crtc_state->has_psr)
		return;

	vsc = &crtc_state->infoframes.vsc;

	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
	vsc->sdp_type = DP_SDP_VSC;

	/* Needs colorimetry */
	if (intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
		intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
						 vsc);
	} else if (crtc_state->has_panel_replay) {
		/*
		 * [Panel Replay without colorimetry info]
		 * Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223
		 * VSC SDP supporting 3D stereo + Panel Replay.
		 */
		vsc->revision = 0x6;
		vsc->length = 0x10;
	} else if (crtc_state->has_sel_update) {
		/*
		 * [PSR2 without colorimetry]
		 * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11
		 * 3D stereo + PSR/PSR2 + Y-coordinate.
		 */
		vsc->revision = 0x4;
		vsc->length = 0xe;
	} else {
		/*
		 * [PSR1]
		 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
		 * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or
		 * higher).
		 */
		vsc->revision = 0x2;
		vsc->length = 0x8;
	}
}

/*
 * Return true if the connector state requests HDR output, i.e. the HDR
 * output metadata specifies the SMPTE ST 2084 (PQ) EOTF.
 */
bool
intel_dp_in_hdr_mode(const struct drm_connector_state *conn_state)
{
	struct hdr_output_metadata *hdr_metadata;

	if (!conn_state->hdr_output_metadata)
		return false;

	hdr_metadata = conn_state->hdr_output_metadata->data;

	return hdr_metadata->hdmi_metadata_type1.eotf == HDMI_EOTF_SMPTE_ST2084;
}

/* Fill the HDR metadata infoframe from the connector's HDR output metadata, if any. */
static void
intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
					    struct intel_crtc_state *crtc_state,
					    const struct drm_connector_state *conn_state)
{
	struct intel_display *display = to_intel_display(intel_dp);
	int ret;
	struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm;

	if (!conn_state->hdr_output_metadata)
		return;

	ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state);

	if (ret) {
		drm_dbg_kms(display->drm,
			    "couldn't set HDR metadata in infoframe\n");
		return;
	}

	crtc_state->infoframes.enable |=
		intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
}

static bool can_enable_drrs(struct intel_connector *connector,
			    const struct intel_crtc_state *pipe_config,
			    const struct drm_display_mode *downclock_mode)
{
	struct intel_display *display = to_intel_display(connector);

	/* No DRRS with VRR enabled. */
	if (pipe_config->vrr.enable)
		return false;

	/*
	 * DRRS and PSR can't be enable together, so giving preference to PSR
	 * as it allows more power-savings by complete shutting down display,
	 * so to guarantee this, intel_drrs_compute_config() must be called
	 * after intel_psr_compute_config().
	 */
	if (pipe_config->has_psr)
		return false;

	/* FIXME missing FDI M2/N2 etc. */
	if (pipe_config->has_pch_encoder)
		return false;

	if (!intel_cpu_transcoder_has_drrs(display, pipe_config->cpu_transcoder))
		return false;

	return downclock_mode &&
		intel_panel_drrs_type(connector) == DRRS_TYPE_SEAMLESS;
}

static void
intel_dp_drrs_compute_config(struct intel_connector *connector,
			     struct intel_crtc_state *pipe_config,
			     int link_bpp_x16)
{
	struct intel_display *display = to_intel_display(connector);
	const struct drm_display_mode *downclock_mode =
		intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode);
	int pixel_clock;

	/*
	 * FIXME all joined pipes share the same transcoder.
	 * Need to account for that when updating M/N live.
	 */
	if (has_seamless_m_n(connector) && !pipe_config->joiner_pipes)
		pipe_config->update_m_n = true;

	if (!can_enable_drrs(connector, pipe_config, downclock_mode)) {
		/* Make sure no stale M2/N2 values are left behind. */
		if (intel_cpu_transcoder_has_m2_n2(display, pipe_config->cpu_transcoder))
			intel_zero_m_n(&pipe_config->dp_m2_n2);
		return;
	}

	if (display->platform.ironlake || display->platform.sandybridge ||
	    display->platform.ivybridge)
		pipe_config->msa_timing_delay = connector->panel.vbt.edp.drrs_msa_timing_delay;

	pipe_config->has_drrs = true;

	pixel_clock = downclock_mode->clock;
	if (pipe_config->splitter.enable)
		pixel_clock /= pipe_config->splitter.link_count;

	/* Compute the M2/N2 values for the downclocked (low refresh) mode. */
	intel_link_compute_m_n(link_bpp_x16, pipe_config->lane_count, pixel_clock,
			       pipe_config->port_clock,
			       intel_dp_bw_fec_overhead(pipe_config->fec_enable),
			       &pipe_config->dp_m2_n2);

	/* FIXME: abstract this better */
	if (pipe_config->splitter.enable)
		pipe_config->dp_m2_n2.data_m *= pipe_config->splitter.link_count;
}

/* Resolve the "force audio" connector property against the sink's audio capability. */
static bool intel_dp_has_audio(struct intel_encoder *encoder,
			       const struct drm_connector_state *conn_state)
{
	struct intel_display *display = to_intel_display(encoder);
	const struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);

	if (!intel_dp_port_has_audio(display, encoder->port))
		return false;

	if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
		return connector->base.display_info.has_audio;
	else
		return intel_conn_state->force_audio == HDMI_AUDIO_ON;
}

/*
 * Pick the output format (RGB/YCbCr) and compute the link config for it,
 * falling back to YCbCr 4:2:0 if the first attempt fails and the mode
 * also supports 4:2:0.
 */
static int
intel_dp_compute_output_format(struct intel_encoder *encoder,
			       struct intel_crtc_state *crtc_state,
			       struct drm_connector_state *conn_state,
			       bool respect_downstream_limits)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_connector *connector = intel_dp->attached_connector;
	const struct drm_display_info *info = &connector->base.display_info;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	bool ycbcr_420_only;
	int ret;

	ycbcr_420_only = drm_mode_is_420_only(info, adjusted_mode);

	if (ycbcr_420_only && !connector->base.ycbcr_420_allowed) {
		drm_dbg_kms(display->drm,
			    "YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. Falling back to RGB.\n");
		crtc_state->sink_format = INTEL_OUTPUT_FORMAT_RGB;
	} else {
		crtc_state->sink_format = intel_dp_sink_format(connector, adjusted_mode);
	}

	crtc_state->output_format = intel_dp_output_format(connector, crtc_state->sink_format);

	ret = intel_dp_compute_link_config(encoder, crtc_state, conn_state,
					   respect_downstream_limits);
	if (ret) {
		/*
		 * Retry with YCbCr 4:2:0 only if the link config failed, we
		 * weren't already using 4:2:0, and the mode/connector allow it.
		 */
		if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
		    !connector->base.ycbcr_420_allowed ||
		    !drm_mode_is_420_also(info, adjusted_mode))
			return ret;

		crtc_state->sink_format = INTEL_OUTPUT_FORMAT_YCBCR420;
		crtc_state->output_format = intel_dp_output_format(connector,
								   crtc_state->sink_format);
		ret = intel_dp_compute_link_config(encoder, crtc_state, conn_state,
						   respect_downstream_limits);
	}

	return ret;
}

void
intel_dp_audio_compute_config(struct intel_encoder *encoder,
			      struct intel_crtc_state *pipe_config,
			      struct drm_connector_state *conn_state)
{
	pipe_config->has_audio =
		intel_dp_has_audio(encoder, conn_state) &&
		intel_audio_compute_config(encoder, pipe_config, conn_state);

	/* SDP splitting is used only with audio on UHBR links. */
	pipe_config->sdp_split_enable = pipe_config->has_audio &&
					intel_dp_is_uhbr(pipe_config);
}

/*
 * Queue the modeset retry work for the connector(s) on this link. On MST
 * that is every connector in @state using this intel_dp; on SST just the
 * attached connector.
 */
void
intel_dp_queue_modeset_retry_for_link(struct intel_atomic_state *state,
				      struct intel_encoder *encoder,
				      const struct intel_crtc_state *crtc_state)
{
	struct intel_connector *connector;
	struct intel_digital_connector_state *conn_state;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	int i;

	/* Only queue once per link. */
	if (intel_dp->needs_modeset_retry)
		return;

	intel_dp->needs_modeset_retry = true;

	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
		intel_connector_queue_modeset_retry_work(intel_dp->attached_connector);

		return;
	}

	for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
		if (!conn_state->base.crtc)
			continue;

		if (connector->mst.dp == intel_dp)
			intel_connector_queue_modeset_retry_work(connector);
	}
}

int intel_dp_compute_min_hblank(struct intel_crtc_state *crtc_state,
				const struct drm_connector_state *conn_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	/* 128b/132b links use 32-bit symbols, 8b/10b links 8-bit symbols. */
	int symbol_size = intel_dp_is_uhbr(crtc_state) ? 32 : 8;
	/*
	 * min symbol cycles is 3(BS,VBID, BE) for 128b/132b and
	 * 5(BS, VBID, MVID, MAUD, BE) for 8b/10b
	 */
	int min_sym_cycles = intel_dp_is_uhbr(crtc_state) ?
			     3 : 5;
	bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
	int num_joined_pipes = intel_crtc_num_joined_pipes(crtc_state);
	int min_hblank;
	int max_lane_count = 4;
	int hactive_sym_cycles, htotal_sym_cycles;
	int dsc_slices = 0;
	int link_bpp_x16;

	/* MIN_HBLANK is only programmed on display version 30+. */
	if (DISPLAY_VER(display) < 30)
		return 0;

	/* MIN_HBLANK should be set only for 8b/10b MST or for 128b/132b SST/MST */
	if (!is_mst && !intel_dp_is_uhbr(crtc_state))
		return 0;

	if (crtc_state->dsc.compression_enable) {
		dsc_slices = intel_dp_dsc_get_slice_count(connector,
							  adjusted_mode->crtc_clock,
							  adjusted_mode->crtc_hdisplay,
							  num_joined_pipes);
		if (!dsc_slices) {
			drm_dbg(display->drm, "failed to calculate dsc slice count\n");
			return -EINVAL;
		}
	}

	if (crtc_state->dsc.compression_enable)
		link_bpp_x16 = crtc_state->dsc.compressed_bpp_x16;
	else
		link_bpp_x16 = intel_dp_output_format_link_bpp_x16(crtc_state->output_format,
								   crtc_state->pipe_bpp);

	/* Calculate min Hblank Link Layer Symbol Cycle Count for 8b/10b MST & 128b/132b */
	hactive_sym_cycles = drm_dp_link_symbol_cycles(max_lane_count,
						       adjusted_mode->hdisplay,
						       dsc_slices,
						       link_bpp_x16,
						       symbol_size, is_mst);
	htotal_sym_cycles = adjusted_mode->htotal * hactive_sym_cycles /
			    adjusted_mode->hdisplay;

	min_hblank = htotal_sym_cycles - hactive_sym_cycles;
	/* minimum Hblank calculation: https://groups.vesa.org/wg/DP/document/20494 */
	min_hblank = max(min_hblank, min_sym_cycles);

	/*
	 * adjust the BlankingStart/BlankingEnd framing control from
	 * the calculated value
	 */
	min_hblank = min_hblank - 2;

	/*
	 * min_hblank formula is undergoing a change, to avoid underrun use the
	 * recomended value in spec to compare with the calculated one and use the
	 * minimum value
	 */
	if (intel_dp_is_uhbr(crtc_state)) {
		/*
		 * Note: Bspec requires a min_hblank of 2 for YCBCR420
		 * with compressed bpp 6, but the minimum compressed bpp
		 * supported by the driver is 8.
		 */
		drm_WARN_ON(display->drm,
			    (crtc_state->dsc.compression_enable &&
			     crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
			     crtc_state->dsc.compressed_bpp_x16 < fxp_q4_from_int(8)));
		min_hblank = min(3, min_hblank);
	} else {
		min_hblank = min(10, min_hblank);
	}

	crtc_state->min_hblank = min_hblank;

	return 0;
}

/*
 * Main DP encoder compute_config hook: validate the mode, pick the output
 * format and link configuration, then derive all dependent state (panel
 * fitter, MSO, audio, M/N, VRR, PSR, DRRS, SDPs, tunnel BW).
 */
int
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config,
			struct drm_connector_state *conn_state)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	const struct drm_display_mode *fixed_mode;
	struct intel_connector *connector = intel_dp->attached_connector;
	int ret = 0, link_bpp_x16;

	fixed_mode = intel_panel_fixed_mode(connector, adjusted_mode);
	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		ret = intel_panel_compute_config(connector, adjusted_mode);
		if (ret)
			return ret;
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return -EINVAL;

	if (!connector->base.interlace_allowed &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
		return -EINVAL;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return -EINVAL;

	if (intel_dp_hdisplay_bad(display, adjusted_mode->crtc_hdisplay))
		return -EINVAL;

	/*
	 * Try to respect downstream TMDS clock limits first, if
	 * that fails assume the user might know something we don't.
	 */
	ret = intel_dp_compute_output_format(encoder, pipe_config, conn_state, true);
	if (ret)
		ret = intel_dp_compute_output_format(encoder, pipe_config, conn_state, false);
	if (ret)
		return ret;

	if ((intel_dp_is_edp(intel_dp) && fixed_mode) ||
	    pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
		ret = intel_pfit_compute_config(pipe_config, conn_state);
		if (ret)
			return ret;
	}

	pipe_config->limited_color_range =
		intel_dp_limited_color_range(pipe_config, conn_state);

	if (intel_dp_is_uhbr(pipe_config)) {
		/* 128b/132b SST also needs this */
		pipe_config->mst_master_transcoder = pipe_config->cpu_transcoder;
	} else {
		pipe_config->enhanced_framing =
			drm_dp_enhanced_frame_cap(intel_dp->dpcd);
	}

	if (pipe_config->dsc.compression_enable)
		link_bpp_x16 = pipe_config->dsc.compressed_bpp_x16;
	else
		link_bpp_x16 = intel_dp_output_format_link_bpp_x16(pipe_config->output_format,
								   pipe_config->pipe_bpp);

	if (intel_dp->mso_link_count) {
		int n = intel_dp->mso_link_count;
		int overlap = intel_dp->mso_pixel_overlap;

		pipe_config->splitter.enable = true;
		pipe_config->splitter.link_count = n;
		pipe_config->splitter.pixel_overlap = overlap;

		drm_dbg_kms(display->drm,
			    "MSO link count %d, pixel overlap %d\n",
			    n, overlap);

		/* Each MSO segment carries 1/n of the timings plus the overlap pixels. */
		adjusted_mode->crtc_hdisplay = adjusted_mode->crtc_hdisplay / n + overlap;
		adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hblank_start / n + overlap;
		adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_end / n + overlap;
		adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hsync_start / n + overlap;
		adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_end / n + overlap;
		adjusted_mode->crtc_htotal = adjusted_mode->crtc_htotal / n + overlap;
		adjusted_mode->crtc_clock /= n;
	}

	intel_dp_audio_compute_config(encoder, pipe_config, conn_state);

	if (!intel_dp_is_uhbr(pipe_config)) {
		intel_link_compute_m_n(link_bpp_x16,
				       pipe_config->lane_count,
				       adjusted_mode->crtc_clock,
				       pipe_config->port_clock,
				       intel_dp_bw_fec_overhead(pipe_config->fec_enable),
				       &pipe_config->dp_m_n);
	}

	ret = intel_dp_compute_min_hblank(pipe_config, conn_state);
	if (ret)
		return ret;

	/* FIXME: abstract this better */
	if (pipe_config->splitter.enable)
		pipe_config->dp_m_n.data_m *= pipe_config->splitter.link_count;

	/* NOTE: DRRS must be computed after PSR, see can_enable_drrs(). */
	intel_vrr_compute_config(pipe_config, conn_state);
	intel_dp_compute_as_sdp(intel_dp, pipe_config);
	intel_psr_compute_config(intel_dp, pipe_config, conn_state);
	intel_alpm_lobf_compute_config(intel_dp, pipe_config, conn_state);
	intel_dp_drrs_compute_config(connector, pipe_config, link_bpp_x16);
	intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
	intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);

	return intel_dp_tunnel_atomic_compute_stream_bw(state, intel_dp, connector,
							pipe_config);
}

/* Set the link rate/lane count and reset the per-link training state. */
void intel_dp_set_link_params(struct intel_dp *intel_dp,
			      int link_rate, int lane_count)
{
	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp->link.active = false;
	intel_dp->needs_modeset_retry = false;
	intel_dp->link_rate = link_rate;
	intel_dp->lane_count = lane_count;
}

/* Reset the link limits and training bookkeeping to the sink/source maximums. */
void intel_dp_reset_link_params(struct intel_dp *intel_dp)
{
	intel_dp->link.max_lane_count = intel_dp_max_common_lane_count(intel_dp);
	intel_dp->link.max_rate = intel_dp_max_common_rate(intel_dp);
	intel_dp->link.mst_probed_lane_count = 0;
	intel_dp->link.mst_probed_rate = 0;
	intel_dp->link.retrain_disabled = false;
	intel_dp->link.seq_train_failures = 0;
}

/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(display->drm, "\n");

	intel_backlight_enable(crtc_state, conn_state);
	intel_pps_backlight_on(intel_dp);
}

/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
	struct intel_display *display = to_intel_display(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(display->drm, "\n");

	intel_pps_backlight_off(intel_dp);
	intel_backlight_disable(old_conn_state);
}

static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
{
	/*
	 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
	 * be capable of signalling downstream hpd with a long pulse.
	 * Whether or not that means D3 is safe to use is not clear,
	 * but let's assume so until proven otherwise.
	 *
	 * FIXME should really check all downstream ports...
	 */
	return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
		drm_dp_is_branch(intel_dp->dpcd) &&
		intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
}

/* Read-modify-write @flag in the DP_DSC_ENABLE DPCD register via @aux. */
static int
write_dsc_decompression_flag(struct drm_dp_aux *aux, u8 flag, bool set)
{
	int err;
	u8 val;

	err = drm_dp_dpcd_readb(aux, DP_DSC_ENABLE, &val);
	if (err < 0)
		return err;

	if (set)
		val |= flag;
	else
		val &= ~flag;

	return drm_dp_dpcd_writeb(aux, DP_DSC_ENABLE, val);
}

/* Set/clear DP_DECOMPRESSION_EN in the device addressed by the decompression AUX. */
static void
intel_dp_sink_set_dsc_decompression(struct intel_connector *connector,
				    bool enable)
{
	struct intel_display *display = to_intel_display(connector);

	if (write_dsc_decompression_flag(connector->dp.dsc_decompression_aux,
					 DP_DECOMPRESSION_EN, enable) < 0)
		drm_dbg_kms(display->drm,
			    "Failed to %s sink decompression state\n",
			    str_enable_disable(enable));
}

/* Set/clear DP_DSC_PASSTHROUGH_EN; a no-op unless an MST passthrough AUX exists. */
static void
intel_dp_sink_set_dsc_passthrough(const struct intel_connector *connector,
				  bool enable)
{
	struct intel_display *display = to_intel_display(connector);
	struct drm_dp_aux *aux = connector->mst.port ?
				 connector->mst.port->passthrough_aux : NULL;

	if (!aux)
		return;

	if (write_dsc_decompression_flag(aux,
					 DP_DSC_PASSTHROUGH_EN, enable) < 0)
		drm_dbg_kms(display->drm,
			    "Failed to %s sink compression passthrough state\n",
			    str_enable_disable(enable));
}

/*
 * Count the connectors sharing @connector's DSC decompression AUX device
 * that currently have decompression enabled.
 */
static int intel_dp_dsc_aux_ref_count(struct intel_atomic_state *state,
				      const struct intel_connector *connector,
				      bool for_get_ref)
{
	struct intel_display *display = to_intel_display(state);
	struct drm_connector *_connector_iter;
	struct drm_connector_state *old_conn_state;
	struct drm_connector_state *new_conn_state;
	int ref_count = 0;
	int i;

	/*
	 * On SST the decompression AUX device won't be shared, each connector
	 * uses for this its own AUX targeting the sink device.
	 */
	if (!connector->mst.dp)
		return connector->dp.dsc_decompression_enabled ? 1 : 0;

	for_each_oldnew_connector_in_state(&state->base, _connector_iter,
					   old_conn_state, new_conn_state, i) {
		const struct intel_connector *
			connector_iter = to_intel_connector(_connector_iter);

		if (connector_iter->mst.dp != connector->mst.dp)
			continue;

		if (!connector_iter->dp.dsc_decompression_enabled)
			continue;

		drm_WARN_ON(display->drm,
			    (for_get_ref && !new_conn_state->crtc) ||
			    (!for_get_ref && !old_conn_state->crtc));

		if (connector_iter->dp.dsc_decompression_aux ==
		    connector->dp.dsc_decompression_aux)
			ref_count++;
	}

	return ref_count;
}

/* Take a reference; returns true for the first one, i.e. when HW must be enabled. */
static bool intel_dp_dsc_aux_get_ref(struct intel_atomic_state *state,
				     struct intel_connector *connector)
{
	bool ret = intel_dp_dsc_aux_ref_count(state, connector, true) == 0;

	connector->dp.dsc_decompression_enabled = true;

	return ret;
}

/* Drop a reference; returns true for the last one, i.e. when HW must be disabled. */
static bool intel_dp_dsc_aux_put_ref(struct intel_atomic_state *state,
				     struct intel_connector *connector)
{
	connector->dp.dsc_decompression_enabled = false;

	return intel_dp_dsc_aux_ref_count(state, connector, false) == 0;
}

/**
 * intel_dp_sink_enable_decompression - Enable DSC decompression in sink/last branch device
 * @state: atomic state
 * @connector: connector to enable the decompression for
 * @new_crtc_state: new state for the CRTC driving @connector
 *
 * Enable the DSC decompression if required in the %DP_DSC_ENABLE DPCD
 * register of the appropriate sink/branch device. On SST this is always the
 * sink device, whereas on MST based on each device's DSC capabilities it's
 * either the last branch device (enabling decompression in it) or both the
 * last branch device (enabling passthrough in it) and the sink device
 * (enabling decompression in it).
 */
void intel_dp_sink_enable_decompression(struct intel_atomic_state *state,
					struct intel_connector *connector,
					const struct intel_crtc_state *new_crtc_state)
{
	struct intel_display *display = to_intel_display(state);

	if (!new_crtc_state->dsc.compression_enable)
		return;

	if (drm_WARN_ON(display->drm,
			!connector->dp.dsc_decompression_aux ||
			connector->dp.dsc_decompression_enabled))
		return;

	/* Only the first user of the shared AUX touches the DPCD registers. */
	if (!intel_dp_dsc_aux_get_ref(state, connector))
		return;

	intel_dp_sink_set_dsc_passthrough(connector, true);
	intel_dp_sink_set_dsc_decompression(connector, true);
}

/**
 * intel_dp_sink_disable_decompression - Disable DSC decompression in sink/last branch device
 * @state: atomic state
 * @connector: connector to disable the decompression for
 * @old_crtc_state: old state for the CRTC driving @connector
 *
 * Disable the DSC decompression if required in the %DP_DSC_ENABLE DPCD
 * register of the appropriate sink/branch device, corresponding to the
 * sequence in intel_dp_sink_enable_decompression().
 */
void intel_dp_sink_disable_decompression(struct intel_atomic_state *state,
					 struct intel_connector *connector,
					 const struct intel_crtc_state *old_crtc_state)
{
	struct intel_display *display = to_intel_display(state);

	if (!old_crtc_state->dsc.compression_enable)
		return;

	if (drm_WARN_ON(display->drm,
			!connector->dp.dsc_decompression_aux ||
			!connector->dp.dsc_decompression_enabled))
		return;

	/* Only the last user of the shared AUX touches the DPCD registers. */
	if (!intel_dp_dsc_aux_put_ref(state, connector))
		return;

	/* Reverse order of intel_dp_sink_enable_decompression(). */
	intel_dp_sink_set_dsc_decompression(connector, false);
	intel_dp_sink_set_dsc_passthrough(connector, false);
}

/* Write our source OUI to the sink unless a valid one is already in place. */
static void
intel_dp_init_source_oui(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	u8 oui[] = { 0x00, 0xaa, 0x01 };
	u8 buf[3] = {};

	if (READ_ONCE(intel_dp->oui_valid))
		return;

	WRITE_ONCE(intel_dp->oui_valid, true);

	/*
	 * During driver init, we want to be careful and avoid changing the source OUI if it's
	 * already set to what we want, so as to avoid clearing any state by accident
	 */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0)
		drm_dbg_kms(display->drm, "Failed to read source OUI\n");

	if (memcmp(oui, buf, sizeof(oui)) == 0) {
		/* Assume the OUI was written now.
		 */
		intel_dp->last_oui_write = jiffies;
		return;
	}

	if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0) {
		drm_dbg_kms(display->drm, "Failed to write source OUI\n");
		/* Allow the write to be retried on the next call. */
		WRITE_ONCE(intel_dp->oui_valid, false);
	}

	intel_dp->last_oui_write = jiffies;
}

void intel_dp_invalidate_source_oui(struct intel_dp *intel_dp)
{
	WRITE_ONCE(intel_dp->oui_valid, false);
}

/* Wait out the VBT-specified delay since the last source OUI write. */
void intel_dp_wait_source_oui(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;

	drm_dbg_kms(display->drm,
		    "[CONNECTOR:%d:%s] Performing OUI wait (%u ms)\n",
		    connector->base.base.id, connector->base.name,
		    connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout);

	wait_remaining_ms_from_jiffies(intel_dp->last_oui_write,
				       connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout);
}

/* If the device supports it, try to set the power state appropriately */
void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DP_SET_POWER_D0) {
		/* Keep the sink in D0 if we rely on it for downstream HPD. */
		if (downstream_hpd_needs_d0(intel_dp))
			return;

		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
	} else {
		struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

		intel_lspcon_resume(dig_port);

		/* Write the source OUI as early as possible */
		intel_dp_init_source_oui(intel_dp);

		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
			if (ret == 1)
				break;
			msleep(1);
		}

		if (ret == 1 && intel_lspcon_active(dig_port))
			intel_lspcon_wait_pcon_mode(dig_port);
	}

	if (ret != 1)
		drm_dbg_kms(display->drm,
			    "[ENCODER:%d:%s] Set power to %s failed\n",
			    encoder->base.base.id, encoder->base.name,
			    mode == DP_SET_POWER_D0 ? "D0" : "D3");
}

static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp);

/**
 * intel_dp_sync_state - sync the encoder state during init/resume
 * @encoder: intel encoder to sync
 * @crtc_state: state for the CRTC connected to the encoder
 *
 * Sync any state stored in the encoder wrt. HW state during driver init
 * and system resume.
 */
void intel_dp_sync_state(struct intel_encoder *encoder,
			 const struct intel_crtc_state *crtc_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	bool dpcd_updated = false;

	/*
	 * Don't clobber DPCD if it's been already read out during output
	 * setup (eDP) or detect.
	 */
	if (crtc_state && intel_dp->dpcd[DP_DPCD_REV] == 0) {
		intel_dp_get_dpcd(intel_dp);
		dpcd_updated = true;
	}

	intel_dp_tunnel_resume(intel_dp, crtc_state, dpcd_updated);

	if (crtc_state) {
		intel_dp_reset_link_params(intel_dp);
		intel_dp_set_link_params(intel_dp, crtc_state->port_clock, crtc_state->lane_count);
		intel_dp->link.active = true;
	}
}

/*
 * Returns false (flagging the appropriate uapi state) when the BIOS-programmed
 * state cannot be taken over with a fastset and a full modeset is required.
 */
bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
				   struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	bool fastset = true;

	/*
	 * If BIOS has set an unsupported or non-standard link rate for some
	 * reason force an encoder recompute and full modeset.
	 */
	if (intel_dp_rate_index(intel_dp->source_rates, intel_dp->num_source_rates,
				crtc_state->port_clock) < 0) {
		drm_dbg_kms(display->drm,
			    "[ENCODER:%d:%s] Forcing full modeset due to unsupported link rate\n",
			    encoder->base.base.id, encoder->base.name);
		crtc_state->uapi.connectors_changed = true;
		fastset = false;
	}

	/*
	 * FIXME hack to force full modeset when DSC is being used.
	 *
	 * As long as we do not have full state readout and config comparison
	 * of crtc_state->dsc, we have no way to ensure reliable fastset.
	 * Remove once we have readout for DSC.
	 */
	if (crtc_state->dsc.compression_enable) {
		drm_dbg_kms(display->drm,
			    "[ENCODER:%d:%s] Forcing full modeset due to DSC being enabled\n",
			    encoder->base.base.id, encoder->base.name);
		crtc_state->uapi.mode_changed = true;
		fastset = false;
	}

	if (CAN_PANEL_REPLAY(intel_dp)) {
		drm_dbg_kms(display->drm,
			    "[ENCODER:%d:%s] Forcing full modeset to compute panel replay state\n",
			    encoder->base.base.id, encoder->base.name);
		crtc_state->uapi.mode_changed = true;
		fastset = false;
	}

	return fastset;
}

/* Cache the PCON's DSC encoder capability DPCD registers (branch devices only). */
static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);

	/* Clear the cached register set to avoid using stale values */

	memset(intel_dp->pcon_dsc_dpcd, 0, sizeof(intel_dp->pcon_dsc_dpcd));

	if (!drm_dp_is_branch(intel_dp->dpcd))
		return;

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER,
			     intel_dp->pcon_dsc_dpcd,
			     sizeof(intel_dp->pcon_dsc_dpcd)) < 0)
		drm_err(display->drm, "Failed to read DPCD register 0x%x\n",
			DP_PCON_DSC_ENCODER);

	drm_dbg_kms(display->drm, "PCON ENCODER DSC DPCD: %*ph\n",
		    (int)sizeof(intel_dp->pcon_dsc_dpcd), intel_dp->pcon_dsc_dpcd);
}

/* Return the highest FRL bandwidth (in Gbps) set in @frl_bw_mask, or 0 if none. */
static int intel_dp_pcon_get_frl_mask(u8 frl_bw_mask)
{
	static const int bw_gbps[] = {9, 18, 24, 32, 40, 48};
	int i;

	for (i = ARRAY_SIZE(bw_gbps) - 1; i >= 0; i--) {
		if (frl_bw_mask & (1 << i))
			return bw_gbps[i];
	}
	return 0;
}

/* Map a max FRL bandwidth in Gbps to the corresponding DPCD BW mask bit. */
static int intel_dp_pcon_set_frl_mask(int max_frl)
{
	switch (max_frl) {
	case 48:
		return DP_PCON_FRL_BW_MASK_48GBPS;
	case 40:
		return DP_PCON_FRL_BW_MASK_40GBPS;
	case 32:
		return DP_PCON_FRL_BW_MASK_32GBPS;
	case 24:
		return DP_PCON_FRL_BW_MASK_24GBPS;
	case 18:
		return DP_PCON_FRL_BW_MASK_18GBPS;
	case 9:
		return
DP_PCON_FRL_BW_MASK_9GBPS; 4090 } 4091 4092 return 0; 4093 } 4094 4095 static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp) 4096 { 4097 struct intel_connector *connector = intel_dp->attached_connector; 4098 const struct drm_display_info *info = &connector->base.display_info; 4099 int max_frl_rate; 4100 int max_lanes, rate_per_lane; 4101 int max_dsc_lanes, dsc_rate_per_lane; 4102 4103 max_lanes = info->hdmi.max_lanes; 4104 rate_per_lane = info->hdmi.max_frl_rate_per_lane; 4105 max_frl_rate = max_lanes * rate_per_lane; 4106 4107 if (info->hdmi.dsc_cap.v_1p2) { 4108 max_dsc_lanes = info->hdmi.dsc_cap.max_lanes; 4109 dsc_rate_per_lane = info->hdmi.dsc_cap.max_frl_rate_per_lane; 4110 if (max_dsc_lanes && dsc_rate_per_lane) 4111 max_frl_rate = min(max_frl_rate, max_dsc_lanes * dsc_rate_per_lane); 4112 } 4113 4114 return max_frl_rate; 4115 } 4116 4117 static bool 4118 intel_dp_pcon_is_frl_trained(struct intel_dp *intel_dp, 4119 u8 max_frl_bw_mask, u8 *frl_trained_mask) 4120 { 4121 if (drm_dp_pcon_hdmi_link_active(&intel_dp->aux) && 4122 drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, frl_trained_mask) == DP_PCON_HDMI_MODE_FRL && 4123 *frl_trained_mask >= max_frl_bw_mask) 4124 return true; 4125 4126 return false; 4127 } 4128 4129 static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp) 4130 { 4131 struct intel_display *display = to_intel_display(intel_dp); 4132 #define TIMEOUT_FRL_READY_MS 500 4133 #define TIMEOUT_HDMI_LINK_ACTIVE_MS 1000 4134 int max_frl_bw, max_pcon_frl_bw, max_edid_frl_bw, ret; 4135 u8 max_frl_bw_mask = 0, frl_trained_mask; 4136 bool is_active; 4137 4138 max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw; 4139 drm_dbg(display->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw); 4140 4141 max_edid_frl_bw = intel_dp_hdmi_sink_max_frl(intel_dp); 4142 drm_dbg(display->drm, "Sink max rate from EDID = %d Gbps\n", 4143 max_edid_frl_bw); 4144 4145 max_frl_bw = min(max_edid_frl_bw, max_pcon_frl_bw); 4146 4147 if (max_frl_bw <= 0) 4148 return 
-EINVAL; 4149 4150 max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw); 4151 drm_dbg(display->drm, "MAX_FRL_BW_MASK = %u\n", max_frl_bw_mask); 4152 4153 if (intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask)) 4154 goto frl_trained; 4155 4156 ret = drm_dp_pcon_frl_prepare(&intel_dp->aux, false); 4157 if (ret < 0) 4158 return ret; 4159 /* Wait for PCON to be FRL Ready */ 4160 ret = poll_timeout_us(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux), 4161 is_active, 4162 1000, TIMEOUT_FRL_READY_MS * 1000, false); 4163 if (ret) 4164 return ret; 4165 4166 ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw, 4167 DP_PCON_ENABLE_SEQUENTIAL_LINK); 4168 if (ret < 0) 4169 return ret; 4170 ret = drm_dp_pcon_frl_configure_2(&intel_dp->aux, max_frl_bw_mask, 4171 DP_PCON_FRL_LINK_TRAIN_NORMAL); 4172 if (ret < 0) 4173 return ret; 4174 ret = drm_dp_pcon_frl_enable(&intel_dp->aux); 4175 if (ret < 0) 4176 return ret; 4177 /* 4178 * Wait for FRL to be completed 4179 * Check if the HDMI Link is up and active. 
4180 */ 4181 ret = poll_timeout_us(is_active = intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask), 4182 is_active, 4183 1000, TIMEOUT_HDMI_LINK_ACTIVE_MS * 1000, false); 4184 if (ret) 4185 return ret; 4186 4187 frl_trained: 4188 drm_dbg(display->drm, "FRL_TRAINED_MASK = %u\n", frl_trained_mask); 4189 intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask); 4190 intel_dp->frl.is_trained = true; 4191 drm_dbg(display->drm, "FRL trained with : %d Gbps\n", 4192 intel_dp->frl.trained_rate_gbps); 4193 4194 return 0; 4195 } 4196 4197 static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp) 4198 { 4199 if (drm_dp_is_branch(intel_dp->dpcd) && 4200 intel_dp_has_hdmi_sink(intel_dp) && 4201 intel_dp_hdmi_sink_max_frl(intel_dp) > 0) 4202 return true; 4203 4204 return false; 4205 } 4206 4207 static 4208 int intel_dp_pcon_set_tmds_mode(struct intel_dp *intel_dp) 4209 { 4210 int ret; 4211 u8 buf = 0; 4212 4213 /* Set PCON source control mode */ 4214 buf |= DP_PCON_ENABLE_SOURCE_CTL_MODE; 4215 4216 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf); 4217 if (ret < 0) 4218 return ret; 4219 4220 /* Set HDMI LINK ENABLE */ 4221 buf |= DP_PCON_ENABLE_HDMI_LINK; 4222 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf); 4223 if (ret < 0) 4224 return ret; 4225 4226 return 0; 4227 } 4228 4229 void intel_dp_check_frl_training(struct intel_dp *intel_dp) 4230 { 4231 struct intel_display *display = to_intel_display(intel_dp); 4232 4233 /* 4234 * Always go for FRL training if: 4235 * -PCON supports SRC_CTL_MODE (VESA DP2.0-HDMI2.1 PCON Spec Draft-1 Sec-7) 4236 * -sink is HDMI2.1 4237 */ 4238 if (!(intel_dp->downstream_ports[2] & DP_PCON_SOURCE_CTL_MODE) || 4239 !intel_dp_is_hdmi_2_1_sink(intel_dp) || 4240 intel_dp->frl.is_trained) 4241 return; 4242 4243 if (intel_dp_pcon_start_frl_training(intel_dp) < 0) { 4244 int ret, mode; 4245 4246 drm_dbg(display->drm, 4247 "Couldn't set FRL mode, continuing 
with TMDS mode\n"); 4248 ret = intel_dp_pcon_set_tmds_mode(intel_dp); 4249 mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL); 4250 4251 if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS) 4252 drm_dbg(display->drm, 4253 "Issue with PCON, cannot set TMDS mode\n"); 4254 } else { 4255 drm_dbg(display->drm, "FRL training Completed\n"); 4256 } 4257 } 4258 4259 static int 4260 intel_dp_pcon_dsc_enc_slice_height(const struct intel_crtc_state *crtc_state) 4261 { 4262 int vactive = crtc_state->hw.adjusted_mode.vdisplay; 4263 4264 return intel_hdmi_dsc_get_slice_height(vactive); 4265 } 4266 4267 static int 4268 intel_dp_pcon_dsc_enc_slices(struct intel_dp *intel_dp, 4269 const struct intel_crtc_state *crtc_state) 4270 { 4271 struct intel_connector *connector = intel_dp->attached_connector; 4272 const struct drm_display_info *info = &connector->base.display_info; 4273 int hdmi_throughput = info->hdmi.dsc_cap.clk_per_slice; 4274 int hdmi_max_slices = info->hdmi.dsc_cap.max_slices; 4275 int pcon_max_slices = drm_dp_pcon_dsc_max_slices(intel_dp->pcon_dsc_dpcd); 4276 int pcon_max_slice_width = drm_dp_pcon_dsc_max_slice_width(intel_dp->pcon_dsc_dpcd); 4277 4278 return intel_hdmi_dsc_get_num_slices(crtc_state, pcon_max_slices, 4279 pcon_max_slice_width, 4280 hdmi_max_slices, hdmi_throughput); 4281 } 4282 4283 static int 4284 intel_dp_pcon_dsc_enc_bpp(struct intel_dp *intel_dp, 4285 const struct intel_crtc_state *crtc_state, 4286 int num_slices, int slice_width) 4287 { 4288 struct intel_connector *connector = intel_dp->attached_connector; 4289 const struct drm_display_info *info = &connector->base.display_info; 4290 int output_format = crtc_state->output_format; 4291 bool hdmi_all_bpp = info->hdmi.dsc_cap.all_bpp; 4292 int pcon_fractional_bpp = drm_dp_pcon_dsc_bpp_incr(intel_dp->pcon_dsc_dpcd); 4293 int hdmi_max_chunk_bytes = 4294 info->hdmi.dsc_cap.total_chunk_kbytes * 1024; 4295 4296 return intel_hdmi_dsc_get_bpp(pcon_fractional_bpp, slice_width, 4297 num_slices, output_format, 
hdmi_all_bpp, 4298 hdmi_max_chunk_bytes); 4299 } 4300 4301 void 4302 intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp, 4303 const struct intel_crtc_state *crtc_state) 4304 { 4305 struct intel_display *display = to_intel_display(intel_dp); 4306 struct intel_connector *connector = intel_dp->attached_connector; 4307 const struct drm_display_info *info; 4308 u8 pps_param[6]; 4309 int slice_height; 4310 int slice_width; 4311 int num_slices; 4312 int bits_per_pixel; 4313 int ret; 4314 bool hdmi_is_dsc_1_2; 4315 4316 if (!intel_dp_is_hdmi_2_1_sink(intel_dp)) 4317 return; 4318 4319 if (!connector) 4320 return; 4321 4322 info = &connector->base.display_info; 4323 4324 hdmi_is_dsc_1_2 = info->hdmi.dsc_cap.v_1p2; 4325 4326 if (!drm_dp_pcon_enc_is_dsc_1_2(intel_dp->pcon_dsc_dpcd) || 4327 !hdmi_is_dsc_1_2) 4328 return; 4329 4330 slice_height = intel_dp_pcon_dsc_enc_slice_height(crtc_state); 4331 if (!slice_height) 4332 return; 4333 4334 num_slices = intel_dp_pcon_dsc_enc_slices(intel_dp, crtc_state); 4335 if (!num_slices) 4336 return; 4337 4338 slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay, 4339 num_slices); 4340 4341 bits_per_pixel = intel_dp_pcon_dsc_enc_bpp(intel_dp, crtc_state, 4342 num_slices, slice_width); 4343 if (!bits_per_pixel) 4344 return; 4345 4346 pps_param[0] = slice_height & 0xFF; 4347 pps_param[1] = slice_height >> 8; 4348 pps_param[2] = slice_width & 0xFF; 4349 pps_param[3] = slice_width >> 8; 4350 pps_param[4] = bits_per_pixel & 0xFF; 4351 pps_param[5] = (bits_per_pixel >> 8) & 0x3; 4352 4353 ret = drm_dp_pcon_pps_override_param(&intel_dp->aux, pps_param); 4354 if (ret < 0) 4355 drm_dbg_kms(display->drm, "Failed to set pcon DSC\n"); 4356 } 4357 4358 void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp, 4359 const struct intel_crtc_state *crtc_state) 4360 { 4361 struct intel_display *display = to_intel_display(intel_dp); 4362 bool ycbcr444_to_420 = false; 4363 bool rgb_to_ycbcr = false; 4364 u8 tmp; 4365 4366 if 
(intel_dp->dpcd[DP_DPCD_REV] < 0x13) 4367 return; 4368 4369 if (!drm_dp_is_branch(intel_dp->dpcd)) 4370 return; 4371 4372 tmp = intel_dp_has_hdmi_sink(intel_dp) ? DP_HDMI_DVI_OUTPUT_CONFIG : 0; 4373 4374 if (drm_dp_dpcd_writeb(&intel_dp->aux, 4375 DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1) 4376 drm_dbg_kms(display->drm, 4377 "Failed to %s protocol converter HDMI mode\n", 4378 str_enable_disable(intel_dp_has_hdmi_sink(intel_dp))); 4379 4380 if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR420) { 4381 switch (crtc_state->output_format) { 4382 case INTEL_OUTPUT_FORMAT_YCBCR420: 4383 break; 4384 case INTEL_OUTPUT_FORMAT_YCBCR444: 4385 ycbcr444_to_420 = true; 4386 break; 4387 case INTEL_OUTPUT_FORMAT_RGB: 4388 rgb_to_ycbcr = true; 4389 ycbcr444_to_420 = true; 4390 break; 4391 default: 4392 MISSING_CASE(crtc_state->output_format); 4393 break; 4394 } 4395 } else if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR444) { 4396 switch (crtc_state->output_format) { 4397 case INTEL_OUTPUT_FORMAT_YCBCR444: 4398 break; 4399 case INTEL_OUTPUT_FORMAT_RGB: 4400 rgb_to_ycbcr = true; 4401 break; 4402 default: 4403 MISSING_CASE(crtc_state->output_format); 4404 break; 4405 } 4406 } 4407 4408 tmp = ycbcr444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0; 4409 4410 if (drm_dp_dpcd_writeb(&intel_dp->aux, 4411 DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1) 4412 drm_dbg_kms(display->drm, 4413 "Failed to %s protocol converter YCbCr 4:2:0 conversion mode\n", 4414 str_enable_disable(intel_dp->dfp.ycbcr_444_to_420)); 4415 4416 tmp = rgb_to_ycbcr ? 
DP_CONVERSION_BT709_RGB_YCBCR_ENABLE : 0; 4417 4418 if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0) 4419 drm_dbg_kms(display->drm, 4420 "Failed to %s protocol converter RGB->YCbCr conversion mode\n", 4421 str_enable_disable(tmp)); 4422 } 4423 4424 static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) 4425 { 4426 u8 dprx = 0; 4427 4428 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST, 4429 &dprx) != 1) 4430 return false; 4431 return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED; 4432 } 4433 4434 static int intel_dp_read_dsc_dpcd(struct drm_dp_aux *aux, 4435 u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) 4436 { 4437 int ret; 4438 4439 ret = drm_dp_dpcd_read_data(aux, DP_DSC_SUPPORT, dsc_dpcd, 4440 DP_DSC_RECEIVER_CAP_SIZE); 4441 if (ret) { 4442 drm_dbg_kms(aux->drm_dev, 4443 "Could not read DSC DPCD register 0x%x Error: %pe\n", 4444 DP_DSC_SUPPORT, ERR_PTR(ret)); 4445 return ret; 4446 } 4447 4448 drm_dbg_kms(aux->drm_dev, "DSC DPCD: %*ph\n", 4449 DP_DSC_RECEIVER_CAP_SIZE, 4450 dsc_dpcd); 4451 return 0; 4452 } 4453 4454 static void init_dsc_overall_throughput_limits(struct intel_connector *connector, bool is_branch) 4455 { 4456 u8 branch_caps[DP_DSC_BRANCH_CAP_SIZE]; 4457 int line_width; 4458 4459 connector->dp.dsc_branch_caps.overall_throughput.rgb_yuv444 = INT_MAX; 4460 connector->dp.dsc_branch_caps.overall_throughput.yuv422_420 = INT_MAX; 4461 connector->dp.dsc_branch_caps.max_line_width = INT_MAX; 4462 4463 if (!is_branch) 4464 return; 4465 4466 if (drm_dp_dpcd_read_data(connector->dp.dsc_decompression_aux, 4467 DP_DSC_BRANCH_OVERALL_THROUGHPUT_0, branch_caps, 4468 sizeof(branch_caps)) != 0) 4469 return; 4470 4471 connector->dp.dsc_branch_caps.overall_throughput.rgb_yuv444 = 4472 drm_dp_dsc_branch_max_overall_throughput(branch_caps, true) ? : INT_MAX; 4473 4474 connector->dp.dsc_branch_caps.overall_throughput.yuv422_420 = 4475 drm_dp_dsc_branch_max_overall_throughput(branch_caps, false) ? 
: INT_MAX; 4476 4477 line_width = drm_dp_dsc_branch_max_line_width(branch_caps); 4478 connector->dp.dsc_branch_caps.max_line_width = line_width > 0 ? line_width : INT_MAX; 4479 } 4480 4481 void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, 4482 const struct drm_dp_desc *desc, bool is_branch, 4483 struct intel_connector *connector) 4484 { 4485 struct intel_display *display = to_intel_display(connector); 4486 4487 /* 4488 * Clear the cached register set to avoid using stale values 4489 * for the sinks that do not support DSC. 4490 */ 4491 memset(connector->dp.dsc_dpcd, 0, sizeof(connector->dp.dsc_dpcd)); 4492 4493 /* Clear fec_capable to avoid using stale values */ 4494 connector->dp.fec_capability = 0; 4495 4496 memset(&connector->dp.dsc_branch_caps, 0, sizeof(connector->dp.dsc_branch_caps)); 4497 connector->dp.dsc_throughput_quirk = false; 4498 4499 if (dpcd_rev < DP_DPCD_REV_14) 4500 return; 4501 4502 if (intel_dp_read_dsc_dpcd(connector->dp.dsc_decompression_aux, 4503 connector->dp.dsc_dpcd) < 0) 4504 return; 4505 4506 if (drm_dp_dpcd_readb(connector->dp.dsc_decompression_aux, DP_FEC_CAPABILITY, 4507 &connector->dp.fec_capability) < 0) { 4508 drm_err(display->drm, "Failed to read FEC DPCD register\n"); 4509 return; 4510 } 4511 4512 drm_dbg_kms(display->drm, "FEC CAPABILITY: %x\n", 4513 connector->dp.fec_capability); 4514 4515 if (!(connector->dp.dsc_dpcd[0] & DP_DSC_DECOMPRESSION_IS_SUPPORTED)) 4516 return; 4517 4518 init_dsc_overall_throughput_limits(connector, is_branch); 4519 4520 /* 4521 * TODO: Move the HW rev check as well to the DRM core quirk table if 4522 * that's required after clarifying the list of affected devices. 
4523 */ 4524 if (drm_dp_has_quirk(desc, DP_DPCD_QUIRK_DSC_THROUGHPUT_BPP_LIMIT) && 4525 desc->ident.hw_rev == 0x10) 4526 connector->dp.dsc_throughput_quirk = true; 4527 } 4528 4529 static void intel_edp_get_dsc_sink_cap(u8 edp_dpcd_rev, struct intel_connector *connector) 4530 { 4531 if (edp_dpcd_rev < DP_EDP_14) 4532 return; 4533 4534 if (intel_dp_read_dsc_dpcd(connector->dp.dsc_decompression_aux, 4535 connector->dp.dsc_dpcd) < 0) 4536 return; 4537 4538 if (connector->dp.dsc_dpcd[0] & DP_DSC_DECOMPRESSION_IS_SUPPORTED) 4539 init_dsc_overall_throughput_limits(connector, false); 4540 } 4541 4542 static void 4543 intel_dp_detect_dsc_caps(struct intel_dp *intel_dp, struct intel_connector *connector) 4544 { 4545 struct intel_display *display = to_intel_display(intel_dp); 4546 4547 /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */ 4548 if (!HAS_DSC(display)) 4549 return; 4550 4551 if (intel_dp_is_edp(intel_dp)) 4552 intel_edp_get_dsc_sink_cap(intel_dp->edp_dpcd[0], 4553 connector); 4554 else 4555 intel_dp_get_dsc_sink_cap(intel_dp->dpcd[DP_DPCD_REV], 4556 &intel_dp->desc, drm_dp_is_branch(intel_dp->dpcd), 4557 connector); 4558 } 4559 4560 static void intel_edp_mso_mode_fixup(struct intel_connector *connector, 4561 struct drm_display_mode *mode) 4562 { 4563 struct intel_display *display = to_intel_display(connector); 4564 struct intel_dp *intel_dp = intel_attached_dp(connector); 4565 int n = intel_dp->mso_link_count; 4566 int overlap = intel_dp->mso_pixel_overlap; 4567 4568 if (!mode || !n) 4569 return; 4570 4571 mode->hdisplay = (mode->hdisplay - overlap) * n; 4572 mode->hsync_start = (mode->hsync_start - overlap) * n; 4573 mode->hsync_end = (mode->hsync_end - overlap) * n; 4574 mode->htotal = (mode->htotal - overlap) * n; 4575 mode->clock *= n; 4576 4577 drm_mode_set_name(mode); 4578 4579 drm_dbg_kms(display->drm, 4580 "[CONNECTOR:%d:%s] using generated MSO mode: " DRM_MODE_FMT "\n", 4581 connector->base.base.id, connector->base.name, 4582 DRM_MODE_ARG(mode)); 4583 } 
4584 4585 void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp) 4586 { 4587 struct intel_display *display = to_intel_display(encoder); 4588 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 4589 struct intel_connector *connector = intel_dp->attached_connector; 4590 4591 if (connector->panel.vbt.edp.bpp && pipe_bpp > connector->panel.vbt.edp.bpp) { 4592 /* 4593 * This is a big fat ugly hack. 4594 * 4595 * Some machines in UEFI boot mode provide us a VBT that has 18 4596 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons 4597 * unknown we fail to light up. Yet the same BIOS boots up with 4598 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as 4599 * max, not what it tells us to use. 4600 * 4601 * Note: This will still be broken if the eDP panel is not lit 4602 * up by the BIOS, and thus we can't get the mode at module 4603 * load. 4604 */ 4605 drm_dbg_kms(display->drm, 4606 "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n", 4607 pipe_bpp, connector->panel.vbt.edp.bpp); 4608 connector->panel.vbt.edp.bpp = pipe_bpp; 4609 } 4610 } 4611 4612 static void intel_edp_mso_init(struct intel_dp *intel_dp) 4613 { 4614 struct intel_display *display = to_intel_display(intel_dp); 4615 struct intel_connector *connector = intel_dp->attached_connector; 4616 struct drm_display_info *info = &connector->base.display_info; 4617 u8 mso; 4618 4619 if (intel_dp->edp_dpcd[0] < DP_EDP_14) 4620 return; 4621 4622 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_MSO_LINK_CAPABILITIES, &mso) != 1) { 4623 drm_err(display->drm, "Failed to read MSO cap\n"); 4624 return; 4625 } 4626 4627 /* Valid configurations are SST or MSO 2x1, 2x2, 4x1 */ 4628 mso &= DP_EDP_MSO_NUMBER_OF_LINKS_MASK; 4629 if (mso % 2 || mso > drm_dp_max_lane_count(intel_dp->dpcd)) { 4630 drm_err(display->drm, "Invalid MSO link count cap %u\n", mso); 4631 mso = 0; 4632 } 4633 4634 if (mso) { 4635 drm_dbg_kms(display->drm, 4636 "Sink MSO %ux%u configuration, pixel 
overlap %u\n", 4637 mso, drm_dp_max_lane_count(intel_dp->dpcd) / mso, 4638 info->mso_pixel_overlap); 4639 if (!HAS_MSO(display)) { 4640 drm_err(display->drm, 4641 "No source MSO support, disabling\n"); 4642 mso = 0; 4643 } 4644 } 4645 4646 intel_dp->mso_link_count = mso; 4647 intel_dp->mso_pixel_overlap = mso ? info->mso_pixel_overlap : 0; 4648 } 4649 4650 static void 4651 intel_edp_set_data_override_rates(struct intel_dp *intel_dp) 4652 { 4653 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 4654 int *sink_rates = intel_dp->sink_rates; 4655 int i, count = 0; 4656 4657 for (i = 0; i < intel_dp->num_sink_rates; i++) { 4658 if (intel_bios_encoder_reject_edp_rate(encoder->devdata, 4659 intel_dp->sink_rates[i])) 4660 continue; 4661 4662 sink_rates[count++] = intel_dp->sink_rates[i]; 4663 } 4664 intel_dp->num_sink_rates = count; 4665 } 4666 4667 static void 4668 intel_edp_set_sink_rates(struct intel_dp *intel_dp) 4669 { 4670 struct intel_display *display = to_intel_display(intel_dp); 4671 4672 intel_dp->num_sink_rates = 0; 4673 4674 if (intel_dp->edp_dpcd[0] >= DP_EDP_14) { 4675 __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; 4676 int i; 4677 4678 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES, 4679 sink_rates, sizeof(sink_rates)); 4680 4681 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) { 4682 int rate; 4683 4684 /* Value read multiplied by 200kHz gives the per-lane 4685 * link rate in kHz. The source rates are, however, 4686 * stored in terms of LS_Clk kHz. The full conversion 4687 * back to symbols is 4688 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte) 4689 */ 4690 rate = le16_to_cpu(sink_rates[i]) * 200 / 10; 4691 4692 if (rate == 0) 4693 break; 4694 4695 /* 4696 * Some platforms cannot reliably drive HBR3 rates due to PHY limitations, 4697 * even if the sink advertises support. Reject any sink rates above HBR2 on 4698 * the known machines for stable output. 
4699 */ 4700 if (rate > 540000 && 4701 intel_has_quirk(display, QUIRK_EDP_LIMIT_RATE_HBR2)) 4702 break; 4703 4704 intel_dp->sink_rates[i] = rate; 4705 } 4706 intel_dp->num_sink_rates = i; 4707 } 4708 4709 /* 4710 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available, 4711 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise. 4712 */ 4713 if (intel_dp->num_sink_rates) 4714 intel_dp->use_rate_select = true; 4715 else 4716 intel_dp_set_sink_rates(intel_dp); 4717 4718 intel_edp_set_data_override_rates(intel_dp); 4719 } 4720 4721 static bool 4722 intel_edp_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector) 4723 { 4724 struct intel_display *display = to_intel_display(intel_dp); 4725 int ret; 4726 4727 /* this function is meant to be called only once */ 4728 drm_WARN_ON(display->drm, intel_dp->dpcd[DP_DPCD_REV] != 0); 4729 4730 if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0) 4731 return false; 4732 4733 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, 4734 drm_dp_is_branch(intel_dp->dpcd)); 4735 intel_init_dpcd_quirks(intel_dp, &intel_dp->desc.ident); 4736 4737 intel_dp->colorimetry_support = 4738 intel_dp_get_colorimetry_status(intel_dp); 4739 4740 /* 4741 * Read the eDP display control registers. 4742 * 4743 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in 4744 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it 4745 * set, but require eDP 1.4+ detection (e.g. for supported link rates 4746 * method). The display control registers should read zero if they're 4747 * not supported anyway. 
4748 */ 4749 if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, 4750 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) == 4751 sizeof(intel_dp->edp_dpcd)) { 4752 drm_dbg_kms(display->drm, "eDP DPCD: %*ph\n", 4753 (int)sizeof(intel_dp->edp_dpcd), 4754 intel_dp->edp_dpcd); 4755 4756 intel_dp->use_max_params = intel_dp->edp_dpcd[0] < DP_EDP_14; 4757 } 4758 4759 /* 4760 * If needed, program our source OUI so we can make various Intel-specific AUX services 4761 * available (such as HDR backlight controls) 4762 */ 4763 intel_dp_init_source_oui(intel_dp); 4764 4765 /* Read the ALPM DPCD caps */ 4766 ret = drm_dp_dpcd_read_byte(&intel_dp->aux, DP_RECEIVER_ALPM_CAP, 4767 &intel_dp->alpm_dpcd); 4768 if (ret < 0) 4769 return false; 4770 4771 /* 4772 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks 4773 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1] 4774 */ 4775 intel_psr_init_dpcd(intel_dp, connector); 4776 4777 intel_edp_set_sink_rates(intel_dp); 4778 intel_dp_set_max_sink_lane_count(intel_dp); 4779 4780 /* Read the eDP DSC DPCD registers */ 4781 intel_dp_detect_dsc_caps(intel_dp, connector); 4782 4783 return true; 4784 } 4785 4786 static bool 4787 intel_dp_has_sink_count(struct intel_dp *intel_dp) 4788 { 4789 if (!intel_dp->attached_connector) 4790 return false; 4791 4792 return drm_dp_read_sink_count_cap(&intel_dp->attached_connector->base, 4793 intel_dp->dpcd, 4794 &intel_dp->desc); 4795 } 4796 4797 void intel_dp_update_sink_caps(struct intel_dp *intel_dp) 4798 { 4799 intel_dp_set_sink_rates(intel_dp); 4800 intel_dp_set_max_sink_lane_count(intel_dp); 4801 intel_dp_set_common_rates(intel_dp); 4802 } 4803 4804 static bool 4805 intel_dp_get_dpcd(struct intel_dp *intel_dp) 4806 { 4807 int ret; 4808 4809 if (intel_dp_init_lttpr_and_dprx_caps(intel_dp) < 0) 4810 return false; 4811 4812 /* 4813 * Don't clobber cached eDP rates. Also skip re-reading 4814 * the OUI/ID since we know it won't change. 
4815 */ 4816 if (!intel_dp_is_edp(intel_dp)) { 4817 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, 4818 drm_dp_is_branch(intel_dp->dpcd)); 4819 4820 intel_init_dpcd_quirks(intel_dp, &intel_dp->desc.ident); 4821 4822 intel_dp->colorimetry_support = 4823 intel_dp_get_colorimetry_status(intel_dp); 4824 4825 intel_dp_update_sink_caps(intel_dp); 4826 } 4827 4828 if (intel_dp_has_sink_count(intel_dp)) { 4829 ret = drm_dp_read_sink_count(&intel_dp->aux); 4830 if (ret < 0) 4831 return false; 4832 4833 /* 4834 * Sink count can change between short pulse hpd hence 4835 * a member variable in intel_dp will track any changes 4836 * between short pulse interrupts. 4837 */ 4838 intel_dp->sink_count = ret; 4839 4840 /* 4841 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that 4842 * a dongle is present but no display. Unless we require to know 4843 * if a dongle is present or not, we don't need to update 4844 * downstream port information. So, an early return here saves 4845 * time from performing other operations which are not required. 
4846 */ 4847 if (!intel_dp->sink_count) 4848 return false; 4849 } 4850 4851 return drm_dp_read_downstream_info(&intel_dp->aux, intel_dp->dpcd, 4852 intel_dp->downstream_ports) == 0; 4853 } 4854 4855 static const char *intel_dp_mst_mode_str(enum drm_dp_mst_mode mst_mode) 4856 { 4857 if (mst_mode == DRM_DP_MST) 4858 return "MST"; 4859 else if (mst_mode == DRM_DP_SST_SIDEBAND_MSG) 4860 return "SST w/ sideband messaging"; 4861 else 4862 return "SST"; 4863 } 4864 4865 static enum drm_dp_mst_mode 4866 intel_dp_mst_mode_choose(struct intel_dp *intel_dp, 4867 enum drm_dp_mst_mode sink_mst_mode) 4868 { 4869 struct intel_display *display = to_intel_display(intel_dp); 4870 4871 if (!display->params.enable_dp_mst) 4872 return DRM_DP_SST; 4873 4874 if (!intel_dp_mst_source_support(intel_dp)) 4875 return DRM_DP_SST; 4876 4877 if (sink_mst_mode == DRM_DP_SST_SIDEBAND_MSG && 4878 !(intel_dp->dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_128B132B)) 4879 return DRM_DP_SST; 4880 4881 return sink_mst_mode; 4882 } 4883 4884 static enum drm_dp_mst_mode 4885 intel_dp_mst_detect(struct intel_dp *intel_dp) 4886 { 4887 struct intel_display *display = to_intel_display(intel_dp); 4888 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 4889 enum drm_dp_mst_mode sink_mst_mode; 4890 enum drm_dp_mst_mode mst_detect; 4891 4892 sink_mst_mode = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd); 4893 4894 mst_detect = intel_dp_mst_mode_choose(intel_dp, sink_mst_mode); 4895 4896 drm_dbg_kms(display->drm, 4897 "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s -> enable: %s\n", 4898 encoder->base.base.id, encoder->base.name, 4899 str_yes_no(intel_dp_mst_source_support(intel_dp)), 4900 intel_dp_mst_mode_str(sink_mst_mode), 4901 str_yes_no(display->params.enable_dp_mst), 4902 intel_dp_mst_mode_str(mst_detect)); 4903 4904 return mst_detect; 4905 } 4906 4907 static void 4908 intel_dp_mst_configure(struct intel_dp *intel_dp) 4909 { 4910 if 
(!intel_dp_mst_source_support(intel_dp)) 4911 return; 4912 4913 intel_dp->is_mst = intel_dp->mst_detect != DRM_DP_SST; 4914 4915 if (intel_dp->is_mst) 4916 intel_dp_mst_prepare_probe(intel_dp); 4917 4918 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst.mgr, intel_dp->is_mst); 4919 4920 /* Avoid stale info on the next detect cycle. */ 4921 intel_dp->mst_detect = DRM_DP_SST; 4922 } 4923 4924 static void 4925 intel_dp_mst_disconnect(struct intel_dp *intel_dp) 4926 { 4927 struct intel_display *display = to_intel_display(intel_dp); 4928 4929 if (!intel_dp->is_mst) 4930 return; 4931 4932 drm_dbg_kms(display->drm, 4933 "MST device may have disappeared %d vs %d\n", 4934 intel_dp->is_mst, intel_dp->mst.mgr.mst_state); 4935 intel_dp->is_mst = false; 4936 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst.mgr, intel_dp->is_mst); 4937 } 4938 4939 #define INTEL_DP_DEVICE_SERVICE_IRQ_MASK_SST (DP_AUTOMATED_TEST_REQUEST | \ 4940 DP_CP_IRQ | \ 4941 DP_SINK_SPECIFIC_IRQ) 4942 4943 #define INTEL_DP_DEVICE_SERVICE_IRQ_MASK_MST (DP_CP_IRQ | \ 4944 DP_DOWN_REP_MSG_RDY | \ 4945 DP_UP_REQ_MSG_RDY) 4946 4947 #define INTEL_DP_LINK_SERVICE_IRQ_MASK_SST (RX_CAP_CHANGED | \ 4948 LINK_STATUS_CHANGED | \ 4949 HDMI_LINK_STATUS_CHANGED | \ 4950 CONNECTED_OFF_ENTRY_REQUESTED | \ 4951 DP_TUNNELING_IRQ) 4952 4953 #define INTEL_DP_LINK_SERVICE_IRQ_MASK_MST (RX_CAP_CHANGED | \ 4954 LINK_STATUS_CHANGED | \ 4955 DP_TUNNELING_IRQ) 4956 4957 static bool 4958 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *esi) 4959 { 4960 struct intel_display *display = to_intel_display(intel_dp); 4961 4962 /* 4963 * Display WA for HSD #13013007775: mtl/arl/lnl 4964 * Read the sink count and link service IRQ registers in separate 4965 * transactions to prevent disconnecting the sink on a TBT link 4966 * inadvertently. 
4967 */ 4968 if (IS_DISPLAY_VER(display, 14, 20) && !display->platform.battlemage) { 4969 if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, esi, 3) != 3) 4970 return false; 4971 4972 /* DP_SINK_COUNT_ESI + 3 == DP_LINK_SERVICE_IRQ_VECTOR_ESI0 */ 4973 return drm_dp_dpcd_readb(&intel_dp->aux, DP_LINK_SERVICE_IRQ_VECTOR_ESI0, 4974 &esi[3]) == 1; 4975 } 4976 4977 return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, esi, 4) == 4; 4978 } 4979 4980 static bool intel_dp_ack_sink_irq_esi(struct intel_dp *intel_dp, u8 esi[4]) 4981 { 4982 int retry; 4983 4984 for (retry = 0; retry < 3; retry++) { 4985 if (drm_dp_dpcd_write(&intel_dp->aux, DP_SINK_COUNT_ESI + 1, 4986 &esi[1], 3) == 3) 4987 return true; 4988 } 4989 4990 return false; 4991 } 4992 4993 /* Return %true if reading the ESI vector succeeded, %false otherwise. */ 4994 static bool intel_dp_get_sink_irq_esi_sst(struct intel_dp *intel_dp, u8 esi[4]) 4995 { 4996 memset(esi, 0, 4); 4997 4998 /* 4999 * TODO: For DP_DPCD_REV >= 0x12 read 5000 * DP_SINK_COUNT_ESI and DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0. 5001 */ 5002 if (drm_dp_dpcd_read_data(&intel_dp->aux, DP_SINK_COUNT, esi, 2) != 0) 5003 return false; 5004 5005 if (intel_dp->dpcd[DP_DPCD_REV] < DP_DPCD_REV_12) 5006 return true; 5007 5008 /* TODO: Read DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1 as well */ 5009 if (drm_dp_dpcd_read_byte(&intel_dp->aux, DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &esi[3]) != 0) 5010 return false; 5011 5012 return true; 5013 } 5014 5015 /* Return %true if acking the ESI vector IRQ events succeeded, %false otherwise. 
*/
static bool intel_dp_ack_sink_irq_esi_sst(struct intel_dp *intel_dp, u8 esi[4])
{
	/*
	 * TODO: For DP_DPCD_REV >= 0x12 write
	 * DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0
	 */
	if (drm_dp_dpcd_write_byte(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, esi[1]) != 0)
		return false;

	if (intel_dp->dpcd[DP_DPCD_REV] < DP_DPCD_REV_12)
		return true;

	/* TODO: Read DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1 as well */
	if (drm_dp_dpcd_write_byte(&intel_dp->aux, DP_LINK_SERVICE_IRQ_VECTOR_ESI0, esi[3]) != 0)
		return false;

	return true;
}

/*
 * Return %true if reading the ESI vector and acking the ESI IRQ events succeeded,
 * %false otherwise.
 */
static bool intel_dp_get_and_ack_sink_irq_esi_sst(struct intel_dp *intel_dp, u8 esi[4])
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;

	if (!intel_dp_get_sink_irq_esi_sst(intel_dp, esi))
		return false;

	drm_dbg_kms(display->drm,
		    "[CONNECTOR:%d:%s][ENCODER:%d:%s] DPRX ESI: %4ph\n",
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name,
		    esi);

	/* Only the events in these masks get handled, so only ack those. */
	esi[1] &= INTEL_DP_DEVICE_SERVICE_IRQ_MASK_SST;
	esi[3] &= INTEL_DP_LINK_SERVICE_IRQ_MASK_SST;

	/* Nothing pending, nothing to ack. */
	if (mem_is_zero(&esi[1], 3))
		return true;

	if (!intel_dp_ack_sink_irq_esi_sst(intel_dp, esi))
		return false;

	return true;
}

bool
intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
		       const struct drm_connector_state *conn_state)
{
	/*
	 * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
	 * of Color Encoding Format and Content Color Gamut], in order to
	 * sending YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP.
	 */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		return true;

	switch (conn_state->colorspace) {
	case DRM_MODE_COLORIMETRY_SYCC_601:
	case DRM_MODE_COLORIMETRY_OPYCC_601:
	case DRM_MODE_COLORIMETRY_BT2020_YCC:
	case DRM_MODE_COLORIMETRY_BT2020_RGB:
	case DRM_MODE_COLORIMETRY_BT2020_CYCC:
		return true;
	default:
		break;
	}

	return false;
}

/*
 * Pack an Adaptive Sync SDP into @sdp for writing to the SDP registers.
 * Returns the packed length or -ENOSPC if @size is too small.
 */
static ssize_t intel_dp_as_sdp_pack(const struct drm_dp_as_sdp *as_sdp,
				    struct dp_sdp *sdp, size_t size)
{
	size_t length = sizeof(struct dp_sdp);

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	/* Prepare AS (Adaptive Sync) SDP Header */
	sdp->sdp_header.HB0 = 0;
	sdp->sdp_header.HB1 = as_sdp->sdp_type;
	sdp->sdp_header.HB2 = 0x02;
	sdp->sdp_header.HB3 = as_sdp->length;

	/* Fill AS (Adaptive Sync) SDP Payload */
	sdp->db[0] = as_sdp->mode;
	sdp->db[1] = as_sdp->vtotal & 0xFF;
	sdp->db[2] = (as_sdp->vtotal >> 8) & 0xFF;
	/* DB3: target refresh rate bits 7:0, DB4 bits 1:0: bits 9:8 */
	sdp->db[3] = as_sdp->target_rr & 0xFF;
	sdp->db[4] = (as_sdp->target_rr >> 8) & 0x3;

	if (as_sdp->target_rr_divider)
		sdp->db[4] |= 0x20;

	return length;
}

static ssize_t
intel_dp_hdr_metadata_infoframe_sdp_pack(struct intel_display *display,
					 const struct hdmi_drm_infoframe *drm_infoframe,
					 struct dp_sdp *sdp,
					 size_t size)
{
	size_t length = sizeof(struct dp_sdp);
	const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
	unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
	ssize_t len;

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf));
	if (len < 0) {
		drm_dbg_kms(display->drm,
			    "buffer size is smaller than hdr metadata infoframe\n");
		return -ENOSPC;
	}

	if (len != infoframe_size) {
		drm_dbg_kms(display->drm, "wrong static hdr metadata size\n");
		return -ENOSPC;
	}

	/*
	 * Set up the infoframe sdp packet for HDR static metadata.
	 * Prepare VSC Header for SU as per DP 1.4a spec,
	 * Table 2-100 and Table 2-101
	 */

	/* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */
	sdp->sdp_header.HB0 = 0;
	/*
	 * Packet Type 80h + Non-audio INFOFRAME Type value
	 * HDMI_INFOFRAME_TYPE_DRM: 0x87
	 * - 80h + Non-audio INFOFRAME Type value
	 * - InfoFrame Type: 0x07
	 * [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame]
	 */
	sdp->sdp_header.HB1 = drm_infoframe->type;
	/*
	 * Least Significant Eight Bits of (Data Byte Count - 1)
	 * infoframe_size - 1
	 */
	sdp->sdp_header.HB2 = 0x1D;
	/* INFOFRAME SDP Version Number */
	sdp->sdp_header.HB3 = (0x13 << 2);
	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	sdp->db[0] = drm_infoframe->version;
	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	sdp->db[1] = drm_infoframe->length;
	/*
	 * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after
	 * HDMI_INFOFRAME_HEADER_SIZE
	 */
	BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2);
	memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
	       HDMI_DRM_INFOFRAME_SIZE);

	/*
	 * Size of DP infoframe sdp packet for HDR static metadata consists of
	 * - DP SDP Header(struct dp_sdp_header): 4 bytes
	 * - Two Data Blocks: 2 bytes
	 *    CTA Header Byte2 (INFOFRAME Version Number)
	 *    CTA Header Byte3 (Length of INFOFRAME)
	 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes
	 *
	 * Prior to GEN11's GMP register size is identical to DP HDR static metadata
	 * infoframe size. But GEN11+ has larger than that size, write_infoframe
	 * will pad rest of the size.
	 */
	return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE;
}

/*
 * Pack the SDP of the given @type and write it to the hardware, but only
 * if the crtc state has that infoframe type enabled.
 */
static void intel_write_dp_sdp(struct intel_encoder *encoder,
			       const struct intel_crtc_state *crtc_state,
			       unsigned int type)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct dp_sdp sdp = {};
	ssize_t len;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	switch (type) {
	case DP_SDP_VSC:
		len = drm_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp);
		break;
	case HDMI_PACKET_TYPE_GAMUT_METADATA:
		len = intel_dp_hdr_metadata_infoframe_sdp_pack(display,
							       &crtc_state->infoframes.drm.drm,
							       &sdp, sizeof(sdp));
		break;
	case DP_SDP_ADAPTIVE_SYNC:
		len = intel_dp_as_sdp_pack(&crtc_state->infoframes.as_sdp, &sdp,
					   sizeof(sdp));
		break;
	default:
		MISSING_CASE(type);
		return;
	}

	if (drm_WARN_ON(display->drm, len < 0))
		return;

	dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
}

void intel_dp_set_infoframes(struct intel_encoder *encoder,
			     bool enable,
			     const struct intel_crtc_state *crtc_state,
			     const struct drm_connector_state *conn_state)
{
	struct intel_display *display = to_intel_display(encoder);
	i915_reg_t reg = HSW_TVIDEO_DIP_CTL(display, crtc_state->cpu_transcoder);
	u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
		VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
		VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;

	if (HAS_AS_SDP(display))
		dip_enable |= VIDEO_DIP_ENABLE_AS_ADL;

	u32 val = intel_de_read(display, reg) & ~dip_enable;

	/* TODO: Sanitize DSC enabling wrt. intel_dsc_dp_pps_write().
*/ 5253 if (!enable && HAS_DSC(display)) 5254 val &= ~VDIP_ENABLE_PPS; 5255 5256 /* 5257 * This routine disables VSC DIP if the function is called 5258 * to disable SDP or if it does not have PSR 5259 */ 5260 if (!enable || !crtc_state->has_psr) 5261 val &= ~VIDEO_DIP_ENABLE_VSC_HSW; 5262 5263 intel_de_write(display, reg, val); 5264 intel_de_posting_read(display, reg); 5265 5266 if (!enable) 5267 return; 5268 5269 intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC); 5270 intel_write_dp_sdp(encoder, crtc_state, DP_SDP_ADAPTIVE_SYNC); 5271 5272 intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA); 5273 } 5274 5275 static 5276 int intel_dp_as_sdp_unpack(struct drm_dp_as_sdp *as_sdp, 5277 const void *buffer, size_t size) 5278 { 5279 const struct dp_sdp *sdp = buffer; 5280 5281 if (size < sizeof(struct dp_sdp)) 5282 return -EINVAL; 5283 5284 memset(as_sdp, 0, sizeof(*as_sdp)); 5285 5286 if (sdp->sdp_header.HB0 != 0) 5287 return -EINVAL; 5288 5289 if (sdp->sdp_header.HB1 != DP_SDP_ADAPTIVE_SYNC) 5290 return -EINVAL; 5291 5292 if (sdp->sdp_header.HB2 != 0x02) 5293 return -EINVAL; 5294 5295 if ((sdp->sdp_header.HB3 & 0x3F) != 9) 5296 return -EINVAL; 5297 5298 as_sdp->length = sdp->sdp_header.HB3 & DP_ADAPTIVE_SYNC_SDP_LENGTH; 5299 as_sdp->mode = sdp->db[0] & DP_ADAPTIVE_SYNC_SDP_OPERATION_MODE; 5300 as_sdp->vtotal = (sdp->db[2] << 8) | sdp->db[1]; 5301 as_sdp->target_rr = (u64)sdp->db[3] | ((u64)sdp->db[4] & 0x3); 5302 as_sdp->target_rr_divider = sdp->db[4] & 0x20 ? 
true : false; 5303 5304 return 0; 5305 } 5306 5307 static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc, 5308 const void *buffer, size_t size) 5309 { 5310 const struct dp_sdp *sdp = buffer; 5311 5312 if (size < sizeof(struct dp_sdp)) 5313 return -EINVAL; 5314 5315 memset(vsc, 0, sizeof(*vsc)); 5316 5317 if (sdp->sdp_header.HB0 != 0) 5318 return -EINVAL; 5319 5320 if (sdp->sdp_header.HB1 != DP_SDP_VSC) 5321 return -EINVAL; 5322 5323 vsc->sdp_type = sdp->sdp_header.HB1; 5324 vsc->revision = sdp->sdp_header.HB2; 5325 vsc->length = sdp->sdp_header.HB3; 5326 5327 if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) || 5328 (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe) || 5329 (sdp->sdp_header.HB2 == 0x6 && sdp->sdp_header.HB3 == 0x10)) { 5330 /* 5331 * - HB2 = 0x2, HB3 = 0x8 5332 * VSC SDP supporting 3D stereo + PSR 5333 * - HB2 = 0x4, HB3 = 0xe 5334 * VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of 5335 * first scan line of the SU region (applies to eDP v1.4b 5336 * and higher). 5337 * - HB2 = 0x6, HB3 = 0x10 5338 * VSC SDP supporting 3D stereo + Panel Replay. 5339 */ 5340 return 0; 5341 } else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) { 5342 /* 5343 * - HB2 = 0x5, HB3 = 0x13 5344 * VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry 5345 * Format. 
 */
		vsc->pixelformat = (sdp->db[16] >> 4) & 0xf;
		vsc->colorimetry = sdp->db[16] & 0xf;
		vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1;

		/* DB17 bits 2:0 encode the bits-per-channel */
		switch (sdp->db[17] & 0x7) {
		case 0x0:
			vsc->bpc = 6;
			break;
		case 0x1:
			vsc->bpc = 8;
			break;
		case 0x2:
			vsc->bpc = 10;
			break;
		case 0x3:
			vsc->bpc = 12;
			break;
		case 0x4:
			vsc->bpc = 16;
			break;
		default:
			MISSING_CASE(sdp->db[17] & 0x7);
			return -EINVAL;
		}

		vsc->content_type = sdp->db[18] & 0x7;
	} else {
		return -EINVAL;
	}

	return 0;
}

/*
 * Read the Adaptive Sync SDP back from the hardware and unpack it into
 * @as_sdp, if that infoframe type is enabled in the crtc state.
 */
static void
intel_read_dp_as_sdp(struct intel_encoder *encoder,
		     struct intel_crtc_state *crtc_state,
		     struct drm_dp_as_sdp *as_sdp)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	unsigned int type = DP_SDP_ADAPTIVE_SYNC;
	struct dp_sdp sdp = {};
	int ret;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
				 sizeof(sdp));

	ret = intel_dp_as_sdp_unpack(as_sdp, &sdp, sizeof(sdp));
	if (ret)
		drm_dbg_kms(display->drm, "Failed to unpack DP AS SDP\n");
}

/*
 * Validate and unpack an HDR static metadata infoframe SDP into
 * @drm_infoframe. Returns 0 on success, -EINVAL on a malformed SDP.
 */
static int
intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe,
					   const void *buffer, size_t size)
{
	int ret;

	const struct dp_sdp *sdp = buffer;

	if (size < sizeof(struct dp_sdp))
		return -EINVAL;

	if (sdp->sdp_header.HB0 != 0)
		return -EINVAL;

	if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM)
		return -EINVAL;

	/*
	 * Least Significant Eight Bits of (Data Byte Count - 1)
	 * 1Dh (i.e., Data Byte Count = 30 bytes).
	 */
	if (sdp->sdp_header.HB2 != 0x1D)
		return -EINVAL;

	/* Most Significant Two Bits of (Data Byte Count - 1), Clear to 00b. */
	if ((sdp->sdp_header.HB3 & 0x3) != 0)
		return -EINVAL;

	/* INFOFRAME SDP Version Number */
	if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13)
		return -EINVAL;

	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	if (sdp->db[0] != 1)
		return -EINVAL;

	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE)
		return -EINVAL;

	ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2],
					     HDMI_DRM_INFOFRAME_SIZE);

	return ret;
}

/*
 * Read the VSC SDP back from the hardware and unpack it into @vsc, if
 * that infoframe type is enabled in the crtc state.
 */
static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
				  struct intel_crtc_state *crtc_state,
				  struct drm_dp_vsc_sdp *vsc)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	unsigned int type = DP_SDP_VSC;
	struct dp_sdp sdp = {};
	int ret;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp));

	ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp));

	if (ret)
		drm_dbg_kms(display->drm, "Failed to unpack DP VSC SDP\n");
}

/*
 * Read the HDR static metadata infoframe SDP back from the hardware and
 * unpack it into @drm_infoframe, if that infoframe type is enabled.
 */
static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder,
						     struct intel_crtc_state *crtc_state,
						     struct hdmi_drm_infoframe *drm_infoframe)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA;
	struct dp_sdp sdp = {};
	int ret;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
				 sizeof(sdp));

	ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp,
							 sizeof(sdp));

	if (ret)
		drm_dbg_kms(display->drm,
			    "Failed to unpack DP HDR Metadata Infoframe SDP\n");
}

void intel_read_dp_sdp(struct intel_encoder *encoder,
		       struct intel_crtc_state *crtc_state,
		       unsigned int type)
{
	switch (type) {
	case DP_SDP_VSC:
		intel_read_dp_vsc_sdp(encoder, crtc_state,
				      &crtc_state->infoframes.vsc);
		break;
	case HDMI_PACKET_TYPE_GAMUT_METADATA:
		intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state,
							 &crtc_state->infoframes.drm.drm);
		break;
	case DP_SDP_ADAPTIVE_SYNC:
		intel_read_dp_as_sdp(encoder, crtc_state,
				     &crtc_state->infoframes.as_sdp);
		break;
	default:
		MISSING_CASE(type);
		break;
	}
}

/*
 * Check the channel equalization status in @link_status, using the
 * 128b/132b rules for UHBR rates and 8b/10b otherwise.
 */
static bool intel_dp_link_ok(struct intel_dp *intel_dp,
			     u8 link_status[DP_LINK_STATUS_SIZE])
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	bool uhbr = intel_dp->link_rate >= 1000000;
	bool ok;

	if (uhbr)
		ok = drm_dp_128b132b_lane_channel_eq_done(link_status,
							  intel_dp->lane_count);
	else
		ok = drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);

	if (ok)
		return true;

	intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
	drm_dbg_kms(display->drm,
		    "[ENCODER:%d:%s] %s link not ok, retraining\n",
		    encoder->base.base.id, encoder->base.name,
		    uhbr ?
		    "128b/132b" : "8b/10b");

	return false;
}

/*
 * Let the MST topology manager handle the ESI events; CP_IRQ (HDCP) is
 * handled here as it's not an MST topology event.
 */
static void
intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, u8 *ack)
{
	bool handled = false;

	drm_dp_mst_hpd_irq_handle_event(&intel_dp->mst.mgr, esi, ack, &handled);

	if (esi[1] & DP_CP_IRQ) {
		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
		ack[1] |= DP_CP_IRQ;
	}
}

static bool intel_dp_handle_link_service_irq(struct intel_dp *intel_dp, u8 irq_mask);

/**
 * intel_dp_check_mst_status - service any pending MST interrupts, check link status
 * @intel_dp: Intel DP struct
 *
 * Read any pending MST interrupts, call MST core to handle these and ack the
 * interrupts. Check if the main and AUX link state is ok.
 *
 * Returns:
 * - %true if pending interrupts were serviced (or no interrupts were
 *   pending) w/o detecting an error condition.
 * - %false if an error condition - like AUX failure or a loss of link - is
 *   detected, or another condition - like a DP tunnel BW state change - needs
 *   servicing from the hotplug work.
 */
static bool
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	bool force_retrain = intel_dp->link.force_retrain;
	bool reprobe_needed = false;

	/* Loop until no new IRQ events are signalled by the sink. */
	for (;;) {
		u8 esi[4] = {};
		u8 ack[4] = {};
		bool new_irqs;

		if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) {
			drm_dbg_kms(display->drm,
				    "failed to get ESI - device may have failed\n");
			reprobe_needed = true;

			break;
		}

		drm_dbg_kms(display->drm, "DPRX ESI: %4ph\n", esi);

		ack[3] |= esi[3] & INTEL_DP_LINK_SERVICE_IRQ_MASK_MST;

		intel_dp_mst_hpd_irq(intel_dp, esi, ack);

		new_irqs = !mem_is_zero(ack, sizeof(ack));

		drm_WARN_ON(display->drm, ack[1] & ~INTEL_DP_DEVICE_SERVICE_IRQ_MASK_MST);
		drm_WARN_ON(display->drm, ack[3] & ~INTEL_DP_LINK_SERVICE_IRQ_MASK_MST);

		if (new_irqs && !intel_dp_ack_sink_irq_esi(intel_dp, ack))
			drm_dbg_kms(display->drm, "Failed to ack ESI\n");

		if (ack[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY))
			drm_dp_mst_hpd_irq_send_new_request(&intel_dp->mst.mgr);

		if (force_retrain) {
			/* Defer forced retraining to the regular link status check. */
			ack[3] |= LINK_STATUS_CHANGED;
			force_retrain = false;
		}

		if (intel_dp_handle_link_service_irq(intel_dp, ack[3]))
			reprobe_needed = true;

		if (!new_irqs)
			break;
	}

	return !reprobe_needed;
}

/*
 * If the PCON reports the HDMI link as inactive while we think it's
 * trained, disable it on the PCON and restart FRL training (or fall back
 * to TMDS).
 */
static void
intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp)
{
	bool is_active;
	u8 buf = 0;

	is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux);
	if (intel_dp->frl.is_trained && !is_active) {
		if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf) < 0)
			return;

		buf &= ~DP_PCON_ENABLE_HDMI_LINK;
		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf) < 0)
			return;

		drm_dp_pcon_hdmi_frl_link_error_count(&intel_dp->aux, &intel_dp->attached_connector->base);

		intel_dp->frl.is_trained = false;

		/* Restart FRL training or fall back to TMDS mode */
		intel_dp_check_frl_training(intel_dp);
	}
}

/*
 * Read the DPRX link status, using the ESI variant of the registers when
 * MST streams are active. Latches a downstream port status change.
 */
static int
intel_dp_read_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE])
{
	int err;

	memset(link_status, 0, DP_LINK_STATUS_SIZE);

	if (intel_dp_mst_active_streams(intel_dp) > 0)
		err = drm_dp_dpcd_read_data(&intel_dp->aux, DP_LANE0_1_STATUS_ESI,
					    link_status, DP_LINK_STATUS_SIZE - 2);
	else
		err = drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
						       link_status);

	if (err)
		return err;

	if (link_status[DP_LANE_ALIGN_STATUS_UPDATED - DP_LANE0_1_STATUS] &
	    DP_DOWNSTREAM_PORT_STATUS_CHANGED)
		WRITE_ONCE(intel_dp->downstream_port_changed, true);

	return 0;
}

static bool
intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_dp->link.active)
		return false;

	/*
	 * While PSR source HW is enabled, it will control main-link sending
	 *
frames, enabling and disabling it so trying to do a retrain will fail
	 * as the link would or not be on or it could mix training patterns
	 * and frame data at the same time causing retrain to fail.
	 * Also when exiting PSR, HW will retrain the link anyways fixing
	 * any link status error.
	 */
	if (intel_psr_enabled(intel_dp))
		return false;

	if (intel_dp->link.force_retrain)
		return true;

	if (intel_dp_read_link_status(intel_dp, link_status) < 0)
		return false;

	/*
	 * Validate the cached values of intel_dp->link_rate and
	 * intel_dp->lane_count before attempting to retrain.
	 *
	 * FIXME would be nice to user the crtc state here, but since
	 * we need to call this from the short HPD handler that seems
	 * a bit hard.
	 */
	if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
					intel_dp->lane_count))
		return false;

	if (intel_dp->link.retrain_disabled)
		return false;

	if (intel_dp->link.seq_train_failures)
		return true;

	/* Retrain if link not ok */
	return !intel_dp_link_ok(intel_dp, link_status) &&
		!intel_psr_link_ok(intel_dp);
}

/*
 * Return %true if @conn_state's connector is driven by @intel_dp, either
 * via its SST encoder or one of its MST stream encoders.
 */
bool intel_dp_has_connector(struct intel_dp *intel_dp,
			    const struct drm_connector_state *conn_state)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_encoder *encoder;
	enum pipe pipe;

	if (!conn_state->best_encoder)
		return false;

	/* SST */
	encoder = &dp_to_dig_port(intel_dp)->base;
	if (conn_state->best_encoder == &encoder->base)
		return true;

	/* MST */
	for_each_pipe(display, pipe) {
		encoder = &intel_dp->mst.stream_encoders[pipe]->base;
		if (conn_state->best_encoder == &encoder->base)
			return true;
	}

	return false;
}

/*
 * Wait (up to 5 seconds) for a pending commit on the connector to have
 * programmed the hardware. Requires connection_mutex to be held.
 */
static void wait_for_connector_hw_done(const struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	struct intel_display *display = to_intel_display(connector);

	drm_modeset_lock_assert_held(&display->drm->mode_config.connection_mutex);

	if (!conn_state->commit)
		return;

	drm_WARN_ON(display->drm,
		    !wait_for_completion_timeout(&conn_state->commit->hw_done,
						 msecs_to_jiffies(5000)));
}

int intel_dp_get_active_pipes(struct intel_dp *intel_dp,
			      struct drm_modeset_acquire_ctx *ctx,
			      u8 *pipe_mask)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	int ret = 0;

	*pipe_mask = 0;

	drm_connector_list_iter_begin(display->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state =
			connector->base.state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!intel_dp_has_connector(intel_dp, conn_state))
			continue;

		crtc = to_intel_crtc(conn_state->crtc);
		if (!crtc)
			continue;

		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			break;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_WARN_ON(display->drm,
			    !intel_crtc_has_dp_encoder(crtc_state));

		if (!crtc_state->hw.active)
			continue;

		wait_for_connector_hw_done(conn_state);

		*pipe_mask |= BIT(crtc->pipe);
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}

void intel_dp_flush_connector_commits(struct intel_connector *connector)
{
	wait_for_connector_hw_done(connector->base.state);
}

static bool intel_dp_is_connected(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;

	return connector->base.status == connector_status_connected ||
		intel_dp->is_mst;
}

/*
 * Retrain the link on all active pipes driven by @encoder via a modeset
 * commit. May return -EDEADLK, in which case the caller must back off and
 * retry with the same acquire context.
 */
static int intel_dp_retrain_link(struct intel_encoder *encoder,
				 struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	u8 pipe_mask;
	int ret;

	if (!intel_dp_is_connected(intel_dp))
		return 0;

	ret = drm_modeset_lock(&display->drm->mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	if (!intel_dp_needs_link_retrain(intel_dp))
		return 0;

	ret = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask);
	if (ret)
		return ret;

	if (pipe_mask == 0)
		return 0;

	/* Re-check: the link state may have changed while taking the locks. */
	if (!intel_dp_needs_link_retrain(intel_dp))
		return 0;

	drm_dbg_kms(display->drm,
		    "[ENCODER:%d:%s] retraining link (forced %s)\n",
		    encoder->base.base.id, encoder->base.name,
		    str_yes_no(intel_dp->link.force_retrain));

	ret = intel_modeset_commit_pipes(display, pipe_mask, ctx);
	if (ret == -EDEADLK)
		return ret;

	intel_dp->link.force_retrain = false;

	if (ret)
		drm_dbg_kms(display->drm,
			    "[ENCODER:%d:%s] link retraining failed: %pe\n",
			    encoder->base.base.id, encoder->base.name,
			    ERR_PTR(ret));

	return ret;
}

void intel_dp_link_check(struct intel_encoder *encoder)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	intel_modeset_lock_ctx_retry(&ctx, NULL, 0, ret)
		ret = intel_dp_retrain_link(encoder, &ctx);
}

/* Queue the link check work if the link needs to be retrained. */
void intel_dp_check_link_state(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;

	if (!intel_dp_is_connected(intel_dp))
		return;

	if (!intel_dp_needs_link_retrain(intel_dp))
		return;

	intel_encoder_link_check_queue_work(encoder, 0);
}

static void
intel_dp_handle_device_service_irq(struct intel_dp *intel_dp, u8 irq_mask)
{
	struct intel_display *display = to_intel_display(intel_dp);

	drm_WARN_ON(display->drm, irq_mask & ~INTEL_DP_DEVICE_SERVICE_IRQ_MASK_SST);

	if (irq_mask & DP_AUTOMATED_TEST_REQUEST)
		intel_dp_test_request(intel_dp);

	if (irq_mask & DP_CP_IRQ)
		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);

	if (irq_mask & DP_SINK_SPECIFIC_IRQ)
		drm_dbg_kms(display->drm, "Sink specific irq unhandled\n");
}


/*
 * Return %true if a full connector reprobe is required after handling a link
 * service IRQ event.
 */
static bool intel_dp_handle_link_service_irq(struct intel_dp *intel_dp, u8 irq_mask)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	bool reprobe_needed = false;

	drm_WARN_ON(display->drm, irq_mask & ~(INTEL_DP_LINK_SERVICE_IRQ_MASK_SST |
					       INTEL_DP_LINK_SERVICE_IRQ_MASK_MST));

	if (irq_mask & RX_CAP_CHANGED)
		reprobe_needed = true;

	if (irq_mask & LINK_STATUS_CHANGED)
		intel_dp_check_link_state(intel_dp);

	if (irq_mask & HDMI_LINK_STATUS_CHANGED)
		intel_dp_handle_hdmi_link_status_change(intel_dp);

	if (irq_mask & CONNECTED_OFF_ENTRY_REQUESTED)
		drm_dbg_kms(display->drm,
			    "[CONNECTOR:%d:%s][ENCODER:%d:%s] Allowing connected off request\n",
			    connector->base.base.id, connector->base.name,
			    encoder->base.base.id, encoder->base.name);

	if ((irq_mask & DP_TUNNELING_IRQ) &&
	    drm_dp_tunnel_handle_irq(display->dp_tunnel_mgr,
				     &intel_dp->aux))
		reprobe_needed = true;

	return reprobe_needed;
}

/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 *
 * intel_dp_short_pulse - handles short pulse interrupts
 * when full detection is not required.
 * Returns %true if short pulse is handled and full detection
 * is NOT required and %false otherwise.
 */
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
	bool reprobe_needed = false;
	u8 esi[4] = {};

	intel_dp_test_reset(intel_dp);

	if (!intel_dp_get_and_ack_sink_irq_esi_sst(intel_dp, esi))
		return false;

	/*
	 * If the current value of sink count doesn't match with
	 * the value that was stored earlier we need to do full
	 * detection.
	 */
	if (intel_dp_has_sink_count(intel_dp) &&
	    DP_GET_SINK_COUNT(esi[0]) != intel_dp->sink_count)
		/* No need to proceed if we are going to do full detect */
		return false;

	intel_dp_handle_device_service_irq(intel_dp, esi[1]);

	/*
	 * Force checking the link status for DPCD_REV < 1.2
	 * TODO: let the link status check depend on LINK_STATUS_CHANGED
	 * or intel_dp->link.force_retrain for DPCD_REV >= 1.2
	 */
	esi[3] |= LINK_STATUS_CHANGED;
	if (intel_dp_handle_link_service_irq(intel_dp, esi[3]))
		reprobe_needed = true;

	/* Handle CEC interrupts, if any */
	drm_dp_cec_irq(&intel_dp->aux);

	if (READ_ONCE(intel_dp->downstream_port_changed)) {
		WRITE_ONCE(intel_dp->downstream_port_changed, false);
		reprobe_needed = true;
	}

	intel_psr_short_pulse(intel_dp);

	if (intel_alpm_get_error(intel_dp)) {
		intel_alpm_disable(intel_dp);
		intel_dp->alpm.sink_alpm_error = true;
	}

	if (intel_dp_test_short_pulse(intel_dp))
		reprobe_needed = true;

	return !reprobe_needed;
}

/* XXX this is probably wrong for multiple downstream ports */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u8 *dpcd = intel_dp->dpcd;
	u8 type;

	if (drm_WARN_ON(display->drm, intel_dp_is_edp(intel_dp)))
		return connector_status_connected;

	WRITE_ONCE(intel_dp->downstream_port_changed, false);

	intel_lspcon_resume(dig_port);

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	intel_dp->mst_detect = intel_dp_mst_detect(intel_dp);

	/* if there's no downstream port, we're done */
	if (!drm_dp_is_branch(dpcd))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp_has_sink_count(intel_dp) &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		return intel_dp->sink_count ?
			connector_status_connected : connector_status_disconnected;
	}

	if (intel_dp->mst_detect == DRM_DP_MST)
		return connector_status_connected;

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	drm_dbg_kms(display->drm, "Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
/* eDP panels are permanently attached: always report connected */
static enum drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
	return connector_status_connected;
}

/* Take the platform-specific port lock, if the port provides one */
void intel_digital_port_lock(struct intel_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);

	if (dig_port->lock)
		dig_port->lock(dig_port);
}

/* Release the lock taken by intel_digital_port_lock() */
void intel_digital_port_unlock(struct intel_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);

	if (dig_port->unlock)
		dig_port->unlock(dig_port);
}

/*
 * intel_digital_port_connected_locked - is the specified port connected?
 * @encoder: intel_encoder
 *
 * In cases where there's a connector physically connected but it can't be used
 * by our hardware we also return false, since the rest of the driver should
 * pretty much treat the port as disconnected. This is relevant for type-C
 * (starting on ICL) where there's ownership involved.
 *
 * The caller must hold the lock acquired by calling intel_digital_port_lock()
 * when calling this function.
 *
 * Return %true if port is connected, %false otherwise.
 */
bool intel_digital_port_connected_locked(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	bool is_glitch_free = intel_tc_port_handles_hpd_glitches(dig_port);
	bool is_connected = false;

	with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE) {
		/*
		 * Retry the live-state check for a while to ride out HPD
		 * glitches, unless the TC port layer handles them itself.
		 */
		poll_timeout_us(is_connected = dig_port->connected(encoder),
				is_connected || is_glitch_free,
				30, 4000, false);
	}

	return is_connected;
}

/* Locked wrapper around intel_digital_port_connected_locked() */
bool intel_digital_port_connected(struct intel_encoder *encoder)
{
	bool ret;

	intel_digital_port_lock(encoder);
	ret = intel_digital_port_connected_locked(encoder);
	intel_digital_port_unlock(encoder);

	return ret;
}

/*
 * Return a copy of the panel's fixed EDID if one is cached, otherwise read
 * the EDID over DDC. Returns NULL if the cached fixed EDID is an error
 * pointer (invalid EDID).
 */
static const struct drm_edid *
intel_dp_get_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	const struct drm_edid *fixed_edid = connector->panel.fixed_edid;

	/* Use panel fixed edid if we have one */
	if (fixed_edid) {
		/* invalid edid */
		if (IS_ERR(fixed_edid))
			return NULL;

		return drm_edid_dup(fixed_edid);
	}

	return drm_edid_read_ddc(&connector->base, &intel_dp->aux.ddc);
}

/* Refresh the cached downstream facing port (DFP) limits from DPCD + EDID */
static void
intel_dp_update_dfp(struct intel_dp *intel_dp,
		    const struct drm_edid *drm_edid)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;

	intel_dp->dfp.max_bpc =
		drm_dp_downstream_max_bpc(intel_dp->dpcd,
					  intel_dp->downstream_ports, drm_edid);

	intel_dp->dfp.max_dotclock =
		drm_dp_downstream_max_dotclock(intel_dp->dpcd,
					       intel_dp->downstream_ports);

	intel_dp->dfp.min_tmds_clock =
		drm_dp_downstream_min_tmds_clock(intel_dp->dpcd,
						 intel_dp->downstream_ports,
						 drm_edid);
	intel_dp->dfp.max_tmds_clock =
		drm_dp_downstream_max_tmds_clock(intel_dp->dpcd,
						 intel_dp->downstream_ports,
						 drm_edid);

	intel_dp->dfp.pcon_max_frl_bw =
		drm_dp_get_pcon_max_frl_bw(intel_dp->dpcd,
					   intel_dp->downstream_ports);

	drm_dbg_kms(display->drm,
		    "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d, PCON Max FRL BW %dGbps\n",
		    connector->base.base.id, connector->base.name,
		    intel_dp->dfp.max_bpc,
		    intel_dp->dfp.max_dotclock,
		    intel_dp->dfp.min_tmds_clock,
		    intel_dp->dfp.max_tmds_clock,
		    intel_dp->dfp.pcon_max_frl_bw);

	intel_dp_get_pcon_dsc_cap(intel_dp);
}

/*
 * Can this source+sink combination output YCbCr 4:2:0: either directly
 * (non-branch device or 4:2:0 pass-through DFP), or via a DFP conversion
 * from RGB or YCbCr 4:4:4?
 */
static bool
intel_dp_can_ycbcr420(struct intel_dp *intel_dp)
{
	if (source_can_output(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR420) &&
	    (!drm_dp_is_branch(intel_dp->dpcd) || intel_dp->dfp.ycbcr420_passthrough))
		return true;

	if (source_can_output(intel_dp, INTEL_OUTPUT_FORMAT_RGB) &&
	    dfp_can_convert_from_rgb(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR420))
		return true;

	if (source_can_output(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR444) &&
	    dfp_can_convert_from_ycbcr444(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR420))
		return true;

	return false;
}

/* Refresh the cached DFP YCbCr conversion capabilities from DPCD */
static void
intel_dp_update_420(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;

	intel_dp->dfp.ycbcr420_passthrough =
		drm_dp_downstream_420_passthrough(intel_dp->dpcd,
						  intel_dp->downstream_ports);
	/* on-board LSPCON always assumed to support 4:4:4->4:2:0 conversion */
	intel_dp->dfp.ycbcr_444_to_420 =
		intel_lspcon_active(dp_to_dig_port(intel_dp)) ||
		drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd,
							intel_dp->downstream_ports);
	intel_dp->dfp.rgb_to_ycbcr =
		drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
							  intel_dp->downstream_ports,
							  DP_DS_HDMI_BT709_RGB_YCBCR_CONV);

	connector->base.ycbcr_420_allowed = intel_dp_can_ycbcr420(intel_dp);

	drm_dbg_kms(display->drm,
		    "[CONNECTOR:%d:%s] RGB->YcbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
		    connector->base.base.id, connector->base.name,
		    str_yes_no(intel_dp->dfp.rgb_to_ycbcr),
		    str_yes_no(connector->base.ycbcr_420_allowed),
		    str_yes_no(intel_dp->dfp.ycbcr_444_to_420));
}

/*
 * Read the sink's EDID, cache it on the connector and refresh all state
 * derived from it (VRR capability, DFP limits, 4:2:0 support, CEC).
 */
static void
intel_dp_set_edid(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	const struct drm_edid *drm_edid;
	bool vrr_capable;

	/* Drop any stale cached EDID state before reading a fresh one */
	intel_dp_unset_edid(intel_dp);
	drm_edid = intel_dp_get_edid(intel_dp);
	connector->detect_edid = drm_edid;

	/* Below we depend on display info having been updated */
	drm_edid_connector_update(&connector->base, drm_edid);

	vrr_capable = intel_vrr_is_capable(connector);
	drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] VRR capable: %s\n",
		    connector->base.base.id, connector->base.name, str_yes_no(vrr_capable));
	drm_connector_set_vrr_capable_property(&connector->base, vrr_capable);

	intel_dp_update_dfp(intel_dp, drm_edid);
	intel_dp_update_420(intel_dp);

	drm_dp_cec_attach(&intel_dp->aux,
			  connector->base.display_info.source_physical_address);
}

/*
 * Undo intel_dp_set_edid(): free the cached EDID and reset all the state
 * that was derived from it.
 */
static void
intel_dp_unset_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;

	drm_dp_cec_unset_edid(&intel_dp->aux);
	drm_edid_free(connector->detect_edid);
	connector->detect_edid = NULL;

	intel_dp->dfp.max_bpc = 0;
	intel_dp->dfp.max_dotclock = 0;
	intel_dp->dfp.min_tmds_clock = 0;
	intel_dp->dfp.max_tmds_clock = 0;

	intel_dp->dfp.pcon_max_frl_bw = 0;

	intel_dp->dfp.ycbcr_444_to_420 = false;
	connector->base.ycbcr_420_allowed = false;

	drm_connector_set_vrr_capable_property(&connector->base,
					       false);
}

/* Cache whether both source and sink support the Adaptive Sync SDP */
static void
intel_dp_detect_sdp_caps(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);

	intel_dp->as_sdp_supported = HAS_AS_SDP(display) &&
		drm_dp_as_sdp_supported(&intel_dp->aux, intel_dp->dpcd);
}

/*
 * Decide whether AUX accesses need a preceding DPCD probe: never for eDP,
 * always when forced for an external sink, otherwise only for quirky
 * non-MST sinks.
 */
static bool intel_dp_needs_dpcd_probe(struct intel_dp *intel_dp, bool force_on_external)
{
	struct intel_connector *connector = intel_dp->attached_connector;

	if (intel_dp_is_edp(intel_dp))
		return false;

	if (force_on_external)
		return true;

	if (intel_dp->is_mst)
		return false;

	return drm_edid_has_quirk(&connector->base, DRM_EDID_QUIRK_DP_DPCD_PROBE);
}

void intel_dp_dpcd_set_probe(struct intel_dp *intel_dp, bool force_on_external)
{
	drm_dp_dpcd_set_probe(&intel_dp->aux,
			      intel_dp_needs_dpcd_probe(intel_dp, force_on_external));
}

/* Connector ->detect_ctx() hook: full output detection */
static int
intel_dp_detect(struct drm_connector *_connector,
		struct drm_modeset_acquire_ctx *ctx,
		bool force)
{
	struct intel_display *display = to_intel_display(_connector->dev);
	struct intel_connector *connector = to_intel_connector(_connector);
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum drm_connector_status status;
	int ret;

	drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.base.id, connector->base.name);
	drm_WARN_ON(display->drm,
		    !drm_modeset_is_locked(&display->drm->mode_config.connection_mutex));

	if (!intel_display_device_enabled(display))
		return
connector_status_disconnected;

	/* No HW access allowed (yet): report the last known status */
	if (!intel_display_driver_check_access(display))
		return connector->base.status;

	intel_dp_flush_connector_commits(connector);

	intel_pps_vdd_on(intel_dp);

	/* Can't disconnect eDP */
	if (intel_dp_is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(encoder))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status != connector_status_disconnected &&
	    !intel_dp_mst_verify_dpcd_state(intel_dp))
		/*
		 * This requires retrying detection for instance to re-enable
		 * the MST mode that got reset via a long HPD pulse. The retry
		 * will happen either via the hotplug handler's retry logic,
		 * ensured by setting the connector here to SST/disconnected,
		 * or via a userspace connector probing in response to the
		 * hotplug uevent sent when removing the MST connectors.
		 */
		status = connector_status_disconnected;

	if (status == connector_status_disconnected) {
		intel_dp_test_reset(intel_dp);
		/*
		 * FIXME: Resetting these caps here causes state computation
		 * to fail if the connector needs a modeset after sink
		 * disconnect. Move resetting them to where the new sink is
		 * connected.
		 */
		memset(connector->dp.dsc_dpcd, 0, sizeof(connector->dp.dsc_dpcd));
		memset(connector->dp.panel_replay_caps.dpcd, 0,
		       sizeof(connector->dp.panel_replay_caps.dpcd));
		intel_dp->psr.sink_panel_replay_support = false;
		connector->dp.panel_replay_caps.support = false;
		connector->dp.panel_replay_caps.su_support = false;
		connector->dp.panel_replay_caps.dsc_support =
			INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED;

		intel_dp_mst_disconnect(intel_dp);

		intel_dp_tunnel_disconnect(intel_dp);

		goto out_unset_edid;
	}

	intel_dp_init_source_oui(intel_dp);

	ret = intel_dp_tunnel_detect(intel_dp, ctx);
	if (ret == -EDEADLK) {
		/*
		 * Pass -EDEADLK back via the int return of ->detect_ctx(),
		 * so the probe can be retried with the acquire ctx.
		 */
		status = ret;

		goto out_vdd_off;
	}

	/* Tunnel detection reported a change: force a userspace reprobe */
	if (ret == 1)
		connector->base.epoch_counter++;

	if (!intel_dp_is_edp(intel_dp))
		intel_psr_init_dpcd(intel_dp, connector);

	intel_dp_detect_dsc_caps(intel_dp, connector);

	intel_dp_detect_sdp_caps(intel_dp);

	if (intel_dp->reset_link_params) {
		intel_dp_reset_link_params(intel_dp);
		intel_dp->reset_link_params = false;
	}

	intel_dp_mst_configure(intel_dp);

	intel_dp_print_rates(intel_dp);

	if (intel_dp->is_mst) {
		/*
		 * If we are in MST mode then this connector
		 * won't appear connected or have anything
		 * with EDID on it
		 */
		status = connector_status_disconnected;
		goto out_unset_edid;
	}

	/*
	 * Some external monitors do not signal loss of link synchronization
	 * with an IRQ_HPD, so force a link status check.
	 *
	 * TODO: this probably became redundant, so remove it: the link state
	 * is rechecked/recovered now after modesets, where the loss of
	 * synchronization tends to occur.
	 */
	if (!intel_dp_is_edp(intel_dp))
		intel_dp_check_link_state(intel_dp);

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);
	if (intel_dp_is_edp(intel_dp) || connector->detect_edid)
		status = connector_status_connected;

out_unset_edid:
	if (status != connector_status_connected && !intel_dp->is_mst)
		intel_dp_unset_edid(intel_dp);

	intel_dp_dpcd_set_probe(intel_dp, false);

	if (!intel_dp_is_edp(intel_dp))
		drm_dp_set_subconnector_property(&connector->base,
						 status,
						 intel_dp->dpcd,
						 intel_dp->downstream_ports);
out_vdd_off:
	intel_pps_vdd_off(intel_dp);

	return status;
}

/* Connector ->force() hook: refresh the EDID without a live-status check */
static void
intel_dp_force(struct drm_connector *_connector)
{
	struct intel_connector *connector = to_intel_connector(_connector);
	struct intel_display *display = to_intel_display(connector);
	struct intel_dp *intel_dp = intel_attached_dp(connector);

	drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.base.id, connector->base.name);

	if (!intel_display_driver_check_access(display))
		return;

	intel_dp_unset_edid(intel_dp);

	if (connector->base.status != connector_status_connected)
		return;

	intel_dp_set_edid(intel_dp);

	intel_dp_dpcd_set_probe(intel_dp, false);
}

/* Connector ->get_modes() hook */
static int intel_dp_get_modes(struct drm_connector *_connector)
{
	struct intel_display *display = to_intel_display(_connector->dev);
	struct intel_connector *connector = to_intel_connector(_connector);
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	int num_modes;

	/* drm_edid_connector_update() done in ->detect() or ->force() */
	num_modes = drm_edid_connector_add_modes(&connector->base);

	/* Also add fixed mode, which may or may not be present in EDID */
	if (intel_dp_is_edp(intel_dp))
		num_modes += intel_panel_get_modes(connector);

	if (num_modes)
		return num_modes;

	/* No EDID modes: fall back to a downstream port default mode, if any */
	if (!connector->detect_edid) {
		struct drm_display_mode *mode;

		mode = drm_dp_downstream_mode(display->drm,
					      intel_dp->dpcd,
					      intel_dp->downstream_ports);
		if (mode) {
			drm_mode_probed_add(&connector->base, mode);
			num_modes++;
		}
	}

	return num_modes;
}

/*
 * Connector ->late_register() hook: register the AUX channel, CEC adapter
 * and, for LSPCON, detect HDR capability.
 */
static int
intel_dp_connector_register(struct drm_connector *_connector)
{
	struct intel_connector *connector = to_intel_connector(_connector);
	struct intel_display *display = to_intel_display(connector);
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	int ret;

	ret = intel_connector_register(&connector->base);
	if (ret)
		return ret;

	drm_dbg_kms(display->drm, "registering %s bus for %s\n",
		    intel_dp->aux.name, connector->base.kdev->kobj.name);

	intel_dp->aux.dev = connector->base.kdev;
	ret = drm_dp_aux_register(&intel_dp->aux);
	if (!ret)
		drm_dp_cec_register_connector(&intel_dp->aux, &connector->base);

	/* The rest only applies to on-board LSPCON devices */
	if (!intel_bios_encoder_is_lspcon(dig_port->base.devdata))
		return ret;

	/*
	 * ToDo: Clean this up to handle lspcon init and resume more
	 * efficiently and streamlined.
	 */
	if (intel_lspcon_init(dig_port)) {
		if (intel_lspcon_detect_hdr_capability(dig_port))
			drm_connector_attach_hdr_output_metadata_property(&connector->base);
	}

	return ret;
}

/*
 * Connector ->early_unregister() hook: tear down in the reverse order of
 * intel_dp_connector_register().
 */
static void
intel_dp_connector_unregister(struct drm_connector *_connector)
{
	struct intel_connector *connector = to_intel_connector(_connector);
	struct intel_dp *intel_dp = intel_attached_dp(connector);

	drm_dp_cec_unregister_connector(&intel_dp->aux);
	drm_dp_aux_unregister(&intel_dp->aux);
	intel_connector_unregister(&connector->base);
}

/*
 * Sync the connector's DSC decompression flag with the given CRTC state;
 * warn if compression is enabled without a decompression AUX target.
 */
void intel_dp_connector_sync_state(struct intel_connector *connector,
				   const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(connector);

	if (crtc_state && crtc_state->dsc.compression_enable) {
		drm_WARN_ON(display->drm,
			    !connector->dp.dsc_decompression_aux);
		connector->dp.dsc_decompression_enabled = true;
	} else {
		connector->dp.dsc_decompression_enabled = false;
	}
}

/* Encoder teardown: flush pending work and release DP resources */
void intel_dp_encoder_flush_work(struct drm_encoder *_encoder)
{
	struct intel_encoder *encoder = to_intel_encoder(_encoder);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &dig_port->dp;

	intel_encoder_link_check_flush_work(encoder);

	intel_dp_mst_encoder_cleanup(dig_port);

	intel_dp_tunnel_destroy(intel_dp);

	intel_pps_vdd_off_sync(intel_dp);

	/*
	 * Ensure power off delay is respected on module remove, so that we can
	 * reduce delays at driver probe. See pps_init_timestamps().
	 */
	intel_pps_wait_power_cycle(intel_dp);

	intel_dp_aux_fini(intel_dp);
}

void intel_dp_encoder_suspend(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	intel_pps_vdd_off_sync(intel_dp);

	intel_dp_tunnel_suspend(intel_dp);
}

void intel_dp_encoder_shutdown(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	/* Let the panel power sequencer finish its power-off cycle */
	intel_pps_wait_power_cycle(intel_dp);
}

/*
 * Mark every CRTC driving a connector of the given tile group for a
 * modeset, pulling the affected connector and plane state into @state.
 */
static int intel_modeset_tile_group(struct intel_atomic_state *state,
				    int tile_group_id)
{
	struct intel_display *display = to_intel_display(state);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	int ret = 0;

	drm_connector_list_iter_begin(display->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!connector->base.has_tile ||
		    connector->base.tile_group->id != tile_group_id)
			continue;

		conn_state = drm_atomic_get_connector_state(&state->base,
							    &connector->base);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			break;
		}

		crtc = to_intel_crtc(conn_state->crtc);

		/* Connector not currently bound to a CRTC: nothing to modeset */
		if (!crtc)
			continue;

		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			break;
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}

/*
 * Mark every enabled CRTC whose transcoder is in the @transcoders mask for
 * a modeset, pulling the affected connectors and planes into @state.
 */
static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc *crtc;

	if (transcoders == 0)
		return 0;

	for_each_intel_crtc(display->drm, crtc) {
		struct intel_crtc_state *crtc_state;
		int ret;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->hw.enable)
			continue;

		if (!(transcoders & BIT(crtc_state->cpu_transcoder)))
			continue;

		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			return ret;

		/* Each transcoder is expected to match at most one CRTC */
		transcoders &= ~BIT(crtc_state->cpu_transcoder);
	}

	/* Every requested transcoder should have been found above */
	drm_WARN_ON(display->drm, transcoders != 0);

	return 0;
}

/*
 * Force a modeset on all CRTCs synced with the connector's old CRTC:
 * its sync-mode slave transcoders and its master transcoder, if any.
 */
static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
				      struct drm_connector *_connector)
{
	struct intel_connector *connector = to_intel_connector(_connector);
	const struct drm_connector_state *old_conn_state =
		drm_atomic_get_old_connector_state(&state->base, &connector->base);
	const struct intel_crtc_state *old_crtc_state;
	struct intel_crtc *crtc;
	u8 transcoders;

	crtc = to_intel_crtc(old_conn_state->crtc);
	if (!crtc)
		return 0;

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);

	if (!old_crtc_state->hw.active)
		return 0;

	transcoders = old_crtc_state->sync_mode_slaves_mask;
	if (old_crtc_state->master_transcoder != INVALID_TRANSCODER)
		transcoders |= BIT(old_crtc_state->master_transcoder);

	return intel_modeset_affected_transcoders(state,
						  transcoders);
}

/* Connector ->atomic_check() hook */
static int intel_dp_connector_atomic_check(struct drm_connector *_connector,
					   struct drm_atomic_state *_state)
{
	struct intel_connector *connector = to_intel_connector(_connector);
	struct intel_display *display = to_intel_display(connector);
	struct intel_atomic_state
*state = to_intel_atomic_state(_state);
	struct drm_connector_state *conn_state =
		drm_atomic_get_new_connector_state(_state, &connector->base);
	struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
	int ret;

	ret = intel_digital_connector_atomic_check(&connector->base, &state->base);
	if (ret)
		return ret;

	if (intel_dp_mst_source_support(intel_dp)) {
		ret = drm_dp_mst_root_conn_atomic_check(conn_state, &intel_dp->mst.mgr);
		if (ret)
			return ret;
	}

	/* The checks below only matter when this connector needs a modeset */
	if (!intel_connector_needs_modeset(state, &connector->base))
		return 0;

	ret = intel_dp_tunnel_atomic_check_state(state,
						 intel_dp,
						 connector);
	if (ret)
		return ret;

	/*
	 * We don't enable port sync on BDW due to missing w/as and
	 * due to not having adjusted the modeset sequence appropriately.
	 */
	if (DISPLAY_VER(display) < 9)
		return 0;

	/* Tiled displays: modeset all CRTCs in the same tile group together */
	if (connector->base.has_tile) {
		ret = intel_modeset_tile_group(state, connector->base.tile_group->id);
		if (ret)
			return ret;
	}

	return intel_modeset_synced_crtcs(state, &connector->base);
}

/*
 * Connector ->oob_hotplug_event() hook: fold an out-of-band HPD
 * notification into the hotplug machinery.
 */
static void intel_dp_oob_hotplug_event(struct drm_connector *_connector,
				       enum drm_connector_status hpd_state)
{
	struct intel_connector *connector = to_intel_connector(_connector);
	struct intel_display *display = to_intel_display(connector);
	struct intel_encoder *encoder = intel_attached_encoder(connector);
	bool hpd_high = hpd_state == connector_status_connected;
	unsigned int hpd_pin = encoder->hpd_pin;
	bool need_work = false;

	/* Only schedule detection if the OOB HPD state actually changed */
	spin_lock_irq(&display->irq.lock);
	if (hpd_high != test_bit(hpd_pin, &display->hotplug.oob_hotplug_last_state)) {
		display->hotplug.event_bits |= BIT(hpd_pin);

		__assign_bit(hpd_pin,
			     &display->hotplug.oob_hotplug_last_state,
			     hpd_high);
		need_work = true;
	}
	spin_unlock_irq(&display->irq.lock);

	/* Schedule outside the spinlock */
	if (need_work)
		intel_hpd_schedule_detection(display);
}

static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_digital_connector_atomic_get_property,
	.atomic_set_property = intel_digital_connector_atomic_set_property,
	.late_register = intel_dp_connector_register,
	.early_unregister = intel_dp_connector_unregister,
	.destroy = intel_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
	.oob_hotplug_event = intel_dp_oob_hotplug_event,
};

static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.detect_ctx = intel_dp_detect,
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.atomic_check = intel_dp_connector_atomic_check,
};

/*
 * Handle a long/short HPD pulse on this port. Returns %IRQ_HANDLED when
 * the pulse was fully handled here, %IRQ_NONE when further (full)
 * detection is needed.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
	struct intel_display *display = to_intel_display(dig_port);
	struct intel_dp *intel_dp = &dig_port->dp;
	u8 dpcd[DP_RECEIVER_CAP_SIZE];

	if (dig_port->base.type == INTEL_OUTPUT_EDP &&
	    (long_hpd ||
	     intel_display_rpm_suspended(display) ||
	     !intel_pps_have_panel_power_or_vdd(intel_dp))) {
		/*
		 * vdd off can generate a long/short pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		drm_dbg_kms(display->drm,
			    "ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
			    long_hpd ?
"long" : "short",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		return IRQ_HANDLED;
	}

	drm_dbg_kms(display->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name,
		    long_hpd ? "long" : "short");

	/*
	 * TBT DP tunnels require the GFX driver to read out the DPRX caps in
	 * response to long HPD pulses. The DP hotplug handler does that,
	 * however the hotplug handler may be blocked by another
	 * connector's/encoder's hotplug handler. Since the TBT CM may not
	 * complete the DP tunnel BW request for the latter connector/encoder
	 * waiting for this encoder's DPRX read, perform a dummy read here.
	 */
	if (long_hpd) {
		intel_dp_dpcd_set_probe(intel_dp, true);

		intel_dp_read_dprx_caps(intel_dp, dpcd);

		intel_dp->reset_link_params = true;
		intel_dp_invalidate_source_oui(intel_dp);

		/* IRQ_NONE: the full handling happens via the hotplug handler */
		return IRQ_NONE;
	}

	if (intel_dp->is_mst) {
		if (!intel_dp_check_mst_status(intel_dp))
			return IRQ_NONE;
	} else if (!intel_dp_short_pulse(intel_dp)) {
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

/* Does this port drive an eDP panel, according to platform and VBT? */
static bool _intel_dp_is_port_edp(struct intel_display *display,
				  const struct intel_bios_encoder_data *devdata,
				  enum port port)
{
	/*
	 * eDP not supported on g4x. so bail out early just
	 * for a bit extra safety in case the VBT is bonkers.
	 */
	if (DISPLAY_VER(display) < 5)
		return false;

	/* Pre-gen9: port A is treated as eDP regardless of VBT */
	if (DISPLAY_VER(display) < 9 && port == PORT_A)
		return true;

	return devdata && intel_bios_encoder_supports_edp(devdata);
}

bool intel_dp_is_port_edp(struct intel_display *display, enum port port)
{
	const struct intel_bios_encoder_data *devdata =
		intel_bios_encoder_data_lookup(display, port);

	return _intel_dp_is_port_edp(display, devdata, port);
}

/* Can this encoder emit the gamut metadata (HDR) DIP? */
bool
intel_dp_has_gamut_metadata_dip(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);
	enum port port = encoder->port;

	if (intel_bios_encoder_is_lspcon(encoder->devdata))
		return false;

	if (DISPLAY_VER(display) >= 11)
		return true;

	if (port == PORT_A)
		return false;

	if (display->platform.haswell || display->platform.broadwell ||
	    DISPLAY_VER(display) >= 9)
		return true;

	return false;
}

/* Attach the DRM properties common to all DP connectors */
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *_connector)
{
	struct intel_connector *connector = to_intel_connector(_connector);
	struct intel_display *display = to_intel_display(intel_dp);
	enum port port = dp_to_dig_port(intel_dp)->base.port;

	if (!intel_dp_is_edp(intel_dp))
		drm_connector_attach_dp_subconnector_property(&connector->base);

	if (!display->platform.g4x && port != PORT_A)
		intel_attach_force_audio_property(&connector->base);

	intel_attach_broadcast_rgb_property(&connector->base);
	if (HAS_GMCH(display))
		drm_connector_attach_max_bpc_property(&connector->base, 6, 10);
	else if (DISPLAY_VER(display) >= 5)
		drm_connector_attach_max_bpc_property(&connector->base, 6, 12);

	/* Register HDMI colorspace for case of lspcon */
	if (intel_bios_encoder_is_lspcon(dp_to_dig_port(intel_dp)->base.devdata)) {
		drm_connector_attach_content_type_property(&connector->base);
		intel_attach_hdmi_colorspace_property(&connector->base);
	} else {
		intel_attach_dp_colorspace_property(&connector->base);
	}

	if (intel_dp_has_gamut_metadata_dip(&dp_to_dig_port(intel_dp)->base))
		drm_connector_attach_hdr_output_metadata_property(&connector->base);

	if (HAS_VRR(display))
		drm_connector_attach_vrr_capable_property(&connector->base);
}

/* Attach the eDP-only DRM properties (scaling mode, panel orientation) */
static void
intel_edp_add_properties(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	const struct drm_display_mode *fixed_mode =
		intel_panel_preferred_fixed_mode(connector);

	intel_attach_scaling_mode_property(&connector->base);

	/* Honor any VBT-specified panel orientation (with quirk handling) */
	drm_connector_set_panel_orientation_with_quirk(&connector->base,
						       display->vbt.orientation,
						       fixed_mode->hdisplay,
						       fixed_mode->vdisplay);
}

static void intel_edp_backlight_setup(struct intel_dp *intel_dp,
				      struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum pipe pipe = INVALID_PIPE;

	/* VLV/CHV: the PPS state determines the initial backlight pipe */
	if (display->platform.valleyview || display->platform.cherryview)
		pipe = vlv_pps_backlight_initial_pipe(intel_dp);

	intel_backlight_setup(connector, pipe);
}

/*
 * Initialize the eDP side of the connector (PPS, DPCD/EDID caching,
 * fixed modes). Returns %false if eDP must be disabled on this port.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_display_mode *fixed_mode;
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	bool has_dpcd;
	const struct drm_edid *drm_edid;

	/* Nothing to do on non-eDP ports */
	if (!intel_dp_is_edp(intel_dp))
		return true;

	/*
	 * On IBX/CPT we may get here with LVDS already registered.
Since the 7008 * driver uses the only internal power sequencer available for both 7009 * eDP and LVDS bail out early in this case to prevent interfering 7010 * with an already powered-on LVDS power sequencer. 7011 */ 7012 if (intel_get_lvds_encoder(display)) { 7013 drm_WARN_ON(display->drm, 7014 !(HAS_PCH_IBX(display) || HAS_PCH_CPT(display))); 7015 drm_info(display->drm, 7016 "LVDS was detected, not registering eDP\n"); 7017 7018 return false; 7019 } 7020 7021 intel_bios_init_panel_early(display, &connector->panel, 7022 encoder->devdata); 7023 7024 if (!intel_pps_init(intel_dp)) { 7025 drm_info(display->drm, 7026 "[ENCODER:%d:%s] unusable PPS, disabling eDP\n", 7027 encoder->base.base.id, encoder->base.name); 7028 /* 7029 * The BIOS may have still enabled VDD on the PPS even 7030 * though it's unusable. Make sure we turn it back off 7031 * and to release the power domain references/etc. 7032 */ 7033 goto out_vdd_off; 7034 } 7035 7036 /* 7037 * Enable HPD sense for live status check. 7038 * intel_hpd_irq_setup() will turn it off again 7039 * if it's no longer needed later. 7040 * 7041 * The DPCD probe below will make sure VDD is on. 7042 */ 7043 intel_hpd_enable_detection(encoder); 7044 7045 intel_alpm_init(intel_dp); 7046 7047 /* Cache DPCD and EDID for edp. */ 7048 has_dpcd = intel_edp_init_dpcd(intel_dp, connector); 7049 7050 if (!has_dpcd) { 7051 /* if this fails, presume the device is a ghost */ 7052 drm_info(display->drm, 7053 "[ENCODER:%d:%s] failed to retrieve link info, disabling eDP\n", 7054 encoder->base.base.id, encoder->base.name); 7055 goto out_vdd_off; 7056 } 7057 7058 /* 7059 * VBT and straps are liars. Also check HPD as that seems 7060 * to be the most reliable piece of information available. 7061 * 7062 * ... expect on devices that forgot to hook HPD up for eDP 7063 * (eg. Acer Chromebook C710), so we'll check it only if multiple 7064 * ports are attempting to use the same AUX CH, according to VBT. 
7065 */ 7066 if (intel_bios_dp_has_shared_aux_ch(encoder->devdata)) { 7067 /* 7068 * If this fails, presume the DPCD answer came 7069 * from some other port using the same AUX CH. 7070 * 7071 * FIXME maybe cleaner to check this before the 7072 * DPCD read? Would need sort out the VDD handling... 7073 */ 7074 if (!intel_digital_port_connected(encoder)) { 7075 drm_info(display->drm, 7076 "[ENCODER:%d:%s] HPD is down, disabling eDP\n", 7077 encoder->base.base.id, encoder->base.name); 7078 goto out_vdd_off; 7079 } 7080 7081 /* 7082 * Unfortunately even the HPD based detection fails on 7083 * eg. Asus B360M-A (CFL+CNP), so as a last resort fall 7084 * back to checking for a VGA branch device. Only do this 7085 * on known affected platforms to minimize false positives. 7086 */ 7087 if (DISPLAY_VER(display) == 9 && drm_dp_is_branch(intel_dp->dpcd) && 7088 (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) == 7089 DP_DWN_STRM_PORT_TYPE_ANALOG) { 7090 drm_info(display->drm, 7091 "[ENCODER:%d:%s] VGA converter detected, disabling eDP\n", 7092 encoder->base.base.id, encoder->base.name); 7093 goto out_vdd_off; 7094 } 7095 } 7096 7097 mutex_lock(&display->drm->mode_config.mutex); 7098 drm_edid = drm_edid_read_ddc(&connector->base, connector->base.ddc); 7099 if (!drm_edid) { 7100 /* Fallback to EDID from ACPI OpRegion, if any */ 7101 drm_edid = intel_opregion_get_edid(connector); 7102 if (drm_edid) 7103 drm_dbg_kms(display->drm, 7104 "[CONNECTOR:%d:%s] Using OpRegion EDID\n", 7105 connector->base.base.id, connector->base.name); 7106 } 7107 if (drm_edid) { 7108 if (drm_edid_connector_update(&connector->base, drm_edid) || 7109 !drm_edid_connector_add_modes(&connector->base)) { 7110 drm_edid_connector_update(&connector->base, NULL); 7111 drm_edid_free(drm_edid); 7112 drm_edid = ERR_PTR(-EINVAL); 7113 } 7114 } else { 7115 drm_edid = ERR_PTR(-ENOENT); 7116 } 7117 7118 intel_bios_init_panel_late(display, &connector->panel, encoder->devdata, 7119 IS_ERR(drm_edid) 
? NULL : drm_edid); 7120 7121 intel_panel_add_edid_fixed_modes(connector, true); 7122 7123 /* MSO requires information from the EDID */ 7124 intel_edp_mso_init(intel_dp); 7125 7126 /* multiply the mode clock and horizontal timings for MSO */ 7127 list_for_each_entry(fixed_mode, &connector->panel.fixed_modes, head) 7128 intel_edp_mso_mode_fixup(connector, fixed_mode); 7129 7130 /* fallback to VBT if available for eDP */ 7131 if (!intel_panel_preferred_fixed_mode(connector)) 7132 intel_panel_add_vbt_lfp_fixed_mode(connector); 7133 7134 mutex_unlock(&display->drm->mode_config.mutex); 7135 7136 if (!intel_panel_preferred_fixed_mode(connector)) { 7137 drm_info(display->drm, 7138 "[ENCODER:%d:%s] failed to find fixed mode for the panel, disabling eDP\n", 7139 encoder->base.base.id, encoder->base.name); 7140 goto out_vdd_off; 7141 } 7142 7143 intel_panel_init(connector, drm_edid); 7144 7145 intel_edp_backlight_setup(intel_dp, connector); 7146 7147 intel_edp_add_properties(intel_dp); 7148 7149 intel_pps_init_late(intel_dp); 7150 7151 return true; 7152 7153 out_vdd_off: 7154 intel_pps_vdd_off_sync(intel_dp); 7155 intel_bios_fini_panel(&connector->panel); 7156 7157 return false; 7158 } 7159 7160 bool 7161 intel_dp_init_connector(struct intel_digital_port *dig_port, 7162 struct intel_connector *connector) 7163 { 7164 struct intel_display *display = to_intel_display(dig_port); 7165 struct intel_dp *intel_dp = &dig_port->dp; 7166 struct intel_encoder *encoder = &dig_port->base; 7167 struct drm_device *dev = encoder->base.dev; 7168 enum port port = encoder->port; 7169 int type; 7170 7171 if (drm_WARN(dev, dig_port->max_lanes < 1, 7172 "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n", 7173 dig_port->max_lanes, encoder->base.base.id, 7174 encoder->base.name)) 7175 return false; 7176 7177 intel_dp->reset_link_params = true; 7178 7179 /* Preserve the current hw state. 
	 */
	intel_dp->DP = intel_de_read(display, intel_dp->output_reg);
	intel_dp->attached_connector = connector;

	if (_intel_dp_is_port_edp(display, encoder->devdata, port)) {
		/*
		 * Currently we don't support eDP on TypeC ports for DISPLAY_VER < 30,
		 * although in theory it could work on TypeC legacy ports.
		 */
		drm_WARN_ON(dev, intel_encoder_is_tc(encoder) &&
			    DISPLAY_VER(display) < 30);
		type = DRM_MODE_CONNECTOR_eDP;
		encoder->type = INTEL_OUTPUT_EDP;

		/* eDP only on port B and/or C on vlv/chv */
		if (drm_WARN_ON(dev, (display->platform.valleyview ||
				      display->platform.cherryview) &&
				     port != PORT_B && port != PORT_C))
			return false;
	} else {
		type = DRM_MODE_CONNECTOR_DisplayPort;
	}

	intel_dp_set_default_sink_rates(intel_dp);
	intel_dp_set_default_max_sink_lane_count(intel_dp);

	/* VLV/CHV need their PPS pipe picked before any AUX/PPS access */
	if (display->platform.valleyview || display->platform.cherryview)
		vlv_pps_pipe_init(intel_dp);

	intel_dp_aux_init(intel_dp);
	connector->dp.dsc_decompression_aux = &intel_dp->aux;

	drm_dbg_kms(display->drm,
		    "Adding %s connector on [ENCODER:%d:%s]\n",
		    type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
		    encoder->base.base.id, encoder->base.name);

	drm_connector_init_with_ddc(dev, &connector->base, &intel_dp_connector_funcs,
				    type, &intel_dp->aux.ddc);
	drm_connector_helper_add(&connector->base, &intel_dp_connector_helper_funcs);

	if (!HAS_GMCH(display) && DISPLAY_VER(display) < 12)
		connector->base.interlace_allowed = true;

	/* eDP panels are always connected; only external DP ports poll HPD */
	if (type != DRM_MODE_CONNECTOR_eDP)
		connector->polled = DRM_CONNECTOR_POLL_HPD;
	connector->base.polled = connector->polled;

	intel_connector_attach_encoder(connector, encoder);

	if (HAS_DDI(display))
		connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		connector->get_hw_state = intel_connector_get_hw_state;
	connector->sync_state = intel_dp_connector_sync_state;

	if (!intel_edp_init_connector(intel_dp, connector)) {
		intel_dp_aux_fini(intel_dp);
		goto fail;
	}

	intel_dp_set_source_rates(intel_dp);
	intel_dp_set_common_rates(intel_dp);
	intel_dp_reset_link_params(intel_dp);

	/* init MST on ports that can support it */
	intel_dp_mst_encoder_init(dig_port, connector->base.base.id);

	intel_dp_add_properties(intel_dp, &connector->base);

	/* HDCP is handled by the panel/sink on eDP, so skip it there */
	if (is_hdcp_supported(display, port) && !intel_dp_is_edp(intel_dp)) {
		int ret = intel_dp_hdcp_init(dig_port, connector);

		if (ret)
			drm_dbg_kms(display->drm,
				    "HDCP init failed, skipping.\n");
	}

	/* No HDMI FRL training has been done yet */
	intel_dp->frl.is_trained = false;
	intel_dp->frl.trained_rate_gbps = 0;

	intel_psr_init(intel_dp);

	return true;

fail:
	intel_display_power_flush_work(display);
	drm_connector_cleanup(&connector->base);

	return false;
}

/*
 * Suspend the MST topology managers of all MST-capable DDI encoders
 * whose sink is currently operating in MST mode.
 */
void intel_dp_mst_suspend(struct intel_display *display)
{
	struct intel_encoder *encoder;

	if (!HAS_DISPLAY(display))
		return;

	for_each_intel_encoder(display->drm, encoder) {
		struct intel_dp
			*intel_dp;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(encoder);

		if (!intel_dp_mst_source_support(intel_dp))
			continue;

		if (intel_dp->is_mst)
			drm_dp_mst_topology_mgr_suspend(&intel_dp->mst.mgr);
	}
}

/*
 * Resume the MST topology managers of all MST-capable DDI encoders.
 * If a topology cannot be resumed (e.g. the sink went away), drop
 * that port back to SST mode.
 */
void intel_dp_mst_resume(struct intel_display *display)
{
	struct intel_encoder *encoder;

	if (!HAS_DISPLAY(display))
		return;

	for_each_intel_encoder(display->drm, encoder) {
		struct intel_dp *intel_dp;
		int ret;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(encoder);

		if (!intel_dp_mst_source_support(intel_dp))
			continue;

		ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst.mgr, true);
		if (ret) {
			/* Topology is gone; fall back to SST on this port */
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst.mgr,
							false);
		}
	}
}

/*
 * Verify that the vblank guardband is long enough to transmit all the
 * SDPs enabled in the crtc state. Returns -EINVAL if it is not.
 */
static
int intel_dp_sdp_compute_config_late(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	int guardband = intel_crtc_vblank_length(crtc_state);
	int min_sdp_guardband = intel_dp_sdp_min_guardband(crtc_state, false);

	if (guardband < min_sdp_guardband) {
		drm_dbg_kms(display->drm, "guardband %d < min sdp guardband %d\n",
			    guardband, min_sdp_guardband);
		return -EINVAL;
	}

	return 0;
}

/*
 * Late compute_config step for DP: PSR, SDP guardband and LOBF checks
 * that depend on the otherwise finalized crtc state.
 */
int intel_dp_compute_config_late(struct intel_encoder *encoder,
				 struct intel_crtc_state *crtc_state,
				 struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	int ret;

	intel_psr_compute_config_late(intel_dp, crtc_state);

	ret = intel_dp_sdp_compute_config_late(crtc_state);
	if (ret)
		return ret;

	intel_alpm_lobf_compute_config_late(intel_dp, crtc_state);

	return 0;
}

/*
 * Number of scanlines of vblank guardband required to transmit the
 * given SDP type. Returns 0 for types that impose no requirement.
 */
static
int intel_dp_get_lines_for_sdp(const struct intel_crtc_state *crtc_state, u32 type)
{
	switch (type) {
	case DP_SDP_VSC_EXT_VESA:
	case DP_SDP_VSC_EXT_CEA:
		return 10;
	case HDMI_PACKET_TYPE_GAMUT_METADATA:
		return 8;
	case DP_SDP_PPS:
		return 7;
	case DP_SDP_ADAPTIVE_SYNC:
		/* AS SDP must go out before vsync start */
		return crtc_state->vrr.vsync_start + 1;
	default:
		break;
	}

	return 0;
}

/*
 * Minimum vblank guardband (in scanlines) needed for the SDPs that are
 * (or, with @assume_all_enabled, could be) enabled in @crtc_state.
 */
int intel_dp_sdp_min_guardband(const struct intel_crtc_state *crtc_state,
			       bool assume_all_enabled)
{
	struct intel_display *display = to_intel_display(crtc_state);
	int sdp_guardband = 0;

	if (assume_all_enabled ||
	    crtc_state->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
		sdp_guardband = max(sdp_guardband,
				    intel_dp_get_lines_for_sdp(crtc_state,
							       HDMI_PACKET_TYPE_GAMUT_METADATA));

	if (assume_all_enabled ||
	    crtc_state->dsc.compression_enable)
		sdp_guardband = max(sdp_guardband,
				    intel_dp_get_lines_for_sdp(crtc_state, DP_SDP_PPS));

	if ((assume_all_enabled && HAS_AS_SDP(display)) ||
	    crtc_state->infoframes.enable & intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC))
		sdp_guardband = max(sdp_guardband,
				    intel_dp_get_lines_for_sdp(crtc_state, DP_SDP_ADAPTIVE_SYNC));

	return sdp_guardband;
}

/*
 * Check whether @connector can drive @hdisplay with @num_joined_pipes
 * joined pipes: the sink must support joining, the per-pipe hdisplay
 * limit must be respected, and any debugfs-forced pipe count honoured.
 */
bool intel_dp_joiner_candidate_valid(struct intel_connector *connector,
				     int hdisplay,
				     int num_joined_pipes)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_dp *intel_dp = intel_attached_dp(connector);

	if (!intel_dp_can_join(intel_dp, num_joined_pipes))
		return false;

	if (hdisplay > num_joined_pipes * intel_dp_max_hdisplay_per_pipe(display))
		return false;

	if (connector->force_joined_pipes && connector->force_joined_pipes != num_joined_pipes)
		return false;

	return true;
}