// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/debugfs.h>
#include <linux/iopoll.h>

#include <drm/drm_print.h>

#include "g4x_dp.h"
#include "intel_de.h"
#include "intel_display_jiffies.h"
#include "intel_display_power_well.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_lvds.h"
#include "intel_lvds_regs.h"
#include "intel_pps.h"
#include "intel_pps_regs.h"
#include "intel_quirks.h"

static void vlv_steal_power_sequencer(struct intel_display *display,
				      enum pipe pipe);

static void pps_init_delays(struct intel_dp *intel_dp);
static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd);

/*
 * Return a human-readable name for the PPS instance currently associated
 * with @intel_dp, for use in debug/error messages. On VLV/CHV the PPS is
 * identified by the pipe it is tied to (vlv_pps_pipe), elsewhere by the
 * PPS index (pps_idx). Never returns NULL.
 */
static const char *pps_name(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_pps *pps = &intel_dp->pps;

	if (display->platform.valleyview || display->platform.cherryview) {
		switch (pps->vlv_pps_pipe) {
		case INVALID_PIPE:
			/*
			 * FIXME would be nice if we can guarantee
			 * to always have a valid PPS when calling this.
			 */
			return "PPS <none>";
		case PIPE_A:
			return "PPS A";
		case PIPE_B:
			return "PPS B";
		default:
			MISSING_CASE(pps->vlv_pps_pipe);
			break;
		}
	} else {
		switch (pps->pps_idx) {
		case 0:
			return "PPS 0";
		case 1:
			return "PPS 1";
		default:
			MISSING_CASE(pps->pps_idx);
			break;
		}
	}

	return "PPS <invalid>";
}

/*
 * Acquire the PPS mutex, taking a display-core power domain reference
 * first (in that order -- see vlv_pps_reset_all() for why the power
 * reference must be grabbed while _not_ holding pps_mutex).
 * Returns the wakeref to be passed back to intel_pps_unlock().
 */
struct ref_tracker *intel_pps_lock(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct ref_tracker *wakeref;

	/*
	 * See vlv_pps_reset_all() why we need a power domain reference here.
	 */
	wakeref = intel_display_power_get(display, POWER_DOMAIN_DISPLAY_CORE);
	mutex_lock(&display->pps.mutex);

	return wakeref;
}

/*
 * Counterpart of intel_pps_lock(): drop the PPS mutex, then release the
 * power domain reference. Always returns NULL so callers can clear their
 * wakeref variable in one statement.
 */
struct ref_tracker *intel_pps_unlock(struct intel_dp *intel_dp, struct ref_tracker *wakeref)
{
	struct intel_display *display = to_intel_display(intel_dp);

	mutex_unlock(&display->pps.mutex);
	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return NULL;
}

/*
 * Make the power sequencer lock onto this port by briefly enabling and
 * disabling the DP port (with a temporarily forced-on PLL if needed).
 * Without this "kick" even the VDD force bit has no effect.
 */
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps.vlv_pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = vlv_pipe_to_phy(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	u32 DP;

	/* The kick must not be done while the port is live. */
	if (drm_WARN(display->drm,
		     intel_de_read(display, intel_dp->output_reg) & DP_PORT_EN,
		     "skipping %s kick due to [ENCODER:%d:%s] being active\n",
		     pps_name(intel_dp),
		     dig_port->base.base.base.id, dig_port->base.base.name))
		return;

	drm_dbg_kms(display->drm,
		    "kicking %s for [ENCODER:%d:%s]\n",
		    pps_name(intel_dp),
		    dig_port->base.base.base.id, dig_port->base.base.name);

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = intel_de_read(display, intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (display->platform.cherryview)
		DP |= DP_PIPE_SEL_CHV(pipe);
	else
		DP |= DP_PIPE_SEL(pipe);

	pll_enabled = intel_de_read(display, DPLL(display, pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable temporarily it if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = display->platform.cherryview &&
			!chv_phy_powergate_ch(display, phy, ch, true);

		if (vlv_force_pll_on(display, pipe, vlv_get_dpll(display))) {
			drm_err(display->drm,
				"Failed to force on PLL for pipe %c!\n",
				pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	intel_de_write(display, intel_dp->output_reg, DP);
	intel_de_posting_read(display, intel_dp->output_reg);

	intel_de_write(display, intel_dp->output_reg, DP | DP_PORT_EN);
	intel_de_posting_read(display, intel_dp->output_reg);

	intel_de_write(display, intel_dp->output_reg, DP & ~DP_PORT_EN);
	intel_de_posting_read(display, intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(display, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(display, phy, ch, false);
	}
}

/*
 * Find a pipe (A or B) whose PPS is not claimed by any DP/eDP port.
 * Returns INVALID_PIPE if both are in use.
 */
static enum pipe vlv_find_free_pps(struct intel_display *display)
{
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	for_each_intel_dp(display->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		if (encoder->type == INTEL_OUTPUT_EDP) {
			drm_WARN_ON(display->drm,
				    intel_dp->pps.vlv_active_pipe != INVALID_PIPE &&
				    intel_dp->pps.vlv_active_pipe !=
				    intel_dp->pps.vlv_pps_pipe);

			if (intel_dp->pps.vlv_pps_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->pps.vlv_pps_pipe);
		} else {
			drm_WARN_ON(display->drm,
				    intel_dp->pps.vlv_pps_pipe != INVALID_PIPE);

			if (intel_dp->pps.vlv_active_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->pps.vlv_active_pipe);
		}
	}

	if (pipes == 0)
		return INVALID_PIPE;

	return ffs(pipes) - 1;
}

/*
 * Return the pipe whose PPS drives this eDP port, lazily assigning
 * (and if necessary stealing) one on first use. After assignment the
 * PPS registers/delays are (re)initialized and the sequencer is kicked
 * so it locks onto the port. eDP only; pps_mutex must be held.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe;

	lockdep_assert_held(&display->pps.mutex);

	/* We should never land here with regular DP ports */
	drm_WARN_ON(display->drm, !intel_dp_is_edp(intel_dp));

	drm_WARN_ON(display->drm, intel_dp->pps.vlv_active_pipe != INVALID_PIPE &&
		    intel_dp->pps.vlv_active_pipe != intel_dp->pps.vlv_pps_pipe);

	if (intel_dp->pps.vlv_pps_pipe != INVALID_PIPE)
		return intel_dp->pps.vlv_pps_pipe;

	pipe = vlv_find_free_pps(display);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (drm_WARN_ON(display->drm, pipe == INVALID_PIPE))
		pipe = PIPE_A;

	vlv_steal_power_sequencer(display, pipe);
	intel_dp->pps.vlv_pps_pipe = pipe;

	drm_dbg_kms(display->drm,
		    "picked %s for [ENCODER:%d:%s]\n",
		    pps_name(intel_dp),
		    dig_port->base.base.base.id, dig_port->base.base.name);

	/* init power sequencer on this pipe and port */
	pps_init_delays(intel_dp);
	pps_init_registers(intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps.vlv_pps_pipe;
}

/*
 * Return the PPS index for this eDP port on BXT/GLK, reprogramming the
 * PPS registers first if a reset was flagged (bxt_pps_reset). eDP only;
 * pps_mutex must be held.
 */
static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	int pps_idx = intel_dp->pps.pps_idx;

	lockdep_assert_held(&display->pps.mutex);

	/* We should never land here with regular DP ports */
	drm_WARN_ON(display->drm, !intel_dp_is_edp(intel_dp));

	if (!intel_dp->pps.bxt_pps_reset)
		return pps_idx;

	intel_dp->pps.bxt_pps_reset = false;

	/*
	 * Only the HW needs to be reprogrammed, the SW state is fixed and
	 * has been setup during connector init.
	 */
	pps_init_registers(intel_dp, false);

	return pps_idx;
}

/* Predicate over a PPS instance, used when probing for an initial PPS. */
typedef bool (*pps_check)(struct intel_display *display, int pps_idx);

/* True if this PPS currently reports panel power on. */
static bool pps_has_pp_on(struct intel_display *display, int pps_idx)
{
	return intel_de_read(display, PP_STATUS(display, pps_idx)) & PP_ON;
}

/* True if this PPS currently has the VDD force bit set. */
static bool pps_has_vdd_on(struct intel_display *display, int pps_idx)
{
	return intel_de_read(display, PP_CONTROL(display, pps_idx)) & EDP_FORCE_VDD;
}

/* Always-true predicate: accept any PPS. */
static bool pps_any(struct intel_display *display, int pps_idx)
{
	return true;
}

/*
 * Find a pipe (A/B) whose PPS has @port selected and satisfies @check.
 * Returns INVALID_PIPE if none matches.
 */
static enum pipe
vlv_initial_pps_pipe(struct intel_display *display,
		     enum port port, pps_check check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = intel_de_read(display,
					     PP_ON_DELAYS(display, pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!check(display, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

/*
 * Determine which PPS (if any) the BIOS left associated with this eDP
 * port, preferring one with panel power on, then one with VDD on, then
 * any with the correct port selected. May leave vlv_pps_pipe as
 * INVALID_PIPE, in which case vlv_power_sequencer_pipe() picks one later.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum port port = dig_port->base.port;

	lockdep_assert_held(&display->pps.mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps.vlv_pps_pipe = vlv_initial_pps_pipe(display, port,
							  pps_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps.vlv_pps_pipe == INVALID_PIPE)
		intel_dp->pps.vlv_pps_pipe = vlv_initial_pps_pipe(display, port,
								  pps_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps.vlv_pps_pipe == INVALID_PIPE)
		intel_dp->pps.vlv_pps_pipe = vlv_initial_pps_pipe(display, port,
								  pps_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps.vlv_pps_pipe == INVALID_PIPE) {
		drm_dbg_kms(display->drm,
			    "[ENCODER:%d:%s] no initial power sequencer\n",
			    dig_port->base.base.base.id, dig_port->base.base.name);
		return;
	}

	drm_dbg_kms(display->drm,
		    "[ENCODER:%d:%s] initial power sequencer: %s\n",
		    dig_port->base.base.base.id, dig_port->base.base.name,
		    pps_name(intel_dp));
}

/*
 * Number of PPS instances the platform provides. Note the PCH checks are
 * ordered newest-first because INTEL_PCH_TYPE() comparisons overlap.
 */
static int intel_num_pps(struct intel_display *display)
{
	if (display->platform.valleyview || display->platform.cherryview)
		return 2;

	if (display->platform.geminilake || display->platform.broxton)
		return 2;

	if (INTEL_PCH_TYPE(display) >= PCH_MTL)
		return 2;

	if (INTEL_PCH_TYPE(display) >= PCH_DG1)
		return 1;

	if (INTEL_PCH_TYPE(display) >= PCH_ICP)
		return 2;

	return 1;
}

/*
 * Check that the selected PPS is usable. On ICP..ADP PCHs the second PPS
 * only works if the SOUTH_CHICKEN1 IO-select bit routes it out; otherwise
 * any selection is considered valid.
 */
static bool intel_pps_is_valid(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);

	if (intel_dp->pps.pps_idx == 1 &&
	    INTEL_PCH_TYPE(display) >= PCH_ICP &&
	    INTEL_PCH_TYPE(display) <= PCH_ADP)
		return intel_de_read(display, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;

	return true;
}

/*
 * Return the first PPS index satisfying @check, or -1 if none does.
 */
static int
bxt_initial_pps_idx(struct intel_display *display, pps_check check)
{
	int pps_idx, pps_num = intel_num_pps(display);

	for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
		if (check(display, pps_idx))
			return pps_idx;
	}

	return -1;
}

/*
 * One-time PPS selection for this eDP port: VLV/CHV go through the
 * pipe-based lookup; other platforms consult the VBT first, then fall
 * back to probing (panel on, then VDD on, then any). Returns whether the
 * chosen PPS is valid. pps_mutex must be held.
 */
static bool
pps_initial_setup(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct intel_connector *connector = intel_dp->attached_connector;

	lockdep_assert_held(&display->pps.mutex);

	if (display->platform.valleyview || display->platform.cherryview) {
		vlv_initial_power_sequencer_setup(intel_dp);
		return true;
	}

	/* first ask the VBT */
	if (intel_num_pps(display) > 1)
		intel_dp->pps.pps_idx = connector->panel.vbt.backlight.controller;
	else
		intel_dp->pps.pps_idx = 0;

	if (drm_WARN_ON(display->drm, intel_dp->pps.pps_idx >= intel_num_pps(display)))
		intel_dp->pps.pps_idx = -1;

	/* VBT wasn't parsed yet? pick one where the panel is on */
	if (intel_dp->pps.pps_idx < 0)
		intel_dp->pps.pps_idx = bxt_initial_pps_idx(display, pps_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps.pps_idx < 0)
		intel_dp->pps.pps_idx = bxt_initial_pps_idx(display, pps_has_vdd_on);
	/* didn't find one? pick any */
	if (intel_dp->pps.pps_idx < 0) {
		intel_dp->pps.pps_idx = bxt_initial_pps_idx(display, pps_any);

		drm_dbg_kms(display->drm,
			    "[ENCODER:%d:%s] no initial power sequencer, assuming %s\n",
			    encoder->base.base.id, encoder->base.name,
			    pps_name(intel_dp));
	} else {
		drm_dbg_kms(display->drm,
			    "[ENCODER:%d:%s] initial power sequencer: %s\n",
			    encoder->base.base.id, encoder->base.name,
			    pps_name(intel_dp));
	}

	return intel_pps_is_valid(intel_dp);
}

/*
 * Invalidate the PPS<->pipe association of every eDP port (e.g. across a
 * power well cycle). Called without pps_mutex -- see the comment below.
 */
void vlv_pps_reset_all(struct intel_display *display)
{
	struct intel_encoder *encoder;

	if (!HAS_DISPLAY(display))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use vlv_pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_dp(display->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_WARN_ON(display->drm, intel_dp->pps.vlv_active_pipe != INVALID_PIPE);

		if (encoder->type == INTEL_OUTPUT_EDP)
			intel_dp->pps.vlv_pps_pipe = INVALID_PIPE;
	}
}

/*
 * Flag every eDP port's PPS for HW reprogramming on next use
 * (bxt_power_sequencer_idx() performs the actual reprogram).
 */
void bxt_pps_reset_all(struct intel_display *display)
{
	struct intel_encoder *encoder;

	if (!HAS_DISPLAY(display))
		return;

	/* See vlv_pps_reset_all() for why we can't grab pps_mutex here. */

	for_each_intel_dp(display->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		if (encoder->type == INTEL_OUTPUT_EDP)
			intel_dp->pps.bxt_pps_reset = true;
	}
}

/* Resolved register offsets of one PPS instance. */
struct pps_registers {
	i915_reg_t pp_ctrl;
	i915_reg_t pp_stat;
	i915_reg_t pp_on;
	i915_reg_t pp_off;
	i915_reg_t pp_div;
};

/*
 * Fill @regs with the PPS registers for this port, resolving the PPS
 * index in the platform-specific way. pp_div is INVALID_MMIO_REG on
 * platforms where the cycle delay lives in PP_CONTROL instead.
 */
static void intel_pps_get_registers(struct intel_dp *intel_dp,
				    struct pps_registers *regs)
{
	struct intel_display *display = to_intel_display(intel_dp);
	int pps_idx;

	memset(regs, 0, sizeof(*regs));

	if (display->platform.valleyview || display->platform.cherryview)
		pps_idx = vlv_power_sequencer_pipe(intel_dp);
	else if (display->platform.geminilake || display->platform.broxton)
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else
		pps_idx = intel_dp->pps.pps_idx;

	regs->pp_ctrl = PP_CONTROL(display, pps_idx);
	regs->pp_stat = PP_STATUS(display, pps_idx);
	regs->pp_on = PP_ON_DELAYS(display, pps_idx);
	regs->pp_off = PP_OFF_DELAYS(display, pps_idx);

	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
	if (display->platform.geminilake || display->platform.broxton ||
	    INTEL_PCH_TYPE(display) >= PCH_CNP)
		regs->pp_div = INVALID_MMIO_REG;
	else
		regs->pp_div = PP_DIVISOR(display, pps_idx);
}
/* Resolve the PP_CONTROL register for this port's PPS. */
static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_ctrl;
}

/* Resolve the PP_STATUS register for this port's PPS. */
static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_stat;
}

/*
 * True if the panel power is currently on according to PP_STATUS.
 * On VLV/CHV an unassigned PPS means "off". pps_mutex must be held.
 */
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);

	lockdep_assert_held(&display->pps.mutex);

	if ((display->platform.valleyview || display->platform.cherryview) &&
	    intel_dp->pps.vlv_pps_pipe == INVALID_PIPE)
		return false;

	return (intel_de_read(display, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

/*
 * True if the VDD force bit is currently set in PP_CONTROL.
 * On VLV/CHV an unassigned PPS means "off". pps_mutex must be held.
 */
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);

	lockdep_assert_held(&display->pps.mutex);

	if ((display->platform.valleyview || display->platform.cherryview) &&
	    intel_dp->pps.vlv_pps_pipe == INVALID_PIPE)
		return false;

	return intel_de_read(display, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

/*
 * Warn (loudly) if AUX CH communication is attempted on an eDP panel
 * that has neither panel power nor VDD enabled. pps_mutex must be held.
 */
void intel_pps_check_power_unlocked(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		drm_WARN(display->drm, 1,
			 "[ENCODER:%d:%s] %s powered off while attempting AUX CH communication.\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 pps_name(intel_dp));
		drm_dbg_kms(display->drm,
			    "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			    dig_port->base.base.base.id, dig_port->base.base.name,
			    pps_name(intel_dp),
			    intel_de_read(display, _pp_stat_reg(intel_dp)),
			    intel_de_read(display, _pp_ctrl_reg(intel_dp)));
	}
}

/* PP_STATUS mask/value pairs describing the idle states we wait for. */
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

static void intel_pps_verify_state(struct intel_dp *intel_dp);

/*
 * Poll PP_STATUS until (status & mask) == value, logging an error on
 * timeout (up to 5 s, 10 ms poll interval). pps_mutex must be held.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
			      u32 mask, u32 value)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	i915_reg_t pp_stat_reg, pp_ctrl_reg;
	int ret;
	u32 val;

	lockdep_assert_held(&display->pps.mutex);

	intel_pps_verify_state(intel_dp);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	drm_dbg_kms(display->drm,
		    "[ENCODER:%d:%s] %s mask: 0x%08x value: 0x%08x PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		    dig_port->base.base.base.id, dig_port->base.base.name,
		    pps_name(intel_dp),
		    mask, value,
		    intel_de_read(display, pp_stat_reg),
		    intel_de_read(display, pp_ctrl_reg));

	ret = poll_timeout_us(val = intel_de_read(display, pp_stat_reg),
			      (val & mask) == value,
			      10 * 1000, 5000 * 1000, true);
	if (ret) {
		drm_err(display->drm,
			"[ENCODER:%d:%s] %s panel status timeout: PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			dig_port->base.base.base.id, dig_port->base.base.name,
			pps_name(intel_dp),
			intel_de_read(display, pp_stat_reg),
			intel_de_read(display, pp_ctrl_reg));
		return;
	}

	drm_dbg_kms(display->drm, "Wait complete\n");
}

/* Wait until the PPS reports panel power fully on and idle. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	drm_dbg_kms(display->drm,
		    "[ENCODER:%d:%s] %s wait for panel power on\n",
		    dig_port->base.base.base.id, dig_port->base.base.name,
		    pps_name(intel_dp));
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

/* Wait until the PPS reports panel power fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	drm_dbg_kms(display->drm,
		    "[ENCODER:%d:%s] %s wait for panel power off time\n",
		    dig_port->base.base.base.id, dig_port->base.base.name,
		    pps_name(intel_dp));
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

/*
 * Honor the panel's power-cycle delay: wait out whatever remains of it
 * since the last power-off, then wait for the PPS to reach the off-idle
 * (cycle complete) state.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	ktime_t panel_power_on_time;
	s64 panel_power_off_duration, remaining;

	/* take the difference of current time and panel power off time
	 * and then make panel wait for power_cycle if needed. */
	panel_power_on_time = ktime_get_boottime();
	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->pps.panel_power_off_time);

	remaining = max(0, intel_dp->pps.panel_power_cycle_delay - panel_power_off_duration);

	drm_dbg_kms(display->drm,
		    "[ENCODER:%d:%s] %s wait for panel power cycle (%lld ms remaining)\n",
		    dig_port->base.base.base.id, dig_port->base.base.name,
		    pps_name(intel_dp), remaining);

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	if (remaining)
		wait_remaining_ms_from_jiffies(jiffies, remaining);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}

/* Locked wrapper around wait_panel_power_cycle(). No-op for non-eDP. */
void intel_pps_wait_power_cycle(struct intel_dp *intel_dp)
{
	if (!intel_dp_is_edp(intel_dp))
		return;

	with_intel_pps_lock(intel_dp)
		wait_panel_power_cycle(intel_dp);
}

/* Wait out the panel-power-on -> backlight-on delay. */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->pps.last_power_on,
				       intel_dp->pps.backlight_on_delay);
}

/* Wait out the backlight-off -> panel-power-off delay. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->pps.last_backlight_off,
				       intel_dp->pps.backlight_off_delay);
}

/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	u32 control;

	lockdep_assert_held(&display->pps.mutex);

	control = intel_de_read(display, _pp_ctrl_reg(intel_dp));
	/* On pre-DDI the write-protect key must be replaced with the
	 * unlock value before PP_CONTROL can be modified. */
	if (drm_WARN_ON(display->drm, !HAS_DDI(display) &&
			(control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
		control &= ~PANEL_UNLOCK_MASK;
		control |= PANEL_UNLOCK_REGS;
	}
	return control;
}

/*
 * Must be paired with intel_pps_vdd_off_unlocked().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_pps_vdd_{on,off}() calls.
 *
 * Returns true if the caller is the one that actually enabled VDD
 * (i.e. it wasn't already requested) and thus must disable it again.
 */
bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;
	bool need_to_disable = !intel_dp->pps.want_panel_vdd;

	if (!intel_dp_is_edp(intel_dp))
		return false;

	lockdep_assert_held(&display->pps.mutex);

	/* Cancel any pending delayed VDD-off while VDD is wanted again. */
	cancel_delayed_work(&intel_dp->pps.panel_vdd_work);
	intel_dp->pps.want_panel_vdd = true;

	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	drm_WARN_ON(display->drm, intel_dp->pps.vdd_wakeref);
	intel_dp->pps.vdd_wakeref = intel_display_power_get(display,
							    intel_aux_power_domain(dig_port));

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turning VDD on\n",
		    dig_port->base.base.base.id, dig_port->base.base.name,
		    pps_name(intel_dp));

	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ilk_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	intel_de_write(display, pp_ctrl_reg, pp);
	intel_de_posting_read(display, pp_ctrl_reg);
	drm_dbg_kms(display->drm,
		    "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		    dig_port->base.base.base.id, dig_port->base.base.name,
		    pps_name(intel_dp),
		    intel_de_read(display, pp_stat_reg),
		    intel_de_read(display, pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		drm_dbg_kms(display->drm,
			    "[ENCODER:%d:%s] %s panel power wasn't enabled\n",
			    dig_port->base.base.base.id, dig_port->base.base.name,
			    pps_name(intel_dp));
		msleep(intel_dp->pps.panel_power_up_delay);
	}

	return need_to_disable;
}

/*
 * Must be paired with intel_pps_vdd_off() or - to disable
 * both VDD and panel power - intel_pps_off().
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 */
void intel_pps_vdd_on(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	bool vdd;

	if (!intel_dp_is_edp(intel_dp))
		return;

	vdd = false;
	with_intel_pps_lock(intel_dp)
		vdd = intel_pps_vdd_on_unlocked(intel_dp);
	INTEL_DISPLAY_STATE_WARN(display, !vdd, "[ENCODER:%d:%s] %s VDD already requested on\n",
				 dp_to_dig_port(intel_dp)->base.base.base.id,
				 dp_to_dig_port(intel_dp)->base.base.name,
				 pps_name(intel_dp));
}

/*
 * Actually turn the VDD force bit off (assuming nobody still wants it),
 * record the power-off timestamp if panel power is also off, and drop
 * the AUX power domain reference taken when VDD was enabled.
 * pps_mutex must be held.
 */
static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&display->pps.mutex);

	drm_WARN_ON(display->drm, intel_dp->pps.want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turning VDD off\n",
		    dig_port->base.base.base.id, dig_port->base.base.name,
		    pps_name(intel_dp));

	pp = ilk_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	intel_de_write(display, pp_ctrl_reg, pp);
	intel_de_posting_read(display, pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	drm_dbg_kms(display->drm,
		    "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		    dig_port->base.base.base.id, dig_port->base.base.name,
		    pps_name(intel_dp),
		    intel_de_read(display, pp_stat_reg),
		    intel_de_read(display, pp_ctrl_reg));

	if ((pp & PANEL_POWER_ON) == 0) {
		intel_dp->pps.panel_power_off_time = ktime_get_boottime();
		intel_dp_invalidate_source_oui(intel_dp);
	}

	intel_display_power_put(display,
				intel_aux_power_domain(dig_port),
				fetch_and_zero(&intel_dp->pps.vdd_wakeref));
}

/*
 * Synchronously make sure VDD is off: cancel the delayed VDD-off work
 * and force the off sequence under the PPS lock.
 */
void intel_pps_vdd_off_sync(struct intel_dp *intel_dp)
{
	if (!intel_dp_is_edp(intel_dp))
		return;

	cancel_delayed_work_sync(&intel_dp->pps.panel_vdd_work);
	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	with_intel_pps_lock(intel_dp)
		intel_pps_vdd_off_sync_unlocked(intel_dp);
}

/*
 * Delayed-work handler that turns VDD off, unless somebody requested
 * VDD again in the meantime.
 */
static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_pps *pps = container_of(to_delayed_work(__work),
					     struct intel_pps, panel_vdd_work);
	struct intel_dp *intel_dp = container_of(pps, struct intel_dp, pps);

	with_intel_pps_lock(intel_dp) {
		if (!intel_dp->pps.want_panel_vdd)
			intel_pps_vdd_off_sync_unlocked(intel_dp);
	}
}

/*
 * Schedule the delayed VDD-off work, far enough in the future (5x the
 * power cycle delay) to keep VDD up across a burst of operations.
 * Skipped entirely while PPS init is still in progress.
 */
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	unsigned long delay;

	/*
	 * We may not yet know the real power sequencing delays,
	 * so keep VDD enabled until we're done with init.
	 */
	if (intel_dp->pps.initializing)
		return;

	/*
	 * Queue the timer to fire a long time from now (relative to the power
	 * down delay) to keep the panel power up across a sequence of
	 * operations.
	 */
	delay = msecs_to_jiffies(intel_dp->pps.panel_power_cycle_delay * 5);
	queue_delayed_work(display->wq.unordered,
			   &intel_dp->pps.panel_vdd_work, delay);
}

/*
 * Must be paired with edp_panel_vdd_on().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_pps_vdd_{on,off}() calls.
 *
 * @sync: turn VDD off immediately instead of via the delayed work.
 */
void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
{
	struct intel_display *display = to_intel_display(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	lockdep_assert_held(&display->pps.mutex);

	INTEL_DISPLAY_STATE_WARN(display, !intel_dp->pps.want_panel_vdd,
				 "[ENCODER:%d:%s] %s VDD not forced on",
				 dp_to_dig_port(intel_dp)->base.base.base.id,
				 dp_to_dig_port(intel_dp)->base.base.name,
				 pps_name(intel_dp));

	intel_dp->pps.want_panel_vdd = false;

	if (sync)
		intel_pps_vdd_off_sync_unlocked(intel_dp);
	else
		edp_panel_vdd_schedule_off(intel_dp);
}

/* Locked wrapper: drop the VDD request and let the delayed work turn it off. */
void intel_pps_vdd_off(struct intel_dp *intel_dp)
{
	if (!intel_dp_is_edp(intel_dp))
		return;

	with_intel_pps_lock(intel_dp)
		intel_pps_vdd_off_unlocked(intel_dp, false);
}

/*
 * Turn panel power on and wait until the sequencer reports on-idle.
 * Applies the ILK panel-reset workaround and the Wa_22019252566 DPLS
 * clock gating workaround around the power-up. pps_mutex must be held.
 */
void intel_pps_on_unlocked(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&display->pps.mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turn panel power on\n",
		    dp_to_dig_port(intel_dp)->base.base.base.id,
		    dp_to_dig_port(intel_dp)->base.base.name,
		    pps_name(intel_dp));

	if (drm_WARN(display->drm, edp_have_panel_power(intel_dp),
		     "[ENCODER:%d:%s] %s panel power already on\n",
		     dp_to_dig_port(intel_dp)->base.base.base.id,
		     dp_to_dig_port(intel_dp)->base.base.name,
		     pps_name(intel_dp)))
		return;

	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ilk_get_pp_control(intel_dp);
	if (display->platform.ironlake) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		intel_de_write(display, pp_ctrl_reg, pp);
		intel_de_posting_read(display, pp_ctrl_reg);
	}

	/*
	 * WA: 22019252566
	 * Disable DPLS gating around power sequence.
	 */
	if (IS_DISPLAY_VER(display, 13, 14))
		intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
			     0, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);

	pp |= PANEL_POWER_ON;
	if (!display->platform.ironlake)
		pp |= PANEL_POWER_RESET;

	intel_de_write(display, pp_ctrl_reg, pp);
	intel_de_posting_read(display, pp_ctrl_reg);

	wait_panel_on(intel_dp);
	intel_dp->pps.last_power_on = jiffies;

	/* Re-enable DPLS gating (WA: 22019252566) */
	if (IS_DISPLAY_VER(display, 13, 14))
		intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
			     PCH_DPLSUNIT_CLOCK_GATE_DISABLE, 0);

	if (display->platform.ironlake) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		intel_de_write(display, pp_ctrl_reg, pp);
		intel_de_posting_read(display, pp_ctrl_reg);
	}
}

/* Locked wrapper around intel_pps_on_unlocked(). No-op for non-eDP. */
void intel_pps_on(struct intel_dp *intel_dp)
{
	if (!intel_dp_is_edp(intel_dp))
		return;

	with_intel_pps_lock(intel_dp)
		intel_pps_on_unlocked(intel_dp);
}

/*
 * Turn panel power (and VDD, and backlight enable) off, wait for the
 * sequencer to finish, record the power-off timestamp and drop the AUX
 * power domain reference that VDD-on acquired. pps_mutex must be held.
 */
void intel_pps_off_unlocked(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&display->pps.mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turn panel power off\n",
		    dig_port->base.base.base.id, dig_port->base.base.name,
		    pps_name(intel_dp));

	drm_WARN(display->drm, !intel_dp->pps.want_panel_vdd,
		 "[ENCODER:%d:%s] %s need VDD to turn off panel\n",
		 dig_port->base.base.base.id, dig_port->base.base.name,
		 pps_name(intel_dp));

	pp = ilk_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->pps.want_panel_vdd = false;

	intel_de_write(display, pp_ctrl_reg, pp);
	intel_de_posting_read(display, pp_ctrl_reg);

	wait_panel_off(intel_dp);
	intel_dp->pps.panel_power_off_time = ktime_get_boottime();

	intel_dp_invalidate_source_oui(intel_dp);

	/* We got a reference when we enabled the VDD. */
	intel_display_power_put(display,
				intel_aux_power_domain(dig_port),
				fetch_and_zero(&intel_dp->pps.vdd_wakeref));
}

/* Locked wrapper around intel_pps_off_unlocked(). No-op for non-eDP. */
void intel_pps_off(struct intel_dp *intel_dp)
{
	if (!intel_dp_is_edp(intel_dp))
		return;

	with_intel_pps_lock(intel_dp)
		intel_pps_off_unlocked(intel_dp);
}

/* Enable backlight in the panel power control. */
void intel_pps_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link. So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	with_intel_pps_lock(intel_dp) {
		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		u32 pp;

		pp = ilk_get_pp_control(intel_dp);
		pp |= EDP_BLC_ENABLE;

		intel_de_write(display, pp_ctrl_reg, pp);
		intel_de_posting_read(display, pp_ctrl_reg);
	}
}

/* Disable backlight in the panel power control. */
void intel_pps_backlight_off(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_intel_pps_lock(intel_dp) {
		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		u32 pp;

		pp = ilk_get_pp_control(intel_dp);
		pp &= ~EDP_BLC_ENABLE;

		intel_de_write(display, pp_ctrl_reg, pp);
		intel_de_posting_read(display, pp_ctrl_reg);
	}

	/* Record when we turned the backlight off so the off-delay can be honored. */
	intel_dp->pps.last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
}

/*
 * Hook for controlling the panel power control backlight through the bl_power
 * sysfs attribute. Take care to handle multiple calls.
 */
void intel_pps_backlight_power(struct intel_connector *connector, bool enable)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	bool is_enabled;

	is_enabled = false;
	with_intel_pps_lock(intel_dp)
		is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
	if (is_enabled == enable)
		return;

	drm_dbg_kms(display->drm, "panel power control backlight %s\n",
		    str_enable_disable(enable));

	if (enable)
		intel_pps_backlight_on(intel_dp);
	else
		intel_pps_backlight_off(intel_dp);
}

static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps.vlv_pps_pipe;
	i915_reg_t pp_on_reg = PP_ON_DELAYS(display, pipe);

	drm_WARN_ON(display->drm, intel_dp->pps.vlv_active_pipe != INVALID_PIPE);

	if (drm_WARN_ON(display->drm, pipe != PIPE_A && pipe != PIPE_B))
		return;

	intel_pps_vdd_off_sync_unlocked(intel_dp);

	/*
	 * VLV seems to get confused when multiple
power sequencers 1169 * have the same port selected (even if only one has power/vdd 1170 * enabled). The failure manifests as vlv_wait_port_ready() failing 1171 * CHV on the other hand doesn't seem to mind having the same port 1172 * selected in multiple power sequencers, but let's clear the 1173 * port select always when logically disconnecting a power sequencer 1174 * from a port. 1175 */ 1176 drm_dbg_kms(display->drm, 1177 "detaching %s from [ENCODER:%d:%s]\n", 1178 pps_name(intel_dp), 1179 dig_port->base.base.base.id, dig_port->base.base.name); 1180 intel_de_write(display, pp_on_reg, 0); 1181 intel_de_posting_read(display, pp_on_reg); 1182 1183 intel_dp->pps.vlv_pps_pipe = INVALID_PIPE; 1184 } 1185 1186 static void vlv_steal_power_sequencer(struct intel_display *display, 1187 enum pipe pipe) 1188 { 1189 struct intel_encoder *encoder; 1190 1191 lockdep_assert_held(&display->pps.mutex); 1192 1193 for_each_intel_dp(display->drm, encoder) { 1194 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1195 1196 drm_WARN(display->drm, intel_dp->pps.vlv_active_pipe == pipe, 1197 "stealing PPS %c from active [ENCODER:%d:%s]\n", 1198 pipe_name(pipe), encoder->base.base.id, 1199 encoder->base.name); 1200 1201 if (intel_dp->pps.vlv_pps_pipe != pipe) 1202 continue; 1203 1204 drm_dbg_kms(display->drm, 1205 "stealing PPS %c from [ENCODER:%d:%s]\n", 1206 pipe_name(pipe), encoder->base.base.id, 1207 encoder->base.name); 1208 1209 /* make sure vdd is off before we steal it */ 1210 vlv_detach_power_sequencer(intel_dp); 1211 } 1212 } 1213 1214 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp) 1215 { 1216 struct intel_display *display = to_intel_display(intel_dp); 1217 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 1218 enum pipe pipe; 1219 1220 if (g4x_dp_port_enabled(display, intel_dp->output_reg, 1221 encoder->port, &pipe)) 1222 return pipe; 1223 1224 return INVALID_PIPE; 1225 } 1226 1227 /* Call on all DP, not just eDP */ 1228 void 
vlv_pps_pipe_init(struct intel_dp *intel_dp) 1229 { 1230 intel_dp->pps.vlv_pps_pipe = INVALID_PIPE; 1231 intel_dp->pps.vlv_active_pipe = vlv_active_pipe(intel_dp); 1232 } 1233 1234 /* Call on all DP, not just eDP */ 1235 void vlv_pps_pipe_reset(struct intel_dp *intel_dp) 1236 { 1237 with_intel_pps_lock(intel_dp) 1238 intel_dp->pps.vlv_active_pipe = vlv_active_pipe(intel_dp); 1239 } 1240 1241 enum pipe vlv_pps_backlight_initial_pipe(struct intel_dp *intel_dp) 1242 { 1243 enum pipe pipe; 1244 1245 /* 1246 * Figure out the current pipe for the initial backlight setup. If the 1247 * current pipe isn't valid, try the PPS pipe, and if that fails just 1248 * assume pipe A. 1249 */ 1250 pipe = vlv_active_pipe(intel_dp); 1251 1252 if (pipe != PIPE_A && pipe != PIPE_B) 1253 pipe = intel_dp->pps.vlv_pps_pipe; 1254 1255 if (pipe != PIPE_A && pipe != PIPE_B) 1256 pipe = PIPE_A; 1257 1258 return pipe; 1259 } 1260 1261 /* Call on all DP, not just eDP */ 1262 void vlv_pps_port_enable_unlocked(struct intel_encoder *encoder, 1263 const struct intel_crtc_state *crtc_state) 1264 { 1265 struct intel_display *display = to_intel_display(encoder); 1266 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1267 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1268 1269 lockdep_assert_held(&display->pps.mutex); 1270 1271 drm_WARN_ON(display->drm, intel_dp->pps.vlv_active_pipe != INVALID_PIPE); 1272 1273 if (intel_dp->pps.vlv_pps_pipe != INVALID_PIPE && 1274 intel_dp->pps.vlv_pps_pipe != crtc->pipe) { 1275 /* 1276 * If another power sequencer was being used on this 1277 * port previously make sure to turn off vdd there while 1278 * we still have control of it. 1279 */ 1280 vlv_detach_power_sequencer(intel_dp); 1281 } 1282 1283 /* 1284 * We may be stealing the power 1285 * sequencer from another port. 
1286 */ 1287 vlv_steal_power_sequencer(display, crtc->pipe); 1288 1289 intel_dp->pps.vlv_active_pipe = crtc->pipe; 1290 1291 if (!intel_dp_is_edp(intel_dp)) 1292 return; 1293 1294 /* now it's all ours */ 1295 intel_dp->pps.vlv_pps_pipe = crtc->pipe; 1296 1297 drm_dbg_kms(display->drm, 1298 "initializing %s for [ENCODER:%d:%s]\n", 1299 pps_name(intel_dp), 1300 encoder->base.base.id, encoder->base.name); 1301 1302 /* init power sequencer on this pipe and port */ 1303 pps_init_delays(intel_dp); 1304 pps_init_registers(intel_dp, true); 1305 } 1306 1307 /* Call on all DP, not just eDP */ 1308 void vlv_pps_port_disable(struct intel_encoder *encoder, 1309 const struct intel_crtc_state *crtc_state) 1310 { 1311 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1312 1313 with_intel_pps_lock(intel_dp) 1314 intel_dp->pps.vlv_active_pipe = INVALID_PIPE; 1315 } 1316 1317 static void pps_vdd_init(struct intel_dp *intel_dp) 1318 { 1319 struct intel_display *display = to_intel_display(intel_dp); 1320 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1321 1322 lockdep_assert_held(&display->pps.mutex); 1323 1324 if (!edp_have_panel_vdd(intel_dp)) 1325 return; 1326 1327 /* 1328 * The VDD bit needs a power domain reference, so if the bit is 1329 * already enabled when we boot or resume, grab this reference and 1330 * schedule a vdd off, so we don't hold on to the reference 1331 * indefinitely. 
1332 */ 1333 drm_dbg_kms(display->drm, 1334 "[ENCODER:%d:%s] %s VDD left on by BIOS, adjusting state tracking\n", 1335 dig_port->base.base.base.id, dig_port->base.base.name, 1336 pps_name(intel_dp)); 1337 drm_WARN_ON(display->drm, intel_dp->pps.vdd_wakeref); 1338 intel_dp->pps.vdd_wakeref = intel_display_power_get(display, 1339 intel_aux_power_domain(dig_port)); 1340 } 1341 1342 bool intel_pps_have_panel_power_or_vdd(struct intel_dp *intel_dp) 1343 { 1344 bool have_power = false; 1345 1346 with_intel_pps_lock(intel_dp) { 1347 have_power = edp_have_panel_power(intel_dp) || 1348 edp_have_panel_vdd(intel_dp); 1349 } 1350 1351 return have_power; 1352 } 1353 1354 static void pps_init_timestamps(struct intel_dp *intel_dp) 1355 { 1356 /* 1357 * Initialize panel power off time to 0, assuming panel power could have 1358 * been toggled between kernel boot and now only by a previously loaded 1359 * and removed i915, which has already ensured sufficient power off 1360 * delay at module remove. 1361 */ 1362 intel_dp->pps.panel_power_off_time = 0; 1363 intel_dp->pps.last_power_on = jiffies; 1364 intel_dp->pps.last_backlight_off = jiffies; 1365 } 1366 1367 static void 1368 intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct intel_pps_delays *seq) 1369 { 1370 struct intel_display *display = to_intel_display(intel_dp); 1371 u32 pp_on, pp_off, pp_ctl, power_cycle_delay; 1372 struct pps_registers regs; 1373 1374 intel_pps_get_registers(intel_dp, ®s); 1375 1376 pp_ctl = ilk_get_pp_control(intel_dp); 1377 1378 /* Ensure PPS is unlocked */ 1379 if (!HAS_DDI(display)) 1380 intel_de_write(display, regs.pp_ctrl, pp_ctl); 1381 1382 pp_on = intel_de_read(display, regs.pp_on); 1383 pp_off = intel_de_read(display, regs.pp_off); 1384 1385 /* Pull timing values out of registers */ 1386 seq->power_up = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on); 1387 seq->backlight_on = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on); 1388 seq->backlight_off = 
REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off); 1389 seq->power_down = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off); 1390 1391 if (i915_mmio_reg_valid(regs.pp_div)) { 1392 u32 pp_div; 1393 1394 pp_div = intel_de_read(display, regs.pp_div); 1395 1396 power_cycle_delay = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div); 1397 } else { 1398 power_cycle_delay = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl); 1399 } 1400 1401 /* hardware wants <delay>+1 in 100ms units */ 1402 seq->power_cycle = power_cycle_delay ? (power_cycle_delay - 1) * 1000 : 0; 1403 } 1404 1405 static void 1406 intel_pps_dump_state(struct intel_dp *intel_dp, const char *state_name, 1407 const struct intel_pps_delays *seq) 1408 { 1409 struct intel_display *display = to_intel_display(intel_dp); 1410 1411 drm_dbg_kms(display->drm, 1412 "%s power_up %d backlight_on %d backlight_off %d power_down %d power_cycle %d\n", 1413 state_name, seq->power_up, seq->backlight_on, 1414 seq->backlight_off, seq->power_down, seq->power_cycle); 1415 } 1416 1417 static void 1418 intel_pps_verify_state(struct intel_dp *intel_dp) 1419 { 1420 struct intel_display *display = to_intel_display(intel_dp); 1421 struct intel_pps_delays hw; 1422 struct intel_pps_delays *sw = &intel_dp->pps.pps_delays; 1423 1424 intel_pps_readout_hw_state(intel_dp, &hw); 1425 1426 if (hw.power_up != sw->power_up || 1427 hw.backlight_on != sw->backlight_on || 1428 hw.backlight_off != sw->backlight_off || 1429 hw.power_down != sw->power_down || 1430 hw.power_cycle != sw->power_cycle) { 1431 drm_err(display->drm, "PPS state mismatch\n"); 1432 intel_pps_dump_state(intel_dp, "sw", sw); 1433 intel_pps_dump_state(intel_dp, "hw", &hw); 1434 } 1435 } 1436 1437 static bool pps_delays_valid(struct intel_pps_delays *delays) 1438 { 1439 return delays->power_up || delays->backlight_on || delays->backlight_off || 1440 delays->power_down || delays->power_cycle; 1441 } 1442 1443 static int msecs_to_pps_units(int msecs) 1444 { 1445 /* PPS uses 100us 
units */ 1446 return msecs * 10; 1447 } 1448 1449 static int pps_units_to_msecs(int val) 1450 { 1451 /* PPS uses 100us units */ 1452 return DIV_ROUND_UP(val, 10); 1453 } 1454 1455 static void pps_init_delays_bios(struct intel_dp *intel_dp, 1456 struct intel_pps_delays *bios) 1457 { 1458 struct intel_display *display = to_intel_display(intel_dp); 1459 1460 lockdep_assert_held(&display->pps.mutex); 1461 1462 if (!pps_delays_valid(&intel_dp->pps.bios_pps_delays)) 1463 intel_pps_readout_hw_state(intel_dp, &intel_dp->pps.bios_pps_delays); 1464 1465 *bios = intel_dp->pps.bios_pps_delays; 1466 1467 intel_pps_dump_state(intel_dp, "bios", bios); 1468 } 1469 1470 static void pps_init_delays_vbt(struct intel_dp *intel_dp, 1471 struct intel_pps_delays *vbt) 1472 { 1473 struct intel_display *display = to_intel_display(intel_dp); 1474 struct intel_connector *connector = intel_dp->attached_connector; 1475 1476 *vbt = connector->panel.vbt.edp.pps; 1477 1478 if (!pps_delays_valid(vbt)) 1479 return; 1480 1481 /* 1482 * On Toshiba Satellite P50-C-18C system the VBT T12 delay 1483 * of 500ms appears to be too short. Occasionally the panel 1484 * just fails to power back on. Increasing the delay to 800ms 1485 * seems sufficient to avoid this problem. 
1486 */ 1487 if (intel_has_quirk(display, QUIRK_INCREASE_T12_DELAY)) { 1488 vbt->power_cycle = max_t(u16, vbt->power_cycle, msecs_to_pps_units(1300)); 1489 drm_dbg_kms(display->drm, 1490 "Increasing T12 panel delay as per the quirk to %d\n", 1491 vbt->power_cycle); 1492 } 1493 1494 intel_pps_dump_state(intel_dp, "vbt", vbt); 1495 } 1496 1497 static void pps_init_delays_spec(struct intel_dp *intel_dp, 1498 struct intel_pps_delays *spec) 1499 { 1500 struct intel_display *display = to_intel_display(intel_dp); 1501 1502 lockdep_assert_held(&display->pps.mutex); 1503 1504 /* Upper limits from eDP 1.3 spec */ 1505 spec->power_up = msecs_to_pps_units(10 + 200); /* T1+T3 */ 1506 spec->backlight_on = msecs_to_pps_units(50); /* no limit for T8, use T7 instead */ 1507 spec->backlight_off = msecs_to_pps_units(50); /* no limit for T9, make it symmetric with T8 */ 1508 spec->power_down = msecs_to_pps_units(500); /* T10 */ 1509 spec->power_cycle = msecs_to_pps_units(10 + 500); /* T11+T12 */ 1510 1511 intel_pps_dump_state(intel_dp, "spec", spec); 1512 } 1513 1514 static void pps_init_delays(struct intel_dp *intel_dp) 1515 { 1516 struct intel_display *display = to_intel_display(intel_dp); 1517 struct intel_pps_delays cur, vbt, spec, 1518 *final = &intel_dp->pps.pps_delays; 1519 1520 lockdep_assert_held(&display->pps.mutex); 1521 1522 /* already initialized? */ 1523 if (pps_delays_valid(final)) 1524 return; 1525 1526 pps_init_delays_bios(intel_dp, &cur); 1527 pps_init_delays_vbt(intel_dp, &vbt); 1528 pps_init_delays_spec(intel_dp, &spec); 1529 1530 /* Use the max of the register settings and vbt. If both are 1531 * unset, fall back to the spec limits. */ 1532 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? 
\ 1533 spec.field : \ 1534 max(cur.field, vbt.field)) 1535 assign_final(power_up); 1536 assign_final(backlight_on); 1537 assign_final(backlight_off); 1538 assign_final(power_down); 1539 assign_final(power_cycle); 1540 #undef assign_final 1541 1542 intel_dp->pps.panel_power_up_delay = pps_units_to_msecs(final->power_up); 1543 intel_dp->pps.backlight_on_delay = pps_units_to_msecs(final->backlight_on); 1544 intel_dp->pps.backlight_off_delay = pps_units_to_msecs(final->backlight_off); 1545 intel_dp->pps.panel_power_down_delay = pps_units_to_msecs(final->power_down); 1546 intel_dp->pps.panel_power_cycle_delay = pps_units_to_msecs(final->power_cycle); 1547 1548 drm_dbg_kms(display->drm, 1549 "panel power up delay %d, power down delay %d, power cycle delay %d\n", 1550 intel_dp->pps.panel_power_up_delay, 1551 intel_dp->pps.panel_power_down_delay, 1552 intel_dp->pps.panel_power_cycle_delay); 1553 1554 drm_dbg_kms(display->drm, "backlight on delay %d, off delay %d\n", 1555 intel_dp->pps.backlight_on_delay, 1556 intel_dp->pps.backlight_off_delay); 1557 1558 /* 1559 * We override the HW backlight delays to 1 because we do manual waits 1560 * on them. For backlight_on, even BSpec recommends doing it. For 1561 * backlight_off, if we don't do this, we'll end up waiting for the 1562 * backlight off delay twice: once when we do the manual sleep, and 1563 * once when we disable the panel and wait for the PP_STATUS bit to 1564 * become zero. 1565 */ 1566 final->backlight_on = 1; 1567 final->backlight_off = 1; 1568 1569 /* 1570 * HW has only a 100msec granularity for power_cycle so round it up 1571 * accordingly. 
1572 */ 1573 final->power_cycle = roundup(final->power_cycle, msecs_to_pps_units(100)); 1574 } 1575 1576 static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd) 1577 { 1578 struct intel_display *display = to_intel_display(intel_dp); 1579 u32 pp_on, pp_off, port_sel = 0; 1580 int div = DISPLAY_RUNTIME_INFO(display)->rawclk_freq / 1000; 1581 struct pps_registers regs; 1582 enum port port = dp_to_dig_port(intel_dp)->base.port; 1583 const struct intel_pps_delays *seq = &intel_dp->pps.pps_delays; 1584 1585 lockdep_assert_held(&display->pps.mutex); 1586 1587 intel_pps_get_registers(intel_dp, ®s); 1588 1589 /* 1590 * On some VLV machines the BIOS can leave the VDD 1591 * enabled even on power sequencers which aren't 1592 * hooked up to any port. This would mess up the 1593 * power domain tracking the first time we pick 1594 * one of these power sequencers for use since 1595 * intel_pps_vdd_on_unlocked() would notice that the VDD was 1596 * already on and therefore wouldn't grab the power 1597 * domain reference. Disable VDD first to avoid this. 1598 * This also avoids spuriously turning the VDD on as 1599 * soon as the new power sequencer gets initialized. 1600 */ 1601 if (force_disable_vdd) { 1602 u32 pp = ilk_get_pp_control(intel_dp); 1603 1604 drm_WARN(display->drm, pp & PANEL_POWER_ON, 1605 "Panel power already on\n"); 1606 1607 if (pp & EDP_FORCE_VDD) 1608 drm_dbg_kms(display->drm, 1609 "VDD already on, disabling first\n"); 1610 1611 pp &= ~EDP_FORCE_VDD; 1612 1613 intel_de_write(display, regs.pp_ctrl, pp); 1614 } 1615 1616 pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->power_up) | 1617 REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->backlight_on); 1618 pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->backlight_off) | 1619 REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->power_down); 1620 1621 /* Haswell doesn't have any port selection bits for the panel 1622 * power sequencer any more. 
*/ 1623 if (display->platform.valleyview || display->platform.cherryview) { 1624 port_sel = PANEL_PORT_SELECT_VLV(port); 1625 } else if (HAS_PCH_IBX(display) || HAS_PCH_CPT(display)) { 1626 switch (port) { 1627 case PORT_A: 1628 port_sel = PANEL_PORT_SELECT_DPA; 1629 break; 1630 case PORT_C: 1631 port_sel = PANEL_PORT_SELECT_DPC; 1632 break; 1633 case PORT_D: 1634 port_sel = PANEL_PORT_SELECT_DPD; 1635 break; 1636 default: 1637 MISSING_CASE(port); 1638 break; 1639 } 1640 } 1641 1642 pp_on |= port_sel; 1643 1644 intel_de_write(display, regs.pp_on, pp_on); 1645 intel_de_write(display, regs.pp_off, pp_off); 1646 1647 /* 1648 * Compute the divisor for the pp clock, simply match the Bspec formula. 1649 */ 1650 if (i915_mmio_reg_valid(regs.pp_div)) 1651 intel_de_write(display, regs.pp_div, 1652 REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, 1653 (100 * div) / 2 - 1) | 1654 REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, 1655 DIV_ROUND_UP(seq->power_cycle, 1000) + 1)); 1656 else 1657 intel_de_rmw(display, regs.pp_ctrl, BXT_POWER_CYCLE_DELAY_MASK, 1658 REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, 1659 DIV_ROUND_UP(seq->power_cycle, 1000) + 1)); 1660 1661 drm_dbg_kms(display->drm, 1662 "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", 1663 intel_de_read(display, regs.pp_on), 1664 intel_de_read(display, regs.pp_off), 1665 i915_mmio_reg_valid(regs.pp_div) ? 1666 intel_de_read(display, regs.pp_div) : 1667 (intel_de_read(display, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK)); 1668 } 1669 1670 void intel_pps_encoder_reset(struct intel_dp *intel_dp) 1671 { 1672 struct intel_display *display = to_intel_display(intel_dp); 1673 1674 if (!intel_dp_is_edp(intel_dp)) 1675 return; 1676 1677 with_intel_pps_lock(intel_dp) { 1678 /* 1679 * Reinit the power sequencer also on the resume path, in case 1680 * BIOS did something nasty with it. 
1681 */ 1682 if (display->platform.valleyview || display->platform.cherryview) 1683 vlv_initial_power_sequencer_setup(intel_dp); 1684 1685 pps_init_delays(intel_dp); 1686 pps_init_registers(intel_dp, false); 1687 pps_vdd_init(intel_dp); 1688 1689 if (edp_have_panel_vdd(intel_dp)) 1690 edp_panel_vdd_schedule_off(intel_dp); 1691 } 1692 } 1693 1694 bool intel_pps_init(struct intel_dp *intel_dp) 1695 { 1696 bool ret; 1697 1698 intel_dp->pps.initializing = true; 1699 INIT_DELAYED_WORK(&intel_dp->pps.panel_vdd_work, edp_panel_vdd_work); 1700 1701 pps_init_timestamps(intel_dp); 1702 1703 with_intel_pps_lock(intel_dp) { 1704 ret = pps_initial_setup(intel_dp); 1705 1706 pps_init_delays(intel_dp); 1707 pps_init_registers(intel_dp, false); 1708 pps_vdd_init(intel_dp); 1709 } 1710 1711 return ret; 1712 } 1713 1714 static void pps_init_late(struct intel_dp *intel_dp) 1715 { 1716 struct intel_display *display = to_intel_display(intel_dp); 1717 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 1718 struct intel_connector *connector = intel_dp->attached_connector; 1719 1720 if (display->platform.valleyview || display->platform.cherryview) 1721 return; 1722 1723 if (intel_num_pps(display) < 2) 1724 return; 1725 1726 drm_WARN(display->drm, 1727 connector->panel.vbt.backlight.controller >= 0 && 1728 intel_dp->pps.pps_idx != connector->panel.vbt.backlight.controller, 1729 "[ENCODER:%d:%s] power sequencer mismatch: %d (initial) vs. 
%d (VBT)\n", 1730 encoder->base.base.id, encoder->base.name, 1731 intel_dp->pps.pps_idx, connector->panel.vbt.backlight.controller); 1732 1733 if (connector->panel.vbt.backlight.controller >= 0) 1734 intel_dp->pps.pps_idx = connector->panel.vbt.backlight.controller; 1735 } 1736 1737 void intel_pps_init_late(struct intel_dp *intel_dp) 1738 { 1739 with_intel_pps_lock(intel_dp) { 1740 /* Reinit delays after per-panel info has been parsed from VBT */ 1741 pps_init_late(intel_dp); 1742 1743 memset(&intel_dp->pps.pps_delays, 0, sizeof(intel_dp->pps.pps_delays)); 1744 pps_init_delays(intel_dp); 1745 pps_init_registers(intel_dp, false); 1746 1747 intel_dp->pps.initializing = false; 1748 1749 if (edp_have_panel_vdd(intel_dp)) 1750 edp_panel_vdd_schedule_off(intel_dp); 1751 } 1752 } 1753 1754 void intel_pps_unlock_regs_wa(struct intel_display *display) 1755 { 1756 int pps_num; 1757 int pps_idx; 1758 1759 if (!HAS_DISPLAY(display) || HAS_DDI(display)) 1760 return; 1761 /* 1762 * This w/a is needed at least on CPT/PPT, but to be sure apply it 1763 * everywhere where registers can be write protected. 
1764 */ 1765 pps_num = intel_num_pps(display); 1766 1767 for (pps_idx = 0; pps_idx < pps_num; pps_idx++) 1768 intel_de_rmw(display, PP_CONTROL(display, pps_idx), 1769 PANEL_UNLOCK_MASK, PANEL_UNLOCK_REGS); 1770 } 1771 1772 void intel_pps_setup(struct intel_display *display) 1773 { 1774 if (HAS_PCH_SPLIT(display) || display->platform.geminilake || display->platform.broxton) 1775 display->pps.mmio_base = PCH_PPS_BASE; 1776 else if (display->platform.valleyview || display->platform.cherryview) 1777 display->pps.mmio_base = VLV_PPS_BASE; 1778 else 1779 display->pps.mmio_base = PPS_BASE; 1780 } 1781 1782 static int intel_pps_show(struct seq_file *m, void *data) 1783 { 1784 struct intel_connector *connector = m->private; 1785 struct intel_dp *intel_dp = intel_attached_dp(connector); 1786 1787 if (connector->base.status != connector_status_connected) 1788 return -ENODEV; 1789 1790 seq_printf(m, "Panel power up delay: %d\n", 1791 intel_dp->pps.panel_power_up_delay); 1792 seq_printf(m, "Panel power down delay: %d\n", 1793 intel_dp->pps.panel_power_down_delay); 1794 seq_printf(m, "Panel power cycle delay: %d\n", 1795 intel_dp->pps.panel_power_cycle_delay); 1796 seq_printf(m, "Backlight on delay: %d\n", 1797 intel_dp->pps.backlight_on_delay); 1798 seq_printf(m, "Backlight off delay: %d\n", 1799 intel_dp->pps.backlight_off_delay); 1800 1801 return 0; 1802 } 1803 DEFINE_SHOW_ATTRIBUTE(intel_pps); 1804 1805 void intel_pps_connector_debugfs_add(struct intel_connector *connector) 1806 { 1807 struct dentry *root = connector->base.debugfs_entry; 1808 int connector_type = connector->base.connector_type; 1809 1810 if (connector_type == DRM_MODE_CONNECTOR_eDP) 1811 debugfs_create_file("i915_panel_timings", 0444, root, 1812 connector, &intel_pps_fops); 1813 } 1814 1815 void assert_pps_unlocked(struct intel_display *display, enum pipe pipe) 1816 { 1817 i915_reg_t pp_reg; 1818 u32 val; 1819 enum pipe panel_pipe = INVALID_PIPE; 1820 bool locked = true; 1821 1822 if 
(drm_WARN_ON(display->drm, HAS_DDI(display))) 1823 return; 1824 1825 if (HAS_PCH_SPLIT(display)) { 1826 u32 port_sel; 1827 1828 pp_reg = PP_CONTROL(display, 0); 1829 port_sel = intel_de_read(display, PP_ON_DELAYS(display, 0)) & 1830 PANEL_PORT_SELECT_MASK; 1831 1832 switch (port_sel) { 1833 case PANEL_PORT_SELECT_LVDS: 1834 intel_lvds_port_enabled(display, PCH_LVDS, &panel_pipe); 1835 break; 1836 case PANEL_PORT_SELECT_DPA: 1837 g4x_dp_port_enabled(display, DP_A, PORT_A, &panel_pipe); 1838 break; 1839 case PANEL_PORT_SELECT_DPC: 1840 g4x_dp_port_enabled(display, PCH_DP_C, PORT_C, &panel_pipe); 1841 break; 1842 case PANEL_PORT_SELECT_DPD: 1843 g4x_dp_port_enabled(display, PCH_DP_D, PORT_D, &panel_pipe); 1844 break; 1845 default: 1846 MISSING_CASE(port_sel); 1847 break; 1848 } 1849 } else if (display->platform.valleyview || display->platform.cherryview) { 1850 /* presumably write lock depends on pipe, not port select */ 1851 pp_reg = PP_CONTROL(display, pipe); 1852 panel_pipe = pipe; 1853 } else { 1854 u32 port_sel; 1855 1856 pp_reg = PP_CONTROL(display, 0); 1857 port_sel = intel_de_read(display, PP_ON_DELAYS(display, 0)) & 1858 PANEL_PORT_SELECT_MASK; 1859 1860 drm_WARN_ON(display->drm, 1861 port_sel != PANEL_PORT_SELECT_LVDS); 1862 intel_lvds_port_enabled(display, LVDS, &panel_pipe); 1863 } 1864 1865 val = intel_de_read(display, pp_reg); 1866 if (!(val & PANEL_POWER_ON) || 1867 ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS)) 1868 locked = false; 1869 1870 INTEL_DISPLAY_STATE_WARN(display, panel_pipe == pipe && locked, 1871 "panel assertion failure, pipe %c regs locked\n", 1872 pipe_name(pipe)); 1873 } 1874