// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/debugfs.h>

#include "g4x_dp.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_power_well.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_lvds.h"
#include "intel_lvds_regs.h"
#include "intel_pps.h"
#include "intel_pps_regs.h"
#include "intel_quirks.h"

static void vlv_steal_power_sequencer(struct intel_display *display,
				      enum pipe pipe);

static void pps_init_delays(struct intel_dp *intel_dp);
static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd);

/*
 * Human-readable name of the power sequencer (PPS) instance currently
 * associated with @intel_dp, for debug/error messages. On VLV/CHV the
 * PPS is identified by pipe, on other platforms by index.
 */
static const char *pps_name(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_i915_private *i915 = to_i915(display->drm);
	struct intel_pps *pps = &intel_dp->pps;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		switch (pps->vlv_pps_pipe) {
		case INVALID_PIPE:
			/*
			 * FIXME would be nice if we can guarantee
			 * to always have a valid PPS when calling this.
			 */
			return "PPS <none>";
		case PIPE_A:
			return "PPS A";
		case PIPE_B:
			return "PPS B";
		default:
			MISSING_CASE(pps->vlv_pps_pipe);
			break;
		}
	} else {
		switch (pps->pps_idx) {
		case 0:
			return "PPS 0";
		case 1:
			return "PPS 1";
		default:
			MISSING_CASE(pps->pps_idx);
			break;
		}
	}

	return "PPS <invalid>";
}

intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	intel_wakeref_t wakeref;

	/*
	 * See intel_pps_reset_all() for why we need a power domain
	 * reference here.
	 */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
	mutex_lock(&display->pps.mutex);

	return wakeref;
}

intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp,
				 intel_wakeref_t wakeref)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(display->drm);

	mutex_unlock(&display->pps.mutex);
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return 0;
}

/*
 * Briefly enable and disable the port so the power sequencer locks
 * onto it. Requires the port to be inactive; the pipe's DPLL is
 * forced on temporarily if needed.
 */
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps.vlv_pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = vlv_pipe_to_phy(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	u32 DP;

	if (drm_WARN(display->drm,
		     intel_de_read(display, intel_dp->output_reg) & DP_PORT_EN,
		     "skipping %s kick due to [ENCODER:%d:%s] being active\n",
		     pps_name(intel_dp),
		     dig_port->base.base.base.id, dig_port->base.base.name))
		return;

	drm_dbg_kms(display->drm,
		    "kicking %s for [ENCODER:%d:%s]\n",
		    pps_name(intel_dp),
		    dig_port->base.base.base.id, dig_port->base.base.name);

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = intel_de_read(display, intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev_priv))
		DP |= DP_PIPE_SEL_CHV(pipe);
	else
		DP |= DP_PIPE_SEL(pipe);

	pll_enabled = intel_de_read(display, DPLL(display, pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(dev_priv))) {
			drm_err(display->drm,
				"Failed to force on PLL for pipe %c!\n",
				pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	intel_de_write(display, intel_dp->output_reg, DP);
	intel_de_posting_read(display, intel_dp->output_reg);

	intel_de_write(display, intel_dp->output_reg, DP | DP_PORT_EN);
	intel_de_posting_read(display, intel_dp->output_reg);

	intel_de_write(display, intel_dp->output_reg, DP & ~DP_PORT_EN);
	intel_de_posting_read(display, intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev_priv, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}

static enum pipe vlv_find_free_pps(struct intel_display *display)
{
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	for_each_intel_dp(display->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		if (encoder->type == INTEL_OUTPUT_EDP) {
			drm_WARN_ON(display->drm,
				    intel_dp->pps.vlv_active_pipe != INVALID_PIPE &&
				    intel_dp->pps.vlv_active_pipe !=
				    intel_dp->pps.vlv_pps_pipe);

			if (intel_dp->pps.vlv_pps_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->pps.vlv_pps_pipe);
		} else {
			drm_WARN_ON(display->drm,
				    intel_dp->pps.vlv_pps_pipe != INVALID_PIPE);

			if (intel_dp->pps.vlv_active_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->pps.vlv_active_pipe);
		}
	}

	if (pipes == 0)
		return INVALID_PIPE;

	return ffs(pipes) - 1;
}

/*
 * Return the pipe whose PPS this eDP port uses, allocating and
 * initializing a free one (stealing if necessary) on first use.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe;

	lockdep_assert_held(&display->pps.mutex);

	/* We should never land here with regular DP ports */
	drm_WARN_ON(display->drm, !intel_dp_is_edp(intel_dp));

	drm_WARN_ON(display->drm, intel_dp->pps.vlv_active_pipe != INVALID_PIPE &&
		    intel_dp->pps.vlv_active_pipe != intel_dp->pps.vlv_pps_pipe);

	if (intel_dp->pps.vlv_pps_pipe != INVALID_PIPE)
		return intel_dp->pps.vlv_pps_pipe;

	pipe = vlv_find_free_pps(display);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (drm_WARN_ON(display->drm, pipe == INVALID_PIPE))
		pipe = PIPE_A;

	vlv_steal_power_sequencer(display, pipe);
	intel_dp->pps.vlv_pps_pipe = pipe;

	drm_dbg_kms(display->drm,
		    "picked %s for [ENCODER:%d:%s]\n",
		    pps_name(intel_dp),
		    dig_port->base.base.base.id, dig_port->base.base.name);

	/* init power sequencer on this pipe and port */
	pps_init_delays(intel_dp);
	pps_init_registers(intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps.vlv_pps_pipe;
}

static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	int pps_idx = intel_dp->pps.pps_idx;

	lockdep_assert_held(&display->pps.mutex);

	/* We should never land here with regular DP ports */
	drm_WARN_ON(display->drm, !intel_dp_is_edp(intel_dp));

	if (!intel_dp->pps.bxt_pps_reset)
		return pps_idx;

	intel_dp->pps.bxt_pps_reset = false;

	/*
	 * Only the HW needs to be reprogrammed, the SW state is fixed and
	 * has been setup during connector init.
	 */
	pps_init_registers(intel_dp, false);

	return pps_idx;
}

/* Predicate used when probing for a usable PPS instance. */
typedef bool (*pps_check)(struct intel_display *display, int pps_idx);

static bool pps_has_pp_on(struct intel_display *display, int pps_idx)
{
	return intel_de_read(display, PP_STATUS(display, pps_idx)) & PP_ON;
}

static bool pps_has_vdd_on(struct intel_display *display, int pps_idx)
{
	return intel_de_read(display, PP_CONTROL(display, pps_idx)) & EDP_FORCE_VDD;
}

static bool pps_any(struct intel_display *display, int pps_idx)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct intel_display *display,
		     enum port port, pps_check check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = intel_de_read(display,
					     PP_ON_DELAYS(display, pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!check(display, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum port port = dig_port->base.port;

	lockdep_assert_held(&display->pps.mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps.vlv_pps_pipe = vlv_initial_pps_pipe(display, port,
							  pps_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps.vlv_pps_pipe == INVALID_PIPE)
		intel_dp->pps.vlv_pps_pipe = vlv_initial_pps_pipe(display, port,
								  pps_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps.vlv_pps_pipe == INVALID_PIPE)
		intel_dp->pps.vlv_pps_pipe = vlv_initial_pps_pipe(display, port,
								  pps_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps.vlv_pps_pipe == INVALID_PIPE) {
		drm_dbg_kms(display->drm,
			    "[ENCODER:%d:%s] no initial power sequencer\n",
			    dig_port->base.base.base.id, dig_port->base.base.name);
		return;
	}

	drm_dbg_kms(display->drm,
		    "[ENCODER:%d:%s] initial power sequencer: %s\n",
		    dig_port->base.base.base.id, dig_port->base.base.name,
		    pps_name(intel_dp));
}

/* Number of independent panel power sequencer instances on this platform. */
static int intel_num_pps(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		return 2;

	if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
		return 2;

	if (INTEL_PCH_TYPE(i915) >= PCH_MTL)
		return 2;

	if (INTEL_PCH_TYPE(i915) >= PCH_DG1)
		return 1;

	if (INTEL_PCH_TYPE(i915) >= PCH_ICP)
		return 2;

	return 1;
}

/*
 * On ICP..ADP PCHs the second PPS is only usable when the
 * ICP_SECOND_PPS_IO_SELECT chicken bit has routed its IO pins.
 */
static bool intel_pps_is_valid(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_i915_private *i915 = to_i915(display->drm);

	if (intel_dp->pps.pps_idx == 1 &&
	    INTEL_PCH_TYPE(i915) >= PCH_ICP &&
	    INTEL_PCH_TYPE(i915) <= PCH_ADP)
		return intel_de_read(display, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;

	return true;
}

static int
bxt_initial_pps_idx(struct intel_display *display, pps_check check)
{
	int pps_idx, pps_num = intel_num_pps(display);

	for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
		if (check(display, pps_idx))
			return pps_idx;
	}

	return -1;
}

static bool
pps_initial_setup(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);

	lockdep_assert_held(&display->pps.mutex);

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		vlv_initial_power_sequencer_setup(intel_dp);
		return true;
	}

	/* first ask the VBT */
	if (intel_num_pps(display) > 1)
		intel_dp->pps.pps_idx = connector->panel.vbt.backlight.controller;
	else
		intel_dp->pps.pps_idx = 0;

	if (drm_WARN_ON(display->drm, intel_dp->pps.pps_idx >= intel_num_pps(display)))
		intel_dp->pps.pps_idx = -1;

	/* VBT wasn't parsed yet? pick one where the panel is on */
	if (intel_dp->pps.pps_idx < 0)
		intel_dp->pps.pps_idx = bxt_initial_pps_idx(display, pps_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps.pps_idx < 0)
		intel_dp->pps.pps_idx = bxt_initial_pps_idx(display, pps_has_vdd_on);
	/* didn't find one? pick any */
	if (intel_dp->pps.pps_idx < 0) {
		intel_dp->pps.pps_idx = bxt_initial_pps_idx(display, pps_any);

		drm_dbg_kms(display->drm,
			    "[ENCODER:%d:%s] no initial power sequencer, assuming %s\n",
			    encoder->base.base.id, encoder->base.name,
			    pps_name(intel_dp));
	} else {
		drm_dbg_kms(display->drm,
			    "[ENCODER:%d:%s] initial power sequencer: %s\n",
			    encoder->base.base.id, encoder->base.name,
			    pps_name(intel_dp));
	}

	return intel_pps_is_valid(intel_dp);
}

void intel_pps_reset_all(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	struct intel_encoder *encoder;

	if (drm_WARN_ON(display->drm, !IS_LP(dev_priv)))
		return;

	if (!HAS_DISPLAY(display))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use vlv_pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_dp(display->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			drm_WARN_ON(display->drm,
				    intel_dp->pps.vlv_active_pipe != INVALID_PIPE);

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			intel_dp->pps.vlv_pps_pipe = INVALID_PIPE;
		else
			intel_dp->pps.bxt_pps_reset = true;
	}
}

struct pps_registers {
	i915_reg_t pp_ctrl;
	i915_reg_t pp_stat;
	i915_reg_t pp_on;
	i915_reg_t pp_off;
	i915_reg_t pp_div;
};

static void intel_pps_get_registers(struct intel_dp *intel_dp,
				    struct pps_registers *regs)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	int pps_idx;

	memset(regs, 0, sizeof(*regs));

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_idx = vlv_power_sequencer_pipe(intel_dp);
	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else
		pps_idx = intel_dp->pps.pps_idx;

	regs->pp_ctrl = PP_CONTROL(display, pps_idx);
	regs->pp_stat = PP_STATUS(display, pps_idx);
	regs->pp_on = PP_ON_DELAYS(display, pps_idx);
	regs->pp_off = PP_OFF_DELAYS(display, pps_idx);

	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
	    INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		regs->pp_div = INVALID_MMIO_REG;
	else
		regs->pp_div = PP_DIVISOR(display, pps_idx);
}

static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_ctrl;
}

static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_stat;
}

/* Is panel power (PP_ON) currently asserted for this port's PPS? */
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(display->drm);

	lockdep_assert_held(&display->pps.mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps.vlv_pps_pipe == INVALID_PIPE)
		return false;

	return (intel_de_read(display, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

/* Is the VDD force override currently asserted for this port's PPS? */
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(display->drm);

	lockdep_assert_held(&display->pps.mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps.vlv_pps_pipe == INVALID_PIPE)
		return false;

	return intel_de_read(display, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

void intel_pps_check_power_unlocked(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		drm_WARN(display->drm, 1,
			 "[ENCODER:%d:%s] %s powered off while attempting AUX CH communication.\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 pps_name(intel_dp));
		drm_dbg_kms(display->drm,
			    "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			    dig_port->base.base.base.id, dig_port->base.base.name,
			    pps_name(intel_dp),
			    intel_de_read(display, _pp_stat_reg(intel_dp)),
			    intel_de_read(display, _pp_ctrl_reg(intel_dp)));
	}
}

/* PP_STATUS mask/value pairs describing the idle states waited for below. */
#define IDLE_ON_MASK	(PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE	(PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK	(PP_ON | PP_SEQUENCE_MASK | 0 | 0)
#define IDLE_OFF_VALUE	(0 | PP_SEQUENCE_NONE | 0 | 0)

#define IDLE_CYCLE_MASK	(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)

static void intel_pps_verify_state(struct intel_dp *intel_dp);

/* Poll PP_STATUS (up to 5s) until (status & mask) == value. */
static void wait_panel_status(struct intel_dp *intel_dp,
			      u32 mask, u32 value)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&display->pps.mutex);

	intel_pps_verify_state(intel_dp);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	drm_dbg_kms(display->drm,
		    "[ENCODER:%d:%s] %s mask: 0x%08x value: 0x%08x PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		    dig_port->base.base.base.id, dig_port->base.base.name,
		    pps_name(intel_dp),
		    mask, value,
		    intel_de_read(display, pp_stat_reg),
		    intel_de_read(display, pp_ctrl_reg));

	if (intel_de_wait(display, pp_stat_reg, mask, value, 5000))
		drm_err(display->drm,
			"[ENCODER:%d:%s] %s panel status timeout: PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			dig_port->base.base.base.id, dig_port->base.base.name,
			pps_name(intel_dp),
			intel_de_read(display, pp_stat_reg),
			intel_de_read(display, pp_ctrl_reg));

	drm_dbg_kms(display->drm, "Wait complete\n");
}

static void wait_panel_on(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	drm_dbg_kms(display->drm,
		    "[ENCODER:%d:%s] %s wait for panel power on\n",
		    dig_port->base.base.base.id, dig_port->base.base.name,
		    pps_name(intel_dp));
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

static void wait_panel_off(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	drm_dbg_kms(display->drm,
		    "[ENCODER:%d:%s] %s wait for panel power off time\n",
		    dig_port->base.base.base.id, dig_port->base.base.name,
		    pps_name(intel_dp));
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	ktime_t panel_power_on_time;
	s64 panel_power_off_duration;

	drm_dbg_kms(display->drm,
		    "[ENCODER:%d:%s] %s wait for panel power cycle\n",
		    dig_port->base.base.base.id, dig_port->base.base.name,
		    pps_name(intel_dp));

	/* take the difference of current time and panel power off time
	 * and then make panel wait for t11_t12 if needed. */
	panel_power_on_time = ktime_get_boottime();
	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->pps.panel_power_off_time);

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	if (panel_power_off_duration < (s64)intel_dp->pps.panel_power_cycle_delay)
		wait_remaining_ms_from_jiffies(jiffies,
					       intel_dp->pps.panel_power_cycle_delay - panel_power_off_duration);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}

void intel_pps_wait_power_cycle(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_intel_pps_lock(intel_dp, wakeref)
		wait_panel_power_cycle(intel_dp);
}

static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->pps.last_power_on,
				       intel_dp->pps.backlight_on_delay);
}

static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->pps.last_backlight_off,
				       intel_dp->pps.backlight_off_delay);
}

/* Read the current pp_control value, unlocking the register if it
 * is locked
 */
static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	u32 control;

	lockdep_assert_held(&display->pps.mutex);

	control = intel_de_read(display, _pp_ctrl_reg(intel_dp));
	if (drm_WARN_ON(display->drm, !HAS_DDI(display) &&
			(control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
		control &= ~PANEL_UNLOCK_MASK;
		control |= PANEL_UNLOCK_REGS;
	}
	return control;
}

/*
 * Must be paired with intel_pps_vdd_off_unlocked().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_pps_vdd_{on,off}() calls.
 */
bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;
	bool need_to_disable = !intel_dp->pps.want_panel_vdd;

	lockdep_assert_held(&display->pps.mutex);

	if (!intel_dp_is_edp(intel_dp))
		return false;

	cancel_delayed_work(&intel_dp->pps.panel_vdd_work);
	intel_dp->pps.want_panel_vdd = true;

	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	drm_WARN_ON(display->drm, intel_dp->pps.vdd_wakeref);
	intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
							    intel_aux_power_domain(dig_port));

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turning VDD on\n",
		    dig_port->base.base.base.id, dig_port->base.base.name,
		    pps_name(intel_dp));

	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ilk_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	intel_de_write(display, pp_ctrl_reg, pp);
	intel_de_posting_read(display, pp_ctrl_reg);
	drm_dbg_kms(display->drm,
		    "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		    dig_port->base.base.base.id, dig_port->base.base.name,
		    pps_name(intel_dp),
		    intel_de_read(display, pp_stat_reg),
		    intel_de_read(display, pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		drm_dbg_kms(display->drm,
			    "[ENCODER:%d:%s] %s panel power wasn't enabled\n",
			    dig_port->base.base.base.id, dig_port->base.base.name,
			    pps_name(intel_dp));
		msleep(intel_dp->pps.panel_power_up_delay);
	}

	return need_to_disable;
}

/*
 * Must be paired with intel_pps_off().
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 */
void intel_pps_vdd_on(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_i915_private *i915 = to_i915(display->drm);
	intel_wakeref_t wakeref;
	bool vdd;

	if (!intel_dp_is_edp(intel_dp))
		return;

	vdd = false;
	with_intel_pps_lock(intel_dp, wakeref)
		vdd = intel_pps_vdd_on_unlocked(intel_dp);
	I915_STATE_WARN(i915, !vdd, "[ENCODER:%d:%s] %s VDD already requested on\n",
			dp_to_dig_port(intel_dp)->base.base.base.id,
			dp_to_dig_port(intel_dp)->base.base.name,
			pps_name(intel_dp));
}

static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&display->pps.mutex);

	drm_WARN_ON(display->drm, intel_dp->pps.want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turning VDD off\n",
		    dig_port->base.base.base.id, dig_port->base.base.name,
		    pps_name(intel_dp));

	pp = ilk_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	intel_de_write(display, pp_ctrl_reg, pp);
	intel_de_posting_read(display, pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	drm_dbg_kms(display->drm,
		    "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		    dig_port->base.base.base.id, dig_port->base.base.name,
		    pps_name(intel_dp),
		    intel_de_read(display, pp_stat_reg),
		    intel_de_read(display, pp_ctrl_reg));

	if ((pp & PANEL_POWER_ON) == 0)
		intel_dp->pps.panel_power_off_time = ktime_get_boottime();

	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dig_port),
				fetch_and_zero(&intel_dp->pps.vdd_wakeref));
}

void intel_pps_vdd_off_sync(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	cancel_delayed_work_sync(&intel_dp->pps.panel_vdd_work);
	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	with_intel_pps_lock(intel_dp, wakeref)
		intel_pps_vdd_off_sync_unlocked(intel_dp);
}

/* Delayed-work callback: drop VDD if nobody wants it anymore. */
static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_pps *pps = container_of(to_delayed_work(__work),
					     struct intel_pps, panel_vdd_work);
	struct intel_dp *intel_dp = container_of(pps, struct intel_dp, pps);
	intel_wakeref_t wakeref;

	with_intel_pps_lock(intel_dp, wakeref) {
		if (!intel_dp->pps.want_panel_vdd)
			intel_pps_vdd_off_sync_unlocked(intel_dp);
	}
}

static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_i915_private *i915 = to_i915(display->drm);
	unsigned long delay;

	/*
	 * We may not yet know the real power sequencing delays,
	 * so keep VDD enabled until we're done with init.
	 */
	if (intel_dp->pps.initializing)
		return;

	/*
	 * Queue the timer to fire a long time from now (relative to the power
	 * down delay) to keep the panel power up across a sequence of
	 * operations.
	 */
	delay = msecs_to_jiffies(intel_dp->pps.panel_power_cycle_delay * 5);
	queue_delayed_work(i915->unordered_wq,
			   &intel_dp->pps.panel_vdd_work, delay);
}

/*
 * Must be paired with edp_panel_vdd_on().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_pps_vdd_{on,off}() calls.
 */
void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(display->drm);

	lockdep_assert_held(&display->pps.mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	I915_STATE_WARN(dev_priv, !intel_dp->pps.want_panel_vdd,
			"[ENCODER:%d:%s] %s VDD not forced on",
			dp_to_dig_port(intel_dp)->base.base.base.id,
			dp_to_dig_port(intel_dp)->base.base.name,
			pps_name(intel_dp));

	intel_dp->pps.want_panel_vdd = false;

	/* sync: drop VDD immediately; otherwise defer via delayed work */
	if (sync)
		intel_pps_vdd_off_sync_unlocked(intel_dp);
	else
		edp_panel_vdd_schedule_off(intel_dp);
}

void intel_pps_on_unlocked(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&display->pps.mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turn panel power on\n",
		    dp_to_dig_port(intel_dp)->base.base.base.id,
		    dp_to_dig_port(intel_dp)->base.base.name,
		    pps_name(intel_dp));

	if (drm_WARN(display->drm, edp_have_panel_power(intel_dp),
		     "[ENCODER:%d:%s] %s panel power already on\n",
		     dp_to_dig_port(intel_dp)->base.base.base.id,
		     dp_to_dig_port(intel_dp)->base.base.name,
		     pps_name(intel_dp)))
		return;

	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ilk_get_pp_control(intel_dp);
	if (IS_IRONLAKE(dev_priv)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		intel_de_write(display, pp_ctrl_reg, pp);
		intel_de_posting_read(display, pp_ctrl_reg);
	}

	/*
	 * WA: 22019252566
	 * Disable DPLS gating around power sequence.
	 */
	if (IS_DISPLAY_VER(display, 13, 14))
		intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
			     0, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);

	pp |= PANEL_POWER_ON;
	if (!IS_IRONLAKE(dev_priv))
		pp |= PANEL_POWER_RESET;

	intel_de_write(display, pp_ctrl_reg, pp);
	intel_de_posting_read(display, pp_ctrl_reg);

	wait_panel_on(intel_dp);
	intel_dp->pps.last_power_on = jiffies;

	/* re-enable DPLS gating now that the power sequence is done (WA above) */
	if (IS_DISPLAY_VER(display, 13, 14))
		intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
			     PCH_DPLSUNIT_CLOCK_GATE_DISABLE, 0);

	if (IS_IRONLAKE(dev_priv)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		intel_de_write(display, pp_ctrl_reg, pp);
		intel_de_posting_read(display, pp_ctrl_reg);
	}
}

void intel_pps_on(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_intel_pps_lock(intel_dp, wakeref)
		intel_pps_on_unlocked(intel_dp);
}

void intel_pps_off_unlocked(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&display->pps.mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turn panel power off\n",
		    dig_port->base.base.base.id, dig_port->base.base.name,
		    pps_name(intel_dp));

	drm_WARN(display->drm, !intel_dp->pps.want_panel_vdd,
		 "[ENCODER:%d:%s] %s need VDD to turn off panel\n",
		 dig_port->base.base.base.id, dig_port->base.base.name,
		 pps_name(intel_dp));

	pp = ilk_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->pps.want_panel_vdd = false;

	intel_de_write(display, pp_ctrl_reg, pp);
	intel_de_posting_read(display, pp_ctrl_reg);

	wait_panel_off(intel_dp);
	intel_dp->pps.panel_power_off_time = ktime_get_boottime();

	/* We got a reference when we enabled the VDD. */
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dig_port),
				fetch_and_zero(&intel_dp->pps.vdd_wakeref));
}

void intel_pps_off(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_intel_pps_lock(intel_dp, wakeref)
		intel_pps_off_unlocked(intel_dp);
}

/* Enable backlight in the panel power control. */
void intel_pps_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link. So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	with_intel_pps_lock(intel_dp, wakeref) {
		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		u32 pp;

		pp = ilk_get_pp_control(intel_dp);
		pp |= EDP_BLC_ENABLE;

		intel_de_write(display, pp_ctrl_reg, pp);
		intel_de_posting_read(display, pp_ctrl_reg);
	}
}

/* Disable backlight in the panel power control.
*/
void intel_pps_backlight_off(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_intel_pps_lock(intel_dp, wakeref) {
		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		u32 pp;

		pp = ilk_get_pp_control(intel_dp);
		pp &= ~EDP_BLC_ENABLE;

		intel_de_write(display, pp_ctrl_reg, pp);
		intel_de_posting_read(display, pp_ctrl_reg);
	}

	/* Record when the backlight went off so the off delay can be honoured. */
	intel_dp->pps.last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
}

/*
 * Hook for controlling the panel power control backlight through the bl_power
 * sysfs attribute. Take care to handle multiple calls.
 */
void intel_pps_backlight_power(struct intel_connector *connector, bool enable)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	intel_wakeref_t wakeref;
	bool is_enabled;

	is_enabled = false;
	with_intel_pps_lock(intel_dp, wakeref)
		is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
	/* Already in the requested state: nothing to do. */
	if (is_enabled == enable)
		return;

	drm_dbg_kms(display->drm, "panel power control backlight %s\n",
		    enable ? "enable" : "disable");

	if (enable)
		intel_pps_backlight_on(intel_dp);
	else
		intel_pps_backlight_off(intel_dp);
}

/*
 * Logically disconnect this port's power sequencer: sync VDD off and
 * clear the PPS port select, then mark the port as having no PPS pipe.
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps.vlv_pps_pipe;
	i915_reg_t pp_on_reg = PP_ON_DELAYS(display, pipe);

	drm_WARN_ON(display->drm, intel_dp->pps.vlv_active_pipe != INVALID_PIPE);

	if (drm_WARN_ON(display->drm, pipe != PIPE_A && pipe != PIPE_B))
		return;

	intel_pps_vdd_off_sync_unlocked(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	drm_dbg_kms(display->drm,
		    "detaching %s from [ENCODER:%d:%s]\n",
		    pps_name(intel_dp),
		    dig_port->base.base.base.id, dig_port->base.base.name);
	/* Clear the port select in PP_ON_DELAYS to fully detach the PPS. */
	intel_de_write(display, pp_on_reg, 0);
	intel_de_posting_read(display, pp_on_reg);

	intel_dp->pps.vlv_pps_pipe = INVALID_PIPE;
}

/*
 * Detach the power sequencer of @pipe from whichever DP encoder
 * currently owns it so it can be reassigned. Warns if the PPS is being
 * stolen from an encoder that is actively using it.
 */
static void vlv_steal_power_sequencer(struct intel_display *display,
				      enum pipe pipe)
{
	struct intel_encoder *encoder;

	lockdep_assert_held(&display->pps.mutex);

	for_each_intel_dp(display->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_WARN(display->drm, intel_dp->pps.vlv_active_pipe == pipe,
			 "stealing PPS %c from active [ENCODER:%d:%s]\n",
			 pipe_name(pipe), encoder->base.base.id,
			 encoder->base.name);

		if (intel_dp->pps.vlv_pps_pipe != pipe)
			continue;

		drm_dbg_kms(display->drm,
			    "stealing PPS %c from [ENCODER:%d:%s]\n",
			    pipe_name(pipe), encoder->base.base.id,
			    encoder->base.name);

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}

/*
 * Return the pipe currently driving this DP port, or INVALID_PIPE if
 * the port is not enabled.
 */
static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	enum pipe pipe;

	if (g4x_dp_port_enabled(dev_priv, intel_dp->output_reg,
				encoder->port, &pipe))
		return pipe;

	return INVALID_PIPE;
}

/* Call on all DP, not just eDP */
void vlv_pps_pipe_init(struct intel_dp *intel_dp)
{
	/* No PPS assigned yet; record which pipe (if any) drives the port. */
	intel_dp->pps.vlv_pps_pipe = INVALID_PIPE;
	intel_dp->pps.vlv_active_pipe = vlv_active_pipe(intel_dp);
}

/* Call on all DP, not just eDP */
void vlv_pps_pipe_reset(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	with_intel_pps_lock(intel_dp, wakeref)
		intel_dp->pps.vlv_active_pipe = vlv_active_pipe(intel_dp);
}

/*
 * Pick the pipe used for the initial backlight setup: the currently
 * active pipe when valid, else the PPS pipe, else pipe A.
 */
enum pipe vlv_pps_backlight_initial_pipe(struct intel_dp *intel_dp)
{
	enum pipe pipe;

	/*
	 * Figure out the current pipe for the initial backlight setup. If the
	 * current pipe isn't valid, try the PPS pipe, and if that fails just
	 * assume pipe A.
	 */
	pipe = vlv_active_pipe(intel_dp);

	if (pipe != PIPE_A && pipe != PIPE_B)
		pipe = intel_dp->pps.vlv_pps_pipe;

	if (pipe != PIPE_A && pipe != PIPE_B)
		pipe = PIPE_A;

	return pipe;
}

/* Call on all DP, not just eDP */
void vlv_pps_port_enable_unlocked(struct intel_encoder *encoder,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	lockdep_assert_held(&display->pps.mutex);

	drm_WARN_ON(display->drm, intel_dp->pps.vlv_active_pipe != INVALID_PIPE);

	if (intel_dp->pps.vlv_pps_pipe != INVALID_PIPE &&
	    intel_dp->pps.vlv_pps_pipe != crtc->pipe) {
		/*
		 * If another power sequencer was being used on this
		 * port previously make sure to turn off vdd there while
		 * we still have control of it.
		 */
		vlv_detach_power_sequencer(intel_dp);
	}

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(display, crtc->pipe);

	intel_dp->pps.vlv_active_pipe = crtc->pipe;

	/* Only eDP needs the power sequencer itself (re)programmed. */
	if (!intel_dp_is_edp(intel_dp))
		return;

	/* now it's all ours */
	intel_dp->pps.vlv_pps_pipe = crtc->pipe;

	drm_dbg_kms(display->drm,
		    "initializing %s for [ENCODER:%d:%s]\n",
		    pps_name(intel_dp),
		    encoder->base.base.id, encoder->base.name);

	/* init power sequencer on this pipe and port */
	pps_init_delays(intel_dp);
	pps_init_registers(intel_dp, true);
}

/* Call on all DP, not just eDP */
void vlv_pps_port_disable(struct intel_encoder *encoder,
			  const struct intel_crtc_state *crtc_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	intel_wakeref_t wakeref;

	with_intel_pps_lock(intel_dp, wakeref)
		intel_dp->pps.vlv_active_pipe = INVALID_PIPE;
}

/*
 * Fix up power domain tracking when the BIOS left VDD force enabled.
 * Caller must hold the PPS mutex.
 */
static void pps_vdd_init(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	lockdep_assert_held(&display->pps.mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	drm_dbg_kms(display->drm,
		    "[ENCODER:%d:%s] %s VDD left on by BIOS, adjusting state tracking\n",
		    dig_port->base.base.base.id, dig_port->base.base.name,
		    pps_name(intel_dp));
	drm_WARN_ON(display->drm, intel_dp->pps.vdd_wakeref);
	intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
							    intel_aux_power_domain(dig_port));
}

/* Return true if the panel has power or VDD force enabled right now. */
bool intel_pps_have_panel_power_or_vdd(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;
	bool have_power = false;

	with_intel_pps_lock(intel_dp, wakeref) {
		have_power = edp_have_panel_power(intel_dp) ||
			     edp_have_panel_vdd(intel_dp);
	}

	return have_power;
}

static void pps_init_timestamps(struct intel_dp *intel_dp)
{
	/*
	 * Initialize panel power off time to 0, assuming panel power could have
	 * been toggled between kernel boot and now only by a previously loaded
	 * and removed i915, which has already ensured sufficient power off
	 * delay at module remove.
	 */
	intel_dp->pps.panel_power_off_time = 0;
	intel_dp->pps.last_power_on = jiffies;
	intel_dp->pps.last_backlight_off = jiffies;
}

/* Read the panel power sequencing delays back from the hardware into @seq. */
static void
intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
	struct intel_display *display = to_intel_display(intel_dp);
	u32 pp_on, pp_off, pp_ctl;
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	pp_ctl = ilk_get_pp_control(intel_dp);

	/* Ensure PPS is unlocked */
	if (!HAS_DDI(display))
		intel_de_write(display, regs.pp_ctrl, pp_ctl);

	pp_on = intel_de_read(display, regs.pp_on);
	pp_off = intel_de_read(display, regs.pp_off);

	/* Pull timing values out of registers */
	seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
	seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
	seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
	seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);

	if (i915_mmio_reg_valid(regs.pp_div)) {
		u32 pp_div;

		pp_div = intel_de_read(display, regs.pp_div);

		seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
	} else {
		/* No PP_DIV register: the cycle delay lives in PP_CONTROL. */
		seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
	}
}

/* Log one set of power sequencing delays, labelled with @state_name. */
static void
intel_pps_dump_state(struct intel_dp *intel_dp, const char *state_name,
		     const struct edp_power_seq *seq)
{
	struct intel_display *display = to_intel_display(intel_dp);

	drm_dbg_kms(display->drm,
		    "%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		    state_name,
		    seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
}

/* Complain if the delays programmed in hardware differ from our sw state. */
static void
intel_pps_verify_state(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct edp_power_seq hw;
	struct edp_power_seq *sw = &intel_dp->pps.pps_delays;

	intel_pps_readout_hw_state(intel_dp, &hw);

	if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
	    hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
		drm_err(display->drm, "PPS state mismatch\n");
		intel_pps_dump_state(intel_dp, "sw", sw);
		intel_pps_dump_state(intel_dp, "hw", &hw);
	}
}

/* A delay set counts as initialized if any of its fields is non-zero. */
static bool pps_delays_valid(struct edp_power_seq *delays)
{
	return delays->t1_t3 || delays->t8 || delays->t9 ||
	       delays->t10 || delays->t11_t12;
}

/*
 * Return the delays as left programmed by the BIOS, reading them from
 * the hardware on first use and caching them thereafter.
 */
static void pps_init_delays_bios(struct intel_dp *intel_dp,
				 struct edp_power_seq *bios)
{
	struct intel_display *display = to_intel_display(intel_dp);

	lockdep_assert_held(&display->pps.mutex);

	if (!pps_delays_valid(&intel_dp->pps.bios_pps_delays))
		intel_pps_readout_hw_state(intel_dp, &intel_dp->pps.bios_pps_delays);

	*bios = intel_dp->pps.bios_pps_delays;

	intel_pps_dump_state(intel_dp, "bios", bios);
}

/* Return the delays specified in the VBT, if any, adjusted for quirks. */
static void pps_init_delays_vbt(struct intel_dp *intel_dp,
				struct edp_power_seq *vbt)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;

	*vbt = connector->panel.vbt.edp.pps;

	if (!pps_delays_valid(vbt))
		return;

	/* On Toshiba Satellite P50-C-18C system the VBT T12 delay
	 * of 500ms appears to be too short. Occasionally the panel
	 * just fails to power back on. Enforcing a larger minimum
	 * (1300 in the hw's 100usec units, i.e. 1.3s) seems
	 * sufficient to avoid this problem.
	 */
	if (intel_has_quirk(display, QUIRK_INCREASE_T12_DELAY)) {
		vbt->t11_t12 = max_t(u16, vbt->t11_t12, 1300 * 10);
		drm_dbg_kms(display->drm,
			    "Increasing T12 panel delay as per the quirk to %d\n",
			    vbt->t11_t12);
	}

	/* T11_T12 delay is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms).
But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	vbt->t11_t12 += 100 * 10;

	intel_pps_dump_state(intel_dp, "vbt", vbt);
}

/* Fill in the eDP 1.3 spec upper limits, in the hw's 100usec units. */
static void pps_init_delays_spec(struct intel_dp *intel_dp,
				 struct edp_power_seq *spec)
{
	struct intel_display *display = to_intel_display(intel_dp);

	lockdep_assert_held(&display->pps.mutex);

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec->t1_t3 = 210 * 10;
	spec->t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec->t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec->t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec->t11_t12 = (510 + 100) * 10;

	intel_pps_dump_state(intel_dp, "spec", spec);
}

/*
 * Compute the final power sequencing delays: max of the BIOS-programmed
 * registers and the VBT, falling back to the spec limits when both are
 * unset. Also derives the software wait durations (in ms) from them.
 */
static void pps_init_delays(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps.pps_delays;

	lockdep_assert_held(&display->pps.mutex);

	/* already initialized? */
	if (pps_delays_valid(final))
		return;

	pps_init_delays_bios(intel_dp, &cur);
	pps_init_delays_vbt(intel_dp, &vbt);
	pps_init_delays_spec(intel_dp, &spec);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from the hw's 100usec units to ms, rounding up. */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->pps.panel_power_up_delay = get_delay(t1_t3);
	intel_dp->pps.backlight_on_delay = get_delay(t8);
	intel_dp->pps.backlight_off_delay = get_delay(t9);
	intel_dp->pps.panel_power_down_delay = get_delay(t10);
	intel_dp->pps.panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	drm_dbg_kms(display->drm,
		    "panel power up delay %d, power down delay %d, power cycle delay %d\n",
		    intel_dp->pps.panel_power_up_delay,
		    intel_dp->pps.panel_power_down_delay,
		    intel_dp->pps.panel_power_cycle_delay);

	drm_dbg_kms(display->drm, "backlight on delay %d, off delay %d\n",
		    intel_dp->pps.backlight_on_delay,
		    intel_dp->pps.backlight_off_delay);

	/*
	 * We override the HW backlight delays to 1 because we do manual waits
	 * on them. For T8, even BSpec recommends doing it. For T9, if we
	 * don't do this, we'll end up waiting for the backlight off delay
	 * twice: once when we do the manual sleep, and once when we disable
	 * the panel and wait for the PP_STATUS bit to become zero.
	 */
	final->t8 = 1;
	final->t9 = 1;

	/*
	 * HW has only a 100msec granularity for t11_t12 so round it up
	 * accordingly.
	 */
	final->t11_t12 = roundup(final->t11_t12, 100 * 10);
}

/*
 * Program the power sequencing delays and port select into the PPS
 * registers. Caller must hold the PPS mutex.
 */
static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	u32 pp_on, pp_off, port_sel = 0;
	int div = DISPLAY_RUNTIME_INFO(display)->rawclk_freq / 1000;
	struct pps_registers regs;
	enum port port = dp_to_dig_port(intel_dp)->base.port;
	const struct edp_power_seq *seq = &intel_dp->pps.pps_delays;

	lockdep_assert_held(&display->pps.mutex);

	intel_pps_get_registers(intel_dp, &regs);

	/*
	 * On some VLV machines the BIOS can leave the VDD
	 * enabled even on power sequencers which aren't
	 * hooked up to any port. This would mess up the
	 * power domain tracking the first time we pick
	 * one of these power sequencers for use since
	 * intel_pps_vdd_on_unlocked() would notice that the VDD was
	 * already on and therefore wouldn't grab the power
	 * domain reference. Disable VDD first to avoid this.
	 * This also avoids spuriously turning the VDD on as
	 * soon as the new power sequencer gets initialized.
	 */
	if (force_disable_vdd) {
		u32 pp = ilk_get_pp_control(intel_dp);

		drm_WARN(display->drm, pp & PANEL_POWER_ON,
			 "Panel power already on\n");

		if (pp & EDP_FORCE_VDD)
			drm_dbg_kms(display->drm,
				    "VDD already on, disabling first\n");

		pp &= ~EDP_FORCE_VDD;

		intel_de_write(display, regs.pp_ctrl, pp);
	}

	pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
		REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
	pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
		REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more.
 */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		switch (port) {
		case PORT_A:
			port_sel = PANEL_PORT_SELECT_DPA;
			break;
		case PORT_C:
			port_sel = PANEL_PORT_SELECT_DPC;
			break;
		case PORT_D:
			port_sel = PANEL_PORT_SELECT_DPD;
			break;
		default:
			MISSING_CASE(port);
			break;
		}
	}

	pp_on |= port_sel;

	intel_de_write(display, regs.pp_on, pp_on);
	intel_de_write(display, regs.pp_off, pp_off);

	/*
	 * Compute the divisor for the pp clock, simply match the Bspec formula.
	 */
	if (i915_mmio_reg_valid(regs.pp_div))
		intel_de_write(display, regs.pp_div,
			       REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
	else
		/* No PP_DIV register: cycle delay is a field in PP_CONTROL. */
		intel_de_rmw(display, regs.pp_ctrl, BXT_POWER_CYCLE_DELAY_MASK,
			     REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK,
					    DIV_ROUND_UP(seq->t11_t12, 1000)));

	drm_dbg_kms(display->drm,
		    "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		    intel_de_read(display, regs.pp_on),
		    intel_de_read(display, regs.pp_off),
		    i915_mmio_reg_valid(regs.pp_div) ?
		    intel_de_read(display, regs.pp_div) :
		    (intel_de_read(display, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
}

/*
 * Re-initialize the power sequencer, e.g. on resume. No-op for non-eDP
 * outputs.
 */
void intel_pps_encoder_reset(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_i915_private *i915 = to_i915(display->drm);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_intel_pps_lock(intel_dp, wakeref) {
		/*
		 * Reinit the power sequencer also on the resume path, in case
		 * BIOS did something nasty with it.
		 */
		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
			vlv_initial_power_sequencer_setup(intel_dp);

		pps_init_delays(intel_dp);
		pps_init_registers(intel_dp, false);
		pps_vdd_init(intel_dp);

		if (edp_have_panel_vdd(intel_dp))
			edp_panel_vdd_schedule_off(intel_dp);
	}
}

/*
 * First-stage PPS init at encoder setup time. Returns the result of
 * pps_initial_setup(). Delays are finalized later by
 * intel_pps_init_late() once VBT panel data is available.
 */
bool intel_pps_init(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;
	bool ret;

	intel_dp->pps.initializing = true;
	INIT_DELAYED_WORK(&intel_dp->pps.panel_vdd_work, edp_panel_vdd_work);

	pps_init_timestamps(intel_dp);

	with_intel_pps_lock(intel_dp, wakeref) {
		ret = pps_initial_setup(intel_dp);

		pps_init_delays(intel_dp);
		pps_init_registers(intel_dp, false);
		pps_vdd_init(intel_dp);
	}

	return ret;
}

/*
 * Switch to the PPS instance named by the VBT backlight data when it
 * differs from the initially chosen one. Only relevant with multiple
 * PPS instances, and skipped on VLV/CHV.
 */
static void pps_init_late(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_i915_private *i915 = to_i915(display->drm);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct intel_connector *connector = intel_dp->attached_connector;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		return;

	if (intel_num_pps(display) < 2)
		return;

	drm_WARN(display->drm,
		 connector->panel.vbt.backlight.controller >= 0 &&
		 intel_dp->pps.pps_idx != connector->panel.vbt.backlight.controller,
		 "[ENCODER:%d:%s] power sequencer mismatch: %d (initial) vs. %d (VBT)\n",
		 encoder->base.base.id, encoder->base.name,
		 intel_dp->pps.pps_idx, connector->panel.vbt.backlight.controller);

	if (connector->panel.vbt.backlight.controller >= 0)
		intel_dp->pps.pps_idx = connector->panel.vbt.backlight.controller;
}

/* Finish PPS init once the per-panel VBT data has been parsed. */
void intel_pps_init_late(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	with_intel_pps_lock(intel_dp, wakeref) {
		/* Reinit delays after per-panel info has been parsed from VBT */
		pps_init_late(intel_dp);

		memset(&intel_dp->pps.pps_delays, 0, sizeof(intel_dp->pps.pps_delays));
		pps_init_delays(intel_dp);
		pps_init_registers(intel_dp, false);

		intel_dp->pps.initializing = false;

		if (edp_have_panel_vdd(intel_dp))
			edp_panel_vdd_schedule_off(intel_dp);
	}
}

void intel_pps_unlock_regs_wa(struct intel_display *display)
{
	int pps_num;
	int pps_idx;

	if (!HAS_DISPLAY(display) || HAS_DDI(display))
		return;
	/*
	 * This w/a is needed at least on CPT/PPT, but to be sure apply it
	 * everywhere where registers can be write protected.
	 */
	pps_num = intel_num_pps(display);

	/* Write the unlock key into every PP_CONTROL instance. */
	for (pps_idx = 0; pps_idx < pps_num; pps_idx++)
		intel_de_rmw(display, PP_CONTROL(display, pps_idx),
			     PANEL_UNLOCK_MASK, PANEL_UNLOCK_REGS);
}

/* Pick the PPS MMIO base for this platform. */
void intel_pps_setup(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);

	if (HAS_PCH_SPLIT(i915) || IS_GEMINILAKE(i915) || IS_BROXTON(i915))
		display->pps.mmio_base = PCH_PPS_BASE;
	else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		display->pps.mmio_base = VLV_PPS_BASE;
	else
		display->pps.mmio_base = PPS_BASE;
}

/* debugfs: dump the panel power/backlight delays for a connected panel. */
static int intel_pps_show(struct seq_file *m, void *data)
{
	struct intel_connector *connector = m->private;
	struct intel_dp *intel_dp = intel_attached_dp(connector);

	if (connector->base.status != connector_status_connected)
		return -ENODEV;

	seq_printf(m, "Panel power up delay: %d\n",
		   intel_dp->pps.panel_power_up_delay);
	seq_printf(m, "Panel power down delay: %d\n",
		   intel_dp->pps.panel_power_down_delay);
	seq_printf(m, "Backlight on delay: %d\n",
		   intel_dp->pps.backlight_on_delay);
	seq_printf(m, "Backlight off delay: %d\n",
		   intel_dp->pps.backlight_off_delay);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(intel_pps);

/* Expose i915_panel_timings in debugfs for eDP connectors. */
void intel_pps_connector_debugfs_add(struct intel_connector *connector)
{
	struct dentry *root = connector->base.debugfs_entry;
	int connector_type = connector->base.connector_type;

	if (connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_panel_timings", 0444, root,
				    connector, &intel_pps_fops);
}

/*
 * Assert that the PPS registers for the panel driven from @pipe (if
 * any) are not write protected.
 */
void assert_pps_unlocked(struct intel_display *display, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	if (drm_WARN_ON(display->drm, HAS_DDI(display)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(display, 0);
		port_sel = intel_de_read(display, PP_ON_DELAYS(display, 0)) &
			PANEL_PORT_SELECT_MASK;

		/* Map the port select back to the pipe driving the panel. */
		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			g4x_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			g4x_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			g4x_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(display, pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(display, 0);
		port_sel = intel_de_read(display, PP_ON_DELAYS(display, 0)) &
			PANEL_PORT_SELECT_MASK;

		drm_WARN_ON(display->drm,
			    port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	val = intel_de_read(display, pp_reg);
	/* Unlocked when panel power is off or the unlock key is programmed. */
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(dev_priv, panel_pipe == pipe && locked,
			"panel assertion failure, pipe %c regs locked\n",
			pipe_name(pipe));
}