// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/debugfs.h>
#include <linux/iopoll.h>

#include <drm/drm_print.h>

#include "g4x_dp.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_jiffies.h"
#include "intel_display_power_well.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_lvds.h"
#include "intel_lvds_regs.h"
#include "intel_pps.h"
#include "intel_pps_regs.h"
#include "intel_quirks.h"

static void vlv_steal_power_sequencer(struct intel_display *display,
				      enum pipe pipe);

static void pps_init_delays(struct intel_dp *intel_dp);
static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd);

static const char *pps_name(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_pps *pps = &intel_dp->pps;

	if (display->platform.valleyview || display->platform.cherryview) {
		switch (pps->vlv_pps_pipe) {
		case INVALID_PIPE:
			/*
			 * FIXME would be nice if we can guarantee
			 * to always have a valid PPS when calling this.
			 */
			return "PPS <none>";
		case PIPE_A:
			return "PPS A";
		case PIPE_B:
			return "PPS B";
		default:
			MISSING_CASE(pps->vlv_pps_pipe);
			break;
		}
	} else {
		switch (pps->pps_idx) {
		case 0:
			return "PPS 0";
		case 1:
			return "PPS 1";
		default:
			MISSING_CASE(pps->pps_idx);
			break;
		}
	}

	return "PPS <invalid>";
}

intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * See vlv_pps_reset_all() for why we need a power domain reference here.
	 */
	wakeref = intel_display_power_get(display, POWER_DOMAIN_DISPLAY_CORE);
	mutex_lock(&display->pps.mutex);

	return wakeref;
}

intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp,
				 intel_wakeref_t wakeref)
{
	struct intel_display *display = to_intel_display(intel_dp);

	mutex_unlock(&display->pps.mutex);
	intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return NULL;
}

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps.vlv_pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = vlv_pipe_to_phy(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	u32 DP;

	if (drm_WARN(display->drm,
		     intel_de_read(display, intel_dp->output_reg) & DP_PORT_EN,
		     "skipping %s kick due to [ENCODER:%d:%s] being active\n",
		     pps_name(intel_dp),
		     dig_port->base.base.base.id, dig_port->base.base.name))
		return;

	drm_dbg_kms(display->drm,
		    "kicking %s for [ENCODER:%d:%s]\n",
		    pps_name(intel_dp),
		    dig_port->base.base.base.id, dig_port->base.base.name);

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = intel_de_read(display, intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (display->platform.cherryview)
		DP |= DP_PIPE_SEL_CHV(pipe);
	else
		DP |= DP_PIPE_SEL(pipe);

	pll_enabled = intel_de_read(display, DPLL(display, pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = display->platform.cherryview &&
			!chv_phy_powergate_ch(display, phy, ch, true);

		if (vlv_force_pll_on(display, pipe, vlv_get_dpll(display))) {
			drm_err(display->drm,
				"Failed to force on PLL for pipe %c!\n",
				pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	intel_de_write(display, intel_dp->output_reg, DP);
	intel_de_posting_read(display, intel_dp->output_reg);

	intel_de_write(display, intel_dp->output_reg, DP | DP_PORT_EN);
	intel_de_posting_read(display, intel_dp->output_reg);

	intel_de_write(display, intel_dp->output_reg, DP & ~DP_PORT_EN);
	intel_de_posting_read(display, intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(display, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(display, phy, ch, false);
	}
}

static enum pipe vlv_find_free_pps(struct intel_display *display)
{
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	for_each_intel_dp(display->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		if (encoder->type == INTEL_OUTPUT_EDP) {
			drm_WARN_ON(display->drm,
				    intel_dp->pps.vlv_active_pipe != INVALID_PIPE &&
				    intel_dp->pps.vlv_active_pipe !=
				    intel_dp->pps.vlv_pps_pipe);

			if (intel_dp->pps.vlv_pps_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->pps.vlv_pps_pipe);
		} else {
			drm_WARN_ON(display->drm,
				    intel_dp->pps.vlv_pps_pipe != INVALID_PIPE);

			if (intel_dp->pps.vlv_active_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->pps.vlv_active_pipe);
		}
	}

	if (pipes == 0)
		return INVALID_PIPE;

	return ffs(pipes) - 1;
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe;

	lockdep_assert_held(&display->pps.mutex);

	/* We should never land here with regular DP ports */
	drm_WARN_ON(display->drm, !intel_dp_is_edp(intel_dp));

	drm_WARN_ON(display->drm, intel_dp->pps.vlv_active_pipe != INVALID_PIPE &&
		    intel_dp->pps.vlv_active_pipe != intel_dp->pps.vlv_pps_pipe);

	if (intel_dp->pps.vlv_pps_pipe != INVALID_PIPE)
		return intel_dp->pps.vlv_pps_pipe;

	pipe = vlv_find_free_pps(display);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (drm_WARN_ON(display->drm, pipe == INVALID_PIPE))
		pipe = PIPE_A;

	vlv_steal_power_sequencer(display, pipe);
	intel_dp->pps.vlv_pps_pipe = pipe;

	drm_dbg_kms(display->drm,
		    "picked %s for [ENCODER:%d:%s]\n",
		    pps_name(intel_dp),
		    dig_port->base.base.base.id, dig_port->base.base.name);

	/* init power sequencer on this pipe and port */
	pps_init_delays(intel_dp);
	pps_init_registers(intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps.vlv_pps_pipe;
}

static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	int pps_idx = intel_dp->pps.pps_idx;

	lockdep_assert_held(&display->pps.mutex);

	/* We should never land here with regular DP ports */
	drm_WARN_ON(display->drm, !intel_dp_is_edp(intel_dp));

	if (!intel_dp->pps.bxt_pps_reset)
		return pps_idx;

	intel_dp->pps.bxt_pps_reset = false;

	/*
	 * Only the HW needs to be reprogrammed, the SW state is fixed and
	 * has been setup during connector init.
	 */
	pps_init_registers(intel_dp, false);

	return pps_idx;
}

typedef bool (*pps_check)(struct intel_display *display, int pps_idx);

static bool pps_has_pp_on(struct intel_display *display, int pps_idx)
{
	return intel_de_read(display, PP_STATUS(display, pps_idx)) & PP_ON;
}

static bool pps_has_vdd_on(struct intel_display *display, int pps_idx)
{
	return intel_de_read(display, PP_CONTROL(display, pps_idx)) & EDP_FORCE_VDD;
}

static bool pps_any(struct intel_display *display, int pps_idx)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct intel_display *display,
		     enum port port, pps_check check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = intel_de_read(display,
					     PP_ON_DELAYS(display, pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!check(display, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum port port = dig_port->base.port;

	lockdep_assert_held(&display->pps.mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps.vlv_pps_pipe = vlv_initial_pps_pipe(display, port,
							  pps_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps.vlv_pps_pipe == INVALID_PIPE)
		intel_dp->pps.vlv_pps_pipe = vlv_initial_pps_pipe(display, port,
								  pps_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps.vlv_pps_pipe == INVALID_PIPE)
		intel_dp->pps.vlv_pps_pipe = vlv_initial_pps_pipe(display, port,
								  pps_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps.vlv_pps_pipe == INVALID_PIPE) {
		drm_dbg_kms(display->drm,
			    "[ENCODER:%d:%s] no initial power sequencer\n",
			    dig_port->base.base.base.id, dig_port->base.base.name);
		return;
	}

	drm_dbg_kms(display->drm,
		    "[ENCODER:%d:%s] initial power sequencer: %s\n",
		    dig_port->base.base.base.id, dig_port->base.base.name,
		    pps_name(intel_dp));
}

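/*
 * Number of panel power sequencer instances provided by the platform
 * (or its PCH); used to validate and iterate over PPS indices.
 */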
static int intel_num_pps(struct intel_display *display)
{
	if (display->platform.valleyview || display->platform.cherryview)
		return 2;

	if (display->platform.geminilake || display->platform.broxton)
		return 2;

	if (INTEL_PCH_TYPE(display) >= PCH_MTL)
		return 2;

	if (INTEL_PCH_TYPE(display) >= PCH_DG1)
		return 1;

	if (INTEL_PCH_TYPE(display) >= PCH_ICP)
		return 2;

	return 1;
}

static bool intel_pps_is_valid(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);

	if (intel_dp->pps.pps_idx == 1 &&
	    INTEL_PCH_TYPE(display) >= PCH_ICP &&
	    INTEL_PCH_TYPE(display) <= PCH_ADP)
		return intel_de_read(display, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;

	return true;
}

static int
bxt_initial_pps_idx(struct intel_display *display, pps_check check)
{
	int pps_idx, pps_num = intel_num_pps(display);

	for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
		if (check(display, pps_idx))
			return pps_idx;
	}

	return -1;
}

static bool
pps_initial_setup(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct intel_connector *connector = intel_dp->attached_connector;

	lockdep_assert_held(&display->pps.mutex);

	if (display->platform.valleyview || display->platform.cherryview) {
		vlv_initial_power_sequencer_setup(intel_dp);
		return true;
	}

	/* first ask the VBT */
	if (intel_num_pps(display) > 1)
		intel_dp->pps.pps_idx = connector->panel.vbt.backlight.controller;
	else
		intel_dp->pps.pps_idx = 0;

	if (drm_WARN_ON(display->drm, intel_dp->pps.pps_idx >= intel_num_pps(display)))
		intel_dp->pps.pps_idx = -1;

	/* VBT wasn't parsed yet? pick one where the panel is on */
	if (intel_dp->pps.pps_idx < 0)
		intel_dp->pps.pps_idx = bxt_initial_pps_idx(display, pps_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps.pps_idx < 0)
		intel_dp->pps.pps_idx = bxt_initial_pps_idx(display, pps_has_vdd_on);
	/* didn't find one? pick any */
	if (intel_dp->pps.pps_idx < 0) {
		intel_dp->pps.pps_idx = bxt_initial_pps_idx(display, pps_any);

		drm_dbg_kms(display->drm,
			    "[ENCODER:%d:%s] no initial power sequencer, assuming %s\n",
			    encoder->base.base.id, encoder->base.name,
			    pps_name(intel_dp));
	} else {
		drm_dbg_kms(display->drm,
			    "[ENCODER:%d:%s] initial power sequencer: %s\n",
			    encoder->base.base.id, encoder->base.name,
			    pps_name(intel_dp));
	}

	return intel_pps_is_valid(intel_dp);
}

void vlv_pps_reset_all(struct intel_display *display)
{
	struct intel_encoder *encoder;

	if (!HAS_DISPLAY(display))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use vlv_pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_dp(display->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_WARN_ON(display->drm, intel_dp->pps.vlv_active_pipe != INVALID_PIPE);

		if (encoder->type == INTEL_OUTPUT_EDP)
			intel_dp->pps.vlv_pps_pipe = INVALID_PIPE;
	}
}

void bxt_pps_reset_all(struct intel_display *display)
{
	struct intel_encoder *encoder;

	if (!HAS_DISPLAY(display))
		return;

	/* See vlv_pps_reset_all() for why we can't grab pps_mutex here. */

	for_each_intel_dp(display->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		if (encoder->type == INTEL_OUTPUT_EDP)
			intel_dp->pps.bxt_pps_reset = true;
	}
}

struct pps_registers {
	i915_reg_t pp_ctrl;
	i915_reg_t pp_stat;
	i915_reg_t pp_on;
	i915_reg_t pp_off;
	i915_reg_t pp_div;
};

static void intel_pps_get_registers(struct intel_dp *intel_dp,
				    struct pps_registers *regs)
{
	struct intel_display *display = to_intel_display(intel_dp);
	int pps_idx;

	memset(regs, 0, sizeof(*regs));

	if (display->platform.valleyview || display->platform.cherryview)
		pps_idx = vlv_power_sequencer_pipe(intel_dp);
	else if (display->platform.geminilake || display->platform.broxton)
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else
		pps_idx = intel_dp->pps.pps_idx;

	regs->pp_ctrl = PP_CONTROL(display, pps_idx);
	regs->pp_stat = PP_STATUS(display, pps_idx);
	regs->pp_on = PP_ON_DELAYS(display, pps_idx);
	regs->pp_off = PP_OFF_DELAYS(display, pps_idx);

	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
	if (display->platform.geminilake || display->platform.broxton ||
	    INTEL_PCH_TYPE(display) >= PCH_CNP)
		regs->pp_div = INVALID_MMIO_REG;
	else
		regs->pp_div = PP_DIVISOR(display, pps_idx);
}

static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_ctrl;
}

static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_stat;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);

	lockdep_assert_held(&display->pps.mutex);

	if ((display->platform.valleyview || display->platform.cherryview) &&
	    intel_dp->pps.vlv_pps_pipe == INVALID_PIPE)
		return false;

	return (intel_de_read(display, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);

	lockdep_assert_held(&display->pps.mutex);

	if ((display->platform.valleyview || display->platform.cherryview) &&
	    intel_dp->pps.vlv_pps_pipe == INVALID_PIPE)
		return false;

	return intel_de_read(display, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

void intel_pps_check_power_unlocked(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		drm_WARN(display->drm, 1,
			 "[ENCODER:%d:%s] %s powered off while attempting AUX CH communication.\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 pps_name(intel_dp));
		drm_dbg_kms(display->drm,
			    "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			    dig_port->base.base.base.id, dig_port->base.base.name,
			    pps_name(intel_dp),
			    intel_de_read(display, _pp_stat_reg(intel_dp)),
			    intel_de_read(display, _pp_ctrl_reg(intel_dp)));
	}
}

#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0 | 0)
#define IDLE_OFF_VALUE		(0 | PP_SEQUENCE_NONE | 0 | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)

static void intel_pps_verify_state(struct intel_dp *intel_dp);

static void wait_panel_status(struct intel_dp *intel_dp,
			      u32 mask, u32 value)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	i915_reg_t pp_stat_reg, pp_ctrl_reg;
	int ret;
	u32 val;

	lockdep_assert_held(&display->pps.mutex);

	intel_pps_verify_state(intel_dp);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	drm_dbg_kms(display->drm,
		    "[ENCODER:%d:%s] %s mask: 0x%08x value: 0x%08x PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		    dig_port->base.base.base.id, dig_port->base.base.name,
		    pps_name(intel_dp),
		    mask, value,
		    intel_de_read(display, pp_stat_reg),
		    intel_de_read(display, pp_ctrl_reg));

	ret = poll_timeout_us(val = intel_de_read(display, pp_stat_reg),
			      (val & mask) == value,
			      10 * 1000, 5000 * 1000, true);
	if (ret) {
		drm_err(display->drm,
			"[ENCODER:%d:%s] %s panel status timeout: PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			dig_port->base.base.base.id, dig_port->base.base.name,
			pps_name(intel_dp),
			intel_de_read(display, pp_stat_reg),
			intel_de_read(display, pp_ctrl_reg));
		return;
	}

	drm_dbg_kms(display->drm, "Wait complete\n");
}

static void wait_panel_on(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	drm_dbg_kms(display->drm,
		    "[ENCODER:%d:%s] %s wait for panel power on\n",
		    dig_port->base.base.base.id, dig_port->base.base.name,
		    pps_name(intel_dp));
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

static void wait_panel_off(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	drm_dbg_kms(display->drm,
		    "[ENCODER:%d:%s] %s wait for panel power off time\n",
		    dig_port->base.base.base.id, dig_port->base.base.name,
		    pps_name(intel_dp));
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

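/*
 * Ensure the panel power cycle delay has elapsed since the panel was last
 * powered off before letting the power sequencer run again, doing a manual
 * wait for any remaining time and then waiting for the off/idle state.
 */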
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	ktime_t panel_power_on_time;
	s64 panel_power_off_duration, remaining;

	/* take the difference of current time and panel power off time
	 * and then make the panel wait for the power cycle delay if needed. */
	panel_power_on_time = ktime_get_boottime();
	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->pps.panel_power_off_time);

	remaining = max(0, intel_dp->pps.panel_power_cycle_delay - panel_power_off_duration);

	drm_dbg_kms(display->drm,
		    "[ENCODER:%d:%s] %s wait for panel power cycle (%lld ms remaining)\n",
		    dig_port->base.base.base.id, dig_port->base.base.name,
		    pps_name(intel_dp), remaining);

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	if (remaining)
		wait_remaining_ms_from_jiffies(jiffies, remaining);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}

void intel_pps_wait_power_cycle(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_intel_pps_lock(intel_dp, wakeref)
		wait_panel_power_cycle(intel_dp);
}

static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->pps.last_power_on,
				       intel_dp->pps.backlight_on_delay);
}

static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->pps.last_backlight_off,
				       intel_dp->pps.backlight_off_delay);
}

/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	u32 control;

	lockdep_assert_held(&display->pps.mutex);

	control = intel_de_read(display, _pp_ctrl_reg(intel_dp));
	if (drm_WARN_ON(display->drm, !HAS_DDI(display) &&
			(control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
		control &= ~PANEL_UNLOCK_MASK;
		control |= PANEL_UNLOCK_REGS;
	}
	return control;
}

/*
 * Must be paired with intel_pps_vdd_off_unlocked().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_pps_vdd_{on,off}() calls.
 */
bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;
	bool need_to_disable = !intel_dp->pps.want_panel_vdd;

	if (!intel_dp_is_edp(intel_dp))
		return false;

	lockdep_assert_held(&display->pps.mutex);

	cancel_delayed_work(&intel_dp->pps.panel_vdd_work);
	intel_dp->pps.want_panel_vdd = true;

	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	drm_WARN_ON(display->drm, intel_dp->pps.vdd_wakeref);
	intel_dp->pps.vdd_wakeref = intel_display_power_get(display,
							    intel_aux_power_domain(dig_port));

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turning VDD on\n",
		    dig_port->base.base.base.id, dig_port->base.base.name,
		    pps_name(intel_dp));

	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ilk_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	intel_de_write(display, pp_ctrl_reg, pp);
	intel_de_posting_read(display, pp_ctrl_reg);
	drm_dbg_kms(display->drm,
		    "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		    dig_port->base.base.base.id, dig_port->base.base.name,
		    pps_name(intel_dp),
		    intel_de_read(display, pp_stat_reg),
		    intel_de_read(display, pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		drm_dbg_kms(display->drm,
			    "[ENCODER:%d:%s] %s panel power wasn't enabled\n",
			    dig_port->base.base.base.id, dig_port->base.base.name,
			    pps_name(intel_dp));
		msleep(intel_dp->pps.panel_power_up_delay);
	}

	return need_to_disable;
}

/*
 * Must be paired with intel_pps_vdd_off() or - to disable
 * both VDD and panel power - intel_pps_off().
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 */
void intel_pps_vdd_on(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	intel_wakeref_t wakeref;
	bool vdd;

	if (!intel_dp_is_edp(intel_dp))
		return;

	vdd = false;
	with_intel_pps_lock(intel_dp, wakeref)
		vdd = intel_pps_vdd_on_unlocked(intel_dp);
	INTEL_DISPLAY_STATE_WARN(display, !vdd, "[ENCODER:%d:%s] %s VDD already requested on\n",
				 dp_to_dig_port(intel_dp)->base.base.base.id,
				 dp_to_dig_port(intel_dp)->base.base.name,
				 pps_name(intel_dp));
}

static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&display->pps.mutex);

	drm_WARN_ON(display->drm, intel_dp->pps.want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turning VDD off\n",
		    dig_port->base.base.base.id, dig_port->base.base.name,
		    pps_name(intel_dp));

	pp = ilk_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	intel_de_write(display, pp_ctrl_reg, pp);
	intel_de_posting_read(display, pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	drm_dbg_kms(display->drm,
		    "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		    dig_port->base.base.base.id, dig_port->base.base.name,
		    pps_name(intel_dp),
		    intel_de_read(display, pp_stat_reg),
		    intel_de_read(display, pp_ctrl_reg));

	if ((pp & PANEL_POWER_ON) == 0) {
		intel_dp->pps.panel_power_off_time = ktime_get_boottime();
		intel_dp_invalidate_source_oui(intel_dp);
	}

	intel_display_power_put(display,
				intel_aux_power_domain(dig_port),
				fetch_and_zero(&intel_dp->pps.vdd_wakeref));
}

void intel_pps_vdd_off_sync(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	cancel_delayed_work_sync(&intel_dp->pps.panel_vdd_work);
	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	with_intel_pps_lock(intel_dp, wakeref)
		intel_pps_vdd_off_sync_unlocked(intel_dp);
}

static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_pps *pps = container_of(to_delayed_work(__work),
					     struct intel_pps, panel_vdd_work);
	struct intel_dp *intel_dp = container_of(pps, struct intel_dp, pps);
	intel_wakeref_t wakeref;

	with_intel_pps_lock(intel_dp, wakeref) {
		if (!intel_dp->pps.want_panel_vdd)
			intel_pps_vdd_off_sync_unlocked(intel_dp);
	}
}

static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	unsigned long delay;

	/*
	 * We may not yet know the real power sequencing delays,
	 * so keep VDD enabled until we're done with init.
	 */
	if (intel_dp->pps.initializing)
		return;

	/*
	 * Queue the timer to fire a long time from now (relative to the power
	 * down delay) to keep the panel power up across a sequence of
	 * operations.
	 */
	delay = msecs_to_jiffies(intel_dp->pps.panel_power_cycle_delay * 5);
	queue_delayed_work(display->wq.unordered,
			   &intel_dp->pps.panel_vdd_work, delay);
}

/*
 * Must be paired with intel_pps_vdd_on_unlocked().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_pps_vdd_{on,off}() calls.
 */
void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
{
	struct intel_display *display = to_intel_display(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	lockdep_assert_held(&display->pps.mutex);

	INTEL_DISPLAY_STATE_WARN(display, !intel_dp->pps.want_panel_vdd,
				 "[ENCODER:%d:%s] %s VDD not forced on",
				 dp_to_dig_port(intel_dp)->base.base.base.id,
				 dp_to_dig_port(intel_dp)->base.base.name,
				 pps_name(intel_dp));

	intel_dp->pps.want_panel_vdd = false;

	if (sync)
		intel_pps_vdd_off_sync_unlocked(intel_dp);
	else
		edp_panel_vdd_schedule_off(intel_dp);
}

void intel_pps_vdd_off(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_intel_pps_lock(intel_dp, wakeref)
		intel_pps_vdd_off_unlocked(intel_dp, false);
}

void intel_pps_on_unlocked(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&display->pps.mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turn panel power on\n",
		    dp_to_dig_port(intel_dp)->base.base.base.id,
		    dp_to_dig_port(intel_dp)->base.base.name,
		    pps_name(intel_dp));

	if (drm_WARN(display->drm, edp_have_panel_power(intel_dp),
		     "[ENCODER:%d:%s] %s panel power already on\n",
		     dp_to_dig_port(intel_dp)->base.base.base.id,
		     dp_to_dig_port(intel_dp)->base.base.name,
		     pps_name(intel_dp)))
		return;

	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ilk_get_pp_control(intel_dp);
	if (display->platform.ironlake) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		intel_de_write(display, pp_ctrl_reg, pp);
		intel_de_posting_read(display, pp_ctrl_reg);
	}

	/*
	 * WA: 22019252566
	 * Disable DPLS gating around power sequence.
	 */
	if (IS_DISPLAY_VER(display, 13, 14))
		intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
			     0, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);

	pp |= PANEL_POWER_ON;
	if (!display->platform.ironlake)
		pp |= PANEL_POWER_RESET;

	intel_de_write(display, pp_ctrl_reg, pp);
	intel_de_posting_read(display, pp_ctrl_reg);

	wait_panel_on(intel_dp);
	intel_dp->pps.last_power_on = jiffies;

	if (IS_DISPLAY_VER(display, 13, 14))
		intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
			     PCH_DPLSUNIT_CLOCK_GATE_DISABLE, 0);

	if (display->platform.ironlake) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		intel_de_write(display, pp_ctrl_reg, pp);
		intel_de_posting_read(display, pp_ctrl_reg);
	}
}

void intel_pps_on(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_intel_pps_lock(intel_dp, wakeref)
		intel_pps_on_unlocked(intel_dp);
}

void intel_pps_off_unlocked(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&display->pps.mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turn panel power off\n",
		    dig_port->base.base.base.id, dig_port->base.base.name,
		    pps_name(intel_dp));

	drm_WARN(display->drm, !intel_dp->pps.want_panel_vdd,
		 "[ENCODER:%d:%s] %s need VDD to turn off panel\n",
		 dig_port->base.base.base.id, dig_port->base.base.name,
		 pps_name(intel_dp));

	pp = ilk_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->pps.want_panel_vdd = false;

	intel_de_write(display, pp_ctrl_reg, pp);
	intel_de_posting_read(display, pp_ctrl_reg);

	wait_panel_off(intel_dp);
	intel_dp->pps.panel_power_off_time = ktime_get_boottime();

	intel_dp_invalidate_source_oui(intel_dp);

	/* We got a reference when we enabled the VDD. */
	intel_display_power_put(display,
				intel_aux_power_domain(dig_port),
				fetch_and_zero(&intel_dp->pps.vdd_wakeref));
}

void intel_pps_off(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_intel_pps_lock(intel_dp, wakeref)
		intel_pps_off_unlocked(intel_dp);
}

/* Enable backlight in the panel power control. */
void intel_pps_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link. So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	with_intel_pps_lock(intel_dp, wakeref) {
		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		u32 pp;

		pp = ilk_get_pp_control(intel_dp);
		pp |= EDP_BLC_ENABLE;

		intel_de_write(display, pp_ctrl_reg, pp);
		intel_de_posting_read(display, pp_ctrl_reg);
	}
}

/* Disable backlight in the panel power control. */
void intel_pps_backlight_off(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_intel_pps_lock(intel_dp, wakeref) {
		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		u32 pp;

		pp = ilk_get_pp_control(intel_dp);
		pp &= ~EDP_BLC_ENABLE;

		intel_de_write(display, pp_ctrl_reg, pp);
		intel_de_posting_read(display, pp_ctrl_reg);
	}

	intel_dp->pps.last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
}

/*
 * Hook for controlling the panel power control backlight through the bl_power
 * sysfs attribute. Take care to handle multiple calls.
 */
void intel_pps_backlight_power(struct intel_connector *connector, bool enable)
{
	struct intel_display *display = to_intel_display(connector);
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	intel_wakeref_t wakeref;
	bool is_enabled;

	is_enabled = false;
	with_intel_pps_lock(intel_dp, wakeref)
		is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
	if (is_enabled == enable)
		return;

	drm_dbg_kms(display->drm, "panel power control backlight %s\n",
		    str_enable_disable(enable));

	if (enable)
		intel_pps_backlight_on(intel_dp);
	else
		intel_pps_backlight_off(intel_dp);
}

static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps.vlv_pps_pipe;
	i915_reg_t pp_on_reg = PP_ON_DELAYS(display, pipe);

	drm_WARN_ON(display->drm, intel_dp->pps.vlv_active_pipe != INVALID_PIPE);

	if (drm_WARN_ON(display->drm, pipe != PIPE_A && pipe != PIPE_B))
		return;

	intel_pps_vdd_off_sync_unlocked(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	drm_dbg_kms(display->drm,
		    "detaching %s from [ENCODER:%d:%s]\n",
		    pps_name(intel_dp),
		    dig_port->base.base.base.id, dig_port->base.base.name);
	intel_de_write(display, pp_on_reg, 0);
	intel_de_posting_read(display, pp_on_reg);

	intel_dp->pps.vlv_pps_pipe = INVALID_PIPE;
}

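/*
 * Detach the given pipe's power sequencer from whichever eDP port is
 * currently using it, so that the caller can take it over.
 */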
static void vlv_steal_power_sequencer(struct intel_display *display,
				      enum pipe pipe)
{
	struct intel_encoder *encoder;

	lockdep_assert_held(&display->pps.mutex);

	for_each_intel_dp(display->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_WARN(display->drm, intel_dp->pps.vlv_active_pipe == pipe,
			 "stealing PPS %c from active [ENCODER:%d:%s]\n",
			 pipe_name(pipe), encoder->base.base.id,
			 encoder->base.name);

		if (intel_dp->pps.vlv_pps_pipe != pipe)
			continue;

		drm_dbg_kms(display->drm,
			    "stealing PPS %c from [ENCODER:%d:%s]\n",
			    pipe_name(pipe), encoder->base.base.id,
			    encoder->base.name);

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}

static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	enum pipe pipe;

	if (g4x_dp_port_enabled(display, intel_dp->output_reg,
				encoder->port, &pipe))
		return pipe;

	return INVALID_PIPE;
}

/* Call on all DP, not just eDP */
void vlv_pps_pipe_init(struct intel_dp *intel_dp)
{
	intel_dp->pps.vlv_pps_pipe = INVALID_PIPE;
	intel_dp->pps.vlv_active_pipe = vlv_active_pipe(intel_dp);
}

/* Call on all DP, not just eDP */
void vlv_pps_pipe_reset(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	with_intel_pps_lock(intel_dp, wakeref)
		intel_dp->pps.vlv_active_pipe = vlv_active_pipe(intel_dp);
}

enum pipe vlv_pps_backlight_initial_pipe(struct intel_dp *intel_dp)
{
	enum pipe pipe;

	/*
	 * Figure out the current pipe for the initial backlight setup. If the
	 * current pipe isn't valid, try the PPS pipe, and if that fails just
	 * assume pipe A.
	 */
	pipe = vlv_active_pipe(intel_dp);

	if (pipe != PIPE_A && pipe != PIPE_B)
		pipe = intel_dp->pps.vlv_pps_pipe;

	if (pipe != PIPE_A && pipe != PIPE_B)
		pipe = PIPE_A;

	return pipe;
}

/* Call on all DP, not just eDP */
void vlv_pps_port_enable_unlocked(struct intel_encoder *encoder,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	lockdep_assert_held(&display->pps.mutex);

	drm_WARN_ON(display->drm, intel_dp->pps.vlv_active_pipe != INVALID_PIPE);

	if (intel_dp->pps.vlv_pps_pipe != INVALID_PIPE &&
	    intel_dp->pps.vlv_pps_pipe != crtc->pipe) {
		/*
		 * If another power sequencer was being used on this
		 * port previously make sure to turn off vdd there while
		 * we still have control of it.
		 */
		vlv_detach_power_sequencer(intel_dp);
	}

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(display, crtc->pipe);

	intel_dp->pps.vlv_active_pipe = crtc->pipe;

	if (!intel_dp_is_edp(intel_dp))
		return;

	/* now it's all ours */
	intel_dp->pps.vlv_pps_pipe = crtc->pipe;

	drm_dbg_kms(display->drm,
		    "initializing %s for [ENCODER:%d:%s]\n",
		    pps_name(intel_dp),
		    encoder->base.base.id, encoder->base.name);

	/* init power sequencer on this pipe and port */
	pps_init_delays(intel_dp);
	pps_init_registers(intel_dp, true);
}

/* Call on all DP, not just eDP */
void vlv_pps_port_disable(struct intel_encoder *encoder,
			  const struct intel_crtc_state *crtc_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	intel_wakeref_t wakeref;

	with_intel_pps_lock(intel_dp, wakeref)
		intel_dp->pps.vlv_active_pipe = INVALID_PIPE;
}

static void pps_vdd_init(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	lockdep_assert_held(&display->pps.mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	drm_dbg_kms(display->drm,
		    "[ENCODER:%d:%s] %s VDD left on by BIOS, adjusting state tracking\n",
		    dig_port->base.base.base.id, dig_port->base.base.name,
		    pps_name(intel_dp));
	drm_WARN_ON(display->drm, intel_dp->pps.vdd_wakeref);
	intel_dp->pps.vdd_wakeref = intel_display_power_get(display,
							    intel_aux_power_domain(dig_port));
}

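/*
 * Returns true if either panel power or the VDD force override is
 * currently enabled for this eDP panel. Takes the pps lock internally.
 */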
bool intel_pps_have_panel_power_or_vdd(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;
	bool have_power = false;

	with_intel_pps_lock(intel_dp, wakeref) {
		have_power = edp_have_panel_power(intel_dp) ||
			     edp_have_panel_vdd(intel_dp);
	}

	return have_power;
}

static void pps_init_timestamps(struct intel_dp *intel_dp)
{
	/*
	 * Initialize panel power off time to 0, assuming panel power could have
	 * been toggled between kernel boot and now only by a previously loaded
	 * and removed i915, which has already ensured sufficient power off
	 * delay at module remove.
	 */
	intel_dp->pps.panel_power_off_time = 0;
	intel_dp->pps.last_power_on = jiffies;
	intel_dp->pps.last_backlight_off = jiffies;
}

static void
intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct intel_pps_delays *seq)
{
	struct intel_display *display = to_intel_display(intel_dp);
	u32 pp_on, pp_off, pp_ctl, power_cycle_delay;
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	pp_ctl = ilk_get_pp_control(intel_dp);

	/* Ensure PPS is unlocked */
	if (!HAS_DDI(display))
		intel_de_write(display, regs.pp_ctrl, pp_ctl);

	pp_on = intel_de_read(display, regs.pp_on);
	pp_off = intel_de_read(display, regs.pp_off);

	/* Pull timing values out of registers */
	seq->power_up = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
	seq->backlight_on = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
	seq->backlight_off = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
	seq->power_down = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);

	if (i915_mmio_reg_valid(regs.pp_div)) {
		u32 pp_div;

		pp_div = intel_de_read(display, regs.pp_div);

		power_cycle_delay = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div);
	} else {
		power_cycle_delay = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl);
	}

	/* hardware wants <delay>+1 in 100ms units */
	seq->power_cycle = power_cycle_delay ? (power_cycle_delay - 1) * 1000 : 0;
}

static void
intel_pps_dump_state(struct intel_dp *intel_dp, const char *state_name,
		     const struct intel_pps_delays *seq)
{
	struct intel_display *display = to_intel_display(intel_dp);

	drm_dbg_kms(display->drm,
		    "%s power_up %d backlight_on %d backlight_off %d power_down %d power_cycle %d\n",
		    state_name, seq->power_up, seq->backlight_on,
		    seq->backlight_off, seq->power_down, seq->power_cycle);
}

static void
intel_pps_verify_state(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_pps_delays hw;
	struct intel_pps_delays *sw = &intel_dp->pps.pps_delays;

	intel_pps_readout_hw_state(intel_dp, &hw);

	if (hw.power_up != sw->power_up ||
	    hw.backlight_on != sw->backlight_on ||
	    hw.backlight_off != sw->backlight_off ||
	    hw.power_down != sw->power_down ||
	    hw.power_cycle != sw->power_cycle) {
		drm_err(display->drm, "PPS state mismatch\n");
		intel_pps_dump_state(intel_dp, "sw", sw);
		intel_pps_dump_state(intel_dp, "hw", &hw);
	}
}

static bool pps_delays_valid(struct intel_pps_delays *delays)
{
	return delays->power_up || delays->backlight_on || delays->backlight_off ||
		delays->power_down || delays->power_cycle;
}

static int msecs_to_pps_units(int msecs)
{
	/* PPS uses 100us units */
	return msecs * 10;
}

static int pps_units_to_msecs(int val)
{
	/* PPS uses 100us units */
	return DIV_ROUND_UP(val, 10);
}

static void pps_init_delays_bios(struct intel_dp *intel_dp,
				 struct intel_pps_delays *bios)
{
	struct intel_display *display = to_intel_display(intel_dp);

	lockdep_assert_held(&display->pps.mutex);

	if (!pps_delays_valid(&intel_dp->pps.bios_pps_delays))
		intel_pps_readout_hw_state(intel_dp, &intel_dp->pps.bios_pps_delays);

	*bios = intel_dp->pps.bios_pps_delays;

	intel_pps_dump_state(intel_dp, "bios", bios);
}

static void pps_init_delays_vbt(struct intel_dp *intel_dp,
				struct intel_pps_delays *vbt)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;

	*vbt = connector->panel.vbt.edp.pps;

	if (!pps_delays_valid(vbt))
		return;

	/*
	 * On Toshiba Satellite P50-C-18C system the VBT T12 delay
	 * of 500ms appears to be too short. Occasionally the panel
	 * just fails to power back on. Increasing the delay to 800ms
	 * seems sufficient to avoid this problem.
	 */
	if (intel_has_quirk(display, QUIRK_INCREASE_T12_DELAY)) {
		vbt->power_cycle = max_t(u16, vbt->power_cycle, msecs_to_pps_units(1300));
		drm_dbg_kms(display->drm,
			    "Increasing T12 panel delay as per the quirk to %d\n",
			    vbt->power_cycle);
	}

	intel_pps_dump_state(intel_dp, "vbt", vbt);
}

static void pps_init_delays_spec(struct intel_dp *intel_dp,
				 struct intel_pps_delays *spec)
{
	struct intel_display *display = to_intel_display(intel_dp);

	lockdep_assert_held(&display->pps.mutex);

	/* Upper limits from eDP 1.3 spec */
	spec->power_up = msecs_to_pps_units(10 + 200); /* T1+T3 */
	spec->backlight_on = msecs_to_pps_units(50); /* no limit for T8, use T7 instead */
	spec->backlight_off = msecs_to_pps_units(50); /* no limit for T9, make it symmetric with T8 */
	spec->power_down = msecs_to_pps_units(500); /* T10 */
	spec->power_cycle = msecs_to_pps_units(10 + 500); /* T11+T12 */

	intel_pps_dump_state(intel_dp, "spec", spec);
}

static void pps_init_delays(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_pps_delays cur, vbt, spec,
		*final = &intel_dp->pps.pps_delays;

	lockdep_assert_held(&display->pps.mutex);

	/* already initialized? */
	if (pps_delays_valid(final))
		return;

	pps_init_delays_bios(intel_dp, &cur);
	pps_init_delays_vbt(intel_dp, &vbt);
	pps_init_delays_spec(intel_dp, &spec);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
					    spec.field : \
					    max(cur.field, vbt.field))
	assign_final(power_up);
	assign_final(backlight_on);
	assign_final(backlight_off);
	assign_final(power_down);
	assign_final(power_cycle);
#undef assign_final

	intel_dp->pps.panel_power_up_delay = pps_units_to_msecs(final->power_up);
	intel_dp->pps.backlight_on_delay = pps_units_to_msecs(final->backlight_on);
	intel_dp->pps.backlight_off_delay = pps_units_to_msecs(final->backlight_off);
	intel_dp->pps.panel_power_down_delay = pps_units_to_msecs(final->power_down);
	intel_dp->pps.panel_power_cycle_delay = pps_units_to_msecs(final->power_cycle);

	drm_dbg_kms(display->drm,
		    "panel power up delay %d, power down delay %d, power cycle delay %d\n",
		    intel_dp->pps.panel_power_up_delay,
		    intel_dp->pps.panel_power_down_delay,
		    intel_dp->pps.panel_power_cycle_delay);

	drm_dbg_kms(display->drm, "backlight on delay %d, off delay %d\n",
		    intel_dp->pps.backlight_on_delay,
		    intel_dp->pps.backlight_off_delay);

	/*
	 * We override the HW backlight delays to 1 because we do manual waits
	 * on them. For backlight_on, even BSpec recommends doing it. For
	 * backlight_off, if we don't do this, we'll end up waiting for the
	 * backlight off delay twice: once when we do the manual sleep, and
	 * once when we disable the panel and wait for the PP_STATUS bit to
	 * become zero.
	 */
	final->backlight_on = 1;
	final->backlight_off = 1;

	/*
	 * HW has only a 100msec granularity for power_cycle so round it up
	 * accordingly.
	 */
	final->power_cycle = roundup(final->power_cycle, msecs_to_pps_units(100));
}

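/*
 * Program the power sequencer registers (PP_ON/PP_OFF delays, port select
 * and the power cycle delay/divisor) from the cached software state. With
 * force_disable_vdd, any VDD force left enabled by the BIOS is cleared
 * first so the power domain tracking doesn't get confused.
 */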
static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd)
{
	struct intel_display *display = to_intel_display(intel_dp);
	u32 pp_on, pp_off, port_sel = 0;
	int div = DISPLAY_RUNTIME_INFO(display)->rawclk_freq / 1000;
	struct pps_registers regs;
	enum port port = dp_to_dig_port(intel_dp)->base.port;
	const struct intel_pps_delays *seq = &intel_dp->pps.pps_delays;

	lockdep_assert_held(&display->pps.mutex);

	intel_pps_get_registers(intel_dp, &regs);

	/*
	 * On some VLV machines the BIOS can leave the VDD
	 * enabled even on power sequencers which aren't
	 * hooked up to any port. This would mess up the
	 * power domain tracking the first time we pick
	 * one of these power sequencers for use since
	 * intel_pps_vdd_on_unlocked() would notice that the VDD was
	 * already on and therefore wouldn't grab the power
	 * domain reference. Disable VDD first to avoid this.
	 * This also avoids spuriously turning the VDD on as
	 * soon as the new power sequencer gets initialized.
	 */
	if (force_disable_vdd) {
		u32 pp = ilk_get_pp_control(intel_dp);

		drm_WARN(display->drm, pp & PANEL_POWER_ON,
			 "Panel power already on\n");

		if (pp & EDP_FORCE_VDD)
			drm_dbg_kms(display->drm,
				    "VDD already on, disabling first\n");

		pp &= ~EDP_FORCE_VDD;

		intel_de_write(display, regs.pp_ctrl, pp);
	}

	pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->power_up) |
		REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->backlight_on);
	pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->backlight_off) |
		REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->power_down);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (display->platform.valleyview || display->platform.cherryview) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(display) || HAS_PCH_CPT(display)) {
		switch (port) {
		case PORT_A:
			port_sel = PANEL_PORT_SELECT_DPA;
			break;
		case PORT_C:
			port_sel = PANEL_PORT_SELECT_DPC;
			break;
		case PORT_D:
			port_sel = PANEL_PORT_SELECT_DPD;
			break;
		default:
			MISSING_CASE(port);
			break;
		}
	}

	pp_on |= port_sel;

	intel_de_write(display, regs.pp_on, pp_on);
	intel_de_write(display, regs.pp_off, pp_off);

	/*
	 * Compute the divisor for the pp clock, simply match the Bspec formula.
	 */
	if (i915_mmio_reg_valid(regs.pp_div))
		intel_de_write(display, regs.pp_div,
			       REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK,
					      (100 * div) / 2 - 1) |
			       REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK,
					      DIV_ROUND_UP(seq->power_cycle, 1000) + 1));
	else
		intel_de_rmw(display, regs.pp_ctrl, BXT_POWER_CYCLE_DELAY_MASK,
			     REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK,
					    DIV_ROUND_UP(seq->power_cycle, 1000) + 1));

	drm_dbg_kms(display->drm,
		    "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		    intel_de_read(display, regs.pp_on),
		    intel_de_read(display, regs.pp_off),
		    i915_mmio_reg_valid(regs.pp_div) ?
		    intel_de_read(display, regs.pp_div) :
		    (intel_de_read(display, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
}

void intel_pps_encoder_reset(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_intel_pps_lock(intel_dp, wakeref) {
		/*
		 * Reinit the power sequencer also on the resume path, in case
		 * BIOS did something nasty with it.
		 */
		if (display->platform.valleyview || display->platform.cherryview)
			vlv_initial_power_sequencer_setup(intel_dp);

		pps_init_delays(intel_dp);
		pps_init_registers(intel_dp, false);
		pps_vdd_init(intel_dp);

		if (edp_have_panel_vdd(intel_dp))
			edp_panel_vdd_schedule_off(intel_dp);
	}
}

bool intel_pps_init(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;
	bool ret;

	intel_dp->pps.initializing = true;
	INIT_DELAYED_WORK(&intel_dp->pps.panel_vdd_work, edp_panel_vdd_work);

	pps_init_timestamps(intel_dp);

	with_intel_pps_lock(intel_dp, wakeref) {
		ret = pps_initial_setup(intel_dp);

		pps_init_delays(intel_dp);
		pps_init_registers(intel_dp, false);
		pps_vdd_init(intel_dp);
	}

	return ret;
}

static void pps_init_late(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct intel_connector *connector = intel_dp->attached_connector;

	if (display->platform.valleyview || display->platform.cherryview)
		return;

	if (intel_num_pps(display) < 2)
		return;

	drm_WARN(display->drm,
		 connector->panel.vbt.backlight.controller >= 0 &&
		 intel_dp->pps.pps_idx != connector->panel.vbt.backlight.controller,
		 "[ENCODER:%d:%s] power sequencer mismatch: %d (initial) vs. %d (VBT)\n",
		 encoder->base.base.id, encoder->base.name,
		 intel_dp->pps.pps_idx, connector->panel.vbt.backlight.controller);

	if (connector->panel.vbt.backlight.controller >= 0)
		intel_dp->pps.pps_idx = connector->panel.vbt.backlight.controller;
}

void intel_pps_init_late(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	with_intel_pps_lock(intel_dp, wakeref) {
		/* Reinit delays after per-panel info has been parsed from VBT */
		pps_init_late(intel_dp);

		memset(&intel_dp->pps.pps_delays, 0, sizeof(intel_dp->pps.pps_delays));
		pps_init_delays(intel_dp);
		pps_init_registers(intel_dp, false);

		intel_dp->pps.initializing = false;

		if (edp_have_panel_vdd(intel_dp))
			edp_panel_vdd_schedule_off(intel_dp);
	}
}

void intel_pps_unlock_regs_wa(struct intel_display *display)
{
	int pps_num;
	int pps_idx;

	if (!HAS_DISPLAY(display) || HAS_DDI(display))
		return;
	/*
	 * This w/a is needed at least on CPT/PPT, but to be sure apply it
	 * everywhere where registers can be write protected.
	 */
	pps_num = intel_num_pps(display);

	for (pps_idx = 0; pps_idx < pps_num; pps_idx++)
		intel_de_rmw(display, PP_CONTROL(display, pps_idx),
			     PANEL_UNLOCK_MASK, PANEL_UNLOCK_REGS);
}

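/* Pick the PPS register block MMIO base for this platform. */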
void intel_pps_setup(struct intel_display *display)
{
	if (HAS_PCH_SPLIT(display) || display->platform.geminilake || display->platform.broxton)
		display->pps.mmio_base = PCH_PPS_BASE;
	else if (display->platform.valleyview || display->platform.cherryview)
		display->pps.mmio_base = VLV_PPS_BASE;
	else
		display->pps.mmio_base = PPS_BASE;
}

static int intel_pps_show(struct seq_file *m, void *data)
{
	struct intel_connector *connector = m->private;
	struct intel_dp *intel_dp = intel_attached_dp(connector);

	if (connector->base.status != connector_status_connected)
		return -ENODEV;

	seq_printf(m, "Panel power up delay: %d\n",
		   intel_dp->pps.panel_power_up_delay);
	seq_printf(m, "Panel power down delay: %d\n",
		   intel_dp->pps.panel_power_down_delay);
	seq_printf(m, "Panel power cycle delay: %d\n",
		   intel_dp->pps.panel_power_cycle_delay);
	seq_printf(m, "Backlight on delay: %d\n",
		   intel_dp->pps.backlight_on_delay);
	seq_printf(m, "Backlight off delay: %d\n",
		   intel_dp->pps.backlight_off_delay);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(intel_pps);

void intel_pps_connector_debugfs_add(struct intel_connector *connector)
{
	struct dentry *root = connector->base.debugfs_entry;
	int connector_type = connector->base.connector_type;

	if (connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_panel_timings", 0444, root,
				    connector, &intel_pps_fops);
}

void assert_pps_unlocked(struct intel_display *display, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	if (drm_WARN_ON(display->drm, HAS_DDI(display)))
		return;

	if (HAS_PCH_SPLIT(display)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(display, 0);
		port_sel = intel_de_read(display, PP_ON_DELAYS(display, 0)) &
			PANEL_PORT_SELECT_MASK;

		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(display, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			g4x_dp_port_enabled(display, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			g4x_dp_port_enabled(display, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			g4x_dp_port_enabled(display, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (display->platform.valleyview || display->platform.cherryview) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(display, pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(display, 0);
		port_sel = intel_de_read(display, PP_ON_DELAYS(display, 0)) &
			PANEL_PORT_SELECT_MASK;

		drm_WARN_ON(display->drm,
			    port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(display, LVDS, &panel_pipe);
	}

	val = intel_de_read(display, pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	INTEL_DISPLAY_STATE_WARN(display, panel_pipe == pipe && locked,
				 "panel assertion failure, pipe %c regs locked\n",
				 pipe_name(pipe));
}