1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright © 2022 Intel Corporation 4 */ 5 6 #include "i915_drv.h" 7 #include "i915_irq.h" 8 #include "i915_reg.h" 9 #include "intel_backlight_regs.h" 10 #include "intel_combo_phy.h" 11 #include "intel_combo_phy_regs.h" 12 #include "intel_crt.h" 13 #include "intel_de.h" 14 #include "intel_display_irq.h" 15 #include "intel_display_power_well.h" 16 #include "intel_display_regs.h" 17 #include "intel_display_rpm.h" 18 #include "intel_display_types.h" 19 #include "intel_dkl_phy.h" 20 #include "intel_dkl_phy_regs.h" 21 #include "intel_dmc.h" 22 #include "intel_dmc_wl.h" 23 #include "intel_dp_aux_regs.h" 24 #include "intel_dpio_phy.h" 25 #include "intel_dpll.h" 26 #include "intel_hotplug.h" 27 #include "intel_pcode.h" 28 #include "intel_pps.h" 29 #include "intel_psr.h" 30 #include "intel_tc.h" 31 #include "intel_vga.h" 32 #include "skl_watermark.h" 33 #include "vlv_dpio_phy_regs.h" 34 #include "vlv_iosf_sb_reg.h" 35 #include "vlv_sideband.h" 36 37 /* 38 * PG0 is HW controlled, so doesn't have a corresponding power well control knob 39 * 40 * {ICL,SKL}_DISP_PW1_IDX..{ICL,SKL}_DISP_PW4_IDX -> PG1..PG4 41 */ 42 static enum skl_power_gate pw_idx_to_pg(struct intel_display *display, int pw_idx) 43 { 44 int pw1_idx = DISPLAY_VER(display) >= 11 ? ICL_PW_CTL_IDX_PW_1 : SKL_PW_CTL_IDX_PW_1; 45 46 return pw_idx - pw1_idx + SKL_PG1; 47 } 48 49 struct i915_power_well_regs { 50 i915_reg_t bios; 51 i915_reg_t driver; 52 i915_reg_t kvmr; 53 i915_reg_t debug; 54 }; 55 56 struct i915_power_well_ops { 57 const struct i915_power_well_regs *regs; 58 /* 59 * Synchronize the well's hw state to match the current sw state, for 60 * example enable/disable it based on the current refcount. Called 61 * during driver init and resume time, possibly after first calling 62 * the enable/disable handlers. 63 */ 64 void (*sync_hw)(struct intel_display *display, 65 struct i915_power_well *power_well); 66 /* 67 * Enable the well and resources that depend on it (for example 68 * interrupts located on the well). Called after the 0->1 refcount 69 * transition. 70 */ 71 void (*enable)(struct intel_display *display, 72 struct i915_power_well *power_well); 73 /* 74 * Disable the well and resources that depend on it. Called after 75 * the 1->0 refcount transition. 76 */ 77 void (*disable)(struct intel_display *display, 78 struct i915_power_well *power_well); 79 /* Returns the hw enabled state. */ 80 bool (*is_enabled)(struct intel_display *display, 81 struct i915_power_well *power_well); 82 }; 83 84 static const struct i915_power_well_instance * 85 i915_power_well_instance(const struct i915_power_well *power_well) 86 { 87 return &power_well->desc->instances->list[power_well->instance_idx]; 88 } 89 90 struct i915_power_well * 91 lookup_power_well(struct intel_display *display, 92 enum i915_power_well_id power_well_id) 93 { 94 struct i915_power_well *power_well; 95 96 for_each_power_well(display, power_well) 97 if (i915_power_well_instance(power_well)->id == power_well_id) 98 return power_well; 99 100 /* 101 * It's not feasible to add error checking code to the callers since 102 * this condition really shouldn't happen and it doesn't even make sense 103 * to abort things like display initialization sequences. Just return 104 * the first power well and hope the WARN gets reported so we can fix 105 * our driver. 106 */ 107 drm_WARN(display->drm, 1, 108 "Power well %d not defined for this platform\n", 109 power_well_id); 110 return &display->power.domains.power_wells[0]; 111 } 112 113 void intel_power_well_enable(struct intel_display *display, 114 struct i915_power_well *power_well) 115 { 116 drm_dbg_kms(display->drm, "enabling %s\n", intel_power_well_name(power_well)); 117 power_well->desc->ops->enable(display, power_well); 118 power_well->hw_enabled = true; 119 } 120 121 void intel_power_well_disable(struct intel_display *display, 122 struct i915_power_well *power_well) 123 { 124 drm_dbg_kms(display->drm, "disabling %s\n", intel_power_well_name(power_well)); 125 power_well->hw_enabled = false; 126 power_well->desc->ops->disable(display, power_well); 127 } 128 129 void intel_power_well_sync_hw(struct intel_display *display, 130 struct i915_power_well *power_well) 131 { 132 power_well->desc->ops->sync_hw(display, power_well); 133 power_well->hw_enabled = power_well->desc->ops->is_enabled(display, power_well); 134 } 135 136 void intel_power_well_get(struct intel_display *display, 137 struct i915_power_well *power_well) 138 { 139 if (!power_well->count++) 140 intel_power_well_enable(display, power_well); 141 } 142 143 void intel_power_well_put(struct intel_display *display, 144 struct i915_power_well *power_well) 145 { 146 drm_WARN(display->drm, !power_well->count, 147 "Use count on power well %s is already zero", 148 i915_power_well_instance(power_well)->name); 149 150 if (!--power_well->count) 151 intel_power_well_disable(display, power_well); 152 } 153 154 bool intel_power_well_is_enabled(struct intel_display *display, 155 struct i915_power_well *power_well) 156 { 157 return power_well->desc->ops->is_enabled(display, power_well); 158 } 159 160 bool intel_power_well_is_enabled_cached(struct i915_power_well *power_well) 161 { 162 return power_well->hw_enabled; 163 } 164 165 bool intel_display_power_well_is_enabled(struct intel_display *display, 166 enum i915_power_well_id power_well_id) 167 { 168 struct i915_power_well *power_well; 169 170 power_well = lookup_power_well(display, power_well_id); 171 172 return intel_power_well_is_enabled(display, power_well); 173 } 174 175 bool intel_power_well_is_always_on(struct i915_power_well *power_well) 176 { 177 return power_well->desc->always_on; 178 } 179 180 const char *intel_power_well_name(struct i915_power_well *power_well) 181 { 182 return i915_power_well_instance(power_well)->name; 183 } 184 185 struct intel_power_domain_mask *intel_power_well_domains(struct i915_power_well *power_well) 186 { 187 return &power_well->domains; 188 } 189 190 int intel_power_well_refcount(struct i915_power_well *power_well) 191 { 192 return power_well->count; 193 } 194 195 /* 196 * Starting with Haswell, we have a "Power Down Well" that can be turned off 197 * when not needed anymore. We have 4 registers that can request the power well 198 * to be enabled, and it will only be disabled if none of the registers is 199 * requesting it to be enabled. 200 */ 201 static void hsw_power_well_post_enable(struct intel_display *display, 202 u8 irq_pipe_mask, bool has_vga) 203 { 204 if (has_vga) 205 intel_vga_reset_io_mem(display); 206 207 if (irq_pipe_mask) 208 gen8_irq_power_well_post_enable(display, irq_pipe_mask); 209 } 210 211 static void hsw_power_well_pre_disable(struct intel_display *display, 212 u8 irq_pipe_mask) 213 { 214 if (irq_pipe_mask) 215 gen8_irq_power_well_pre_disable(display, irq_pipe_mask); 216 } 217 218 #define ICL_AUX_PW_TO_PHY(pw_idx) \ 219 ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + PHY_A) 220 221 #define ICL_AUX_PW_TO_CH(pw_idx) \ 222 ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A) 223 224 #define ICL_TBT_AUX_PW_TO_CH(pw_idx) \ 225 ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C) 226 227 static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well) 228 { 229 int pw_idx = i915_power_well_instance(power_well)->hsw.idx; 230 231 return power_well->desc->is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) : 232 ICL_AUX_PW_TO_CH(pw_idx); 233 } 234 235 static struct intel_digital_port * 236 aux_ch_to_digital_port(struct intel_display *display, 237 enum aux_ch aux_ch) 238 { 239 struct intel_encoder *encoder; 240 241 for_each_intel_encoder(display->drm, encoder) { 242 struct intel_digital_port *dig_port; 243 244 /* We'll check the MST primary port */ 245 if (encoder->type == INTEL_OUTPUT_DP_MST) 246 continue; 247 248 dig_port = enc_to_dig_port(encoder); 249 250 if (dig_port && dig_port->aux_ch == aux_ch) 251 return dig_port; 252 } 253 254 return NULL; 255 } 256 257 static enum phy icl_aux_pw_to_phy(struct intel_display *display, 258 const struct i915_power_well *power_well) 259 { 260 enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well); 261 struct intel_digital_port *dig_port = aux_ch_to_digital_port(display, aux_ch); 262 263 /* 264 * FIXME should we care about the (VBT defined) dig_port->aux_ch 265 * relationship or should this be purely defined by the hardware layout? 266 * Currently if the port doesn't appear in the VBT, or if it's declared 267 * as HDMI-only and routed to a combo PHY, the encoder either won't be 268 * present at all or it will not have an aux_ch assigned. 269 */ 270 return dig_port ? intel_encoder_to_phy(&dig_port->base) : PHY_NONE; 271 } 272 273 static void hsw_wait_for_power_well_enable(struct intel_display *display, 274 struct i915_power_well *power_well, 275 bool timeout_expected) 276 { 277 const struct i915_power_well_regs *regs = power_well->desc->ops->regs; 278 int pw_idx = i915_power_well_instance(power_well)->hsw.idx; 279 int timeout = power_well->desc->enable_timeout ? : 1; 280 281 /* 282 * For some power wells we're not supposed to watch the status bit for 283 * an ack, but rather just wait a fixed amount of time and then 284 * proceed. This is only used on DG2. 285 */ 286 if (display->platform.dg2 && power_well->desc->fixed_enable_delay) { 287 usleep_range(600, 1200); 288 return; 289 } 290 291 /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */ 292 if (intel_de_wait_for_set(display, regs->driver, 293 HSW_PWR_WELL_CTL_STATE(pw_idx), timeout)) { 294 drm_dbg_kms(display->drm, "%s power well enable timeout\n", 295 intel_power_well_name(power_well)); 296 297 drm_WARN_ON(display->drm, !timeout_expected); 298 299 } 300 } 301 302 static u32 hsw_power_well_requesters(struct intel_display *display, 303 const struct i915_power_well_regs *regs, 304 int pw_idx) 305 { 306 u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx); 307 u32 ret; 308 309 ret = intel_de_read(display, regs->bios) & req_mask ? 1 : 0; 310 ret |= intel_de_read(display, regs->driver) & req_mask ? 2 : 0; 311 if (regs->kvmr.reg) 312 ret |= intel_de_read(display, regs->kvmr) & req_mask ? 4 : 0; 313 ret |= intel_de_read(display, regs->debug) & req_mask ? 8 : 0; 314 315 return ret; 316 } 317 318 static void hsw_wait_for_power_well_disable(struct intel_display *display, 319 struct i915_power_well *power_well) 320 { 321 const struct i915_power_well_regs *regs = power_well->desc->ops->regs; 322 int pw_idx = i915_power_well_instance(power_well)->hsw.idx; 323 u32 reqs; 324 int ret; 325 326 /* 327 * Bspec doesn't require waiting for PWs to get disabled, but still do 328 * this for paranoia. The known cases where a PW will be forced on: 329 * - a KVMR request on any power well via the KVMR request register 330 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and 331 * DEBUG request registers 332 * Skip the wait in case any of the request bits are set and print a 333 * diagnostic message. 334 */ 335 reqs = hsw_power_well_requesters(display, regs, pw_idx); 336 337 ret = intel_de_wait_for_clear(display, regs->driver, 338 HSW_PWR_WELL_CTL_STATE(pw_idx), 339 reqs ? 0 : 1); 340 if (!ret) 341 return; 342 343 /* Refresh requesters in case they popped up during the wait. */ 344 if (!reqs) 345 reqs = hsw_power_well_requesters(display, regs, pw_idx); 346 347 drm_dbg_kms(display->drm, 348 "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n", 349 intel_power_well_name(power_well), 350 !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8)); 351 } 352 353 static void gen9_wait_for_power_well_fuses(struct intel_display *display, 354 enum skl_power_gate pg) 355 { 356 /* Timeout 5us for PG#0, for other PGs 1us */ 357 drm_WARN_ON(display->drm, 358 intel_de_wait_for_set(display, SKL_FUSE_STATUS, 359 SKL_FUSE_PG_DIST_STATUS(pg), 1)); 360 } 361 362 static void hsw_power_well_enable(struct intel_display *display, 363 struct i915_power_well *power_well) 364 { 365 const struct i915_power_well_regs *regs = power_well->desc->ops->regs; 366 int pw_idx = i915_power_well_instance(power_well)->hsw.idx; 367 368 if (power_well->desc->has_fuses) { 369 enum skl_power_gate pg; 370 371 pg = pw_idx_to_pg(display, pw_idx); 372 373 /* Wa_16013190616:adlp */ 374 if (display->platform.alderlake_p && pg == SKL_PG1) 375 intel_de_rmw(display, GEN8_CHICKEN_DCPR_1, 0, DISABLE_FLR_SRC); 376 377 /* 378 * For PW1 we have to wait both for the PW0/PG0 fuse state 379 * before enabling the power well and PW1/PG1's own fuse 380 * state after the enabling. For all other power wells with 381 * fuses we only have to wait for that PW/PG's fuse state 382 * after the enabling. 383 */ 384 if (pg == SKL_PG1) 385 gen9_wait_for_power_well_fuses(display, SKL_PG0); 386 } 387 388 intel_de_rmw(display, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx)); 389 390 hsw_wait_for_power_well_enable(display, power_well, false); 391 392 if (power_well->desc->has_fuses) { 393 enum skl_power_gate pg; 394 395 pg = pw_idx_to_pg(display, pw_idx); 396 397 gen9_wait_for_power_well_fuses(display, pg); 398 } 399 400 hsw_power_well_post_enable(display, 401 power_well->desc->irq_pipe_mask, 402 power_well->desc->has_vga); 403 } 404 405 static void hsw_power_well_disable(struct intel_display *display, 406 struct i915_power_well *power_well) 407 { 408 const struct i915_power_well_regs *regs = power_well->desc->ops->regs; 409 int pw_idx = i915_power_well_instance(power_well)->hsw.idx; 410 411 hsw_power_well_pre_disable(display, 412 power_well->desc->irq_pipe_mask); 413 414 intel_de_rmw(display, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0); 415 hsw_wait_for_power_well_disable(display, power_well); 416 } 417 418 static bool intel_aux_ch_is_edp(struct intel_display *display, enum aux_ch aux_ch) 419 { 420 struct intel_digital_port *dig_port = aux_ch_to_digital_port(display, aux_ch); 421 422 return dig_port && dig_port->base.type == INTEL_OUTPUT_EDP; 423 } 424 425 static void 426 icl_combo_phy_aux_power_well_enable(struct intel_display *display, 427 struct i915_power_well *power_well) 428 { 429 const struct i915_power_well_regs *regs = power_well->desc->ops->regs; 430 int pw_idx = i915_power_well_instance(power_well)->hsw.idx; 431 432 drm_WARN_ON(display->drm, !display->platform.icelake); 433 434 intel_de_rmw(display, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx)); 435 436 /* 437 * FIXME not sure if we should derive the PHY from the pw_idx, or 438 * from the VBT defined AUX_CH->DDI->PHY mapping. 439 */ 440 intel_de_rmw(display, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)), 441 0, ICL_LANE_ENABLE_AUX); 442 443 hsw_wait_for_power_well_enable(display, power_well, false); 444 445 /* Display WA #1178: icl */ 446 if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B && 447 !intel_aux_ch_is_edp(display, ICL_AUX_PW_TO_CH(pw_idx))) 448 intel_de_rmw(display, ICL_PORT_TX_DW6_AUX(ICL_AUX_PW_TO_PHY(pw_idx)), 449 0, O_FUNC_OVRD_EN | O_LDO_BYPASS_CRI); 450 } 451 452 static void 453 icl_combo_phy_aux_power_well_disable(struct intel_display *display, 454 struct i915_power_well *power_well) 455 { 456 const struct i915_power_well_regs *regs = power_well->desc->ops->regs; 457 int pw_idx = i915_power_well_instance(power_well)->hsw.idx; 458 459 drm_WARN_ON(display->drm, !display->platform.icelake); 460 461 /* 462 * FIXME not sure if we should derive the PHY from the pw_idx, or 463 * from the VBT defined AUX_CH->DDI->PHY mapping. 464 */ 465 intel_de_rmw(display, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)), 466 ICL_LANE_ENABLE_AUX, 0); 467 468 intel_de_rmw(display, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0); 469 470 hsw_wait_for_power_well_disable(display, power_well); 471 } 472 473 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 474 475 static void icl_tc_port_assert_ref_held(struct intel_display *display, 476 struct i915_power_well *power_well, 477 struct intel_digital_port *dig_port) 478 { 479 if (drm_WARN_ON(display->drm, !dig_port)) 480 return; 481 482 if (DISPLAY_VER(display) == 11 && intel_tc_cold_requires_aux_pw(dig_port)) 483 return; 484 485 drm_WARN_ON(display->drm, !intel_tc_port_ref_held(dig_port)); 486 } 487 488 #else 489 490 static void icl_tc_port_assert_ref_held(struct intel_display *display, 491 struct i915_power_well *power_well, 492 struct intel_digital_port *dig_port) 493 { 494 } 495 496 #endif 497 498 #define TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1) 499 500 static void icl_tc_cold_exit(struct intel_display *display) 501 { 502 struct drm_i915_private *i915 = to_i915(display->drm); 503 int ret, tries = 0; 504 505 while (1) { 506 ret = intel_pcode_write(display->drm, ICL_PCODE_EXIT_TCCOLD, 0); 507 if (ret != -EAGAIN || ++tries == 3) 508 break; 509 msleep(1); 510 } 511 512 /* Spec states that TC cold exit can take up to 1ms to complete */ 513 if (!ret) 514 msleep(1); 515 516 /* TODO: turn failure into a error as soon i915 CI updates ICL IFWI */ 517 drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" : 518 "succeeded"); 519 } 520 521 static void 522 icl_tc_phy_aux_power_well_enable(struct intel_display *display, 523 struct i915_power_well *power_well) 524 { 525 enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well); 526 struct intel_digital_port *dig_port = aux_ch_to_digital_port(display, aux_ch); 527 const struct i915_power_well_regs *regs = power_well->desc->ops->regs; 528 bool is_tbt = power_well->desc->is_tc_tbt; 529 bool timeout_expected; 530 531 icl_tc_port_assert_ref_held(display, power_well, dig_port); 532 533 intel_de_rmw(display, DP_AUX_CH_CTL(aux_ch), 534 DP_AUX_CH_CTL_TBT_IO, is_tbt ? DP_AUX_CH_CTL_TBT_IO : 0); 535 536 intel_de_rmw(display, regs->driver, 537 0, 538 HSW_PWR_WELL_CTL_REQ(i915_power_well_instance(power_well)->hsw.idx)); 539 540 /* 541 * An AUX timeout is expected if the TBT DP tunnel is down, 542 * or need to enable AUX on a legacy TypeC port as part of the TC-cold 543 * exit sequence. 544 */ 545 timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port); 546 if (DISPLAY_VER(display) == 11 && intel_tc_cold_requires_aux_pw(dig_port)) 547 icl_tc_cold_exit(display); 548 549 hsw_wait_for_power_well_enable(display, power_well, timeout_expected); 550 551 if (DISPLAY_VER(display) >= 12 && !is_tbt) { 552 enum tc_port tc_port; 553 554 tc_port = TGL_AUX_PW_TO_TC_PORT(i915_power_well_instance(power_well)->hsw.idx); 555 556 if (wait_for(intel_dkl_phy_read(display, DKL_CMN_UC_DW_27(tc_port)) & 557 DKL_CMN_UC_DW27_UC_HEALTH, 1)) 558 drm_warn(display->drm, 559 "Timeout waiting TC uC health\n"); 560 } 561 } 562 563 static void 564 icl_aux_power_well_enable(struct intel_display *display, 565 struct i915_power_well *power_well) 566 { 567 enum phy phy = icl_aux_pw_to_phy(display, power_well); 568 569 if (intel_phy_is_tc(display, phy)) 570 return icl_tc_phy_aux_power_well_enable(display, power_well); 571 else if (display->platform.icelake) 572 return icl_combo_phy_aux_power_well_enable(display, 573 power_well); 574 else 575 return hsw_power_well_enable(display, power_well); 576 } 577 578 static void 579 icl_aux_power_well_disable(struct intel_display *display, 580 struct i915_power_well *power_well) 581 { 582 enum phy phy = icl_aux_pw_to_phy(display, power_well); 583 584 if (intel_phy_is_tc(display, phy)) 585 return hsw_power_well_disable(display, power_well); 586 else if (display->platform.icelake) 587 return icl_combo_phy_aux_power_well_disable(display, 588 power_well); 589 else 590 return hsw_power_well_disable(display, power_well); 591 } 592 593 /* 594 * We should only use the power well if we explicitly asked the hardware to 595 * enable it, so check if it's enabled and also check if we've requested it to 596 * be enabled. 597 */ 598 static bool hsw_power_well_enabled(struct intel_display *display, 599 struct i915_power_well *power_well) 600 { 601 const struct i915_power_well_regs *regs = power_well->desc->ops->regs; 602 enum i915_power_well_id id = i915_power_well_instance(power_well)->id; 603 int pw_idx = i915_power_well_instance(power_well)->hsw.idx; 604 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) | 605 HSW_PWR_WELL_CTL_STATE(pw_idx); 606 u32 val; 607 608 val = intel_de_read(display, regs->driver); 609 610 /* 611 * On GEN9 big core due to a DMC bug the driver's request bits for PW1 612 * and the MISC_IO PW will be not restored, so check instead for the 613 * BIOS's own request bits, which are forced-on for these power wells 614 * when exiting DC5/6. 615 */ 616 if (DISPLAY_VER(display) == 9 && !display->platform.broxton && 617 (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO)) 618 val |= intel_de_read(display, regs->bios); 619 620 return (val & mask) == mask; 621 } 622 623 static void assert_can_enable_dc9(struct intel_display *display) 624 { 625 struct drm_i915_private *dev_priv = to_i915(display->drm); 626 627 drm_WARN_ONCE(display->drm, 628 (intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_DC9), 629 "DC9 already programmed to be enabled.\n"); 630 drm_WARN_ONCE(display->drm, 631 intel_de_read(display, DC_STATE_EN) & 632 DC_STATE_EN_UPTO_DC5, 633 "DC5 still not disabled to enable DC9.\n"); 634 drm_WARN_ONCE(display->drm, 635 intel_de_read(display, HSW_PWR_WELL_CTL2) & 636 HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2), 637 "Power well 2 on.\n"); 638 drm_WARN_ONCE(display->drm, intel_irqs_enabled(dev_priv), 639 "Interrupts not disabled yet.\n"); 640 641 /* 642 * TODO: check for the following to verify the conditions to enter DC9 643 * state are satisfied: 644 * 1] Check relevant display engine registers to verify if mode set 645 * disable sequence was followed. 646 * 2] Check if display uninitialize sequence is initialized. 647 */ 648 } 649 650 static void assert_can_disable_dc9(struct intel_display *display) 651 { 652 struct drm_i915_private *dev_priv = to_i915(display->drm); 653 654 drm_WARN_ONCE(display->drm, intel_irqs_enabled(dev_priv), 655 "Interrupts not disabled yet.\n"); 656 drm_WARN_ONCE(display->drm, 657 intel_de_read(display, DC_STATE_EN) & 658 DC_STATE_EN_UPTO_DC5, 659 "DC5 still not disabled.\n"); 660 661 /* 662 * TODO: check for the following to verify DC9 state was indeed 663 * entered before programming to disable it: 664 * 1] Check relevant display engine registers to verify if mode 665 * set disable sequence was followed. 666 * 2] Check if display uninitialize sequence is initialized. 667 */ 668 } 669 670 static void gen9_write_dc_state(struct intel_display *display, 671 u32 state) 672 { 673 int rewrites = 0; 674 int rereads = 0; 675 u32 v; 676 677 intel_de_write(display, DC_STATE_EN, state); 678 679 /* It has been observed that disabling the dc6 state sometimes 680 * doesn't stick and dmc keeps returning old value. Make sure 681 * the write really sticks enough times and also force rewrite until 682 * we are confident that state is exactly what we want. 683 */ 684 do { 685 v = intel_de_read(display, DC_STATE_EN); 686 687 if (v != state) { 688 intel_de_write(display, DC_STATE_EN, state); 689 rewrites++; 690 rereads = 0; 691 } else if (rereads++ > 5) { 692 break; 693 } 694 695 } while (rewrites < 100); 696 697 if (v != state) 698 drm_err(display->drm, 699 "Writing dc state to 0x%x failed, now 0x%x\n", 700 state, v); 701 702 /* Most of the times we need one retry, avoid spam */ 703 if (rewrites > 1) 704 drm_dbg_kms(display->drm, 705 "Rewrote dc state to 0x%x %d times\n", 706 state, rewrites); 707 } 708 709 static u32 gen9_dc_mask(struct intel_display *display) 710 { 711 u32 mask; 712 713 mask = DC_STATE_EN_UPTO_DC5; 714 715 if (DISPLAY_VER(display) >= 12) 716 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6 717 | DC_STATE_EN_DC9; 718 else if (DISPLAY_VER(display) == 11) 719 mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9; 720 else if (display->platform.geminilake || display->platform.broxton) 721 mask |= DC_STATE_EN_DC9; 722 else 723 mask |= DC_STATE_EN_UPTO_DC6; 724 725 return mask; 726 } 727 728 void gen9_sanitize_dc_state(struct intel_display *display) 729 { 730 struct i915_power_domains *power_domains = &display->power.domains; 731 u32 val; 732 733 if (!HAS_DISPLAY(display)) 734 return; 735 736 val = intel_de_read(display, DC_STATE_EN) & gen9_dc_mask(display); 737 738 drm_dbg_kms(display->drm, 739 "Resetting DC state tracking from %02x to %02x\n", 740 power_domains->dc_state, val); 741 power_domains->dc_state = val; 742 } 743 744 /** 745 * gen9_set_dc_state - set target display C power state 746 * @display: display instance 747 * @state: target DC power state 748 * - DC_STATE_DISABLE 749 * - DC_STATE_EN_UPTO_DC5 750 * - DC_STATE_EN_UPTO_DC6 751 * - DC_STATE_EN_DC9 752 * 753 * Signal to DMC firmware/HW the target DC power state passed in @state. 754 * DMC/HW can turn off individual display clocks and power rails when entering 755 * a deeper DC power state (higher in number) and turns these back when exiting 756 * that state to a shallower power state (lower in number). The HW will decide 757 * when to actually enter a given state on an on-demand basis, for instance 758 * depending on the active state of display pipes. The state of display 759 * registers backed by affected power rails are saved/restored as needed. 760 * 761 * Based on the above enabling a deeper DC power state is asynchronous wrt. 762 * enabling it. Disabling a deeper power state is synchronous: for instance 763 * setting %DC_STATE_DISABLE won't complete until all HW resources are turned 764 * back on and register state is restored. This is guaranteed by the MMIO write 765 * to DC_STATE_EN blocking until the state is restored. 766 */ 767 void gen9_set_dc_state(struct intel_display *display, u32 state) 768 { 769 struct i915_power_domains *power_domains = &display->power.domains; 770 bool dc6_was_enabled, enable_dc6; 771 u32 mask; 772 u32 val; 773 774 if (!HAS_DISPLAY(display)) 775 return; 776 777 if (drm_WARN_ON_ONCE(display->drm, 778 state & ~power_domains->allowed_dc_mask)) 779 state &= power_domains->allowed_dc_mask; 780 781 if (!power_domains->initializing) 782 intel_psr_notify_dc5_dc6(display); 783 784 val = intel_de_read(display, DC_STATE_EN); 785 mask = gen9_dc_mask(display); 786 drm_dbg_kms(display->drm, "Setting DC state from %02x to %02x\n", 787 val & mask, state); 788 789 /* Check if DMC is ignoring our DC state requests */ 790 if ((val & mask) != power_domains->dc_state) 791 drm_err(display->drm, "DC state mismatch (0x%x -> 0x%x)\n", 792 power_domains->dc_state, val & mask); 793 794 enable_dc6 = state & DC_STATE_EN_UPTO_DC6; 795 dc6_was_enabled = val & DC_STATE_EN_UPTO_DC6; 796 if (!dc6_was_enabled && enable_dc6) 797 intel_dmc_update_dc6_allowed_count(display, true); 798 799 val &= ~mask; 800 val |= state; 801 802 gen9_write_dc_state(display, val); 803 804 if (!enable_dc6 && dc6_was_enabled) 805 intel_dmc_update_dc6_allowed_count(display, false); 806 807 power_domains->dc_state = val & mask; 808 } 809 810 static void tgl_enable_dc3co(struct intel_display *display) 811 { 812 drm_dbg_kms(display->drm, "Enabling DC3CO\n"); 813 gen9_set_dc_state(display, DC_STATE_EN_DC3CO); 814 } 815 816 static void tgl_disable_dc3co(struct intel_display *display) 817 { 818 drm_dbg_kms(display->drm, "Disabling DC3CO\n"); 819 intel_de_rmw(display, DC_STATE_EN, DC_STATE_DC3CO_STATUS, 0); 820 gen9_set_dc_state(display, DC_STATE_DISABLE); 821 /* 822 * Delay of 200us DC3CO Exit time B.Spec 49196 823 */ 824 usleep_range(200, 210); 825 } 826 827 static void assert_can_enable_dc5(struct intel_display *display) 828 { 829 enum i915_power_well_id high_pg; 830 831 /* Power wells at this level and above must be disabled for DC5 entry */ 832 if (DISPLAY_VER(display) == 12) 833 high_pg = ICL_DISP_PW_3; 834 else 835 high_pg = SKL_DISP_PW_2; 836 837 drm_WARN_ONCE(display->drm, 838 intel_display_power_well_is_enabled(display, high_pg), 839 "Power wells above platform's DC5 limit still enabled.\n"); 840 841 drm_WARN_ONCE(display->drm, 842 (intel_de_read(display, DC_STATE_EN) & 843 DC_STATE_EN_UPTO_DC5), 844 "DC5 already programmed to be enabled.\n"); 845 846 assert_display_rpm_held(display); 847 848 assert_main_dmc_loaded(display); 849 } 850 851 void gen9_enable_dc5(struct intel_display *display) 852 { 853 assert_can_enable_dc5(display); 854 855 drm_dbg_kms(display->drm, "Enabling DC5\n"); 856 857 /* Wa Display #1183: skl,kbl,cfl */ 858 if (DISPLAY_VER(display) == 9 && !display->platform.broxton) 859 intel_de_rmw(display, GEN8_CHICKEN_DCPR_1, 860 0, SKL_SELECT_ALTERNATE_DC_EXIT); 861 862 intel_dmc_wl_enable(display, DC_STATE_EN_UPTO_DC5); 863 864 gen9_set_dc_state(display, DC_STATE_EN_UPTO_DC5); 865 } 866 867 static void assert_can_enable_dc6(struct intel_display *display) 868 { 869 drm_WARN_ONCE(display->drm, 870 (intel_de_read(display, UTIL_PIN_CTL) & 871 (UTIL_PIN_ENABLE | UTIL_PIN_MODE_MASK)) == 872 (UTIL_PIN_ENABLE | UTIL_PIN_MODE_PWM), 873 "Utility pin enabled in PWM mode\n"); 874 drm_WARN_ONCE(display->drm, 875 (intel_de_read(display, DC_STATE_EN) & 876 DC_STATE_EN_UPTO_DC6), 877 "DC6 already programmed to be enabled.\n"); 878 879 assert_main_dmc_loaded(display); 880 } 881 882 void skl_enable_dc6(struct intel_display *display) 883 { 884 assert_can_enable_dc6(display); 885 886 drm_dbg_kms(display->drm, "Enabling DC6\n"); 887 888 /* Wa Display #1183: skl,kbl,cfl */ 889 if (DISPLAY_VER(display) == 9 && !display->platform.broxton) 890 intel_de_rmw(display, GEN8_CHICKEN_DCPR_1, 891 0, SKL_SELECT_ALTERNATE_DC_EXIT); 892 893 intel_dmc_wl_enable(display, DC_STATE_EN_UPTO_DC6); 894 895 gen9_set_dc_state(display, DC_STATE_EN_UPTO_DC6); 896 } 897 898 void bxt_enable_dc9(struct intel_display *display) 899 { 900 assert_can_enable_dc9(display); 901 902 drm_dbg_kms(display->drm, "Enabling DC9\n"); 903 /* 904 * Power sequencer reset is needed on BXT/GLK, because the PPS registers 905 * aren't always on, unlike with South Display Engine on PCH. 906 */ 907 if (display->platform.broxton || display->platform.geminilake) 908 bxt_pps_reset_all(display); 909 gen9_set_dc_state(display, DC_STATE_EN_DC9); 910 } 911 912 void bxt_disable_dc9(struct intel_display *display) 913 { 914 assert_can_disable_dc9(display); 915 916 drm_dbg_kms(display->drm, "Disabling DC9\n"); 917 918 gen9_set_dc_state(display, DC_STATE_DISABLE); 919 920 intel_pps_unlock_regs_wa(display); 921 } 922 923 static void hsw_power_well_sync_hw(struct intel_display *display, 924 struct i915_power_well *power_well) 925 { 926 const struct i915_power_well_regs *regs = power_well->desc->ops->regs; 927 int pw_idx = i915_power_well_instance(power_well)->hsw.idx; 928 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx); 929 u32 bios_req = intel_de_read(display, regs->bios); 930 931 /* Take over the request bit if set by BIOS. */ 932 if (bios_req & mask) { 933 u32 drv_req = intel_de_read(display, regs->driver); 934 935 if (!(drv_req & mask)) 936 intel_de_write(display, regs->driver, drv_req | mask); 937 intel_de_write(display, regs->bios, bios_req & ~mask); 938 } 939 } 940 941 static void bxt_dpio_cmn_power_well_enable(struct intel_display *display, 942 struct i915_power_well *power_well) 943 { 944 bxt_dpio_phy_init(display, i915_power_well_instance(power_well)->bxt.phy); 945 } 946 947 static void bxt_dpio_cmn_power_well_disable(struct intel_display *display, 948 struct i915_power_well *power_well) 949 { 950 bxt_dpio_phy_uninit(display, i915_power_well_instance(power_well)->bxt.phy); 951 } 952 953 static bool bxt_dpio_cmn_power_well_enabled(struct intel_display *display, 954 struct i915_power_well *power_well) 955 { 956 return bxt_dpio_phy_is_enabled(display, i915_power_well_instance(power_well)->bxt.phy); 957 } 958 959 static void bxt_verify_dpio_phy_power_wells(struct intel_display *display) 960 { 961 struct i915_power_well *power_well; 962 963 power_well = lookup_power_well(display, BXT_DISP_PW_DPIO_CMN_A); 964 if (intel_power_well_refcount(power_well) > 0) 965 bxt_dpio_phy_verify_state(display, i915_power_well_instance(power_well)->bxt.phy); 966 967 power_well = lookup_power_well(display, VLV_DISP_PW_DPIO_CMN_BC); 968 if (intel_power_well_refcount(power_well) > 0) 969 bxt_dpio_phy_verify_state(display, i915_power_well_instance(power_well)->bxt.phy); 970 971 if (display->platform.geminilake) { 972 power_well = lookup_power_well(display, 973 GLK_DISP_PW_DPIO_CMN_C); 974 if (intel_power_well_refcount(power_well) > 0) 975 bxt_dpio_phy_verify_state(display, 976 i915_power_well_instance(power_well)->bxt.phy); 977 } 978 } 979 980 static bool gen9_dc_off_power_well_enabled(struct intel_display *display, 981 struct i915_power_well *power_well) 982 { 983 return ((intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 && 984 (intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0); 985 } 986 987 static void gen9_assert_dbuf_enabled(struct intel_display *display) 988 { 989 u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(display); 990 u8 enabled_dbuf_slices = display->dbuf.enabled_slices; 991 992 drm_WARN(display->drm, 993 hw_enabled_dbuf_slices != enabled_dbuf_slices, 994 "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n", 995 hw_enabled_dbuf_slices, 996 enabled_dbuf_slices); 997 } 998 999 void gen9_disable_dc_states(struct intel_display *display) 1000 { 1001 struct i915_power_domains *power_domains = &display->power.domains; 1002 struct intel_cdclk_config cdclk_config = {}; 1003 u32 old_state = power_domains->dc_state; 1004 1005 if (power_domains->target_dc_state == DC_STATE_EN_DC3CO) { 1006 tgl_disable_dc3co(display); 1007 return; 1008 } 1009 1010 if (HAS_DISPLAY(display)) { 1011 intel_dmc_wl_get_noreg(display); 1012 gen9_set_dc_state(display, DC_STATE_DISABLE); 1013 intel_dmc_wl_put_noreg(display); 1014 } else { 1015 gen9_set_dc_state(display, DC_STATE_DISABLE); 1016 return; 1017 } 1018 1019 if (old_state == DC_STATE_EN_UPTO_DC5 || 1020 old_state == DC_STATE_EN_UPTO_DC6) 1021 intel_dmc_wl_disable(display); 1022 1023 intel_cdclk_get_cdclk(display, &cdclk_config); 1024 /* Can't read out voltage_level so can't use intel_cdclk_changed() */ 1025 drm_WARN_ON(display->drm, 1026 intel_cdclk_clock_changed(&display->cdclk.hw, 1027 &cdclk_config)); 1028 1029 gen9_assert_dbuf_enabled(display); 1030 1031 if (display->platform.geminilake || display->platform.broxton) 1032 bxt_verify_dpio_phy_power_wells(display); 1033 1034 if (DISPLAY_VER(display) >= 11) 1035 /* 1036 * DMC retains HW context only for port A, the other combo 1037 * PHY's HW context for port B is lost after DC transitions, 1038 * so we need to restore it manually. 1039 */ 1040 intel_combo_phy_init(display); 1041 } 1042 1043 static void gen9_dc_off_power_well_enable(struct intel_display *display, 1044 struct i915_power_well *power_well) 1045 { 1046 gen9_disable_dc_states(display); 1047 } 1048 1049 static void gen9_dc_off_power_well_disable(struct intel_display *display, 1050 struct i915_power_well *power_well) 1051 { 1052 struct i915_power_domains *power_domains = &display->power.domains; 1053 1054 if (!intel_dmc_has_payload(display)) 1055 return; 1056 1057 switch (power_domains->target_dc_state) { 1058 case DC_STATE_EN_DC3CO: 1059 tgl_enable_dc3co(display); 1060 break; 1061 case DC_STATE_EN_UPTO_DC6: 1062 skl_enable_dc6(display); 1063 break; 1064 case DC_STATE_EN_UPTO_DC5: 1065 gen9_enable_dc5(display); 1066 break; 1067 } 1068 } 1069 1070 static void i9xx_power_well_sync_hw_noop(struct intel_display *display, 1071 struct i915_power_well *power_well) 1072 { 1073 } 1074 1075 static void i9xx_always_on_power_well_noop(struct intel_display *display, 1076 struct i915_power_well *power_well) 1077 { 1078 } 1079 1080 static bool i9xx_always_on_power_well_enabled(struct intel_display *display, 1081 struct i915_power_well *power_well) 1082 { 1083 return true; 1084 } 1085 1086 static void i830_pipes_power_well_enable(struct intel_display *display, 1087 struct i915_power_well *power_well) 1088 { 1089 if ((intel_de_read(display, TRANSCONF(display, PIPE_A)) & TRANSCONF_ENABLE) == 0) 1090 i830_enable_pipe(display, PIPE_A); 1091 if ((intel_de_read(display, TRANSCONF(display, PIPE_B)) & TRANSCONF_ENABLE) == 0) 1092 i830_enable_pipe(display, PIPE_B); 1093 } 1094 1095 static void i830_pipes_power_well_disable(struct intel_display *display, 1096 struct i915_power_well *power_well) 1097 { 1098 i830_disable_pipe(display, PIPE_B); 1099 i830_disable_pipe(display, PIPE_A); 1100 } 1101 1102 static bool i830_pipes_power_well_enabled(struct intel_display *display, 1103 struct i915_power_well *power_well) 1104 { 1105 return intel_de_read(display, TRANSCONF(display, PIPE_A)) & TRANSCONF_ENABLE && 1106 intel_de_read(display, TRANSCONF(display, PIPE_B)) & TRANSCONF_ENABLE; 1107 } 1108 1109 static void i830_pipes_power_well_sync_hw(struct intel_display *display, 1110 struct i915_power_well *power_well) 1111 { 1112 if (intel_power_well_refcount(power_well) > 0) 1113 i830_pipes_power_well_enable(display, power_well); 1114 else 1115 i830_pipes_power_well_disable(display, power_well); 1116 } 1117 1118 static void vlv_set_power_well(struct intel_display *display, 1119 struct i915_power_well *power_well, bool enable) 1120 { 1121 int pw_idx = i915_power_well_instance(power_well)->vlv.idx; 1122 u32 mask; 1123 u32 state; 1124 u32 ctrl; 1125 1126 mask = PUNIT_PWRGT_MASK(pw_idx); 1127 state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) : 1128 PUNIT_PWRGT_PWR_GATE(pw_idx); 1129 1130 vlv_punit_get(display->drm); 1131 1132 #define COND \ 1133 ((vlv_punit_read(display->drm, PUNIT_REG_PWRGT_STATUS) & mask) == state) 1134 1135 if (COND) 1136 goto out; 1137 1138 ctrl = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_CTRL); 1139 ctrl &= ~mask; 1140 ctrl |= state; 1141 vlv_punit_write(display->drm, PUNIT_REG_PWRGT_CTRL, ctrl); 1142 1143 if (wait_for(COND, 100)) 1144 drm_err(display->drm, 1145 "timeout setting power well state %08x (%08x)\n", 1146 state, 1147 vlv_punit_read(display->drm, PUNIT_REG_PWRGT_CTRL)); 1148 1149 #undef COND 1150 1151 out: 1152 vlv_punit_put(display->drm); 1153 } 1154 1155 static void vlv_power_well_enable(struct intel_display *display, 1156 struct i915_power_well *power_well) 1157 { 1158 vlv_set_power_well(display, power_well, true); 1159 } 1160 1161 static void vlv_power_well_disable(struct intel_display *display, 1162 struct i915_power_well *power_well) 1163 { 1164 vlv_set_power_well(display, power_well, false); 1165 } 1166 1167 static bool vlv_power_well_enabled(struct intel_display *display, 1168 struct i915_power_well *power_well) 1169 { 1170 int pw_idx = i915_power_well_instance(power_well)->vlv.idx; 1171 bool enabled = false; 1172 u32 mask; 1173 u32 state; 1174 u32 ctrl; 1175 1176 mask = PUNIT_PWRGT_MASK(pw_idx); 1177 ctrl = PUNIT_PWRGT_PWR_ON(pw_idx); 1178 1179 vlv_punit_get(display->drm); 1180 1181 state = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_STATUS) & mask; 1182 /* 1183 * We only ever set the power-on and power-gate states, anything 1184 * else is unexpected. 1185 */ 1186 drm_WARN_ON(display->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) && 1187 state != PUNIT_PWRGT_PWR_GATE(pw_idx)); 1188 if (state == ctrl) 1189 enabled = true; 1190 1191 /* 1192 * A transient state at this point would mean some unexpected party 1193 * is poking at the power controls too. 1194 */ 1195 ctrl = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_CTRL) & mask; 1196 drm_WARN_ON(display->drm, ctrl != state); 1197 1198 vlv_punit_put(display->drm); 1199 1200 return enabled; 1201 } 1202 1203 static void vlv_init_display_clock_gating(struct intel_display *display) 1204 { 1205 /* 1206 * On driver load, a pipe may be active and driving a DSI display. 1207 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck 1208 * (and never recovering) in this case. intel_dsi_post_disable() will 1209 * clear it when we turn off the display. 1210 */ 1211 intel_de_rmw(display, DSPCLK_GATE_D(display), 1212 ~DPOUNIT_CLOCK_GATE_DISABLE, VRHUNIT_CLOCK_GATE_DISABLE); 1213 1214 /* 1215 * Disable trickle feed and enable pnd deadline calculation 1216 */ 1217 intel_de_write(display, MI_ARB_VLV, 1218 MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); 1219 intel_de_write(display, CBR1_VLV, 0); 1220 1221 drm_WARN_ON(display->drm, DISPLAY_RUNTIME_INFO(display)->rawclk_freq == 0); 1222 intel_de_write(display, RAWCLK_FREQ_VLV, 1223 DIV_ROUND_CLOSEST(DISPLAY_RUNTIME_INFO(display)->rawclk_freq, 1224 1000)); 1225 } 1226 1227 static void vlv_display_power_well_init(struct intel_display *display) 1228 { 1229 struct intel_encoder *encoder; 1230 enum pipe pipe; 1231 1232 /* 1233 * Enable the CRI clock source so we can get at the 1234 * display and the reference clock for VGA 1235 * hotplug / manual detection. Supposedly DSI also 1236 * needs the ref clock up and running. 1237 * 1238 * CHV DPLL B/C have some issues if VGA mode is enabled. 1239 */ 1240 for_each_pipe(display, pipe) { 1241 u32 val = intel_de_read(display, DPLL(display, pipe)); 1242 1243 val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 1244 if (pipe != PIPE_A) 1245 val |= DPLL_INTEGRATED_CRI_CLK_VLV; 1246 1247 intel_de_write(display, DPLL(display, pipe), val); 1248 } 1249 1250 vlv_init_display_clock_gating(display); 1251 1252 valleyview_enable_display_irqs(display); 1253 1254 /* 1255 * During driver initialization/resume we can avoid restoring the 1256 * part of the HW/SW state that will be inited anyway explicitly. 1257 */ 1258 if (display->power.domains.initializing) 1259 return; 1260 1261 intel_hpd_init(display); 1262 intel_hpd_poll_disable(display); 1263 1264 /* Re-enable the ADPA, if we have one */ 1265 for_each_intel_encoder(display->drm, encoder) { 1266 if (encoder->type == INTEL_OUTPUT_ANALOG) 1267 intel_crt_reset(&encoder->base); 1268 } 1269 1270 intel_vga_disable(display); 1271 1272 intel_pps_unlock_regs_wa(display); 1273 } 1274 1275 static void vlv_display_power_well_deinit(struct intel_display *display) 1276 { 1277 struct drm_i915_private *dev_priv = to_i915(display->drm); 1278 1279 valleyview_disable_display_irqs(display); 1280 1281 /* make sure we're done processing display irqs */ 1282 intel_synchronize_irq(dev_priv); 1283 1284 vlv_pps_reset_all(display); 1285 1286 /* Prevent us from re-enabling polling on accident in late suspend */ 1287 if (!display->drm->dev->power.is_suspended) 1288 intel_hpd_poll_enable(display); 1289 } 1290 1291 static void vlv_display_power_well_enable(struct intel_display *display, 1292 struct i915_power_well *power_well) 1293 { 1294 vlv_set_power_well(display, power_well, true); 1295 1296 vlv_display_power_well_init(display); 1297 } 1298 1299 static void vlv_display_power_well_disable(struct intel_display *display, 1300 struct i915_power_well *power_well) 1301 { 1302 vlv_display_power_well_deinit(display); 1303 1304 vlv_set_power_well(display, power_well, false); 1305 } 1306 1307 static void vlv_dpio_cmn_power_well_enable(struct intel_display *display, 1308 struct i915_power_well *power_well) 1309 { 1310 /* since ref/cri clock was enabled */ 1311 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ 1312 1313 vlv_set_power_well(display, power_well, true); 1314 1315 /* 1316 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx - 1317 * 6. De-assert cmn_reset/side_reset. Same as VLV X0. 1318 * a. GUnit 0x2110 bit[0] set to 1 (def 0) 1319 * b. The other bits such as sfr settings / modesel may all 1320 * be set to 0. 1321 * 1322 * This should only be done on init and resume from S3 with 1323 * both PLLs disabled, or we risk losing DPIO and PLL 1324 * synchronization. 1325 */ 1326 intel_de_rmw(display, DPIO_CTL, 0, DPIO_CMNRST); 1327 } 1328 1329 static void vlv_dpio_cmn_power_well_disable(struct intel_display *display, 1330 struct i915_power_well *power_well) 1331 { 1332 enum pipe pipe; 1333 1334 for_each_pipe(display, pipe) 1335 assert_pll_disabled(display, pipe); 1336 1337 /* Assert common reset */ 1338 intel_de_rmw(display, DPIO_CTL, DPIO_CMNRST, 0); 1339 1340 vlv_set_power_well(display, power_well, false); 1341 } 1342 1343 #define BITS_SET(val, bits) (((val) & (bits)) == (bits)) 1344 1345 static void assert_chv_phy_status(struct intel_display *display) 1346 { 1347 struct i915_power_well *cmn_bc = 1348 lookup_power_well(display, VLV_DISP_PW_DPIO_CMN_BC); 1349 struct i915_power_well *cmn_d = 1350 lookup_power_well(display, CHV_DISP_PW_DPIO_CMN_D); 1351 u32 phy_control = display->power.chv_phy_control; 1352 u32 phy_status = 0; 1353 u32 phy_status_mask = 0xffffffff; 1354 1355 /* 1356 * The BIOS can leave the PHY is some weird state 1357 * where it doesn't fully power down some parts. 1358 * Disable the asserts until the PHY has been fully 1359 * reset (ie. the power well has been disabled at 1360 * least once). 1361 */ 1362 if (!display->power.chv_phy_assert[DPIO_PHY0]) 1363 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) | 1364 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) | 1365 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) | 1366 PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) | 1367 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) | 1368 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1)); 1369 1370 if (!display->power.chv_phy_assert[DPIO_PHY1]) 1371 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) | 1372 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) | 1373 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1)); 1374 1375 if (intel_power_well_is_enabled(display, cmn_bc)) { 1376 phy_status |= PHY_POWERGOOD(DPIO_PHY0); 1377 1378 /* this assumes override is only used to enable lanes */ 1379 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0) 1380 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0); 1381 1382 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0) 1383 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1); 1384 1385 /* CL1 is on whenever anything is on in either channel */ 1386 if (BITS_SET(phy_control, 1387 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) | 1388 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1))) 1389 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0); 1390 1391 /* 1392 * The DPLLB check accounts for the pipe B + port A usage 1393 * with CL2 powered up but all the lanes in the second channel 1394 * powered down. 1395 */ 1396 if (BITS_SET(phy_control, 1397 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) && 1398 (intel_de_read(display, DPLL(display, PIPE_B)) & DPLL_VCO_ENABLE) == 0) 1399 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1); 1400 1401 if (BITS_SET(phy_control, 1402 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0))) 1403 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0); 1404 if (BITS_SET(phy_control, 1405 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0))) 1406 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1); 1407 1408 if (BITS_SET(phy_control, 1409 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1))) 1410 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0); 1411 if (BITS_SET(phy_control, 1412 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1))) 1413 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1); 1414 } 1415 1416 if (intel_power_well_is_enabled(display, cmn_d)) { 1417 phy_status |= PHY_POWERGOOD(DPIO_PHY1); 1418 1419 /* this assumes override is only used to enable lanes */ 1420 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0) 1421 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0); 1422 1423 if (BITS_SET(phy_control, 1424 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0))) 1425 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0); 1426 1427 if (BITS_SET(phy_control, 1428 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0))) 1429 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0); 1430 if (BITS_SET(phy_control, 1431 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0))) 1432 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1); 1433 } 1434 1435 phy_status &= phy_status_mask; 1436 1437 /* 1438 * The PHY may be busy with some initial calibration and whatnot, 1439 * so the power state can take a while to actually change. 1440 */ 1441 if (intel_de_wait(display, DISPLAY_PHY_STATUS, 1442 phy_status_mask, phy_status, 10)) 1443 drm_err(display->drm, 1444 "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n", 1445 intel_de_read(display, DISPLAY_PHY_STATUS) & phy_status_mask, 1446 phy_status, display->power.chv_phy_control); 1447 } 1448 1449 #undef BITS_SET 1450 1451 static void chv_dpio_cmn_power_well_enable(struct intel_display *display, 1452 struct i915_power_well *power_well) 1453 { 1454 enum i915_power_well_id id = i915_power_well_instance(power_well)->id; 1455 enum dpio_phy phy; 1456 u32 tmp; 1457 1458 drm_WARN_ON_ONCE(display->drm, 1459 id != VLV_DISP_PW_DPIO_CMN_BC && 1460 id != CHV_DISP_PW_DPIO_CMN_D); 1461 1462 if (id == VLV_DISP_PW_DPIO_CMN_BC) 1463 phy = DPIO_PHY0; 1464 else 1465 phy = DPIO_PHY1; 1466 1467 /* since ref/cri clock was enabled */ 1468 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ 1469 vlv_set_power_well(display, power_well, true); 1470 1471 /* Poll for phypwrgood signal */ 1472 if (intel_de_wait_for_set(display, DISPLAY_PHY_STATUS, 1473 PHY_POWERGOOD(phy), 1)) 1474 drm_err(display->drm, "Display PHY %d is not power up\n", 1475 phy); 1476 1477 vlv_dpio_get(display->drm); 1478 1479 /* Enable dynamic power down */ 1480 tmp = vlv_dpio_read(display->drm, phy, CHV_CMN_DW28); 1481 tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN | 1482 DPIO_SUS_CLK_CONFIG_GATE_CLKREQ; 1483 vlv_dpio_write(display->drm, phy, CHV_CMN_DW28, tmp); 1484 1485 if (id == VLV_DISP_PW_DPIO_CMN_BC) { 1486 tmp = vlv_dpio_read(display->drm, phy, CHV_CMN_DW6_CH1); 1487 tmp |= DPIO_DYNPWRDOWNEN_CH1; 1488 vlv_dpio_write(display->drm, phy, CHV_CMN_DW6_CH1, tmp); 1489 } else { 1490 /* 1491 * Force the non-existing CL2 off. BXT does this 1492 * too, so maybe it saves some power even though 1493 * CL2 doesn't exist? 1494 */ 1495 tmp = vlv_dpio_read(display->drm, phy, CHV_CMN_DW30); 1496 tmp |= DPIO_CL2_LDOFUSE_PWRENB; 1497 vlv_dpio_write(display->drm, phy, CHV_CMN_DW30, tmp); 1498 } 1499 1500 vlv_dpio_put(display->drm); 1501 1502 display->power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy); 1503 intel_de_write(display, DISPLAY_PHY_CONTROL, 1504 display->power.chv_phy_control); 1505 1506 drm_dbg_kms(display->drm, 1507 "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", 1508 phy, display->power.chv_phy_control); 1509 1510 assert_chv_phy_status(display); 1511 } 1512 1513 static void chv_dpio_cmn_power_well_disable(struct intel_display *display, 1514 struct i915_power_well *power_well) 1515 { 1516 enum i915_power_well_id id = i915_power_well_instance(power_well)->id; 1517 enum dpio_phy phy; 1518 1519 drm_WARN_ON_ONCE(display->drm, 1520 id != VLV_DISP_PW_DPIO_CMN_BC && 1521 id != CHV_DISP_PW_DPIO_CMN_D); 1522 1523 if (id == VLV_DISP_PW_DPIO_CMN_BC) { 1524 phy = DPIO_PHY0; 1525 assert_pll_disabled(display, PIPE_A); 1526 assert_pll_disabled(display, PIPE_B); 1527 } else { 1528 phy = DPIO_PHY1; 1529 assert_pll_disabled(display, PIPE_C); 1530 } 1531 1532 display->power.chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy); 1533 intel_de_write(display, DISPLAY_PHY_CONTROL, 1534 display->power.chv_phy_control); 1535 1536 vlv_set_power_well(display, power_well, false); 1537 1538 drm_dbg_kms(display->drm, 1539 "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", 1540 phy, display->power.chv_phy_control); 1541 1542 /* PHY is fully reset now, so we can enable the PHY state asserts */ 1543 display->power.chv_phy_assert[phy] = true; 1544 1545 assert_chv_phy_status(display); 1546 } 1547 1548 static void assert_chv_phy_powergate(struct intel_display *display, enum dpio_phy phy, 1549 enum dpio_channel ch, bool override, unsigned int mask) 1550 { 1551 u32 reg, val, expected, actual; 1552 1553 /* 1554 * The BIOS can leave the PHY is some weird state 1555 * where it doesn't fully power down some parts. 1556 * Disable the asserts until the PHY has been fully 1557 * reset (ie. the power well has been disabled at 1558 * least once). 1559 */ 1560 if (!display->power.chv_phy_assert[phy]) 1561 return; 1562 1563 if (ch == DPIO_CH0) 1564 reg = CHV_CMN_DW0_CH0; 1565 else 1566 reg = CHV_CMN_DW6_CH1; 1567 1568 vlv_dpio_get(display->drm); 1569 val = vlv_dpio_read(display->drm, phy, reg); 1570 vlv_dpio_put(display->drm); 1571 1572 /* 1573 * This assumes !override is only used when the port is disabled. 1574 * All lanes should power down even without the override when 1575 * the port is disabled. 1576 */ 1577 if (!override || mask == 0xf) { 1578 expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN; 1579 /* 1580 * If CH1 common lane is not active anymore 1581 * (eg. for pipe B DPLL) the entire channel will 1582 * shut down, which causes the common lane registers 1583 * to read as 0. That means we can't actually check 1584 * the lane power down status bits, but as the entire 1585 * register reads as 0 it's a good indication that the 1586 * channel is indeed entirely powered down. 1587 */ 1588 if (ch == DPIO_CH1 && val == 0) 1589 expected = 0; 1590 } else if (mask != 0x0) { 1591 expected = DPIO_ANYDL_POWERDOWN; 1592 } else { 1593 expected = 0; 1594 } 1595 1596 if (ch == DPIO_CH0) 1597 actual = REG_FIELD_GET(DPIO_ANYDL_POWERDOWN_CH0 | 1598 DPIO_ALLDL_POWERDOWN_CH0, val); 1599 else 1600 actual = REG_FIELD_GET(DPIO_ANYDL_POWERDOWN_CH1 | 1601 DPIO_ALLDL_POWERDOWN_CH1, val); 1602 1603 drm_WARN(display->drm, actual != expected, 1604 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n", 1605 !!(actual & DPIO_ALLDL_POWERDOWN), 1606 !!(actual & DPIO_ANYDL_POWERDOWN), 1607 !!(expected & DPIO_ALLDL_POWERDOWN), 1608 !!(expected & DPIO_ANYDL_POWERDOWN), 1609 reg, val); 1610 } 1611 1612 bool chv_phy_powergate_ch(struct intel_display *display, enum dpio_phy phy, 1613 enum dpio_channel ch, bool override) 1614 { 1615 struct i915_power_domains *power_domains = &display->power.domains; 1616 bool was_override; 1617 1618 mutex_lock(&power_domains->lock); 1619 1620 was_override = display->power.chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1621 1622 if (override == was_override) 1623 goto out; 1624 1625 if (override) 1626 display->power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1627 else 1628 display->power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1629 1630 intel_de_write(display, DISPLAY_PHY_CONTROL, 1631 display->power.chv_phy_control); 1632 1633 drm_dbg_kms(display->drm, 1634 "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n", 1635 phy, ch, display->power.chv_phy_control); 1636 1637 assert_chv_phy_status(display); 1638 1639 out: 1640 mutex_unlock(&power_domains->lock); 1641 1642 return was_override; 1643 } 1644 1645 void chv_phy_powergate_lanes(struct intel_encoder *encoder, 1646 bool override, unsigned int mask) 1647 { 1648 struct intel_display *display = to_intel_display(encoder); 1649 struct i915_power_domains *power_domains = &display->power.domains; 1650 enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder)); 1651 enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder)); 1652 1653 mutex_lock(&power_domains->lock); 1654 1655 display->power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch); 1656 display->power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch); 1657 1658 if (override) 1659 display->power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1660 else 1661 display->power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1662 1663 intel_de_write(display, DISPLAY_PHY_CONTROL, 1664 display->power.chv_phy_control); 1665 1666 drm_dbg_kms(display->drm, 1667 "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n", 1668 phy, ch, mask, display->power.chv_phy_control); 1669 1670 assert_chv_phy_status(display); 1671 1672 assert_chv_phy_powergate(display, phy, ch, override, mask); 1673 1674 mutex_unlock(&power_domains->lock); 1675 } 1676 1677 static bool chv_pipe_power_well_enabled(struct intel_display *display, 1678 struct i915_power_well *power_well) 1679 { 1680 enum pipe pipe = PIPE_A; 1681 bool enabled; 1682 u32 state, ctrl; 1683 1684 vlv_punit_get(display->drm); 1685 1686 state = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe); 1687 /* 1688 * We only ever set the power-on and power-gate states, anything 1689 * else is unexpected. 1690 */ 1691 drm_WARN_ON(display->drm, state != DP_SSS_PWR_ON(pipe) && 1692 state != DP_SSS_PWR_GATE(pipe)); 1693 enabled = state == DP_SSS_PWR_ON(pipe); 1694 1695 /* 1696 * A transient state at this point would mean some unexpected party 1697 * is poking at the power controls too. 1698 */ 1699 ctrl = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe); 1700 drm_WARN_ON(display->drm, ctrl << 16 != state); 1701 1702 vlv_punit_put(display->drm); 1703 1704 return enabled; 1705 } 1706 1707 static void chv_set_pipe_power_well(struct intel_display *display, 1708 struct i915_power_well *power_well, 1709 bool enable) 1710 { 1711 enum pipe pipe = PIPE_A; 1712 u32 state; 1713 u32 ctrl; 1714 1715 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe); 1716 1717 vlv_punit_get(display->drm); 1718 1719 #define COND \ 1720 ((vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state) 1721 1722 if (COND) 1723 goto out; 1724 1725 ctrl = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM); 1726 ctrl &= ~DP_SSC_MASK(pipe); 1727 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe); 1728 vlv_punit_write(display->drm, PUNIT_REG_DSPSSPM, ctrl); 1729 1730 if (wait_for(COND, 100)) 1731 drm_err(display->drm, 1732 "timeout setting power well state %08x (%08x)\n", 1733 state, 1734 vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM)); 1735 1736 #undef COND 1737 1738 out: 1739 vlv_punit_put(display->drm); 1740 } 1741 1742 static void chv_pipe_power_well_sync_hw(struct intel_display *display, 1743 struct i915_power_well *power_well) 1744 { 1745 intel_de_write(display, DISPLAY_PHY_CONTROL, 1746 display->power.chv_phy_control); 1747 } 1748 1749 static void chv_pipe_power_well_enable(struct intel_display *display, 1750 struct i915_power_well *power_well) 1751 { 1752 chv_set_pipe_power_well(display, power_well, true); 1753 1754 vlv_display_power_well_init(display); 1755 } 1756 1757 static void chv_pipe_power_well_disable(struct intel_display *display, 1758 struct i915_power_well *power_well) 1759 { 1760 vlv_display_power_well_deinit(display); 1761 1762 chv_set_pipe_power_well(display, power_well, false); 1763 } 1764 1765 static void 1766 tgl_tc_cold_request(struct intel_display *display, bool block) 1767 { 1768 struct drm_i915_private *i915 = to_i915(display->drm); 1769 u8 tries = 0; 1770 int ret; 1771 1772 while (1) { 1773 u32 low_val; 1774 u32 high_val = 0; 1775 1776 if (block) 1777 low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ; 1778 else 1779 low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ; 1780 1781 /* 1782 * Spec states that we should timeout the request after 200us 1783 * but the function below will timeout after 500us 1784 */ 1785 ret = intel_pcode_read(display->drm, TGL_PCODE_TCCOLD, &low_val, &high_val); 1786 if (ret == 0) { 1787 if (block && 1788 (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED)) 1789 ret = -EIO; 1790 else 1791 break; 1792 } 1793 1794 if (++tries == 3) 1795 break; 1796 1797 msleep(1); 1798 } 1799 1800 if (ret) 1801 drm_err(&i915->drm, "TC cold %sblock failed\n", 1802 block ? "" : "un"); 1803 else 1804 drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n", 1805 block ? "" : "un"); 1806 } 1807 1808 static void 1809 tgl_tc_cold_off_power_well_enable(struct intel_display *display, 1810 struct i915_power_well *power_well) 1811 { 1812 tgl_tc_cold_request(display, true); 1813 } 1814 1815 static void 1816 tgl_tc_cold_off_power_well_disable(struct intel_display *display, 1817 struct i915_power_well *power_well) 1818 { 1819 tgl_tc_cold_request(display, false); 1820 } 1821 1822 static void 1823 tgl_tc_cold_off_power_well_sync_hw(struct intel_display *display, 1824 struct i915_power_well *power_well) 1825 { 1826 if (intel_power_well_refcount(power_well) > 0) 1827 tgl_tc_cold_off_power_well_enable(display, power_well); 1828 else 1829 tgl_tc_cold_off_power_well_disable(display, power_well); 1830 } 1831 1832 static bool 1833 tgl_tc_cold_off_power_well_is_enabled(struct intel_display *display, 1834 struct i915_power_well *power_well) 1835 { 1836 /* 1837 * Not the correctly implementation but there is no way to just read it 1838 * from PCODE, so returning count to avoid state mismatch errors 1839 */ 1840 return intel_power_well_refcount(power_well); 1841 } 1842 1843 static void xelpdp_aux_power_well_enable(struct intel_display *display, 1844 struct i915_power_well *power_well) 1845 { 1846 enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch; 1847 enum phy phy = icl_aux_pw_to_phy(display, power_well); 1848 1849 if (intel_phy_is_tc(display, phy)) 1850 icl_tc_port_assert_ref_held(display, power_well, 1851 aux_ch_to_digital_port(display, aux_ch)); 1852 1853 intel_de_rmw(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch), 1854 XELPDP_DP_AUX_CH_CTL_POWER_REQUEST, 1855 XELPDP_DP_AUX_CH_CTL_POWER_REQUEST); 1856 1857 /* 1858 * The power status flag cannot be used to determine whether aux 1859 * power wells have finished powering up. Instead we're 1860 * expected to just wait a fixed 600us after raising the request 1861 * bit. 1862 */ 1863 usleep_range(600, 1200); 1864 } 1865 1866 static void xelpdp_aux_power_well_disable(struct intel_display *display, 1867 struct i915_power_well *power_well) 1868 { 1869 enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch; 1870 1871 intel_de_rmw(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch), 1872 XELPDP_DP_AUX_CH_CTL_POWER_REQUEST, 1873 0); 1874 usleep_range(10, 30); 1875 } 1876 1877 static bool xelpdp_aux_power_well_enabled(struct intel_display *display, 1878 struct i915_power_well *power_well) 1879 { 1880 enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch; 1881 1882 return intel_de_read(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch)) & 1883 XELPDP_DP_AUX_CH_CTL_POWER_STATUS; 1884 } 1885 1886 static void xe2lpd_pica_power_well_enable(struct intel_display *display, 1887 struct i915_power_well *power_well) 1888 { 1889 intel_de_write(display, XE2LPD_PICA_PW_CTL, 1890 XE2LPD_PICA_CTL_POWER_REQUEST); 1891 1892 if (intel_de_wait_for_set(display, XE2LPD_PICA_PW_CTL, 1893 XE2LPD_PICA_CTL_POWER_STATUS, 1)) { 1894 drm_dbg_kms(display->drm, "pica power well enable timeout\n"); 1895 1896 drm_WARN(display->drm, 1, "Power well PICA timeout when enabled"); 1897 } 1898 } 1899 1900 static void xe2lpd_pica_power_well_disable(struct intel_display *display, 1901 struct i915_power_well *power_well) 1902 { 1903 intel_de_write(display, XE2LPD_PICA_PW_CTL, 0); 1904 1905 if (intel_de_wait_for_clear(display, XE2LPD_PICA_PW_CTL, 1906 XE2LPD_PICA_CTL_POWER_STATUS, 1)) { 1907 drm_dbg_kms(display->drm, "pica power well disable timeout\n"); 1908 1909 drm_WARN(display->drm, 1, "Power well PICA timeout when disabled"); 1910 } 1911 } 1912 1913 static bool xe2lpd_pica_power_well_enabled(struct intel_display *display, 1914 struct i915_power_well *power_well) 1915 { 1916 return intel_de_read(display, XE2LPD_PICA_PW_CTL) & 1917 XE2LPD_PICA_CTL_POWER_STATUS; 1918 } 1919 1920 const struct i915_power_well_ops i9xx_always_on_power_well_ops = { 1921 .sync_hw = i9xx_power_well_sync_hw_noop, 1922 .enable = i9xx_always_on_power_well_noop, 1923 .disable = i9xx_always_on_power_well_noop, 1924 .is_enabled = i9xx_always_on_power_well_enabled, 1925 }; 1926 1927 const struct i915_power_well_ops chv_pipe_power_well_ops = { 1928 .sync_hw = chv_pipe_power_well_sync_hw, 1929 .enable = chv_pipe_power_well_enable, 1930 .disable = chv_pipe_power_well_disable, 1931 .is_enabled = chv_pipe_power_well_enabled, 1932 }; 1933 1934 const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = { 1935 .sync_hw = i9xx_power_well_sync_hw_noop, 1936 .enable = chv_dpio_cmn_power_well_enable, 1937 .disable = chv_dpio_cmn_power_well_disable, 1938 .is_enabled = vlv_power_well_enabled, 1939 }; 1940 1941 const struct i915_power_well_ops i830_pipes_power_well_ops = { 1942 .sync_hw = i830_pipes_power_well_sync_hw, 1943 .enable = i830_pipes_power_well_enable, 1944 .disable = i830_pipes_power_well_disable, 1945 .is_enabled = i830_pipes_power_well_enabled, 1946 }; 1947 1948 static const struct i915_power_well_regs hsw_power_well_regs = { 1949 .bios = HSW_PWR_WELL_CTL1, 1950 .driver = HSW_PWR_WELL_CTL2, 1951 .kvmr = HSW_PWR_WELL_CTL3, 1952 .debug = HSW_PWR_WELL_CTL4, 1953 }; 1954 1955 const struct i915_power_well_ops hsw_power_well_ops = { 1956 .regs = &hsw_power_well_regs, 1957 .sync_hw = hsw_power_well_sync_hw, 1958 .enable = hsw_power_well_enable, 1959 .disable = hsw_power_well_disable, 1960 .is_enabled = hsw_power_well_enabled, 1961 }; 1962 1963 const struct i915_power_well_ops gen9_dc_off_power_well_ops = { 1964 .sync_hw = i9xx_power_well_sync_hw_noop, 1965 .enable = gen9_dc_off_power_well_enable, 1966 .disable = gen9_dc_off_power_well_disable, 1967 .is_enabled = gen9_dc_off_power_well_enabled, 1968 }; 1969 1970 const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = { 1971 .sync_hw = i9xx_power_well_sync_hw_noop, 1972 .enable = bxt_dpio_cmn_power_well_enable, 1973 .disable = bxt_dpio_cmn_power_well_disable, 1974 .is_enabled = bxt_dpio_cmn_power_well_enabled, 1975 }; 1976 1977 const struct i915_power_well_ops vlv_display_power_well_ops = { 1978 .sync_hw = i9xx_power_well_sync_hw_noop, 1979 .enable = vlv_display_power_well_enable, 1980 .disable = vlv_display_power_well_disable, 1981 .is_enabled = vlv_power_well_enabled, 1982 }; 1983 1984 const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = { 1985 .sync_hw = i9xx_power_well_sync_hw_noop, 1986 .enable = vlv_dpio_cmn_power_well_enable, 1987 .disable = vlv_dpio_cmn_power_well_disable, 1988 .is_enabled = vlv_power_well_enabled, 1989 }; 1990 1991 const struct i915_power_well_ops vlv_dpio_power_well_ops = { 1992 .sync_hw = i9xx_power_well_sync_hw_noop, 1993 .enable = vlv_power_well_enable, 1994 .disable = vlv_power_well_disable, 1995 .is_enabled = vlv_power_well_enabled, 1996 }; 1997 1998 static const struct i915_power_well_regs icl_aux_power_well_regs = { 1999 .bios = ICL_PWR_WELL_CTL_AUX1, 2000 .driver = ICL_PWR_WELL_CTL_AUX2, 2001 .debug = ICL_PWR_WELL_CTL_AUX4, 2002 }; 2003 2004 const struct i915_power_well_ops icl_aux_power_well_ops = { 2005 .regs = &icl_aux_power_well_regs, 2006 .sync_hw = hsw_power_well_sync_hw, 2007 .enable = icl_aux_power_well_enable, 2008 .disable = icl_aux_power_well_disable, 2009 .is_enabled = hsw_power_well_enabled, 2010 }; 2011 2012 static const struct i915_power_well_regs icl_ddi_power_well_regs = { 2013 .bios = ICL_PWR_WELL_CTL_DDI1, 2014 .driver = ICL_PWR_WELL_CTL_DDI2, 2015 .debug = ICL_PWR_WELL_CTL_DDI4, 2016 }; 2017 2018 const struct i915_power_well_ops icl_ddi_power_well_ops = { 2019 .regs = &icl_ddi_power_well_regs, 2020 .sync_hw = hsw_power_well_sync_hw, 2021 .enable = hsw_power_well_enable, 2022 .disable = hsw_power_well_disable, 2023 .is_enabled = hsw_power_well_enabled, 2024 }; 2025 2026 const struct i915_power_well_ops tgl_tc_cold_off_ops = { 2027 .sync_hw = tgl_tc_cold_off_power_well_sync_hw, 2028 .enable = tgl_tc_cold_off_power_well_enable, 2029 .disable = tgl_tc_cold_off_power_well_disable, 2030 .is_enabled = tgl_tc_cold_off_power_well_is_enabled, 2031 }; 2032 2033 const struct i915_power_well_ops xelpdp_aux_power_well_ops = { 2034 .sync_hw = i9xx_power_well_sync_hw_noop, 2035 .enable = xelpdp_aux_power_well_enable, 2036 .disable = xelpdp_aux_power_well_disable, 2037 .is_enabled = xelpdp_aux_power_well_enabled, 2038 }; 2039 2040 const struct i915_power_well_ops xe2lpd_pica_power_well_ops = { 2041 .sync_hw = i9xx_power_well_sync_hw_noop, 2042 .enable = xe2lpd_pica_power_well_enable, 2043 .disable = xe2lpd_pica_power_well_disable, 2044 .is_enabled = xe2lpd_pica_power_well_enabled, 2045 }; 2046