// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "intel_backlight_regs.h"
#include "intel_combo_phy.h"
#include "intel_combo_phy_regs.h"
#include "intel_crt.h"
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_power_well.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dkl_phy.h"
#include "intel_dkl_phy_regs.h"
#include "intel_dmc.h"
#include "intel_dmc_wl.h"
#include "intel_dp_aux_regs.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_hotplug.h"
#include "intel_pcode.h"
#include "intel_pps.h"
#include "intel_tc.h"
#include "intel_vga.h"
#include "skl_watermark.h"
#include "vlv_dpio_phy_regs.h"
#include "vlv_sideband.h"
#include "vlv_sideband_reg.h"

struct i915_power_well_regs {
	i915_reg_t bios;
	i915_reg_t driver;
	i915_reg_t kvmr;
	i915_reg_t debug;
};

struct i915_power_well_ops {
	const struct i915_power_well_regs *regs;
	/*
	 * Synchronize the well's hw state to match the current sw state, for
	 * example enable/disable it based on the current refcount. Called
	 * during driver init and resume time, possibly after first calling
	 * the enable/disable handlers.
	 */
	void (*sync_hw)(struct intel_display *display,
			struct i915_power_well *power_well);
	/*
	 * Enable the well and resources that depend on it (for example
	 * interrupts located on the well). Called after the 0->1 refcount
	 * transition.
	 */
	void (*enable)(struct intel_display *display,
		       struct i915_power_well *power_well);
	/*
	 * Disable the well and resources that depend on it. Called after
	 * the 1->0 refcount transition.
	 */
	void (*disable)(struct intel_display *display,
			struct i915_power_well *power_well);
	/* Returns the hw enabled state. */
	bool (*is_enabled)(struct intel_display *display,
			   struct i915_power_well *power_well);
};

static const struct i915_power_well_instance *
i915_power_well_instance(const struct i915_power_well *power_well)
{
	return &power_well->desc->instances->list[power_well->instance_idx];
}

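/*
 * Illustrative sketch only (not an ops table from this file): each platform
 * provides vtables like the ones at the bottom of this file, e.g.
 *
 *	static const struct i915_power_well_ops foo_power_well_ops = {
 *		.sync_hw = i9xx_power_well_sync_hw_noop,
 *		.enable = foo_power_well_enable,
 *		.disable = foo_power_well_disable,
 *		.is_enabled = foo_power_well_enabled,
 *	};
 *
 * "foo" is a hypothetical platform prefix; the .regs pointer is only needed
 * by ops that use the HSW-style BIOS/DRIVER/KVMR/DEBUG request registers.
 */
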
struct i915_power_well *
lookup_power_well(struct intel_display *display,
		  enum i915_power_well_id power_well_id)
{
	struct i915_power_well *power_well;

	for_each_power_well(display, power_well)
		if (i915_power_well_instance(power_well)->id == power_well_id)
			return power_well;

	/*
	 * It's not feasible to add error checking code to the callers since
	 * this condition really shouldn't happen and it doesn't even make sense
	 * to abort things like display initialization sequences. Just return
	 * the first power well and hope the WARN gets reported so we can fix
	 * our driver.
	 */
	drm_WARN(display->drm, 1,
		 "Power well %d not defined for this platform\n",
		 power_well_id);
	return &display->power.domains.power_wells[0];
}

void intel_power_well_enable(struct intel_display *display,
			     struct i915_power_well *power_well)
{
	drm_dbg_kms(display->drm, "enabling %s\n", intel_power_well_name(power_well));
	power_well->desc->ops->enable(display, power_well);
	power_well->hw_enabled = true;
}

void intel_power_well_disable(struct intel_display *display,
			      struct i915_power_well *power_well)
{
	drm_dbg_kms(display->drm, "disabling %s\n", intel_power_well_name(power_well));
	power_well->hw_enabled = false;
	power_well->desc->ops->disable(display, power_well);
}

void intel_power_well_sync_hw(struct intel_display *display,
			      struct i915_power_well *power_well)
{
	power_well->desc->ops->sync_hw(display, power_well);
	power_well->hw_enabled = power_well->desc->ops->is_enabled(display, power_well);
}

void intel_power_well_get(struct intel_display *display,
			  struct i915_power_well *power_well)
{
	if (!power_well->count++)
		intel_power_well_enable(display, power_well);
}

void intel_power_well_put(struct intel_display *display,
			  struct i915_power_well *power_well)
{
	drm_WARN(display->drm, !power_well->count,
		 "Use count on power well %s is already zero",
		 i915_power_well_instance(power_well)->name);

	if (!--power_well->count)
		intel_power_well_disable(display, power_well);
}

bool intel_power_well_is_enabled(struct intel_display *display,
				 struct i915_power_well *power_well)
{
	return power_well->desc->ops->is_enabled(display, power_well);
}

bool intel_power_well_is_enabled_cached(struct i915_power_well *power_well)
{
	return power_well->hw_enabled;
}

bool intel_display_power_well_is_enabled(struct intel_display *display,
					 enum i915_power_well_id power_well_id)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(display, power_well_id);

	return intel_power_well_is_enabled(display, power_well);
}

bool intel_power_well_is_always_on(struct i915_power_well *power_well)
{
	return power_well->desc->always_on;
}

const char *intel_power_well_name(struct i915_power_well *power_well)
{
	return i915_power_well_instance(power_well)->name;
}

struct intel_power_domain_mask *intel_power_well_domains(struct i915_power_well *power_well)
{
	return &power_well->domains;
}

int intel_power_well_refcount(struct i915_power_well *power_well)
{
	return power_well->count;
}

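/*
 * Illustrative sketch only: callers are expected to pair get/put so the well
 * is enabled on the 0->1 refcount transition and disabled on 1->0. A
 * hypothetical caller might do:
 *
 *	intel_power_well_get(display, power_well);
 *	... program registers backed by the well ...
 *	intel_power_well_put(display, power_well);
 *
 * In practice this is typically driven by the higher-level power domain code
 * rather than called directly.
 */
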
/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct intel_display *display,
				       u8 irq_pipe_mask, bool has_vga)
{
	if (has_vga)
		intel_vga_reset_io_mem(display);

	if (irq_pipe_mask)
		gen8_irq_power_well_post_enable(display, irq_pipe_mask);
}

static void hsw_power_well_pre_disable(struct intel_display *display,
				       u8 irq_pipe_mask)
{
	if (irq_pipe_mask)
		gen8_irq_power_well_pre_disable(display, irq_pipe_mask);
}

#define ICL_AUX_PW_TO_PHY(pw_idx) \
	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + PHY_A)

#define ICL_AUX_PW_TO_CH(pw_idx) \
	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)

#define ICL_TBT_AUX_PW_TO_CH(pw_idx) \
	((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)

static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well)
{
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;

	return power_well->desc->is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
					     ICL_AUX_PW_TO_CH(pw_idx);
}

static struct intel_digital_port *
aux_ch_to_digital_port(struct intel_display *display,
		       enum aux_ch aux_ch)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(display->drm, encoder) {
		struct intel_digital_port *dig_port;

		/* We'll check the MST primary port */
		if (encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		dig_port = enc_to_dig_port(encoder);

		if (dig_port && dig_port->aux_ch == aux_ch)
			return dig_port;
	}

	return NULL;
}

static enum phy icl_aux_pw_to_phy(struct intel_display *display,
				  const struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
	struct intel_digital_port *dig_port = aux_ch_to_digital_port(display, aux_ch);

	/*
	 * FIXME should we care about the (VBT defined) dig_port->aux_ch
	 * relationship or should this be purely defined by the hardware layout?
	 * Currently if the port doesn't appear in the VBT, or if it's declared
	 * as HDMI-only and routed to a combo PHY, the encoder either won't be
	 * present at all or it will not have an aux_ch assigned.
	 */
	return dig_port ? intel_encoder_to_phy(&dig_port->base) : PHY_NONE;
}

static void hsw_wait_for_power_well_enable(struct intel_display *display,
					   struct i915_power_well *power_well,
					   bool timeout_expected)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
	int timeout = power_well->desc->enable_timeout ? : 1;

	/*
	 * For some power wells we're not supposed to watch the status bit for
	 * an ack, but rather just wait a fixed amount of time and then
	 * proceed. This is only used on DG2.
	 */
	if (display->platform.dg2 && power_well->desc->fixed_enable_delay) {
		usleep_range(600, 1200);
		return;
	}

	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
	if (intel_de_wait_for_set(display, regs->driver,
				  HSW_PWR_WELL_CTL_STATE(pw_idx), timeout)) {
		drm_dbg_kms(display->drm, "%s power well enable timeout\n",
			    intel_power_well_name(power_well));

		drm_WARN_ON(display->drm, !timeout_expected);
	}
}

static u32 hsw_power_well_requesters(struct intel_display *display,
				     const struct i915_power_well_regs *regs,
				     int pw_idx)
{
	u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 ret;

	ret = intel_de_read(display, regs->bios) & req_mask ? 1 : 0;
	ret |= intel_de_read(display, regs->driver) & req_mask ? 2 : 0;
	if (regs->kvmr.reg)
		ret |= intel_de_read(display, regs->kvmr) & req_mask ? 4 : 0;
	ret |= intel_de_read(display, regs->debug) & req_mask ? 8 : 0;

	return ret;
}

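/*
 * Illustrative note: the value returned above is a small bitmask of the four
 * possible requesters, matching the order used in the debug message in
 * hsw_wait_for_power_well_disable():
 *
 *	bit 0 - BIOS request register
 *	bit 1 - driver request register
 *	bit 2 - KVMR request register (if present)
 *	bit 3 - DEBUG request register
 *
 * e.g. a return value of 0x5 would mean both the BIOS and KVMR registers
 * still request the well to be on.
 */
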
static void hsw_wait_for_power_well_disable(struct intel_display *display,
					    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
	bool disabled;
	u32 reqs;

	/*
	 * Bspec doesn't require waiting for PWs to get disabled, but still do
	 * this for paranoia. The known cases where a PW will be forced on:
	 * - a KVMR request on any power well via the KVMR request register
	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
	 *   DEBUG request registers
	 * Skip the wait in case any of the request bits are set and print a
	 * diagnostic message.
	 */
	wait_for((disabled = !(intel_de_read(display, regs->driver) &
			       HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
		 (reqs = hsw_power_well_requesters(display, regs, pw_idx)), 1);
	if (disabled)
		return;

	drm_dbg_kms(display->drm,
		    "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
		    intel_power_well_name(power_well),
		    !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}

static void gen9_wait_for_power_well_fuses(struct intel_display *display,
					   enum skl_power_gate pg)
{
	/* Timeout 5us for PG#0, for other PGs 1us */
	drm_WARN_ON(display->drm,
		    intel_de_wait_for_set(display, SKL_FUSE_STATUS,
					  SKL_FUSE_PG_DIST_STATUS(pg), 1));
}

static void hsw_power_well_enable(struct intel_display *display,
				  struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;

	if (power_well->desc->has_fuses) {
		enum skl_power_gate pg;

		pg = DISPLAY_VER(display) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
						  SKL_PW_CTL_IDX_TO_PG(pw_idx);

		/* Wa_16013190616:adlp */
		if (display->platform.alderlake_p && pg == SKL_PG1)
			intel_de_rmw(display, GEN8_CHICKEN_DCPR_1, 0, DISABLE_FLR_SRC);

		/*
		 * For PW1 we have to wait both for the PW0/PG0 fuse state
		 * before enabling the power well and PW1/PG1's own fuse
		 * state after the enabling. For all other power wells with
		 * fuses we only have to wait for that PW/PG's fuse state
		 * after the enabling.
		 */
		if (pg == SKL_PG1)
			gen9_wait_for_power_well_fuses(display, SKL_PG0);
	}

	intel_de_rmw(display, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));

	hsw_wait_for_power_well_enable(display, power_well, false);

	if (power_well->desc->has_fuses) {
		enum skl_power_gate pg;

		pg = DISPLAY_VER(display) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
						  SKL_PW_CTL_IDX_TO_PG(pw_idx);
		gen9_wait_for_power_well_fuses(display, pg);
	}

	hsw_power_well_post_enable(display,
				   power_well->desc->irq_pipe_mask,
				   power_well->desc->has_vga);
}

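/*
 * Illustrative summary of the enable sequence above for a fused well, using
 * PW1 as the example:
 *
 *	1. wait for the PG0 fuse distribution status (PW1 only)
 *	2. set the driver REQ bit
 *	3. wait for the well's state ack (or the DG2 fixed delay)
 *	4. wait for the well's own PG fuse distribution status
 *	5. restore VGA I/O and re-enable pipe interrupts on the well
 */
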
static void hsw_power_well_disable(struct intel_display *display,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;

	hsw_power_well_pre_disable(display,
				   power_well->desc->irq_pipe_mask);

	intel_de_rmw(display, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
	hsw_wait_for_power_well_disable(display, power_well);
}

static bool intel_aux_ch_is_edp(struct intel_display *display, enum aux_ch aux_ch)
{
	struct intel_digital_port *dig_port = aux_ch_to_digital_port(display, aux_ch);

	return dig_port && dig_port->base.type == INTEL_OUTPUT_EDP;
}

static void
icl_combo_phy_aux_power_well_enable(struct intel_display *display,
				    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;

	drm_WARN_ON(display->drm, !display->platform.icelake);

	intel_de_rmw(display, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));

	/*
	 * FIXME not sure if we should derive the PHY from the pw_idx, or
	 * from the VBT defined AUX_CH->DDI->PHY mapping.
	 */
	intel_de_rmw(display, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)),
		     0, ICL_LANE_ENABLE_AUX);

	hsw_wait_for_power_well_enable(display, power_well, false);

	/* Display WA #1178: icl */
	if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
	    !intel_aux_ch_is_edp(display, ICL_AUX_PW_TO_CH(pw_idx)))
		intel_de_rmw(display, ICL_PORT_TX_DW6_AUX(ICL_AUX_PW_TO_PHY(pw_idx)),
			     0, O_FUNC_OVRD_EN | O_LDO_BYPASS_CRI);
}

static void
icl_combo_phy_aux_power_well_disable(struct intel_display *display,
				     struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;

	drm_WARN_ON(display->drm, !display->platform.icelake);

	/*
	 * FIXME not sure if we should derive the PHY from the pw_idx, or
	 * from the VBT defined AUX_CH->DDI->PHY mapping.
	 */
	intel_de_rmw(display, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)),
		     ICL_LANE_ENABLE_AUX, 0);

	intel_de_rmw(display, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);

	hsw_wait_for_power_well_disable(display, power_well);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static void icl_tc_port_assert_ref_held(struct intel_display *display,
					struct i915_power_well *power_well,
					struct intel_digital_port *dig_port)
{
	if (drm_WARN_ON(display->drm, !dig_port))
		return;

	if (DISPLAY_VER(display) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
		return;

	drm_WARN_ON(display->drm, !intel_tc_port_ref_held(dig_port));
}

#else

static void icl_tc_port_assert_ref_held(struct intel_display *display,
					struct i915_power_well *power_well,
					struct intel_digital_port *dig_port)
{
}

#endif

#define TGL_AUX_PW_TO_TC_PORT(pw_idx)	((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)

static void icl_tc_cold_exit(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	int ret, tries = 0;

	while (1) {
		ret = snb_pcode_write_timeout(&i915->uncore, ICL_PCODE_EXIT_TCCOLD, 0,
					      250, 1);
		if (ret != -EAGAIN || ++tries == 3)
			break;
		msleep(1);
	}

	/* Spec states that TC cold exit can take up to 1ms to complete */
	if (!ret)
		msleep(1);

	/* TODO: turn failure into an error as soon as i915 CI updates ICL IFWI */
	drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
						      "succeeded");
}

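/*
 * Illustrative summary of icl_tc_cold_exit() above: the exit request is
 * retried up to 3 times when pcode returns -EAGAIN, sleeping 1ms between
 * attempts, and on success we sleep another 1ms since the spec allows the
 * exit itself to take up to 1ms to complete.
 */
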
static void
icl_tc_phy_aux_power_well_enable(struct intel_display *display,
				 struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
	struct intel_digital_port *dig_port = aux_ch_to_digital_port(display, aux_ch);
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	bool is_tbt = power_well->desc->is_tc_tbt;
	bool timeout_expected;

	icl_tc_port_assert_ref_held(display, power_well, dig_port);

	intel_de_rmw(display, DP_AUX_CH_CTL(aux_ch),
		     DP_AUX_CH_CTL_TBT_IO, is_tbt ? DP_AUX_CH_CTL_TBT_IO : 0);

	intel_de_rmw(display, regs->driver,
		     0,
		     HSW_PWR_WELL_CTL_REQ(i915_power_well_instance(power_well)->hsw.idx));

	/*
	 * An AUX timeout is expected if the TBT DP tunnel is down, or if we
	 * need to enable AUX on a legacy TypeC port as part of the TC-cold
	 * exit sequence.
	 */
	timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port);
	if (DISPLAY_VER(display) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
		icl_tc_cold_exit(display);

	hsw_wait_for_power_well_enable(display, power_well, timeout_expected);

	if (DISPLAY_VER(display) >= 12 && !is_tbt) {
		enum tc_port tc_port;

		tc_port = TGL_AUX_PW_TO_TC_PORT(i915_power_well_instance(power_well)->hsw.idx);

		if (wait_for(intel_dkl_phy_read(display, DKL_CMN_UC_DW_27(tc_port)) &
			     DKL_CMN_UC_DW27_UC_HEALTH, 1))
			drm_warn(display->drm,
				 "Timeout waiting for TC uC health\n");
	}
}

static void
icl_aux_power_well_enable(struct intel_display *display,
			  struct i915_power_well *power_well)
{
	enum phy phy = icl_aux_pw_to_phy(display, power_well);

	if (intel_phy_is_tc(display, phy))
		return icl_tc_phy_aux_power_well_enable(display, power_well);
	else if (display->platform.icelake)
		return icl_combo_phy_aux_power_well_enable(display,
							   power_well);
	else
		return hsw_power_well_enable(display, power_well);
}

static void
icl_aux_power_well_disable(struct intel_display *display,
			   struct i915_power_well *power_well)
{
	enum phy phy = icl_aux_pw_to_phy(display, power_well);

	if (intel_phy_is_tc(display, phy))
		return hsw_power_well_disable(display, power_well);
	else if (display->platform.icelake)
		return icl_combo_phy_aux_power_well_disable(display,
							    power_well);
	else
		return hsw_power_well_disable(display, power_well);
}

/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct intel_display *display,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
		   HSW_PWR_WELL_CTL_STATE(pw_idx);
	u32 val;

	val = intel_de_read(display, regs->driver);

	/*
	 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
	 * and the MISC_IO PW will not be restored, so check instead for the
	 * BIOS's own request bits, which are forced-on for these power wells
	 * when exiting DC5/6.
	 */
	if (DISPLAY_VER(display) == 9 && !display->platform.broxton &&
	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
		val |= intel_de_read(display, regs->bios);

	return (val & mask) == mask;
}

static void assert_can_enable_dc9(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);

	drm_WARN_ONCE(display->drm,
		      (intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_DC9),
		      "DC9 already programmed to be enabled.\n");
	drm_WARN_ONCE(display->drm,
		      intel_de_read(display, DC_STATE_EN) &
		      DC_STATE_EN_UPTO_DC5,
		      "DC5 still not disabled to enable DC9.\n");
	drm_WARN_ONCE(display->drm,
		      intel_de_read(display, HSW_PWR_WELL_CTL2) &
		      HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
		      "Power well 2 on.\n");
	drm_WARN_ONCE(display->drm, intel_irqs_enabled(dev_priv),
		      "Interrupts not disabled yet.\n");

	/*
	 * TODO: check for the following to verify the conditions to enter DC9
	 * state are satisfied:
	 * 1] Check relevant display engine registers to verify if mode set
	 * disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void assert_can_disable_dc9(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);

	drm_WARN_ONCE(display->drm, intel_irqs_enabled(dev_priv),
		      "Interrupts not disabled yet.\n");
	drm_WARN_ONCE(display->drm,
		      intel_de_read(display, DC_STATE_EN) &
		      DC_STATE_EN_UPTO_DC5,
		      "DC5 still not disabled.\n");

	/*
	 * TODO: check for the following to verify DC9 state was indeed
	 * entered before programming to disable it:
	 * 1] Check relevant display engine registers to verify if mode
	 * set disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void gen9_write_dc_state(struct intel_display *display,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	intel_de_write(display, DC_STATE_EN, state);

	/*
	 * It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and dmc keeps returning the old value. Make sure
	 * the write really sticks enough times and also force rewrites until
	 * we are confident that the state is exactly what we want.
	 */
	do {
		v = intel_de_read(display, DC_STATE_EN);

		if (v != state) {
			intel_de_write(display, DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		drm_err(display->drm,
			"Writing dc state to 0x%x failed, now 0x%x\n",
			state, v);

	/* Most of the time a single retry is enough, so avoid log spam */
	if (rewrites > 1)
		drm_dbg_kms(display->drm,
			    "Rewrote dc state to 0x%x %d times\n",
			    state, rewrites);
}

static u32 gen9_dc_mask(struct intel_display *display)
{
	u32 mask;

	mask = DC_STATE_EN_UPTO_DC5;

	if (DISPLAY_VER(display) >= 12)
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
					  | DC_STATE_EN_DC9;
	else if (DISPLAY_VER(display) == 11)
		mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
	else if (display->platform.geminilake || display->platform.broxton)
		mask |= DC_STATE_EN_DC9;
	else
		mask |= DC_STATE_EN_UPTO_DC6;

	return mask;
}

void gen9_sanitize_dc_state(struct intel_display *display)
{
	struct i915_power_domains *power_domains = &display->power.domains;
	u32 val;

	if (!HAS_DISPLAY(display))
		return;

	val = intel_de_read(display, DC_STATE_EN) & gen9_dc_mask(display);

	drm_dbg_kms(display->drm,
		    "Resetting DC state tracking from %02x to %02x\n",
		    power_domains->dc_state, val);
	power_domains->dc_state = val;
}

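/*
 * Illustrative examples of the mask computed by gen9_dc_mask() above,
 * grouped per the checks in that function:
 *
 *	DISPLAY_VER >= 12:  DC5 | DC6 | DC3CO | DC9
 *	DISPLAY_VER == 11:  DC5 | DC6 | DC9
 *	bxt/glk:            DC5 | DC9
 *	other gen9:         DC5 | DC6
 */
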
/**
 * gen9_set_dc_state - set target display C power state
 * @display: display instance
 * @state: target DC power state
 * - DC_STATE_DISABLE
 * - DC_STATE_EN_UPTO_DC5
 * - DC_STATE_EN_UPTO_DC6
 * - DC_STATE_EN_DC9
 *
 * Signal to DMC firmware/HW the target DC power state passed in @state.
 * DMC/HW can turn off individual display clocks and power rails when entering
 * a deeper DC power state (higher in number) and turns these back on when
 * exiting that state to a shallower power state (lower in number). The HW
 * will decide when to actually enter a given state on an on-demand basis, for
 * instance depending on the active state of display pipes. The state of
 * display registers backed by affected power rails is saved/restored as
 * needed.
 *
 * Based on the above, enabling a deeper DC power state is asynchronous wrt.
 * the HW actually entering it. Disabling a deeper power state is synchronous:
 * for instance setting %DC_STATE_DISABLE won't complete until all HW
 * resources are turned back on and register state is restored. This is
 * guaranteed by the MMIO write to DC_STATE_EN blocking until the state is
 * restored.
 */
void gen9_set_dc_state(struct intel_display *display, u32 state)
{
	struct i915_power_domains *power_domains = &display->power.domains;
	bool dc6_was_enabled, enable_dc6;
	u32 mask;
	u32 val;

	if (!HAS_DISPLAY(display))
		return;

	if (drm_WARN_ON_ONCE(display->drm,
			     state & ~power_domains->allowed_dc_mask))
		state &= power_domains->allowed_dc_mask;

	val = intel_de_read(display, DC_STATE_EN);
	mask = gen9_dc_mask(display);
	drm_dbg_kms(display->drm, "Setting DC state from %02x to %02x\n",
		    val & mask, state);

	/* Check if DMC is ignoring our DC state requests */
	if ((val & mask) != power_domains->dc_state)
		drm_err(display->drm, "DC state mismatch (0x%x -> 0x%x)\n",
			power_domains->dc_state, val & mask);

	enable_dc6 = state & DC_STATE_EN_UPTO_DC6;
	dc6_was_enabled = val & DC_STATE_EN_UPTO_DC6;
	if (!dc6_was_enabled && enable_dc6)
		intel_dmc_update_dc6_allowed_count(display, true);

	val &= ~mask;
	val |= state;

	gen9_write_dc_state(display, val);

	if (!enable_dc6 && dc6_was_enabled)
		intel_dmc_update_dc6_allowed_count(display, false);

	power_domains->dc_state = val & mask;
}

static void tgl_enable_dc3co(struct intel_display *display)
{
	drm_dbg_kms(display->drm, "Enabling DC3CO\n");
	gen9_set_dc_state(display, DC_STATE_EN_DC3CO);
}

static void tgl_disable_dc3co(struct intel_display *display)
{
	drm_dbg_kms(display->drm, "Disabling DC3CO\n");
	intel_de_rmw(display, DC_STATE_EN, DC_STATE_DC3CO_STATUS, 0);
	gen9_set_dc_state(display, DC_STATE_DISABLE);
	/*
	 * Delay of 200us DC3CO Exit time B.Spec 49196
	 */
	usleep_range(200, 210);
}

static void assert_can_enable_dc5(struct intel_display *display)
{
	struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm);
	enum i915_power_well_id high_pg;

	/* Power wells at this level and above must be disabled for DC5 entry */
	if (DISPLAY_VER(display) == 12)
		high_pg = ICL_DISP_PW_3;
	else
		high_pg = SKL_DISP_PW_2;

	drm_WARN_ONCE(display->drm,
		      intel_display_power_well_is_enabled(display, high_pg),
		      "Power wells above platform's DC5 limit still enabled.\n");

	drm_WARN_ONCE(display->drm,
		      (intel_de_read(display, DC_STATE_EN) &
		       DC_STATE_EN_UPTO_DC5),
		      "DC5 already programmed to be enabled.\n");

	assert_display_rpm_held(display);

	assert_dmc_loaded(display);
}

void gen9_enable_dc5(struct intel_display *display)
{
	assert_can_enable_dc5(display);

	drm_dbg_kms(display->drm, "Enabling DC5\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (DISPLAY_VER(display) == 9 && !display->platform.broxton)
		intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
			     0, SKL_SELECT_ALTERNATE_DC_EXIT);

	intel_dmc_wl_enable(display, DC_STATE_EN_UPTO_DC5);

	gen9_set_dc_state(display, DC_STATE_EN_UPTO_DC5);
}

static void assert_can_enable_dc6(struct intel_display *display)
{
	drm_WARN_ONCE(display->drm,
		      (intel_de_read(display, UTIL_PIN_CTL) &
		       (UTIL_PIN_ENABLE | UTIL_PIN_MODE_MASK)) ==
		      (UTIL_PIN_ENABLE | UTIL_PIN_MODE_PWM),
		      "Utility pin enabled in PWM mode\n");
	drm_WARN_ONCE(display->drm,
		      (intel_de_read(display, DC_STATE_EN) &
		       DC_STATE_EN_UPTO_DC6),
		      "DC6 already programmed to be enabled.\n");

	assert_dmc_loaded(display);
}

void skl_enable_dc6(struct intel_display *display)
{
	assert_can_enable_dc6(display);

	drm_dbg_kms(display->drm, "Enabling DC6\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (DISPLAY_VER(display) == 9 && !display->platform.broxton)
		intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
			     0, SKL_SELECT_ALTERNATE_DC_EXIT);

	intel_dmc_wl_enable(display, DC_STATE_EN_UPTO_DC6);

	gen9_set_dc_state(display, DC_STATE_EN_UPTO_DC6);
}

void bxt_enable_dc9(struct intel_display *display)
{
	assert_can_enable_dc9(display);

	drm_dbg_kms(display->drm, "Enabling DC9\n");
	/*
	 * Power sequencer reset is needed on BXT/GLK, because the PPS registers
	 * aren't always on, unlike with South Display Engine on PCH.
	 */
	if (display->platform.broxton || display->platform.geminilake)
		bxt_pps_reset_all(display);
	gen9_set_dc_state(display, DC_STATE_EN_DC9);
}

void bxt_disable_dc9(struct intel_display *display)
{
	assert_can_disable_dc9(display);

	drm_dbg_kms(display->drm, "Disabling DC9\n");

	gen9_set_dc_state(display, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(display);
}

static void hsw_power_well_sync_hw(struct intel_display *display,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 bios_req = intel_de_read(display, regs->bios);

	/* Take over the request bit if set by BIOS. */
	if (bios_req & mask) {
		u32 drv_req = intel_de_read(display, regs->driver);

		if (!(drv_req & mask))
			intel_de_write(display, regs->driver, drv_req | mask);
		intel_de_write(display, regs->bios, bios_req & ~mask);
	}
}

static void bxt_dpio_cmn_power_well_enable(struct intel_display *display,
					   struct i915_power_well *power_well)
{
	bxt_dpio_phy_init(display, i915_power_well_instance(power_well)->bxt.phy);
}

static void bxt_dpio_cmn_power_well_disable(struct intel_display *display,
					    struct i915_power_well *power_well)
{
	bxt_dpio_phy_uninit(display, i915_power_well_instance(power_well)->bxt.phy);
}

static bool bxt_dpio_cmn_power_well_enabled(struct intel_display *display,
					    struct i915_power_well *power_well)
{
	return bxt_dpio_phy_is_enabled(display, i915_power_well_instance(power_well)->bxt.phy);
}

static void bxt_verify_dpio_phy_power_wells(struct intel_display *display)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(display, BXT_DISP_PW_DPIO_CMN_A);
	if (intel_power_well_refcount(power_well) > 0)
		bxt_dpio_phy_verify_state(display, i915_power_well_instance(power_well)->bxt.phy);

	power_well = lookup_power_well(display, VLV_DISP_PW_DPIO_CMN_BC);
	if (intel_power_well_refcount(power_well) > 0)
		bxt_dpio_phy_verify_state(display, i915_power_well_instance(power_well)->bxt.phy);

	if (display->platform.geminilake) {
		power_well = lookup_power_well(display,
					       GLK_DISP_PW_DPIO_CMN_C);
		if (intel_power_well_refcount(power_well) > 0)
			bxt_dpio_phy_verify_state(display,
						  i915_power_well_instance(power_well)->bxt.phy);
	}
}

static bool gen9_dc_off_power_well_enabled(struct intel_display *display,
					   struct i915_power_well *power_well)
{
	return ((intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
		(intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
}

static void gen9_assert_dbuf_enabled(struct intel_display *display)
{
	u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(display);
	u8 enabled_dbuf_slices = display->dbuf.enabled_slices;

	drm_WARN(display->drm,
		 hw_enabled_dbuf_slices != enabled_dbuf_slices,
		 "Unexpected DBuf power state (0x%08x, expected 0x%08x)\n",
		 hw_enabled_dbuf_slices,
		 enabled_dbuf_slices);
}

void gen9_disable_dc_states(struct intel_display *display)
{
	struct i915_power_domains *power_domains = &display->power.domains;
	struct intel_cdclk_config cdclk_config = {};
	u32 old_state = power_domains->dc_state;

	if (power_domains->target_dc_state == DC_STATE_EN_DC3CO) {
		tgl_disable_dc3co(display);
		return;
	}

	if (HAS_DISPLAY(display)) {
		intel_dmc_wl_get_noreg(display);
		gen9_set_dc_state(display, DC_STATE_DISABLE);
		intel_dmc_wl_put_noreg(display);
	} else {
		gen9_set_dc_state(display, DC_STATE_DISABLE);
		return;
	}

	if (old_state == DC_STATE_EN_UPTO_DC5 ||
	    old_state == DC_STATE_EN_UPTO_DC6)
		intel_dmc_wl_disable(display);

	intel_cdclk_get_cdclk(display, &cdclk_config);
	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
	drm_WARN_ON(display->drm,
		    intel_cdclk_clock_changed(&display->cdclk.hw,
					      &cdclk_config));

	gen9_assert_dbuf_enabled(display);

	if (display->platform.geminilake || display->platform.broxton)
		bxt_verify_dpio_phy_power_wells(display);

	if (DISPLAY_VER(display) >= 11)
		/*
		 * DMC retains HW context only for port A, the other combo
		 * PHY's HW context for port B is lost after DC transitions,
		 * so we need to restore it manually.
		 */
		intel_combo_phy_init(display);
}

static void gen9_dc_off_power_well_enable(struct intel_display *display,
					  struct i915_power_well *power_well)
{
	gen9_disable_dc_states(display);
}

static void gen9_dc_off_power_well_disable(struct intel_display *display,
					   struct i915_power_well *power_well)
{
	struct i915_power_domains *power_domains = &display->power.domains;

	if (!intel_dmc_has_payload(display))
		return;

	switch (power_domains->target_dc_state) {
	case DC_STATE_EN_DC3CO:
		tgl_enable_dc3co(display);
		break;
	case DC_STATE_EN_UPTO_DC6:
		skl_enable_dc6(display);
		break;
	case DC_STATE_EN_UPTO_DC5:
		gen9_enable_dc5(display);
		break;
	}
}

static void i9xx_power_well_sync_hw_noop(struct intel_display *display,
					 struct i915_power_well *power_well)
{
}

static void i9xx_always_on_power_well_noop(struct intel_display *display,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct intel_display *display,
					      struct i915_power_well *power_well)
{
	return true;
}

static void i830_pipes_power_well_enable(struct intel_display *display,
					 struct i915_power_well *power_well)
{
	if ((intel_de_read(display, TRANSCONF(display, PIPE_A)) & TRANSCONF_ENABLE) == 0)
		i830_enable_pipe(display, PIPE_A);
	if ((intel_de_read(display, TRANSCONF(display, PIPE_B)) & TRANSCONF_ENABLE) == 0)
		i830_enable_pipe(display, PIPE_B);
}

static void i830_pipes_power_well_disable(struct intel_display *display,
					  struct i915_power_well *power_well)
{
	i830_disable_pipe(display, PIPE_B);
	i830_disable_pipe(display, PIPE_A);
}

static bool i830_pipes_power_well_enabled(struct intel_display *display,
					  struct i915_power_well *power_well)
{
	return intel_de_read(display, TRANSCONF(display, PIPE_A)) & TRANSCONF_ENABLE &&
	       intel_de_read(display, TRANSCONF(display, PIPE_B)) & TRANSCONF_ENABLE;
}

static void i830_pipes_power_well_sync_hw(struct intel_display *display,
					  struct i915_power_well *power_well)
{
	if (intel_power_well_refcount(power_well) > 0)
		i830_pipes_power_well_enable(display, power_well);
	else
		i830_pipes_power_well_disable(display, power_well);
}

static void vlv_set_power_well(struct intel_display *display,
			       struct i915_power_well *power_well, bool enable)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	int pw_idx = i915_power_well_instance(power_well)->vlv.idx;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
			 PUNIT_PWRGT_PWR_GATE(pw_idx);

	vlv_punit_get(dev_priv);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		drm_err(display->drm,
			"timeout setting power well state %08x (%08x)\n",
			state,
			vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	vlv_punit_put(dev_priv);
}

static void vlv_power_well_enable(struct intel_display *display,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(display, power_well, true);
}

static void vlv_power_well_disable(struct intel_display *display,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(display, power_well, false);
}

static bool vlv_power_well_enabled(struct intel_display *display,
				   struct i915_power_well *power_well)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	int pw_idx = i915_power_well_instance(power_well)->vlv.idx;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);

	vlv_punit_get(dev_priv);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	drm_WARN_ON(display->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
		    state != PUNIT_PWRGT_PWR_GATE(pw_idx));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	drm_WARN_ON(display->drm, ctrl != state);

	vlv_punit_put(dev_priv);

	return enabled;
}

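/*
 * Illustrative note on the COND idiom used above: wait_for() re-evaluates
 * its condition expression on every poll iteration, so wrapping the Punit
 * status check in a local macro keeps the initial fast-path check and the
 * polled check literally identical:
 *
 *	if (COND)
 *		goto out;
 *	...
 *	if (wait_for(COND, 100))
 *		... report timeout ...
 */
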
static void vlv_init_display_clock_gating(struct intel_display *display)
{
	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	intel_de_rmw(display, DSPCLK_GATE_D(display),
		     ~DPOUNIT_CLOCK_GATE_DISABLE, VRHUNIT_CLOCK_GATE_DISABLE);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	intel_de_write(display, MI_ARB_VLV,
		       MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	intel_de_write(display, CBR1_VLV, 0);

	drm_WARN_ON(display->drm, DISPLAY_RUNTIME_INFO(display)->rawclk_freq == 0);
	intel_de_write(display, RAWCLK_FREQ_VLV,
		       DIV_ROUND_CLOSEST(DISPLAY_RUNTIME_INFO(display)->rawclk_freq,
					 1000));
}

static void vlv_display_power_well_init(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(display, pipe) {
		u32 val = intel_de_read(display, DPLL(display, pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		intel_de_write(display, DPLL(display, pipe), val);
	}

	vlv_init_display_clock_gating(display);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(display);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (display->power.domains.initializing)
		return;

	intel_hpd_init(display);
	intel_hpd_poll_disable(display);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(display->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	intel_vga_redisable_power_on(display);

	intel_pps_unlock_regs_wa(display);
}

static void vlv_display_power_well_deinit(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(display);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);

	vlv_pps_reset_all(display);

	/* Prevent us from re-enabling polling by accident in late suspend */
	if (!display->drm->dev->power.is_suspended)
		intel_hpd_poll_enable(display);
}

static void vlv_display_power_well_enable(struct intel_display *display,
					  struct i915_power_well *power_well)
{
	vlv_set_power_well(display, power_well, true);

	vlv_display_power_well_init(display);
}

static void vlv_display_power_well_disable(struct intel_display *display,
					   struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(display);

	vlv_set_power_well(display, power_well, false);
}

static void vlv_dpio_cmn_power_well_enable(struct intel_display *display,
					   struct i915_power_well *power_well)
{
	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(display, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
	 *    a. GUnit 0x2110 bit[0] set to 1 (def 0)
	 *    b. The other bits such as sfr settings / modesel may all
	 *       be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	intel_de_rmw(display, DPIO_CTL, 0, DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct intel_display *display,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	for_each_pipe(display, pipe)
		assert_pll_disabled(display, pipe);

	/* Assert common reset */
	intel_de_rmw(display, DPIO_CTL, DPIO_CMNRST, 0);

	vlv_set_power_well(display, power_well, false);
}

#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

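/*
 * Illustrative example: BITS_SET() checks that *all* of the given bits are
 * set, not just any of them, e.g.
 *
 *	BITS_SET(0b1010, 0b0010)	-> true
 *	BITS_SET(0b1010, 0b0110)	-> false (bit 2 is clear)
 */
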
static void assert_chv_phy_status(struct intel_display *display)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(display, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(display, CHV_DISP_PW_DPIO_CMN_D);
	u32 phy_control = display->power.chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!display->power.chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!display->power.chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (intel_power_well_is_enabled(display, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (intel_de_read(display, DPLL(display, PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (intel_power_well_is_enabled(display, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_de_wait(display, DISPLAY_PHY_STATUS,
			  phy_status_mask, phy_status, 10))
		drm_err(display->drm,
			"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			intel_de_read(display, DISPLAY_PHY_STATUS) & phy_status_mask,
			phy_status, display->power.chv_phy_control);
}

#undef BITS_SET

static void chv_dpio_cmn_power_well_enable(struct intel_display *display,
					   struct i915_power_well *power_well)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
	enum dpio_phy phy;
	u32 tmp;

	drm_WARN_ON_ONCE(display->drm,
			 id != VLV_DISP_PW_DPIO_CMN_BC &&
			 id != CHV_DISP_PW_DPIO_CMN_D);

	if (id == VLV_DISP_PW_DPIO_CMN_BC)
		phy = DPIO_PHY0;
	else
		phy = DPIO_PHY1;

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(display, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_de_wait_for_set(display, DISPLAY_PHY_STATUS,
				  PHY_POWERGOOD(phy), 1))
		drm_err(display->drm, "Display PHY %d did not power up\n",
			phy);

	vlv_dpio_get(dev_priv);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
	       DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, phy, CHV_CMN_DW28, tmp);

	if (id == VLV_DISP_PW_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, phy, CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, phy, CHV_CMN_DW30, tmp);
	}

	vlv_dpio_put(dev_priv);

	display->power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	intel_de_write(display, DISPLAY_PHY_CONTROL,
		       display->power.chv_phy_control);

	drm_dbg_kms(display->drm,
		    "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		    phy, display->power.chv_phy_control);

	assert_chv_phy_status(display);
}

static void chv_dpio_cmn_power_well_disable(struct intel_display *display,
					    struct i915_power_well *power_well)
{
	enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
	enum dpio_phy phy;

	drm_WARN_ON_ONCE(display->drm,
			 id != VLV_DISP_PW_DPIO_CMN_BC &&
			 id != CHV_DISP_PW_DPIO_CMN_D);

	if (id == VLV_DISP_PW_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(display, PIPE_A);
		assert_pll_disabled(display, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(display, PIPE_C);
	}

	display->power.chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	intel_de_write(display, DISPLAY_PHY_CONTROL,
		       display->power.chv_phy_control);

	vlv_set_power_well(display, power_well, false);

	drm_dbg_kms(display->drm,
		    "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		    phy, display->power.chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	display->power.chv_phy_assert[phy] = true;

	assert_chv_phy_status(display);
}

static void assert_chv_phy_powergate(struct intel_display *display, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!display->power.chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = CHV_CMN_DW0_CH0;
	else
		reg = CHV_CMN_DW6_CH1;

	vlv_dpio_get(dev_priv);
	val = vlv_dpio_read(dev_priv, phy, reg);
	vlv_dpio_put(dev_priv);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If the CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	if (ch == DPIO_CH0)
		actual = REG_FIELD_GET(DPIO_ANYDL_POWERDOWN_CH0 |
				       DPIO_ALLDL_POWERDOWN_CH0, val);
	else
		actual = REG_FIELD_GET(DPIO_ANYDL_POWERDOWN_CH1 |
				       DPIO_ALLDL_POWERDOWN_CH1, val);

	drm_WARN(display->drm, actual != expected,
		 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
		 !!(actual & DPIO_ALLDL_POWERDOWN),
		 !!(actual & DPIO_ANYDL_POWERDOWN),
		 !!(expected & DPIO_ALLDL_POWERDOWN),
		 !!(expected & DPIO_ANYDL_POWERDOWN),
		 reg, val);
}

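/*
 * Illustrative summary of the expected lane power down bits derived above:
 *
 *	override  mask   expected
 *	--------  ----   -----------------------------------
 *	false     any    ALLDL | ANYDL (or 0 if CH1 reads 0)
 *	true      0xf    ALLDL | ANYDL
 *	true      other  ANYDL (some, but not all, lanes up)
 *	true      0x0    0
 */
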
bool chv_phy_powergate_ch(struct intel_display *display, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &display->power.domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = display->power.chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	if (override == was_override)
		goto out;

	if (override)
		display->power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		display->power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	intel_de_write(display, DISPLAY_PHY_CONTROL,
		       display->power.chv_phy_control);

	drm_dbg_kms(display->drm,
		    "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		    phy, ch, display->power.chv_phy_control);

	assert_chv_phy_status(display);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}

void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct intel_display *display = to_intel_display(encoder);
	struct i915_power_domains *power_domains = &display->power.domains;
	enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
	enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));

	mutex_lock(&power_domains->lock);

	display->power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	display->power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		display->power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		display->power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	intel_de_write(display, DISPLAY_PHY_CONTROL,
		       display->power.chv_phy_control);

	drm_dbg_kms(display->drm,
		    "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		    phy, ch, mask, display->power.chv_phy_control);

	assert_chv_phy_status(display);

	assert_chv_phy_powergate(display, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}

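/*
 * Illustrative sketch only: chv_phy_powergate_ch() returns the previous
 * override state so a caller can temporarily force the override and then
 * restore whatever was there before, e.g.
 *
 *	bool was_override;
 *
 *	was_override = chv_phy_powergate_ch(display, phy, ch, true);
 *	... do work that needs the lanes powered ...
 *	chv_phy_powergate_ch(display, phy, ch, was_override);
 */
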
static bool chv_pipe_power_well_enabled(struct intel_display *display,
					struct i915_power_well *power_well)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	enum pipe pipe = PIPE_A;
	bool enabled;
	u32 state, ctrl;

	vlv_punit_get(dev_priv);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	drm_WARN_ON(display->drm, state != DP_SSS_PWR_ON(pipe) &&
		    state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
	drm_WARN_ON(display->drm, ctrl << 16 != state);

	vlv_punit_put(dev_priv);

	return enabled;
}

static void chv_set_pipe_power_well(struct intel_display *display,
				    struct i915_power_well *power_well,
				    bool enable)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	enum pipe pipe = PIPE_A;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	vlv_punit_get(dev_priv);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);

	if (wait_for(COND, 100))
		drm_err(display->drm,
			"timeout setting power well state %08x (%08x)\n",
			state,
			vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));

#undef COND

out:
	vlv_punit_put(dev_priv);
}

static void chv_pipe_power_well_sync_hw(struct intel_display *display,
					struct i915_power_well *power_well)
{
	intel_de_write(display, DISPLAY_PHY_CONTROL,
		       display->power.chv_phy_control);
}

static void chv_pipe_power_well_enable(struct intel_display *display,
				       struct i915_power_well *power_well)
{
	chv_set_pipe_power_well(display, power_well, true);

	vlv_display_power_well_init(display);
}

static void chv_pipe_power_well_disable(struct intel_display *display,
					struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(display);

	chv_set_pipe_power_well(display, power_well, false);
}

static void
tgl_tc_cold_request(struct intel_display *display, bool block)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	u8 tries = 0;
	int ret;

	while (1) {
		u32 low_val;
		u32 high_val = 0;

		if (block)
			low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ;
		else
			low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ;

		/*
		 * Spec states that we should time out the request after 200us
		 * but the function below will time out after 500us
		 */
		ret = snb_pcode_read(&i915->uncore, TGL_PCODE_TCCOLD, &low_val, &high_val);
		if (ret == 0) {
			if (block &&
			    (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
				ret = -EIO;
			else
				break;
		}

		if (++tries == 3)
			break;

		msleep(1);
	}

	if (ret)
		drm_err(&i915->drm, "TC cold %sblock failed\n",
			block ? "" : "un");
	else
		drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n",
			    block ? "" : "un");
}

"" : "un"); 1797 } 1798 1799 static void 1800 tgl_tc_cold_off_power_well_enable(struct intel_display *display, 1801 struct i915_power_well *power_well) 1802 { 1803 tgl_tc_cold_request(display, true); 1804 } 1805 1806 static void 1807 tgl_tc_cold_off_power_well_disable(struct intel_display *display, 1808 struct i915_power_well *power_well) 1809 { 1810 tgl_tc_cold_request(display, false); 1811 } 1812 1813 static void 1814 tgl_tc_cold_off_power_well_sync_hw(struct intel_display *display, 1815 struct i915_power_well *power_well) 1816 { 1817 if (intel_power_well_refcount(power_well) > 0) 1818 tgl_tc_cold_off_power_well_enable(display, power_well); 1819 else 1820 tgl_tc_cold_off_power_well_disable(display, power_well); 1821 } 1822 1823 static bool 1824 tgl_tc_cold_off_power_well_is_enabled(struct intel_display *display, 1825 struct i915_power_well *power_well) 1826 { 1827 /* 1828 * Not the correctly implementation but there is no way to just read it 1829 * from PCODE, so returning count to avoid state mismatch errors 1830 */ 1831 return intel_power_well_refcount(power_well); 1832 } 1833 1834 static void xelpdp_aux_power_well_enable(struct intel_display *display, 1835 struct i915_power_well *power_well) 1836 { 1837 enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch; 1838 enum phy phy = icl_aux_pw_to_phy(display, power_well); 1839 1840 if (intel_phy_is_tc(display, phy)) 1841 icl_tc_port_assert_ref_held(display, power_well, 1842 aux_ch_to_digital_port(display, aux_ch)); 1843 1844 intel_de_rmw(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch), 1845 XELPDP_DP_AUX_CH_CTL_POWER_REQUEST, 1846 XELPDP_DP_AUX_CH_CTL_POWER_REQUEST); 1847 1848 /* 1849 * The power status flag cannot be used to determine whether aux 1850 * power wells have finished powering up. Instead we're 1851 * expected to just wait a fixed 600us after raising the request 1852 * bit. 
static void xelpdp_aux_power_well_enable(struct intel_display *display,
					 struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
	enum phy phy = icl_aux_pw_to_phy(display, power_well);

	if (intel_phy_is_tc(display, phy))
		icl_tc_port_assert_ref_held(display, power_well,
					    aux_ch_to_digital_port(display, aux_ch));

	intel_de_rmw(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch),
		     XELPDP_DP_AUX_CH_CTL_POWER_REQUEST,
		     XELPDP_DP_AUX_CH_CTL_POWER_REQUEST);

	/*
	 * The power status flag cannot be used to determine whether aux
	 * power wells have finished powering up. Instead we're
	 * expected to just wait a fixed 600us after raising the request
	 * bit.
	 */
	usleep_range(600, 1200);
}

static void xelpdp_aux_power_well_disable(struct intel_display *display,
					  struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;

	intel_de_rmw(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch),
		     XELPDP_DP_AUX_CH_CTL_POWER_REQUEST,
		     0);
	usleep_range(10, 30);
}

static bool xelpdp_aux_power_well_enabled(struct intel_display *display,
					  struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;

	return intel_de_read(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch)) &
	       XELPDP_DP_AUX_CH_CTL_POWER_STATUS;
}

static void xe2lpd_pica_power_well_enable(struct intel_display *display,
					  struct i915_power_well *power_well)
{
	intel_de_write(display, XE2LPD_PICA_PW_CTL,
		       XE2LPD_PICA_CTL_POWER_REQUEST);

	if (intel_de_wait_for_set(display, XE2LPD_PICA_PW_CTL,
				  XE2LPD_PICA_CTL_POWER_STATUS, 1)) {
		drm_dbg_kms(display->drm, "pica power well enable timeout\n");

		drm_WARN(display->drm, 1, "Power well PICA timeout when enabled");
	}
}

static void xe2lpd_pica_power_well_disable(struct intel_display *display,
					   struct i915_power_well *power_well)
{
	intel_de_write(display, XE2LPD_PICA_PW_CTL, 0);

	if (intel_de_wait_for_clear(display, XE2LPD_PICA_PW_CTL,
				    XE2LPD_PICA_CTL_POWER_STATUS, 1)) {
		drm_dbg_kms(display->drm, "pica power well disable timeout\n");

		drm_WARN(display->drm, 1, "Power well PICA timeout when disabled");
	}
}

static bool xe2lpd_pica_power_well_enabled(struct intel_display *display,
					   struct i915_power_well *power_well)
{
	return intel_de_read(display, XE2LPD_PICA_PW_CTL) &
	       XE2LPD_PICA_CTL_POWER_STATUS;
}
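/*
 * Power well ops tables: these bind the platform specific sync_hw /
 * enable / disable / is_enabled implementations (defined above and
 * earlier in this file) into the vtables that the power well code
 * dispatches through.
 */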
const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

const struct i915_power_well_ops i830_pipes_power_well_ops = {
	.sync_hw = i830_pipes_power_well_sync_hw,
	.enable = i830_pipes_power_well_enable,
	.disable = i830_pipes_power_well_disable,
	.is_enabled = i830_pipes_power_well_enabled,
};

static const struct i915_power_well_regs hsw_power_well_regs = {
	.bios = HSW_PWR_WELL_CTL1,
	.driver = HSW_PWR_WELL_CTL2,
	.kvmr = HSW_PWR_WELL_CTL3,
	.debug = HSW_PWR_WELL_CTL4,
};

const struct i915_power_well_ops hsw_power_well_ops = {
	.regs = &hsw_power_well_regs,
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};

const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = bxt_dpio_cmn_power_well_enable,
	.disable = bxt_dpio_cmn_power_well_disable,
	.is_enabled = bxt_dpio_cmn_power_well_enabled,
};

const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_regs icl_aux_power_well_regs = {
	.bios = ICL_PWR_WELL_CTL_AUX1,
	.driver = ICL_PWR_WELL_CTL_AUX2,
	.debug = ICL_PWR_WELL_CTL_AUX4,
};

const struct i915_power_well_ops icl_aux_power_well_ops = {
	.regs = &icl_aux_power_well_regs,
	.sync_hw = hsw_power_well_sync_hw,
	.enable = icl_aux_power_well_enable,
	.disable = icl_aux_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static const struct i915_power_well_regs icl_ddi_power_well_regs = {
	.bios = ICL_PWR_WELL_CTL_DDI1,
	.driver = ICL_PWR_WELL_CTL_DDI2,
	.debug = ICL_PWR_WELL_CTL_DDI4,
};

const struct i915_power_well_ops icl_ddi_power_well_ops = {
	.regs = &icl_ddi_power_well_regs,
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

const struct i915_power_well_ops tgl_tc_cold_off_ops = {
	.sync_hw = tgl_tc_cold_off_power_well_sync_hw,
	.enable = tgl_tc_cold_off_power_well_enable,
	.disable = tgl_tc_cold_off_power_well_disable,
	.is_enabled = tgl_tc_cold_off_power_well_is_enabled,
};

const struct i915_power_well_ops xelpdp_aux_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = xelpdp_aux_power_well_enable,
	.disable = xelpdp_aux_power_well_disable,
	.is_enabled = xelpdp_aux_power_well_enabled,
};

const struct i915_power_well_ops xe2lpd_pica_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = xe2lpd_pica_power_well_enable,
	.disable = xe2lpd_pica_power_well_disable,
	.is_enabled = xe2lpd_pica_power_well_enabled,
};
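/*
 * Note: none of the ops tables above are used directly here; the
 * per-platform power well descriptor lists (see
 * intel_display_power_map.c) are expected to reference them through the
 * descriptor's ops pointer.
 */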