1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright © 2022 Intel Corporation 4 */ 5 6 #include "i915_drv.h" 7 #include "i915_irq.h" 8 #include "i915_reg.h" 9 #include "intel_backlight_regs.h" 10 #include "intel_combo_phy.h" 11 #include "intel_combo_phy_regs.h" 12 #include "intel_crt.h" 13 #include "intel_de.h" 14 #include "intel_display_irq.h" 15 #include "intel_display_power_well.h" 16 #include "intel_display_types.h" 17 #include "intel_dkl_phy.h" 18 #include "intel_dkl_phy_regs.h" 19 #include "intel_dmc.h" 20 #include "intel_dmc_wl.h" 21 #include "intel_dp_aux_regs.h" 22 #include "intel_dpio_phy.h" 23 #include "intel_dpll.h" 24 #include "intel_hotplug.h" 25 #include "intel_pcode.h" 26 #include "intel_pps.h" 27 #include "intel_tc.h" 28 #include "intel_vga.h" 29 #include "skl_watermark.h" 30 #include "vlv_sideband.h" 31 #include "vlv_sideband_reg.h" 32 33 struct i915_power_well_regs { 34 i915_reg_t bios; 35 i915_reg_t driver; 36 i915_reg_t kvmr; 37 i915_reg_t debug; 38 }; 39 40 struct i915_power_well_ops { 41 const struct i915_power_well_regs *regs; 42 /* 43 * Synchronize the well's hw state to match the current sw state, for 44 * example enable/disable it based on the current refcount. Called 45 * during driver init and resume time, possibly after first calling 46 * the enable/disable handlers. 47 */ 48 void (*sync_hw)(struct drm_i915_private *i915, 49 struct i915_power_well *power_well); 50 /* 51 * Enable the well and resources that depend on it (for example 52 * interrupts located on the well). Called after the 0->1 refcount 53 * transition. 54 */ 55 void (*enable)(struct drm_i915_private *i915, 56 struct i915_power_well *power_well); 57 /* 58 * Disable the well and resources that depend on it. Called after 59 * the 1->0 refcount transition. 60 */ 61 void (*disable)(struct drm_i915_private *i915, 62 struct i915_power_well *power_well); 63 /* Returns the hw enabled state. */ 64 bool (*is_enabled)(struct drm_i915_private *i915, 65 struct i915_power_well *power_well); 66 }; 67 68 static const struct i915_power_well_instance * 69 i915_power_well_instance(const struct i915_power_well *power_well) 70 { 71 return &power_well->desc->instances->list[power_well->instance_idx]; 72 } 73 74 struct i915_power_well * 75 lookup_power_well(struct drm_i915_private *i915, 76 enum i915_power_well_id power_well_id) 77 { 78 struct i915_power_well *power_well; 79 80 for_each_power_well(i915, power_well) 81 if (i915_power_well_instance(power_well)->id == power_well_id) 82 return power_well; 83 84 /* 85 * It's not feasible to add error checking code to the callers since 86 * this condition really shouldn't happen and it doesn't even make sense 87 * to abort things like display initialization sequences. Just return 88 * the first power well and hope the WARN gets reported so we can fix 89 * our driver. 90 */ 91 drm_WARN(&i915->drm, 1, 92 "Power well %d not defined for this platform\n", 93 power_well_id); 94 return &i915->display.power.domains.power_wells[0]; 95 } 96 97 void intel_power_well_enable(struct drm_i915_private *i915, 98 struct i915_power_well *power_well) 99 { 100 drm_dbg_kms(&i915->drm, "enabling %s\n", intel_power_well_name(power_well)); 101 power_well->desc->ops->enable(i915, power_well); 102 power_well->hw_enabled = true; 103 } 104 105 void intel_power_well_disable(struct drm_i915_private *i915, 106 struct i915_power_well *power_well) 107 { 108 drm_dbg_kms(&i915->drm, "disabling %s\n", intel_power_well_name(power_well)); 109 power_well->hw_enabled = false; 110 power_well->desc->ops->disable(i915, power_well); 111 } 112 113 void intel_power_well_sync_hw(struct drm_i915_private *i915, 114 struct i915_power_well *power_well) 115 { 116 power_well->desc->ops->sync_hw(i915, power_well); 117 power_well->hw_enabled = 118 power_well->desc->ops->is_enabled(i915, power_well); 119 } 120 121 void intel_power_well_get(struct drm_i915_private *i915, 122 struct i915_power_well *power_well) 123 { 124 if (!power_well->count++) 125 intel_power_well_enable(i915, power_well); 126 } 127 128 void intel_power_well_put(struct drm_i915_private *i915, 129 struct i915_power_well *power_well) 130 { 131 drm_WARN(&i915->drm, !power_well->count, 132 "Use count on power well %s is already zero", 133 i915_power_well_instance(power_well)->name); 134 135 if (!--power_well->count) 136 intel_power_well_disable(i915, power_well); 137 } 138 139 bool intel_power_well_is_enabled(struct drm_i915_private *i915, 140 struct i915_power_well *power_well) 141 { 142 return power_well->desc->ops->is_enabled(i915, power_well); 143 } 144 145 bool intel_power_well_is_enabled_cached(struct i915_power_well *power_well) 146 { 147 return power_well->hw_enabled; 148 } 149 150 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, 151 enum i915_power_well_id power_well_id) 152 { 153 struct i915_power_well *power_well; 154 155 power_well = lookup_power_well(dev_priv, power_well_id); 156 157 return intel_power_well_is_enabled(dev_priv, power_well); 158 } 159 160 bool intel_power_well_is_always_on(struct i915_power_well *power_well) 161 { 162 return power_well->desc->always_on; 163 } 164 165 const char *intel_power_well_name(struct i915_power_well *power_well) 166 { 167 return i915_power_well_instance(power_well)->name; 168 } 169 170 struct intel_power_domain_mask *intel_power_well_domains(struct i915_power_well *power_well) 171 { 172 return &power_well->domains; 173 } 174 175 int intel_power_well_refcount(struct i915_power_well *power_well) 176 { 177 return power_well->count; 178 } 179 180 /* 181 * Starting with Haswell, we have a "Power Down Well" that can be turned off 182 * when not needed anymore. We have 4 registers that can request the power well 183 * to be enabled, and it will only be disabled if none of the registers is 184 * requesting it to be enabled. 185 */ 186 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv, 187 u8 irq_pipe_mask, bool has_vga) 188 { 189 if (has_vga) 190 intel_vga_reset_io_mem(dev_priv); 191 192 if (irq_pipe_mask) 193 gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask); 194 } 195 196 static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv, 197 u8 irq_pipe_mask) 198 { 199 if (irq_pipe_mask) 200 gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask); 201 } 202 203 #define ICL_AUX_PW_TO_PHY(pw_idx) \ 204 ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + PHY_A) 205 206 #define ICL_AUX_PW_TO_CH(pw_idx) \ 207 ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A) 208 209 #define ICL_TBT_AUX_PW_TO_CH(pw_idx) \ 210 ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C) 211 212 static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well) 213 { 214 int pw_idx = i915_power_well_instance(power_well)->hsw.idx; 215 216 return power_well->desc->is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) : 217 ICL_AUX_PW_TO_CH(pw_idx); 218 } 219 220 static struct intel_digital_port * 221 aux_ch_to_digital_port(struct drm_i915_private *dev_priv, 222 enum aux_ch aux_ch) 223 { 224 struct intel_encoder *encoder; 225 226 for_each_intel_encoder(&dev_priv->drm, encoder) { 227 struct intel_digital_port *dig_port; 228 229 /* We'll check the MST primary port */ 230 if (encoder->type == INTEL_OUTPUT_DP_MST) 231 continue; 232 233 dig_port = enc_to_dig_port(encoder); 234 235 if (dig_port && dig_port->aux_ch == aux_ch) 236 return dig_port; 237 } 238 239 return NULL; 240 } 241 242 static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915, 243 const struct i915_power_well *power_well) 244 { 245 enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well); 246 struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch); 247 248 /* 249 * FIXME should we care about the (VBT defined) dig_port->aux_ch 250 * relationship or should this be purely defined by the hardware layout? 251 * Currently if the port doesn't appear in the VBT, or if it's declared 252 * as HDMI-only and routed to a combo PHY, the encoder either won't be 253 * present at all or it will not have an aux_ch assigned. 254 */ 255 return dig_port ? intel_encoder_to_phy(&dig_port->base) : PHY_NONE; 256 } 257 258 static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv, 259 struct i915_power_well *power_well, 260 bool timeout_expected) 261 { 262 const struct i915_power_well_regs *regs = power_well->desc->ops->regs; 263 int pw_idx = i915_power_well_instance(power_well)->hsw.idx; 264 int timeout = power_well->desc->enable_timeout ? : 1; 265 266 /* 267 * For some power wells we're not supposed to watch the status bit for 268 * an ack, but rather just wait a fixed amount of time and then 269 * proceed. This is only used on DG2. 270 */ 271 if (IS_DG2(dev_priv) && power_well->desc->fixed_enable_delay) { 272 usleep_range(600, 1200); 273 return; 274 } 275 276 /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */ 277 if (intel_de_wait_for_set(dev_priv, regs->driver, 278 HSW_PWR_WELL_CTL_STATE(pw_idx), timeout)) { 279 drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n", 280 intel_power_well_name(power_well)); 281 282 drm_WARN_ON(&dev_priv->drm, !timeout_expected); 283 284 } 285 } 286 287 static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv, 288 const struct i915_power_well_regs *regs, 289 int pw_idx) 290 { 291 u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx); 292 u32 ret; 293 294 ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0; 295 ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0; 296 if (regs->kvmr.reg) 297 ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0; 298 ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0; 299 300 return ret; 301 } 302 303 static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv, 304 struct i915_power_well *power_well) 305 { 306 const struct i915_power_well_regs *regs = power_well->desc->ops->regs; 307 int pw_idx = i915_power_well_instance(power_well)->hsw.idx; 308 bool disabled; 309 u32 reqs; 310 311 /* 312 * Bspec doesn't require waiting for PWs to get disabled, but still do 313 * this for paranoia. The known cases where a PW will be forced on: 314 * - a KVMR request on any power well via the KVMR request register 315 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and 316 * DEBUG request registers 317 * Skip the wait in case any of the request bits are set and print a 318 * diagnostic message. 319 */ 320 wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) & 321 HSW_PWR_WELL_CTL_STATE(pw_idx))) || 322 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1); 323 if (disabled) 324 return; 325 326 drm_dbg_kms(&dev_priv->drm, 327 "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n", 328 intel_power_well_name(power_well), 329 !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8)); 330 } 331 332 static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv, 333 enum skl_power_gate pg) 334 { 335 /* Timeout 5us for PG#0, for other PGs 1us */ 336 drm_WARN_ON(&dev_priv->drm, 337 intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS, 338 SKL_FUSE_PG_DIST_STATUS(pg), 1)); 339 } 340 341 static void hsw_power_well_enable(struct drm_i915_private *dev_priv, 342 struct i915_power_well *power_well) 343 { 344 const struct i915_power_well_regs *regs = power_well->desc->ops->regs; 345 int pw_idx = i915_power_well_instance(power_well)->hsw.idx; 346 347 if (power_well->desc->has_fuses) { 348 enum skl_power_gate pg; 349 350 pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) : 351 SKL_PW_CTL_IDX_TO_PG(pw_idx); 352 353 /* Wa_16013190616:adlp */ 354 if (IS_ALDERLAKE_P(dev_priv) && pg == SKL_PG1) 355 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, DISABLE_FLR_SRC); 356 357 /* 358 * For PW1 we have to wait both for the PW0/PG0 fuse state 359 * before enabling the power well and PW1/PG1's own fuse 360 * state after the enabling. For all other power wells with 361 * fuses we only have to wait for that PW/PG's fuse state 362 * after the enabling. 363 */ 364 if (pg == SKL_PG1) 365 gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0); 366 } 367 368 intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx)); 369 370 hsw_wait_for_power_well_enable(dev_priv, power_well, false); 371 372 if (power_well->desc->has_fuses) { 373 enum skl_power_gate pg; 374 375 pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) : 376 SKL_PW_CTL_IDX_TO_PG(pw_idx); 377 gen9_wait_for_power_well_fuses(dev_priv, pg); 378 } 379 380 hsw_power_well_post_enable(dev_priv, 381 power_well->desc->irq_pipe_mask, 382 power_well->desc->has_vga); 383 } 384 385 static void hsw_power_well_disable(struct drm_i915_private *dev_priv, 386 struct i915_power_well *power_well) 387 { 388 const struct i915_power_well_regs *regs = power_well->desc->ops->regs; 389 int pw_idx = i915_power_well_instance(power_well)->hsw.idx; 390 391 hsw_power_well_pre_disable(dev_priv, 392 power_well->desc->irq_pipe_mask); 393 394 intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0); 395 hsw_wait_for_power_well_disable(dev_priv, power_well); 396 } 397 398 static bool intel_aux_ch_is_edp(struct drm_i915_private *i915, enum aux_ch aux_ch) 399 { 400 struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch); 401 402 return dig_port && dig_port->base.type == INTEL_OUTPUT_EDP; 403 } 404 405 static void 406 icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, 407 struct i915_power_well *power_well) 408 { 409 const struct i915_power_well_regs *regs = power_well->desc->ops->regs; 410 int pw_idx = i915_power_well_instance(power_well)->hsw.idx; 411 412 drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv)); 413 414 intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx)); 415 416 /* 417 * FIXME not sure if we should derive the PHY from the pw_idx, or 418 * from the VBT defined AUX_CH->DDI->PHY mapping. 419 */ 420 intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)), 421 0, ICL_LANE_ENABLE_AUX); 422 423 hsw_wait_for_power_well_enable(dev_priv, power_well, false); 424 425 /* Display WA #1178: icl */ 426 if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B && 427 !intel_aux_ch_is_edp(dev_priv, ICL_AUX_PW_TO_CH(pw_idx))) 428 intel_de_rmw(dev_priv, ICL_PORT_TX_DW6_AUX(ICL_AUX_PW_TO_PHY(pw_idx)), 429 0, O_FUNC_OVRD_EN | O_LDO_BYPASS_CRI); 430 } 431 432 static void 433 icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, 434 struct i915_power_well *power_well) 435 { 436 const struct i915_power_well_regs *regs = power_well->desc->ops->regs; 437 int pw_idx = i915_power_well_instance(power_well)->hsw.idx; 438 439 drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv)); 440 441 /* 442 * FIXME not sure if we should derive the PHY from the pw_idx, or 443 * from the VBT defined AUX_CH->DDI->PHY mapping. 444 */ 445 intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)), 446 ICL_LANE_ENABLE_AUX, 0); 447 448 intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0); 449 450 hsw_wait_for_power_well_disable(dev_priv, power_well); 451 } 452 453 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 454 455 static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv, 456 struct i915_power_well *power_well, 457 struct intel_digital_port *dig_port) 458 { 459 if (drm_WARN_ON(&dev_priv->drm, !dig_port)) 460 return; 461 462 if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port)) 463 return; 464 465 drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port)); 466 } 467 468 #else 469 470 static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv, 471 struct i915_power_well *power_well, 472 struct intel_digital_port *dig_port) 473 { 474 } 475 476 #endif 477 478 #define TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1) 479 480 static void icl_tc_cold_exit(struct drm_i915_private *i915) 481 { 482 int ret, tries = 0; 483 484 while (1) { 485 ret = snb_pcode_write_timeout(&i915->uncore, ICL_PCODE_EXIT_TCCOLD, 0, 486 250, 1); 487 if (ret != -EAGAIN || ++tries == 3) 488 break; 489 msleep(1); 490 } 491 492 /* Spec states that TC cold exit can take up to 1ms to complete */ 493 if (!ret) 494 msleep(1); 495 496 /* TODO: turn failure into a error as soon i915 CI updates ICL IFWI */ 497 drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" : 498 "succeeded"); 499 } 500 501 static void 502 icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, 503 struct i915_power_well *power_well) 504 { 505 enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well); 506 struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch); 507 const struct i915_power_well_regs *regs = power_well->desc->ops->regs; 508 bool is_tbt = power_well->desc->is_tc_tbt; 509 bool timeout_expected; 510 511 icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port); 512 513 intel_de_rmw(dev_priv, DP_AUX_CH_CTL(aux_ch), 514 DP_AUX_CH_CTL_TBT_IO, is_tbt ? DP_AUX_CH_CTL_TBT_IO : 0); 515 516 intel_de_rmw(dev_priv, regs->driver, 517 0, 518 HSW_PWR_WELL_CTL_REQ(i915_power_well_instance(power_well)->hsw.idx)); 519 520 /* 521 * An AUX timeout is expected if the TBT DP tunnel is down, 522 * or need to enable AUX on a legacy TypeC port as part of the TC-cold 523 * exit sequence. 524 */ 525 timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port); 526 if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port)) 527 icl_tc_cold_exit(dev_priv); 528 529 hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected); 530 531 if (DISPLAY_VER(dev_priv) >= 12 && !is_tbt) { 532 enum tc_port tc_port; 533 534 tc_port = TGL_AUX_PW_TO_TC_PORT(i915_power_well_instance(power_well)->hsw.idx); 535 536 if (wait_for(intel_dkl_phy_read(dev_priv, DKL_CMN_UC_DW_27(tc_port)) & 537 DKL_CMN_UC_DW27_UC_HEALTH, 1)) 538 drm_warn(&dev_priv->drm, 539 "Timeout waiting TC uC health\n"); 540 } 541 } 542 543 static void 544 icl_aux_power_well_enable(struct drm_i915_private *dev_priv, 545 struct i915_power_well *power_well) 546 { 547 enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); 548 549 if (intel_phy_is_tc(dev_priv, phy)) 550 return icl_tc_phy_aux_power_well_enable(dev_priv, power_well); 551 else if (IS_ICELAKE(dev_priv)) 552 return icl_combo_phy_aux_power_well_enable(dev_priv, 553 power_well); 554 else 555 return hsw_power_well_enable(dev_priv, power_well); 556 } 557 558 static void 559 icl_aux_power_well_disable(struct drm_i915_private *dev_priv, 560 struct i915_power_well *power_well) 561 { 562 enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); 563 564 if (intel_phy_is_tc(dev_priv, phy)) 565 return hsw_power_well_disable(dev_priv, power_well); 566 else if (IS_ICELAKE(dev_priv)) 567 return icl_combo_phy_aux_power_well_disable(dev_priv, 568 power_well); 569 else 570 return hsw_power_well_disable(dev_priv, power_well); 571 } 572 573 /* 574 * We should only use the power well if we explicitly asked the hardware to 575 * enable it, so check if it's enabled and also check if we've requested it to 576 * be enabled. 577 */ 578 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv, 579 struct i915_power_well *power_well) 580 { 581 const struct i915_power_well_regs *regs = power_well->desc->ops->regs; 582 enum i915_power_well_id id = i915_power_well_instance(power_well)->id; 583 int pw_idx = i915_power_well_instance(power_well)->hsw.idx; 584 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) | 585 HSW_PWR_WELL_CTL_STATE(pw_idx); 586 u32 val; 587 588 val = intel_de_read(dev_priv, regs->driver); 589 590 /* 591 * On GEN9 big core due to a DMC bug the driver's request bits for PW1 592 * and the MISC_IO PW will be not restored, so check instead for the 593 * BIOS's own request bits, which are forced-on for these power wells 594 * when exiting DC5/6. 595 */ 596 if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) && 597 (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO)) 598 val |= intel_de_read(dev_priv, regs->bios); 599 600 return (val & mask) == mask; 601 } 602 603 static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) 604 { 605 drm_WARN_ONCE(&dev_priv->drm, 606 (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9), 607 "DC9 already programmed to be enabled.\n"); 608 drm_WARN_ONCE(&dev_priv->drm, 609 intel_de_read(dev_priv, DC_STATE_EN) & 610 DC_STATE_EN_UPTO_DC5, 611 "DC5 still not disabled to enable DC9.\n"); 612 drm_WARN_ONCE(&dev_priv->drm, 613 intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) & 614 HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2), 615 "Power well 2 on.\n"); 616 drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv), 617 "Interrupts not disabled yet.\n"); 618 619 /* 620 * TODO: check for the following to verify the conditions to enter DC9 621 * state are satisfied: 622 * 1] Check relevant display engine registers to verify if mode set 623 * disable sequence was followed. 624 * 2] Check if display uninitialize sequence is initialized. 625 */ 626 } 627 628 static void assert_can_disable_dc9(struct drm_i915_private *dev_priv) 629 { 630 drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv), 631 "Interrupts not disabled yet.\n"); 632 drm_WARN_ONCE(&dev_priv->drm, 633 intel_de_read(dev_priv, DC_STATE_EN) & 634 DC_STATE_EN_UPTO_DC5, 635 "DC5 still not disabled.\n"); 636 637 /* 638 * TODO: check for the following to verify DC9 state was indeed 639 * entered before programming to disable it: 640 * 1] Check relevant display engine registers to verify if mode 641 * set disable sequence was followed. 642 * 2] Check if display uninitialize sequence is initialized. 643 */ 644 } 645 646 static void gen9_write_dc_state(struct drm_i915_private *dev_priv, 647 u32 state) 648 { 649 int rewrites = 0; 650 int rereads = 0; 651 u32 v; 652 653 intel_de_write(dev_priv, DC_STATE_EN, state); 654 655 /* It has been observed that disabling the dc6 state sometimes 656 * doesn't stick and dmc keeps returning old value. Make sure 657 * the write really sticks enough times and also force rewrite until 658 * we are confident that state is exactly what we want. 659 */ 660 do { 661 v = intel_de_read(dev_priv, DC_STATE_EN); 662 663 if (v != state) { 664 intel_de_write(dev_priv, DC_STATE_EN, state); 665 rewrites++; 666 rereads = 0; 667 } else if (rereads++ > 5) { 668 break; 669 } 670 671 } while (rewrites < 100); 672 673 if (v != state) 674 drm_err(&dev_priv->drm, 675 "Writing dc state to 0x%x failed, now 0x%x\n", 676 state, v); 677 678 /* Most of the times we need one retry, avoid spam */ 679 if (rewrites > 1) 680 drm_dbg_kms(&dev_priv->drm, 681 "Rewrote dc state to 0x%x %d times\n", 682 state, rewrites); 683 } 684 685 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv) 686 { 687 u32 mask; 688 689 mask = DC_STATE_EN_UPTO_DC5; 690 691 if (DISPLAY_VER(dev_priv) >= 12) 692 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6 693 | DC_STATE_EN_DC9; 694 else if (DISPLAY_VER(dev_priv) == 11) 695 mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9; 696 else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) 697 mask |= DC_STATE_EN_DC9; 698 else 699 mask |= DC_STATE_EN_UPTO_DC6; 700 701 return mask; 702 } 703 704 void gen9_sanitize_dc_state(struct drm_i915_private *i915) 705 { 706 struct i915_power_domains *power_domains = &i915->display.power.domains; 707 u32 val; 708 709 if (!HAS_DISPLAY(i915)) 710 return; 711 712 val = intel_de_read(i915, DC_STATE_EN) & gen9_dc_mask(i915); 713 714 drm_dbg_kms(&i915->drm, 715 "Resetting DC state tracking from %02x to %02x\n", 716 power_domains->dc_state, val); 717 power_domains->dc_state = val; 718 } 719 720 /** 721 * gen9_set_dc_state - set target display C power state 722 * @dev_priv: i915 device instance 723 * @state: target DC power state 724 * - DC_STATE_DISABLE 725 * - DC_STATE_EN_UPTO_DC5 726 * - DC_STATE_EN_UPTO_DC6 727 * - DC_STATE_EN_DC9 728 * 729 * Signal to DMC firmware/HW the target DC power state passed in @state. 730 * DMC/HW can turn off individual display clocks and power rails when entering 731 * a deeper DC power state (higher in number) and turns these back when exiting 732 * that state to a shallower power state (lower in number). The HW will decide 733 * when to actually enter a given state on an on-demand basis, for instance 734 * depending on the active state of display pipes. The state of display 735 * registers backed by affected power rails are saved/restored as needed. 736 * 737 * Based on the above enabling a deeper DC power state is asynchronous wrt. 738 * enabling it. Disabling a deeper power state is synchronous: for instance 739 * setting %DC_STATE_DISABLE won't complete until all HW resources are turned 740 * back on and register state is restored. This is guaranteed by the MMIO write 741 * to DC_STATE_EN blocking until the state is restored. 742 */ 743 void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state) 744 { 745 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; 746 u32 val; 747 u32 mask; 748 749 if (!HAS_DISPLAY(dev_priv)) 750 return; 751 752 if (drm_WARN_ON_ONCE(&dev_priv->drm, 753 state & ~power_domains->allowed_dc_mask)) 754 state &= power_domains->allowed_dc_mask; 755 756 val = intel_de_read(dev_priv, DC_STATE_EN); 757 mask = gen9_dc_mask(dev_priv); 758 drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n", 759 val & mask, state); 760 761 /* Check if DMC is ignoring our DC state requests */ 762 if ((val & mask) != power_domains->dc_state) 763 drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n", 764 power_domains->dc_state, val & mask); 765 766 val &= ~mask; 767 val |= state; 768 769 gen9_write_dc_state(dev_priv, val); 770 771 power_domains->dc_state = val & mask; 772 } 773 774 static void tgl_enable_dc3co(struct drm_i915_private *dev_priv) 775 { 776 drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n"); 777 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO); 778 } 779 780 static void tgl_disable_dc3co(struct drm_i915_private *dev_priv) 781 { 782 drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n"); 783 intel_de_rmw(dev_priv, DC_STATE_EN, DC_STATE_DC3CO_STATUS, 0); 784 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 785 /* 786 * Delay of 200us DC3CO Exit time B.Spec 49196 787 */ 788 usleep_range(200, 210); 789 } 790 791 static void assert_can_enable_dc5(struct drm_i915_private *dev_priv) 792 { 793 enum i915_power_well_id high_pg; 794 795 /* Power wells at this level and above must be disabled for DC5 entry */ 796 if (DISPLAY_VER(dev_priv) == 12) 797 high_pg = ICL_DISP_PW_3; 798 else 799 high_pg = SKL_DISP_PW_2; 800 801 drm_WARN_ONCE(&dev_priv->drm, 802 intel_display_power_well_is_enabled(dev_priv, high_pg), 803 "Power wells above platform's DC5 limit still enabled.\n"); 804 805 drm_WARN_ONCE(&dev_priv->drm, 806 (intel_de_read(dev_priv, DC_STATE_EN) & 807 DC_STATE_EN_UPTO_DC5), 808 "DC5 already programmed to be enabled.\n"); 809 assert_rpm_wakelock_held(&dev_priv->runtime_pm); 810 811 assert_dmc_loaded(dev_priv); 812 } 813 814 void gen9_enable_dc5(struct drm_i915_private *dev_priv) 815 { 816 assert_can_enable_dc5(dev_priv); 817 818 drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n"); 819 820 /* Wa Display #1183: skl,kbl,cfl */ 821 if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) 822 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 823 0, SKL_SELECT_ALTERNATE_DC_EXIT); 824 825 intel_dmc_wl_enable(dev_priv); 826 827 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5); 828 } 829 830 static void assert_can_enable_dc6(struct drm_i915_private *dev_priv) 831 { 832 drm_WARN_ONCE(&dev_priv->drm, 833 (intel_de_read(dev_priv, UTIL_PIN_CTL) & 834 (UTIL_PIN_ENABLE | UTIL_PIN_MODE_MASK)) == 835 (UTIL_PIN_ENABLE | UTIL_PIN_MODE_PWM), 836 "Utility pin enabled in PWM mode\n"); 837 drm_WARN_ONCE(&dev_priv->drm, 838 (intel_de_read(dev_priv, DC_STATE_EN) & 839 DC_STATE_EN_UPTO_DC6), 840 "DC6 already programmed to be enabled.\n"); 841 842 assert_dmc_loaded(dev_priv); 843 } 844 845 void skl_enable_dc6(struct drm_i915_private *dev_priv) 846 { 847 assert_can_enable_dc6(dev_priv); 848 849 drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n"); 850 851 /* Wa Display #1183: skl,kbl,cfl */ 852 if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) 853 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 854 0, SKL_SELECT_ALTERNATE_DC_EXIT); 855 856 intel_dmc_wl_enable(dev_priv); 857 858 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); 859 } 860 861 void bxt_enable_dc9(struct drm_i915_private *dev_priv) 862 { 863 assert_can_enable_dc9(dev_priv); 864 865 drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n"); 866 /* 867 * Power sequencer reset is not needed on 868 * platforms with South Display Engine on PCH, 869 * because PPS registers are always on. 870 */ 871 if (!HAS_PCH_SPLIT(dev_priv)) 872 intel_pps_reset_all(dev_priv); 873 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9); 874 } 875 876 void bxt_disable_dc9(struct drm_i915_private *dev_priv) 877 { 878 assert_can_disable_dc9(dev_priv); 879 880 drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n"); 881 882 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 883 884 intel_pps_unlock_regs_wa(dev_priv); 885 } 886 887 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv, 888 struct i915_power_well *power_well) 889 { 890 const struct i915_power_well_regs *regs = power_well->desc->ops->regs; 891 int pw_idx = i915_power_well_instance(power_well)->hsw.idx; 892 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx); 893 u32 bios_req = intel_de_read(dev_priv, regs->bios); 894 895 /* Take over the request bit if set by BIOS. */ 896 if (bios_req & mask) { 897 u32 drv_req = intel_de_read(dev_priv, regs->driver); 898 899 if (!(drv_req & mask)) 900 intel_de_write(dev_priv, regs->driver, drv_req | mask); 901 intel_de_write(dev_priv, regs->bios, bios_req & ~mask); 902 } 903 } 904 905 static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, 906 struct i915_power_well *power_well) 907 { 908 bxt_ddi_phy_init(dev_priv, i915_power_well_instance(power_well)->bxt.phy); 909 } 910 911 static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, 912 struct i915_power_well *power_well) 913 { 914 bxt_ddi_phy_uninit(dev_priv, i915_power_well_instance(power_well)->bxt.phy); 915 } 916 917 static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv, 918 struct i915_power_well *power_well) 919 { 920 return bxt_ddi_phy_is_enabled(dev_priv, i915_power_well_instance(power_well)->bxt.phy); 921 } 922 923 static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv) 924 { 925 struct i915_power_well *power_well; 926 927 power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A); 928 if (intel_power_well_refcount(power_well) > 0) 929 bxt_ddi_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy); 930 931 power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); 932 if (intel_power_well_refcount(power_well) > 0) 933 bxt_ddi_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy); 934 935 if (IS_GEMINILAKE(dev_priv)) { 936 power_well = lookup_power_well(dev_priv, 937 GLK_DISP_PW_DPIO_CMN_C); 938 if (intel_power_well_refcount(power_well) > 0) 939 bxt_ddi_phy_verify_state(dev_priv, 940 i915_power_well_instance(power_well)->bxt.phy); 941 } 942 } 943 944 static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv, 945 struct i915_power_well *power_well) 946 { 947 return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 && 948 (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0); 949 } 950 951 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv) 952 { 953 u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv); 954 u8 enabled_dbuf_slices = dev_priv->display.dbuf.enabled_slices; 955 956 drm_WARN(&dev_priv->drm, 957 hw_enabled_dbuf_slices != enabled_dbuf_slices, 958 "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n", 959 hw_enabled_dbuf_slices, 960 enabled_dbuf_slices); 961 } 962 963 void gen9_disable_dc_states(struct drm_i915_private *dev_priv) 964 { 965 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; 966 struct intel_cdclk_config cdclk_config = {}; 967 968 if (power_domains->target_dc_state == DC_STATE_EN_DC3CO) { 969 tgl_disable_dc3co(dev_priv); 970 return; 971 } 972 973 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 974 975 if (!HAS_DISPLAY(dev_priv)) 976 return; 977 978 intel_dmc_wl_disable(dev_priv); 979 980 intel_cdclk_get_cdclk(dev_priv, &cdclk_config); 981 /* Can't read out voltage_level so can't use intel_cdclk_changed() */ 982 drm_WARN_ON(&dev_priv->drm, 983 intel_cdclk_clock_changed(&dev_priv->display.cdclk.hw, 984 &cdclk_config)); 985 986 gen9_assert_dbuf_enabled(dev_priv); 987 988 if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) 989 bxt_verify_ddi_phy_power_wells(dev_priv); 990 991 if (DISPLAY_VER(dev_priv) >= 11) 992 /* 993 * DMC retains HW context only for port A, the other combo 994 * PHY's HW context for port B is lost after DC transitions, 995 * so we need to restore it manually. 996 */ 997 intel_combo_phy_init(dev_priv); 998 } 999 1000 static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv, 1001 struct i915_power_well *power_well) 1002 { 1003 gen9_disable_dc_states(dev_priv); 1004 } 1005 1006 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, 1007 struct i915_power_well *power_well) 1008 { 1009 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; 1010 1011 if (!intel_dmc_has_payload(dev_priv)) 1012 return; 1013 1014 switch (power_domains->target_dc_state) { 1015 case DC_STATE_EN_DC3CO: 1016 tgl_enable_dc3co(dev_priv); 1017 break; 1018 case DC_STATE_EN_UPTO_DC6: 1019 skl_enable_dc6(dev_priv); 1020 break; 1021 case DC_STATE_EN_UPTO_DC5: 1022 gen9_enable_dc5(dev_priv); 1023 break; 1024 } 1025 } 1026 1027 static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv, 1028 struct i915_power_well *power_well) 1029 { 1030 } 1031 1032 static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv, 1033 struct i915_power_well *power_well) 1034 { 1035 } 1036 1037 static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv, 1038 struct i915_power_well *power_well) 1039 { 1040 return true; 1041 } 1042 1043 static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv, 1044 struct i915_power_well *power_well) 1045 { 1046 if ((intel_de_read(dev_priv, TRANSCONF(PIPE_A)) & TRANSCONF_ENABLE) == 0) 1047 i830_enable_pipe(dev_priv, PIPE_A); 1048 if ((intel_de_read(dev_priv, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE) == 0) 1049 i830_enable_pipe(dev_priv, PIPE_B); 1050 } 1051 1052 static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv, 1053 struct i915_power_well *power_well) 1054 { 1055 i830_disable_pipe(dev_priv, PIPE_B); 1056 i830_disable_pipe(dev_priv, PIPE_A); 1057 } 1058 1059 static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv, 1060 struct i915_power_well *power_well) 1061 { 1062 return intel_de_read(dev_priv, TRANSCONF(PIPE_A)) & TRANSCONF_ENABLE && 1063 intel_de_read(dev_priv, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE; 1064 } 1065 1066 static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv, 1067 struct i915_power_well *power_well) 1068 { 1069 if (intel_power_well_refcount(power_well) > 0) 1070 i830_pipes_power_well_enable(dev_priv, power_well); 1071 else 1072 i830_pipes_power_well_disable(dev_priv, power_well); 1073 } 1074 1075 static void vlv_set_power_well(struct drm_i915_private *dev_priv, 1076 struct i915_power_well *power_well, bool enable) 1077 { 1078 int pw_idx = i915_power_well_instance(power_well)->vlv.idx; 1079 u32 mask; 1080 u32 state; 1081 u32 ctrl; 1082 1083 mask = PUNIT_PWRGT_MASK(pw_idx); 1084 state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) : 1085 PUNIT_PWRGT_PWR_GATE(pw_idx); 1086 1087 vlv_punit_get(dev_priv); 1088 1089 #define COND \ 1090 ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state) 1091 1092 if (COND) 1093 goto out; 1094 1095 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL); 1096 ctrl &= ~mask; 1097 ctrl |= state; 1098 vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl); 1099 1100 if (wait_for(COND, 100)) 1101 drm_err(&dev_priv->drm, 1102 "timeout setting power well state %08x (%08x)\n", 1103 state, 1104 vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL)); 1105 1106 #undef COND 1107 1108 out: 1109 vlv_punit_put(dev_priv); 1110 } 1111 1112 static void vlv_power_well_enable(struct drm_i915_private *dev_priv, 1113 struct i915_power_well *power_well) 1114 { 1115 vlv_set_power_well(dev_priv, power_well, true); 1116 } 1117 1118 static void vlv_power_well_disable(struct drm_i915_private *dev_priv, 1119 struct i915_power_well *power_well) 1120 { 1121 vlv_set_power_well(dev_priv, power_well, false); 1122 } 1123 1124 static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, 1125 struct i915_power_well *power_well) 1126 { 1127 int pw_idx = i915_power_well_instance(power_well)->vlv.idx; 1128 bool enabled = false; 1129 u32 mask; 1130 u32 state; 1131 u32 ctrl; 1132 1133 mask = PUNIT_PWRGT_MASK(pw_idx); 1134 ctrl = PUNIT_PWRGT_PWR_ON(pw_idx); 1135 1136 vlv_punit_get(dev_priv); 1137 1138 state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask; 1139 /* 1140 * We only ever set the power-on and power-gate states, anything 1141 * else is unexpected. 1142 */ 1143 drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) && 1144 state != PUNIT_PWRGT_PWR_GATE(pw_idx)); 1145 if (state == ctrl) 1146 enabled = true; 1147 1148 /* 1149 * A transient state at this point would mean some unexpected party 1150 * is poking at the power controls too. 1151 */ 1152 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask; 1153 drm_WARN_ON(&dev_priv->drm, ctrl != state); 1154 1155 vlv_punit_put(dev_priv); 1156 1157 return enabled; 1158 } 1159 1160 static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv) 1161 { 1162 /* 1163 * On driver load, a pipe may be active and driving a DSI display. 1164 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck 1165 * (and never recovering) in this case. intel_dsi_post_disable() will 1166 * clear it when we turn off the display. 1167 */ 1168 intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv), 1169 ~DPOUNIT_CLOCK_GATE_DISABLE, VRHUNIT_CLOCK_GATE_DISABLE); 1170 1171 /* 1172 * Disable trickle feed and enable pnd deadline calculation 1173 */ 1174 intel_de_write(dev_priv, MI_ARB_VLV, 1175 MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); 1176 intel_de_write(dev_priv, CBR1_VLV, 0); 1177 1178 drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0); 1179 intel_de_write(dev_priv, RAWCLK_FREQ_VLV, 1180 DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 1181 1000)); 1182 } 1183 1184 static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) 1185 { 1186 struct intel_encoder *encoder; 1187 enum pipe pipe; 1188 1189 /* 1190 * Enable the CRI clock source so we can get at the 1191 * display and the reference clock for VGA 1192 * hotplug / manual detection. Supposedly DSI also 1193 * needs the ref clock up and running. 1194 * 1195 * CHV DPLL B/C have some issues if VGA mode is enabled. 1196 */ 1197 for_each_pipe(dev_priv, pipe) { 1198 u32 val = intel_de_read(dev_priv, DPLL(pipe)); 1199 1200 val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 1201 if (pipe != PIPE_A) 1202 val |= DPLL_INTEGRATED_CRI_CLK_VLV; 1203 1204 intel_de_write(dev_priv, DPLL(pipe), val); 1205 } 1206 1207 vlv_init_display_clock_gating(dev_priv); 1208 1209 spin_lock_irq(&dev_priv->irq_lock); 1210 valleyview_enable_display_irqs(dev_priv); 1211 spin_unlock_irq(&dev_priv->irq_lock); 1212 1213 /* 1214 * During driver initialization/resume we can avoid restoring the 1215 * part of the HW/SW state that will be inited anyway explicitly. 1216 */ 1217 if (dev_priv->display.power.domains.initializing) 1218 return; 1219 1220 intel_hpd_init(dev_priv); 1221 intel_hpd_poll_disable(dev_priv); 1222 1223 /* Re-enable the ADPA, if we have one */ 1224 for_each_intel_encoder(&dev_priv->drm, encoder) { 1225 if (encoder->type == INTEL_OUTPUT_ANALOG) 1226 intel_crt_reset(&encoder->base); 1227 } 1228 1229 intel_vga_redisable_power_on(dev_priv); 1230 1231 intel_pps_unlock_regs_wa(dev_priv); 1232 } 1233 1234 static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv) 1235 { 1236 spin_lock_irq(&dev_priv->irq_lock); 1237 valleyview_disable_display_irqs(dev_priv); 1238 spin_unlock_irq(&dev_priv->irq_lock); 1239 1240 /* make sure we're done processing display irqs */ 1241 intel_synchronize_irq(dev_priv); 1242 1243 intel_pps_reset_all(dev_priv); 1244 1245 /* Prevent us from re-enabling polling on accident in late suspend */ 1246 if (!dev_priv->drm.dev->power.is_suspended) 1247 intel_hpd_poll_enable(dev_priv); 1248 } 1249 1250 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv, 1251 struct i915_power_well *power_well) 1252 { 1253 vlv_set_power_well(dev_priv, power_well, true); 1254 1255 vlv_display_power_well_init(dev_priv); 1256 } 1257 1258 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv, 1259 struct i915_power_well *power_well) 1260 { 1261 vlv_display_power_well_deinit(dev_priv); 1262 1263 vlv_set_power_well(dev_priv, power_well, false); 1264 } 1265 1266 static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, 1267 struct i915_power_well *power_well) 1268 { 1269 /* since ref/cri clock was enabled */ 1270 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ 1271 1272 vlv_set_power_well(dev_priv, power_well, true); 1273 1274 /* 1275 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx - 1276 * 6. De-assert cmn_reset/side_reset. Same as VLV X0. 1277 * a. GUnit 0x2110 bit[0] set to 1 (def 0) 1278 * b. The other bits such as sfr settings / modesel may all 1279 * be set to 0. 1280 * 1281 * This should only be done on init and resume from S3 with 1282 * both PLLs disabled, or we risk losing DPIO and PLL 1283 * synchronization. 1284 */ 1285 intel_de_rmw(dev_priv, DPIO_CTL, 0, DPIO_CMNRST); 1286 } 1287 1288 static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, 1289 struct i915_power_well *power_well) 1290 { 1291 enum pipe pipe; 1292 1293 for_each_pipe(dev_priv, pipe) 1294 assert_pll_disabled(dev_priv, pipe); 1295 1296 /* Assert common reset */ 1297 intel_de_rmw(dev_priv, DPIO_CTL, DPIO_CMNRST, 0); 1298 1299 vlv_set_power_well(dev_priv, power_well, false); 1300 } 1301 1302 #define BITS_SET(val, bits) (((val) & (bits)) == (bits)) 1303 1304 static void assert_chv_phy_status(struct drm_i915_private *dev_priv) 1305 { 1306 struct i915_power_well *cmn_bc = 1307 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); 1308 struct i915_power_well *cmn_d = 1309 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D); 1310 u32 phy_control = dev_priv->display.power.chv_phy_control; 1311 u32 phy_status = 0; 1312 u32 phy_status_mask = 0xffffffff; 1313 1314 /* 1315 * The BIOS can leave the PHY is some weird state 1316 * where it doesn't fully power down some parts. 1317 * Disable the asserts until the PHY has been fully 1318 * reset (ie. the power well has been disabled at 1319 * least once). 1320 */ 1321 if (!dev_priv->display.power.chv_phy_assert[DPIO_PHY0]) 1322 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) | 1323 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) | 1324 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) | 1325 PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) | 1326 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) | 1327 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1)); 1328 1329 if (!dev_priv->display.power.chv_phy_assert[DPIO_PHY1]) 1330 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) | 1331 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) | 1332 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1)); 1333 1334 if (intel_power_well_is_enabled(dev_priv, cmn_bc)) { 1335 phy_status |= PHY_POWERGOOD(DPIO_PHY0); 1336 1337 /* this assumes override is only used to enable lanes */ 1338 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0) 1339 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0); 1340 1341 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0) 1342 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1); 1343 1344 /* CL1 is on whenever anything is on in either channel */ 1345 if (BITS_SET(phy_control, 1346 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) | 1347 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1))) 1348 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0); 1349 1350 /* 1351 * The DPLLB check accounts for the pipe B + port A usage 1352 * with CL2 powered up but all the lanes in the second channel 1353 * powered down. 1354 */ 1355 if (BITS_SET(phy_control, 1356 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) && 1357 (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0) 1358 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1); 1359 1360 if (BITS_SET(phy_control, 1361 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0))) 1362 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0); 1363 if (BITS_SET(phy_control, 1364 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0))) 1365 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1); 1366 1367 if (BITS_SET(phy_control, 1368 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1))) 1369 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0); 1370 if (BITS_SET(phy_control, 1371 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1))) 1372 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1); 1373 } 1374 1375 if (intel_power_well_is_enabled(dev_priv, cmn_d)) { 1376 phy_status |= PHY_POWERGOOD(DPIO_PHY1); 1377 1378 /* this assumes override is only used to enable lanes */ 1379 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0) 1380 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0); 1381 1382 if (BITS_SET(phy_control, 1383 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0))) 1384 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0); 1385 1386 if (BITS_SET(phy_control, 1387 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0))) 1388 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0); 1389 if (BITS_SET(phy_control, 1390 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0))) 1391 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1); 1392 } 1393 1394 phy_status &= phy_status_mask; 1395 1396 /* 1397 * The PHY may be busy with some initial calibration and whatnot, 1398 * so the power state can take a while to actually change. 1399 */ 1400 if (intel_de_wait(dev_priv, DISPLAY_PHY_STATUS, 1401 phy_status_mask, phy_status, 10)) 1402 drm_err(&dev_priv->drm, 1403 "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n", 1404 intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask, 1405 phy_status, dev_priv->display.power.chv_phy_control); 1406 } 1407 1408 #undef BITS_SET 1409 1410 static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, 1411 struct i915_power_well *power_well) 1412 { 1413 enum i915_power_well_id id = i915_power_well_instance(power_well)->id; 1414 enum dpio_phy phy; 1415 u32 tmp; 1416 1417 drm_WARN_ON_ONCE(&dev_priv->drm, 1418 id != VLV_DISP_PW_DPIO_CMN_BC && 1419 id != CHV_DISP_PW_DPIO_CMN_D); 1420 1421 if (id == VLV_DISP_PW_DPIO_CMN_BC) 1422 phy = DPIO_PHY0; 1423 else 1424 phy = DPIO_PHY1; 1425 1426 /* since ref/cri clock was enabled */ 1427 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ 1428 vlv_set_power_well(dev_priv, power_well, true); 1429 1430 /* Poll for phypwrgood signal */ 1431 if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS, 1432 PHY_POWERGOOD(phy), 1)) 1433 drm_err(&dev_priv->drm, "Display PHY %d is not power up\n", 1434 phy); 1435 1436 vlv_dpio_get(dev_priv); 1437 1438 /* Enable dynamic power down */ 1439 tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW28); 1440 tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN | 1441 DPIO_SUS_CLK_CONFIG_GATE_CLKREQ; 1442 vlv_dpio_write(dev_priv, phy, CHV_CMN_DW28, tmp); 1443 1444 if (id == VLV_DISP_PW_DPIO_CMN_BC) { 1445 tmp = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW6_CH1); 1446 tmp |= DPIO_DYNPWRDOWNEN_CH1; 1447 vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW6_CH1, tmp); 1448 } else { 1449 /* 1450 * Force the non-existing CL2 off. BXT does this 1451 * too, so maybe it saves some power even though 1452 * CL2 doesn't exist? 1453 */ 1454 tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW30); 1455 tmp |= DPIO_CL2_LDOFUSE_PWRENB; 1456 vlv_dpio_write(dev_priv, phy, CHV_CMN_DW30, tmp); 1457 } 1458 1459 vlv_dpio_put(dev_priv); 1460 1461 dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy); 1462 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, 1463 dev_priv->display.power.chv_phy_control); 1464 1465 drm_dbg_kms(&dev_priv->drm, 1466 "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", 1467 phy, dev_priv->display.power.chv_phy_control); 1468 1469 assert_chv_phy_status(dev_priv); 1470 } 1471 1472 static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, 1473 struct i915_power_well *power_well) 1474 { 1475 enum i915_power_well_id id = i915_power_well_instance(power_well)->id; 1476 enum dpio_phy phy; 1477 1478 drm_WARN_ON_ONCE(&dev_priv->drm, 1479 id != VLV_DISP_PW_DPIO_CMN_BC && 1480 id != CHV_DISP_PW_DPIO_CMN_D); 1481 1482 if (id == VLV_DISP_PW_DPIO_CMN_BC) { 1483 phy = DPIO_PHY0; 1484 assert_pll_disabled(dev_priv, PIPE_A); 1485 assert_pll_disabled(dev_priv, PIPE_B); 1486 } else { 1487 phy = DPIO_PHY1; 1488 assert_pll_disabled(dev_priv, PIPE_C); 1489 } 1490 1491 dev_priv->display.power.chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy); 1492 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, 1493 dev_priv->display.power.chv_phy_control); 1494 1495 vlv_set_power_well(dev_priv, power_well, false); 1496 1497 drm_dbg_kms(&dev_priv->drm, 1498 "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", 1499 phy, dev_priv->display.power.chv_phy_control); 1500 1501 /* PHY is fully reset now, so we can enable the PHY state asserts */ 1502 dev_priv->display.power.chv_phy_assert[phy] = true; 1503 1504 assert_chv_phy_status(dev_priv); 1505 } 1506 1507 static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy, 1508 enum dpio_channel ch, bool override, unsigned int mask) 1509 { 1510 u32 reg, val, expected, actual; 1511 1512 /* 1513 * The BIOS can leave the PHY is some weird state 1514 * where it doesn't fully power down some parts. 1515 * Disable the asserts until the PHY has been fully 1516 * reset (ie. the power well has been disabled at 1517 * least once). 1518 */ 1519 if (!dev_priv->display.power.chv_phy_assert[phy]) 1520 return; 1521 1522 if (ch == DPIO_CH0) 1523 reg = _CHV_CMN_DW0_CH0; 1524 else 1525 reg = _CHV_CMN_DW6_CH1; 1526 1527 vlv_dpio_get(dev_priv); 1528 val = vlv_dpio_read(dev_priv, phy, reg); 1529 vlv_dpio_put(dev_priv); 1530 1531 /* 1532 * This assumes !override is only used when the port is disabled. 1533 * All lanes should power down even without the override when 1534 * the port is disabled. 1535 */ 1536 if (!override || mask == 0xf) { 1537 expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN; 1538 /* 1539 * If CH1 common lane is not active anymore 1540 * (eg. for pipe B DPLL) the entire channel will 1541 * shut down, which causes the common lane registers 1542 * to read as 0. That means we can't actually check 1543 * the lane power down status bits, but as the entire 1544 * register reads as 0 it's a good indication that the 1545 * channel is indeed entirely powered down. 1546 */ 1547 if (ch == DPIO_CH1 && val == 0) 1548 expected = 0; 1549 } else if (mask != 0x0) { 1550 expected = DPIO_ANYDL_POWERDOWN; 1551 } else { 1552 expected = 0; 1553 } 1554 1555 if (ch == DPIO_CH0) 1556 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0; 1557 else 1558 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1; 1559 actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN; 1560 1561 drm_WARN(&dev_priv->drm, actual != expected, 1562 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n", 1563 !!(actual & DPIO_ALLDL_POWERDOWN), 1564 !!(actual & DPIO_ANYDL_POWERDOWN), 1565 !!(expected & DPIO_ALLDL_POWERDOWN), 1566 !!(expected & DPIO_ANYDL_POWERDOWN), 1567 reg, val); 1568 } 1569 1570 bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, 1571 enum dpio_channel ch, bool override) 1572 { 1573 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; 1574 bool was_override; 1575 1576 mutex_lock(&power_domains->lock); 1577 1578 was_override = dev_priv->display.power.chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1579 1580 if (override == was_override) 1581 goto out; 1582 1583 if (override) 1584 dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1585 else 1586 dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1587 1588 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, 1589 dev_priv->display.power.chv_phy_control); 1590 1591 drm_dbg_kms(&dev_priv->drm, 1592 "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n", 1593 phy, ch, dev_priv->display.power.chv_phy_control); 1594 1595 assert_chv_phy_status(dev_priv); 1596 1597 out: 1598 mutex_unlock(&power_domains->lock); 1599 1600 return was_override; 1601 } 1602 1603 void chv_phy_powergate_lanes(struct intel_encoder *encoder, 1604 bool override, unsigned int mask) 1605 { 1606 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1607 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; 1608 enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder)); 1609 enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder)); 1610 1611 mutex_lock(&power_domains->lock); 1612 1613 dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch); 1614 dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch); 1615 1616 if (override) 1617 dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1618 else 1619 dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1620 1621 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, 1622 dev_priv->display.power.chv_phy_control); 1623 1624 drm_dbg_kms(&dev_priv->drm, 1625 "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n", 1626 phy, ch, mask, dev_priv->display.power.chv_phy_control); 1627 1628 assert_chv_phy_status(dev_priv); 1629 1630 assert_chv_phy_powergate(dev_priv, phy, ch, override, mask); 1631 1632 mutex_unlock(&power_domains->lock); 1633 } 1634 1635 static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv, 1636 struct i915_power_well *power_well) 1637 { 1638 enum pipe pipe = PIPE_A; 1639 bool enabled; 1640 u32 state, ctrl; 1641 1642 vlv_punit_get(dev_priv); 1643 1644 state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe); 1645 /* 1646 * We only ever set the power-on and power-gate states, anything 1647 * else is unexpected. 1648 */ 1649 drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) && 1650 state != DP_SSS_PWR_GATE(pipe)); 1651 enabled = state == DP_SSS_PWR_ON(pipe); 1652 1653 /* 1654 * A transient state at this point would mean some unexpected party 1655 * is poking at the power controls too. 1656 */ 1657 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe); 1658 drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state); 1659 1660 vlv_punit_put(dev_priv); 1661 1662 return enabled; 1663 } 1664 1665 static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv, 1666 struct i915_power_well *power_well, 1667 bool enable) 1668 { 1669 enum pipe pipe = PIPE_A; 1670 u32 state; 1671 u32 ctrl; 1672 1673 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe); 1674 1675 vlv_punit_get(dev_priv); 1676 1677 #define COND \ 1678 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state) 1679 1680 if (COND) 1681 goto out; 1682 1683 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); 1684 ctrl &= ~DP_SSC_MASK(pipe); 1685 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe); 1686 vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl); 1687 1688 if (wait_for(COND, 100)) 1689 drm_err(&dev_priv->drm, 1690 "timeout setting power well state %08x (%08x)\n", 1691 state, 1692 vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM)); 1693 1694 #undef COND 1695 1696 out: 1697 vlv_punit_put(dev_priv); 1698 } 1699 1700 static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv, 1701 struct i915_power_well *power_well) 1702 { 1703 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, 1704 dev_priv->display.power.chv_phy_control); 1705 } 1706 1707 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv, 1708 struct i915_power_well *power_well) 1709 { 1710 chv_set_pipe_power_well(dev_priv, power_well, true); 1711 1712 vlv_display_power_well_init(dev_priv); 1713 } 1714 1715 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv, 1716 struct i915_power_well *power_well) 1717 { 1718 vlv_display_power_well_deinit(dev_priv); 1719 1720 chv_set_pipe_power_well(dev_priv, power_well, false); 1721 } 1722 1723 static void 1724 tgl_tc_cold_request(struct drm_i915_private *i915, bool block) 1725 { 1726 u8 tries = 0; 1727 int ret; 1728 1729 while (1) { 1730 u32 low_val; 1731 u32 high_val = 0; 1732 1733 if (block) 1734 low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ; 1735 else 1736 low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ; 1737 1738 /* 1739 * Spec states that we should timeout the request after 200us 1740 * but the function below will timeout after 500us 1741 */ 1742 ret = snb_pcode_read(&i915->uncore, TGL_PCODE_TCCOLD, &low_val, &high_val); 1743 if (ret == 0) { 1744 if (block && 1745 (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED)) 1746 ret = -EIO; 1747 else 1748 break; 1749 } 1750 1751 if (++tries == 3) 1752 break; 1753 1754 msleep(1); 1755 } 1756 1757 if (ret) 1758 drm_err(&i915->drm, "TC cold %sblock failed\n", 1759 block ? "" : "un"); 1760 else 1761 drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n", 1762 block ? "" : "un"); 1763 } 1764 1765 static void 1766 tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915, 1767 struct i915_power_well *power_well) 1768 { 1769 tgl_tc_cold_request(i915, true); 1770 } 1771 1772 static void 1773 tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915, 1774 struct i915_power_well *power_well) 1775 { 1776 tgl_tc_cold_request(i915, false); 1777 } 1778 1779 static void 1780 tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915, 1781 struct i915_power_well *power_well) 1782 { 1783 if (intel_power_well_refcount(power_well) > 0) 1784 tgl_tc_cold_off_power_well_enable(i915, power_well); 1785 else 1786 tgl_tc_cold_off_power_well_disable(i915, power_well); 1787 } 1788 1789 static bool 1790 tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv, 1791 struct i915_power_well *power_well) 1792 { 1793 /* 1794 * Not the correctly implementation but there is no way to just read it 1795 * from PCODE, so returning count to avoid state mismatch errors 1796 */ 1797 return intel_power_well_refcount(power_well); 1798 } 1799 1800 static void xelpdp_aux_power_well_enable(struct drm_i915_private *dev_priv, 1801 struct i915_power_well *power_well) 1802 { 1803 enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch; 1804 enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); 1805 1806 if (intel_phy_is_tc(dev_priv, phy)) 1807 icl_tc_port_assert_ref_held(dev_priv, power_well, 1808 aux_ch_to_digital_port(dev_priv, aux_ch)); 1809 1810 intel_de_rmw(dev_priv, XELPDP_DP_AUX_CH_CTL(dev_priv, aux_ch), 1811 XELPDP_DP_AUX_CH_CTL_POWER_REQUEST, 1812 XELPDP_DP_AUX_CH_CTL_POWER_REQUEST); 1813 1814 /* 1815 * The power status flag cannot be used to determine whether aux 1816 * power wells have finished powering up. Instead we're 1817 * expected to just wait a fixed 600us after raising the request 1818 * bit. 1819 */ 1820 usleep_range(600, 1200); 1821 } 1822 1823 static void xelpdp_aux_power_well_disable(struct drm_i915_private *dev_priv, 1824 struct i915_power_well *power_well) 1825 { 1826 enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch; 1827 1828 intel_de_rmw(dev_priv, XELPDP_DP_AUX_CH_CTL(dev_priv, aux_ch), 1829 XELPDP_DP_AUX_CH_CTL_POWER_REQUEST, 1830 0); 1831 usleep_range(10, 30); 1832 } 1833 1834 static bool xelpdp_aux_power_well_enabled(struct drm_i915_private *dev_priv, 1835 struct i915_power_well *power_well) 1836 { 1837 enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch; 1838 1839 return intel_de_read(dev_priv, XELPDP_DP_AUX_CH_CTL(dev_priv, aux_ch)) & 1840 XELPDP_DP_AUX_CH_CTL_POWER_STATUS; 1841 } 1842 1843 static void xe2lpd_pica_power_well_enable(struct drm_i915_private *dev_priv, 1844 struct i915_power_well *power_well) 1845 { 1846 intel_de_write(dev_priv, XE2LPD_PICA_PW_CTL, 1847 XE2LPD_PICA_CTL_POWER_REQUEST); 1848 1849 if (intel_de_wait_for_set(dev_priv, XE2LPD_PICA_PW_CTL, 1850 XE2LPD_PICA_CTL_POWER_STATUS, 1)) { 1851 drm_dbg_kms(&dev_priv->drm, "pica power well enable timeout\n"); 1852 1853 drm_WARN(&dev_priv->drm, 1, "Power well PICA timeout when enabled"); 1854 } 1855 } 1856 1857 static void xe2lpd_pica_power_well_disable(struct drm_i915_private *dev_priv, 1858 struct i915_power_well *power_well) 1859 { 1860 intel_de_write(dev_priv, XE2LPD_PICA_PW_CTL, 0); 1861 1862 if (intel_de_wait_for_clear(dev_priv, XE2LPD_PICA_PW_CTL, 1863 XE2LPD_PICA_CTL_POWER_STATUS, 1)) { 1864 drm_dbg_kms(&dev_priv->drm, "pica power well disable timeout\n"); 1865 1866 drm_WARN(&dev_priv->drm, 1, "Power well PICA timeout when disabled"); 1867 } 1868 } 1869 1870 static bool xe2lpd_pica_power_well_enabled(struct drm_i915_private *dev_priv, 1871 struct i915_power_well *power_well) 1872 { 1873 return intel_de_read(dev_priv, XE2LPD_PICA_PW_CTL) & 1874 XE2LPD_PICA_CTL_POWER_STATUS; 1875 } 1876 1877 const struct i915_power_well_ops i9xx_always_on_power_well_ops = { 1878 .sync_hw = i9xx_power_well_sync_hw_noop, 1879 .enable = i9xx_always_on_power_well_noop, 1880 .disable = i9xx_always_on_power_well_noop, 1881 .is_enabled = i9xx_always_on_power_well_enabled, 1882 }; 1883 1884 const struct i915_power_well_ops chv_pipe_power_well_ops = { 1885 .sync_hw = chv_pipe_power_well_sync_hw, 1886 .enable = chv_pipe_power_well_enable, 1887 .disable = chv_pipe_power_well_disable, 1888 .is_enabled = chv_pipe_power_well_enabled, 1889 }; 1890 1891 const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = { 1892 .sync_hw = i9xx_power_well_sync_hw_noop, 1893 .enable = chv_dpio_cmn_power_well_enable, 1894 .disable = chv_dpio_cmn_power_well_disable, 1895 .is_enabled = vlv_power_well_enabled, 1896 }; 1897 1898 const struct i915_power_well_ops i830_pipes_power_well_ops = { 1899 .sync_hw = i830_pipes_power_well_sync_hw, 1900 .enable = i830_pipes_power_well_enable, 1901 .disable = i830_pipes_power_well_disable, 1902 .is_enabled = i830_pipes_power_well_enabled, 1903 }; 1904 1905 static const struct i915_power_well_regs hsw_power_well_regs = { 1906 .bios = HSW_PWR_WELL_CTL1, 1907 .driver = HSW_PWR_WELL_CTL2, 1908 .kvmr = HSW_PWR_WELL_CTL3, 1909 .debug = HSW_PWR_WELL_CTL4, 1910 }; 1911 1912 const struct i915_power_well_ops hsw_power_well_ops = { 1913 .regs = &hsw_power_well_regs, 1914 .sync_hw = hsw_power_well_sync_hw, 1915 .enable = hsw_power_well_enable, 1916 .disable = hsw_power_well_disable, 1917 .is_enabled = hsw_power_well_enabled, 1918 }; 1919 1920 const struct i915_power_well_ops gen9_dc_off_power_well_ops = { 1921 .sync_hw = i9xx_power_well_sync_hw_noop, 1922 .enable = gen9_dc_off_power_well_enable, 1923 .disable = gen9_dc_off_power_well_disable, 1924 .is_enabled = gen9_dc_off_power_well_enabled, 1925 }; 1926 1927 const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = { 1928 .sync_hw = i9xx_power_well_sync_hw_noop, 1929 .enable = bxt_dpio_cmn_power_well_enable, 1930 .disable = bxt_dpio_cmn_power_well_disable, 1931 .is_enabled = bxt_dpio_cmn_power_well_enabled, 1932 }; 1933 1934 const struct i915_power_well_ops vlv_display_power_well_ops = { 1935 .sync_hw = i9xx_power_well_sync_hw_noop, 1936 .enable = vlv_display_power_well_enable, 1937 .disable = vlv_display_power_well_disable, 1938 .is_enabled = vlv_power_well_enabled, 1939 }; 1940 1941 const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = { 1942 .sync_hw = i9xx_power_well_sync_hw_noop, 1943 .enable = vlv_dpio_cmn_power_well_enable, 1944 .disable = vlv_dpio_cmn_power_well_disable, 1945 .is_enabled = vlv_power_well_enabled, 1946 }; 1947 1948 const struct i915_power_well_ops vlv_dpio_power_well_ops = { 1949 .sync_hw = i9xx_power_well_sync_hw_noop, 1950 .enable = vlv_power_well_enable, 1951 .disable = vlv_power_well_disable, 1952 .is_enabled = vlv_power_well_enabled, 1953 }; 1954 1955 static const struct i915_power_well_regs icl_aux_power_well_regs = { 1956 .bios = ICL_PWR_WELL_CTL_AUX1, 1957 .driver = ICL_PWR_WELL_CTL_AUX2, 1958 .debug = ICL_PWR_WELL_CTL_AUX4, 1959 }; 1960 1961 const struct i915_power_well_ops icl_aux_power_well_ops = { 1962 .regs = &icl_aux_power_well_regs, 1963 .sync_hw = hsw_power_well_sync_hw, 1964 .enable = icl_aux_power_well_enable, 1965 .disable = icl_aux_power_well_disable, 1966 .is_enabled = hsw_power_well_enabled, 1967 }; 1968 1969 static const struct i915_power_well_regs icl_ddi_power_well_regs = { 1970 .bios = ICL_PWR_WELL_CTL_DDI1, 1971 .driver = ICL_PWR_WELL_CTL_DDI2, 1972 .debug = ICL_PWR_WELL_CTL_DDI4, 1973 }; 1974 1975 const struct i915_power_well_ops icl_ddi_power_well_ops = { 1976 .regs = &icl_ddi_power_well_regs, 1977 .sync_hw = hsw_power_well_sync_hw, 1978 .enable = hsw_power_well_enable, 1979 .disable = hsw_power_well_disable, 1980 .is_enabled = hsw_power_well_enabled, 1981 }; 1982 1983 const struct i915_power_well_ops tgl_tc_cold_off_ops = { 1984 .sync_hw = tgl_tc_cold_off_power_well_sync_hw, 1985 .enable = tgl_tc_cold_off_power_well_enable, 1986 .disable = tgl_tc_cold_off_power_well_disable, 1987 .is_enabled = tgl_tc_cold_off_power_well_is_enabled, 1988 }; 1989 1990 const struct i915_power_well_ops xelpdp_aux_power_well_ops = { 1991 .sync_hw = i9xx_power_well_sync_hw_noop, 1992 .enable = xelpdp_aux_power_well_enable, 1993 .disable = xelpdp_aux_power_well_disable, 1994 .is_enabled = xelpdp_aux_power_well_enabled, 1995 }; 1996 1997 const struct i915_power_well_ops xe2lpd_pica_power_well_ops = { 1998 .sync_hw = i9xx_power_well_sync_hw_noop, 1999 .enable = xe2lpd_pica_power_well_enable, 2000 .disable = xe2lpd_pica_power_well_disable, 2001 .is_enabled = xe2lpd_pica_power_well_enabled, 2002 }; 2003