/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#include "display/intel_crt.h"
#include "display/intel_dp.h"

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
#include "intel_csr.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dpio_phy.h"
#include "intel_hotplug.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vga.h"

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 enum i915_power_well_id power_well_id);

const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_DISPLAY_CORE:
		return "DISPLAY_CORE";
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_D:
		return "PIPE_D";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
		return "PIPE_D_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_D:
		return "TRANSCODER_D";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
		return "TRANSCODER_VDSC_PW2";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DDI_F_LANES:
		return "PORT_DDI_F_LANES";
	case POWER_DOMAIN_PORT_DDI_G_LANES:
		return "PORT_DDI_G_LANES";
	case POWER_DOMAIN_PORT_DDI_H_LANES:
		return "PORT_DDI_H_LANES";
	case POWER_DOMAIN_PORT_DDI_I_LANES:
		return "PORT_DDI_I_LANES";
	case POWER_DOMAIN_PORT_DDI_A_IO:
		return "PORT_DDI_A_IO";
	case POWER_DOMAIN_PORT_DDI_B_IO:
		return "PORT_DDI_B_IO";
	case POWER_DOMAIN_PORT_DDI_C_IO:
		return "PORT_DDI_C_IO";
	case POWER_DOMAIN_PORT_DDI_D_IO:
		return "PORT_DDI_D_IO";
	case POWER_DOMAIN_PORT_DDI_E_IO:
		return "PORT_DDI_E_IO";
	case POWER_DOMAIN_PORT_DDI_F_IO:
		return "PORT_DDI_F_IO";
	case POWER_DOMAIN_PORT_DDI_G_IO:
		return "PORT_DDI_G_IO";
	case POWER_DOMAIN_PORT_DDI_H_IO:
		return "PORT_DDI_H_IO";
	case POWER_DOMAIN_PORT_DDI_I_IO:
		return "PORT_DDI_I_IO";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_AUX_E:
		return "AUX_E";
	case POWER_DOMAIN_AUX_F:
		return "AUX_F";
	case POWER_DOMAIN_AUX_G:
		return "AUX_G";
	case POWER_DOMAIN_AUX_H:
		return "AUX_H";
	case POWER_DOMAIN_AUX_I:
		return "AUX_I";
	case POWER_DOMAIN_AUX_IO_A:
		return "AUX_IO_A";
	case POWER_DOMAIN_AUX_C_TBT:
		return "AUX_C_TBT";
	case POWER_DOMAIN_AUX_D_TBT:
		return "AUX_D_TBT";
	case POWER_DOMAIN_AUX_E_TBT:
		return "AUX_E_TBT";
	case POWER_DOMAIN_AUX_F_TBT:
		return "AUX_F_TBT";
	case POWER_DOMAIN_AUX_G_TBT:
		return "AUX_G_TBT";
	case POWER_DOMAIN_AUX_H_TBT:
		return "AUX_H_TBT";
	case POWER_DOMAIN_AUX_I_TBT:
		return "AUX_I_TBT";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	case POWER_DOMAIN_GT_IRQ:
		return "GT_IRQ";
	case POWER_DOMAIN_DPLL_DC_OFF:
		return "DPLL_DC_OFF";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}

static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name);
	power_well->desc->ops->enable(dev_priv, power_well);
	power_well->hw_enabled = true;
}

static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name);
	power_well->hw_enabled = false;
	power_well->desc->ops->disable(dev_priv, power_well);
}

static void intel_power_well_get(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	if (!power_well->count++)
		intel_power_well_enable(dev_priv, power_well);
}

static void intel_power_well_put(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	WARN(!power_well->count, "Use count on power well %s is already zero",
	     power_well->desc->name);

	if (!--power_well->count)
		intel_power_well_disable(dev_priv, power_well);
}

/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_well *power_well;
	bool is_enabled;

	if (dev_priv->runtime_pm.suspended)
		return false;

	is_enabled = true;

	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
		if (power_well->desc->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}
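/*
 * Illustrative sketch (not part of the driver): a hardware state readout
 * helper would typically guard its register access like this, assuming the
 * relevant modeset locks are already held:
 *
 *	if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE_A))
 *		val = I915_READ(PIPECONF(PIPE_A));
 *
 * Outside of readout and error capture paths, code should instead hold an
 * explicit reference via intel_display_power_get()/intel_display_power_put()
 * so the wells backing the domain can't be turned off underneath it.
 */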
/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask, bool has_vga)
{
	if (has_vga)
		intel_vga_reset_io_mem(dev_priv);

	if (irq_pipe_mask)
		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
}

static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask)
{
	if (irq_pipe_mask)
		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}

static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;

	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
	if (intel_de_wait_for_set(dev_priv, regs->driver,
				  HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
		DRM_DEBUG_KMS("%s power well enable timeout\n",
			      power_well->desc->name);

		/* An AUX timeout is expected if the TBT DP tunnel is down. */
		WARN_ON(!power_well->desc->hsw.is_tc_tbt);
	}
}

static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
				     const struct i915_power_well_regs *regs,
				     int pw_idx)
{
	u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 ret;

	ret = I915_READ(regs->bios) & req_mask ? 1 : 0;
	ret |= I915_READ(regs->driver) & req_mask ? 2 : 0;
	if (regs->kvmr.reg)
		ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0;
	ret |= I915_READ(regs->debug) & req_mask ? 8 : 0;

	return ret;
}
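/*
 * Worked example (illustrative): hsw_power_well_requesters() returning 0x5
 * means the BIOS (bit 0) and KVMr (bit 2) request registers still have the
 * well's request bit set, so the well stays on even after the driver clears
 * its own request:
 *
 *	reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx);
 *	if (reqs & ~2)
 *		;	// some other agent (BIOS/KVMr/DEBUG) holds the well on
 */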
static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	bool disabled;
	u32 reqs;

	/*
	 * Bspec doesn't require waiting for PWs to get disabled, but we still
	 * do this out of paranoia. The known cases where a PW will be forced
	 * on:
	 * - a KVMR request on any power well via the KVMR request register
	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
	 *   DEBUG request registers
	 * Skip the wait in case any of the request bits are set and print a
	 * diagnostic message.
	 */
	wait_for((disabled = !(I915_READ(regs->driver) &
			       HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
		 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
	if (disabled)
		return;

	DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
		      power_well->desc->name,
		      !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}

static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
					   enum skl_power_gate pg)
{
	/* Timeout 5us for PG#0, for other PGs 1us */
	WARN_ON(intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
				      SKL_FUSE_PG_DIST_STATUS(pg), 1));
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	bool wait_fuses = power_well->desc->hsw.has_fuses;
	enum skl_power_gate uninitialized_var(pg);
	u32 val;

	if (wait_fuses) {
		pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
						 SKL_PW_CTL_IDX_TO_PG(pw_idx);
		/*
		 * For PW1 we have to wait both for the PW0/PG0 fuse state
		 * before enabling the power well and PW1/PG1's own fuse
		 * state after the enabling. For all other power wells with
		 * fuses we only have to wait for that PW/PG's fuse state
		 * after the enabling.
		 */
		if (pg == SKL_PG1)
			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
	}

	val = I915_READ(regs->driver);
	I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
	hsw_wait_for_power_well_enable(dev_priv, power_well);

	/* Display WA #1178: cnl */
	if (IS_CANNONLAKE(dev_priv) &&
	    pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
	    pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
		val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx));
		val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
		I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val);
	}

	if (wait_fuses)
		gen9_wait_for_power_well_fuses(dev_priv, pg);

	hsw_power_well_post_enable(dev_priv,
				   power_well->desc->hsw.irq_pipe_mask,
				   power_well->desc->hsw.has_vga);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 val;

	hsw_power_well_pre_disable(dev_priv,
				   power_well->desc->hsw.irq_pipe_mask);

	val = I915_READ(regs->driver);
	I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
	hsw_wait_for_power_well_disable(dev_priv, power_well);
}

#define ICL_AUX_PW_TO_PHY(pw_idx)	((pw_idx) - ICL_PW_CTL_IDX_AUX_A)

static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
	u32 val;
	int wa_idx_max;

	val = I915_READ(regs->driver);
	I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));

	if (INTEL_GEN(dev_priv) < 12) {
		val = I915_READ(ICL_PORT_CL_DW12(phy));
		I915_WRITE(ICL_PORT_CL_DW12(phy), val | ICL_LANE_ENABLE_AUX);
	}

	hsw_wait_for_power_well_enable(dev_priv, power_well);

	/* Display WA #1178: icl, tgl */
	if (IS_TIGERLAKE(dev_priv))
		wa_idx_max = ICL_PW_CTL_IDX_AUX_C;
	else
		wa_idx_max = ICL_PW_CTL_IDX_AUX_B;

	if (!IS_ELKHARTLAKE(dev_priv) &&
	    pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= wa_idx_max &&
	    !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
		val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
		val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
		I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
	}
}

static void
icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
	u32 val;

	if (INTEL_GEN(dev_priv) < 12) {
		val = I915_READ(ICL_PORT_CL_DW12(phy));
		I915_WRITE(ICL_PORT_CL_DW12(phy), val & ~ICL_LANE_ENABLE_AUX);
	}

	val = I915_READ(regs->driver);
	I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));

	hsw_wait_for_power_well_disable(dev_priv, power_well);
}

#define ICL_AUX_PW_TO_CH(pw_idx)	\
	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)

#define ICL_TBT_AUX_PW_TO_CH(pw_idx)	\
	((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)

static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->hsw.idx;

	return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
						 ICL_AUX_PW_TO_CH(pw_idx);
}
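/*
 * Worked example (illustrative): the AUX power well indices map linearly
 * onto AUX channels, so a legacy well gives
 * ICL_AUX_PW_TO_CH(ICL_PW_CTL_IDX_AUX_B) == AUX_CH_B, while a Thunderbolt
 * well gives ICL_TBT_AUX_PW_TO_CH(ICL_PW_CTL_IDX_AUX_TBT1) == AUX_CH_C,
 * AUX_CH_C being the first TBT-capable channel.
 */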
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static u64 async_put_domains_mask(struct i915_power_domains *power_domains);

static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
				      struct i915_power_well *power_well)
{
	int refs = hweight64(power_well->desc->domains &
			     async_put_domains_mask(&dev_priv->power_domains));

	WARN_ON(refs > power_well->count);

	return refs;
}

static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
	struct intel_digital_port *dig_port = NULL;
	struct intel_encoder *encoder;

	/* Bypass the check if all references are released asynchronously */
	if (power_well_async_ref_count(dev_priv, power_well) ==
	    power_well->count)
		return;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

		if (!intel_phy_is_tc(dev_priv, phy))
			continue;

		/* We'll check the MST primary port */
		if (encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		dig_port = enc_to_dig_port(&encoder->base);
		if (WARN_ON(!dig_port))
			continue;

		if (dig_port->aux_ch != aux_ch) {
			dig_port = NULL;
			continue;
		}

		break;
	}

	if (WARN_ON(!dig_port))
		return;

	WARN_ON(!intel_tc_port_ref_held(dig_port));
}

#else

static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
}

#endif

#define TGL_AUX_PW_TO_TC_PORT(pw_idx)	((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)

static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
	u32 val;

	icl_tc_port_assert_ref_held(dev_priv, power_well);

	val = I915_READ(DP_AUX_CH_CTL(aux_ch));
	val &= ~DP_AUX_CH_CTL_TBT_IO;
	if (power_well->desc->hsw.is_tc_tbt)
		val |= DP_AUX_CH_CTL_TBT_IO;
	I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);

	hsw_power_well_enable(dev_priv, power_well);

	if (INTEL_GEN(dev_priv) >= 12 && !power_well->desc->hsw.is_tc_tbt) {
		enum tc_port tc_port;

		tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
		I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));

		if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
					  DKL_CMN_UC_DW27_UC_HEALTH, 1))
			DRM_WARN("Timeout waiting for TC uC health\n");
	}
}

static void
icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	icl_tc_port_assert_ref_held(dev_priv, power_well);

	hsw_power_well_disable(dev_priv, power_well);
}

/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	enum i915_power_well_id id = power_well->desc->id;
	int pw_idx = power_well->desc->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
		   HSW_PWR_WELL_CTL_STATE(pw_idx);
	u32 val;

	val = I915_READ(regs->driver);

	/*
	 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
	 * and the MISC_IO PW will not be restored, so check instead for the
	 * BIOS's own request bits, which are forced-on for these power wells
	 * when exiting DC5/6.
	 */
	if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
		val |= I915_READ(regs->bios);

	return (val & mask) == mask;
}

static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		  "DC9 already programmed to be enabled.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled to enable DC9.\n");
	WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) &
		  HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
		  "Power well 2 on.\n");
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");

	/*
	 * TODO: check for the following to verify the conditions to enter DC9
	 * state are satisfied:
	 * 1] Check relevant display engine registers to verify if mode set
	 * disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled.\n");

	/*
	 * TODO: check for the following to verify DC9 state was indeed
	 * entered before programming to disable it:
	 * 1] Check relevant display engine registers to verify if mode
	 * set disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}
static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	I915_WRITE(DC_STATE_EN, state);

	/*
	 * It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and dmc keeps returning the old value. Make sure
	 * the write really sticks enough times and also force rewrites until
	 * we are confident that the state is exactly what we want.
	 */
	do {
		v = I915_READ(DC_STATE_EN);

		if (v != state) {
			I915_WRITE(DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
			  state, v);

	/* Most of the time we need one retry; avoid spamming the log */
	if (rewrites > 1)
		DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
			      state, rewrites);
}

static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
{
	u32 mask;

	mask = DC_STATE_EN_UPTO_DC5;

	if (INTEL_GEN(dev_priv) >= 12)
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
					  | DC_STATE_EN_DC9;
	else if (IS_GEN(dev_priv, 11))
		mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
	else if (IS_GEN9_LP(dev_priv))
		mask |= DC_STATE_EN_DC9;
	else
		mask |= DC_STATE_EN_UPTO_DC6;

	return mask;
}

static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);

	DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
		      dev_priv->csr.dc_state, val);
	dev_priv->csr.dc_state = val;
}

/**
 * gen9_set_dc_state - set target display C power state
 * @dev_priv: i915 device instance
 * @state: target DC power state
 * - DC_STATE_DISABLE
 * - DC_STATE_EN_UPTO_DC5
 * - DC_STATE_EN_UPTO_DC6
 * - DC_STATE_EN_DC9
 *
 * Signal to DMC firmware/HW the target DC power state passed in @state.
 * DMC/HW can turn off individual display clocks and power rails when entering
 * a deeper DC power state (higher in number) and turns these back on when
 * exiting that state to a shallower power state (lower in number). The HW
 * will decide when to actually enter a given state on an on-demand basis, for
 * instance depending on the active state of display pipes. The state of
 * display registers backed by affected power rails are saved/restored as
 * needed.
 *
 * Based on the above, enabling a deeper DC power state is asynchronous wrt.
 * requesting it. Disabling a deeper power state is synchronous: for instance
 * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
 * back on and register state is restored. This is guaranteed by the MMIO write
 * to DC_STATE_EN blocking until the state is restored.
 */
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
{
	u32 val;
	u32 mask;

	if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
		state &= dev_priv->csr.allowed_dc_mask;

	val = I915_READ(DC_STATE_EN);
	mask = gen9_dc_mask(dev_priv);
	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
		      val & mask, state);

	/* Check if DMC is ignoring our DC state requests */
	if ((val & mask) != dev_priv->csr.dc_state)
		DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
			  dev_priv->csr.dc_state, val & mask);

	val &= ~mask;
	val |= state;

	gen9_write_dc_state(dev_priv, val);

	dev_priv->csr.dc_state = val & mask;
}
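/*
 * Worked example (illustrative): on a GEN9 big core part gen9_dc_mask() is
 * DC_STATE_EN_UPTO_DC5 | DC_STATE_EN_UPTO_DC6. Moving from DC6 to DC5,
 * gen9_set_dc_state() clears both mask bits from the current register value,
 * ORs in DC_STATE_EN_UPTO_DC5 and lets gen9_write_dc_state() re-issue the
 * write until the DMC stops reporting a stale value.
 */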
static u32
sanitize_target_dc_state(struct drm_i915_private *dev_priv,
			 u32 target_dc_state)
{
	u32 states[] = {
		DC_STATE_EN_UPTO_DC6,
		DC_STATE_EN_UPTO_DC5,
		DC_STATE_EN_DC3CO,
		DC_STATE_DISABLE,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
		if (target_dc_state != states[i])
			continue;

		if (dev_priv->csr.allowed_dc_mask & target_dc_state)
			break;

		target_dc_state = states[i + 1];
	}

	return target_dc_state;
}
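/*
 * Illustrative example: a DC_STATE_EN_UPTO_DC6 request on a platform whose
 * allowed_dc_mask only contains DC_STATE_EN_UPTO_DC5 is demoted one step to
 * DC5 by the loop above; a disallowed DC3CO request falls through to
 * DC_STATE_DISABLE.
 */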
static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
{
	DRM_DEBUG_KMS("Enabling DC3CO\n");
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}

static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
{
	u32 val;

	DRM_DEBUG_KMS("Disabling DC3CO\n");
	val = I915_READ(DC_STATE_EN);
	val &= ~DC_STATE_DC3CO_STATUS;
	I915_WRITE(DC_STATE_EN, val);
	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
	/*
	 * Delay of 200us DC3CO Exit time B.Spec 49196
	 */
	usleep_range(200, 210);
}

static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	DRM_DEBUG_KMS("Enabling DC9\n");
	/*
	 * Power sequencer reset is not needed on
	 * platforms with South Display Engine on PCH,
	 * because PPS registers are always on.
	 */
	if (!HAS_PCH_SPLIT(dev_priv))
		intel_power_sequencer_reset(dev_priv);
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}

static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	DRM_DEBUG_KMS("Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
		  "CSR program storage start is NULL\n");
	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}

static struct i915_power_well *
lookup_power_well(struct drm_i915_private *dev_priv,
		  enum i915_power_well_id power_well_id)
{
	struct i915_power_well *power_well;

	for_each_power_well(dev_priv, power_well)
		if (power_well->desc->id == power_well_id)
			return power_well;

	/*
	 * It's not feasible to add error checking code to the callers since
	 * this condition really shouldn't happen and it doesn't even make sense
	 * to abort things like display initialization sequences. Just return
	 * the first power well and hope the WARN gets reported so we can fix
	 * our driver.
	 */
	WARN(1, "Power well %d not defined for this platform\n", power_well_id);
	return &dev_priv->power_domains.power_wells[0];
}

/**
 * intel_display_power_set_target_dc_state - Set target dc state.
 * @dev_priv: i915 device
 * @state: state which needs to be set as target_dc_state.
 *
 * This function sets the "DC off" power well target_dc_state;
 * based upon this target_dc_state, the "DC off" power well will
 * enable the desired DC state.
 */
void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
					     u32 state)
{
	struct i915_power_well *power_well;
	bool dc_off_enabled;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);

	if (WARN_ON(!power_well))
		goto unlock;

	state = sanitize_target_dc_state(dev_priv, state);

	if (state == dev_priv->csr.target_dc_state)
		goto unlock;

	dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv,
							   power_well);
	/*
	 * If DC off power well is disabled, need to enable and disable the
	 * DC off power well to effect target DC state.
	 */
	if (!dc_off_enabled)
		power_well->desc->ops->enable(dev_priv, power_well);

	dev_priv->csr.target_dc_state = state;

	if (!dc_off_enabled)
		power_well->desc->ops->disable(dev_priv, power_well);

unlock:
	mutex_unlock(&power_domains->lock);
}
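/*
 * Typical usage (illustrative sketch): a feature such as PSR2 that prefers
 * DC3CO over the default deepest state would request
 *
 *	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
 *
 * and restore DC_STATE_EN_UPTO_DC6 when done; requests the platform doesn't
 * support are demoted by sanitize_target_dc_state() above.
 */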
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
							       SKL_DISP_PW_2);

	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");

	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
		  "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(&dev_priv->runtime_pm);

	assert_csr_loaded(dev_priv);
}

static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
			   SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}

static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		  "Backlight is not disabled.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}

static void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	DRM_DEBUG_KMS("Enabling DC6\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
			   SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 bios_req = I915_READ(regs->bios);

	/* Take over the request bit if set by BIOS. */
	if (bios_req & mask) {
		u32 drv_req = I915_READ(regs->driver);

		if (!(drv_req & mask))
			I915_WRITE(regs->driver, drv_req | mask);
		I915_WRITE(regs->bios, bios_req & ~mask);
	}
}

static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
}

static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
}

static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
}

static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

	power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

	if (IS_GEMINILAKE(dev_priv)) {
		power_well = lookup_power_well(dev_priv,
					       GLK_DISP_PW_DPIO_CMN_C);
		if (power_well->count > 0)
			bxt_ddi_phy_verify_state(dev_priv,
						 power_well->desc->bxt.phy);
	}
}

static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	return ((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
		(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
}

static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
	u32 tmp = I915_READ(DBUF_CTL);

	WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
	     (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
	     "Unexpected DBuf power state (0x%08x)\n", tmp);
}

static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
{
	struct intel_cdclk_state cdclk_state = {};

	if (dev_priv->csr.target_dc_state == DC_STATE_EN_DC3CO) {
		tgl_disable_dc3co(dev_priv);
		return;
	}

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
	WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_GEN9_LP(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);

	if (INTEL_GEN(dev_priv) >= 11)
		/*
		 * DMC retains HW context only for port A, the other combo
		 * PHY's HW context for port B is lost after DC transitions,
		 * so we need to restore it manually.
		 */
		intel_combo_phy_init(dev_priv);
}

static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	gen9_disable_dc_states(dev_priv);
}

static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (!dev_priv->csr.dmc_payload)
		return;

	switch (dev_priv->csr.target_dc_state) {
	case DC_STATE_EN_DC3CO:
		tgl_enable_dc3co(dev_priv);
		break;
	case DC_STATE_EN_UPTO_DC6:
		skl_enable_dc6(dev_priv);
		break;
	case DC_STATE_EN_UPTO_DC5:
		gen9_enable_dc5(dev_priv);
		break;
	}
}

static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}

static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
	if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_A);
	if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_B);
}

static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	i830_disable_pipe(dev_priv, PIPE_B);
	i830_disable_pipe(dev_priv, PIPE_A);
}

static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
		I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
}

static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		i830_pipes_power_well_enable(dev_priv, power_well);
	else
		i830_pipes_power_well_disable(dev_priv, power_well);
}

static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	int pw_idx = power_well->desc->vlv.idx;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
			 PUNIT_PWRGT_PWR_GATE(pw_idx);

	vlv_punit_get(dev_priv);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	vlv_punit_put(dev_priv);
}
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->vlv.idx;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);

	vlv_punit_get(dev_priv);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
		state != PUNIT_PWRGT_PWR_GATE(pw_idx));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	vlv_punit_put(dev_priv);

	return enabled;
}

static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	val = I915_READ(DSPCLK_GATE_D);
	val &= DPOUNIT_CLOCK_GATE_DISABLE;
	val |= VRHUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, val);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	I915_WRITE(CBR1_VLV, 0);

	WARN_ON(dev_priv->rawclk_freq == 0);

	I915_WRITE(RAWCLK_FREQ_VLV,
		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
}

static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	intel_vga_redisable_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);

	intel_power_sequencer_reset(dev_priv);

	/* Prevent us from re-enabling polling by accident in late suspend */
	if (!dev_priv->drm.dev->power.is_suspended)
		intel_hpd_poll_init(dev_priv);
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *	a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *	b.	The other bits such as sfr settings / modesel may all
	 *		be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}

#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))

#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
				       phy_status_mask, phy_status, 10))
		DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			  I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
			  phy_status, dev_priv->chv_phy_control);
}

#undef BITS_SET

static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	u32 tmp;

	WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
		     power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
				  PHY_POWERGOOD(phy), 1))
		DRM_ERROR("Display PHY %d did not power up\n", phy);

	vlv_dpio_get(dev_priv);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
	       DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	vlv_dpio_put(dev_priv);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}

static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
		     power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}

static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	vlv_dpio_get(dev_priv);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	vlv_dpio_put(dev_priv);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	WARN(actual != expected,
	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
	     reg, val);
}
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		      phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}

void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));

	mutex_lock(&power_domains->lock);

	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		      phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}
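/*
 * Typical usage (illustrative sketch, modeled on the CHV PHY helpers in
 * intel_dpio_phy.c): encoder code flips the per-channel override around PLL
 * enabling, e.g.
 *
 *	chv_phy_powergate_lanes(encoder, true, mask);
 *	...enable the DPLL and program the lanes...
 *	chv_phy_powergate_lanes(encoder, false, 0x0);
 *
 * with mask encoding the per-lane power-down override bits for the channel;
 * assert_chv_phy_powergate() above cross-checks the resulting lane state.
 */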
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = PIPE_A;
	bool enabled;
	u32 state, ctrl;

	vlv_punit_get(dev_priv);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
	WARN_ON(ctrl << 16 != state);

	vlv_punit_put(dev_priv);

	return enabled;
}

static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = PIPE_A;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	vlv_punit_get(dev_priv);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));

#undef COND

out:
	vlv_punit_put(dev_priv);
}

static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}

static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
{
	return power_domains->async_put_domains[0] |
	       power_domains->async_put_domains[1];
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static bool
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
	return !WARN_ON(power_domains->async_put_domains[0] &
			power_domains->async_put_domains[1]);
}

static bool
__async_put_domains_state_ok(struct i915_power_domains *power_domains)
{
	enum intel_display_power_domain domain;
	bool err = false;

	err |= !assert_async_put_domain_masks_disjoint(power_domains);
	err |= WARN_ON(!!power_domains->async_put_wakeref !=
		       !!__async_put_domains_mask(power_domains));

	for_each_power_domain(domain, __async_put_domains_mask(power_domains))
		err |= WARN_ON(power_domains->domain_use_count[domain] != 1);

	return !err;
}

static void print_power_domains(struct i915_power_domains *power_domains,
				const char *prefix, u64 mask)
{
	enum intel_display_power_domain domain;

	DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask));
	for_each_power_domain(domain, mask)
		DRM_DEBUG_DRIVER("%s use_count %d\n",
				 intel_display_power_domain_str(domain),
				 power_domains->domain_use_count[domain]);
}

static void
print_async_put_domains_state(struct i915_power_domains *power_domains)
{
	DRM_DEBUG_DRIVER("async_put_wakeref %u\n",
			 power_domains->async_put_wakeref);

	print_power_domains(power_domains, "async_put_domains[0]",
			    power_domains->async_put_domains[0]);
	print_power_domains(power_domains, "async_put_domains[1]",
			    power_domains->async_put_domains[1]);
}

static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
	if (!__async_put_domains_state_ok(power_domains))
		print_async_put_domains_state(power_domains);
}

#else

static void
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
}

static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
}

#endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
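/*
 * Note on the async put state used below (illustrative summary): a domain
 * released through the asynchronous put path (not part of this excerpt) is
 * queued in async_put_domains[] with a raw runtime PM wakeref parked in
 * async_put_wakeref, and the delayed async_put_work later drops the
 * reference. If the domain is re-acquired first,
 * intel_display_power_grab_async_put_ref() below steals the queued release
 * instead of touching the power wells.
 */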
1851 1852 static u64 async_put_domains_mask(struct i915_power_domains *power_domains) 1853 { 1854 assert_async_put_domain_masks_disjoint(power_domains); 1855 1856 return __async_put_domains_mask(power_domains); 1857 } 1858 1859 static void 1860 async_put_domains_clear_domain(struct i915_power_domains *power_domains, 1861 enum intel_display_power_domain domain) 1862 { 1863 assert_async_put_domain_masks_disjoint(power_domains); 1864 1865 power_domains->async_put_domains[0] &= ~BIT_ULL(domain); 1866 power_domains->async_put_domains[1] &= ~BIT_ULL(domain); 1867 } 1868 1869 static bool 1870 intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv, 1871 enum intel_display_power_domain domain) 1872 { 1873 struct i915_power_domains *power_domains = &dev_priv->power_domains; 1874 bool ret = false; 1875 1876 if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain))) 1877 goto out_verify; 1878 1879 async_put_domains_clear_domain(power_domains, domain); 1880 1881 ret = true; 1882 1883 if (async_put_domains_mask(power_domains)) 1884 goto out_verify; 1885 1886 cancel_delayed_work(&power_domains->async_put_work); 1887 intel_runtime_pm_put_raw(&dev_priv->runtime_pm, 1888 fetch_and_zero(&power_domains->async_put_wakeref)); 1889 out_verify: 1890 verify_async_put_domains_state(power_domains); 1891 1892 return ret; 1893 } 1894 1895 static void 1896 __intel_display_power_get_domain(struct drm_i915_private *dev_priv, 1897 enum intel_display_power_domain domain) 1898 { 1899 struct i915_power_domains *power_domains = &dev_priv->power_domains; 1900 struct i915_power_well *power_well; 1901 1902 if (intel_display_power_grab_async_put_ref(dev_priv, domain)) 1903 return; 1904 1905 for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain)) 1906 intel_power_well_get(dev_priv, power_well); 1907 1908 power_domains->domain_use_count[domain]++; 1909 } 1910 1911 /** 1912 * intel_display_power_get - grab a power domain reference 1913 * @dev_priv: i915 device instance 1914 * @domain: power domain to reference 1915 * 1916 * This function grabs a power domain reference for @domain and ensures that the 1917 * power domain and all its parents are powered up. Therefore users should only 1918 * grab a reference to the innermost power domain they need. 1919 * 1920 * Any power domain reference obtained by this function must have a symmetric 1921 * call to intel_display_power_put() to release the reference again. 1922 */ 1923 intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv, 1924 enum intel_display_power_domain domain) 1925 { 1926 struct i915_power_domains *power_domains = &dev_priv->power_domains; 1927 intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 1928 1929 mutex_lock(&power_domains->lock); 1930 __intel_display_power_get_domain(dev_priv, domain); 1931 mutex_unlock(&power_domains->lock); 1932 1933 return wakeref; 1934 } 1935 1936 /** 1937 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain 1938 * @dev_priv: i915 device instance 1939 * @domain: power domain to reference 1940 * 1941 * This function grabs a power domain reference for @domain and ensures that the 1942 * power domain and all its parents are powered up. Therefore users should only 1943 * grab a reference to the innermost power domain they need. 1944 * 1945 * Any power domain reference obtained by this function must have a symmetric 1946 * call to intel_display_power_put() to release the reference again. 
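 *
 * Unlike intel_display_power_get(), the reference is grabbed only if the
 * device is already awake and @domain is already enabled; otherwise nothing
 * is done and 0 is returned instead of a wakeref.
 *
 * Returns:
 * A wakeref to be released again with intel_display_power_put(), or 0 if
 * @domain was not enabled.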
1947 */ 1948 intel_wakeref_t 1949 intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, 1950 enum intel_display_power_domain domain) 1951 { 1952 struct i915_power_domains *power_domains = &dev_priv->power_domains; 1953 intel_wakeref_t wakeref; 1954 bool is_enabled; 1955 1956 wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm); 1957 if (!wakeref) 1958 return 0; 1959 1960 mutex_lock(&power_domains->lock); 1961 1962 if (__intel_display_power_is_enabled(dev_priv, domain)) { 1963 __intel_display_power_get_domain(dev_priv, domain); 1964 is_enabled = true; 1965 } else { 1966 is_enabled = false; 1967 } 1968 1969 mutex_unlock(&power_domains->lock); 1970 1971 if (!is_enabled) { 1972 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 1973 wakeref = 0; 1974 } 1975 1976 return wakeref; 1977 } 1978 1979 static void 1980 __intel_display_power_put_domain(struct drm_i915_private *dev_priv, 1981 enum intel_display_power_domain domain) 1982 { 1983 struct i915_power_domains *power_domains; 1984 struct i915_power_well *power_well; 1985 const char *name = intel_display_power_domain_str(domain); 1986 1987 power_domains = &dev_priv->power_domains; 1988 1989 WARN(!power_domains->domain_use_count[domain], 1990 "Use count on domain %s is already zero\n", 1991 name); 1992 WARN(async_put_domains_mask(power_domains) & BIT_ULL(domain), 1993 "Async disabling of domain %s is pending\n", 1994 name); 1995 1996 power_domains->domain_use_count[domain]--; 1997 1998 for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) 1999 intel_power_well_put(dev_priv, power_well); 2000 } 2001 2002 static void __intel_display_power_put(struct drm_i915_private *dev_priv, 2003 enum intel_display_power_domain domain) 2004 { 2005 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2006 2007 mutex_lock(&power_domains->lock); 2008 __intel_display_power_put_domain(dev_priv, domain); 2009 mutex_unlock(&power_domains->lock); 2010 } 2011 2012 /** 2013 * intel_display_power_put_unchecked - release an unchecked power domain reference 2014 * @dev_priv: i915 device instance 2015 * @domain: power domain to put the reference for 2016 * 2017 * This function drops the power domain reference obtained by 2018 * intel_display_power_get() and might power down the corresponding hardware 2019 * block right away if this is the last reference. 2020 * 2021 * This function exists only for historical reasons and should be avoided in 2022 * new code, as the correctness of its use cannot be checked. Always use 2023 * intel_display_power_put() instead.
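 *
 * An illustrative (hypothetical) unchecked usage, shown only to contrast
 * with the wakeref-tracking intel_display_power_put() variant:
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 *	... access the pipe A hardware ...
 *	intel_display_power_put_unchecked(dev_priv, POWER_DOMAIN_PIPE_A);
 *
 * Note how the wakeref returned by intel_display_power_get() is simply
 * dropped here, which is exactly what makes this variant unverifiable.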
2024 */ 2025 void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv, 2026 enum intel_display_power_domain domain) 2027 { 2028 __intel_display_power_put(dev_priv, domain); 2029 intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); 2030 } 2031 2032 static void 2033 queue_async_put_domains_work(struct i915_power_domains *power_domains, 2034 intel_wakeref_t wakeref) 2035 { 2036 WARN_ON(power_domains->async_put_wakeref); 2037 power_domains->async_put_wakeref = wakeref; 2038 WARN_ON(!queue_delayed_work(system_unbound_wq, 2039 &power_domains->async_put_work, 2040 msecs_to_jiffies(100))); 2041 } 2042 2043 static void 2044 release_async_put_domains(struct i915_power_domains *power_domains, u64 mask) 2045 { 2046 struct drm_i915_private *dev_priv = 2047 container_of(power_domains, struct drm_i915_private, 2048 power_domains); 2049 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; 2050 enum intel_display_power_domain domain; 2051 intel_wakeref_t wakeref; 2052 2053 /* 2054 * The caller must already hold a raw wakeref; upgrade that to a proper 2055 * wakeref to make the state checker happy about the HW access during 2056 * power well disabling. 2057 */ 2058 assert_rpm_raw_wakeref_held(rpm); 2059 wakeref = intel_runtime_pm_get(rpm); 2060 2061 for_each_power_domain(domain, mask) { 2062 /* Clear before put, so put's sanity check is happy. */ 2063 async_put_domains_clear_domain(power_domains, domain); 2064 __intel_display_power_put_domain(dev_priv, domain); 2065 } 2066 2067 intel_runtime_pm_put(rpm, wakeref); 2068 } 2069 2070 static void 2071 intel_display_power_put_async_work(struct work_struct *work) 2072 { 2073 struct drm_i915_private *dev_priv = 2074 container_of(work, struct drm_i915_private, 2075 power_domains.async_put_work.work); 2076 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2077 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; 2078 intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm); 2079 intel_wakeref_t old_work_wakeref = 0; 2080 2081 mutex_lock(&power_domains->lock); 2082 2083 /* 2084 * Bail out if all the domain refs pending to be released were grabbed 2085 * by subsequent gets or a flush_work. 2086 */ 2087 old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref); 2088 if (!old_work_wakeref) 2089 goto out_verify; 2090 2091 release_async_put_domains(power_domains, 2092 power_domains->async_put_domains[0]); 2093 2094 /* Requeue the work if more domains were async put meanwhile. */ 2095 if (power_domains->async_put_domains[1]) { 2096 power_domains->async_put_domains[0] = 2097 fetch_and_zero(&power_domains->async_put_domains[1]); 2098 queue_async_put_domains_work(power_domains, 2099 fetch_and_zero(&new_work_wakeref)); 2100 } 2101 2102 out_verify: 2103 verify_async_put_domains_state(power_domains); 2104 2105 mutex_unlock(&power_domains->lock); 2106 2107 if (old_work_wakeref) 2108 intel_runtime_pm_put_raw(rpm, old_work_wakeref); 2109 if (new_work_wakeref) 2110 intel_runtime_pm_put_raw(rpm, new_work_wakeref); 2111 } 2112 2113 /** 2114 * intel_display_power_put_async - release a power domain reference asynchronously 2115 * @i915: i915 device instance 2116 * @domain: power domain to put the reference for 2117 * @wakeref: wakeref acquired for the reference that is being released 2118 * 2119 * This function drops the power domain reference obtained by 2120 * intel_display_power_get*() and schedules a work item to power down the 2121 * corresponding hardware block if this is the last reference.
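 *
 * The actual power down is deferred (by roughly 100 ms, see
 * queue_async_put_domains_work()), so a power get for the same domain that
 * follows quickly enough can cancel it and spare a needless off/on cycle
 * of the underlying power wells.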
2122 */ 2123 void __intel_display_power_put_async(struct drm_i915_private *i915, 2124 enum intel_display_power_domain domain, 2125 intel_wakeref_t wakeref) 2126 { 2127 struct i915_power_domains *power_domains = &i915->power_domains; 2128 struct intel_runtime_pm *rpm = &i915->runtime_pm; 2129 intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm); 2130 2131 mutex_lock(&power_domains->lock); 2132 2133 if (power_domains->domain_use_count[domain] > 1) { 2134 __intel_display_power_put_domain(i915, domain); 2135 2136 goto out_verify; 2137 } 2138 2139 WARN_ON(power_domains->domain_use_count[domain] != 1); 2140 2141 /* Let a pending work requeue itself or queue a new one. */ 2142 if (power_domains->async_put_wakeref) { 2143 power_domains->async_put_domains[1] |= BIT_ULL(domain); 2144 } else { 2145 power_domains->async_put_domains[0] |= BIT_ULL(domain); 2146 queue_async_put_domains_work(power_domains, 2147 fetch_and_zero(&work_wakeref)); 2148 } 2149 2150 out_verify: 2151 verify_async_put_domains_state(power_domains); 2152 2153 mutex_unlock(&power_domains->lock); 2154 2155 if (work_wakeref) 2156 intel_runtime_pm_put_raw(rpm, work_wakeref); 2157 2158 intel_runtime_pm_put(rpm, wakeref); 2159 } 2160 2161 /** 2162 * intel_display_power_flush_work - flushes the async display power disabling work 2163 * @i915: i915 device instance 2164 * 2165 * Flushes any pending work that was scheduled by a preceding 2166 * intel_display_power_put_async() call, completing the disabling of the 2167 * corresponding power domains. 2168 * 2169 * Note that the work handler function may still be running after this 2170 * function returns; to ensure that the work handler isn't running use 2171 * intel_display_power_flush_work_sync() instead. 2172 */ 2173 void intel_display_power_flush_work(struct drm_i915_private *i915) 2174 { 2175 struct i915_power_domains *power_domains = &i915->power_domains; 2176 intel_wakeref_t work_wakeref; 2177 2178 mutex_lock(&power_domains->lock); 2179 2180 work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref); 2181 if (!work_wakeref) 2182 goto out_verify; 2183 2184 release_async_put_domains(power_domains, 2185 async_put_domains_mask(power_domains)); 2186 cancel_delayed_work(&power_domains->async_put_work); 2187 2188 out_verify: 2189 verify_async_put_domains_state(power_domains); 2190 2191 mutex_unlock(&power_domains->lock); 2192 2193 if (work_wakeref) 2194 intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref); 2195 } 2196 2197 /** 2198 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work 2199 * @i915: i915 device instance 2200 * 2201 * Like intel_display_power_flush_work(), but also ensure that the work 2202 * handler function is not running any more when this function returns. 
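 *
 * May sleep in cancel_delayed_work_sync(), so this must only be called
 * from process context.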
2203 */ 2204 static void 2205 intel_display_power_flush_work_sync(struct drm_i915_private *i915) 2206 { 2207 struct i915_power_domains *power_domains = &i915->power_domains; 2208 2209 intel_display_power_flush_work(i915); 2210 cancel_delayed_work_sync(&power_domains->async_put_work); 2211 2212 verify_async_put_domains_state(power_domains); 2213 2214 WARN_ON(power_domains->async_put_wakeref); 2215 } 2216 2217 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 2218 /** 2219 * intel_display_power_put - release a power domain reference 2220 * @dev_priv: i915 device instance 2221 * @domain: power domain to reference 2222 * @wakeref: wakeref acquired for the reference that is being released 2223 * 2224 * This function drops the power domain reference obtained by 2225 * intel_display_power_get() and might power down the corresponding hardware 2226 * block right away if this is the last reference. 2227 */ 2228 void intel_display_power_put(struct drm_i915_private *dev_priv, 2229 enum intel_display_power_domain domain, 2230 intel_wakeref_t wakeref) 2231 { 2232 __intel_display_power_put(dev_priv, domain); 2233 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 2234 } 2235 #endif 2236 2237 #define I830_PIPES_POWER_DOMAINS ( \ 2238 BIT_ULL(POWER_DOMAIN_PIPE_A) | \ 2239 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2240 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 2241 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2242 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2243 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2244 BIT_ULL(POWER_DOMAIN_INIT)) 2245 2246 #define VLV_DISPLAY_POWER_DOMAINS ( \ 2247 BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \ 2248 BIT_ULL(POWER_DOMAIN_PIPE_A) | \ 2249 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2250 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 2251 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2252 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2253 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2254 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2255 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2256 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ 2257 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \ 2258 BIT_ULL(POWER_DOMAIN_VGA) | \ 2259 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2260 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2261 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2262 BIT_ULL(POWER_DOMAIN_GMBUS) | \ 2263 BIT_ULL(POWER_DOMAIN_INIT)) 2264 2265 #define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \ 2266 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2267 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2268 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \ 2269 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2270 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2271 BIT_ULL(POWER_DOMAIN_INIT)) 2272 2273 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \ 2274 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2275 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2276 BIT_ULL(POWER_DOMAIN_INIT)) 2277 2278 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \ 2279 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2280 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2281 BIT_ULL(POWER_DOMAIN_INIT)) 2282 2283 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \ 2284 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2285 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2286 BIT_ULL(POWER_DOMAIN_INIT)) 2287 2288 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \ 2289 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2290 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2291 BIT_ULL(POWER_DOMAIN_INIT)) 2292 2293 #define CHV_DISPLAY_POWER_DOMAINS ( \ 2294 BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \ 2295 BIT_ULL(POWER_DOMAIN_PIPE_A) | \ 2296 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2297 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2298 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 2299 
BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2300 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2301 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2302 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2303 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2304 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2305 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2306 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2307 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ 2308 BIT_ULL(POWER_DOMAIN_VGA) | \ 2309 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2310 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2311 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2312 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2313 BIT_ULL(POWER_DOMAIN_GMBUS) | \ 2314 BIT_ULL(POWER_DOMAIN_INIT)) 2315 2316 #define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \ 2317 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2318 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2319 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2320 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2321 BIT_ULL(POWER_DOMAIN_INIT)) 2322 2323 #define CHV_DPIO_CMN_D_POWER_DOMAINS ( \ 2324 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2325 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2326 BIT_ULL(POWER_DOMAIN_INIT)) 2327 2328 #define HSW_DISPLAY_POWER_DOMAINS ( \ 2329 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2330 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2331 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 2332 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2333 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2334 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2335 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2336 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2337 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2338 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2339 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2340 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \ 2341 BIT_ULL(POWER_DOMAIN_VGA) | \ 2342 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2343 BIT_ULL(POWER_DOMAIN_INIT)) 2344 2345 #define BDW_DISPLAY_POWER_DOMAINS ( \ 2346 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2347 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2348 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2349 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2350 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2351 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2352 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2353 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2354 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2355 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2356 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \ 2357 BIT_ULL(POWER_DOMAIN_VGA) | \ 2358 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2359 BIT_ULL(POWER_DOMAIN_INIT)) 2360 2361 #define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 2362 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2363 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2364 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2365 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2366 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2367 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2368 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2369 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2370 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2371 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2372 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ 2373 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2374 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2375 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2376 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2377 BIT_ULL(POWER_DOMAIN_VGA) | \ 2378 BIT_ULL(POWER_DOMAIN_INIT)) 2379 #define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \ 2380 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \ 2381 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \ 2382 BIT_ULL(POWER_DOMAIN_INIT)) 2383 #define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \ 2384 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \ 2385 BIT_ULL(POWER_DOMAIN_INIT)) 2386 
#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \ 2387 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \ 2388 BIT_ULL(POWER_DOMAIN_INIT)) 2389 #define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \ 2390 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \ 2391 BIT_ULL(POWER_DOMAIN_INIT)) 2392 #define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2393 SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 2394 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ 2395 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2396 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2397 BIT_ULL(POWER_DOMAIN_INIT)) 2398 2399 #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 2400 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2401 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2402 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2403 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2404 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2405 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2406 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2407 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2408 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2409 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2410 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2411 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2412 BIT_ULL(POWER_DOMAIN_VGA) | \ 2413 BIT_ULL(POWER_DOMAIN_INIT)) 2414 #define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2415 BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 2416 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ 2417 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2418 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2419 BIT_ULL(POWER_DOMAIN_GMBUS) | \ 2420 BIT_ULL(POWER_DOMAIN_INIT)) 2421 #define BXT_DPIO_CMN_A_POWER_DOMAINS ( \ 2422 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \ 2423 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2424 BIT_ULL(POWER_DOMAIN_INIT)) 2425 #define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \ 2426 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2427 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2428 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2429 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2430 BIT_ULL(POWER_DOMAIN_INIT)) 2431 2432 #define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 2433 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2434 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2435 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2436 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2437 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2438 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2439 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2440 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2441 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2442 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2443 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2444 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2445 BIT_ULL(POWER_DOMAIN_VGA) | \ 2446 BIT_ULL(POWER_DOMAIN_INIT)) 2447 #define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS ( \ 2448 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO)) 2449 #define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \ 2450 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO)) 2451 #define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \ 2452 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO)) 2453 #define GLK_DPIO_CMN_A_POWER_DOMAINS ( \ 2454 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \ 2455 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2456 BIT_ULL(POWER_DOMAIN_INIT)) 2457 #define GLK_DPIO_CMN_B_POWER_DOMAINS ( \ 2458 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2459 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2460 BIT_ULL(POWER_DOMAIN_INIT)) 2461 #define GLK_DPIO_CMN_C_POWER_DOMAINS ( \ 2462 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2463 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2464 BIT_ULL(POWER_DOMAIN_INIT)) 2465 #define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \ 2466 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2467 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ 2468 BIT_ULL(POWER_DOMAIN_INIT)) 2469 #define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \ 2470 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2471 BIT_ULL(POWER_DOMAIN_INIT)) 2472 #define 
GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \ 2473 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2474 BIT_ULL(POWER_DOMAIN_INIT)) 2475 #define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2476 GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 2477 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ 2478 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2479 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2480 BIT_ULL(POWER_DOMAIN_GMBUS) | \ 2481 BIT_ULL(POWER_DOMAIN_INIT)) 2482 2483 #define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 2484 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2485 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2486 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2487 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2488 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2489 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2490 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2491 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2492 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2493 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2494 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \ 2495 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2496 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2497 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2498 BIT_ULL(POWER_DOMAIN_AUX_F) | \ 2499 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2500 BIT_ULL(POWER_DOMAIN_VGA) | \ 2501 BIT_ULL(POWER_DOMAIN_INIT)) 2502 #define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS ( \ 2503 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \ 2504 BIT_ULL(POWER_DOMAIN_INIT)) 2505 #define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS ( \ 2506 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \ 2507 BIT_ULL(POWER_DOMAIN_INIT)) 2508 #define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS ( \ 2509 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \ 2510 BIT_ULL(POWER_DOMAIN_INIT)) 2511 #define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS ( \ 2512 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \ 2513 BIT_ULL(POWER_DOMAIN_INIT)) 2514 #define CNL_DISPLAY_AUX_A_POWER_DOMAINS ( \ 2515 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2516 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ 2517 BIT_ULL(POWER_DOMAIN_INIT)) 2518 #define CNL_DISPLAY_AUX_B_POWER_DOMAINS ( \ 2519 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2520 BIT_ULL(POWER_DOMAIN_INIT)) 2521 #define CNL_DISPLAY_AUX_C_POWER_DOMAINS ( \ 2522 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2523 BIT_ULL(POWER_DOMAIN_INIT)) 2524 #define CNL_DISPLAY_AUX_D_POWER_DOMAINS ( \ 2525 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2526 BIT_ULL(POWER_DOMAIN_INIT)) 2527 #define CNL_DISPLAY_AUX_F_POWER_DOMAINS ( \ 2528 BIT_ULL(POWER_DOMAIN_AUX_F) | \ 2529 BIT_ULL(POWER_DOMAIN_INIT)) 2530 #define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS ( \ 2531 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \ 2532 BIT_ULL(POWER_DOMAIN_INIT)) 2533 #define CNL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2534 CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 2535 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ 2536 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2537 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2538 BIT_ULL(POWER_DOMAIN_INIT)) 2539 2540 /* 2541 * ICL PW_0/PG_0 domains (HW/DMC control): 2542 * - PCI 2543 * - clocks except port PLL 2544 * - central power except FBC 2545 * - shared functions except pipe interrupts, pipe MBUS, DBUF registers 2546 * ICL PW_1/PG_1 domains (HW/DMC control): 2547 * - DBUF function 2548 * - PIPE_A and its planes, except VGA 2549 * - transcoder EDP + PSR 2550 * - transcoder DSI 2551 * - DDI_A 2552 * - FBC 2553 */ 2554 #define ICL_PW_4_POWER_DOMAINS ( \ 2555 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2556 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2557 BIT_ULL(POWER_DOMAIN_INIT)) 2558 /* VDSC/joining */ 2559 #define ICL_PW_3_POWER_DOMAINS ( \ 2560 ICL_PW_4_POWER_DOMAINS | \ 2561 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2562 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2563 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2564 
BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2565 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2566 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2567 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2568 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2569 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ 2570 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \ 2571 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2572 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2573 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2574 BIT_ULL(POWER_DOMAIN_AUX_E) | \ 2575 BIT_ULL(POWER_DOMAIN_AUX_F) | \ 2576 BIT_ULL(POWER_DOMAIN_AUX_C_TBT) | \ 2577 BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \ 2578 BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \ 2579 BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \ 2580 BIT_ULL(POWER_DOMAIN_VGA) | \ 2581 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2582 BIT_ULL(POWER_DOMAIN_INIT)) 2583 /* 2584 * - transcoder WD 2585 * - KVMR (HW control) 2586 */ 2587 #define ICL_PW_2_POWER_DOMAINS ( \ 2588 ICL_PW_3_POWER_DOMAINS | \ 2589 BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ 2590 BIT_ULL(POWER_DOMAIN_INIT)) 2591 /* 2592 * - KVMR (HW control) 2593 */ 2594 #define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2595 ICL_PW_2_POWER_DOMAINS | \ 2596 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2597 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2598 BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) | \ 2599 BIT_ULL(POWER_DOMAIN_INIT)) 2600 2601 #define ICL_DDI_IO_A_POWER_DOMAINS ( \ 2602 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO)) 2603 #define ICL_DDI_IO_B_POWER_DOMAINS ( \ 2604 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO)) 2605 #define ICL_DDI_IO_C_POWER_DOMAINS ( \ 2606 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO)) 2607 #define ICL_DDI_IO_D_POWER_DOMAINS ( \ 2608 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO)) 2609 #define ICL_DDI_IO_E_POWER_DOMAINS ( \ 2610 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO)) 2611 #define ICL_DDI_IO_F_POWER_DOMAINS ( \ 2612 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO)) 2613 2614 #define ICL_AUX_A_IO_POWER_DOMAINS ( \ 2615 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ 2616 BIT_ULL(POWER_DOMAIN_AUX_A)) 2617 #define ICL_AUX_B_IO_POWER_DOMAINS ( \ 2618 BIT_ULL(POWER_DOMAIN_AUX_B)) 2619 #define ICL_AUX_C_TC1_IO_POWER_DOMAINS ( \ 2620 BIT_ULL(POWER_DOMAIN_AUX_C)) 2621 #define ICL_AUX_D_TC2_IO_POWER_DOMAINS ( \ 2622 BIT_ULL(POWER_DOMAIN_AUX_D)) 2623 #define ICL_AUX_E_TC3_IO_POWER_DOMAINS ( \ 2624 BIT_ULL(POWER_DOMAIN_AUX_E)) 2625 #define ICL_AUX_F_TC4_IO_POWER_DOMAINS ( \ 2626 BIT_ULL(POWER_DOMAIN_AUX_F)) 2627 #define ICL_AUX_C_TBT1_IO_POWER_DOMAINS ( \ 2628 BIT_ULL(POWER_DOMAIN_AUX_C_TBT)) 2629 #define ICL_AUX_D_TBT2_IO_POWER_DOMAINS ( \ 2630 BIT_ULL(POWER_DOMAIN_AUX_D_TBT)) 2631 #define ICL_AUX_E_TBT3_IO_POWER_DOMAINS ( \ 2632 BIT_ULL(POWER_DOMAIN_AUX_E_TBT)) 2633 #define ICL_AUX_F_TBT4_IO_POWER_DOMAINS ( \ 2634 BIT_ULL(POWER_DOMAIN_AUX_F_TBT)) 2635 2636 #define TGL_PW_5_POWER_DOMAINS ( \ 2637 BIT_ULL(POWER_DOMAIN_PIPE_D) | \ 2638 BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \ 2639 BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \ 2640 BIT_ULL(POWER_DOMAIN_INIT)) 2641 2642 #define TGL_PW_4_POWER_DOMAINS ( \ 2643 TGL_PW_5_POWER_DOMAINS | \ 2644 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2645 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2646 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2647 BIT_ULL(POWER_DOMAIN_INIT)) 2648 2649 #define TGL_PW_3_POWER_DOMAINS ( \ 2650 TGL_PW_4_POWER_DOMAINS | \ 2651 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2652 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2653 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2654 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2655 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ 2656 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \ 2657 BIT_ULL(POWER_DOMAIN_PORT_DDI_G_LANES) | \ 2658 
BIT_ULL(POWER_DOMAIN_PORT_DDI_H_LANES) | \ 2659 BIT_ULL(POWER_DOMAIN_PORT_DDI_I_LANES) | \ 2660 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2661 BIT_ULL(POWER_DOMAIN_AUX_E) | \ 2662 BIT_ULL(POWER_DOMAIN_AUX_F) | \ 2663 BIT_ULL(POWER_DOMAIN_AUX_G) | \ 2664 BIT_ULL(POWER_DOMAIN_AUX_H) | \ 2665 BIT_ULL(POWER_DOMAIN_AUX_I) | \ 2666 BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \ 2667 BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \ 2668 BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \ 2669 BIT_ULL(POWER_DOMAIN_AUX_G_TBT) | \ 2670 BIT_ULL(POWER_DOMAIN_AUX_H_TBT) | \ 2671 BIT_ULL(POWER_DOMAIN_AUX_I_TBT) | \ 2672 BIT_ULL(POWER_DOMAIN_VGA) | \ 2673 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2674 BIT_ULL(POWER_DOMAIN_INIT)) 2675 2676 #define TGL_PW_2_POWER_DOMAINS ( \ 2677 TGL_PW_3_POWER_DOMAINS | \ 2678 BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ 2679 BIT_ULL(POWER_DOMAIN_INIT)) 2680 2681 #define TGL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2682 TGL_PW_2_POWER_DOMAINS | \ 2683 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2684 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2685 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2686 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2687 BIT_ULL(POWER_DOMAIN_INIT)) 2688 2689 #define TGL_DDI_IO_D_TC1_POWER_DOMAINS ( \ 2690 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO)) 2691 #define TGL_DDI_IO_E_TC2_POWER_DOMAINS ( \ 2692 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO)) 2693 #define TGL_DDI_IO_F_TC3_POWER_DOMAINS ( \ 2694 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO)) 2695 #define TGL_DDI_IO_G_TC4_POWER_DOMAINS ( \ 2696 BIT_ULL(POWER_DOMAIN_PORT_DDI_G_IO)) 2697 #define TGL_DDI_IO_H_TC5_POWER_DOMAINS ( \ 2698 BIT_ULL(POWER_DOMAIN_PORT_DDI_H_IO)) 2699 #define TGL_DDI_IO_I_TC6_POWER_DOMAINS ( \ 2700 BIT_ULL(POWER_DOMAIN_PORT_DDI_I_IO)) 2701 2702 #define TGL_AUX_A_IO_POWER_DOMAINS ( \ 2703 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ 2704 BIT_ULL(POWER_DOMAIN_AUX_A)) 2705 #define TGL_AUX_B_IO_POWER_DOMAINS ( \ 2706 BIT_ULL(POWER_DOMAIN_AUX_B)) 2707 #define TGL_AUX_C_IO_POWER_DOMAINS ( \ 2708 BIT_ULL(POWER_DOMAIN_AUX_C)) 2709 #define TGL_AUX_D_TC1_IO_POWER_DOMAINS ( \ 2710 BIT_ULL(POWER_DOMAIN_AUX_D)) 2711 #define TGL_AUX_E_TC2_IO_POWER_DOMAINS ( \ 2712 BIT_ULL(POWER_DOMAIN_AUX_E)) 2713 #define TGL_AUX_F_TC3_IO_POWER_DOMAINS ( \ 2714 BIT_ULL(POWER_DOMAIN_AUX_F)) 2715 #define TGL_AUX_G_TC4_IO_POWER_DOMAINS ( \ 2716 BIT_ULL(POWER_DOMAIN_AUX_G)) 2717 #define TGL_AUX_H_TC5_IO_POWER_DOMAINS ( \ 2718 BIT_ULL(POWER_DOMAIN_AUX_H)) 2719 #define TGL_AUX_I_TC6_IO_POWER_DOMAINS ( \ 2720 BIT_ULL(POWER_DOMAIN_AUX_I)) 2721 #define TGL_AUX_D_TBT1_IO_POWER_DOMAINS ( \ 2722 BIT_ULL(POWER_DOMAIN_AUX_D_TBT)) 2723 #define TGL_AUX_E_TBT2_IO_POWER_DOMAINS ( \ 2724 BIT_ULL(POWER_DOMAIN_AUX_E_TBT)) 2725 #define TGL_AUX_F_TBT3_IO_POWER_DOMAINS ( \ 2726 BIT_ULL(POWER_DOMAIN_AUX_F_TBT)) 2727 #define TGL_AUX_G_TBT4_IO_POWER_DOMAINS ( \ 2728 BIT_ULL(POWER_DOMAIN_AUX_G_TBT)) 2729 #define TGL_AUX_H_TBT5_IO_POWER_DOMAINS ( \ 2730 BIT_ULL(POWER_DOMAIN_AUX_H_TBT)) 2731 #define TGL_AUX_I_TBT6_IO_POWER_DOMAINS ( \ 2732 BIT_ULL(POWER_DOMAIN_AUX_I_TBT)) 2733 2734 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = { 2735 .sync_hw = i9xx_power_well_sync_hw_noop, 2736 .enable = i9xx_always_on_power_well_noop, 2737 .disable = i9xx_always_on_power_well_noop, 2738 .is_enabled = i9xx_always_on_power_well_enabled, 2739 }; 2740 2741 static const struct i915_power_well_ops chv_pipe_power_well_ops = { 2742 .sync_hw = i9xx_power_well_sync_hw_noop, 2743 .enable = chv_pipe_power_well_enable, 2744 .disable = chv_pipe_power_well_disable, 2745 .is_enabled = chv_pipe_power_well_enabled, 2746 }; 2747 2748 static const struct i915_power_well_ops 
chv_dpio_cmn_power_well_ops = { 2749 .sync_hw = i9xx_power_well_sync_hw_noop, 2750 .enable = chv_dpio_cmn_power_well_enable, 2751 .disable = chv_dpio_cmn_power_well_disable, 2752 .is_enabled = vlv_power_well_enabled, 2753 }; 2754 2755 static const struct i915_power_well_desc i9xx_always_on_power_well[] = { 2756 { 2757 .name = "always-on", 2758 .always_on = true, 2759 .domains = POWER_DOMAIN_MASK, 2760 .ops = &i9xx_always_on_power_well_ops, 2761 .id = DISP_PW_ID_NONE, 2762 }, 2763 }; 2764 2765 static const struct i915_power_well_ops i830_pipes_power_well_ops = { 2766 .sync_hw = i830_pipes_power_well_sync_hw, 2767 .enable = i830_pipes_power_well_enable, 2768 .disable = i830_pipes_power_well_disable, 2769 .is_enabled = i830_pipes_power_well_enabled, 2770 }; 2771 2772 static const struct i915_power_well_desc i830_power_wells[] = { 2773 { 2774 .name = "always-on", 2775 .always_on = true, 2776 .domains = POWER_DOMAIN_MASK, 2777 .ops = &i9xx_always_on_power_well_ops, 2778 .id = DISP_PW_ID_NONE, 2779 }, 2780 { 2781 .name = "pipes", 2782 .domains = I830_PIPES_POWER_DOMAINS, 2783 .ops = &i830_pipes_power_well_ops, 2784 .id = DISP_PW_ID_NONE, 2785 }, 2786 }; 2787 2788 static const struct i915_power_well_ops hsw_power_well_ops = { 2789 .sync_hw = hsw_power_well_sync_hw, 2790 .enable = hsw_power_well_enable, 2791 .disable = hsw_power_well_disable, 2792 .is_enabled = hsw_power_well_enabled, 2793 }; 2794 2795 static const struct i915_power_well_ops gen9_dc_off_power_well_ops = { 2796 .sync_hw = i9xx_power_well_sync_hw_noop, 2797 .enable = gen9_dc_off_power_well_enable, 2798 .disable = gen9_dc_off_power_well_disable, 2799 .is_enabled = gen9_dc_off_power_well_enabled, 2800 }; 2801 2802 static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = { 2803 .sync_hw = i9xx_power_well_sync_hw_noop, 2804 .enable = bxt_dpio_cmn_power_well_enable, 2805 .disable = bxt_dpio_cmn_power_well_disable, 2806 .is_enabled = bxt_dpio_cmn_power_well_enabled, 2807 }; 2808 2809 static const struct i915_power_well_regs hsw_power_well_regs = { 2810 .bios = HSW_PWR_WELL_CTL1, 2811 .driver = HSW_PWR_WELL_CTL2, 2812 .kvmr = HSW_PWR_WELL_CTL3, 2813 .debug = HSW_PWR_WELL_CTL4, 2814 }; 2815 2816 static const struct i915_power_well_desc hsw_power_wells[] = { 2817 { 2818 .name = "always-on", 2819 .always_on = true, 2820 .domains = POWER_DOMAIN_MASK, 2821 .ops = &i9xx_always_on_power_well_ops, 2822 .id = DISP_PW_ID_NONE, 2823 }, 2824 { 2825 .name = "display", 2826 .domains = HSW_DISPLAY_POWER_DOMAINS, 2827 .ops = &hsw_power_well_ops, 2828 .id = HSW_DISP_PW_GLOBAL, 2829 { 2830 .hsw.regs = &hsw_power_well_regs, 2831 .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, 2832 .hsw.has_vga = true, 2833 }, 2834 }, 2835 }; 2836 2837 static const struct i915_power_well_desc bdw_power_wells[] = { 2838 { 2839 .name = "always-on", 2840 .always_on = true, 2841 .domains = POWER_DOMAIN_MASK, 2842 .ops = &i9xx_always_on_power_well_ops, 2843 .id = DISP_PW_ID_NONE, 2844 }, 2845 { 2846 .name = "display", 2847 .domains = BDW_DISPLAY_POWER_DOMAINS, 2848 .ops = &hsw_power_well_ops, 2849 .id = HSW_DISP_PW_GLOBAL, 2850 { 2851 .hsw.regs = &hsw_power_well_regs, 2852 .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, 2853 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 2854 .hsw.has_vga = true, 2855 }, 2856 }, 2857 }; 2858 2859 static const struct i915_power_well_ops vlv_display_power_well_ops = { 2860 .sync_hw = i9xx_power_well_sync_hw_noop, 2861 .enable = vlv_display_power_well_enable, 2862 .disable = vlv_display_power_well_disable, 2863 .is_enabled = vlv_power_well_enabled, 2864 }; 2865 
2866 static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = { 2867 .sync_hw = i9xx_power_well_sync_hw_noop, 2868 .enable = vlv_dpio_cmn_power_well_enable, 2869 .disable = vlv_dpio_cmn_power_well_disable, 2870 .is_enabled = vlv_power_well_enabled, 2871 }; 2872 2873 static const struct i915_power_well_ops vlv_dpio_power_well_ops = { 2874 .sync_hw = i9xx_power_well_sync_hw_noop, 2875 .enable = vlv_power_well_enable, 2876 .disable = vlv_power_well_disable, 2877 .is_enabled = vlv_power_well_enabled, 2878 }; 2879 2880 static const struct i915_power_well_desc vlv_power_wells[] = { 2881 { 2882 .name = "always-on", 2883 .always_on = true, 2884 .domains = POWER_DOMAIN_MASK, 2885 .ops = &i9xx_always_on_power_well_ops, 2886 .id = DISP_PW_ID_NONE, 2887 }, 2888 { 2889 .name = "display", 2890 .domains = VLV_DISPLAY_POWER_DOMAINS, 2891 .ops = &vlv_display_power_well_ops, 2892 .id = VLV_DISP_PW_DISP2D, 2893 { 2894 .vlv.idx = PUNIT_PWGT_IDX_DISP2D, 2895 }, 2896 }, 2897 { 2898 .name = "dpio-tx-b-01", 2899 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 2900 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 2901 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 2902 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 2903 .ops = &vlv_dpio_power_well_ops, 2904 .id = DISP_PW_ID_NONE, 2905 { 2906 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01, 2907 }, 2908 }, 2909 { 2910 .name = "dpio-tx-b-23", 2911 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 2912 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 2913 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 2914 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 2915 .ops = &vlv_dpio_power_well_ops, 2916 .id = DISP_PW_ID_NONE, 2917 { 2918 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23, 2919 }, 2920 }, 2921 { 2922 .name = "dpio-tx-c-01", 2923 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 2924 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 2925 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 2926 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 2927 .ops = &vlv_dpio_power_well_ops, 2928 .id = DISP_PW_ID_NONE, 2929 { 2930 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01, 2931 }, 2932 }, 2933 { 2934 .name = "dpio-tx-c-23", 2935 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 2936 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 2937 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 2938 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 2939 .ops = &vlv_dpio_power_well_ops, 2940 .id = DISP_PW_ID_NONE, 2941 { 2942 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23, 2943 }, 2944 }, 2945 { 2946 .name = "dpio-common", 2947 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS, 2948 .ops = &vlv_dpio_cmn_power_well_ops, 2949 .id = VLV_DISP_PW_DPIO_CMN_BC, 2950 { 2951 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, 2952 }, 2953 }, 2954 }; 2955 2956 static const struct i915_power_well_desc chv_power_wells[] = { 2957 { 2958 .name = "always-on", 2959 .always_on = true, 2960 .domains = POWER_DOMAIN_MASK, 2961 .ops = &i9xx_always_on_power_well_ops, 2962 .id = DISP_PW_ID_NONE, 2963 }, 2964 { 2965 .name = "display", 2966 /* 2967 * Pipe A power well is the new disp2d well. Pipe B and C 2968 * power wells don't actually exist. Pipe A power well is 2969 * required for any pipe to work. 
2970 */ 2971 .domains = CHV_DISPLAY_POWER_DOMAINS, 2972 .ops = &chv_pipe_power_well_ops, 2973 .id = DISP_PW_ID_NONE, 2974 }, 2975 { 2976 .name = "dpio-common-bc", 2977 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS, 2978 .ops = &chv_dpio_cmn_power_well_ops, 2979 .id = VLV_DISP_PW_DPIO_CMN_BC, 2980 { 2981 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, 2982 }, 2983 }, 2984 { 2985 .name = "dpio-common-d", 2986 .domains = CHV_DPIO_CMN_D_POWER_DOMAINS, 2987 .ops = &chv_dpio_cmn_power_well_ops, 2988 .id = CHV_DISP_PW_DPIO_CMN_D, 2989 { 2990 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D, 2991 }, 2992 }, 2993 }; 2994 2995 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, 2996 enum i915_power_well_id power_well_id) 2997 { 2998 struct i915_power_well *power_well; 2999 bool ret; 3000 3001 power_well = lookup_power_well(dev_priv, power_well_id); 3002 ret = power_well->desc->ops->is_enabled(dev_priv, power_well); 3003 3004 return ret; 3005 } 3006 3007 static const struct i915_power_well_desc skl_power_wells[] = { 3008 { 3009 .name = "always-on", 3010 .always_on = true, 3011 .domains = POWER_DOMAIN_MASK, 3012 .ops = &i9xx_always_on_power_well_ops, 3013 .id = DISP_PW_ID_NONE, 3014 }, 3015 { 3016 .name = "power well 1", 3017 /* Handled by the DMC firmware */ 3018 .always_on = true, 3019 .domains = 0, 3020 .ops = &hsw_power_well_ops, 3021 .id = SKL_DISP_PW_1, 3022 { 3023 .hsw.regs = &hsw_power_well_regs, 3024 .hsw.idx = SKL_PW_CTL_IDX_PW_1, 3025 .hsw.has_fuses = true, 3026 }, 3027 }, 3028 { 3029 .name = "MISC IO power well", 3030 /* Handled by the DMC firmware */ 3031 .always_on = true, 3032 .domains = 0, 3033 .ops = &hsw_power_well_ops, 3034 .id = SKL_DISP_PW_MISC_IO, 3035 { 3036 .hsw.regs = &hsw_power_well_regs, 3037 .hsw.idx = SKL_PW_CTL_IDX_MISC_IO, 3038 }, 3039 }, 3040 { 3041 .name = "DC off", 3042 .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS, 3043 .ops = &gen9_dc_off_power_well_ops, 3044 .id = SKL_DISP_DC_OFF, 3045 }, 3046 { 3047 .name = "power well 2", 3048 .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS, 3049 .ops = &hsw_power_well_ops, 3050 .id = SKL_DISP_PW_2, 3051 { 3052 .hsw.regs = &hsw_power_well_regs, 3053 .hsw.idx = SKL_PW_CTL_IDX_PW_2, 3054 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 3055 .hsw.has_vga = true, 3056 .hsw.has_fuses = true, 3057 }, 3058 }, 3059 { 3060 .name = "DDI A/E IO power well", 3061 .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS, 3062 .ops = &hsw_power_well_ops, 3063 .id = DISP_PW_ID_NONE, 3064 { 3065 .hsw.regs = &hsw_power_well_regs, 3066 .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E, 3067 }, 3068 }, 3069 { 3070 .name = "DDI B IO power well", 3071 .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS, 3072 .ops = &hsw_power_well_ops, 3073 .id = DISP_PW_ID_NONE, 3074 { 3075 .hsw.regs = &hsw_power_well_regs, 3076 .hsw.idx = SKL_PW_CTL_IDX_DDI_B, 3077 }, 3078 }, 3079 { 3080 .name = "DDI C IO power well", 3081 .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS, 3082 .ops = &hsw_power_well_ops, 3083 .id = DISP_PW_ID_NONE, 3084 { 3085 .hsw.regs = &hsw_power_well_regs, 3086 .hsw.idx = SKL_PW_CTL_IDX_DDI_C, 3087 }, 3088 }, 3089 { 3090 .name = "DDI D IO power well", 3091 .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS, 3092 .ops = &hsw_power_well_ops, 3093 .id = DISP_PW_ID_NONE, 3094 { 3095 .hsw.regs = &hsw_power_well_regs, 3096 .hsw.idx = SKL_PW_CTL_IDX_DDI_D, 3097 }, 3098 }, 3099 }; 3100 3101 static const struct i915_power_well_desc bxt_power_wells[] = { 3102 { 3103 .name = "always-on", 3104 .always_on = true, 3105 .domains = POWER_DOMAIN_MASK, 3106 .ops = &i9xx_always_on_power_well_ops, 3107 .id 
= DISP_PW_ID_NONE, 3108 }, 3109 { 3110 .name = "power well 1", 3111 /* Handled by the DMC firmware */ 3112 .always_on = true, 3113 .domains = 0, 3114 .ops = &hsw_power_well_ops, 3115 .id = SKL_DISP_PW_1, 3116 { 3117 .hsw.regs = &hsw_power_well_regs, 3118 .hsw.idx = SKL_PW_CTL_IDX_PW_1, 3119 .hsw.has_fuses = true, 3120 }, 3121 }, 3122 { 3123 .name = "DC off", 3124 .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS, 3125 .ops = &gen9_dc_off_power_well_ops, 3126 .id = SKL_DISP_DC_OFF, 3127 }, 3128 { 3129 .name = "power well 2", 3130 .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS, 3131 .ops = &hsw_power_well_ops, 3132 .id = SKL_DISP_PW_2, 3133 { 3134 .hsw.regs = &hsw_power_well_regs, 3135 .hsw.idx = SKL_PW_CTL_IDX_PW_2, 3136 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 3137 .hsw.has_vga = true, 3138 .hsw.has_fuses = true, 3139 }, 3140 }, 3141 { 3142 .name = "dpio-common-a", 3143 .domains = BXT_DPIO_CMN_A_POWER_DOMAINS, 3144 .ops = &bxt_dpio_cmn_power_well_ops, 3145 .id = BXT_DISP_PW_DPIO_CMN_A, 3146 { 3147 .bxt.phy = DPIO_PHY1, 3148 }, 3149 }, 3150 { 3151 .name = "dpio-common-bc", 3152 .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS, 3153 .ops = &bxt_dpio_cmn_power_well_ops, 3154 .id = VLV_DISP_PW_DPIO_CMN_BC, 3155 { 3156 .bxt.phy = DPIO_PHY0, 3157 }, 3158 }, 3159 }; 3160 3161 static const struct i915_power_well_desc glk_power_wells[] = { 3162 { 3163 .name = "always-on", 3164 .always_on = true, 3165 .domains = POWER_DOMAIN_MASK, 3166 .ops = &i9xx_always_on_power_well_ops, 3167 .id = DISP_PW_ID_NONE, 3168 }, 3169 { 3170 .name = "power well 1", 3171 /* Handled by the DMC firmware */ 3172 .always_on = true, 3173 .domains = 0, 3174 .ops = &hsw_power_well_ops, 3175 .id = SKL_DISP_PW_1, 3176 { 3177 .hsw.regs = &hsw_power_well_regs, 3178 .hsw.idx = SKL_PW_CTL_IDX_PW_1, 3179 .hsw.has_fuses = true, 3180 }, 3181 }, 3182 { 3183 .name = "DC off", 3184 .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS, 3185 .ops = &gen9_dc_off_power_well_ops, 3186 .id = SKL_DISP_DC_OFF, 3187 }, 3188 { 3189 .name = "power well 2", 3190 .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS, 3191 .ops = &hsw_power_well_ops, 3192 .id = SKL_DISP_PW_2, 3193 { 3194 .hsw.regs = &hsw_power_well_regs, 3195 .hsw.idx = SKL_PW_CTL_IDX_PW_2, 3196 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 3197 .hsw.has_vga = true, 3198 .hsw.has_fuses = true, 3199 }, 3200 }, 3201 { 3202 .name = "dpio-common-a", 3203 .domains = GLK_DPIO_CMN_A_POWER_DOMAINS, 3204 .ops = &bxt_dpio_cmn_power_well_ops, 3205 .id = BXT_DISP_PW_DPIO_CMN_A, 3206 { 3207 .bxt.phy = DPIO_PHY1, 3208 }, 3209 }, 3210 { 3211 .name = "dpio-common-b", 3212 .domains = GLK_DPIO_CMN_B_POWER_DOMAINS, 3213 .ops = &bxt_dpio_cmn_power_well_ops, 3214 .id = VLV_DISP_PW_DPIO_CMN_BC, 3215 { 3216 .bxt.phy = DPIO_PHY0, 3217 }, 3218 }, 3219 { 3220 .name = "dpio-common-c", 3221 .domains = GLK_DPIO_CMN_C_POWER_DOMAINS, 3222 .ops = &bxt_dpio_cmn_power_well_ops, 3223 .id = GLK_DISP_PW_DPIO_CMN_C, 3224 { 3225 .bxt.phy = DPIO_PHY2, 3226 }, 3227 }, 3228 { 3229 .name = "AUX A", 3230 .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS, 3231 .ops = &hsw_power_well_ops, 3232 .id = DISP_PW_ID_NONE, 3233 { 3234 .hsw.regs = &hsw_power_well_regs, 3235 .hsw.idx = GLK_PW_CTL_IDX_AUX_A, 3236 }, 3237 }, 3238 { 3239 .name = "AUX B", 3240 .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS, 3241 .ops = &hsw_power_well_ops, 3242 .id = DISP_PW_ID_NONE, 3243 { 3244 .hsw.regs = &hsw_power_well_regs, 3245 .hsw.idx = GLK_PW_CTL_IDX_AUX_B, 3246 }, 3247 }, 3248 { 3249 .name = "AUX C", 3250 .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS, 3251 .ops = 
&hsw_power_well_ops, 3252 .id = DISP_PW_ID_NONE, 3253 { 3254 .hsw.regs = &hsw_power_well_regs, 3255 .hsw.idx = GLK_PW_CTL_IDX_AUX_C, 3256 }, 3257 }, 3258 { 3259 .name = "DDI A IO power well", 3260 .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS, 3261 .ops = &hsw_power_well_ops, 3262 .id = DISP_PW_ID_NONE, 3263 { 3264 .hsw.regs = &hsw_power_well_regs, 3265 .hsw.idx = GLK_PW_CTL_IDX_DDI_A, 3266 }, 3267 }, 3268 { 3269 .name = "DDI B IO power well", 3270 .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS, 3271 .ops = &hsw_power_well_ops, 3272 .id = DISP_PW_ID_NONE, 3273 { 3274 .hsw.regs = &hsw_power_well_regs, 3275 .hsw.idx = SKL_PW_CTL_IDX_DDI_B, 3276 }, 3277 }, 3278 { 3279 .name = "DDI C IO power well", 3280 .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS, 3281 .ops = &hsw_power_well_ops, 3282 .id = DISP_PW_ID_NONE, 3283 { 3284 .hsw.regs = &hsw_power_well_regs, 3285 .hsw.idx = SKL_PW_CTL_IDX_DDI_C, 3286 }, 3287 }, 3288 }; 3289 3290 static const struct i915_power_well_desc cnl_power_wells[] = { 3291 { 3292 .name = "always-on", 3293 .always_on = true, 3294 .domains = POWER_DOMAIN_MASK, 3295 .ops = &i9xx_always_on_power_well_ops, 3296 .id = DISP_PW_ID_NONE, 3297 }, 3298 { 3299 .name = "power well 1", 3300 /* Handled by the DMC firmware */ 3301 .always_on = true, 3302 .domains = 0, 3303 .ops = &hsw_power_well_ops, 3304 .id = SKL_DISP_PW_1, 3305 { 3306 .hsw.regs = &hsw_power_well_regs, 3307 .hsw.idx = SKL_PW_CTL_IDX_PW_1, 3308 .hsw.has_fuses = true, 3309 }, 3310 }, 3311 { 3312 .name = "AUX A", 3313 .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS, 3314 .ops = &hsw_power_well_ops, 3315 .id = DISP_PW_ID_NONE, 3316 { 3317 .hsw.regs = &hsw_power_well_regs, 3318 .hsw.idx = GLK_PW_CTL_IDX_AUX_A, 3319 }, 3320 }, 3321 { 3322 .name = "AUX B", 3323 .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS, 3324 .ops = &hsw_power_well_ops, 3325 .id = DISP_PW_ID_NONE, 3326 { 3327 .hsw.regs = &hsw_power_well_regs, 3328 .hsw.idx = GLK_PW_CTL_IDX_AUX_B, 3329 }, 3330 }, 3331 { 3332 .name = "AUX C", 3333 .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS, 3334 .ops = &hsw_power_well_ops, 3335 .id = DISP_PW_ID_NONE, 3336 { 3337 .hsw.regs = &hsw_power_well_regs, 3338 .hsw.idx = GLK_PW_CTL_IDX_AUX_C, 3339 }, 3340 }, 3341 { 3342 .name = "AUX D", 3343 .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS, 3344 .ops = &hsw_power_well_ops, 3345 .id = DISP_PW_ID_NONE, 3346 { 3347 .hsw.regs = &hsw_power_well_regs, 3348 .hsw.idx = CNL_PW_CTL_IDX_AUX_D, 3349 }, 3350 }, 3351 { 3352 .name = "DC off", 3353 .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS, 3354 .ops = &gen9_dc_off_power_well_ops, 3355 .id = SKL_DISP_DC_OFF, 3356 }, 3357 { 3358 .name = "power well 2", 3359 .domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS, 3360 .ops = &hsw_power_well_ops, 3361 .id = SKL_DISP_PW_2, 3362 { 3363 .hsw.regs = &hsw_power_well_regs, 3364 .hsw.idx = SKL_PW_CTL_IDX_PW_2, 3365 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 3366 .hsw.has_vga = true, 3367 .hsw.has_fuses = true, 3368 }, 3369 }, 3370 { 3371 .name = "DDI A IO power well", 3372 .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS, 3373 .ops = &hsw_power_well_ops, 3374 .id = DISP_PW_ID_NONE, 3375 { 3376 .hsw.regs = &hsw_power_well_regs, 3377 .hsw.idx = GLK_PW_CTL_IDX_DDI_A, 3378 }, 3379 }, 3380 { 3381 .name = "DDI B IO power well", 3382 .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS, 3383 .ops = &hsw_power_well_ops, 3384 .id = DISP_PW_ID_NONE, 3385 { 3386 .hsw.regs = &hsw_power_well_regs, 3387 .hsw.idx = SKL_PW_CTL_IDX_DDI_B, 3388 }, 3389 }, 3390 { 3391 .name = "DDI C IO power well", 3392 .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS, 3393 
.ops = &hsw_power_well_ops, 3394 .id = DISP_PW_ID_NONE, 3395 { 3396 .hsw.regs = &hsw_power_well_regs, 3397 .hsw.idx = SKL_PW_CTL_IDX_DDI_C, 3398 }, 3399 }, 3400 { 3401 .name = "DDI D IO power well", 3402 .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS, 3403 .ops = &hsw_power_well_ops, 3404 .id = DISP_PW_ID_NONE, 3405 { 3406 .hsw.regs = &hsw_power_well_regs, 3407 .hsw.idx = SKL_PW_CTL_IDX_DDI_D, 3408 }, 3409 }, 3410 { 3411 .name = "DDI F IO power well", 3412 .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS, 3413 .ops = &hsw_power_well_ops, 3414 .id = DISP_PW_ID_NONE, 3415 { 3416 .hsw.regs = &hsw_power_well_regs, 3417 .hsw.idx = CNL_PW_CTL_IDX_DDI_F, 3418 }, 3419 }, 3420 { 3421 .name = "AUX F", 3422 .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS, 3423 .ops = &hsw_power_well_ops, 3424 .id = DISP_PW_ID_NONE, 3425 { 3426 .hsw.regs = &hsw_power_well_regs, 3427 .hsw.idx = CNL_PW_CTL_IDX_AUX_F, 3428 }, 3429 }, 3430 }; 3431 3432 static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = { 3433 .sync_hw = hsw_power_well_sync_hw, 3434 .enable = icl_combo_phy_aux_power_well_enable, 3435 .disable = icl_combo_phy_aux_power_well_disable, 3436 .is_enabled = hsw_power_well_enabled, 3437 }; 3438 3439 static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = { 3440 .sync_hw = hsw_power_well_sync_hw, 3441 .enable = icl_tc_phy_aux_power_well_enable, 3442 .disable = icl_tc_phy_aux_power_well_disable, 3443 .is_enabled = hsw_power_well_enabled, 3444 }; 3445 3446 static const struct i915_power_well_regs icl_aux_power_well_regs = { 3447 .bios = ICL_PWR_WELL_CTL_AUX1, 3448 .driver = ICL_PWR_WELL_CTL_AUX2, 3449 .debug = ICL_PWR_WELL_CTL_AUX4, 3450 }; 3451 3452 static const struct i915_power_well_regs icl_ddi_power_well_regs = { 3453 .bios = ICL_PWR_WELL_CTL_DDI1, 3454 .driver = ICL_PWR_WELL_CTL_DDI2, 3455 .debug = ICL_PWR_WELL_CTL_DDI4, 3456 }; 3457 3458 static const struct i915_power_well_desc icl_power_wells[] = { 3459 { 3460 .name = "always-on", 3461 .always_on = true, 3462 .domains = POWER_DOMAIN_MASK, 3463 .ops = &i9xx_always_on_power_well_ops, 3464 .id = DISP_PW_ID_NONE, 3465 }, 3466 { 3467 .name = "power well 1", 3468 /* Handled by the DMC firmware */ 3469 .always_on = true, 3470 .domains = 0, 3471 .ops = &hsw_power_well_ops, 3472 .id = SKL_DISP_PW_1, 3473 { 3474 .hsw.regs = &hsw_power_well_regs, 3475 .hsw.idx = ICL_PW_CTL_IDX_PW_1, 3476 .hsw.has_fuses = true, 3477 }, 3478 }, 3479 { 3480 .name = "DC off", 3481 .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS, 3482 .ops = &gen9_dc_off_power_well_ops, 3483 .id = SKL_DISP_DC_OFF, 3484 }, 3485 { 3486 .name = "power well 2", 3487 .domains = ICL_PW_2_POWER_DOMAINS, 3488 .ops = &hsw_power_well_ops, 3489 .id = SKL_DISP_PW_2, 3490 { 3491 .hsw.regs = &hsw_power_well_regs, 3492 .hsw.idx = ICL_PW_CTL_IDX_PW_2, 3493 .hsw.has_fuses = true, 3494 }, 3495 }, 3496 { 3497 .name = "power well 3", 3498 .domains = ICL_PW_3_POWER_DOMAINS, 3499 .ops = &hsw_power_well_ops, 3500 .id = DISP_PW_ID_NONE, 3501 { 3502 .hsw.regs = &hsw_power_well_regs, 3503 .hsw.idx = ICL_PW_CTL_IDX_PW_3, 3504 .hsw.irq_pipe_mask = BIT(PIPE_B), 3505 .hsw.has_vga = true, 3506 .hsw.has_fuses = true, 3507 }, 3508 }, 3509 { 3510 .name = "DDI A IO", 3511 .domains = ICL_DDI_IO_A_POWER_DOMAINS, 3512 .ops = &hsw_power_well_ops, 3513 .id = DISP_PW_ID_NONE, 3514 { 3515 .hsw.regs = &icl_ddi_power_well_regs, 3516 .hsw.idx = ICL_PW_CTL_IDX_DDI_A, 3517 }, 3518 }, 3519 { 3520 .name = "DDI B IO", 3521 .domains = ICL_DDI_IO_B_POWER_DOMAINS, 3522 .ops = &hsw_power_well_ops, 3523 .id = DISP_PW_ID_NONE, 3524 { 
3525 .hsw.regs = &icl_ddi_power_well_regs, 3526 .hsw.idx = ICL_PW_CTL_IDX_DDI_B, 3527 }, 3528 }, 3529 { 3530 .name = "DDI C IO", 3531 .domains = ICL_DDI_IO_C_POWER_DOMAINS, 3532 .ops = &hsw_power_well_ops, 3533 .id = DISP_PW_ID_NONE, 3534 { 3535 .hsw.regs = &icl_ddi_power_well_regs, 3536 .hsw.idx = ICL_PW_CTL_IDX_DDI_C, 3537 }, 3538 }, 3539 { 3540 .name = "DDI D IO", 3541 .domains = ICL_DDI_IO_D_POWER_DOMAINS, 3542 .ops = &hsw_power_well_ops, 3543 .id = DISP_PW_ID_NONE, 3544 { 3545 .hsw.regs = &icl_ddi_power_well_regs, 3546 .hsw.idx = ICL_PW_CTL_IDX_DDI_D, 3547 }, 3548 }, 3549 { 3550 .name = "DDI E IO", 3551 .domains = ICL_DDI_IO_E_POWER_DOMAINS, 3552 .ops = &hsw_power_well_ops, 3553 .id = DISP_PW_ID_NONE, 3554 { 3555 .hsw.regs = &icl_ddi_power_well_regs, 3556 .hsw.idx = ICL_PW_CTL_IDX_DDI_E, 3557 }, 3558 }, 3559 { 3560 .name = "DDI F IO", 3561 .domains = ICL_DDI_IO_F_POWER_DOMAINS, 3562 .ops = &hsw_power_well_ops, 3563 .id = DISP_PW_ID_NONE, 3564 { 3565 .hsw.regs = &icl_ddi_power_well_regs, 3566 .hsw.idx = ICL_PW_CTL_IDX_DDI_F, 3567 }, 3568 }, 3569 { 3570 .name = "AUX A", 3571 .domains = ICL_AUX_A_IO_POWER_DOMAINS, 3572 .ops = &icl_combo_phy_aux_power_well_ops, 3573 .id = DISP_PW_ID_NONE, 3574 { 3575 .hsw.regs = &icl_aux_power_well_regs, 3576 .hsw.idx = ICL_PW_CTL_IDX_AUX_A, 3577 }, 3578 }, 3579 { 3580 .name = "AUX B", 3581 .domains = ICL_AUX_B_IO_POWER_DOMAINS, 3582 .ops = &icl_combo_phy_aux_power_well_ops, 3583 .id = DISP_PW_ID_NONE, 3584 { 3585 .hsw.regs = &icl_aux_power_well_regs, 3586 .hsw.idx = ICL_PW_CTL_IDX_AUX_B, 3587 }, 3588 }, 3589 { 3590 .name = "AUX C TC1", 3591 .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS, 3592 .ops = &icl_tc_phy_aux_power_well_ops, 3593 .id = DISP_PW_ID_NONE, 3594 { 3595 .hsw.regs = &icl_aux_power_well_regs, 3596 .hsw.idx = ICL_PW_CTL_IDX_AUX_C, 3597 .hsw.is_tc_tbt = false, 3598 }, 3599 }, 3600 { 3601 .name = "AUX D TC2", 3602 .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS, 3603 .ops = &icl_tc_phy_aux_power_well_ops, 3604 .id = DISP_PW_ID_NONE, 3605 { 3606 .hsw.regs = &icl_aux_power_well_regs, 3607 .hsw.idx = ICL_PW_CTL_IDX_AUX_D, 3608 .hsw.is_tc_tbt = false, 3609 }, 3610 }, 3611 { 3612 .name = "AUX E TC3", 3613 .domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS, 3614 .ops = &icl_tc_phy_aux_power_well_ops, 3615 .id = DISP_PW_ID_NONE, 3616 { 3617 .hsw.regs = &icl_aux_power_well_regs, 3618 .hsw.idx = ICL_PW_CTL_IDX_AUX_E, 3619 .hsw.is_tc_tbt = false, 3620 }, 3621 }, 3622 { 3623 .name = "AUX F TC4", 3624 .domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS, 3625 .ops = &icl_tc_phy_aux_power_well_ops, 3626 .id = DISP_PW_ID_NONE, 3627 { 3628 .hsw.regs = &icl_aux_power_well_regs, 3629 .hsw.idx = ICL_PW_CTL_IDX_AUX_F, 3630 .hsw.is_tc_tbt = false, 3631 }, 3632 }, 3633 { 3634 .name = "AUX C TBT1", 3635 .domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS, 3636 .ops = &icl_tc_phy_aux_power_well_ops, 3637 .id = DISP_PW_ID_NONE, 3638 { 3639 .hsw.regs = &icl_aux_power_well_regs, 3640 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1, 3641 .hsw.is_tc_tbt = true, 3642 }, 3643 }, 3644 { 3645 .name = "AUX D TBT2", 3646 .domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS, 3647 .ops = &icl_tc_phy_aux_power_well_ops, 3648 .id = DISP_PW_ID_NONE, 3649 { 3650 .hsw.regs = &icl_aux_power_well_regs, 3651 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2, 3652 .hsw.is_tc_tbt = true, 3653 }, 3654 }, 3655 { 3656 .name = "AUX E TBT3", 3657 .domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS, 3658 .ops = &icl_tc_phy_aux_power_well_ops, 3659 .id = DISP_PW_ID_NONE, 3660 { 3661 .hsw.regs = &icl_aux_power_well_regs, 3662 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3, 3663 .hsw.is_tc_tbt 
= true, 3664 }, 3665 }, 3666 { 3667 .name = "AUX F TBT4", 3668 .domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS, 3669 .ops = &icl_tc_phy_aux_power_well_ops, 3670 .id = DISP_PW_ID_NONE, 3671 { 3672 .hsw.regs = &icl_aux_power_well_regs, 3673 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4, 3674 .hsw.is_tc_tbt = true, 3675 }, 3676 }, 3677 { 3678 .name = "power well 4", 3679 .domains = ICL_PW_4_POWER_DOMAINS, 3680 .ops = &hsw_power_well_ops, 3681 .id = DISP_PW_ID_NONE, 3682 { 3683 .hsw.regs = &hsw_power_well_regs, 3684 .hsw.idx = ICL_PW_CTL_IDX_PW_4, 3685 .hsw.has_fuses = true, 3686 .hsw.irq_pipe_mask = BIT(PIPE_C), 3687 }, 3688 }, 3689 }; 3690 3691 static const struct i915_power_well_desc tgl_power_wells[] = { 3692 { 3693 .name = "always-on", 3694 .always_on = true, 3695 .domains = POWER_DOMAIN_MASK, 3696 .ops = &i9xx_always_on_power_well_ops, 3697 .id = DISP_PW_ID_NONE, 3698 }, 3699 { 3700 .name = "power well 1", 3701 /* Handled by the DMC firmware */ 3702 .always_on = true, 3703 .domains = 0, 3704 .ops = &hsw_power_well_ops, 3705 .id = SKL_DISP_PW_1, 3706 { 3707 .hsw.regs = &hsw_power_well_regs, 3708 .hsw.idx = ICL_PW_CTL_IDX_PW_1, 3709 .hsw.has_fuses = true, 3710 }, 3711 }, 3712 { 3713 .name = "DC off", 3714 .domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS, 3715 .ops = &gen9_dc_off_power_well_ops, 3716 .id = SKL_DISP_DC_OFF, 3717 }, 3718 { 3719 .name = "power well 2", 3720 .domains = TGL_PW_2_POWER_DOMAINS, 3721 .ops = &hsw_power_well_ops, 3722 .id = SKL_DISP_PW_2, 3723 { 3724 .hsw.regs = &hsw_power_well_regs, 3725 .hsw.idx = ICL_PW_CTL_IDX_PW_2, 3726 .hsw.has_fuses = true, 3727 }, 3728 }, 3729 { 3730 .name = "power well 3", 3731 .domains = TGL_PW_3_POWER_DOMAINS, 3732 .ops = &hsw_power_well_ops, 3733 .id = DISP_PW_ID_NONE, 3734 { 3735 .hsw.regs = &hsw_power_well_regs, 3736 .hsw.idx = ICL_PW_CTL_IDX_PW_3, 3737 .hsw.irq_pipe_mask = BIT(PIPE_B), 3738 .hsw.has_vga = true, 3739 .hsw.has_fuses = true, 3740 }, 3741 }, 3742 { 3743 .name = "DDI A IO", 3744 .domains = ICL_DDI_IO_A_POWER_DOMAINS, 3745 .ops = &hsw_power_well_ops, 3746 .id = DISP_PW_ID_NONE, 3747 { 3748 .hsw.regs = &icl_ddi_power_well_regs, 3749 .hsw.idx = ICL_PW_CTL_IDX_DDI_A, 3750 } 3751 }, 3752 { 3753 .name = "DDI B IO", 3754 .domains = ICL_DDI_IO_B_POWER_DOMAINS, 3755 .ops = &hsw_power_well_ops, 3756 .id = DISP_PW_ID_NONE, 3757 { 3758 .hsw.regs = &icl_ddi_power_well_regs, 3759 .hsw.idx = ICL_PW_CTL_IDX_DDI_B, 3760 } 3761 }, 3762 { 3763 .name = "DDI C IO", 3764 .domains = ICL_DDI_IO_C_POWER_DOMAINS, 3765 .ops = &hsw_power_well_ops, 3766 .id = DISP_PW_ID_NONE, 3767 { 3768 .hsw.regs = &icl_ddi_power_well_regs, 3769 .hsw.idx = ICL_PW_CTL_IDX_DDI_C, 3770 } 3771 }, 3772 { 3773 .name = "DDI D TC1 IO", 3774 .domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS, 3775 .ops = &hsw_power_well_ops, 3776 .id = DISP_PW_ID_NONE, 3777 { 3778 .hsw.regs = &icl_ddi_power_well_regs, 3779 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, 3780 }, 3781 }, 3782 { 3783 .name = "DDI E TC2 IO", 3784 .domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS, 3785 .ops = &hsw_power_well_ops, 3786 .id = DISP_PW_ID_NONE, 3787 { 3788 .hsw.regs = &icl_ddi_power_well_regs, 3789 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, 3790 }, 3791 }, 3792 { 3793 .name = "DDI F TC3 IO", 3794 .domains = TGL_DDI_IO_F_TC3_POWER_DOMAINS, 3795 .ops = &hsw_power_well_ops, 3796 .id = DISP_PW_ID_NONE, 3797 { 3798 .hsw.regs = &icl_ddi_power_well_regs, 3799 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3, 3800 }, 3801 }, 3802 { 3803 .name = "DDI G TC4 IO", 3804 .domains = TGL_DDI_IO_G_TC4_POWER_DOMAINS, 3805 .ops = &hsw_power_well_ops, 3806 .id = DISP_PW_ID_NONE, 3807 { 3808 
.hsw.regs = &icl_ddi_power_well_regs, 3809 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4, 3810 }, 3811 }, 3812 { 3813 .name = "DDI H TC5 IO", 3814 .domains = TGL_DDI_IO_H_TC5_POWER_DOMAINS, 3815 .ops = &hsw_power_well_ops, 3816 .id = DISP_PW_ID_NONE, 3817 { 3818 .hsw.regs = &icl_ddi_power_well_regs, 3819 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5, 3820 }, 3821 }, 3822 { 3823 .name = "DDI I TC6 IO", 3824 .domains = TGL_DDI_IO_I_TC6_POWER_DOMAINS, 3825 .ops = &hsw_power_well_ops, 3826 .id = DISP_PW_ID_NONE, 3827 { 3828 .hsw.regs = &icl_ddi_power_well_regs, 3829 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6, 3830 }, 3831 }, 3832 { 3833 .name = "AUX A", 3834 .domains = TGL_AUX_A_IO_POWER_DOMAINS, 3835 .ops = &icl_combo_phy_aux_power_well_ops, 3836 .id = DISP_PW_ID_NONE, 3837 { 3838 .hsw.regs = &icl_aux_power_well_regs, 3839 .hsw.idx = ICL_PW_CTL_IDX_AUX_A, 3840 }, 3841 }, 3842 { 3843 .name = "AUX B", 3844 .domains = TGL_AUX_B_IO_POWER_DOMAINS, 3845 .ops = &icl_combo_phy_aux_power_well_ops, 3846 .id = DISP_PW_ID_NONE, 3847 { 3848 .hsw.regs = &icl_aux_power_well_regs, 3849 .hsw.idx = ICL_PW_CTL_IDX_AUX_B, 3850 }, 3851 }, 3852 { 3853 .name = "AUX C", 3854 .domains = TGL_AUX_C_IO_POWER_DOMAINS, 3855 .ops = &icl_combo_phy_aux_power_well_ops, 3856 .id = DISP_PW_ID_NONE, 3857 { 3858 .hsw.regs = &icl_aux_power_well_regs, 3859 .hsw.idx = ICL_PW_CTL_IDX_AUX_C, 3860 }, 3861 }, 3862 { 3863 .name = "AUX D TC1", 3864 .domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS, 3865 .ops = &icl_tc_phy_aux_power_well_ops, 3866 .id = DISP_PW_ID_NONE, 3867 { 3868 .hsw.regs = &icl_aux_power_well_regs, 3869 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, 3870 .hsw.is_tc_tbt = false, 3871 }, 3872 }, 3873 { 3874 .name = "AUX E TC2", 3875 .domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS, 3876 .ops = &icl_tc_phy_aux_power_well_ops, 3877 .id = DISP_PW_ID_NONE, 3878 { 3879 .hsw.regs = &icl_aux_power_well_regs, 3880 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, 3881 .hsw.is_tc_tbt = false, 3882 }, 3883 }, 3884 { 3885 .name = "AUX F TC3", 3886 .domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS, 3887 .ops = &icl_tc_phy_aux_power_well_ops, 3888 .id = DISP_PW_ID_NONE, 3889 { 3890 .hsw.regs = &icl_aux_power_well_regs, 3891 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3, 3892 .hsw.is_tc_tbt = false, 3893 }, 3894 }, 3895 { 3896 .name = "AUX G TC4", 3897 .domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS, 3898 .ops = &icl_tc_phy_aux_power_well_ops, 3899 .id = DISP_PW_ID_NONE, 3900 { 3901 .hsw.regs = &icl_aux_power_well_regs, 3902 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4, 3903 .hsw.is_tc_tbt = false, 3904 }, 3905 }, 3906 { 3907 .name = "AUX H TC5", 3908 .domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS, 3909 .ops = &icl_tc_phy_aux_power_well_ops, 3910 .id = DISP_PW_ID_NONE, 3911 { 3912 .hsw.regs = &icl_aux_power_well_regs, 3913 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5, 3914 .hsw.is_tc_tbt = false, 3915 }, 3916 }, 3917 { 3918 .name = "AUX I TC6", 3919 .domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS, 3920 .ops = &icl_tc_phy_aux_power_well_ops, 3921 .id = DISP_PW_ID_NONE, 3922 { 3923 .hsw.regs = &icl_aux_power_well_regs, 3924 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6, 3925 .hsw.is_tc_tbt = false, 3926 }, 3927 }, 3928 { 3929 .name = "AUX D TBT1", 3930 .domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS, 3931 .ops = &hsw_power_well_ops, 3932 .id = DISP_PW_ID_NONE, 3933 { 3934 .hsw.regs = &icl_aux_power_well_regs, 3935 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1, 3936 .hsw.is_tc_tbt = true, 3937 }, 3938 }, 3939 { 3940 .name = "AUX E TBT2", 3941 .domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS, 3942 .ops = &hsw_power_well_ops, 3943 .id = DISP_PW_ID_NONE, 3944 { 3945 .hsw.regs = &icl_aux_power_well_regs, 
3946 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2, 3947 .hsw.is_tc_tbt = true, 3948 }, 3949 }, 3950 { 3951 .name = "AUX F TBT3", 3952 .domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS, 3953 .ops = &hsw_power_well_ops, 3954 .id = DISP_PW_ID_NONE, 3955 { 3956 .hsw.regs = &icl_aux_power_well_regs, 3957 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3, 3958 .hsw.is_tc_tbt = true, 3959 }, 3960 }, 3961 { 3962 .name = "AUX G TBT4", 3963 .domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS, 3964 .ops = &hsw_power_well_ops, 3965 .id = DISP_PW_ID_NONE, 3966 { 3967 .hsw.regs = &icl_aux_power_well_regs, 3968 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4, 3969 .hsw.is_tc_tbt = true, 3970 }, 3971 }, 3972 { 3973 .name = "AUX H TBT5", 3974 .domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS, 3975 .ops = &hsw_power_well_ops, 3976 .id = DISP_PW_ID_NONE, 3977 { 3978 .hsw.regs = &icl_aux_power_well_regs, 3979 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5, 3980 .hsw.is_tc_tbt = true, 3981 }, 3982 }, 3983 { 3984 .name = "AUX I TBT6", 3985 .domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS, 3986 .ops = &hsw_power_well_ops, 3987 .id = DISP_PW_ID_NONE, 3988 { 3989 .hsw.regs = &icl_aux_power_well_regs, 3990 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6, 3991 .hsw.is_tc_tbt = true, 3992 }, 3993 }, 3994 { 3995 .name = "power well 4", 3996 .domains = TGL_PW_4_POWER_DOMAINS, 3997 .ops = &hsw_power_well_ops, 3998 .id = DISP_PW_ID_NONE, 3999 { 4000 .hsw.regs = &hsw_power_well_regs, 4001 .hsw.idx = ICL_PW_CTL_IDX_PW_4, 4002 .hsw.has_fuses = true, 4003 .hsw.irq_pipe_mask = BIT(PIPE_C), 4004 } 4005 }, 4006 { 4007 .name = "power well 5", 4008 .domains = TGL_PW_5_POWER_DOMAINS, 4009 .ops = &hsw_power_well_ops, 4010 .id = DISP_PW_ID_NONE, 4011 { 4012 .hsw.regs = &hsw_power_well_regs, 4013 .hsw.idx = TGL_PW_CTL_IDX_PW_5, 4014 .hsw.has_fuses = true, 4015 .hsw.irq_pipe_mask = BIT(PIPE_D), 4016 }, 4017 }, 4018 }; 4019 4020 static int 4021 sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv, 4022 int disable_power_well) 4023 { 4024 if (disable_power_well >= 0) 4025 return !!disable_power_well; 4026 4027 return 1; 4028 } 4029 4030 static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv, 4031 int enable_dc) 4032 { 4033 u32 mask; 4034 int requested_dc; 4035 int max_dc; 4036 4037 if (INTEL_GEN(dev_priv) >= 12) { 4038 max_dc = 4; 4039 /* 4040 * DC9 has a separate HW flow from the rest of the DC states, 4041 * not depending on the DMC firmware. It's needed by system 4042 * suspend/resume, so allow it unconditionally. 
4043 */ 4044 mask = DC_STATE_EN_DC9; 4045 } else if (IS_GEN(dev_priv, 11)) { 4046 max_dc = 2; 4047 mask = DC_STATE_EN_DC9; 4048 } else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) { 4049 max_dc = 2; 4050 mask = 0; 4051 } else if (IS_GEN9_LP(dev_priv)) { 4052 max_dc = 1; 4053 mask = DC_STATE_EN_DC9; 4054 } else { 4055 max_dc = 0; 4056 mask = 0; 4057 } 4058 4059 if (!i915_modparams.disable_power_well) 4060 max_dc = 0; 4061 4062 if (enable_dc >= 0 && enable_dc <= max_dc) { 4063 requested_dc = enable_dc; 4064 } else if (enable_dc == -1) { 4065 requested_dc = max_dc; 4066 } else if (enable_dc > max_dc && enable_dc <= 4) { 4067 DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n", 4068 enable_dc, max_dc); 4069 requested_dc = max_dc; 4070 } else { 4071 DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc); 4072 requested_dc = max_dc; 4073 } 4074 4075 switch (requested_dc) { 4076 case 4: 4077 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6; 4078 break; 4079 case 3: 4080 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5; 4081 break; 4082 case 2: 4083 mask |= DC_STATE_EN_UPTO_DC6; 4084 break; 4085 case 1: 4086 mask |= DC_STATE_EN_UPTO_DC5; 4087 break; 4088 } 4089 4090 DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask); 4091 4092 return mask; 4093 } 4094 4095 static int 4096 __set_power_wells(struct i915_power_domains *power_domains, 4097 const struct i915_power_well_desc *power_well_descs, 4098 int power_well_count) 4099 { 4100 u64 power_well_ids = 0; 4101 int i; 4102 4103 power_domains->power_well_count = power_well_count; 4104 power_domains->power_wells = 4105 kcalloc(power_well_count, 4106 sizeof(*power_domains->power_wells), 4107 GFP_KERNEL); 4108 if (!power_domains->power_wells) 4109 return -ENOMEM; 4110 4111 for (i = 0; i < power_well_count; i++) { 4112 enum i915_power_well_id id = power_well_descs[i].id; 4113 4114 power_domains->power_wells[i].desc = &power_well_descs[i]; 4115 4116 if (id == DISP_PW_ID_NONE) 4117 continue; 4118 4119 WARN_ON(id >= sizeof(power_well_ids) * 8); 4120 WARN_ON(power_well_ids & BIT_ULL(id)); 4121 power_well_ids |= BIT_ULL(id); 4122 } 4123 4124 return 0; 4125 } 4126 4127 #define set_power_wells(power_domains, __power_well_descs) \ 4128 __set_power_wells(power_domains, __power_well_descs, \ 4129 ARRAY_SIZE(__power_well_descs)) 4130 4131 /** 4132 * intel_power_domains_init - initializes the power domain structures 4133 * @dev_priv: i915 device instance 4134 * 4135 * Initializes the power domain structures for @dev_priv depending upon the 4136 * supported platform. 4137 */ 4138 int intel_power_domains_init(struct drm_i915_private *dev_priv) 4139 { 4140 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4141 int err; 4142 4143 i915_modparams.disable_power_well = 4144 sanitize_disable_power_well_option(dev_priv, 4145 i915_modparams.disable_power_well); 4146 dev_priv->csr.allowed_dc_mask = 4147 get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc); 4148 4149 dev_priv->csr.target_dc_state = 4150 sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); 4151 4152 BUILD_BUG_ON(POWER_DOMAIN_NUM > 64); 4153 4154 mutex_init(&power_domains->lock); 4155 4156 INIT_DELAYED_WORK(&power_domains->async_put_work, 4157 intel_display_power_put_async_work); 4158 4159 /* 4160 * The enabling order will be from lower to higher indexed wells, 4161 * the disabling order is reversed. 
4162 */ 4163 if (IS_GEN(dev_priv, 12)) { 4164 err = set_power_wells(power_domains, tgl_power_wells); 4165 } else if (IS_GEN(dev_priv, 11)) { 4166 err = set_power_wells(power_domains, icl_power_wells); 4167 } else if (IS_CANNONLAKE(dev_priv)) { 4168 err = set_power_wells(power_domains, cnl_power_wells); 4169 4170 /* 4171 * DDI and Aux IO are getting enabled for all ports 4172 * regardless of their presence or use. So, in order to avoid 4173 * timeouts, let's remove them from the list 4174 * for the SKUs without port F. 4175 */ 4176 if (!IS_CNL_WITH_PORT_F(dev_priv)) 4177 power_domains->power_well_count -= 2; 4178 } else if (IS_GEMINILAKE(dev_priv)) { 4179 err = set_power_wells(power_domains, glk_power_wells); 4180 } else if (IS_BROXTON(dev_priv)) { 4181 err = set_power_wells(power_domains, bxt_power_wells); 4182 } else if (IS_GEN9_BC(dev_priv)) { 4183 err = set_power_wells(power_domains, skl_power_wells); 4184 } else if (IS_CHERRYVIEW(dev_priv)) { 4185 err = set_power_wells(power_domains, chv_power_wells); 4186 } else if (IS_BROADWELL(dev_priv)) { 4187 err = set_power_wells(power_domains, bdw_power_wells); 4188 } else if (IS_HASWELL(dev_priv)) { 4189 err = set_power_wells(power_domains, hsw_power_wells); 4190 } else if (IS_VALLEYVIEW(dev_priv)) { 4191 err = set_power_wells(power_domains, vlv_power_wells); 4192 } else if (IS_I830(dev_priv)) { 4193 err = set_power_wells(power_domains, i830_power_wells); 4194 } else { 4195 err = set_power_wells(power_domains, i9xx_always_on_power_well); 4196 } 4197 4198 return err; 4199 } 4200 4201 /** 4202 * intel_power_domains_cleanup - clean up power domains resources 4203 * @dev_priv: i915 device instance 4204 * 4205 * Release any resources acquired by intel_power_domains_init() 4206 */ 4207 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv) 4208 { 4209 kfree(dev_priv->power_domains.power_wells); 4210 } 4211 4212 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv) 4213 { 4214 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4215 struct i915_power_well *power_well; 4216 4217 mutex_lock(&power_domains->lock); 4218 for_each_power_well(dev_priv, power_well) { 4219 power_well->desc->ops->sync_hw(dev_priv, power_well); 4220 power_well->hw_enabled = 4221 power_well->desc->ops->is_enabled(dev_priv, power_well); 4222 } 4223 mutex_unlock(&power_domains->lock); 4224 } 4225
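/*
 * Editor's note (sketch, not part of the original file): the DBUF helpers
 * below implement a request/acknowledge handshake on the DBUF_CTL
 * register(s): set or clear DBUF_POWER_REQUEST, wait ~10us, then check
 * that DBUF_POWER_STATE reflects the request. A caller that wants the
 * second DBUF slice on ICL+ would request it roughly like this:
 *
 *	if (INTEL_GEN(dev_priv) >= 11)
 *		icl_dbuf_slices_update(dev_priv, 2);
 *
 * icl_dbuf_slices_update() validates the count against
 * intel_dbuf_max_slices() and only toggles DBUF_CTL_S2, since the first
 * slice stays enabled while the display is up.
 */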
"enable" : "disable"); 4242 return false; 4243 } 4244 return true; 4245 } 4246 4247 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv) 4248 { 4249 intel_dbuf_slice_set(dev_priv, DBUF_CTL, true); 4250 } 4251 4252 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv) 4253 { 4254 intel_dbuf_slice_set(dev_priv, DBUF_CTL, false); 4255 } 4256 4257 static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv) 4258 { 4259 if (INTEL_GEN(dev_priv) < 11) 4260 return 1; 4261 return 2; 4262 } 4263 4264 void icl_dbuf_slices_update(struct drm_i915_private *dev_priv, 4265 u8 req_slices) 4266 { 4267 const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices; 4268 bool ret; 4269 4270 if (req_slices > intel_dbuf_max_slices(dev_priv)) { 4271 DRM_ERROR("Invalid number of dbuf slices requested\n"); 4272 return; 4273 } 4274 4275 if (req_slices == hw_enabled_slices || req_slices == 0) 4276 return; 4277 4278 if (req_slices > hw_enabled_slices) 4279 ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true); 4280 else 4281 ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false); 4282 4283 if (ret) 4284 dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices; 4285 } 4286 4287 static void icl_dbuf_enable(struct drm_i915_private *dev_priv) 4288 { 4289 I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST); 4290 I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST); 4291 POSTING_READ(DBUF_CTL_S2); 4292 4293 udelay(10); 4294 4295 if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) || 4296 !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)) 4297 DRM_ERROR("DBuf power enable timeout\n"); 4298 else 4299 /* 4300 * FIXME: for now pretend that we only have 1 slice, see 4301 * intel_enabled_dbuf_slices_num(). 4302 */ 4303 dev_priv->wm.skl_hw.ddb.enabled_slices = 1; 4304 } 4305 4306 static void icl_dbuf_disable(struct drm_i915_private *dev_priv) 4307 { 4308 I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST); 4309 I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST); 4310 POSTING_READ(DBUF_CTL_S2); 4311 4312 udelay(10); 4313 4314 if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) || 4315 (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)) 4316 DRM_ERROR("DBuf power disable timeout!\n"); 4317 else 4318 /* 4319 * FIXME: for now pretend that the first slice is always 4320 * enabled, see intel_enabled_dbuf_slices_num(). 4321 */ 4322 dev_priv->wm.skl_hw.ddb.enabled_slices = 1; 4323 } 4324 4325 static void icl_mbus_init(struct drm_i915_private *dev_priv) 4326 { 4327 u32 val; 4328 4329 val = MBUS_ABOX_BT_CREDIT_POOL1(16) | 4330 MBUS_ABOX_BT_CREDIT_POOL2(16) | 4331 MBUS_ABOX_B_CREDIT(1) | 4332 MBUS_ABOX_BW_CREDIT(1); 4333 4334 I915_WRITE(MBUS_ABOX_CTL, val); 4335 } 4336 4337 static void hsw_assert_cdclk(struct drm_i915_private *dev_priv) 4338 { 4339 u32 val = I915_READ(LCPLL_CTL); 4340 4341 /* 4342 * The LCPLL register should be turned on by the BIOS. For now 4343 * let's just check its state and print errors in case 4344 * something is wrong. Don't even try to turn it on. 
4345 */ 4346 4347 if (val & LCPLL_CD_SOURCE_FCLK) 4348 DRM_ERROR("CDCLK source is not LCPLL\n"); 4349 4350 if (val & LCPLL_PLL_DISABLE) 4351 DRM_ERROR("LCPLL is disabled\n"); 4352 4353 if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC) 4354 DRM_ERROR("LCPLL not using non-SSC reference\n"); 4355 } 4356 4357 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) 4358 { 4359 struct drm_device *dev = &dev_priv->drm; 4360 struct intel_crtc *crtc; 4361 4362 for_each_intel_crtc(dev, crtc) 4363 I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n", 4364 pipe_name(crtc->pipe)); 4365 4366 I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2), 4367 "Display power well on\n"); 4368 I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, 4369 "SPLL enabled\n"); 4370 I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, 4371 "WRPLL1 enabled\n"); 4372 I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, 4373 "WRPLL2 enabled\n"); 4374 I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, 4375 "Panel power on\n"); 4376 I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, 4377 "CPU PWM1 enabled\n"); 4378 if (IS_HASWELL(dev_priv)) 4379 I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, 4380 "CPU PWM2 enabled\n"); 4381 I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, 4382 "PCH PWM1 enabled\n"); 4383 I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE, 4384 "Utility pin enabled\n"); 4385 I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, 4386 "PCH GTC enabled\n"); 4387 4388 /* 4389 * In theory we can still leave IRQs enabled, as long as only the HPD 4390 * interrupts remain enabled. We used to check for that, but since it's 4391 * gen-specific and since we only disable LCPLL after we fully disable 4392 * the interrupts, the check below should be enough. 4393 */ 4394 I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n"); 4395 } 4396 4397 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv) 4398 { 4399 if (IS_HASWELL(dev_priv)) 4400 return I915_READ(D_COMP_HSW); 4401 else 4402 return I915_READ(D_COMP_BDW); 4403 } 4404 4405 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val) 4406 { 4407 if (IS_HASWELL(dev_priv)) { 4408 if (sandybridge_pcode_write(dev_priv, 4409 GEN6_PCODE_WRITE_D_COMP, val)) 4410 DRM_DEBUG_KMS("Failed to write to D_COMP\n"); 4411 } else { 4412 I915_WRITE(D_COMP_BDW, val); 4413 POSTING_READ(D_COMP_BDW); 4414 } 4415 } 4416 4417 /* 4418 * This function implements pieces of two sequences from BSpec: 4419 * - Sequence for display software to disable LCPLL 4420 * - Sequence for display software to allow package C8+ 4421 * The steps implemented here are just the steps that actually touch the LCPLL 4422 * register. Callers should take care of disabling all the display engine 4423 * functions, doing the mode unset, fixing interrupts, etc. 
4424 */ 4425 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, 4426 bool switch_to_fclk, bool allow_power_down) 4427 { 4428 u32 val; 4429 4430 assert_can_disable_lcpll(dev_priv); 4431 4432 val = I915_READ(LCPLL_CTL); 4433 4434 if (switch_to_fclk) { 4435 val |= LCPLL_CD_SOURCE_FCLK; 4436 I915_WRITE(LCPLL_CTL, val); 4437 4438 if (wait_for_us(I915_READ(LCPLL_CTL) & 4439 LCPLL_CD_SOURCE_FCLK_DONE, 1)) 4440 DRM_ERROR("Switching to FCLK failed\n"); 4441 4442 val = I915_READ(LCPLL_CTL); 4443 } 4444 4445 val |= LCPLL_PLL_DISABLE; 4446 I915_WRITE(LCPLL_CTL, val); 4447 POSTING_READ(LCPLL_CTL); 4448 4449 if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1)) 4450 DRM_ERROR("LCPLL still locked\n"); 4451 4452 val = hsw_read_dcomp(dev_priv); 4453 val |= D_COMP_COMP_DISABLE; 4454 hsw_write_dcomp(dev_priv, val); 4455 ndelay(100); 4456 4457 if (wait_for((hsw_read_dcomp(dev_priv) & 4458 D_COMP_RCOMP_IN_PROGRESS) == 0, 1)) 4459 DRM_ERROR("D_COMP RCOMP still in progress\n"); 4460 4461 if (allow_power_down) { 4462 val = I915_READ(LCPLL_CTL); 4463 val |= LCPLL_POWER_DOWN_ALLOW; 4464 I915_WRITE(LCPLL_CTL, val); 4465 POSTING_READ(LCPLL_CTL); 4466 } 4467 } 4468 4469 /* 4470 * Fully restores LCPLL, disallowing power down and switching back to LCPLL 4471 * source. 4472 */ 4473 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) 4474 { 4475 u32 val; 4476 4477 val = I915_READ(LCPLL_CTL); 4478 4479 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK | 4480 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK) 4481 return; 4482 4483 /* 4484 * Make sure we're not on PC8 state before disabling PC8, otherwise 4485 * we'll hang the machine. To prevent PC8 state, just enable force_wake. 4486 */ 4487 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); 4488 4489 if (val & LCPLL_POWER_DOWN_ALLOW) { 4490 val &= ~LCPLL_POWER_DOWN_ALLOW; 4491 I915_WRITE(LCPLL_CTL, val); 4492 POSTING_READ(LCPLL_CTL); 4493 } 4494 4495 val = hsw_read_dcomp(dev_priv); 4496 val |= D_COMP_COMP_FORCE; 4497 val &= ~D_COMP_COMP_DISABLE; 4498 hsw_write_dcomp(dev_priv, val); 4499 4500 val = I915_READ(LCPLL_CTL); 4501 val &= ~LCPLL_PLL_DISABLE; 4502 I915_WRITE(LCPLL_CTL, val); 4503 4504 if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5)) 4505 DRM_ERROR("LCPLL not locked yet\n"); 4506 4507 if (val & LCPLL_CD_SOURCE_FCLK) { 4508 val = I915_READ(LCPLL_CTL); 4509 val &= ~LCPLL_CD_SOURCE_FCLK; 4510 I915_WRITE(LCPLL_CTL, val); 4511 4512 if (wait_for_us((I915_READ(LCPLL_CTL) & 4513 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) 4514 DRM_ERROR("Switching back to LCPLL failed\n"); 4515 } 4516 4517 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); 4518 4519 intel_update_cdclk(dev_priv); 4520 intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK"); 4521 } 4522 4523 /* 4524 * Package states C8 and deeper are really deep PC states that can only be 4525 * reached when all the devices on the system allow it, so even if the graphics 4526 * device allows PC8+, it doesn't mean the system will actually get to these 4527 * states. Our driver only allows PC8+ when going into runtime PM. 4528 * 4529 * The requirements for PC8+ are that all the outputs are disabled, the power 4530 * well is disabled and most interrupts are disabled, and these are also 4531 * requirements for runtime PM. When these conditions are met, we manually do 4532 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk 4533 * to Fclk. 
If we're in PC8+ and we get a non-hotplug interrupt, we can hard 4534 * hang the machine. 4535 * 4536 * When we really reach PC8 or deeper states (not just when we allow it) we lose 4537 * the state of some registers, so when we come back from PC8+ we need to 4538 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't 4539 * need to take care of the registers kept by RC6. Notice that this happens even 4540 * if we don't put the device in PCI D3 state (which is what currently happens 4541 * because of the runtime PM support). 4542 * 4543 * For more, read "Display Sequences for Package C8" in the hardware 4544 * documentation. 4545 */ 4546 static void hsw_enable_pc8(struct drm_i915_private *dev_priv) 4547 { 4548 u32 val; 4549 4550 DRM_DEBUG_KMS("Enabling package C8+\n"); 4551 4552 if (HAS_PCH_LPT_LP(dev_priv)) { 4553 val = I915_READ(SOUTH_DSPCLK_GATE_D); 4554 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; 4555 I915_WRITE(SOUTH_DSPCLK_GATE_D, val); 4556 } 4557 4558 lpt_disable_clkout_dp(dev_priv); 4559 hsw_disable_lcpll(dev_priv, true, true); 4560 } 4561 4562 static void hsw_disable_pc8(struct drm_i915_private *dev_priv) 4563 { 4564 u32 val; 4565 4566 DRM_DEBUG_KMS("Disabling package C8+\n"); 4567 4568 hsw_restore_lcpll(dev_priv); 4569 intel_init_pch_refclk(dev_priv); 4570 4571 if (HAS_PCH_LPT_LP(dev_priv)) { 4572 val = I915_READ(SOUTH_DSPCLK_GATE_D); 4573 val |= PCH_LP_PARTITION_LEVEL_DISABLE; 4574 I915_WRITE(SOUTH_DSPCLK_GATE_D, val); 4575 } 4576 } 4577 4578 static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv, 4579 bool enable) 4580 { 4581 i915_reg_t reg; 4582 u32 reset_bits, val; 4583 4584 if (IS_IVYBRIDGE(dev_priv)) { 4585 reg = GEN7_MSG_CTL; 4586 reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK; 4587 } else { 4588 reg = HSW_NDE_RSTWRN_OPT; 4589 reset_bits = RESET_PCH_HANDSHAKE_ENABLE; 4590 } 4591 4592 val = I915_READ(reg); 4593 4594 if (enable) 4595 val |= reset_bits; 4596 else 4597 val &= ~reset_bits; 4598 4599 I915_WRITE(reg, val); 4600 } 4601 4602 static void skl_display_core_init(struct drm_i915_private *dev_priv, 4603 bool resume) 4604 { 4605 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4606 struct i915_power_well *well; 4607 4608 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 4609 4610 /* enable PCH reset handshake */ 4611 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); 4612 4613 /* enable PG1 and Misc I/O */ 4614 mutex_lock(&power_domains->lock); 4615 4616 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 4617 intel_power_well_enable(dev_priv, well); 4618 4619 well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO); 4620 intel_power_well_enable(dev_priv, well); 4621 4622 mutex_unlock(&power_domains->lock); 4623 4624 intel_cdclk_init(dev_priv); 4625 4626 gen9_dbuf_enable(dev_priv); 4627 4628 if (resume && dev_priv->csr.dmc_payload) 4629 intel_csr_load_program(dev_priv); 4630 } 4631 4632 static void skl_display_core_uninit(struct drm_i915_private *dev_priv) 4633 { 4634 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4635 struct i915_power_well *well; 4636 4637 gen9_disable_dc_states(dev_priv); 4638 4639 gen9_dbuf_disable(dev_priv); 4640 4641 intel_cdclk_uninit(dev_priv); 4642 4643 /* The spec doesn't call for removing the reset handshake flag */ 4644 /* disable PG1 and Misc I/O */ 4645 4646 mutex_lock(&power_domains->lock); 4647 4648 /* 4649 * BSpec says to keep the MISC IO power well enabled here, only 4650 * remove our request for power well 1. 4651 * Note that even though the driver's request is removed, power well 1 4652 * may stay enabled after this due to DMC's own request on it. 4653 */ 4654 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 4655 intel_power_well_disable(dev_priv, well); 4656 4657 mutex_unlock(&power_domains->lock); 4658 4659 usleep_range(10, 30); /* 10 us delay per Bspec */ 4660 } 4661
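/*
 * Editor's note (illustrative, not an exported helper): the
 * *_display_core_init()/_uninit() sequences in this block toggle specific
 * wells directly with the low-level helpers under power_domains->lock,
 * bypassing the usual domain refcounting:
 *
 *	mutex_lock(&power_domains->lock);
 *	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
 *	intel_power_well_enable(dev_priv, well);	// or _disable()
 *	mutex_unlock(&power_domains->lock);
 *
 * This is acceptable only on these init/suspend paths, where nothing else
 * can be toggling the wells concurrently.
 */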
4662 static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume) 4663 { 4664 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4665 struct i915_power_well *well; 4666 4667 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 4668 4669 /* 4670 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT 4671 * or else the reset will hang because there is no PCH to respond. 4672 * Move the handshake programming to the initialization sequence; 4673 * previously it was left up to the BIOS. 4674 */ 4675 intel_pch_reset_handshake(dev_priv, false); 4676 4677 /* Enable PG1 */ 4678 mutex_lock(&power_domains->lock); 4679 4680 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 4681 intel_power_well_enable(dev_priv, well); 4682 4683 mutex_unlock(&power_domains->lock); 4684 4685 intel_cdclk_init(dev_priv); 4686 4687 gen9_dbuf_enable(dev_priv); 4688 4689 if (resume && dev_priv->csr.dmc_payload) 4690 intel_csr_load_program(dev_priv); 4691 } 4692 4693 static void bxt_display_core_uninit(struct drm_i915_private *dev_priv) 4694 { 4695 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4696 struct i915_power_well *well; 4697 4698 gen9_disable_dc_states(dev_priv); 4699 4700 gen9_dbuf_disable(dev_priv); 4701 4702 intel_cdclk_uninit(dev_priv); 4703 4704 /* The spec doesn't call for removing the reset handshake flag */ 4705 4706 /* 4707 * Disable PW1 (PG1). 4708 * Note that even though the driver's request is removed, power well 1 4709 * may stay enabled after this due to DMC's own request on it. 4710 */ 4711 mutex_lock(&power_domains->lock); 4712 4713 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 4714 intel_power_well_disable(dev_priv, well); 4715 4716 mutex_unlock(&power_domains->lock); 4717 4718 usleep_range(10, 30); /* 10 us delay per Bspec */ 4719 } 4720 4721 static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume) 4722 { 4723 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4724 struct i915_power_well *well; 4725 4726 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 4727 4728 /* 1. Enable PCH Reset Handshake */ 4729 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); 4730 4731 /* 2-3. */ 4732 intel_combo_phy_init(dev_priv); 4733 4734 /* 4735 * 4. Enable Power Well 1 (PG1). 4736 * The AUX IO power wells will be enabled on demand. 4737 */ 4738 mutex_lock(&power_domains->lock); 4739 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 4740 intel_power_well_enable(dev_priv, well); 4741 mutex_unlock(&power_domains->lock); 4742 4743 /* 5. Enable CD clock */ 4744 intel_cdclk_init(dev_priv); 4745 4746 /* 6. Enable DBUF */ 4747 gen9_dbuf_enable(dev_priv); 4748 4749 if (resume && dev_priv->csr.dmc_payload) 4750 intel_csr_load_program(dev_priv); 4751 } 4752
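/*
 * Editor's note: each *_display_core_uninit() in this block mirrors its
 * init counterpart in reverse order (DBUF, then CDCLK, then PG1, then the
 * combo PHYs where applicable). The DMC/CSR firmware payload is only
 * reprogrammed on the init side, and only when resuming:
 *
 *	if (resume && dev_priv->csr.dmc_payload)
 *		intel_csr_load_program(dev_priv);
 */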
4753 static void cnl_display_core_uninit(struct drm_i915_private *dev_priv) 4754 { 4755 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4756 struct i915_power_well *well; 4757 4758 gen9_disable_dc_states(dev_priv); 4759 4760 /* 1. Disable all display engine functions -> already done */ 4761 4762 /* 2. Disable DBUF */ 4763 gen9_dbuf_disable(dev_priv); 4764 4765 /* 3. Disable CD clock */ 4766 intel_cdclk_uninit(dev_priv); 4767 4768 /* 4769 * 4. Disable Power Well 1 (PG1). 4770 * The AUX IO power wells are toggled on demand, so they are already 4771 * disabled at this point. 4772 */ 4773 mutex_lock(&power_domains->lock); 4774 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 4775 intel_power_well_disable(dev_priv, well); 4776 mutex_unlock(&power_domains->lock); 4777 4778 usleep_range(10, 30); /* 10 us delay per Bspec */ 4779 4780 /* 5. */ 4781 intel_combo_phy_uninit(dev_priv); 4782 } 4783 4784 static void icl_display_core_init(struct drm_i915_private *dev_priv, 4785 bool resume) 4786 { 4787 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4788 struct i915_power_well *well; 4789 4790 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 4791 4792 /* 1. Enable PCH reset handshake. */ 4793 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); 4794 4795 /* 2. Initialize all combo phys */ 4796 intel_combo_phy_init(dev_priv); 4797 4798 /* 4799 * 3. Enable Power Well 1 (PG1). 4800 * The AUX IO power wells will be enabled on demand. 4801 */ 4802 mutex_lock(&power_domains->lock); 4803 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 4804 intel_power_well_enable(dev_priv, well); 4805 mutex_unlock(&power_domains->lock); 4806 4807 /* 4. Enable CDCLK. */ 4808 intel_cdclk_init(dev_priv); 4809 4810 /* 5. Enable DBUF. */ 4811 icl_dbuf_enable(dev_priv); 4812 4813 /* 6. Setup MBUS. */ 4814 icl_mbus_init(dev_priv); 4815 4816 if (resume && dev_priv->csr.dmc_payload) 4817 intel_csr_load_program(dev_priv); 4818 } 4819 4820 static void icl_display_core_uninit(struct drm_i915_private *dev_priv) 4821 { 4822 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4823 struct i915_power_well *well; 4824 4825 gen9_disable_dc_states(dev_priv); 4826 4827 /* 1. Disable all display engine functions -> already done */ 4828 4829 /* 2. Disable DBUF */ 4830 icl_dbuf_disable(dev_priv); 4831 4832 /* 3. Disable CD clock */ 4833 intel_cdclk_uninit(dev_priv); 4834 4835 /* 4836 * 4. Disable Power Well 1 (PG1). 4837 * The AUX IO power wells are toggled on demand, so they are already 4838 * disabled at this point. 4839 */ 4840 mutex_lock(&power_domains->lock); 4841 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 4842 intel_power_well_disable(dev_priv, well); 4843 mutex_unlock(&power_domains->lock); 4844 4845 /* 5. */ 4846 intel_combo_phy_uninit(dev_priv); 4847 } 4848
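/*
 * Editor's note: chv_phy_control_init() below never reads
 * DISPLAY_PHY_CONTROL back (reads can corrupt the register); it rebuilds
 * the shadow value from the DPLL lane-ready status instead. The
 * per-channel rule, in outline (status and ctl stand for the locals used
 * below):
 *
 *	mask = status & DPLL_PORTB_READY_MASK;
 *	if (mask == 0xf)	// all lanes ready: no override needed
 *		mask = 0x0;
 *	else			// partial: force the powerdown override
 *		ctl |= PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
 *	ctl |= PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
 */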
4849 static void chv_phy_control_init(struct drm_i915_private *dev_priv) 4850 { 4851 struct i915_power_well *cmn_bc = 4852 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); 4853 struct i915_power_well *cmn_d = 4854 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D); 4855 4856 /* 4857 * DISPLAY_PHY_CONTROL can get corrupted if read. As a 4858 * workaround never ever read DISPLAY_PHY_CONTROL, and 4859 * instead maintain a shadow copy ourselves. Use the actual 4860 * power well state and lane status to reconstruct the 4861 * expected initial value. 4862 */ 4863 dev_priv->chv_phy_control = 4864 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) | 4865 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) | 4866 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) | 4867 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) | 4868 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0); 4869 4870 /* 4871 * If all lanes are disabled we leave the override disabled 4872 * with all power down bits cleared to match the state we 4873 * would use after disabling the port. Otherwise enable the 4874 * override and set the lane powerdown bits according to the 4875 * current lane status. 4876 */ 4877 if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) { 4878 u32 status = I915_READ(DPLL(PIPE_A)); 4879 unsigned int mask; 4880 4881 mask = status & DPLL_PORTB_READY_MASK; 4882 if (mask == 0xf) 4883 mask = 0x0; 4884 else 4885 dev_priv->chv_phy_control |= 4886 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0); 4887 4888 dev_priv->chv_phy_control |= 4889 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0); 4890 4891 mask = (status & DPLL_PORTC_READY_MASK) >> 4; 4892 if (mask == 0xf) 4893 mask = 0x0; 4894 else 4895 dev_priv->chv_phy_control |= 4896 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1); 4897 4898 dev_priv->chv_phy_control |= 4899 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1); 4900 4901 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0); 4902 4903 dev_priv->chv_phy_assert[DPIO_PHY0] = false; 4904 } else { 4905 dev_priv->chv_phy_assert[DPIO_PHY0] = true; 4906 } 4907 4908 if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) { 4909 u32 status = I915_READ(DPIO_PHY_STATUS); 4910 unsigned int mask; 4911 4912 mask = status & DPLL_PORTD_READY_MASK; 4913 4914 if (mask == 0xf) 4915 mask = 0x0; 4916 else 4917 dev_priv->chv_phy_control |= 4918 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0); 4919 4920 dev_priv->chv_phy_control |= 4921 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0); 4922 4923 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1); 4924 4925 dev_priv->chv_phy_assert[DPIO_PHY1] = false; 4926 } else { 4927 dev_priv->chv_phy_assert[DPIO_PHY1] = true; 4928 } 4929 4930 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control); 4931 4932 DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n", 4933 dev_priv->chv_phy_control); 4934 } 4935 4936 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) 4937 { 4938 struct i915_power_well *cmn = 4939 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); 4940 struct i915_power_well *disp2d = 4941 lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D); 4942 4943 /* If the display might be already active skip this */ 4944 if (cmn->desc->ops->is_enabled(dev_priv, cmn) && 4945 disp2d->desc->ops->is_enabled(dev_priv, disp2d) && 4946 I915_READ(DPIO_CTL) & DPIO_CMNRST) 4947 return; 4948 4949 DRM_DEBUG_KMS("toggling display PHY side reset\n"); 4950 4951 /* cmnlane needs DPLL registers */ 4952 disp2d->desc->ops->enable(dev_priv, disp2d); 4953 4954 /* 4955 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx: 4956 * Need to assert and de-assert PHY SB reset by gating the 4957 * common lane power, then un-gating it. 4958 * Simply ungating isn't enough to reset the PHY enough to get 4959 * ports and lanes running. 4960 */ 4961 cmn->desc->ops->disable(dev_priv, cmn); 4962 } 4963
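/*
 * Editor's note: the asserts below query the Punit over the sideband bus;
 * an IP block counts as power gated when its SSPM0 register reads back
 * SSC_PWR_GATE. An additional assert for some other block would follow the
 * same shape (PUNIT_REG_FOOSSPM0 is a hypothetical register name used only
 * for illustration):
 *
 *	WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_FOOSSPM0),
 *	     "FOO not power gated\n");
 */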
4964 static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0) 4965 { 4966 bool ret; 4967 4968 vlv_punit_get(dev_priv); 4969 ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE; 4970 vlv_punit_put(dev_priv); 4971 4972 return ret; 4973 } 4974 4975 static void assert_ved_power_gated(struct drm_i915_private *dev_priv) 4976 { 4977 WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0), 4978 "VED not power gated\n"); 4979 } 4980 4981 static void assert_isp_power_gated(struct drm_i915_private *dev_priv) 4982 { 4983 static const struct pci_device_id isp_ids[] = { 4984 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)}, 4985 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)}, 4986 {} 4987 }; 4988 4989 WARN(!pci_dev_present(isp_ids) && 4990 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0), 4991 "ISP not power gated\n"); 4992 } 4993 4994 static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv); 4995 4996 /** 4997 * intel_power_domains_init_hw - initialize hardware power domain state 4998 * @i915: i915 device instance 4999 * @resume: Called from resume code paths or not 5000 * 5001 * This function initializes the hardware power domain state and enables all 5002 * power wells belonging to the INIT power domain. Power wells in other 5003 * domains (and not in the INIT domain) are referenced or disabled by 5004 * intel_modeset_readout_hw_state(). After that the reference count of each 5005 * power well must match its HW enabled state, see 5006 * intel_power_domains_verify_state(). 5007 * 5008 * It will return with power domains disabled (to be enabled later by 5009 * intel_power_domains_enable()) and must be paired with 5010 * intel_power_domains_driver_remove(). 5011 */ 5012 void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume) 5013 { 5014 struct i915_power_domains *power_domains = &i915->power_domains; 5015 5016 power_domains->initializing = true; 5017 5018 /* Must happen before power domain init on VLV/CHV */ 5019 intel_update_rawclk(i915); 5020 5021 if (INTEL_GEN(i915) >= 11) { 5022 icl_display_core_init(i915, resume); 5023 } else if (IS_CANNONLAKE(i915)) { 5024 cnl_display_core_init(i915, resume); 5025 } else if (IS_GEN9_BC(i915)) { 5026 skl_display_core_init(i915, resume); 5027 } else if (IS_GEN9_LP(i915)) { 5028 bxt_display_core_init(i915, resume); 5029 } else if (IS_CHERRYVIEW(i915)) { 5030 mutex_lock(&power_domains->lock); 5031 chv_phy_control_init(i915); 5032 mutex_unlock(&power_domains->lock); 5033 assert_isp_power_gated(i915); 5034 } else if (IS_VALLEYVIEW(i915)) { 5035 mutex_lock(&power_domains->lock); 5036 vlv_cmnlane_wa(i915); 5037 mutex_unlock(&power_domains->lock); 5038 assert_ved_power_gated(i915); 5039 assert_isp_power_gated(i915); 5040 } else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) { 5041 hsw_assert_cdclk(i915); 5042 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915)); 5043 } else if (IS_IVYBRIDGE(i915)) { 5044 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915)); 5045 } 5046 5047 /* 5048 * Keep all power wells enabled for any dependent HW access during 5049 * initialization and to make sure we keep BIOS enabled display HW 5050 * resources powered until display HW readout is complete. We drop 5051 * this reference in intel_power_domains_enable(). 5052 */ 5053 power_domains->wakeref = 5054 intel_display_power_get(i915, POWER_DOMAIN_INIT); 5055 5056 /* Disable power support if the user asked for it. */ 5057 if (!i915_modparams.disable_power_well) 5058 intel_display_power_get(i915, POWER_DOMAIN_INIT); 5059 intel_power_domains_sync_hw(i915); 5060 5061 power_domains->initializing = false; 5062 } 5063
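/*
 * Editor's note (illustrative ordering only, per the kerneldoc of each
 * entry point): a driver load/unload path is expected to pair these
 * functions as follows:
 *
 *	intel_power_domains_init(i915);
 *	intel_power_domains_init_hw(i915, false);
 *	...	display HW readout and modeset init
 *	intel_power_domains_enable(i915);	// drops the INIT wakeref
 *	...
 *	intel_power_domains_disable(i915);
 *	intel_power_domains_driver_remove(i915);
 *	intel_power_domains_cleanup(i915);
 */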
5064 /** 5065 * intel_power_domains_driver_remove - deinitialize hw power domain state 5066 * @i915: i915 device instance 5067 * 5068 * De-initializes the display power domain HW state. It also ensures that the 5069 * device stays powered up so that the driver can be reloaded. 5070 * 5071 * It must be called with power domains already disabled (after a call to 5072 * intel_power_domains_disable()) and must be paired with 5073 * intel_power_domains_init_hw(). 5074 */ 5075 void intel_power_domains_driver_remove(struct drm_i915_private *i915) 5076 { 5077 intel_wakeref_t wakeref __maybe_unused = 5078 fetch_and_zero(&i915->power_domains.wakeref); 5079 5080 /* Remove the refcount we took to keep power well support disabled. */ 5081 if (!i915_modparams.disable_power_well) 5082 intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT); 5083 5084 intel_display_power_flush_work_sync(i915); 5085 5086 intel_power_domains_verify_state(i915); 5087 5088 /* Keep the power well enabled, but cancel its rpm wakeref. */ 5089 intel_runtime_pm_put(&i915->runtime_pm, wakeref); 5090 } 5091 5092 /** 5093 * intel_power_domains_enable - enable toggling of display power wells 5094 * @i915: i915 device instance 5095 * 5096 * Enable the on-demand enabling/disabling of the display power wells. Note that 5097 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled 5098 * only at specific points of the display modeset sequence, thus they are not 5099 * affected by the intel_power_domains_enable()/disable() calls. The purpose 5100 * of these functions is to keep the rest of the power wells enabled until the end 5101 * of display HW readout (which will acquire the power references reflecting 5102 * the current HW state). 5103 */ 5104 void intel_power_domains_enable(struct drm_i915_private *i915) 5105 { 5106 intel_wakeref_t wakeref __maybe_unused = 5107 fetch_and_zero(&i915->power_domains.wakeref); 5108 5109 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); 5110 intel_power_domains_verify_state(i915); 5111 } 5112 5113 /** 5114 * intel_power_domains_disable - disable toggling of display power wells 5115 * @i915: i915 device instance 5116 * 5117 * Disable the on-demand enabling/disabling of the display power wells. See 5118 * intel_power_domains_enable() for which power wells this call controls. 5119 */ 5120 void intel_power_domains_disable(struct drm_i915_private *i915) 5121 { 5122 struct i915_power_domains *power_domains = &i915->power_domains; 5123 5124 WARN_ON(power_domains->wakeref); 5125 power_domains->wakeref = 5126 intel_display_power_get(i915, POWER_DOMAIN_INIT); 5127 5128 intel_power_domains_verify_state(i915); 5129 } 5130
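/*
 * Editor's note (illustrative ordering only): for system suspend the
 * expected pairing, per the kerneldoc below, is disable() before suspend(),
 * mirrored on the way back up:
 *
 *	intel_power_domains_disable(i915);
 *	intel_power_domains_suspend(i915, I915_DRM_SUSPEND_MEM);
 *	...	system sleeps
 *	intel_power_domains_resume(i915);
 *	intel_power_domains_enable(i915);
 */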
5131 /** 5132 * intel_power_domains_suspend - suspend power domain state 5133 * @i915: i915 device instance 5134 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation) 5135 * 5136 * This function prepares the hardware power domain state before entering 5137 * system suspend. 5138 * 5139 * It must be called with power domains already disabled (after a call to 5140 * intel_power_domains_disable()) and paired with intel_power_domains_resume(). 5141 */ 5142 void intel_power_domains_suspend(struct drm_i915_private *i915, 5143 enum i915_drm_suspend_mode suspend_mode) 5144 { 5145 struct i915_power_domains *power_domains = &i915->power_domains; 5146 intel_wakeref_t wakeref __maybe_unused = 5147 fetch_and_zero(&power_domains->wakeref); 5148 5149 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); 5150 5151 /* 5152 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9 5153 * support don't manually deinit the power domains. This also means the 5154 * CSR/DMC firmware will stay active, it will power down any HW 5155 * resources as required and also enable deeper system power states 5156 * that would be blocked if the firmware was inactive. 5157 */ 5158 if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) && 5159 suspend_mode == I915_DRM_SUSPEND_IDLE && 5160 i915->csr.dmc_payload) { 5161 intel_display_power_flush_work(i915); 5162 intel_power_domains_verify_state(i915); 5163 return; 5164 } 5165 5166 /* 5167 * Even if power well support was disabled we still want to disable 5168 * power wells if power domains must be deinitialized for suspend. 5169 */ 5170 if (!i915_modparams.disable_power_well) 5171 intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT); 5172 5173 intel_display_power_flush_work(i915); 5174 intel_power_domains_verify_state(i915); 5175 5176 if (INTEL_GEN(i915) >= 11) 5177 icl_display_core_uninit(i915); 5178 else if (IS_CANNONLAKE(i915)) 5179 cnl_display_core_uninit(i915); 5180 else if (IS_GEN9_BC(i915)) 5181 skl_display_core_uninit(i915); 5182 else if (IS_GEN9_LP(i915)) 5183 bxt_display_core_uninit(i915); 5184 5185 power_domains->display_core_suspended = true; 5186 } 5187 5188 /** 5189 * intel_power_domains_resume - resume power domain state 5190 * @i915: i915 device instance 5191 * 5192 * This function resumes the hardware power domain state during system resume. 5193 * 5194 * It will return with power domain support disabled (to be enabled later by 5195 * intel_power_domains_enable()) and must be paired with 5196 * intel_power_domains_suspend(). 5197 */ 5198 void intel_power_domains_resume(struct drm_i915_private *i915) 5199 { 5200 struct i915_power_domains *power_domains = &i915->power_domains; 5201 5202 if (power_domains->display_core_suspended) { 5203 intel_power_domains_init_hw(i915, true); 5204 power_domains->display_core_suspended = false; 5205 } else { 5206 WARN_ON(power_domains->wakeref); 5207 power_domains->wakeref = 5208 intel_display_power_get(i915, POWER_DOMAIN_INIT); 5209 } 5210 5211 intel_power_domains_verify_state(i915); 5212 } 5213 5214 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 5215 5216 static void intel_power_domains_dump_info(struct drm_i915_private *i915) 5217 { 5218 struct i915_power_domains *power_domains = &i915->power_domains; 5219 struct i915_power_well *power_well; 5220 5221 for_each_power_well(i915, power_well) { 5222 enum intel_display_power_domain domain; 5223 5224 DRM_DEBUG_DRIVER("%-25s %d\n", 5225 power_well->desc->name, power_well->count); 5226 5227 for_each_power_domain(domain, power_well->desc->domains) 5228 DRM_DEBUG_DRIVER(" %-23s %d\n", 5229 intel_display_power_domain_str(domain), 5230 power_domains->domain_use_count[domain]); 5231 } 5232 } 5233 5234 /** 5235 * intel_power_domains_verify_state - verify the HW/SW state for all power wells 5236 * @i915: i915 device instance 5237 * 5238 * Verify if the reference count of each power well matches its HW enabled 5239 * state and the total refcount of the domains it belongs to.
This must be 5240 * called after modeset HW state sanitization, which is responsible for 5241 * acquiring reference counts for any power wells in use and disabling the 5242 * ones left on by BIOS but not required by any active output. 5243 */ 5244 static void intel_power_domains_verify_state(struct drm_i915_private *i915) 5245 { 5246 struct i915_power_domains *power_domains = &i915->power_domains; 5247 struct i915_power_well *power_well; 5248 bool dump_domain_info; 5249 5250 mutex_lock(&power_domains->lock); 5251 5252 verify_async_put_domains_state(power_domains); 5253 5254 dump_domain_info = false; 5255 for_each_power_well(i915, power_well) { 5256 enum intel_display_power_domain domain; 5257 int domains_count; 5258 bool enabled; 5259 5260 enabled = power_well->desc->ops->is_enabled(i915, power_well); 5261 if ((power_well->count || power_well->desc->always_on) != 5262 enabled) 5263 DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)", 5264 power_well->desc->name, 5265 power_well->count, enabled); 5266 5267 domains_count = 0; 5268 for_each_power_domain(domain, power_well->desc->domains) 5269 domains_count += power_domains->domain_use_count[domain]; 5270 5271 if (power_well->count != domains_count) { 5272 DRM_ERROR("power well %s refcount/domain refcount mismatch " 5273 "(refcount %d/domains refcount %d)\n", 5274 power_well->desc->name, power_well->count, 5275 domains_count); 5276 dump_domain_info = true; 5277 } 5278 } 5279 5280 if (dump_domain_info) { 5281 static bool dumped; 5282 5283 if (!dumped) { 5284 intel_power_domains_dump_info(i915); 5285 dumped = true; 5286 } 5287 } 5288 5289 mutex_unlock(&power_domains->lock); 5290 } 5291 5292 #else 5293 5294 static void intel_power_domains_verify_state(struct drm_i915_private *i915) 5295 { 5296 } 5297 5298 #endif 5299 5300 void intel_display_power_suspend_late(struct drm_i915_private *i915) 5301 { 5302 if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915)) 5303 bxt_enable_dc9(i915); 5304 else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) 5305 hsw_enable_pc8(i915); 5306 } 5307 5308 void intel_display_power_resume_early(struct drm_i915_private *i915) 5309 { 5310 if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915)) { 5311 gen9_sanitize_dc_state(i915); 5312 bxt_disable_dc9(i915); 5313 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { 5314 hsw_disable_pc8(i915); 5315 } 5316 } 5317 5318 void intel_display_power_suspend(struct drm_i915_private *i915) 5319 { 5320 if (INTEL_GEN(i915) >= 11) { 5321 icl_display_core_uninit(i915); 5322 bxt_enable_dc9(i915); 5323 } else if (IS_GEN9_LP(i915)) { 5324 bxt_display_core_uninit(i915); 5325 bxt_enable_dc9(i915); 5326 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { 5327 hsw_enable_pc8(i915); 5328 } 5329 } 5330 5331 void intel_display_power_resume(struct drm_i915_private *i915) 5332 { 5333 if (INTEL_GEN(i915) >= 11) { 5334 bxt_disable_dc9(i915); 5335 icl_display_core_init(i915, true); 5336 if (i915->csr.dmc_payload) { 5337 if (i915->csr.allowed_dc_mask & 5338 DC_STATE_EN_UPTO_DC6) 5339 skl_enable_dc6(i915); 5340 else if (i915->csr.allowed_dc_mask & 5341 DC_STATE_EN_UPTO_DC5) 5342 gen9_enable_dc5(i915); 5343 } 5344 } else if (IS_GEN9_LP(i915)) { 5345 bxt_disable_dc9(i915); 5346 bxt_display_core_init(i915, true); 5347 if (i915->csr.dmc_payload && 5348 (i915->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)) 5349 gen9_enable_dc5(i915); 5350 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { 5351 hsw_disable_pc8(i915); 5352 } 5353 } 5354
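/*
 * Editor's note: the intel_display_power_suspend{_late,}() and
 * intel_display_power_resume{_early,}() helpers above are thin platform
 * switches meant for the driver's system and runtime PM paths. A
 * runtime-suspend cycle built on them would look roughly like this
 * (illustrative only):
 *
 *	intel_display_power_suspend_late(i915);	// enter DC9 or PC8
 *	...	device sits in its low-power state
 *	intel_display_power_resume_early(i915);	// leave DC9 or PC8
 */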